proc.c
1 /*-------------------------------------------------------------------------
2  *
3  * proc.c
4  * routines to manage per-process shared memory data structure
5  *
6  * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  *
10  * IDENTIFICATION
11  * src/backend/storage/lmgr/proc.c
12  *
13  *-------------------------------------------------------------------------
14  */
15 /*
16  * Interface (a):
17  * ProcSleep(), ProcWakeup(),
18  * ProcQueueAlloc() -- create a shm queue for sleeping processes
19  * ProcQueueInit() -- create a queue without allocing memory
20  *
21  * Waiting for a lock causes the backend to be put to sleep. Whoever releases
22  * the lock wakes the process up again (and gives it an error code so it knows
23  * whether it was awoken on an error condition).
24  *
25  * Interface (b):
26  *
27  * ProcReleaseLocks -- frees the locks associated with current transaction
28  *
29  * ProcKill -- destroys the shared memory state (and locks)
30  * associated with the process.
31  */
32 #include "postgres.h"
33 
34 #include <signal.h>
35 #include <unistd.h>
36 #include <sys/time.h>
37 
38 #include "access/transam.h"
39 #include "access/twophase.h"
40 #include "access/xact.h"
41 #include "miscadmin.h"
42 #include "pgstat.h"
43 #include "postmaster/autovacuum.h"
44 #include "replication/slot.h"
45 #include "replication/syncrep.h"
46 #include "replication/walsender.h"
47 #include "storage/condition_variable.h"
48 #include "storage/ipc.h"
49 #include "storage/lmgr.h"
50 #include "storage/pmsignal.h"
51 #include "storage/proc.h"
52 #include "storage/procarray.h"
53 #include "storage/procsignal.h"
54 #include "storage/spin.h"
55 #include "storage/standby.h"
56 #include "utils/timeout.h"
57 #include "utils/timestamp.h"
58 
59 /* GUC variables */
60 int DeadlockTimeout = 1000;
61 int StatementTimeout = 0;
62 int LockTimeout = 0;
63 int IdleInTransactionSessionTimeout = 0;
64 int IdleSessionTimeout = 0;
65 bool log_lock_waits = false;
66 
67 /* Pointer to this process's PGPROC struct, if any */
68 PGPROC *MyProc = NULL;
69 
70 /*
71  * This spinlock protects the freelist of recycled PGPROC structures.
72  * We cannot use an LWLock because the LWLock manager depends on already
73  * having a PGPROC and a wait semaphore! But these structures are touched
74  * relatively infrequently (only at backend startup or shutdown) and not for
75  * very long, so a spinlock is okay.
76  */
77 NON_EXEC_STATIC slock_t *ProcStructLock = NULL;
78 
79 /* Pointers to shared-memory structures */
80 PROC_HDR *ProcGlobal = NULL;
81 NON_EXEC_STATIC PGPROC *AuxiliaryProcs = NULL;
82 PGPROC *PreparedXactProcs = NULL;
83 
84 /* If we are waiting for a lock, this points to the associated LOCALLOCK */
85 static LOCALLOCK *lockAwaited = NULL;
86 
88 
89 /* Is a deadlock check pending? */
90 static volatile sig_atomic_t got_deadlock_timeout;
91 
92 static void RemoveProcFromArray(int code, Datum arg);
93 static void ProcKill(int code, Datum arg);
94 static void AuxiliaryProcKill(int code, Datum arg);
95 static void CheckDeadLock(void);
96 
97 
98 /*
99  * Report shared-memory space needed by InitProcGlobal.
100  */
101 Size
102 ProcGlobalShmemSize(void)
103 {
104  Size size = 0;
105  Size TotalProcs =
106  add_size(MaxBackends, add_size(NUM_AUXILIARY_PROCS, max_prepared_xacts));
107 
108  /* ProcGlobal */
109  size = add_size(size, sizeof(PROC_HDR));
110  size = add_size(size, mul_size(TotalProcs, sizeof(PGPROC)));
111  size = add_size(size, sizeof(slock_t));
112 
113  size = add_size(size, mul_size(TotalProcs, sizeof(*ProcGlobal->xids)));
114  size = add_size(size, mul_size(TotalProcs, sizeof(*ProcGlobal->subxidStates)));
115  size = add_size(size, mul_size(TotalProcs, sizeof(*ProcGlobal->statusFlags)));
116 
117  return size;
118 }
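/*
 * Illustrative sketch (not part of this file): estimates like the one above
 * are summed with the overflow-checked shmem helpers before the shared
 * segment is sized (cf. CreateSharedMemoryAndSemaphores() in ipci.c),
 * roughly:
 *
 *     Size size = 100000;                           -- fixed slop
 *     size = add_size(size, ProcGlobalShmemSize());
 *     size = add_size(size, ...other modules...);
 *     -- then a segment of that size is created, and InitProcGlobal()
 *     -- carves its pieces out of it with ShmemInitStruct()/ShmemAlloc().
 */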
119 
120 /*
121  * Report number of semaphores needed by InitProcGlobal.
122  */
123 int
124 ProcGlobalSemas(void)
125 {
126  /*
127  * We need a sema per backend (including autovacuum), plus one for each
128  * auxiliary process.
129  */
130  return MaxBackends + NUM_AUXILIARY_PROCS;
131 }
132 
133 /*
134  * InitProcGlobal -
135  * Initialize the global process table during postmaster or standalone
136  * backend startup.
137  *
138  * We also create all the per-process semaphores we will need to support
139  * the requested number of backends. We used to allocate semaphores
140  * only when backends were actually started up, but that is bad because
141  * it lets Postgres fail under load --- a lot of Unix systems are
142  * (mis)configured with small limits on the number of semaphores, and
143  * running out when trying to start another backend is a common failure.
144  * So, now we grab enough semaphores to support the desired max number
145  * of backends immediately at initialization --- if the sysadmin has set
146  * MaxConnections, max_worker_processes, max_wal_senders, or
147  * autovacuum_max_workers higher than his kernel will support, he'll
148  * find out sooner rather than later.
149  *
150  * Another reason for creating semaphores here is that the semaphore
151  * implementation typically requires us to create semaphores in the
152  * postmaster, not in backends.
153  *
154  * Note: this is NOT called by individual backends under a postmaster,
155  * not even in the EXEC_BACKEND case. The ProcGlobal and AuxiliaryProcs
156  * pointers must be propagated specially for EXEC_BACKEND operation.
157  */
158 void
159 InitProcGlobal(void)
160 {
161  PGPROC *procs;
162  int i,
163  j;
164  bool found;
165  uint32 TotalProcs = MaxBackends + NUM_AUXILIARY_PROCS + max_prepared_xacts;
166 
167  /* Create the ProcGlobal shared structure */
168  ProcGlobal = (PROC_HDR *)
169  ShmemInitStruct("Proc Header", sizeof(PROC_HDR), &found);
170  Assert(!found);
171 
172  /*
173  * Initialize the data structures.
174  */
175  ProcGlobal->spins_per_delay = DEFAULT_SPINS_PER_DELAY;
176  ProcGlobal->freeProcs = NULL;
177  ProcGlobal->autovacFreeProcs = NULL;
178  ProcGlobal->bgworkerFreeProcs = NULL;
179  ProcGlobal->walsenderFreeProcs = NULL;
180  ProcGlobal->startupProc = NULL;
181  ProcGlobal->startupProcPid = 0;
182  ProcGlobal->startupBufferPinWaitBufId = -1;
183  ProcGlobal->walwriterLatch = NULL;
184  ProcGlobal->checkpointerLatch = NULL;
185  pg_atomic_init_u32(&ProcGlobal->procArrayGroupFirst, INVALID_PGPROCNO);
186  pg_atomic_init_u32(&ProcGlobal->clogGroupFirst, INVALID_PGPROCNO);
187 
188  /*
189  * Create and initialize all the PGPROC structures we'll need. There are
190  * five separate consumers: (1) normal backends, (2) autovacuum workers
191  * and the autovacuum launcher, (3) background workers, (4) auxiliary
192  * processes, and (5) prepared transactions. Each PGPROC structure is
193  * dedicated to exactly one of these purposes, and they do not move
194  * between groups.
195  */
196  procs = (PGPROC *) ShmemAlloc(TotalProcs * sizeof(PGPROC));
197  MemSet(procs, 0, TotalProcs * sizeof(PGPROC));
198  ProcGlobal->allProcs = procs;
199  /* XXX allProcCount isn't really all of them; it excludes prepared xacts */
200  ProcGlobal->allProcCount = MaxBackends + NUM_AUXILIARY_PROCS;
201 
202  /*
203  * Allocate arrays mirroring PGPROC fields in a dense manner. See
204  * PROC_HDR.
205  *
206  * XXX: It might make sense to increase padding for these arrays, given
207  * how hotly they are accessed.
208  */
209  ProcGlobal->xids =
210  (TransactionId *) ShmemAlloc(TotalProcs * sizeof(*ProcGlobal->xids));
211  MemSet(ProcGlobal->xids, 0, TotalProcs * sizeof(*ProcGlobal->xids));
212  ProcGlobal->subxidStates = (XidCacheStatus *) ShmemAlloc(TotalProcs * sizeof(*ProcGlobal->subxidStates));
213  MemSet(ProcGlobal->subxidStates, 0, TotalProcs * sizeof(*ProcGlobal->subxidStates));
214  ProcGlobal->statusFlags = (uint8 *) ShmemAlloc(TotalProcs * sizeof(*ProcGlobal->statusFlags));
215  MemSet(ProcGlobal->statusFlags, 0, TotalProcs * sizeof(*ProcGlobal->statusFlags));
216 
217  for (i = 0; i < TotalProcs; i++)
218  {
219  /* Common initialization for all PGPROCs, regardless of type. */
220 
221  /*
222  * Set up per-PGPROC semaphore, latch, and fpInfoLock. Prepared xact
223  * dummy PGPROCs don't need these though - they're never associated
224  * with a real process
225  */
226  if (i < MaxBackends + NUM_AUXILIARY_PROCS)
227  {
228  procs[i].sem = PGSemaphoreCreate();
229  InitSharedLatch(&(procs[i].procLatch));
230  LWLockInitialize(&(procs[i].fpInfoLock), LWTRANCHE_LOCK_FASTPATH);
231  }
232  procs[i].pgprocno = i;
233 
234  /*
235  * Newly created PGPROCs for normal backends, autovacuum and bgworkers
236  * must be queued up on the appropriate free list. Because there can
237  * only ever be a small, fixed number of auxiliary processes, no free
238  * list is used in that case; InitAuxiliaryProcess() instead uses a
239  * linear search. PGPROCs for prepared transactions are added to a
240  * free list by TwoPhaseShmemInit().
241  */
242  if (i < MaxConnections)
243  {
244  /* PGPROC for normal backend, add to freeProcs list */
245  procs[i].links.next = (SHM_QUEUE *) ProcGlobal->freeProcs;
246  ProcGlobal->freeProcs = &procs[i];
247  procs[i].procgloballist = &ProcGlobal->freeProcs;
248  }
249  else if (i < MaxConnections + autovacuum_max_workers + 1)
250  {
251  /* PGPROC for AV launcher/worker, add to autovacFreeProcs list */
252  procs[i].links.next = (SHM_QUEUE *) ProcGlobal->autovacFreeProcs;
253  ProcGlobal->autovacFreeProcs = &procs[i];
254  procs[i].procgloballist = &ProcGlobal->autovacFreeProcs;
255  }
256  else if (i < MaxConnections + autovacuum_max_workers + 1 + max_worker_processes)
257  {
258  /* PGPROC for bgworker, add to bgworkerFreeProcs list */
259  procs[i].links.next = (SHM_QUEUE *) ProcGlobal->bgworkerFreeProcs;
260  ProcGlobal->bgworkerFreeProcs = &procs[i];
261  procs[i].procgloballist = &ProcGlobal->bgworkerFreeProcs;
262  }
263  else if (i < MaxBackends)
264  {
265  /* PGPROC for walsender, add to walsenderFreeProcs list */
266  procs[i].links.next = (SHM_QUEUE *) ProcGlobal->walsenderFreeProcs;
267  ProcGlobal->walsenderFreeProcs = &procs[i];
268  procs[i].procgloballist = &ProcGlobal->walsenderFreeProcs;
269  }
270 
271  /* Initialize myProcLocks[] shared memory queues. */
272  for (j = 0; j < NUM_LOCK_PARTITIONS; j++)
273  SHMQueueInit(&(procs[i].myProcLocks[j]));
274 
275  /* Initialize lockGroupMembers list. */
276  dlist_init(&procs[i].lockGroupMembers);
277 
278  /*
279  * Initialize the atomic variables, otherwise, it won't be safe to
280  * access them for backends that aren't currently in use.
281  */
282  pg_atomic_init_u32(&(procs[i].procArrayGroupNext), INVALID_PGPROCNO);
283  pg_atomic_init_u32(&(procs[i].clogGroupNext), INVALID_PGPROCNO);
284  pg_atomic_init_u64(&(procs[i].waitStart), 0);
285  }
286 
287  /*
288  * Save pointers to the blocks of PGPROC structures reserved for auxiliary
289  * processes and prepared transactions.
290  */
291  AuxiliaryProcs = &procs[MaxBackends];
292  PreparedXactProcs = &procs[MaxBackends + NUM_AUXILIARY_PROCS];
293 
294  /* Create ProcStructLock spinlock, too */
295  ProcStructLock = (slock_t *) ShmemAlloc(sizeof(slock_t));
296  SpinLockInit(ProcStructLock);
297 }
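/*
 * Illustrative sketch (not part of this file): after the loop above, the
 * allProcs array is partitioned purely by index,
 *
 *     [0, MaxConnections)                              -> freeProcs
 *     [MaxConnections, +autovacuum_max_workers+1)      -> autovacFreeProcs
 *     [..., +max_worker_processes)                     -> bgworkerFreeProcs
 *     [..., MaxBackends)                               -> walsenderFreeProcs
 *     [MaxBackends, MaxBackends+NUM_AUXILIARY_PROCS)   -> AuxiliaryProcs
 *     [MaxBackends+NUM_AUXILIARY_PROCS, TotalProcs)    -> PreparedXactProcs
 *
 * and a PGPROC never migrates between these groups.
 */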
298 
299 /*
300  * InitProcess -- initialize a per-process data structure for this backend
301  */
302 void
303 InitProcess(void)
304 {
305  PGPROC *volatile *procgloballist;
306 
307  /*
308  * ProcGlobal should be set up already (if we are a backend, we inherit
309  * this by fork() or EXEC_BACKEND mechanism from the postmaster).
310  */
311  if (ProcGlobal == NULL)
312  elog(PANIC, "proc header uninitialized");
313 
314  if (MyProc != NULL)
315  elog(ERROR, "you already exist");
316 
317  /* Decide which list should supply our PGPROC. */
318  if (IsAnyAutoVacuumProcess())
319  procgloballist = &ProcGlobal->autovacFreeProcs;
320  else if (IsBackgroundWorker)
321  procgloballist = &ProcGlobal->bgworkerFreeProcs;
322  else if (am_walsender)
323  procgloballist = &ProcGlobal->walsenderFreeProcs;
324  else
325  procgloballist = &ProcGlobal->freeProcs;
326 
327  /*
328  * Try to get a proc struct from the appropriate free list. If this
329  * fails, we must be out of PGPROC structures (not to mention semaphores).
330  *
331  * While we are holding the ProcStructLock, also copy the current shared
332  * estimate of spins_per_delay to local storage.
333  */
334  SpinLockAcquire(ProcStructLock);
335 
336  set_spins_per_delay(ProcGlobal->spins_per_delay);
337 
338  MyProc = *procgloballist;
339 
340  if (MyProc != NULL)
341  {
342  *procgloballist = (PGPROC *) MyProc->links.next;
343  SpinLockRelease(ProcStructLock);
344  }
345  else
346  {
347  /*
348  * If we reach here, all the PGPROCs are in use. This is one of the
349  * possible places to detect "too many backends", so give the standard
350  * error message. XXX do we need to give a different failure message
351  * in the autovacuum case?
352  */
353  SpinLockRelease(ProcStructLock);
354  if (am_walsender)
355  ereport(FATAL,
356  (errcode(ERRCODE_TOO_MANY_CONNECTIONS),
357  errmsg("number of requested standby connections exceeds max_wal_senders (currently %d)",
358  max_wal_senders)));
359  ereport(FATAL,
360  (errcode(ERRCODE_TOO_MANY_CONNECTIONS),
361  errmsg("sorry, too many clients already")));
362  }
363 
364  /*
365  * Cross-check that the PGPROC is of the type we expect; if this were not
366  * the case, it would get returned to the wrong list.
367  */
368  Assert(MyProc->procgloballist == procgloballist);
369 
370  /*
371  * Now that we have a PGPROC, mark ourselves as an active postmaster
372  * child; this is so that the postmaster can detect it if we exit without
373  * cleaning up. (XXX autovac launcher currently doesn't participate in
374  * this; it probably should.)
375  */
376  if (IsUnderPostmaster && !IsAutoVacuumLauncherProcess())
377  MarkPostmasterChildActive();
378 
379  /*
380  * Initialize all fields of MyProc, except for those previously
381  * initialized by InitProcGlobal.
382  */
383  SHMQueueElemInit(&(MyProc->links));
384  MyProc->waitStatus = PROC_WAIT_STATUS_OK;
385  MyProc->lxid = InvalidLocalTransactionId;
386  MyProc->fpVXIDLock = false;
387  MyProc->fpLocalTransactionId = InvalidLocalTransactionId;
388  MyProc->xid = InvalidTransactionId;
389  MyProc->xmin = InvalidTransactionId;
390  MyProc->pid = MyProcPid;
391  /* backendId, databaseId and roleId will be filled in later */
392  MyProc->backendId = InvalidBackendId;
393  MyProc->databaseId = InvalidOid;
394  MyProc->roleId = InvalidOid;
395  MyProc->tempNamespaceId = InvalidOid;
396  MyProc->isBackgroundWorker = IsBackgroundWorker;
397  MyProc->delayChkpt = false;
398  MyProc->statusFlags = 0;
399  /* NB -- autovac launcher intentionally does not set IS_AUTOVACUUM */
400  if (IsAutoVacuumWorkerProcess())
401  MyProc->statusFlags |= PROC_IS_AUTOVACUUM;
402  MyProc->lwWaiting = false;
403  MyProc->lwWaitMode = 0;
404  MyProc->waitLock = NULL;
405  MyProc->waitProcLock = NULL;
406  pg_atomic_write_u64(&MyProc->waitStart, 0);
407 #ifdef USE_ASSERT_CHECKING
408  {
409  int i;
410 
411  /* Last process should have released all locks. */
412  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
413  Assert(SHMQueueEmpty(&(MyProc->myProcLocks[i])));
414  }
415 #endif
416  MyProc->recoveryConflictPending = false;
417 
418  /* Initialize fields for sync rep */
419  MyProc->waitLSN = 0;
420  MyProc->syncRepState = SYNC_REP_NOT_WAITING;
421  SHMQueueElemInit(&(MyProc->syncRepLinks));
422 
423  /* Initialize fields for group XID clearing. */
424  MyProc->procArrayGroupMember = false;
425  MyProc->procArrayGroupMemberXid = InvalidTransactionId;
426  Assert(pg_atomic_read_u32(&MyProc->procArrayGroupNext) == INVALID_PGPROCNO);
427 
428  /* Check that group locking fields are in a proper initial state. */
429  Assert(MyProc->lockGroupLeader == NULL);
430  Assert(dlist_is_empty(&MyProc->lockGroupMembers));
431 
432  /* Initialize wait event information. */
433  MyProc->wait_event_info = 0;
434 
435  /* Initialize fields for group transaction status update. */
436  MyProc->clogGroupMember = false;
437  MyProc->clogGroupMemberXid = InvalidTransactionId;
438  MyProc->clogGroupMemberXidStatus = TRANSACTION_STATUS_IN_PROGRESS;
439  MyProc->clogGroupMemberPage = -1;
440  MyProc->clogGroupMemberLsn = InvalidXLogRecPtr;
441  Assert(pg_atomic_read_u32(&MyProc->clogGroupNext) == INVALID_PGPROCNO);
442 
443  /*
444  * Acquire ownership of the PGPROC's latch, so that we can use WaitLatch
445  * on it. That allows us to repoint the process latch, which so far
446  * points to process local one, to the shared one.
447  */
448  OwnLatch(&MyProc->procLatch);
449  SwitchToSharedLatch();
450 
451  /* now that we have a proc, report wait events to shared memory */
452  pgstat_set_wait_event_storage(&MyProc->wait_event_info);
453 
454  /*
455  * We might be reusing a semaphore that belonged to a failed process. So
456  * be careful and reinitialize its value here. (This is not strictly
457  * necessary anymore, but seems like a good idea for cleanliness.)
458  */
459  PGSemaphoreReset(MyProc->sem);
460 
461  /*
462  * Arrange to clean up at backend exit.
463  */
464  on_shmem_exit(ProcKill, 0);
465 
466  /*
467  * Now that we have a PGPROC, we could try to acquire locks, so initialize
468  * local state needed for LWLocks, and the deadlock checker.
469  */
470  InitLWLockAccess();
471  InitDeadLockChecking();
472 }
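/*
 * Illustrative sketch (not part of this file): the freelists managed above
 * are plain LIFO lists protected by the ProcStructLock spinlock.
 * InitProcess() pops and ProcKill() pushes, conceptually:
 *
 *     SpinLockAcquire(ProcStructLock);
 *     proc = *procgloballist;                             -- pop ...
 *     if (proc != NULL)
 *         *procgloballist = (PGPROC *) proc->links.next;
 *     SpinLockRelease(ProcStructLock);
 *     ...
 *     SpinLockAcquire(ProcStructLock);
 *     proc->links.next = (SHM_QUEUE *) *procgloballist;   -- ... and push back
 *     *procgloballist = proc;
 *     SpinLockRelease(ProcStructLock);
 */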
473 
474 /*
475  * InitProcessPhase2 -- make MyProc visible in the shared ProcArray.
476  *
477  * This is separate from InitProcess because we can't acquire LWLocks until
478  * we've created a PGPROC, but in the EXEC_BACKEND case ProcArrayAdd won't
479  * work until after we've done CreateSharedMemoryAndSemaphores.
480  */
481 void
482 InitProcessPhase2(void)
483 {
484  Assert(MyProc != NULL);
485 
486  /*
487  * Add our PGPROC to the PGPROC array in shared memory.
488  */
489  ProcArrayAdd(MyProc);
490 
491  /*
492  * Arrange to clean that up at backend exit.
493  */
494  on_shmem_exit(RemoveProcFromArray, 0);
495 }
496 
497 /*
498  * InitAuxiliaryProcess -- create a per-auxiliary-process data structure
499  *
500  * This is called by bgwriter and similar processes so that they will have a
501  * MyProc value that's real enough to let them wait for LWLocks. The PGPROC
502  * and sema that are assigned are one of the extra ones created during
503  * InitProcGlobal.
504  *
505  * Auxiliary processes are presently not expected to wait for real (lockmgr)
506  * locks, so we need not set up the deadlock checker. They are never added
507  * to the ProcArray or the sinval messaging mechanism, either. They also
508  * don't get a VXID assigned, since this is only useful when we actually
509  * hold lockmgr locks.
510  *
511  * Startup process however uses locks but never waits for them in the
512  * normal backend sense. Startup process also takes part in sinval messaging
513  * as a sendOnly process, so never reads messages from sinval queue. So
514  * Startup process does have a VXID and does show up in pg_locks.
515  */
516 void
517 InitAuxiliaryProcess(void)
518 {
519  PGPROC *auxproc;
520  int proctype;
521 
522  /*
523  * ProcGlobal should be set up already (if we are a backend, we inherit
524  * this by fork() or EXEC_BACKEND mechanism from the postmaster).
525  */
526  if (ProcGlobal == NULL || AuxiliaryProcs == NULL)
527  elog(PANIC, "proc header uninitialized");
528 
529  if (MyProc != NULL)
530  elog(ERROR, "you already exist");
531 
532  /*
533  * We use the ProcStructLock to protect assignment and releasing of
534  * AuxiliaryProcs entries.
535  *
536  * While we are holding the ProcStructLock, also copy the current shared
537  * estimate of spins_per_delay to local storage.
538  */
539  SpinLockAcquire(ProcStructLock);
540 
541  set_spins_per_delay(ProcGlobal->spins_per_delay);
542 
543  /*
544  * Find a free auxproc ... *big* trouble if there isn't one ...
545  */
546  for (proctype = 0; proctype < NUM_AUXILIARY_PROCS; proctype++)
547  {
548  auxproc = &AuxiliaryProcs[proctype];
549  if (auxproc->pid == 0)
550  break;
551  }
552  if (proctype >= NUM_AUXILIARY_PROCS)
553  {
554  SpinLockRelease(ProcStructLock);
555  elog(FATAL, "all AuxiliaryProcs are in use");
556  }
557 
558  /* Mark auxiliary proc as in use by me */
559  /* use volatile pointer to prevent code rearrangement */
560  ((volatile PGPROC *) auxproc)->pid = MyProcPid;
561 
562  MyProc = auxproc;
563 
564  SpinLockRelease(ProcStructLock);
565 
566  /*
567  * Initialize all fields of MyProc, except for those previously
568  * initialized by InitProcGlobal.
569  */
570  SHMQueueElemInit(&(MyProc->links));
571  MyProc->waitStatus = PROC_WAIT_STATUS_OK;
572  MyProc->lxid = InvalidLocalTransactionId;
573  MyProc->fpVXIDLock = false;
574  MyProc->fpLocalTransactionId = InvalidLocalTransactionId;
575  MyProc->xid = InvalidTransactionId;
576  MyProc->xmin = InvalidTransactionId;
577  MyProc->backendId = InvalidBackendId;
578  MyProc->databaseId = InvalidOid;
579  MyProc->roleId = InvalidOid;
580  MyProc->tempNamespaceId = InvalidOid;
581  MyProc->isBackgroundWorker = IsBackgroundWorker;
582  MyProc->delayChkpt = false;
583  MyProc->statusFlags = 0;
584  MyProc->lwWaiting = false;
585  MyProc->lwWaitMode = 0;
586  MyProc->waitLock = NULL;
587  MyProc->waitProcLock = NULL;
588  pg_atomic_write_u64(&MyProc->waitStart, 0);
589 #ifdef USE_ASSERT_CHECKING
590  {
591  int i;
592 
593  /* Last process should have released all locks. */
594  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
595  Assert(SHMQueueEmpty(&(MyProc->myProcLocks[i])));
596  }
597 #endif
598 
599  /*
600  * Acquire ownership of the PGPROC's latch, so that we can use WaitLatch
601  * on it. That allows us to repoint the process latch, which so far
602  * points to process local one, to the shared one.
603  */
604  OwnLatch(&MyProc->procLatch);
605  SwitchToSharedLatch();
606 
607  /* now that we have a proc, report wait events to shared memory */
608  pgstat_set_wait_event_storage(&MyProc->wait_event_info);
609 
610  /* Check that group locking fields are in a proper initial state. */
611  Assert(MyProc->lockGroupLeader == NULL);
612  Assert(dlist_is_empty(&MyProc->lockGroupMembers));
613 
614  /*
615  * We might be reusing a semaphore that belonged to a failed process. So
616  * be careful and reinitialize its value here. (This is not strictly
617  * necessary anymore, but seems like a good idea for cleanliness.)
618  */
619  PGSemaphoreReset(MyProc->sem);
620 
621  /*
622  * Arrange to clean up at process exit.
623  */
624  on_shmem_exit(AuxiliaryProcKill, Int32GetDatum(proctype));
625 }
626 
627 /*
628  * Record the PID and PGPROC structures for the Startup process, for use in
629  * ProcSendSignal(). See comments there for further explanation.
630  */
631 void
632 PublishStartupProcessInformation(void)
633 {
634  SpinLockAcquire(ProcStructLock);
635 
636  ProcGlobal->startupProc = MyProc;
637  ProcGlobal->startupProcPid = MyProcPid;
638 
639  SpinLockRelease(ProcStructLock);
640 }
641 
642 /*
643  * Used from bufmgr to share the value of the buffer that Startup waits on,
644  * or to reset the value to "not waiting" (-1). This allows processing
645  * of recovery conflicts for buffer pins. Set is made before backends look
646  * at this value, so locking not required, especially since the set is
647  * an atomic integer set operation.
648  */
649 void
650 SetStartupBufferPinWaitBufId(int bufid)
651 {
652  /* use volatile pointer to prevent code rearrangement */
653  volatile PROC_HDR *procglobal = ProcGlobal;
654 
655  procglobal->startupBufferPinWaitBufId = bufid;
656 }
657 
658 /*
659  * Used by backends when they receive a request to check for buffer pin waits.
660  */
661 int
662 GetStartupBufferPinWaitBufId(void)
663 {
664  /* use volatile pointer to prevent code rearrangement */
665  volatile PROC_HDR *procglobal = ProcGlobal;
666 
667  return procglobal->startupBufferPinWaitBufId;
668 }
669 
670 /*
671  * Check whether there are at least N free PGPROC objects.
672  *
673  * Note: this is designed on the assumption that N will generally be small.
674  */
675 bool
676 HaveNFreeProcs(int n)
677 {
678  PGPROC *proc;
679 
680  SpinLockAcquire(ProcStructLock);
681 
682  proc = ProcGlobal->freeProcs;
683 
684  while (n > 0 && proc != NULL)
685  {
686  proc = (PGPROC *) proc->links.next;
687  n--;
688  }
689 
690  SpinLockRelease(ProcStructLock);
691 
692  return (n <= 0);
693 }
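/*
 * Illustrative sketch (not part of this file): the main consumer of this
 * check is connection startup, which keeps the last few PGPROC slots
 * reserved for superusers, roughly as in InitPostgres():
 *
 *     if ((!am_superuser || am_walsender) &&
 *         ReservedBackends > 0 &&
 *         !HaveNFreeProcs(ReservedBackends))
 *         ereport(FATAL,
 *                 (errcode(ERRCODE_TOO_MANY_CONNECTIONS),
 *                  errmsg("remaining connection slots are reserved for non-replication superuser connections")));
 */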
694 
695 /*
696  * Check if the current process is awaiting a lock.
697  */
698 bool
699 IsWaitingForLock(void)
700 {
701  if (lockAwaited == NULL)
702  return false;
703 
704  return true;
705 }
706 
707 /*
708  * Cancel any pending wait for lock, when aborting a transaction, and revert
709  * any strong lock count acquisition for a lock being acquired.
710  *
711  * (Normally, this would only happen if we accept a cancel/die
712  * interrupt while waiting; but an ereport(ERROR) before or during the lock
713  * wait is within the realm of possibility, too.)
714  */
715 void
716 LockErrorCleanup(void)
717 {
718  LWLock *partitionLock;
719  DisableTimeoutParams timeouts[2];
720 
721  HOLD_INTERRUPTS();
722 
723  AbortStrongLockAcquire();
724 
725  /* Nothing to do if we weren't waiting for a lock */
726  if (lockAwaited == NULL)
727  {
728  RESUME_INTERRUPTS();
729  return;
730  }
731 
732  /*
733  * Turn off the deadlock and lock timeout timers, if they are still
734  * running (see ProcSleep). Note we must preserve the LOCK_TIMEOUT
735  * indicator flag, since this function is executed before
736  * ProcessInterrupts when responding to SIGINT; else we'd lose the
737  * knowledge that the SIGINT came from a lock timeout and not an external
738  * source.
739  */
740  timeouts[0].id = DEADLOCK_TIMEOUT;
741  timeouts[0].keep_indicator = false;
742  timeouts[1].id = LOCK_TIMEOUT;
743  timeouts[1].keep_indicator = true;
744  disable_timeouts(timeouts, 2);
745 
746  /* Unlink myself from the wait queue, if on it (might not be anymore!) */
747  partitionLock = LockHashPartitionLock(lockAwaited->hashcode);
748  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
749 
750  if (MyProc->links.next != NULL)
751  {
752  /* We could not have been granted the lock yet */
753  RemoveFromWaitQueue(MyProc, lockAwaited->hashcode);
754  }
755  else
756  {
757  /*
758  * Somebody kicked us off the lock queue already. Perhaps they
759  * granted us the lock, or perhaps they detected a deadlock. If they
760  * did grant us the lock, we'd better remember it in our local lock
761  * table.
762  */
763  if (MyProc->waitStatus == PROC_WAIT_STATUS_OK)
764  GrantAwaitedLock();
765  }
766 
767  lockAwaited = NULL;
768 
769  LWLockRelease(partitionLock);
770 
771  RESUME_INTERRUPTS();
772 }
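/*
 * Illustrative sketch (not part of this file): this is invoked from the
 * error-recovery path, before held locks are released, so that a backend
 * that errored out while asleep in ProcSleep() is first taken off the lock
 * wait queue (cf. AbortTransaction() in xact.c), roughly:
 *
 *     LockErrorCleanup();      -- get off any lock wait queue, stop timers
 *     ...
 *     ResourceOwnerRelease(TopTransactionResourceOwner,
 *                          RESOURCE_RELEASE_LOCKS, false, true);
 */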
773 
774 
775 /*
776  * ProcReleaseLocks() -- release locks associated with current transaction
777  * at main transaction commit or abort
778  *
779  * At main transaction commit, we release standard locks except session locks.
780  * At main transaction abort, we release all locks including session locks.
781  *
782  * Advisory locks are released only if they are transaction-level;
783  * session-level holds remain, whether this is a commit or not.
784  *
785  * At subtransaction commit, we don't release any locks (so this func is not
786  * needed at all); we will defer the releasing to the parent transaction.
787  * At subtransaction abort, we release all locks held by the subtransaction;
788  * this is implemented by retail releasing of the locks under control of
789  * the ResourceOwner mechanism.
790  */
791 void
792 ProcReleaseLocks(bool isCommit)
793 {
794  if (!MyProc)
795  return;
796  /* If waiting, get off wait queue (should only be needed after error) */
797  LockErrorCleanup();
798  /* Release standard locks, including session-level if aborting */
799  LockReleaseAll(DEFAULT_LOCKMETHOD, !isCommit);
800  /* Release transaction-level advisory locks */
801  LockReleaseAll(USER_LOCKMETHOD, false);
802 }
803 
804 
805 /*
806  * RemoveProcFromArray() -- Remove this process from the shared ProcArray.
807  */
808 static void
809 RemoveProcFromArray(int code, Datum arg)
810 {
811  Assert(MyProc != NULL);
812  ProcArrayRemove(MyProc, InvalidTransactionId);
813 }
814 
815 /*
816  * ProcKill() -- Destroy the per-proc data structure for
817  * this process. Release any of its held LW locks.
818  */
819 static void
820 ProcKill(int code, Datum arg)
821 {
822  PGPROC *proc;
823  PGPROC *volatile *procgloballist;
824 
825  Assert(MyProc != NULL);
826 
827  /* Make sure we're out of the sync rep lists */
828  SyncRepCleanupAtProcExit();
829 
830 #ifdef USE_ASSERT_CHECKING
831  {
832  int i;
833 
834  /* Last process should have released all locks. */
835  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
836  Assert(SHMQueueEmpty(&(MyProc->myProcLocks[i])));
837  }
838 #endif
839 
840  /*
841  * Release any LW locks I am holding. There really shouldn't be any, but
842  * it's cheap to check again before we cut the knees off the LWLock
843  * facility by releasing our PGPROC ...
844  */
845  LWLockReleaseAll();
846 
847  /* Cancel any pending condition variable sleep, too */
848  ConditionVariableCancelSleep();
849 
850  /* Make sure active replication slots are released */
851  if (MyReplicationSlot != NULL)
852  ReplicationSlotRelease();
853 
854  /* Also cleanup all the temporary slots. */
855  ReplicationSlotCleanup();
856 
857  /*
858  * Detach from any lock group of which we are a member. If the leader
859  * exits before all other group members, its PGPROC will remain allocated
860  * until the last group process exits; that process must return the
861  * leader's PGPROC to the appropriate list.
862  */
863  if (MyProc->lockGroupLeader != NULL)
864  {
865  PGPROC *leader = MyProc->lockGroupLeader;
866  LWLock *leader_lwlock = LockHashPartitionLockByProc(leader);
867 
868  LWLockAcquire(leader_lwlock, LW_EXCLUSIVE);
869  Assert(!dlist_is_empty(&leader->lockGroupMembers));
870  dlist_delete(&MyProc->lockGroupLink);
871  if (dlist_is_empty(&leader->lockGroupMembers))
872  {
873  leader->lockGroupLeader = NULL;
874  if (leader != MyProc)
875  {
876  procgloballist = leader->procgloballist;
877 
878  /* Leader exited first; return its PGPROC. */
879  SpinLockAcquire(ProcStructLock);
880  leader->links.next = (SHM_QUEUE *) *procgloballist;
881  *procgloballist = leader;
882  SpinLockRelease(ProcStructLock);
883  }
884  }
885  else if (leader != MyProc)
886  MyProc->lockGroupLeader = NULL;
887  LWLockRelease(leader_lwlock);
888  }
889 
890  /*
891  * Reset MyLatch to the process local one. This is so that signal
892  * handlers et al can continue using the latch after the shared latch
893  * isn't ours anymore.
894  *
895  * Similarly, stop reporting wait events to MyProc->wait_event_info.
896  *
897  * After that clear MyProc and disown the shared latch.
898  */
899  SwitchBackToLocalLatch();
900  pgstat_reset_wait_event_storage();
901 
902  proc = MyProc;
903  MyProc = NULL;
904  DisownLatch(&proc->procLatch);
905 
906  procgloballist = proc->procgloballist;
907  SpinLockAcquire(ProcStructLock);
908 
909  /*
910  * If we're still a member of a locking group, that means we're a leader
911  * which has somehow exited before its children. The last remaining child
912  * will release our PGPROC. Otherwise, release it now.
913  */
914  if (proc->lockGroupLeader == NULL)
915  {
916  /* Since lockGroupLeader is NULL, lockGroupMembers should be empty. */
917  Assert(dlist_is_empty(&proc->lockGroupMembers));
918 
919  /* Return PGPROC structure (and semaphore) to appropriate freelist */
920  proc->links.next = (SHM_QUEUE *) *procgloballist;
921  *procgloballist = proc;
922  }
923 
924  /* Update shared estimate of spins_per_delay */
925  ProcGlobal->spins_per_delay = update_spins_per_delay(ProcGlobal->spins_per_delay);
926 
927  SpinLockRelease(ProcStructLock);
928 
929  /*
930  * This process is no longer present in shared memory in any meaningful
931  * way, so tell the postmaster we've cleaned up acceptably well. (XXX
932  * autovac launcher should be included here someday)
933  */
934  if (IsUnderPostmaster && !IsAutoVacuumLauncherProcess())
935  MarkPostmasterChildInactive();
936 
937  /* wake autovac launcher if needed -- see comments in FreeWorkerInfo */
938  if (AutovacuumLauncherPid != 0)
939  kill(AutovacuumLauncherPid, SIGUSR2);
940 }
941 
942 /*
943  * AuxiliaryProcKill() -- Cut-down version of ProcKill for auxiliary
944  * processes (bgwriter, etc). The PGPROC and sema are not released, only
945  * marked as not-in-use.
946  */
947 static void
948 AuxiliaryProcKill(int code, Datum arg)
949 {
950  int proctype = DatumGetInt32(arg);
951  PGPROC *auxproc PG_USED_FOR_ASSERTS_ONLY;
952  PGPROC *proc;
953 
954  Assert(proctype >= 0 && proctype < NUM_AUXILIARY_PROCS);
955 
956  auxproc = &AuxiliaryProcs[proctype];
957 
958  Assert(MyProc == auxproc);
959 
960  /* Release any LW locks I am holding (see notes above) */
961  LWLockReleaseAll();
962 
963  /* Cancel any pending condition variable sleep, too */
964  ConditionVariableCancelSleep();
965 
966  /* look at the equivalent ProcKill() code for comments */
967  SwitchBackToLocalLatch();
968  pgstat_reset_wait_event_storage();
969 
970  proc = MyProc;
971  MyProc = NULL;
972  DisownLatch(&proc->procLatch);
973 
974  SpinLockAcquire(ProcStructLock);
975 
976  /* Mark auxiliary proc no longer in use */
977  proc->pid = 0;
978 
979  /* Update shared estimate of spins_per_delay */
980  ProcGlobal->spins_per_delay = update_spins_per_delay(ProcGlobal->spins_per_delay);
981 
982  SpinLockRelease(ProcStructLock);
983 }
984 
985 /*
986  * AuxiliaryPidGetProc -- get PGPROC for an auxiliary process
987  * given its PID
988  *
989  * Returns NULL if not found.
990  */
991 PGPROC *
992 AuxiliaryPidGetProc(int pid)
993 {
994  PGPROC *result = NULL;
995  int index;
996 
997  if (pid == 0) /* never match dummy PGPROCs */
998  return NULL;
999 
1000  for (index = 0; index < NUM_AUXILIARY_PROCS; index++)
1001  {
1002  PGPROC *proc = &AuxiliaryProcs[index];
1003 
1004  if (proc->pid == pid)
1005  {
1006  result = proc;
1007  break;
1008  }
1009  }
1010  return result;
1011 }
1012 
1013 /*
1014  * ProcQueue package: routines for putting processes to sleep
1015  * and waking them up
1016  */
1017 
1018 /*
1019  * ProcQueueAlloc -- alloc/attach to a shared memory process queue
1020  *
1021  * Returns: a pointer to the queue
1022  * Side Effects: Initializes the queue if it wasn't there before
1023  */
1024 #ifdef NOT_USED
1025 PROC_QUEUE *
1026 ProcQueueAlloc(const char *name)
1027 {
1028  PROC_QUEUE *queue;
1029  bool found;
1030 
1031  queue = (PROC_QUEUE *)
1032  ShmemInitStruct(name, sizeof(PROC_QUEUE), &found);
1033 
1034  if (!found)
1035  ProcQueueInit(queue);
1036 
1037  return queue;
1038 }
1039 #endif
1040 
1041 /*
1042  * ProcQueueInit -- initialize a shared memory process queue
1043  */
1044 void
1045 ProcQueueInit(PROC_QUEUE *queue)
1046 {
1047  SHMQueueInit(&(queue->links));
1048  queue->size = 0;
1049 }
1050 
1051 
1052 /*
1053  * ProcSleep -- put a process to sleep on the specified lock
1054  *
1055  * Caller must have set MyProc->heldLocks to reflect locks already held
1056  * on the lockable object by this process (under all XIDs).
1057  *
1058  * The lock table's partition lock must be held at entry, and will be held
1059  * at exit.
1060  *
1061  * Result: PROC_WAIT_STATUS_OK if we acquired the lock, PROC_WAIT_STATUS_ERROR if not (deadlock).
1062  *
1063  * ASSUME: that no one will fiddle with the queue until after
1064  * we release the partition lock.
1065  *
1066  * NOTES: The process queue is now a priority queue for locking.
1067  */
1068 ProcWaitStatus
1069 ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
1070 {
1071  LOCKMODE lockmode = locallock->tag.mode;
1072  LOCK *lock = locallock->lock;
1073  PROCLOCK *proclock = locallock->proclock;
1074  uint32 hashcode = locallock->hashcode;
1075  LWLock *partitionLock = LockHashPartitionLock(hashcode);
1076  PROC_QUEUE *waitQueue = &(lock->waitProcs);
1077  LOCKMASK myHeldLocks = MyProc->heldLocks;
1078  TimestampTz standbyWaitStart = 0;
1079  bool early_deadlock = false;
1080  bool allow_autovacuum_cancel = true;
1081  bool logged_recovery_conflict = false;
1082  ProcWaitStatus myWaitStatus;
1083  PGPROC *proc;
1084  PGPROC *leader = MyProc->lockGroupLeader;
1085  int i;
1086 
1087  /*
1088  * If group locking is in use, locks held by members of my locking group
1089  * need to be included in myHeldLocks. This is not required for relation
1090  * extension or page locks which conflict among group members. However,
1091  * including them in myHeldLocks will give group members the priority to
1092  * get those locks as compared to other backends which are also trying to
1093  * acquire those locks. OTOH, we can avoid giving priority to group
1094  * members for that kind of locks, but there doesn't appear to be a clear
1095  * advantage of the same.
1096  */
1097  if (leader != NULL)
1098  {
1099  SHM_QUEUE *procLocks = &(lock->procLocks);
1100  PROCLOCK *otherproclock;
1101 
1102  otherproclock = (PROCLOCK *)
1103  SHMQueueNext(procLocks, procLocks, offsetof(PROCLOCK, lockLink));
1104  while (otherproclock != NULL)
1105  {
1106  if (otherproclock->groupLeader == leader)
1107  myHeldLocks |= otherproclock->holdMask;
1108  otherproclock = (PROCLOCK *)
1109  SHMQueueNext(procLocks, &otherproclock->lockLink,
1110  offsetof(PROCLOCK, lockLink));
1111  }
1112  }
1113 
1114  /*
1115  * Determine where to add myself in the wait queue.
1116  *
1117  * Normally I should go at the end of the queue. However, if I already
1118  * hold locks that conflict with the request of any previous waiter, put
1119  * myself in the queue just in front of the first such waiter. This is not
1120  * a necessary step, since deadlock detection would move me to before that
1121  * waiter anyway; but it's relatively cheap to detect such a conflict
1122  * immediately, and avoid delaying till deadlock timeout.
1123  *
1124  * Special case: if I find I should go in front of some waiter, check to
1125  * see if I conflict with already-held locks or the requests before that
1126  * waiter. If not, then just grant myself the requested lock immediately.
1127  * This is the same as the test for immediate grant in LockAcquire, except
1128  * we are only considering the part of the wait queue before my insertion
1129  * point.
1130  */
1131  if (myHeldLocks != 0)
1132  {
1133  LOCKMASK aheadRequests = 0;
1134 
1135  proc = (PGPROC *) waitQueue->links.next;
1136  for (i = 0; i < waitQueue->size; i++)
1137  {
1138  /*
1139  * If we're part of the same locking group as this waiter, its
1140  * locks neither conflict with ours nor contribute to
1141  * aheadRequests.
1142  */
1143  if (leader != NULL && leader == proc->lockGroupLeader)
1144  {
1145  proc = (PGPROC *) proc->links.next;
1146  continue;
1147  }
1148  /* Must he wait for me? */
1149  if (lockMethodTable->conflictTab[proc->waitLockMode] & myHeldLocks)
1150  {
1151  /* Must I wait for him ? */
1152  if (lockMethodTable->conflictTab[lockmode] & proc->heldLocks)
1153  {
1154  /*
1155  * Yes, so we have a deadlock. Easiest way to clean up
1156  * correctly is to call RemoveFromWaitQueue(), but we
1157  * can't do that until we are *on* the wait queue. So, set
1158  * a flag to check below, and break out of loop. Also,
1159  * record deadlock info for later message.
1160  */
1161  RememberSimpleDeadLock(MyProc, lockmode, lock, proc);
1162  early_deadlock = true;
1163  break;
1164  }
1165  /* I must go before this waiter. Check special case. */
1166  if ((lockMethodTable->conflictTab[lockmode] & aheadRequests) == 0 &&
1167  !LockCheckConflicts(lockMethodTable, lockmode, lock,
1168  proclock))
1169  {
1170  /* Skip the wait and just grant myself the lock. */
1171  GrantLock(lock, proclock, lockmode);
1172  GrantAwaitedLock();
1173  return PROC_WAIT_STATUS_OK;
1174  }
1175  /* Break out of loop to put myself before him */
1176  break;
1177  }
1178  /* Nope, so advance to next waiter */
1179  aheadRequests |= LOCKBIT_ON(proc->waitLockMode);
1180  proc = (PGPROC *) proc->links.next;
1181  }
1182 
1183  /*
1184  * If we fall out of loop normally, proc points to waitQueue head, so
1185  * we will insert at tail of queue as desired.
1186  */
1187  }
1188  else
1189  {
1190  /* I hold no locks, so I can't push in front of anyone. */
1191  proc = (PGPROC *) &(waitQueue->links);
1192  }
1193 
1194  /*
1195  * Insert self into queue, ahead of the given proc (or at tail of queue).
1196  */
1197  SHMQueueInsertBefore(&(proc->links), &(MyProc->links));
1198  waitQueue->size++;
1199 
1200  lock->waitMask |= LOCKBIT_ON(lockmode);
1201 
1202  /* Set up wait information in PGPROC object, too */
1203  MyProc->waitLock = lock;
1204  MyProc->waitProcLock = proclock;
1205  MyProc->waitLockMode = lockmode;
1206 
1208 
1209  /*
1210  * If we detected deadlock, give up without waiting. This must agree with
1211  * CheckDeadLock's recovery code.
1212  */
1213  if (early_deadlock)
1214  {
1215  RemoveFromWaitQueue(MyProc, hashcode);
1216  return PROC_WAIT_STATUS_ERROR;
1217  }
1218 
1219  /* mark that we are waiting for a lock */
1220  lockAwaited = locallock;
1221 
1222  /*
1223  * Release the lock table's partition lock.
1224  *
1225  * NOTE: this may also cause us to exit critical-section state, possibly
1226  * allowing a cancel/die interrupt to be accepted. This is OK because we
1227  * have recorded the fact that we are waiting for a lock, and so
1228  * LockErrorCleanup will clean up if cancel/die happens.
1229  */
1230  LWLockRelease(partitionLock);
1231 
1232  /*
1233  * Also, now that we will successfully clean up after an ereport, it's
1234  * safe to check to see if there's a buffer pin deadlock against the
1235  * Startup process. Of course, that's only necessary if we're doing Hot
1236  * Standby and are not the Startup process ourselves.
1237  */
1238  if (RecoveryInProgress() && !InRecovery)
1239  CheckRecoveryConflictDeadlock();
1240 
1241  /* Reset deadlock_state before enabling the timeout handler */
1242  deadlock_state = DS_NOT_YET_CHECKED;
1243  got_deadlock_timeout = false;
1244 
1245  /*
1246  * Set timer so we can wake up after awhile and check for a deadlock. If a
1247  * deadlock is detected, the handler sets MyProc->waitStatus =
1248  * PROC_WAIT_STATUS_ERROR, allowing us to know that we must report failure
1249  * rather than success.
1250  *
1251  * By delaying the check until we've waited for a bit, we can avoid
1252  * running the rather expensive deadlock-check code in most cases.
1253  *
1254  * If LockTimeout is set, also enable the timeout for that. We can save a
1255  * few cycles by enabling both timeout sources in one call.
1256  *
1257  * If InHotStandby we set lock waits slightly later for clarity with other
1258  * code.
1259  */
1260  if (!InHotStandby)
1261  {
1262  if (LockTimeout > 0)
1263  {
1264  EnableTimeoutParams timeouts[2];
1265 
1266  timeouts[0].id = DEADLOCK_TIMEOUT;
1267  timeouts[0].type = TMPARAM_AFTER;
1268  timeouts[0].delay_ms = DeadlockTimeout;
1269  timeouts[1].id = LOCK_TIMEOUT;
1270  timeouts[1].type = TMPARAM_AFTER;
1271  timeouts[1].delay_ms = LockTimeout;
1272  enable_timeouts(timeouts, 2);
1273  }
1274  else
1275  enable_timeout_after(DEADLOCK_TIMEOUT, DeadlockTimeout);
1276 
1277  /*
1278  * Use the current time obtained for the deadlock timeout timer as
1279  * waitStart (i.e., the time when this process started waiting for the
1280  * lock). Since getting the current time newly can cause overhead, we
1281  * reuse the already-obtained time to avoid that overhead.
1282  *
1283  * Note that waitStart is updated without holding the lock table's
1284  * partition lock, to avoid the overhead by additional lock
1285  * acquisition. This can cause "waitstart" in pg_locks to become NULL
1286  * for a very short period of time after the wait started even though
1287  * "granted" is false. This is OK in practice because we can assume
1288  * that users are likely to look at "waitstart" when waiting for the
1289  * lock for a long time.
1290  */
1291  pg_atomic_write_u64(&MyProc->waitStart,
1292  get_timeout_start_time(DEADLOCK_TIMEOUT));
1293  }
1294  else if (log_recovery_conflict_waits)
1295  {
1296  /*
1297  * Set the wait start timestamp if logging is enabled and in hot
1298  * standby.
1299  */
1300  standbyWaitStart = GetCurrentTimestamp();
1301  }
1302 
1303  /*
1304  * If somebody wakes us between LWLockRelease and WaitLatch, the latch
1305  * will not wait. But a set latch does not necessarily mean that the lock
1306  * is free now, as there are many other sources for latch sets than
1307  * somebody releasing the lock.
1308  *
1309  * We process interrupts whenever the latch has been set, so cancel/die
1310  * interrupts are processed quickly. This means we must not mind losing
1311  * control to a cancel/die interrupt here. We don't, because we have no
1312  * shared-state-change work to do after being granted the lock (the
1313  * grantor did it all). We do have to worry about canceling the deadlock
1314  * timeout and updating the locallock table, but if we lose control to an
1315  * error, LockErrorCleanup will fix that up.
1316  */
1317  do
1318  {
1319  if (InHotStandby)
1320  {
1321  bool maybe_log_conflict =
1322  (standbyWaitStart != 0 && !logged_recovery_conflict);
1323 
1324  /* Set a timer and wait for that or for the lock to be granted */
1325  ResolveRecoveryConflictWithLock(locallock->tag.lock,
1326  maybe_log_conflict);
1327 
1328  /*
1329  * Emit the log message if the startup process is waiting longer
1330  * than deadlock_timeout for recovery conflict on lock.
1331  */
1332  if (maybe_log_conflict)
1333  {
1334  TimestampTz now = GetCurrentTimestamp();
1335 
1336  if (TimestampDifferenceExceeds(standbyWaitStart, now,
1337  DeadlockTimeout))
1338  {
1339  VirtualTransactionId *vxids;
1340  int cnt;
1341 
1342  vxids = GetLockConflicts(&locallock->tag.lock,
1343  AccessExclusiveLock, &cnt);
1344 
1345  /*
1346  * Log the recovery conflict and the list of PIDs of
1347  * backends holding the conflicting lock. Note that we do
1348  * logging even if there are no such backends right now
1349  * because the startup process here has already waited
1350  * longer than deadlock_timeout.
1351  */
1352  LogRecoveryConflict(PROCSIG_RECOVERY_CONFLICT_LOCK,
1353  standbyWaitStart, now,
1354  cnt > 0 ? vxids : NULL, true);
1355  logged_recovery_conflict = true;
1356  }
1357  }
1358  }
1359  else
1360  {
1361  (void) WaitLatch(MyLatch, WL_LATCH_SET | WL_EXIT_ON_PM_DEATH, 0,
1362  PG_WAIT_LOCK | locallock->tag.lock.locktag_type);
1363  ResetLatch(MyLatch);
1364  /* check for deadlocks first, as that's probably log-worthy */
1365  if (got_deadlock_timeout)
1366  {
1367  CheckDeadLock();
1368  got_deadlock_timeout = false;
1369  }
1370  CHECK_FOR_INTERRUPTS();
1371  }
1372 
1373  /*
1374  * waitStatus could change from PROC_WAIT_STATUS_WAITING to something
1375  * else asynchronously. Read it just once per loop to prevent
1376  * surprising behavior (such as missing log messages).
1377  */
1378  myWaitStatus = *((volatile ProcWaitStatus *) &MyProc->waitStatus);
1379 
1380  /*
1381  * If we are not deadlocked, but are waiting on an autovacuum-induced
1382  * task, send a signal to interrupt it.
1383  */
1384  if (deadlock_state == DS_BLOCKED_BY_AUTOVACUUM && allow_autovacuum_cancel)
1385  {
1386  PGPROC *autovac = GetBlockingAutoVacuumPgproc();
1387  uint8 statusFlags;
1388  uint8 lockmethod_copy;
1389  LOCKTAG locktag_copy;
1390 
1391  /*
1392  * Grab info we need, then release lock immediately. Note this
1393  * coding means that there is a tiny chance that the process
1394  * terminates its current transaction and starts a different one
1395  * before we have a chance to send the signal; the worst possible
1396  * consequence is that a for-wraparound vacuum is cancelled. But
1397  * that could happen in any case unless we were to do kill() with
1398  * the lock held, which is much more undesirable.
1399  */
1400  LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
1401  statusFlags = ProcGlobal->statusFlags[autovac->pgxactoff];
1402  lockmethod_copy = lock->tag.locktag_lockmethodid;
1403  locktag_copy = lock->tag;
1404  LWLockRelease(ProcArrayLock);
1405 
1406  /*
1407  * Only do it if the worker is not working to protect against Xid
1408  * wraparound.
1409  */
1410  if ((statusFlags & PROC_IS_AUTOVACUUM) &&
1411  !(statusFlags & PROC_VACUUM_FOR_WRAPAROUND))
1412  {
1413  int pid = autovac->pid;
1414 
1415  /* report the case, if configured to do so */
1416  if (message_level_is_interesting(DEBUG1))
1417  {
1418  StringInfoData locktagbuf;
1419  StringInfoData logbuf; /* errdetail for server log */
1420 
1421  initStringInfo(&locktagbuf);
1422  initStringInfo(&logbuf);
1423  DescribeLockTag(&locktagbuf, &locktag_copy);
1424  appendStringInfo(&logbuf,
1425  "Process %d waits for %s on %s.",
1426  MyProcPid,
1427  GetLockmodeName(lockmethod_copy, lockmode),
1428  locktagbuf.data);
1429 
1430  ereport(DEBUG1,
1431  (errmsg_internal("sending cancel to blocking autovacuum PID %d",
1432  pid),
1433  errdetail_log("%s", logbuf.data)));
1434 
1435  pfree(locktagbuf.data);
1436  pfree(logbuf.data);
1437  }
1438 
1439  /* send the autovacuum worker Back to Old Kent Road */
1440  if (kill(pid, SIGINT) < 0)
1441  {
1442  /*
1443  * There's a race condition here: once we release the
1444  * ProcArrayLock, it's possible for the autovac worker to
1445  * close up shop and exit before we can do the kill().
1446  * Therefore, we do not whinge about no-such-process.
1447  * Other errors such as EPERM could conceivably happen if
1448  * the kernel recycles the PID fast enough, but such cases
1449  * seem improbable enough that it's probably best to issue
1450  * a warning if we see some other errno.
1451  */
1452  if (errno != ESRCH)
1453  ereport(WARNING,
1454  (errmsg("could not send signal to process %d: %m",
1455  pid)));
1456  }
1457  }
1458 
1459  /* prevent signal from being sent again more than once */
1460  allow_autovacuum_cancel = false;
1461  }
1462 
1463  /*
1464  * If awoken after the deadlock check interrupt has run, and
1465  * log_lock_waits is on, then report about the wait.
1466  */
1467  if (log_lock_waits && deadlock_state != DS_NOT_YET_CHECKED)
1468  {
1469  StringInfoData buf,
1470  lock_waiters_sbuf,
1471  lock_holders_sbuf;
1472  const char *modename;
1473  long secs;
1474  int usecs;
1475  long msecs;
1476  SHM_QUEUE *procLocks;
1477  PROCLOCK *proclock;
1478  bool first_holder = true,
1479  first_waiter = true;
1480  int lockHoldersNum = 0;
1481 
1482  initStringInfo(&buf);
1483  initStringInfo(&lock_waiters_sbuf);
1484  initStringInfo(&lock_holders_sbuf);
1485 
1486  DescribeLockTag(&buf, &locallock->tag.lock);
1487  modename = GetLockmodeName(locallock->tag.lock.locktag_lockmethodid,
1488  lockmode);
1489  TimestampDifference(get_timeout_start_time(DEADLOCK_TIMEOUT),
1490  GetCurrentTimestamp(),
1491  &secs, &usecs);
1492  msecs = secs * 1000 + usecs / 1000;
1493  usecs = usecs % 1000;
1494 
1495  /*
1496  * we loop over the lock's procLocks to gather a list of all
1497  * holders and waiters. Thus we will be able to provide more
1498  * detailed information for lock debugging purposes.
1499  *
1500  * lock->procLocks contains all processes which hold or wait for
1501  * this lock.
1502  */
1503 
1504  LWLockAcquire(partitionLock, LW_SHARED);
1505 
1506  procLocks = &(lock->procLocks);
1507  proclock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks,
1508  offsetof(PROCLOCK, lockLink));
1509 
1510  while (proclock)
1511  {
1512  /*
1513  * we are a waiter if myProc->waitProcLock == proclock; we are
1514  * a holder if it is NULL or something different
1515  */
1516  if (proclock->tag.myProc->waitProcLock == proclock)
1517  {
1518  if (first_waiter)
1519  {
1520  appendStringInfo(&lock_waiters_sbuf, "%d",
1521  proclock->tag.myProc->pid);
1522  first_waiter = false;
1523  }
1524  else
1525  appendStringInfo(&lock_waiters_sbuf, ", %d",
1526  proclock->tag.myProc->pid);
1527  }
1528  else
1529  {
1530  if (first_holder)
1531  {
1532  appendStringInfo(&lock_holders_sbuf, "%d",
1533  proclock->tag.myProc->pid);
1534  first_holder = false;
1535  }
1536  else
1537  appendStringInfo(&lock_holders_sbuf, ", %d",
1538  proclock->tag.myProc->pid);
1539 
1540  lockHoldersNum++;
1541  }
1542 
1543  proclock = (PROCLOCK *) SHMQueueNext(procLocks, &proclock->lockLink,
1544  offsetof(PROCLOCK, lockLink));
1545  }
1546 
1547  LWLockRelease(partitionLock);
1548 
1549  if (deadlock_state == DS_SOFT_DEADLOCK)
1550  ereport(LOG,
1551  (errmsg("process %d avoided deadlock for %s on %s by rearranging queue order after %ld.%03d ms",
1552  MyProcPid, modename, buf.data, msecs, usecs),
1553  (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
1554  "Processes holding the lock: %s. Wait queue: %s.",
1555  lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
1556  else if (deadlock_state == DS_HARD_DEADLOCK)
1557  {
1558  /*
1559  * This message is a bit redundant with the error that will be
1560  * reported subsequently, but in some cases the error report
1561  * might not make it to the log (eg, if it's caught by an
1562  * exception handler), and we want to ensure all long-wait
1563  * events get logged.
1564  */
1565  ereport(LOG,
1566  (errmsg("process %d detected deadlock while waiting for %s on %s after %ld.%03d ms",
1567  MyProcPid, modename, buf.data, msecs, usecs),
1568  (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
1569  "Processes holding the lock: %s. Wait queue: %s.",
1570  lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
1571  }
1572 
1573  if (myWaitStatus == PROC_WAIT_STATUS_WAITING)
1574  ereport(LOG,
1575  (errmsg("process %d still waiting for %s on %s after %ld.%03d ms",
1576  MyProcPid, modename, buf.data, msecs, usecs),
1577  (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
1578  "Processes holding the lock: %s. Wait queue: %s.",
1579  lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
1580  else if (myWaitStatus == PROC_WAIT_STATUS_OK)
1581  ereport(LOG,
1582  (errmsg("process %d acquired %s on %s after %ld.%03d ms",
1583  MyProcPid, modename, buf.data, msecs, usecs)));
1584  else
1585  {
1586  Assert(myWaitStatus == PROC_WAIT_STATUS_ERROR);
1587 
1588  /*
1589  * Currently, the deadlock checker always kicks its own
1590  * process, which means that we'll only see
1591  * PROC_WAIT_STATUS_ERROR when deadlock_state ==
1592  * DS_HARD_DEADLOCK, and there's no need to print redundant
1593  * messages. But for completeness and future-proofing, print
1594  * a message if it looks like someone else kicked us off the
1595  * lock.
1596  */
1597  if (deadlock_state != DS_HARD_DEADLOCK)
1598  ereport(LOG,
1599  (errmsg("process %d failed to acquire %s on %s after %ld.%03d ms",
1600  MyProcPid, modename, buf.data, msecs, usecs),
1601  (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
1602  "Processes holding the lock: %s. Wait queue: %s.",
1603  lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
1604  }
1605 
1606  /*
1607  * At this point we might still need to wait for the lock. Reset
1608  * state so we don't print the above messages again.
1609  */
1610  deadlock_state = DS_NO_DEADLOCK;
1611 
1612  pfree(buf.data);
1613  pfree(lock_holders_sbuf.data);
1614  pfree(lock_waiters_sbuf.data);
1615  }
1616  } while (myWaitStatus == PROC_WAIT_STATUS_WAITING);
1617 
1618  /*
1619  * Disable the timers, if they are still running. As in LockErrorCleanup,
1620  * we must preserve the LOCK_TIMEOUT indicator flag: if a lock timeout has
1621  * already caused QueryCancelPending to become set, we want the cancel to
1622  * be reported as a lock timeout, not a user cancel.
1623  */
1624  if (!InHotStandby)
1625  {
1626  if (LockTimeout > 0)
1627  {
1628  DisableTimeoutParams timeouts[2];
1629 
1630  timeouts[0].id = DEADLOCK_TIMEOUT;
1631  timeouts[0].keep_indicator = false;
1632  timeouts[1].id = LOCK_TIMEOUT;
1633  timeouts[1].keep_indicator = true;
1634  disable_timeouts(timeouts, 2);
1635  }
1636  else
1637  disable_timeout(DEADLOCK_TIMEOUT, false);
1638  }
1639 
1640  /*
1641  * Emit the log message if recovery conflict on lock was resolved but the
1642  * startup process waited longer than deadlock_timeout for it.
1643  */
1644  if (InHotStandby && logged_recovery_conflict)
1645  LogRecoveryConflict(PROCSIG_RECOVERY_CONFLICT_LOCK,
1646  standbyWaitStart, GetCurrentTimestamp(),
1647  NULL, false);
1648 
1649  /*
1650  * Re-acquire the lock table's partition lock. We have to do this to hold
1651  * off cancel/die interrupts before we can mess with lockAwaited (else we
1652  * might have a missed or duplicated locallock update).
1653  */
1654  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
1655 
1656  /*
1657  * We no longer want LockErrorCleanup to do anything.
1658  */
1659  lockAwaited = NULL;
1660 
1661  /*
1662  * If we got the lock, be sure to remember it in the locallock table.
1663  */
1664  if (MyProc->waitStatus == PROC_WAIT_STATUS_OK)
1665  GrantAwaitedLock();
1666 
1667  /*
1668  * We don't have to do anything else, because the awaker did all the
1669  * necessary update of the lock table and MyProc.
1670  */
1671  return MyProc->waitStatus;
1672 }
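/*
 * Illustrative sketch (not part of this file): the caller in lock.c reacts
 * to the ProcWaitStatus returned above; on PROC_WAIT_STATUS_ERROR the
 * deadlock error is raised there, roughly as in WaitOnLock():
 *
 *     if (ProcSleep(locallock, lockMethodTable) != PROC_WAIT_STATUS_OK)
 *     {
 *         -- DeadLockReport() raises ereport(ERROR) and does not return
 *         DeadLockReport();
 *     }
 */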
1673 
1674 
1675 /*
1676  * ProcWakeup -- wake up a process by setting its latch.
1677  *
1678  * Also remove the process from the wait queue and set its links invalid.
1679  * RETURN: the next process in the wait queue.
1680  *
1681  * The appropriate lock partition lock must be held by caller.
1682  *
1683  * XXX: presently, this code is only used for the "success" case, and only
1684  * works correctly for that case. To clean up in failure case, would need
1685  * to twiddle the lock's request counts too --- see RemoveFromWaitQueue.
1686  * Hence, in practice the waitStatus parameter must be PROC_WAIT_STATUS_OK.
1687  */
1688 PGPROC *
1689 ProcWakeup(PGPROC *proc, ProcWaitStatus waitStatus)
1690 {
1691  PGPROC *retProc;
1692 
1693  /* Proc should be sleeping ... */
1694  if (proc->links.prev == NULL ||
1695  proc->links.next == NULL)
1696  return NULL;
1697  Assert(proc->waitStatus == PROC_WAIT_STATUS_WAITING);
1698 
1699  /* Save next process before we zap the list link */
1700  retProc = (PGPROC *) proc->links.next;
1701 
1702  /* Remove process from wait queue */
1703  SHMQueueDelete(&(proc->links));
1704  (proc->waitLock->waitProcs.size)--;
1705 
1706  /* Clean up process' state and pass it the ok/fail signal */
1707  proc->waitLock = NULL;
1708  proc->waitProcLock = NULL;
1709  proc->waitStatus = waitStatus;
1710  pg_atomic_write_u64(&MyProc->waitStart, 0);
1711 
1712  /* And awaken it */
1713  SetLatch(&proc->procLatch);
1714 
1715  return retProc;
1716 }
1717 
1718 /*
1719  * ProcLockWakeup -- routine for waking up processes when a lock is
1720  * released (or a prior waiter is aborted). Scan all waiters
1721  * for lock, waken any that are no longer blocked.
1722  *
1723  * The appropriate lock partition lock must be held by caller.
1724  */
1725 void
1726 ProcLockWakeup(LockMethod lockMethodTable, LOCK *lock)
1727 {
1728  PROC_QUEUE *waitQueue = &(lock->waitProcs);
1729  int queue_size = waitQueue->size;
1730  PGPROC *proc;
1731  LOCKMASK aheadRequests = 0;
1732 
1733  Assert(queue_size >= 0);
1734 
1735  if (queue_size == 0)
1736  return;
1737 
1738  proc = (PGPROC *) waitQueue->links.next;
1739 
1740  while (queue_size-- > 0)
1741  {
1742  LOCKMODE lockmode = proc->waitLockMode;
1743 
1744  /*
1745  * Waken if (a) doesn't conflict with requests of earlier waiters, and
1746  * (b) doesn't conflict with already-held locks.
1747  */
1748  if ((lockMethodTable->conflictTab[lockmode] & aheadRequests) == 0 &&
1749  !LockCheckConflicts(lockMethodTable, lockmode, lock,
1750  proc->waitProcLock))
1751  {
1752  /* OK to waken */
1753  GrantLock(lock, proc->waitProcLock, lockmode);
1754  proc = ProcWakeup(proc, PROC_WAIT_STATUS_OK);
1755 
1756  /*
1757  * ProcWakeup removes proc from the lock's waiting process queue
1758  * and returns the next proc in chain; don't use proc's next-link,
1759  * because it's been cleared.
1760  */
1761  }
1762  else
1763  {
1764  /*
1765  * Cannot wake this guy. Remember his request for later checks.
1766  */
1767  aheadRequests |= LOCKBIT_ON(lockmode);
1768  proc = (PGPROC *) proc->links.next;
1769  }
1770  }
1771 
1772  Assert(waitQueue->size >= 0);
1773 }
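/*
 * Illustrative sketch (not part of this file): the wake-up decision above is
 * plain bitmask arithmetic over LOCKMASKs.  Each still-blocked waiter adds
 * LOCKBIT_ON(its mode) to aheadRequests, and a later waiter can be granted
 * only if
 *
 *     (lockMethodTable->conflictTab[lockmode] & aheadRequests) == 0
 *
 * so, for example, an AccessExclusiveLock waiter left at the head of the
 * queue (whose mode conflicts with every other mode) keeps everything queued
 * behind it asleep until it is granted in its turn.
 */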
1774 
1775 /*
1776  * CheckDeadLock
1777  *
1778  * We only get to this routine, if DEADLOCK_TIMEOUT fired while waiting for a
1779  * lock to be released by some other process. Check if there's a deadlock; if
1780  * not, just return. (But signal ProcSleep to log a message, if
1781  * log_lock_waits is true.) If we have a real deadlock, remove ourselves from
1782  * the lock's wait queue and signal an error to ProcSleep.
1783  */
1784 static void
1785 CheckDeadLock(void)
1786 {
1787  int i;
1788 
1789  /*
1790  * Acquire exclusive lock on the entire shared lock data structures. Must
1791  * grab LWLocks in partition-number order to avoid LWLock deadlock.
1792  *
1793  * Note that the deadlock check interrupt had better not be enabled
1794  * anywhere that this process itself holds lock partition locks, else this
1795  * will wait forever. Also note that LWLockAcquire creates a critical
1796  * section, so that this routine cannot be interrupted by cancel/die
1797  * interrupts.
1798  */
1799  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
1800  LWLockAcquire(LockHashPartitionLockByIndex(i), LW_EXCLUSIVE);
1801 
1802  /*
1803  * Check to see if we've been awoken by anyone in the interim.
1804  *
1805  * If we have, we can return and resume our transaction -- happy day.
1806  * Before we are awoken the process releasing the lock grants it to us so
1807  * we know that we don't have to wait anymore.
1808  *
1809  * We check by looking to see if we've been unlinked from the wait queue.
1810  * This is safe because we hold the lock partition lock.
1811  */
1812  if (MyProc->links.prev == NULL ||
1813  MyProc->links.next == NULL)
1814  goto check_done;
1815 
1816 #ifdef LOCK_DEBUG
1817  if (Debug_deadlocks)
1818  DumpAllLocks();
1819 #endif
1820 
1821  /* Run the deadlock check, and set deadlock_state for use by ProcSleep */
1822  deadlock_state = DeadLockCheck(MyProc);
1823 
1825  {
1826  /*
1827  * Oops. We have a deadlock.
1828  *
1829  * Get this process out of wait state. (Note: we could do this more
1830  * efficiently by relying on lockAwaited, but use this coding to
1831  * preserve the flexibility to kill some other transaction than the
1832  * one detecting the deadlock.)
1833  *
1834  * RemoveFromWaitQueue sets MyProc->waitStatus to
1835  * PROC_WAIT_STATUS_ERROR, so ProcSleep will report an error after we
1836  * return from the signal handler.
1837  */
1838  Assert(MyProc->waitLock != NULL);
1839  RemoveFromWaitQueue(MyProc, LockTagHashCode(&(MyProc->waitLock->tag)));
1840 
1841  /*
1842  * We're done here. Transaction abort caused by the error that
1843  * ProcSleep will raise will cause any other locks we hold to be
1844  * released, thus allowing other processes to wake up; we don't need
1845  * to do that here. NOTE: an exception is that releasing locks we
1846  * hold doesn't consider the possibility of waiters that were blocked
1847  * behind us on the lock we just failed to get, and might now be
1848  * wakable because we're not in front of them anymore. However,
1849  * RemoveFromWaitQueue took care of waking up any such processes.
1850  */
1851  }
1852 
1853  /*
1854  * And release locks. We do this in reverse order for two reasons: (1)
1855  * Anyone else who needs more than one of the locks will be trying to lock
1856  * them in increasing order; we don't want to release the other process
1857  * until it can get all the locks it needs. (2) This avoids O(N^2)
1858  * behavior inside LWLockRelease.
1859  */
1860 check_done:
1861  for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
1862  LWLockRelease(LockHashPartitionLockByIndex(i));
1863 }
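
The same ordering discipline applies to any code path that needs more than one partition lock at a time: acquire in increasing partition-index order, release in the reverse order. A hedged sketch of the common two-partition case, where hashcode1 and hashcode2 are hypothetical lock tag hash codes supplied by the caller:

	int			partition1 = LockHashPartition(hashcode1);
	int			partition2 = LockHashPartition(hashcode2);

	/* always lock the lower-numbered partition first to avoid LWLock deadlock */
	if (partition1 > partition2)
	{
		int			swap = partition1;

		partition1 = partition2;
		partition2 = swap;
	}
	LWLockAcquire(LockHashPartitionLockByIndex(partition1), LW_EXCLUSIVE);
	if (partition2 != partition1)
		LWLockAcquire(LockHashPartitionLockByIndex(partition2), LW_EXCLUSIVE);

	/* ... work that touches both partitions ... */

	/* release in reverse order of acquisition */
	if (partition2 != partition1)
		LWLockRelease(LockHashPartitionLockByIndex(partition2));
	LWLockRelease(LockHashPartitionLockByIndex(partition1));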
1864 
1865 /*
1866  * CheckDeadLockAlert - Handle the expiry of deadlock_timeout.
1867  *
1868  * NB: Runs inside a signal handler, be careful.
1869  */
1870 void
1871 CheckDeadLockAlert(void)
1872 {
1873  int save_errno = errno;
1874 
1875  got_deadlock_timeout = true;
1876 
1877  /*
1878  * Have to set the latch again, even if handle_sig_alarm already did. Back
1879  * then got_deadlock_timeout wasn't yet set... It's unlikely that this
1880  * ever would be a problem, but setting a set latch again is cheap.
1881  *
1882  * Note that, when this function runs inside procsignal_sigusr1_handler(),
1883  * the handler function sets the latch again after the latch is set here.
1884  */
1885  SetLatch(MyLatch);
1886  errno = save_errno;
1887 }
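
Everything else is deferred out of signal context: the handler only records the event and pokes the latch, and the lock-wait loop in ProcSleep notices the flag later. Roughly, the consuming side looks like the sketch below (a paraphrase, not an exact quote of ProcSleep):

	/* in the sleep loop, after the latch wakes us and has been reset */
	if (got_deadlock_timeout)
	{
		CheckDeadLock();			/* safe here: we are not in a signal handler */
		got_deadlock_timeout = false;
	}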
1888 
1889 /*
1890  * ProcWaitForSignal - wait for a signal from another backend.
1891  *
1892  * As this uses the generic process latch the caller has to be robust against
1893  * unrelated wakeups: Always check that the desired state has occurred, and
1894  * wait again if not.
1895  */
1896 void
1897 ProcWaitForSignal(uint32 wait_event_info)
1898 {
1899  (void) WaitLatch(MyLatch, WL_LATCH_SET | WL_EXIT_ON_PM_DEATH, 0,
1900  wait_event_info);
1901  ResetLatch(MyLatch);
1902  CHECK_FOR_INTERRUPTS();
1903 }
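
Because the shared process latch can be set for unrelated reasons, a return from ProcWaitForSignal() is only a hint to re-check. A hedged caller-side sketch, where MySharedState and my_wait_event_info are hypothetical and the peer backend is assumed to set the flag and then call ProcSendSignal() with this backend's PID:

typedef struct MySharedState
{
	volatile bool ready;		/* set by the peer backend before it signals us */
} MySharedState;

static void
wait_until_ready(MySharedState *state, uint32 my_wait_event_info)
{
	/* spurious wakeups are normal: always re-test the condition */
	while (!state->ready)
		ProcWaitForSignal(my_wait_event_info);
}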
1904 
1905 /*
1906  * ProcSendSignal - send a signal to a backend identified by PID
1907  */
1908 void
1909 ProcSendSignal(int pid)
1910 {
1911  PGPROC *proc = NULL;
1912 
1913  if (RecoveryInProgress())
1914  {
1915  SpinLockAcquire(ProcStructLock);
1916 
1917  /*
1918  * Check to see whether it is the Startup process we wish to signal.
1919  * This call is made by the buffer manager when it wishes to wake up a
1920  * process that has been waiting for a pin so that it can obtain a
1921  * cleanup lock using LockBufferForCleanup(). Startup is not a normal
1922  * backend, so BackendPidGetProc() will not return any pid at all. So
1923  * we remember the information for this special case.
1924  */
1925  if (pid == ProcGlobal->startupProcPid)
1926  proc = ProcGlobal->startupProc;
1927 
1928  SpinLockRelease(ProcStructLock);
1929  }
1930 
1931  if (proc == NULL)
1932  proc = BackendPidGetProc(pid);
1933 
1934  if (proc != NULL)
1935  {
1936  SetLatch(&proc->procLatch);
1937  }
1938 }
1939 
1940 /*
1941  * BecomeLockGroupLeader - designate process as lock group leader
1942  *
1943  * Once this function has returned, other processes can join the lock group
1944  * by calling BecomeLockGroupMember.
1945  */
1946 void
1947 BecomeLockGroupLeader(void)
1948 {
1949  LWLock *leader_lwlock;
1950 
1951  /* If we already did it, we don't need to do it again. */
1952  if (MyProc->lockGroupLeader == MyProc)
1953  return;
1954 
1955  /* We had better not be a follower. */
1956  Assert(MyProc->lockGroupLeader == NULL);
1957 
1958  /* Create single-member group, containing only ourselves. */
1959  leader_lwlock = LockHashPartitionLockByProc(MyProc);
1960  LWLockAcquire(leader_lwlock, LW_EXCLUSIVE);
1961  MyProc->lockGroupLeader = MyProc;
1962  dlist_push_head(&MyProc->lockGroupMembers, &MyProc->lockGroupLink);
1963  LWLockRelease(leader_lwlock);
1964 }
1965 
1966 /*
1967  * BecomeLockGroupMember - designate process as lock group member
1968  *
1969  * This is pretty straightforward except for the possibility that the leader
1970  * whose group we're trying to join might exit before we manage to do so;
1971  * and the PGPROC might get recycled for an unrelated process. To avoid
1972  * that, we require the caller to pass the PID of the intended PGPROC as
1973  * an interlock. Returns true if we successfully join the intended lock
1974  * group, and false if not.
1975  */
1976 bool
1977 BecomeLockGroupMember(PGPROC *leader, int pid)
1978 {
1979  LWLock *leader_lwlock;
1980  bool ok = false;
1981 
1982  /* Group leader can't become member of group */
1983  Assert(MyProc != leader);
1984 
1985  /* Can't already be a member of a group */
1986  Assert(MyProc->lockGroupLeader == NULL);
1987 
1988  /* PID must be valid. */
1989  Assert(pid != 0);
1990 
1991  /*
1992  * Get lock protecting the group fields. Note LockHashPartitionLockByProc
1993  * accesses leader->pgprocno in a PGPROC that might be free. This is safe
1994  * because all PGPROCs' pgprocno fields are set during shared memory
1995  * initialization and never change thereafter; so we will acquire the
1996  * correct lock even if the leader PGPROC is in process of being recycled.
1997  */
1998  leader_lwlock = LockHashPartitionLockByProc(leader);
1999  LWLockAcquire(leader_lwlock, LW_EXCLUSIVE);
2000 
2001  /* Is this the leader we're looking for? */
2002  if (leader->pid == pid && leader->lockGroupLeader == leader)
2003  {
2004  /* OK, join the group */
2005  ok = true;
2006  MyProc->lockGroupLeader = leader;
2007  dlist_push_tail(&leader->lockGroupMembers, &MyProc->lockGroupLink);
2008  }
2009  LWLockRelease(leader_lwlock);
2010 
2011  return ok;
2012 }
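
Taken together with BecomeLockGroupLeader() above, the intended call pattern mirrors parallel-worker setup: the leader forms the group before launching workers and publishes its PGPROC pointer and PID; each worker then joins using the PID as the interlock and treats a false return as "the leader is already gone". A hedged sketch, with leader_pgproc and leader_pid assumed to arrive through whatever shared state the caller uses:

	/* leader side, before registering its workers */
	BecomeLockGroupLeader();
	/* ... publish MyProc and MyProcPid where the workers can read them ... */

	/* worker side, during startup */
	if (!BecomeLockGroupMember(leader_pgproc, leader_pid))
	{
		/* leader exited (or its PGPROC was recycled) before we could join */
		ereport(ERROR,
				(errmsg("lock group leader is no longer running")));
	}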