PostgreSQL Source Code (git master)
proc.c
1 /*-------------------------------------------------------------------------
2  *
3  * proc.c
4  * routines to manage per-process shared memory data structure
5  *
6  * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  *
10  * IDENTIFICATION
11  * src/backend/storage/lmgr/proc.c
12  *
13  *-------------------------------------------------------------------------
14  */
15 /*
16  * Interface (a):
17  * ProcSleep(), ProcWakeup(),
18  * ProcQueueAlloc() -- create a shm queue for sleeping processes
19  * ProcQueueInit() -- create a queue without allocing memory
20  *
21  * Waiting for a lock causes the backend to be put to sleep. Whoever releases
22  * the lock wakes the process up again (and gives it an error code so it knows
23  * whether it was awoken on an error condition).
24  *
25  * Interface (b):
26  *
27  * ProcReleaseLocks -- frees the locks associated with current transaction
28  *
29  * ProcKill -- destroys the shared memory state (and locks)
30  * associated with the process.
31  */
32 #include "postgres.h"
33 
34 #include <signal.h>
35 #include <unistd.h>
36 #include <sys/time.h>
37 
38 #include "access/transam.h"
39 #include "access/twophase.h"
40 #include "access/xact.h"
41 #include "miscadmin.h"
42 #include "pgstat.h"
43 #include "postmaster/autovacuum.h"
44 #include "replication/slot.h"
45 #include "replication/syncrep.h"
46 #include "replication/walsender.h"
48 #include "storage/ipc.h"
49 #include "storage/lmgr.h"
50 #include "storage/pmsignal.h"
51 #include "storage/proc.h"
52 #include "storage/procarray.h"
53 #include "storage/procsignal.h"
54 #include "storage/spin.h"
55 #include "storage/standby.h"
56 #include "utils/timeout.h"
57 #include "utils/timestamp.h"
58 
59 /* GUC variables */
60 int DeadlockTimeout = 1000;
62 int LockTimeout = 0;
64 bool log_lock_waits = false;
65 
66 /* Pointer to this process's PGPROC and PGXACT structs, if any */
67 PGPROC *MyProc = NULL;
68 PGXACT *MyPgXact = NULL;
69 
70 /*
71  * This spinlock protects the freelist of recycled PGPROC structures.
72  * We cannot use an LWLock because the LWLock manager depends on already
73  * having a PGPROC and a wait semaphore! But these structures are touched
74  * relatively infrequently (only at backend startup or shutdown) and not for
75  * very long, so a spinlock is okay.
76  */
77 slock_t *ProcStructLock = NULL;
78 
79 /* Pointers to shared-memory structures */
83 
84 /* If we are waiting for a lock, this points to the associated LOCALLOCK */
85 static LOCALLOCK *lockAwaited = NULL;
86 
87 static DeadLockState deadlock_state = DS_NOT_YET_CHECKED;
88 
89 /* Is a deadlock check pending? */
90 static volatile sig_atomic_t got_deadlock_timeout;
91 
92 static void RemoveProcFromArray(int code, Datum arg);
93 static void ProcKill(int code, Datum arg);
94 static void AuxiliaryProcKill(int code, Datum arg);
95 static void CheckDeadLock(void);
96 
97 
98 /*
99  * Report shared-memory space needed by InitProcGlobal.
100  */
101 Size
102 ProcGlobalShmemSize(void)
103 {
104  Size size = 0;
105 
106  /* ProcGlobal */
107  size = add_size(size, sizeof(PROC_HDR));
108  /* MyProcs, including autovacuum workers and launcher */
109  size = add_size(size, mul_size(MaxBackends, sizeof(PGPROC)));
110  /* AuxiliaryProcs */
111  size = add_size(size, mul_size(NUM_AUXILIARY_PROCS, sizeof(PGPROC)));
112  /* Prepared xacts */
113  size = add_size(size, mul_size(max_prepared_xacts, sizeof(PGPROC)));
114  /* ProcStructLock */
115  size = add_size(size, sizeof(slock_t));
116 
117  size = add_size(size, mul_size(MaxBackends, sizeof(PGXACT)));
118  size = add_size(size, mul_size(NUM_AUXILIARY_PROCS, sizeof(PGXACT)));
119  size = add_size(size, mul_size(max_prepared_xacts, sizeof(PGXACT)));
120 
121  return size;
122 }
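
The function above accounts for one PROC_HDR, plus one PGPROC and one PGXACT per slot (MaxBackends + NUM_AUXILIARY_PROCS + max_prepared_xacts slots in total), plus the ProcStructLock spinlock. A minimal standalone sketch of the same accumulation, using plain size_t arithmetic in place of PostgreSQL's overflow-checked add_size()/mul_size() and made-up sizes and counts in place of the real structs and GUCs:

#include <stddef.h>
#include <stdio.h>

int
main(void)
{
    /* Illustrative stand-ins, not the real PostgreSQL values. */
    size_t proc_size = 832;            /* pretend sizeof(PGPROC)   */
    size_t xact_size = 16;             /* pretend sizeof(PGXACT)   */
    size_t hdr_size  = 128;            /* pretend sizeof(PROC_HDR) */
    size_t lock_size = 4;              /* pretend sizeof(slock_t)  */

    size_t max_backends       = 121;   /* connections + AV workers + launcher + bgworkers + walsenders */
    size_t num_aux_procs      = 4;
    size_t max_prepared_xacts = 2;
    size_t slots = max_backends + num_aux_procs + max_prepared_xacts;

    size_t size = hdr_size;            /* ProcGlobal header   */
    size += slots * proc_size;         /* one PGPROC per slot */
    size += slots * xact_size;         /* one PGXACT per slot */
    size += lock_size;                 /* ProcStructLock      */

    printf("estimated shared memory for the proc arrays: %zu bytes\n", size);
    return 0;
}
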
123 
124 /*
125  * Report number of semaphores needed by InitProcGlobal.
126  */
127 int
128 ProcGlobalSemas(void)
129 {
130  /*
131  * We need a sema per backend (including autovacuum), plus one for each
132  * auxiliary process.
133  */
134  return MaxBackends + NUM_AUXILIARY_PROCS;
135 }
136 
137 /*
138  * InitProcGlobal -
139  * Initialize the global process table during postmaster or standalone
140  * backend startup.
141  *
142  * We also create all the per-process semaphores we will need to support
143  * the requested number of backends. We used to allocate semaphores
144  * only when backends were actually started up, but that is bad because
145  * it lets Postgres fail under load --- a lot of Unix systems are
146  * (mis)configured with small limits on the number of semaphores, and
147  * running out when trying to start another backend is a common failure.
148  * So, now we grab enough semaphores to support the desired max number
149  * of backends immediately at initialization --- if the sysadmin has set
150  * MaxConnections, max_worker_processes, max_wal_senders, or
151  * autovacuum_max_workers higher than his kernel will support, he'll
152  * find out sooner rather than later.
153  *
154  * Another reason for creating semaphores here is that the semaphore
155  * implementation typically requires us to create semaphores in the
156  * postmaster, not in backends.
157  *
158  * Note: this is NOT called by individual backends under a postmaster,
159  * not even in the EXEC_BACKEND case. The ProcGlobal and AuxiliaryProcs
160  * pointers must be propagated specially for EXEC_BACKEND operation.
161  */
162 void
163 InitProcGlobal(void)
164 {
165  PGPROC *procs;
166  PGXACT *pgxacts;
167  int i,
168  j;
169  bool found;
170  uint32 TotalProcs = MaxBackends + NUM_AUXILIARY_PROCS + max_prepared_xacts;
171 
172  /* Create the ProcGlobal shared structure */
173  ProcGlobal = (PROC_HDR *)
174  ShmemInitStruct("Proc Header", sizeof(PROC_HDR), &found);
175  Assert(!found);
176 
177  /*
178  * Initialize the data structures.
179  */
181  ProcGlobal->freeProcs = NULL;
182  ProcGlobal->autovacFreeProcs = NULL;
183  ProcGlobal->bgworkerFreeProcs = NULL;
184  ProcGlobal->walsenderFreeProcs = NULL;
185  ProcGlobal->startupProc = NULL;
186  ProcGlobal->startupProcPid = 0;
187  ProcGlobal->startupBufferPinWaitBufId = -1;
188  ProcGlobal->walwriterLatch = NULL;
189  ProcGlobal->checkpointerLatch = NULL;
192 
193  /*
194  * Create and initialize all the PGPROC structures we'll need. There are
195  * five separate consumers: (1) normal backends, (2) autovacuum workers
196  * and the autovacuum launcher, (3) background workers, (4) auxiliary
197  * processes, and (5) prepared transactions. Each PGPROC structure is
198  * dedicated to exactly one of these purposes, and they do not move
199  * between groups.
200  */
201  procs = (PGPROC *) ShmemAlloc(TotalProcs * sizeof(PGPROC));
202  MemSet(procs, 0, TotalProcs * sizeof(PGPROC));
203  ProcGlobal->allProcs = procs;
204  /* XXX allProcCount isn't really all of them; it excludes prepared xacts */
205  ProcGlobal->allProcCount = MaxBackends + NUM_AUXILIARY_PROCS;
206 
207  /*
208  * Also allocate a separate array of PGXACT structures. This is separate
209  * from the main PGPROC array so that the most heavily accessed data is
210  * stored contiguously in memory in as few cache lines as possible. This
211  * provides significant performance benefits, especially on a
212  * multiprocessor system. There is one PGXACT structure for every PGPROC
213  * structure.
214  */
215  pgxacts = (PGXACT *) ShmemAlloc(TotalProcs * sizeof(PGXACT));
216  MemSet(pgxacts, 0, TotalProcs * sizeof(PGXACT));
217  ProcGlobal->allPgXact = pgxacts;
218 
219  for (i = 0; i < TotalProcs; i++)
220  {
221  /* Common initialization for all PGPROCs, regardless of type. */
222 
223  /*
224  * Set up per-PGPROC semaphore, latch, and backendLock. Prepared xact
225  * dummy PGPROCs don't need these though - they're never associated
226  * with a real process
227  */
228  if (i < MaxBackends + NUM_AUXILIARY_PROCS)
229  {
230  procs[i].sem = PGSemaphoreCreate();
231  InitSharedLatch(&(procs[i].procLatch));
232  LWLockInitialize(&(procs[i].backendLock), LWTRANCHE_PROC);
233  }
234  procs[i].pgprocno = i;
235 
236  /*
237  * Newly created PGPROCs for normal backends, autovacuum and bgworkers
238  * must be queued up on the appropriate free list. Because there can
239  * only ever be a small, fixed number of auxiliary processes, no free
240  * list is used in that case; InitAuxiliaryProcess() instead uses a
241  * linear search. PGPROCs for prepared transactions are added to a
242  * free list by TwoPhaseShmemInit().
243  */
244  if (i < MaxConnections)
245  {
246  /* PGPROC for normal backend, add to freeProcs list */
247  procs[i].links.next = (SHM_QUEUE *) ProcGlobal->freeProcs;
248  ProcGlobal->freeProcs = &procs[i];
249  procs[i].procgloballist = &ProcGlobal->freeProcs;
250  }
251  else if (i < MaxConnections + autovacuum_max_workers + 1)
252  {
253  /* PGPROC for AV launcher/worker, add to autovacFreeProcs list */
254  procs[i].links.next = (SHM_QUEUE *) ProcGlobal->autovacFreeProcs;
255  ProcGlobal->autovacFreeProcs = &procs[i];
256  procs[i].procgloballist = &ProcGlobal->autovacFreeProcs;
257  }
258  else if (i < MaxConnections + autovacuum_max_workers + 1 + max_worker_processes)
259  {
260  /* PGPROC for bgworker, add to bgworkerFreeProcs list */
261  procs[i].links.next = (SHM_QUEUE *) ProcGlobal->bgworkerFreeProcs;
262  ProcGlobal->bgworkerFreeProcs = &procs[i];
263  procs[i].procgloballist = &ProcGlobal->bgworkerFreeProcs;
264  }
265  else if (i < MaxBackends)
266  {
267  /* PGPROC for walsender, add to walsenderFreeProcs list */
268  procs[i].links.next = (SHM_QUEUE *) ProcGlobal->walsenderFreeProcs;
269  ProcGlobal->walsenderFreeProcs = &procs[i];
270  procs[i].procgloballist = &ProcGlobal->walsenderFreeProcs;
271  }
272 
273  /* Initialize myProcLocks[] shared memory queues. */
274  for (j = 0; j < NUM_LOCK_PARTITIONS; j++)
275  SHMQueueInit(&(procs[i].myProcLocks[j]));
276 
277  /* Initialize lockGroupMembers list. */
278  dlist_init(&procs[i].lockGroupMembers);
279 
280  /*
281  * Initialize the atomic variables, otherwise, it won't be safe to
282  * access them for backends that aren't currently in use.
283  */
284  pg_atomic_init_u32(&(procs[i].procArrayGroupNext), INVALID_PGPROCNO);
285  pg_atomic_init_u32(&(procs[i].clogGroupNext), INVALID_PGPROCNO);
286  }
287 
288  /*
289  * Save pointers to the blocks of PGPROC structures reserved for auxiliary
290  * processes and prepared transactions.
291  */
292  AuxiliaryProcs = &procs[MaxBackends];
293  PreparedXactProcs = &procs[MaxBackends + NUM_AUXILIARY_PROCS];
294 
295  /* Create ProcStructLock spinlock, too */
296  ProcStructLock = (slock_t *) ShmemAlloc(sizeof(slock_t));
297  SpinLockInit(ProcStructLock);
298 }
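
The per-type free lists built in the loop above are plain LIFO singly linked lists threaded through each PGPROC's links.next pointer. A minimal sketch of that push pattern, with a simplified struct standing in for PGPROC/SHM_QUEUE (the names here are illustrative, not the real definitions):

#include <stdio.h>

typedef struct SlotProc
{
    struct SlotProc *next;          /* plays the role of links.next */
    int              pgprocno;
} SlotProc;

static SlotProc *free_procs;        /* plays the role of ProcGlobal->freeProcs */

int
main(void)
{
    SlotProc procs[4];

    /* Same shape as the InitProcGlobal() loop: push each slot onto the list head. */
    for (int i = 0; i < 4; i++)
    {
        procs[i].pgprocno = i;
        procs[i].next = free_procs;     /* procs[i].links.next = ...->freeProcs */
        free_procs = &procs[i];         /* ...->freeProcs = &procs[i]           */
    }

    /* The list now hands slots back in LIFO order: 3, 2, 1, 0. */
    for (SlotProc *p = free_procs; p != NULL; p = p->next)
        printf("free slot %d\n", p->pgprocno);
    return 0;
}
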
299 
300 /*
301  * InitProcess -- initialize a per-process data structure for this backend
302  */
303 void
304 InitProcess(void)
305 {
306  PGPROC *volatile *procgloballist;
307 
308  /*
309  * ProcGlobal should be set up already (if we are a backend, we inherit
310  * this by fork() or EXEC_BACKEND mechanism from the postmaster).
311  */
312  if (ProcGlobal == NULL)
313  elog(PANIC, "proc header uninitialized");
314 
315  if (MyProc != NULL)
316  elog(ERROR, "you already exist");
317 
318  /* Decide which list should supply our PGPROC. */
319  if (IsAnyAutoVacuumProcess())
320  procgloballist = &ProcGlobal->autovacFreeProcs;
321  else if (IsBackgroundWorker)
322  procgloballist = &ProcGlobal->bgworkerFreeProcs;
323  else if (am_walsender)
324  procgloballist = &ProcGlobal->walsenderFreeProcs;
325  else
326  procgloballist = &ProcGlobal->freeProcs;
327 
328  /*
329  * Try to get a proc struct from the appropriate free list. If this
330  * fails, we must be out of PGPROC structures (not to mention semaphores).
331  *
332  * While we are holding the ProcStructLock, also copy the current shared
333  * estimate of spins_per_delay to local storage.
334  */
336 
338 
339  MyProc = *procgloballist;
340 
341  if (MyProc != NULL)
342  {
343  *procgloballist = (PGPROC *) MyProc->links.next;
345  }
346  else
347  {
348  /*
349  * If we reach here, all the PGPROCs are in use. This is one of the
350  * possible places to detect "too many backends", so give the standard
351  * error message. XXX do we need to give a different failure message
352  * in the autovacuum case?
353  */
355  if (am_walsender)
356  ereport(FATAL,
357  (errcode(ERRCODE_TOO_MANY_CONNECTIONS),
358  errmsg("number of requested standby connections exceeds max_wal_senders (currently %d)",
359  max_wal_senders)));
360  ereport(FATAL,
361  (errcode(ERRCODE_TOO_MANY_CONNECTIONS),
362  errmsg("sorry, too many clients already")));
363  }
364  MyPgXact = &ProcGlobal->allPgXact[MyProc->pgprocno];
365 
366  /*
367  * Cross-check that the PGPROC is of the type we expect; if this were not
368  * the case, it would get returned to the wrong list.
369  */
370  Assert(MyProc->procgloballist == procgloballist);
371 
372  /*
373  * Now that we have a PGPROC, mark ourselves as an active postmaster
374  * child; this is so that the postmaster can detect it if we exit without
375  * cleaning up. (XXX autovac launcher currently doesn't participate in
376  * this; it probably should.)
377  */
380 
381  /*
382  * Initialize all fields of MyProc, except for those previously
383  * initialized by InitProcGlobal.
384  */
385  SHMQueueElemInit(&(MyProc->links));
386  MyProc->waitStatus = STATUS_OK;
388  MyProc->fpVXIDLock = false;
390  MyPgXact->xid = InvalidTransactionId;
391  MyPgXact->xmin = InvalidTransactionId;
392  MyProc->pid = MyProcPid;
393  /* backendId, databaseId and roleId will be filled in later */
394  MyProc->backendId = InvalidBackendId;
395  MyProc->databaseId = InvalidOid;
396  MyProc->roleId = InvalidOid;
397  MyProc->tempNamespaceId = InvalidOid;
399  MyPgXact->delayChkpt = false;
400  MyPgXact->vacuumFlags = 0;
401  /* NB -- autovac launcher intentionally does not set IS_AUTOVACUUM */
402  if (IsAutoVacuumWorkerProcess())
403  MyPgXact->vacuumFlags |= PROC_IS_AUTOVACUUM;
404  MyProc->lwWaiting = false;
405  MyProc->lwWaitMode = 0;
406  MyProc->waitLock = NULL;
407  MyProc->waitProcLock = NULL;
408 #ifdef USE_ASSERT_CHECKING
409  {
410  int i;
411 
412  /* Last process should have released all locks. */
413  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
414  Assert(SHMQueueEmpty(&(MyProc->myProcLocks[i])));
415  }
416 #endif
417  MyProc->recoveryConflictPending = false;
418 
419  /* Initialize fields for sync rep */
420  MyProc->waitLSN = 0;
422  SHMQueueElemInit(&(MyProc->syncRepLinks));
423 
424  /* Initialize fields for group XID clearing. */
425  MyProc->procArrayGroupMember = false;
428 
429  /* Check that group locking fields are in a proper initial state. */
430  Assert(MyProc->lockGroupLeader == NULL);
432 
433  /* Initialize wait event information. */
434  MyProc->wait_event_info = 0;
435 
436  /* Initialize fields for group transaction status update. */
437  MyProc->clogGroupMember = false;
440  MyProc->clogGroupMemberPage = -1;
443 
444  /*
445  * Acquire ownership of the PGPROC's latch, so that we can use WaitLatch
446  * on it. That allows us to repoint the process latch, which so far
447  * points to the process-local one, to the shared one.
448  */
449  OwnLatch(&MyProc->procLatch);
451 
452  /*
453  * We might be reusing a semaphore that belonged to a failed process. So
454  * be careful and reinitialize its value here. (This is not strictly
455  * necessary anymore, but seems like a good idea for cleanliness.)
456  */
457  PGSemaphoreReset(MyProc->sem);
458 
459  /*
460  * Arrange to clean up at backend exit.
461  */
462  on_shmem_exit(ProcKill, 0);
463 
464  /*
465  * Now that we have a PGPROC, we could try to acquire locks, so initialize
466  * local state needed for LWLocks, and the deadlock checker.
467  */
468  InitLWLockAccess();
469  InitDeadLockChecking();
470 }
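
A sketch of the guarded pop that InitProcess() performs on its free list, with a pthread mutex standing in for ProcStructLock and a NULL result standing in for the "sorry, too many clients already" exit; the types and names are simplified placeholders, not the real PostgreSQL definitions:

#include <pthread.h>
#include <stdio.h>

typedef struct FakeProc { struct FakeProc *next; int slot; } FakeProc;

static FakeProc        *free_list = NULL;
static pthread_mutex_t  list_lock = PTHREAD_MUTEX_INITIALIZER;

static FakeProc *
pop_free(void)
{
    pthread_mutex_lock(&list_lock);      /* SpinLockAcquire(ProcStructLock) analogue */
    FakeProc *p = free_list;
    if (p != NULL)
        free_list = p->next;             /* *procgloballist = MyProc->links.next     */
    pthread_mutex_unlock(&list_lock);    /* SpinLockRelease(ProcStructLock) analogue */
    return p;                            /* NULL => all slots are in use             */
}

int
main(void)
{
    FakeProc a = {NULL, 0};
    free_list = &a;

    FakeProc *mine = pop_free();
    printf("got slot %d\n", mine ? mine->slot : -1);
    printf("second pop %s\n", pop_free() ? "succeeded" : "failed: too many clients");
    return 0;
}
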
471 
472 /*
473  * InitProcessPhase2 -- make MyProc visible in the shared ProcArray.
474  *
475  * This is separate from InitProcess because we can't acquire LWLocks until
476  * we've created a PGPROC, but in the EXEC_BACKEND case ProcArrayAdd won't
477  * work until after we've done CreateSharedMemoryAndSemaphores.
478  */
479 void
480 InitProcessPhase2(void)
481 {
482  Assert(MyProc != NULL);
483 
484  /*
485  * Add our PGPROC to the PGPROC array in shared memory.
486  */
487  ProcArrayAdd(MyProc);
488 
489  /*
490  * Arrange to clean that up at backend exit.
491  */
492  on_shmem_exit(RemoveProcFromArray, 0);
493 }
494 
495 /*
496  * InitAuxiliaryProcess -- create a per-auxiliary-process data structure
497  *
498  * This is called by bgwriter and similar processes so that they will have a
499  * MyProc value that's real enough to let them wait for LWLocks. The PGPROC
500  * and sema that are assigned are one of the extra ones created during
501  * InitProcGlobal.
502  *
503  * Auxiliary processes are presently not expected to wait for real (lockmgr)
504  * locks, so we need not set up the deadlock checker. They are never added
505  * to the ProcArray or the sinval messaging mechanism, either. They also
506  * don't get a VXID assigned, since this is only useful when we actually
507  * hold lockmgr locks.
508  *
509  * The startup process, however, uses locks but never waits for them in the
510  * normal backend sense. It also takes part in sinval messaging as a sendOnly
511  * process, so it never reads messages from the sinval queue. The startup
512  * process therefore does have a VXID and does show up in pg_locks.
513  */
514 void
515 InitAuxiliaryProcess(void)
516 {
517  PGPROC *auxproc;
518  int proctype;
519 
520  /*
521  * ProcGlobal should be set up already (if we are a backend, we inherit
522  * this by fork() or EXEC_BACKEND mechanism from the postmaster).
523  */
524  if (ProcGlobal == NULL || AuxiliaryProcs == NULL)
525  elog(PANIC, "proc header uninitialized");
526 
527  if (MyProc != NULL)
528  elog(ERROR, "you already exist");
529 
530  /*
531  * We use the ProcStructLock to protect assignment and releasing of
532  * AuxiliaryProcs entries.
533  *
534  * While we are holding the ProcStructLock, also copy the current shared
535  * estimate of spins_per_delay to local storage.
536  */
538 
540 
541  /*
542  * Find a free auxproc ... *big* trouble if there isn't one ...
543  */
544  for (proctype = 0; proctype < NUM_AUXILIARY_PROCS; proctype++)
545  {
546  auxproc = &AuxiliaryProcs[proctype];
547  if (auxproc->pid == 0)
548  break;
549  }
550  if (proctype >= NUM_AUXILIARY_PROCS)
551  {
553  elog(FATAL, "all AuxiliaryProcs are in use");
554  }
555 
556  /* Mark auxiliary proc as in use by me */
557  /* use volatile pointer to prevent code rearrangement */
558  ((volatile PGPROC *) auxproc)->pid = MyProcPid;
559 
560  MyProc = auxproc;
561  MyPgXact = &ProcGlobal->allPgXact[auxproc->pgprocno];
562 
564 
565  /*
566  * Initialize all fields of MyProc, except for those previously
567  * initialized by InitProcGlobal.
568  */
569  SHMQueueElemInit(&(MyProc->links));
570  MyProc->waitStatus = STATUS_OK;
572  MyProc->fpVXIDLock = false;
574  MyPgXact->xid = InvalidTransactionId;
575  MyPgXact->xmin = InvalidTransactionId;
576  MyProc->backendId = InvalidBackendId;
577  MyProc->databaseId = InvalidOid;
578  MyProc->roleId = InvalidOid;
579  MyProc->tempNamespaceId = InvalidOid;
581  MyPgXact->delayChkpt = false;
582  MyPgXact->vacuumFlags = 0;
583  MyProc->lwWaiting = false;
584  MyProc->lwWaitMode = 0;
585  MyProc->waitLock = NULL;
586  MyProc->waitProcLock = NULL;
587 #ifdef USE_ASSERT_CHECKING
588  {
589  int i;
590 
591  /* Last process should have released all locks. */
592  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
593  Assert(SHMQueueEmpty(&(MyProc->myProcLocks[i])));
594  }
595 #endif
596 
597  /*
598  * Acquire ownership of the PGPROC's latch, so that we can use WaitLatch
599  * on it. That allows us to repoint the process latch, which so far
600  * points to the process-local one, to the shared one.
601  */
602  OwnLatch(&MyProc->procLatch);
604 
605  /* Check that group locking fields are in a proper initial state. */
606  Assert(MyProc->lockGroupLeader == NULL);
608 
609  /*
610  * We might be reusing a semaphore that belonged to a failed process. So
611  * be careful and reinitialize its value here. (This is not strictly
612  * necessary anymore, but seems like a good idea for cleanliness.)
613  */
614  PGSemaphoreReset(MyProc->sem);
615 
616  /*
617  * Arrange to clean up at process exit.
618  */
619  on_shmem_exit(AuxiliaryProcKill, Int32GetDatum(proctype));
620 }
621 
622 /*
623  * Record the PID and PGPROC structures for the Startup process, for use in
624  * ProcSendSignal(). See comments there for further explanation.
625  */
626 void
627 PublishStartupProcessInformation(void)
628 {
630 
631  ProcGlobal->startupProc = MyProc;
632  ProcGlobal->startupProcPid = MyProcPid;
633 
635 }
636 
637 /*
638  * Used from bufmgr to share the value of the buffer that Startup waits on,
639  * or to reset the value to "not waiting" (-1). This allows processing
640  * of recovery conflicts for buffer pins. The value is set before backends
641  * look at it, so locking is not required, especially since the store is a
642  * single atomic integer assignment.
643  */
644 void
645 SetStartupBufferPinWaitBufId(int bufid)
646 {
647  /* use volatile pointer to prevent code rearrangement */
648  volatile PROC_HDR *procglobal = ProcGlobal;
649 
650  procglobal->startupBufferPinWaitBufId = bufid;
651 }
652 
653 /*
654  * Used by backends when they receive a request to check for buffer pin waits.
655  */
656 int
657 GetStartupBufferPinWaitBufId(void)
658 {
659  /* use volatile pointer to prevent code rearrangement */
660  volatile PROC_HDR *procglobal = ProcGlobal;
661 
662  return procglobal->startupBufferPinWaitBufId;
663 }
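
The two functions above implement a single-writer, many-reader handoff of one integer, relying on the fact that storing an aligned int is atomic. A standalone sketch of the same idea using C11 atomics to make that assumption explicit (the -1 sentinel mirrors the "not waiting" value; names are illustrative):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int startup_wait_buf = -1;    /* -1 means "not waiting" */

/* Startup-process side: publish or clear the buffer it is waiting on. */
static void
set_startup_wait_buf(int bufid)
{
    atomic_store(&startup_wait_buf, bufid);
}

/* Backend side: read whatever value is currently published. */
static int
get_startup_wait_buf(void)
{
    return atomic_load(&startup_wait_buf);
}

int
main(void)
{
    set_startup_wait_buf(42);
    printf("startup waits on buffer %d\n", get_startup_wait_buf());
    set_startup_wait_buf(-1);
    printf("now %d (not waiting)\n", get_startup_wait_buf());
    return 0;
}
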
664 
665 /*
666  * Check whether there are at least N free PGPROC objects.
667  *
668  * Note: this is designed on the assumption that N will generally be small.
669  */
670 bool
671 HaveNFreeProcs(int n)
672 {
673  PGPROC *proc;
674 
676 
677  proc = ProcGlobal->freeProcs;
678 
679  while (n > 0 && proc != NULL)
680  {
681  proc = (PGPROC *) proc->links.next;
682  n--;
683  }
684 
686 
687  return (n <= 0);
688 }
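
HaveNFreeProcs() follows at most n links, so its cost is bounded by n rather than by the length of the free list, which is why the comment assumes N will generally be small. A tiny standalone sketch of that early-exit walk (Node is a placeholder for the PGPROC link field):

#include <stdbool.h>
#include <stdio.h>

typedef struct Node { struct Node *next; } Node;

static bool
have_n_free(const Node *head, int n)
{
    const Node *p = head;

    while (n > 0 && p != NULL)      /* stop as soon as n entries are seen */
    {
        p = p->next;
        n--;
    }
    return n <= 0;
}

int
main(void)
{
    Node c = {NULL}, b = {&c}, a = {&b};               /* a -> b -> c */

    printf("have 2 free? %d\n", have_n_free(&a, 2));   /* 1 */
    printf("have 5 free? %d\n", have_n_free(&a, 5));   /* 0 */
    return 0;
}
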
689 
690 /*
691  * Check if the current process is awaiting a lock.
692  */
693 bool
694 IsWaitingForLock(void)
695 {
696  if (lockAwaited == NULL)
697  return false;
698 
699  return true;
700 }
701 
702 /*
703  * Cancel any pending wait for lock, when aborting a transaction, and revert
704  * any strong lock count acquisition for a lock being acquired.
705  *
706  * (Normally, this would only happen if we accept a cancel/die
707  * interrupt while waiting; but an ereport(ERROR) before or during the lock
708  * wait is within the realm of possibility, too.)
709  */
710 void
711 LockErrorCleanup(void)
712 {
713  LWLock *partitionLock;
714  DisableTimeoutParams timeouts[2];
715 
716  HOLD_INTERRUPTS();
717 
719 
720  /* Nothing to do if we weren't waiting for a lock */
721  if (lockAwaited == NULL)
722  {
724  return;
725  }
726 
727  /*
728  * Turn off the deadlock and lock timeout timers, if they are still
729  * running (see ProcSleep). Note we must preserve the LOCK_TIMEOUT
730  * indicator flag, since this function is executed before
731  * ProcessInterrupts when responding to SIGINT; else we'd lose the
732  * knowledge that the SIGINT came from a lock timeout and not an external
733  * source.
734  */
735  timeouts[0].id = DEADLOCK_TIMEOUT;
736  timeouts[0].keep_indicator = false;
737  timeouts[1].id = LOCK_TIMEOUT;
738  timeouts[1].keep_indicator = true;
739  disable_timeouts(timeouts, 2);
740 
741  /* Unlink myself from the wait queue, if on it (might not be anymore!) */
742  partitionLock = LockHashPartitionLock(lockAwaited->hashcode);
743  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
744 
745  if (MyProc->links.next != NULL)
746  {
747  /* We could not have been granted the lock yet */
748  RemoveFromWaitQueue(MyProc, lockAwaited->hashcode);
749  }
750  else
751  {
752  /*
753  * Somebody kicked us off the lock queue already. Perhaps they
754  * granted us the lock, or perhaps they detected a deadlock. If they
755  * did grant us the lock, we'd better remember it in our local lock
756  * table.
757  */
758  if (MyProc->waitStatus == STATUS_OK)
759  GrantAwaitedLock();
760  }
761 
762  lockAwaited = NULL;
763 
764  LWLockRelease(partitionLock);
765 
767 }
768 
769 
770 /*
771  * ProcReleaseLocks() -- release locks associated with current transaction
772  * at main transaction commit or abort
773  *
774  * At main transaction commit, we release standard locks except session locks.
775  * At main transaction abort, we release all locks including session locks.
776  *
777  * Advisory locks are released only if they are transaction-level;
778  * session-level holds remain, whether this is a commit or not.
779  *
780  * At subtransaction commit, we don't release any locks (so this func is not
781  * needed at all); we will defer the releasing to the parent transaction.
782  * At subtransaction abort, we release all locks held by the subtransaction;
783  * this is implemented by retail releasing of the locks under control of
784  * the ResourceOwner mechanism.
785  */
786 void
787 ProcReleaseLocks(bool isCommit)
788 {
789  if (!MyProc)
790  return;
791  /* If waiting, get off wait queue (should only be needed after error) */
792  LockErrorCleanup();
793  /* Release standard locks, including session-level if aborting */
794  LockReleaseAll(DEFAULT_LOCKMETHOD, !isCommit);
795  /* Release transaction-level advisory locks */
796  LockReleaseAll(USER_LOCKMETHOD, false);
797 }
798 
799 
800 /*
801  * RemoveProcFromArray() -- Remove this process from the shared ProcArray.
802  */
803 static void
804 RemoveProcFromArray(int code, Datum arg)
805 {
806  Assert(MyProc != NULL);
807  ProcArrayRemove(MyProc, InvalidTransactionId);
808 }
809 
810 /*
811  * ProcKill() -- Destroy the per-proc data structure for
812  * this process. Release any of its held LW locks.
813  */
814 static void
815 ProcKill(int code, Datum arg)
816 {
817  PGPROC *proc;
818  PGPROC *volatile *procgloballist;
819 
820  Assert(MyProc != NULL);
821 
822  /* Make sure we're out of the sync rep lists */
824 
825 #ifdef USE_ASSERT_CHECKING
826  {
827  int i;
828 
829  /* Last process should have released all locks. */
830  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
831  Assert(SHMQueueEmpty(&(MyProc->myProcLocks[i])));
832  }
833 #endif
834 
835  /*
836  * Release any LW locks I am holding. There really shouldn't be any, but
837  * it's cheap to check again before we cut the knees off the LWLock
838  * facility by releasing our PGPROC ...
839  */
841 
842  /* Cancel any pending condition variable sleep, too */
844 
845  /* Make sure active replication slots are released */
846  if (MyReplicationSlot != NULL)
848 
849  /* Also cleanup all the temporary slots. */
851 
852  /*
853  * Detach from any lock group of which we are a member. If the leader
854  * exits before all other group members, its PGPROC will remain allocated
855  * until the last group process exits; that process must return the
856  * leader's PGPROC to the appropriate list.
857  */
858  if (MyProc->lockGroupLeader != NULL)
859  {
860  PGPROC *leader = MyProc->lockGroupLeader;
861  LWLock *leader_lwlock = LockHashPartitionLockByProc(leader);
862 
863  LWLockAcquire(leader_lwlock, LW_EXCLUSIVE);
865  dlist_delete(&MyProc->lockGroupLink);
866  if (dlist_is_empty(&leader->lockGroupMembers))
867  {
868  leader->lockGroupLeader = NULL;
869  if (leader != MyProc)
870  {
871  procgloballist = leader->procgloballist;
872 
873  /* Leader exited first; return its PGPROC. */
875  leader->links.next = (SHM_QUEUE *) *procgloballist;
876  *procgloballist = leader;
878  }
879  }
880  else if (leader != MyProc)
881  MyProc->lockGroupLeader = NULL;
882  LWLockRelease(leader_lwlock);
883  }
884 
885  /*
886  * Reset MyLatch to the process local one. This is so that signal
887  * handlers et al can continue using the latch after the shared latch
888  * isn't ours anymore. After that clear MyProc and disown the shared
889  * latch.
890  */
892  proc = MyProc;
893  MyProc = NULL;
894  DisownLatch(&proc->procLatch);
895 
896  procgloballist = proc->procgloballist;
898 
899  /*
900  * If we're still a member of a locking group, that means we're a leader
901  * which has somehow exited before its children. The last remaining child
902  * will release our PGPROC. Otherwise, release it now.
903  */
904  if (proc->lockGroupLeader == NULL)
905  {
906  /* Since lockGroupLeader is NULL, lockGroupMembers should be empty. */
908 
909  /* Return PGPROC structure (and semaphore) to appropriate freelist */
910  proc->links.next = (SHM_QUEUE *) *procgloballist;
911  *procgloballist = proc;
912  }
913 
914  /* Update shared estimate of spins_per_delay */
915  ProcGlobal->spins_per_delay = update_spins_per_delay(ProcGlobal->spins_per_delay);
916 
918 
919  /*
920  * This process is no longer present in shared memory in any meaningful
921  * way, so tell the postmaster we've cleaned up acceptably well. (XXX
922  * autovac launcher should be included here someday)
923  */
926 
927  /* wake autovac launcher if needed -- see comments in FreeWorkerInfo */
928  if (AutovacuumLauncherPid != 0)
930 }
931 
932 /*
933  * AuxiliaryProcKill() -- Cut-down version of ProcKill for auxiliary
934  * processes (bgwriter, etc). The PGPROC and sema are not released, only
935  * marked as not-in-use.
936  */
937 static void
938 AuxiliaryProcKill(int code, Datum arg)
939 {
940  int proctype = DatumGetInt32(arg);
941  PGPROC *auxproc PG_USED_FOR_ASSERTS_ONLY;
942  PGPROC *proc;
943 
944  Assert(proctype >= 0 && proctype < NUM_AUXILIARY_PROCS);
945 
946  auxproc = &AuxiliaryProcs[proctype];
947 
948  Assert(MyProc == auxproc);
949 
950  /* Release any LW locks I am holding (see notes above) */
952 
953  /* Cancel any pending condition variable sleep, too */
955 
956  /*
957  * Reset MyLatch to the process local one. This is so that signal
958  * handlers et al can continue using the latch after the shared latch
959  * isn't ours anymore. After that clear MyProc and disown the shared
960  * latch.
961  */
963  proc = MyProc;
964  MyProc = NULL;
965  DisownLatch(&proc->procLatch);
966 
968 
969  /* Mark auxiliary proc no longer in use */
970  proc->pid = 0;
971 
972  /* Update shared estimate of spins_per_delay */
973  ProcGlobal->spins_per_delay = update_spins_per_delay(ProcGlobal->spins_per_delay);
974 
976 }
977 
978 /*
979  * AuxiliaryPidGetProc -- get PGPROC for an auxiliary process
980  * given its PID
981  *
982  * Returns NULL if not found.
983  */
984 PGPROC *
985 AuxiliaryPidGetProc(int pid)
986 {
987  PGPROC *result = NULL;
988  int index;
989 
990  if (pid == 0) /* never match dummy PGPROCs */
991  return NULL;
992 
993  for (index = 0; index < NUM_AUXILIARY_PROCS; index++)
994  {
995  PGPROC *proc = &AuxiliaryProcs[index];
996 
997  if (proc->pid == pid)
998  {
999  result = proc;
1000  break;
1001  }
1002  }
1003  return result;
1004 }
1005 
1006 /*
1007  * ProcQueue package: routines for putting processes to sleep
1008  * and waking them up
1009  */
1010 
1011 /*
1012  * ProcQueueAlloc -- alloc/attach to a shared memory process queue
1013  *
1014  * Returns: a pointer to the queue
1015  * Side Effects: Initializes the queue if it wasn't there before
1016  */
1017 #ifdef NOT_USED
1018 PROC_QUEUE *
1019 ProcQueueAlloc(const char *name)
1020 {
1021  PROC_QUEUE *queue;
1022  bool found;
1023 
1024  queue = (PROC_QUEUE *)
1025  ShmemInitStruct(name, sizeof(PROC_QUEUE), &found);
1026 
1027  if (!found)
1028  ProcQueueInit(queue);
1029 
1030  return queue;
1031 }
1032 #endif
1033 
1034 /*
1035  * ProcQueueInit -- initialize a shared memory process queue
1036  */
1037 void
1038 ProcQueueInit(PROC_QUEUE *queue)
1039 {
1040  SHMQueueInit(&(queue->links));
1041  queue->size = 0;
1042 }
1043 
1044 
1045 /*
1046  * ProcSleep -- put a process to sleep on the specified lock
1047  *
1048  * Caller must have set MyProc->heldLocks to reflect locks already held
1049  * on the lockable object by this process (under all XIDs).
1050  *
1051  * The lock table's partition lock must be held at entry, and will be held
1052  * at exit.
1053  *
1054  * Result: STATUS_OK if we acquired the lock, STATUS_ERROR if not (deadlock).
1055  *
1056  * ASSUME: that no one will fiddle with the queue until after
1057  * we release the partition lock.
1058  *
1059  * NOTES: The process queue is now a priority queue for locking.
1060  */
1061 int
1062 ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
1063 {
1064  LOCKMODE lockmode = locallock->tag.mode;
1065  LOCK *lock = locallock->lock;
1066  PROCLOCK *proclock = locallock->proclock;
1067  uint32 hashcode = locallock->hashcode;
1068  LWLock *partitionLock = LockHashPartitionLock(hashcode);
1069  PROC_QUEUE *waitQueue = &(lock->waitProcs);
1070  LOCKMASK myHeldLocks = MyProc->heldLocks;
1071  bool early_deadlock = false;
1072  bool allow_autovacuum_cancel = true;
1073  int myWaitStatus;
1074  PGPROC *proc;
1075  PGPROC *leader = MyProc->lockGroupLeader;
1076  int i;
1077 
1078  /*
1079  * If group locking is in use, locks held by members of my locking group
1080  * need to be included in myHeldLocks. This is not required for relation
1081  * extension or page locks which conflict among group members. However,
1082  * including them in myHeldLocks will give group members the priority to
1083  * get those locks as compared to other backends which are also trying to
1084  * acquire those locks. OTOH, we can avoid giving priority to group
1085  * members for that kind of locks, but there doesn't appear to be a clear
1086  * advantage of the same.
1087  */
1088  if (leader != NULL)
1089  {
1090  SHM_QUEUE *procLocks = &(lock->procLocks);
1091  PROCLOCK *otherproclock;
1092 
1093  otherproclock = (PROCLOCK *)
1094  SHMQueueNext(procLocks, procLocks, offsetof(PROCLOCK, lockLink));
1095  while (otherproclock != NULL)
1096  {
1097  if (otherproclock->groupLeader == leader)
1098  myHeldLocks |= otherproclock->holdMask;
1099  otherproclock = (PROCLOCK *)
1100  SHMQueueNext(procLocks, &otherproclock->lockLink,
1101  offsetof(PROCLOCK, lockLink));
1102  }
1103  }
1104 
1105  /*
1106  * Determine where to add myself in the wait queue.
1107  *
1108  * Normally I should go at the end of the queue. However, if I already
1109  * hold locks that conflict with the request of any previous waiter, put
1110  * myself in the queue just in front of the first such waiter. This is not
1111  * a necessary step, since deadlock detection would move me to before that
1112  * waiter anyway; but it's relatively cheap to detect such a conflict
1113  * immediately, and avoid delaying till deadlock timeout.
1114  *
1115  * Special case: if I find I should go in front of some waiter, check to
1116  * see if I conflict with already-held locks or the requests before that
1117  * waiter. If not, then just grant myself the requested lock immediately.
1118  * This is the same as the test for immediate grant in LockAcquire, except
1119  * we are only considering the part of the wait queue before my insertion
1120  * point.
1121  */
1122  if (myHeldLocks != 0)
1123  {
1124  LOCKMASK aheadRequests = 0;
1125 
1126  proc = (PGPROC *) waitQueue->links.next;
1127  for (i = 0; i < waitQueue->size; i++)
1128  {
1129  /*
1130  * If we're part of the same locking group as this waiter, its
1131  * locks neither conflict with ours nor contribute to
1132  * aheadRequests.
1133  */
1134  if (leader != NULL && leader == proc->lockGroupLeader)
1135  {
1136  proc = (PGPROC *) proc->links.next;
1137  continue;
1138  }
1139  /* Must he wait for me? */
1140  if (lockMethodTable->conflictTab[proc->waitLockMode] & myHeldLocks)
1141  {
1142  /* Must I wait for him ? */
1143  if (lockMethodTable->conflictTab[lockmode] & proc->heldLocks)
1144  {
1145  /*
1146  * Yes, so we have a deadlock. Easiest way to clean up
1147  * correctly is to call RemoveFromWaitQueue(), but we
1148  * can't do that until we are *on* the wait queue. So, set
1149  * a flag to check below, and break out of loop. Also,
1150  * record deadlock info for later message.
1151  */
1152  RememberSimpleDeadLock(MyProc, lockmode, lock, proc);
1153  early_deadlock = true;
1154  break;
1155  }
1156  /* I must go before this waiter. Check special case. */
1157  if ((lockMethodTable->conflictTab[lockmode] & aheadRequests) == 0 &&
1158  !LockCheckConflicts(lockMethodTable, lockmode, lock,
1159  proclock))
1160  {
1161  /* Skip the wait and just grant myself the lock. */
1162  GrantLock(lock, proclock, lockmode);
1163  GrantAwaitedLock();
1164  return STATUS_OK;
1165  }
1166  /* Break out of loop to put myself before him */
1167  break;
1168  }
1169  /* Nope, so advance to next waiter */
1170  aheadRequests |= LOCKBIT_ON(proc->waitLockMode);
1171  proc = (PGPROC *) proc->links.next;
1172  }
1173 
1174  /*
1175  * If we fall out of loop normally, proc points to waitQueue head, so
1176  * we will insert at tail of queue as desired.
1177  */
1178  }
1179  else
1180  {
1181  /* I hold no locks, so I can't push in front of anyone. */
1182  proc = (PGPROC *) &(waitQueue->links);
1183  }
1184 
1185  /*
1186  * Insert self into queue, ahead of the given proc (or at tail of queue).
1187  */
1188  SHMQueueInsertBefore(&(proc->links), &(MyProc->links));
1189  waitQueue->size++;
1190 
1191  lock->waitMask |= LOCKBIT_ON(lockmode);
1192 
1193  /* Set up wait information in PGPROC object, too */
1194  MyProc->waitLock = lock;
1195  MyProc->waitProcLock = proclock;
1196  MyProc->waitLockMode = lockmode;
1197 
1198  MyProc->waitStatus = STATUS_WAITING;
1199 
1200  /*
1201  * If we detected deadlock, give up without waiting. This must agree with
1202  * CheckDeadLock's recovery code.
1203  */
1204  if (early_deadlock)
1205  {
1206  RemoveFromWaitQueue(MyProc, hashcode);
1207  return STATUS_ERROR;
1208  }
1209 
1210  /* mark that we are waiting for a lock */
1211  lockAwaited = locallock;
1212 
1213  /*
1214  * Release the lock table's partition lock.
1215  *
1216  * NOTE: this may also cause us to exit critical-section state, possibly
1217  * allowing a cancel/die interrupt to be accepted. This is OK because we
1218  * have recorded the fact that we are waiting for a lock, and so
1219  * LockErrorCleanup will clean up if cancel/die happens.
1220  */
1221  LWLockRelease(partitionLock);
1222 
1223  /*
1224  * Also, now that we will successfully clean up after an ereport, it's
1225  * safe to check to see if there's a buffer pin deadlock against the
1226  * Startup process. Of course, that's only necessary if we're doing Hot
1227  * Standby and are not the Startup process ourselves.
1228  */
1229  if (RecoveryInProgress() && !InRecovery)
1230  CheckRecoveryConflictDeadlock();
1231 
1232  /* Reset deadlock_state before enabling the timeout handler */
1233  deadlock_state = DS_NOT_YET_CHECKED;
1234  got_deadlock_timeout = false;
1235 
1236  /*
1237  * Set timer so we can wake up after awhile and check for a deadlock. If a
1238  * deadlock is detected, the handler sets MyProc->waitStatus =
1239  * STATUS_ERROR, allowing us to know that we must report failure rather
1240  * than success.
1241  *
1242  * By delaying the check until we've waited for a bit, we can avoid
1243  * running the rather expensive deadlock-check code in most cases.
1244  *
1245  * If LockTimeout is set, also enable the timeout for that. We can save a
1246  * few cycles by enabling both timeout sources in one call.
1247  *
1248  * If InHotStandby we set lock waits slightly later for clarity with other
1249  * code.
1250  */
1251  if (!InHotStandby)
1252  {
1253  if (LockTimeout > 0)
1254  {
1255  EnableTimeoutParams timeouts[2];
1256 
1257  timeouts[0].id = DEADLOCK_TIMEOUT;
1258  timeouts[0].type = TMPARAM_AFTER;
1259  timeouts[0].delay_ms = DeadlockTimeout;
1260  timeouts[1].id = LOCK_TIMEOUT;
1261  timeouts[1].type = TMPARAM_AFTER;
1262  timeouts[1].delay_ms = LockTimeout;
1263  enable_timeouts(timeouts, 2);
1264  }
1265  else
1266  enable_timeout_after(DEADLOCK_TIMEOUT, DeadlockTimeout);
1267  }
1268 
1269  /*
1270  * If somebody wakes us between LWLockRelease and WaitLatch, the latch
1271  * will not wait. But a set latch does not necessarily mean that the lock
1272  * is free now, as there are many other sources for latch sets than
1273  * somebody releasing the lock.
1274  *
1275  * We process interrupts whenever the latch has been set, so cancel/die
1276  * interrupts are processed quickly. This means we must not mind losing
1277  * control to a cancel/die interrupt here. We don't, because we have no
1278  * shared-state-change work to do after being granted the lock (the
1279  * grantor did it all). We do have to worry about canceling the deadlock
1280  * timeout and updating the locallock table, but if we lose control to an
1281  * error, LockErrorCleanup will fix that up.
1282  */
1283  do
1284  {
1285  if (InHotStandby)
1286  {
1287  /* Set a timer and wait for that or for the Lock to be granted */
1288  ResolveRecoveryConflictWithLock(locallock->tag.lock);
1289  }
1290  else
1291  {
1292  (void) WaitLatch(MyLatch, WL_LATCH_SET | WL_EXIT_ON_PM_DEATH, 0,
1293  PG_WAIT_LOCK | locallock->tag.lock.locktag_type);
1294  ResetLatch(MyLatch);
1295  /* check for deadlocks first, as that's probably log-worthy */
1296  if (got_deadlock_timeout)
1297  {
1298  CheckDeadLock();
1299  got_deadlock_timeout = false;
1300  }
1301  CHECK_FOR_INTERRUPTS();
1302  }
1303 
1304  /*
1305  * waitStatus could change from STATUS_WAITING to something else
1306  * asynchronously. Read it just once per loop to prevent surprising
1307  * behavior (such as missing log messages).
1308  */
1309  myWaitStatus = *((volatile int *) &MyProc->waitStatus);
1310 
1311  /*
1312  * If we are not deadlocked, but are waiting on an autovacuum-induced
1313  * task, send a signal to interrupt it.
1314  */
1315  if (deadlock_state == DS_BLOCKED_BY_AUTOVACUUM && allow_autovacuum_cancel)
1316  {
1317  PGPROC *autovac = GetBlockingAutoVacuumPgproc();
1318  PGXACT *autovac_pgxact = &ProcGlobal->allPgXact[autovac->pgprocno];
1319 
1320  LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
1321 
1322  /*
1323  * Only do it if the worker is not working to protect against Xid
1324  * wraparound.
1325  */
1326  if ((autovac_pgxact->vacuumFlags & PROC_IS_AUTOVACUUM) &&
1327  !(autovac_pgxact->vacuumFlags & PROC_VACUUM_FOR_WRAPAROUND))
1328  {
1329  int pid = autovac->pid;
1330  StringInfoData locktagbuf;
1331  StringInfoData logbuf; /* errdetail for server log */
1332 
1333  initStringInfo(&locktagbuf);
1334  initStringInfo(&logbuf);
1335  DescribeLockTag(&locktagbuf, &lock->tag);
1336  appendStringInfo(&logbuf,
1337  _("Process %d waits for %s on %s."),
1338  MyProcPid,
1340  lockmode),
1341  locktagbuf.data);
1342 
1343  /* release lock as quickly as possible */
1344  LWLockRelease(ProcArrayLock);
1345 
1346  /* send the autovacuum worker Back to Old Kent Road */
1347  ereport(DEBUG1,
1348  (errmsg("sending cancel to blocking autovacuum PID %d",
1349  pid),
1350  errdetail_log("%s", logbuf.data)));
1351 
1352  if (kill(pid, SIGINT) < 0)
1353  {
1354  /*
1355  * There's a race condition here: once we release the
1356  * ProcArrayLock, it's possible for the autovac worker to
1357  * close up shop and exit before we can do the kill().
1358  * Therefore, we do not whinge about no-such-process.
1359  * Other errors such as EPERM could conceivably happen if
1360  * the kernel recycles the PID fast enough, but such cases
1361  * seem improbable enough that it's probably best to issue
1362  * a warning if we see some other errno.
1363  */
1364  if (errno != ESRCH)
1365  ereport(WARNING,
1366  (errmsg("could not send signal to process %d: %m",
1367  pid)));
1368  }
1369 
1370  pfree(logbuf.data);
1371  pfree(locktagbuf.data);
1372  }
1373  else
1374  LWLockRelease(ProcArrayLock);
1375 
1376  /* prevent signal from being resent more than once */
1377  allow_autovacuum_cancel = false;
1378  }
1379 
1380  /*
1381  * If awoken after the deadlock check interrupt has run, and
1382  * log_lock_waits is on, then report about the wait.
1383  */
1384  if (log_lock_waits && deadlock_state != DS_NOT_YET_CHECKED)
1385  {
1386  StringInfoData buf,
1387  lock_waiters_sbuf,
1388  lock_holders_sbuf;
1389  const char *modename;
1390  long secs;
1391  int usecs;
1392  long msecs;
1393  SHM_QUEUE *procLocks;
1394  PROCLOCK *proclock;
1395  bool first_holder = true,
1396  first_waiter = true;
1397  int lockHoldersNum = 0;
1398 
1399  initStringInfo(&buf);
1400  initStringInfo(&lock_waiters_sbuf);
1401  initStringInfo(&lock_holders_sbuf);
1402 
1403  DescribeLockTag(&buf, &locallock->tag.lock);
1404  modename = GetLockmodeName(locallock->tag.lock.locktag_lockmethodid,
1405  lockmode);
1408  &secs, &usecs);
1409  msecs = secs * 1000 + usecs / 1000;
1410  usecs = usecs % 1000;
1411 
1412  /*
1413  * we loop over the lock's procLocks to gather a list of all
1414  * holders and waiters. Thus we will be able to provide more
1415  * detailed information for lock debugging purposes.
1416  *
1417  * lock->procLocks contains all processes which hold or wait for
1418  * this lock.
1419  */
1420 
1421  LWLockAcquire(partitionLock, LW_SHARED);
1422 
1423  procLocks = &(lock->procLocks);
1424  proclock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks,
1425  offsetof(PROCLOCK, lockLink));
1426 
1427  while (proclock)
1428  {
1429  /*
1430  * we are a waiter if myProc->waitProcLock == proclock; we are
1431  * a holder if it is NULL or something different
1432  */
1433  if (proclock->tag.myProc->waitProcLock == proclock)
1434  {
1435  if (first_waiter)
1436  {
1437  appendStringInfo(&lock_waiters_sbuf, "%d",
1438  proclock->tag.myProc->pid);
1439  first_waiter = false;
1440  }
1441  else
1442  appendStringInfo(&lock_waiters_sbuf, ", %d",
1443  proclock->tag.myProc->pid);
1444  }
1445  else
1446  {
1447  if (first_holder)
1448  {
1449  appendStringInfo(&lock_holders_sbuf, "%d",
1450  proclock->tag.myProc->pid);
1451  first_holder = false;
1452  }
1453  else
1454  appendStringInfo(&lock_holders_sbuf, ", %d",
1455  proclock->tag.myProc->pid);
1456 
1457  lockHoldersNum++;
1458  }
1459 
1460  proclock = (PROCLOCK *) SHMQueueNext(procLocks, &proclock->lockLink,
1461  offsetof(PROCLOCK, lockLink));
1462  }
1463 
1464  LWLockRelease(partitionLock);
1465 
1467  ereport(LOG,
1468  (errmsg("process %d avoided deadlock for %s on %s by rearranging queue order after %ld.%03d ms",
1469  MyProcPid, modename, buf.data, msecs, usecs),
1470  (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
1471  "Processes holding the lock: %s. Wait queue: %s.",
1472  lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
1473  else if (deadlock_state == DS_HARD_DEADLOCK)
1474  {
1475  /*
1476  * This message is a bit redundant with the error that will be
1477  * reported subsequently, but in some cases the error report
1478  * might not make it to the log (eg, if it's caught by an
1479  * exception handler), and we want to ensure all long-wait
1480  * events get logged.
1481  */
1482  ereport(LOG,
1483  (errmsg("process %d detected deadlock while waiting for %s on %s after %ld.%03d ms",
1484  MyProcPid, modename, buf.data, msecs, usecs),
1485  (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
1486  "Processes holding the lock: %s. Wait queue: %s.",
1487  lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
1488  }
1489 
1490  if (myWaitStatus == STATUS_WAITING)
1491  ereport(LOG,
1492  (errmsg("process %d still waiting for %s on %s after %ld.%03d ms",
1493  MyProcPid, modename, buf.data, msecs, usecs),
1494  (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
1495  "Processes holding the lock: %s. Wait queue: %s.",
1496  lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
1497  else if (myWaitStatus == STATUS_OK)
1498  ereport(LOG,
1499  (errmsg("process %d acquired %s on %s after %ld.%03d ms",
1500  MyProcPid, modename, buf.data, msecs, usecs)));
1501  else
1502  {
1503  Assert(myWaitStatus == STATUS_ERROR);
1504 
1505  /*
1506  * Currently, the deadlock checker always kicks its own
1507  * process, which means that we'll only see STATUS_ERROR when
1508  * deadlock_state == DS_HARD_DEADLOCK, and there's no need to
1509  * print redundant messages. But for completeness and
1510  * future-proofing, print a message if it looks like someone
1511  * else kicked us off the lock.
1512  */
1513  if (deadlock_state != DS_HARD_DEADLOCK)
1514  ereport(LOG,
1515  (errmsg("process %d failed to acquire %s on %s after %ld.%03d ms",
1516  MyProcPid, modename, buf.data, msecs, usecs),
1517  (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
1518  "Processes holding the lock: %s. Wait queue: %s.",
1519  lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
1520  }
1521 
1522  /*
1523  * At this point we might still need to wait for the lock. Reset
1524  * state so we don't print the above messages again.
1525  */
1526  deadlock_state = DS_NO_DEADLOCK;
1527 
1528  pfree(buf.data);
1529  pfree(lock_holders_sbuf.data);
1530  pfree(lock_waiters_sbuf.data);
1531  }
1532  } while (myWaitStatus == STATUS_WAITING);
1533 
1534  /*
1535  * Disable the timers, if they are still running. As in LockErrorCleanup,
1536  * we must preserve the LOCK_TIMEOUT indicator flag: if a lock timeout has
1537  * already caused QueryCancelPending to become set, we want the cancel to
1538  * be reported as a lock timeout, not a user cancel.
1539  */
1540  if (!InHotStandby)
1541  {
1542  if (LockTimeout > 0)
1543  {
1544  DisableTimeoutParams timeouts[2];
1545 
1546  timeouts[0].id = DEADLOCK_TIMEOUT;
1547  timeouts[0].keep_indicator = false;
1548  timeouts[1].id = LOCK_TIMEOUT;
1549  timeouts[1].keep_indicator = true;
1550  disable_timeouts(timeouts, 2);
1551  }
1552  else
1553  disable_timeout(DEADLOCK_TIMEOUT, false);
1554  }
1555 
1556  /*
1557  * Re-acquire the lock table's partition lock. We have to do this to hold
1558  * off cancel/die interrupts before we can mess with lockAwaited (else we
1559  * might have a missed or duplicated locallock update).
1560  */
1561  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
1562 
1563  /*
1564  * We no longer want LockErrorCleanup to do anything.
1565  */
1566  lockAwaited = NULL;
1567 
1568  /*
1569  * If we got the lock, be sure to remember it in the locallock table.
1570  */
1571  if (MyProc->waitStatus == STATUS_OK)
1572  GrantAwaitedLock();
1573 
1574  /*
1575  * We don't have to do anything else, because the awaker did all the
1576  * necessary update of the lock table and MyProc.
1577  */
1578  return MyProc->waitStatus;
1579 }
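
The heart of ProcSleep() is the latch-style wait loop: sleep until woken, then re-read waitStatus, because a wakeup may have had nothing to do with the lock. A rough standalone model of that "wait, re-check, maybe wait again" pattern, using a pthread condition variable in place of the PostgreSQL latch (STATUS_WAITING/STATUS_OK here are illustrative constants, not the real definitions):

#include <pthread.h>
#include <stdio.h>

#define STATUS_WAITING 1
#define STATUS_OK      0

static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cv  = PTHREAD_COND_INITIALIZER;
static int wait_status = STATUS_WAITING;
static int latch_set = 0;

/* The "grantor": update shared state first, then set the latch (cf. ProcWakeup). */
static void *
grantor(void *arg)
{
    (void) arg;
    pthread_mutex_lock(&mtx);
    wait_status = STATUS_OK;
    latch_set = 1;
    pthread_cond_signal(&cv);
    pthread_mutex_unlock(&mtx);
    return NULL;
}

int
main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, grantor, NULL);

    pthread_mutex_lock(&mtx);
    do
    {
        while (!latch_set)                   /* WaitLatch(): sleep until someone sets the latch */
            pthread_cond_wait(&cv, &mtx);
        latch_set = 0;                       /* ResetLatch(): a set latch is consumed here ...  */
    } while (wait_status == STATUS_WAITING); /* ... but only waitStatus says the lock is ours   */
    pthread_mutex_unlock(&mtx);

    printf("woke up with status %d\n", wait_status);
    pthread_join(t, NULL);
    return 0;
}
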
1580 
1581 
1582 /*
1583  * ProcWakeup -- wake up a process by setting its latch.
1584  *
1585  * Also remove the process from the wait queue and set its links invalid.
1586  * RETURN: the next process in the wait queue.
1587  *
1588  * The appropriate lock partition lock must be held by caller.
1589  *
1590  * XXX: presently, this code is only used for the "success" case, and only
1591  * works correctly for that case. To clean up in failure case, would need
1592  * to twiddle the lock's request counts too --- see RemoveFromWaitQueue.
1593  * Hence, in practice the waitStatus parameter must be STATUS_OK.
1594  */
1595 PGPROC *
1596 ProcWakeup(PGPROC *proc, int waitStatus)
1597 {
1598  PGPROC *retProc;
1599 
1600  /* Proc should be sleeping ... */
1601  if (proc->links.prev == NULL ||
1602  proc->links.next == NULL)
1603  return NULL;
1604  Assert(proc->waitStatus == STATUS_WAITING);
1605 
1606  /* Save next process before we zap the list link */
1607  retProc = (PGPROC *) proc->links.next;
1608 
1609  /* Remove process from wait queue */
1610  SHMQueueDelete(&(proc->links));
1611  (proc->waitLock->waitProcs.size)--;
1612 
1613  /* Clean up process' state and pass it the ok/fail signal */
1614  proc->waitLock = NULL;
1615  proc->waitProcLock = NULL;
1616  proc->waitStatus = waitStatus;
1617 
1618  /* And awaken it */
1619  SetLatch(&proc->procLatch);
1620 
1621  return retProc;
1622 }
1623 
1624 /*
1625  * ProcLockWakeup -- routine for waking up processes when a lock is
1626  * released (or a prior waiter is aborted). Scan all waiters
1627  * for lock, waken any that are no longer blocked.
1628  *
1629  * The appropriate lock partition lock must be held by caller.
1630  */
1631 void
1632 ProcLockWakeup(LockMethod lockMethodTable, LOCK *lock)
1633 {
1634  PROC_QUEUE *waitQueue = &(lock->waitProcs);
1635  int queue_size = waitQueue->size;
1636  PGPROC *proc;
1637  LOCKMASK aheadRequests = 0;
1638 
1639  Assert(queue_size >= 0);
1640 
1641  if (queue_size == 0)
1642  return;
1643 
1644  proc = (PGPROC *) waitQueue->links.next;
1645 
1646  while (queue_size-- > 0)
1647  {
1648  LOCKMODE lockmode = proc->waitLockMode;
1649 
1650  /*
1651  * Waken if (a) doesn't conflict with requests of earlier waiters, and
1652  * (b) doesn't conflict with already-held locks.
1653  */
1654  if ((lockMethodTable->conflictTab[lockmode] & aheadRequests) == 0 &&
1655  !LockCheckConflicts(lockMethodTable, lockmode, lock,
1656  proc->waitProcLock))
1657  {
1658  /* OK to waken */
1659  GrantLock(lock, proc->waitProcLock, lockmode);
1660  proc = ProcWakeup(proc, STATUS_OK);
1661 
1662  /*
1663  * ProcWakeup removes proc from the lock's waiting process queue
1664  * and returns the next proc in chain; don't use proc's next-link,
1665  * because it's been cleared.
1666  */
1667  }
1668  else
1669  {
1670  /*
1671  * Cannot wake this guy. Remember his request for later checks.
1672  */
1673  aheadRequests |= LOCKBIT_ON(lockmode);
1674  proc = (PGPROC *) proc->links.next;
1675  }
1676  }
1677 
1678  Assert(waitQueue->size >= 0);
1679 }
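
ProcLockWakeup() relies on the same bitmask trick as ProcSleep(): each waiter that stays blocked contributes its requested mode to aheadRequests, and a later candidate can only be woken if its mode conflicts neither with that mask nor with what is already granted. A compact standalone sketch with a made-up two-mode conflict table (SHARED conflicts only with EXCLUSIVE; EXCLUSIVE conflicts with both):

#include <stdio.h>

#define LOCKBIT_ON(m) (1 << (m))

enum { MODE_SHARED = 1, MODE_EXCLUSIVE = 2 };

/* conflict_tab[m] is the bitmask of modes that conflict with mode m. */
static const int conflict_tab[] = {
    [MODE_SHARED]    = LOCKBIT_ON(MODE_EXCLUSIVE),
    [MODE_EXCLUSIVE] = LOCKBIT_ON(MODE_SHARED) | LOCKBIT_ON(MODE_EXCLUSIVE),
};

int
main(void)
{
    int waiters[] = {MODE_EXCLUSIVE, MODE_SHARED, MODE_SHARED};
    int granted_mask = 0;           /* nothing currently held */
    int ahead_requests = 0;

    for (int i = 0; i < 3; i++)
    {
        int mode = waiters[i];

        if ((conflict_tab[mode] & ahead_requests) == 0 &&
            (conflict_tab[mode] & granted_mask) == 0)
        {
            granted_mask |= LOCKBIT_ON(mode);       /* wake this waiter */
            printf("waiter %d (mode %d): woken\n", i, mode);
        }
        else
        {
            ahead_requests |= LOCKBIT_ON(mode);     /* must keep waiting */
            printf("waiter %d (mode %d): still blocked\n", i, mode);
        }
    }
    return 0;
}
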
1680 
1681 /*
1682  * CheckDeadLock
1683  *
1684  * We only get to this routine, if DEADLOCK_TIMEOUT fired while waiting for a
1685  * lock to be released by some other process. Check if there's a deadlock; if
1686  * not, just return. (But signal ProcSleep to log a message, if
1687  * log_lock_waits is true.) If we have a real deadlock, remove ourselves from
1688  * the lock's wait queue and signal an error to ProcSleep.
1689  */
1690 static void
1691 CheckDeadLock(void)
1692 {
1693  int i;
1694 
1695  /*
1696  * Acquire exclusive lock on the entire shared lock data structures. Must
1697  * grab LWLocks in partition-number order to avoid LWLock deadlock.
1698  *
1699  * Note that the deadlock check interrupt had better not be enabled
1700  * anywhere that this process itself holds lock partition locks, else this
1701  * will wait forever. Also note that LWLockAcquire creates a critical
1702  * section, so that this routine cannot be interrupted by cancel/die
1703  * interrupts.
1704  */
1705  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
1706  LWLockAcquire(LockHashPartitionLockByIndex(i), LW_EXCLUSIVE);
1707 
1708  /*
1709  * Check to see if we've been awoken by anyone in the interim.
1710  *
1711  * If we have, we can return and resume our transaction -- happy day.
1712  * Before we are awoken the process releasing the lock grants it to us so
1713  * we know that we don't have to wait anymore.
1714  *
1715  * We check by looking to see if we've been unlinked from the wait queue.
1716  * This is safe because we hold the lock partition lock.
1717  */
1718  if (MyProc->links.prev == NULL ||
1719  MyProc->links.next == NULL)
1720  goto check_done;
1721 
1722 #ifdef LOCK_DEBUG
1723  if (Debug_deadlocks)
1724  DumpAllLocks();
1725 #endif
1726 
1727  /* Run the deadlock check, and set deadlock_state for use by ProcSleep */
1728  deadlock_state = DeadLockCheck(MyProc);
1729 
1730  if (deadlock_state == DS_HARD_DEADLOCK)
1731  {
1732  /*
1733  * Oops. We have a deadlock.
1734  *
1735  * Get this process out of wait state. (Note: we could do this more
1736  * efficiently by relying on lockAwaited, but use this coding to
1737  * preserve the flexibility to kill some other transaction than the
1738  * one detecting the deadlock.)
1739  *
1740  * RemoveFromWaitQueue sets MyProc->waitStatus to STATUS_ERROR, so
1741  * ProcSleep will report an error after we return from the signal
1742  * handler.
1743  */
1744  Assert(MyProc->waitLock != NULL);
1745  RemoveFromWaitQueue(MyProc, LockTagHashCode(&(MyProc->waitLock->tag)));
1746 
1747  /*
1748  * We're done here. Transaction abort caused by the error that
1749  * ProcSleep will raise will cause any other locks we hold to be
1750  * released, thus allowing other processes to wake up; we don't need
1751  * to do that here. NOTE: an exception is that releasing locks we
1752  * hold doesn't consider the possibility of waiters that were blocked
1753  * behind us on the lock we just failed to get, and might now be
1754  * wakable because we're not in front of them anymore. However,
1755  * RemoveFromWaitQueue took care of waking up any such processes.
1756  */
1757  }
1758 
1759  /*
1760  * And release locks. We do this in reverse order for two reasons: (1)
1761  * Anyone else who needs more than one of the locks will be trying to lock
1762  * them in increasing order; we don't want to release the other process
1763  * until it can get all the locks it needs. (2) This avoids O(N^2)
1764  * behavior inside LWLockRelease.
1765  */
1766 check_done:
1767  for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
1768  LWLockRelease(LockHashPartitionLockByIndex(i));
1769 }
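
The partition-lock handling above is a fixed-order acquire, reverse-order release pattern. A small pthread sketch of the same discipline, which is what keeps two processes that each need several partition locks from deadlocking against one another (NUM_PARTS and the mutex array are placeholders):

#include <pthread.h>
#include <stdio.h>

#define NUM_PARTS 4

static pthread_mutex_t part_lock[NUM_PARTS] = {
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER
};

static void
lock_all_partitions(void)
{
    /* Always acquire in ascending index order, as CheckDeadLock() does. */
    for (int i = 0; i < NUM_PARTS; i++)
        pthread_mutex_lock(&part_lock[i]);
}

static void
unlock_all_partitions(void)
{
    /* Release in the reverse order of acquisition. */
    for (int i = NUM_PARTS; --i >= 0;)
        pthread_mutex_unlock(&part_lock[i]);
}

int
main(void)
{
    lock_all_partitions();
    printf("holding all %d partition locks\n", NUM_PARTS);
    unlock_all_partitions();
    return 0;
}
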
1770 
1771 /*
1772  * CheckDeadLockAlert - Handle the expiry of deadlock_timeout.
1773  *
1774  * NB: Runs inside a signal handler, be careful.
1775  */
1776 void
1777 CheckDeadLockAlert(void)
1778 {
1779  int save_errno = errno;
1780 
1781  got_deadlock_timeout = true;
1782 
1783  /*
1784  * Have to set the latch again, even if handle_sig_alarm already did. Back
1785  * then got_deadlock_timeout wasn't yet set... It's unlikely that this
1786  * ever would be a problem, but setting a set latch again is cheap.
1787  */
1788  SetLatch(MyLatch);
1789  errno = save_errno;
1790 }
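
CheckDeadLockAlert() follows the usual rules for code reached from a signal or timeout handler: touch only a volatile sig_atomic_t flag, preserve errno, and do the real work later from the main loop. A self-contained sketch of that pattern, with SIGALRM standing in for the deadlock timeout:

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t timeout_pending = 0;

static void
alarm_handler(int signo)
{
    int save_errno = errno;     /* don't clobber errno of the interrupted code */

    (void) signo;
    timeout_pending = 1;        /* just set a flag; nothing unsafe in the handler */
    errno = save_errno;
}

int
main(void)
{
    signal(SIGALRM, alarm_handler);
    alarm(1);                   /* fire in one second */

    while (!timeout_pending)
        sleep(1);               /* toy main loop; the flag is re-checked after each wakeup */

    /* The heavy lifting (the analogue of CheckDeadLock()) happens here, not in the handler. */
    printf("timeout expired, running the real check now\n");
    return 0;
}
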
1791 
1792 /*
1793  * ProcWaitForSignal - wait for a signal from another backend.
1794  *
1795  * As this uses the generic process latch the caller has to be robust against
1796  * unrelated wakeups: Always check that the desired state has occurred, and
1797  * wait again if not.
1798  */
1799 void
1800 ProcWaitForSignal(uint32 wait_event_info)
1801 {
1802  (void) WaitLatch(MyLatch, WL_LATCH_SET | WL_EXIT_ON_PM_DEATH, 0,
1803  wait_event_info);
1804  ResetLatch(MyLatch);
1805  CHECK_FOR_INTERRUPTS();
1806 }
1807 
1808 /*
1809  * ProcSendSignal - send a signal to a backend identified by PID
1810  */
1811 void
1812 ProcSendSignal(int pid)
1813 {
1814  PGPROC *proc = NULL;
1815 
1816  if (RecoveryInProgress())
1817  {
1819 
1820  /*
1821  * Check to see whether it is the Startup process we wish to signal.
1822  * This call is made by the buffer manager when it wishes to wake up a
1823  * process that has been waiting for a pin, so that it can obtain a
1824  * cleanup lock using LockBufferForCleanup(). Startup is not a normal
1825  * backend, so BackendPidGetProc() will not return any pid at all. So
1826  * we remember the information for this special case.
1827  */
1828  if (pid == ProcGlobal->startupProcPid)
1829  proc = ProcGlobal->startupProc;
1830 
1831  SpinLockRelease(ProcStructLock);
1832  }
1833 
1834  if (proc == NULL)
1835  proc = BackendPidGetProc(pid);
1836 
1837  if (proc != NULL)
1838  {
1839  SetLatch(&proc->procLatch);
1840  }
1841 }
1842 
1843 /*
1844  * BecomeLockGroupLeader - designate process as lock group leader
1845  *
1846  * Once this function has returned, other processes can join the lock group
1847  * by calling BecomeLockGroupMember.
1848  */
1849 void
1850 BecomeLockGroupLeader(void)
1851 {
1852  LWLock *leader_lwlock;
1853 
1854  /* If we already did it, we don't need to do it again. */
1855  if (MyProc->lockGroupLeader == MyProc)
1856  return;
1857 
1858  /* We had better not be a follower. */
1859  Assert(MyProc->lockGroupLeader == NULL);
1860 
1861  /* Create single-member group, containing only ourselves. */
1862  leader_lwlock = LockHashPartitionLockByProc(MyProc);
1863  LWLockAcquire(leader_lwlock, LW_EXCLUSIVE);
1864  MyProc->lockGroupLeader = MyProc;
1865  dlist_push_head(&MyProc->lockGroupMembers, &MyProc->lockGroupLink);
1866  LWLockRelease(leader_lwlock);
1867 }
1868 
1869 /*
1870  * BecomeLockGroupMember - designate process as lock group member
1871  *
1872  * This is pretty straightforward except for the possibility that the leader
1873  * whose group we're trying to join might exit before we manage to do so;
1874  * and the PGPROC might get recycled for an unrelated process. To avoid
1875  * that, we require the caller to pass the PID of the intended PGPROC as
1876  * an interlock. Returns true if we successfully join the intended lock
1877  * group, and false if not.
1878  */
1879 bool
1880 BecomeLockGroupMember(PGPROC *leader, int pid)
1881 {
1882  LWLock *leader_lwlock;
1883  bool ok = false;
1884 
1885  /* Group leader can't become member of group */
1886  Assert(MyProc != leader);
1887 
1888  /* Can't already be a member of a group */
1889  Assert(MyProc->lockGroupLeader == NULL);
1890 
1891  /* PID must be valid. */
1892  Assert(pid != 0);
1893 
1894  /*
1895  * Get lock protecting the group fields. Note LockHashPartitionLockByProc
1896  * accesses leader->pgprocno in a PGPROC that might be free. This is safe
1897  * because all PGPROCs' pgprocno fields are set during shared memory
1898  * initialization and never change thereafter; so we will acquire the
1899  * correct lock even if the leader PGPROC is in process of being recycled.
1900  */
1901  leader_lwlock = LockHashPartitionLockByProc(leader);
1902  LWLockAcquire(leader_lwlock, LW_EXCLUSIVE);
1903 
1904  /* Is this the leader we're looking for? */
1905  if (leader->pid == pid && leader->lockGroupLeader == leader)
1906  {
1907  /* OK, join the group */
1908  ok = true;
1909  MyProc->lockGroupLeader = leader;
1910  dlist_push_tail(&leader->lockGroupMembers, &MyProc->lockGroupLink);
1911  }
1912  LWLockRelease(leader_lwlock);
1913 
1914  return ok;
1915 }
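
/*
 * Illustration (editor's sketch, not part of proc.c): intended calling
 * pattern for the two functions above.  The leader calls
 * BecomeLockGroupLeader() before launching its workers and publishes its
 * PGPROC pointer and PID to them (how they are handed over is an
 * assumption of this example, e.g. via shared memory the leader sets up).
 * Each worker then passes both values back so that BecomeLockGroupMember()
 * can detect a leader that already exited and whose PGPROC was recycled.
 */
static void
WorkerJoinLockGroup(PGPROC *leader_proc, int leader_pid)
{
    if (!BecomeLockGroupMember(leader_proc, leader_pid))
        ereport(ERROR,
                (errmsg("lock group leader exited before this worker could join")));
}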