proc.c
1 /*-------------------------------------------------------------------------
2  *
3  * proc.c
4  * routines to manage per-process shared memory data structure
5  *
6  * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  *
10  * IDENTIFICATION
11  * src/backend/storage/lmgr/proc.c
12  *
13  *-------------------------------------------------------------------------
14  */
15 /*
16  * Interface (a):
17  * ProcSleep(), ProcWakeup(),
18  * ProcQueueAlloc() -- create a shm queue for sleeping processes
19  * ProcQueueInit() -- create a queue without allocing memory
20  *
21  * Waiting for a lock causes the backend to be put to sleep. Whoever releases
22  * the lock wakes the process up again (and gives it an error code so it knows
23  * whether it was awoken on an error condition).
24  *
25  * Interface (b):
26  *
27  * ProcReleaseLocks -- frees the locks associated with current transaction
28  *
29  * ProcKill -- destroys the shared memory state (and locks)
30  * associated with the process.
31  */
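/*
 * A rough sketch of how interface (a) is reached in practice (not an
 * exhaustive list of callers): lock.c's WaitOnLock() calls ProcSleep() to
 * put the backend to sleep on the lock's wait queue, and whoever releases
 * the lock ends up in ProcLockWakeup(), which grants the lock and wakes
 * each unblocked waiter via ProcWakeup().
 */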
32 #include "postgres.h"
33 
34 #include <signal.h>
35 #include <unistd.h>
36 #include <sys/time.h>
37 
38 #include "access/transam.h"
39 #include "access/twophase.h"
40 #include "access/xact.h"
41 #include "miscadmin.h"
42 #include "pgstat.h"
43 #include "postmaster/autovacuum.h"
44 #include "replication/slot.h"
45 #include "replication/syncrep.h"
47 #include "storage/standby.h"
48 #include "storage/ipc.h"
49 #include "storage/lmgr.h"
50 #include "storage/pmsignal.h"
51 #include "storage/proc.h"
52 #include "storage/procarray.h"
53 #include "storage/procsignal.h"
54 #include "storage/spin.h"
55 #include "utils/timeout.h"
56 #include "utils/timestamp.h"
57 
58 
59 /* GUC variables */
60 int DeadlockTimeout = 1000;
62 int LockTimeout = 0;
64 bool log_lock_waits = false;
65 
66 /* Pointer to this process's PGPROC and PGXACT structs, if any */
67 PGPROC *MyProc = NULL;
68 PGXACT *MyPgXact = NULL;
69 
70 /*
71  * This spinlock protects the freelist of recycled PGPROC structures.
72  * We cannot use an LWLock because the LWLock manager depends on already
73  * having a PGPROC and a wait semaphore! But these structures are touched
74  * relatively infrequently (only at backend startup or shutdown) and not for
75  * very long, so a spinlock is okay.
76  */
77 NON_EXEC_STATIC slock_t *ProcStructLock = NULL;
78 
79 /* Pointers to shared-memory structures */
80 PROC_HDR *ProcGlobal = NULL;
81 NON_EXEC_STATIC PGPROC *AuxiliaryProcs = NULL;
82 PGPROC *PreparedXactProcs = NULL;
83 
84 /* If we are waiting for a lock, this points to the associated LOCALLOCK */
85 static LOCALLOCK *lockAwaited = NULL;
86 
87 static DeadLockState deadlock_state = DS_NOT_YET_CHECKED;
88 
89 /* Is a deadlock check pending? */
90 static volatile sig_atomic_t got_deadlock_timeout;
91 
92 static void RemoveProcFromArray(int code, Datum arg);
93 static void ProcKill(int code, Datum arg);
94 static void AuxiliaryProcKill(int code, Datum arg);
95 static void CheckDeadLock(void);
96 
97 
98 /*
99  * Report shared-memory space needed by InitProcGlobal.
100  */
101 Size
102 ProcGlobalShmemSize(void)
103 {
104  Size size = 0;
105 
106  /* ProcGlobal */
107  size = add_size(size, sizeof(PROC_HDR));
108  /* MyProcs, including autovacuum workers and launcher */
109  size = add_size(size, mul_size(MaxBackends, sizeof(PGPROC)));
110  /* AuxiliaryProcs */
111  size = add_size(size, mul_size(NUM_AUXILIARY_PROCS, sizeof(PGPROC)));
112  /* Prepared xacts */
113  size = add_size(size, mul_size(max_prepared_xacts, sizeof(PGPROC)));
114  /* ProcStructLock */
115  size = add_size(size, sizeof(slock_t));
116 
117  size = add_size(size, mul_size(MaxBackends, sizeof(PGXACT)));
118  size = add_size(size, mul_size(NUM_AUXILIARY_PROCS, sizeof(PGXACT)));
119  size = add_size(size, mul_size(max_prepared_xacts, sizeof(PGXACT)));
120 
121  return size;
122 }
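/*
 * Stated as a single formula (a restatement of the sums above, ignoring any
 * further padding): the total is approximately sizeof(PROC_HDR) +
 * sizeof(slock_t) + (MaxBackends + NUM_AUXILIARY_PROCS + max_prepared_xacts)
 * * (sizeof(PGPROC) + sizeof(PGXACT)).
 */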
123 
124 /*
125  * Report number of semaphores needed by InitProcGlobal.
126  */
127 int
128 ProcGlobalSemas(void)
129 {
130  /*
131  * We need a sema per backend (including autovacuum), plus one for each
132  * auxiliary process.
133  */
134  return MaxBackends + NUM_AUXILIARY_PROCS;
135 }
136 
137 /*
138  * InitProcGlobal -
139  * Initialize the global process table during postmaster or standalone
140  * backend startup.
141  *
142  * We also create all the per-process semaphores we will need to support
143  * the requested number of backends. We used to allocate semaphores
144  * only when backends were actually started up, but that is bad because
145  * it lets Postgres fail under load --- a lot of Unix systems are
146  * (mis)configured with small limits on the number of semaphores, and
147  * running out when trying to start another backend is a common failure.
148  * So, now we grab enough semaphores to support the desired max number
149  * of backends immediately at initialization --- if the sysadmin has set
150  * MaxConnections, max_worker_processes, or autovacuum_max_workers higher
151  * than his kernel will support, he'll find out sooner rather than later.
152  *
153  * Another reason for creating semaphores here is that the semaphore
154  * implementation typically requires us to create semaphores in the
155  * postmaster, not in backends.
156  *
157  * Note: this is NOT called by individual backends under a postmaster,
158  * not even in the EXEC_BACKEND case. The ProcGlobal and AuxiliaryProcs
159  * pointers must be propagated specially for EXEC_BACKEND operation.
160  */
161 void
162 InitProcGlobal(void)
163 {
164  PGPROC *procs;
165  PGXACT *pgxacts;
166  int i,
167  j;
168  bool found;
169  uint32 TotalProcs = MaxBackends + NUM_AUXILIARY_PROCS + max_prepared_xacts;
170 
171  /* Create the ProcGlobal shared structure */
172  ProcGlobal = (PROC_HDR *)
173  ShmemInitStruct("Proc Header", sizeof(PROC_HDR), &found);
174  Assert(!found);
175 
176  /*
177  * Initialize the data structures.
178  */
180  ProcGlobal->freeProcs = NULL;
181  ProcGlobal->autovacFreeProcs = NULL;
182  ProcGlobal->bgworkerFreeProcs = NULL;
183  ProcGlobal->startupProc = NULL;
184  ProcGlobal->startupProcPid = 0;
185  ProcGlobal->startupBufferPinWaitBufId = -1;
186  ProcGlobal->walwriterLatch = NULL;
187  ProcGlobal->checkpointerLatch = NULL;
189 
190  /*
191  * Create and initialize all the PGPROC structures we'll need. There are
192  * five separate consumers: (1) normal backends, (2) autovacuum workers
193  * and the autovacuum launcher, (3) background workers, (4) auxiliary
194  * processes, and (5) prepared transactions. Each PGPROC structure is
195  * dedicated to exactly one of these purposes, and they do not move
196  * between groups.
197  */
198  procs = (PGPROC *) ShmemAlloc(TotalProcs * sizeof(PGPROC));
199  MemSet(procs, 0, TotalProcs * sizeof(PGPROC));
200  ProcGlobal->allProcs = procs;
201  /* XXX allProcCount isn't really all of them; it excludes prepared xacts */
202  ProcGlobal->allProcCount = MaxBackends + NUM_AUXILIARY_PROCS;
203 
204  /*
205  * Also allocate a separate array of PGXACT structures. This is separate
206  * from the main PGPROC array so that the most heavily accessed data is
207  * stored contiguously in memory in as few cache lines as possible. This
208  * provides significant performance benefits, especially on a
209  * multiprocessor system. There is one PGXACT structure for every PGPROC
210  * structure.
211  */
212  pgxacts = (PGXACT *) ShmemAlloc(TotalProcs * sizeof(PGXACT));
213  MemSet(pgxacts, 0, TotalProcs * sizeof(PGXACT));
214  ProcGlobal->allPgXact = pgxacts;
215 
216  for (i = 0; i < TotalProcs; i++)
217  {
218  /* Common initialization for all PGPROCs, regardless of type. */
219 
220  /*
221  * Set up per-PGPROC semaphore, latch, and backendLock. Prepared xact
222  * dummy PGPROCs don't need these though - they're never associated
223  * with a real process
224  */
225  if (i < MaxBackends + NUM_AUXILIARY_PROCS)
226  {
227  procs[i].sem = PGSemaphoreCreate();
228  InitSharedLatch(&(procs[i].procLatch));
229  LWLockInitialize(&(procs[i].backendLock), LWTRANCHE_PROC);
230  }
231  procs[i].pgprocno = i;
232 
233  /*
234  * Newly created PGPROCs for normal backends, autovacuum and bgworkers
235  * must be queued up on the appropriate free list. Because there can
236  * only ever be a small, fixed number of auxiliary processes, no free
237  * list is used in that case; InitAuxiliaryProcess() instead uses a
238  * linear search. PGPROCs for prepared transactions are added to a
239  * free list by TwoPhaseShmemInit().
240  */
241  if (i < MaxConnections)
242  {
243  /* PGPROC for normal backend, add to freeProcs list */
244  procs[i].links.next = (SHM_QUEUE *) ProcGlobal->freeProcs;
245  ProcGlobal->freeProcs = &procs[i];
246  procs[i].procgloballist = &ProcGlobal->freeProcs;
247  }
248  else if (i < MaxConnections + autovacuum_max_workers + 1)
249  {
250  /* PGPROC for AV launcher/worker, add to autovacFreeProcs list */
251  procs[i].links.next = (SHM_QUEUE *) ProcGlobal->autovacFreeProcs;
252  ProcGlobal->autovacFreeProcs = &procs[i];
253  procs[i].procgloballist = &ProcGlobal->autovacFreeProcs;
254  }
255  else if (i < MaxBackends)
256  {
257  /* PGPROC for bgworker, add to bgworkerFreeProcs list */
258  procs[i].links.next = (SHM_QUEUE *) ProcGlobal->bgworkerFreeProcs;
259  ProcGlobal->bgworkerFreeProcs = &procs[i];
260  procs[i].procgloballist = &ProcGlobal->bgworkerFreeProcs;
261  }
262 
263  /* Initialize myProcLocks[] shared memory queues. */
264  for (j = 0; j < NUM_LOCK_PARTITIONS; j++)
265  SHMQueueInit(&(procs[i].myProcLocks[j]));
266 
267  /* Initialize lockGroupMembers list. */
268  dlist_init(&procs[i].lockGroupMembers);
269  }
270 
271  /*
272  * Save pointers to the blocks of PGPROC structures reserved for auxiliary
273  * processes and prepared transactions.
274  */
275  AuxiliaryProcs = &procs[MaxBackends];
276  PreparedXactProcs = &procs[MaxBackends + NUM_AUXILIARY_PROCS];
277 
278  /* Create ProcStructLock spinlock, too */
279  ProcStructLock = (slock_t *) ShmemAlloc(sizeof(slock_t));
281 }
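/*
 * To summarize the layout built above: allProcs[0 .. MaxConnections-1] serve
 * normal backends, the next autovacuum_max_workers + 1 entries serve the
 * autovacuum launcher and workers, the remaining entries below MaxBackends
 * serve background workers, the following NUM_AUXILIARY_PROCS entries are
 * AuxiliaryProcs, and the final max_prepared_xacts entries are
 * PreparedXactProcs.
 */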
282 
283 /*
284  * InitProcess -- initialize a per-process data structure for this backend
285  */
286 void
287 InitProcess(void)
288 {
289  PGPROC *volatile * procgloballist;
290 
291  /*
292  * ProcGlobal should be set up already (if we are a backend, we inherit
293  * this by fork() or EXEC_BACKEND mechanism from the postmaster).
294  */
295  if (ProcGlobal == NULL)
296  elog(PANIC, "proc header uninitialized");
297 
298  if (MyProc != NULL)
299  elog(ERROR, "you already exist");
300 
301  /* Decide which list should supply our PGPROC. */
302  if (IsAnyAutoVacuumProcess())
303  procgloballist = &ProcGlobal->autovacFreeProcs;
304  else if (IsBackgroundWorker)
305  procgloballist = &ProcGlobal->bgworkerFreeProcs;
306  else
307  procgloballist = &ProcGlobal->freeProcs;
308 
309  /*
310  * Try to get a proc struct from the appropriate free list. If this
311  * fails, we must be out of PGPROC structures (not to mention semaphores).
312  *
313  * While we are holding the ProcStructLock, also copy the current shared
314  * estimate of spins_per_delay to local storage.
315  */
317 
319 
320  MyProc = *procgloballist;
321 
322  if (MyProc != NULL)
323  {
324  *procgloballist = (PGPROC *) MyProc->links.next;
326  }
327  else
328  {
329  /*
330  * If we reach here, all the PGPROCs are in use. This is one of the
331  * possible places to detect "too many backends", so give the standard
332  * error message. XXX do we need to give a different failure message
333  * in the autovacuum case?
334  */
336  ereport(FATAL,
337  (errcode(ERRCODE_TOO_MANY_CONNECTIONS),
338  errmsg("sorry, too many clients already")));
339  }
340  MyPgXact = &ProcGlobal->allPgXact[MyProc->pgprocno];
341 
342  /*
343  * Cross-check that the PGPROC is of the type we expect; if this were not
344  * the case, it would get returned to the wrong list.
345  */
346  Assert(MyProc->procgloballist == procgloballist);
347 
348  /*
349  * Now that we have a PGPROC, mark ourselves as an active postmaster
350  * child; this is so that the postmaster can detect it if we exit without
351  * cleaning up. (XXX autovac launcher currently doesn't participate in
352  * this; it probably should.)
353  */
356 
357  /*
358  * Initialize all fields of MyProc, except for those previously
359  * initialized by InitProcGlobal.
360  */
361  SHMQueueElemInit(&(MyProc->links));
362  MyProc->waitStatus = STATUS_OK;
364  MyProc->fpVXIDLock = false;
366  MyPgXact->xid = InvalidTransactionId;
367  MyPgXact->xmin = InvalidTransactionId;
368  MyProc->pid = MyProcPid;
369  /* backendId, databaseId and roleId will be filled in later */
370  MyProc->backendId = InvalidBackendId;
371  MyProc->databaseId = InvalidOid;
372  MyProc->roleId = InvalidOid;
374  MyPgXact->delayChkpt = false;
375  MyPgXact->vacuumFlags = 0;
376  /* NB -- autovac launcher intentionally does not set IS_AUTOVACUUM */
377  if (IsAutoVacuumWorkerProcess())
378  MyPgXact->vacuumFlags |= PROC_IS_AUTOVACUUM;
379  MyProc->lwWaiting = false;
380  MyProc->lwWaitMode = 0;
381  MyProc->waitLock = NULL;
382  MyProc->waitProcLock = NULL;
383 #ifdef USE_ASSERT_CHECKING
384  {
385  int i;
386 
387  /* Last process should have released all locks. */
388  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
389  Assert(SHMQueueEmpty(&(MyProc->myProcLocks[i])));
390  }
391 #endif
392  MyProc->recoveryConflictPending = false;
393 
394  /* Initialize fields for sync rep */
395  MyProc->waitLSN = 0;
397  SHMQueueElemInit(&(MyProc->syncRepLinks));
398 
399  /* Initialize fields for group XID clearing. */
400  MyProc->procArrayGroupMember = false;
403 
404  /* Check that group locking fields are in a proper initial state. */
405  Assert(MyProc->lockGroupLeader == NULL);
407 
408  /* Initialize wait event information. */
409  MyProc->wait_event_info = 0;
410 
411  /*
412  * Acquire ownership of the PGPROC's latch, so that we can use WaitLatch
413  * on it. That allows us to repoint the process latch, which so far
414  * points to the process-local one, to the shared one.
415  */
416  OwnLatch(&MyProc->procLatch);
418 
419  /*
420  * We might be reusing a semaphore that belonged to a failed process. So
421  * be careful and reinitialize its value here. (This is not strictly
422  * necessary anymore, but seems like a good idea for cleanliness.)
423  */
424  PGSemaphoreReset(MyProc->sem);
425 
426  /*
427  * Arrange to clean up at backend exit.
428  */
430 
431  /*
432  * Now that we have a PGPROC, we could try to acquire locks, so initialize
433  * local state needed for LWLocks, and the deadlock checker.
434  */
435  InitLWLockAccess();
436  InitDeadLockChecking();
437 }
438 
439 /*
440  * InitProcessPhase2 -- make MyProc visible in the shared ProcArray.
441  *
442  * This is separate from InitProcess because we can't acquire LWLocks until
443  * we've created a PGPROC, but in the EXEC_BACKEND case ProcArrayAdd won't
444  * work until after we've done CreateSharedMemoryAndSemaphores.
445  */
446 void
447 InitProcessPhase2(void)
448 {
449  Assert(MyProc != NULL);
450 
451  /*
452  * Add our PGPROC to the PGPROC array in shared memory.
453  */
454  ProcArrayAdd(MyProc);
455 
456  /*
457  * Arrange to clean that up at backend exit.
458  */
460 }
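/*
 * As a rough guide to the call sequence (not a complete list of callers): a
 * backend runs InitProcess() early in its startup, as soon as it needs a
 * PGPROC and semaphore, and InitPostgres() later calls InitProcessPhase2()
 * once the PGPROC is ready to be advertised in the shared ProcArray.
 */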
461 
462 /*
463  * InitAuxiliaryProcess -- create a per-auxiliary-process data structure
464  *
465  * This is called by bgwriter and similar processes so that they will have a
466  * MyProc value that's real enough to let them wait for LWLocks. The PGPROC
467  * and sema that are assigned are one of the extra ones created during
468  * InitProcGlobal.
469  *
470  * Auxiliary processes are presently not expected to wait for real (lockmgr)
471  * locks, so we need not set up the deadlock checker. They are never added
472  * to the ProcArray or the sinval messaging mechanism, either. They also
473  * don't get a VXID assigned, since this is only useful when we actually
474  * hold lockmgr locks.
475  *
476  * Startup process however uses locks but never waits for them in the
477  * normal backend sense. Startup process also takes part in sinval messaging
478  * as a sendOnly process, so never reads messages from sinval queue. So
479  * Startup process does have a VXID and does show up in pg_locks.
480  */
481 void
482 InitAuxiliaryProcess(void)
483 {
484  PGPROC *auxproc;
485  int proctype;
486 
487  /*
488  * ProcGlobal should be set up already (if we are a backend, we inherit
489  * this by fork() or EXEC_BACKEND mechanism from the postmaster).
490  */
491  if (ProcGlobal == NULL || AuxiliaryProcs == NULL)
492  elog(PANIC, "proc header uninitialized");
493 
494  if (MyProc != NULL)
495  elog(ERROR, "you already exist");
496 
497  /*
498  * We use the ProcStructLock to protect assignment and releasing of
499  * AuxiliaryProcs entries.
500  *
501  * While we are holding the ProcStructLock, also copy the current shared
502  * estimate of spins_per_delay to local storage.
503  */
505 
507 
508  /*
509  * Find a free auxproc ... *big* trouble if there isn't one ...
510  */
511  for (proctype = 0; proctype < NUM_AUXILIARY_PROCS; proctype++)
512  {
513  auxproc = &AuxiliaryProcs[proctype];
514  if (auxproc->pid == 0)
515  break;
516  }
517  if (proctype >= NUM_AUXILIARY_PROCS)
518  {
520  elog(FATAL, "all AuxiliaryProcs are in use");
521  }
522 
523  /* Mark auxiliary proc as in use by me */
524  /* use volatile pointer to prevent code rearrangement */
525  ((volatile PGPROC *) auxproc)->pid = MyProcPid;
526 
527  MyProc = auxproc;
528  MyPgXact = &ProcGlobal->allPgXact[auxproc->pgprocno];
529 
531 
532  /*
533  * Initialize all fields of MyProc, except for those previously
534  * initialized by InitProcGlobal.
535  */
536  SHMQueueElemInit(&(MyProc->links));
537  MyProc->waitStatus = STATUS_OK;
539  MyProc->fpVXIDLock = false;
541  MyPgXact->xid = InvalidTransactionId;
542  MyPgXact->xmin = InvalidTransactionId;
543  MyProc->backendId = InvalidBackendId;
544  MyProc->databaseId = InvalidOid;
545  MyProc->roleId = InvalidOid;
547  MyPgXact->delayChkpt = false;
548  MyPgXact->vacuumFlags = 0;
549  MyProc->lwWaiting = false;
550  MyProc->lwWaitMode = 0;
551  MyProc->waitLock = NULL;
552  MyProc->waitProcLock = NULL;
553 #ifdef USE_ASSERT_CHECKING
554  {
555  int i;
556 
557  /* Last process should have released all locks. */
558  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
559  Assert(SHMQueueEmpty(&(MyProc->myProcLocks[i])));
560  }
561 #endif
562 
563  /*
564  * Acquire ownership of the PGPROC's latch, so that we can use WaitLatch
565  * on it. That allows us to repoint the process latch, which so far
566  * points to the process-local one, to the shared one.
567  */
568  OwnLatch(&MyProc->procLatch);
570 
571  /* Check that group locking fields are in a proper initial state. */
572  Assert(MyProc->lockGroupLeader == NULL);
574 
575  /*
576  * We might be reusing a semaphore that belonged to a failed process. So
577  * be careful and reinitialize its value here. (This is not strictly
578  * necessary anymore, but seems like a good idea for cleanliness.)
579  */
580  PGSemaphoreReset(MyProc->sem);
581 
582  /*
583  * Arrange to clean up at process exit.
584  */
586 }
587 
588 /*
589  * Record the PID and PGPROC structures for the Startup process, for use in
590  * ProcSendSignal(). See comments there for further explanation.
591  */
592 void
593 PublishStartupProcessInformation(void)
594 {
596 
597  ProcGlobal->startupProc = MyProc;
598  ProcGlobal->startupProcPid = MyProcPid;
599 
601 }
602 
603 /*
604  * Used from bufgr to share the value of the buffer that Startup waits on,
605  * or to reset the value to "not waiting" (-1). This allows processing
606  * of recovery conflicts for buffer pins. Set is made before backends look
607  * at this value, so locking not required, especially since the set is
608  * an atomic integer set operation.
609  */
610 void
611 SetStartupBufferPinWaitBufId(int bufid)
612 {
613  /* use volatile pointer to prevent code rearrangement */
614  volatile PROC_HDR *procglobal = ProcGlobal;
615 
616  procglobal->startupBufferPinWaitBufId = bufid;
617 }
618 
619 /*
620  * Used by backends when they receive a request to check for buffer pin waits.
621  */
622 int
623 GetStartupBufferPinWaitBufId(void)
624 {
625  /* use volatile pointer to prevent code rearrangement */
626  volatile PROC_HDR *procglobal = ProcGlobal;
627 
628  return procglobal->startupBufferPinWaitBufId;
629 }
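/*
 * These two functions form a small handshake (a sketch of the intended use;
 * see bufmgr.c and the recovery-conflict code for the details): the Startup
 * process publishes the buffer it is blocked on before signalling backends,
 * and each signalled backend reads the value back to decide whether it is
 * the one holding the conflicting pin.
 */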
630 
631 /*
632  * Check whether there are at least N free PGPROC objects.
633  *
634  * Note: this is designed on the assumption that N will generally be small.
635  */
636 bool
637 HaveNFreeProcs(int n)
638 {
639  PGPROC *proc;
640 
642 
643  proc = ProcGlobal->freeProcs;
644 
645  while (n > 0 && proc != NULL)
646  {
647  proc = (PGPROC *) proc->links.next;
648  n--;
649  }
650 
652 
653  return (n <= 0);
654 }
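/*
 * Illustrative caller (stated loosely): connection startup can insist that
 * HaveNFreeProcs(superuser_reserved_connections) still holds before letting
 * a non-superuser in, so a few PGPROC slots remain available for superusers.
 */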
655 
656 /*
657  * Check if the current process is awaiting a lock.
658  */
659 bool
660 IsWaitingForLock(void)
661 {
662  if (lockAwaited == NULL)
663  return false;
664 
665  return true;
666 }
667 
668 /*
669  * Cancel any pending wait for lock, when aborting a transaction, and revert
670  * any strong lock count acquisition for a lock being acquired.
671  *
672  * (Normally, this would only happen if we accept a cancel/die
673  * interrupt while waiting; but an ereport(ERROR) before or during the lock
674  * wait is within the realm of possibility, too.)
675  */
676 void
677 LockErrorCleanup(void)
678 {
679  LWLock *partitionLock;
680  DisableTimeoutParams timeouts[2];
681 
682  HOLD_INTERRUPTS();
683 
685 
686  /* Nothing to do if we weren't waiting for a lock */
687  if (lockAwaited == NULL)
688  {
690  return;
691  }
692 
693  /*
694  * Turn off the deadlock and lock timeout timers, if they are still
695  * running (see ProcSleep). Note we must preserve the LOCK_TIMEOUT
696  * indicator flag, since this function is executed before
697  * ProcessInterrupts when responding to SIGINT; else we'd lose the
698  * knowledge that the SIGINT came from a lock timeout and not an external
699  * source.
700  */
701  timeouts[0].id = DEADLOCK_TIMEOUT;
702  timeouts[0].keep_indicator = false;
703  timeouts[1].id = LOCK_TIMEOUT;
704  timeouts[1].keep_indicator = true;
705  disable_timeouts(timeouts, 2);
706 
707  /* Unlink myself from the wait queue, if on it (might not be anymore!) */
708  partitionLock = LockHashPartitionLock(lockAwaited->hashcode);
709  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
710 
711  if (MyProc->links.next != NULL)
712  {
713  /* We could not have been granted the lock yet */
714  RemoveFromWaitQueue(MyProc, lockAwaited->hashcode);
715  }
716  else
717  {
718  /*
719  * Somebody kicked us off the lock queue already. Perhaps they
720  * granted us the lock, or perhaps they detected a deadlock. If they
721  * did grant us the lock, we'd better remember it in our local lock
722  * table.
723  */
724  if (MyProc->waitStatus == STATUS_OK)
726  }
727 
728  lockAwaited = NULL;
729 
730  LWLockRelease(partitionLock);
731 
733 }
734 
735 
736 /*
737  * ProcReleaseLocks() -- release locks associated with current transaction
738  * at main transaction commit or abort
739  *
740  * At main transaction commit, we release standard locks except session locks.
741  * At main transaction abort, we release all locks including session locks.
742  *
743  * Advisory locks are released only if they are transaction-level;
744  * session-level holds remain, whether this is a commit or not.
745  *
746  * At subtransaction commit, we don't release any locks (so this func is not
747  * needed at all); we will defer the releasing to the parent transaction.
748  * At subtransaction abort, we release all locks held by the subtransaction;
749  * this is implemented by retail releasing of the locks under control of
750  * the ResourceOwner mechanism.
751  */
752 void
753 ProcReleaseLocks(bool isCommit)
754 {
755  if (!MyProc)
756  return;
757  /* If waiting, get off wait queue (should only be needed after error) */
759  /* Release standard locks, including session-level if aborting */
761  /* Release transaction-level advisory locks */
763 }
764 
765 
766 /*
767  * RemoveProcFromArray() -- Remove this process from the shared ProcArray.
768  */
769 static void
770 RemoveProcFromArray(int code, Datum arg)
771 {
772  Assert(MyProc != NULL);
774 }
775 
776 /*
777  * ProcKill() -- Destroy the per-proc data structure for
778  * this process. Release any of its held LW locks.
779  */
780 static void
781 ProcKill(int code, Datum arg)
782 {
783  PGPROC *proc;
784  PGPROC *volatile * procgloballist;
785 
786  Assert(MyProc != NULL);
787 
788  /* Make sure we're out of the sync rep lists */
790 
791 #ifdef USE_ASSERT_CHECKING
792  {
793  int i;
794 
795  /* Last process should have released all locks. */
796  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
797  Assert(SHMQueueEmpty(&(MyProc->myProcLocks[i])));
798  }
799 #endif
800 
801  /*
802  * Release any LW locks I am holding. There really shouldn't be any, but
803  * it's cheap to check again before we cut the knees off the LWLock
804  * facility by releasing our PGPROC ...
805  */
807 
808  /* Cancel any pending condition variable sleep, too */
810 
811  /* Make sure active replication slots are released */
812  if (MyReplicationSlot != NULL)
814 
815  /* Also cleanup all the temporary slots. */
817 
818  /*
819  * Detach from any lock group of which we are a member. If the leader
820  * exits before all other group members, its PGPROC will remain allocated
821  * until the last group process exits; that process must return the
822  * leader's PGPROC to the appropriate list.
823  */
824  if (MyProc->lockGroupLeader != NULL)
825  {
826  PGPROC *leader = MyProc->lockGroupLeader;
827  LWLock *leader_lwlock = LockHashPartitionLockByProc(leader);
828 
829  LWLockAcquire(leader_lwlock, LW_EXCLUSIVE);
831  dlist_delete(&MyProc->lockGroupLink);
832  if (dlist_is_empty(&leader->lockGroupMembers))
833  {
834  leader->lockGroupLeader = NULL;
835  if (leader != MyProc)
836  {
837  procgloballist = leader->procgloballist;
838 
839  /* Leader exited first; return its PGPROC. */
841  leader->links.next = (SHM_QUEUE *) *procgloballist;
842  *procgloballist = leader;
844  }
845  }
846  else if (leader != MyProc)
847  MyProc->lockGroupLeader = NULL;
848  LWLockRelease(leader_lwlock);
849  }
850 
851  /*
852  * Reset MyLatch to the process local one. This is so that signal
853  * handlers et al can continue using the latch after the shared latch
854  * isn't ours anymore. After that clear MyProc and disown the shared
855  * latch.
856  */
858  proc = MyProc;
859  MyProc = NULL;
860  DisownLatch(&proc->procLatch);
861 
862  procgloballist = proc->procgloballist;
864 
865  /*
866  * If we're still a member of a locking group, that means we're a leader
867  * which has somehow exited before its children. The last remaining child
868  * will release our PGPROC. Otherwise, release it now.
869  */
870  if (proc->lockGroupLeader == NULL)
871  {
872  /* Since lockGroupLeader is NULL, lockGroupMembers should be empty. */
874 
875  /* Return PGPROC structure (and semaphore) to appropriate freelist */
876  proc->links.next = (SHM_QUEUE *) *procgloballist;
877  *procgloballist = proc;
878  }
879 
880  /* Update shared estimate of spins_per_delay */
881  ProcGlobal->spins_per_delay = update_spins_per_delay(ProcGlobal->spins_per_delay);
882 
884 
885  /*
886  * This process is no longer present in shared memory in any meaningful
887  * way, so tell the postmaster we've cleaned up acceptably well. (XXX
888  * autovac launcher should be included here someday)
889  */
892 
893  /* wake autovac launcher if needed -- see comments in FreeWorkerInfo */
894  if (AutovacuumLauncherPid != 0)
896 }
897 
898 /*
899  * AuxiliaryProcKill() -- Cut-down version of ProcKill for auxiliary
900  * processes (bgwriter, etc). The PGPROC and sema are not released, only
901  * marked as not-in-use.
902  */
903 static void
904 AuxiliaryProcKill(int code, Datum arg)
905 {
906  int proctype = DatumGetInt32(arg);
908  PGPROC *proc;
909 
910  Assert(proctype >= 0 && proctype < NUM_AUXILIARY_PROCS);
911 
912  auxproc = &AuxiliaryProcs[proctype];
913 
914  Assert(MyProc == auxproc);
915 
916  /* Release any LW locks I am holding (see notes above) */
918 
919  /* Cancel any pending condition variable sleep, too */
921 
922  /*
923  * Reset MyLatch to the process local one. This is so that signal
924  * handlers et al can continue using the latch after the shared latch
925  * isn't ours anymore. After that clear MyProc and disown the shared
926  * latch.
927  */
929  proc = MyProc;
930  MyProc = NULL;
931  DisownLatch(&proc->procLatch);
932 
934 
935  /* Mark auxiliary proc no longer in use */
936  proc->pid = 0;
937 
938  /* Update shared estimate of spins_per_delay */
939  ProcGlobal->spins_per_delay = update_spins_per_delay(ProcGlobal->spins_per_delay);
940 
942 }
943 
944 
945 /*
946  * ProcQueue package: routines for putting processes to sleep
947  * and waking them up
948  */
949 
950 /*
951  * ProcQueueAlloc -- alloc/attach to a shared memory process queue
952  *
953  * Returns: a pointer to the queue
954  * Side Effects: Initializes the queue if it wasn't there before
955  */
956 #ifdef NOT_USED
957 PROC_QUEUE *
958 ProcQueueAlloc(const char *name)
959 {
960  PROC_QUEUE *queue;
961  bool found;
962 
963  queue = (PROC_QUEUE *)
964  ShmemInitStruct(name, sizeof(PROC_QUEUE), &found);
965 
966  if (!found)
967  ProcQueueInit(queue);
968 
969  return queue;
970 }
971 #endif
972 
973 /*
974  * ProcQueueInit -- initialize a shared memory process queue
975  */
976 void
977 ProcQueueInit(PROC_QUEUE *queue)
978 {
979  SHMQueueInit(&(queue->links));
980  queue->size = 0;
981 }
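/*
 * Usage sketch (as in lock.c): when a LOCK object is first set up, its wait
 * queue is prepared with ProcQueueInit(&lock->waitProcs); ProcSleep() below
 * then links waiting PGPROCs into that queue.
 */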
982 
983 
984 /*
985  * ProcSleep -- put a process to sleep on the specified lock
986  *
987  * Caller must have set MyProc->heldLocks to reflect locks already held
988  * on the lockable object by this process (under all XIDs).
989  *
990  * The lock table's partition lock must be held at entry, and will be held
991  * at exit.
992  *
993  * Result: STATUS_OK if we acquired the lock, STATUS_ERROR if not (deadlock).
994  *
995  * ASSUME: that no one will fiddle with the queue until after
996  * we release the partition lock.
997  *
998  * NOTES: The process queue is now a priority queue for locking.
999  */
1000 int
1001 ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
1002 {
1003  LOCKMODE lockmode = locallock->tag.mode;
1004  LOCK *lock = locallock->lock;
1005  PROCLOCK *proclock = locallock->proclock;
1006  uint32 hashcode = locallock->hashcode;
1007  LWLock *partitionLock = LockHashPartitionLock(hashcode);
1008  PROC_QUEUE *waitQueue = &(lock->waitProcs);
1009  LOCKMASK myHeldLocks = MyProc->heldLocks;
1010  bool early_deadlock = false;
1011  bool allow_autovacuum_cancel = true;
1012  int myWaitStatus;
1013  PGPROC *proc;
1014  PGPROC *leader = MyProc->lockGroupLeader;
1015  int i;
1016 
1017  /*
1018  * If group locking is in use, locks held by members of my locking group
1019  * need to be included in myHeldLocks.
1020  */
1021  if (leader != NULL)
1022  {
1023  SHM_QUEUE *procLocks = &(lock->procLocks);
1024  PROCLOCK *otherproclock;
1025 
1026  otherproclock = (PROCLOCK *)
1027  SHMQueueNext(procLocks, procLocks, offsetof(PROCLOCK, lockLink));
1028  while (otherproclock != NULL)
1029  {
1030  if (otherproclock->groupLeader == leader)
1031  myHeldLocks |= otherproclock->holdMask;
1032  otherproclock = (PROCLOCK *)
1033  SHMQueueNext(procLocks, &otherproclock->lockLink,
1034  offsetof(PROCLOCK, lockLink));
1035  }
1036  }
1037 
1038  /*
1039  * Determine where to add myself in the wait queue.
1040  *
1041  * Normally I should go at the end of the queue. However, if I already
1042  * hold locks that conflict with the request of any previous waiter, put
1043  * myself in the queue just in front of the first such waiter. This is not
1044  * a necessary step, since deadlock detection would move me to before that
1045  * waiter anyway; but it's relatively cheap to detect such a conflict
1046  * immediately, and avoid delaying till deadlock timeout.
1047  *
1048  * Special case: if I find I should go in front of some waiter, check to
1049  * see if I conflict with already-held locks or the requests before that
1050  * waiter. If not, then just grant myself the requested lock immediately.
1051  * This is the same as the test for immediate grant in LockAcquire, except
1052  * we are only considering the part of the wait queue before my insertion
1053  * point.
1054  */
1055  if (myHeldLocks != 0)
1056  {
1057  LOCKMASK aheadRequests = 0;
1058 
1059  proc = (PGPROC *) waitQueue->links.next;
1060  for (i = 0; i < waitQueue->size; i++)
1061  {
1062  /*
1063  * If we're part of the same locking group as this waiter, its
1064  * locks neither conflict with ours nor contribute to
1065  * aheadRequests.
1066  */
1067  if (leader != NULL && leader == proc->lockGroupLeader)
1068  {
1069  proc = (PGPROC *) proc->links.next;
1070  continue;
1071  }
1072  /* Must he wait for me? */
1073  if (lockMethodTable->conflictTab[proc->waitLockMode] & myHeldLocks)
1074  {
1075  /* Must I wait for him ? */
1076  if (lockMethodTable->conflictTab[lockmode] & proc->heldLocks)
1077  {
1078  /*
1079  * Yes, so we have a deadlock. Easiest way to clean up
1080  * correctly is to call RemoveFromWaitQueue(), but we
1081  * can't do that until we are *on* the wait queue. So, set
1082  * a flag to check below, and break out of loop. Also,
1083  * record deadlock info for later message.
1084  */
1085  RememberSimpleDeadLock(MyProc, lockmode, lock, proc);
1086  early_deadlock = true;
1087  break;
1088  }
1089  /* I must go before this waiter. Check special case. */
1090  if ((lockMethodTable->conflictTab[lockmode] & aheadRequests) == 0 &&
1091  LockCheckConflicts(lockMethodTable,
1092  lockmode,
1093  lock,
1094  proclock) == STATUS_OK)
1095  {
1096  /* Skip the wait and just grant myself the lock. */
1097  GrantLock(lock, proclock, lockmode);
1098  GrantAwaitedLock();
1099  return STATUS_OK;
1100  }
1101  /* Break out of loop to put myself before him */
1102  break;
1103  }
1104  /* Nope, so advance to next waiter */
1105  aheadRequests |= LOCKBIT_ON(proc->waitLockMode);
1106  proc = (PGPROC *) proc->links.next;
1107  }
1108 
1109  /*
1110  * If we fall out of loop normally, proc points to waitQueue head, so
1111  * we will insert at tail of queue as desired.
1112  */
1113  }
1114  else
1115  {
1116  /* I hold no locks, so I can't push in front of anyone. */
1117  proc = (PGPROC *) &(waitQueue->links);
1118  }
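 /*
  * Worked example of the ordering rule above (illustrative lock modes only):
  * suppose we hold RowExclusiveLock, now request AccessExclusiveLock, and
  * find a waiter ahead of us requesting ShareLock. ShareLock conflicts with
  * our held RowExclusiveLock, so queueing behind that waiter would need the
  * deadlock detector to untangle it later; instead we either grant ourselves
  * the lock at once (if nothing ahead of that waiter conflicts with us) or
  * insert ourselves just in front of it.
  */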
1119 
1120  /*
1121  * Insert self into queue, ahead of the given proc (or at tail of queue).
1122  */
1123  SHMQueueInsertBefore(&(proc->links), &(MyProc->links));
1124  waitQueue->size++;
1125 
1126  lock->waitMask |= LOCKBIT_ON(lockmode);
1127 
1128  /* Set up wait information in PGPROC object, too */
1129  MyProc->waitLock = lock;
1130  MyProc->waitProcLock = proclock;
1131  MyProc->waitLockMode = lockmode;
1132 
1133  MyProc->waitStatus = STATUS_WAITING;
1134 
1135  /*
1136  * If we detected deadlock, give up without waiting. This must agree with
1137  * CheckDeadLock's recovery code, except that we shouldn't release the
1138  * semaphore since we haven't tried to lock it yet.
1139  */
1140  if (early_deadlock)
1141  {
1142  RemoveFromWaitQueue(MyProc, hashcode);
1143  return STATUS_ERROR;
1144  }
1145 
1146  /* mark that we are waiting for a lock */
1147  lockAwaited = locallock;
1148 
1149  /*
1150  * Release the lock table's partition lock.
1151  *
1152  * NOTE: this may also cause us to exit critical-section state, possibly
1153  * allowing a cancel/die interrupt to be accepted. This is OK because we
1154  * have recorded the fact that we are waiting for a lock, and so
1155  * LockErrorCleanup will clean up if cancel/die happens.
1156  */
1157  LWLockRelease(partitionLock);
1158 
1159  /*
1160  * Also, now that we will successfully clean up after an ereport, it's
1161  * safe to check to see if there's a buffer pin deadlock against the
1162  * Startup process. Of course, that's only necessary if we're doing Hot
1163  * Standby and are not the Startup process ourselves.
1164  */
1165  if (RecoveryInProgress() && !InRecovery)
1167 
1168  /* Reset deadlock_state before enabling the timeout handler */
1169  deadlock_state = DS_NOT_YET_CHECKED;
1170  got_deadlock_timeout = false;
1171 
1172  /*
1173  * Set timer so we can wake up after awhile and check for a deadlock. If a
1174  * deadlock is detected, the handler releases the process's semaphore and
1175  * sets MyProc->waitStatus = STATUS_ERROR, allowing us to know that we
1176  * must report failure rather than success.
1177  *
1178  * By delaying the check until we've waited for a bit, we can avoid
1179  * running the rather expensive deadlock-check code in most cases.
1180  *
1181  * If LockTimeout is set, also enable the timeout for that. We can save a
1182  * few cycles by enabling both timeout sources in one call.
1183  *
1184  * If InHotStandby we set lock waits slightly later for clarity with other
1185  * code.
1186  */
1187  if (!InHotStandby)
1188  {
1189  if (LockTimeout > 0)
1190  {
1191  EnableTimeoutParams timeouts[2];
1192 
1193  timeouts[0].id = DEADLOCK_TIMEOUT;
1194  timeouts[0].type = TMPARAM_AFTER;
1195  timeouts[0].delay_ms = DeadlockTimeout;
1196  timeouts[1].id = LOCK_TIMEOUT;
1197  timeouts[1].type = TMPARAM_AFTER;
1198  timeouts[1].delay_ms = LockTimeout;
1199  enable_timeouts(timeouts, 2);
1200  }
1201  else
1203  }
1204 
1205  /*
1206  * If somebody wakes us between LWLockRelease and WaitLatch, the latch
1207  * will not wait. But a set latch does not necessarily mean that the lock
1208  * is free now, as there are many other sources for latch sets than
1209  * somebody releasing the lock.
1210  *
1211  * We process interrupts whenever the latch has been set, so cancel/die
1212  * interrupts are processed quickly. This means we must not mind losing
1213  * control to a cancel/die interrupt here. We don't, because we have no
1214  * shared-state-change work to do after being granted the lock (the
1215  * grantor did it all). We do have to worry about canceling the deadlock
1216  * timeout and updating the locallock table, but if we lose control to an
1217  * error, LockErrorCleanup will fix that up.
1218  */
1219  do
1220  {
1221  if (InHotStandby)
1222  {
1223  /* Set a timer and wait for that or for the Lock to be granted */
1225  }
1226  else
1227  {
1229  PG_WAIT_LOCK | locallock->tag.lock.locktag_type);
1231  /* check for deadlocks first, as that's probably log-worthy */
1233  {
1234  CheckDeadLock();
1235  got_deadlock_timeout = false;
1236  }
1238  }
1239 
1240  /*
1241  * waitStatus could change from STATUS_WAITING to something else
1242  * asynchronously. Read it just once per loop to prevent surprising
1243  * behavior (such as missing log messages).
1244  */
1245  myWaitStatus = *((volatile int *) &MyProc->waitStatus);
1246 
1247  /*
1248  * If we are not deadlocked, but are waiting on an autovacuum-induced
1249  * task, send a signal to interrupt it.
1250  */
1251  if (deadlock_state == DS_BLOCKED_BY_AUTOVACUUM && allow_autovacuum_cancel)
1252  {
1253  PGPROC *autovac = GetBlockingAutoVacuumPgproc();
1254  PGXACT *autovac_pgxact = &ProcGlobal->allPgXact[autovac->pgprocno];
1255 
1256  LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
1257 
1258  /*
1259  * Only do it if the worker is not working to protect against Xid
1260  * wraparound.
1261  */
1262  if ((autovac_pgxact->vacuumFlags & PROC_IS_AUTOVACUUM) &&
1263  !(autovac_pgxact->vacuumFlags & PROC_VACUUM_FOR_WRAPAROUND))
1264  {
1265  int pid = autovac->pid;
1266  StringInfoData locktagbuf;
1267  StringInfoData logbuf; /* errdetail for server log */
1268 
1269  initStringInfo(&locktagbuf);
1270  initStringInfo(&logbuf);
1271  DescribeLockTag(&locktagbuf, &lock->tag);
1272  appendStringInfo(&logbuf,
1273  _("Process %d waits for %s on %s."),
1274  MyProcPid,
1276  lockmode),
1277  locktagbuf.data);
1278 
1279  /* release lock as quickly as possible */
1280  LWLockRelease(ProcArrayLock);
1281 
1282  /* send the autovacuum worker Back to Old Kent Road */
1283  ereport(DEBUG1,
1284  (errmsg("sending cancel to blocking autovacuum PID %d",
1285  pid),
1286  errdetail_log("%s", logbuf.data)));
1287 
1288  if (kill(pid, SIGINT) < 0)
1289  {
1290  /*
1291  * There's a race condition here: once we release the
1292  * ProcArrayLock, it's possible for the autovac worker to
1293  * close up shop and exit before we can do the kill().
1294  * Therefore, we do not whinge about no-such-process.
1295  * Other errors such as EPERM could conceivably happen if
1296  * the kernel recycles the PID fast enough, but such cases
1297  * seem improbable enough that it's probably best to issue
1298  * a warning if we see some other errno.
1299  */
1300  if (errno != ESRCH)
1301  ereport(WARNING,
1302  (errmsg("could not send signal to process %d: %m",
1303  pid)));
1304  }
1305 
1306  pfree(logbuf.data);
1307  pfree(locktagbuf.data);
1308  }
1309  else
1310  LWLockRelease(ProcArrayLock);
1311 
1312  /* prevent signal from being resent more than once */
1313  allow_autovacuum_cancel = false;
1314  }
1315 
1316  /*
1317  * If awoken after the deadlock check interrupt has run, and
1318  * log_lock_waits is on, then report about the wait.
1319  */
1321  {
1323  lock_waiters_sbuf,
1324  lock_holders_sbuf;
1325  const char *modename;
1326  long secs;
1327  int usecs;
1328  long msecs;
1329  SHM_QUEUE *procLocks;
1330  PROCLOCK *proclock;
1331  bool first_holder = true,
1332  first_waiter = true;
1333  int lockHoldersNum = 0;
1334 
1335  initStringInfo(&buf);
1336  initStringInfo(&lock_waiters_sbuf);
1337  initStringInfo(&lock_holders_sbuf);
1338 
1339  DescribeLockTag(&buf, &locallock->tag.lock);
1340  modename = GetLockmodeName(locallock->tag.lock.locktag_lockmethodid,
1341  lockmode);
1344  &secs, &usecs);
1345  msecs = secs * 1000 + usecs / 1000;
1346  usecs = usecs % 1000;
1347 
1348  /*
1349  * we loop over the lock's procLocks to gather a list of all
1350  * holders and waiters. Thus we will be able to provide more
1351  * detailed information for lock debugging purposes.
1352  *
1353  * lock->procLocks contains all processes which hold or wait for
1354  * this lock.
1355  */
1356 
1357  LWLockAcquire(partitionLock, LW_SHARED);
1358 
1359  procLocks = &(lock->procLocks);
1360  proclock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks,
1361  offsetof(PROCLOCK, lockLink));
1362 
1363  while (proclock)
1364  {
1365  /*
1366  * we are a waiter if myProc->waitProcLock == proclock; we are
1367  * a holder if it is NULL or something different
1368  */
1369  if (proclock->tag.myProc->waitProcLock == proclock)
1370  {
1371  if (first_waiter)
1372  {
1373  appendStringInfo(&lock_waiters_sbuf, "%d",
1374  proclock->tag.myProc->pid);
1375  first_waiter = false;
1376  }
1377  else
1378  appendStringInfo(&lock_waiters_sbuf, ", %d",
1379  proclock->tag.myProc->pid);
1380  }
1381  else
1382  {
1383  if (first_holder)
1384  {
1385  appendStringInfo(&lock_holders_sbuf, "%d",
1386  proclock->tag.myProc->pid);
1387  first_holder = false;
1388  }
1389  else
1390  appendStringInfo(&lock_holders_sbuf, ", %d",
1391  proclock->tag.myProc->pid);
1392 
1393  lockHoldersNum++;
1394  }
1395 
1396  proclock = (PROCLOCK *) SHMQueueNext(procLocks, &proclock->lockLink,
1397  offsetof(PROCLOCK, lockLink));
1398  }
1399 
1400  LWLockRelease(partitionLock);
1401 
1402  if (deadlock_state == DS_SOFT_DEADLOCK)
1403  ereport(LOG,
1404  (errmsg("process %d avoided deadlock for %s on %s by rearranging queue order after %ld.%03d ms",
1405  MyProcPid, modename, buf.data, msecs, usecs),
1406  (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
1407  "Processes holding the lock: %s. Wait queue: %s.",
1408  lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
1409  else if (deadlock_state == DS_HARD_DEADLOCK)
1410  {
1411  /*
1412  * This message is a bit redundant with the error that will be
1413  * reported subsequently, but in some cases the error report
1414  * might not make it to the log (eg, if it's caught by an
1415  * exception handler), and we want to ensure all long-wait
1416  * events get logged.
1417  */
1418  ereport(LOG,
1419  (errmsg("process %d detected deadlock while waiting for %s on %s after %ld.%03d ms",
1420  MyProcPid, modename, buf.data, msecs, usecs),
1421  (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
1422  "Processes holding the lock: %s. Wait queue: %s.",
1423  lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
1424  }
1425 
1426  if (myWaitStatus == STATUS_WAITING)
1427  ereport(LOG,
1428  (errmsg("process %d still waiting for %s on %s after %ld.%03d ms",
1429  MyProcPid, modename, buf.data, msecs, usecs),
1430  (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
1431  "Processes holding the lock: %s. Wait queue: %s.",
1432  lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
1433  else if (myWaitStatus == STATUS_OK)
1434  ereport(LOG,
1435  (errmsg("process %d acquired %s on %s after %ld.%03d ms",
1436  MyProcPid, modename, buf.data, msecs, usecs)));
1437  else
1438  {
1439  Assert(myWaitStatus == STATUS_ERROR);
1440 
1441  /*
1442  * Currently, the deadlock checker always kicks its own
1443  * process, which means that we'll only see STATUS_ERROR when
1444  * deadlock_state == DS_HARD_DEADLOCK, and there's no need to
1445  * print redundant messages. But for completeness and
1446  * future-proofing, print a message if it looks like someone
1447  * else kicked us off the lock.
1448  */
1449  if (deadlock_state != DS_HARD_DEADLOCK)
1450  ereport(LOG,
1451  (errmsg("process %d failed to acquire %s on %s after %ld.%03d ms",
1452  MyProcPid, modename, buf.data, msecs, usecs),
1453  (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
1454  "Processes holding the lock: %s. Wait queue: %s.",
1455  lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
1456  }
1457 
1458  /*
1459  * At this point we might still need to wait for the lock. Reset
1460  * state so we don't print the above messages again.
1461  */
1463 
1464  pfree(buf.data);
1465  pfree(lock_holders_sbuf.data);
1466  pfree(lock_waiters_sbuf.data);
1467  }
1468  } while (myWaitStatus == STATUS_WAITING);
1469 
1470  /*
1471  * Disable the timers, if they are still running. As in LockErrorCleanup,
1472  * we must preserve the LOCK_TIMEOUT indicator flag: if a lock timeout has
1473  * already caused QueryCancelPending to become set, we want the cancel to
1474  * be reported as a lock timeout, not a user cancel.
1475  */
1476  if (!InHotStandby)
1477  {
1478  if (LockTimeout > 0)
1479  {
1480  DisableTimeoutParams timeouts[2];
1481 
1482  timeouts[0].id = DEADLOCK_TIMEOUT;
1483  timeouts[0].keep_indicator = false;
1484  timeouts[1].id = LOCK_TIMEOUT;
1485  timeouts[1].keep_indicator = true;
1486  disable_timeouts(timeouts, 2);
1487  }
1488  else
1490  }
1491 
1492  /*
1493  * Re-acquire the lock table's partition lock. We have to do this to hold
1494  * off cancel/die interrupts before we can mess with lockAwaited (else we
1495  * might have a missed or duplicated locallock update).
1496  */
1497  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
1498 
1499  /*
1500  * We no longer want LockErrorCleanup to do anything.
1501  */
1502  lockAwaited = NULL;
1503 
1504  /*
1505  * If we got the lock, be sure to remember it in the locallock table.
1506  */
1507  if (MyProc->waitStatus == STATUS_OK)
1508  GrantAwaitedLock();
1509 
1510  /*
1511  * We don't have to do anything else, because the awaker did all the
1512  * necessary update of the lock table and MyProc.
1513  */
1514  return MyProc->waitStatus;
1515 }
1516 
1517 
1518 /*
1519  * ProcWakeup -- wake up a process by releasing its private semaphore.
1520  *
1521  * Also remove the process from the wait queue and set its links invalid.
1522  * RETURN: the next process in the wait queue.
1523  *
1524  * The appropriate lock partition lock must be held by caller.
1525  *
1526  * XXX: presently, this code is only used for the "success" case, and only
1527  * works correctly for that case. To clean up in failure case, would need
1528  * to twiddle the lock's request counts too --- see RemoveFromWaitQueue.
1529  * Hence, in practice the waitStatus parameter must be STATUS_OK.
1530  */
1531 PGPROC *
1532 ProcWakeup(PGPROC *proc, int waitStatus)
1533 {
1534  PGPROC *retProc;
1535 
1536  /* Proc should be sleeping ... */
1537  if (proc->links.prev == NULL ||
1538  proc->links.next == NULL)
1539  return NULL;
1540  Assert(proc->waitStatus == STATUS_WAITING);
1541 
1542  /* Save next process before we zap the list link */
1543  retProc = (PGPROC *) proc->links.next;
1544 
1545  /* Remove process from wait queue */
1546  SHMQueueDelete(&(proc->links));
1547  (proc->waitLock->waitProcs.size)--;
1548 
1549  /* Clean up process' state and pass it the ok/fail signal */
1550  proc->waitLock = NULL;
1551  proc->waitProcLock = NULL;
1552  proc->waitStatus = waitStatus;
1553 
1554  /* And awaken it */
1555  SetLatch(&proc->procLatch);
1556 
1557  return retProc;
1558 }
1559 
1560 /*
1561  * ProcLockWakeup -- routine for waking up processes when a lock is
1562  * released (or a prior waiter is aborted). Scan all waiters
1563  * for lock, waken any that are no longer blocked.
1564  *
1565  * The appropriate lock partition lock must be held by caller.
1566  */
1567 void
1568 ProcLockWakeup(LockMethod lockMethodTable, LOCK *lock)
1569 {
1570  PROC_QUEUE *waitQueue = &(lock->waitProcs);
1571  int queue_size = waitQueue->size;
1572  PGPROC *proc;
1573  LOCKMASK aheadRequests = 0;
1574 
1575  Assert(queue_size >= 0);
1576 
1577  if (queue_size == 0)
1578  return;
1579 
1580  proc = (PGPROC *) waitQueue->links.next;
1581 
1582  while (queue_size-- > 0)
1583  {
1584  LOCKMODE lockmode = proc->waitLockMode;
1585 
1586  /*
1587  * Waken if (a) doesn't conflict with requests of earlier waiters, and
1588  * (b) doesn't conflict with already-held locks.
1589  */
1590  if ((lockMethodTable->conflictTab[lockmode] & aheadRequests) == 0 &&
1591  LockCheckConflicts(lockMethodTable,
1592  lockmode,
1593  lock,
1594  proc->waitProcLock) == STATUS_OK)
1595  {
1596  /* OK to waken */
1597  GrantLock(lock, proc->waitProcLock, lockmode);
1598  proc = ProcWakeup(proc, STATUS_OK);
1599 
1600  /*
1601  * ProcWakeup removes proc from the lock's waiting process queue
1602  * and returns the next proc in chain; don't use proc's next-link,
1603  * because it's been cleared.
1604  */
1605  }
1606  else
1607  {
1608  /*
1609  * Cannot wake this guy. Remember his request for later checks.
1610  */
1611  aheadRequests |= LOCKBIT_ON(lockmode);
1612  proc = (PGPROC *) proc->links.next;
1613  }
1614  }
1615 
1616  Assert(waitQueue->size >= 0);
1617 }
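/*
 * Example of the aheadRequests rule (illustrative): if the first waiter
 * wants AccessExclusiveLock and still conflicts with the current holders,
 * then any later waiter whose request conflicts with AccessExclusiveLock is
 * also left sleeping, even if it would be grantable on its own; this keeps
 * the queue roughly first-come-first-served.
 */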
1618 
1619 /*
1620  * CheckDeadLock
1621  *
1622  * We only get to this routine, if DEADLOCK_TIMEOUT fired while waiting for a
1623  * lock to be released by some other process. Check if there's a deadlock; if
1624  * not, just return. (But signal ProcSleep to log a message, if
1625  * log_lock_waits is true.) If we have a real deadlock, remove ourselves from
1626  * the lock's wait queue and signal an error to ProcSleep.
1627  */
1628 static void
1629 CheckDeadLock(void)
1630 {
1631  int i;
1632 
1633  /*
1634  * Acquire exclusive lock on the entire shared lock data structures. Must
1635  * grab LWLocks in partition-number order to avoid LWLock deadlock.
1636  *
1637  * Note that the deadlock check interrupt had better not be enabled
1638  * anywhere that this process itself holds lock partition locks, else this
1639  * will wait forever. Also note that LWLockAcquire creates a critical
1640  * section, so that this routine cannot be interrupted by cancel/die
1641  * interrupts.
1642  */
1643  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
1645 
1646  /*
1647  * Check to see if we've been awoken by anyone in the interim.
1648  *
1649  * If we have, we can return and resume our transaction -- happy day.
1650  * Before we are awoken the process releasing the lock grants it to us so
1651  * we know that we don't have to wait anymore.
1652  *
1653  * We check by looking to see if we've been unlinked from the wait queue.
1654  * This is quicker than checking our semaphore's state, since no kernel
1655  * call is needed, and it is safe because we hold the lock partition lock.
1656  */
1657  if (MyProc->links.prev == NULL ||
1658  MyProc->links.next == NULL)
1659  goto check_done;
1660 
1661 #ifdef LOCK_DEBUG
1662  if (Debug_deadlocks)
1663  DumpAllLocks();
1664 #endif
1665 
1666  /* Run the deadlock check, and set deadlock_state for use by ProcSleep */
1667  deadlock_state = DeadLockCheck(MyProc);
1668 
1669  if (deadlock_state == DS_HARD_DEADLOCK)
1670  {
1671  /*
1672  * Oops. We have a deadlock.
1673  *
1674  * Get this process out of wait state. (Note: we could do this more
1675  * efficiently by relying on lockAwaited, but use this coding to
1676  * preserve the flexibility to kill some other transaction than the
1677  * one detecting the deadlock.)
1678  *
1679  * RemoveFromWaitQueue sets MyProc->waitStatus to STATUS_ERROR, so
1680  * ProcSleep will report an error after we return from the signal
1681  * handler.
1682  */
1683  Assert(MyProc->waitLock != NULL);
1684  RemoveFromWaitQueue(MyProc, LockTagHashCode(&(MyProc->waitLock->tag)));
1685 
1686  /*
1687  * We're done here. Transaction abort caused by the error that
1688  * ProcSleep will raise will cause any other locks we hold to be
1689  * released, thus allowing other processes to wake up; we don't need
1690  * to do that here. NOTE: an exception is that releasing locks we
1691  * hold doesn't consider the possibility of waiters that were blocked
1692  * behind us on the lock we just failed to get, and might now be
1693  * wakable because we're not in front of them anymore. However,
1694  * RemoveFromWaitQueue took care of waking up any such processes.
1695  */
1696  }
1697 
1698  /*
1699  * And release locks. We do this in reverse order for two reasons: (1)
1700  * Anyone else who needs more than one of the locks will be trying to lock
1701  * them in increasing order; we don't want to release the other process
1702  * until it can get all the locks it needs. (2) This avoids O(N^2)
1703  * behavior inside LWLockRelease.
1704  */
1705 check_done:
1706  for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
1708 }
1709 
1710 /*
1711  * CheckDeadLockAlert - Handle the expiry of deadlock_timeout.
1712  *
1713  * NB: Runs inside a signal handler, be careful.
1714  */
1715 void
1716 CheckDeadLockAlert(void)
1717 {
1718  int save_errno = errno;
1719 
1720  got_deadlock_timeout = true;
1721 
1722  /*
1723  * Have to set the latch again, even if handle_sig_alarm already did. Back
1724  * then got_deadlock_timeout wasn't yet set... It's unlikely that this
1725  * ever would be a problem, but setting a set latch again is cheap.
1726  */
1727  SetLatch(MyLatch);
1728  errno = save_errno;
1729 }
1730 
1731 /*
1732  * ProcWaitForSignal - wait for a signal from another backend.
1733  *
1734  * As this uses the generic process latch the caller has to be robust against
1735  * unrelated wakeups: Always check that the desired state has occurred, and
1736  * wait again if not.
1737  */
1738 void
1739 ProcWaitForSignal(uint32 wait_event_info)
1740 {
1741  WaitLatch(MyLatch, WL_LATCH_SET, 0, wait_event_info);
1744 }
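/*
 * Typical caller pattern, as a sketch (condition_satisfied and the wait
 * event name are placeholders, not real symbols):
 *
 *     for (;;)
 *     {
 *         if (condition_satisfied())
 *             break;
 *         ProcWaitForSignal(WAIT_EVENT_SOME_CONDITION);
 *     }
 *
 * i.e. always re-check the awaited state after waking, since the latch can
 * be set for many unrelated reasons.
 */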
1745 
1746 /*
1747  * ProcSendSignal - send a signal to a backend identified by PID
1748  */
1749 void
1750 ProcSendSignal(int pid)
1751 {
1752  PGPROC *proc = NULL;
1753 
1754  if (RecoveryInProgress())
1755  {
1757 
1758  /*
1759  * Check to see whether it is the Startup process we wish to signal.
1760  * This call is made by the buffer manager when it wishes to wake up a
1761  * process that has been waiting for a pin, so it can obtain a
1762  * cleanup lock using LockBufferForCleanup(). Startup is not a normal
1763  * backend, so BackendPidGetProc() will not return any pid at all. So
1764  * we remember the information for this special case.
1765  */
1766  if (pid == ProcGlobal->startupProcPid)
1767  proc = ProcGlobal->startupProc;
1768 
1770  }
1771 
1772  if (proc == NULL)
1773  proc = BackendPidGetProc(pid);
1774 
1775  if (proc != NULL)
1776  {
1777  SetLatch(&proc->procLatch);
1778  }
1779 }
1780 
1781 /*
1782  * BecomeLockGroupLeader - designate process as lock group leader
1783  *
1784  * Once this function has returned, other processes can join the lock group
1785  * by calling BecomeLockGroupMember.
1786  */
1787 void
1788 BecomeLockGroupLeader(void)
1789 {
1790  LWLock *leader_lwlock;
1791 
1792  /* If we already did it, we don't need to do it again. */
1793  if (MyProc->lockGroupLeader == MyProc)
1794  return;
1795 
1796  /* We had better not be a follower. */
1797  Assert(MyProc->lockGroupLeader == NULL);
1798 
1799  /* Create single-member group, containing only ourselves. */
1800  leader_lwlock = LockHashPartitionLockByProc(MyProc);
1801  LWLockAcquire(leader_lwlock, LW_EXCLUSIVE);
1802  MyProc->lockGroupLeader = MyProc;
1803  dlist_push_head(&MyProc->lockGroupMembers, &MyProc->lockGroupLink);
1804  LWLockRelease(leader_lwlock);
1805 }
1806 
1807 /*
1808  * BecomeLockGroupMember - designate process as lock group member
1809  *
1810  * This is pretty straightforward except for the possibility that the leader
1811  * whose group we're trying to join might exit before we manage to do so;
1812  * and the PGPROC might get recycled for an unrelated process. To avoid
1813  * that, we require the caller to pass the PID of the intended PGPROC as
1814  * an interlock. Returns true if we successfully join the intended lock
1815  * group, and false if not.
1816  */
1817 bool
1818 BecomeLockGroupMember(PGPROC *leader, int pid)
1819 {
1820  LWLock *leader_lwlock;
1821  bool ok = false;
1822 
1823  /* Group leader can't become member of group */
1824  Assert(MyProc != leader);
1825 
1826  /* Can't already be a member of a group */
1827  Assert(MyProc->lockGroupLeader == NULL);
1828 
1829  /* PID must be valid. */
1830  Assert(pid != 0);
1831 
1832  /*
1833  * Get lock protecting the group fields. Note LockHashPartitionLockByProc
1834  * accesses leader->pgprocno in a PGPROC that might be free. This is safe
1835  * because all PGPROCs' pgprocno fields are set during shared memory
1836  * initialization and never change thereafter; so we will acquire the
1837  * correct lock even if the leader PGPROC is in the process of being recycled.
1838  */
1839  leader_lwlock = LockHashPartitionLockByProc(leader);
1840  LWLockAcquire(leader_lwlock, LW_EXCLUSIVE);
1841 
1842  /* Is this the leader we're looking for? */
1843  if (leader->pid == pid && leader->lockGroupLeader == leader)
1844  {
1845  /* OK, join the group */
1846  ok = true;
1847  MyProc->lockGroupLeader = leader;
1848  dlist_push_tail(&leader->lockGroupMembers, &MyProc->lockGroupLink);
1849  }
1850  LWLockRelease(leader_lwlock);
1851 
1852  return ok;
1853 }
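Putting the two lock-group calls together: the leader first promotes itself, then hands its PGPROC pointer and PID to the worker (parallel query passes these through a shared-memory area), and the worker joins using the PID as the interlock. The sketch below is illustrative only; the shared->... fields are hypothetical placeholders for whatever shared state the caller uses.

    /*
     * Illustrative sketch only -- not part of proc.c.
     * "shared" stands for caller-provided shared memory; its fields are
     * hypothetical.
     */

    /* Leader side, before launching workers: */
    BecomeLockGroupLeader();
    shared->leader_pgproc = MyProc;
    shared->leader_pid = MyProcPid;

    /* Worker side, during startup: */
    if (!BecomeLockGroupMember(shared->leader_pgproc, shared->leader_pid))
        ereport(ERROR,
                (errcode(ERRCODE_INTERNAL_ERROR),
                 errmsg("lock group leader exited before this worker could join")));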