proc.c
1 /*-------------------------------------------------------------------------
2  *
3  * proc.c
4  * routines to manage per-process shared memory data structure
5  *
6  * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  *
10  * IDENTIFICATION
11  * src/backend/storage/lmgr/proc.c
12  *
13  *-------------------------------------------------------------------------
14  */
15 /*
16  * Interface (a):
17  * ProcSleep(), ProcWakeup(),
18  * ProcQueueAlloc() -- create a shm queue for sleeping processes
19  * ProcQueueInit() -- create a queue without allocing memory
20  *
21  * Waiting for a lock causes the backend to be put to sleep. Whoever releases
22  * the lock wakes the process up again (and gives it an error code so it knows
23  * whether it was awoken on an error condition).
24  *
25  * Interface (b):
26  *
27  * ProcReleaseLocks -- frees the locks associated with current transaction
28  *
29  * ProcKill -- destroys the shared memory state (and locks)
30  * associated with the process.
31  */
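/*
 * Illustrative sketch only (the real caller is WaitOnLock() in lock.c, not
 * this file): a backend that must wait for a heavyweight lock, while still
 * holding the lock table's partition lock, drives interface (a) roughly as
 *
 *     if (ProcSleep(locallock, lockMethodTable) != STATUS_OK)
 *         DeadLockReport();    -- ereport(ERROR) describing the deadlock
 *
 * and relies on LockErrorCleanup() below to undo the wait state if an error
 * or cancel interrupt arrives while sleeping.
 */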
32 #include "postgres.h"
33 
34 #include <signal.h>
35 #include <unistd.h>
36 #include <sys/time.h>
37 
38 #include "access/transam.h"
39 #include "access/twophase.h"
40 #include "access/xact.h"
41 #include "miscadmin.h"
42 #include "pgstat.h"
43 #include "postmaster/autovacuum.h"
44 #include "replication/slot.h"
45 #include "replication/syncrep.h"
47 #include "storage/standby.h"
48 #include "storage/ipc.h"
49 #include "storage/lmgr.h"
50 #include "storage/pmsignal.h"
51 #include "storage/proc.h"
52 #include "storage/procarray.h"
53 #include "storage/procsignal.h"
54 #include "storage/spin.h"
55 #include "utils/timeout.h"
56 #include "utils/timestamp.h"
57 
58 
59 /* GUC variables */
60 int DeadlockTimeout = 1000;
61 int StatementTimeout = 0;
62 int LockTimeout = 0;
63 int IdleInTransactionSessionTimeout = 0;
64 bool log_lock_waits = false;
65 
66 /* Pointer to this process's PGPROC and PGXACT structs, if any */
67 PGPROC *MyProc = NULL;
68 PGXACT *MyPgXact = NULL;
69 
70 /*
71  * This spinlock protects the freelist of recycled PGPROC structures.
72  * We cannot use an LWLock because the LWLock manager depends on already
73  * having a PGPROC and a wait semaphore! But these structures are touched
74  * relatively infrequently (only at backend startup or shutdown) and not for
75  * very long, so a spinlock is okay.
76  */
77 slock_t *ProcStructLock = NULL;
78 
79 /* Pointers to shared-memory structures */
80 PROC_HDR *ProcGlobal = NULL;
81 NON_EXEC_STATIC PGPROC *AuxiliaryProcs = NULL;
82 PGPROC *PreparedXactProcs = NULL;
83 
84 /* If we are waiting for a lock, this points to the associated LOCALLOCK */
85 static LOCALLOCK *lockAwaited = NULL;
86 
86 
87 static DeadLockState deadlock_state = DS_NOT_YET_CHECKED;
88 
89 /* Is a deadlock check pending? */
90 static volatile sig_atomic_t got_deadlock_timeout;
91 
92 static void RemoveProcFromArray(int code, Datum arg);
93 static void ProcKill(int code, Datum arg);
94 static void AuxiliaryProcKill(int code, Datum arg);
95 static void CheckDeadLock(void);
96 
97 
98 /*
99  * Report shared-memory space needed by InitProcGlobal.
100  */
101 Size
102 ProcGlobalShmemSize(void)
103 {
104  Size size = 0;
105 
106  /* ProcGlobal */
107  size = add_size(size, sizeof(PROC_HDR));
108  /* MyProcs, including autovacuum workers and launcher */
109  size = add_size(size, mul_size(MaxBackends, sizeof(PGPROC)));
110  /* AuxiliaryProcs */
111  size = add_size(size, mul_size(NUM_AUXILIARY_PROCS, sizeof(PGPROC)));
112  /* Prepared xacts */
113  size = add_size(size, mul_size(max_prepared_xacts, sizeof(PGPROC)));
114  /* ProcStructLock */
115  size = add_size(size, sizeof(slock_t));
116 
117  size = add_size(size, mul_size(MaxBackends, sizeof(PGXACT)));
118  size = add_size(size, mul_size(NUM_AUXILIARY_PROCS, sizeof(PGXACT)));
119  size = add_size(size, mul_size(max_prepared_xacts, sizeof(PGXACT)));
120 
121  return size;
122 }
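/*
 * Worked sizing example (illustrative settings, assuming the usual
 * MaxBackends = max_connections + autovacuum_max_workers + 1 +
 * max_worker_processes rule): with max_connections = 100,
 * autovacuum_max_workers = 3, max_worker_processes = 8 and
 * max_prepared_transactions = 2, MaxBackends is 100 + 3 + 1 + 8 = 112, so
 * the request above covers 112 + NUM_AUXILIARY_PROCS + 2 PGPROCs, the same
 * number of PGXACTs, one PROC_HDR and one slock_t.
 */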
123 
124 /*
125  * Report number of semaphores needed by InitProcGlobal.
126  */
127 int
128 ProcGlobalSemas(void)
129 {
130  /*
131  * We need a sema per backend (including autovacuum), plus one for each
132  * auxiliary process.
133  */
134  return MaxBackends + NUM_AUXILIARY_PROCS;
135 }
136 
137 /*
138  * InitProcGlobal -
139  * Initialize the global process table during postmaster or standalone
140  * backend startup.
141  *
142  * We also create all the per-process semaphores we will need to support
143  * the requested number of backends. We used to allocate semaphores
144  * only when backends were actually started up, but that is bad because
145  * it lets Postgres fail under load --- a lot of Unix systems are
146  * (mis)configured with small limits on the number of semaphores, and
147  * running out when trying to start another backend is a common failure.
148  * So, now we grab enough semaphores to support the desired max number
149  * of backends immediately at initialization --- if the sysadmin has set
150  * MaxConnections, max_worker_processes, or autovacuum_max_workers higher
151  * than his kernel will support, he'll find out sooner rather than later.
152  *
153  * Another reason for creating semaphores here is that the semaphore
154  * implementation typically requires us to create semaphores in the
155  * postmaster, not in backends.
156  *
157  * Note: this is NOT called by individual backends under a postmaster,
158  * not even in the EXEC_BACKEND case. The ProcGlobal and AuxiliaryProcs
159  * pointers must be propagated specially for EXEC_BACKEND operation.
160  */
161 void
162 InitProcGlobal(void)
163 {
164  PGPROC *procs;
165  PGXACT *pgxacts;
166  int i,
167  j;
168  bool found;
169  uint32 TotalProcs = MaxBackends + NUM_AUXILIARY_PROCS + max_prepared_xacts;
170 
171  /* Create the ProcGlobal shared structure */
172  ProcGlobal = (PROC_HDR *)
173  ShmemInitStruct("Proc Header", sizeof(PROC_HDR), &found);
174  Assert(!found);
175 
176  /*
177  * Initialize the data structures.
178  */
180  ProcGlobal->freeProcs = NULL;
181  ProcGlobal->autovacFreeProcs = NULL;
182  ProcGlobal->bgworkerFreeProcs = NULL;
183  ProcGlobal->startupProc = NULL;
184  ProcGlobal->startupProcPid = 0;
185  ProcGlobal->startupBufferPinWaitBufId = -1;
186  ProcGlobal->walwriterLatch = NULL;
187  ProcGlobal->checkpointerLatch = NULL;
190 
191  /*
192  * Create and initialize all the PGPROC structures we'll need. There are
193  * five separate consumers: (1) normal backends, (2) autovacuum workers
194  * and the autovacuum launcher, (3) background workers, (4) auxiliary
195  * processes, and (5) prepared transactions. Each PGPROC structure is
196  * dedicated to exactly one of these purposes, and they do not move
197  * between groups.
198  */
199  procs = (PGPROC *) ShmemAlloc(TotalProcs * sizeof(PGPROC));
200  MemSet(procs, 0, TotalProcs * sizeof(PGPROC));
201  ProcGlobal->allProcs = procs;
202  /* XXX allProcCount isn't really all of them; it excludes prepared xacts */
203  ProcGlobal->allProcCount = MaxBackends + NUM_AUXILIARY_PROCS;
204 
205  /*
206  * Also allocate a separate array of PGXACT structures. This is separate
207  * from the main PGPROC array so that the most heavily accessed data is
208  * stored contiguously in memory in as few cache lines as possible. This
209  * provides significant performance benefits, especially on a
210  * multiprocessor system. There is one PGXACT structure for every PGPROC
211  * structure.
212  */
213  pgxacts = (PGXACT *) ShmemAlloc(TotalProcs * sizeof(PGXACT));
214  MemSet(pgxacts, 0, TotalProcs * sizeof(PGXACT));
215  ProcGlobal->allPgXact = pgxacts;
216 
217  for (i = 0; i < TotalProcs; i++)
218  {
219  /* Common initialization for all PGPROCs, regardless of type. */
220 
221  /*
222  * Set up per-PGPROC semaphore, latch, and backendLock. Prepared xact
223  * dummy PGPROCs don't need these though - they're never associated
224  * with a real process
225  */
226  if (i < MaxBackends + NUM_AUXILIARY_PROCS)
227  {
228  procs[i].sem = PGSemaphoreCreate();
229  InitSharedLatch(&(procs[i].procLatch));
230  LWLockInitialize(&(procs[i].backendLock), LWTRANCHE_PROC);
231  }
232  procs[i].pgprocno = i;
233 
234  /*
235  * Newly created PGPROCs for normal backends, autovacuum and bgworkers
236  * must be queued up on the appropriate free list. Because there can
237  * only ever be a small, fixed number of auxiliary processes, no free
238  * list is used in that case; InitAuxiliaryProcess() instead uses a
239  * linear search. PGPROCs for prepared transactions are added to a
240  * free list by TwoPhaseShmemInit().
241  */
242  if (i < MaxConnections)
243  {
244  /* PGPROC for normal backend, add to freeProcs list */
245  procs[i].links.next = (SHM_QUEUE *) ProcGlobal->freeProcs;
246  ProcGlobal->freeProcs = &procs[i];
247  procs[i].procgloballist = &ProcGlobal->freeProcs;
248  }
249  else if (i < MaxConnections + autovacuum_max_workers + 1)
250  {
251  /* PGPROC for AV launcher/worker, add to autovacFreeProcs list */
252  procs[i].links.next = (SHM_QUEUE *) ProcGlobal->autovacFreeProcs;
253  ProcGlobal->autovacFreeProcs = &procs[i];
254  procs[i].procgloballist = &ProcGlobal->autovacFreeProcs;
255  }
256  else if (i < MaxBackends)
257  {
258  /* PGPROC for bgworker, add to bgworkerFreeProcs list */
259  procs[i].links.next = (SHM_QUEUE *) ProcGlobal->bgworkerFreeProcs;
260  ProcGlobal->bgworkerFreeProcs = &procs[i];
261  procs[i].procgloballist = &ProcGlobal->bgworkerFreeProcs;
262  }
263 
264  /* Initialize myProcLocks[] shared memory queues. */
265  for (j = 0; j < NUM_LOCK_PARTITIONS; j++)
266  SHMQueueInit(&(procs[i].myProcLocks[j]));
267 
268  /* Initialize lockGroupMembers list. */
269  dlist_init(&procs[i].lockGroupMembers);
270  }
271 
272  /*
273  * Save pointers to the blocks of PGPROC structures reserved for auxiliary
274  * processes and prepared transactions.
275  */
276  AuxiliaryProcs = &procs[MaxBackends];
277  PreparedXactProcs = &procs[MaxBackends + NUM_AUXILIARY_PROCS];
278 
279  /* Create ProcStructLock spinlock, too */
280  ProcStructLock = (slock_t *) ShmemAlloc(sizeof(slock_t));
282 }
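/*
 * Summary of the allProcs layout established above (index ranges into
 * procs[], matching the loop's if/else chain):
 *
 *     [0, MaxConnections)                              -> freeProcs
 *     [MaxConnections, MaxConnections +
 *                      autovacuum_max_workers + 1)      -> autovacFreeProcs
 *     [that, MaxBackends)                               -> bgworkerFreeProcs
 *     [MaxBackends, MaxBackends + NUM_AUXILIARY_PROCS)  -> AuxiliaryProcs
 *     [MaxBackends + NUM_AUXILIARY_PROCS, TotalProcs)   -> PreparedXactProcs
 */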
283 
284 /*
285  * InitProcess -- initialize a per-process data structure for this backend
286  */
287 void
288 InitProcess(void)
289 {
290  PGPROC *volatile *procgloballist;
291 
292  /*
293  * ProcGlobal should be set up already (if we are a backend, we inherit
294  * this by fork() or EXEC_BACKEND mechanism from the postmaster).
295  */
296  if (ProcGlobal == NULL)
297  elog(PANIC, "proc header uninitialized");
298 
299  if (MyProc != NULL)
300  elog(ERROR, "you already exist");
301 
302  /* Decide which list should supply our PGPROC. */
303  if (IsAnyAutoVacuumProcess())
304  procgloballist = &ProcGlobal->autovacFreeProcs;
305  else if (IsBackgroundWorker)
306  procgloballist = &ProcGlobal->bgworkerFreeProcs;
307  else
308  procgloballist = &ProcGlobal->freeProcs;
309 
310  /*
311  * Try to get a proc struct from the appropriate free list. If this
312  * fails, we must be out of PGPROC structures (not to mention semaphores).
313  *
314  * While we are holding the ProcStructLock, also copy the current shared
315  * estimate of spins_per_delay to local storage.
316  */
318 
320 
321  MyProc = *procgloballist;
322 
323  if (MyProc != NULL)
324  {
325  *procgloballist = (PGPROC *) MyProc->links.next;
327  }
328  else
329  {
330  /*
331  * If we reach here, all the PGPROCs are in use. This is one of the
332  * possible places to detect "too many backends", so give the standard
333  * error message. XXX do we need to give a different failure message
334  * in the autovacuum case?
335  */
337  ereport(FATAL,
338  (errcode(ERRCODE_TOO_MANY_CONNECTIONS),
339  errmsg("sorry, too many clients already")));
340  }
341  MyPgXact = &ProcGlobal->allPgXact[MyProc->pgprocno];
342 
343  /*
344  * Cross-check that the PGPROC is of the type we expect; if this were not
345  * the case, it would get returned to the wrong list.
346  */
347  Assert(MyProc->procgloballist == procgloballist);
348 
349  /*
350  * Now that we have a PGPROC, mark ourselves as an active postmaster
351  * child; this is so that the postmaster can detect it if we exit without
352  * cleaning up. (XXX autovac launcher currently doesn't participate in
353  * this; it probably should.)
354  */
357 
358  /*
359  * Initialize all fields of MyProc, except for those previously
360  * initialized by InitProcGlobal.
361  */
362  SHMQueueElemInit(&(MyProc->links));
363  MyProc->waitStatus = STATUS_OK;
365  MyProc->fpVXIDLock = false;
367  MyPgXact->xid = InvalidTransactionId;
368  MyPgXact->xmin = InvalidTransactionId;
369  MyProc->pid = MyProcPid;
370  /* backendId, databaseId and roleId will be filled in later */
371  MyProc->backendId = InvalidBackendId;
372  MyProc->databaseId = InvalidOid;
373  MyProc->roleId = InvalidOid;
375  MyPgXact->delayChkpt = false;
376  MyPgXact->vacuumFlags = 0;
377  /* NB -- autovac launcher intentionally does not set IS_AUTOVACUUM */
378  if (IsAutoVacuumWorkerProcess())
379  MyPgXact->vacuumFlags |= PROC_IS_AUTOVACUUM;
380  MyProc->lwWaiting = false;
381  MyProc->lwWaitMode = 0;
382  MyProc->waitLock = NULL;
383  MyProc->waitProcLock = NULL;
384 #ifdef USE_ASSERT_CHECKING
385  {
386  int i;
387 
388  /* Last process should have released all locks. */
389  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
390  Assert(SHMQueueEmpty(&(MyProc->myProcLocks[i])));
391  }
392 #endif
393  MyProc->recoveryConflictPending = false;
394 
395  /* Initialize fields for sync rep */
396  MyProc->waitLSN = 0;
398  SHMQueueElemInit(&(MyProc->syncRepLinks));
399 
400  /* Initialize fields for group XID clearing. */
401  MyProc->procArrayGroupMember = false;
404 
405  /* Check that group locking fields are in a proper initial state. */
406  Assert(MyProc->lockGroupLeader == NULL);
408 
409  /* Initialize wait event information. */
410  MyProc->wait_event_info = 0;
411 
412  /* Initialize fields for group transaction status update. */
413  MyProc->clogGroupMember = false;
416  MyProc->clogGroupMemberPage = -1;
419 
420  /*
421  * Acquire ownership of the PGPROC's latch, so that we can use WaitLatch
422  * on it. That allows us to repoint the process latch, which so far
423  * points to the process-local one, to the shared one.
424  */
425  OwnLatch(&MyProc->procLatch);
427 
428  /*
429  * We might be reusing a semaphore that belonged to a failed process. So
430  * be careful and reinitialize its value here. (This is not strictly
431  * necessary anymore, but seems like a good idea for cleanliness.)
432  */
433  PGSemaphoreReset(MyProc->sem);
434 
435  /*
436  * Arrange to clean up at backend exit.
437  */
439 
440  /*
441  * Now that we have a PGPROC, we could try to acquire locks, so initialize
442  * local state needed for LWLocks, and the deadlock checker.
443  */
446 }
447 
448 /*
449  * InitProcessPhase2 -- make MyProc visible in the shared ProcArray.
450  *
451  * This is separate from InitProcess because we can't acquire LWLocks until
452  * we've created a PGPROC, but in the EXEC_BACKEND case ProcArrayAdd won't
453  * work until after we've done CreateSharedMemoryAndSemaphores.
454  */
455 void
456 InitProcessPhase2(void)
457 {
458  Assert(MyProc != NULL);
459 
460  /*
461  * Add our PGPROC to the PGPROC array in shared memory.
462  */
463  ProcArrayAdd(MyProc);
464 
465  /*
466  * Arrange to clean that up at backend exit.
467  */
469 }
470 
471 /*
472  * InitAuxiliaryProcess -- create a per-auxiliary-process data structure
473  *
474  * This is called by bgwriter and similar processes so that they will have a
475  * MyProc value that's real enough to let them wait for LWLocks. The PGPROC
476  * and sema that are assigned are one of the extra ones created during
477  * InitProcGlobal.
478  *
479  * Auxiliary processes are presently not expected to wait for real (lockmgr)
480  * locks, so we need not set up the deadlock checker. They are never added
481  * to the ProcArray or the sinval messaging mechanism, either. They also
482  * don't get a VXID assigned, since this is only useful when we actually
483  * hold lockmgr locks.
484  *
485  * The Startup process, however, does use locks but never waits for them in
486  * the normal backend sense. It also takes part in sinval messaging as a
487  * sendOnly process, so it never reads messages from the sinval queue. Thus
488  * the Startup process does have a VXID and does show up in pg_locks.
489  */
490 void
491 InitAuxiliaryProcess(void)
492 {
493  PGPROC *auxproc;
494  int proctype;
495 
496  /*
497  * ProcGlobal should be set up already (if we are a backend, we inherit
498  * this by fork() or EXEC_BACKEND mechanism from the postmaster).
499  */
500  if (ProcGlobal == NULL || AuxiliaryProcs == NULL)
501  elog(PANIC, "proc header uninitialized");
502 
503  if (MyProc != NULL)
504  elog(ERROR, "you already exist");
505 
506  /*
507  * We use the ProcStructLock to protect assignment and releasing of
508  * AuxiliaryProcs entries.
509  *
510  * While we are holding the ProcStructLock, also copy the current shared
511  * estimate of spins_per_delay to local storage.
512  */
514 
516 
517  /*
518  * Find a free auxproc ... *big* trouble if there isn't one ...
519  */
520  for (proctype = 0; proctype < NUM_AUXILIARY_PROCS; proctype++)
521  {
522  auxproc = &AuxiliaryProcs[proctype];
523  if (auxproc->pid == 0)
524  break;
525  }
526  if (proctype >= NUM_AUXILIARY_PROCS)
527  {
529  elog(FATAL, "all AuxiliaryProcs are in use");
530  }
531 
532  /* Mark auxiliary proc as in use by me */
533  /* use volatile pointer to prevent code rearrangement */
534  ((volatile PGPROC *) auxproc)->pid = MyProcPid;
535 
536  MyProc = auxproc;
537  MyPgXact = &ProcGlobal->allPgXact[auxproc->pgprocno];
538 
540 
541  /*
542  * Initialize all fields of MyProc, except for those previously
543  * initialized by InitProcGlobal.
544  */
545  SHMQueueElemInit(&(MyProc->links));
546  MyProc->waitStatus = STATUS_OK;
548  MyProc->fpVXIDLock = false;
550  MyPgXact->xid = InvalidTransactionId;
551  MyPgXact->xmin = InvalidTransactionId;
552  MyProc->backendId = InvalidBackendId;
553  MyProc->databaseId = InvalidOid;
554  MyProc->roleId = InvalidOid;
556  MyPgXact->delayChkpt = false;
557  MyPgXact->vacuumFlags = 0;
558  MyProc->lwWaiting = false;
559  MyProc->lwWaitMode = 0;
560  MyProc->waitLock = NULL;
561  MyProc->waitProcLock = NULL;
562 #ifdef USE_ASSERT_CHECKING
563  {
564  int i;
565 
566  /* Last process should have released all locks. */
567  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
568  Assert(SHMQueueEmpty(&(MyProc->myProcLocks[i])));
569  }
570 #endif
571 
572  /*
573  * Acquire ownership of the PGPROC's latch, so that we can use WaitLatch
574  * on it. That allows us to repoint the process latch, which so far
575  * points to the process-local one, to the shared one.
576  */
577  OwnLatch(&MyProc->procLatch);
579 
580  /* Check that group locking fields are in a proper initial state. */
581  Assert(MyProc->lockGroupLeader == NULL);
583 
584  /*
585  * We might be reusing a semaphore that belonged to a failed process. So
586  * be careful and reinitialize its value here. (This is not strictly
587  * necessary anymore, but seems like a good idea for cleanliness.)
588  */
589  PGSemaphoreReset(MyProc->sem);
590 
591  /*
592  * Arrange to clean up at process exit.
593  */
595 }
596 
597 /*
598  * Record the PID and PGPROC structures for the Startup process, for use in
599  * ProcSendSignal(). See comments there for further explanation.
600  */
601 void
602 PublishStartupProcessInformation(void)
603 {
605 
606  ProcGlobal->startupProc = MyProc;
607  ProcGlobal->startupProcPid = MyProcPid;
608 
610 }
611 
612 /*
613  * Used from bufmgr to share the value of the buffer that Startup waits on,
614  * or to reset the value to "not waiting" (-1). This allows processing
615  * of recovery conflicts for buffer pins. The value is set before backends
616  * look at it, so locking is not required, especially since the set is
617  * an atomic integer set operation.
618  */
619 void
620 SetStartupBufferPinWaitBufId(int bufid)
621 {
622  /* use volatile pointer to prevent code rearrangement */
623  volatile PROC_HDR *procglobal = ProcGlobal;
624 
625  procglobal->startupBufferPinWaitBufId = bufid;
626 }
627 
628 /*
629  * Used by backends when they receive a request to check for buffer pin waits.
630  */
631 int
632 GetStartupBufferPinWaitBufId(void)
633 {
634  /* use volatile pointer to prevent code rearrangement */
635  volatile PROC_HDR *procglobal = ProcGlobal;
636 
637  return procglobal->startupBufferPinWaitBufId;
638 }
639 
640 /*
641  * Check whether there are at least N free PGPROC objects.
642  *
643  * Note: this is designed on the assumption that N will generally be small.
644  */
645 bool
646 HaveNFreeProcs(int n)
647 {
648  PGPROC *proc;
649 
651 
652  proc = ProcGlobal->freeProcs;
653 
654  while (n > 0 && proc != NULL)
655  {
656  proc = (PGPROC *) proc->links.next;
657  n--;
658  }
659 
661 
662  return (n <= 0);
663 }
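/*
 * Illustrative caller (a sketch of the check done during connection startup
 * in postinit.c, not code from this file): the last
 * superuser_reserved_connections slots are kept back by refusing ordinary
 * connections once the freelist runs low, roughly
 *
 *     if (!am_superuser && !am_walsender && ReservedBackends > 0 &&
 *         !HaveNFreeProcs(ReservedBackends))
 *         ereport(FATAL,
 *                 (errcode(ERRCODE_TOO_MANY_CONNECTIONS),
 *                  errmsg("remaining connection slots are reserved ...")));
 */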
664 
665 /*
666  * Check if the current process is awaiting a lock.
667  */
668 bool
669 IsWaitingForLock(void)
670 {
671  if (lockAwaited == NULL)
672  return false;
673 
674  return true;
675 }
676 
677 /*
678  * Cancel any pending wait for lock, when aborting a transaction, and revert
679  * any strong lock count acquisition for a lock being acquired.
680  *
681  * (Normally, this would only happen if we accept a cancel/die
682  * interrupt while waiting; but an ereport(ERROR) before or during the lock
683  * wait is within the realm of possibility, too.)
684  */
685 void
686 LockErrorCleanup(void)
687 {
688  LWLock *partitionLock;
689  DisableTimeoutParams timeouts[2];
690 
691  HOLD_INTERRUPTS();
692 
694 
695  /* Nothing to do if we weren't waiting for a lock */
696  if (lockAwaited == NULL)
697  {
699  return;
700  }
701 
702  /*
703  * Turn off the deadlock and lock timeout timers, if they are still
704  * running (see ProcSleep). Note we must preserve the LOCK_TIMEOUT
705  * indicator flag, since this function is executed before
706  * ProcessInterrupts when responding to SIGINT; else we'd lose the
707  * knowledge that the SIGINT came from a lock timeout and not an external
708  * source.
709  */
710  timeouts[0].id = DEADLOCK_TIMEOUT;
711  timeouts[0].keep_indicator = false;
712  timeouts[1].id = LOCK_TIMEOUT;
713  timeouts[1].keep_indicator = true;
714  disable_timeouts(timeouts, 2);
715 
716  /* Unlink myself from the wait queue, if on it (might not be anymore!) */
717  partitionLock = LockHashPartitionLock(lockAwaited->hashcode);
718  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
719 
720  if (MyProc->links.next != NULL)
721  {
722  /* We could not have been granted the lock yet */
723  RemoveFromWaitQueue(MyProc, lockAwaited->hashcode);
724  }
725  else
726  {
727  /*
728  * Somebody kicked us off the lock queue already. Perhaps they
729  * granted us the lock, or perhaps they detected a deadlock. If they
730  * did grant us the lock, we'd better remember it in our local lock
731  * table.
732  */
733  if (MyProc->waitStatus == STATUS_OK)
734  GrantAwaitedLock();
735  }
736 
737  lockAwaited = NULL;
738 
739  LWLockRelease(partitionLock);
740 
742 }
743 
744 
745 /*
746  * ProcReleaseLocks() -- release locks associated with current transaction
747  * at main transaction commit or abort
748  *
749  * At main transaction commit, we release standard locks except session locks.
750  * At main transaction abort, we release all locks including session locks.
751  *
752  * Advisory locks are released only if they are transaction-level;
753  * session-level holds remain, whether this is a commit or not.
754  *
755  * At subtransaction commit, we don't release any locks (so this func is not
756  * needed at all); we will defer the releasing to the parent transaction.
757  * At subtransaction abort, we release all locks held by the subtransaction;
758  * this is implemented by retail releasing of the locks under control of
759  * the ResourceOwner mechanism.
760  */
761 void
762 ProcReleaseLocks(bool isCommit)
763 {
764  if (!MyProc)
765  return;
766  /* If waiting, get off wait queue (should only be needed after error) */
767  LockErrorCleanup();
768  /* Release standard locks, including session-level if aborting */
769  LockReleaseAll(DEFAULT_LOCKMETHOD, !isCommit);
770  /* Release transaction-level advisory locks */
771  LockReleaseAll(USER_LOCKMETHOD, false);
772 }
773 
774 
775 /*
776  * RemoveProcFromArray() -- Remove this process from the shared ProcArray.
777  */
778 static void
779 RemoveProcFromArray(int code, Datum arg)
780 {
781  Assert(MyProc != NULL);
782  ProcArrayRemove(MyProc, InvalidTransactionId);
783 }
784 
785 /*
786  * ProcKill() -- Destroy the per-proc data structure for
787  * this process. Release any of its held LW locks.
788  */
789 static void
790 ProcKill(int code, Datum arg)
791 {
792  PGPROC *proc;
793  PGPROC *volatile *procgloballist;
794 
795  Assert(MyProc != NULL);
796 
797  /* Make sure we're out of the sync rep lists */
799 
800 #ifdef USE_ASSERT_CHECKING
801  {
802  int i;
803 
804  /* Last process should have released all locks. */
805  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
806  Assert(SHMQueueEmpty(&(MyProc->myProcLocks[i])));
807  }
808 #endif
809 
810  /*
811  * Release any LW locks I am holding. There really shouldn't be any, but
812  * it's cheap to check again before we cut the knees off the LWLock
813  * facility by releasing our PGPROC ...
814  */
816 
817  /* Cancel any pending condition variable sleep, too */
819 
820  /* Make sure active replication slots are released */
821  if (MyReplicationSlot != NULL)
822  ReplicationSlotRelease();
823 
824  /* Also cleanup all the temporary slots. */
825  ReplicationSlotCleanup();
826 
827  /*
828  * Detach from any lock group of which we are a member. If the leader
829  * exits before all other group members, its PGPROC will remain allocated
830  * until the last group process exits; that process must return the
831  * leader's PGPROC to the appropriate list.
832  */
833  if (MyProc->lockGroupLeader != NULL)
834  {
835  PGPROC *leader = MyProc->lockGroupLeader;
836  LWLock *leader_lwlock = LockHashPartitionLockByProc(leader);
837 
838  LWLockAcquire(leader_lwlock, LW_EXCLUSIVE);
840  dlist_delete(&MyProc->lockGroupLink);
841  if (dlist_is_empty(&leader->lockGroupMembers))
842  {
843  leader->lockGroupLeader = NULL;
844  if (leader != MyProc)
845  {
846  procgloballist = leader->procgloballist;
847 
848  /* Leader exited first; return its PGPROC. */
850  leader->links.next = (SHM_QUEUE *) *procgloballist;
851  *procgloballist = leader;
853  }
854  }
855  else if (leader != MyProc)
856  MyProc->lockGroupLeader = NULL;
857  LWLockRelease(leader_lwlock);
858  }
859 
860  /*
861  * Reset MyLatch to the process local one. This is so that signal
862  * handlers et al can continue using the latch after the shared latch
863  * isn't ours anymore. After that clear MyProc and disown the shared
864  * latch.
865  */
867  proc = MyProc;
868  MyProc = NULL;
869  DisownLatch(&proc->procLatch);
870 
871  procgloballist = proc->procgloballist;
873 
874  /*
875  * If we're still a member of a locking group, that means we're a leader
876  * which has somehow exited before its children. The last remaining child
877  * will release our PGPROC. Otherwise, release it now.
878  */
879  if (proc->lockGroupLeader == NULL)
880  {
881  /* Since lockGroupLeader is NULL, lockGroupMembers should be empty. */
883 
884  /* Return PGPROC structure (and semaphore) to appropriate freelist */
885  proc->links.next = (SHM_QUEUE *) *procgloballist;
886  *procgloballist = proc;
887  }
888 
889  /* Update shared estimate of spins_per_delay */
890  ProcGlobal->spins_per_delay = update_spins_per_delay(ProcGlobal->spins_per_delay);
891 
893 
894  /*
895  * This process is no longer present in shared memory in any meaningful
896  * way, so tell the postmaster we've cleaned up acceptably well. (XXX
897  * autovac launcher should be included here someday)
898  */
901 
902  /* wake autovac launcher if needed -- see comments in FreeWorkerInfo */
903  if (AutovacuumLauncherPid != 0)
904  kill(AutovacuumLauncherPid, SIGUSR2);
905 }
906 
907 /*
908  * AuxiliaryProcKill() -- Cut-down version of ProcKill for auxiliary
909  * processes (bgwriter, etc). The PGPROC and sema are not released, only
910  * marked as not-in-use.
911  */
912 static void
913 AuxiliaryProcKill(int code, Datum arg)
914 {
915  int proctype = DatumGetInt32(arg);
917  PGPROC *proc;
918 
919  Assert(proctype >= 0 && proctype < NUM_AUXILIARY_PROCS);
920 
921  auxproc = &AuxiliaryProcs[proctype];
922 
923  Assert(MyProc == auxproc);
924 
925  /* Release any LW locks I am holding (see notes above) */
927 
928  /* Cancel any pending condition variable sleep, too */
930 
931  /*
932  * Reset MyLatch to the process local one. This is so that signal
933  * handlers et al can continue using the latch after the shared latch
934  * isn't ours anymore. After that clear MyProc and disown the shared
935  * latch.
936  */
938  proc = MyProc;
939  MyProc = NULL;
940  DisownLatch(&proc->procLatch);
941 
943 
944  /* Mark auxiliary proc no longer in use */
945  proc->pid = 0;
946 
947  /* Update shared estimate of spins_per_delay */
948  ProcGlobal->spins_per_delay = update_spins_per_delay(ProcGlobal->spins_per_delay);
949 
951 }
952 
953 /*
954  * AuxiliaryPidGetProc -- get PGPROC for an auxiliary process
955  * given its PID
956  *
957  * Returns NULL if not found.
958  */
959 PGPROC *
960 AuxiliaryPidGetProc(int pid)
961 {
962  PGPROC *result = NULL;
963  int index;
964 
965  if (pid == 0) /* never match dummy PGPROCs */
966  return NULL;
967 
968  for (index = 0; index < NUM_AUXILIARY_PROCS; index++)
969  {
970  PGPROC *proc = &AuxiliaryProcs[index];
971 
972  if (proc->pid == pid)
973  {
974  result = proc;
975  break;
976  }
977  }
978  return result;
979 }
980 
981 /*
982  * ProcQueue package: routines for putting processes to sleep
983  * and waking them up
984  */
985 
986 /*
987  * ProcQueueAlloc -- alloc/attach to a shared memory process queue
988  *
989  * Returns: a pointer to the queue
990  * Side Effects: Initializes the queue if it wasn't there before
991  */
992 #ifdef NOT_USED
993 PROC_QUEUE *
994 ProcQueueAlloc(const char *name)
995 {
996  PROC_QUEUE *queue;
997  bool found;
998 
999  queue = (PROC_QUEUE *)
1000  ShmemInitStruct(name, sizeof(PROC_QUEUE), &found);
1001 
1002  if (!found)
1003  ProcQueueInit(queue);
1004 
1005  return queue;
1006 }
1007 #endif
1008 
1009 /*
1010  * ProcQueueInit -- initialize a shared memory process queue
1011  */
1012 void
1013 ProcQueueInit(PROC_QUEUE *queue)
1014 {
1015  SHMQueueInit(&(queue->links));
1016  queue->size = 0;
1017 }
1018 
1019 
1020 /*
1021  * ProcSleep -- put a process to sleep on the specified lock
1022  *
1023  * Caller must have set MyProc->heldLocks to reflect locks already held
1024  * on the lockable object by this process (under all XIDs).
1025  *
1026  * The lock table's partition lock must be held at entry, and will be held
1027  * at exit.
1028  *
1029  * Result: STATUS_OK if we acquired the lock, STATUS_ERROR if not (deadlock).
1030  *
1031  * ASSUME: that no one will fiddle with the queue until after
1032  * we release the partition lock.
1033  *
1034  * NOTES: The process queue is now a priority queue for locking.
1035  */
1036 int
1037 ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
1038 {
1039  LOCKMODE lockmode = locallock->tag.mode;
1040  LOCK *lock = locallock->lock;
1041  PROCLOCK *proclock = locallock->proclock;
1042  uint32 hashcode = locallock->hashcode;
1043  LWLock *partitionLock = LockHashPartitionLock(hashcode);
1044  PROC_QUEUE *waitQueue = &(lock->waitProcs);
1045  LOCKMASK myHeldLocks = MyProc->heldLocks;
1046  bool early_deadlock = false;
1047  bool allow_autovacuum_cancel = true;
1048  int myWaitStatus;
1049  PGPROC *proc;
1050  PGPROC *leader = MyProc->lockGroupLeader;
1051  int i;
1052 
1053  /*
1054  * If group locking is in use, locks held by members of my locking group
1055  * need to be included in myHeldLocks.
1056  */
1057  if (leader != NULL)
1058  {
1059  SHM_QUEUE *procLocks = &(lock->procLocks);
1060  PROCLOCK *otherproclock;
1061 
1062  otherproclock = (PROCLOCK *)
1063  SHMQueueNext(procLocks, procLocks, offsetof(PROCLOCK, lockLink));
1064  while (otherproclock != NULL)
1065  {
1066  if (otherproclock->groupLeader == leader)
1067  myHeldLocks |= otherproclock->holdMask;
1068  otherproclock = (PROCLOCK *)
1069  SHMQueueNext(procLocks, &otherproclock->lockLink,
1070  offsetof(PROCLOCK, lockLink));
1071  }
1072  }
1073 
1074  /*
1075  * Determine where to add myself in the wait queue.
1076  *
1077  * Normally I should go at the end of the queue. However, if I already
1078  * hold locks that conflict with the request of any previous waiter, put
1079  * myself in the queue just in front of the first such waiter. This is not
1080  * a necessary step, since deadlock detection would move me to before that
1081  * waiter anyway; but it's relatively cheap to detect such a conflict
1082  * immediately, and avoid delaying till deadlock timeout.
1083  *
1084  * Special case: if I find I should go in front of some waiter, check to
1085  * see if I conflict with already-held locks or the requests before that
1086  * waiter. If not, then just grant myself the requested lock immediately.
1087  * This is the same as the test for immediate grant in LockAcquire, except
1088  * we are only considering the part of the wait queue before my insertion
1089  * point.
1090  */
1091  if (myHeldLocks != 0)
1092  {
1093  LOCKMASK aheadRequests = 0;
1094 
1095  proc = (PGPROC *) waitQueue->links.next;
1096  for (i = 0; i < waitQueue->size; i++)
1097  {
1098  /*
1099  * If we're part of the same locking group as this waiter, its
1100  * locks neither conflict with ours nor contribute to
1101  * aheadRequests.
1102  */
1103  if (leader != NULL && leader == proc->lockGroupLeader)
1104  {
1105  proc = (PGPROC *) proc->links.next;
1106  continue;
1107  }
1108  /* Must he wait for me? */
1109  if (lockMethodTable->conflictTab[proc->waitLockMode] & myHeldLocks)
1110  {
1111  /* Must I wait for him ? */
1112  if (lockMethodTable->conflictTab[lockmode] & proc->heldLocks)
1113  {
1114  /*
1115  * Yes, so we have a deadlock. Easiest way to clean up
1116  * correctly is to call RemoveFromWaitQueue(), but we
1117  * can't do that until we are *on* the wait queue. So, set
1118  * a flag to check below, and break out of loop. Also,
1119  * record deadlock info for later message.
1120  */
1121  RememberSimpleDeadLock(MyProc, lockmode, lock, proc);
1122  early_deadlock = true;
1123  break;
1124  }
1125  /* I must go before this waiter. Check special case. */
1126  if ((lockMethodTable->conflictTab[lockmode] & aheadRequests) == 0 &&
1127  LockCheckConflicts(lockMethodTable,
1128  lockmode,
1129  lock,
1130  proclock) == STATUS_OK)
1131  {
1132  /* Skip the wait and just grant myself the lock. */
1133  GrantLock(lock, proclock, lockmode);
1134  GrantAwaitedLock();
1135  return STATUS_OK;
1136  }
1137  /* Break out of loop to put myself before him */
1138  break;
1139  }
1140  /* Nope, so advance to next waiter */
1141  aheadRequests |= LOCKBIT_ON(proc->waitLockMode);
1142  proc = (PGPROC *) proc->links.next;
1143  }
1144 
1145  /*
1146  * If we fall out of loop normally, proc points to waitQueue head, so
1147  * we will insert at tail of queue as desired.
1148  */
1149  }
1150  else
1151  {
1152  /* I hold no locks, so I can't push in front of anyone. */
1153  proc = (PGPROC *) &(waitQueue->links);
1154  }
1155 
1156  /*
1157  * Insert self into queue, ahead of the given proc (or at tail of queue).
1158  */
1159  SHMQueueInsertBefore(&(proc->links), &(MyProc->links));
1160  waitQueue->size++;
1161 
1162  lock->waitMask |= LOCKBIT_ON(lockmode);
1163 
1164  /* Set up wait information in PGPROC object, too */
1165  MyProc->waitLock = lock;
1166  MyProc->waitProcLock = proclock;
1167  MyProc->waitLockMode = lockmode;
1168 
1169  MyProc->waitStatus = STATUS_WAITING;
1170 
1171  /*
1172  * If we detected deadlock, give up without waiting. This must agree with
1173  * CheckDeadLock's recovery code, except that we shouldn't release the
1174  * semaphore since we haven't tried to lock it yet.
1175  */
1176  if (early_deadlock)
1177  {
1178  RemoveFromWaitQueue(MyProc, hashcode);
1179  return STATUS_ERROR;
1180  }
1181 
1182  /* mark that we are waiting for a lock */
1183  lockAwaited = locallock;
1184 
1185  /*
1186  * Release the lock table's partition lock.
1187  *
1188  * NOTE: this may also cause us to exit critical-section state, possibly
1189  * allowing a cancel/die interrupt to be accepted. This is OK because we
1190  * have recorded the fact that we are waiting for a lock, and so
1191  * LockErrorCleanup will clean up if cancel/die happens.
1192  */
1193  LWLockRelease(partitionLock);
1194 
1195  /*
1196  * Also, now that we will successfully clean up after an ereport, it's
1197  * safe to check to see if there's a buffer pin deadlock against the
1198  * Startup process. Of course, that's only necessary if we're doing Hot
1199  * Standby and are not the Startup process ourselves.
1200  */
1201  if (RecoveryInProgress() && !InRecovery)
1202  CheckRecoveryConflictDeadlock();
1203 
1204  /* Reset deadlock_state before enabling the timeout handler */
1205  deadlock_state = DS_NOT_YET_CHECKED;
1206  got_deadlock_timeout = false;
1207 
1208  /*
1209  * Set timer so we can wake up after awhile and check for a deadlock. If a
1210  * deadlock is detected, the handler releases the process's semaphore and
1211  * sets MyProc->waitStatus = STATUS_ERROR, allowing us to know that we
1212  * must report failure rather than success.
1213  *
1214  * By delaying the check until we've waited for a bit, we can avoid
1215  * running the rather expensive deadlock-check code in most cases.
1216  *
1217  * If LockTimeout is set, also enable the timeout for that. We can save a
1218  * few cycles by enabling both timeout sources in one call.
1219  *
1220  * If InHotStandby we set lock waits slightly later for clarity with other
1221  * code.
1222  */
1223  if (!InHotStandby)
1224  {
1225  if (LockTimeout > 0)
1226  {
1227  EnableTimeoutParams timeouts[2];
1228 
1229  timeouts[0].id = DEADLOCK_TIMEOUT;
1230  timeouts[0].type = TMPARAM_AFTER;
1231  timeouts[0].delay_ms = DeadlockTimeout;
1232  timeouts[1].id = LOCK_TIMEOUT;
1233  timeouts[1].type = TMPARAM_AFTER;
1234  timeouts[1].delay_ms = LockTimeout;
1235  enable_timeouts(timeouts, 2);
1236  }
1237  else
1238  enable_timeout_after(DEADLOCK_TIMEOUT, DeadlockTimeout);
1239  }
1240 
1241  /*
1242  * If somebody wakes us between LWLockRelease and WaitLatch, the latch
1243  * will not wait. But a set latch does not necessarily mean that the lock
1244  * is free now, as there are many other sources for latch sets than
1245  * somebody releasing the lock.
1246  *
1247  * We process interrupts whenever the latch has been set, so cancel/die
1248  * interrupts are processed quickly. This means we must not mind losing
1249  * control to a cancel/die interrupt here. We don't, because we have no
1250  * shared-state-change work to do after being granted the lock (the
1251  * grantor did it all). We do have to worry about canceling the deadlock
1252  * timeout and updating the locallock table, but if we lose control to an
1253  * error, LockErrorCleanup will fix that up.
1254  */
1255  do
1256  {
1257  if (InHotStandby)
1258  {
1259  /* Set a timer and wait for that or for the Lock to be granted */
1260  ResolveRecoveryConflictWithLock(locallock->tag.lock);
1261  }
1262  else
1263  {
1264  WaitLatch(MyLatch, WL_LATCH_SET, 0,
1265  PG_WAIT_LOCK | locallock->tag.lock.locktag_type);
1266  ResetLatch(MyLatch);
1267  /* check for deadlocks first, as that's probably log-worthy */
1268  if (got_deadlock_timeout)
1269  {
1270  CheckDeadLock();
1271  got_deadlock_timeout = false;
1272  }
1273  CHECK_FOR_INTERRUPTS();
1274  }
1275 
1276  /*
1277  * waitStatus could change from STATUS_WAITING to something else
1278  * asynchronously. Read it just once per loop to prevent surprising
1279  * behavior (such as missing log messages).
1280  */
1281  myWaitStatus = *((volatile int *) &MyProc->waitStatus);
1282 
1283  /*
1284  * If we are not deadlocked, but are waiting on an autovacuum-induced
1285  * task, send a signal to interrupt it.
1286  */
1287  if (deadlock_state == DS_BLOCKED_BY_AUTOVACUUM && allow_autovacuum_cancel)
1288  {
1289  PGPROC *autovac = GetBlockingAutoVacuumPgproc();
1290  PGXACT *autovac_pgxact = &ProcGlobal->allPgXact[autovac->pgprocno];
1291 
1292  LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
1293 
1294  /*
1295  * Only do it if the worker is not working to protect against Xid
1296  * wraparound.
1297  */
1298  if ((autovac_pgxact->vacuumFlags & PROC_IS_AUTOVACUUM) &&
1299  !(autovac_pgxact->vacuumFlags & PROC_VACUUM_FOR_WRAPAROUND))
1300  {
1301  int pid = autovac->pid;
1302  StringInfoData locktagbuf;
1303  StringInfoData logbuf; /* errdetail for server log */
1304 
1305  initStringInfo(&locktagbuf);
1306  initStringInfo(&logbuf);
1307  DescribeLockTag(&locktagbuf, &lock->tag);
1308  appendStringInfo(&logbuf,
1309  _("Process %d waits for %s on %s."),
1310  MyProcPid,
1311  GetLockmodeName(lock->tag.locktag_lockmethodid,
1312  lockmode),
1313  locktagbuf.data);
1314 
1315  /* release lock as quickly as possible */
1316  LWLockRelease(ProcArrayLock);
1317 
1318  /* send the autovacuum worker Back to Old Kent Road */
1319  ereport(DEBUG1,
1320  (errmsg("sending cancel to blocking autovacuum PID %d",
1321  pid),
1322  errdetail_log("%s", logbuf.data)));
1323 
1324  if (kill(pid, SIGINT) < 0)
1325  {
1326  /*
1327  * There's a race condition here: once we release the
1328  * ProcArrayLock, it's possible for the autovac worker to
1329  * close up shop and exit before we can do the kill().
1330  * Therefore, we do not whinge about no-such-process.
1331  * Other errors such as EPERM could conceivably happen if
1332  * the kernel recycles the PID fast enough, but such cases
1333  * seem improbable enough that it's probably best to issue
1334  * a warning if we see some other errno.
1335  */
1336  if (errno != ESRCH)
1337  ereport(WARNING,
1338  (errmsg("could not send signal to process %d: %m",
1339  pid)));
1340  }
1341 
1342  pfree(logbuf.data);
1343  pfree(locktagbuf.data);
1344  }
1345  else
1346  LWLockRelease(ProcArrayLock);
1347 
1348  /* prevent signal from being resent more than once */
1349  allow_autovacuum_cancel = false;
1350  }
1351 
1352  /*
1353  * If awoken after the deadlock check interrupt has run, and
1354  * log_lock_waits is on, then report about the wait.
1355  */
1356  if (log_lock_waits && deadlock_state != DS_NOT_YET_CHECKED)
1357  {
1358  StringInfoData buf,
1359  lock_waiters_sbuf,
1360  lock_holders_sbuf;
1361  const char *modename;
1362  long secs;
1363  int usecs;
1364  long msecs;
1365  SHM_QUEUE *procLocks;
1366  PROCLOCK *proclock;
1367  bool first_holder = true,
1368  first_waiter = true;
1369  int lockHoldersNum = 0;
1370 
1371  initStringInfo(&buf);
1372  initStringInfo(&lock_waiters_sbuf);
1373  initStringInfo(&lock_holders_sbuf);
1374 
1375  DescribeLockTag(&buf, &locallock->tag.lock);
1376  modename = GetLockmodeName(locallock->tag.lock.locktag_lockmethodid,
1377  lockmode);
1378  TimestampDifference(get_timeout_start_time(DEADLOCK_TIMEOUT),
1379  GetCurrentTimestamp(),
1380  &secs, &usecs);
1381  msecs = secs * 1000 + usecs / 1000;
1382  usecs = usecs % 1000;
1383 
1384  /*
1385  * we loop over the lock's procLocks to gather a list of all
1386  * holders and waiters. Thus we will be able to provide more
1387  * detailed information for lock debugging purposes.
1388  *
1389  * lock->procLocks contains all processes which hold or wait for
1390  * this lock.
1391  */
1392 
1393  LWLockAcquire(partitionLock, LW_SHARED);
1394 
1395  procLocks = &(lock->procLocks);
1396  proclock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks,
1397  offsetof(PROCLOCK, lockLink));
1398 
1399  while (proclock)
1400  {
1401  /*
1402  * we are a waiter if myProc->waitProcLock == proclock; we are
1403  * a holder if it is NULL or something different
1404  */
1405  if (proclock->tag.myProc->waitProcLock == proclock)
1406  {
1407  if (first_waiter)
1408  {
1409  appendStringInfo(&lock_waiters_sbuf, "%d",
1410  proclock->tag.myProc->pid);
1411  first_waiter = false;
1412  }
1413  else
1414  appendStringInfo(&lock_waiters_sbuf, ", %d",
1415  proclock->tag.myProc->pid);
1416  }
1417  else
1418  {
1419  if (first_holder)
1420  {
1421  appendStringInfo(&lock_holders_sbuf, "%d",
1422  proclock->tag.myProc->pid);
1423  first_holder = false;
1424  }
1425  else
1426  appendStringInfo(&lock_holders_sbuf, ", %d",
1427  proclock->tag.myProc->pid);
1428 
1429  lockHoldersNum++;
1430  }
1431 
1432  proclock = (PROCLOCK *) SHMQueueNext(procLocks, &proclock->lockLink,
1433  offsetof(PROCLOCK, lockLink));
1434  }
1435 
1436  LWLockRelease(partitionLock);
1437 
1438  if (deadlock_state == DS_SOFT_DEADLOCK)
1439  ereport(LOG,
1440  (errmsg("process %d avoided deadlock for %s on %s by rearranging queue order after %ld.%03d ms",
1441  MyProcPid, modename, buf.data, msecs, usecs),
1442  (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
1443  "Processes holding the lock: %s. Wait queue: %s.",
1444  lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
1445  else if (deadlock_state == DS_HARD_DEADLOCK)
1446  {
1447  /*
1448  * This message is a bit redundant with the error that will be
1449  * reported subsequently, but in some cases the error report
1450  * might not make it to the log (eg, if it's caught by an
1451  * exception handler), and we want to ensure all long-wait
1452  * events get logged.
1453  */
1454  ereport(LOG,
1455  (errmsg("process %d detected deadlock while waiting for %s on %s after %ld.%03d ms",
1456  MyProcPid, modename, buf.data, msecs, usecs),
1457  (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
1458  "Processes holding the lock: %s. Wait queue: %s.",
1459  lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
1460  }
1461 
1462  if (myWaitStatus == STATUS_WAITING)
1463  ereport(LOG,
1464  (errmsg("process %d still waiting for %s on %s after %ld.%03d ms",
1465  MyProcPid, modename, buf.data, msecs, usecs),
1466  (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
1467  "Processes holding the lock: %s. Wait queue: %s.",
1468  lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
1469  else if (myWaitStatus == STATUS_OK)
1470  ereport(LOG,
1471  (errmsg("process %d acquired %s on %s after %ld.%03d ms",
1472  MyProcPid, modename, buf.data, msecs, usecs)));
1473  else
1474  {
1475  Assert(myWaitStatus == STATUS_ERROR);
1476 
1477  /*
1478  * Currently, the deadlock checker always kicks its own
1479  * process, which means that we'll only see STATUS_ERROR when
1480  * deadlock_state == DS_HARD_DEADLOCK, and there's no need to
1481  * print redundant messages. But for completeness and
1482  * future-proofing, print a message if it looks like someone
1483  * else kicked us off the lock.
1484  */
1485  if (deadlock_state != DS_HARD_DEADLOCK)
1486  ereport(LOG,
1487  (errmsg("process %d failed to acquire %s on %s after %ld.%03d ms",
1488  MyProcPid, modename, buf.data, msecs, usecs),
1489  (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
1490  "Processes holding the lock: %s. Wait queue: %s.",
1491  lockHoldersNum, lock_holders_sbuf.data, lock_waiters_sbuf.data))));
1492  }
1493 
1494  /*
1495  * At this point we might still need to wait for the lock. Reset
1496  * state so we don't print the above messages again.
1497  */
1498  deadlock_state = DS_NO_DEADLOCK;
1499 
1500  pfree(buf.data);
1501  pfree(lock_holders_sbuf.data);
1502  pfree(lock_waiters_sbuf.data);
1503  }
1504  } while (myWaitStatus == STATUS_WAITING);
1505 
1506  /*
1507  * Disable the timers, if they are still running. As in LockErrorCleanup,
1508  * we must preserve the LOCK_TIMEOUT indicator flag: if a lock timeout has
1509  * already caused QueryCancelPending to become set, we want the cancel to
1510  * be reported as a lock timeout, not a user cancel.
1511  */
1512  if (!InHotStandby)
1513  {
1514  if (LockTimeout > 0)
1515  {
1516  DisableTimeoutParams timeouts[2];
1517 
1518  timeouts[0].id = DEADLOCK_TIMEOUT;
1519  timeouts[0].keep_indicator = false;
1520  timeouts[1].id = LOCK_TIMEOUT;
1521  timeouts[1].keep_indicator = true;
1522  disable_timeouts(timeouts, 2);
1523  }
1524  else
1525  disable_timeout(DEADLOCK_TIMEOUT, false);
1526  }
1527 
1528  /*
1529  * Re-acquire the lock table's partition lock. We have to do this to hold
1530  * off cancel/die interrupts before we can mess with lockAwaited (else we
1531  * might have a missed or duplicated locallock update).
1532  */
1533  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
1534 
1535  /*
1536  * We no longer want LockErrorCleanup to do anything.
1537  */
1538  lockAwaited = NULL;
1539 
1540  /*
1541  * If we got the lock, be sure to remember it in the locallock table.
1542  */
1543  if (MyProc->waitStatus == STATUS_OK)
1544  GrantAwaitedLock();
1545 
1546  /*
1547  * We don't have to do anything else, because the awaker did all the
1548  * necessary update of the lock table and MyProc.
1549  */
1550  return MyProc->waitStatus;
1551 }
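/*
 * Behavioral sketch of the timers armed in ProcSleep (illustrative values):
 * with the default deadlock_timeout = 1s and lock_timeout = 0, only
 * DEADLOCK_TIMEOUT is armed, so a blocked backend runs CheckDeadLock() once
 * after a second and then keeps waiting indefinitely. With, say,
 * deadlock_timeout = 1s and lock_timeout = 5s, both timers are armed in a
 * single enable_timeouts() call; a waiter still blocked after five seconds is
 * cancelled, and because the LOCK_TIMEOUT indicator is preserved when the
 * timers are disabled, the failure is reported as a lock timeout rather than
 * a plain query cancel.
 */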
1552 
1553 
1554 /*
1555  * ProcWakeup -- wake up a process by releasing its private semaphore.
1556  *
1557  * Also remove the process from the wait queue and set its links invalid.
1558  * RETURN: the next process in the wait queue.
1559  *
1560  * The appropriate lock partition lock must be held by caller.
1561  *
1562  * XXX: presently, this code is only used for the "success" case, and only
1563  * works correctly for that case. To clean up in failure case, would need
1564  * to twiddle the lock's request counts too --- see RemoveFromWaitQueue.
1565  * Hence, in practice the waitStatus parameter must be STATUS_OK.
1566  */
1567 PGPROC *
1568 ProcWakeup(PGPROC *proc, int waitStatus)
1569 {
1570  PGPROC *retProc;
1571 
1572  /* Proc should be sleeping ... */
1573  if (proc->links.prev == NULL ||
1574  proc->links.next == NULL)
1575  return NULL;
1576  Assert(proc->waitStatus == STATUS_WAITING);
1577 
1578  /* Save next process before we zap the list link */
1579  retProc = (PGPROC *) proc->links.next;
1580 
1581  /* Remove process from wait queue */
1582  SHMQueueDelete(&(proc->links));
1583  (proc->waitLock->waitProcs.size)--;
1584 
1585  /* Clean up process' state and pass it the ok/fail signal */
1586  proc->waitLock = NULL;
1587  proc->waitProcLock = NULL;
1588  proc->waitStatus = waitStatus;
1589 
1590  /* And awaken it */
1591  SetLatch(&proc->procLatch);
1592 
1593  return retProc;
1594 }
1595 
1596 /*
1597  * ProcLockWakeup -- routine for waking up processes when a lock is
1598  * released (or a prior waiter is aborted). Scan all waiters
1599  * for lock, waken any that are no longer blocked.
1600  *
1601  * The appropriate lock partition lock must be held by caller.
1602  */
1603 void
1604 ProcLockWakeup(LockMethod lockMethodTable, LOCK *lock)
1605 {
1606  PROC_QUEUE *waitQueue = &(lock->waitProcs);
1607  int queue_size = waitQueue->size;
1608  PGPROC *proc;
1609  LOCKMASK aheadRequests = 0;
1610 
1611  Assert(queue_size >= 0);
1612 
1613  if (queue_size == 0)
1614  return;
1615 
1616  proc = (PGPROC *) waitQueue->links.next;
1617 
1618  while (queue_size-- > 0)
1619  {
1620  LOCKMODE lockmode = proc->waitLockMode;
1621 
1622  /*
1623  * Waken if (a) doesn't conflict with requests of earlier waiters, and
1624  * (b) doesn't conflict with already-held locks.
1625  */
1626  if ((lockMethodTable->conflictTab[lockmode] & aheadRequests) == 0 &&
1627  LockCheckConflicts(lockMethodTable,
1628  lockmode,
1629  lock,
1630  proc->waitProcLock) == STATUS_OK)
1631  {
1632  /* OK to waken */
1633  GrantLock(lock, proc->waitProcLock, lockmode);
1634  proc = ProcWakeup(proc, STATUS_OK);
1635 
1636  /*
1637  * ProcWakeup removes proc from the lock's waiting process queue
1638  * and returns the next proc in chain; don't use proc's next-link,
1639  * because it's been cleared.
1640  */
1641  }
1642  else
1643  {
1644  /*
1645  * Cannot wake this guy. Remember his request for later checks.
1646  */
1647  aheadRequests |= LOCKBIT_ON(lockmode);
1648  proc = (PGPROC *) proc->links.next;
1649  }
1650  }
1651 
1652  Assert(waitQueue->size >= 0);
1653 }
1654 
1655 /*
1656  * CheckDeadLock
1657  *
1658  * We only get to this routine if DEADLOCK_TIMEOUT fired while waiting for a
1659  * lock to be released by some other process. Check if there's a deadlock; if
1660  * not, just return. (But signal ProcSleep to log a message, if
1661  * log_lock_waits is true.) If we have a real deadlock, remove ourselves from
1662  * the lock's wait queue and signal an error to ProcSleep.
1663  */
1664 static void
1665 CheckDeadLock(void)
1666 {
1667  int i;
1668 
1669  /*
1670  * Acquire exclusive lock on the entire shared lock data structures. Must
1671  * grab LWLocks in partition-number order to avoid LWLock deadlock.
1672  *
1673  * Note that the deadlock check interrupt had better not be enabled
1674  * anywhere that this process itself holds lock partition locks, else this
1675  * will wait forever. Also note that LWLockAcquire creates a critical
1676  * section, so that this routine cannot be interrupted by cancel/die
1677  * interrupts.
1678  */
1679  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
1680  LWLockAcquire(LockHashPartitionLockByIndex(i), LW_EXCLUSIVE);
1681 
1682  /*
1683  * Check to see if we've been awoken by anyone in the interim.
1684  *
1685  * If we have, we can return and resume our transaction -- happy day.
1686  * Before we are awoken the process releasing the lock grants it to us so
1687  * we know that we don't have to wait anymore.
1688  *
1689  * We check by looking to see if we've been unlinked from the wait queue.
1690  * This is quicker than checking our semaphore's state, since no kernel
1691  * call is needed, and it is safe because we hold the lock partition lock.
1692  */
1693  if (MyProc->links.prev == NULL ||
1694  MyProc->links.next == NULL)
1695  goto check_done;
1696 
1697 #ifdef LOCK_DEBUG
1698  if (Debug_deadlocks)
1699  DumpAllLocks();
1700 #endif
1701 
1702  /* Run the deadlock check, and set deadlock_state for use by ProcSleep */
1703  deadlock_state = DeadLockCheck(MyProc);
1704 
1705  if (deadlock_state == DS_HARD_DEADLOCK)
1706  {
1707  /*
1708  * Oops. We have a deadlock.
1709  *
1710  * Get this process out of wait state. (Note: we could do this more
1711  * efficiently by relying on lockAwaited, but use this coding to
1712  * preserve the flexibility to kill some other transaction than the
1713  * one detecting the deadlock.)
1714  *
1715  * RemoveFromWaitQueue sets MyProc->waitStatus to STATUS_ERROR, so
1716  * ProcSleep will report an error after we return from the signal
1717  * handler.
1718  */
1719  Assert(MyProc->waitLock != NULL);
1720  RemoveFromWaitQueue(MyProc, LockTagHashCode(&(MyProc->waitLock->tag)));
1721 
1722  /*
1723  * We're done here. Transaction abort caused by the error that
1724  * ProcSleep will raise will cause any other locks we hold to be
1725  * released, thus allowing other processes to wake up; we don't need
1726  * to do that here. NOTE: an exception is that releasing locks we
1727  * hold doesn't consider the possibility of waiters that were blocked
1728  * behind us on the lock we just failed to get, and might now be
1729  * wakable because we're not in front of them anymore. However,
1730  * RemoveFromWaitQueue took care of waking up any such processes.
1731  */
1732  }
1733 
1734  /*
1735  * And release locks. We do this in reverse order for two reasons: (1)
1736  * Anyone else who needs more than one of the locks will be trying to lock
1737  * them in increasing order; we don't want to release the other process
1738  * until it can get all the locks it needs. (2) This avoids O(N^2)
1739  * behavior inside LWLockRelease.
1740  */
1741 check_done:
1742  for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
1743  LWLockRelease(LockHashPartitionLockByIndex(i));
1744 }
1745 
1746 /*
1747  * CheckDeadLockAlert - Handle the expiry of deadlock_timeout.
1748  *
1749  * NB: Runs inside a signal handler, be careful.
1750  */
1751 void
1752 CheckDeadLockAlert(void)
1753 {
1754  int save_errno = errno;
1755 
1756  got_deadlock_timeout = true;
1757 
1758  /*
1759  * Have to set the latch again, even if handle_sig_alarm already did. Back
1760  * then got_deadlock_timeout wasn't yet set... It's unlikely that this
1761  * ever would be a problem, but setting a set latch again is cheap.
1762  */
1763  SetLatch(MyLatch);
1764  errno = save_errno;
1765 }
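/*
 * Illustrative wiring (done during backend startup, not in this file): the
 * timeout is registered roughly as
 *
 *     RegisterTimeout(DEADLOCK_TIMEOUT, CheckDeadLockAlert);
 *
 * so that expiry merely sets got_deadlock_timeout and the latch, and the
 * expensive CheckDeadLock() pass runs later from ProcSleep's wait loop.
 */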
1766 
1767 /*
1768  * ProcWaitForSignal - wait for a signal from another backend.
1769  *
1770  * As this uses the generic process latch the caller has to be robust against
1771  * unrelated wakeups: Always check that the desired state has occurred, and
1772  * wait again if not.
1773  */
1774 void
1775 ProcWaitForSignal(uint32 wait_event_info)
1776 {
1777  WaitLatch(MyLatch, WL_LATCH_SET, 0, wait_event_info);
1778  ResetLatch(MyLatch);
1779  CHECK_FOR_INTERRUPTS();
1780 }
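/*
 * Illustrative caller pattern (hypothetical predicate, not from this file):
 * because the shared latch can be set for unrelated reasons, callers
 * re-check their condition and loop, e.g.
 *
 *     while (!state_we_are_waiting_for())
 *         ProcWaitForSignal(PG_WAIT_EXTENSION);
 */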
1781 
1782 /*
1783  * ProcSendSignal - send a signal to a backend identified by PID
1784  */
1785 void
1786 ProcSendSignal(int pid)
1787 {
1788  PGPROC *proc = NULL;
1789 
1790  if (RecoveryInProgress())
1791  {
1793 
1794  /*
1795  * Check to see whether it is the Startup process we wish to signal.
1796  * This call is made by the buffer manager when it wishes to wake up a
1797  * process that has been waiting for a pin, so that it can obtain a
1798  * cleanup lock using LockBufferForCleanup(). Startup is not a normal
1799  * backend, so BackendPidGetProc() will not return any pid at all. So
1800  * we remember the information for this special case.
1801  */
1802  if (pid == ProcGlobal->startupProcPid)
1803  proc = ProcGlobal->startupProc;
1804 
1806  }
1807 
1808  if (proc == NULL)
1809  proc = BackendPidGetProc(pid);
1810 
1811  if (proc != NULL)
1812  {
1813  SetLatch(&proc->procLatch);
1814  }
1815 }
1816 
1817 /*
1818  * BecomeLockGroupLeader - designate process as lock group leader
1819  *
1820  * Once this function has returned, other processes can join the lock group
1821  * by calling BecomeLockGroupMember.
1822  */
1823 void
1824 BecomeLockGroupLeader(void)
1825 {
1826  LWLock *leader_lwlock;
1827 
1828  /* If we already did it, we don't need to do it again. */
1829  if (MyProc->lockGroupLeader == MyProc)
1830  return;
1831 
1832  /* We had better not be a follower. */
1833  Assert(MyProc->lockGroupLeader == NULL);
1834 
1835  /* Create single-member group, containing only ourselves. */
1836  leader_lwlock = LockHashPartitionLockByProc(MyProc);
1837  LWLockAcquire(leader_lwlock, LW_EXCLUSIVE);
1838  MyProc->lockGroupLeader = MyProc;
1839  dlist_push_head(&MyProc->lockGroupMembers, &MyProc->lockGroupLink);
1840  LWLockRelease(leader_lwlock);
1841 }
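/*
 * [Editorial sketch -- not part of proc.c.]  Leader-side use of the
 * lock-group protocol, loosely modeled on the parallel-query code: become
 * the group leader before launching workers and advertise our PGPROC and
 * PID so they can join.  ExampleSharedState, example_state and
 * example_setup_lock_group() are hypothetical names used only here.
 */
typedef struct ExampleSharedState
{
	PGPROC	   *leader_pgproc;	/* leader's PGPROC, for the workers */
	int			leader_pid;		/* leader's PID, used as the join interlock */
} ExampleSharedState;

static void
example_setup_lock_group(ExampleSharedState *example_state)
{
	BecomeLockGroupLeader();
	example_state->leader_pgproc = MyProc;
	example_state->leader_pid = MyProcPid;
	/* ... now launch background workers that will read example_state ... */
}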
1842 
1843 /*
1844  * BecomeLockGroupMember - designate process as lock group member
1845  *
1846  * This is pretty straightforward except for the possibility that the leader
1847  * whose group we're trying to join might exit before we manage to do so;
1848  * and the PGPROC might get recycled for an unrelated process. To avoid
1849  * that, we require the caller to pass the PID of the intended PGPROC as
1850  * an interlock. Returns true if we successfully join the intended lock
1851  * group, and false if not.
1852  */
1853 bool
1854 BecomeLockGroupMember(PGPROC *leader, int pid)
1855 {
1856  LWLock *leader_lwlock;
1857  bool ok = false;
1858 
1859  /* Group leader can't become member of group */
1860  Assert(MyProc != leader);
1861 
1862  /* Can't already be a member of a group */
1863  Assert(MyProc->lockGroupLeader == NULL);
1864 
1865  /* PID must be valid. */
1866  Assert(pid != 0);
1867 
1868  /*
1869  * Get lock protecting the group fields. Note LockHashPartitionLockByProc
1870  * accesses leader->pgprocno in a PGPROC that might be free. This is safe
1871  * because all PGPROCs' pgprocno fields are set during shared memory
1872  * initialization and never change thereafter; so we will acquire the
1873  * correct lock even if the leader PGPROC is in the process of being recycled.
1874  */
1875  leader_lwlock = LockHashPartitionLockByProc(leader);
1876  LWLockAcquire(leader_lwlock, LW_EXCLUSIVE);
1877 
1878  /* Is this the leader we're looking for? */
1879  if (leader->pid == pid && leader->lockGroupLeader == leader)
1880  {
1881  /* OK, join the group */
1882  ok = true;
1883  MyProc->lockGroupLeader = leader;
1884  dlist_push_tail(&leader->lockGroupMembers, &MyProc->lockGroupLink);
1885  }
1886  LWLockRelease(leader_lwlock);
1887 
1888  return ok;
1889 }
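/*
 * [Editorial sketch -- not part of proc.c.]  Worker-side counterpart of the
 * leader example above: the PID the leader advertised is the interlock the
 * function comment describes.  If the leader has already exited (so its
 * PGPROC may have been recycled), BecomeLockGroupMember() returns false and
 * the worker simply gives up, much as the parallel-worker code does.
 * example_join_lock_group() and ExampleSharedState are hypothetical names.
 */
static void
example_join_lock_group(ExampleSharedState *example_state)
{
	if (!BecomeLockGroupMember(example_state->leader_pgproc,
							   example_state->leader_pid))
		return;					/* leader is gone; don't run as group member */

	/* ... proceed with work that must share the leader's locks ... */
}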