PostgreSQL Source Code git master
Loading...
Searching...
No Matches
proc.c
Go to the documentation of this file.
1/*-------------------------------------------------------------------------
2 *
3 * proc.c
4 * routines to manage per-process shared memory data structure
5 *
6 * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
7 * Portions Copyright (c) 1994, Regents of the University of California
8 *
9 *
10 * IDENTIFICATION
11 * src/backend/storage/lmgr/proc.c
12 *
13 *-------------------------------------------------------------------------
14 */
15/*
16 * Interface (a):
17 * JoinWaitQueue(), ProcSleep(), ProcWakeup()
18 *
19 * Waiting for a lock causes the backend to be put to sleep. Whoever releases
20 * the lock wakes the process up again (and gives it an error code so it knows
21 * whether it was awoken on an error condition).
22 *
23 * Interface (b):
24 *
25 * ProcReleaseLocks -- frees the locks associated with current transaction
26 *
27 * ProcKill -- destroys the shared memory state (and locks)
28 * associated with the process.
29 */
30#include "postgres.h"
31
32#include <signal.h>
33#include <unistd.h>
34#include <sys/time.h>
35
36#include "access/transam.h"
37#include "access/twophase.h"
38#include "access/xlogutils.h"
39#include "access/xlogwait.h"
40#include "miscadmin.h"
41#include "pgstat.h"
44#include "replication/syncrep.h"
46#include "storage/ipc.h"
47#include "storage/lmgr.h"
48#include "storage/pmsignal.h"
49#include "storage/proc.h"
50#include "storage/procarray.h"
51#include "storage/procsignal.h"
52#include "storage/spin.h"
53#include "storage/standby.h"
54#include "utils/timeout.h"
55#include "utils/timestamp.h"
56
/*
 * NOTE(review): this listing is a rendered-page capture; gaps in the embedded
 * original line numbers (67-68, 70-73, 75-76) mark declarations lost in
 * extraction (presumably MyProc, ProcGlobal/AuxiliaryProcs pointers, and the
 * deadlock-check flag -- TODO confirm against the upstream file).
 */
57/* GUC variables */
58int DeadlockTimeout = 1000;
64bool log_lock_waits = true;
65
66/* Pointer to this process's PGPROC struct, if any */
68
69/* Pointers to shared-memory structures */
73
74/* Is a deadlock check pending? */
76
/* Forward declarations for the on_shmem_exit callbacks and deadlock check. */
77static void RemoveProcFromArray(int code, Datum arg);
78static void ProcKill(int code, Datum arg);
79static void AuxiliaryProcKill(int code, Datum arg);
80static DeadLockState CheckDeadLock(void);
81
82
83/*
84 * Report shared-memory space needed by PGPROC.
85 */
/*
 * Sums the shared-memory footprint of all PGPROC entries plus the dense
 * mirror arrays (xids, subxidStates, statusFlags), one element per proc.
 * NOTE(review): the function-name line (87) and the TotalProcs computation
 * (90-92) were lost in extraction -- presumably this is PGProcShmemSize();
 * kept verbatim otherwise.
 */
86static Size
88{
89 Size size = 0;
92
93 size = add_size(size, mul_size(TotalProcs, sizeof(PGPROC)));
94 size = add_size(size, mul_size(TotalProcs, sizeof(*ProcGlobal->xids)));
95 size = add_size(size, mul_size(TotalProcs, sizeof(*ProcGlobal->subxidStates)));
96 size = add_size(size, mul_size(TotalProcs, sizeof(*ProcGlobal->statusFlags)));
97
98 return size;
99}
100
101/*
102 * Report shared-memory space needed by Fast-Path locks.
103 */
/*
 * Size of the shared fast-path lock arrays.
 * NOTE(review): the name line (105) and the actual size computations
 * (108-111, 117-120) were lost in extraction; only the skeleton remains.
 * Presumably this is FastPathLockShmemSize() -- TODO confirm upstream.
 */
104static Size
106{
107 Size size = 0;
112
113 /*
114 * Memory needed for PGPROC fast-path lock arrays. Make sure the sizes are
115 * nicely aligned in each backend.
116 */
119

121

122 return size;
123}
124
125/*
126 * Report shared-memory space needed by InitProcGlobal.
127 */
/*
 * Total shared memory needed by InitProcGlobal: the PROC_HDR header, a
 * spinlock, the PGPROC block, and the fast-path lock arrays.
 * NOTE(review): name line (129) and line 137 lost in extraction; presumably
 * ProcGlobalShmemSize().
 */
128Size
130{
131 Size size = 0;
132
133 /* ProcGlobal */
134 size = add_size(size, sizeof(PROC_HDR));
135 size = add_size(size, sizeof(slock_t));
136
138 size = add_size(size, PGProcShmemSize());
139 size = add_size(size, FastPathLockShmemSize());
140
141 return size;
142}
143
144/*
145 * Report number of semaphores needed by InitProcGlobal.
146 */
/*
 * Number of semaphores InitProcGlobal will create.
 * NOTE(review): the name line (148) and the return expression (154) were
 * lost in extraction; presumably ProcGlobalSemas() returning a count of
 * backends plus auxiliary processes -- TODO confirm upstream.
 */
147int
149{
150 /*
151 * We need a sema per backend (including autovacuum), plus one for each
152 * auxiliary process.
153 */
155}
156
157/*
158 * InitProcGlobal -
159 * Initialize the global process table during postmaster or standalone
160 * backend startup.
161 *
162 * We also create all the per-process semaphores we will need to support
163 * the requested number of backends. We used to allocate semaphores
164 * only when backends were actually started up, but that is bad because
165 * it lets Postgres fail under load --- a lot of Unix systems are
166 * (mis)configured with small limits on the number of semaphores, and
167 * running out when trying to start another backend is a common failure.
168 * So, now we grab enough semaphores to support the desired max number
169 * of backends immediately at initialization --- if the sysadmin has set
170 * MaxConnections, max_worker_processes, max_wal_senders, or
171 * autovacuum_worker_slots higher than his kernel will support, he'll
172 * find out sooner rather than later.
173 *
174 * Another reason for creating semaphores here is that the semaphore
175 * implementation typically requires us to create semaphores in the
176 * postmaster, not in backends.
177 *
178 * Note: this is NOT called by individual backends under a postmaster,
179 * not even in the EXEC_BACKEND case. The ProcGlobal and AuxiliaryProcs
180 * pointers must be propagated specially for EXEC_BACKEND operation.
181 */
/*
 * InitProcGlobal -- carve up the "PGPROC structures" shmem chunk into the
 * PGPROC array, the dense mirror arrays, and the interleaved fast-path lock
 * arrays, then thread the PGPROCs onto their per-category freelists.
 * NOTE(review): this capture dropped many original lines (numbers skip at
 * 189, 193-196, 207-217, 228, 231, 241, 253, 267-270, 276, 279, 282, 295,
 * 298-300, 307, 311, 326-329, 332-335, 338-339, 344-345, 353, 359-360, 365,
 * 372); code below is preserved verbatim.
 */
182void
184{
185 PGPROC *procs;
186 int i,
187 j;
188 bool found;
190
191 /* Used for setup of per-backend fast-path slots. */
192 char *fpPtr,
197 char *ptr;
198
199 /* Create the ProcGlobal shared structure */
200 ProcGlobal = (PROC_HDR *)
201 ShmemInitStruct("Proc Header", sizeof(PROC_HDR), &found);
202 Assert(!found);
203
204 /*
205 * Initialize the data structures.
206 */
218
219 /*
220 * Create and initialize all the PGPROC structures we'll need. There are
221 * six separate consumers: (1) normal backends, (2) autovacuum workers and
222 * special workers, (3) background workers, (4) walsenders, (5) auxiliary
223 * processes, and (6) prepared transactions. (For largely-historical
224 * reasons, we combine autovacuum and special workers into one category
225 * with a single freelist.) Each PGPROC structure is dedicated to exactly
226 * one of these purposes, and they do not move between groups.
227 */
229
230 ptr = ShmemInitStruct("PGPROC structures",
232 &found);
233
234 MemSet(ptr, 0, requestSize);
235
236 procs = (PGPROC *) ptr;
237 ptr = ptr + TotalProcs * sizeof(PGPROC);
238
239 ProcGlobal->allProcs = procs;
240 /* XXX allProcCount isn't really all of them; it excludes prepared xacts */
242
243 /*
244 * Allocate arrays mirroring PGPROC fields in a dense manner. See
245 * PROC_HDR.
246 *
247 * XXX: It might make sense to increase padding for these arrays, given
248 * how hotly they are accessed.
249 */
250 ProcGlobal->xids = (TransactionId *) ptr;
251 ptr = ptr + (TotalProcs * sizeof(*ProcGlobal->xids));
252
254 ptr = ptr + (TotalProcs * sizeof(*ProcGlobal->subxidStates));
255
256 ProcGlobal->statusFlags = (uint8 *) ptr;
257 ptr = ptr + (TotalProcs * sizeof(*ProcGlobal->statusFlags));
258
259 /* make sure we didn't overflow */
260 Assert((ptr > (char *) procs) && (ptr <= (char *) procs + requestSize));
261
262 /*
263 * Allocate arrays for fast-path locks. Those are variable-length, so
264 * can't be included in PGPROC directly. We allocate a separate piece of
265 * shared memory and then divide that between backends.
266 */
269

271

272 fpPtr = ShmemInitStruct("Fast-Path Lock Array",
274 &found);
275
277

278 /* For asserts checking we did not overflow. */
280

281 /* Reserve space for semaphores. */
283

284 for (i = 0; i < TotalProcs; i++)
285 {
286 PGPROC *proc = &procs[i];
287
288 /* Common initialization for all PGPROCs, regardless of type. */
289
290 /*
291 * Set the fast-path lock arrays, and move the pointer. We interleave
292 * the two arrays, to (hopefully) get some locality for each backend.
293 */
294 proc->fpLockBits = (uint64 *) fpPtr;
296
297 proc->fpRelId = (Oid *) fpPtr;
299

301

302 /*
303 * Set up per-PGPROC semaphore, latch, and fpInfoLock. Prepared xact
304 * dummy PGPROCs don't need these though - they're never associated
305 * with a real process
306 */
308 {
309 proc->sem = PGSemaphoreCreate();
310 InitSharedLatch(&(proc->procLatch));
312 }
313
314 /*
315 * Newly created PGPROCs for normal backends, autovacuum workers,
316 * special workers, bgworkers, and walsenders must be queued up on the
317 * appropriate free list. Because there can only ever be a small,
318 * fixed number of auxiliary processes, no free list is used in that
319 * case; InitAuxiliaryProcess() instead uses a linear search. PGPROCs
320 * for prepared transactions are added to a free list by
321 * TwoPhaseShmemInit().
322 */
323 if (i < MaxConnections)
324 {
325 /* PGPROC for normal backend, add to freeProcs list */
328 }
330 {
331 /* PGPROC for AV or special worker, add to autovacFreeProcs list */
334 }
336 {
337 /* PGPROC for bgworker, add to bgworkerFreeProcs list */
340 }
341 else if (i < MaxBackends)
342 {
343 /* PGPROC for walsender, add to walsenderFreeProcs list */
346 }
347
348 /* Initialize myProcLocks[] shared memory queues. */
349 for (j = 0; j < NUM_LOCK_PARTITIONS; j++)
350 dlist_init(&(proc->myProcLocks[j]));
351
352 /* Initialize lockGroupMembers list. */
354

355 /*
356 * Initialize the atomic variables, otherwise, it won't be safe to
357 * access them for backends that aren't currently in use.
358 */
361 pg_atomic_init_u64(&(proc->waitStart), 0);
362 }
363
364 /* Should have consumed exactly the expected amount of fast-path memory. */
366

367 /*
368 * Save pointers to the blocks of PGPROC structures reserved for auxiliary
369 * processes and prepared transactions.
370 */
371 AuxiliaryProcs = &procs[MaxBackends];
373}
374
375/*
376 * InitProcess -- initialize a per-process PGPROC entry for this backend
377 */
/*
 * InitProcess -- claim a PGPROC from the freelist matching this backend's
 * type, initialize its per-process fields, take ownership of its latch, and
 * register ProcKill for exit cleanup.
 * NOTE(review): many original lines were lost in extraction (numbers skip at
 * 398-400, 405, 421-423, 428, 438-445, 448, 460-474, 477-484, 491, 494,
 * 497-511, 515-519, 526-530, 537, 542, 548-549, 557-558); code below is
 * preserved verbatim.
 */
378void
380{
381 dlist_head *procgloballist;
382
383 /*
384 * ProcGlobal should be set up already (if we are a backend, we inherit
385 * this by fork() or EXEC_BACKEND mechanism from the postmaster).
386 */
387 if (ProcGlobal == NULL)
388 elog(PANIC, "proc header uninitialized");
389
390 if (MyProc != NULL)
391 elog(ERROR, "you already exist");
392
393 /*
394 * Before we start accessing the shared memory in a serious way, mark
395 * ourselves as an active postmaster child; this is so that the postmaster
396 * can detect it if we exit without cleaning up.
397 */
400

401 /*
402 * Decide which list should supply our PGPROC. This logic must match the
403 * way the freelists were constructed in InitProcGlobal().
404 */
406 procgloballist = &ProcGlobal->autovacFreeProcs;
407 else if (AmBackgroundWorkerProcess())
408 procgloballist = &ProcGlobal->bgworkerFreeProcs;
409 else if (AmWalSenderProcess())
410 procgloballist = &ProcGlobal->walsenderFreeProcs;
411 else
412 procgloballist = &ProcGlobal->freeProcs;
413
414 /*
415 * Try to get a proc struct from the appropriate free list. If this
416 * fails, we must be out of PGPROC structures (not to mention semaphores).
417 *
418 * While we are holding the spinlock, also copy the current shared
419 * estimate of spins_per_delay to local storage.
420 */
422

424

425 if (!dlist_is_empty(procgloballist))
426 {
427 MyProc = dlist_container(PGPROC, freeProcsLink, dlist_pop_head_node(procgloballist));
429 }
430 else
431 {
432 /*
433 * If we reach here, all the PGPROCs are in use. This is one of the
434 * possible places to detect "too many backends", so give the standard
435 * error message. XXX do we need to give a different failure message
436 * in the autovacuum case?
437 */
439 if (AmWalSenderProcess())
442 errmsg("number of requested standby connections exceeds \"max_wal_senders\" (currently %d)",
446 errmsg("sorry, too many clients already")));
447 }
449

450 /*
451 * Cross-check that the PGPROC is of the type we expect; if this were not
452 * the case, it would get returned to the wrong list.
453 */
454 Assert(MyProc->procgloballist == procgloballist);
455
456 /*
457 * Initialize all fields of MyProc, except for those previously
458 * initialized by InitProcGlobal.
459 */
462 MyProc->fpVXIDLock = false;
469 /* databaseId and roleId will be filled in later */
475 MyProc->statusFlags = 0;
476 /* NB -- autovac launcher intentionally does not set IS_AUTOVACUUM */
480 MyProc->lwWaitMode = 0;
485#ifdef USE_ASSERT_CHECKING
486 {
487 int i;
488
489 /* Last process should have released all locks. */
490 for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
492 }
493#endif
495

496 /* Initialize fields for sync rep */
500

501 /* Initialize fields for group XID clearing. */
505

506 /* Check that group locking fields are in a proper initial state. */
509

510 /* Initialize wait event information. */
512

513 /* Initialize fields for group transaction status update. */
514 MyProc->clogGroupMember = false;
520

521 /*
522 * Acquire ownership of the PGPROC's latch, so that we can use WaitLatch
523 * on it. That allows us to repoint the process latch, which so far
524 * points to process local one, to the shared one.
525 */
528

529 /* now that we have a proc, report wait events to shared memory */
531

532 /*
533 * We might be reusing a semaphore that belonged to a failed process. So
534 * be careful and reinitialize its value here. (This is not strictly
535 * necessary anymore, but seems like a good idea for cleanliness.)
536 */
538

539 /*
540 * Arrange to clean up at backend exit.
541 */
543

544 /*
545 * Now that we have a PGPROC, we could try to acquire locks, so initialize
546 * local state needed for LWLocks, and the deadlock checker.
547 */
550

551#ifdef EXEC_BACKEND
552
553 /*
554 * Initialize backend-local pointers to all the shared data structures.
555 * (We couldn't do this until now because it needs LWLocks.)
556 */
559#endif
560}
561
562/*
563 * InitProcessPhase2 -- make MyProc visible in the shared ProcArray.
564 *
565 * This is separate from InitProcess because we can't acquire LWLocks until
566 * we've created a PGPROC, but in the EXEC_BACKEND case ProcArrayAdd won't
567 * work until after we've done AttachSharedMemoryStructs.
568 */
/*
 * InitProcessPhase2 -- second stage of per-backend setup: publish MyProc in
 * the shared ProcArray and register the matching exit cleanup.
 * NOTE(review): name line (570) and the ProcArrayAdd / on_shmem_exit calls
 * (577, 582) were lost in extraction; kept verbatim.
 */
569void
571{
572 Assert(MyProc != NULL);
573
574 /*
575 * Add our PGPROC to the PGPROC array in shared memory.
576 */
578

579 /*
580 * Arrange to clean that up at backend exit.
581 */
583}
584
585/*
586 * InitAuxiliaryProcess -- create a PGPROC entry for an auxiliary process
587 *
588 * This is called by bgwriter and similar processes so that they will have a
589 * MyProc value that's real enough to let them wait for LWLocks. The PGPROC
590 * and sema that are assigned are one of the extra ones created during
591 * InitProcGlobal.
592 *
593 * Auxiliary processes are presently not expected to wait for real (lockmgr)
594 * locks, so we need not set up the deadlock checker. They are never added
595 * to the ProcArray or the sinval messaging mechanism, either. They also
596 * don't get a VXID assigned, since this is only useful when we actually
597 * hold lockmgr locks.
598 *
599 * Startup process however uses locks but never waits for them in the
600 * normal backend sense. Startup process also takes part in sinval messaging
601 * as a sendOnly process, so never reads messages from sinval queue. So
602 * Startup process does have a VXID and does show up in pg_locks.
603 */
/*
 * InitAuxiliaryProcess -- like InitProcess but for auxiliary processes:
 * claims a free AuxiliaryProcs slot by linear search (no freelist), marks it
 * with our PID, and does the common latch/semaphore/exit-hook setup.
 * NOTE(review): numerous original lines were lost in extraction (numbers
 * skip at 605, 607, 620-622, 630-632, 637-639, 643-645, 653, 656, 662-674,
 * 676, 678-681, 688, 697-698, 701, 704-705, 712, 717, 724, 732-733); code
 * below is preserved verbatim.
 */
604void
606{
608 int proctype;
609
610 /*
611 * ProcGlobal should be set up already (if we are a backend, we inherit
612 * this by fork() or EXEC_BACKEND mechanism from the postmaster).
613 */
614 if (ProcGlobal == NULL || AuxiliaryProcs == NULL)
615 elog(PANIC, "proc header uninitialized");
616
617 if (MyProc != NULL)
618 elog(ERROR, "you already exist");
619
622

623 /*
624 * We use the freeProcsLock to protect assignment and releasing of
625 * AuxiliaryProcs entries.
626 *
627 * While we are holding the spinlock, also copy the current shared
628 * estimate of spins_per_delay to local storage.
629 */
631

633

634 /*
635 * Find a free auxproc ... *big* trouble if there isn't one ...
636 */
638 {
640 if (auxproc->pid == 0)
641 break;
642 }
644 {
646 elog(FATAL, "all AuxiliaryProcs are in use");
647 }
648
649 /* Mark auxiliary proc as in use by me */
650 /* use volatile pointer to prevent code rearrangement */
651 ((volatile PGPROC *) auxproc)->pid = MyProcPid;
652
654

655 MyProc = auxproc;
657

658 /*
659 * Initialize all fields of MyProc, except for those previously
660 * initialized by InitProcGlobal.
661 */
664 MyProc->fpVXIDLock = false;
675 MyProc->statusFlags = 0;
677 MyProc->lwWaitMode = 0;
682#ifdef USE_ASSERT_CHECKING
683 {
684 int i;
685
686 /* Last process should have released all locks. */
687 for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
689 }
690#endif
691
692 /*
693 * Acquire ownership of the PGPROC's latch, so that we can use WaitLatch
694 * on it. That allows us to repoint the process latch, which so far
695 * points to process local one, to the shared one.
696 */
699

700 /* now that we have a proc, report wait events to shared memory */
702

703 /* Check that group locking fields are in a proper initial state. */
706

707 /*
708 * We might be reusing a semaphore that belonged to a failed process. So
709 * be careful and reinitialize its value here. (This is not strictly
710 * necessary anymore, but seems like a good idea for cleanliness.)
711 */
713

714 /*
715 * Arrange to clean up at process exit.
716 */
718

719 /*
720 * Now that we have a PGPROC, we could try to acquire lightweight locks.
721 * Initialize local state needed for them. (Heavyweight locks cannot be
722 * acquired in aux processes.)
723 */
725

726#ifdef EXEC_BACKEND
727
728 /*
729 * Initialize backend-local pointers to all the shared data structures.
730 * (We couldn't do this until now because it needs LWLocks.)
731 */
734#endif
735}
736
737/*
738 * Used from bufmgr to share the value of the buffer that Startup waits on,
739 * or to reset the value to "not waiting" (-1). This allows processing
740 * of recovery conflicts for buffer pins. Set is made before backends look
741 * at this value, so locking not required, especially since the set is
742 * an atomic integer set operation.
743 */
/*
 * Setter for the startup process's buffer-pin-wait buffer id.
 * NOTE(review): the name/parameter line (745) and the actual assignment
 * (750) were lost in extraction; presumably this is
 * SetStartupBufferPinWaitBufId(int bufid) storing into procglobal.
 */
744void
746{
747 /* use volatile pointer to prevent code rearrangement */
748 volatile PROC_HDR *procglobal = ProcGlobal;
749
751}
752
753/*
754 * Used by backends when they receive a request to check for buffer pin waits.
755 */
/*
 * Getter counterpart used by backends checking for buffer pin waits.
 * NOTE(review): the name line (757) and the return statement (762) were
 * lost in extraction; presumably GetStartupBufferPinWaitBufId(void)
 * returning the value stored in procglobal.
 */
756int
758{
759 /* use volatile pointer to prevent code rearrangement */
760 volatile PROC_HDR *procglobal = ProcGlobal;
761
763}
764
765/*
766 * Check whether there are at least N free PGPROC objects. If false is
767 * returned, *nfree will be set to the number of free PGPROC objects.
768 * Otherwise, *nfree will be set to n.
769 *
770 * Note: this is designed on the assumption that N will generally be small.
771 */
772bool
773HaveNFreeProcs(int n, int *nfree)
774{
775 dlist_iter iter;
776
777 Assert(n > 0);
778 Assert(nfree);
779
/*
 * NOTE(review): the lock acquire (780-781), the dlist_foreach over
 * ProcGlobal->freeProcs (783), and the lock release (790-791) were lost in
 * extraction; the counting loop body below is preserved verbatim.
 */
781

 /* Count free entries, stopping early once we have seen n of them. */
782 *nfree = 0;
784 {
785 (*nfree)++;
786 if (*nfree == n)
787 break;
788 }
789
791

792 return (*nfree == n);
793}
794
795/*
796 * Cancel any pending wait for lock, when aborting a transaction, and revert
797 * any strong lock count acquisition for a lock being acquired.
798 *
799 * (Normally, this would only happen if we accept a cancel/die
800 * interrupt while waiting; but an ereport(ERROR) before or during the lock
801 * wait is within the realm of possibility, too.)
802 */
/*
 * LockErrorCleanup -- abort-path cleanup for a pending lock wait: disable
 * the deadlock/lock timers (preserving the LOCK_TIMEOUT indicator) and
 * unlink ourselves from the wait queue, remembering a grant if one raced in.
 * NOTE(review): several lines lost in extraction (804, 806-812, 815, 818,
 * 830, 834, 837-840, 843, 853-854, 857, 859, 861) -- including the local
 * declarations (timeouts[], lockAwaited) and the partition lock calls; code
 * below is preserved verbatim.
 */
803void
805{
809

811

813

814 /* Nothing to do if we weren't waiting for a lock */
816 if (lockAwaited == NULL)
817 {
819 return;
820 }
821
822 /*
823 * Turn off the deadlock and lock timeout timers, if they are still
824 * running (see ProcSleep). Note we must preserve the LOCK_TIMEOUT
825 * indicator flag, since this function is executed before
826 * ProcessInterrupts when responding to SIGINT; else we'd lose the
827 * knowledge that the SIGINT came from a lock timeout and not an external
828 * source.
829 */
831 timeouts[0].keep_indicator = false;
832 timeouts[1].id = LOCK_TIMEOUT;
833 timeouts[1].keep_indicator = true;
835

836 /* Unlink myself from the wait queue, if on it (might not be anymore!) */
839

841 {
842 /* We could not have been granted the lock yet */
844 }
845 else
846 {
847 /*
848 * Somebody kicked us off the lock queue already. Perhaps they
849 * granted us the lock, or perhaps they detected a deadlock. If they
850 * did grant us the lock, we'd better remember it in our local lock
851 * table.
852 */
855 }
856
858

860

862}
863
864
865/*
866 * ProcReleaseLocks() -- release locks associated with current transaction
867 * at main transaction commit or abort
868 *
869 * At main transaction commit, we release standard locks except session locks.
870 * At main transaction abort, we release all locks including session locks.
871 *
872 * Advisory locks are released only if they are transaction-level;
873 * session-level holds remain, whether this is a commit or not.
874 *
875 * At subtransaction commit, we don't release any locks (so this func is not
876 * needed at all); we will defer the releasing to the parent transaction.
877 * At subtransaction abort, we release all locks held by the subtransaction;
878 * this is implemented by retail releasing of the locks under control of
879 * the ResourceOwner mechanism.
880 */
/*
 * ProcReleaseLocks -- end-of-transaction lock release; no-op when this
 * process has no PGPROC yet.
 * NOTE(review): the parameter line (882) and the three call statements
 * (887, 889, 891 -- wait-queue exit, LockReleaseAll variants) were lost in
 * extraction; only their explanatory comments remain. Kept verbatim.
 */
881void
883{
884 if (!MyProc)
885 return;
886 /* If waiting, get off wait queue (should only be needed after error) */
888 /* Release standard locks, including session-level if aborting */
890 /* Release transaction-level advisory locks */
892}
893
894
895/*
896 * RemoveProcFromArray() -- Remove this process from the shared ProcArray.
897 */
898static void
904
905/*
906 * ProcKill() -- Destroy the per-proc data structure for
907 * this process. Release any of its held LW locks.
908 */
/*
 * ProcKill -- shmem-exit callback: release LWLocks, detach from any lock
 * group, disown the shared latch, and return the PGPROC to its freelist
 * (unless a surviving lock-group child must do that later).
 * NOTE(review): several original lines lost in extraction (910, 921-922,
 * 930, 939, 944, 947, 955, 958, 960-962, 971, 973, 977-978, 990-991, 995,
 * 1000-1001, 1004, 1014, 1021, 1023); code below is preserved verbatim.
 */
909static void
911{
912 PGPROC *proc;
913 dlist_head *procgloballist;
914
915 Assert(MyProc != NULL);
916
917 /* not safe if forked by system(), etc. */
918 if (MyProc->pid != (int) getpid())
919 elog(PANIC, "ProcKill() called in child process");
920
921 /* Make sure we're out of the sync rep lists */
923

924#ifdef USE_ASSERT_CHECKING
925 {
926 int i;
927
928 /* Last process should have released all locks. */
929 for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
931 }
932#endif
933
934 /*
935 * Release any LW locks I am holding. There really shouldn't be any, but
936 * it's cheap to check again before we cut the knees off the LWLock
937 * facility by releasing our PGPROC ...
938 */
940

941 /*
942 * Cleanup waiting for LSN if any.
943 */
945

946 /* Cancel any pending condition variable sleep, too */
948

949 /*
950 * Detach from any lock group of which we are a member. If the leader
951 * exits before all other group members, its PGPROC will remain allocated
952 * until the last group process exits; that process must return the
953 * leader's PGPROC to the appropriate list.
954 */
956 {
957 PGPROC *leader = MyProc->lockGroupLeader;
959

963 if (dlist_is_empty(&leader->lockGroupMembers))
964 {
965 leader->lockGroupLeader = NULL;
966 if (leader != MyProc)
967 {
968 procgloballist = leader->procgloballist;
969
970 /* Leader exited first; return its PGPROC. */
972 dlist_push_head(procgloballist, &leader->freeProcsLink);
974 }
975 }
976 else if (leader != MyProc)
979 }
980
981 /*
982 * Reset MyLatch to the process local one. This is so that signal
983 * handlers et al can continue using the latch after the shared latch
984 * isn't ours anymore.
985 *
986 * Similarly, stop reporting wait events to MyProc->wait_event_info.
987 *
988 * After that clear MyProc and disown the shared latch.
989 */
992

993 proc = MyProc;
994 MyProc = NULL;
996 DisownLatch(&proc->procLatch);
997
998 /* Mark the proc no longer in use */
999 proc->pid = 0;
1002

1003 procgloballist = proc->procgloballist;
1005

1006 /*
1007 * If we're still a member of a locking group, that means we're a leader
1008 * which has somehow exited before its children. The last remaining child
1009 * will release our PGPROC. Otherwise, release it now.
1010 */
1011 if (proc->lockGroupLeader == NULL)
1012 {
1013 /* Since lockGroupLeader is NULL, lockGroupMembers should be empty. */
1015

1016 /* Return PGPROC structure (and semaphore) to appropriate freelist */
1017 dlist_push_tail(procgloballist, &proc->freeProcsLink);
1018 }
1019
1020 /* Update shared estimate of spins_per_delay */
1022

1024}
1025
1026/*
1027 * AuxiliaryProcKill() -- Cut-down version of ProcKill for auxiliary
1028 * processes (bgwriter, etc). The PGPROC and sema are not released, only
1029 * marked as not-in-use.
1030 */
/*
 * AuxiliaryProcKill -- exit callback for auxiliary processes: release
 * LWLocks, disown the latch, and mark the AuxiliaryProcs slot free by
 * clearing its pid (the PGPROC and semaphore are not returned to a list).
 * NOTE(review): lines 1032, 1035, 1038, 1044, 1049, 1052, 1055-1056, 1060,
 * 1063, 1067-1068, 1071, 1073 were lost in extraction (including the
 * auxproc lookup and lock acquire/release); kept verbatim.
 */
1031static void
1033{
1034 int proctype = DatumGetInt32(arg);
1036 PGPROC *proc;
1037
1039

1040 /* not safe if forked by system(), etc. */
1041 if (MyProc->pid != (int) getpid())
1042 elog(PANIC, "AuxiliaryProcKill() called in child process");
1043
1045

1046 Assert(MyProc == auxproc);
1047
1048 /* Release any LW locks I am holding (see notes above) */
1050

1051 /* Cancel any pending condition variable sleep, too */
1053

1054 /* look at the equivalent ProcKill() code for comments */
1057

1058 proc = MyProc;
1059 MyProc = NULL;
1061 DisownLatch(&proc->procLatch);
1062
1064

1065 /* Mark auxiliary proc no longer in use */
1066 proc->pid = 0;
1069

1070 /* Update shared estimate of spins_per_delay */
1072

1074}
1075
1076/*
1077 * AuxiliaryPidGetProc -- get PGPROC for an auxiliary process
1078 * given its PID
1079 *
1080 * Returns NULL if not found.
1081 */
/*
 * Linear search of the fixed-size AuxiliaryProcs array for the entry whose
 * pid matches; returns NULL when no slot matches. pid == 0 is rejected up
 * front so unused (pid-cleared) slots can never match.
 * NOTE(review): the name/parameter line (1083) was lost in extraction;
 * per the preceding comment this is AuxiliaryPidGetProc(int pid).
 */
1082PGPROC *
1084{
1085 PGPROC *result = NULL;
1086 int index;
1087
1088 if (pid == 0) /* never match dummy PGPROCs */
1089 return NULL;
1090
1091 for (index = 0; index < NUM_AUXILIARY_PROCS; index++)
1092 {
1093 PGPROC *proc = &AuxiliaryProcs[index];
1094
1095 if (proc->pid == pid)
1096 {
1097 result = proc;
1098 break;
1099 }
1100 }
1101 return result;
1102}
1103
1104
1105/*
1106 * JoinWaitQueue -- join the wait queue on the specified lock
1107 *
1108 * It's not actually guaranteed that we need to wait when this function is
1109 * called, because it could be that when we try to find a position at which
1110 * to insert ourself into the wait queue, we discover that we must be inserted
 1111 * ahead of everyone who wants a lock that conflicts with ours. In that case,
1112 * we get the lock immediately. Because of this, it's sensible for this function
1113 * to have a dontWait argument, despite the name.
1114 *
1115 * On entry, the caller has already set up LOCK and PROCLOCK entries to
1116 * reflect that we have "requested" the lock. The caller is responsible for
1117 * cleaning that up, if we end up not joining the queue after all.
1118 *
1119 * The lock table's partition lock must be held at entry, and is still held
1120 * at exit. The caller must release it before calling ProcSleep().
1121 *
1122 * Result is one of the following:
1123 *
1124 * PROC_WAIT_STATUS_OK - lock was immediately granted
1125 * PROC_WAIT_STATUS_WAITING - joined the wait queue; call ProcSleep()
1126 * PROC_WAIT_STATUS_ERROR - immediate deadlock was detected, or would
1127 * need to wait and dontWait == true
1128 *
1129 * NOTES: The process queue is now a priority queue for locking.
1130 */
/*
 * JoinWaitQueue (body) -- decide where to enter the lock wait queue:
 * scan existing waiters, detect immediate deadlock (RememberSimpleDeadLock),
 * possibly cut in ahead of a waiter we already block -- or grant ourselves
 * the lock outright when nothing ahead conflicts -- then record wait state
 * in MyProc.
 * NOTE(review): the return-type/name lines (1131-1132) and several
 * statements were lost in extraction (1138-1142, 1146, 1166, 1173, 1199,
 * 1201, 1204, 1248, 1257, 1264, 1270, 1272, 1277, 1282, 1284 -- including
 * the declarations of myHeldLocks/myProcHeldLocks/aheadRequests/
 * insert_before and the early-deadlock / dontWait return statements); code
 * below is preserved verbatim.
 */
1133{
1134 LOCKMODE lockmode = locallock->tag.mode;
1135 LOCK *lock = locallock->lock;
1136 PROCLOCK *proclock = locallock->proclock;
1137 uint32 hashcode = locallock->hashcode;
1143 bool early_deadlock = false;
1144 PGPROC *leader = MyProc->lockGroupLeader;
1145
1147

1148 /*
1149 * Set bitmask of locks this process already holds on this object.
1150 */
1151 myHeldLocks = MyProc->heldLocks = proclock->holdMask;
1152
1153 /*
1154 * Determine which locks we're already holding.
1155 *
1156 * If group locking is in use, locks held by members of my locking group
1157 * need to be included in myHeldLocks. This is not required for relation
1158 * extension lock which conflict among group members. However, including
1159 * them in myHeldLocks will give group members the priority to get those
1160 * locks as compared to other backends which are also trying to acquire
1161 * those locks. OTOH, we can avoid giving priority to group members for
1162 * that kind of locks, but there doesn't appear to be a clear advantage of
1163 * the same.
1164 */
1165 myProcHeldLocks = proclock->holdMask;
1167 if (leader != NULL)
1168 {
1169 dlist_iter iter;
1170
1171 dlist_foreach(iter, &lock->procLocks)
1172 {
1174

1175 otherproclock = dlist_container(PROCLOCK, lockLink, iter.cur);
1176
1177 if (otherproclock->groupLeader == leader)
1178 myHeldLocks |= otherproclock->holdMask;
1179 }
1180 }
1181
1182 /*
1183 * Determine where to add myself in the wait queue.
1184 *
1185 * Normally I should go at the end of the queue. However, if I already
1186 * hold locks that conflict with the request of any previous waiter, put
1187 * myself in the queue just in front of the first such waiter. This is not
1188 * a necessary step, since deadlock detection would move me to before that
1189 * waiter anyway; but it's relatively cheap to detect such a conflict
1190 * immediately, and avoid delaying till deadlock timeout.
1191 *
1192 * Special case: if I find I should go in front of some waiter, check to
1193 * see if I conflict with already-held locks or the requests before that
1194 * waiter. If not, then just grant myself the requested lock immediately.
1195 * This is the same as the test for immediate grant in LockAcquire, except
1196 * we are only considering the part of the wait queue before my insertion
1197 * point.
1198 */
1200 {
1202 dlist_iter iter;
1203
1205 {
1206 PGPROC *proc = dlist_container(PGPROC, waitLink, iter.cur);
1207
1208 /*
1209 * If we're part of the same locking group as this waiter, its
1210 * locks neither conflict with ours nor contribute to
1211 * aheadRequests.
1212 */
1213 if (leader != NULL && leader == proc->lockGroupLeader)
1214 continue;
1215
1216 /* Must he wait for me? */
1217 if (lockMethodTable->conflictTab[proc->waitLockMode] & myHeldLocks)
1218 {
1219 /* Must I wait for him ? */
1220 if (lockMethodTable->conflictTab[lockmode] & proc->heldLocks)
1221 {
1222 /*
1223 * Yes, so we have a deadlock. Easiest way to clean up
1224 * correctly is to call RemoveFromWaitQueue(), but we
1225 * can't do that until we are *on* the wait queue. So, set
1226 * a flag to check below, and break out of loop. Also,
1227 * record deadlock info for later message.
1228 */
1229 RememberSimpleDeadLock(MyProc, lockmode, lock, proc);
1230 early_deadlock = true;
1231 break;
1232 }
1233 /* I must go before this waiter. Check special case. */
1234 if ((lockMethodTable->conflictTab[lockmode] & aheadRequests) == 0 &&
1235 !LockCheckConflicts(lockMethodTable, lockmode, lock,
1236 proclock))
1237 {
1238 /* Skip the wait and just grant myself the lock. */
1239 GrantLock(lock, proclock, lockmode);
1240 return PROC_WAIT_STATUS_OK;
1241 }
1242
1243 /* Put myself into wait queue before conflicting process */
1244 insert_before = proc;
1245 break;
1246 }
1247 /* Nope, so advance to next waiter */
1249 }
1250 }
1251
1252 /*
1253 * If we detected deadlock, give up without waiting. This must agree with
1254 * CheckDeadLock's recovery code.
1255 */
1256 if (early_deadlock)
1258

1259 /*
1260 * At this point we know that we'd really need to sleep. If we've been
1261 * commanded not to do that, bail out.
1262 */
1263 if (dontWait)
1265

1266 /*
1267 * Insert self into queue, at the position determined above.
1268 */
1269 if (insert_before)
1271 else
1273

1274 lock->waitMask |= LOCKBIT_ON(lockmode);
1275
1276 /* Set up wait information in PGPROC object, too */
1278 MyProc->waitLock = lock;
1279 MyProc->waitProcLock = proclock;
1280 MyProc->waitLockMode = lockmode;
1281
1283

1285}
1286
1287/*
1288 * ProcSleep -- put process to sleep waiting on lock
1289 *
1290 * This must be called when JoinWaitQueue() returns PROC_WAIT_STATUS_WAITING.
1291 * Returns after the lock has been granted, or if a deadlock is detected. Can
1292 * also bail out with ereport(ERROR), if some other error condition, or a
1293 * timeout or cancellation is triggered.
1294 *
1295 * Result is one of the following:
1296 *
1297 * PROC_WAIT_STATUS_OK - lock was granted
1298 * PROC_WAIT_STATUS_ERROR - a deadlock was detected
1299 */
1302{
1303 LOCKMODE lockmode = locallock->tag.mode;
1304 LOCK *lock = locallock->lock;
1305 uint32 hashcode = locallock->hashcode;
1308 bool allow_autovacuum_cancel = true;
1309 bool logged_recovery_conflict = false;
1312
1313 /* The caller must've armed the on-error cleanup mechanism */
1316
1317 /*
1318 * Now that we will successfully clean up after an ereport, it's safe to
1319 * check to see if there's a buffer pin deadlock against the Startup
1320 * process. Of course, that's only necessary if we're doing Hot Standby
1321 * and are not the Startup process ourselves.
1322 */
1325
1326 /* Reset deadlock_state before enabling the timeout handler */
1328 got_deadlock_timeout = false;
1329
1330 /*
1331 * Set timer so we can wake up after awhile and check for a deadlock. If a
1332 * deadlock is detected, the handler sets MyProc->waitStatus =
1333 * PROC_WAIT_STATUS_ERROR, allowing us to know that we must report failure
1334 * rather than success.
1335 *
1336 * By delaying the check until we've waited for a bit, we can avoid
1337 * running the rather expensive deadlock-check code in most cases.
1338 *
1339 * If LockTimeout is set, also enable the timeout for that. We can save a
1340 * few cycles by enabling both timeout sources in one call.
1341 *
1342 * If InHotStandby we set lock waits slightly later for clarity with other
1343 * code.
1344 */
1345 if (!InHotStandby)
1346 {
1347 if (LockTimeout > 0)
1348 {
1350
1352 timeouts[0].type = TMPARAM_AFTER;
1353 timeouts[0].delay_ms = DeadlockTimeout;
1354 timeouts[1].id = LOCK_TIMEOUT;
1355 timeouts[1].type = TMPARAM_AFTER;
1356 timeouts[1].delay_ms = LockTimeout;
1358 }
1359 else
1361
1362 /*
1363 * Use the current time obtained for the deadlock timeout timer as
1364 * waitStart (i.e., the time when this process started waiting for the
1365 * lock). Since getting the current time newly can cause overhead, we
1366 * reuse the already-obtained time to avoid that overhead.
1367 *
1368 * Note that waitStart is updated without holding the lock table's
1369 * partition lock, to avoid the overhead by additional lock
1370 * acquisition. This can cause "waitstart" in pg_locks to become NULL
1371 * for a very short period of time after the wait started even though
1372 * "granted" is false. This is OK in practice because we can assume
1373 * that users are likely to look at "waitstart" when waiting for the
1374 * lock for a long time.
1375 */
1378 }
1380 {
1381 /*
1382 * Set the wait start timestamp if logging is enabled and in hot
1383 * standby.
1384 */
1386 }
1387
1388 /*
1389 * If somebody wakes us between LWLockRelease and WaitLatch, the latch
1390 * will not wait. But a set latch does not necessarily mean that the lock
1391 * is free now, as there are many other sources for latch sets than
1392 * somebody releasing the lock.
1393 *
1394 * We process interrupts whenever the latch has been set, so cancel/die
1395 * interrupts are processed quickly. This means we must not mind losing
1396 * control to a cancel/die interrupt here. We don't, because we have no
1397 * shared-state-change work to do after being granted the lock (the
1398 * grantor did it all). We do have to worry about canceling the deadlock
1399 * timeout and updating the locallock table, but if we lose control to an
1400 * error, LockErrorCleanup will fix that up.
1401 */
1402 do
1403 {
1404 if (InHotStandby)
1405 {
1406 bool maybe_log_conflict =
1408
1409 /* Set a timer and wait for that or for the lock to be granted */
1412
1413 /*
1414 * Emit the log message if the startup process is waiting longer
1415 * than deadlock_timeout for recovery conflict on lock.
1416 */
1418 {
1420
1423 {
1425 int cnt;
1426
1427 vxids = GetLockConflicts(&locallock->tag.lock,
1428 AccessExclusiveLock, &cnt);
1429
1430 /*
1431 * Log the recovery conflict and the list of PIDs of
1432 * backends holding the conflicting lock. Note that we do
1433 * logging even if there are no such backends right now
1434 * because the startup process here has already waited
1435 * longer than deadlock_timeout.
1436 */
1439 cnt > 0 ? vxids : NULL, true);
1441 }
1442 }
1443 }
1444 else
1445 {
1447 PG_WAIT_LOCK | locallock->tag.lock.locktag_type);
1449 /* check for deadlocks first, as that's probably log-worthy */
1451 {
1453 got_deadlock_timeout = false;
1454 }
1456 }
1457
1458 /*
1459 * waitStatus could change from PROC_WAIT_STATUS_WAITING to something
1460 * else asynchronously. Read it just once per loop to prevent
1461 * surprising behavior (such as missing log messages).
1462 */
1463 myWaitStatus = *((volatile ProcWaitStatus *) &MyProc->waitStatus);
1464
1465 /*
1466 * If we are not deadlocked, but are waiting on an autovacuum-induced
1467 * task, send a signal to interrupt it.
1468 */
1470 {
1472 uint8 statusFlags;
1475
1476 /*
1477 * Grab info we need, then release lock immediately. Note this
1478 * coding means that there is a tiny chance that the process
1479 * terminates its current transaction and starts a different one
1480 * before we have a change to send the signal; the worst possible
1481 * consequence is that a for-wraparound vacuum is canceled. But
1482 * that could happen in any case unless we were to do kill() with
1483 * the lock held, which is much more undesirable.
1484 */
1486 statusFlags = ProcGlobal->statusFlags[autovac->pgxactoff];
1488 locktag_copy = lock->tag;
1490
1491 /*
1492 * Only do it if the worker is not working to protect against Xid
1493 * wraparound.
1494 */
1495 if ((statusFlags & PROC_IS_AUTOVACUUM) &&
1496 !(statusFlags & PROC_VACUUM_FOR_WRAPAROUND))
1497 {
1498 int pid = autovac->pid;
1499
1500 /* report the case, if configured to do so */
1502 {
1504 StringInfoData logbuf; /* errdetail for server log */
1505
1510 "Process %d waits for %s on %s.",
1511 MyProcPid,
1513 locktagbuf.data);
1514
1516 (errmsg_internal("sending cancel to blocking autovacuum PID %d",
1517 pid),
1518 errdetail_log("%s", logbuf.data)));
1519
1520 pfree(locktagbuf.data);
1521 pfree(logbuf.data);
1522 }
1523
1524 /* send the autovacuum worker Back to Old Kent Road */
1525 if (kill(pid, SIGINT) < 0)
1526 {
1527 /*
1528 * There's a race condition here: once we release the
1529 * ProcArrayLock, it's possible for the autovac worker to
1530 * close up shop and exit before we can do the kill().
1531 * Therefore, we do not whinge about no-such-process.
1532 * Other errors such as EPERM could conceivably happen if
1533 * the kernel recycles the PID fast enough, but such cases
1534 * seem improbable enough that it's probably best to issue
1535 * a warning if we see some other errno.
1536 */
1537 if (errno != ESRCH)
1539 (errmsg("could not send signal to process %d: %m",
1540 pid)));
1541 }
1542 }
1543
1544 /* prevent signal from being sent again more than once */
1546 }
1547
1548 /*
1549 * If awoken after the deadlock check interrupt has run, and
1550 * log_lock_waits is on, then report about the wait.
1551 */
1553 {
1557 const char *modename;
1558 long secs;
1559 int usecs;
1560 long msecs;
1561 int lockHoldersNum = 0;
1562
1566
1567 DescribeLockTag(&buf, &locallock->tag.lock);
1568 modename = GetLockmodeName(locallock->tag.lock.locktag_lockmethodid,
1569 lockmode);
1572 &secs, &usecs);
1573 msecs = secs * 1000 + usecs / 1000;
1574 usecs = usecs % 1000;
1575
1576 /* Gather a list of all lock holders and waiters */
1581
1583 ereport(LOG,
1584 (errmsg("process %d avoided deadlock for %s on %s by rearranging queue order after %ld.%03d ms",
1585 MyProcPid, modename, buf.data, msecs, usecs),
1586 (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
1587 "Processes holding the lock: %s. Wait queue: %s.",
1589 else if (deadlock_state == DS_HARD_DEADLOCK)
1590 {
1591 /*
1592 * This message is a bit redundant with the error that will be
1593 * reported subsequently, but in some cases the error report
1594 * might not make it to the log (eg, if it's caught by an
1595 * exception handler), and we want to ensure all long-wait
1596 * events get logged.
1597 */
1598 ereport(LOG,
1599 (errmsg("process %d detected deadlock while waiting for %s on %s after %ld.%03d ms",
1600 MyProcPid, modename, buf.data, msecs, usecs),
1601 (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
1602 "Processes holding the lock: %s. Wait queue: %s.",
1604 }
1605
1607 ereport(LOG,
1608 (errmsg("process %d still waiting for %s on %s after %ld.%03d ms",
1609 MyProcPid, modename, buf.data, msecs, usecs),
1610 (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
1611 "Processes holding the lock: %s. Wait queue: %s.",
1614 ereport(LOG,
1615 (errmsg("process %d acquired %s on %s after %ld.%03d ms",
1616 MyProcPid, modename, buf.data, msecs, usecs)));
1617 else
1618 {
1620
1621 /*
1622 * Currently, the deadlock checker always kicks its own
1623 * process, which means that we'll only see
1624 * PROC_WAIT_STATUS_ERROR when deadlock_state ==
1625 * DS_HARD_DEADLOCK, and there's no need to print redundant
1626 * messages. But for completeness and future-proofing, print
1627 * a message if it looks like someone else kicked us off the
1628 * lock.
1629 */
1631 ereport(LOG,
1632 (errmsg("process %d failed to acquire %s on %s after %ld.%03d ms",
1633 MyProcPid, modename, buf.data, msecs, usecs),
1634 (errdetail_log_plural("Process holding the lock: %s. Wait queue: %s.",
1635 "Processes holding the lock: %s. Wait queue: %s.",
1637 }
1638
1639 /*
1640 * At this point we might still need to wait for the lock. Reset
1641 * state so we don't print the above messages again.
1642 */
1644
1645 pfree(buf.data);
1648 }
1650
1651 /*
1652 * Disable the timers, if they are still running. As in LockErrorCleanup,
1653 * we must preserve the LOCK_TIMEOUT indicator flag: if a lock timeout has
1654 * already caused QueryCancelPending to become set, we want the cancel to
1655 * be reported as a lock timeout, not a user cancel.
1656 */
1657 if (!InHotStandby)
1658 {
1659 if (LockTimeout > 0)
1660 {
1662
1664 timeouts[0].keep_indicator = false;
1665 timeouts[1].id = LOCK_TIMEOUT;
1666 timeouts[1].keep_indicator = true;
1668 }
1669 else
1671 }
1672
1673 /*
1674 * Emit the log message if recovery conflict on lock was resolved but the
1675 * startup process waited longer than deadlock_timeout for it.
1676 */
1680 NULL, false);
1681
1682 /*
1683 * We don't have to do anything else, because the awaker did all the
1684 * necessary updates of the lock table and MyProc. (The caller is
1685 * responsible for updating the local lock table.)
1686 */
1687 return myWaitStatus;
1688}
1689
1690
1691/*
1692 * ProcWakeup -- wake up a process by setting its latch.
1693 *
1694 * Also remove the process from the wait queue and set its waitLink invalid.
1695 *
1696 * The appropriate lock partition lock must be held by caller.
1697 *
1698 * XXX: presently, this code is only used for the "success" case, and only
1699 * works correctly for that case. To clean up in failure case, would need
1700 * to twiddle the lock's request counts too --- see RemoveFromWaitQueue.
1701 * Hence, in practice the waitStatus parameter must be PROC_WAIT_STATUS_OK.
1702 */
1703void
1705{
1706 if (dlist_node_is_detached(&proc->waitLink))
1707 return;
1708
1710
1711 /* Remove process from wait queue */
1713
1714 /* Clean up process' state and pass it the ok/fail signal */
1715 proc->waitLock = NULL;
1716 proc->waitProcLock = NULL;
1717 proc->waitStatus = waitStatus;
1718 pg_atomic_write_u64(&proc->waitStart, 0);
1719
1720 /* And awaken it */
1721 SetLatch(&proc->procLatch);
1722}
1723
1724/*
1725 * ProcLockWakeup -- routine for waking up processes when a lock is
1726 * released (or a prior waiter is aborted). Scan all waiters
1727 * for lock, waken any that are no longer blocked.
1728 *
1729 * The appropriate lock partition lock must be held by caller.
1730 */
1731void
1733{
1737
1739 return;
1740
1742 {
1743 PGPROC *proc = dlist_container(PGPROC, waitLink, miter.cur);
1744 LOCKMODE lockmode = proc->waitLockMode;
1745
1746 /*
1747 * Waken if (a) doesn't conflict with requests of earlier waiters, and
1748 * (b) doesn't conflict with already-held locks.
1749 */
1750 if ((lockMethodTable->conflictTab[lockmode] & aheadRequests) == 0 &&
1751 !LockCheckConflicts(lockMethodTable, lockmode, lock,
1752 proc->waitProcLock))
1753 {
1754 /* OK to waken */
1755 GrantLock(lock, proc->waitProcLock, lockmode);
1756 /* removes proc from the lock's waiting process queue */
1758 }
1759 else
1760 {
1761 /*
1762 * Lock conflicts: Don't wake, but remember requested mode for
1763 * later checks.
1764 */
1765 aheadRequests |= LOCKBIT_ON(lockmode);
1766 }
1767 }
1768}
1769
1770/*
1771 * CheckDeadLock
1772 *
1773 * We only get to this routine, if DEADLOCK_TIMEOUT fired while waiting for a
1774 * lock to be released by some other process. Check if there's a deadlock; if
1775 * not, just return. If we have a real deadlock, remove ourselves from the
1776 * lock's wait queue.
1777 */
1778static DeadLockState
1780{
1781 int i;
1782 DeadLockState result;
1783
1784 /*
1785 * Acquire exclusive lock on the entire shared lock data structures. Must
1786 * grab LWLocks in partition-number order to avoid LWLock deadlock.
1787 *
1788 * Note that the deadlock check interrupt had better not be enabled
1789 * anywhere that this process itself holds lock partition locks, else this
1790 * will wait forever. Also note that LWLockAcquire creates a critical
1791 * section, so that this routine cannot be interrupted by cancel/die
1792 * interrupts.
1793 */
1794 for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
1796
1797 /*
1798 * Check to see if we've been awoken by anyone in the interim.
1799 *
1800 * If we have, we can return and resume our transaction -- happy day.
1801 * Before we are awoken the process releasing the lock grants it to us so
1802 * we know that we don't have to wait anymore.
1803 *
1804 * We check by looking to see if we've been unlinked from the wait queue.
1805 * This is safe because we hold the lock partition lock.
1806 */
1808 {
1809 result = DS_NO_DEADLOCK;
1810 goto check_done;
1811 }
1812
1813#ifdef LOCK_DEBUG
1814 if (Debug_deadlocks)
1815 DumpAllLocks();
1816#endif
1817
1818 /* Run the deadlock check */
1819 result = DeadLockCheck(MyProc);
1820
1821 if (result == DS_HARD_DEADLOCK)
1822 {
1823 /*
1824 * Oops. We have a deadlock.
1825 *
1826 * Get this process out of wait state. (Note: we could do this more
1827 * efficiently by relying on lockAwaited, but use this coding to
1828 * preserve the flexibility to kill some other transaction than the
1829 * one detecting the deadlock.)
1830 *
1831 * RemoveFromWaitQueue sets MyProc->waitStatus to
1832 * PROC_WAIT_STATUS_ERROR, so ProcSleep will report an error after we
1833 * return.
1834 */
1837
1838 /*
1839 * We're done here. Transaction abort caused by the error that
1840 * ProcSleep will raise will cause any other locks we hold to be
1841 * released, thus allowing other processes to wake up; we don't need
1842 * to do that here. NOTE: an exception is that releasing locks we
1843 * hold doesn't consider the possibility of waiters that were blocked
1844 * behind us on the lock we just failed to get, and might now be
1845 * wakable because we're not in front of them anymore. However,
1846 * RemoveFromWaitQueue took care of waking up any such processes.
1847 */
1848 }
1849
1850 /*
1851 * And release locks. We do this in reverse order for two reasons: (1)
1852 * Anyone else who needs more than one of the locks will be trying to lock
1853 * them in increasing order; we don't want to release the other process
1854 * until it can get all the locks it needs. (2) This avoids O(N^2)
1855 * behavior inside LWLockRelease.
1856 */
1858 for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
1860
1861 return result;
1862}
1863
1864/*
1865 * CheckDeadLockAlert - Handle the expiry of deadlock_timeout.
1866 *
1867 * NB: Runs inside a signal handler, be careful.
1868 */
1869void
1871{
1872 int save_errno = errno;
1873
1874 got_deadlock_timeout = true;
1875
1876 /*
1877 * Have to set the latch again, even if handle_sig_alarm already did. Back
1878 * then got_deadlock_timeout wasn't yet set... It's unlikely that this
1879 * ever would be a problem, but setting a set latch again is cheap.
1880 *
1881 * Note that, when this function runs inside procsignal_sigusr1_handler(),
1882 * the handler function sets the latch again after the latch is set here.
1883 */
1885 errno = save_errno;
1886}
1887
1888/*
1889 * GetLockHoldersAndWaiters - get lock holders and waiters for a lock
1890 *
1891 * Fill lock_holders_sbuf and lock_waiters_sbuf with the PIDs of processes holding
1892 * and waiting for the lock, and set lockHoldersNum to the number of lock holders.
1893 *
1894 * The lock table's partition lock must be held on entry and remains held on exit.
1895 */
1896void
1899{
1902 LOCK *lock = locallock->lock;
1903 bool first_holder = true,
1904 first_waiter = true;
1905
1906#ifdef USE_ASSERT_CHECKING
1907 {
1908 uint32 hashcode = locallock->hashcode;
1910
1912 }
1913#endif
1914
1915 *lockHoldersNum = 0;
1916
1917 /*
1918 * Loop over the lock's procLocks to gather a list of all holders and
1919 * waiters. Thus we will be able to provide more detailed information for
1920 * lock debugging purposes.
1921 *
1922 * lock->procLocks contains all processes which hold or wait for this
1923 * lock.
1924 */
1926 {
1927 curproclock =
1928 dlist_container(PROCLOCK, lockLink, proc_iter.cur);
1929
1930 /*
1931 * We are a waiter if myProc->waitProcLock == curproclock; we are a
1932 * holder if it is NULL or something different.
1933 */
1934 if (curproclock->tag.myProc->waitProcLock == curproclock)
1935 {
1936 if (first_waiter)
1937 {
1939 curproclock->tag.myProc->pid);
1940 first_waiter = false;
1941 }
1942 else
1944 curproclock->tag.myProc->pid);
1945 }
1946 else
1947 {
1948 if (first_holder)
1949 {
1951 curproclock->tag.myProc->pid);
1952 first_holder = false;
1953 }
1954 else
1956 curproclock->tag.myProc->pid);
1957
1958 (*lockHoldersNum)++;
1959 }
1960 }
1961}
1962
1963/*
1964 * ProcWaitForSignal - wait for a signal from another backend.
1965 *
1966 * As this uses the generic process latch the caller has to be robust against
1967 * unrelated wakeups: Always check that the desired state has occurred, and
1968 * wait again if not.
1969 */
1970void
1972{
1974 wait_event_info);
1977}
1978
1979/*
1980 * ProcSendSignal - set the latch of a backend identified by ProcNumber
1981 */
1982void
1984{
1986 elog(ERROR, "procNumber out of range");
1987
1988 SetLatch(&GetPGProcByNumber(procNumber)->procLatch);
1989}
1990
1991/*
1992 * BecomeLockGroupLeader - designate process as lock group leader
1993 *
1994 * Once this function has returned, other processes can join the lock group
1995 * by calling BecomeLockGroupMember.
1996 */
1997void
1999{
2001
2002 /* If we already did it, we don't need to do it again. */
2004 return;
2005
2006 /* We had better not be a follower. */
2008
2009 /* Create single-member group, containing only ourselves. */
2015}
2016
2017/*
2018 * BecomeLockGroupMember - designate process as lock group member
2019 *
2020 * This is pretty straightforward except for the possibility that the leader
2021 * whose group we're trying to join might exit before we manage to do so;
2022 * and the PGPROC might get recycled for an unrelated process. To avoid
2023 * that, we require the caller to pass the PID of the intended PGPROC as
2024 * an interlock. Returns true if we successfully join the intended lock
2025 * group, and false if not.
2026 */
2027bool
2029{
2031 bool ok = false;
2032
2033 /* Group leader can't become member of group */
2034 Assert(MyProc != leader);
2035
2036 /* Can't already be a member of a group */
2038
2039 /* PID must be valid. */
2040 Assert(pid != 0);
2041
2042 /*
2043 * Get lock protecting the group fields. Note LockHashPartitionLockByProc
2044 * calculates the proc number based on the PGPROC slot without looking at
2045 * its contents, so we will acquire the correct lock even if the leader
2046 * PGPROC is in process of being recycled.
2047 */
2050
2051 /* Is this the leader we're looking for? */
2052 if (leader->pid == pid && leader->lockGroupLeader == leader)
2053 {
2054 /* OK, join the group */
2055 ok = true;
2056 MyProc->lockGroupLeader = leader;
2058 }
2060
2061 return ok;
2062}
static void pg_atomic_write_u64(volatile pg_atomic_uint64 *ptr, uint64 val)
Definition atomics.h:485
static void pg_atomic_init_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
Definition atomics.h:219
static void pg_atomic_write_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
Definition atomics.h:274
static uint32 pg_atomic_read_u32(volatile pg_atomic_uint32 *ptr)
Definition atomics.h:237
static void pg_atomic_init_u64(volatile pg_atomic_uint64 *ptr, uint64 val)
Definition atomics.h:453
int autovacuum_worker_slots
Definition autovacuum.c:118
void TimestampDifference(TimestampTz start_time, TimestampTz stop_time, long *secs, int *microsecs)
Definition timestamp.c:1719
bool TimestampDifferenceExceeds(TimestampTz start_time, TimestampTz stop_time, int msec)
Definition timestamp.c:1779
TimestampTz GetCurrentTimestamp(void)
Definition timestamp.c:1643
Datum now(PG_FUNCTION_ARGS)
Definition timestamp.c:1607
#define MAXALIGN(LEN)
Definition c.h:859
uint8_t uint8
Definition c.h:577
#define PG_USED_FOR_ASSERTS_ONLY
Definition c.h:235
#define Assert(condition)
Definition c.h:906
uint64_t uint64
Definition c.h:580
uint32_t uint32
Definition c.h:579
#define MemSet(start, val, len)
Definition c.h:1056
uint32 TransactionId
Definition c.h:699
size_t Size
Definition c.h:652
#define TRANSACTION_STATUS_IN_PROGRESS
Definition clog.h:27
bool ConditionVariableCancelSleep(void)
int64 TimestampTz
Definition timestamp.h:39
PGPROC * GetBlockingAutoVacuumPgproc(void)
Definition deadlock.c:290
void RememberSimpleDeadLock(PGPROC *proc1, LOCKMODE lockmode, LOCK *lock, PGPROC *proc2)
Definition deadlock.c:1147
void InitDeadLockChecking(void)
Definition deadlock.c:143
DeadLockState DeadLockCheck(PGPROC *proc)
Definition deadlock.c:220
Datum arg
Definition elog.c:1322
bool message_level_is_interesting(int elevel)
Definition elog.c:284
int errcode(int sqlerrcode)
Definition elog.c:874
int errmsg(const char *fmt,...)
Definition elog.c:1093
#define LOG
Definition elog.h:31
#define FATAL
Definition elog.h:41
int int errmsg_internal(const char *fmt,...) pg_attribute_printf(1
#define WARNING
Definition elog.h:36
#define PANIC
Definition elog.h:42
#define DEBUG1
Definition elog.h:30
#define ERROR
Definition elog.h:39
int int int errdetail_log(const char *fmt,...) pg_attribute_printf(1
#define elog(elevel,...)
Definition elog.h:226
#define ereport(elevel,...)
Definition elog.h:150
int int int int errdetail_log_plural(const char *fmt_singular, const char *fmt_plural, unsigned long n,...) pg_attribute_printf(1
int MyProcPid
Definition globals.c:47
ProcNumber MyProcNumber
Definition globals.c:90
bool IsUnderPostmaster
Definition globals.c:120
int MaxConnections
Definition globals.c:143
int MaxBackends
Definition globals.c:146
struct Latch * MyLatch
Definition globals.c:63
int max_worker_processes
Definition globals.c:144
static dlist_node * dlist_pop_head_node(dlist_head *head)
Definition ilist.h:450
#define dlist_foreach(iter, lhead)
Definition ilist.h:623
static void dlist_init(dlist_head *head)
Definition ilist.h:314
static void dclist_push_tail(dclist_head *head, dlist_node *node)
Definition ilist.h:709
static void dlist_delete(dlist_node *node)
Definition ilist.h:405
static bool dclist_is_empty(const dclist_head *head)
Definition ilist.h:682
static bool dlist_node_is_detached(const dlist_node *node)
Definition ilist.h:525
static void dlist_push_head(dlist_head *head, dlist_node *node)
Definition ilist.h:347
static bool dlist_is_empty(const dlist_head *head)
Definition ilist.h:336
static void dlist_push_tail(dlist_head *head, dlist_node *node)
Definition ilist.h:364
static void dclist_delete_from_thoroughly(dclist_head *head, dlist_node *node)
Definition ilist.h:776
static void dclist_insert_before(dclist_head *head, dlist_node *before, dlist_node *node)
Definition ilist.h:745
#define dclist_foreach_modify(iter, lhead)
Definition ilist.h:973
static void dlist_node_init(dlist_node *node)
Definition ilist.h:325
#define dlist_container(type, membername, ptr)
Definition ilist.h:593
#define dclist_foreach(iter, lhead)
Definition ilist.h:970
void on_shmem_exit(pg_on_exit_callback function, Datum arg)
Definition ipc.c:372
int j
Definition isn.c:78
int i
Definition isn.c:77
void OwnLatch(Latch *latch)
Definition latch.c:126
void DisownLatch(Latch *latch)
Definition latch.c:144
void InitSharedLatch(Latch *latch)
Definition latch.c:93
void SetLatch(Latch *latch)
Definition latch.c:290
void ResetLatch(Latch *latch)
Definition latch.c:374
int WaitLatch(Latch *latch, int wakeEvents, long timeout, uint32 wait_event_info)
Definition latch.c:172
void DescribeLockTag(StringInfo buf, const LOCKTAG *tag)
Definition lmgr.c:1249
void GrantAwaitedLock(void)
Definition lock.c:1889
void GrantLock(LOCK *lock, PROCLOCK *proclock, LOCKMODE lockmode)
Definition lock.c:1658
VirtualTransactionId * GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
Definition lock.c:3069
void RemoveFromWaitQueue(PGPROC *proc, uint32 hashcode)
Definition lock.c:2046
void LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
Definition lock.c:2307
void ResetAwaitedLock(void)
Definition lock.c:1907
void AbortStrongLockAcquire(void)
Definition lock.c:1860
const char * GetLockmodeName(LOCKMETHODID lockmethodid, LOCKMODE mode)
Definition lock.c:4252
LOCALLOCK * GetAwaitedLock(void)
Definition lock.c:1898
int FastPathLockGroupsPerBackend
Definition lock.c:202
uint32 LockTagHashCode(const LOCKTAG *locktag)
Definition lock.c:557
bool LockCheckConflicts(LockMethod lockMethodTable, LOCKMODE lockmode, LOCK *lock, PROCLOCK *proclock)
Definition lock.c:1529
#define DEFAULT_LOCKMETHOD
Definition lock.h:127
#define LockHashPartitionLock(hashcode)
Definition lock.h:528
#define USER_LOCKMETHOD
Definition lock.h:128
#define InvalidLocalTransactionId
Definition lock.h:67
DeadLockState
Definition lock.h:511
@ DS_HARD_DEADLOCK
Definition lock.h:515
@ DS_BLOCKED_BY_AUTOVACUUM
Definition lock.h:516
@ DS_NO_DEADLOCK
Definition lock.h:513
@ DS_NOT_YET_CHECKED
Definition lock.h:512
@ DS_SOFT_DEADLOCK
Definition lock.h:514
#define LOCKBIT_ON(lockmode)
Definition lock.h:86
#define LockHashPartitionLockByProc(leader_pgproc)
Definition lock.h:543
#define LockHashPartitionLockByIndex(i)
Definition lock.h:531
int LOCKMODE
Definition lockdefs.h:26
#define AccessExclusiveLock
Definition lockdefs.h:43
int LOCKMASK
Definition lockdefs.h:25
bool LWLockHeldByMe(LWLock *lock)
Definition lwlock.c:1911
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
Definition lwlock.c:1176
bool LWLockHeldByMeInMode(LWLock *lock, LWLockMode mode)
Definition lwlock.c:1955
void LWLockRelease(LWLock *lock)
Definition lwlock.c:1793
void LWLockReleaseAll(void)
Definition lwlock.c:1892
void LWLockInitialize(LWLock *lock, int tranche_id)
Definition lwlock.c:698
void InitLWLockAccess(void)
Definition lwlock.c:550
@ LW_WS_NOT_WAITING
Definition lwlock.h:30
#define NUM_LOCK_PARTITIONS
Definition lwlock.h:95
@ LW_SHARED
Definition lwlock.h:113
@ LW_EXCLUSIVE
Definition lwlock.h:112
void pfree(void *pointer)
Definition mcxt.c:1616
#define RESUME_INTERRUPTS()
Definition miscadmin.h:136
#define AmAutoVacuumWorkerProcess()
Definition miscadmin.h:383
#define AmBackgroundWorkerProcess()
Definition miscadmin.h:384
#define CHECK_FOR_INTERRUPTS()
Definition miscadmin.h:123
#define AmWalSenderProcess()
Definition miscadmin.h:385
#define HOLD_INTERRUPTS()
Definition miscadmin.h:134
#define AmSpecialWorkerProcess()
Definition miscadmin.h:396
void SwitchToSharedLatch(void)
Definition miscinit.c:215
BackendType MyBackendType
Definition miscinit.c:64
void SwitchBackToLocalLatch(void)
Definition miscinit.c:242
static char buf[DEFAULT_XLOG_SEG_SIZE]
void RegisterPostmasterChildActive(void)
Definition pmsignal.c:290
Size PGSemaphoreShmemSize(int maxSemas)
Definition posix_sema.c:165
void PGReserveSemaphores(int maxSemas)
Definition posix_sema.c:196
void PGSemaphoreReset(PGSemaphore sema)
Definition posix_sema.c:290
PGSemaphore PGSemaphoreCreate(void)
Definition posix_sema.c:257
uint64_t Datum
Definition postgres.h:70
static Datum Int32GetDatum(int32 X)
Definition postgres.h:222
static int32 DatumGetInt32(Datum X)
Definition postgres.h:212
#define NON_EXEC_STATIC
Definition postgres.h:570
#define InvalidOid
unsigned int Oid
static int fb(int x)
#define NUM_AUXILIARY_PROCS
Definition proc.h:527
#define FastPathLockSlotsPerBackend()
Definition proc.h:94
#define GetPGProcByNumber(n)
Definition proc.h:504
#define FIRST_PREPARED_XACT_PROC_NUMBER
Definition proc.h:529
#define PROC_VACUUM_FOR_WRAPAROUND
Definition proc.h:61
#define GetNumberFromPGProc(proc)
Definition proc.h:505
#define NUM_SPECIAL_WORKER_PROCS
Definition proc.h:514
ProcWaitStatus
Definition proc.h:141
@ PROC_WAIT_STATUS_OK
Definition proc.h:142
@ PROC_WAIT_STATUS_WAITING
Definition proc.h:143
@ PROC_WAIT_STATUS_ERROR
Definition proc.h:144
#define PROC_IS_AUTOVACUUM
Definition proc.h:58
void ProcArrayAdd(PGPROC *proc)
Definition procarray.c:471
void ProcArrayRemove(PGPROC *proc, TransactionId latestXid)
Definition procarray.c:568
#define INVALID_PROC_NUMBER
Definition procnumber.h:26
int ProcNumber
Definition procnumber.h:24
void set_spins_per_delay(int shared_spins_per_delay)
Definition s_lock.c:207
int update_spins_per_delay(int shared_spins_per_delay)
Definition s_lock.c:218
#define DEFAULT_SPINS_PER_DELAY
Definition s_lock.h:718
Size add_size(Size s1, Size s2)
Definition shmem.c:482
Size mul_size(Size s1, Size s2)
Definition shmem.c:497
void * ShmemInitStruct(const char *name, Size size, bool *foundPtr)
Definition shmem.c:378
static void SpinLockRelease(volatile slock_t *lock)
Definition spin.h:62
static void SpinLockAcquire(volatile slock_t *lock)
Definition spin.h:56
static void SpinLockInit(volatile slock_t *lock)
Definition spin.h:50
ProcWaitStatus JoinWaitQueue(LOCALLOCK *locallock, LockMethod lockMethodTable, bool dontWait)
Definition proc.c:1132
void ProcSendSignal(ProcNumber procNumber)
Definition proc.c:1983
bool log_lock_waits
Definition proc.c:64
int IdleSessionTimeout
Definition proc.c:63
PGPROC * MyProc
Definition proc.c:67
Size ProcGlobalShmemSize(void)
Definition proc.c:129
void ProcWakeup(PGPROC *proc, ProcWaitStatus waitStatus)
Definition proc.c:1704
int StatementTimeout
Definition proc.c:59
bool HaveNFreeProcs(int n, int *nfree)
Definition proc.c:773
static void RemoveProcFromArray(int code, Datum arg)
Definition proc.c:899
void InitAuxiliaryProcess(void)
Definition proc.c:605
PGPROC * PreparedXactProcs
Definition proc.c:72
int IdleInTransactionSessionTimeout
Definition proc.c:61
void GetLockHoldersAndWaiters(LOCALLOCK *locallock, StringInfo lock_holders_sbuf, StringInfo lock_waiters_sbuf, int *lockHoldersNum)
Definition proc.c:1897
NON_EXEC_STATIC PGPROC * AuxiliaryProcs
Definition proc.c:71
int GetStartupBufferPinWaitBufId(void)
Definition proc.c:757
ProcWaitStatus ProcSleep(LOCALLOCK *locallock)
Definition proc.c:1301
int DeadlockTimeout
Definition proc.c:58
static Size PGProcShmemSize(void)
Definition proc.c:87
int TransactionTimeout
Definition proc.c:62
void ProcLockWakeup(LockMethod lockMethodTable, LOCK *lock)
Definition proc.c:1732
PROC_HDR * ProcGlobal
Definition proc.c:70
static Size FastPathLockShmemSize(void)
Definition proc.c:105
int ProcGlobalSemas(void)
Definition proc.c:148
void ProcReleaseLocks(bool isCommit)
Definition proc.c:882
void LockErrorCleanup(void)
Definition proc.c:804
bool BecomeLockGroupMember(PGPROC *leader, int pid)
Definition proc.c:2028
void BecomeLockGroupLeader(void)
Definition proc.c:1998
static void ProcKill(int code, Datum arg)
Definition proc.c:910
void InitProcess(void)
Definition proc.c:379
void CheckDeadLockAlert(void)
Definition proc.c:1870
void InitProcessPhase2(void)
Definition proc.c:570
void InitProcGlobal(void)
Definition proc.c:183
static volatile sig_atomic_t got_deadlock_timeout
Definition proc.c:75
PGPROC * AuxiliaryPidGetProc(int pid)
Definition proc.c:1083
void SetStartupBufferPinWaitBufId(int bufid)
Definition proc.c:745
void ProcWaitForSignal(uint32 wait_event_info)
Definition proc.c:1971
int LockTimeout
Definition proc.c:60
static void AuxiliaryProcKill(int code, Datum arg)
Definition proc.c:1032
static DeadLockState CheckDeadLock(void)
Definition proc.c:1779
void CheckRecoveryConflictDeadlock(void)
Definition standby.c:905
bool log_recovery_conflict_waits
Definition standby.c:42
void LogRecoveryConflict(RecoveryConflictReason reason, TimestampTz wait_start, TimestampTz now, VirtualTransactionId *wait_list, bool still_waiting)
Definition standby.c:274
void ResolveRecoveryConflictWithLock(LOCKTAG locktag, bool logging_conflict)
Definition standby.c:624
@ RECOVERY_CONFLICT_LOCK
Definition standby.h:37
void appendStringInfo(StringInfo str, const char *fmt,...)
Definition stringinfo.c:145
void initStringInfo(StringInfo str)
Definition stringinfo.c:97
uint8 locktag_lockmethodid
Definition lock.h:173
Definition lock.h:311
LOCKTAG tag
Definition lock.h:313
dclist_head waitProcs
Definition lock.h:319
LOCKMASK waitMask
Definition lock.h:317
dlist_head procLocks
Definition lock.h:318
Definition proc.h:176
LWLock fpInfoLock
Definition proc.h:316
TransactionId xmin
Definition proc.h:234
bool procArrayGroupMember
Definition proc.h:342
LocalTransactionId lxid
Definition proc.h:223
PROCLOCK * waitProcLock
Definition proc.h:298
dlist_node freeProcsLink
Definition proc.h:178
XLogRecPtr clogGroupMemberLsn
Definition proc.h:363
pg_atomic_uint32 procArrayGroupNext
Definition proc.h:344
uint8 lwWaitMode
Definition proc.h:276
dlist_head lockGroupMembers
Definition proc.h:291
uint32 wait_event_info
Definition proc.h:370
dlist_head * procgloballist
Definition proc.h:177
Oid * fpRelId
Definition proc.h:318
BackendType backendType
Definition proc.h:190
uint8 statusFlags
Definition proc.h:202
TransactionId clogGroupMemberXid
Definition proc.h:358
Oid databaseId
Definition proc.h:193
int64 clogGroupMemberPage
Definition proc.h:361
bool clogGroupMember
Definition proc.h:356
uint64 * fpLockBits
Definition proc.h:317
struct PGPROC::@133 vxid
pg_atomic_uint64 waitStart
Definition proc.h:303
bool fpVXIDLock
Definition proc.h:319
ProcNumber procNumber
Definition proc.h:218
int pid
Definition proc.h:189
XLogRecPtr waitLSN
Definition proc.h:333
dlist_node syncRepLinks
Definition proc.h:335
int syncRepState
Definition proc.h:334
pg_atomic_uint32 clogGroupNext
Definition proc.h:357
dlist_node lockGroupLink
Definition proc.h:292
XidStatus clogGroupMemberXidStatus
Definition proc.h:359
LOCK * waitLock
Definition proc.h:296
TransactionId xid
Definition proc.h:229
LOCKMODE waitLockMode
Definition proc.h:299
int delayChkptFlags
Definition proc.h:252
dlist_node waitLink
Definition proc.h:297
PGPROC * lockGroupLeader
Definition proc.h:290
pg_atomic_uint32 pendingRecoveryConflicts
Definition proc.h:262
LocalTransactionId fpLocalTransactionId
Definition proc.h:320
TransactionId procArrayGroupMemberXid
Definition proc.h:350
LOCKMASK heldLocks
Definition proc.h:300
PGSemaphore sem
Definition proc.h:250
dlist_head myProcLocks[NUM_LOCK_PARTITIONS]
Definition proc.h:313
Oid roleId
Definition proc.h:194
ProcWaitStatus waitStatus
Definition proc.h:306
Oid tempNamespaceId
Definition proc.h:196
uint8 lwWaiting
Definition proc.h:275
Latch procLatch
Definition proc.h:248
LOCKMASK holdMask
Definition lock.h:378
uint8 * statusFlags
Definition proc.h:456
XidCacheStatus * subxidStates
Definition proc.h:450
dlist_head autovacFreeProcs
Definition proc.h:473
dlist_head freeProcs
Definition proc.h:471
ProcNumber checkpointerProc
Definition proc.h:489
slock_t freeProcsLock
Definition proc.h:468
int startupBufferPinWaitBufId
Definition proc.h:494
PGPROC * allProcs
Definition proc.h:441
pg_atomic_uint32 clogGroupFirst
Definition proc.h:482
int spins_per_delay
Definition proc.h:492
TransactionId * xids
Definition proc.h:444
dlist_head walsenderFreeProcs
Definition proc.h:477
dlist_head bgworkerFreeProcs
Definition proc.h:475
ProcNumber walwriterProc
Definition proc.h:488
pg_atomic_uint32 procArrayGroupFirst
Definition proc.h:480
uint32 allProcCount
Definition proc.h:459
dlist_node * cur
Definition ilist.h:179
Definition type.h:96
void SyncRepCleanupAtProcExit(void)
Definition syncrep.c:416
#define SYNC_REP_NOT_WAITING
Definition syncrep.h:30
void enable_timeout_after(TimeoutId id, int delay_ms)
Definition timeout.c:560
TimestampTz get_timeout_start_time(TimeoutId id)
Definition timeout.c:813
void disable_timeout(TimeoutId id, bool keep_indicator)
Definition timeout.c:685
void enable_timeouts(const EnableTimeoutParams *timeouts, int count)
Definition timeout.c:630
void disable_timeouts(const DisableTimeoutParams *timeouts, int count)
Definition timeout.c:718
@ LOCK_TIMEOUT
Definition timeout.h:28
@ DEADLOCK_TIMEOUT
Definition timeout.h:27
@ TMPARAM_AFTER
Definition timeout.h:53
#define InvalidTransactionId
Definition transam.h:31
int max_prepared_xacts
Definition twophase.c:116
#define PG_WAIT_LOCK
void pgstat_set_wait_event_storage(uint32 *wait_event_info)
Definition wait_event.c:349
void pgstat_reset_wait_event_storage(void)
Definition wait_event.c:361
#define WL_EXIT_ON_PM_DEATH
#define WL_LATCH_SET
int max_wal_senders
Definition walsender.c:129
#define kill(pid, sig)
Definition win32_port.h:490
bool RecoveryInProgress(void)
Definition xlog.c:6443
#define InvalidXLogRecPtr
Definition xlogdefs.h:28
bool InRecovery
Definition xlogutils.c:50
#define InHotStandby
Definition xlogutils.h:60
void WaitLSNCleanup(void)
Definition xlogwait.c:338