PostgreSQL Source Code git master
Loading...
Searching...
No Matches
lock.c
Go to the documentation of this file.
1/*-------------------------------------------------------------------------
2 *
3 * lock.c
4 * POSTGRES primary lock mechanism
5 *
6 * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
7 * Portions Copyright (c) 1994, Regents of the University of California
8 *
9 *
10 * IDENTIFICATION
11 * src/backend/storage/lmgr/lock.c
12 *
13 * NOTES
14 * A lock table is a shared memory hash table. When
15 * a process tries to acquire a lock of a type that conflicts
16 * with existing locks, it is put to sleep using the routines
17 * in storage/lmgr/proc.c.
18 *
19 * For the most part, this code should be invoked via lmgr.c
20 * or another lock-management module, not directly.
21 *
22 * Interface:
23 *
24 * LockManagerShmemInit(), GetLocksMethodTable(), GetLockTagsMethodTable(),
25 * LockAcquire(), LockRelease(), LockReleaseAll(),
26 * LockCheckConflicts(), GrantLock()
27 *
28 *-------------------------------------------------------------------------
29 */
30#include "postgres.h"
31
32#include <signal.h>
33#include <unistd.h>
34
35#include "access/transam.h"
36#include "access/twophase.h"
38#include "access/xlog.h"
39#include "access/xlogutils.h"
40#include "miscadmin.h"
41#include "pg_trace.h"
42#include "pgstat.h"
43#include "storage/lmgr.h"
44#include "storage/proc.h"
45#include "storage/procarray.h"
46#include "storage/shmem.h"
47#include "storage/spin.h"
48#include "storage/standby.h"
49#include "storage/subsystems.h"
50#include "utils/memutils.h"
51#include "utils/ps_status.h"
52#include "utils/resowner.h"
53
54
55/* GUC variables */
56int max_locks_per_xact; /* used to set the lock table size */
57bool log_lock_failures = false;
58
/*
 * Upper bound on the number of lock-table entries: assume each backend,
 * plus each allowed prepared transaction, may hold up to
 * max_locks_per_xact locks at once.  Uses mul_size/add_size so that an
 * overflowing product is reported rather than silently wrapping.
 */
59#define NLOCKENTS() \
60 mul_size(max_locks_per_xact, add_size(MaxBackends, max_prepared_xacts))
61
62
63/*
64 * Data structures defining the semantics of the standard lock methods.
65 *
66 * The conflict table defines the semantics of the various lock modes.
67 */
68static const LOCKMASK LockConflicts[] = {
69 0,
70
71 /* AccessShareLock */
73
74 /* RowShareLock */
76
77 /* RowExclusiveLock */
80
81 /* ShareUpdateExclusiveLock */
85
86 /* ShareLock */
90
91 /* ShareRowExclusiveLock */
95
96 /* ExclusiveLock */
101
102 /* AccessExclusiveLock */
107
108};
109
110/* Names of lock modes, for debug printouts */
/*
 * Indexed by LOCKMODE.  Entry 0 is a placeholder ("INVALID") because lock
 * mode numbering starts at 1; the remaining entries must stay in the same
 * order as the numeric lock-mode values.
 */
111static const char *const lock_mode_names[] =
112{
113 "INVALID",
114 "AccessShareLock",
115 "RowShareLock",
116 "RowExclusiveLock",
117 "ShareUpdateExclusiveLock",
118 "ShareLock",
119 "ShareRowExclusiveLock",
120 "ExclusiveLock",
121 "AccessExclusiveLock"
122};
123
124#ifndef LOCK_DEBUG
/*
 * Stand-in "trace enabled" flag referenced by the lock-method tables when
 * LOCK_DEBUG tracing is compiled out; always false.
 */
125static bool Dummy_trace = false;
126#endif
127
132#ifdef LOCK_DEBUG
134#else
136#endif
137};
138
143#ifdef LOCK_DEBUG
145#else
147#endif
148};
149
150/*
151 * map from lock method id to the lock table data structures
152 */
153static const LockMethod LockMethods[] = {
154 NULL,
157};
158
159
160/* Record that's written to 2PC state file when a lock is persisted */
166
167
168/*
169 * Count of the number of fast path lock slots we believe to be used. This
170 * might be higher than the real number if another backend has transferred
171 * our locks to the primary lock table, but it can never be lower than the
172 * real value, since only we can acquire locks on our own behalf.
173 *
174 * XXX Allocate a static array of the maximum size. We could use a pointer
175 * and then allocate just the right size to save a couple kB, but then we
176 * would have to initialize that, while for the static array that happens
177 * automatically. Doesn't seem worth the extra complexity.
178 */
180
181/*
182 * Flag to indicate if the relation extension lock is held by this backend.
183 * This flag is used to ensure that while holding the relation extension lock
184 * we don't try to acquire a heavyweight lock on any other object. This
185 * restriction implies that the relation extension lock won't ever participate
186 * in the deadlock cycle because we can never wait for any other heavyweight
187 * lock after acquiring this lock.
188 *
189 * Such a restriction is okay for relation extension locks as unlike other
190 * heavyweight locks these are not held till the transaction end. These are
191 * taken for a short duration to extend a particular relation and then
192 * released.
193 */
195
196/*
197 * Number of fast-path locks per backend - size of the arrays in PGPROC.
198 * This is set only once during start, before initializing shared memory,
199 * and remains constant after that.
200 *
201 * We set the limit based on max_locks_per_transaction GUC, because that's
202 * the best information about expected number of locks per backend we have.
203 * See InitializeFastPathLocks() for details.
204 */
206
207/*
208 * Macros to calculate the fast-path group and index for a relation.
209 *
210 * The formula is a simple hash function, designed to spread the OIDs a bit,
211 * so that even contiguous values end up in different groups. In most cases
212 * there will be gaps anyway, but the multiplication should help a bit.
213 *
214 * The selected constant (49157) is a prime not too close to 2^k, and it's
215 * small enough to not cause overflows (in 64-bit).
216 *
217 * We can assume that FastPathLockGroupsPerBackend is a power-of-two per
218 * InitializeFastPathLocks().
219 */
220#define FAST_PATH_REL_GROUP(rel) \
221 (((uint64) (rel) * 49157) & (FastPathLockGroupsPerBackend - 1))
222
223/*
224 * Given the group/slot indexes, calculate the slot index in the whole array
225 * of fast-path lock slots.
226 */
227#define FAST_PATH_SLOT(group, index) \
228 (AssertMacro((uint32) (group) < FastPathLockGroupsPerBackend), \
229 AssertMacro((uint32) (index) < FP_LOCK_SLOTS_PER_GROUP), \
230 ((group) * FP_LOCK_SLOTS_PER_GROUP + (index)))
231
232/*
233 * Given a slot index (into the whole per-backend array), calculated using
234 * the FAST_PATH_SLOT macro, split it into group and index (in the group).
235 */
236#define FAST_PATH_GROUP(index) \
237 (AssertMacro((uint32) (index) < FastPathLockSlotsPerBackend()), \
238 ((index) / FP_LOCK_SLOTS_PER_GROUP))
239#define FAST_PATH_INDEX(index) \
240 (AssertMacro((uint32) (index) < FastPathLockSlotsPerBackend()), \
241 ((index) % FP_LOCK_SLOTS_PER_GROUP))
242
243/* Macros for manipulating proc->fpLockBits */
/*
 * Each slot gets FAST_PATH_BITS_PER_SLOT (3) bits, one per fast-path-eligible
 * lock mode; the asserts in FAST_PATH_BIT_POSITION confirm that eligible
 * modes are exactly those in [FAST_PATH_LOCKNUMBER_OFFSET,
 * FAST_PATH_LOCKNUMBER_OFFSET + FAST_PATH_BITS_PER_SLOT).
 */
244#define FAST_PATH_BITS_PER_SLOT 3
/* Lock modes are numbered from 1, so subtract this when mapping a mode to a bit. */
245#define FAST_PATH_LOCKNUMBER_OFFSET 1
/* Mask covering one slot's group of bits. */
246#define FAST_PATH_MASK ((1 << FAST_PATH_BITS_PER_SLOT) - 1)
/* The uint64 bitmap word holding slot n's bits (one word per group). */
247#define FAST_PATH_BITS(proc, n) (proc)->fpLockBits[FAST_PATH_GROUP(n)]
/* Extract just slot n's bits, shifted down to the low-order positions. */
248#define FAST_PATH_GET_BITS(proc, n) \
249 ((FAST_PATH_BITS(proc, n) >> (FAST_PATH_BITS_PER_SLOT * FAST_PATH_INDEX(n))) & FAST_PATH_MASK)
/* Bit position within the group word for lock mode l of slot n. */
250#define FAST_PATH_BIT_POSITION(n, l) \
251 (AssertMacro((l) >= FAST_PATH_LOCKNUMBER_OFFSET), \
252 AssertMacro((l) < FAST_PATH_BITS_PER_SLOT+FAST_PATH_LOCKNUMBER_OFFSET), \
253 AssertMacro((n) < FastPathLockSlotsPerBackend()), \
254 ((l) - FAST_PATH_LOCKNUMBER_OFFSET + FAST_PATH_BITS_PER_SLOT * (FAST_PATH_INDEX(n))))
/* Set / clear / test the bit recording that slot n holds lock mode l. */
255#define FAST_PATH_SET_LOCKMODE(proc, n, l) \
256 FAST_PATH_BITS(proc, n) |= UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)
257#define FAST_PATH_CLEAR_LOCKMODE(proc, n, l) \
258 FAST_PATH_BITS(proc, n) &= ~(UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l))
259#define FAST_PATH_CHECK_LOCKMODE(proc, n, l) \
260 (FAST_PATH_BITS(proc, n) & (UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)))
261
262/*
263 * The fast-path lock mechanism is concerned only with relation locks on
264 * unshared relations by backends bound to a database. The fast-path
265 * mechanism exists mostly to accelerate acquisition and release of locks
266 * that rarely conflict. Because ShareUpdateExclusiveLock is
267 * self-conflicting, it can't use the fast-path mechanism; but it also does
268 * not conflict with any of the locks that do, so we can ignore it completely.
269 */
/*
 * True if this (locktag, mode) pair may be taken via the per-backend
 * fast path: a relation lock in our own (non-shared) database, in a mode
 * weaker than ShareUpdateExclusiveLock.
 */
270#define EligibleForRelationFastPath(locktag, mode) \
271 ((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
272 (locktag)->locktag_type == LOCKTAG_RELATION && \
273 (locktag)->locktag_field1 == MyDatabaseId && \
274 MyDatabaseId != InvalidOid && \
275 (mode) < ShareUpdateExclusiveLock)
/*
 * True if acquiring this (locktag, mode) pair could conflict with some
 * backend's fast-path lock: a relation lock stronger than
 * ShareUpdateExclusiveLock, in any real database (field1 is the database
 * OID; shared relations have InvalidOid there and are never fast-pathed).
 */
276#define ConflictsWithRelationFastPath(locktag, mode) \
277 ((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
278 (locktag)->locktag_type == LOCKTAG_RELATION && \
279 (locktag)->locktag_field1 != InvalidOid && \
280 (mode) > ShareUpdateExclusiveLock)
281
282static bool FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode);
283static bool FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode);
285 const LOCKTAG *locktag, uint32 hashcode);
287
288/*
289 * To make the fast-path lock mechanism work, we must have some way of
290 * preventing the use of the fast-path when a conflicting lock might be present.
291 * We partition the locktag space into FAST_PATH_STRONG_LOCK_HASH_PARTITIONS,
292 * and maintain an integer count of the number of "strong" lockers
293 * in each partition. When any "strong" lockers are present (which is
294 * hopefully not very often), the fast-path mechanism can't be used, and we
295 * must fall back to the slower method of pushing matching locks directly
296 * into the main lock tables.
297 *
298 * The deadlock detector does not know anything about the fast path mechanism,
299 * so any locks that might be involved in a deadlock must be transferred from
300 * the fast-path queues to the main lock table.
301 */
302
/* 2^10 = 1024 partitions of "strong" lock counters. */
303#define FAST_PATH_STRONG_LOCK_HASH_BITS 10
304#define FAST_PATH_STRONG_LOCK_HASH_PARTITIONS \
305 (1 << FAST_PATH_STRONG_LOCK_HASH_BITS)
/* Map a locktag hashcode to its strong-lock counter partition. */
306#define FastPathStrongLockHashPartition(hashcode) \
307 ((hashcode) % FAST_PATH_STRONG_LOCK_HASH_PARTITIONS)
308
314
316
317static void LockManagerShmemRequest(void *arg);
318static void LockManagerShmemInit(void *arg);
319
324
325
326/*
327 * Pointers to hash tables containing lock state
328 *
329 * The LockMethodLockHash and LockMethodProcLockHash hash tables are in
330 * shared memory; LockMethodLocalHash is local to each backend.
331 */
335
336
337/* private state for error cleanup */
341
342
343#ifdef LOCK_DEBUG
344
345/*------
346 * The following configuration options are available for lock debugging:
347 *
348 * TRACE_LOCKS -- give a bunch of output what's going on in this file
349 * TRACE_USERLOCKS -- same but for user locks
350 * TRACE_LOCK_OIDMIN-- do not trace locks for tables below this oid
351 * (use to avoid output on system tables)
352 * TRACE_LOCK_TABLE -- trace locks on this table (oid) unconditionally
353 * DEBUG_DEADLOCKS -- currently dumps locks at untimely occasions ;)
354 *
355 * Furthermore, but in storage/lmgr/lwlock.c:
356 * TRACE_LWLOCKS -- trace lightweight locks (pretty useless)
357 *
358 * Define LOCK_DEBUG at compile time to get all these enabled.
359 * --------
360 */
361
363bool Trace_locks = false;
364bool Trace_userlocks = false;
365int Trace_lock_table = 0;
366bool Debug_deadlocks = false;
367
368
369inline static bool
370LOCK_DEBUG_ENABLED(const LOCKTAG *tag)
371{
372 return
375 || (Trace_lock_table &&
377}
378
379
380inline static void
381LOCK_PRINT(const char *where, const LOCK *lock, LOCKMODE type)
382{
383 if (LOCK_DEBUG_ENABLED(&lock->tag))
384 elog(LOG,
385 "%s: lock(%p) id(%u,%u,%u,%u,%u,%u) grantMask(%x) "
386 "req(%d,%d,%d,%d,%d,%d,%d)=%d "
387 "grant(%d,%d,%d,%d,%d,%d,%d)=%d wait(%d) type(%s)",
388 where, lock,
392 lock->grantMask,
393 lock->requested[1], lock->requested[2], lock->requested[3],
394 lock->requested[4], lock->requested[5], lock->requested[6],
395 lock->requested[7], lock->nRequested,
396 lock->granted[1], lock->granted[2], lock->granted[3],
397 lock->granted[4], lock->granted[5], lock->granted[6],
398 lock->granted[7], lock->nGranted,
399 dclist_count(&lock->waitProcs),
400 LockMethods[LOCK_LOCKMETHOD(*lock)]->lockModeNames[type]);
401}
402
403
404inline static void
405PROCLOCK_PRINT(const char *where, const PROCLOCK *proclockP)
406{
407 if (LOCK_DEBUG_ENABLED(&proclockP->tag.myLock->tag))
408 elog(LOG,
409 "%s: proclock(%p) lock(%p) method(%u) proc(%p) hold(%x)",
410 where, proclockP, proclockP->tag.myLock,
412 proclockP->tag.myProc, (int) proclockP->holdMask);
413}
414#else /* not LOCK_DEBUG */
415
416#define LOCK_PRINT(where, lock, type) ((void) 0)
417#define PROCLOCK_PRINT(where, proclockP) ((void) 0)
418#endif /* not LOCK_DEBUG */
419
420
421static uint32 proclock_hash(const void *key, Size keysize);
424 const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode);
427static void FinishStrongLockAcquire(void);
429static void waitonlock_error_callback(void *arg);
432static bool UnGrantLock(LOCK *lock, LOCKMODE lockmode,
434static void CleanUpLock(LOCK *lock, PROCLOCK *proclock,
436 bool wakeupNeeded);
438 LOCKTAG *locktag, LOCKMODE lockmode,
442
443
444/*
445 * Register the lock manager's shmem data structures.
446 *
447 * In addition to this, each backend must also call InitLockManagerAccess() to
448 * create the locallock hash table.
449 */
450static void
452{
454
455 /*
456 * Compute sizes for lock hashtables. Note that these calculations must
457 * agree with LockManagerShmemSize!
458 */
460
461 /*
462 * Hash table for LOCK structs. This stores per-locked-object
463 * information.
464 */
465 ShmemRequestHash(.name = "LOCK hash",
466 .nelems = max_table_size,
467 .ptr = &LockMethodLockHash,
468 .hash_info.keysize = sizeof(LOCKTAG),
469 .hash_info.entrysize = sizeof(LOCK),
470 .hash_info.num_partitions = NUM_LOCK_PARTITIONS,
471 .hash_flags = HASH_ELEM | HASH_BLOBS | HASH_PARTITION,
472 );
473
474 /* Assume an average of 2 holders per lock */
475 max_table_size *= 2;
476
477 ShmemRequestHash(.name = "PROCLOCK hash",
478 .nelems = max_table_size,
480 .hash_info.keysize = sizeof(PROCLOCKTAG),
481 .hash_info.entrysize = sizeof(PROCLOCK),
482 .hash_info.hash = proclock_hash,
483 .hash_info.num_partitions = NUM_LOCK_PARTITIONS,
484 .hash_flags = HASH_ELEM | HASH_FUNCTION | HASH_PARTITION,
485 );
486
487 ShmemRequestStruct(.name = "Fast Path Strong Relation Lock Data",
488 .size = sizeof(FastPathStrongRelationLockData),
489 .ptr = (void **) (void *) &FastPathStrongRelationLocks,
490 );
491}
492
493static void
498
499/*
500 * Initialize the lock manager's backend-private data structures.
501 */
502void
504{
505 /*
506 * Allocate non-shared hash table for LOCALLOCK structs. This stores lock
507 * counts and resource owner information.
508 */
509 HASHCTL info;
510
511 info.keysize = sizeof(LOCALLOCKTAG);
512 info.entrysize = sizeof(LOCALLOCK);
513
514 LockMethodLocalHash = hash_create("LOCALLOCK hash",
515 16,
516 &info,
518}
519
520
521/*
522 * Fetch the lock method table associated with a given lock
523 */
532
533/*
534 * Fetch the lock method table associated with a given locktag
535 */
544
545
546/*
547 * Compute the hash code associated with a LOCKTAG.
548 *
549 * To avoid unnecessary recomputations of the hash code, we try to do this
550 * just once per function, and then pass it around as needed. Aside from
551 * passing the hashcode to hash_search_with_hash_value(), we can extract
552 * the lock partition number from the hashcode.
553 */
554uint32
556{
557 return get_hash_value(LockMethodLockHash, locktag);
558}
559
560/*
561 * Compute the hash code associated with a PROCLOCKTAG.
562 *
563 * Because we want to use just one set of partition locks for both the
564 * LOCK and PROCLOCK hash tables, we have to make sure that PROCLOCKs
565 * fall into the same partition number as their associated LOCKs.
566 * dynahash.c expects the partition number to be the low-order bits of
567 * the hash code, and therefore a PROCLOCKTAG's hash code must have the
568 * same low-order bits as the associated LOCKTAG's hash code. We achieve
569 * this with this specialized hash function.
570 */
571static uint32
572proclock_hash(const void *key, Size keysize)
573{
574 const PROCLOCKTAG *proclocktag = (const PROCLOCKTAG *) key;
577
578 Assert(keysize == sizeof(PROCLOCKTAG));
579
580 /* Look into the associated LOCK object, and compute its hash code */
581 lockhash = LockTagHashCode(&proclocktag->myLock->tag);
582
583 /*
584 * To make the hash code also depend on the PGPROC, we xor the proc
585 * struct's address into the hash code, left-shifted so that the
586 * partition-number bits don't change. Since this is only a hash, we
587 * don't care if we lose high-order bits of the address; use an
588 * intermediate variable to suppress cast-pointer-to-int warnings.
589 */
592
593 return lockhash;
594}
595
596/*
597 * Compute the hash code associated with a PROCLOCKTAG, given the hashcode
598 * for its underlying LOCK.
599 *
600 * We use this just to avoid redundant calls of LockTagHashCode().
601 */
602static inline uint32
604{
605 uint32 lockhash = hashcode;
607
608 /*
609 * This must match proclock_hash()!
610 */
613
614 return lockhash;
615}
616
617/*
618 * Given two lock modes, return whether they would conflict.
619 */
620bool
622{
624
625 if (lockMethodTable->conflictTab[mode1] & LOCKBIT_ON(mode2))
626 return true;
627
628 return false;
629}
630
631/*
632 * LockHeldByMe -- test whether lock 'locktag' is held by the current
633 * transaction
634 *
635 * Returns true if current transaction holds a lock on 'tag' of mode
636 * 'lockmode'. If 'orstronger' is true, a stronger lockmode is also OK.
637 * ("Stronger" is defined as "numerically higher", which is a bit
638 * semantically dubious but is OK for the purposes we use this for.)
639 */
640bool
641LockHeldByMe(const LOCKTAG *locktag,
642 LOCKMODE lockmode, bool orstronger)
643{
646
647 /*
648 * See if there is a LOCALLOCK entry for this lock and lockmode
649 */
650 MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
651 localtag.lock = *locktag;
652 localtag.mode = lockmode;
653
655 &localtag,
656 HASH_FIND, NULL);
657
658 if (locallock && locallock->nLocks > 0)
659 return true;
660
661 if (orstronger)
662 {
664
665 for (slockmode = lockmode + 1;
667 slockmode++)
668 {
669 if (LockHeldByMe(locktag, slockmode, false))
670 return true;
671 }
672 }
673
674 return false;
675}
676
677#ifdef USE_ASSERT_CHECKING
678/*
679 * GetLockMethodLocalHash -- return the hash of local locks, for modules that
680 * evaluate assertions based on all locks held.
681 */
682HTAB *
684{
685 return LockMethodLocalHash;
686}
687#endif
688
689/*
690 * LockHasWaiters -- look up 'locktag' and check if releasing this
691 * lock would wake up other processes waiting for it.
692 */
693bool
694LockHasWaiters(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
695{
700 LOCK *lock;
701 PROCLOCK *proclock;
703 bool hasWaiters = false;
704
706 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
709 elog(ERROR, "unrecognized lock mode: %d", lockmode);
710
711#ifdef LOCK_DEBUG
712 if (LOCK_DEBUG_ENABLED(locktag))
713 elog(LOG, "LockHasWaiters: lock [%u,%u] %s",
714 locktag->locktag_field1, locktag->locktag_field2,
715 lockMethodTable->lockModeNames[lockmode]);
716#endif
717
718 /*
719 * Find the LOCALLOCK entry for this lock and lockmode
720 */
721 MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
722 localtag.lock = *locktag;
723 localtag.mode = lockmode;
724
726 &localtag,
727 HASH_FIND, NULL);
728
729 /*
730 * let the caller print its own error message, too. Do not ereport(ERROR).
731 */
732 if (!locallock || locallock->nLocks <= 0)
733 {
734 elog(WARNING, "you don't own a lock of type %s",
735 lockMethodTable->lockModeNames[lockmode]);
736 return false;
737 }
738
739 /*
740 * Check the shared lock table.
741 */
743
745
746 /*
747 * We don't need to re-find the lock or proclock, since we kept their
748 * addresses in the locallock table, and they couldn't have been removed
749 * while we were holding a lock on them.
750 */
751 lock = locallock->lock;
752 LOCK_PRINT("LockHasWaiters: found", lock, lockmode);
753 proclock = locallock->proclock;
754 PROCLOCK_PRINT("LockHasWaiters: found", proclock);
755
756 /*
757 * Double-check that we are actually holding a lock of the type we want to
758 * release.
759 */
760 if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
761 {
762 PROCLOCK_PRINT("LockHasWaiters: WRONGTYPE", proclock);
764 elog(WARNING, "you don't own a lock of type %s",
765 lockMethodTable->lockModeNames[lockmode]);
767 return false;
768 }
769
770 /*
771 * Do the checking.
772 */
773 if ((lockMethodTable->conflictTab[lockmode] & lock->waitMask) != 0)
774 hasWaiters = true;
775
777
778 return hasWaiters;
779}
780
781/*
782 * LockAcquire -- Check for lock conflicts, sleep if conflict found,
783 * set lock if/when no conflicts.
784 *
785 * Inputs:
786 * locktag: unique identifier for the lockable object
787 * lockmode: lock mode to acquire
788 * sessionLock: if true, acquire lock for session not current transaction
789 * dontWait: if true, don't wait to acquire lock
790 *
791 * Returns one of:
792 * LOCKACQUIRE_NOT_AVAIL lock not available, and dontWait=true
793 * LOCKACQUIRE_OK lock successfully acquired
794 * LOCKACQUIRE_ALREADY_HELD incremented count for lock already held
795 * LOCKACQUIRE_ALREADY_CLEAR incremented count for lock already clear
796 *
797 * In the normal case where dontWait=false and the caller doesn't need to
798 * distinguish a freshly acquired lock from one already taken earlier in
799 * this same transaction, there is no need to examine the return value.
800 *
801 * Side Effects: The lock is acquired and recorded in lock tables.
802 *
803 * NOTE: if we wait for the lock, there is no way to abort the wait
804 * short of aborting the transaction.
805 */
807LockAcquire(const LOCKTAG *locktag,
808 LOCKMODE lockmode,
809 bool sessionLock,
810 bool dontWait)
811{
812 return LockAcquireExtended(locktag, lockmode, sessionLock, dontWait,
813 true, NULL, false);
814}
815
816/*
817 * LockAcquireExtended - allows us to specify additional options
818 *
819 * reportMemoryError specifies whether a lock request that fills the lock
820 * table should generate an ERROR or not. Passing "false" allows the caller
821 * to attempt to recover from lock-table-full situations, perhaps by forcibly
822 * canceling other lock holders and then retrying. Note, however, that the
823 * return code for that is LOCKACQUIRE_NOT_AVAIL, so that it's unsafe to use
824 * in combination with dontWait = true, as the cause of failure couldn't be
825 * distinguished.
826 *
827 * If locallockp isn't NULL, *locallockp receives a pointer to the LOCALLOCK
828 * table entry if a lock is successfully acquired, or NULL if not.
829 *
830 * logLockFailure indicates whether to log details when a lock acquisition
831 * fails with dontWait = true.
832 */
835 LOCKMODE lockmode,
836 bool sessionLock,
837 bool dontWait,
840 bool logLockFailure)
841{
846 LOCK *lock;
847 PROCLOCK *proclock;
848 bool found;
849 ResourceOwner owner;
850 uint32 hashcode;
852 bool found_conflict;
854 bool log_lock = false;
855
857 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
860 elog(ERROR, "unrecognized lock mode: %d", lockmode);
861
862 if (RecoveryInProgress() && !InRecovery &&
863 (locktag->locktag_type == LOCKTAG_OBJECT ||
864 locktag->locktag_type == LOCKTAG_RELATION) &&
865 lockmode > RowExclusiveLock)
868 errmsg("cannot acquire lock mode %s on database objects while recovery is in progress",
869 lockMethodTable->lockModeNames[lockmode]),
870 errhint("Only RowExclusiveLock or less can be acquired on database objects during recovery.")));
871
872#ifdef LOCK_DEBUG
873 if (LOCK_DEBUG_ENABLED(locktag))
874 elog(LOG, "LockAcquire: lock [%u,%u] %s",
875 locktag->locktag_field1, locktag->locktag_field2,
876 lockMethodTable->lockModeNames[lockmode]);
877#endif
878
879 /* Identify owner for lock */
880 if (sessionLock)
881 owner = NULL;
882 else
883 owner = CurrentResourceOwner;
884
885 /*
886 * Find or create a LOCALLOCK entry for this lock and lockmode
887 */
888 MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
889 localtag.lock = *locktag;
890 localtag.mode = lockmode;
891
893 &localtag,
894 HASH_ENTER, &found);
895
896 /*
897 * if it's a new locallock object, initialize it
898 */
899 if (!found)
900 {
901 locallock->lock = NULL;
902 locallock->proclock = NULL;
903 locallock->hashcode = LockTagHashCode(&(localtag.lock));
904 locallock->nLocks = 0;
905 locallock->holdsStrongLockCount = false;
906 locallock->lockCleared = false;
907 locallock->numLockOwners = 0;
908 locallock->maxLockOwners = 8;
909 locallock->lockOwners = NULL; /* in case next line fails */
910 locallock->lockOwners = (LOCALLOCKOWNER *)
912 locallock->maxLockOwners * sizeof(LOCALLOCKOWNER));
913 }
914 else
915 {
916 /* Make sure there will be room to remember the lock */
917 if (locallock->numLockOwners >= locallock->maxLockOwners)
918 {
919 int newsize = locallock->maxLockOwners * 2;
920
921 locallock->lockOwners = (LOCALLOCKOWNER *)
922 repalloc(locallock->lockOwners,
923 newsize * sizeof(LOCALLOCKOWNER));
924 locallock->maxLockOwners = newsize;
925 }
926 }
927 hashcode = locallock->hashcode;
928
929 if (locallockp)
931
932 /*
933 * If we already hold the lock, we can just increase the count locally.
934 *
935 * If lockCleared is already set, caller need not worry about absorbing
936 * sinval messages related to the lock's object.
937 */
938 if (locallock->nLocks > 0)
939 {
941 if (locallock->lockCleared)
943 else
945 }
946
947 /*
948 * We don't acquire any other heavyweight lock while holding the relation
949 * extension lock. We do allow to acquire the same relation extension
950 * lock more than once but that case won't reach here.
951 */
953
954 /*
955 * Prepare to emit a WAL record if acquisition of this lock needs to be
956 * replayed in a standby server.
957 *
958 * Here we prepare to log; after lock is acquired we'll issue log record.
959 * This arrangement simplifies error recovery in case the preparation step
960 * fails.
961 *
962 * Only AccessExclusiveLocks can conflict with lock types that read-only
963 * transactions can acquire in a standby server. Make sure this definition
964 * matches the one in GetRunningTransactionLocks().
965 */
966 if (lockmode >= AccessExclusiveLock &&
967 locktag->locktag_type == LOCKTAG_RELATION &&
970 {
972 log_lock = true;
973 }
974
975 /*
976 * Attempt to take lock via fast path, if eligible. But if we remember
977 * having filled up the fast path array, we don't attempt to make any
978 * further use of it until we release some locks. It's possible that some
979 * other backend has transferred some of those locks to the shared hash
980 * table, leaving space free, but it's not worth acquiring the LWLock just
981 * to check. It's also possible that we're acquiring a second or third
982 * lock type on a relation we have already locked using the fast-path, but
983 * for now we don't worry about that case either.
984 */
985 if (EligibleForRelationFastPath(locktag, lockmode))
986 {
989 {
991 bool acquired;
992
993 /*
994 * LWLockAcquire acts as a memory sequencing point, so it's safe
995 * to assume that any strong locker whose increment to
996 * FastPathStrongRelationLocks->counts becomes visible after we
997 * test it has yet to begin to transfer fast-path locks.
998 */
1001 acquired = false;
1002 else
1004 lockmode);
1006 if (acquired)
1007 {
1008 /*
1009 * The locallock might contain stale pointers to some old
1010 * shared objects; we MUST reset these to null before
1011 * considering the lock to be acquired via fast-path.
1012 */
1013 locallock->lock = NULL;
1014 locallock->proclock = NULL;
1015 GrantLockLocal(locallock, owner);
1016 return LOCKACQUIRE_OK;
1017 }
1018 }
1019 else
1020 {
1021 /*
1022 * Increment the lock statistics counter if lock could not be
1023 * acquired via the fast-path.
1024 */
1025 pgstat_count_lock_fastpath_exceeded(locallock->tag.lock.locktag_type);
1026 }
1027 }
1028
1029 /*
1030 * If this lock could potentially have been taken via the fast-path by
1031 * some other backend, we must (temporarily) disable further use of the
1032 * fast-path for this lock tag, and migrate any locks already taken via
1033 * this method to the main lock table.
1034 */
1035 if (ConflictsWithRelationFastPath(locktag, lockmode))
1036 {
1038
1041 hashcode))
1042 {
1044 if (locallock->nLocks == 0)
1046 if (locallockp)
1047 *locallockp = NULL;
1049 ereport(ERROR,
1051 errmsg("out of shared memory"),
1052 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
1053 else
1054 return LOCKACQUIRE_NOT_AVAIL;
1055 }
1056 }
1057
1058 /*
1059 * We didn't find the lock in our LOCALLOCK table, and we didn't manage to
1060 * take it via the fast-path, either, so we've got to mess with the shared
1061 * lock table.
1062 */
1064
1066
1067 /*
1068 * Find or create lock and proclock entries with this tag
1069 *
1070 * Note: if the locallock object already existed, it might have a pointer
1071 * to the lock already ... but we should not assume that that pointer is
1072 * valid, since a lock object with zero hold and request counts can go
1073 * away anytime. So we have to use SetupLockInTable() to recompute the
1074 * lock and proclock pointers, even if they're already set.
1075 */
1076 proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
1077 hashcode, lockmode);
1078 if (!proclock)
1079 {
1082 if (locallock->nLocks == 0)
1084 if (locallockp)
1085 *locallockp = NULL;
1087 ereport(ERROR,
1089 errmsg("out of shared memory"),
1090 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
1091 else
1092 return LOCKACQUIRE_NOT_AVAIL;
1093 }
1094 locallock->proclock = proclock;
1095 lock = proclock->tag.myLock;
1096 locallock->lock = lock;
1097
1098 /*
1099 * If lock requested conflicts with locks requested by waiters, must join
1100 * wait queue. Otherwise, check for conflict with already-held locks.
1101 * (That's last because most complex check.)
1102 */
1103 if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
1104 found_conflict = true;
1105 else
1107 lock, proclock);
1108
1109 if (!found_conflict)
1110 {
1111 /* No conflict with held or previously requested locks */
1112 GrantLock(lock, proclock, lockmode);
1114 }
1115 else
1116 {
1117 /*
1118 * Join the lock's wait queue. We call this even in the dontWait
1119 * case, because JoinWaitQueue() may discover that we can acquire the
1120 * lock immediately after all.
1121 */
1123 }
1124
1126 {
1127 /*
1128 * We're not getting the lock because a deadlock was detected already
1129 * while trying to join the wait queue, or because we would have to
1130 * wait but the caller requested no blocking.
1131 *
1132 * Undo the changes to shared entries before releasing the partition
1133 * lock.
1134 */
1136
1137 if (proclock->holdMask == 0)
1138 {
1140
1142 hashcode);
1143 dlist_delete(&proclock->lockLink);
1144 dlist_delete(&proclock->procLink);
1146 &(proclock->tag),
1149 NULL))
1150 elog(PANIC, "proclock table corrupted");
1151 }
1152 else
1153 PROCLOCK_PRINT("LockAcquire: did not join wait queue", proclock);
1154 lock->nRequested--;
1155 lock->requested[lockmode]--;
1156 LOCK_PRINT("LockAcquire: did not join wait queue",
1157 lock, lockmode);
1158 Assert((lock->nRequested > 0) &&
1159 (lock->requested[lockmode] >= 0));
1160 Assert(lock->nGranted <= lock->nRequested);
1162 if (locallock->nLocks == 0)
1164
1165 if (dontWait)
1166 {
1167 /*
1168 * Log lock holders and waiters as a detail log message if
1169 * logLockFailure = true and lock acquisition fails with dontWait
1170 * = true
1171 */
1172 if (logLockFailure)
1173 {
1177 const char *modename;
1178 int lockHoldersNum = 0;
1179
1183
1184 DescribeLockTag(&buf, &locallock->tag.lock);
1185 modename = GetLockmodeName(locallock->tag.lock.locktag_lockmethodid,
1186 lockmode);
1187
1188 /* Gather a list of all lock holders and waiters */
1193
1194 ereport(LOG,
1195 (errmsg("process %d could not obtain %s on %s",
1196 MyProcPid, modename, buf.data),
1198 "Process holding the lock: %s, Wait queue: %s.",
1199 "Processes holding the lock: %s, Wait queue: %s.",
1201 lock_holders_sbuf.data,
1202 lock_waiters_sbuf.data)));
1203
1204 pfree(buf.data);
1207 }
1208 if (locallockp)
1209 *locallockp = NULL;
1210 return LOCKACQUIRE_NOT_AVAIL;
1211 }
1212 else
1213 {
1215 /* DeadLockReport() will not return */
1216 }
1217 }
1218
1219 /*
1220 * We are now in the lock queue, or the lock was already granted. If
1221 * queued, go to sleep.
1222 */
1224 {
1225 Assert(!dontWait);
1226 PROCLOCK_PRINT("LockAcquire: sleeping on lock", proclock);
1227 LOCK_PRINT("LockAcquire: sleeping on lock", lock, lockmode);
1229
1231
1232 /*
1233 * NOTE: do not do any material change of state between here and
1234 * return. All required changes in locktable state must have been
1235 * done when the lock was granted to us --- see notes in WaitOnLock.
1236 */
1237
1239 {
1240 /*
1241 * We failed as a result of a deadlock, see CheckDeadLock(). Quit
1242 * now.
1243 */
1244 Assert(!dontWait);
1246 /* DeadLockReport() will not return */
1247 }
1248 }
1249 else
1252
1253 /* The lock was granted to us. Update the local lock entry accordingly */
1254 Assert((proclock->holdMask & LOCKBIT_ON(lockmode)) != 0);
1255 GrantLockLocal(locallock, owner);
1256
1257 /*
1258 * Lock state is fully up-to-date now; if we error out after this, no
1259 * special error cleanup is required.
1260 */
1262
1263 /*
1264 * Emit a WAL record if acquisition of this lock needs to be replayed in a
1265 * standby server.
1266 */
1267 if (log_lock)
1268 {
1269 /*
1270 * Decode the locktag back to the original values, to avoid sending
1271 * lots of empty bytes with every message. See lock.h to check how a
1272 * locktag is defined for LOCKTAG_RELATION
1273 */
1275 locktag->locktag_field2);
1276 }
1277
1278 return LOCKACQUIRE_OK;
1279}
1280
1281/*
1282 * Find or create LOCK and PROCLOCK objects as needed for a new lock
1283 * request.
1284 *
1285 * Returns the PROCLOCK object, or NULL if we failed to create the objects
1286 * for lack of shared memory.
1287 *
1288 * The appropriate partition lock must be held at entry, and will be
1289 * held at exit.
1290 */
1291static PROCLOCK *
1293 const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode)
1294{
1295 LOCK *lock;
1296 PROCLOCK *proclock;
1299 bool found;
1300
1301 /*
1302 * Find or create a lock with this tag.
1303 */
1305 locktag,
1306 hashcode,
1308 &found);
1309 if (!lock)
1310 return NULL;
1311
1312 /*
1313 * if it's a new lock object, initialize it
1314 */
1315 if (!found)
1316 {
1317 lock->grantMask = 0;
1318 lock->waitMask = 0;
1319 dlist_init(&lock->procLocks);
1320 dclist_init(&lock->waitProcs);
1321 lock->nRequested = 0;
1322 lock->nGranted = 0;
1323 MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
1324 MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
1325 LOCK_PRINT("LockAcquire: new", lock, lockmode);
1326 }
1327 else
1328 {
1329 LOCK_PRINT("LockAcquire: found", lock, lockmode);
1330 Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
1331 Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
1332 Assert(lock->nGranted <= lock->nRequested);
1333 }
1334
1335 /*
1336 * Create the hash key for the proclock table.
1337 */
1338 proclocktag.myLock = lock;
1339 proclocktag.myProc = proc;
1340
1342
1343 /*
1344 * Find or create a proclock entry with this tag
1345 */
1347 &proclocktag,
1350 &found);
1351 if (!proclock)
1352 {
1353 /* Oops, not enough shmem for the proclock */
1354 if (lock->nRequested == 0)
1355 {
1356 /*
1357 * There are no other requestors of this lock, so garbage-collect
1358 * the lock object. We *must* do this to avoid a permanent leak
1359 * of shared memory, because there won't be anything to cause
1360 * anyone to release the lock object later.
1361 */
1362 Assert(dlist_is_empty(&(lock->procLocks)));
1364 &(lock->tag),
1365 hashcode,
1367 NULL))
1368 elog(PANIC, "lock table corrupted");
1369 }
1370 return NULL;
1371 }
1372
1373 /*
1374 * If new, initialize the new entry
1375 */
1376 if (!found)
1377 {
1379
1380 /*
1381 * It might seem unsafe to access proclock->groupLeader without a
1382 * lock, but it's not really. Either we are initializing a proclock
1383 * on our own behalf, in which case our group leader isn't changing
1384 * because the group leader for a process can only ever be changed by
1385 * the process itself; or else we are transferring a fast-path lock to
1386 * the main lock table, in which case that process can't change its
1387 * lock group leader without first releasing all of its locks (and in
1388 * particular the one we are currently transferring).
1389 */
1390 proclock->groupLeader = proc->lockGroupLeader != NULL ?
1391 proc->lockGroupLeader : proc;
1392 proclock->holdMask = 0;
1393 proclock->releaseMask = 0;
1394 /* Add proclock to appropriate lists */
1395 dlist_push_tail(&lock->procLocks, &proclock->lockLink);
1396 dlist_push_tail(&proc->myProcLocks[partition], &proclock->procLink);
1397 PROCLOCK_PRINT("LockAcquire: new", proclock);
1398 }
1399 else
1400 {
1401 PROCLOCK_PRINT("LockAcquire: found", proclock);
1402 Assert((proclock->holdMask & ~lock->grantMask) == 0);
1403
1404#ifdef CHECK_DEADLOCK_RISK
1405
1406 /*
1407 * Issue warning if we already hold a lower-level lock on this object
1408 * and do not hold a lock of the requested level or higher. This
1409 * indicates a deadlock-prone coding practice (eg, we'd have a
1410 * deadlock if another backend were following the same code path at
1411 * about the same time).
1412 *
1413 * This is not enabled by default, because it may generate log entries
1414 * about user-level coding practices that are in fact safe in context.
1415 * It can be enabled to help find system-level problems.
1416 *
1417 * XXX Doing numeric comparison on the lockmodes is a hack; it'd be
1418 * better to use a table. For now, though, this works.
1419 */
1420 {
1421 int i;
1422
1423 for (i = lockMethodTable->numLockModes; i > 0; i--)
1424 {
1425 if (proclock->holdMask & LOCKBIT_ON(i))
1426 {
1427 if (i >= (int) lockmode)
1428 break; /* safe: we have a lock >= req level */
1429 elog(LOG, "deadlock risk: raising lock level"
1430 " from %s to %s on object %u/%u/%u",
1431 lockMethodTable->lockModeNames[i],
1432 lockMethodTable->lockModeNames[lockmode],
1433 lock->tag.locktag_field1, lock->tag.locktag_field2,
1434 lock->tag.locktag_field3);
1435 break;
1436 }
1437 }
1438 }
1439#endif /* CHECK_DEADLOCK_RISK */
1440 }
1441
1442 /*
1443 * lock->nRequested and lock->requested[] count the total number of
1444 * requests, whether granted or waiting, so increment those immediately.
1445 * The other counts don't increment till we get the lock.
1446 */
1447 lock->nRequested++;
1448 lock->requested[lockmode]++;
1449 Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
1450
1451 /*
1452 * We shouldn't already hold the desired lock; else locallock table is
1453 * broken.
1454 */
1455 if (proclock->holdMask & LOCKBIT_ON(lockmode))
1456 elog(ERROR, "lock %s on object %u/%u/%u is already held",
1457 lockMethodTable->lockModeNames[lockmode],
1458 lock->tag.locktag_field1, lock->tag.locktag_field2,
1459 lock->tag.locktag_field3);
1460
1461 return proclock;
1462}
1463
1464/*
1465 * Check and set/reset the flag that we hold the relation extension lock.
1466 *
1467 * It is callers responsibility that this function is called after
1468 * acquiring/releasing the relation extension lock.
1469 *
1470 * Pass acquired as true if lock is acquired, false otherwise.
1471 */
1472static inline void
1474{
1475#ifdef USE_ASSERT_CHECKING
1478#endif
1479}
1480
1481/*
1482 * Subroutine to free a locallock entry
1483 */
1484static void
1486{
1487 int i;
1488
1489 for (i = locallock->numLockOwners - 1; i >= 0; i--)
1490 {
1491 if (locallock->lockOwners[i].owner != NULL)
1492 ResourceOwnerForgetLock(locallock->lockOwners[i].owner, locallock);
1493 }
1494 locallock->numLockOwners = 0;
1495 if (locallock->lockOwners != NULL)
1496 pfree(locallock->lockOwners);
1497 locallock->lockOwners = NULL;
1498
1499 if (locallock->holdsStrongLockCount)
1500 {
1502
1504
1508 locallock->holdsStrongLockCount = false;
1510 }
1511
1513 &(locallock->tag),
1514 HASH_REMOVE, NULL))
1515 elog(WARNING, "locallock table corrupted");
1516
1517 /*
1518 * Indicate that the lock is released for certain types of locks
1519 */
1521}
1522
1523/*
1524 * LockCheckConflicts -- test whether requested lock conflicts
1525 * with those already granted
1526 *
1527 * Returns true if conflict, false if no conflict.
1528 *
1529 * NOTES:
1530 * Here's what makes this complicated: one process's locks don't
1531 * conflict with one another, no matter what purpose they are held for
1532 * (eg, session and transaction locks do not conflict). Nor do the locks
1533 * of one process in a lock group conflict with those of another process in
1534 * the same group. So, we must subtract off these locks when determining
1535 * whether the requested new lock conflicts with those already held.
1536 */
1537bool
1539 LOCKMODE lockmode,
1540 LOCK *lock,
1541 PROCLOCK *proclock)
1542{
1543 int numLockModes = lockMethodTable->numLockModes;
1545 int conflictMask = lockMethodTable->conflictTab[lockmode];
1549 int i;
1550
1551 /*
1552 * first check for global conflicts: If no locks conflict with my request,
1553 * then I get the lock.
1554 *
1555 * Checking for conflict: lock->grantMask represents the types of
1556 * currently held locks. conflictTable[lockmode] has a bit set for each
1557 * type of lock that conflicts with request. Bitwise compare tells if
1558 * there is a conflict.
1559 */
1560 if (!(conflictMask & lock->grantMask))
1561 {
1562 PROCLOCK_PRINT("LockCheckConflicts: no conflict", proclock);
1563 return false;
1564 }
1565
1566 /*
1567 * Rats. Something conflicts. But it could still be my own lock, or a
1568 * lock held by another member of my locking group. First, figure out how
1569 * many conflicts remain after subtracting out any locks I hold myself.
1570 */
1571 myLocks = proclock->holdMask;
1572 for (i = 1; i <= numLockModes; i++)
1573 {
1574 if ((conflictMask & LOCKBIT_ON(i)) == 0)
1575 {
1576 conflictsRemaining[i] = 0;
1577 continue;
1578 }
1579 conflictsRemaining[i] = lock->granted[i];
1580 if (myLocks & LOCKBIT_ON(i))
1583 }
1584
1585 /* If no conflicts remain, we get the lock. */
1586 if (totalConflictsRemaining == 0)
1587 {
1588 PROCLOCK_PRINT("LockCheckConflicts: resolved (simple)", proclock);
1589 return false;
1590 }
1591
1592 /* If no group locking, it's definitely a conflict. */
1593 if (proclock->groupLeader == MyProc && MyProc->lockGroupLeader == NULL)
1594 {
1595 Assert(proclock->tag.myProc == MyProc);
1596 PROCLOCK_PRINT("LockCheckConflicts: conflicting (simple)",
1597 proclock);
1598 return true;
1599 }
1600
1601 /*
1602 * The relation extension lock conflict even between the group members.
1603 */
1605 {
1606 PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)",
1607 proclock);
1608 return true;
1609 }
1610
1611 /*
1612 * Locks held in conflicting modes by members of our own lock group are
1613 * not real conflicts; we can subtract those out and see if we still have
1614 * a conflict. This is O(N) in the number of processes holding or
1615 * awaiting locks on this object. We could improve that by making the
1616 * shared memory state more complex (and larger) but it doesn't seem worth
1617 * it.
1618 */
1620 {
1622 dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
1623
1624 if (proclock != otherproclock &&
1625 proclock->groupLeader == otherproclock->groupLeader &&
1626 (otherproclock->holdMask & conflictMask) != 0)
1627 {
1628 int intersectMask = otherproclock->holdMask & conflictMask;
1629
1630 for (i = 1; i <= numLockModes; i++)
1631 {
1632 if ((intersectMask & LOCKBIT_ON(i)) != 0)
1633 {
1634 if (conflictsRemaining[i] <= 0)
1635 elog(PANIC, "proclocks held do not match lock");
1638 }
1639 }
1640
1641 if (totalConflictsRemaining == 0)
1642 {
1643 PROCLOCK_PRINT("LockCheckConflicts: resolved (group)",
1644 proclock);
1645 return false;
1646 }
1647 }
1648 }
1649
1650 /* Nope, it's a real conflict. */
1651 PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)", proclock);
1652 return true;
1653}
1654
1655/*
1656 * GrantLock -- update the lock and proclock data structures to show
1657 * the lock request has been granted.
1658 *
1659 * NOTE: if proc was blocked, it also needs to be removed from the wait list
1660 * and have its waitLock/waitProcLock fields cleared. That's not done here.
1661 *
1662 * NOTE: the lock grant also has to be recorded in the associated LOCALLOCK
1663 * table entry; but since we may be awaking some other process, we can't do
1664 * that here; it's done by GrantLockLocal, instead.
1665 */
1666void
1667GrantLock(LOCK *lock, PROCLOCK *proclock, LOCKMODE lockmode)
1668{
1669 lock->nGranted++;
1670 lock->granted[lockmode]++;
1671 lock->grantMask |= LOCKBIT_ON(lockmode);
1672 if (lock->granted[lockmode] == lock->requested[lockmode])
1673 lock->waitMask &= LOCKBIT_OFF(lockmode);
1674 proclock->holdMask |= LOCKBIT_ON(lockmode);
1675 LOCK_PRINT("GrantLock", lock, lockmode);
1676 Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
1677 Assert(lock->nGranted <= lock->nRequested);
1678}
1679
1680/*
1681 * UnGrantLock -- opposite of GrantLock.
1682 *
1683 * Updates the lock and proclock data structures to show that the lock
1684 * is no longer held nor requested by the current holder.
1685 *
1686 * Returns true if there were any waiters waiting on the lock that
1687 * should now be woken up with ProcLockWakeup.
1688 */
1689static bool
1690UnGrantLock(LOCK *lock, LOCKMODE lockmode,
1692{
1693 bool wakeupNeeded = false;
1694
1695 Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
1696 Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
1697 Assert(lock->nGranted <= lock->nRequested);
1698
1699 /*
1700 * fix the general lock stats
1701 */
1702 lock->nRequested--;
1703 lock->requested[lockmode]--;
1704 lock->nGranted--;
1705 lock->granted[lockmode]--;
1706
1707 if (lock->granted[lockmode] == 0)
1708 {
1709 /* change the conflict mask. No more of this lock type. */
1710 lock->grantMask &= LOCKBIT_OFF(lockmode);
1711 }
1712
1713 LOCK_PRINT("UnGrantLock: updated", lock, lockmode);
1714
1715 /*
1716 * We need only run ProcLockWakeup if the released lock conflicts with at
1717 * least one of the lock types requested by waiter(s). Otherwise whatever
1718 * conflict made them wait must still exist. NOTE: before MVCC, we could
1719 * skip wakeup if lock->granted[lockmode] was still positive. But that's
1720 * not true anymore, because the remaining granted locks might belong to
1721 * some waiter, who could now be awakened because he doesn't conflict with
1722 * his own locks.
1723 */
1724 if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
1725 wakeupNeeded = true;
1726
1727 /*
1728 * Now fix the per-proclock state.
1729 */
1730 proclock->holdMask &= LOCKBIT_OFF(lockmode);
1731 PROCLOCK_PRINT("UnGrantLock: updated", proclock);
1732
1733 return wakeupNeeded;
1734}
1735
1736/*
1737 * CleanUpLock -- clean up after releasing a lock. We garbage-collect the
1738 * proclock and lock objects if possible, and call ProcLockWakeup if there
1739 * are remaining requests and the caller says it's OK. (Normally, this
1740 * should be called after UnGrantLock, and wakeupNeeded is the result from
1741 * UnGrantLock.)
1742 *
1743 * The appropriate partition lock must be held at entry, and will be
1744 * held at exit.
1745 */
1746static void
1747CleanUpLock(LOCK *lock, PROCLOCK *proclock,
1749 bool wakeupNeeded)
1750{
1751 /*
1752 * If this was my last hold on this lock, delete my entry in the proclock
1753 * table.
1754 */
1755 if (proclock->holdMask == 0)
1756 {
1758
1759 PROCLOCK_PRINT("CleanUpLock: deleting", proclock);
1760 dlist_delete(&proclock->lockLink);
1761 dlist_delete(&proclock->procLink);
1762 proclock_hashcode = ProcLockHashCode(&proclock->tag, hashcode);
1764 &(proclock->tag),
1767 NULL))
1768 elog(PANIC, "proclock table corrupted");
1769 }
1770
1771 if (lock->nRequested == 0)
1772 {
1773 /*
1774 * The caller just released the last lock, so garbage-collect the lock
1775 * object.
1776 */
1777 LOCK_PRINT("CleanUpLock: deleting", lock, 0);
1780 &(lock->tag),
1781 hashcode,
1783 NULL))
1784 elog(PANIC, "lock table corrupted");
1785 }
1786 else if (wakeupNeeded)
1787 {
1788 /* There are waiters on this lock, so wake them up. */
1790 }
1791}
1792
1793/*
1794 * GrantLockLocal -- update the locallock data structures to show
1795 * the lock request has been granted.
1796 *
1797 * We expect that LockAcquire made sure there is room to add a new
1798 * ResourceOwner entry.
1799 */
1800static void
1802{
1803 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
1804 int i;
1805
1806 Assert(locallock->numLockOwners < locallock->maxLockOwners);
1807 /* Count the total */
1808 locallock->nLocks++;
1809 /* Count the per-owner lock */
1810 for (i = 0; i < locallock->numLockOwners; i++)
1811 {
1812 if (lockOwners[i].owner == owner)
1813 {
1814 lockOwners[i].nLocks++;
1815 return;
1816 }
1817 }
1818 lockOwners[i].owner = owner;
1819 lockOwners[i].nLocks = 1;
1820 locallock->numLockOwners++;
1821 if (owner != NULL)
1823
1824 /* Indicate that the lock is acquired for certain types of locks. */
1826}
1827
1828/*
1829 * BeginStrongLockAcquire - inhibit use of fastpath for a given LOCALLOCK,
1830 * and arrange for error cleanup if it fails
1831 */
1832static void
1834{
1836 Assert(locallock->holdsStrongLockCount == false);
1837
1838 /*
1839 * Adding to a memory location is not atomic, so we take a spinlock to
1840 * ensure we don't collide with someone else trying to bump the count at
1841 * the same time.
1842 *
1843 * XXX: It might be worth considering using an atomic fetch-and-add
1844 * instruction here, on architectures where that is supported.
1845 */
1846
1849 locallock->holdsStrongLockCount = true;
1852}
1853
1854/*
1855 * FinishStrongLockAcquire - cancel pending cleanup for a strong lock
1856 * acquisition once it's no longer needed
1857 */
1858static void
1863
1864/*
1865 * AbortStrongLockAcquire - undo strong lock state changes performed by
1866 * BeginStrongLockAcquire.
1867 */
1868void
1886
1887/*
1888 * GrantAwaitedLock -- call GrantLockLocal for the lock we are doing
1889 * WaitOnLock on.
1890 *
1891 * proc.c needs this for the case where we are booted off the lock by
1892 * timeout, but discover that someone granted us the lock anyway.
1893 *
1894 * We could just export GrantLockLocal, but that would require including
1895 * resowner.h in lock.h, which creates circularity.
1896 */
1897void
1902
1903/*
1904 * GetAwaitedLock -- Return the lock we're currently doing WaitOnLock on.
1905 */
1906LOCALLOCK *
1908{
1909 return awaitedLock;
1910}
1911
1912/*
1913 * ResetAwaitedLock -- Forget that we are waiting on a lock.
1914 */
1915void
1917{
1918 awaitedLock = NULL;
1919}
1920
1921/*
1922 * MarkLockClear -- mark an acquired lock as "clear"
1923 *
1924 * This means that we know we have absorbed all sinval messages that other
1925 * sessions generated before we acquired this lock, and so we can confidently
1926 * assume we know about any catalog changes protected by this lock.
1927 */
1928void
1930{
1931 Assert(locallock->nLocks > 0);
1932 locallock->lockCleared = true;
1933}
1934
1935/*
1936 * WaitOnLock -- wait to acquire a lock
1937 *
1938 * This is a wrapper around ProcSleep, with extra tracing and bookkeeping.
1939 */
1940static ProcWaitStatus
1942{
1945
1946 TRACE_POSTGRESQL_LOCK_WAIT_START(locallock->tag.lock.locktag_field1,
1947 locallock->tag.lock.locktag_field2,
1948 locallock->tag.lock.locktag_field3,
1949 locallock->tag.lock.locktag_field4,
1950 locallock->tag.lock.locktag_type,
1951 locallock->tag.mode);
1952
1953 /* Setup error traceback support for ereport() */
1958
1959 /* adjust the process title to indicate that it's waiting */
1960 set_ps_display_suffix("waiting");
1961
1962 /*
1963 * Record the fact that we are waiting for a lock, so that
1964 * LockErrorCleanup will clean up if cancel/die happens.
1965 */
1967 awaitedOwner = owner;
1968
1969 /*
1970 * NOTE: Think not to put any shared-state cleanup after the call to
1971 * ProcSleep, in either the normal or failure path. The lock state must
1972 * be fully set by the lock grantor, or by CheckDeadLock if we give up
1973 * waiting for the lock. This is necessary because of the possibility
1974 * that a cancel/die interrupt will interrupt ProcSleep after someone else
1975 * grants us the lock, but before we've noticed it. Hence, after granting,
1976 * the locktable state must fully reflect the fact that we own the lock;
1977 * we can't do additional work on return.
1978 *
1979 * We can and do use a PG_TRY block to try to clean up after failure, but
1980 * this still has a major limitation: elog(FATAL) can occur while waiting
1981 * (eg, a "die" interrupt), and then control won't come back here. So all
1982 * cleanup of essential state should happen in LockErrorCleanup, not here.
1983 * We can use PG_TRY to clear the "waiting" status flags, since doing that
1984 * is unimportant if the process exits.
1985 */
1986 PG_TRY();
1987 {
1989 }
1990 PG_CATCH();
1991 {
1992 /* In this path, awaitedLock remains set until LockErrorCleanup */
1993
1994 /* reset ps display to remove the suffix */
1996
1997 /* and propagate the error */
1998 PG_RE_THROW();
1999 }
2000 PG_END_TRY();
2001
2002 /*
2003 * We no longer want LockErrorCleanup to do anything.
2004 */
2005 awaitedLock = NULL;
2006
2007 /* reset ps display to remove the suffix */
2009
2011
2012 TRACE_POSTGRESQL_LOCK_WAIT_DONE(locallock->tag.lock.locktag_field1,
2013 locallock->tag.lock.locktag_field2,
2014 locallock->tag.lock.locktag_field3,
2015 locallock->tag.lock.locktag_field4,
2016 locallock->tag.lock.locktag_type,
2017 locallock->tag.mode);
2018
2019 return result;
2020}
2021
2022/*
2023 * error context callback for failures in WaitOnLock
2024 *
2025 * We report which lock was being waited on, in the same style used in
2026 * deadlock reports. This helps with lock timeout errors in particular.
2027 */
2028static void
2030{
2032 const LOCKTAG *tag = &locallock->tag.lock;
2033 LOCKMODE mode = locallock->tag.mode;
2035
2038
2039 errcontext("waiting for %s on %s",
2041 locktagbuf.data);
2042}
2043
2044/*
2045 * Remove a proc from the wait-queue it is on (caller must know it is on one).
2046 * This is only used when the proc has failed to get the lock, so we set its
2047 * waitStatus to PROC_WAIT_STATUS_ERROR.
2048 *
2049 * Appropriate partition lock must be held by caller. Also, caller is
2050 * responsible for signaling the proc if needed.
2051 *
2052 * NB: this does not clean up any locallock object that may exist for the lock.
2053 */
2054void
2056{
2057 LOCK *waitLock = proc->waitLock;
2058 PROCLOCK *proclock = proc->waitProcLock;
2059 LOCKMODE lockmode = proc->waitLockMode;
2061
2062 /* Make sure proc is waiting */
2065 Assert(waitLock);
2066 Assert(!dclist_is_empty(&waitLock->waitProcs));
2068
2069 /* Remove proc from lock's wait queue */
2071
2072 /* Undo increments of request counts by waiting process */
2073 Assert(waitLock->nRequested > 0);
2074 Assert(waitLock->nRequested > proc->waitLock->nGranted);
2075 waitLock->nRequested--;
2076 Assert(waitLock->requested[lockmode] > 0);
2077 waitLock->requested[lockmode]--;
2078 /* don't forget to clear waitMask bit if appropriate */
2079 if (waitLock->granted[lockmode] == waitLock->requested[lockmode])
2080 waitLock->waitMask &= LOCKBIT_OFF(lockmode);
2081
2082 /* Clean up the proc's own state, and pass it the ok/fail signal */
2083 proc->waitLock = NULL;
2084 proc->waitProcLock = NULL;
2086
2087 /*
2088 * Delete the proclock immediately if it represents no already-held locks.
2089 * (This must happen now because if the owner of the lock decides to
2090 * release it, and the requested/granted counts then go to zero,
2091 * LockRelease expects there to be no remaining proclocks.) Then see if
2092 * any other waiters for the lock can be woken up now.
2093 */
2094 CleanUpLock(waitLock, proclock,
2095 LockMethods[lockmethodid], hashcode,
2096 true);
2097}
2098
2099/*
2100 * LockRelease -- look up 'locktag' and release one 'lockmode' lock on it.
2101 * Release a session lock if 'sessionLock' is true, else release a
2102 * regular transaction lock.
2103 *
2104 * Side Effects: find any waiting processes that are now wakable,
2105 * grant them their requested locks and awaken them.
2106 * (We have to grant the lock here to avoid a race between
2107 * the waking process and any new process to
2108 * come along and request the lock.)
2109 */
2110bool
2111LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
2112{
2117 LOCK *lock;
2118 PROCLOCK *proclock;
2120 bool wakeupNeeded;
2121
2123 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2126 elog(ERROR, "unrecognized lock mode: %d", lockmode);
2127
2128#ifdef LOCK_DEBUG
2129 if (LOCK_DEBUG_ENABLED(locktag))
2130 elog(LOG, "LockRelease: lock [%u,%u] %s",
2131 locktag->locktag_field1, locktag->locktag_field2,
2132 lockMethodTable->lockModeNames[lockmode]);
2133#endif
2134
2135 /*
2136 * Find the LOCALLOCK entry for this lock and lockmode
2137 */
2138 MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
2139 localtag.lock = *locktag;
2140 localtag.mode = lockmode;
2141
2143 &localtag,
2144 HASH_FIND, NULL);
2145
2146 /*
2147 * let the caller print its own error message, too. Do not ereport(ERROR).
2148 */
2149 if (!locallock || locallock->nLocks <= 0)
2150 {
2151 elog(WARNING, "you don't own a lock of type %s",
2152 lockMethodTable->lockModeNames[lockmode]);
2153 return false;
2154 }
2155
2156 /*
2157 * Decrease the count for the resource owner.
2158 */
2159 {
2160 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
2161 ResourceOwner owner;
2162 int i;
2163
2164 /* Identify owner for lock */
2165 if (sessionLock)
2166 owner = NULL;
2167 else
2168 owner = CurrentResourceOwner;
2169
2170 for (i = locallock->numLockOwners - 1; i >= 0; i--)
2171 {
2172 if (lockOwners[i].owner == owner)
2173 {
2174 Assert(lockOwners[i].nLocks > 0);
2175 if (--lockOwners[i].nLocks == 0)
2176 {
2177 if (owner != NULL)
2179 /* compact out unused slot */
2180 locallock->numLockOwners--;
2181 if (i < locallock->numLockOwners)
2182 lockOwners[i] = lockOwners[locallock->numLockOwners];
2183 }
2184 break;
2185 }
2186 }
2187 if (i < 0)
2188 {
2189 /* don't release a lock belonging to another owner */
2190 elog(WARNING, "you don't own a lock of type %s",
2191 lockMethodTable->lockModeNames[lockmode]);
2192 return false;
2193 }
2194 }
2195
2196 /*
2197 * Decrease the total local count. If we're still holding the lock, we're
2198 * done.
2199 */
2200 locallock->nLocks--;
2201
2202 if (locallock->nLocks > 0)
2203 return true;
2204
2205 /*
2206 * At this point we can no longer suppose we are clear of invalidation
2207 * messages related to this lock. Although we'll delete the LOCALLOCK
2208 * object before any intentional return from this routine, it seems worth
2209 * the trouble to explicitly reset lockCleared right now, just in case
2210 * some error prevents us from deleting the LOCALLOCK.
2211 */
2212 locallock->lockCleared = false;
2213
2214 /* Attempt fast release of any lock eligible for the fast path. */
2215 if (EligibleForRelationFastPath(locktag, lockmode) &&
2217 {
2218 bool released;
2219
2220 /*
2221 * We might not find the lock here, even if we originally entered it
2222 * here. Another backend may have moved it to the main table.
2223 */
2226 lockmode);
2228 if (released)
2229 {
2231 return true;
2232 }
2233 }
2234
2235 /*
2236 * Otherwise we've got to mess with the shared lock table.
2237 */
2239
2241
2242 /*
2243 * Normally, we don't need to re-find the lock or proclock, since we kept
2244 * their addresses in the locallock table, and they couldn't have been
2245 * removed while we were holding a lock on them. But it's possible that
2246 * the lock was taken fast-path and has since been moved to the main hash
2247 * table by another backend, in which case we will need to look up the
2248 * objects here. We assume the lock field is NULL if so.
2249 */
2250 lock = locallock->lock;
2251 if (!lock)
2252 {
2254
2255 Assert(EligibleForRelationFastPath(locktag, lockmode));
2257 locktag,
2258 locallock->hashcode,
2259 HASH_FIND,
2260 NULL);
2261 if (!lock)
2262 elog(ERROR, "failed to re-find shared lock object");
2263 locallock->lock = lock;
2264
2265 proclocktag.myLock = lock;
2266 proclocktag.myProc = MyProc;
2268 &proclocktag,
2269 HASH_FIND,
2270 NULL);
2271 if (!locallock->proclock)
2272 elog(ERROR, "failed to re-find shared proclock object");
2273 }
2274 LOCK_PRINT("LockRelease: found", lock, lockmode);
2275 proclock = locallock->proclock;
2276 PROCLOCK_PRINT("LockRelease: found", proclock);
2277
2278 /*
2279 * Double-check that we are actually holding a lock of the type we want to
2280 * release.
2281 */
2282 if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
2283 {
2284 PROCLOCK_PRINT("LockRelease: WRONGTYPE", proclock);
2286 elog(WARNING, "you don't own a lock of type %s",
2287 lockMethodTable->lockModeNames[lockmode]);
2289 return false;
2290 }
2291
2292 /*
2293 * Do the releasing. CleanUpLock will waken any now-wakable waiters.
2294 */
2295 wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
2296
2297 CleanUpLock(lock, proclock,
2298 lockMethodTable, locallock->hashcode,
2299 wakeupNeeded);
2300
2302
2304 return true;
2305}
2306
2307/*
2308 * LockReleaseAll -- Release all locks of the specified lock method that
2309 * are held by the current process.
2310 *
2311 * Well, not necessarily *all* locks. The available behaviors are:
2312 * allLocks == true: release all locks including session locks.
2313 * allLocks == false: release all non-session locks.
2314 */
2315void
2317{
2318 HASH_SEQ_STATUS status;
2320 int i,
2321 numLockModes;
2323 LOCK *lock;
2324 int partition;
2325 bool have_fast_path_lwlock = false;
2326
2328 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2330
2331#ifdef LOCK_DEBUG
2332 if (*(lockMethodTable->trace_flag))
2333 elog(LOG, "LockReleaseAll: lockmethod=%d", lockmethodid);
2334#endif
2335
2336 /*
2337 * Get rid of our fast-path VXID lock, if appropriate. Note that this is
2338 * the only way that the lock we hold on our own VXID can ever get
2339 * released: it is always and only released when a toplevel transaction
2340 * ends.
2341 */
2344
2345 numLockModes = lockMethodTable->numLockModes;
2346
2347 /*
2348 * First we run through the locallock table and get rid of unwanted
2349 * entries, then we scan the process's proclocks and get rid of those. We
2350 * do this separately because we may have multiple locallock entries
2351 * pointing to the same proclock, and we daren't end up with any dangling
2352 * pointers. Fast-path locks are cleaned up during the locallock table
2353 * scan, though.
2354 */
2356
2357 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2358 {
2359 /*
2360 * If the LOCALLOCK entry is unused, something must've gone wrong
2361 * while trying to acquire this lock. Just forget the local entry.
2362 */
2363 if (locallock->nLocks == 0)
2364 {
2366 continue;
2367 }
2368
2369 /* Ignore items that are not of the lockmethod to be removed */
2371 continue;
2372
2373 /*
2374 * If we are asked to release all locks, we can just zap the entry.
2375 * Otherwise, must scan to see if there are session locks. We assume
2376 * there is at most one lockOwners entry for session locks.
2377 */
2378 if (!allLocks)
2379 {
2380 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
2381
2382 /* If session lock is above array position 0, move it down to 0 */
2383 for (i = 0; i < locallock->numLockOwners; i++)
2384 {
2385 if (lockOwners[i].owner == NULL)
2386 lockOwners[0] = lockOwners[i];
2387 else
2388 ResourceOwnerForgetLock(lockOwners[i].owner, locallock);
2389 }
2390
2391 if (locallock->numLockOwners > 0 &&
2392 lockOwners[0].owner == NULL &&
2393 lockOwners[0].nLocks > 0)
2394 {
2395 /* Fix the locallock to show just the session locks */
2396 locallock->nLocks = lockOwners[0].nLocks;
2397 locallock->numLockOwners = 1;
2398 /* We aren't deleting this locallock, so done */
2399 continue;
2400 }
2401 else
2402 locallock->numLockOwners = 0;
2403 }
2404
2405#ifdef USE_ASSERT_CHECKING
2406
2407 /*
2408 * Tuple locks are currently held only for short durations within a
2409 * transaction. Check that we didn't forget to release one.
2410 */
2412 elog(WARNING, "tuple lock held at commit");
2413#endif
2414
2415 /*
2416 * If the lock or proclock pointers are NULL, this lock was taken via
2417 * the relation fast-path (and is not known to have been transferred).
2418 */
2419 if (locallock->proclock == NULL || locallock->lock == NULL)
2420 {
2421 LOCKMODE lockmode = locallock->tag.mode;
2422 Oid relid;
2423
2424 /* Verify that a fast-path lock is what we've got. */
2425 if (!EligibleForRelationFastPath(&locallock->tag.lock, lockmode))
2426 elog(PANIC, "locallock table corrupted");
2427
2428 /*
2429 * If we don't currently hold the LWLock that protects our
2430 * fast-path data structures, we must acquire it before attempting
2431 * to release the lock via the fast-path. We will continue to
2432 * hold the LWLock until we're done scanning the locallock table,
2433 * unless we hit a transferred fast-path lock. (XXX is this
2434 * really such a good idea? There could be a lot of entries ...)
2435 */
2437 {
2439 have_fast_path_lwlock = true;
2440 }
2441
2442 /* Attempt fast-path release. */
2443 relid = locallock->tag.lock.locktag_field2;
2444 if (FastPathUnGrantRelationLock(relid, lockmode))
2445 {
2447 continue;
2448 }
2449
2450 /*
2451 * Our lock, originally taken via the fast path, has been
2452 * transferred to the main lock table. That's going to require
2453 * some extra work, so release our fast-path lock before starting.
2454 */
2456 have_fast_path_lwlock = false;
2457
2458 /*
2459 * Now dump the lock. We haven't got a pointer to the LOCK or
2460 * PROCLOCK in this case, so we have to handle this a bit
2461 * differently than a normal lock release. Unfortunately, this
2462 * requires an extra LWLock acquire-and-release cycle on the
2463 * partitionLock, but hopefully it shouldn't happen often.
2464 */
2466 &locallock->tag.lock, lockmode, false);
2468 continue;
2469 }
2470
2471 /* Mark the proclock to show we need to release this lockmode */
2472 if (locallock->nLocks > 0)
2473 locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
2474
2475 /* And remove the locallock hashtable entry */
2477 }
2478
2479 /* Done with the fast-path data structures */
2482
2483 /*
2484 * Now, scan each lock partition separately.
2485 */
2487 {
2489 dlist_head *procLocks = &MyProc->myProcLocks[partition];
2491
2493
2494 /*
2495 * If the proclock list for this partition is empty, we can skip
2496 * acquiring the partition lock. This optimization is trickier than
2497 * it looks, because another backend could be in process of adding
2498 * something to our proclock list due to promoting one of our
2499 * fast-path locks. However, any such lock must be one that we
2500 * decided not to delete above, so it's okay to skip it again now;
2501 * we'd just decide not to delete it again. We must, however, be
2502 * careful to re-fetch the list header once we've acquired the
2503 * partition lock, to be sure we have a valid, up-to-date pointer.
2504 * (There is probably no significant risk if pointer fetch/store is
2505 * atomic, but we don't wish to assume that.)
2506 *
2507 * XXX This argument assumes that the locallock table correctly
2508 * represents all of our fast-path locks. While allLocks mode
2509 * guarantees to clean up all of our normal locks regardless of the
2510 * locallock situation, we lose that guarantee for fast-path locks.
2511 * This is not ideal.
2512 */
2513 if (dlist_is_empty(procLocks))
2514 continue; /* needn't examine this partition */
2515
2517
2519 {
2520 PROCLOCK *proclock = dlist_container(PROCLOCK, procLink, proclock_iter.cur);
2521 bool wakeupNeeded = false;
2522
2523 Assert(proclock->tag.myProc == MyProc);
2524
2525 lock = proclock->tag.myLock;
2526
2527 /* Ignore items that are not of the lockmethod to be removed */
2528 if (LOCK_LOCKMETHOD(*lock) != lockmethodid)
2529 continue;
2530
2531 /*
2532 * In allLocks mode, force release of all locks even if locallock
2533 * table had problems
2534 */
2535 if (allLocks)
2536 proclock->releaseMask = proclock->holdMask;
2537 else
2538 Assert((proclock->releaseMask & ~proclock->holdMask) == 0);
2539
2540 /*
2541 * Ignore items that have nothing to be released, unless they have
2542 * holdMask == 0 and are therefore recyclable
2543 */
2544 if (proclock->releaseMask == 0 && proclock->holdMask != 0)
2545 continue;
2546
2547 PROCLOCK_PRINT("LockReleaseAll", proclock);
2548 LOCK_PRINT("LockReleaseAll", lock, 0);
2549 Assert(lock->nRequested >= 0);
2550 Assert(lock->nGranted >= 0);
2551 Assert(lock->nGranted <= lock->nRequested);
2552 Assert((proclock->holdMask & ~lock->grantMask) == 0);
2553
2554 /*
2555 * Release the previously-marked lock modes
2556 */
2557 for (i = 1; i <= numLockModes; i++)
2558 {
2559 if (proclock->releaseMask & LOCKBIT_ON(i))
2560 wakeupNeeded |= UnGrantLock(lock, i, proclock,
2562 }
2563 Assert((lock->nRequested >= 0) && (lock->nGranted >= 0));
2564 Assert(lock->nGranted <= lock->nRequested);
2565 LOCK_PRINT("LockReleaseAll: updated", lock, 0);
2566
2567 proclock->releaseMask = 0;
2568
2569 /* CleanUpLock will wake up waiters if needed. */
2570 CleanUpLock(lock, proclock,
2572 LockTagHashCode(&lock->tag),
2573 wakeupNeeded);
2574 } /* loop over PROCLOCKs within this partition */
2575
2577 } /* loop over partitions */
2578
2579#ifdef LOCK_DEBUG
2580 if (*(lockMethodTable->trace_flag))
2581 elog(LOG, "LockReleaseAll done");
2582#endif
2583}
2584
2585/*
2586 * LockReleaseSession -- Release all session locks of the specified lock method
2587 * that are held by the current process.
 *
 * Scans the backend-local LOCALLOCK hash table; for each entry of the given
 * lock method, the session-level hold is released.  Transaction-level holds
 * on the same objects are unaffected.
 *
 * NOTE(review): the extraction dropped hyperlinked lines here (parameter
 * list, the lockmethodid range check's condition, the hash_seq_init call,
 * and the per-entry release call); surviving code lines are unmodified.
2588 */
2589void
2591{
2592 HASH_SEQ_STATUS status;
2594
 /* Reject an out-of-range lock method before touching any state. */
2596 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2597
2599
2600 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2601 {
2602 /* Ignore items that are not of the specified lock method */
2604 continue;
2605
 /* presumably ReleaseLockIfHeld(locallock, true) -- line lost, TODO confirm */
2607 }
2608}
2609
2610/*
2611 * LockReleaseCurrentOwner
2612 * Release all locks belonging to CurrentResourceOwner
2613 *
2614 * If the caller knows what those locks are, it can pass them as an array.
2615 * That speeds up the call significantly, when a lot of locks are held.
2616 * Otherwise, pass NULL for locallocks, and we'll traverse through our hash
2617 * table to find them.
2618 */
2619void
2621{
2622 if (locallocks == NULL)
2623 {
 /* Slow path: no hint array, so scan the whole locallock hash table. */
2624 HASH_SEQ_STATUS status;
2626
2628
2629 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
 /* per-entry release call lost in extraction -- presumably
  * ReleaseLockIfHeld(locallock, false); TODO confirm */
2631 }
2632 else
2633 {
2634 int i;
2635
 /*
  * Fast path: iterate the caller-supplied array.  Walking backwards is
  * deliberate: releasing a lock may remove it from the owner's array,
  * and a reverse scan stays valid under such removals.
  */
2636 for (i = nlocks - 1; i >= 0; i--)
2638 }
2639}
2640
2641/*
2642 * ReleaseLockIfHeld
2643 * Release any session-level locks on this lockable object if sessionLock
2644 * is true; else, release any locks held by CurrentResourceOwner.
2645 *
2646 * It is tempting to pass this a ResourceOwner pointer (or NULL for session
2647 * locks), but without refactoring LockRelease() we cannot support releasing
2648 * locks belonging to resource owners other than CurrentResourceOwner.
2649 * If we were to refactor, it'd be a good idea to fix it so we don't have to
2650 * do a hashtable lookup of the locallock, too. However, currently this
2651 * function isn't used heavily enough to justify refactoring for its
2652 * convenience.
2653 */
2654static void
2656{
2657 ResourceOwner owner;
2658 LOCALLOCKOWNER *lockOwners;
2659 int i;
2660
2661 /* Identify owner for lock (must match LockRelease!) */
2662 if (sessionLock)
2663 owner = NULL;
2664 else
2665 owner = CurrentResourceOwner;
2666
2667 /* Scan to see if there are any locks belonging to the target owner */
2668 lockOwners = locallock->lockOwners;
2669 for (i = locallock->numLockOwners - 1; i >= 0; i--)
2670 {
2671 if (lockOwners[i].owner == owner)
2672 {
2673 Assert(lockOwners[i].nLocks > 0);
2674 if (lockOwners[i].nLocks < locallock->nLocks)
2675 {
2676 /*
2677 * We will still hold this lock after forgetting this
2678 * ResourceOwner.
2679 */
2680 locallock->nLocks -= lockOwners[i].nLocks;
2681 /* compact out unused slot */
2682 locallock->numLockOwners--;
2683 if (owner != NULL)
 /* presumably ResourceOwnerForgetLock(owner, locallock)
  * -- line lost in extraction; TODO confirm */
2685 if (i < locallock->numLockOwners)
2686 lockOwners[i] = lockOwners[locallock->numLockOwners];
2687 }
2688 else
2689 {
2690 Assert(lockOwners[i].nLocks == locallock->nLocks);
 /*
  * This owner holds the only reference, so drop the lock for
  * real.  Collapsing both counts to 1 ensures a single
  * LockRelease() call fully releases it.
  */
2691 /* We want to call LockRelease just once */
2692 lockOwners[i].nLocks = 1;
2693 locallock->nLocks = 1;
2694 if (!LockRelease(&locallock->tag.lock,
2695 locallock->tag.mode,
2696 sessionLock))
2697 elog(WARNING, "ReleaseLockIfHeld: failed??");
2698 }
2699 break;
2700 }
2701 }
2702}
2703
2704/*
2705 * LockReassignCurrentOwner
2706 * Reassign all locks belonging to CurrentResourceOwner to belong
2707 * to its parent resource owner.
2708 *
2709 * If the caller knows what those locks are, it can pass them as an array.
2710 * That speeds up the call significantly, when a lot of locks are held
2711 * (e.g pg_dump with a large schema). Otherwise, pass NULL for locallocks,
2712 * and we'll traverse through our hash table to find them.
2713 */
2714void
2716{
 /* parent-owner lookup line lost in extraction -- presumably
  * ResourceOwner parent = ResourceOwnerGetParent(CurrentResourceOwner);
  * TODO confirm */
2718
2719 Assert(parent != NULL);
2720
2721 if (locallocks == NULL)
2722 {
 /* No hint array: scan the whole backend-local locallock table. */
2723 HASH_SEQ_STATUS status;
2725
2727
2728 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
 /* per-entry call lost -- presumably LockReassignOwner(locallock, parent) */
2730 }
2731 else
2732 {
2733 int i;
2734
 /* Reverse iteration tolerates entries being removed as we go. */
2735 for (i = nlocks - 1; i >= 0; i--)
2736 LockReassignOwner(locallocks[i], parent);
2737 }
2738}
2739
2740/*
2741 * Subroutine of LockReassignCurrentOwner. Reassigns a given lock belonging to
2742 * CurrentResourceOwner to its parent.
 *
 * If the parent already has a lockOwners slot for this lock, the child's
 * count is merged into it and the child's slot is compacted away; otherwise
 * the child's slot is simply relabeled with the parent owner.
2743 */
2744static void
2746{
2747 LOCALLOCKOWNER *lockOwners;
2748 int i;
2749 int ic = -1;
2750 int ip = -1;
2751
2752 /*
2753 * Scan to see if there are any locks belonging to current owner or its
2754 * parent
2755 */
2756 lockOwners = locallock->lockOwners;
2757 for (i = locallock->numLockOwners - 1; i >= 0; i--)
2758 {
2759 if (lockOwners[i].owner == CurrentResourceOwner)
2760 ic = i;
2761 else if (lockOwners[i].owner == parent)
2762 ip = i;
2763 }
2764
2765 if (ic < 0)
2766 return; /* no current locks */
2767
2768 if (ip < 0)
2769 {
2770 /* Parent has no slot, so just give it the child's slot */
2771 lockOwners[ic].owner = parent;
 /* presumably ResourceOwnerRememberLock(parent, locallock) here
  * -- line lost in extraction; TODO confirm */
2773 }
2774 else
2775 {
2776 /* Merge child's count with parent's */
2777 lockOwners[ip].nLocks += lockOwners[ic].nLocks;
2778 /* compact out unused slot */
2779 locallock->numLockOwners--;
2780 if (ic < locallock->numLockOwners)
2781 lockOwners[ic] = lockOwners[locallock->numLockOwners];
2782 }
 /* child owner's bookkeeping removal lost in extraction -- presumably
  * ResourceOwnerForgetLock(CurrentResourceOwner, locallock) */
2784}
2785
2786/*
2787 * FastPathGrantRelationLock
2788 * Grant lock using per-backend fast-path array, if there is space.
 *
 * Returns true if the lock was recorded in MyProc's fast-path slots (either
 * in an existing slot for the same relid, or a free slot within the relid's
 * group), false if the group is full and the caller must fall back to the
 * main lock table.
2789 */
2790static bool
2792{
2793 uint32 i;
 /* unused-slot sentinel declaration lost in extraction -- presumably
  * an "invalid" marker that FAST_PATH_SLOT can never return; TODO confirm */
2795
2796 /* fast-path group the lock belongs to */
2797 uint32 group = FAST_PATH_REL_GROUP(relid);
2798
2799 /* Scan for existing entry for this relid, remembering empty slot. */
2800 for (i = 0; i < FP_LOCK_SLOTS_PER_GROUP; i++)
2801 {
2802 /* index into the whole per-backend array */
2803 uint32 f = FAST_PATH_SLOT(group, i);
2804
2805 if (FAST_PATH_GET_BITS(MyProc, f) == 0)
2806 unused_slot = f;
2807 else if (MyProc->fpRelId[f] == relid)
2808 {
 /* Same relid already present: just OR in the new mode bit. */
2809 Assert(!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode));
2810 FAST_PATH_SET_LOCKMODE(MyProc, f, lockmode);
2811 return true;
2812 }
2813 }
2814
2815 /* If no existing entry, use any empty slot. */
2817 {
2818 MyProc->fpRelId[unused_slot] = relid;
 /* FAST_PATH_SET_LOCKMODE call lost in extraction here */
2820 ++FastPathLocalUseCounts[group];
2821 return true;
2822 }
2823
2824 /* No existing entry, and no empty slot. */
2825 return false;
2826}
2827
2828/*
2829 * FastPathUnGrantRelationLock
2830 * Release fast-path lock, if present. Update backend-private local
2831 * use count, while we're at it.
 *
 * Returns true if a matching (relid, lockmode) fast-path entry was found and
 * cleared; false means the lock is not (or no longer) in the fast-path array,
 * e.g. because it was transferred to the main lock table.
2832 */
2833static bool
2835{
2836 uint32 i;
2837 bool result = false;
2838
2839 /* fast-path group the lock belongs to */
2840 uint32 group = FAST_PATH_REL_GROUP(relid);
2841
 /*
  * Recompute the group's in-use counter from scratch while scanning: reset
  * to zero here, then count every slot that still has bits set below.
  */
2842 FastPathLocalUseCounts[group] = 0;
2843 for (i = 0; i < FP_LOCK_SLOTS_PER_GROUP; i++)
2844 {
2845 /* index into the whole per-backend array */
2846 uint32 f = FAST_PATH_SLOT(group, i);
2847
2848 if (MyProc->fpRelId[f] == relid
2849 && FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
2850 {
2851 Assert(!result);
2852 FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
2853 result = true;
2854 /* we continue iterating so as to update FastPathLocalUseCount */
2855 }
2856 if (FAST_PATH_GET_BITS(MyProc, f) != 0)
2857 ++FastPathLocalUseCounts[group];
2858 }
2859 return result;
2860}
2861
2862/*
2863 * FastPathTransferRelationLocks
2864 * Transfer locks matching the given lock tag from per-backend fast-path
2865 * arrays to the shared hash table.
2866 *
2867 * Returns true if successful, false if ran out of shared memory.
 *
 * Called by a backend that wants a "strong" lock on the relation and must
 * therefore promote every other backend's fast-path entries for that
 * relation into the main lock table first.
2868 */
2869static bool
2871 uint32 hashcode)
2872{
 /* partition-lock lookup line lost in extraction here */
2874 Oid relid = locktag->locktag_field2;
2875 uint32 i;
2876
2877 /* fast-path group the lock belongs to */
2878 uint32 group = FAST_PATH_REL_GROUP(relid);
2879
2880 /*
2881 * Every PGPROC that can potentially hold a fast-path lock is present in
2882 * ProcGlobal->allProcs. Prepared transactions are not, but any
2883 * outstanding fast-path locks held by prepared transactions are
2884 * transferred to the main lock table.
2885 */
2886 for (i = 0; i < ProcGlobal->allProcCount; i++)
2887 {
2888 PGPROC *proc = GetPGProcByNumber(i);
2889 uint32 j;
2890
 /* LWLockAcquire(&proc->fpInfoLock, LW_EXCLUSIVE) lost in extraction;
  * the matching releases below confirm the lock is taken here. */
2892
2893 /*
2894 * If the target backend isn't referencing the same database as the
2895 * lock, then we needn't examine the individual relation IDs at all;
2896 * none of them can be relevant.
2897 *
2898 * proc->databaseId is set at backend startup time and never changes
2899 * thereafter, so it might be safe to perform this test before
2900 * acquiring &proc->fpInfoLock. In particular, it's certainly safe to
2901 * assume that if the target backend holds any fast-path locks, it
2902 * must have performed a memory-fencing operation (in particular, an
2903 * LWLock acquisition) since setting proc->databaseId. However, it's
2904 * less clear that our backend is certain to have performed a memory
2905 * fencing operation since the other backend set proc->databaseId. So
2906 * for now, we test it after acquiring the LWLock just to be safe.
2907 *
2908 * Also skip groups without any registered fast-path locks.
2909 */
2910 if (proc->databaseId != locktag->locktag_field1 ||
2911 proc->fpLockBits[group] == 0)
2912 {
2913 LWLockRelease(&proc->fpInfoLock);
2914 continue;
2915 }
2916
2917 for (j = 0; j < FP_LOCK_SLOTS_PER_GROUP; j++)
2918 {
2919 uint32 lockmode;
2920
2921 /* index into the whole per-backend array */
2922 uint32 f = FAST_PATH_SLOT(group, j);
2923
2924 /* Look for an allocated slot matching the given relid. */
2925 if (relid != proc->fpRelId[f] || FAST_PATH_GET_BITS(proc, f) == 0)
2926 continue;
2927
2928 /* Find or create lock object. */
 /* partition LWLockAcquire lost in extraction here */
2930 for (lockmode = FAST_PATH_LOCKNUMBER_OFFSET;
 /* loop bound lost in extraction (runs over the fast-path lock modes) */
2932 ++lockmode)
2933 {
2934 PROCLOCK *proclock;
2935
2936 if (!FAST_PATH_CHECK_LOCKMODE(proc, f, lockmode))
2937 continue;
2938 proclock = SetupLockInTable(lockMethodTable, proc, locktag,
2939 hashcode, lockmode);
2940 if (!proclock)
2941 {
 /* Out of shared memory: drop both locks and report failure. */
2943 LWLockRelease(&proc->fpInfoLock);
2944 return false;
2945 }
2946 GrantLock(proclock->tag.myLock, proclock, lockmode);
2947 FAST_PATH_CLEAR_LOCKMODE(proc, f, lockmode);
2948 }
 /* partition LWLockRelease lost in extraction here */
2950
2951 /* No need to examine remaining slots. */
2952 break;
2953 }
2954 LWLockRelease(&proc->fpInfoLock);
2955 }
2956 return true;
2957}
2958
2959/*
2960 * FastPathGetRelationLockEntry
2961 * Return the PROCLOCK for a lock originally taken via the fast-path,
2962 * transferring it to the primary lock table if necessary.
2963 *
2964 * Note: caller takes care of updating the locallock object.
 *
 * Errors out (out of shared memory) if the transfer cannot allocate a
 * PROCLOCK, and elog(ERROR)s if the supposedly-transferred lock cannot be
 * re-found in the shared tables.
2965 */
2966static PROCLOCK *
2968{
 /* lockMethodTable lookup line lost in extraction here */
2970 LOCKTAG *locktag = &locallock->tag.lock;
2971 PROCLOCK *proclock = NULL;
 /* partition-lock variable declaration lost in extraction here */
2973 Oid relid = locktag->locktag_field2;
2974 uint32 i,
2975 group;
2976
2977 /* fast-path group the lock belongs to */
2978 group = FAST_PATH_REL_GROUP(relid);
2979
 /* LWLockAcquire(&MyProc->fpInfoLock, ...) lost in extraction;
  * the release below confirms it is taken here. */
2981
2982 for (i = 0; i < FP_LOCK_SLOTS_PER_GROUP; i++)
2983 {
2984 uint32 lockmode;
2985
2986 /* index into the whole per-backend array */
2987 uint32 f = FAST_PATH_SLOT(group, i);
2988
2989 /* Look for an allocated slot matching the given relid. */
2990 if (relid != MyProc->fpRelId[f] || FAST_PATH_GET_BITS(MyProc, f) == 0)
2991 continue;
2992
2993 /* If we don't have a lock of the given mode, forget it! */
2994 lockmode = locallock->tag.mode;
2995 if (!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
2996 break;
2997
2998 /* Find or create lock object. */
3000
3001 proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
3002 locallock->hashcode, lockmode);
3003 if (!proclock)
3004 {
 /* Release the LWLocks before reporting the failure. */
3007 ereport(ERROR,
3009 errmsg("out of shared memory"),
3010 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
3011 }
3012 GrantLock(proclock->tag.myLock, proclock, lockmode);
3013 FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
3014
 /* partition LWLockRelease lost in extraction here */
3016
3017 /* No need to examine remaining slots. */
3018 break;
3019 }
3020
 /* LWLockRelease(&MyProc->fpInfoLock) lost in extraction here */
3022
3023 /* Lock may have already been transferred by some other backend. */
3024 if (proclock == NULL)
3025 {
3026 LOCK *lock;
 /* PROCLOCKTAG declaration and hash lookup setup lost in extraction */
3029
3031
3033 locktag,
3034 locallock->hashcode,
3035 HASH_FIND,
3036 NULL);
3037 if (!lock)
3038 elog(ERROR, "failed to re-find shared lock object");
3039
3040 proclocktag.myLock = lock;
3041 proclocktag.myProc = MyProc;
3042
3044 proclock = (PROCLOCK *)
3046 &proclocktag,
3048 HASH_FIND,
3049 NULL);
3050 if (!proclock)
3051 elog(ERROR, "failed to re-find shared proclock object");
3053 }
3054
3055 return proclock;
3056}
3057
3058/*
3059 * GetLockConflicts
3060 * Get an array of VirtualTransactionIds of xacts currently holding locks
3061 * that would conflict with the specified lock/lockmode.
3062 * xacts merely awaiting such a lock are NOT reported.
3063 *
3064 * The result array is palloc'd and is terminated with an invalid VXID.
3065 * *countp, if not null, is updated to the number of items set.
3066 *
3067 * Of course, the result could be out of date by the time it's returned, so
3068 * use of this function has to be thought about carefully. Similarly, a
3069 * PGPROC with no "lxid" will be considered non-conflicting regardless of any
3070 * lock it holds. Existing callers don't care about a locker after that
3071 * locker's pg_xact updates complete. CommitTransaction() clears "lxid" after
3072 * pg_xact updates and before releasing locks.
3073 *
3074 * Note we never include the current xact's vxid in the result array,
3075 * since an xact never blocks itself.
3076 */
3078GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
3079{
 /* several declarations (vxids, lockMethodTable, conflictMask, iterator,
  * partition lock) lost in extraction here */
3083 LOCK *lock;
3086 PROCLOCK *proclock;
3087 uint32 hashcode;
3089 int count = 0;
3090 int fast_count = 0;
3091
 /* Validate the lock method and mode before doing any work. */
3093 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
3096 elog(ERROR, "unrecognized lock mode: %d", lockmode);
3097
3098 /*
3099 * Allocate memory to store results, and fill with InvalidVXID. We only
3100 * need enough space for MaxBackends + max_prepared_xacts + a terminator.
3101 * InHotStandby allocate once in TopMemoryContext.
3102 */
3103 if (InHotStandby)
3104 {
3105 if (vxids == NULL)
 /* static buffer allocated once in TopMemoryContext (hot standby
  * runs this in the startup process, repeatedly) */
3108 sizeof(VirtualTransactionId) *
3110 }
3111 else
 /* per-call palloc of the result array lost in extraction here */
3113
3114 /* Compute hash code and partition lock, and look up conflicting modes. */
3115 hashcode = LockTagHashCode(locktag);
3117 conflictMask = lockMethodTable->conflictTab[lockmode];
3118
3119 /*
3120 * Fast path locks might not have been entered in the primary lock table.
3121 * If the lock we're dealing with could conflict with such a lock, we must
3122 * examine each backend's fast-path array for conflicts.
3123 */
3124 if (ConflictsWithRelationFastPath(locktag, lockmode))
3125 {
3126 int i;
3127 Oid relid = locktag->locktag_field2;
3129
3130 /* fast-path group the lock belongs to */
3131 uint32 group = FAST_PATH_REL_GROUP(relid);
3132
3133 /*
3134 * Iterate over relevant PGPROCs. Anything held by a prepared
3135 * transaction will have been transferred to the primary lock table,
3136 * so we need not worry about those. This is all a bit fuzzy, because
3137 * new locks could be taken after we've visited a particular
3138 * partition, but the callers had better be prepared to deal with that
3139 * anyway, since the locks could equally well be taken between the
3140 * time we return the value and the time the caller does something
3141 * with it.
3142 */
3143 for (i = 0; i < ProcGlobal->allProcCount; i++)
3144 {
3145 PGPROC *proc = GetPGProcByNumber(i);
3146 uint32 j;
3147
3148 /* A backend never blocks itself */
3149 if (proc == MyProc)
3150 continue;
3151
 /* LWLockAcquire(&proc->fpInfoLock, ...) lost in extraction;
  * releases below confirm the lock is held in this loop. */
3153
3154 /*
3155 * If the target backend isn't referencing the same database as
3156 * the lock, then we needn't examine the individual relation IDs
3157 * at all; none of them can be relevant.
3158 *
3159 * See FastPathTransferRelationLocks() for discussion of why we do
3160 * this test after acquiring the lock.
3161 *
3162 * Also skip groups without any registered fast-path locks.
3163 */
3164 if (proc->databaseId != locktag->locktag_field1 ||
3165 proc->fpLockBits[group] == 0)
3166 {
3167 LWLockRelease(&proc->fpInfoLock);
3168 continue;
3169 }
3170
3171 for (j = 0; j < FP_LOCK_SLOTS_PER_GROUP; j++)
3172 {
 /* lockmask declaration lost in extraction here */
3174
3175 /* index into the whole per-backend array */
3176 uint32 f = FAST_PATH_SLOT(group, j);
3177
3178 /* Look for an allocated slot matching the given relid. */
3179 if (relid != proc->fpRelId[f])
3180 continue;
3181 lockmask = FAST_PATH_GET_BITS(proc, f);
3182 if (!lockmask)
3183 continue;
 /* shift of lockmask into lock-mode bit positions lost here */
3185
3186 /*
3187 * There can only be one entry per relation, so if we found it
3188 * and it doesn't conflict, we can skip the rest of the slots.
3189 */
3190 if ((lockmask & conflictMask) == 0)
3191 break;
3192
3193 /* Conflict! */
3194 GET_VXID_FROM_PGPROC(vxid, *proc);
3195
 /* validity test on vxid lost in extraction here */
3197 vxids[count++] = vxid;
3198 /* else, xact already committed or aborted */
3199
3200 /* No need to examine remaining slots. */
3201 break;
3202 }
3203
3204 LWLockRelease(&proc->fpInfoLock);
3205 }
3206 }
3207
3208 /* Remember how many fast-path conflicts we found. */
3209 fast_count = count;
3210
3211 /*
3212 * Look up the lock object matching the tag.
3213 */
 /* partition LWLockAcquire lost in extraction here */
3215
3217 locktag,
3218 hashcode,
3219 HASH_FIND,
3220 NULL);
3221 if (!lock)
3222 {
3223 /*
3224 * If the lock object doesn't exist, there is nothing holding a lock
3225 * on this lockable object.
3226 */
 /* partition LWLockRelease lost in extraction here */
3228 vxids[count].procNumber = INVALID_PROC_NUMBER;
3229 vxids[count].localTransactionId = InvalidLocalTransactionId;
3230 if (countp)
3231 *countp = count;
3232 return vxids;
3233 }
3234
3235 /*
3236 * Examine each existing holder (or awaiter) of the lock.
3237 */
 /* dlist_foreach over lock->procLocks lost in extraction here */
3239 {
3240 proclock = dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
3241
3242 if (conflictMask & proclock->holdMask)
3243 {
3244 PGPROC *proc = proclock->tag.myProc;
3245
3246 /* A backend never blocks itself */
3247 if (proc != MyProc)
3248 {
 /* VirtualTransactionId declaration lost in extraction here */
3250
3251 GET_VXID_FROM_PGPROC(vxid, *proc);
3252
 /* validity test on vxid lost in extraction here */
3254 {
3255 int i;
3256
3257 /* Avoid duplicate entries. */
3258 for (i = 0; i < fast_count; ++i)
 /* equality test against vxids[i] lost in extraction */
3260 break;
3261 if (i >= fast_count)
3262 vxids[count++] = vxid;
3263 }
3264 /* else, xact already committed or aborted */
3265 }
3266 }
3267 }
3268
 /* partition LWLockRelease lost in extraction here */
3270
3271 if (count > MaxBackends + max_prepared_xacts) /* should never happen */
3272 elog(PANIC, "too many conflicting locks found");
3273
 /* Terminate the array with an invalid VXID, per the function contract. */
3274 vxids[count].procNumber = INVALID_PROC_NUMBER;
3275 vxids[count].localTransactionId = InvalidLocalTransactionId;
3276 if (countp)
3277 *countp = count;
3278 return vxids;
3279}
3280
3281/*
3282 * Find a lock in the shared lock table and release it. It is the caller's
3283 * responsibility to verify that this is a sane thing to do. (For example, it
3284 * would be bad to release a lock here if there might still be a LOCALLOCK
3285 * object with pointers to it.)
3286 *
3287 * We currently use this in two situations: first, to release locks held by
3288 * prepared transactions on commit (see lock_twophase_postcommit); and second,
3289 * to release locks taken via the fast-path, transferred to the main hash
3290 * table, and then released (see LockReleaseAll).
3291 */
3292static void
3294 LOCKTAG *locktag, LOCKMODE lockmode,
3296{
3297 LOCK *lock;
3298 PROCLOCK *proclock;
 /* PROCLOCKTAG declaration lost in extraction here */
3300 uint32 hashcode;
 /* proclock hashcode and partition-lock declarations lost here */
3303 bool wakeupNeeded;
3304
3305 hashcode = LockTagHashCode(locktag);
 /* partition-lock computation lost in extraction here */
3307
 /* partition LWLockAcquire lost in extraction here */
3309
3310 /*
3311 * Re-find the lock object (it had better be there).
3312 */
3314 locktag,
3315 hashcode,
3316 HASH_FIND,
3317 NULL);
3318 if (!lock)
3319 elog(PANIC, "failed to re-find shared lock object");
3320
3321 /*
3322 * Re-find the proclock object (ditto).
3323 */
3324 proclocktag.myLock = lock;
3325 proclocktag.myProc = proc;
3326
 /* proclock hashcode computation lost in extraction here */
3328
3330 &proclocktag,
3332 HASH_FIND,
3333 NULL);
3334 if (!proclock)
3335 elog(PANIC, "failed to re-find shared proclock object");
3336
3337 /*
3338 * Double-check that we are actually holding a lock of the type we want to
3339 * release.
3340 */
3341 if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
3342 {
3343 PROCLOCK_PRINT("lock_twophase_postcommit: WRONGTYPE", proclock);
 /* partition LWLockRelease lost in extraction here */
3345 elog(WARNING, "you don't own a lock of type %s",
3346 lockMethodTable->lockModeNames[lockmode]);
3347 return;
3348 }
3349
3350 /*
3351 * Do the releasing. CleanUpLock will waken any now-wakable waiters.
3352 */
3353 wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
3354
3355 CleanUpLock(lock, proclock,
3356 lockMethodTable, hashcode,
3357 wakeupNeeded);
3358
 /* partition LWLockRelease lost in extraction here */
3360
3361 /*
3362 * Decrement strong lock count. This logic is needed only for 2PC.
3363 */
 /* decrement_strong_lock_count flag test lost in extraction here */
3365 && ConflictsWithRelationFastPath(locktag, lockmode))
3366 {
 /* fast-path strong-lock hash slot computation and spinlocked
  * decrement of FastPathStrongRelationLocks lost in extraction */
3368
3373 }
3374}
3375
3376/*
3377 * CheckForSessionAndXactLocks
3378 * Check to see if transaction holds both session-level and xact-level
3379 * locks on the same object; if so, throw an error.
3380 *
3381 * If we have both session- and transaction-level locks on the same object,
3382 * PREPARE TRANSACTION must fail. This should never happen with regular
3383 * locks, since we only take those at session level in some special operations
3384 * like VACUUM. It's possible to hit this with advisory locks, though.
3385 *
3386 * It would be nice if we could keep the session hold and give away the
3387 * transactional hold to the prepared xact. However, that would require two
3388 * PROCLOCK objects, and we cannot be sure that another PROCLOCK will be
3389 * available when it comes time for PostPrepare_Locks to do the deed.
3390 * So for now, we error out while we can still do so safely.
3391 *
3392 * Since the LOCALLOCK table stores a separate entry for each lockmode,
3393 * we can't implement this check by examining LOCALLOCK entries in isolation.
3394 * We must build a transient hashtable that is indexed by locktag only.
3395 */
3396static void
3398{
3399 typedef struct
3400 {
3401 LOCKTAG lock; /* identifies the lockable object */
3402 bool sessLock; /* is any lockmode held at session level? */
3403 bool xactLock; /* is any lockmode held at xact level? */
 /* struct typedef name line lost in extraction (PerLockTagEntry,
  * per the sizeof below) */
3405
 /* HASHCTL declaration lost in extraction here */
3407 HTAB *lockhtab;
3408 HASH_SEQ_STATUS status;
 /* LOCALLOCK pointer declaration lost in extraction here */
3410
3411 /* Create a local hash table keyed by LOCKTAG only */
3412 hash_ctl.keysize = sizeof(LOCKTAG);
3413 hash_ctl.entrysize = sizeof(PerLockTagEntry);
 /* hash_ctl.hcxt assignment lost in extraction here */
3415
3416 lockhtab = hash_create("CheckForSessionAndXactLocks table",
3417 256, /* arbitrary initial size */
3418 &hash_ctl,
 /* hash_create flags argument lost in extraction here */
3420
3421 /* Scan local lock table to find entries for each LOCKTAG */
 /* hash_seq_init on the locallock table lost in extraction here */
3423
3424 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3425 {
3426 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
 /* PerLockTagEntry *hentry declaration lost in extraction here */
3428 bool found;
3429 int i;
3430
3431 /*
3432 * Ignore VXID locks. We don't want those to be held by prepared
3433 * transactions, since they aren't meaningful after a restart.
3434 */
3435 if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3436 continue;
3437
3438 /* Ignore it if we don't actually hold the lock */
3439 if (locallock->nLocks <= 0)
3440 continue;
3441
3442 /* Otherwise, find or make an entry in lockhtab */
 /* hash_search call on lockhtab lost in extraction here */
3444 &locallock->tag.lock,
3445 HASH_ENTER, &found);
3446 if (!found) /* initialize, if newly created */
3447 hentry->sessLock = hentry->xactLock = false;
3448
3449 /* Scan to see if we hold lock at session or xact level or both */
3450 for (i = locallock->numLockOwners - 1; i >= 0; i--)
3451 {
3452 if (lockOwners[i].owner == NULL)
3453 hentry->sessLock = true;
3454 else
3455 hentry->xactLock = true;
3456 }
3457
3458 /*
3459 * We can throw error immediately when we see both types of locks; no
3460 * need to wait around to see if there are more violations.
3461 */
3462 if (hentry->sessLock && hentry->xactLock)
3463 ereport(ERROR,
 /* errcode argument lost in extraction here */
3465 errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3466 }
3467
3468 /* Success, so clean up */
 /* hash_destroy(lockhtab) lost in extraction here */
3470}
3471
3472/*
3473 * AtPrepare_Locks
3474 * Do the preparatory work for a PREPARE: make 2PC state file records
3475 * for all locks currently held.
3476 *
3477 * Session-level locks are ignored, as are VXID locks.
3478 *
3479 * For the most part, we don't need to touch shared memory for this ---
3480 * all the necessary state information is in the locallock table.
3481 * Fast-path locks are an exception, however: we move any such locks to
3482 * the main table before allowing PREPARE TRANSACTION to succeed.
3483 */
3484void
3486{
3487 HASH_SEQ_STATUS status;
 /* LOCALLOCK pointer declaration lost in extraction here */
3489
3490 /* First, verify there aren't locks of both xact and session level */
 /* CheckForSessionAndXactLocks() call lost in extraction here */
3492
3493 /* Now do the per-locallock cleanup work */
 /* hash_seq_init on the locallock table lost in extraction here */
3495
3496 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3497 {
3498 TwoPhaseLockRecord record;
3499 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3500 bool haveSessionLock;
3501 bool haveXactLock;
3502 int i;
3503
3504 /*
3505 * Ignore VXID locks. We don't want those to be held by prepared
3506 * transactions, since they aren't meaningful after a restart.
3507 */
3508 if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3509 continue;
3510
3511 /* Ignore it if we don't actually hold the lock */
3512 if (locallock->nLocks <= 0)
3513 continue;
3514
3515 /* Scan to see whether we hold it at session or transaction level */
3516 haveSessionLock = haveXactLock = false;
3517 for (i = locallock->numLockOwners - 1; i >= 0; i--)
3518 {
3519 if (lockOwners[i].owner == NULL)
3520 haveSessionLock = true;
3521 else
3522 haveXactLock = true;
3523 }
3524
3525 /* Ignore it if we have only session lock */
3526 if (!haveXactLock)
3527 continue;
3528
3529 /* This can't happen, because we already checked it */
3530 if (haveSessionLock)
3531 ereport(ERROR,
 /* errcode argument lost in extraction here */
3533 errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3534
3535 /*
3536 * If the local lock was taken via the fast-path, we need to move it
3537 * to the primary lock table, or just get a pointer to the existing
3538 * primary lock table entry if by chance it's already been
3539 * transferred.
3540 */
3541 if (locallock->proclock == NULL)
3542 {
 /* FastPathGetRelationLockEntry assignment lost in extraction here */
3544 locallock->lock = locallock->proclock->tag.myLock;
3545 }
3546
3547 /*
3548 * Arrange to not release any strong lock count held by this lock
3549 * entry. We must retain the count until the prepared transaction is
3550 * committed or rolled back.
3551 */
3552 locallock->holdsStrongLockCount = false;
3553
3554 /*
3555 * Create a 2PC record.
3556 */
3557 memcpy(&(record.locktag), &(locallock->tag.lock), sizeof(LOCKTAG));
3558 record.lockmode = locallock->tag.mode;
3559
 /* RegisterTwoPhaseRecord call start lost in extraction here */
3561 &record, sizeof(TwoPhaseLockRecord));
3562 }
3563}
3564
3565/*
3566 * PostPrepare_Locks
3567 * Clean up after successful PREPARE
3568 *
3569 * Here, we want to transfer ownership of our locks to a dummy PGPROC
3570 * that's now associated with the prepared transaction, and we want to
3571 * clean out the corresponding entries in the LOCALLOCK table.
3572 *
3573 * Note: by removing the LOCALLOCK entries, we are leaving dangling
3574 * pointers in the transaction's resource owner. This is OK at the
3575 * moment since resowner.c doesn't try to free locks retail at a toplevel
3576 * transaction commit or abort. We could alternatively zero out nLocks
3577 * and leave the LOCALLOCK entries to be garbage-collected by LockReleaseAll,
3578 * but that probably costs more cycles.
3579 */
3580void
3582{
 /*
  * NOTE(review): this listing came from a doxygen HTML page; the
  * hyperlinked lines were dropped by extraction (the signature —
  * PostPrepare_Locks(FullTransactionId fxid) per the header comment's
  * callers — plus several declarations, the START/END_CRIT_SECTION
  * calls, and the LWLockAcquire/LWLockRelease calls).  Verify against
  * the authoritative src/backend/storage/lmgr/lock.c before editing.
  */
3583 PGPROC *newproc = TwoPhaseGetDummyProc(fxid, false);
3584 HASH_SEQ_STATUS status;
3586 LOCK *lock;
3587 PROCLOCK *proclock;
3589 int partition;
3590
3591 /* Can't prepare a lock group follower. */
3594
3595 /* This is a critical section: any error means big trouble */
3597
3598 /*
3599 * First we run through the locallock table and get rid of unwanted
3600 * entries, then we scan the process's proclocks and transfer them to the
3601 * target proc.
3602 *
3603 * We do this separately because we may have multiple locallock entries
3604 * pointing to the same proclock, and we daren't end up with any dangling
3605 * pointers.
3606 */
3608
3609 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3610 {
3611 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3612 bool haveSessionLock;
3613 bool haveXactLock;
3614 int i;
3615
3616 if (locallock->proclock == NULL || locallock->lock == NULL)
3617 {
3618 /*
3619 * We must've run out of shared memory while trying to set up this
3620 * lock. Just forget the local entry.
3621 */
3622 Assert(locallock->nLocks == 0);
3624 continue;
3625 }
3626
3627 /* Ignore VXID locks */
3628 if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3629 continue;
3630
3631 /* Scan to see whether we hold it at session or transaction level */
3632 haveSessionLock = haveXactLock = false;
3633 for (i = locallock->numLockOwners - 1; i >= 0; i--)
3634 {
3635 if (lockOwners[i].owner == NULL)
3636 haveSessionLock = true;
3637 else
3638 haveXactLock = true;
3639 }
3640
3641 /* Ignore it if we have only session lock */
3642 if (!haveXactLock)
3643 continue;
3644
 /* PANIC (not ERROR) here: AtPrepare_Locks already rejected this case. */
3645 /* This can't happen, because we already checked it */
3646 if (haveSessionLock)
3647 ereport(PANIC,
3649 errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3650
3651 /* Mark the proclock to show we need to release this lockmode */
3652 if (locallock->nLocks > 0)
3653 locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
3654
3655 /* And remove the locallock hashtable entry */
3657 }
3658
3659 /*
3660 * Now, scan each lock partition separately.
3661 */
3663 {
3665 dlist_head *procLocks = &(MyProc->myProcLocks[partition]);
3667
3669
3670 /*
3671 * If the proclock list for this partition is empty, we can skip
3672 * acquiring the partition lock. This optimization is safer than the
3673 * situation in LockReleaseAll, because we got rid of any fast-path
3674 * locks during AtPrepare_Locks, so there cannot be any case where
3675 * another backend is adding something to our lists now. For safety,
3676 * though, we code this the same way as in LockReleaseAll.
3677 */
3678 if (dlist_is_empty(procLocks))
3679 continue; /* needn't examine this partition */
3680
3682
3684 {
3685 proclock = dlist_container(PROCLOCK, procLink, proclock_iter.cur);
3686
3687 Assert(proclock->tag.myProc == MyProc);
3688
3689 lock = proclock->tag.myLock;
3690
3691 /* Ignore VXID locks */
3693 continue;
3694
3695 PROCLOCK_PRINT("PostPrepare_Locks", proclock);
3696 LOCK_PRINT("PostPrepare_Locks", lock, 0);
3697 Assert(lock->nRequested >= 0);
3698 Assert(lock->nGranted >= 0);
3699 Assert(lock->nGranted <= lock->nRequested);
3700 Assert((proclock->holdMask & ~lock->grantMask) == 0);
3701
3702 /* Ignore it if nothing to release (must be a session lock) */
3703 if (proclock->releaseMask == 0)
3704 continue;
3705
3706 /* Else we should be releasing all locks */
3707 if (proclock->releaseMask != proclock->holdMask)
3708 elog(PANIC, "we seem to have dropped a bit somewhere");
3709
3710 /*
3711 * We cannot simply modify proclock->tag.myProc to reassign
3712 * ownership of the lock, because that's part of the hash key and
3713 * the proclock would then be in the wrong hash chain. Instead
3714 * use hash_update_hash_key. (We used to create a new hash entry,
3715 * but that risks out-of-memory failure if other processes are
3716 * busy making proclocks too.) We must unlink the proclock from
3717 * our procLink chain and put it into the new proc's chain, too.
3718 *
3719 * Note: the updated proclock hash key will still belong to the
3720 * same hash partition, cf proclock_hash(). So the partition lock
3721 * we already hold is sufficient for this.
3722 */
3723 dlist_delete(&proclock->procLink);
3724
3725 /*
3726 * Create the new hash key for the proclock.
3727 */
3728 proclocktag.myLock = lock;
3729 proclocktag.myProc = newproc;
3730
3731 /*
3732 * Update groupLeader pointer to point to the new proc. (We'd
3733 * better not be a member of somebody else's lock group!)
3734 */
3735 Assert(proclock->groupLeader == proclock->tag.myProc);
3736 proclock->groupLeader = newproc;
3737
3738 /*
3739 * Update the proclock. We should not find any existing entry for
3740 * the same hash key, since there can be only one entry for any
3741 * given lock with my own proc.
3742 */
 /* NOTE(review): the hash_update_hash_key(...) call line was dropped here. */
3744 proclock,
3745 &proclocktag))
3746 elog(PANIC, "duplicate entry found while reassigning a prepared transaction's locks");
3747
3748 /* Re-link into the new proc's proclock list */
3749 dlist_push_tail(&newproc->myProcLocks[partition], &proclock->procLink);
3750
3751 PROCLOCK_PRINT("PostPrepare_Locks: updated", proclock);
3752 } /* loop over PROCLOCKs within this partition */
3753
3755 } /* loop over partitions */
3756
3758}
3759
3760
3761/*
3762 * GetLockStatusData - Return a summary of the lock manager's internal
3763 * status, for use in a user-level reporting function.
3764 *
3765 * The return data consists of an array of LockInstanceData objects,
3766 * which are a lightly abstracted version of the PROCLOCK data structures,
3767 * i.e. there is one entry for each unique lock and interested PGPROC.
3768 * It is the caller's responsibility to match up related items (such as
3769 * references to the same lockable object or PGPROC) if wanted.
3770 *
3771 * The design goal is to hold the LWLocks for as short a time as possible;
3772 * thus, this function simply makes a copy of the necessary data and releases
3773 * the locks, allowing the caller to contemplate and format the data for as
3774 * long as it pleases.
3775 */
3776LockData *
3778{
 /*
  * NOTE(review): doxygen extraction dropped hyperlinked lines in this
  * listing (the GetLockStatusData(void) signature, several local
  * declarations such as the LockInstanceData pointer, the palloc of
  * data/data->locks, the LWLockAcquire calls, the hash_get_num_entries
  * and hash_seq_init calls).  Confirm against the real lock.c.
  */
3779 LockData *data;
3780 PROCLOCK *proclock;
3782 int els;
3783 int el;
3784 int i;
3785
3787
3788 /* Guess how much space we'll need. */
3789 els = MaxBackends;
3790 el = 0;
3792
3793 /*
3794 * First, we iterate through the per-backend fast-path arrays, locking
3795 * them one at a time. This might produce an inconsistent picture of the
3796 * system state, but taking all of those LWLocks at the same time seems
3797 * impractical (in particular, note MAX_SIMUL_LWLOCKS). It shouldn't
3798 * matter too much, because none of these locks can be involved in lock
3799 * conflicts anyway - anything that might must be present in the main lock
3800 * table. (For the same reason, we don't sweat about making leaderPid
3801 * completely valid. We cannot safely dereference another backend's
3802 * lockGroupLeader field without holding all lock partition locks, and
3803 * it's not worth that.)
3804 */
3805 for (i = 0; i < ProcGlobal->allProcCount; ++i)
3806 {
3807 PGPROC *proc = GetPGProcByNumber(i);
3808
3809 /* Skip backends with pid=0, as they don't hold fast-path locks */
3810 if (proc->pid == 0)
3811 continue;
3812
3814
3815 for (uint32 g = 0; g < FastPathLockGroupsPerBackend; g++)
3816 {
3817 /* Skip groups without registered fast-path locks */
3818 if (proc->fpLockBits[g] == 0)
3819 continue;
3820
3821 for (int j = 0; j < FP_LOCK_SLOTS_PER_GROUP; j++)
3822 {
3824 uint32 f = FAST_PATH_SLOT(g, j);
3826
3827 /* Skip unallocated slots */
3828 if (!lockbits)
3829 continue;
3830
 /* Grow the output array in MaxBackends-sized increments as needed. */
3831 if (el >= els)
3832 {
3833 els += MaxBackends;
3834 data->locks = (LockInstanceData *)
3835 repalloc(data->locks, sizeof(LockInstanceData) * els);
3836 }
3837
3838 instance = &data->locks[el];
3840 proc->fpRelId[f]);
3842 instance->waitLockMode = NoLock;
3843 instance->vxid.procNumber = proc->vxid.procNumber;
3844 instance->vxid.localTransactionId = proc->vxid.lxid;
3845 instance->pid = proc->pid;
3846 instance->leaderPid = proc->pid;
3847 instance->fastpath = true;
3848
3849 /*
3850 * Successfully taking fast path lock means there were no
3851 * conflicting locks.
3852 */
3853 instance->waitStart = 0;
3854
3855 el++;
3856 }
3857 }
3858
 /* The fast-path VXID lock, if held, is reported as one more entry. */
3859 if (proc->fpVXIDLock)
3860 {
3863
3864 if (el >= els)
3865 {
3866 els += MaxBackends;
3867 data->locks = (LockInstanceData *)
3868 repalloc(data->locks, sizeof(LockInstanceData) * els);
3869 }
3870
3871 vxid.procNumber = proc->vxid.procNumber;
3873
3874 instance = &data->locks[el];
3876 instance->holdMask = LOCKBIT_ON(ExclusiveLock);
3877 instance->waitLockMode = NoLock;
3878 instance->vxid.procNumber = proc->vxid.procNumber;
3879 instance->vxid.localTransactionId = proc->vxid.lxid;
3880 instance->pid = proc->pid;
3881 instance->leaderPid = proc->pid;
3882 instance->fastpath = true;
3883 instance->waitStart = 0;
3884
3885 el++;
3886 }
3887
3888 LWLockRelease(&proc->fpInfoLock);
3889 }
3890
3891 /*
3892 * Next, acquire lock on the entire shared lock data structure. We do
3893 * this so that, at least for locks in the primary lock table, the state
3894 * will be self-consistent.
3895 *
3896 * Since this is a read-only operation, we take shared instead of
3897 * exclusive lock. There's not a whole lot of point to this, because all
3898 * the normal operations require exclusive lock, but it doesn't hurt
3899 * anything either. It will at least allow two backends to do
3900 * GetLockStatusData in parallel.
3901 *
3902 * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
3903 */
3904 for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
3906
3907 /* Now we can safely count the number of proclocks */
3909 if (data->nelements > els)
3910 {
3911 els = data->nelements;
3912 data->locks = (LockInstanceData *)
3913 repalloc(data->locks, sizeof(LockInstanceData) * els);
3914 }
3915
3916 /* Now scan the tables to copy the data */
3918
3919 while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
3920 {
3921 PGPROC *proc = proclock->tag.myProc;
3922 LOCK *lock = proclock->tag.myLock;
3923 LockInstanceData *instance = &data->locks[el];
3924
3925 memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
3926 instance->holdMask = proclock->holdMask;
3927 if (proc->waitLock == proclock->tag.myLock)
3928 instance->waitLockMode = proc->waitLockMode;
3929 else
3930 instance->waitLockMode = NoLock;
3931 instance->vxid.procNumber = proc->vxid.procNumber;
3932 instance->vxid.localTransactionId = proc->vxid.lxid;
3933 instance->pid = proc->pid;
3934 instance->leaderPid = proclock->groupLeader->pid;
3935 instance->fastpath = false;
3936 instance->waitStart = (TimestampTz) pg_atomic_read_u64(&proc->waitStart);
3937
3938 el++;
3939 }
3940
3941 /*
3942 * And release locks. We do this in reverse order for two reasons: (1)
3943 * Anyone else who needs more than one of the locks will be trying to lock
3944 * them in increasing order; we don't want to release the other process
3945 * until it can get all the locks it needs. (2) This avoids O(N^2)
3946 * behavior inside LWLockRelease.
3947 */
3948 for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
3950
3951 Assert(el == data->nelements);
3952
3953 return data;
3954}
3955
3956/*
3957 * GetBlockerStatusData - Return a summary of the lock manager's state
3958 * concerning locks that are blocking the specified PID or any member of
3959 * the PID's lock group, for use in a user-level reporting function.
3960 *
3961 * For each PID within the lock group that is awaiting some heavyweight lock,
3962 * the return data includes an array of LockInstanceData objects, which are
3963 * the same data structure used by GetLockStatusData; but unlike that function,
3964 * this one reports only the PROCLOCKs associated with the lock that that PID
3965 * is blocked on. (Hence, all the locktags should be the same for any one
3966 * blocked PID.) In addition, we return an array of the PIDs of those backends
3967 * that are ahead of the blocked PID in the lock's wait queue. These can be
3968 * compared with the PIDs in the LockInstanceData objects to determine which
3969 * waiters are ahead of or behind the blocked PID in the queue.
3970 *
3971 * If blocked_pid isn't a valid backend PID or nothing in its lock group is
3972 * waiting on any heavyweight lock, return empty arrays.
3973 *
3974 * The design goal is to hold the LWLocks for as short a time as possible;
3975 * thus, this function simply makes a copy of the necessary data and releases
3976 * the locks, allowing the caller to contemplate and format the data for as
3977 * long as it pleases.
3978 */
3981{
 /*
  * NOTE(review): the signature line (GetBlockerStatusData(int blocked_pid)
  * returning BlockedProcsData *, per the header comment above) and several
  * hyperlinked lines (the data palloc, LWLockAcquire(ProcArrayLock),
  * BackendPidGetProcWithLock, the GetSingleProcBlockerStatusData calls,
  * and the lock releases) were dropped by doxygen extraction.
  */
3983 PGPROC *proc;
3984 int i;
3985
3987
3988 /*
3989 * Guess how much space we'll need, and preallocate. Most of the time
3990 * this will avoid needing to do repalloc while holding the LWLocks. (We
3991 * assume, but check with an Assert, that MaxBackends is enough entries
3992 * for the procs[] array; the other two could need enlargement, though.)
3993 */
3994 data->nprocs = data->nlocks = data->npids = 0;
3995 data->maxprocs = data->maxlocks = data->maxpids = MaxBackends;
3996 data->procs = palloc_array(BlockedProcData, data->maxprocs);
3997 data->locks = palloc_array(LockInstanceData, data->maxlocks);
3998 data->waiter_pids = palloc_array(int, data->maxpids);
3999
4000 /*
4001 * In order to search the ProcArray for blocked_pid and assume that that
4002 * entry won't immediately disappear under us, we must hold ProcArrayLock.
4003 * In addition, to examine the lock grouping fields of any other backend,
4004 * we must hold all the hash partition locks. (Only one of those locks is
4005 * actually relevant for any one lock group, but we can't know which one
4006 * ahead of time.) It's fairly annoying to hold all those locks
4007 * throughout this, but it's no worse than GetLockStatusData(), and it
4008 * does have the advantage that we're guaranteed to return a
4009 * self-consistent instantaneous state.
4010 */
4012
4014
4015 /* Nothing to do if it's gone */
4016 if (proc != NULL)
4017 {
4018 /*
4019 * Acquire lock on the entire shared lock data structure. See notes
4020 * in GetLockStatusData().
4021 */
4022 for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
4024
4025 if (proc->lockGroupLeader == NULL)
4026 {
4027 /* Easy case, proc is not a lock group member */
4029 }
4030 else
4031 {
4032 /* Examine all procs in proc's lock group */
4033 dlist_iter iter;
4034
4036 {
4038
4039 memberProc = dlist_container(PGPROC, lockGroupLink, iter.cur);
4041 }
4042 }
4043
4044 /*
4045 * And release locks. See notes in GetLockStatusData().
4046 */
4047 for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
4049
4050 Assert(data->nprocs <= data->maxprocs);
4051 }
4052
4054
4055 return data;
4056}
4057
4058/* Accumulate data about one possibly-blocked proc for GetBlockerStatusData */
4059static void
4061{
 /*
  * NOTE(review): the parameter line (PGPROC *blocked_proc,
  * BlockedProcsData *data, judging from the uses below) and a few
  * declarations (bproc, instance, waitQueue, the wait-queue iterator)
  * were dropped by doxygen extraction.
  */
4062 LOCK *theLock = blocked_proc->waitLock;
4067 int queue_size;
4068
4069 /* Nothing to do if this proc is not blocked */
4070 if (theLock == NULL)
4071 return;
4072
4073 /* Set up a procs[] element */
4074 bproc = &data->procs[data->nprocs++];
4075 bproc->pid = blocked_proc->pid;
4076 bproc->first_lock = data->nlocks;
4077 bproc->first_waiter = data->npids;
4078
4079 /*
4080 * We may ignore the proc's fast-path arrays, since nothing in those could
4081 * be related to a contended lock.
4082 */
4083
4084 /* Collect all PROCLOCKs associated with theLock */
4085 dlist_foreach(proclock_iter, &theLock->procLocks)
4086 {
4087 PROCLOCK *proclock =
4088 dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
4089 PGPROC *proc = proclock->tag.myProc;
4090 LOCK *lock = proclock->tag.myLock;
4092
 /* Grow locks[] in MaxBackends-sized increments as needed. */
4093 if (data->nlocks >= data->maxlocks)
4094 {
4095 data->maxlocks += MaxBackends;
4096 data->locks = (LockInstanceData *)
4097 repalloc(data->locks, sizeof(LockInstanceData) * data->maxlocks);
4098 }
4099
4100 instance = &data->locks[data->nlocks];
4101 memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
4102 instance->holdMask = proclock->holdMask;
4103 if (proc->waitLock == lock)
4104 instance->waitLockMode = proc->waitLockMode;
4105 else
4106 instance->waitLockMode = NoLock;
4107 instance->vxid.procNumber = proc->vxid.procNumber;
4108 instance->vxid.localTransactionId = proc->vxid.lxid;
4109 instance->pid = proc->pid;
4110 instance->leaderPid = proclock->groupLeader->pid;
4111 instance->fastpath = false;
4112 data->nlocks++;
4113 }
4114
4115 /* Enlarge waiter_pids[] if it's too small to hold all wait queue PIDs */
4116 waitQueue = &(theLock->waitProcs);
4117 queue_size = dclist_count(waitQueue);
4118
4119 if (queue_size > data->maxpids - data->npids)
4120 {
4121 data->maxpids = Max(data->maxpids + MaxBackends,
4122 data->npids + queue_size);
4123 data->waiter_pids = (int *) repalloc(data->waiter_pids,
4124 sizeof(int) * data->maxpids);
4125 }
4126
4127 /* Collect PIDs from the lock's wait queue, stopping at blocked_proc */
4129 {
4131
4133 break;
4134 data->waiter_pids[data->npids++] = queued_proc->pid;
4135 }
4136
 /* Record how many locks[]/waiter_pids[] entries belong to this proc. */
4137 bproc->num_locks = data->nlocks - bproc->first_lock;
4138 bproc->num_waiters = data->npids - bproc->first_waiter;
4139}
4140
4141/*
4142 * Returns a list of currently held AccessExclusiveLocks, for use by
4143 * LogStandbySnapshot(). The result is a palloc'd array,
4144 * with the number of elements returned into *nlocks.
4145 *
4146 * XXX This currently takes a lock on all partitions of the lock table,
4147 * but it's possible to do better. By reference counting locks and storing
4148 * the value in the ProcArray entry for each backend we could tell if any
4149 * locks need recording without having to acquire the partition locks and
4150 * scan the lock table. Whether that's worth the additional overhead
4151 * is pretty dubious though.
4152 */
4155{
 /*
  * NOTE(review): the signature (GetRunningTransactionLocks(int *nlocks),
  * per the header comment and the final assignment below) and several
  * hyperlinked lines (the accessExclusiveLocks declaration/palloc, the
  * LWLockAcquire calls, hash_get_num_entries, hash_seq_init, and the
  * dbOid/relOid assignments) were dropped by doxygen extraction.
  */
4157 PROCLOCK *proclock;
4159 int i;
4160 int index;
4161 int els;
4162
4163 /*
4164 * Acquire lock on the entire shared lock data structure.
4165 *
4166 * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
4167 */
4168 for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
4170
4171 /* Now we can safely count the number of proclocks */
4173
4174 /*
4175 * Allocating enough space for all locks in the lock table is overkill,
4176 * but it's more convenient and faster than having to enlarge the array.
4177 */
4179
4180 /* Now scan the tables to copy the data */
4182
4183 /*
4184 * If lock is a currently granted AccessExclusiveLock then it will have
4185 * just one proclock holder, so locks are never accessed twice in this
4186 * particular case. Don't copy this code for use elsewhere because in the
4187 * general case this will give you duplicate locks when looking at
4188 * non-exclusive lock types.
4189 */
4190 index = 0;
4191 while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
4192 {
4193 /* make sure this definition matches the one used in LockAcquire */
4194 if ((proclock->holdMask & LOCKBIT_ON(AccessExclusiveLock)) &&
4196 {
4197 PGPROC *proc = proclock->tag.myProc;
4198 LOCK *lock = proclock->tag.myLock;
4199 TransactionId xid = proc->xid;
4200
4201 /*
4202 * Don't record locks for transactions if we know they have
4203 * already issued their WAL record for commit but not yet released
4204 * lock. It is still possible that we see locks held by already
4205 * complete transactions, if they haven't yet zeroed their xids.
4206 */
4207 if (!TransactionIdIsValid(xid))
4208 continue;
4209
4210 accessExclusiveLocks[index].xid = xid;
4213
4214 index++;
4215 }
4216 }
4217
4218 Assert(index <= els);
4219
4220 /*
4221 * And release locks. We do this in reverse order for two reasons: (1)
4222 * Anyone else who needs more than one of the locks will be trying to lock
4223 * them in increasing order; we don't want to release the other process
4224 * until it can get all the locks it needs. (2) This avoids O(N^2)
4225 * behavior inside LWLockRelease.
4226 */
4227 for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
4229
4230 *nlocks = index;
4231 return accessExclusiveLocks;
4232}
4233
4234/* Provide the textual name of any lock mode */
4235const char *
4242
4243#ifdef LOCK_DEBUG
4244/*
4245 * Dump all locks in the given proc's myProcLocks lists.
4246 *
4247 * Caller is responsible for having acquired appropriate LWLocks.
4248 */
4249void
4250DumpLocks(PGPROC *proc)
4251{
4252 int i;
4253
4254 if (proc == NULL)
4255 return;
4256
4257 if (proc->waitLock)
4258 LOCK_PRINT("DumpLocks: waiting on", proc->waitLock, 0);
4259
4260 for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
4261 {
4262 dlist_head *procLocks = &proc->myProcLocks[i];
4263 dlist_iter iter;
4264
4265 dlist_foreach(iter, procLocks)
4266 {
4267 PROCLOCK *proclock = dlist_container(PROCLOCK, procLink, iter.cur);
4268 LOCK *lock = proclock->tag.myLock;
4269
4270 Assert(proclock->tag.myProc == proc);
4271 PROCLOCK_PRINT("DumpLocks", proclock);
4272 LOCK_PRINT("DumpLocks", lock, 0);
4273 }
4274 }
4275}
4276
4277/*
4278 * Dump all lmgr locks.
4279 *
4280 * Caller is responsible for having acquired appropriate LWLocks.
4281 */
4282void
4283DumpAllLocks(void)
4284{
4285 PGPROC *proc;
4286 PROCLOCK *proclock;
4287 LOCK *lock;
4288 HASH_SEQ_STATUS status;
4289
4290 proc = MyProc;
4291
4292 if (proc && proc->waitLock)
4293 LOCK_PRINT("DumpAllLocks: waiting on", proc->waitLock, 0);
4294
4296
4297 while ((proclock = (PROCLOCK *) hash_seq_search(&status)) != NULL)
4298 {
4299 PROCLOCK_PRINT("DumpAllLocks", proclock);
4300
4301 lock = proclock->tag.myLock;
4302 if (lock)
4303 LOCK_PRINT("DumpAllLocks", lock, 0);
4304 else
4305 elog(LOG, "DumpAllLocks: proclock->tag.myLock = NULL");
4306 }
4307}
4308#endif /* LOCK_DEBUG */
4309
4310/*
4311 * LOCK 2PC resource manager's routines
4312 */
4313
4314/*
4315 * Re-acquire a lock belonging to a transaction that was prepared.
4316 *
4317 * Because this function is run at db startup, re-acquiring the locks should
4318 * never conflict with running transactions because there are none. We
4319 * assume that the lock state represented by the stored 2PC files is legal.
4320 *
4321 * When switching from Hot Standby mode to normal operation, the locks will
4322 * be already held by the startup process. The locks are acquired for the new
4323 * procs without checking for conflicts, so we don't get a conflict between the
4324 * startup process and the dummy procs, even though we will momentarily have
4325 * a situation where two procs are holding the same AccessExclusiveLock,
4326 * which isn't normally possible because the conflict. If we're in standby
4327 * mode, but a recovery snapshot hasn't been established yet, it's possible
4328 * that some but not all of the locks are already held by the startup process.
4329 *
4330 * This approach is simple, but also a bit dangerous, because if there isn't
4331 * enough shared memory to acquire the locks, an error will be thrown, which
4332 * is promoted to FATAL and recovery will abort, bringing down postmaster.
4333 * A safer approach would be to transfer the locks like we do in
4334 * AtPrepare_Locks, but then again, in hot standby mode it's possible for
4335 * read-only backends to use up all the shared lock memory anyway, so that
4336 * replaying the WAL record that needs to acquire a lock will throw an error
4337 * and PANIC anyway.
4338 */
4339void
4341 void *recdata, uint32 len)
4342{
 /*
  * NOTE(review): doxygen extraction dropped hyperlinked lines here,
  * including the first signature line (FullTransactionId fxid, uint16
  * info, judging by the fxid use below), the TwoPhaseLockRecord *rec
  * cast of recdata, the lockmethodid/lockMethodTable lookup, the
  * partition LWLockAcquire/Release, and the hash_search_with_hash_value
  * call headers.  Confirm against the real lock.c.
  */
4344 PGPROC *proc = TwoPhaseGetDummyProc(fxid, false);
4345 LOCKTAG *locktag;
4346 LOCKMODE lockmode;
4348 LOCK *lock;
4349 PROCLOCK *proclock;
4351 bool found;
4352 uint32 hashcode;
4354 int partition;
4357
4358 Assert(len == sizeof(TwoPhaseLockRecord));
4359 locktag = &rec->locktag;
4360 lockmode = rec->lockmode;
4362
4364 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4366
4367 hashcode = LockTagHashCode(locktag);
4368 partition = LockHashPartition(hashcode);
4370
4372
4373 /*
4374 * Find or create a lock with this tag.
4375 */
4377 locktag,
4378 hashcode,
4380 &found);
4381 if (!lock)
4382 {
4384 ereport(ERROR,
4386 errmsg("out of shared memory"),
4387 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
4388 }
4389
4390 /*
4391 * if it's a new lock object, initialize it
4392 */
4393 if (!found)
4394 {
4395 lock->grantMask = 0;
4396 lock->waitMask = 0;
4397 dlist_init(&lock->procLocks);
4398 dclist_init(&lock->waitProcs);
4399 lock->nRequested = 0;
4400 lock->nGranted = 0;
4401 MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
4402 MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
4403 LOCK_PRINT("lock_twophase_recover: new", lock, lockmode);
4404 }
4405 else
4406 {
4407 LOCK_PRINT("lock_twophase_recover: found", lock, lockmode);
4408 Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
4409 Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
4410 Assert(lock->nGranted <= lock->nRequested);
4411 }
4412
4413 /*
4414 * Create the hash key for the proclock table.
4415 */
4416 proclocktag.myLock = lock;
4417 proclocktag.myProc = proc;
4418
4420
4421 /*
4422 * Find or create a proclock entry with this tag
4423 */
4425 &proclocktag,
4428 &found);
4429 if (!proclock)
4430 {
4431 /* Oops, not enough shmem for the proclock */
4432 if (lock->nRequested == 0)
4433 {
4434 /*
4435 * There are no other requestors of this lock, so garbage-collect
4436 * the lock object. We *must* do this to avoid a permanent leak
4437 * of shared memory, because there won't be anything to cause
4438 * anyone to release the lock object later.
4439 */
4442 &(lock->tag),
4443 hashcode,
4445 NULL))
4446 elog(PANIC, "lock table corrupted");
4447 }
4449 ereport(ERROR,
4451 errmsg("out of shared memory"),
4452 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
4453 }
4454
4455 /*
4456 * If new, initialize the new entry
4457 */
4458 if (!found)
4459 {
4460 Assert(proc->lockGroupLeader == NULL);
4461 proclock->groupLeader = proc;
4462 proclock->holdMask = 0;
4463 proclock->releaseMask = 0;
4464 /* Add proclock to appropriate lists */
4465 dlist_push_tail(&lock->procLocks, &proclock->lockLink);
4467 &proclock->procLink);
4468 PROCLOCK_PRINT("lock_twophase_recover: new", proclock);
4469 }
4470 else
4471 {
4472 PROCLOCK_PRINT("lock_twophase_recover: found", proclock);
4473 Assert((proclock->holdMask & ~lock->grantMask) == 0);
4474 }
4475
4476 /*
4477 * lock->nRequested and lock->requested[] count the total number of
4478 * requests, whether granted or waiting, so increment those immediately.
4479 */
4480 lock->nRequested++;
4481 lock->requested[lockmode]++;
4482 Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
4483
4484 /*
4485 * We shouldn't already hold the desired lock.
4486 */
4487 if (proclock->holdMask & LOCKBIT_ON(lockmode))
4488 elog(ERROR, "lock %s on object %u/%u/%u is already held",
4489 lockMethodTable->lockModeNames[lockmode],
4490 lock->tag.locktag_field1, lock->tag.locktag_field2,
4491 lock->tag.locktag_field3);
4492
4493 /*
4494 * We ignore any possible conflicts and just grant ourselves the lock. Not
4495 * only because we don't bother, but also to avoid deadlocks when
4496 * switching from standby to normal mode. See function comment.
4497 */
4498 GrantLock(lock, proclock, lockmode);
4499
4500 /*
4501 * Bump strong lock count, to make sure any fast-path lock requests won't
4502 * be granted without consulting the primary lock table.
4503 */
4504 if (ConflictsWithRelationFastPath(&lock->tag, lockmode))
4505 {
4507
4511 }
4512
4514}
4515
4516/*
4517 * Re-acquire a lock belonging to a transaction that was prepared, when
4518 * starting up into hot standby mode.
4519 */
4520void
4522 void *recdata, uint32 len)
4523{
 /*
  * NOTE(review): the first signature line, the TwoPhaseLockRecord *rec
  * cast, the lockmethodid extraction, and the
  * StandbyAcquireAccessExclusiveLock call header were dropped by
  * doxygen extraction.
  */
4525 LOCKTAG *locktag;
4526 LOCKMODE lockmode;
4528
4529 Assert(len == sizeof(TwoPhaseLockRecord));
4530 locktag = &rec->locktag;
4531 lockmode = rec->lockmode;
4533
4535 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4536
 /* Only relation-level AccessExclusiveLocks matter during standby recovery. */
4537 if (lockmode == AccessExclusiveLock &&
4538 locktag->locktag_type == LOCKTAG_RELATION)
4539 {
4541 locktag->locktag_field1 /* dboid */ ,
4542 locktag->locktag_field2 /* reloid */ );
4543 }
4544}
4545
4546
4547/*
4548 * 2PC processing routine for COMMIT PREPARED case.
4549 *
4550 * Find and release the lock indicated by the 2PC record.
4551 */
4552void
4554 void *recdata, uint32 len)
4555{
 /*
  * Release the lock named in a 2PC record on COMMIT PREPARED; the actual
  * release goes through LockRefindAndRelease against the prepared
  * transaction's dummy PGPROC.
  *
  * NOTE(review): the first signature line and the rec/lockmethodid/
  * lockMethodTable setup lines were dropped by doxygen extraction.
  */
4557 PGPROC *proc = TwoPhaseGetDummyProc(fxid, true);
4558 LOCKTAG *locktag;
4561
4562 Assert(len == sizeof(TwoPhaseLockRecord));
4563 locktag = &rec->locktag;
4565
4567 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4569
4570 LockRefindAndRelease(lockMethodTable, proc, locktag, rec->lockmode, true);
4571}
4572
4573/*
4574 * 2PC processing routine for ROLLBACK PREPARED case.
4575 *
4576 * This is actually just the same as the COMMIT case.
4577 */
4578void
4584
4585/*
4586 * VirtualXactLockTableInsert
4587 *
4588 * Take vxid lock via the fast-path. There can't be any pre-existing
4589 * lockers, as we haven't advertised this vxid via the ProcArray yet.
4590 *
4591 * Since MyProc->fpLocalTransactionId will normally contain the same data
4592 * as MyProc->vxid.lxid, you might wonder if we really need both. The
4593 * difference is that MyProc->vxid.lxid is set and cleared unlocked, and
4594 * examined by procarray.c, while fpLocalTransactionId is protected by
4595 * fpInfoLock and is used only by the locking subsystem. Doing it this
4596 * way makes it easier to verify that there are no funny race conditions.
4597 *
4598 * We don't bother recording this lock in the local lock table, since it's
4599 * only ever released at the end of a transaction. Instead,
4600 * LockReleaseAll() calls VirtualXactLockTableCleanup().
4601 */
4602void
4618
4619/*
4620 * VirtualXactLockTableCleanup
4621 *
4622 * Check whether a VXID lock has been materialized; if so, release it,
4623 * unblocking waiters.
4624 */
4625void
4627{
 /*
  * NOTE(review): the signature line (VirtualXactLockTableCleanup(void),
  * per the header comment), the Assert, the fpInfoLock
  * LWLockAcquire/Release pair, the lxid save/clear lines, and the
  * LockRefindAndRelease call header were dropped by doxygen extraction.
  */
4628 bool fastpath;
4629 LocalTransactionId lxid;
4630
4632
4633 /*
4634 * Clean up shared memory state.
4635 */
4637
 /* Capture, then clear, the fast-path VXID lock state under fpInfoLock. */
4638 fastpath = MyProc->fpVXIDLock;
4640 MyProc->fpVXIDLock = false;
4642
4644
4645 /*
4646 * If fpVXIDLock has been cleared without touching fpLocalTransactionId,
4647 * that means someone transferred the lock to the main lock table.
4648 */
4649 if (!fastpath && LocalTransactionIdIsValid(lxid))
4650 {
4652 LOCKTAG locktag;
4653
4654 vxid.procNumber = MyProcNumber;
4655 vxid.localTransactionId = lxid;
4656 SET_LOCKTAG_VIRTUALTRANSACTION(locktag, vxid);
4657
4659 &locktag, ExclusiveLock, false);
4660 }
4661}
4662
4663/*
4664 * XactLockForVirtualXact
4665 *
4666 * If TransactionIdIsValid(xid), this is essentially XactLockTableWait(xid,
4667 * NULL, NULL, XLTW_None) or ConditionalXactLockTableWait(xid). Unlike those
4668 * functions, it assumes "xid" is never a subtransaction and that "xid" is
4669 * prepared, committed, or aborted.
4670 *
4671 * If !TransactionIdIsValid(xid), this locks every prepared XID having been
4672 * known as "vxid" before its PREPARE TRANSACTION.
4673 */
4674static bool
4676 TransactionId xid, bool wait)
4677{
 /*
  * NOTE(review): the first signature line (taking the
  * VirtualTransactionId vxid used below), the LockAcquireResult lar
  * declaration, the xid reset inside the "more" branch, and the
  * LOCKACQUIRE_NOT_AVAIL check before "return false" were dropped by
  * doxygen extraction.
  */
4678 bool more = false;
4679
4680 /* There is no point to wait for 2PCs if you have no 2PCs. */
4681 if (max_prepared_xacts == 0)
4682 return true;
4683
 /* Loop because one VXID may map to several prepared XIDs over time. */
4684 do
4685 {
4687 LOCKTAG tag;
4688
4689 /* Clear state from previous iterations. */
4690 if (more)
4691 {
4693 more = false;
4694 }
4695
4696 /* If we have no xid, try to find one. */
4697 if (!TransactionIdIsValid(xid))
4698 xid = TwoPhaseGetXidByVirtualXID(vxid, &more);
4699 if (!TransactionIdIsValid(xid))
4700 {
4701 Assert(!more);
4702 return true;
4703 }
4704
4705 /* Check or wait for XID completion. */
4706 SET_LOCKTAG_TRANSACTION(tag, xid);
4707 lar = LockAcquire(&tag, ShareLock, false, !wait);
4709 return false;
4710 LockRelease(&tag, ShareLock, false);
4711 } while (more);
4712
4713 return true;
4714}
4715
4716/*
4717 * VirtualXactLock
4718 *
4719 * If wait = true, wait as long as the given VXID or any XID acquired by the
4720 * same transaction is still running. Then, return true.
4721 *
4722 * If wait = false, just check whether that VXID or one of those XIDs is still
4723 * running, and return true or false.
4724 */
4725bool
4727{
 /*
  * NOTE(review): doxygen extraction dropped the signature line
  * (VirtualXactLock(VirtualTransactionId vxid, bool wait), per the
  * header comment), the TransactionId xid declaration, the
  * SET_LOCKTAG_VIRTUALTRANSACTION setup, the fpInfoLock
  * LWLockAcquire, and the SetupLockInTable call header.
  */
4728 LOCKTAG tag;
4729 PGPROC *proc;
4731
4733
4735 /* no vxid lock; localTransactionId is a normal, locked XID */
4736 return XactLockForVirtualXact(vxid, vxid.localTransactionId, wait);
4737
4739
4740 /*
4741 * If a lock table entry must be made, this is the PGPROC on whose behalf
4742 * it must be done. Note that the transaction might end or the PGPROC
4743 * might be reassigned to a new backend before we get around to examining
4744 * it, but it doesn't matter. If we find upon examination that the
4745 * relevant lxid is no longer running here, that's enough to prove that
4746 * it's no longer running anywhere.
4747 */
4748 proc = ProcNumberGetProc(vxid.procNumber);
4749 if (proc == NULL)
4750 return XactLockForVirtualXact(vxid, InvalidTransactionId, wait);
4751
4752 /*
4753 * We must acquire this lock before checking the procNumber and lxid
4754 * against the ones we're waiting for. The target backend will only set
4755 * or clear lxid while holding this lock.
4756 */
4758
4759 if (proc->vxid.procNumber != vxid.procNumber
4761 {
4762 /* VXID ended */
4763 LWLockRelease(&proc->fpInfoLock);
4764 return XactLockForVirtualXact(vxid, InvalidTransactionId, wait);
4765 }
4766
4767 /*
4768 * If we aren't asked to wait, there's no need to set up a lock table
4769 * entry. The transaction is still in progress, so just return false.
4770 */
4771 if (!wait)
4772 {
4773 LWLockRelease(&proc->fpInfoLock);
4774 return false;
4775 }
4776
4777 /*
4778 * OK, we're going to need to sleep on the VXID. But first, we must set
4779 * up the primary lock table entry, if needed (ie, convert the proc's
4780 * fast-path lock on its VXID to a regular lock).
4781 */
4782 if (proc->fpVXIDLock)
4783 {
4784 PROCLOCK *proclock;
4785 uint32 hashcode;
4787
4788 hashcode = LockTagHashCode(&tag);
4789
4792
4794 &tag, hashcode, ExclusiveLock);
4795 if (!proclock)
4796 {
4798 LWLockRelease(&proc->fpInfoLock);
4799 ereport(ERROR,
4801 errmsg("out of shared memory"),
4802 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
4803 }
4804 GrantLock(proclock->tag.myLock, proclock, ExclusiveLock);
4805
4807
 /* The fast-path copy is now superseded by the main-table entry. */
4808 proc->fpVXIDLock = false;
4809 }
4810
4811 /*
4812 * If the proc has an XID now, we'll avoid a TwoPhaseGetXidByVirtualXID()
4813 * search. The proc might have assigned this XID but not yet locked it,
4814 * in which case the proc will lock this XID before releasing the VXID.
4815 * The fpInfoLock critical section excludes VirtualXactLockTableCleanup(),
4816 * so we won't save an XID of a different VXID. It doesn't matter whether
4817 * we save this before or after setting up the primary lock table entry.
4818 */
4819 xid = proc->xid;
4820
4821 /* Done with proc->fpLockBits */
4822 LWLockRelease(&proc->fpInfoLock);
4823
4824 /* Time to wait. */
4825 (void) LockAcquire(&tag, ShareLock, false, false);
4826
4827 LockRelease(&tag, ShareLock, false);
4828 return XactLockForVirtualXact(vxid, xid, wait);
4829}
4830
4831/*
4832 * LockWaiterCount
4833 *
4834 * Find the number of lock requester on this locktag
4835 */
4836int
4838{
 /*
  * Count lock requesters (granted plus waiting) on the given locktag by
  * looking the LOCK up in the shared hash table; returns 0 if no such
  * lock object exists.
  *
  * NOTE(review): the signature line (LockWaiterCount(const LOCKTAG
  * *locktag), per the header comment), the lockmethodid check, the
  * partition-lock acquire/release, and the
  * hash_search_with_hash_value call header were dropped by doxygen
  * extraction.
  */
4840 LOCK *lock;
4841 bool found;
4842 uint32 hashcode;
4844 int waiters = 0;
4845
4847 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4848
4849 hashcode = LockTagHashCode(locktag);
4852
4854 locktag,
4855 hashcode,
4856 HASH_FIND,
4857 &found);
4858 if (found)
4859 {
4860 Assert(lock != NULL);
 /* nRequested counts granted and waiting requests alike. */
4861 waiters = lock->nRequested;
4862 }
4864
4865 return waiters;
4866}
static uint64 pg_atomic_read_u64(volatile pg_atomic_uint64 *ptr)
Definition atomics.h:467
#define Max(x, y)
Definition c.h:1085
#define Assert(condition)
Definition c.h:943
int64_t int64
Definition c.h:621
uint16_t uint16
Definition c.h:623
uint32_t uint32
Definition c.h:624
#define lengthof(array)
Definition c.h:873
uint32 LocalTransactionId
Definition c.h:738
#define MemSet(start, val, len)
Definition c.h:1107
uint32 TransactionId
Definition c.h:736
size_t Size
Definition c.h:689
uint32 result
memcpy(sums, checksumBaseOffsets, sizeof(checksumBaseOffsets))
int64 TimestampTz
Definition timestamp.h:39
void DeadLockReport(void)
Definition deadlock.c:1075
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition dynahash.c:889
HTAB * hash_create(const char *tabname, int64 nelem, const HASHCTL *info, int flags)
Definition dynahash.c:360
void hash_destroy(HTAB *hashp)
Definition dynahash.c:802
void * hash_search_with_hash_value(HTAB *hashp, const void *keyPtr, uint32 hashvalue, HASHACTION action, bool *foundPtr)
Definition dynahash.c:902
void * hash_seq_search(HASH_SEQ_STATUS *status)
Definition dynahash.c:1352
int64 hash_get_num_entries(HTAB *hashp)
Definition dynahash.c:1273
bool hash_update_hash_key(HTAB *hashp, void *existingEntry, const void *newKeyPtr)
Definition dynahash.c:1077
uint32 get_hash_value(HTAB *hashp, const void *keyPtr)
Definition dynahash.c:845
void hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp)
Definition dynahash.c:1317
Datum arg
Definition elog.c:1322
ErrorContextCallback * error_context_stack
Definition elog.c:99
int errcode(int sqlerrcode)
Definition elog.c:874
#define LOG
Definition elog.h:32
#define PG_RE_THROW()
Definition elog.h:407
#define errcontext
Definition elog.h:200
int errhint(const char *fmt,...) pg_attribute_printf(1
#define PG_TRY(...)
Definition elog.h:374
#define WARNING
Definition elog.h:37
#define PG_END_TRY(...)
Definition elog.h:399
#define PANIC
Definition elog.h:44
#define ERROR
Definition elog.h:40
#define PG_CATCH(...)
Definition elog.h:384
#define elog(elevel,...)
Definition elog.h:228
#define ereport(elevel,...)
Definition elog.h:152
int int int int errdetail_log_plural(const char *fmt_singular, const char *fmt_plural, unsigned long n,...) pg_attribute_printf(1
#define palloc_object(type)
Definition fe_memutils.h:74
#define palloc_array(type, count)
Definition fe_memutils.h:76
#define palloc0_array(type, count)
Definition fe_memutils.h:77
int MyProcPid
Definition globals.c:49
ProcNumber MyProcNumber
Definition globals.c:92
int MaxBackends
Definition globals.c:149
@ HASH_FIND
Definition hsearch.h:108
@ HASH_REMOVE
Definition hsearch.h:110
@ HASH_ENTER
Definition hsearch.h:109
@ HASH_ENTER_NULL
Definition hsearch.h:111
#define HASH_CONTEXT
Definition hsearch.h:97
#define HASH_ELEM
Definition hsearch.h:90
#define HASH_FUNCTION
Definition hsearch.h:93
#define HASH_BLOBS
Definition hsearch.h:92
#define HASH_PARTITION
Definition hsearch.h:87
#define dlist_foreach(iter, lhead)
Definition ilist.h:623
static void dlist_init(dlist_head *head)
Definition ilist.h:314
static void dlist_delete(dlist_node *node)
Definition ilist.h:405
static uint32 dclist_count(const dclist_head *head)
Definition ilist.h:932
static bool dclist_is_empty(const dclist_head *head)
Definition ilist.h:682
static bool dlist_node_is_detached(const dlist_node *node)
Definition ilist.h:525
#define dlist_foreach_modify(iter, lhead)
Definition ilist.h:640
static bool dlist_is_empty(const dlist_head *head)
Definition ilist.h:336
static void dlist_push_tail(dlist_head *head, dlist_node *node)
Definition ilist.h:364
static void dclist_delete_from_thoroughly(dclist_head *head, dlist_node *node)
Definition ilist.h:776
static void dclist_init(dclist_head *head)
Definition ilist.h:671
#define dlist_container(type, membername, ptr)
Definition ilist.h:593
#define dclist_foreach(iter, lhead)
Definition ilist.h:970
int j
Definition isn.c:78
int i
Definition isn.c:77
void DescribeLockTag(StringInfo buf, const LOCKTAG *tag)
Definition lmgr.c:1249
static bool XactLockForVirtualXact(VirtualTransactionId vxid, TransactionId xid, bool wait)
Definition lock.c:4675
LockAcquireResult LockAcquire(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock, bool dontWait)
Definition lock.c:807
static LOCALLOCK * awaitedLock
Definition lock.c:339
static void RemoveLocalLock(LOCALLOCK *locallock)
Definition lock.c:1485
static void LockReassignOwner(LOCALLOCK *locallock, ResourceOwner parent)
Definition lock.c:2745
bool LockHeldByMe(const LOCKTAG *locktag, LOCKMODE lockmode, bool orstronger)
Definition lock.c:641
static bool Dummy_trace
Definition lock.c:125
static const char *const lock_mode_names[]
Definition lock.c:111
void lock_twophase_postabort(FullTransactionId fxid, uint16 info, void *recdata, uint32 len)
Definition lock.c:4579
#define LOCK_PRINT(where, lock, type)
Definition lock.c:416
void PostPrepare_Locks(FullTransactionId fxid)
Definition lock.c:3581
void lock_twophase_standby_recover(FullTransactionId fxid, uint16 info, void *recdata, uint32 len)
Definition lock.c:4521
bool DoLockModesConflict(LOCKMODE mode1, LOCKMODE mode2)
Definition lock.c:621
static PROCLOCK * SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc, const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode)
Definition lock.c:1292
static PROCLOCK * FastPathGetRelationLockEntry(LOCALLOCK *locallock)
Definition lock.c:2967
const ShmemCallbacks LockManagerShmemCallbacks
Definition lock.c:320
void VirtualXactLockTableInsert(VirtualTransactionId vxid)
Definition lock.c:4603
#define NLOCKENTS()
Definition lock.c:59
#define FastPathStrongLockHashPartition(hashcode)
Definition lock.c:306
static uint32 ProcLockHashCode(const PROCLOCKTAG *proclocktag, uint32 hashcode)
Definition lock.c:603
#define FAST_PATH_CHECK_LOCKMODE(proc, n, l)
Definition lock.c:259
void GrantAwaitedLock(void)
Definition lock.c:1898
int LockWaiterCount(const LOCKTAG *locktag)
Definition lock.c:4837
void AtPrepare_Locks(void)
Definition lock.c:3485
bool LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
Definition lock.c:2111
#define FAST_PATH_LOCKNUMBER_OFFSET
Definition lock.c:245
#define FAST_PATH_REL_GROUP(rel)
Definition lock.c:220
void InitLockManagerAccess(void)
Definition lock.c:503
void GrantLock(LOCK *lock, PROCLOCK *proclock, LOCKMODE lockmode)
Definition lock.c:1667
void VirtualXactLockTableCleanup(void)
Definition lock.c:4626
bool VirtualXactLock(VirtualTransactionId vxid, bool wait)
Definition lock.c:4726
VirtualTransactionId * GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
Definition lock.c:3078
static volatile FastPathStrongRelationLockData * FastPathStrongRelationLocks
Definition lock.c:315
void RemoveFromWaitQueue(PGPROC *proc, uint32 hashcode)
Definition lock.c:2055
LockAcquireResult LockAcquireExtended(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock, bool dontWait, bool reportMemoryError, LOCALLOCK **locallockp, bool logLockFailure)
Definition lock.c:834
void LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
Definition lock.c:2316
#define FAST_PATH_SLOT(group, index)
Definition lock.c:227
static void CheckAndSetLockHeld(LOCALLOCK *locallock, bool acquired)
Definition lock.c:1473
#define ConflictsWithRelationFastPath(locktag, mode)
Definition lock.c:276
void ResetAwaitedLock(void)
Definition lock.c:1916
static bool FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag, uint32 hashcode)
Definition lock.c:2870
static HTAB * LockMethodLocalHash
Definition lock.c:334
void LockReassignCurrentOwner(LOCALLOCK **locallocks, int nlocks)
Definition lock.c:2715
static bool UnGrantLock(LOCK *lock, LOCKMODE lockmode, PROCLOCK *proclock, LockMethod lockMethodTable)
Definition lock.c:1690
#define FAST_PATH_SET_LOCKMODE(proc, n, l)
Definition lock.c:255
#define PROCLOCK_PRINT(where, proclockP)
Definition lock.c:417
static void CleanUpLock(LOCK *lock, PROCLOCK *proclock, LockMethod lockMethodTable, uint32 hashcode, bool wakeupNeeded)
Definition lock.c:1747
static uint32 proclock_hash(const void *key, Size keysize)
Definition lock.c:572
static bool FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode)
Definition lock.c:2834
void AbortStrongLockAcquire(void)
Definition lock.c:1869
static bool FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode)
Definition lock.c:2791
static int FastPathLocalUseCounts[FP_LOCK_GROUPS_PER_BACKEND_MAX]
Definition lock.c:179
static HTAB * LockMethodLockHash
Definition lock.c:332
static ResourceOwner awaitedOwner
Definition lock.c:340
BlockedProcsData * GetBlockerStatusData(int blocked_pid)
Definition lock.c:3980
static ProcWaitStatus WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner)
Definition lock.c:1941
bool LockHasWaiters(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
Definition lock.c:694
const char * GetLockmodeName(LOCKMETHODID lockmethodid, LOCKMODE mode)
Definition lock.c:4236
static void GetSingleProcBlockerStatusData(PGPROC *blocked_proc, BlockedProcsData *data)
Definition lock.c:4060
#define FAST_PATH_CLEAR_LOCKMODE(proc, n, l)
Definition lock.c:257
void lock_twophase_postcommit(FullTransactionId fxid, uint16 info, void *recdata, uint32 len)
Definition lock.c:4553
int max_locks_per_xact
Definition lock.c:56
static const LockMethod LockMethods[]
Definition lock.c:153
static void waitonlock_error_callback(void *arg)
Definition lock.c:2029
void LockReleaseCurrentOwner(LOCALLOCK **locallocks, int nlocks)
Definition lock.c:2620
LOCALLOCK * GetAwaitedLock(void)
Definition lock.c:1907
void LockReleaseSession(LOCKMETHODID lockmethodid)
Definition lock.c:2590
void MarkLockClear(LOCALLOCK *locallock)
Definition lock.c:1929
LockData * GetLockStatusData(void)
Definition lock.c:3777
static bool IsRelationExtensionLockHeld PG_USED_FOR_ASSERTS_ONLY
Definition lock.c:194
static const LockMethodData default_lockmethod
Definition lock.c:128
#define FAST_PATH_GET_BITS(proc, n)
Definition lock.c:248
static LOCALLOCK * StrongLockInProgress
Definition lock.c:338
#define FAST_PATH_BITS_PER_SLOT
Definition lock.c:244
static const LockMethodData user_lockmethod
Definition lock.c:139
int FastPathLockGroupsPerBackend
Definition lock.c:205
#define EligibleForRelationFastPath(locktag, mode)
Definition lock.c:270
uint32 LockTagHashCode(const LOCKTAG *locktag)
Definition lock.c:555
static void BeginStrongLockAcquire(LOCALLOCK *locallock, uint32 fasthashcode)
Definition lock.c:1833
bool LockCheckConflicts(LockMethod lockMethodTable, LOCKMODE lockmode, LOCK *lock, PROCLOCK *proclock)
Definition lock.c:1538
static void LockManagerShmemRequest(void *arg)
Definition lock.c:451
void lock_twophase_recover(FullTransactionId fxid, uint16 info, void *recdata, uint32 len)
Definition lock.c:4340
static void GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner)
Definition lock.c:1801
static const LOCKMASK LockConflicts[]
Definition lock.c:68
static void ReleaseLockIfHeld(LOCALLOCK *locallock, bool sessionLock)
Definition lock.c:2655
LockMethod GetLocksMethodTable(const LOCK *lock)
Definition lock.c:525
static void FinishStrongLockAcquire(void)
Definition lock.c:1859
#define FAST_PATH_STRONG_LOCK_HASH_PARTITIONS
Definition lock.c:304
xl_standby_lock * GetRunningTransactionLocks(int *nlocks)
Definition lock.c:4154
static void LockRefindAndRelease(LockMethod lockMethodTable, PGPROC *proc, LOCKTAG *locktag, LOCKMODE lockmode, bool decrement_strong_lock_count)
Definition lock.c:3293
static void CheckForSessionAndXactLocks(void)
Definition lock.c:3397
static HTAB * LockMethodProcLockHash
Definition lock.c:333
static void LockManagerShmemInit(void *arg)
Definition lock.c:494
bool log_lock_failures
Definition lock.c:57
LockMethod GetLockTagsMethodTable(const LOCKTAG *locktag)
Definition lock.c:537
#define LOCK_LOCKTAG(lock)
Definition lock.h:156
#define VirtualTransactionIdIsValid(vxid)
Definition lock.h:70
#define LockHashPartitionLock(hashcode)
Definition lock.h:357
#define GET_VXID_FROM_PGPROC(vxid_dst, proc)
Definition lock.h:80
#define LOCK_LOCKMETHOD(lock)
Definition lock.h:155
#define LOCKBIT_OFF(lockmode)
Definition lock.h:88
#define LOCALLOCK_LOCKMETHOD(llock)
Definition lock.h:274
#define InvalidLocalTransactionId
Definition lock.h:68
#define MAX_LOCKMODES
Definition lock.h:85
#define LOCKBIT_ON(lockmode)
Definition lock.h:87
#define LocalTransactionIdIsValid(lxid)
Definition lock.h:69
#define LOCALLOCK_LOCKTAG(llock)
Definition lock.h:275
#define LockHashPartition(hashcode)
Definition lock.h:355
#define VirtualTransactionIdEquals(vxid1, vxid2)
Definition lock.h:74
#define PROCLOCK_LOCKMETHOD(proclock)
Definition lock.h:213
#define LockHashPartitionLockByIndex(i)
Definition lock.h:360
LockAcquireResult
Definition lock.h:331
@ LOCKACQUIRE_ALREADY_CLEAR
Definition lock.h:335
@ LOCKACQUIRE_OK
Definition lock.h:333
@ LOCKACQUIRE_ALREADY_HELD
Definition lock.h:334
@ LOCKACQUIRE_NOT_AVAIL
Definition lock.h:332
#define VirtualTransactionIdIsRecoveredPreparedXact(vxid)
Definition lock.h:72
int LOCKMODE
Definition lockdefs.h:26
#define NoLock
Definition lockdefs.h:34
#define AccessExclusiveLock
Definition lockdefs.h:43
#define ShareRowExclusiveLock
Definition lockdefs.h:41
#define AccessShareLock
Definition lockdefs.h:36
int LOCKMASK
Definition lockdefs.h:25
#define ShareUpdateExclusiveLock
Definition lockdefs.h:39
#define ExclusiveLock
Definition lockdefs.h:42
#define RowShareLock
Definition lockdefs.h:37
#define ShareLock
Definition lockdefs.h:40
#define MaxLockMode
Definition lockdefs.h:45
#define RowExclusiveLock
Definition lockdefs.h:38
uint16 LOCKMETHODID
Definition locktag.h:22
#define DEFAULT_LOCKMETHOD
Definition locktag.h:25
#define SET_LOCKTAG_VIRTUALTRANSACTION(locktag, vxid)
Definition locktag.h:135
@ LOCKTAG_OBJECT
Definition locktag.h:45
@ LOCKTAG_RELATION_EXTEND
Definition locktag.h:38
@ LOCKTAG_RELATION
Definition locktag.h:37
@ LOCKTAG_TUPLE
Definition locktag.h:41
@ LOCKTAG_VIRTUALTRANSACTION
Definition locktag.h:43
#define SET_LOCKTAG_TRANSACTION(locktag, xid)
Definition locktag.h:126
#define SET_LOCKTAG_RELATION(locktag, dboid, reloid)
Definition locktag.h:81
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
Definition lwlock.c:1150
void LWLockRelease(LWLock *lock)
Definition lwlock.c:1767
#define NUM_LOCK_PARTITIONS
Definition lwlock.h:87
#define LOG2_NUM_LOCK_PARTITIONS
Definition lwlock.h:86
@ LW_SHARED
Definition lwlock.h:105
@ LW_EXCLUSIVE
Definition lwlock.h:104
void * MemoryContextAlloc(MemoryContext context, Size size)
Definition mcxt.c:1232
void * repalloc(void *pointer, Size size)
Definition mcxt.c:1632
void pfree(void *pointer)
Definition mcxt.c:1616
MemoryContext TopMemoryContext
Definition mcxt.c:166
void * palloc(Size size)
Definition mcxt.c:1387
MemoryContext CurrentMemoryContext
Definition mcxt.c:160
#define START_CRIT_SECTION()
Definition miscadmin.h:152
#define END_CRIT_SECTION()
Definition miscadmin.h:154
static char * errmsg
static PgChecksumMode mode
const void size_t len
const void * data
static char buf[DEFAULT_XLOG_SEG_SIZE]
void pgstat_count_lock_fastpath_exceeded(uint8 locktag_type)
static uint32 DatumGetUInt32(Datum X)
Definition postgres.h:222
static Datum PointerGetDatum(const void *X)
Definition postgres.h:342
uint64_t Datum
Definition postgres.h:70
unsigned int Oid
static int fb(int x)
#define FP_LOCK_GROUPS_PER_BACKEND_MAX
Definition proc.h:95
#define FastPathLockSlotsPerBackend()
Definition proc.h:97
#define GetPGProcByNumber(n)
Definition proc.h:504
#define FP_LOCK_SLOTS_PER_GROUP
Definition proc.h:96
ProcWaitStatus
Definition proc.h:144
@ PROC_WAIT_STATUS_OK
Definition proc.h:145
@ PROC_WAIT_STATUS_WAITING
Definition proc.h:146
@ PROC_WAIT_STATUS_ERROR
Definition proc.h:147
PGPROC * BackendPidGetProcWithLock(int pid)
Definition procarray.c:3192
PGPROC * ProcNumberGetProc(ProcNumber procNumber)
Definition procarray.c:3111
#define INVALID_PROC_NUMBER
Definition procnumber.h:26
void set_ps_display_remove_suffix(void)
Definition ps_status.c:440
void set_ps_display_suffix(const char *suffix)
Definition ps_status.c:388
void ResourceOwnerRememberLock(ResourceOwner owner, LOCALLOCK *locallock)
Definition resowner.c:1059
ResourceOwner ResourceOwnerGetParent(ResourceOwner owner)
Definition resowner.c:902
ResourceOwner CurrentResourceOwner
Definition resowner.c:173
void ResourceOwnerForgetLock(ResourceOwner owner, LOCALLOCK *locallock)
Definition resowner.c:1079
#define ShmemRequestHash(...)
Definition shmem.h:179
#define ShmemRequestStruct(...)
Definition shmem.h:176
static void SpinLockRelease(volatile slock_t *lock)
Definition spin.h:62
static void SpinLockAcquire(volatile slock_t *lock)
Definition spin.h:56
static void SpinLockInit(volatile slock_t *lock)
Definition spin.h:50
ProcWaitStatus JoinWaitQueue(LOCALLOCK *locallock, LockMethod lockMethodTable, bool dontWait)
Definition proc.c:1146
PGPROC * MyProc
Definition proc.c:71
void GetLockHoldersAndWaiters(LOCALLOCK *locallock, StringInfo lock_holders_sbuf, StringInfo lock_waiters_sbuf, int *lockHoldersNum)
Definition proc.c:1941
ProcWaitStatus ProcSleep(LOCALLOCK *locallock)
Definition proc.c:1315
void ProcLockWakeup(LockMethod lockMethodTable, LOCK *lock)
Definition proc.c:1776
PROC_HDR * ProcGlobal
Definition proc.c:74
void LogAccessExclusiveLockPrepare(void)
Definition standby.c:1471
void StandbyAcquireAccessExclusiveLock(TransactionId xid, Oid dbOid, Oid relOid)
Definition standby.c:988
void LogAccessExclusiveLock(Oid dbOid, Oid relOid)
Definition standby.c:1454
void initStringInfo(StringInfo str)
Definition stringinfo.c:97
struct ErrorContextCallback * previous
Definition elog.h:299
uint32 count[FAST_PATH_STRONG_LOCK_HASH_PARTITIONS]
Definition lock.c:312
Size keysize
Definition hsearch.h:69
Size entrysize
Definition hsearch.h:70
Size keysize
Definition dynahash.c:241
int64 nLocks
Definition lock.h:254
struct ResourceOwnerData * owner
Definition lock.h:253
uint8 locktag_type
Definition locktag.h:70
uint32 locktag_field3
Definition locktag.h:68
uint32 locktag_field1
Definition locktag.h:66
uint8 locktag_lockmethodid
Definition locktag.h:71
uint16 locktag_field4
Definition locktag.h:69
uint32 locktag_field2
Definition locktag.h:67
Definition lock.h:140
int nRequested
Definition lock.h:150
LOCKTAG tag
Definition lock.h:142
int requested[MAX_LOCKMODES]
Definition lock.h:149
dclist_head waitProcs
Definition lock.h:148
int granted[MAX_LOCKMODES]
Definition lock.h:151
LOCKMASK grantMask
Definition lock.h:145
LOCKMASK waitMask
Definition lock.h:146
int nGranted
Definition lock.h:152
dlist_head procLocks
Definition lock.h:147
const bool * trace_flag
Definition lock.h:116
const char *const * lockModeNames
Definition lock.h:115
Definition proc.h:179
LWLock fpInfoLock
Definition proc.h:324
LocalTransactionId lxid
Definition proc.h:231
PROCLOCK * waitProcLock
Definition proc.h:306
dlist_head lockGroupMembers
Definition proc.h:299
Oid * fpRelId
Definition proc.h:326
Oid databaseId
Definition proc.h:201
uint64 * fpLockBits
Definition proc.h:325
pg_atomic_uint64 waitStart
Definition proc.h:311
bool fpVXIDLock
Definition proc.h:327
ProcNumber procNumber
Definition proc.h:226
int pid
Definition proc.h:197
struct PGPROC::@136 vxid
LOCK * waitLock
Definition proc.h:304
TransactionId xid
Definition proc.h:237
LOCKMODE waitLockMode
Definition proc.h:307
dlist_node waitLink
Definition proc.h:305
PGPROC * lockGroupLeader
Definition proc.h:298
LocalTransactionId fpLocalTransactionId
Definition proc.h:328
dlist_head myProcLocks[NUM_LOCK_PARTITIONS]
Definition proc.h:321
ProcWaitStatus waitStatus
Definition proc.h:314
LOCK * myLock
Definition lock.h:196
PGPROC * myProc
Definition lock.h:197
LOCKMASK holdMask
Definition lock.h:207
dlist_node lockLink
Definition lock.h:209
PGPROC * groupLeader
Definition lock.h:206
LOCKMASK releaseMask
Definition lock.h:208
PROCLOCKTAG tag
Definition lock.h:203
dlist_node procLink
Definition lock.h:210
uint32 allProcCount
Definition proc.h:459
ShmemRequestCallback request_fn
Definition shmem.h:133
LOCKTAG locktag
Definition lock.c:163
LOCKMODE lockmode
Definition lock.c:164
LocalTransactionId localTransactionId
Definition lock.h:65
ProcNumber procNumber
Definition lock.h:64
dlist_node * cur
Definition ilist.h:179
Definition type.h:96
#define InvalidTransactionId
Definition transam.h:31
#define XidFromFullTransactionId(x)
Definition transam.h:48
#define FirstNormalObjectId
Definition transam.h:197
#define TransactionIdIsValid(xid)
Definition transam.h:41
void RegisterTwoPhaseRecord(TwoPhaseRmgrId rmid, uint16 info, const void *data, uint32 len)
Definition twophase.c:1277
int max_prepared_xacts
Definition twophase.c:118
TransactionId TwoPhaseGetXidByVirtualXID(VirtualTransactionId vxid, bool *have_more)
Definition twophase.c:862
PGPROC * TwoPhaseGetDummyProc(FullTransactionId fxid, bool lock_held)
Definition twophase.c:929
#define TWOPHASE_RM_LOCK_ID
const char * type
const char * name
bool RecoveryInProgress(void)
Definition xlog.c:6830
#define XLogStandbyInfoActive()
Definition xlog.h:126
bool InRecovery
Definition xlogutils.c:50
#define InHotStandby
Definition xlogutils.h:60