/*
 * NOTE(review): the following is the source of src/backend/storage/lmgr/lock.c;
 * doxygen viewer chrome ("Loading...", "Searching...", etc.) removed.
 */
/*-------------------------------------------------------------------------
 *
 * lock.c
 *	  POSTGRES primary lock mechanism
 *
 * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  src/backend/storage/lmgr/lock.c
 *
 * NOTES
 *	  A lock table is a shared memory hash table.  When
 *	  a process tries to acquire a lock of a type that conflicts
 *	  with existing locks, it is put to sleep using the routines
 *	  in storage/lmgr/proc.c.
 *
 *	  For the most part, this code should be invoked via lmgr.c
 *	  or another lock-management module, not directly.
 *
 *	  Interface:
 *
 *	  LockManagerShmemInit(), GetLocksMethodTable(), GetLockTagsMethodTable(),
 *	  LockAcquire(), LockRelease(), LockReleaseAll(),
 *	  LockCheckConflicts(), GrantLock()
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include <signal.h>
#include <unistd.h>

#include "access/transam.h"
#include "access/twophase.h"
#include "access/twophase_rmgr.h"
#include "access/xlog.h"
#include "access/xlogutils.h"
#include "miscadmin.h"
#include "pg_trace.h"
#include "pgstat.h"
#include "storage/lmgr.h"
#include "storage/proc.h"
#include "storage/procarray.h"
#include "storage/shmem.h"
#include "storage/spin.h"
#include "storage/standby.h"
#include "storage/subsystems.h"
#include "utils/memutils.h"
#include "utils/ps_status.h"
#include "utils/resowner.h"
54
55/* GUC variables */
56int max_locks_per_xact; /* used to set the lock table size */
57bool log_lock_failures = false;
58
59#define NLOCKENTS() \
60 mul_size(max_locks_per_xact, add_size(MaxBackends, max_prepared_xacts))
61
62
63/*
64 * Data structures defining the semantics of the standard lock methods.
65 *
66 * The conflict table defines the semantics of the various lock modes.
67 */
68static const LOCKMASK LockConflicts[] = {
69 0,
70
71 /* AccessShareLock */
73
74 /* RowShareLock */
76
77 /* RowExclusiveLock */
80
81 /* ShareUpdateExclusiveLock */
85
86 /* ShareLock */
90
91 /* ShareRowExclusiveLock */
95
96 /* ExclusiveLock */
101
102 /* AccessExclusiveLock */
107
108};
109
/* Names of lock modes, for debug printouts; indexed by LOCKMODE (0 invalid) */
static const char *const lock_mode_names[] =
{
	"INVALID",
	"AccessShareLock",
	"RowShareLock",
	"RowExclusiveLock",
	"ShareUpdateExclusiveLock",
	"ShareLock",
	"ShareRowExclusiveLock",
	"ExclusiveLock",
	"AccessExclusiveLock"
};
123
124#ifndef LOCK_DEBUG
125static bool Dummy_trace = false;
126#endif
127
132#ifdef LOCK_DEBUG
134#else
136#endif
137};
138
143#ifdef LOCK_DEBUG
145#else
147#endif
148};
149
150/*
151 * map from lock method id to the lock table data structures
152 */
153static const LockMethod LockMethods[] = {
154 NULL,
157};
158
159
160/* Record that's written to 2PC state file when a lock is persisted */
166
167
168/*
169 * Count of the number of fast path lock slots we believe to be used. This
170 * might be higher than the real number if another backend has transferred
171 * our locks to the primary lock table, but it can never be lower than the
172 * real value, since only we can acquire locks on our own behalf.
173 *
174 * XXX Allocate a static array of the maximum size. We could use a pointer
175 * and then allocate just the right size to save a couple kB, but then we
176 * would have to initialize that, while for the static array that happens
177 * automatically. Doesn't seem worth the extra complexity.
178 */
180
181/*
182 * Flag to indicate if the relation extension lock is held by this backend.
183 * This flag is used to ensure that while holding the relation extension lock
184 * we don't try to acquire a heavyweight lock on any other object. This
185 * restriction implies that the relation extension lock won't ever participate
186 * in the deadlock cycle because we can never wait for any other heavyweight
187 * lock after acquiring this lock.
188 *
189 * Such a restriction is okay for relation extension locks as unlike other
190 * heavyweight locks these are not held till the transaction end. These are
191 * taken for a short duration to extend a particular relation and then
192 * released.
193 */
195
/*
 * Number of fast-path locks per backend - size of the arrays in PGPROC.
 * This is set only once during start, before initializing shared memory,
 * and remains constant after that.
 *
 * We set the limit based on max_locks_per_transaction GUC, because that's
 * the best information about expected number of locks per backend we have.
 * See InitializeFastPathLocks() for details.
 */
int			FastPathLockGroupsPerBackend = 0;
/*
 * Macros to calculate the fast-path group and index for a relation.
 *
 * The formula is a simple hash function, designed to spread the OIDs a bit,
 * so that even contiguous values end up in different groups. In most cases
 * there will be gaps anyway, but the multiplication should help a bit.
 *
 * The selected constant (49157) is a prime not too close to 2^k, and it's
 * small enough to not cause overflows (in 64-bit).
 *
 * We can assume that FastPathLockGroupsPerBackend is a power-of-two per
 * InitializeFastPathLocks().
 */
#define FAST_PATH_REL_GROUP(rel) \
	(((uint64) (rel) * 49157) & (FastPathLockGroupsPerBackend - 1))

/*
 * Given the group/slot indexes, calculate the slot index in the whole array
 * of fast-path lock slots.
 */
#define FAST_PATH_SLOT(group, index) \
	(AssertMacro((uint32) (group) < FastPathLockGroupsPerBackend), \
	 AssertMacro((uint32) (index) < FP_LOCK_SLOTS_PER_GROUP), \
	 ((group) * FP_LOCK_SLOTS_PER_GROUP + (index)))

/*
 * Given a slot index (into the whole per-backend array), calculated using
 * the FAST_PATH_SLOT macro, split it into group and index (in the group).
 */
#define FAST_PATH_GROUP(index)	\
	(AssertMacro((uint32) (index) < FastPathLockSlotsPerBackend()), \
	 ((index) / FP_LOCK_SLOTS_PER_GROUP))
#define FAST_PATH_INDEX(index)	\
	(AssertMacro((uint32) (index) < FastPathLockSlotsPerBackend()), \
	 ((index) % FP_LOCK_SLOTS_PER_GROUP))

/* Macros for manipulating proc->fpLockBits */
#define FAST_PATH_BITS_PER_SLOT			3
#define FAST_PATH_LOCKNUMBER_OFFSET		1
#define FAST_PATH_MASK					((1 << FAST_PATH_BITS_PER_SLOT) - 1)
#define FAST_PATH_BITS(proc, n)			(proc)->fpLockBits[FAST_PATH_GROUP(n)]
#define FAST_PATH_GET_BITS(proc, n) \
	((FAST_PATH_BITS(proc, n) >> (FAST_PATH_BITS_PER_SLOT * FAST_PATH_INDEX(n))) & FAST_PATH_MASK)
#define FAST_PATH_BIT_POSITION(n, l) \
	(AssertMacro((l) >= FAST_PATH_LOCKNUMBER_OFFSET), \
	 AssertMacro((l) < FAST_PATH_BITS_PER_SLOT+FAST_PATH_LOCKNUMBER_OFFSET), \
	 AssertMacro((n) < FastPathLockSlotsPerBackend()), \
	 ((l) - FAST_PATH_LOCKNUMBER_OFFSET + FAST_PATH_BITS_PER_SLOT * (FAST_PATH_INDEX(n))))
#define FAST_PATH_SET_LOCKMODE(proc, n, l) \
	 FAST_PATH_BITS(proc, n) |= UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)
#define FAST_PATH_CLEAR_LOCKMODE(proc, n, l) \
	 FAST_PATH_BITS(proc, n) &= ~(UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l))
#define FAST_PATH_CHECK_LOCKMODE(proc, n, l) \
	 (FAST_PATH_BITS(proc, n) & (UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)))
261
262/*
263 * The fast-path lock mechanism is concerned only with relation locks on
264 * unshared relations by backends bound to a database. The fast-path
265 * mechanism exists mostly to accelerate acquisition and release of locks
266 * that rarely conflict. Because ShareUpdateExclusiveLock is
267 * self-conflicting, it can't use the fast-path mechanism; but it also does
268 * not conflict with any of the locks that do, so we can ignore it completely.
269 */
270#define EligibleForRelationFastPath(locktag, mode) \
271 ((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
272 (locktag)->locktag_type == LOCKTAG_RELATION && \
273 (locktag)->locktag_field1 == MyDatabaseId && \
274 MyDatabaseId != InvalidOid && \
275 (mode) < ShareUpdateExclusiveLock)
276#define ConflictsWithRelationFastPath(locktag, mode) \
277 ((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
278 (locktag)->locktag_type == LOCKTAG_RELATION && \
279 (locktag)->locktag_field1 != InvalidOid && \
280 (mode) > ShareUpdateExclusiveLock)
281
282static bool FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode);
283static bool FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode);
285 const LOCKTAG *locktag, uint32 hashcode);
287
288/*
289 * To make the fast-path lock mechanism work, we must have some way of
290 * preventing the use of the fast-path when a conflicting lock might be present.
291 * We partition* the locktag space into FAST_PATH_STRONG_LOCK_HASH_PARTITIONS,
292 * and maintain an integer count of the number of "strong" lockers
293 * in each partition. When any "strong" lockers are present (which is
294 * hopefully not very often), the fast-path mechanism can't be used, and we
295 * must fall back to the slower method of pushing matching locks directly
296 * into the main lock tables.
297 *
298 * The deadlock detector does not know anything about the fast path mechanism,
299 * so any locks that might be involved in a deadlock must be transferred from
300 * the fast-path queues to the main lock table.
301 */
302
303#define FAST_PATH_STRONG_LOCK_HASH_BITS 10
304#define FAST_PATH_STRONG_LOCK_HASH_PARTITIONS \
305 (1 << FAST_PATH_STRONG_LOCK_HASH_BITS)
306#define FastPathStrongLockHashPartition(hashcode) \
307 ((hashcode) % FAST_PATH_STRONG_LOCK_HASH_PARTITIONS)
308
314
316
317static void LockManagerShmemRequest(void *arg);
318static void LockManagerShmemInit(void *arg);
319
324
325
326/*
327 * Pointers to hash tables containing lock state
328 *
329 * The LockMethodLockHash and LockMethodProcLockHash hash tables are in
330 * shared memory; LockMethodLocalHash is local to each backend.
331 */
335
336
337/* private state for error cleanup */
341
342
#ifdef LOCK_DEBUG

/*------
 * The following configuration options are available for lock debugging:
 *
 *	   TRACE_LOCKS		-- give a bunch of output what's going on in this file
 *	   TRACE_USERLOCKS	-- same but for user locks
 *	   TRACE_LOCK_OIDMIN-- do not trace locks for tables below this oid
 *						   (use to avoid output on system tables)
 *	   TRACE_LOCK_TABLE -- trace locks on this table (oid) unconditionally
 *	   DEBUG_DEADLOCKS	-- currently dumps locks at untimely occasions ;)
 *
 * Furthermore, but in storage/lmgr/lwlock.c:
 *	   TRACE_LWLOCKS	-- trace lightweight locks (pretty useless)
 *
 * Define LOCK_DEBUG at compile time to get all these enabled.
 * --------
 */

int			Trace_lock_oidmin = FirstNormalObjectId;
bool		Trace_locks = false;
bool		Trace_userlocks = false;
int			Trace_lock_table = 0;
bool		Debug_deadlocks = false;


/* Does this locktag qualify for trace output under the current settings? */
inline static bool
LOCK_DEBUG_ENABLED(const LOCKTAG *tag)
{
	return
		(*(LockMethods[tag->locktag_lockmethodid]->trace_flag) &&
		 ((Oid) tag->locktag_field2 >= (Oid) Trace_lock_oidmin))
		|| (Trace_lock_table &&
			(tag->locktag_field2 == Trace_lock_table));
}


inline static void
LOCK_PRINT(const char *where, const LOCK *lock, LOCKMODE type)
{
	if (LOCK_DEBUG_ENABLED(&lock->tag))
		elog(LOG,
			 "%s: lock(%p) id(%u,%u,%u,%u,%u,%u) grantMask(%x) "
			 "req(%d,%d,%d,%d,%d,%d,%d)=%d "
			 "grant(%d,%d,%d,%d,%d,%d,%d)=%d wait(%d) type(%s)",
			 where, lock,
			 lock->tag.locktag_field1, lock->tag.locktag_field2,
			 lock->tag.locktag_field3, lock->tag.locktag_field4,
			 lock->tag.locktag_type, lock->tag.locktag_lockmethodid,
			 lock->grantMask,
			 lock->requested[1], lock->requested[2], lock->requested[3],
			 lock->requested[4], lock->requested[5], lock->requested[6],
			 lock->requested[7], lock->nRequested,
			 lock->granted[1], lock->granted[2], lock->granted[3],
			 lock->granted[4], lock->granted[5], lock->granted[6],
			 lock->granted[7], lock->nGranted,
			 dclist_count(&lock->waitProcs),
			 LockMethods[LOCK_LOCKMETHOD(*lock)]->lockModeNames[type]);
}


inline static void
PROCLOCK_PRINT(const char *where, const PROCLOCK *proclockP)
{
	if (LOCK_DEBUG_ENABLED(&proclockP->tag.myLock->tag))
		elog(LOG,
			 "%s: proclock(%p) lock(%p) method(%u) proc(%p) hold(%x)",
			 where, proclockP, proclockP->tag.myLock,
			 PROCLOCK_LOCKMETHOD(*(proclockP)),
			 proclockP->tag.myProc, (int) proclockP->holdMask);
}
#else							/* not LOCK_DEBUG */

#define LOCK_PRINT(where, lock, type)  ((void) 0)
#define PROCLOCK_PRINT(where, proclockP)  ((void) 0)
#endif							/* not LOCK_DEBUG */
419
420
421static uint32 proclock_hash(const void *key, Size keysize);
424 const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode);
427static void FinishStrongLockAcquire(void);
429static void waitonlock_error_callback(void *arg);
432static bool UnGrantLock(LOCK *lock, LOCKMODE lockmode,
434static void CleanUpLock(LOCK *lock, PROCLOCK *proclock,
436 bool wakeupNeeded);
438 LOCKTAG *locktag, LOCKMODE lockmode,
442
443
444/*
445 * Register the lock manager's shmem data structures.
446 *
447 * In addition to this, each backend must also call InitLockManagerAccess() to
448 * create the locallock hash table.
449 */
450static void
452{
454
455 /*
456 * Compute sizes for lock hashtables.
457 */
459
460 /*
461 * Hash table for LOCK structs. This stores per-locked-object
462 * information.
463 */
464 ShmemRequestHash(.name = "LOCK hash",
465 .nelems = max_table_size,
466 .ptr = &LockMethodLockHash,
467 .hash_info.keysize = sizeof(LOCKTAG),
468 .hash_info.entrysize = sizeof(LOCK),
469 .hash_info.num_partitions = NUM_LOCK_PARTITIONS,
470 .hash_flags = HASH_ELEM | HASH_BLOBS | HASH_PARTITION,
471 );
472
473 /* Assume an average of 2 holders per lock */
474 max_table_size *= 2;
475
476 ShmemRequestHash(.name = "PROCLOCK hash",
477 .nelems = max_table_size,
479 .hash_info.keysize = sizeof(PROCLOCKTAG),
480 .hash_info.entrysize = sizeof(PROCLOCK),
481 .hash_info.hash = proclock_hash,
482 .hash_info.num_partitions = NUM_LOCK_PARTITIONS,
483 .hash_flags = HASH_ELEM | HASH_FUNCTION | HASH_PARTITION,
484 );
485
486 ShmemRequestStruct(.name = "Fast Path Strong Relation Lock Data",
487 .size = sizeof(FastPathStrongRelationLockData),
488 .ptr = (void **) (void *) &FastPathStrongRelationLocks,
489 );
490}
491
492static void
497
498/*
499 * Initialize the lock manager's backend-private data structures.
500 */
501void
503{
504 /*
505 * Allocate non-shared hash table for LOCALLOCK structs. This stores lock
506 * counts and resource owner information.
507 */
508 HASHCTL info;
509
510 info.keysize = sizeof(LOCALLOCKTAG);
511 info.entrysize = sizeof(LOCALLOCK);
512
513 LockMethodLocalHash = hash_create("LOCALLOCK hash",
514 16,
515 &info,
517}
518
519
520/*
521 * Fetch the lock method table associated with a given lock
522 */
531
532/*
533 * Fetch the lock method table associated with a given locktag
534 */
543
544
545/*
546 * Compute the hash code associated with a LOCKTAG.
547 *
548 * To avoid unnecessary recomputations of the hash code, we try to do this
549 * just once per function, and then pass it around as needed. Aside from
550 * passing the hashcode to hash_search_with_hash_value(), we can extract
551 * the lock partition number from the hashcode.
552 */
553uint32
555{
556 return get_hash_value(LockMethodLockHash, locktag);
557}
558
559/*
560 * Compute the hash code associated with a PROCLOCKTAG.
561 *
562 * Because we want to use just one set of partition locks for both the
563 * LOCK and PROCLOCK hash tables, we have to make sure that PROCLOCKs
564 * fall into the same partition number as their associated LOCKs.
565 * dynahash.c expects the partition number to be the low-order bits of
566 * the hash code, and therefore a PROCLOCKTAG's hash code must have the
567 * same low-order bits as the associated LOCKTAG's hash code. We achieve
568 * this with this specialized hash function.
569 */
570static uint32
571proclock_hash(const void *key, Size keysize)
572{
573 const PROCLOCKTAG *proclocktag = (const PROCLOCKTAG *) key;
576
577 Assert(keysize == sizeof(PROCLOCKTAG));
578
579 /* Look into the associated LOCK object, and compute its hash code */
580 lockhash = LockTagHashCode(&proclocktag->myLock->tag);
581
582 /*
583 * To make the hash code also depend on the PGPROC, we xor the proc
584 * struct's address into the hash code, left-shifted so that the
585 * partition-number bits don't change. Since this is only a hash, we
586 * don't care if we lose high-order bits of the address; use an
587 * intermediate variable to suppress cast-pointer-to-int warnings.
588 */
591
592 return lockhash;
593}
594
595/*
596 * Compute the hash code associated with a PROCLOCKTAG, given the hashcode
597 * for its underlying LOCK.
598 *
599 * We use this just to avoid redundant calls of LockTagHashCode().
600 */
601static inline uint32
603{
604 uint32 lockhash = hashcode;
606
607 /*
608 * This must match proclock_hash()!
609 */
612
613 return lockhash;
614}
615
616/*
617 * Given two lock modes, return whether they would conflict.
618 */
619bool
621{
623
624 if (lockMethodTable->conflictTab[mode1] & LOCKBIT_ON(mode2))
625 return true;
626
627 return false;
628}
629
630/*
631 * LockHeldByMe -- test whether lock 'locktag' is held by the current
632 * transaction
633 *
634 * Returns true if current transaction holds a lock on 'tag' of mode
635 * 'lockmode'. If 'orstronger' is true, a stronger lockmode is also OK.
636 * ("Stronger" is defined as "numerically higher", which is a bit
637 * semantically dubious but is OK for the purposes we use this for.)
638 */
639bool
640LockHeldByMe(const LOCKTAG *locktag,
641 LOCKMODE lockmode, bool orstronger)
642{
645
646 /*
647 * See if there is a LOCALLOCK entry for this lock and lockmode
648 */
649 MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
650 localtag.lock = *locktag;
651 localtag.mode = lockmode;
652
654 &localtag,
655 HASH_FIND, NULL);
656
657 if (locallock && locallock->nLocks > 0)
658 return true;
659
660 if (orstronger)
661 {
663
664 for (slockmode = lockmode + 1;
666 slockmode++)
667 {
668 if (LockHeldByMe(locktag, slockmode, false))
669 return true;
670 }
671 }
672
673 return false;
674}
675
#ifdef USE_ASSERT_CHECKING
/*
 * GetLockMethodLocalHash -- return the hash of local locks, for modules that
 *		evaluate assertions based on all locks held.
 */
HTAB *
GetLockMethodLocalHash(void)
{
	return LockMethodLocalHash;
}
#endif
687
688/*
689 * LockHasWaiters -- look up 'locktag' and check if releasing this
690 * lock would wake up other processes waiting for it.
691 */
692bool
693LockHasWaiters(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
694{
699 LOCK *lock;
700 PROCLOCK *proclock;
702 bool hasWaiters = false;
703
705 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
708 elog(ERROR, "unrecognized lock mode: %d", lockmode);
709
710#ifdef LOCK_DEBUG
711 if (LOCK_DEBUG_ENABLED(locktag))
712 elog(LOG, "LockHasWaiters: lock [%u,%u] %s",
713 locktag->locktag_field1, locktag->locktag_field2,
714 lockMethodTable->lockModeNames[lockmode]);
715#endif
716
717 /*
718 * Find the LOCALLOCK entry for this lock and lockmode
719 */
720 MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
721 localtag.lock = *locktag;
722 localtag.mode = lockmode;
723
725 &localtag,
726 HASH_FIND, NULL);
727
728 /*
729 * let the caller print its own error message, too. Do not ereport(ERROR).
730 */
731 if (!locallock || locallock->nLocks <= 0)
732 {
733 elog(WARNING, "you don't own a lock of type %s",
734 lockMethodTable->lockModeNames[lockmode]);
735 return false;
736 }
737
738 /*
739 * Check the shared lock table.
740 */
742
744
745 /*
746 * We don't need to re-find the lock or proclock, since we kept their
747 * addresses in the locallock table, and they couldn't have been removed
748 * while we were holding a lock on them.
749 */
750 lock = locallock->lock;
751 LOCK_PRINT("LockHasWaiters: found", lock, lockmode);
752 proclock = locallock->proclock;
753 PROCLOCK_PRINT("LockHasWaiters: found", proclock);
754
755 /*
756 * Double-check that we are actually holding a lock of the type we want to
757 * release.
758 */
759 if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
760 {
761 PROCLOCK_PRINT("LockHasWaiters: WRONGTYPE", proclock);
763 elog(WARNING, "you don't own a lock of type %s",
764 lockMethodTable->lockModeNames[lockmode]);
766 return false;
767 }
768
769 /*
770 * Do the checking.
771 */
772 if ((lockMethodTable->conflictTab[lockmode] & lock->waitMask) != 0)
773 hasWaiters = true;
774
776
777 return hasWaiters;
778}
779
780/*
781 * LockAcquire -- Check for lock conflicts, sleep if conflict found,
782 * set lock if/when no conflicts.
783 *
784 * Inputs:
785 * locktag: unique identifier for the lockable object
786 * lockmode: lock mode to acquire
787 * sessionLock: if true, acquire lock for session not current transaction
788 * dontWait: if true, don't wait to acquire lock
789 *
790 * Returns one of:
791 * LOCKACQUIRE_NOT_AVAIL lock not available, and dontWait=true
792 * LOCKACQUIRE_OK lock successfully acquired
793 * LOCKACQUIRE_ALREADY_HELD incremented count for lock already held
794 * LOCKACQUIRE_ALREADY_CLEAR incremented count for lock already clear
795 *
796 * In the normal case where dontWait=false and the caller doesn't need to
797 * distinguish a freshly acquired lock from one already taken earlier in
798 * this same transaction, there is no need to examine the return value.
799 *
800 * Side Effects: The lock is acquired and recorded in lock tables.
801 *
802 * NOTE: if we wait for the lock, there is no way to abort the wait
803 * short of aborting the transaction.
804 */
806LockAcquire(const LOCKTAG *locktag,
807 LOCKMODE lockmode,
808 bool sessionLock,
809 bool dontWait)
810{
811 return LockAcquireExtended(locktag, lockmode, sessionLock, dontWait,
812 true, NULL, false);
813}
814
815/*
816 * LockAcquireExtended - allows us to specify additional options
817 *
818 * reportMemoryError specifies whether a lock request that fills the lock
819 * table should generate an ERROR or not. Passing "false" allows the caller
820 * to attempt to recover from lock-table-full situations, perhaps by forcibly
821 * canceling other lock holders and then retrying. Note, however, that the
822 * return code for that is LOCKACQUIRE_NOT_AVAIL, so that it's unsafe to use
823 * in combination with dontWait = true, as the cause of failure couldn't be
824 * distinguished.
825 *
826 * If locallockp isn't NULL, *locallockp receives a pointer to the LOCALLOCK
827 * table entry if a lock is successfully acquired, or NULL if not.
828 *
829 * logLockFailure indicates whether to log details when a lock acquisition
830 * fails with dontWait = true.
831 */
834 LOCKMODE lockmode,
835 bool sessionLock,
836 bool dontWait,
839 bool logLockFailure)
840{
845 LOCK *lock;
846 PROCLOCK *proclock;
847 bool found;
848 ResourceOwner owner;
849 uint32 hashcode;
851 bool found_conflict;
853 bool log_lock = false;
854
856 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
859 elog(ERROR, "unrecognized lock mode: %d", lockmode);
860
861 if (RecoveryInProgress() && !InRecovery &&
862 (locktag->locktag_type == LOCKTAG_OBJECT ||
863 locktag->locktag_type == LOCKTAG_RELATION) &&
864 lockmode > RowExclusiveLock)
867 errmsg("cannot acquire lock mode %s on database objects while recovery is in progress",
868 lockMethodTable->lockModeNames[lockmode]),
869 errhint("Only RowExclusiveLock or less can be acquired on database objects during recovery.")));
870
871#ifdef LOCK_DEBUG
872 if (LOCK_DEBUG_ENABLED(locktag))
873 elog(LOG, "LockAcquire: lock [%u,%u] %s",
874 locktag->locktag_field1, locktag->locktag_field2,
875 lockMethodTable->lockModeNames[lockmode]);
876#endif
877
878 /* Identify owner for lock */
879 if (sessionLock)
880 owner = NULL;
881 else
882 owner = CurrentResourceOwner;
883
884 /*
885 * Find or create a LOCALLOCK entry for this lock and lockmode
886 */
887 MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
888 localtag.lock = *locktag;
889 localtag.mode = lockmode;
890
892 &localtag,
893 HASH_ENTER, &found);
894
895 /*
896 * if it's a new locallock object, initialize it
897 */
898 if (!found)
899 {
900 locallock->lock = NULL;
901 locallock->proclock = NULL;
902 locallock->hashcode = LockTagHashCode(&(localtag.lock));
903 locallock->nLocks = 0;
904 locallock->holdsStrongLockCount = false;
905 locallock->lockCleared = false;
906 locallock->numLockOwners = 0;
907 locallock->maxLockOwners = 8;
908 locallock->lockOwners = NULL; /* in case next line fails */
909 locallock->lockOwners = (LOCALLOCKOWNER *)
911 locallock->maxLockOwners * sizeof(LOCALLOCKOWNER));
912 }
913 else
914 {
915 /* Make sure there will be room to remember the lock */
916 if (locallock->numLockOwners >= locallock->maxLockOwners)
917 {
918 int newsize = locallock->maxLockOwners * 2;
919
920 locallock->lockOwners = (LOCALLOCKOWNER *)
921 repalloc(locallock->lockOwners,
922 newsize * sizeof(LOCALLOCKOWNER));
923 locallock->maxLockOwners = newsize;
924 }
925 }
926 hashcode = locallock->hashcode;
927
928 if (locallockp)
930
931 /*
932 * If we already hold the lock, we can just increase the count locally.
933 *
934 * If lockCleared is already set, caller need not worry about absorbing
935 * sinval messages related to the lock's object.
936 */
937 if (locallock->nLocks > 0)
938 {
940 if (locallock->lockCleared)
942 else
944 }
945
946 /*
947 * We don't acquire any other heavyweight lock while holding the relation
948 * extension lock. We do allow to acquire the same relation extension
949 * lock more than once but that case won't reach here.
950 */
952
953 /*
954 * Prepare to emit a WAL record if acquisition of this lock needs to be
955 * replayed in a standby server.
956 *
957 * Here we prepare to log; after lock is acquired we'll issue log record.
958 * This arrangement simplifies error recovery in case the preparation step
959 * fails.
960 *
961 * Only AccessExclusiveLocks can conflict with lock types that read-only
962 * transactions can acquire in a standby server. Make sure this definition
963 * matches the one in GetRunningTransactionLocks().
964 */
965 if (lockmode >= AccessExclusiveLock &&
966 locktag->locktag_type == LOCKTAG_RELATION &&
969 {
971 log_lock = true;
972 }
973
974 /*
975 * Attempt to take lock via fast path, if eligible. But if we remember
976 * having filled up the fast path array, we don't attempt to make any
977 * further use of it until we release some locks. It's possible that some
978 * other backend has transferred some of those locks to the shared hash
979 * table, leaving space free, but it's not worth acquiring the LWLock just
980 * to check. It's also possible that we're acquiring a second or third
981 * lock type on a relation we have already locked using the fast-path, but
982 * for now we don't worry about that case either.
983 */
984 if (EligibleForRelationFastPath(locktag, lockmode))
985 {
988 {
990 bool acquired;
991
992 /*
993 * LWLockAcquire acts as a memory sequencing point, so it's safe
994 * to assume that any strong locker whose increment to
995 * FastPathStrongRelationLocks->counts becomes visible after we
996 * test it has yet to begin to transfer fast-path locks.
997 */
1000 acquired = false;
1001 else
1003 lockmode);
1005 if (acquired)
1006 {
1007 /*
1008 * The locallock might contain stale pointers to some old
1009 * shared objects; we MUST reset these to null before
1010 * considering the lock to be acquired via fast-path.
1011 */
1012 locallock->lock = NULL;
1013 locallock->proclock = NULL;
1014 GrantLockLocal(locallock, owner);
1015 return LOCKACQUIRE_OK;
1016 }
1017 }
1018 else
1019 {
1020 /*
1021 * Increment the lock statistics counter if lock could not be
1022 * acquired via the fast-path.
1023 */
1024 pgstat_count_lock_fastpath_exceeded(locallock->tag.lock.locktag_type);
1025 }
1026 }
1027
1028 /*
1029 * If this lock could potentially have been taken via the fast-path by
1030 * some other backend, we must (temporarily) disable further use of the
1031 * fast-path for this lock tag, and migrate any locks already taken via
1032 * this method to the main lock table.
1033 */
1034 if (ConflictsWithRelationFastPath(locktag, lockmode))
1035 {
1037
1040 hashcode))
1041 {
1043 if (locallock->nLocks == 0)
1045 if (locallockp)
1046 *locallockp = NULL;
1048 ereport(ERROR,
1050 errmsg("out of shared memory"),
1051 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
1052 else
1053 return LOCKACQUIRE_NOT_AVAIL;
1054 }
1055 }
1056
1057 /*
1058 * We didn't find the lock in our LOCALLOCK table, and we didn't manage to
1059 * take it via the fast-path, either, so we've got to mess with the shared
1060 * lock table.
1061 */
1063
1065
1066 /*
1067 * Find or create lock and proclock entries with this tag
1068 *
1069 * Note: if the locallock object already existed, it might have a pointer
1070 * to the lock already ... but we should not assume that that pointer is
1071 * valid, since a lock object with zero hold and request counts can go
1072 * away anytime. So we have to use SetupLockInTable() to recompute the
1073 * lock and proclock pointers, even if they're already set.
1074 */
1075 proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
1076 hashcode, lockmode);
1077 if (!proclock)
1078 {
1081 if (locallock->nLocks == 0)
1083 if (locallockp)
1084 *locallockp = NULL;
1086 ereport(ERROR,
1088 errmsg("out of shared memory"),
1089 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
1090 else
1091 return LOCKACQUIRE_NOT_AVAIL;
1092 }
1093 locallock->proclock = proclock;
1094 lock = proclock->tag.myLock;
1095 locallock->lock = lock;
1096
1097 /*
1098 * If lock requested conflicts with locks requested by waiters, must join
1099 * wait queue. Otherwise, check for conflict with already-held locks.
1100 * (That's last because most complex check.)
1101 */
1102 if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
1103 found_conflict = true;
1104 else
1106 lock, proclock);
1107
1108 if (!found_conflict)
1109 {
1110 /* No conflict with held or previously requested locks */
1111 GrantLock(lock, proclock, lockmode);
1113 }
1114 else
1115 {
1116 /*
1117 * Join the lock's wait queue. We call this even in the dontWait
1118 * case, because JoinWaitQueue() may discover that we can acquire the
1119 * lock immediately after all.
1120 */
1122 }
1123
1125 {
1126 /*
1127 * We're not getting the lock because a deadlock was detected already
1128 * while trying to join the wait queue, or because we would have to
1129 * wait but the caller requested no blocking.
1130 *
1131 * Undo the changes to shared entries before releasing the partition
1132 * lock.
1133 */
1135
1136 if (proclock->holdMask == 0)
1137 {
1139
1141 hashcode);
1142 dlist_delete(&proclock->lockLink);
1143 dlist_delete(&proclock->procLink);
1145 &(proclock->tag),
1148 NULL))
1149 elog(PANIC, "proclock table corrupted");
1150 }
1151 else
1152 PROCLOCK_PRINT("LockAcquire: did not join wait queue", proclock);
1153 lock->nRequested--;
1154 lock->requested[lockmode]--;
1155 LOCK_PRINT("LockAcquire: did not join wait queue",
1156 lock, lockmode);
1157 Assert((lock->nRequested > 0) &&
1158 (lock->requested[lockmode] >= 0));
1159 Assert(lock->nGranted <= lock->nRequested);
1161 if (locallock->nLocks == 0)
1163
1164 if (dontWait)
1165 {
1166 /*
1167 * Log lock holders and waiters as a detail log message if
1168 * logLockFailure = true and lock acquisition fails with dontWait
1169 * = true
1170 */
1171 if (logLockFailure)
1172 {
1176 const char *modename;
1177 int lockHoldersNum = 0;
1178
1182
1183 DescribeLockTag(&buf, &locallock->tag.lock);
1184 modename = GetLockmodeName(locallock->tag.lock.locktag_lockmethodid,
1185 lockmode);
1186
1187 /* Gather a list of all lock holders and waiters */
1192
1193 ereport(LOG,
1194 (errmsg("process %d could not obtain %s on %s",
1195 MyProcPid, modename, buf.data),
1197 "Process holding the lock: %s, Wait queue: %s.",
1198 "Processes holding the lock: %s, Wait queue: %s.",
1200 lock_holders_sbuf.data,
1201 lock_waiters_sbuf.data)));
1202
1203 pfree(buf.data);
1206 }
1207 if (locallockp)
1208 *locallockp = NULL;
1209 return LOCKACQUIRE_NOT_AVAIL;
1210 }
1211 else
1212 {
1214 /* DeadLockReport() will not return */
1215 }
1216 }
1217
1218 /*
1219 * We are now in the lock queue, or the lock was already granted. If
1220 * queued, go to sleep.
1221 */
1223 {
1224 Assert(!dontWait);
1225 PROCLOCK_PRINT("LockAcquire: sleeping on lock", proclock);
1226 LOCK_PRINT("LockAcquire: sleeping on lock", lock, lockmode);
1228
1230
1231 /*
1232 * NOTE: do not do any material change of state between here and
1233 * return. All required changes in locktable state must have been
1234 * done when the lock was granted to us --- see notes in WaitOnLock.
1235 */
1236
1238 {
1239 /*
1240 * We failed as a result of a deadlock, see CheckDeadLock(). Quit
1241 * now.
1242 */
1243 Assert(!dontWait);
1245 /* DeadLockReport() will not return */
1246 }
1247 }
1248 else
1251
1252 /* The lock was granted to us. Update the local lock entry accordingly */
1253 Assert((proclock->holdMask & LOCKBIT_ON(lockmode)) != 0);
1254 GrantLockLocal(locallock, owner);
1255
1256 /*
1257 * Lock state is fully up-to-date now; if we error out after this, no
1258 * special error cleanup is required.
1259 */
1261
1262 /*
1263 * Emit a WAL record if acquisition of this lock needs to be replayed in a
1264 * standby server.
1265 */
1266 if (log_lock)
1267 {
1268 /*
1269 * Decode the locktag back to the original values, to avoid sending
1270 * lots of empty bytes with every message. See lock.h to check how a
1271 * locktag is defined for LOCKTAG_RELATION
1272 */
1274 locktag->locktag_field2);
1275 }
1276
1277 return LOCKACQUIRE_OK;
1278}
1279
1280/*
1281 * Find or create LOCK and PROCLOCK objects as needed for a new lock
1282 * request.
1283 *
1284 * Returns the PROCLOCK object, or NULL if we failed to create the objects
1285 * for lack of shared memory.
1286 *
1287 * The appropriate partition lock must be held at entry, and will be
1288 * held at exit.
1289 */
1290static PROCLOCK *
1292 const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode)
1293{
1294 LOCK *lock;
1295 PROCLOCK *proclock;
1298 bool found;
1299
 /* NB: the partition LWLock for 'hashcode' is held by the caller throughout. */
1300 /*
1301 * Find or create a lock with this tag.
1302 */
1304 locktag,
1305 hashcode,
1307 &found);
1308 if (!lock)
1309 return NULL;
1310
1311 /*
1312 * if it's a new lock object, initialize it
1313 */
1314 if (!found)
1315 {
1316 lock->grantMask = 0;
1317 lock->waitMask = 0;
1318 dlist_init(&lock->procLocks);
1319 dclist_init(&lock->waitProcs);
1320 lock->nRequested = 0;
1321 lock->nGranted = 0;
1322 MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
1323 MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
1324 LOCK_PRINT("LockAcquire: new", lock, lockmode);
1325 }
1326 else
1327 {
1328 LOCK_PRINT("LockAcquire: found", lock, lockmode);
1329 Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
1330 Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
1331 Assert(lock->nGranted <= lock->nRequested);
1332 }
1333
1334 /*
1335 * Create the hash key for the proclock table.
1336 */
1337 proclocktag.myLock = lock;
1338 proclocktag.myProc = proc;
1339
1341
1342 /*
1343 * Find or create a proclock entry with this tag
1344 */
1346 &proclocktag,
1349 &found);
1350 if (!proclock)
1351 {
1352 /* Oops, not enough shmem for the proclock */
1353 if (lock->nRequested == 0)
1354 {
1355 /*
1356 * There are no other requestors of this lock, so garbage-collect
1357 * the lock object. We *must* do this to avoid a permanent leak
1358 * of shared memory, because there won't be anything to cause
1359 * anyone to release the lock object later.
1360 */
1361 Assert(dlist_is_empty(&(lock->procLocks)));
1363 &(lock->tag),
1364 hashcode,
1366 NULL))
1367 elog(PANIC, "lock table corrupted");
1368 }
1369 return NULL;
1370 }
1371
1372 /*
1373 * If new, initialize the new entry
1374 */
1375 if (!found)
1376 {
1378
1379 /*
1380 * It might seem unsafe to access proclock->groupLeader without a
1381 * lock, but it's not really. Either we are initializing a proclock
1382 * on our own behalf, in which case our group leader isn't changing
1383 * because the group leader for a process can only ever be changed by
1384 * the process itself; or else we are transferring a fast-path lock to
1385 * the main lock table, in which case that process can't change its
1386 * lock group leader without first releasing all of its locks (and in
1387 * particular the one we are currently transferring).
1388 */
1389 proclock->groupLeader = proc->lockGroupLeader != NULL ?
1390 proc->lockGroupLeader : proc;
1391 proclock->holdMask = 0;
1392 proclock->releaseMask = 0;
1393 /* Add proclock to appropriate lists */
1394 dlist_push_tail(&lock->procLocks, &proclock->lockLink);
1395 dlist_push_tail(&proc->myProcLocks[partition], &proclock->procLink);
1396 PROCLOCK_PRINT("LockAcquire: new", proclock);
1397 }
1398 else
1399 {
1400 PROCLOCK_PRINT("LockAcquire: found", proclock);
1401 Assert((proclock->holdMask & ~lock->grantMask) == 0);
1402
1403#ifdef CHECK_DEADLOCK_RISK
1404
1405 /*
1406 * Issue warning if we already hold a lower-level lock on this object
1407 * and do not hold a lock of the requested level or higher. This
1408 * indicates a deadlock-prone coding practice (eg, we'd have a
1409 * deadlock if another backend were following the same code path at
1410 * about the same time).
1411 *
1412 * This is not enabled by default, because it may generate log entries
1413 * about user-level coding practices that are in fact safe in context.
1414 * It can be enabled to help find system-level problems.
1415 *
1416 * XXX Doing numeric comparison on the lockmodes is a hack; it'd be
1417 * better to use a table. For now, though, this works.
1418 */
1419 {
1420 int i;
1421
1422 for (i = lockMethodTable->numLockModes; i > 0; i--)
1423 {
1424 if (proclock->holdMask & LOCKBIT_ON(i))
1425 {
1426 if (i >= (int) lockmode)
1427 break; /* safe: we have a lock >= req level */
1428 elog(LOG, "deadlock risk: raising lock level"
1429 " from %s to %s on object %u/%u/%u",
1430 lockMethodTable->lockModeNames[i],
1431 lockMethodTable->lockModeNames[lockmode],
1432 lock->tag.locktag_field1, lock->tag.locktag_field2,
1433 lock->tag.locktag_field3);
1434 break;
1435 }
1436 }
1437 }
1438#endif /* CHECK_DEADLOCK_RISK */
1439 }
1440
1441 /*
1442 * lock->nRequested and lock->requested[] count the total number of
1443 * requests, whether granted or waiting, so increment those immediately.
1444 * The other counts don't increment till we get the lock.
1445 */
1446 lock->nRequested++;
1447 lock->requested[lockmode]++;
1448 Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
1449
1450 /*
1451 * We shouldn't already hold the desired lock; else locallock table is
1452 * broken.
1453 */
1454 if (proclock->holdMask & LOCKBIT_ON(lockmode))
1455 elog(ERROR, "lock %s on object %u/%u/%u is already held",
1456 lockMethodTable->lockModeNames[lockmode],
1457 lock->tag.locktag_field1, lock->tag.locktag_field2,
1458 lock->tag.locktag_field3);
1459
1460 return proclock;
1461}
1462
1463/*
1464 * Check and set/reset the flag that we hold the relation extension lock.
1465 *
1466 * It is callers responsibility that this function is called after
1467 * acquiring/releasing the relation extension lock.
1468 *
1469 * Pass acquired as true if lock is acquired, false otherwise.
1470 */
1471static inline void
1473{
1474#ifdef USE_ASSERT_CHECKING
 /* assertion-build-only bookkeeping; compiles to a no-op otherwise */
1477#endif
1478}
1479
1480/*
1481 * Subroutine to free a locallock entry
1482 */
1483static void
1485{
1486 int i;
1487
 /* Drop every remaining resource-owner reference to this locallock. */
1488 for (i = locallock->numLockOwners - 1; i >= 0; i--)
1489 {
1490 if (locallock->lockOwners[i].owner != NULL)
1491 ResourceOwnerForgetLock(locallock->lockOwners[i].owner, locallock);
1492 }
1493 locallock->numLockOwners = 0;
1494 if (locallock->lockOwners != NULL)
1495 pfree(locallock->lockOwners);
1496 locallock->lockOwners = NULL;
1497
 /* If we had bumped the shared strong-lock count, undo that now. */
1498 if (locallock->holdsStrongLockCount)
1499 {
1501
1503
1507 locallock->holdsStrongLockCount = false;
1509 }
1510
1512 &(locallock->tag),
1513 HASH_REMOVE, NULL))
1514 elog(WARNING, "locallock table corrupted");
1515
1516 /*
1517 * Indicate that the lock is released for certain types of locks
1518 */
1520}
1521
1522/*
1523 * LockCheckConflicts -- test whether requested lock conflicts
1524 * with those already granted
1525 *
1526 * Returns true if conflict, false if no conflict.
1527 *
1528 * NOTES:
1529 * Here's what makes this complicated: one process's locks don't
1530 * conflict with one another, no matter what purpose they are held for
1531 * (eg, session and transaction locks do not conflict). Nor do the locks
1532 * of one process in a lock group conflict with those of another process in
1533 * the same group. So, we must subtract off these locks when determining
1534 * whether the requested new lock conflicts with those already held.
1535 */
1536bool
1538 LOCKMODE lockmode,
1539 LOCK *lock,
1540 PROCLOCK *proclock)
1541{
1542 int numLockModes = lockMethodTable->numLockModes;
1544 int conflictMask = lockMethodTable->conflictTab[lockmode];
1548 int i;
1549
1550 /*
1551 * first check for global conflicts: If no locks conflict with my request,
1552 * then I get the lock.
1553 *
1554 * Checking for conflict: lock->grantMask represents the types of
1555 * currently held locks. conflictTable[lockmode] has a bit set for each
1556 * type of lock that conflicts with request. Bitwise compare tells if
1557 * there is a conflict.
1558 */
1559 if (!(conflictMask & lock->grantMask))
1560 {
1561 PROCLOCK_PRINT("LockCheckConflicts: no conflict", proclock);
1562 return false;
1563 }
1564
1565 /*
1566 * Rats. Something conflicts. But it could still be my own lock, or a
1567 * lock held by another member of my locking group. First, figure out how
1568 * many conflicts remain after subtracting out any locks I hold myself.
1569 */
 /* modes this proclock already holds; our own locks never conflict with us */
1570 myLocks = proclock->holdMask;
1571 for (i = 1; i <= numLockModes; i++)
1572 {
1573 if ((conflictMask & LOCKBIT_ON(i)) == 0)
1574 {
1575 conflictsRemaining[i] = 0;
1576 continue;
1577 }
1578 conflictsRemaining[i] = lock->granted[i];
1579 if (myLocks & LOCKBIT_ON(i))
1582 }
1583
1584 /* If no conflicts remain, we get the lock. */
1585 if (totalConflictsRemaining == 0)
1586 {
1587 PROCLOCK_PRINT("LockCheckConflicts: resolved (simple)", proclock);
1588 return false;
1589 }
1590
1591 /* If no group locking, it's definitely a conflict. */
1592 if (proclock->groupLeader == MyProc && MyProc->lockGroupLeader == NULL)
1593 {
1594 Assert(proclock->tag.myProc == MyProc);
1595 PROCLOCK_PRINT("LockCheckConflicts: conflicting (simple)",
1596 proclock);
1597 return true;
1598 }
1599
1600 /*
1601 * The relation extension lock conflict even between the group members.
1602 */
1604 {
1605 PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)",
1606 proclock);
1607 return true;
1608 }
1609
1610 /*
1611 * Locks held in conflicting modes by members of our own lock group are
1612 * not real conflicts; we can subtract those out and see if we still have
1613 * a conflict. This is O(N) in the number of processes holding or
1614 * awaiting locks on this object. We could improve that by making the
1615 * shared memory state more complex (and larger) but it doesn't seem worth
1616 * it.
1617 */
1619 {
1621 dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
1622
1623 if (proclock != otherproclock &&
1624 proclock->groupLeader == otherproclock->groupLeader &&
1625 (otherproclock->holdMask & conflictMask) != 0)
1626 {
1627 int intersectMask = otherproclock->holdMask & conflictMask;
1628
1629 for (i = 1; i <= numLockModes; i++)
1630 {
1631 if ((intersectMask & LOCKBIT_ON(i)) != 0)
1632 {
1633 if (conflictsRemaining[i] <= 0)
1634 elog(PANIC, "proclocks held do not match lock");
1637 }
1638 }
1639
 /* stop scanning as soon as all conflicts are accounted for */
1640 if (totalConflictsRemaining == 0)
1641 {
1642 PROCLOCK_PRINT("LockCheckConflicts: resolved (group)",
1643 proclock);
1644 return false;
1645 }
1646 }
1647 }
1648
1649 /* Nope, it's a real conflict. */
1650 PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)", proclock);
1651 return true;
1652}
1653
1654/*
1655 * GrantLock -- update the lock and proclock data structures to show
1656 * the lock request has been granted.
1657 *
1658 * NOTE: if proc was blocked, it also needs to be removed from the wait list
1659 * and have its waitLock/waitProcLock fields cleared. That's not done here.
1660 *
1661 * NOTE: the lock grant also has to be recorded in the associated LOCALLOCK
1662 * table entry; but since we may be awaking some other process, we can't do
1663 * that here; it's done by GrantLockLocal, instead.
1664 */
1665void
1666GrantLock(LOCK *lock, PROCLOCK *proclock, LOCKMODE lockmode)
1667{
1668 lock->nGranted++;
1669 lock->granted[lockmode]++;
1670 lock->grantMask |= LOCKBIT_ON(lockmode);
1671 if (lock->granted[lockmode] == lock->requested[lockmode])
1672 lock->waitMask &= LOCKBIT_OFF(lockmode);
1673 proclock->holdMask |= LOCKBIT_ON(lockmode);
1674 LOCK_PRINT("GrantLock", lock, lockmode);
1675 Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
1676 Assert(lock->nGranted <= lock->nRequested);
1677}
1678
1679/*
1680 * UnGrantLock -- opposite of GrantLock.
1681 *
1682 * Updates the lock and proclock data structures to show that the lock
1683 * is no longer held nor requested by the current holder.
1684 *
1685 * Returns true if there were any waiters waiting on the lock that
1686 * should now be woken up with ProcLockWakeup.
1687 */
1688static bool
1689UnGrantLock(LOCK *lock, LOCKMODE lockmode,
1691{
1692 bool wakeupNeeded = false;
1693
1694 Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
1695 Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
1696 Assert(lock->nGranted <= lock->nRequested);
1697
1698 /*
1699 * fix the general lock stats
1700 */
1701 lock->nRequested--;
1702 lock->requested[lockmode]--;
1703 lock->nGranted--;
1704 lock->granted[lockmode]--;
1705
1706 if (lock->granted[lockmode] == 0)
1707 {
1708 /* change the conflict mask. No more of this lock type. */
1709 lock->grantMask &= LOCKBIT_OFF(lockmode);
1710 }
1711
1712 LOCK_PRINT("UnGrantLock: updated", lock, lockmode);
1713
1714 /*
1715 * We need only run ProcLockWakeup if the released lock conflicts with at
1716 * least one of the lock types requested by waiter(s). Otherwise whatever
1717 * conflict made them wait must still exist. NOTE: before MVCC, we could
1718 * skip wakeup if lock->granted[lockmode] was still positive. But that's
1719 * not true anymore, because the remaining granted locks might belong to
1720 * some waiter, who could now be awakened because he doesn't conflict with
1721 * his own locks.
1722 */
1723 if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
1724 wakeupNeeded = true;
1725
1726 /*
1727 * Now fix the per-proclock state.
1728 */
1729 proclock->holdMask &= LOCKBIT_OFF(lockmode);
1730 PROCLOCK_PRINT("UnGrantLock: updated", proclock);
1731
 /* true => caller should pass wakeupNeeded on to CleanUpLock */
1732 return wakeupNeeded;
1733}
1734
1735/*
1736 * CleanUpLock -- clean up after releasing a lock. We garbage-collect the
1737 * proclock and lock objects if possible, and call ProcLockWakeup if there
1738 * are remaining requests and the caller says it's OK. (Normally, this
1739 * should be called after UnGrantLock, and wakeupNeeded is the result from
1740 * UnGrantLock.)
1741 *
1742 * The appropriate partition lock must be held at entry, and will be
1743 * held at exit.
1744 */
1745static void
1746CleanUpLock(LOCK *lock, PROCLOCK *proclock,
1748 bool wakeupNeeded)
1749{
1750 /*
1751 * If this was my last hold on this lock, delete my entry in the proclock
1752 * table.
1753 */
1754 if (proclock->holdMask == 0)
1755 {
1757
1758 PROCLOCK_PRINT("CleanUpLock: deleting", proclock);
 /* unlink from both the per-lock and per-proc lists before hash removal */
1759 dlist_delete(&proclock->lockLink);
1760 dlist_delete(&proclock->procLink);
1761 proclock_hashcode = ProcLockHashCode(&proclock->tag, hashcode);
1763 &(proclock->tag),
1766 NULL))
1767 elog(PANIC, "proclock table corrupted");
1768 }
1769
1770 if (lock->nRequested == 0)
1771 {
1772 /*
1773 * The caller just released the last lock, so garbage-collect the lock
1774 * object.
1775 */
1776 LOCK_PRINT("CleanUpLock: deleting", lock, 0);
1779 &(lock->tag),
1780 hashcode,
1782 NULL))
1783 elog(PANIC, "lock table corrupted");
1784 }
1785 else if (wakeupNeeded)
1786 {
1787 /* There are waiters on this lock, so wake them up. */
1789 }
1790}
1791
1792/*
1793 * GrantLockLocal -- update the locallock data structures to show
1794 * the lock request has been granted.
1795 *
1796 * We expect that LockAcquire made sure there is room to add a new
1797 * ResourceOwner entry.
1798 */
1799static void
1801{
1802 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
1803 int i;
1804
1805 Assert(locallock->numLockOwners < locallock->maxLockOwners);
1806 /* Count the total */
1807 locallock->nLocks++;
1808 /* Count the per-owner lock */
1809 for (i = 0; i < locallock->numLockOwners; i++)
1810 {
1811 if (lockOwners[i].owner == owner)
1812 {
1813 lockOwners[i].nLocks++;
1814 return;
1815 }
1816 }
 /* no entry for this owner yet: append one (room asserted above) */
1817 lockOwners[i].owner = owner;
1818 lockOwners[i].nLocks = 1;
1819 locallock->numLockOwners++;
1820 if (owner != NULL)
1822
1823 /* Indicate that the lock is acquired for certain types of locks. */
1825}
1826
1827/*
1828 * BeginStrongLockAcquire - inhibit use of fastpath for a given LOCALLOCK,
1829 * and arrange for error cleanup if it fails
1830 */
1831static void
1833{
1835 Assert(locallock->holdsStrongLockCount == false);
1836
1837 /*
1838 * Adding to a memory location is not atomic, so we take a spinlock to
1839 * ensure we don't collide with someone else trying to bump the count at
1840 * the same time.
1841 *
1842 * XXX: It might be worth considering using an atomic fetch-and-add
1843 * instruction here, on architectures where that is supported.
1844 */
1845
 /* remember we bumped the count, so AbortStrongLockAcquire can undo it */
1848 locallock->holdsStrongLockCount = true;
1851}
1852
1853/*
1854 * FinishStrongLockAcquire - cancel pending cleanup for a strong lock
1855 * acquisition once it's no longer needed
1856 */
1857static void
1862
1863/*
1864 * AbortStrongLockAcquire - undo strong lock state changes performed by
1865 * BeginStrongLockAcquire.
1866 */
1867void
1885
1886/*
1887 * GrantAwaitedLock -- call GrantLockLocal for the lock we are doing
1888 * WaitOnLock on.
1889 *
1890 * proc.c needs this for the case where we are booted off the lock by
1891 * timeout, but discover that someone granted us the lock anyway.
1892 *
1893 * We could just export GrantLockLocal, but that would require including
1894 * resowner.h in lock.h, which creates circularity.
1895 */
1896void
1901
1902/*
1903 * GetAwaitedLock -- Return the lock we're currently doing WaitOnLock on.
1904 */
1905LOCALLOCK *
1907{
 /* NULL when we are not currently blocked in WaitOnLock */
1908 return awaitedLock;
1909}
1910
1911/*
1912 * ResetAwaitedLock -- Forget that we are waiting on a lock.
1913 */
1914void
1916{
 /* after this, LockErrorCleanup has no awaited-lock state to undo */
1917 awaitedLock = NULL;
1918}
1919
1920/*
1921 * MarkLockClear -- mark an acquired lock as "clear"
1922 *
1923 * This means that we know we have absorbed all sinval messages that other
1924 * sessions generated before we acquired this lock, and so we can confidently
1925 * assume we know about any catalog changes protected by this lock.
1926 */
1927void
1929{
 /* only a lock we actually hold can be marked clear */
1930 Assert(locallock->nLocks > 0);
1931 locallock->lockCleared = true;
1932}
1933
1934/*
1935 * WaitOnLock -- wait to acquire a lock
1936 *
1937 * This is a wrapper around ProcSleep, with extra tracing and bookkeeping.
1938 */
1939static ProcWaitStatus
1941{
1944
1945 TRACE_POSTGRESQL_LOCK_WAIT_START(locallock->tag.lock.locktag_field1,
1946 locallock->tag.lock.locktag_field2,
1947 locallock->tag.lock.locktag_field3,
1948 locallock->tag.lock.locktag_field4,
1949 locallock->tag.lock.locktag_type,
1950 locallock->tag.mode);
1951
1952 /* Setup error traceback support for ereport() */
1957
1958 /* adjust the process title to indicate that it's waiting */
1959 set_ps_display_suffix("waiting");
1960
1961 /*
1962 * Record the fact that we are waiting for a lock, so that
1963 * LockErrorCleanup will clean up if cancel/die happens.
1964 */
 /* awaitedOwner: the ResourceOwner to credit if the lock is granted while we sleep */
1966 awaitedOwner = owner;
1967
1968 /*
1969 * NOTE: Think not to put any shared-state cleanup after the call to
1970 * ProcSleep, in either the normal or failure path. The lock state must
1971 * be fully set by the lock grantor, or by CheckDeadLock if we give up
1972 * waiting for the lock. This is necessary because of the possibility
1973 * that a cancel/die interrupt will interrupt ProcSleep after someone else
1974 * grants us the lock, but before we've noticed it. Hence, after granting,
1975 * the locktable state must fully reflect the fact that we own the lock;
1976 * we can't do additional work on return.
1977 *
1978 * We can and do use a PG_TRY block to try to clean up after failure, but
1979 * this still has a major limitation: elog(FATAL) can occur while waiting
1980 * (eg, a "die" interrupt), and then control won't come back here. So all
1981 * cleanup of essential state should happen in LockErrorCleanup, not here.
1982 * We can use PG_TRY to clear the "waiting" status flags, since doing that
1983 * is unimportant if the process exits.
1984 */
1985 PG_TRY();
1986 {
1988 }
1989 PG_CATCH();
1990 {
1991 /* In this path, awaitedLock remains set until LockErrorCleanup */
1992
1993 /* reset ps display to remove the suffix */
1995
1996 /* and propagate the error */
1997 PG_RE_THROW();
1998 }
1999 PG_END_TRY();
2000
2001 /*
2002 * We no longer want LockErrorCleanup to do anything.
2003 */
2004 awaitedLock = NULL;
2005
2006 /* reset ps display to remove the suffix */
2008
2010
2011 TRACE_POSTGRESQL_LOCK_WAIT_DONE(locallock->tag.lock.locktag_field1,
2012 locallock->tag.lock.locktag_field2,
2013 locallock->tag.lock.locktag_field3,
2014 locallock->tag.lock.locktag_field4,
2015 locallock->tag.lock.locktag_type,
2016 locallock->tag.mode);
2017
2018 return result;
2019}
2020
2021/*
2022 * error context callback for failures in WaitOnLock
2023 *
2024 * We report which lock was being waited on, in the same style used in
2025 * deadlock reports. This helps with lock timeout errors in particular.
2026 */
2027static void
2029{
2031 const LOCKTAG *tag = &locallock->tag.lock;
2032 LOCKMODE mode = locallock->tag.mode;
2034
2037
2038 errcontext("waiting for %s on %s",
2040 locktagbuf.data);
2041}
2042
2043/*
2044 * Remove a proc from the wait-queue it is on (caller must know it is on one).
2045 * This is only used when the proc has failed to get the lock, so we set its
2046 * waitStatus to PROC_WAIT_STATUS_ERROR.
2047 *
2048 * Appropriate partition lock must be held by caller. Also, caller is
2049 * responsible for signaling the proc if needed.
2050 *
2051 * NB: this does not clean up any locallock object that may exist for the lock.
2052 */
2053void
2055{
2056 LOCK *waitLock = proc->waitLock;
2057 PROCLOCK *proclock = proc->waitProcLock;
2058 LOCKMODE lockmode = proc->waitLockMode;
 /* NB: caller must hold the partition lock covering waitLock's hash value */
2060
2061 /* Make sure proc is waiting */
2064 Assert(waitLock);
2065 Assert(!dclist_is_empty(&waitLock->waitProcs));
2067
2068 /* Remove proc from lock's wait queue */
2070
2071 /* Undo increments of request counts by waiting process */
2072 Assert(waitLock->nRequested > 0);
2073 Assert(waitLock->nRequested > proc->waitLock->nGranted);
2074 waitLock->nRequested--;
2075 Assert(waitLock->requested[lockmode] > 0);
2076 waitLock->requested[lockmode]--;
2077 /* don't forget to clear waitMask bit if appropriate */
2078 if (waitLock->granted[lockmode] == waitLock->requested[lockmode])
2079 waitLock->waitMask &= LOCKBIT_OFF(lockmode);
2080
2081 /* Clean up the proc's own state, and pass it the ok/fail signal */
2082 proc->waitLock = NULL;
2083 proc->waitProcLock = NULL;
2085
2086 /*
2087 * Delete the proclock immediately if it represents no already-held locks.
2088 * (This must happen now because if the owner of the lock decides to
2089 * release it, and the requested/granted counts then go to zero,
2090 * LockRelease expects there to be no remaining proclocks.) Then see if
2091 * any other waiters for the lock can be woken up now.
2092 */
2093 CleanUpLock(waitLock, proclock,
2094 LockMethods[lockmethodid], hashcode,
2095 true);
2096}
2097
2098/*
2099 * LockRelease -- look up 'locktag' and release one 'lockmode' lock on it.
2100 * Release a session lock if 'sessionLock' is true, else release a
2101 * regular transaction lock.
2102 *
2103 * Side Effects: find any waiting processes that are now wakable,
2104 * grant them their requested locks and awaken them.
2105 * (We have to grant the lock here to avoid a race between
2106 * the waking process and any new process to
2107 * come along and request the lock.)
2108 */
2109bool
2110LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
2111{
2116 LOCK *lock;
2117 PROCLOCK *proclock;
2119 bool wakeupNeeded;
2120
2122 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2125 elog(ERROR, "unrecognized lock mode: %d", lockmode);
2126
2127#ifdef LOCK_DEBUG
2128 if (LOCK_DEBUG_ENABLED(locktag))
2129 elog(LOG, "LockRelease: lock [%u,%u] %s",
2130 locktag->locktag_field1, locktag->locktag_field2,
2131 lockMethodTable->lockModeNames[lockmode]);
2132#endif
2133
2134 /*
2135 * Find the LOCALLOCK entry for this lock and lockmode
2136 */
2137 MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
2138 localtag.lock = *locktag;
2139 localtag.mode = lockmode;
2140
2142 &localtag,
2143 HASH_FIND, NULL);
2144
2145 /*
2146 * let the caller print its own error message, too. Do not ereport(ERROR).
2147 */
2148 if (!locallock || locallock->nLocks <= 0)
2149 {
2150 elog(WARNING, "you don't own a lock of type %s",
2151 lockMethodTable->lockModeNames[lockmode]);
2152 return false;
2153 }
2154
2155 /*
2156 * Decrease the count for the resource owner.
2157 */
2158 {
2159 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
2160 ResourceOwner owner;
2161 int i;
2162
2163 /* Identify owner for lock */
2164 if (sessionLock)
2165 owner = NULL;
2166 else
2167 owner = CurrentResourceOwner;
2168
 /* scan the owner array newest-first */
2169 for (i = locallock->numLockOwners - 1; i >= 0; i--)
2170 {
2171 if (lockOwners[i].owner == owner)
2172 {
2173 Assert(lockOwners[i].nLocks > 0);
2174 if (--lockOwners[i].nLocks == 0)
2175 {
2176 if (owner != NULL)
2178 /* compact out unused slot */
2179 locallock->numLockOwners--;
2180 if (i < locallock->numLockOwners)
2181 lockOwners[i] = lockOwners[locallock->numLockOwners];
2182 }
2183 break;
2184 }
2185 }
2186 if (i < 0)
2187 {
2188 /* don't release a lock belonging to another owner */
2189 elog(WARNING, "you don't own a lock of type %s",
2190 lockMethodTable->lockModeNames[lockmode]);
2191 return false;
2192 }
2193 }
2194
2195 /*
2196 * Decrease the total local count. If we're still holding the lock, we're
2197 * done.
2198 */
2199 locallock->nLocks--;
2200
2201 if (locallock->nLocks > 0)
2202 return true;
2203
2204 /*
2205 * At this point we can no longer suppose we are clear of invalidation
2206 * messages related to this lock. Although we'll delete the LOCALLOCK
2207 * object before any intentional return from this routine, it seems worth
2208 * the trouble to explicitly reset lockCleared right now, just in case
2209 * some error prevents us from deleting the LOCALLOCK.
2210 */
2211 locallock->lockCleared = false;
2212
2213 /* Attempt fast release of any lock eligible for the fast path. */
2214 if (EligibleForRelationFastPath(locktag, lockmode) &&
2216 {
2217 bool released;
2218
2219 /*
2220 * We might not find the lock here, even if we originally entered it
2221 * here. Another backend may have moved it to the main table.
2222 */
2225 lockmode);
2227 if (released)
2228 {
2230 return true;
2231 }
2232 }
2233
2234 /*
2235 * Otherwise we've got to mess with the shared lock table.
2236 */
2238
2240
2241 /*
2242 * Normally, we don't need to re-find the lock or proclock, since we kept
2243 * their addresses in the locallock table, and they couldn't have been
2244 * removed while we were holding a lock on them. But it's possible that
2245 * the lock was taken fast-path and has since been moved to the main hash
2246 * table by another backend, in which case we will need to look up the
2247 * objects here. We assume the lock field is NULL if so.
2248 */
2249 lock = locallock->lock;
2250 if (!lock)
2251 {
2253
2254 Assert(EligibleForRelationFastPath(locktag, lockmode));
2256 locktag,
2257 locallock->hashcode,
2258 HASH_FIND,
2259 NULL);
2260 if (!lock)
2261 elog(ERROR, "failed to re-find shared lock object");
2262 locallock->lock = lock;
2263
2264 proclocktag.myLock = lock;
2265 proclocktag.myProc = MyProc;
2267 &proclocktag,
2268 HASH_FIND,
2269 NULL);
2270 if (!locallock->proclock)
2271 elog(ERROR, "failed to re-find shared proclock object");
2272 }
2273 LOCK_PRINT("LockRelease: found", lock, lockmode);
2274 proclock = locallock->proclock;
2275 PROCLOCK_PRINT("LockRelease: found", proclock);
2276
2277 /*
2278 * Double-check that we are actually holding a lock of the type we want to
2279 * release.
2280 */
2281 if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
2282 {
2283 PROCLOCK_PRINT("LockRelease: WRONGTYPE", proclock);
2285 elog(WARNING, "you don't own a lock of type %s",
2286 lockMethodTable->lockModeNames[lockmode]);
2288 return false;
2289 }
2290
2291 /*
2292 * Do the releasing. CleanUpLock will waken any now-wakable waiters.
2293 */
2294 wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
2295
2296 CleanUpLock(lock, proclock,
2297 lockMethodTable, locallock->hashcode,
2298 wakeupNeeded);
2299
2301
2303 return true;
2304}
2305
2306/*
2307 * LockReleaseAll -- Release all locks of the specified lock method that
2308 * are held by the current process.
2309 *
2310 * Well, not necessarily *all* locks. The available behaviors are:
2311 * allLocks == true: release all locks including session locks.
2312 * allLocks == false: release all non-session locks.
2313 */
2314void
2316{
2317 HASH_SEQ_STATUS status;
2319 int i,
2320 numLockModes;
2322 LOCK *lock;
2323 int partition;
2324 bool have_fast_path_lwlock = false;
2325
2327 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2329
2330#ifdef LOCK_DEBUG
2331 if (*(lockMethodTable->trace_flag))
2332 elog(LOG, "LockReleaseAll: lockmethod=%d", lockmethodid);
2333#endif
2334
2335 /*
2336 * Get rid of our fast-path VXID lock, if appropriate. Note that this is
2337 * the only way that the lock we hold on our own VXID can ever get
2338 * released: it is always and only released when a toplevel transaction
2339 * ends.
2340 */
2343
2344 numLockModes = lockMethodTable->numLockModes;
2345
2346 /*
2347 * First we run through the locallock table and get rid of unwanted
2348 * entries, then we scan the process's proclocks and get rid of those. We
2349 * do this separately because we may have multiple locallock entries
2350 * pointing to the same proclock, and we daren't end up with any dangling
2351 * pointers. Fast-path locks are cleaned up during the locallock table
2352 * scan, though.
2353 */
2355
2356 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2357 {
2358 /*
2359 * If the LOCALLOCK entry is unused, something must've gone wrong
2360 * while trying to acquire this lock. Just forget the local entry.
2361 */
2362 if (locallock->nLocks == 0)
2363 {
2365 continue;
2366 }
2367
2368 /* Ignore items that are not of the lockmethod to be removed */
2370 continue;
2371
2372 /*
2373 * If we are asked to release all locks, we can just zap the entry.
2374 * Otherwise, must scan to see if there are session locks. We assume
2375 * there is at most one lockOwners entry for session locks.
2376 */
2377 if (!allLocks)
2378 {
2379 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
2380
2381 /* If session lock is above array position 0, move it down to 0 */
2382 for (i = 0; i < locallock->numLockOwners; i++)
2383 {
2384 if (lockOwners[i].owner == NULL)
2385 lockOwners[0] = lockOwners[i];
2386 else
2387 ResourceOwnerForgetLock(lockOwners[i].owner, locallock);
2388 }
2389
2390 if (locallock->numLockOwners > 0 &&
2391 lockOwners[0].owner == NULL &&
2392 lockOwners[0].nLocks > 0)
2393 {
2394 /* Fix the locallock to show just the session locks */
2395 locallock->nLocks = lockOwners[0].nLocks;
2396 locallock->numLockOwners = 1;
2397 /* We aren't deleting this locallock, so done */
2398 continue;
2399 }
2400 else
2401 locallock->numLockOwners = 0;
2402 }
2403
2404#ifdef USE_ASSERT_CHECKING
2405
2406 /*
2407 * Tuple locks are currently held only for short durations within a
2408 * transaction. Check that we didn't forget to release one.
2409 */
2411 elog(WARNING, "tuple lock held at commit");
2412#endif
2413
2414 /*
2415 * If the lock or proclock pointers are NULL, this lock was taken via
2416 * the relation fast-path (and is not known to have been transferred).
2417 */
2418 if (locallock->proclock == NULL || locallock->lock == NULL)
2419 {
2420 LOCKMODE lockmode = locallock->tag.mode;
2421 Oid relid;
2422
2423 /* Verify that a fast-path lock is what we've got. */
2424 if (!EligibleForRelationFastPath(&locallock->tag.lock, lockmode))
2425 elog(PANIC, "locallock table corrupted");
2426
2427 /*
2428 * If we don't currently hold the LWLock that protects our
2429 * fast-path data structures, we must acquire it before attempting
2430 * to release the lock via the fast-path. We will continue to
2431 * hold the LWLock until we're done scanning the locallock table,
2432 * unless we hit a transferred fast-path lock. (XXX is this
2433 * really such a good idea? There could be a lot of entries ...)
2434 */
2436 {
2438 have_fast_path_lwlock = true;
2439 }
2440
2441 /* Attempt fast-path release. */
2442 relid = locallock->tag.lock.locktag_field2;
2443 if (FastPathUnGrantRelationLock(relid, lockmode))
2444 {
2446 continue;
2447 }
2448
2449 /*
2450 * Our lock, originally taken via the fast path, has been
2451 * transferred to the main lock table. That's going to require
2452 * some extra work, so release our fast-path lock before starting.
2453 */
2455 have_fast_path_lwlock = false;
2456
2457 /*
2458 * Now dump the lock. We haven't got a pointer to the LOCK or
2459 * PROCLOCK in this case, so we have to handle this a bit
2460 * differently than a normal lock release. Unfortunately, this
2461 * requires an extra LWLock acquire-and-release cycle on the
2462 * partitionLock, but hopefully it shouldn't happen often.
2463 */
2465 &locallock->tag.lock, lockmode, false);
2467 continue;
2468 }
2469
2470 /* Mark the proclock to show we need to release this lockmode */
2471 if (locallock->nLocks > 0)
2472 locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
2473
2474 /* And remove the locallock hashtable entry */
2476 }
2477
2478 /* Done with the fast-path data structures */
2481
2482 /*
2483 * Now, scan each lock partition separately.
2484 */
2486 {
2488 dlist_head *procLocks = &MyProc->myProcLocks[partition];
2490
2492
2493 /*
2494 * If the proclock list for this partition is empty, we can skip
2495 * acquiring the partition lock. This optimization is trickier than
2496 * it looks, because another backend could be in process of adding
2497 * something to our proclock list due to promoting one of our
2498 * fast-path locks. However, any such lock must be one that we
2499 * decided not to delete above, so it's okay to skip it again now;
2500 * we'd just decide not to delete it again. We must, however, be
2501 * careful to re-fetch the list header once we've acquired the
2502 * partition lock, to be sure we have a valid, up-to-date pointer.
2503 * (There is probably no significant risk if pointer fetch/store is
2504 * atomic, but we don't wish to assume that.)
2505 *
2506 * XXX This argument assumes that the locallock table correctly
2507 * represents all of our fast-path locks. While allLocks mode
2508 * guarantees to clean up all of our normal locks regardless of the
2509 * locallock situation, we lose that guarantee for fast-path locks.
2510 * This is not ideal.
2511 */
2512 if (dlist_is_empty(procLocks))
2513 continue; /* needn't examine this partition */
2514
2516
2518 {
2519 PROCLOCK *proclock = dlist_container(PROCLOCK, procLink, proclock_iter.cur);
2520 bool wakeupNeeded = false;
2521
2522 Assert(proclock->tag.myProc == MyProc);
2523
2524 lock = proclock->tag.myLock;
2525
2526 /* Ignore items that are not of the lockmethod to be removed */
2527 if (LOCK_LOCKMETHOD(*lock) != lockmethodid)
2528 continue;
2529
2530 /*
2531 * In allLocks mode, force release of all locks even if locallock
2532 * table had problems
2533 */
2534 if (allLocks)
2535 proclock->releaseMask = proclock->holdMask;
2536 else
2537 Assert((proclock->releaseMask & ~proclock->holdMask) == 0);
2538
2539 /*
2540 * Ignore items that have nothing to be released, unless they have
2541 * holdMask == 0 and are therefore recyclable
2542 */
2543 if (proclock->releaseMask == 0 && proclock->holdMask != 0)
2544 continue;
2545
2546 PROCLOCK_PRINT("LockReleaseAll", proclock);
2547 LOCK_PRINT("LockReleaseAll", lock, 0);
2548 Assert(lock->nRequested >= 0);
2549 Assert(lock->nGranted >= 0);
2550 Assert(lock->nGranted <= lock->nRequested);
2551 Assert((proclock->holdMask & ~lock->grantMask) == 0);
2552
2553 /*
2554 * Release the previously-marked lock modes
2555 */
2556 for (i = 1; i <= numLockModes; i++)
2557 {
2558 if (proclock->releaseMask & LOCKBIT_ON(i))
2559 wakeupNeeded |= UnGrantLock(lock, i, proclock,
2561 }
2562 Assert((lock->nRequested >= 0) && (lock->nGranted >= 0));
2563 Assert(lock->nGranted <= lock->nRequested);
2564 LOCK_PRINT("LockReleaseAll: updated", lock, 0);
2565
2566 proclock->releaseMask = 0;
2567
2568 /* CleanUpLock will wake up waiters if needed. */
2569 CleanUpLock(lock, proclock,
2571 LockTagHashCode(&lock->tag),
2572 wakeupNeeded);
2573 } /* loop over PROCLOCKs within this partition */
2574
2576 } /* loop over partitions */
2577
2578#ifdef LOCK_DEBUG
2579 if (*(lockMethodTable->trace_flag))
2580 elog(LOG, "LockReleaseAll done");
2581#endif
2582}
2583
2584/*
2585 * LockReleaseSession -- Release all session locks of the specified lock method
2586 * that are held by the current process.
2587 */
2588void
2590{
2591 HASH_SEQ_STATUS status;
2593
2595 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2596
2598
2599 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2600 {
2601 /* Ignore items that are not of the specified lock method */
2603 continue;
2604
2606 }
2607}
2608
2609/*
2610 * LockReleaseCurrentOwner
2611 * Release all locks belonging to CurrentResourceOwner
2612 *
2613 * If the caller knows what those locks are, it can pass them as an array.
2614 * That speeds up the call significantly, when a lot of locks are held.
2615 * Otherwise, pass NULL for locallocks, and we'll traverse through our hash
2616 * table to find them.
2617 */
2618void
2620{
2621 if (locallocks == NULL)
2622 {
2623 HASH_SEQ_STATUS status;
2625
2627
2628 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2630 }
2631 else
2632 {
2633 int i;
2634
2635 for (i = nlocks - 1; i >= 0; i--)
2637 }
2638}
2639
2640/*
2641 * ReleaseLockIfHeld
2642 * Release any session-level locks on this lockable object if sessionLock
2643 * is true; else, release any locks held by CurrentResourceOwner.
2644 *
2645 * It is tempting to pass this a ResourceOwner pointer (or NULL for session
2646 * locks), but without refactoring LockRelease() we cannot support releasing
2647 * locks belonging to resource owners other than CurrentResourceOwner.
2648 * If we were to refactor, it'd be a good idea to fix it so we don't have to
2649 * do a hashtable lookup of the locallock, too. However, currently this
2650 * function isn't used heavily enough to justify refactoring for its
2651 * convenience.
2652 */
2653static void
2655{
2656 ResourceOwner owner;
2657 LOCALLOCKOWNER *lockOwners;
2658 int i;
2659
2660 /* Identify owner for lock (must match LockRelease!) */
2661 if (sessionLock)
2662 owner = NULL;
2663 else
2664 owner = CurrentResourceOwner;
2665
2666 /* Scan to see if there are any locks belonging to the target owner */
2667 lockOwners = locallock->lockOwners;
2668 for (i = locallock->numLockOwners - 1; i >= 0; i--)
2669 {
2670 if (lockOwners[i].owner == owner)
2671 {
2672 Assert(lockOwners[i].nLocks > 0);
2673 if (lockOwners[i].nLocks < locallock->nLocks)
2674 {
2675 /*
2676 * We will still hold this lock after forgetting this
2677 * ResourceOwner.
2678 */
2679 locallock->nLocks -= lockOwners[i].nLocks;
2680 /* compact out unused slot */
2681 locallock->numLockOwners--;
2682 if (owner != NULL)
2684 if (i < locallock->numLockOwners)
2685 lockOwners[i] = lockOwners[locallock->numLockOwners];
2686 }
2687 else
2688 {
2689 Assert(lockOwners[i].nLocks == locallock->nLocks);
2690 /* We want to call LockRelease just once */
2691 lockOwners[i].nLocks = 1;
2692 locallock->nLocks = 1;
2693 if (!LockRelease(&locallock->tag.lock,
2694 locallock->tag.mode,
2695 sessionLock))
2696 elog(WARNING, "ReleaseLockIfHeld: failed??");
2697 }
2698 break;
2699 }
2700 }
2701}
2702
2703/*
2704 * LockReassignCurrentOwner
2705 * Reassign all locks belonging to CurrentResourceOwner to belong
2706 * to its parent resource owner.
2707 *
2708 * If the caller knows what those locks are, it can pass them as an array.
2709 * That speeds up the call significantly, when a lot of locks are held
2710 * (e.g pg_dump with a large schema). Otherwise, pass NULL for locallocks,
2711 * and we'll traverse through our hash table to find them.
2712 */
2713void
2715{
2717
2718 Assert(parent != NULL);
2719
2720 if (locallocks == NULL)
2721 {
2722 HASH_SEQ_STATUS status;
2724
2726
2727 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2729 }
2730 else
2731 {
2732 int i;
2733
2734 for (i = nlocks - 1; i >= 0; i--)
2735 LockReassignOwner(locallocks[i], parent);
2736 }
2737}
2738
2739/*
2740 * Subroutine of LockReassignCurrentOwner. Reassigns a given lock belonging to
2741 * CurrentResourceOwner to its parent.
2742 */
2743static void
2745{
2746 LOCALLOCKOWNER *lockOwners;
2747 int i;
2748 int ic = -1;
2749 int ip = -1;
2750
2751 /*
2752 * Scan to see if there are any locks belonging to current owner or its
2753 * parent
2754 */
2755 lockOwners = locallock->lockOwners;
2756 for (i = locallock->numLockOwners - 1; i >= 0; i--)
2757 {
2758 if (lockOwners[i].owner == CurrentResourceOwner)
2759 ic = i;
2760 else if (lockOwners[i].owner == parent)
2761 ip = i;
2762 }
2763
2764 if (ic < 0)
2765 return; /* no current locks */
2766
2767 if (ip < 0)
2768 {
2769 /* Parent has no slot, so just give it the child's slot */
2770 lockOwners[ic].owner = parent;
2772 }
2773 else
2774 {
2775 /* Merge child's count with parent's */
2776 lockOwners[ip].nLocks += lockOwners[ic].nLocks;
2777 /* compact out unused slot */
2778 locallock->numLockOwners--;
2779 if (ic < locallock->numLockOwners)
2780 lockOwners[ic] = lockOwners[locallock->numLockOwners];
2781 }
2783}
2784
2785/*
2786 * FastPathGrantRelationLock
2787 * Grant lock using per-backend fast-path array, if there is space.
2788 */
2789static bool
2791{
2792 uint32 i;
2794
2795 /* fast-path group the lock belongs to */
2796 uint32 group = FAST_PATH_REL_GROUP(relid);
2797
2798 /* Scan for existing entry for this relid, remembering empty slot. */
2799 for (i = 0; i < FP_LOCK_SLOTS_PER_GROUP; i++)
2800 {
2801 /* index into the whole per-backend array */
2802 uint32 f = FAST_PATH_SLOT(group, i);
2803
2804 if (FAST_PATH_GET_BITS(MyProc, f) == 0)
2805 unused_slot = f;
2806 else if (MyProc->fpRelId[f] == relid)
2807 {
2808 Assert(!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode));
2809 FAST_PATH_SET_LOCKMODE(MyProc, f, lockmode);
2810 return true;
2811 }
2812 }
2813
2814 /* If no existing entry, use any empty slot. */
2816 {
2817 MyProc->fpRelId[unused_slot] = relid;
2819 ++FastPathLocalUseCounts[group];
2820 return true;
2821 }
2822
2823 /* No existing entry, and no empty slot. */
2824 return false;
2825}
2826
2827/*
2828 * FastPathUnGrantRelationLock
2829 * Release fast-path lock, if present. Update backend-private local
2830 * use count, while we're at it.
2831 */
2832static bool
2834{
2835 uint32 i;
2836 bool result = false;
2837
2838 /* fast-path group the lock belongs to */
2839 uint32 group = FAST_PATH_REL_GROUP(relid);
2840
2841 FastPathLocalUseCounts[group] = 0;
2842 for (i = 0; i < FP_LOCK_SLOTS_PER_GROUP; i++)
2843 {
2844 /* index into the whole per-backend array */
2845 uint32 f = FAST_PATH_SLOT(group, i);
2846
2847 if (MyProc->fpRelId[f] == relid
2848 && FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
2849 {
2850 Assert(!result);
2851 FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
2852 result = true;
2853 /* we continue iterating so as to update FastPathLocalUseCount */
2854 }
2855 if (FAST_PATH_GET_BITS(MyProc, f) != 0)
2856 ++FastPathLocalUseCounts[group];
2857 }
2858 return result;
2859}
2860
2861/*
2862 * FastPathTransferRelationLocks
2863 * Transfer locks matching the given lock tag from per-backend fast-path
2864 * arrays to the shared hash table.
2865 *
2866 * Returns true if successful, false if ran out of shared memory.
2867 */
2868static bool
2870 uint32 hashcode)
2871{
2873 Oid relid = locktag->locktag_field2;
2874 uint32 i;
2875
2876 /* fast-path group the lock belongs to */
2877 uint32 group = FAST_PATH_REL_GROUP(relid);
2878
2879 /*
2880 * Every PGPROC that can potentially hold a fast-path lock is present in
2881 * ProcGlobal->allProcs. Prepared transactions are not, but any
2882 * outstanding fast-path locks held by prepared transactions are
2883 * transferred to the main lock table.
2884 */
2885 for (i = 0; i < ProcGlobal->allProcCount; i++)
2886 {
2887 PGPROC *proc = GetPGProcByNumber(i);
2888 uint32 j;
2889
2891
2892 /*
2893 * If the target backend isn't referencing the same database as the
2894 * lock, then we needn't examine the individual relation IDs at all;
2895 * none of them can be relevant.
2896 *
2897 * proc->databaseId is set at backend startup time and never changes
2898 * thereafter, so it might be safe to perform this test before
2899 * acquiring &proc->fpInfoLock. In particular, it's certainly safe to
2900 * assume that if the target backend holds any fast-path locks, it
2901 * must have performed a memory-fencing operation (in particular, an
2902 * LWLock acquisition) since setting proc->databaseId. However, it's
2903 * less clear that our backend is certain to have performed a memory
2904 * fencing operation since the other backend set proc->databaseId. So
2905 * for now, we test it after acquiring the LWLock just to be safe.
2906 *
2907 * Also skip groups without any registered fast-path locks.
2908 */
2909 if (proc->databaseId != locktag->locktag_field1 ||
2910 proc->fpLockBits[group] == 0)
2911 {
2912 LWLockRelease(&proc->fpInfoLock);
2913 continue;
2914 }
2915
2916 for (j = 0; j < FP_LOCK_SLOTS_PER_GROUP; j++)
2917 {
2918 uint32 lockmode;
2919
2920 /* index into the whole per-backend array */
2921 uint32 f = FAST_PATH_SLOT(group, j);
2922
2923 /* Look for an allocated slot matching the given relid. */
2924 if (relid != proc->fpRelId[f] || FAST_PATH_GET_BITS(proc, f) == 0)
2925 continue;
2926
2927 /* Find or create lock object. */
2929 for (lockmode = FAST_PATH_LOCKNUMBER_OFFSET;
2931 ++lockmode)
2932 {
2933 PROCLOCK *proclock;
2934
2935 if (!FAST_PATH_CHECK_LOCKMODE(proc, f, lockmode))
2936 continue;
2937 proclock = SetupLockInTable(lockMethodTable, proc, locktag,
2938 hashcode, lockmode);
2939 if (!proclock)
2940 {
2942 LWLockRelease(&proc->fpInfoLock);
2943 return false;
2944 }
2945 GrantLock(proclock->tag.myLock, proclock, lockmode);
2946 FAST_PATH_CLEAR_LOCKMODE(proc, f, lockmode);
2947 }
2949
2950 /* No need to examine remaining slots. */
2951 break;
2952 }
2953 LWLockRelease(&proc->fpInfoLock);
2954 }
2955 return true;
2956}
2957
2958/*
2959 * FastPathGetRelationLockEntry
2960 * Return the PROCLOCK for a lock originally taken via the fast-path,
2961 * transferring it to the primary lock table if necessary.
2962 *
2963 * Note: caller takes care of updating the locallock object.
2964 */
2965static PROCLOCK *
2967{
2969 LOCKTAG *locktag = &locallock->tag.lock;
2970 PROCLOCK *proclock = NULL;
2972 Oid relid = locktag->locktag_field2;
2973 uint32 i,
2974 group;
2975
2976 /* fast-path group the lock belongs to */
2977 group = FAST_PATH_REL_GROUP(relid);
2978
2980
2981 for (i = 0; i < FP_LOCK_SLOTS_PER_GROUP; i++)
2982 {
2983 uint32 lockmode;
2984
2985 /* index into the whole per-backend array */
2986 uint32 f = FAST_PATH_SLOT(group, i);
2987
2988 /* Look for an allocated slot matching the given relid. */
2989 if (relid != MyProc->fpRelId[f] || FAST_PATH_GET_BITS(MyProc, f) == 0)
2990 continue;
2991
2992 /* If we don't have a lock of the given mode, forget it! */
2993 lockmode = locallock->tag.mode;
2994 if (!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
2995 break;
2996
2997 /* Find or create lock object. */
2999
3000 proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
3001 locallock->hashcode, lockmode);
3002 if (!proclock)
3003 {
3006 ereport(ERROR,
3008 errmsg("out of shared memory"),
3009 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
3010 }
3011 GrantLock(proclock->tag.myLock, proclock, lockmode);
3012 FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
3013
3015
3016 /* No need to examine remaining slots. */
3017 break;
3018 }
3019
3021
3022 /* Lock may have already been transferred by some other backend. */
3023 if (proclock == NULL)
3024 {
3025 LOCK *lock;
3028
3030
3032 locktag,
3033 locallock->hashcode,
3034 HASH_FIND,
3035 NULL);
3036 if (!lock)
3037 elog(ERROR, "failed to re-find shared lock object");
3038
3039 proclocktag.myLock = lock;
3040 proclocktag.myProc = MyProc;
3041
3043 proclock = (PROCLOCK *)
3045 &proclocktag,
3047 HASH_FIND,
3048 NULL);
3049 if (!proclock)
3050 elog(ERROR, "failed to re-find shared proclock object");
3052 }
3053
3054 return proclock;
3055}
3056
3057/*
3058 * GetLockConflicts
3059 * Get an array of VirtualTransactionIds of xacts currently holding locks
3060 * that would conflict with the specified lock/lockmode.
3061 * xacts merely awaiting such a lock are NOT reported.
3062 *
3063 * The result array is palloc'd and is terminated with an invalid VXID.
3064 * *countp, if not null, is updated to the number of items set.
3065 *
3066 * Of course, the result could be out of date by the time it's returned, so
3067 * use of this function has to be thought about carefully. Similarly, a
3068 * PGPROC with no "lxid" will be considered non-conflicting regardless of any
3069 * lock it holds. Existing callers don't care about a locker after that
3070 * locker's pg_xact updates complete. CommitTransaction() clears "lxid" after
3071 * pg_xact updates and before releasing locks.
3072 *
3073 * Note we never include the current xact's vxid in the result array,
3074 * since an xact never blocks itself.
3075 */
3077GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
3078{
3082 LOCK *lock;
3085 PROCLOCK *proclock;
3086 uint32 hashcode;
3088 int count = 0;
3089 int fast_count = 0;
3090
3092 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
3095 elog(ERROR, "unrecognized lock mode: %d", lockmode);
3096
3097 /*
3098 * Allocate memory to store results, and fill with InvalidVXID. We only
3099 * need enough space for MaxBackends + max_prepared_xacts + a terminator.
3100 * InHotStandby allocate once in TopMemoryContext.
3101 */
3102 if (InHotStandby)
3103 {
3104 if (vxids == NULL)
3107 sizeof(VirtualTransactionId) *
3109 }
3110 else
3112
3113 /* Compute hash code and partition lock, and look up conflicting modes. */
3114 hashcode = LockTagHashCode(locktag);
3116 conflictMask = lockMethodTable->conflictTab[lockmode];
3117
3118 /*
3119 * Fast path locks might not have been entered in the primary lock table.
3120 * If the lock we're dealing with could conflict with such a lock, we must
3121 * examine each backend's fast-path array for conflicts.
3122 */
3123 if (ConflictsWithRelationFastPath(locktag, lockmode))
3124 {
3125 int i;
3126 Oid relid = locktag->locktag_field2;
3128
3129 /* fast-path group the lock belongs to */
3130 uint32 group = FAST_PATH_REL_GROUP(relid);
3131
3132 /*
3133 * Iterate over relevant PGPROCs. Anything held by a prepared
3134 * transaction will have been transferred to the primary lock table,
3135 * so we need not worry about those. This is all a bit fuzzy, because
3136 * new locks could be taken after we've visited a particular
3137 * partition, but the callers had better be prepared to deal with that
3138 * anyway, since the locks could equally well be taken between the
3139 * time we return the value and the time the caller does something
3140 * with it.
3141 */
3142 for (i = 0; i < ProcGlobal->allProcCount; i++)
3143 {
3144 PGPROC *proc = GetPGProcByNumber(i);
3145 uint32 j;
3146
3147 /* A backend never blocks itself */
3148 if (proc == MyProc)
3149 continue;
3150
3152
3153 /*
3154 * If the target backend isn't referencing the same database as
3155 * the lock, then we needn't examine the individual relation IDs
3156 * at all; none of them can be relevant.
3157 *
3158 * See FastPathTransferRelationLocks() for discussion of why we do
3159 * this test after acquiring the lock.
3160 *
3161 * Also skip groups without any registered fast-path locks.
3162 */
3163 if (proc->databaseId != locktag->locktag_field1 ||
3164 proc->fpLockBits[group] == 0)
3165 {
3166 LWLockRelease(&proc->fpInfoLock);
3167 continue;
3168 }
3169
3170 for (j = 0; j < FP_LOCK_SLOTS_PER_GROUP; j++)
3171 {
3173
3174 /* index into the whole per-backend array */
3175 uint32 f = FAST_PATH_SLOT(group, j);
3176
3177 /* Look for an allocated slot matching the given relid. */
3178 if (relid != proc->fpRelId[f])
3179 continue;
3180 lockmask = FAST_PATH_GET_BITS(proc, f);
3181 if (!lockmask)
3182 continue;
3184
3185 /*
3186 * There can only be one entry per relation, so if we found it
3187 * and it doesn't conflict, we can skip the rest of the slots.
3188 */
3189 if ((lockmask & conflictMask) == 0)
3190 break;
3191
3192 /* Conflict! */
3193 GET_VXID_FROM_PGPROC(vxid, *proc);
3194
3196 vxids[count++] = vxid;
3197 /* else, xact already committed or aborted */
3198
3199 /* No need to examine remaining slots. */
3200 break;
3201 }
3202
3203 LWLockRelease(&proc->fpInfoLock);
3204 }
3205 }
3206
3207 /* Remember how many fast-path conflicts we found. */
3208 fast_count = count;
3209
3210 /*
3211 * Look up the lock object matching the tag.
3212 */
3214
3216 locktag,
3217 hashcode,
3218 HASH_FIND,
3219 NULL);
3220 if (!lock)
3221 {
3222 /*
3223 * If the lock object doesn't exist, there is nothing holding a lock
3224 * on this lockable object.
3225 */
3227 vxids[count].procNumber = INVALID_PROC_NUMBER;
3228 vxids[count].localTransactionId = InvalidLocalTransactionId;
3229 if (countp)
3230 *countp = count;
3231 return vxids;
3232 }
3233
3234 /*
3235 * Examine each existing holder (or awaiter) of the lock.
3236 */
3238 {
3239 proclock = dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
3240
3241 if (conflictMask & proclock->holdMask)
3242 {
3243 PGPROC *proc = proclock->tag.myProc;
3244
3245 /* A backend never blocks itself */
3246 if (proc != MyProc)
3247 {
3249
3250 GET_VXID_FROM_PGPROC(vxid, *proc);
3251
3253 {
3254 int i;
3255
3256 /* Avoid duplicate entries. */
3257 for (i = 0; i < fast_count; ++i)
3259 break;
3260 if (i >= fast_count)
3261 vxids[count++] = vxid;
3262 }
3263 /* else, xact already committed or aborted */
3264 }
3265 }
3266 }
3267
3269
3270 if (count > MaxBackends + max_prepared_xacts) /* should never happen */
3271 elog(PANIC, "too many conflicting locks found");
3272
3273 vxids[count].procNumber = INVALID_PROC_NUMBER;
3274 vxids[count].localTransactionId = InvalidLocalTransactionId;
3275 if (countp)
3276 *countp = count;
3277 return vxids;
3278}
3279
3280/*
3281 * Find a lock in the shared lock table and release it. It is the caller's
3282 * responsibility to verify that this is a sane thing to do. (For example, it
3283 * would be bad to release a lock here if there might still be a LOCALLOCK
3284 * object with pointers to it.)
3285 *
3286 * We currently use this in two situations: first, to release locks held by
3287 * prepared transactions on commit (see lock_twophase_postcommit); and second,
3288 * to release locks taken via the fast-path, transferred to the main hash
3289 * table, and then released (see LockReleaseAll).
3290 */
3291static void
3293 LOCKTAG *locktag, LOCKMODE lockmode,
3295{
3296 LOCK *lock;
3297 PROCLOCK *proclock;
3299 uint32 hashcode;
3302 bool wakeupNeeded;
3303
3304 hashcode = LockTagHashCode(locktag);
3306
3308
3309 /*
3310 * Re-find the lock object (it had better be there).
3311 */
3313 locktag,
3314 hashcode,
3315 HASH_FIND,
3316 NULL);
3317 if (!lock)
3318 elog(PANIC, "failed to re-find shared lock object");
3319
3320 /*
3321 * Re-find the proclock object (ditto).
3322 */
3323 proclocktag.myLock = lock;
3324 proclocktag.myProc = proc;
3325
3327
3329 &proclocktag,
3331 HASH_FIND,
3332 NULL);
3333 if (!proclock)
3334 elog(PANIC, "failed to re-find shared proclock object");
3335
3336 /*
3337 * Double-check that we are actually holding a lock of the type we want to
3338 * release.
3339 */
3340 if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
3341 {
3342 PROCLOCK_PRINT("lock_twophase_postcommit: WRONGTYPE", proclock);
3344 elog(WARNING, "you don't own a lock of type %s",
3345 lockMethodTable->lockModeNames[lockmode]);
3346 return;
3347 }
3348
3349 /*
3350 * Do the releasing. CleanUpLock will waken any now-wakable waiters.
3351 */
3352 wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
3353
3354 CleanUpLock(lock, proclock,
3355 lockMethodTable, hashcode,
3356 wakeupNeeded);
3357
3359
3360 /*
3361 * Decrement strong lock count. This logic is needed only for 2PC.
3362 */
3364 && ConflictsWithRelationFastPath(locktag, lockmode))
3365 {
3367
3372 }
3373}
3374
3375/*
3376 * CheckForSessionAndXactLocks
3377 * Check to see if transaction holds both session-level and xact-level
3378 * locks on the same object; if so, throw an error.
3379 *
3380 * If we have both session- and transaction-level locks on the same object,
3381 * PREPARE TRANSACTION must fail. This should never happen with regular
3382 * locks, since we only take those at session level in some special operations
3383 * like VACUUM. It's possible to hit this with advisory locks, though.
3384 *
3385 * It would be nice if we could keep the session hold and give away the
3386 * transactional hold to the prepared xact. However, that would require two
3387 * PROCLOCK objects, and we cannot be sure that another PROCLOCK will be
3388 * available when it comes time for PostPrepare_Locks to do the deed.
3389 * So for now, we error out while we can still do so safely.
3390 *
3391 * Since the LOCALLOCK table stores a separate entry for each lockmode,
3392 * we can't implement this check by examining LOCALLOCK entries in isolation.
3393 * We must build a transient hashtable that is indexed by locktag only.
3394 */
3395static void
3397{
3398 typedef struct
3399 {
3400 LOCKTAG lock; /* identifies the lockable object */
3401 bool sessLock; /* is any lockmode held at session level? */
3402 bool xactLock; /* is any lockmode held at xact level? */
3404
3406 HTAB *lockhtab;
3407 HASH_SEQ_STATUS status;
3409
3410 /* Create a local hash table keyed by LOCKTAG only */
3411 hash_ctl.keysize = sizeof(LOCKTAG);
3412 hash_ctl.entrysize = sizeof(PerLockTagEntry);
3414
3415 lockhtab = hash_create("CheckForSessionAndXactLocks table",
3416 256, /* arbitrary initial size */
3417 &hash_ctl,
3419
3420 /* Scan local lock table to find entries for each LOCKTAG */
3422
3423 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3424 {
3425 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3427 bool found;
3428 int i;
3429
3430 /*
3431 * Ignore VXID locks. We don't want those to be held by prepared
3432 * transactions, since they aren't meaningful after a restart.
3433 */
3434 if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3435 continue;
3436
3437 /* Ignore it if we don't actually hold the lock */
3438 if (locallock->nLocks <= 0)
3439 continue;
3440
3441 /* Otherwise, find or make an entry in lockhtab */
3443 &locallock->tag.lock,
3444 HASH_ENTER, &found);
3445 if (!found) /* initialize, if newly created */
3446 hentry->sessLock = hentry->xactLock = false;
3447
3448 /* Scan to see if we hold lock at session or xact level or both */
3449 for (i = locallock->numLockOwners - 1; i >= 0; i--)
3450 {
3451 if (lockOwners[i].owner == NULL)
3452 hentry->sessLock = true;
3453 else
3454 hentry->xactLock = true;
3455 }
3456
3457 /*
3458 * We can throw error immediately when we see both types of locks; no
3459 * need to wait around to see if there are more violations.
3460 */
3461 if (hentry->sessLock && hentry->xactLock)
3462 ereport(ERROR,
3464 errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3465 }
3466
3467 /* Success, so clean up */
3469}
3470
3471/*
3472 * AtPrepare_Locks
3473 * Do the preparatory work for a PREPARE: make 2PC state file records
3474 * for all locks currently held.
3475 *
3476 * Session-level locks are ignored, as are VXID locks.
3477 *
3478 * For the most part, we don't need to touch shared memory for this ---
3479 * all the necessary state information is in the locallock table.
3480 * Fast-path locks are an exception, however: we move any such locks to
3481 * the main table before allowing PREPARE TRANSACTION to succeed.
3482 */
3483void
3485{
3486 HASH_SEQ_STATUS status;
3488
3489 /* First, verify there aren't locks of both xact and session level */
3491
3492 /* Now do the per-locallock cleanup work */
3494
3495 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3496 {
3497 TwoPhaseLockRecord record;
3498 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3499 bool haveSessionLock;
3500 bool haveXactLock;
3501 int i;
3502
3503 /*
3504 * Ignore VXID locks. We don't want those to be held by prepared
3505 * transactions, since they aren't meaningful after a restart.
3506 */
3507 if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3508 continue;
3509
3510 /* Ignore it if we don't actually hold the lock */
3511 if (locallock->nLocks <= 0)
3512 continue;
3513
3514 /* Scan to see whether we hold it at session or transaction level */
3515 haveSessionLock = haveXactLock = false;
3516 for (i = locallock->numLockOwners - 1; i >= 0; i--)
3517 {
3518 if (lockOwners[i].owner == NULL)
3519 haveSessionLock = true;
3520 else
3521 haveXactLock = true;
3522 }
3523
3524 /* Ignore it if we have only session lock */
3525 if (!haveXactLock)
3526 continue;
3527
3528 /* This can't happen, because we already checked it */
3529 if (haveSessionLock)
3530 ereport(ERROR,
3532 errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3533
3534 /*
3535 * If the local lock was taken via the fast-path, we need to move it
3536 * to the primary lock table, or just get a pointer to the existing
3537 * primary lock table entry if by chance it's already been
3538 * transferred.
3539 */
3540 if (locallock->proclock == NULL)
3541 {
3543 locallock->lock = locallock->proclock->tag.myLock;
3544 }
3545
3546 /*
3547 * Arrange to not release any strong lock count held by this lock
3548 * entry. We must retain the count until the prepared transaction is
3549 * committed or rolled back.
3550 */
3551 locallock->holdsStrongLockCount = false;
3552
3553 /*
3554 * Create a 2PC record.
3555 */
3556 memcpy(&(record.locktag), &(locallock->tag.lock), sizeof(LOCKTAG));
3557 record.lockmode = locallock->tag.mode;
3558
3560 &record, sizeof(TwoPhaseLockRecord));
3561 }
3562}
3563
3564/*
3565 * PostPrepare_Locks
3566 * Clean up after successful PREPARE
3567 *
3568 * Here, we want to transfer ownership of our locks to a dummy PGPROC
3569 * that's now associated with the prepared transaction, and we want to
3570 * clean out the corresponding entries in the LOCALLOCK table.
3571 *
3572 * Note: by removing the LOCALLOCK entries, we are leaving dangling
3573 * pointers in the transaction's resource owner. This is OK at the
3574 * moment since resowner.c doesn't try to free locks retail at a toplevel
3575 * transaction commit or abort. We could alternatively zero out nLocks
3576 * and leave the LOCALLOCK entries to be garbage-collected by LockReleaseAll,
3577 * but that probably costs more cycles.
3578 */
3579 void
/*
 * NOTE(review): this listing is a doxygen extraction; lines shown only as a
 * bare number were dropped by the extractor.  In particular the missing line
 * 3580 is the signature — upstream this is
 * PostPrepare_Locks(FullTransactionId fxid) — and 3591-3595 carried the
 * lock-group Assert and START_CRIT_SECTION().  Confirm against upstream
 * src/backend/storage/lmgr/lock.c before editing.
 */
3581 {
3582 PGPROC *newproc = TwoPhaseGetDummyProc(fxid, false);
3583 HASH_SEQ_STATUS status;
3585 LOCK *lock;
3586 PROCLOCK *proclock;
3588 int partition;
3589
3590 /* Can't prepare a lock group follower. */
3593
3594 /* This is a critical section: any error means big trouble */
3596
3597 /*
3598 * First we run through the locallock table and get rid of unwanted
3599 * entries, then we scan the process's proclocks and transfer them to the
3600 * target proc.
3601 *
3602 * We do this separately because we may have multiple locallock entries
3603 * pointing to the same proclock, and we daren't end up with any dangling
3604 * pointers.
3605 */
3607
3608 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3609 {
3610 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3611 bool haveSessionLock;
3612 bool haveXactLock;
3613 int i;
3614
3615 if (locallock->proclock == NULL || locallock->lock == NULL)
3616 {
3617 /*
3618 * We must've run out of shared memory while trying to set up this
3619 * lock. Just forget the local entry.
3620 */
3621 Assert(locallock->nLocks == 0);
3623 continue;
3624 }
3625
3626 /* Ignore VXID locks */
3627 if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3628 continue;
3629
3630 /* Scan to see whether we hold it at session or transaction level */
3631 haveSessionLock = haveXactLock = false;
3632 for (i = locallock->numLockOwners - 1; i >= 0; i--)
3633 {
/* owner == NULL marks a session-level hold; anything else is a resource owner */
3634 if (lockOwners[i].owner == NULL)
3635 haveSessionLock = true;
3636 else
3637 haveXactLock = true;
3638 }
3639
3640 /* Ignore it if we have only session lock */
3641 if (!haveXactLock)
3642 continue;
3643
/* PANIC (not ERROR) here: per the comment above, we are inside a critical section */
3644 /* This can't happen, because we already checked it */
3645 if (haveSessionLock)
3646 ereport(PANIC,
3648 errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3649
3650 /* Mark the proclock to show we need to release this lockmode */
3651 if (locallock->nLocks > 0)
3652 locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
3653
3654 /* And remove the locallock hashtable entry */
3656 }
3657
3658 /*
3659 * Now, scan each lock partition separately.
3660 */
/* NOTE(review): loop header (line 3661) elided; presumably iterates partition over NUM_LOCK_PARTITIONS — confirm upstream */
3662 {
3664 dlist_head *procLocks = &(MyProc->myProcLocks[partition]);
3666
3668
3669 /*
3670 * If the proclock list for this partition is empty, we can skip
3671 * acquiring the partition lock. This optimization is safer than the
3672 * situation in LockReleaseAll, because we got rid of any fast-path
3673 * locks during AtPrepare_Locks, so there cannot be any case where
3674 * another backend is adding something to our lists now. For safety,
3675 * though, we code this the same way as in LockReleaseAll.
3676 */
3677 if (dlist_is_empty(procLocks))
3678 continue; /* needn't examine this partition */
3679
3681
3683 {
3684 proclock = dlist_container(PROCLOCK, procLink, proclock_iter.cur);
3685
3686 Assert(proclock->tag.myProc == MyProc);
3687
3688 lock = proclock->tag.myLock;
3689
3690 /* Ignore VXID locks */
3692 continue;
3693
3694 PROCLOCK_PRINT("PostPrepare_Locks", proclock);
3695 LOCK_PRINT("PostPrepare_Locks", lock, 0);
3696 Assert(lock->nRequested >= 0);
3697 Assert(lock->nGranted >= 0);
3698 Assert(lock->nGranted <= lock->nRequested);
3699 Assert((proclock->holdMask & ~lock->grantMask) == 0);
3700
3701 /* Ignore it if nothing to release (must be a session lock) */
3702 if (proclock->releaseMask == 0)
3703 continue;
3704
3705 /* Else we should be releasing all locks */
3706 if (proclock->releaseMask != proclock->holdMask)
3707 elog(PANIC, "we seem to have dropped a bit somewhere");
3708
3709 /*
3710 * We cannot simply modify proclock->tag.myProc to reassign
3711 * ownership of the lock, because that's part of the hash key and
3712 * the proclock would then be in the wrong hash chain. Instead
3713 * use hash_update_hash_key. (We used to create a new hash entry,
3714 * but that risks out-of-memory failure if other processes are
3715 * busy making proclocks too.) We must unlink the proclock from
3716 * our procLink chain and put it into the new proc's chain, too.
3717 *
3718 * Note: the updated proclock hash key will still belong to the
3719 * same hash partition, cf proclock_hash(). So the partition lock
3720 * we already hold is sufficient for this.
3721 */
3722 dlist_delete(&proclock->procLink);
3723
3724 /*
3725 * Create the new hash key for the proclock.
3726 */
3727 proclocktag.myLock = lock;
3728 proclocktag.myProc = newproc;
3729
3730 /*
3731 * Update groupLeader pointer to point to the new proc. (We'd
3732 * better not be a member of somebody else's lock group!)
3733 */
3734 Assert(proclock->groupLeader == proclock->tag.myProc);
3735 proclock->groupLeader = newproc;
3736
3737 /*
3738 * Update the proclock. We should not find any existing entry for
3739 * the same hash key, since there can be only one entry for any
3740 * given lock with my own proc.
3741 */
/* NOTE(review): the hash_update_hash_key(...) call line (3742) was elided here */
3743 proclock,
3744 &proclocktag))
3745 elog(PANIC, "duplicate entry found while reassigning a prepared transaction's locks");
3746
3747 /* Re-link into the new proc's proclock list */
3748 dlist_push_tail(&newproc->myProcLocks[partition], &proclock->procLink);
3750 PROCLOCK_PRINT("PostPrepare_Locks: updated", proclock);
3751 } /* loop over PROCLOCKs within this partition */
3752
3754 } /* loop over partitions */
3755
3757 }
3758
3759
3760/*
3761 * GetLockStatusData - Return a summary of the lock manager's internal
3762 * status, for use in a user-level reporting function.
3763 *
3764 * The return data consists of an array of LockInstanceData objects,
3765 * which are a lightly abstracted version of the PROCLOCK data structures,
3766 * i.e. there is one entry for each unique lock and interested PGPROC.
3767 * It is the caller's responsibility to match up related items (such as
3768 * references to the same lockable object or PGPROC) if wanted.
3769 *
3770 * The design goal is to hold the LWLocks for as short a time as possible;
3771 * thus, this function simply makes a copy of the necessary data and releases
3772 * the locks, allowing the caller to contemplate and format the data for as
3773 * long as it pleases.
3774 */
3775 LockData *
/*
 * NOTE(review): doxygen extraction dropped several lines of this function
 * (bare numbers below), including the signature (3776,
 * GetLockStatusData(void) upstream), the palloc of the result struct
 * (3785/3790), the per-backend LWLockAcquire of fpInfoLock (3813), and the
 * hash_seq_init over the proclock hash (3917).  Confirm against upstream
 * lock.c before editing.
 */
3777 {
3778 LockData *data;
3779 PROCLOCK *proclock;
3781 int els;
3782 int el;
3783 int i;
3784
3786
3787 /* Guess how much space we'll need. */
3788 els = MaxBackends;
3789 el = 0;
3791
3792 /*
3793 * First, we iterate through the per-backend fast-path arrays, locking
3794 * them one at a time. This might produce an inconsistent picture of the
3795 * system state, but taking all of those LWLocks at the same time seems
3796 * impractical (in particular, note MAX_SIMUL_LWLOCKS). It shouldn't
3797 * matter too much, because none of these locks can be involved in lock
3798 * conflicts anyway - anything that might must be present in the main lock
3799 * table. (For the same reason, we don't sweat about making leaderPid
3800 * completely valid. We cannot safely dereference another backend's
3801 * lockGroupLeader field without holding all lock partition locks, and
3802 * it's not worth that.)
3803 */
3804 for (i = 0; i < ProcGlobal->allProcCount; ++i)
3805 {
3806 PGPROC *proc = GetPGProcByNumber(i);
3807
3808 /* Skip backends with pid=0, as they don't hold fast-path locks */
3809 if (proc->pid == 0)
3810 continue;
3811
3813
3814 for (uint32 g = 0; g < FastPathLockGroupsPerBackend; g++)
3815 {
3816 /* Skip groups without registered fast-path locks */
3817 if (proc->fpLockBits[g] == 0)
3818 continue;
3819
3820 for (int j = 0; j < FP_LOCK_SLOTS_PER_GROUP; j++)
3821 {
3823 uint32 f = FAST_PATH_SLOT(g, j);
3825
3826 /* Skip unallocated slots */
3827 if (!lockbits)
3828 continue;
3829
/* Grow the output array geometrically (by MaxBackends) when it fills up. */
3830 if (el >= els)
3831 {
3832 els += MaxBackends;
3833 data->locks = (LockInstanceData *)
3834 repalloc(data->locks, sizeof(LockInstanceData) * els);
3835 }
3836
3837 instance = &data->locks[el];
3839 proc->fpRelId[f]);
3841 instance->waitLockMode = NoLock;
3842 instance->vxid.procNumber = proc->vxid.procNumber;
3843 instance->vxid.localTransactionId = proc->vxid.lxid;
3844 instance->pid = proc->pid;
3845 instance->leaderPid = proc->pid;
3846 instance->fastpath = true;
3847
3848 /*
3849 * Successfully taking fast path lock means there were no
3850 * conflicting locks.
3851 */
3852 instance->waitStart = 0;
3853
3854 el++;
3855 }
3856 }
3857
/* Report the backend's fast-path VXID lock as well, if it holds one. */
3858 if (proc->fpVXIDLock)
3859 {
3862
3863 if (el >= els)
3864 {
3865 els += MaxBackends;
3866 data->locks = (LockInstanceData *)
3867 repalloc(data->locks, sizeof(LockInstanceData) * els);
3868 }
3869
3870 vxid.procNumber = proc->vxid.procNumber;
3872
3873 instance = &data->locks[el];
3875 instance->holdMask = LOCKBIT_ON(ExclusiveLock);
3876 instance->waitLockMode = NoLock;
3877 instance->vxid.procNumber = proc->vxid.procNumber;
3878 instance->vxid.localTransactionId = proc->vxid.lxid;
3879 instance->pid = proc->pid;
3880 instance->leaderPid = proc->pid;
3881 instance->fastpath = true;
3882 instance->waitStart = 0;
3883
3884 el++;
3885 }
3886
3887 LWLockRelease(&proc->fpInfoLock);
3888 }
3889
3890 /*
3891 * Next, acquire lock on the entire shared lock data structure. We do
3892 * this so that, at least for locks in the primary lock table, the state
3893 * will be self-consistent.
3894 *
3895 * Since this is a read-only operation, we take shared instead of
3896 * exclusive lock. There's not a whole lot of point to this, because all
3897 * the normal operations require exclusive lock, but it doesn't hurt
3898 * anything either. It will at least allow two backends to do
3899 * GetLockStatusData in parallel.
3900 *
3901 * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
3902 */
3903 for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
3905
3906 /* Now we can safely count the number of proclocks */
3908 if (data->nelements > els)
3909 {
3910 els = data->nelements;
3911 data->locks = (LockInstanceData *)
3912 repalloc(data->locks, sizeof(LockInstanceData) * els);
3913 }
3914
3915 /* Now scan the tables to copy the data */
3917
3918 while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
3919 {
3920 PGPROC *proc = proclock->tag.myProc;
3921 LOCK *lock = proclock->tag.myLock;
3922 LockInstanceData *instance = &data->locks[el];
3923
3924 memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
3925 instance->holdMask = proclock->holdMask;
3926 if (proc->waitLock == proclock->tag.myLock)
3927 instance->waitLockMode = proc->waitLockMode;
3928 else
3929 instance->waitLockMode = NoLock;
3930 instance->vxid.procNumber = proc->vxid.procNumber;
3931 instance->vxid.localTransactionId = proc->vxid.lxid;
3932 instance->pid = proc->pid;
3933 instance->leaderPid = proclock->groupLeader->pid;
3934 instance->fastpath = false;
3935 instance->waitStart = (TimestampTz) pg_atomic_read_u64(&proc->waitStart);
3936
3937 el++;
3938 }
3939
3940 /*
3941 * And release locks. We do this in reverse order for two reasons: (1)
3942 * Anyone else who needs more than one of the locks will be trying to lock
3943 * them in increasing order; we don't want to release the other process
3944 * until it can get all the locks it needs. (2) This avoids O(N^2)
3945 * behavior inside LWLockRelease.
3946 */
3947 for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
3949
3950 Assert(el == data->nelements);
3951
3952 return data;
3953 }
3954
3955/*
3956 * GetBlockerStatusData - Return a summary of the lock manager's state
3957 * concerning locks that are blocking the specified PID or any member of
3958 * the PID's lock group, for use in a user-level reporting function.
3959 *
3960 * For each PID within the lock group that is awaiting some heavyweight lock,
3961 * the return data includes an array of LockInstanceData objects, which are
3962 * the same data structure used by GetLockStatusData; but unlike that function,
3963 * this one reports only the PROCLOCKs associated with the lock that that PID
3964 * is blocked on. (Hence, all the locktags should be the same for any one
3965 * blocked PID.) In addition, we return an array of the PIDs of those backends
3966 * that are ahead of the blocked PID in the lock's wait queue. These can be
3967 * compared with the PIDs in the LockInstanceData objects to determine which
3968 * waiters are ahead of or behind the blocked PID in the queue.
3969 *
3970 * If blocked_pid isn't a valid backend PID or nothing in its lock group is
3971 * waiting on any heavyweight lock, return empty arrays.
3972 *
3973 * The design goal is to hold the LWLocks for as short a time as possible;
3974 * thus, this function simply makes a copy of the necessary data and releases
3975 * the locks, allowing the caller to contemplate and format the data for as
3976 * long as it pleases.
3977 */
/*
 * NOTE(review): the function signature (lines 3978-3979) was lost in
 * extraction; upstream this is GetBlockerStatusData(int blocked_pid)
 * returning BlockedProcsData *.  Bare line numbers below are elided lines
 * (e.g. 3985 palloc of data, 4010 LWLockAcquire(ProcArrayLock), 4012
 * BackendPidGetProcWithLock, 4027 the GetSingleProcBlockerStatusData call)
 * -- confirm against upstream lock.c.
 */
3980 {
3982 PGPROC *proc;
3983 int i;
3984
3986
3987 /*
3988 * Guess how much space we'll need, and preallocate. Most of the time
3989 * this will avoid needing to do repalloc while holding the LWLocks. (We
3990 * assume, but check with an Assert, that MaxBackends is enough entries
3991 * for the procs[] array; the other two could need enlargement, though.)
3992 */
3993 data->nprocs = data->nlocks = data->npids = 0;
3994 data->maxprocs = data->maxlocks = data->maxpids = MaxBackends;
3995 data->procs = palloc_array(BlockedProcData, data->maxprocs);
3996 data->locks = palloc_array(LockInstanceData, data->maxlocks);
3997 data->waiter_pids = palloc_array(int, data->maxpids);
3998
3999 /*
4000 * In order to search the ProcArray for blocked_pid and assume that that
4001 * entry won't immediately disappear under us, we must hold ProcArrayLock.
4002 * In addition, to examine the lock grouping fields of any other backend,
4003 * we must hold all the hash partition locks. (Only one of those locks is
4004 * actually relevant for any one lock group, but we can't know which one
4005 * ahead of time.) It's fairly annoying to hold all those locks
4006 * throughout this, but it's no worse than GetLockStatusData(), and it
4007 * does have the advantage that we're guaranteed to return a
4008 * self-consistent instantaneous state.
4009 */
4011
4013
4014 /* Nothing to do if it's gone */
4015 if (proc != NULL)
4016 {
4017 /*
4018 * Acquire lock on the entire shared lock data structure. See notes
4019 * in GetLockStatusData().
4020 */
4021 for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
4023
4024 if (proc->lockGroupLeader == NULL)
4025 {
4026 /* Easy case, proc is not a lock group member */
/* NOTE(review): elided line 4027 presumably calls GetSingleProcBlockerStatusData(proc, data) -- confirm */
4028 }
4029 else
4030 {
4031 /* Examine all procs in proc's lock group */
4032 dlist_iter iter;
4033
4035 {
4037
4038 memberProc = dlist_container(PGPROC, lockGroupLink, iter.cur);
4040 }
4041 }
4042
4043 /*
4044 * And release locks. See notes in GetLockStatusData().
4045 */
4046 for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
4048
4049 Assert(data->nprocs <= data->maxprocs);
4050 }
4051
4053
4054 return data;
4055 }
4056
4057 /* Accumulate data about one possibly-blocked proc for GetBlockerStatusData */
4058 static void
/*
 * NOTE(review): the signature line (4059) was elided; upstream this is
 * GetSingleProcBlockerStatusData(PGPROC *blocked_proc, BlockedProcsData *data).
 * Other bare numbers below are elided declarations/loop headers (e.g.
 * 4062-4065 locals, 4127-4131 the dclist_foreach over the wait queue that
 * stops when it reaches blocked_proc) -- confirm against upstream lock.c.
 */
4060 {
4061 LOCK *theLock = blocked_proc->waitLock;
4066 int queue_size;
4067
4068 /* Nothing to do if this proc is not blocked */
4069 if (theLock == NULL)
4070 return;
4071
4072 /* Set up a procs[] element */
4073 bproc = &data->procs[data->nprocs++];
4074 bproc->pid = blocked_proc->pid;
4075 bproc->first_lock = data->nlocks;
4076 bproc->first_waiter = data->npids;
4077
4078 /*
4079 * We may ignore the proc's fast-path arrays, since nothing in those could
4080 * be related to a contended lock.
4081 */
4082
4083 /* Collect all PROCLOCKs associated with theLock */
4084 dlist_foreach(proclock_iter, &theLock->procLocks)
4085 {
4086 PROCLOCK *proclock =
4087 dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
4088 PGPROC *proc = proclock->tag.myProc;
4089 LOCK *lock = proclock->tag.myLock;
4091
/* Grow locks[] by MaxBackends at a time, as in GetLockStatusData(). */
4092 if (data->nlocks >= data->maxlocks)
4093 {
4094 data->maxlocks += MaxBackends;
4095 data->locks = (LockInstanceData *)
4096 repalloc(data->locks, sizeof(LockInstanceData) * data->maxlocks);
4097 }
4098
4099 instance = &data->locks[data->nlocks];
4100 memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
4101 instance->holdMask = proclock->holdMask;
4102 if (proc->waitLock == lock)
4103 instance->waitLockMode = proc->waitLockMode;
4104 else
4105 instance->waitLockMode = NoLock;
4106 instance->vxid.procNumber = proc->vxid.procNumber;
4107 instance->vxid.localTransactionId = proc->vxid.lxid;
4108 instance->pid = proc->pid;
4109 instance->leaderPid = proclock->groupLeader->pid;
4110 instance->fastpath = false;
4111 data->nlocks++;
4112 }
4113
4114 /* Enlarge waiter_pids[] if it's too small to hold all wait queue PIDs */
4115 waitQueue = &(theLock->waitProcs);
4116 queue_size = dclist_count(waitQueue);
4117
4118 if (queue_size > data->maxpids - data->npids)
4119 {
4120 data->maxpids = Max(data->maxpids + MaxBackends,
4121 data->npids + queue_size);
4122 data->waiter_pids = (int *) repalloc(data->waiter_pids,
4123 sizeof(int) * data->maxpids);
4124 }
4125
4126 /* Collect PIDs from the lock's wait queue, stopping at blocked_proc */
4128 {
4130
4132 break;
4133 data->waiter_pids[data->npids++] = queued_proc->pid;
4134 }
4135
/* Close out the procs[] element: record how many of locks[]/waiter_pids[] are ours. */
4136 bproc->num_locks = data->nlocks - bproc->first_lock;
4137 bproc->num_waiters = data->npids - bproc->first_waiter;
4138 }
4139
4140/*
4141 * Returns a list of currently held AccessExclusiveLocks, for use by
4142 * LogStandbySnapshot(). The result is a palloc'd array,
4143 * with the number of elements returned into *nlocks.
4144 *
4145 * XXX This currently takes a lock on all partitions of the lock table,
4146 * but it's possible to do better. By reference counting locks and storing
4147 * the value in the ProcArray entry for each backend we could tell if any
4148 * locks need recording without having to acquire the partition locks and
4149 * scan the lock table. Whether that's worth the additional overhead
4150 * is pretty dubious though.
4151 */
/*
 * NOTE(review): the signature lines (4152-4153) were elided; upstream this
 * is xl_standby_lock *GetRunningTransactionLocks(int *nlocks).  Bare line
 * numbers below are elided lines (e.g. 4168 LWLockAcquire, 4171
 * hash_get_num_entries into els, 4177 palloc of accessExclusiveLocks, 4180
 * hash_seq_init, 4210-4211 copying dbOid/relOid from the lock tag) --
 * confirm against upstream lock.c.
 */
4154 {
4156 PROCLOCK *proclock;
4158 int i;
4159 int index;
4160 int els;
4161
4162 /*
4163 * Acquire lock on the entire shared lock data structure.
4164 *
4165 * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
4166 */
4167 for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
4169
4170 /* Now we can safely count the number of proclocks */
4172
4173 /*
4174 * Allocating enough space for all locks in the lock table is overkill,
4175 * but it's more convenient and faster than having to enlarge the array.
4176 */
4178
4179 /* Now scan the tables to copy the data */
4181
4182 /*
4183 * If lock is a currently granted AccessExclusiveLock then it will have
4184 * just one proclock holder, so locks are never accessed twice in this
4185 * particular case. Don't copy this code for use elsewhere because in the
4186 * general case this will give you duplicate locks when looking at
4187 * non-exclusive lock types.
4188 */
4189 index = 0;
4190 while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
4191 {
4192 /* make sure this definition matches the one used in LockAcquire */
4193 if ((proclock->holdMask & LOCKBIT_ON(AccessExclusiveLock)) &&
4195 {
4196 PGPROC *proc = proclock->tag.myProc;
4197 LOCK *lock = proclock->tag.myLock;
4198 TransactionId xid = proc->xid;
4199
4200 /*
4201 * Don't record locks for transactions if we know they have
4202 * already issued their WAL record for commit but not yet released
4203 * lock. It is still possible that we see locks held by already
4204 * complete transactions, if they haven't yet zeroed their xids.
4205 */
4206 if (!TransactionIdIsValid(xid))
4207 continue;
4208
4209 accessExclusiveLocks[index].xid = xid;
4212
4213 index++;
4214 }
4215 }
4216
4217 Assert(index <= els);
4218
4219 /*
4220 * And release locks. We do this in reverse order for two reasons: (1)
4221 * Anyone else who needs more than one of the locks will be trying to lock
4222 * them in increasing order; we don't want to release the other process
4223 * until it can get all the locks it needs. (2) This avoids O(N^2)
4224 * behavior inside LWLockRelease.
4225 */
4226 for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
4228
/* Return the count through the out-parameter; caller owns the palloc'd array. */
4229 *nlocks = index;
4230 return accessExclusiveLocks;
4231 }
4232
4233/* Provide the textual name of any lock mode */
4234const char *
4241
4242#ifdef LOCK_DEBUG
4243/*
4244 * Dump all locks in the given proc's myProcLocks lists.
4245 *
4246 * Caller is responsible for having acquired appropriate LWLocks.
4247 */
4248void
4249DumpLocks(PGPROC *proc)
4250{
4251 int i;
4252
4253 if (proc == NULL)
4254 return;
4255
4256 if (proc->waitLock)
4257 LOCK_PRINT("DumpLocks: waiting on", proc->waitLock, 0);
4258
4259 for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
4260 {
4261 dlist_head *procLocks = &proc->myProcLocks[i];
4262 dlist_iter iter;
4263
4264 dlist_foreach(iter, procLocks)
4265 {
4266 PROCLOCK *proclock = dlist_container(PROCLOCK, procLink, iter.cur);
4267 LOCK *lock = proclock->tag.myLock;
4268
4269 Assert(proclock->tag.myProc == proc);
4270 PROCLOCK_PRINT("DumpLocks", proclock);
4271 LOCK_PRINT("DumpLocks", lock, 0);
4272 }
4273 }
4274}
4275
4276/*
4277 * Dump all lmgr locks.
4278 *
4279 * Caller is responsible for having acquired appropriate LWLocks.
4280 */
4281 void
4282 DumpAllLocks(void)
4283 {
4284 PGPROC *proc;
4285 PROCLOCK *proclock;
4286 LOCK *lock;
4287 HASH_SEQ_STATUS status;
4288
4289 proc = MyProc;
4290
/* Show our own wait target first, if we are blocked on something. */
4291 if (proc && proc->waitLock)
4292 LOCK_PRINT("DumpAllLocks: waiting on", proc->waitLock, 0);
4293
/* NOTE(review): elided line 4294 presumably does hash_seq_init(&status, ...) over the shared proclock hash -- confirm upstream */
4295
4296 while ((proclock = (PROCLOCK *) hash_seq_search(&status)) != NULL)
4297 {
4298 PROCLOCK_PRINT("DumpAllLocks", proclock);
4299
4300 lock = proclock->tag.myLock;
4301 if (lock)
4302 LOCK_PRINT("DumpAllLocks", lock, 0);
4303 else
/* A proclock should always point at its LOCK; log loudly if not. */
4304 elog(LOG, "DumpAllLocks: proclock->tag.myLock = NULL");
4305 }
4306 }
4307#endif /* LOCK_DEBUG */
4308
4309/*
4310 * LOCK 2PC resource manager's routines
4311 */
4312
4313/*
4314 * Re-acquire a lock belonging to a transaction that was prepared.
4315 *
4316 * Because this function is run at db startup, re-acquiring the locks should
4317 * never conflict with running transactions because there are none. We
4318 * assume that the lock state represented by the stored 2PC files is legal.
4319 *
4320 * When switching from Hot Standby mode to normal operation, the locks will
4321 * be already held by the startup process. The locks are acquired for the new
4322 * procs without checking for conflicts, so we don't get a conflict between the
4323 * startup process and the dummy procs, even though we will momentarily have
4324 * a situation where two procs are holding the same AccessExclusiveLock,
4325 * which isn't normally possible because of the conflict. If we're in standby
4326 * mode, but a recovery snapshot hasn't been established yet, it's possible
4327 * that some but not all of the locks are already held by the startup process.
4328 *
4329 * This approach is simple, but also a bit dangerous, because if there isn't
4330 * enough shared memory to acquire the locks, an error will be thrown, which
4331 * is promoted to FATAL and recovery will abort, bringing down postmaster.
4332 * A safer approach would be to transfer the locks like we do in
4333 * AtPrepare_Locks, but then again, in hot standby mode it's possible for
4334 * read-only backends to use up all the shared lock memory anyway, so that
4335 * replaying the WAL record that needs to acquire a lock will throw an error
4336 * and PANIC anyway.
4337 */
4338 void
/*
 * NOTE(review): the first signature line (4339, upstream
 * lock_twophase_recover(FullTransactionId fxid, uint16 info, ...)) and a
 * number of declaration/call lines were elided by the extractor (bare
 * numbers below).  Confirm against upstream lock.c before editing.
 */
4340 void *recdata, uint32 len)
4341 {
4343 PGPROC *proc = TwoPhaseGetDummyProc(fxid, false);
4344 LOCKTAG *locktag;
4345 LOCKMODE lockmode;
4347 LOCK *lock;
4348 PROCLOCK *proclock;
4350 bool found;
4351 uint32 hashcode;
4353 int partition;
4356
/* The 2PC record must be exactly one TwoPhaseLockRecord (locktag + mode). */
4357 Assert(len == sizeof(TwoPhaseLockRecord));
4358 locktag = &rec->locktag;
4359 lockmode = rec->lockmode;
4361
4363 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4365
4366 hashcode = LockTagHashCode(locktag);
4367 partition = LockHashPartition(hashcode);
4369
4371
4372 /*
4373 * Find or create a lock with this tag.
4374 */
4376 locktag,
4377 hashcode,
4379 &found);
4380 if (!lock)
4381 {
4383 ereport(ERROR,
4385 errmsg("out of shared memory"),
4386 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
4387 }
4388
4389 /*
4390 * if it's a new lock object, initialize it
4391 */
4392 if (!found)
4393 {
4394 lock->grantMask = 0;
4395 lock->waitMask = 0;
4396 dlist_init(&lock->procLocks);
4397 dclist_init(&lock->waitProcs);
4398 lock->nRequested = 0;
4399 lock->nGranted = 0;
4400 MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
4401 MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
4402 LOCK_PRINT("lock_twophase_recover: new", lock, lockmode);
4403 }
4404 else
4405 {
4406 LOCK_PRINT("lock_twophase_recover: found", lock, lockmode);
4407 Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
4408 Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
4409 Assert(lock->nGranted <= lock->nRequested);
4410 }
4411
4412 /*
4413 * Create the hash key for the proclock table.
4414 */
4415 proclocktag.myLock = lock;
4416 proclocktag.myProc = proc;
4417
4419
4420 /*
4421 * Find or create a proclock entry with this tag
4422 */
4424 &proclocktag,
4427 &found);
4428 if (!proclock)
4429 {
4430 /* Oops, not enough shmem for the proclock */
4431 if (lock->nRequested == 0)
4432 {
4433 /*
4434 * There are no other requestors of this lock, so garbage-collect
4435 * the lock object. We *must* do this to avoid a permanent leak
4436 * of shared memory, because there won't be anything to cause
4437 * anyone to release the lock object later.
4438 */
4441 &(lock->tag),
4442 hashcode,
4444 NULL))
4445 elog(PANIC, "lock table corrupted");
4446 }
4448 ereport(ERROR,
4450 errmsg("out of shared memory"),
4451 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
4452 }
4453
4454 /*
4455 * If new, initialize the new entry
4456 */
4457 if (!found)
4458 {
4459 Assert(proc->lockGroupLeader == NULL);
4460 proclock->groupLeader = proc;
4461 proclock->holdMask = 0;
4462 proclock->releaseMask = 0;
4463 /* Add proclock to appropriate lists */
4464 dlist_push_tail(&lock->procLocks, &proclock->lockLink);
4466 &proclock->procLink);
4467 PROCLOCK_PRINT("lock_twophase_recover: new", proclock);
4468 }
4469 else
4470 {
4471 PROCLOCK_PRINT("lock_twophase_recover: found", proclock);
4472 Assert((proclock->holdMask & ~lock->grantMask) == 0);
4473 }
4474
4475 /*
4476 * lock->nRequested and lock->requested[] count the total number of
4477 * requests, whether granted or waiting, so increment those immediately.
4478 */
4479 lock->nRequested++;
4480 lock->requested[lockmode]++;
4481 Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
4482
4483 /*
4484 * We shouldn't already hold the desired lock.
4485 */
4486 if (proclock->holdMask & LOCKBIT_ON(lockmode))
4487 elog(ERROR, "lock %s on object %u/%u/%u is already held",
4488 lockMethodTable->lockModeNames[lockmode],
4489 lock->tag.locktag_field1, lock->tag.locktag_field2,
4490 lock->tag.locktag_field3);
4491
4492 /*
4493 * We ignore any possible conflicts and just grant ourselves the lock. Not
4494 * only because we don't bother, but also to avoid deadlocks when
4495 * switching from standby to normal mode. See function comment.
4496 */
4497 GrantLock(lock, proclock, lockmode);
4498
4499 /*
4500 * Bump strong lock count, to make sure any fast-path lock requests won't
4501 * be granted without consulting the primary lock table.
4502 */
4503 if (ConflictsWithRelationFastPath(&lock->tag, lockmode))
4504 {
4506
4510 }
4511
4513 }
4514
4515/*
4516 * Re-acquire a lock belonging to a transaction that was prepared, when
4517 * starting up into hot standby mode.
4518 */
4519 void
/*
 * NOTE(review): signature line (4520) elided; upstream this is
 * lock_twophase_standby_recover(FullTransactionId fxid, uint16 info, ...).
 * The elided line 4539 presumably calls StandbyAcquireAccessExclusiveLock
 * with the xid and the two tag fields below -- confirm upstream.
 */
4521 {
4524 LOCKTAG *locktag;
4525 LOCKMODE lockmode;
4527
4528 Assert(len == sizeof(TwoPhaseLockRecord));
4529 locktag = &rec->locktag;
4530 lockmode = rec->lockmode;
4532
4534 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4535
/* Only relation-level AccessExclusiveLocks are re-acquired for hot standby. */
4536 if (lockmode == AccessExclusiveLock &&
4537 locktag->locktag_type == LOCKTAG_RELATION)
4538 {
4540 locktag->locktag_field1 /* dboid */ ,
4541 locktag->locktag_field2 /* reloid */ );
4542 }
4543 }
4544
4545
4546/*
4547 * 2PC processing routine for COMMIT PREPARED case.
4548 *
4549 * Find and release the lock indicated by the 2PC record.
4550 */
4551 void
/*
 * NOTE(review): signature line (4552) elided; upstream this is
 * lock_twophase_postcommit(FullTransactionId fxid, uint16 info, ...).
 * Bare numbers below are elided declaration/lookup lines -- confirm
 * against upstream lock.c.
 */
4553 {
4556 PGPROC *proc = TwoPhaseGetDummyProc(fxid, true);
4557 LOCKTAG *locktag;
4560
4561 Assert(len == sizeof(TwoPhaseLockRecord));
4562 locktag = &rec->locktag;
4564
4566 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4568
/* Release the single lock described by this 2PC record on the dummy proc. */
4569 LockRefindAndRelease(lockMethodTable, proc, locktag, rec->lockmode, true);
4570 }
4571
4572/*
4573 * 2PC processing routine for ROLLBACK PREPARED case.
4574 *
4575 * This is actually just the same as the COMMIT case.
4576 */
4577void
4583
4584/*
4585 * VirtualXactLockTableInsert
4586 *
4587 * Take vxid lock via the fast-path. There can't be any pre-existing
4588 * lockers, as we haven't advertised this vxid via the ProcArray yet.
4589 *
4590 * Since MyProc->fpLocalTransactionId will normally contain the same data
4591 * as MyProc->vxid.lxid, you might wonder if we really need both. The
4592 * difference is that MyProc->vxid.lxid is set and cleared unlocked, and
4593 * examined by procarray.c, while fpLocalTransactionId is protected by
4594 * fpInfoLock and is used only by the locking subsystem. Doing it this
4595 * way makes it easier to verify that there are no funny race conditions.
4596 *
4597 * We don't bother recording this lock in the local lock table, since it's
4598 * only ever released at the end of a transaction. Instead,
4599 * LockReleaseAll() calls VirtualXactLockTableCleanup().
4600 */
4601void
4617
4618/*
4619 * VirtualXactLockTableCleanup
4620 *
4621 * Check whether a VXID lock has been materialized; if so, release it,
4622 * unblocking waiters.
4623 */
4624 void
/*
 * NOTE(review): signature line (4625) elided; upstream this is
 * VirtualXactLockTableCleanup(void).  Bare numbers below are elided lines
 * (e.g. 4635/4642 the fpInfoLock acquire/release pair around the
 * fast-path state reads, 4638/4640 the lxid capture and reset) -- confirm
 * against upstream lock.c.
 */
4626 {
4627 bool fastpath;
4628 LocalTransactionId lxid;
4629
4631
4632 /*
4633 * Clean up shared memory state.
4634 */
4636
/* Snapshot whether the VXID lock was still held via fast-path, then clear it. */
4637 fastpath = MyProc->fpVXIDLock;
4639 MyProc->fpVXIDLock = false;
4641
4643
4644 /*
4645 * If fpVXIDLock has been cleared without touching fpLocalTransactionId,
4646 * that means someone transferred the lock to the main lock table.
4647 */
4648 if (!fastpath && LocalTransactionIdIsValid(lxid))
4649 {
4651 LOCKTAG locktag;
4652
/* Rebuild the VXID lock tag for our own proc number and release it. */
4653 vxid.procNumber = MyProcNumber;
4654 vxid.localTransactionId = lxid;
4655 SET_LOCKTAG_VIRTUALTRANSACTION(locktag, vxid);
4656
4658 &locktag, ExclusiveLock, false);
4659 }
4660 }
4661
4662/*
4663 * XactLockForVirtualXact
4664 *
4665 * If TransactionIdIsValid(xid), this is essentially XactLockTableWait(xid,
4666 * NULL, NULL, XLTW_None) or ConditionalXactLockTableWait(xid). Unlike those
4667 * functions, it assumes "xid" is never a subtransaction and that "xid" is
4668 * prepared, committed, or aborted.
4669 *
4670 * If !TransactionIdIsValid(xid), this locks every prepared XID having been
4671 * known as "vxid" before its PREPARE TRANSACTION.
4672 */
4673 static bool
/*
 * NOTE(review): the first signature line (4674, upstream
 * XactLockForVirtualXact(VirtualTransactionId vxid, ...)) was elided, as
 * were a few body lines (bare numbers below, e.g. 4685 the
 * LockAcquireResult declaration and 4691 the LockRelease inside the
 * clear-state branch) -- confirm against upstream lock.c.
 */
4675 TransactionId xid, bool wait)
4676 {
4677 bool more = false;
4678
4679 /* There is no point to wait for 2PCs if you have no 2PCs. */
4680 if (max_prepared_xacts == 0)
4681 return true;
4682
/* Loop: one prepared transaction per iteration, while more remain for this vxid. */
4683 do
4684 {
4686 LOCKTAG tag;
4687
4688 /* Clear state from previous iterations. */
4689 if (more)
4690 {
4692 more = false;
4693 }
4694
4695 /* If we have no xid, try to find one. */
4696 if (!TransactionIdIsValid(xid))
4697 xid = TwoPhaseGetXidByVirtualXID(vxid, &more);
4698 if (!TransactionIdIsValid(xid))
4699 {
4700 Assert(!more);
4701 return true;
4702 }
4703
/* ShareLock on the XID blocks until the holder commits/aborts; dontWait if !wait. */
4704 /* Check or wait for XID completion. */
4705 SET_LOCKTAG_TRANSACTION(tag, xid);
4706 lar = LockAcquire(&tag, ShareLock, false, !wait);
4708 return false;
4709 LockRelease(&tag, ShareLock, false);
4710 } while (more);
4711
4712 return true;
4713 }
4714
4715/*
4716 * VirtualXactLock
4717 *
4718 * If wait = true, wait as long as the given VXID or any XID acquired by the
4719 * same transaction is still running. Then, return true.
4720 *
4721 * If wait = false, just check whether that VXID or one of those XIDs is still
4722 * running, and return true or false.
4723 */
4724 bool
/*
 * NOTE(review): the signature line (4725, upstream
 * VirtualXactLock(VirtualTransactionId vxid, bool wait)) was elided, along
 * with several body lines shown only as bare numbers (e.g. 4729-4733 the
 * xid declaration, validity Assert and the recovered-proc check, 4737
 * SET_LOCKTAG_VIRTUALTRANSACTION, 4756 the fpInfoLock acquire) -- confirm
 * against upstream lock.c.
 */
4726 {
4727 LOCKTAG tag;
4728 PGPROC *proc;
4730
4732
4734 /* no vxid lock; localTransactionId is a normal, locked XID */
4735 return XactLockForVirtualXact(vxid, vxid.localTransactionId, wait);
4736
4738
4739 /*
4740 * If a lock table entry must be made, this is the PGPROC on whose behalf
4741 * it must be done. Note that the transaction might end or the PGPROC
4742 * might be reassigned to a new backend before we get around to examining
4743 * it, but it doesn't matter. If we find upon examination that the
4744 * relevant lxid is no longer running here, that's enough to prove that
4745 * it's no longer running anywhere.
4746 */
4747 proc = ProcNumberGetProc(vxid.procNumber);
4748 if (proc == NULL)
4749 return XactLockForVirtualXact(vxid, InvalidTransactionId, wait);
4750
4751 /*
4752 * We must acquire this lock before checking the procNumber and lxid
4753 * against the ones we're waiting for. The target backend will only set
4754 * or clear lxid while holding this lock.
4755 */
4757
4758 if (proc->vxid.procNumber != vxid.procNumber
4760 {
4761 /* VXID ended */
4762 LWLockRelease(&proc->fpInfoLock);
4763 return XactLockForVirtualXact(vxid, InvalidTransactionId, wait);
4764 }
4765
4766 /*
4767 * If we aren't asked to wait, there's no need to set up a lock table
4768 * entry. The transaction is still in progress, so just return false.
4769 */
4770 if (!wait)
4771 {
4772 LWLockRelease(&proc->fpInfoLock);
4773 return false;
4774 }
4775
4776 /*
4777 * OK, we're going to need to sleep on the VXID. But first, we must set
4778 * up the primary lock table entry, if needed (ie, convert the proc's
4779 * fast-path lock on its VXID to a regular lock).
4780 */
4781 if (proc->fpVXIDLock)
4782 {
4783 PROCLOCK *proclock;
4784 uint32 hashcode;
4786
4787 hashcode = LockTagHashCode(&tag);
4788
4791
4793 &tag, hashcode, ExclusiveLock);
4794 if (!proclock)
4795 {
4797 LWLockRelease(&proc->fpInfoLock);
4798 ereport(ERROR,
4800 errmsg("out of shared memory"),
4801 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
4802 }
/* Grant ourselves the transferred main-table entry without a conflict check. */
4803 GrantLock(proclock->tag.myLock, proclock, ExclusiveLock);
4804
4806
4807 proc->fpVXIDLock = false;
4808 }
4809
4810 /*
4811 * If the proc has an XID now, we'll avoid a TwoPhaseGetXidByVirtualXID()
4812 * search. The proc might have assigned this XID but not yet locked it,
4813 * in which case the proc will lock this XID before releasing the VXID.
4814 * The fpInfoLock critical section excludes VirtualXactLockTableCleanup(),
4815 * so we won't save an XID of a different VXID. It doesn't matter whether
4816 * we save this before or after setting up the primary lock table entry.
4817 */
4818 xid = proc->xid;
4819
4820 /* Done with proc->fpLockBits */
4821 LWLockRelease(&proc->fpInfoLock);
4822
4823 /* Time to wait. */
4824 (void) LockAcquire(&tag, ShareLock, false, false);
4825
4826 LockRelease(&tag, ShareLock, false);
4827 return XactLockForVirtualXact(vxid, xid, wait);
4828 }
4829
4830/*
4831 * LockWaiterCount
4832 *
4833 * Find the number of lock requesters on this locktag
4834 */
4835 int
/*
 * NOTE(review): the signature line (4836, upstream
 * LockWaiterCount(const LOCKTAG *locktag)) was elided, as were the
 * lockmethodid setup (4838/4845), the partition LWLock acquire/release
 * (4849-4852, 4862), and the hash_search_with_hash_value call header --
 * confirm against upstream lock.c.
 */
4837 {
4839 LOCK *lock;
4840 bool found;
4841 uint32 hashcode;
4843 int waiters = 0;
4844
4846 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4847
4848 hashcode = LockTagHashCode(locktag);
4851
/* HASH_FIND only: we never create a lock object here, just inspect it. */
4853 locktag,
4854 hashcode,
4855 HASH_FIND,
4856 &found);
4857 if (found)
4858 {
4859 Assert(lock != NULL);
/* nRequested counts granted plus waiting requesters on this lock. */
4860 waiters = lock->nRequested;
4861 }
4863
4864 return waiters;
4865 }
static uint64 pg_atomic_read_u64(volatile pg_atomic_uint64 *ptr)
Definition atomics.h:467
#define Max(x, y)
Definition c.h:1085
#define Assert(condition)
Definition c.h:943
int64_t int64
Definition c.h:621
uint16_t uint16
Definition c.h:623
uint32_t uint32
Definition c.h:624
#define lengthof(array)
Definition c.h:873
uint32 LocalTransactionId
Definition c.h:738
#define MemSet(start, val, len)
Definition c.h:1107
uint32 TransactionId
Definition c.h:736
size_t Size
Definition c.h:689
uint32 result
memcpy(sums, checksumBaseOffsets, sizeof(checksumBaseOffsets))
int64 TimestampTz
Definition timestamp.h:39
void DeadLockReport(void)
Definition deadlock.c:1075
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition dynahash.c:889
HTAB * hash_create(const char *tabname, int64 nelem, const HASHCTL *info, int flags)
Definition dynahash.c:360
void hash_destroy(HTAB *hashp)
Definition dynahash.c:802
void * hash_search_with_hash_value(HTAB *hashp, const void *keyPtr, uint32 hashvalue, HASHACTION action, bool *foundPtr)
Definition dynahash.c:902
void * hash_seq_search(HASH_SEQ_STATUS *status)
Definition dynahash.c:1352
int64 hash_get_num_entries(HTAB *hashp)
Definition dynahash.c:1273
bool hash_update_hash_key(HTAB *hashp, void *existingEntry, const void *newKeyPtr)
Definition dynahash.c:1077
uint32 get_hash_value(HTAB *hashp, const void *keyPtr)
Definition dynahash.c:845
void hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp)
Definition dynahash.c:1317
Datum arg
Definition elog.c:1323
ErrorContextCallback * error_context_stack
Definition elog.c:100
int errcode(int sqlerrcode)
Definition elog.c:875
#define LOG
Definition elog.h:32
#define PG_RE_THROW()
Definition elog.h:407
#define errcontext
Definition elog.h:200
int errhint(const char *fmt,...) pg_attribute_printf(1
#define PG_TRY(...)
Definition elog.h:374
#define WARNING
Definition elog.h:37
#define PG_END_TRY(...)
Definition elog.h:399
#define PANIC
Definition elog.h:44
#define ERROR
Definition elog.h:40
#define PG_CATCH(...)
Definition elog.h:384
#define elog(elevel,...)
Definition elog.h:228
#define ereport(elevel,...)
Definition elog.h:152
int int int int errdetail_log_plural(const char *fmt_singular, const char *fmt_plural, unsigned long n,...) pg_attribute_printf(1
#define palloc_object(type)
Definition fe_memutils.h:74
#define palloc_array(type, count)
Definition fe_memutils.h:76
#define palloc0_array(type, count)
Definition fe_memutils.h:77
int MyProcPid
Definition globals.c:49
ProcNumber MyProcNumber
Definition globals.c:92
int MaxBackends
Definition globals.c:149
@ HASH_FIND
Definition hsearch.h:108
@ HASH_REMOVE
Definition hsearch.h:110
@ HASH_ENTER
Definition hsearch.h:109
@ HASH_ENTER_NULL
Definition hsearch.h:111
#define HASH_CONTEXT
Definition hsearch.h:97
#define HASH_ELEM
Definition hsearch.h:90
#define HASH_FUNCTION
Definition hsearch.h:93
#define HASH_BLOBS
Definition hsearch.h:92
#define HASH_PARTITION
Definition hsearch.h:87
#define dlist_foreach(iter, lhead)
Definition ilist.h:623
static void dlist_init(dlist_head *head)
Definition ilist.h:314
static void dlist_delete(dlist_node *node)
Definition ilist.h:405
static uint32 dclist_count(const dclist_head *head)
Definition ilist.h:932
static bool dclist_is_empty(const dclist_head *head)
Definition ilist.h:682
static bool dlist_node_is_detached(const dlist_node *node)
Definition ilist.h:525
#define dlist_foreach_modify(iter, lhead)
Definition ilist.h:640
static bool dlist_is_empty(const dlist_head *head)
Definition ilist.h:336
static void dlist_push_tail(dlist_head *head, dlist_node *node)
Definition ilist.h:364
static void dclist_delete_from_thoroughly(dclist_head *head, dlist_node *node)
Definition ilist.h:776
static void dclist_init(dclist_head *head)
Definition ilist.h:671
#define dlist_container(type, membername, ptr)
Definition ilist.h:593
#define dclist_foreach(iter, lhead)
Definition ilist.h:970
int j
Definition isn.c:78
int i
Definition isn.c:77
void DescribeLockTag(StringInfo buf, const LOCKTAG *tag)
Definition lmgr.c:1249
static bool XactLockForVirtualXact(VirtualTransactionId vxid, TransactionId xid, bool wait)
Definition lock.c:4674
LockAcquireResult LockAcquire(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock, bool dontWait)
Definition lock.c:806
static LOCALLOCK * awaitedLock
Definition lock.c:339
static void RemoveLocalLock(LOCALLOCK *locallock)
Definition lock.c:1484
static void LockReassignOwner(LOCALLOCK *locallock, ResourceOwner parent)
Definition lock.c:2744
bool LockHeldByMe(const LOCKTAG *locktag, LOCKMODE lockmode, bool orstronger)
Definition lock.c:640
static bool Dummy_trace
Definition lock.c:125
static const char *const lock_mode_names[]
Definition lock.c:111
void lock_twophase_postabort(FullTransactionId fxid, uint16 info, void *recdata, uint32 len)
Definition lock.c:4578
#define LOCK_PRINT(where, lock, type)
Definition lock.c:416
void PostPrepare_Locks(FullTransactionId fxid)
Definition lock.c:3580
void lock_twophase_standby_recover(FullTransactionId fxid, uint16 info, void *recdata, uint32 len)
Definition lock.c:4520
bool DoLockModesConflict(LOCKMODE mode1, LOCKMODE mode2)
Definition lock.c:620
static PROCLOCK * SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc, const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode)
Definition lock.c:1291
static PROCLOCK * FastPathGetRelationLockEntry(LOCALLOCK *locallock)
Definition lock.c:2966
const ShmemCallbacks LockManagerShmemCallbacks
Definition lock.c:320
void VirtualXactLockTableInsert(VirtualTransactionId vxid)
Definition lock.c:4602
#define NLOCKENTS()
Definition lock.c:59
#define FastPathStrongLockHashPartition(hashcode)
Definition lock.c:306
static uint32 ProcLockHashCode(const PROCLOCKTAG *proclocktag, uint32 hashcode)
Definition lock.c:602
#define FAST_PATH_CHECK_LOCKMODE(proc, n, l)
Definition lock.c:259
void GrantAwaitedLock(void)
Definition lock.c:1897
int LockWaiterCount(const LOCKTAG *locktag)
Definition lock.c:4836
void AtPrepare_Locks(void)
Definition lock.c:3484
bool LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
Definition lock.c:2110
#define FAST_PATH_LOCKNUMBER_OFFSET
Definition lock.c:245
#define FAST_PATH_REL_GROUP(rel)
Definition lock.c:220
void InitLockManagerAccess(void)
Definition lock.c:502
void GrantLock(LOCK *lock, PROCLOCK *proclock, LOCKMODE lockmode)
Definition lock.c:1666
void VirtualXactLockTableCleanup(void)
Definition lock.c:4625
bool VirtualXactLock(VirtualTransactionId vxid, bool wait)
Definition lock.c:4725
VirtualTransactionId * GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
Definition lock.c:3077
static volatile FastPathStrongRelationLockData * FastPathStrongRelationLocks
Definition lock.c:315
void RemoveFromWaitQueue(PGPROC *proc, uint32 hashcode)
Definition lock.c:2054
LockAcquireResult LockAcquireExtended(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock, bool dontWait, bool reportMemoryError, LOCALLOCK **locallockp, bool logLockFailure)
Definition lock.c:833
void LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
Definition lock.c:2315
#define FAST_PATH_SLOT(group, index)
Definition lock.c:227
static void CheckAndSetLockHeld(LOCALLOCK *locallock, bool acquired)
Definition lock.c:1472
#define ConflictsWithRelationFastPath(locktag, mode)
Definition lock.c:276
void ResetAwaitedLock(void)
Definition lock.c:1915
static bool FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag, uint32 hashcode)
Definition lock.c:2869
static HTAB * LockMethodLocalHash
Definition lock.c:334
void LockReassignCurrentOwner(LOCALLOCK **locallocks, int nlocks)
Definition lock.c:2714
static bool UnGrantLock(LOCK *lock, LOCKMODE lockmode, PROCLOCK *proclock, LockMethod lockMethodTable)
Definition lock.c:1689
#define FAST_PATH_SET_LOCKMODE(proc, n, l)
Definition lock.c:255
#define PROCLOCK_PRINT(where, proclockP)
Definition lock.c:417
static void CleanUpLock(LOCK *lock, PROCLOCK *proclock, LockMethod lockMethodTable, uint32 hashcode, bool wakeupNeeded)
Definition lock.c:1746
static uint32 proclock_hash(const void *key, Size keysize)
Definition lock.c:571
static bool FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode)
Definition lock.c:2833
void AbortStrongLockAcquire(void)
Definition lock.c:1868
static bool FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode)
Definition lock.c:2790
static int FastPathLocalUseCounts[FP_LOCK_GROUPS_PER_BACKEND_MAX]
Definition lock.c:179
static HTAB * LockMethodLockHash
Definition lock.c:332
static ResourceOwner awaitedOwner
Definition lock.c:340
BlockedProcsData * GetBlockerStatusData(int blocked_pid)
Definition lock.c:3979
static ProcWaitStatus WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner)
Definition lock.c:1940
bool LockHasWaiters(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
Definition lock.c:693
const char * GetLockmodeName(LOCKMETHODID lockmethodid, LOCKMODE mode)
Definition lock.c:4235
static void GetSingleProcBlockerStatusData(PGPROC *blocked_proc, BlockedProcsData *data)
Definition lock.c:4059
#define FAST_PATH_CLEAR_LOCKMODE(proc, n, l)
Definition lock.c:257
void lock_twophase_postcommit(FullTransactionId fxid, uint16 info, void *recdata, uint32 len)
Definition lock.c:4552
int max_locks_per_xact
Definition lock.c:56
static const LockMethod LockMethods[]
Definition lock.c:153
static void waitonlock_error_callback(void *arg)
Definition lock.c:2028
void LockReleaseCurrentOwner(LOCALLOCK **locallocks, int nlocks)
Definition lock.c:2619
LOCALLOCK * GetAwaitedLock(void)
Definition lock.c:1906
void LockReleaseSession(LOCKMETHODID lockmethodid)
Definition lock.c:2589
void MarkLockClear(LOCALLOCK *locallock)
Definition lock.c:1928
LockData * GetLockStatusData(void)
Definition lock.c:3776
static bool IsRelationExtensionLockHeld PG_USED_FOR_ASSERTS_ONLY
Definition lock.c:194
static const LockMethodData default_lockmethod
Definition lock.c:128
#define FAST_PATH_GET_BITS(proc, n)
Definition lock.c:248
static LOCALLOCK * StrongLockInProgress
Definition lock.c:338
#define FAST_PATH_BITS_PER_SLOT
Definition lock.c:244
static const LockMethodData user_lockmethod
Definition lock.c:139
int FastPathLockGroupsPerBackend
Definition lock.c:205
#define EligibleForRelationFastPath(locktag, mode)
Definition lock.c:270
uint32 LockTagHashCode(const LOCKTAG *locktag)
Definition lock.c:554
static void BeginStrongLockAcquire(LOCALLOCK *locallock, uint32 fasthashcode)
Definition lock.c:1832
bool LockCheckConflicts(LockMethod lockMethodTable, LOCKMODE lockmode, LOCK *lock, PROCLOCK *proclock)
Definition lock.c:1537
static void LockManagerShmemRequest(void *arg)
Definition lock.c:451
void lock_twophase_recover(FullTransactionId fxid, uint16 info, void *recdata, uint32 len)
Definition lock.c:4339
static void GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner)
Definition lock.c:1800
static const LOCKMASK LockConflicts[]
Definition lock.c:68
static void ReleaseLockIfHeld(LOCALLOCK *locallock, bool sessionLock)
Definition lock.c:2654
LockMethod GetLocksMethodTable(const LOCK *lock)
Definition lock.c:524
static void FinishStrongLockAcquire(void)
Definition lock.c:1858
#define FAST_PATH_STRONG_LOCK_HASH_PARTITIONS
Definition lock.c:304
xl_standby_lock * GetRunningTransactionLocks(int *nlocks)
Definition lock.c:4153
static void LockRefindAndRelease(LockMethod lockMethodTable, PGPROC *proc, LOCKTAG *locktag, LOCKMODE lockmode, bool decrement_strong_lock_count)
Definition lock.c:3292
static void CheckForSessionAndXactLocks(void)
Definition lock.c:3396
static HTAB * LockMethodProcLockHash
Definition lock.c:333
static void LockManagerShmemInit(void *arg)
Definition lock.c:493
bool log_lock_failures
Definition lock.c:57
LockMethod GetLockTagsMethodTable(const LOCKTAG *locktag)
Definition lock.c:536
#define LOCK_LOCKTAG(lock)
Definition lock.h:156
#define VirtualTransactionIdIsValid(vxid)
Definition lock.h:70
#define LockHashPartitionLock(hashcode)
Definition lock.h:357
#define GET_VXID_FROM_PGPROC(vxid_dst, proc)
Definition lock.h:80
#define LOCK_LOCKMETHOD(lock)
Definition lock.h:155
#define LOCKBIT_OFF(lockmode)
Definition lock.h:88
#define LOCALLOCK_LOCKMETHOD(llock)
Definition lock.h:274
#define InvalidLocalTransactionId
Definition lock.h:68
#define MAX_LOCKMODES
Definition lock.h:85
#define LOCKBIT_ON(lockmode)
Definition lock.h:87
#define LocalTransactionIdIsValid(lxid)
Definition lock.h:69
#define LOCALLOCK_LOCKTAG(llock)
Definition lock.h:275
#define LockHashPartition(hashcode)
Definition lock.h:355
#define VirtualTransactionIdEquals(vxid1, vxid2)
Definition lock.h:74
#define PROCLOCK_LOCKMETHOD(proclock)
Definition lock.h:213
#define LockHashPartitionLockByIndex(i)
Definition lock.h:360
LockAcquireResult
Definition lock.h:331
@ LOCKACQUIRE_ALREADY_CLEAR
Definition lock.h:335
@ LOCKACQUIRE_OK
Definition lock.h:333
@ LOCKACQUIRE_ALREADY_HELD
Definition lock.h:334
@ LOCKACQUIRE_NOT_AVAIL
Definition lock.h:332
#define VirtualTransactionIdIsRecoveredPreparedXact(vxid)
Definition lock.h:72
int LOCKMODE
Definition lockdefs.h:26
#define NoLock
Definition lockdefs.h:34
#define AccessExclusiveLock
Definition lockdefs.h:43
#define ShareRowExclusiveLock
Definition lockdefs.h:41
#define AccessShareLock
Definition lockdefs.h:36
int LOCKMASK
Definition lockdefs.h:25
#define ShareUpdateExclusiveLock
Definition lockdefs.h:39
#define ExclusiveLock
Definition lockdefs.h:42
#define RowShareLock
Definition lockdefs.h:37
#define ShareLock
Definition lockdefs.h:40
#define MaxLockMode
Definition lockdefs.h:45
#define RowExclusiveLock
Definition lockdefs.h:38
uint16 LOCKMETHODID
Definition locktag.h:22
#define DEFAULT_LOCKMETHOD
Definition locktag.h:25
#define SET_LOCKTAG_VIRTUALTRANSACTION(locktag, vxid)
Definition locktag.h:135
@ LOCKTAG_OBJECT
Definition locktag.h:45
@ LOCKTAG_RELATION_EXTEND
Definition locktag.h:38
@ LOCKTAG_RELATION
Definition locktag.h:37
@ LOCKTAG_TUPLE
Definition locktag.h:41
@ LOCKTAG_VIRTUALTRANSACTION
Definition locktag.h:43
#define SET_LOCKTAG_TRANSACTION(locktag, xid)
Definition locktag.h:126
#define SET_LOCKTAG_RELATION(locktag, dboid, reloid)
Definition locktag.h:81
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
Definition lwlock.c:1150
void LWLockRelease(LWLock *lock)
Definition lwlock.c:1767
#define NUM_LOCK_PARTITIONS
Definition lwlock.h:87
#define LOG2_NUM_LOCK_PARTITIONS
Definition lwlock.h:86
@ LW_SHARED
Definition lwlock.h:105
@ LW_EXCLUSIVE
Definition lwlock.h:104
void * MemoryContextAlloc(MemoryContext context, Size size)
Definition mcxt.c:1232
void * repalloc(void *pointer, Size size)
Definition mcxt.c:1632
void pfree(void *pointer)
Definition mcxt.c:1616
MemoryContext TopMemoryContext
Definition mcxt.c:166
void * palloc(Size size)
Definition mcxt.c:1387
MemoryContext CurrentMemoryContext
Definition mcxt.c:160
#define START_CRIT_SECTION()
Definition miscadmin.h:152
#define END_CRIT_SECTION()
Definition miscadmin.h:154
static char * errmsg
static PgChecksumMode mode
const void size_t len
const void * data
static char buf[DEFAULT_XLOG_SEG_SIZE]
void pgstat_count_lock_fastpath_exceeded(uint8 locktag_type)
static uint32 DatumGetUInt32(Datum X)
Definition postgres.h:222
uint64_t Datum
Definition postgres.h:70
#define PointerGetDatum(X)
Definition postgres.h:354
unsigned int Oid
static int fb(int x)
#define FP_LOCK_GROUPS_PER_BACKEND_MAX
Definition proc.h:95
#define FastPathLockSlotsPerBackend()
Definition proc.h:97
#define GetPGProcByNumber(n)
Definition proc.h:504
#define FP_LOCK_SLOTS_PER_GROUP
Definition proc.h:96
ProcWaitStatus
Definition proc.h:144
@ PROC_WAIT_STATUS_OK
Definition proc.h:145
@ PROC_WAIT_STATUS_WAITING
Definition proc.h:146
@ PROC_WAIT_STATUS_ERROR
Definition proc.h:147
PGPROC * BackendPidGetProcWithLock(int pid)
Definition procarray.c:3192
PGPROC * ProcNumberGetProc(ProcNumber procNumber)
Definition procarray.c:3111
#define INVALID_PROC_NUMBER
Definition procnumber.h:26
void set_ps_display_remove_suffix(void)
Definition ps_status.c:440
void set_ps_display_suffix(const char *suffix)
Definition ps_status.c:388
void ResourceOwnerRememberLock(ResourceOwner owner, LOCALLOCK *locallock)
Definition resowner.c:1059
ResourceOwner ResourceOwnerGetParent(ResourceOwner owner)
Definition resowner.c:902
ResourceOwner CurrentResourceOwner
Definition resowner.c:173
void ResourceOwnerForgetLock(ResourceOwner owner, LOCALLOCK *locallock)
Definition resowner.c:1079
#define ShmemRequestHash(...)
Definition shmem.h:179
#define ShmemRequestStruct(...)
Definition shmem.h:176
static void SpinLockRelease(volatile slock_t *lock)
Definition spin.h:62
static void SpinLockAcquire(volatile slock_t *lock)
Definition spin.h:56
static void SpinLockInit(volatile slock_t *lock)
Definition spin.h:50
ProcWaitStatus JoinWaitQueue(LOCALLOCK *locallock, LockMethod lockMethodTable, bool dontWait)
Definition proc.c:1146
PGPROC * MyProc
Definition proc.c:71
void GetLockHoldersAndWaiters(LOCALLOCK *locallock, StringInfo lock_holders_sbuf, StringInfo lock_waiters_sbuf, int *lockHoldersNum)
Definition proc.c:1941
ProcWaitStatus ProcSleep(LOCALLOCK *locallock)
Definition proc.c:1315
void ProcLockWakeup(LockMethod lockMethodTable, LOCK *lock)
Definition proc.c:1776
PROC_HDR * ProcGlobal
Definition proc.c:74
void LogAccessExclusiveLockPrepare(void)
Definition standby.c:1471
void StandbyAcquireAccessExclusiveLock(TransactionId xid, Oid dbOid, Oid relOid)
Definition standby.c:988
void LogAccessExclusiveLock(Oid dbOid, Oid relOid)
Definition standby.c:1454
void initStringInfo(StringInfo str)
Definition stringinfo.c:97
struct ErrorContextCallback * previous
Definition elog.h:299
uint32 count[FAST_PATH_STRONG_LOCK_HASH_PARTITIONS]
Definition lock.c:312
Size keysize
Definition hsearch.h:69
Size entrysize
Definition hsearch.h:70
Size keysize
Definition dynahash.c:241
int64 nLocks
Definition lock.h:254
struct ResourceOwnerData * owner
Definition lock.h:253
uint8 locktag_type
Definition locktag.h:70
uint32 locktag_field3
Definition locktag.h:68
uint32 locktag_field1
Definition locktag.h:66
uint8 locktag_lockmethodid
Definition locktag.h:71
uint16 locktag_field4
Definition locktag.h:69
uint32 locktag_field2
Definition locktag.h:67
Definition lock.h:140
int nRequested
Definition lock.h:150
LOCKTAG tag
Definition lock.h:142
int requested[MAX_LOCKMODES]
Definition lock.h:149
dclist_head waitProcs
Definition lock.h:148
int granted[MAX_LOCKMODES]
Definition lock.h:151
LOCKMASK grantMask
Definition lock.h:145
LOCKMASK waitMask
Definition lock.h:146
int nGranted
Definition lock.h:152
dlist_head procLocks
Definition lock.h:147
const bool * trace_flag
Definition lock.h:116
const char *const * lockModeNames
Definition lock.h:115
Definition proc.h:179
LWLock fpInfoLock
Definition proc.h:324
LocalTransactionId lxid
Definition proc.h:231
PROCLOCK * waitProcLock
Definition proc.h:306
dlist_head lockGroupMembers
Definition proc.h:299
Oid * fpRelId
Definition proc.h:326
Oid databaseId
Definition proc.h:201
uint64 * fpLockBits
Definition proc.h:325
pg_atomic_uint64 waitStart
Definition proc.h:311
bool fpVXIDLock
Definition proc.h:327
ProcNumber procNumber
Definition proc.h:226
int pid
Definition proc.h:197
struct PGPROC::@136 vxid
LOCK * waitLock
Definition proc.h:304
TransactionId xid
Definition proc.h:237
LOCKMODE waitLockMode
Definition proc.h:307
dlist_node waitLink
Definition proc.h:305
PGPROC * lockGroupLeader
Definition proc.h:298
LocalTransactionId fpLocalTransactionId
Definition proc.h:328
dlist_head myProcLocks[NUM_LOCK_PARTITIONS]
Definition proc.h:321
ProcWaitStatus waitStatus
Definition proc.h:314
LOCK * myLock
Definition lock.h:196
PGPROC * myProc
Definition lock.h:197
LOCKMASK holdMask
Definition lock.h:207
dlist_node lockLink
Definition lock.h:209
PGPROC * groupLeader
Definition lock.h:206
LOCKMASK releaseMask
Definition lock.h:208
PROCLOCKTAG tag
Definition lock.h:203
dlist_node procLink
Definition lock.h:210
uint32 allProcCount
Definition proc.h:459
ShmemRequestCallback request_fn
Definition shmem.h:133
LOCKTAG locktag
Definition lock.c:163
LOCKMODE lockmode
Definition lock.c:164
LocalTransactionId localTransactionId
Definition lock.h:65
ProcNumber procNumber
Definition lock.h:64
dlist_node * cur
Definition ilist.h:179
Definition type.h:96
#define InvalidTransactionId
Definition transam.h:31
#define XidFromFullTransactionId(x)
Definition transam.h:48
#define FirstNormalObjectId
Definition transam.h:197
#define TransactionIdIsValid(xid)
Definition transam.h:41
void RegisterTwoPhaseRecord(TwoPhaseRmgrId rmid, uint16 info, const void *data, uint32 len)
Definition twophase.c:1277
int max_prepared_xacts
Definition twophase.c:118
TransactionId TwoPhaseGetXidByVirtualXID(VirtualTransactionId vxid, bool *have_more)
Definition twophase.c:862
PGPROC * TwoPhaseGetDummyProc(FullTransactionId fxid, bool lock_held)
Definition twophase.c:929
#define TWOPHASE_RM_LOCK_ID
const char * type
const char * name
bool RecoveryInProgress(void)
Definition xlog.c:6836
#define XLogStandbyInfoActive()
Definition xlog.h:126
bool InRecovery
Definition xlogutils.c:50
#define InHotStandby
Definition xlogutils.h:60