PostgreSQL Source Code git master
Loading...
Searching...
No Matches
lock.c File Reference
#include "postgres.h"
#include <signal.h>
#include <unistd.h>
#include "access/transam.h"
#include "access/twophase.h"
#include "access/twophase_rmgr.h"
#include "access/xlog.h"
#include "access/xlogutils.h"
#include "miscadmin.h"
#include "pg_trace.h"
#include "storage/lmgr.h"
#include "storage/proc.h"
#include "storage/procarray.h"
#include "storage/spin.h"
#include "storage/standby.h"
#include "utils/memutils.h"
#include "utils/ps_status.h"
#include "utils/resowner.h"
Include dependency graph for lock.c:

Go to the source code of this file.

Data Structures

struct  TwoPhaseLockRecord
 
struct  FastPathStrongRelationLockData
 

Macros

#define NLOCKENTS()    mul_size(max_locks_per_xact, add_size(MaxBackends, max_prepared_xacts))
 
#define FAST_PATH_REL_GROUP(rel)    (((uint64) (rel) * 49157) & (FastPathLockGroupsPerBackend - 1))
 
#define FAST_PATH_SLOT(group, index)
 
#define FAST_PATH_GROUP(index)
 
#define FAST_PATH_INDEX(index)
 
#define FAST_PATH_BITS_PER_SLOT   3
 
#define FAST_PATH_LOCKNUMBER_OFFSET   1
 
#define FAST_PATH_MASK   ((1 << FAST_PATH_BITS_PER_SLOT) - 1)
 
#define FAST_PATH_BITS(proc, n)   (proc)->fpLockBits[FAST_PATH_GROUP(n)]
 
#define FAST_PATH_GET_BITS(proc, n)    ((FAST_PATH_BITS(proc, n) >> (FAST_PATH_BITS_PER_SLOT * FAST_PATH_INDEX(n))) & FAST_PATH_MASK)
 
#define FAST_PATH_BIT_POSITION(n, l)
 
#define FAST_PATH_SET_LOCKMODE(proc, n, l)    FAST_PATH_BITS(proc, n) |= UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)
 
#define FAST_PATH_CLEAR_LOCKMODE(proc, n, l)    FAST_PATH_BITS(proc, n) &= ~(UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l))
 
#define FAST_PATH_CHECK_LOCKMODE(proc, n, l)    (FAST_PATH_BITS(proc, n) & (UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)))
 
#define EligibleForRelationFastPath(locktag, mode)
 
#define ConflictsWithRelationFastPath(locktag, mode)
 
#define FAST_PATH_STRONG_LOCK_HASH_BITS   10
 
#define FAST_PATH_STRONG_LOCK_HASH_PARTITIONS    (1 << FAST_PATH_STRONG_LOCK_HASH_BITS)
 
#define FastPathStrongLockHashPartition(hashcode)    ((hashcode) % FAST_PATH_STRONG_LOCK_HASH_PARTITIONS)
 
#define LOCK_PRINT(where, lock, type)   ((void) 0)
 
#define PROCLOCK_PRINT(where, proclockP)   ((void) 0)
 

Typedefs

typedef struct TwoPhaseLockRecord TwoPhaseLockRecord
 

Functions

static bool FastPathGrantRelationLock (Oid relid, LOCKMODE lockmode)
 
static bool FastPathUnGrantRelationLock (Oid relid, LOCKMODE lockmode)
 
static bool FastPathTransferRelationLocks (LockMethod lockMethodTable, const LOCKTAG *locktag, uint32 hashcode)
 
static PROCLOCKFastPathGetRelationLockEntry (LOCALLOCK *locallock)
 
static uint32 proclock_hash (const void *key, Size keysize)
 
static void RemoveLocalLock (LOCALLOCK *locallock)
 
static PROCLOCKSetupLockInTable (LockMethod lockMethodTable, PGPROC *proc, const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode)
 
static void GrantLockLocal (LOCALLOCK *locallock, ResourceOwner owner)
 
static void BeginStrongLockAcquire (LOCALLOCK *locallock, uint32 fasthashcode)
 
static void FinishStrongLockAcquire (void)
 
static ProcWaitStatus WaitOnLock (LOCALLOCK *locallock, ResourceOwner owner)
 
static void waitonlock_error_callback (void *arg)
 
static void ReleaseLockIfHeld (LOCALLOCK *locallock, bool sessionLock)
 
static void LockReassignOwner (LOCALLOCK *locallock, ResourceOwner parent)
 
static bool UnGrantLock (LOCK *lock, LOCKMODE lockmode, PROCLOCK *proclock, LockMethod lockMethodTable)
 
static void CleanUpLock (LOCK *lock, PROCLOCK *proclock, LockMethod lockMethodTable, uint32 hashcode, bool wakeupNeeded)
 
static void LockRefindAndRelease (LockMethod lockMethodTable, PGPROC *proc, LOCKTAG *locktag, LOCKMODE lockmode, bool decrement_strong_lock_count)
 
static void GetSingleProcBlockerStatusData (PGPROC *blocked_proc, BlockedProcsData *data)
 
void LockManagerShmemInit (void)
 
void InitLockManagerAccess (void)
 
LockMethod GetLocksMethodTable (const LOCK *lock)
 
LockMethod GetLockTagsMethodTable (const LOCKTAG *locktag)
 
uint32 LockTagHashCode (const LOCKTAG *locktag)
 
static uint32 ProcLockHashCode (const PROCLOCKTAG *proclocktag, uint32 hashcode)
 
bool DoLockModesConflict (LOCKMODE mode1, LOCKMODE mode2)
 
bool LockHeldByMe (const LOCKTAG *locktag, LOCKMODE lockmode, bool orstronger)
 
bool LockHasWaiters (const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
 
LockAcquireResult LockAcquire (const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock, bool dontWait)
 
LockAcquireResult LockAcquireExtended (const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock, bool dontWait, bool reportMemoryError, LOCALLOCK **locallockp, bool logLockFailure)
 
static void CheckAndSetLockHeld (LOCALLOCK *locallock, bool acquired)
 
bool LockCheckConflicts (LockMethod lockMethodTable, LOCKMODE lockmode, LOCK *lock, PROCLOCK *proclock)
 
void GrantLock (LOCK *lock, PROCLOCK *proclock, LOCKMODE lockmode)
 
void AbortStrongLockAcquire (void)
 
void GrantAwaitedLock (void)
 
LOCALLOCKGetAwaitedLock (void)
 
void ResetAwaitedLock (void)
 
void MarkLockClear (LOCALLOCK *locallock)
 
void RemoveFromWaitQueue (PGPROC *proc, uint32 hashcode)
 
bool LockRelease (const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
 
void LockReleaseAll (LOCKMETHODID lockmethodid, bool allLocks)
 
void LockReleaseSession (LOCKMETHODID lockmethodid)
 
void LockReleaseCurrentOwner (LOCALLOCK **locallocks, int nlocks)
 
void LockReassignCurrentOwner (LOCALLOCK **locallocks, int nlocks)
 
VirtualTransactionIdGetLockConflicts (const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
 
static void CheckForSessionAndXactLocks (void)
 
void AtPrepare_Locks (void)
 
void PostPrepare_Locks (FullTransactionId fxid)
 
Size LockManagerShmemSize (void)
 
LockDataGetLockStatusData (void)
 
BlockedProcsDataGetBlockerStatusData (int blocked_pid)
 
xl_standby_lockGetRunningTransactionLocks (int *nlocks)
 
const charGetLockmodeName (LOCKMETHODID lockmethodid, LOCKMODE mode)
 
void lock_twophase_recover (FullTransactionId fxid, uint16 info, void *recdata, uint32 len)
 
void lock_twophase_standby_recover (FullTransactionId fxid, uint16 info, void *recdata, uint32 len)
 
void lock_twophase_postcommit (FullTransactionId fxid, uint16 info, void *recdata, uint32 len)
 
void lock_twophase_postabort (FullTransactionId fxid, uint16 info, void *recdata, uint32 len)
 
void VirtualXactLockTableInsert (VirtualTransactionId vxid)
 
void VirtualXactLockTableCleanup (void)
 
static bool XactLockForVirtualXact (VirtualTransactionId vxid, TransactionId xid, bool wait)
 
bool VirtualXactLock (VirtualTransactionId vxid, bool wait)
 
int LockWaiterCount (const LOCKTAG *locktag)
 

Variables

int max_locks_per_xact
 
bool log_lock_failures = false
 
static const LOCKMASK LockConflicts []
 
static const char *const lock_mode_names []
 
static bool Dummy_trace = false
 
static const LockMethodData default_lockmethod
 
static const LockMethodData user_lockmethod
 
static const LockMethod LockMethods []
 
static int FastPathLocalUseCounts [FP_LOCK_GROUPS_PER_BACKEND_MAX]
 
static bool IsRelationExtensionLockHeld PG_USED_FOR_ASSERTS_ONLY = false
 
int FastPathLockGroupsPerBackend = 0
 
static volatile FastPathStrongRelationLockDataFastPathStrongRelationLocks
 
static HTABLockMethodLockHash
 
static HTABLockMethodProcLockHash
 
static HTABLockMethodLocalHash
 
static LOCALLOCKStrongLockInProgress
 
static LOCALLOCKawaitedLock
 
static ResourceOwner awaitedOwner
 

Macro Definition Documentation

◆ ConflictsWithRelationFastPath

#define ConflictsWithRelationFastPath (   locktag,
  mode 
)
Value:
((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
(locktag)->locktag_type == LOCKTAG_RELATION && \
(locktag)->locktag_field1 != InvalidOid && \
#define DEFAULT_LOCKMETHOD
Definition lock.h:127
@ LOCKTAG_RELATION
Definition lock.h:139
#define ShareUpdateExclusiveLock
Definition lockdefs.h:39
static PgChecksumMode mode
#define InvalidOid

Definition at line 273 of file lock.c.

306{
307 slock_t mutex;
310
312
313
314/*
315 * Pointers to hash tables containing lock state
316 *
317 * The LockMethodLockHash and LockMethodProcLockHash hash tables are in
318 * shared memory; LockMethodLocalHash is local to each backend.
319 */
323
324
325/* private state for error cleanup */
327static LOCALLOCK *awaitedLock;
329
330
331#ifdef LOCK_DEBUG
332
333/*------
334 * The following configuration options are available for lock debugging:
335 *
336 * TRACE_LOCKS -- give a bunch of output what's going on in this file
337 * TRACE_USERLOCKS -- same but for user locks
338 * TRACE_LOCK_OIDMIN-- do not trace locks for tables below this oid
339 * (use to avoid output on system tables)
340 * TRACE_LOCK_TABLE -- trace locks on this table (oid) unconditionally
341 * DEBUG_DEADLOCKS -- currently dumps locks at untimely occasions ;)
342 *
343 * Furthermore, but in storage/lmgr/lwlock.c:
344 * TRACE_LWLOCKS -- trace lightweight locks (pretty useless)
345 *
346 * Define LOCK_DEBUG at compile time to get all these enabled.
347 * --------
348 */
349
351bool Trace_locks = false;
352bool Trace_userlocks = false;
353int Trace_lock_table = 0;
354bool Debug_deadlocks = false;
355
356
357inline static bool
358LOCK_DEBUG_ENABLED(const LOCKTAG *tag)
359{
360 return
363 || (Trace_lock_table &&
365}
366
367
368inline static void
369LOCK_PRINT(const char *where, const LOCK *lock, LOCKMODE type)
370{
371 if (LOCK_DEBUG_ENABLED(&lock->tag))
372 elog(LOG,
373 "%s: lock(%p) id(%u,%u,%u,%u,%u,%u) grantMask(%x) "
374 "req(%d,%d,%d,%d,%d,%d,%d)=%d "
375 "grant(%d,%d,%d,%d,%d,%d,%d)=%d wait(%d) type(%s)",
376 where, lock,
380 lock->grantMask,
381 lock->requested[1], lock->requested[2], lock->requested[3],
382 lock->requested[4], lock->requested[5], lock->requested[6],
383 lock->requested[7], lock->nRequested,
384 lock->granted[1], lock->granted[2], lock->granted[3],
385 lock->granted[4], lock->granted[5], lock->granted[6],
386 lock->granted[7], lock->nGranted,
387 dclist_count(&lock->waitProcs),
388 LockMethods[LOCK_LOCKMETHOD(*lock)]->lockModeNames[type]);
389}
390
391
392inline static void
393PROCLOCK_PRINT(const char *where, const PROCLOCK *proclockP)
394{
395 if (LOCK_DEBUG_ENABLED(&proclockP->tag.myLock->tag))
396 elog(LOG,
397 "%s: proclock(%p) lock(%p) method(%u) proc(%p) hold(%x)",
398 where, proclockP, proclockP->tag.myLock,
400 proclockP->tag.myProc, (int) proclockP->holdMask);
401}
402#else /* not LOCK_DEBUG */
403
404#define LOCK_PRINT(where, lock, type) ((void) 0)
405#define PROCLOCK_PRINT(where, proclockP) ((void) 0)
406#endif /* not LOCK_DEBUG */
407
408
409static uint32 proclock_hash(const void *key, Size keysize);
412 const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode);
415static void FinishStrongLockAcquire(void);
417static void waitonlock_error_callback(void *arg);
420static bool UnGrantLock(LOCK *lock, LOCKMODE lockmode,
422static void CleanUpLock(LOCK *lock, PROCLOCK *proclock,
424 bool wakeupNeeded);
426 LOCKTAG *locktag, LOCKMODE lockmode,
430
431
432/*
433 * Initialize the lock manager's shmem data structures.
434 *
435 * This is called from CreateSharedMemoryAndSemaphores(), which see for more
436 * comments. In the normal postmaster case, the shared hash tables are
437 * created here, and backends inherit pointers to them via fork(). In the
438 * EXEC_BACKEND case, each backend re-executes this code to obtain pointers to
439 * the already existing shared hash tables. In either case, each backend must
440 * also call InitLockManagerAccess() to create the locallock hash table.
441 */
442void
444{
445 HASHCTL info;
448 bool found;
449
450 /*
451 * Compute init/max size to request for lock hashtables. Note these
452 * calculations must agree with LockManagerShmemSize!
453 */
456
457 /*
458 * Allocate hash table for LOCK structs. This stores per-locked-object
459 * information.
460 */
461 info.keysize = sizeof(LOCKTAG);
462 info.entrysize = sizeof(LOCK);
464
465 LockMethodLockHash = ShmemInitHash("LOCK hash",
468 &info,
470
471 /* Assume an average of 2 holders per lock */
472 max_table_size *= 2;
473 init_table_size *= 2;
474
475 /*
476 * Allocate hash table for PROCLOCK structs. This stores
477 * per-lock-per-holder information.
478 */
479 info.keysize = sizeof(PROCLOCKTAG);
480 info.entrysize = sizeof(PROCLOCK);
481 info.hash = proclock_hash;
483
484 LockMethodProcLockHash = ShmemInitHash("PROCLOCK hash",
487 &info,
489
490 /*
491 * Allocate fast-path structures.
492 */
494 ShmemInitStruct("Fast Path Strong Relation Lock Data",
495 sizeof(FastPathStrongRelationLockData), &found);
496 if (!found)
498}
499
500/*
501 * Initialize the lock manager's backend-private data structures.
502 */
503void
505{
506 /*
507 * Allocate non-shared hash table for LOCALLOCK structs. This stores lock
508 * counts and resource owner information.
509 */
510 HASHCTL info;
511
512 info.keysize = sizeof(LOCALLOCKTAG);
513 info.entrysize = sizeof(LOCALLOCK);
514
515 LockMethodLocalHash = hash_create("LOCALLOCK hash",
516 16,
517 &info,
519}
520
521
522/*
523 * Fetch the lock method table associated with a given lock
524 */
526GetLocksMethodTable(const LOCK *lock)
527{
529
532}
533
534/*
535 * Fetch the lock method table associated with a given locktag
536 */
538GetLockTagsMethodTable(const LOCKTAG *locktag)
539{
541
544}
545
546
547/*
548 * Compute the hash code associated with a LOCKTAG.
549 *
550 * To avoid unnecessary recomputations of the hash code, we try to do this
551 * just once per function, and then pass it around as needed. Aside from
552 * passing the hashcode to hash_search_with_hash_value(), we can extract
553 * the lock partition number from the hashcode.
554 */
555uint32
556LockTagHashCode(const LOCKTAG *locktag)
557{
558 return get_hash_value(LockMethodLockHash, locktag);
559}
560
561/*
562 * Compute the hash code associated with a PROCLOCKTAG.
563 *
564 * Because we want to use just one set of partition locks for both the
565 * LOCK and PROCLOCK hash tables, we have to make sure that PROCLOCKs
566 * fall into the same partition number as their associated LOCKs.
567 * dynahash.c expects the partition number to be the low-order bits of
568 * the hash code, and therefore a PROCLOCKTAG's hash code must have the
569 * same low-order bits as the associated LOCKTAG's hash code. We achieve
570 * this with this specialized hash function.
571 */
572static uint32
573proclock_hash(const void *key, Size keysize)
574{
575 const PROCLOCKTAG *proclocktag = (const PROCLOCKTAG *) key;
578
579 Assert(keysize == sizeof(PROCLOCKTAG));
580
581 /* Look into the associated LOCK object, and compute its hash code */
582 lockhash = LockTagHashCode(&proclocktag->myLock->tag);
583
584 /*
585 * To make the hash code also depend on the PGPROC, we xor the proc
586 * struct's address into the hash code, left-shifted so that the
587 * partition-number bits don't change. Since this is only a hash, we
588 * don't care if we lose high-order bits of the address; use an
589 * intermediate variable to suppress cast-pointer-to-int warnings.
590 */
593
594 return lockhash;
595}
596
597/*
598 * Compute the hash code associated with a PROCLOCKTAG, given the hashcode
599 * for its underlying LOCK.
600 *
601 * We use this just to avoid redundant calls of LockTagHashCode().
602 */
603static inline uint32
605{
606 uint32 lockhash = hashcode;
608
609 /*
610 * This must match proclock_hash()!
611 */
614
615 return lockhash;
616}
617
618/*
619 * Given two lock modes, return whether they would conflict.
620 */
621bool
623{
625
626 if (lockMethodTable->conflictTab[mode1] & LOCKBIT_ON(mode2))
627 return true;
628
629 return false;
630}
631
632/*
633 * LockHeldByMe -- test whether lock 'locktag' is held by the current
634 * transaction
635 *
636 * Returns true if current transaction holds a lock on 'tag' of mode
637 * 'lockmode'. If 'orstronger' is true, a stronger lockmode is also OK.
638 * ("Stronger" is defined as "numerically higher", which is a bit
639 * semantically dubious but is OK for the purposes we use this for.)
640 */
641bool
642LockHeldByMe(const LOCKTAG *locktag,
643 LOCKMODE lockmode, bool orstronger)
644{
647
648 /*
649 * See if there is a LOCALLOCK entry for this lock and lockmode
650 */
651 MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
652 localtag.lock = *locktag;
653 localtag.mode = lockmode;
654
656 &localtag,
657 HASH_FIND, NULL);
658
659 if (locallock && locallock->nLocks > 0)
660 return true;
661
662 if (orstronger)
663 {
665
666 for (slockmode = lockmode + 1;
668 slockmode++)
669 {
670 if (LockHeldByMe(locktag, slockmode, false))
671 return true;
672 }
673 }
674
675 return false;
676}
677
678#ifdef USE_ASSERT_CHECKING
679/*
680 * GetLockMethodLocalHash -- return the hash of local locks, for modules that
681 * evaluate assertions based on all locks held.
682 */
683HTAB *
685{
686 return LockMethodLocalHash;
687}
688#endif
689
690/*
691 * LockHasWaiters -- look up 'locktag' and check if releasing this
692 * lock would wake up other processes waiting for it.
693 */
694bool
695LockHasWaiters(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
696{
701 LOCK *lock;
702 PROCLOCK *proclock;
704 bool hasWaiters = false;
705
707 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
710 elog(ERROR, "unrecognized lock mode: %d", lockmode);
711
712#ifdef LOCK_DEBUG
713 if (LOCK_DEBUG_ENABLED(locktag))
714 elog(LOG, "LockHasWaiters: lock [%u,%u] %s",
715 locktag->locktag_field1, locktag->locktag_field2,
716 lockMethodTable->lockModeNames[lockmode]);
717#endif
718
719 /*
720 * Find the LOCALLOCK entry for this lock and lockmode
721 */
722 MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
723 localtag.lock = *locktag;
724 localtag.mode = lockmode;
725
727 &localtag,
728 HASH_FIND, NULL);
729
730 /*
731 * let the caller print its own error message, too. Do not ereport(ERROR).
732 */
733 if (!locallock || locallock->nLocks <= 0)
734 {
735 elog(WARNING, "you don't own a lock of type %s",
736 lockMethodTable->lockModeNames[lockmode]);
737 return false;
738 }
739
740 /*
741 * Check the shared lock table.
742 */
744
746
747 /*
748 * We don't need to re-find the lock or proclock, since we kept their
749 * addresses in the locallock table, and they couldn't have been removed
750 * while we were holding a lock on them.
751 */
752 lock = locallock->lock;
753 LOCK_PRINT("LockHasWaiters: found", lock, lockmode);
754 proclock = locallock->proclock;
755 PROCLOCK_PRINT("LockHasWaiters: found", proclock);
756
757 /*
758 * Double-check that we are actually holding a lock of the type we want to
759 * release.
760 */
761 if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
762 {
763 PROCLOCK_PRINT("LockHasWaiters: WRONGTYPE", proclock);
765 elog(WARNING, "you don't own a lock of type %s",
766 lockMethodTable->lockModeNames[lockmode]);
768 return false;
769 }
770
771 /*
772 * Do the checking.
773 */
774 if ((lockMethodTable->conflictTab[lockmode] & lock->waitMask) != 0)
775 hasWaiters = true;
776
778
779 return hasWaiters;
780}
781
782/*
783 * LockAcquire -- Check for lock conflicts, sleep if conflict found,
784 * set lock if/when no conflicts.
785 *
786 * Inputs:
787 * locktag: unique identifier for the lockable object
788 * lockmode: lock mode to acquire
789 * sessionLock: if true, acquire lock for session not current transaction
790 * dontWait: if true, don't wait to acquire lock
791 *
792 * Returns one of:
793 * LOCKACQUIRE_NOT_AVAIL lock not available, and dontWait=true
794 * LOCKACQUIRE_OK lock successfully acquired
795 * LOCKACQUIRE_ALREADY_HELD incremented count for lock already held
796 * LOCKACQUIRE_ALREADY_CLEAR incremented count for lock already clear
797 *
798 * In the normal case where dontWait=false and the caller doesn't need to
799 * distinguish a freshly acquired lock from one already taken earlier in
800 * this same transaction, there is no need to examine the return value.
801 *
802 * Side Effects: The lock is acquired and recorded in lock tables.
803 *
804 * NOTE: if we wait for the lock, there is no way to abort the wait
805 * short of aborting the transaction.
806 */
808LockAcquire(const LOCKTAG *locktag,
809 LOCKMODE lockmode,
810 bool sessionLock,
811 bool dontWait)
812{
813 return LockAcquireExtended(locktag, lockmode, sessionLock, dontWait,
814 true, NULL, false);
815}
816
817/*
818 * LockAcquireExtended - allows us to specify additional options
819 *
820 * reportMemoryError specifies whether a lock request that fills the lock
821 * table should generate an ERROR or not. Passing "false" allows the caller
822 * to attempt to recover from lock-table-full situations, perhaps by forcibly
823 * canceling other lock holders and then retrying. Note, however, that the
824 * return code for that is LOCKACQUIRE_NOT_AVAIL, so that it's unsafe to use
825 * in combination with dontWait = true, as the cause of failure couldn't be
826 * distinguished.
827 *
828 * If locallockp isn't NULL, *locallockp receives a pointer to the LOCALLOCK
829 * table entry if a lock is successfully acquired, or NULL if not.
830 *
831 * logLockFailure indicates whether to log details when a lock acquisition
832 * fails with dontWait = true.
833 */
835LockAcquireExtended(const LOCKTAG *locktag,
836 LOCKMODE lockmode,
837 bool sessionLock,
838 bool dontWait,
841 bool logLockFailure)
842{
847 LOCK *lock;
848 PROCLOCK *proclock;
849 bool found;
850 ResourceOwner owner;
851 uint32 hashcode;
853 bool found_conflict;
855 bool log_lock = false;
856
858 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
861 elog(ERROR, "unrecognized lock mode: %d", lockmode);
862
863 if (RecoveryInProgress() && !InRecovery &&
864 (locktag->locktag_type == LOCKTAG_OBJECT ||
865 locktag->locktag_type == LOCKTAG_RELATION) &&
866 lockmode > RowExclusiveLock)
869 errmsg("cannot acquire lock mode %s on database objects while recovery is in progress",
870 lockMethodTable->lockModeNames[lockmode]),
871 errhint("Only RowExclusiveLock or less can be acquired on database objects during recovery.")));
872
873#ifdef LOCK_DEBUG
874 if (LOCK_DEBUG_ENABLED(locktag))
875 elog(LOG, "LockAcquire: lock [%u,%u] %s",
876 locktag->locktag_field1, locktag->locktag_field2,
877 lockMethodTable->lockModeNames[lockmode]);
878#endif
879
880 /* Identify owner for lock */
881 if (sessionLock)
882 owner = NULL;
883 else
884 owner = CurrentResourceOwner;
885
886 /*
887 * Find or create a LOCALLOCK entry for this lock and lockmode
888 */
889 MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
890 localtag.lock = *locktag;
891 localtag.mode = lockmode;
892
894 &localtag,
895 HASH_ENTER, &found);
896
897 /*
898 * if it's a new locallock object, initialize it
899 */
900 if (!found)
901 {
902 locallock->lock = NULL;
903 locallock->proclock = NULL;
904 locallock->hashcode = LockTagHashCode(&(localtag.lock));
905 locallock->nLocks = 0;
906 locallock->holdsStrongLockCount = false;
907 locallock->lockCleared = false;
908 locallock->numLockOwners = 0;
909 locallock->maxLockOwners = 8;
910 locallock->lockOwners = NULL; /* in case next line fails */
911 locallock->lockOwners = (LOCALLOCKOWNER *)
913 locallock->maxLockOwners * sizeof(LOCALLOCKOWNER));
914 }
915 else
916 {
917 /* Make sure there will be room to remember the lock */
918 if (locallock->numLockOwners >= locallock->maxLockOwners)
919 {
920 int newsize = locallock->maxLockOwners * 2;
921
922 locallock->lockOwners = (LOCALLOCKOWNER *)
923 repalloc(locallock->lockOwners,
924 newsize * sizeof(LOCALLOCKOWNER));
925 locallock->maxLockOwners = newsize;
926 }
927 }
928 hashcode = locallock->hashcode;
929
930 if (locallockp)
932
933 /*
934 * If we already hold the lock, we can just increase the count locally.
935 *
936 * If lockCleared is already set, caller need not worry about absorbing
937 * sinval messages related to the lock's object.
938 */
939 if (locallock->nLocks > 0)
940 {
942 if (locallock->lockCleared)
944 else
946 }
947
948 /*
949 * We don't acquire any other heavyweight lock while holding the relation
950 * extension lock. We do allow to acquire the same relation extension
951 * lock more than once but that case won't reach here.
952 */
954
955 /*
956 * Prepare to emit a WAL record if acquisition of this lock needs to be
957 * replayed in a standby server.
958 *
959 * Here we prepare to log; after lock is acquired we'll issue log record.
960 * This arrangement simplifies error recovery in case the preparation step
961 * fails.
962 *
963 * Only AccessExclusiveLocks can conflict with lock types that read-only
964 * transactions can acquire in a standby server. Make sure this definition
965 * matches the one in GetRunningTransactionLocks().
966 */
967 if (lockmode >= AccessExclusiveLock &&
968 locktag->locktag_type == LOCKTAG_RELATION &&
971 {
973 log_lock = true;
974 }
975
976 /*
977 * Attempt to take lock via fast path, if eligible. But if we remember
978 * having filled up the fast path array, we don't attempt to make any
979 * further use of it until we release some locks. It's possible that some
980 * other backend has transferred some of those locks to the shared hash
981 * table, leaving space free, but it's not worth acquiring the LWLock just
982 * to check. It's also possible that we're acquiring a second or third
983 * lock type on a relation we have already locked using the fast-path, but
984 * for now we don't worry about that case either.
985 */
986 if (EligibleForRelationFastPath(locktag, lockmode) &&
988 {
990 bool acquired;
991
992 /*
993 * LWLockAcquire acts as a memory sequencing point, so it's safe to
994 * assume that any strong locker whose increment to
995 * FastPathStrongRelationLocks->counts becomes visible after we test
996 * it has yet to begin to transfer fast-path locks.
997 */
1000 acquired = false;
1001 else
1003 lockmode);
1005 if (acquired)
1006 {
1007 /*
1008 * The locallock might contain stale pointers to some old shared
1009 * objects; we MUST reset these to null before considering the
1010 * lock to be acquired via fast-path.
1011 */
1012 locallock->lock = NULL;
1013 locallock->proclock = NULL;
1014 GrantLockLocal(locallock, owner);
1015 return LOCKACQUIRE_OK;
1016 }
1017 }
1018
1019 /*
1020 * If this lock could potentially have been taken via the fast-path by
1021 * some other backend, we must (temporarily) disable further use of the
1022 * fast-path for this lock tag, and migrate any locks already taken via
1023 * this method to the main lock table.
1024 */
1025 if (ConflictsWithRelationFastPath(locktag, lockmode))
1026 {
1028
1031 hashcode))
1032 {
1034 if (locallock->nLocks == 0)
1036 if (locallockp)
1037 *locallockp = NULL;
1039 ereport(ERROR,
1041 errmsg("out of shared memory"),
1042 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
1043 else
1044 return LOCKACQUIRE_NOT_AVAIL;
1045 }
1046 }
1047
1048 /*
1049 * We didn't find the lock in our LOCALLOCK table, and we didn't manage to
1050 * take it via the fast-path, either, so we've got to mess with the shared
1051 * lock table.
1052 */
1054
1056
1057 /*
1058 * Find or create lock and proclock entries with this tag
1059 *
1060 * Note: if the locallock object already existed, it might have a pointer
1061 * to the lock already ... but we should not assume that that pointer is
1062 * valid, since a lock object with zero hold and request counts can go
1063 * away anytime. So we have to use SetupLockInTable() to recompute the
1064 * lock and proclock pointers, even if they're already set.
1065 */
1066 proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
1067 hashcode, lockmode);
1068 if (!proclock)
1069 {
1072 if (locallock->nLocks == 0)
1074 if (locallockp)
1075 *locallockp = NULL;
1077 ereport(ERROR,
1079 errmsg("out of shared memory"),
1080 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
1081 else
1082 return LOCKACQUIRE_NOT_AVAIL;
1083 }
1084 locallock->proclock = proclock;
1085 lock = proclock->tag.myLock;
1086 locallock->lock = lock;
1087
1088 /*
1089 * If lock requested conflicts with locks requested by waiters, must join
1090 * wait queue. Otherwise, check for conflict with already-held locks.
1091 * (That's last because most complex check.)
1092 */
1093 if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
1094 found_conflict = true;
1095 else
1097 lock, proclock);
1098
1099 if (!found_conflict)
1100 {
1101 /* No conflict with held or previously requested locks */
1102 GrantLock(lock, proclock, lockmode);
1104 }
1105 else
1106 {
1107 /*
1108 * Join the lock's wait queue. We call this even in the dontWait
1109 * case, because JoinWaitQueue() may discover that we can acquire the
1110 * lock immediately after all.
1111 */
1113 }
1114
1116 {
1117 /*
1118 * We're not getting the lock because a deadlock was detected already
1119 * while trying to join the wait queue, or because we would have to
1120 * wait but the caller requested no blocking.
1121 *
1122 * Undo the changes to shared entries before releasing the partition
1123 * lock.
1124 */
1126
1127 if (proclock->holdMask == 0)
1128 {
1130
1132 hashcode);
1133 dlist_delete(&proclock->lockLink);
1134 dlist_delete(&proclock->procLink);
1136 &(proclock->tag),
1139 NULL))
1140 elog(PANIC, "proclock table corrupted");
1141 }
1142 else
1143 PROCLOCK_PRINT("LockAcquire: did not join wait queue", proclock);
1144 lock->nRequested--;
1145 lock->requested[lockmode]--;
1146 LOCK_PRINT("LockAcquire: did not join wait queue",
1147 lock, lockmode);
1148 Assert((lock->nRequested > 0) &&
1149 (lock->requested[lockmode] >= 0));
1150 Assert(lock->nGranted <= lock->nRequested);
1152 if (locallock->nLocks == 0)
1154
1155 if (dontWait)
1156 {
1157 /*
1158 * Log lock holders and waiters as a detail log message if
1159 * logLockFailure = true and lock acquisition fails with dontWait
1160 * = true
1161 */
1162 if (logLockFailure)
1163 {
1167 const char *modename;
1168 int lockHoldersNum = 0;
1169
1173
1174 DescribeLockTag(&buf, &locallock->tag.lock);
1175 modename = GetLockmodeName(locallock->tag.lock.locktag_lockmethodid,
1176 lockmode);
1177
1178 /* Gather a list of all lock holders and waiters */
1183
1184 ereport(LOG,
1185 (errmsg("process %d could not obtain %s on %s",
1186 MyProcPid, modename, buf.data),
1188 "Process holding the lock: %s, Wait queue: %s.",
1189 "Processes holding the lock: %s, Wait queue: %s.",
1191 lock_holders_sbuf.data,
1192 lock_waiters_sbuf.data)));
1193
1194 pfree(buf.data);
1197 }
1198 if (locallockp)
1199 *locallockp = NULL;
1200 return LOCKACQUIRE_NOT_AVAIL;
1201 }
1202 else
1203 {
1205 /* DeadLockReport() will not return */
1206 }
1207 }
1208
1209 /*
1210 * We are now in the lock queue, or the lock was already granted. If
1211 * queued, go to sleep.
1212 */
1214 {
1215 Assert(!dontWait);
1216 PROCLOCK_PRINT("LockAcquire: sleeping on lock", proclock);
1217 LOCK_PRINT("LockAcquire: sleeping on lock", lock, lockmode);
1219
1221
1222 /*
1223 * NOTE: do not do any material change of state between here and
1224 * return. All required changes in locktable state must have been
1225 * done when the lock was granted to us --- see notes in WaitOnLock.
1226 */
1227
1229 {
1230 /*
1231 * We failed as a result of a deadlock, see CheckDeadLock(). Quit
1232 * now.
1233 */
1234 Assert(!dontWait);
1236 /* DeadLockReport() will not return */
1237 }
1238 }
1239 else
1242
1243 /* The lock was granted to us. Update the local lock entry accordingly */
1244 Assert((proclock->holdMask & LOCKBIT_ON(lockmode)) != 0);
1245 GrantLockLocal(locallock, owner);
1246
1247 /*
1248 * Lock state is fully up-to-date now; if we error out after this, no
1249 * special error cleanup is required.
1250 */
1252
1253 /*
1254 * Emit a WAL record if acquisition of this lock needs to be replayed in a
1255 * standby server.
1256 */
1257 if (log_lock)
1258 {
1259 /*
1260 * Decode the locktag back to the original values, to avoid sending
1261 * lots of empty bytes with every message. See lock.h to check how a
1262 * locktag is defined for LOCKTAG_RELATION
1263 */
1265 locktag->locktag_field2);
1266 }
1267
1268 return LOCKACQUIRE_OK;
1269}
1270
1271/*
1272 * Find or create LOCK and PROCLOCK objects as needed for a new lock
1273 * request.
1274 *
1275 * Returns the PROCLOCK object, or NULL if we failed to create the objects
1276 * for lack of shared memory.
1277 *
1278 * The appropriate partition lock must be held at entry, and will be
1279 * held at exit.
1280 */
1281static PROCLOCK *
1283 const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode)
1284{
1285 LOCK *lock;
1286 PROCLOCK *proclock;
1289 bool found;
1290
1291 /*
1292 * Find or create a lock with this tag.
1293 */
1295 locktag,
1296 hashcode,
1298 &found);
1299 if (!lock)
1300 return NULL;
1301
1302 /*
1303 * if it's a new lock object, initialize it
1304 */
1305 if (!found)
1306 {
1307 lock->grantMask = 0;
1308 lock->waitMask = 0;
1309 dlist_init(&lock->procLocks);
1310 dclist_init(&lock->waitProcs);
1311 lock->nRequested = 0;
1312 lock->nGranted = 0;
1313 MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
1314 MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
1315 LOCK_PRINT("LockAcquire: new", lock, lockmode);
1316 }
1317 else
1318 {
1319 LOCK_PRINT("LockAcquire: found", lock, lockmode);
1320 Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
1321 Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
1322 Assert(lock->nGranted <= lock->nRequested);
1323 }
1324
1325 /*
1326 * Create the hash key for the proclock table.
1327 */
1328 proclocktag.myLock = lock;
1329 proclocktag.myProc = proc;
1330
1332
1333 /*
1334 * Find or create a proclock entry with this tag
1335 */
1337 &proclocktag,
1340 &found);
1341 if (!proclock)
1342 {
1343 /* Oops, not enough shmem for the proclock */
1344 if (lock->nRequested == 0)
1345 {
1346 /*
1347 * There are no other requestors of this lock, so garbage-collect
1348 * the lock object. We *must* do this to avoid a permanent leak
1349 * of shared memory, because there won't be anything to cause
1350 * anyone to release the lock object later.
1351 */
1352 Assert(dlist_is_empty(&(lock->procLocks)));
1354 &(lock->tag),
1355 hashcode,
1357 NULL))
1358 elog(PANIC, "lock table corrupted");
1359 }
1360 return NULL;
1361 }
1362
1363 /*
1364 * If new, initialize the new entry
1365 */
1366 if (!found)
1367 {
1369
1370 /*
1371 * It might seem unsafe to access proclock->groupLeader without a
1372 * lock, but it's not really. Either we are initializing a proclock
1373 * on our own behalf, in which case our group leader isn't changing
1374 * because the group leader for a process can only ever be changed by
1375 * the process itself; or else we are transferring a fast-path lock to
1376 * the main lock table, in which case that process can't change its
1377 * lock group leader without first releasing all of its locks (and in
1378 * particular the one we are currently transferring).
1379 */
1380 proclock->groupLeader = proc->lockGroupLeader != NULL ?
1381 proc->lockGroupLeader : proc;
1382 proclock->holdMask = 0;
1383 proclock->releaseMask = 0;
1384 /* Add proclock to appropriate lists */
1385 dlist_push_tail(&lock->procLocks, &proclock->lockLink);
1386 dlist_push_tail(&proc->myProcLocks[partition], &proclock->procLink);
1387 PROCLOCK_PRINT("LockAcquire: new", proclock);
1388 }
1389 else
1390 {
1391 PROCLOCK_PRINT("LockAcquire: found", proclock);
1392 Assert((proclock->holdMask & ~lock->grantMask) == 0);
1393
1394#ifdef CHECK_DEADLOCK_RISK
1395
1396 /*
1397 * Issue warning if we already hold a lower-level lock on this object
1398 * and do not hold a lock of the requested level or higher. This
1399 * indicates a deadlock-prone coding practice (eg, we'd have a
1400 * deadlock if another backend were following the same code path at
1401 * about the same time).
1402 *
1403 * This is not enabled by default, because it may generate log entries
1404 * about user-level coding practices that are in fact safe in context.
1405 * It can be enabled to help find system-level problems.
1406 *
1407 * XXX Doing numeric comparison on the lockmodes is a hack; it'd be
1408 * better to use a table. For now, though, this works.
1409 */
1410 {
1411 int i;
1412
1413 for (i = lockMethodTable->numLockModes; i > 0; i--)
1414 {
1415 if (proclock->holdMask & LOCKBIT_ON(i))
1416 {
1417 if (i >= (int) lockmode)
1418 break; /* safe: we have a lock >= req level */
1419 elog(LOG, "deadlock risk: raising lock level"
1420 " from %s to %s on object %u/%u/%u",
1421 lockMethodTable->lockModeNames[i],
1422 lockMethodTable->lockModeNames[lockmode],
1423 lock->tag.locktag_field1, lock->tag.locktag_field2,
1424 lock->tag.locktag_field3);
1425 break;
1426 }
1427 }
1428 }
1429#endif /* CHECK_DEADLOCK_RISK */
1430 }
1431
1432 /*
1433 * lock->nRequested and lock->requested[] count the total number of
1434 * requests, whether granted or waiting, so increment those immediately.
1435 * The other counts don't increment till we get the lock.
1436 */
1437 lock->nRequested++;
1438 lock->requested[lockmode]++;
1439 Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
1440
1441 /*
1442 * We shouldn't already hold the desired lock; else locallock table is
1443 * broken.
1444 */
1445 if (proclock->holdMask & LOCKBIT_ON(lockmode))
1446 elog(ERROR, "lock %s on object %u/%u/%u is already held",
1447 lockMethodTable->lockModeNames[lockmode],
1448 lock->tag.locktag_field1, lock->tag.locktag_field2,
1449 lock->tag.locktag_field3);
1450
1451 return proclock;
1452}
1453
1454/*
1455 * Check and set/reset the flag that we hold the relation extension lock.
1456 *
1457 * It is callers responsibility that this function is called after
1458 * acquiring/releasing the relation extension lock.
1459 *
1460 * Pass acquired as true if lock is acquired, false otherwise.
1461 */
1462static inline void
1464{
1465#ifdef USE_ASSERT_CHECKING
1468#endif
1469}
1470
1471/*
1472 * Subroutine to free a locallock entry
1473 */
1474static void
1476{
1477 int i;
1478
1479 for (i = locallock->numLockOwners - 1; i >= 0; i--)
1480 {
1481 if (locallock->lockOwners[i].owner != NULL)
1482 ResourceOwnerForgetLock(locallock->lockOwners[i].owner, locallock);
1483 }
1484 locallock->numLockOwners = 0;
1485 if (locallock->lockOwners != NULL)
1486 pfree(locallock->lockOwners);
1487 locallock->lockOwners = NULL;
1488
1489 if (locallock->holdsStrongLockCount)
1490 {
1492
1494
1498 locallock->holdsStrongLockCount = false;
1500 }
1501
1503 &(locallock->tag),
1504 HASH_REMOVE, NULL))
1505 elog(WARNING, "locallock table corrupted");
1506
1507 /*
1508 * Indicate that the lock is released for certain types of locks
1509 */
1511}
1512
1513/*
1514 * LockCheckConflicts -- test whether requested lock conflicts
1515 * with those already granted
1516 *
1517 * Returns true if conflict, false if no conflict.
1518 *
1519 * NOTES:
1520 * Here's what makes this complicated: one process's locks don't
1521 * conflict with one another, no matter what purpose they are held for
1522 * (eg, session and transaction locks do not conflict). Nor do the locks
1523 * of one process in a lock group conflict with those of another process in
1524 * the same group. So, we must subtract off these locks when determining
1525 * whether the requested new lock conflicts with those already held.
1526 */
1527bool
1529 LOCKMODE lockmode,
1530 LOCK *lock,
1531 PROCLOCK *proclock)
1532{
1533 int numLockModes = lockMethodTable->numLockModes;
1535 int conflictMask = lockMethodTable->conflictTab[lockmode];
1539 int i;
1540
1541 /*
1542 * first check for global conflicts: If no locks conflict with my request,
1543 * then I get the lock.
1544 *
1545 * Checking for conflict: lock->grantMask represents the types of
1546 * currently held locks. conflictTable[lockmode] has a bit set for each
1547 * type of lock that conflicts with request. Bitwise compare tells if
1548 * there is a conflict.
1549 */
1550 if (!(conflictMask & lock->grantMask))
1551 {
1552 PROCLOCK_PRINT("LockCheckConflicts: no conflict", proclock);
1553 return false;
1554 }
1555
1556 /*
1557 * Rats. Something conflicts. But it could still be my own lock, or a
1558 * lock held by another member of my locking group. First, figure out how
1559 * many conflicts remain after subtracting out any locks I hold myself.
1560 */
1561 myLocks = proclock->holdMask;
1562 for (i = 1; i <= numLockModes; i++)
1563 {
1564 if ((conflictMask & LOCKBIT_ON(i)) == 0)
1565 {
1566 conflictsRemaining[i] = 0;
1567 continue;
1568 }
1569 conflictsRemaining[i] = lock->granted[i];
1570 if (myLocks & LOCKBIT_ON(i))
1573 }
1574
1575 /* If no conflicts remain, we get the lock. */
1576 if (totalConflictsRemaining == 0)
1577 {
1578 PROCLOCK_PRINT("LockCheckConflicts: resolved (simple)", proclock);
1579 return false;
1580 }
1581
1582 /* If no group locking, it's definitely a conflict. */
1583 if (proclock->groupLeader == MyProc && MyProc->lockGroupLeader == NULL)
1584 {
1585 Assert(proclock->tag.myProc == MyProc);
1586 PROCLOCK_PRINT("LockCheckConflicts: conflicting (simple)",
1587 proclock);
1588 return true;
1589 }
1590
1591 /*
1592 * The relation extension lock conflict even between the group members.
1593 */
1595 {
1596 PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)",
1597 proclock);
1598 return true;
1599 }
1600
1601 /*
1602 * Locks held in conflicting modes by members of our own lock group are
1603 * not real conflicts; we can subtract those out and see if we still have
1604 * a conflict. This is O(N) in the number of processes holding or
1605 * awaiting locks on this object. We could improve that by making the
1606 * shared memory state more complex (and larger) but it doesn't seem worth
1607 * it.
1608 */
1610 {
1612 dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
1613
1614 if (proclock != otherproclock &&
1615 proclock->groupLeader == otherproclock->groupLeader &&
1616 (otherproclock->holdMask & conflictMask) != 0)
1617 {
1618 int intersectMask = otherproclock->holdMask & conflictMask;
1619
1620 for (i = 1; i <= numLockModes; i++)
1621 {
1622 if ((intersectMask & LOCKBIT_ON(i)) != 0)
1623 {
1624 if (conflictsRemaining[i] <= 0)
1625 elog(PANIC, "proclocks held do not match lock");
1628 }
1629 }
1630
1631 if (totalConflictsRemaining == 0)
1632 {
1633 PROCLOCK_PRINT("LockCheckConflicts: resolved (group)",
1634 proclock);
1635 return false;
1636 }
1637 }
1638 }
1639
1640 /* Nope, it's a real conflict. */
1641 PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)", proclock);
1642 return true;
1643}
1644
1645/*
1646 * GrantLock -- update the lock and proclock data structures to show
1647 * the lock request has been granted.
1648 *
1649 * NOTE: if proc was blocked, it also needs to be removed from the wait list
1650 * and have its waitLock/waitProcLock fields cleared. That's not done here.
1651 *
1652 * NOTE: the lock grant also has to be recorded in the associated LOCALLOCK
1653 * table entry; but since we may be awaking some other process, we can't do
1654 * that here; it's done by GrantLockLocal, instead.
1655 */
1656void
1657GrantLock(LOCK *lock, PROCLOCK *proclock, LOCKMODE lockmode)
1658{
1659 lock->nGranted++;
1660 lock->granted[lockmode]++;
1661 lock->grantMask |= LOCKBIT_ON(lockmode);
1662 if (lock->granted[lockmode] == lock->requested[lockmode])
1663 lock->waitMask &= LOCKBIT_OFF(lockmode);
1664 proclock->holdMask |= LOCKBIT_ON(lockmode);
1665 LOCK_PRINT("GrantLock", lock, lockmode);
1666 Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
1667 Assert(lock->nGranted <= lock->nRequested);
1668}
1669
1670/*
1671 * UnGrantLock -- opposite of GrantLock.
1672 *
1673 * Updates the lock and proclock data structures to show that the lock
1674 * is no longer held nor requested by the current holder.
1675 *
1676 * Returns true if there were any waiters waiting on the lock that
1677 * should now be woken up with ProcLockWakeup.
1678 */
1679static bool
1680UnGrantLock(LOCK *lock, LOCKMODE lockmode,
1682{
1683 bool wakeupNeeded = false;
1684
1685 Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
1686 Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
1687 Assert(lock->nGranted <= lock->nRequested);
1688
1689 /*
1690 * fix the general lock stats
1691 */
1692 lock->nRequested--;
1693 lock->requested[lockmode]--;
1694 lock->nGranted--;
1695 lock->granted[lockmode]--;
1696
1697 if (lock->granted[lockmode] == 0)
1698 {
1699 /* change the conflict mask. No more of this lock type. */
1700 lock->grantMask &= LOCKBIT_OFF(lockmode);
1701 }
1702
1703 LOCK_PRINT("UnGrantLock: updated", lock, lockmode);
1704
1705 /*
1706 * We need only run ProcLockWakeup if the released lock conflicts with at
1707 * least one of the lock types requested by waiter(s). Otherwise whatever
1708 * conflict made them wait must still exist. NOTE: before MVCC, we could
1709 * skip wakeup if lock->granted[lockmode] was still positive. But that's
1710 * not true anymore, because the remaining granted locks might belong to
1711 * some waiter, who could now be awakened because he doesn't conflict with
1712 * his own locks.
1713 */
1714 if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
1715 wakeupNeeded = true;
1716
1717 /*
1718 * Now fix the per-proclock state.
1719 */
1720 proclock->holdMask &= LOCKBIT_OFF(lockmode);
1721 PROCLOCK_PRINT("UnGrantLock: updated", proclock);
1722
1723 return wakeupNeeded;
1724}
1725
1726/*
1727 * CleanUpLock -- clean up after releasing a lock. We garbage-collect the
1728 * proclock and lock objects if possible, and call ProcLockWakeup if there
1729 * are remaining requests and the caller says it's OK. (Normally, this
1730 * should be called after UnGrantLock, and wakeupNeeded is the result from
1731 * UnGrantLock.)
1732 *
1733 * The appropriate partition lock must be held at entry, and will be
1734 * held at exit.
1735 */
1736static void
1737CleanUpLock(LOCK *lock, PROCLOCK *proclock,
1739 bool wakeupNeeded)
1740{
1741 /*
1742 * If this was my last hold on this lock, delete my entry in the proclock
1743 * table.
1744 */
1745 if (proclock->holdMask == 0)
1746 {
1748
1749 PROCLOCK_PRINT("CleanUpLock: deleting", proclock);
1750 dlist_delete(&proclock->lockLink);
1751 dlist_delete(&proclock->procLink);
1752 proclock_hashcode = ProcLockHashCode(&proclock->tag, hashcode);
1754 &(proclock->tag),
1757 NULL))
1758 elog(PANIC, "proclock table corrupted");
1759 }
1760
1761 if (lock->nRequested == 0)
1762 {
1763 /*
1764 * The caller just released the last lock, so garbage-collect the lock
1765 * object.
1766 */
1767 LOCK_PRINT("CleanUpLock: deleting", lock, 0);
1770 &(lock->tag),
1771 hashcode,
1773 NULL))
1774 elog(PANIC, "lock table corrupted");
1775 }
1776 else if (wakeupNeeded)
1777 {
1778 /* There are waiters on this lock, so wake them up. */
1780 }
1781}
1782
1783/*
1784 * GrantLockLocal -- update the locallock data structures to show
1785 * the lock request has been granted.
1786 *
1787 * We expect that LockAcquire made sure there is room to add a new
1788 * ResourceOwner entry.
1789 */
1790static void
1792{
1793 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
1794 int i;
1795
1796 Assert(locallock->numLockOwners < locallock->maxLockOwners);
1797 /* Count the total */
1798 locallock->nLocks++;
1799 /* Count the per-owner lock */
1800 for (i = 0; i < locallock->numLockOwners; i++)
1801 {
1802 if (lockOwners[i].owner == owner)
1803 {
1804 lockOwners[i].nLocks++;
1805 return;
1806 }
1807 }
1808 lockOwners[i].owner = owner;
1809 lockOwners[i].nLocks = 1;
1810 locallock->numLockOwners++;
1811 if (owner != NULL)
1813
1814 /* Indicate that the lock is acquired for certain types of locks. */
1816}
1817
1818/*
1819 * BeginStrongLockAcquire - inhibit use of fastpath for a given LOCALLOCK,
1820 * and arrange for error cleanup if it fails
1821 */
1822static void
1824{
1826 Assert(locallock->holdsStrongLockCount == false);
1827
1828 /*
1829 * Adding to a memory location is not atomic, so we take a spinlock to
1830 * ensure we don't collide with someone else trying to bump the count at
1831 * the same time.
1832 *
1833 * XXX: It might be worth considering using an atomic fetch-and-add
1834 * instruction here, on architectures where that is supported.
1835 */
1836
1839 locallock->holdsStrongLockCount = true;
1842}
1843
1844/*
1845 * FinishStrongLockAcquire - cancel pending cleanup for a strong lock
1846 * acquisition once it's no longer needed
1847 */
1848static void
1850{
1852}
1853
1854/*
1855 * AbortStrongLockAcquire - undo strong lock state changes performed by
1856 * BeginStrongLockAcquire.
1857 */
1858void
1860{
1863
1864 if (locallock == NULL)
1865 return;
1866
1868 Assert(locallock->holdsStrongLockCount == true);
1872 locallock->holdsStrongLockCount = false;
1875}
1876
1877/*
1878 * GrantAwaitedLock -- call GrantLockLocal for the lock we are doing
1879 * WaitOnLock on.
1880 *
1881 * proc.c needs this for the case where we are booted off the lock by
1882 * timeout, but discover that someone granted us the lock anyway.
1883 *
1884 * We could just export GrantLockLocal, but that would require including
1885 * resowner.h in lock.h, which creates circularity.
1886 */
1887void
1888GrantAwaitedLock(void)
1889{
1891}
1892
1893/*
1894 * GetAwaitedLock -- Return the lock we're currently doing WaitOnLock on.
1895 */
1896LOCALLOCK *
1897GetAwaitedLock(void)
1898{
1899 return awaitedLock;
1900}
1901
1902/*
1903 * ResetAwaitedLock -- Forget that we are waiting on a lock.
1904 */
1905void
1906ResetAwaitedLock(void)
1907{
1908 awaitedLock = NULL;
1909}
1910
1911/*
1912 * MarkLockClear -- mark an acquired lock as "clear"
1913 *
1914 * This means that we know we have absorbed all sinval messages that other
1915 * sessions generated before we acquired this lock, and so we can confidently
1916 * assume we know about any catalog changes protected by this lock.
1917 */
1918void
1920{
1921 Assert(locallock->nLocks > 0);
1922 locallock->lockCleared = true;
1923}
1924
1925/*
1926 * WaitOnLock -- wait to acquire a lock
1927 *
1928 * This is a wrapper around ProcSleep, with extra tracing and bookkeeping.
1929 */
1930static ProcWaitStatus
1932{
1933 ProcWaitStatus result;
1935
1936 TRACE_POSTGRESQL_LOCK_WAIT_START(locallock->tag.lock.locktag_field1,
1937 locallock->tag.lock.locktag_field2,
1938 locallock->tag.lock.locktag_field3,
1939 locallock->tag.lock.locktag_field4,
1940 locallock->tag.lock.locktag_type,
1941 locallock->tag.mode);
1942
1943 /* Setup error traceback support for ereport() */
1948
1949 /* adjust the process title to indicate that it's waiting */
1950 set_ps_display_suffix("waiting");
1951
1952 /*
1953 * Record the fact that we are waiting for a lock, so that
1954 * LockErrorCleanup will clean up if cancel/die happens.
1955 */
1957 awaitedOwner = owner;
1958
1959 /*
1960 * NOTE: Think not to put any shared-state cleanup after the call to
1961 * ProcSleep, in either the normal or failure path. The lock state must
1962 * be fully set by the lock grantor, or by CheckDeadLock if we give up
1963 * waiting for the lock. This is necessary because of the possibility
1964 * that a cancel/die interrupt will interrupt ProcSleep after someone else
1965 * grants us the lock, but before we've noticed it. Hence, after granting,
1966 * the locktable state must fully reflect the fact that we own the lock;
1967 * we can't do additional work on return.
1968 *
1969 * We can and do use a PG_TRY block to try to clean up after failure, but
1970 * this still has a major limitation: elog(FATAL) can occur while waiting
1971 * (eg, a "die" interrupt), and then control won't come back here. So all
1972 * cleanup of essential state should happen in LockErrorCleanup, not here.
1973 * We can use PG_TRY to clear the "waiting" status flags, since doing that
1974 * is unimportant if the process exits.
1975 */
1976 PG_TRY();
1977 {
1978 result = ProcSleep(locallock);
1979 }
1980 PG_CATCH();
1981 {
1982 /* In this path, awaitedLock remains set until LockErrorCleanup */
1983
1984 /* reset ps display to remove the suffix */
1986
1987 /* and propagate the error */
1988 PG_RE_THROW();
1989 }
1990 PG_END_TRY();
1991
1992 /*
1993 * We no longer want LockErrorCleanup to do anything.
1994 */
1995 awaitedLock = NULL;
1996
1997 /* reset ps display to remove the suffix */
1999
2001
2002 TRACE_POSTGRESQL_LOCK_WAIT_DONE(locallock->tag.lock.locktag_field1,
2003 locallock->tag.lock.locktag_field2,
2004 locallock->tag.lock.locktag_field3,
2005 locallock->tag.lock.locktag_field4,
2006 locallock->tag.lock.locktag_type,
2007 locallock->tag.mode);
2008
2009 return result;
2010}
2011
2012/*
2013 * error context callback for failures in WaitOnLock
2014 *
2015 * We report which lock was being waited on, in the same style used in
2016 * deadlock reports. This helps with lock timeout errors in particular.
2017 */
2018static void
2020{
2022 const LOCKTAG *tag = &locallock->tag.lock;
2023 LOCKMODE mode = locallock->tag.mode;
2025
2028
2029 errcontext("waiting for %s on %s",
2031 locktagbuf.data);
2032}
2033
2034/*
2035 * Remove a proc from the wait-queue it is on (caller must know it is on one).
2036 * This is only used when the proc has failed to get the lock, so we set its
2037 * waitStatus to PROC_WAIT_STATUS_ERROR.
2038 *
2039 * Appropriate partition lock must be held by caller. Also, caller is
2040 * responsible for signaling the proc if needed.
2041 *
2042 * NB: this does not clean up any locallock object that may exist for the lock.
2043 */
2044void
2045RemoveFromWaitQueue(PGPROC *proc, uint32 hashcode)
2046{
2047 LOCK *waitLock = proc->waitLock;
2048 PROCLOCK *proclock = proc->waitProcLock;
2049 LOCKMODE lockmode = proc->waitLockMode;
2051
2052 /* Make sure proc is waiting */
2054 Assert(proc->links.next != NULL);
2055 Assert(waitLock);
2056 Assert(!dclist_is_empty(&waitLock->waitProcs));
2058
2059 /* Remove proc from lock's wait queue */
2060 dclist_delete_from_thoroughly(&waitLock->waitProcs, &proc->links);
2061
2062 /* Undo increments of request counts by waiting process */
2063 Assert(waitLock->nRequested > 0);
2064 Assert(waitLock->nRequested > proc->waitLock->nGranted);
2065 waitLock->nRequested--;
2066 Assert(waitLock->requested[lockmode] > 0);
2067 waitLock->requested[lockmode]--;
2068 /* don't forget to clear waitMask bit if appropriate */
2069 if (waitLock->granted[lockmode] == waitLock->requested[lockmode])
2070 waitLock->waitMask &= LOCKBIT_OFF(lockmode);
2071
2072 /* Clean up the proc's own state, and pass it the ok/fail signal */
2073 proc->waitLock = NULL;
2074 proc->waitProcLock = NULL;
2076
2077 /*
2078 * Delete the proclock immediately if it represents no already-held locks.
2079 * (This must happen now because if the owner of the lock decides to
2080 * release it, and the requested/granted counts then go to zero,
2081 * LockRelease expects there to be no remaining proclocks.) Then see if
2082 * any other waiters for the lock can be woken up now.
2083 */
2084 CleanUpLock(waitLock, proclock,
2085 LockMethods[lockmethodid], hashcode,
2086 true);
2087}
2088
2089/*
2090 * LockRelease -- look up 'locktag' and release one 'lockmode' lock on it.
2091 * Release a session lock if 'sessionLock' is true, else release a
2092 * regular transaction lock.
2093 *
2094 * Side Effects: find any waiting processes that are now wakable,
2095 * grant them their requested locks and awaken them.
2096 * (We have to grant the lock here to avoid a race between
2097 * the waking process and any new process to
2098 * come along and request the lock.)
2099 */
2100bool
2101LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
2102{
2107 LOCK *lock;
2108 PROCLOCK *proclock;
2110 bool wakeupNeeded;
2111
2113 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2116 elog(ERROR, "unrecognized lock mode: %d", lockmode);
2117
2118#ifdef LOCK_DEBUG
2119 if (LOCK_DEBUG_ENABLED(locktag))
2120 elog(LOG, "LockRelease: lock [%u,%u] %s",
2121 locktag->locktag_field1, locktag->locktag_field2,
2122 lockMethodTable->lockModeNames[lockmode]);
2123#endif
2124
2125 /*
2126 * Find the LOCALLOCK entry for this lock and lockmode
2127 */
2128 MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
2129 localtag.lock = *locktag;
2130 localtag.mode = lockmode;
2131
2133 &localtag,
2134 HASH_FIND, NULL);
2135
2136 /*
2137 * let the caller print its own error message, too. Do not ereport(ERROR).
2138 */
2139 if (!locallock || locallock->nLocks <= 0)
2140 {
2141 elog(WARNING, "you don't own a lock of type %s",
2142 lockMethodTable->lockModeNames[lockmode]);
2143 return false;
2144 }
2145
2146 /*
2147 * Decrease the count for the resource owner.
2148 */
2149 {
2150 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
2151 ResourceOwner owner;
2152 int i;
2153
2154 /* Identify owner for lock */
2155 if (sessionLock)
2156 owner = NULL;
2157 else
2158 owner = CurrentResourceOwner;
2159
2160 for (i = locallock->numLockOwners - 1; i >= 0; i--)
2161 {
2162 if (lockOwners[i].owner == owner)
2163 {
2164 Assert(lockOwners[i].nLocks > 0);
2165 if (--lockOwners[i].nLocks == 0)
2166 {
2167 if (owner != NULL)
2169 /* compact out unused slot */
2170 locallock->numLockOwners--;
2171 if (i < locallock->numLockOwners)
2172 lockOwners[i] = lockOwners[locallock->numLockOwners];
2173 }
2174 break;
2175 }
2176 }
2177 if (i < 0)
2178 {
2179 /* don't release a lock belonging to another owner */
2180 elog(WARNING, "you don't own a lock of type %s",
2181 lockMethodTable->lockModeNames[lockmode]);
2182 return false;
2183 }
2184 }
2185
2186 /*
2187 * Decrease the total local count. If we're still holding the lock, we're
2188 * done.
2189 */
2190 locallock->nLocks--;
2191
2192 if (locallock->nLocks > 0)
2193 return true;
2194
2195 /*
2196 * At this point we can no longer suppose we are clear of invalidation
2197 * messages related to this lock. Although we'll delete the LOCALLOCK
2198 * object before any intentional return from this routine, it seems worth
2199 * the trouble to explicitly reset lockCleared right now, just in case
2200 * some error prevents us from deleting the LOCALLOCK.
2201 */
2202 locallock->lockCleared = false;
2203
2204 /* Attempt fast release of any lock eligible for the fast path. */
2205 if (EligibleForRelationFastPath(locktag, lockmode) &&
2207 {
2208 bool released;
2209
2210 /*
2211 * We might not find the lock here, even if we originally entered it
2212 * here. Another backend may have moved it to the main table.
2213 */
2216 lockmode);
2218 if (released)
2219 {
2221 return true;
2222 }
2223 }
2224
2225 /*
2226 * Otherwise we've got to mess with the shared lock table.
2227 */
2229
2231
2232 /*
2233 * Normally, we don't need to re-find the lock or proclock, since we kept
2234 * their addresses in the locallock table, and they couldn't have been
2235 * removed while we were holding a lock on them. But it's possible that
2236 * the lock was taken fast-path and has since been moved to the main hash
2237 * table by another backend, in which case we will need to look up the
2238 * objects here. We assume the lock field is NULL if so.
2239 */
2240 lock = locallock->lock;
2241 if (!lock)
2242 {
2244
2245 Assert(EligibleForRelationFastPath(locktag, lockmode));
2247 locktag,
2248 locallock->hashcode,
2249 HASH_FIND,
2250 NULL);
2251 if (!lock)
2252 elog(ERROR, "failed to re-find shared lock object");
2253 locallock->lock = lock;
2254
2255 proclocktag.myLock = lock;
2256 proclocktag.myProc = MyProc;
2258 &proclocktag,
2259 HASH_FIND,
2260 NULL);
2261 if (!locallock->proclock)
2262 elog(ERROR, "failed to re-find shared proclock object");
2263 }
2264 LOCK_PRINT("LockRelease: found", lock, lockmode);
2265 proclock = locallock->proclock;
2266 PROCLOCK_PRINT("LockRelease: found", proclock);
2267
2268 /*
2269 * Double-check that we are actually holding a lock of the type we want to
2270 * release.
2271 */
2272 if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
2273 {
2274 PROCLOCK_PRINT("LockRelease: WRONGTYPE", proclock);
2276 elog(WARNING, "you don't own a lock of type %s",
2277 lockMethodTable->lockModeNames[lockmode]);
2279 return false;
2280 }
2281
2282 /*
2283 * Do the releasing. CleanUpLock will waken any now-wakable waiters.
2284 */
2285 wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
2286
2287 CleanUpLock(lock, proclock,
2288 lockMethodTable, locallock->hashcode,
2289 wakeupNeeded);
2290
2292
2294 return true;
2295}
2296
2297/*
2298 * LockReleaseAll -- Release all locks of the specified lock method that
2299 * are held by the current process.
2300 *
2301 * Well, not necessarily *all* locks. The available behaviors are:
2302 * allLocks == true: release all locks including session locks.
2303 * allLocks == false: release all non-session locks.
2304 */
2305void
2307{
2308 HASH_SEQ_STATUS status;
2310 int i,
2311 numLockModes;
2313 LOCK *lock;
2314 int partition;
2315 bool have_fast_path_lwlock = false;
2316
2318 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2320
2321#ifdef LOCK_DEBUG
2322 if (*(lockMethodTable->trace_flag))
2323 elog(LOG, "LockReleaseAll: lockmethod=%d", lockmethodid);
2324#endif
2325
2326 /*
2327 * Get rid of our fast-path VXID lock, if appropriate. Note that this is
2328 * the only way that the lock we hold on our own VXID can ever get
2329 * released: it is always and only released when a toplevel transaction
2330 * ends.
2331 */
2334
2335 numLockModes = lockMethodTable->numLockModes;
2336
2337 /*
2338 * First we run through the locallock table and get rid of unwanted
2339 * entries, then we scan the process's proclocks and get rid of those. We
2340 * do this separately because we may have multiple locallock entries
2341 * pointing to the same proclock, and we daren't end up with any dangling
2342 * pointers. Fast-path locks are cleaned up during the locallock table
2343 * scan, though.
2344 */
2346
2347 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2348 {
2349 /*
2350 * If the LOCALLOCK entry is unused, something must've gone wrong
2351 * while trying to acquire this lock. Just forget the local entry.
2352 */
2353 if (locallock->nLocks == 0)
2354 {
2356 continue;
2357 }
2358
2359 /* Ignore items that are not of the lockmethod to be removed */
2361 continue;
2362
2363 /*
2364 * If we are asked to release all locks, we can just zap the entry.
2365 * Otherwise, must scan to see if there are session locks. We assume
2366 * there is at most one lockOwners entry for session locks.
2367 */
2368 if (!allLocks)
2369 {
2370 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
2371
2372 /* If session lock is above array position 0, move it down to 0 */
2373 for (i = 0; i < locallock->numLockOwners; i++)
2374 {
2375 if (lockOwners[i].owner == NULL)
2376 lockOwners[0] = lockOwners[i];
2377 else
2378 ResourceOwnerForgetLock(lockOwners[i].owner, locallock);
2379 }
2380
2381 if (locallock->numLockOwners > 0 &&
2382 lockOwners[0].owner == NULL &&
2383 lockOwners[0].nLocks > 0)
2384 {
2385 /* Fix the locallock to show just the session locks */
2386 locallock->nLocks = lockOwners[0].nLocks;
2387 locallock->numLockOwners = 1;
2388 /* We aren't deleting this locallock, so done */
2389 continue;
2390 }
2391 else
2392 locallock->numLockOwners = 0;
2393 }
2394
2395#ifdef USE_ASSERT_CHECKING
2396
2397 /*
2398 * Tuple locks are currently held only for short durations within a
2399 * transaction. Check that we didn't forget to release one.
2400 */
2402 elog(WARNING, "tuple lock held at commit");
2403#endif
2404
2405 /*
2406 * If the lock or proclock pointers are NULL, this lock was taken via
2407 * the relation fast-path (and is not known to have been transferred).
2408 */
2409 if (locallock->proclock == NULL || locallock->lock == NULL)
2410 {
2411 LOCKMODE lockmode = locallock->tag.mode;
2412 Oid relid;
2413
2414 /* Verify that a fast-path lock is what we've got. */
2415 if (!EligibleForRelationFastPath(&locallock->tag.lock, lockmode))
2416 elog(PANIC, "locallock table corrupted");
2417
2418 /*
2419 * If we don't currently hold the LWLock that protects our
2420 * fast-path data structures, we must acquire it before attempting
2421 * to release the lock via the fast-path. We will continue to
2422 * hold the LWLock until we're done scanning the locallock table,
2423 * unless we hit a transferred fast-path lock. (XXX is this
2424 * really such a good idea? There could be a lot of entries ...)
2425 */
2427 {
2429 have_fast_path_lwlock = true;
2430 }
2431
2432 /* Attempt fast-path release. */
2433 relid = locallock->tag.lock.locktag_field2;
2434 if (FastPathUnGrantRelationLock(relid, lockmode))
2435 {
2437 continue;
2438 }
2439
2440 /*
2441 * Our lock, originally taken via the fast path, has been
2442 * transferred to the main lock table. That's going to require
2443 * some extra work, so release our fast-path lock before starting.
2444 */
2446 have_fast_path_lwlock = false;
2447
2448 /*
2449 * Now dump the lock. We haven't got a pointer to the LOCK or
2450 * PROCLOCK in this case, so we have to handle this a bit
2451 * differently than a normal lock release. Unfortunately, this
2452 * requires an extra LWLock acquire-and-release cycle on the
2453 * partitionLock, but hopefully it shouldn't happen often.
2454 */
2456 &locallock->tag.lock, lockmode, false);
2458 continue;
2459 }
2460
2461 /* Mark the proclock to show we need to release this lockmode */
2462 if (locallock->nLocks > 0)
2463 locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
2464
2465 /* And remove the locallock hashtable entry */
2467 }
2468
2469 /* Done with the fast-path data structures */
2472
2473 /*
2474 * Now, scan each lock partition separately.
2475 */
2477 {
2479 dlist_head *procLocks = &MyProc->myProcLocks[partition];
2481
2483
2484 /*
2485 * If the proclock list for this partition is empty, we can skip
2486 * acquiring the partition lock. This optimization is trickier than
2487 * it looks, because another backend could be in process of adding
2488 * something to our proclock list due to promoting one of our
2489 * fast-path locks. However, any such lock must be one that we
2490 * decided not to delete above, so it's okay to skip it again now;
2491 * we'd just decide not to delete it again. We must, however, be
2492 * careful to re-fetch the list header once we've acquired the
2493 * partition lock, to be sure we have a valid, up-to-date pointer.
2494 * (There is probably no significant risk if pointer fetch/store is
2495 * atomic, but we don't wish to assume that.)
2496 *
2497 * XXX This argument assumes that the locallock table correctly
2498 * represents all of our fast-path locks. While allLocks mode
2499 * guarantees to clean up all of our normal locks regardless of the
2500 * locallock situation, we lose that guarantee for fast-path locks.
2501 * This is not ideal.
2502 */
2503 if (dlist_is_empty(procLocks))
2504 continue; /* needn't examine this partition */
2505
2507
2509 {
2510 PROCLOCK *proclock = dlist_container(PROCLOCK, procLink, proclock_iter.cur);
2511 bool wakeupNeeded = false;
2512
2513 Assert(proclock->tag.myProc == MyProc);
2514
2515 lock = proclock->tag.myLock;
2516
2517 /* Ignore items that are not of the lockmethod to be removed */
2518 if (LOCK_LOCKMETHOD(*lock) != lockmethodid)
2519 continue;
2520
2521 /*
2522 * In allLocks mode, force release of all locks even if locallock
2523 * table had problems
2524 */
2525 if (allLocks)
2526 proclock->releaseMask = proclock->holdMask;
2527 else
2528 Assert((proclock->releaseMask & ~proclock->holdMask) == 0);
2529
2530 /*
2531 * Ignore items that have nothing to be released, unless they have
2532 * holdMask == 0 and are therefore recyclable
2533 */
2534 if (proclock->releaseMask == 0 && proclock->holdMask != 0)
2535 continue;
2536
2537 PROCLOCK_PRINT("LockReleaseAll", proclock);
2538 LOCK_PRINT("LockReleaseAll", lock, 0);
2539 Assert(lock->nRequested >= 0);
2540 Assert(lock->nGranted >= 0);
2541 Assert(lock->nGranted <= lock->nRequested);
2542 Assert((proclock->holdMask & ~lock->grantMask) == 0);
2543
2544 /*
2545 * Release the previously-marked lock modes
2546 */
2547 for (i = 1; i <= numLockModes; i++)
2548 {
2549 if (proclock->releaseMask & LOCKBIT_ON(i))
2550 wakeupNeeded |= UnGrantLock(lock, i, proclock,
2552 }
2553 Assert((lock->nRequested >= 0) && (lock->nGranted >= 0));
2554 Assert(lock->nGranted <= lock->nRequested);
2555 LOCK_PRINT("LockReleaseAll: updated", lock, 0);
2556
2557 proclock->releaseMask = 0;
2558
2559 /* CleanUpLock will wake up waiters if needed. */
2560 CleanUpLock(lock, proclock,
2562 LockTagHashCode(&lock->tag),
2563 wakeupNeeded);
2564 } /* loop over PROCLOCKs within this partition */
2565
2567 } /* loop over partitions */
2568
2569#ifdef LOCK_DEBUG
2570 if (*(lockMethodTable->trace_flag))
2571 elog(LOG, "LockReleaseAll done");
2572#endif
2573}
2574
2575/*
2576 * LockReleaseSession -- Release all session locks of the specified lock method
2577 * that are held by the current process.
2578 */
2579void
2581{
2582 HASH_SEQ_STATUS status;
2584
2586 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2587
2589
2590 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2591 {
2592 /* Ignore items that are not of the specified lock method */
2594 continue;
2595
2597 }
2598}
2599
2600/*
2601 * LockReleaseCurrentOwner
2602 * Release all locks belonging to CurrentResourceOwner
2603 *
2604 * If the caller knows what those locks are, it can pass them as an array.
2605 * That speeds up the call significantly, when a lot of locks are held.
2606 * Otherwise, pass NULL for locallocks, and we'll traverse through our hash
2607 * table to find them.
2608 */
2609void
2611{
2612 if (locallocks == NULL)
2613 {
2614 HASH_SEQ_STATUS status;
2616
2618
2619 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2621 }
2622 else
2623 {
2624 int i;
2625
2626 for (i = nlocks - 1; i >= 0; i--)
2628 }
2629}
2630
2631/*
2632 * ReleaseLockIfHeld
2633 * Release any session-level locks on this lockable object if sessionLock
2634 * is true; else, release any locks held by CurrentResourceOwner.
2635 *
2636 * It is tempting to pass this a ResourceOwner pointer (or NULL for session
2637 * locks), but without refactoring LockRelease() we cannot support releasing
2638 * locks belonging to resource owners other than CurrentResourceOwner.
2639 * If we were to refactor, it'd be a good idea to fix it so we don't have to
2640 * do a hashtable lookup of the locallock, too. However, currently this
2641 * function isn't used heavily enough to justify refactoring for its
2642 * convenience.
2643 */
2644static void
2646{
2647 ResourceOwner owner;
2648 LOCALLOCKOWNER *lockOwners;
2649 int i;
2650
2651 /* Identify owner for lock (must match LockRelease!) */
2652 if (sessionLock)
2653 owner = NULL;
2654 else
2655 owner = CurrentResourceOwner;
2656
2657 /* Scan to see if there are any locks belonging to the target owner */
2658 lockOwners = locallock->lockOwners;
2659 for (i = locallock->numLockOwners - 1; i >= 0; i--)
2660 {
2661 if (lockOwners[i].owner == owner)
2662 {
2663 Assert(lockOwners[i].nLocks > 0);
2664 if (lockOwners[i].nLocks < locallock->nLocks)
2665 {
2666 /*
2667 * We will still hold this lock after forgetting this
2668 * ResourceOwner.
2669 */
2670 locallock->nLocks -= lockOwners[i].nLocks;
2671 /* compact out unused slot */
2672 locallock->numLockOwners--;
2673 if (owner != NULL)
2675 if (i < locallock->numLockOwners)
2676 lockOwners[i] = lockOwners[locallock->numLockOwners];
2677 }
2678 else
2679 {
2680 Assert(lockOwners[i].nLocks == locallock->nLocks);
2681 /* We want to call LockRelease just once */
2682 lockOwners[i].nLocks = 1;
2683 locallock->nLocks = 1;
2684 if (!LockRelease(&locallock->tag.lock,
2685 locallock->tag.mode,
2686 sessionLock))
2687 elog(WARNING, "ReleaseLockIfHeld: failed??");
2688 }
2689 break;
2690 }
2691 }
2692}
2693
2694/*
2695 * LockReassignCurrentOwner
2696 * Reassign all locks belonging to CurrentResourceOwner to belong
2697 * to its parent resource owner.
2698 *
2699 * If the caller knows what those locks are, it can pass them as an array.
2700 * That speeds up the call significantly, when a lot of locks are held
2701 * (e.g pg_dump with a large schema). Otherwise, pass NULL for locallocks,
2702 * and we'll traverse through our hash table to find them.
2703 */
2704void
2706{
2708
2709 Assert(parent != NULL);
2710
2711 if (locallocks == NULL)
2712 {
2713 HASH_SEQ_STATUS status;
2715
2717
2718 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2720 }
2721 else
2722 {
2723 int i;
2724
2725 for (i = nlocks - 1; i >= 0; i--)
2726 LockReassignOwner(locallocks[i], parent);
2727 }
2728}
2729
2730/*
2731 * Subroutine of LockReassignCurrentOwner. Reassigns a given lock belonging to
2732 * CurrentResourceOwner to its parent.
2733 */
2734static void
2736{
2737 LOCALLOCKOWNER *lockOwners;
2738 int i;
2739 int ic = -1;
2740 int ip = -1;
2741
2742 /*
2743 * Scan to see if there are any locks belonging to current owner or its
2744 * parent
2745 */
2746 lockOwners = locallock->lockOwners;
2747 for (i = locallock->numLockOwners - 1; i >= 0; i--)
2748 {
2749 if (lockOwners[i].owner == CurrentResourceOwner)
2750 ic = i;
2751 else if (lockOwners[i].owner == parent)
2752 ip = i;
2753 }
2754
2755 if (ic < 0)
2756 return; /* no current locks */
2757
2758 if (ip < 0)
2759 {
2760 /* Parent has no slot, so just give it the child's slot */
2761 lockOwners[ic].owner = parent;
2763 }
2764 else
2765 {
2766 /* Merge child's count with parent's */
2767 lockOwners[ip].nLocks += lockOwners[ic].nLocks;
2768 /* compact out unused slot */
2769 locallock->numLockOwners--;
2770 if (ic < locallock->numLockOwners)
2771 lockOwners[ic] = lockOwners[locallock->numLockOwners];
2772 }
2774}
2775
2776/*
2777 * FastPathGrantRelationLock
2778 * Grant lock using per-backend fast-path array, if there is space.
2779 */
2780static bool
2781FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode)
2782{
2783 uint32 i;
2785
2786 /* fast-path group the lock belongs to */
2787 uint32 group = FAST_PATH_REL_GROUP(relid);
2788
2789 /* Scan for existing entry for this relid, remembering empty slot. */
2790 for (i = 0; i < FP_LOCK_SLOTS_PER_GROUP; i++)
2791 {
2792 /* index into the whole per-backend array */
2793 uint32 f = FAST_PATH_SLOT(group, i);
2794
2795 if (FAST_PATH_GET_BITS(MyProc, f) == 0)
2796 unused_slot = f;
2797 else if (MyProc->fpRelId[f] == relid)
2798 {
2799 Assert(!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode));
2800 FAST_PATH_SET_LOCKMODE(MyProc, f, lockmode);
2801 return true;
2802 }
2803 }
2804
2805 /* If no existing entry, use any empty slot. */
2807 {
2808 MyProc->fpRelId[unused_slot] = relid;
2810 ++FastPathLocalUseCounts[group];
2811 return true;
2812 }
2813
2814 /* No existing entry, and no empty slot. */
2815 return false;
2816}
2817
2818/*
2819 * FastPathUnGrantRelationLock
2820 * Release fast-path lock, if present. Update backend-private local
2821 * use count, while we're at it.
2822 */
2823static bool
2825{
2826 uint32 i;
2827 bool result = false;
2828
2829 /* fast-path group the lock belongs to */
2830 uint32 group = FAST_PATH_REL_GROUP(relid);
2831
2832 FastPathLocalUseCounts[group] = 0;
2833 for (i = 0; i < FP_LOCK_SLOTS_PER_GROUP; i++)
2834 {
2835 /* index into the whole per-backend array */
2836 uint32 f = FAST_PATH_SLOT(group, i);
2837
2838 if (MyProc->fpRelId[f] == relid
2839 && FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
2840 {
2841 Assert(!result);
2842 FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
2843 result = true;
2844 /* we continue iterating so as to update FastPathLocalUseCount */
2845 }
2846 if (FAST_PATH_GET_BITS(MyProc, f) != 0)
2847 ++FastPathLocalUseCounts[group];
2848 }
2849 return result;
2850}
2851
2852/*
2853 * FastPathTransferRelationLocks
2854 * Transfer locks matching the given lock tag from per-backend fast-path
2855 * arrays to the shared hash table.
2856 *
2857 * Returns true if successful, false if ran out of shared memory.
2858 */
2859static bool
2861 uint32 hashcode)
2862{
2864 Oid relid = locktag->locktag_field2;
2865 uint32 i;
2866
2867 /* fast-path group the lock belongs to */
2868 uint32 group = FAST_PATH_REL_GROUP(relid);
2869
2870 /*
2871 * Every PGPROC that can potentially hold a fast-path lock is present in
2872 * ProcGlobal->allProcs. Prepared transactions are not, but any
2873 * outstanding fast-path locks held by prepared transactions are
2874 * transferred to the main lock table.
2875 */
2876 for (i = 0; i < ProcGlobal->allProcCount; i++)
2877 {
2878 PGPROC *proc = GetPGProcByNumber(i);
2879 uint32 j;
2880
2882
2883 /*
2884 * If the target backend isn't referencing the same database as the
2885 * lock, then we needn't examine the individual relation IDs at all;
2886 * none of them can be relevant.
2887 *
2888 * proc->databaseId is set at backend startup time and never changes
2889 * thereafter, so it might be safe to perform this test before
2890 * acquiring &proc->fpInfoLock. In particular, it's certainly safe to
2891 * assume that if the target backend holds any fast-path locks, it
2892 * must have performed a memory-fencing operation (in particular, an
2893 * LWLock acquisition) since setting proc->databaseId. However, it's
2894 * less clear that our backend is certain to have performed a memory
2895 * fencing operation since the other backend set proc->databaseId. So
2896 * for now, we test it after acquiring the LWLock just to be safe.
2897 *
2898 * Also skip groups without any registered fast-path locks.
2899 */
2900 if (proc->databaseId != locktag->locktag_field1 ||
2901 proc->fpLockBits[group] == 0)
2902 {
2903 LWLockRelease(&proc->fpInfoLock);
2904 continue;
2905 }
2906
2907 for (j = 0; j < FP_LOCK_SLOTS_PER_GROUP; j++)
2908 {
2909 uint32 lockmode;
2910
2911 /* index into the whole per-backend array */
2912 uint32 f = FAST_PATH_SLOT(group, j);
2913
2914 /* Look for an allocated slot matching the given relid. */
2915 if (relid != proc->fpRelId[f] || FAST_PATH_GET_BITS(proc, f) == 0)
2916 continue;
2917
2918 /* Find or create lock object. */
2920 for (lockmode = FAST_PATH_LOCKNUMBER_OFFSET;
2922 ++lockmode)
2923 {
2924 PROCLOCK *proclock;
2925
2926 if (!FAST_PATH_CHECK_LOCKMODE(proc, f, lockmode))
2927 continue;
2928 proclock = SetupLockInTable(lockMethodTable, proc, locktag,
2929 hashcode, lockmode);
2930 if (!proclock)
2931 {
2933 LWLockRelease(&proc->fpInfoLock);
2934 return false;
2935 }
2936 GrantLock(proclock->tag.myLock, proclock, lockmode);
2937 FAST_PATH_CLEAR_LOCKMODE(proc, f, lockmode);
2938 }
2940
2941 /* No need to examine remaining slots. */
2942 break;
2943 }
2944 LWLockRelease(&proc->fpInfoLock);
2945 }
2946 return true;
2947}
2948
2949/*
2950 * FastPathGetRelationLockEntry
2951 * Return the PROCLOCK for a lock originally taken via the fast-path,
2952 * transferring it to the primary lock table if necessary.
2953 *
2954 * Note: caller takes care of updating the locallock object.
2955 */
2956static PROCLOCK *
2958{
2960 LOCKTAG *locktag = &locallock->tag.lock;
2961 PROCLOCK *proclock = NULL;
2963 Oid relid = locktag->locktag_field2;
2964 uint32 i,
2965 group;
2966
2967 /* fast-path group the lock belongs to */
2968 group = FAST_PATH_REL_GROUP(relid);
2969
2971
2972 for (i = 0; i < FP_LOCK_SLOTS_PER_GROUP; i++)
2973 {
2974 uint32 lockmode;
2975
2976 /* index into the whole per-backend array */
2977 uint32 f = FAST_PATH_SLOT(group, i);
2978
2979 /* Look for an allocated slot matching the given relid. */
2980 if (relid != MyProc->fpRelId[f] || FAST_PATH_GET_BITS(MyProc, f) == 0)
2981 continue;
2982
2983 /* If we don't have a lock of the given mode, forget it! */
2984 lockmode = locallock->tag.mode;
2985 if (!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
2986 break;
2987
2988 /* Find or create lock object. */
2990
2991 proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
2992 locallock->hashcode, lockmode);
2993 if (!proclock)
2994 {
2997 ereport(ERROR,
2999 errmsg("out of shared memory"),
3000 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
3001 }
3002 GrantLock(proclock->tag.myLock, proclock, lockmode);
3003 FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
3004
3006
3007 /* No need to examine remaining slots. */
3008 break;
3009 }
3010
3012
3013 /* Lock may have already been transferred by some other backend. */
3014 if (proclock == NULL)
3015 {
3016 LOCK *lock;
3019
3021
3023 locktag,
3024 locallock->hashcode,
3025 HASH_FIND,
3026 NULL);
3027 if (!lock)
3028 elog(ERROR, "failed to re-find shared lock object");
3029
3030 proclocktag.myLock = lock;
3031 proclocktag.myProc = MyProc;
3032
3034 proclock = (PROCLOCK *)
3036 &proclocktag,
3038 HASH_FIND,
3039 NULL);
3040 if (!proclock)
3041 elog(ERROR, "failed to re-find shared proclock object");
3043 }
3044
3045 return proclock;
3046}
3047
3048/*
3049 * GetLockConflicts
3050 * Get an array of VirtualTransactionIds of xacts currently holding locks
3051 * that would conflict with the specified lock/lockmode.
3052 * xacts merely awaiting such a lock are NOT reported.
3053 *
3054 * The result array is palloc'd and is terminated with an invalid VXID.
3055 * *countp, if not null, is updated to the number of items set.
3056 *
3057 * Of course, the result could be out of date by the time it's returned, so
3058 * use of this function has to be thought about carefully. Similarly, a
3059 * PGPROC with no "lxid" will be considered non-conflicting regardless of any
3060 * lock it holds. Existing callers don't care about a locker after that
3061 * locker's pg_xact updates complete. CommitTransaction() clears "lxid" after
3062 * pg_xact updates and before releasing locks.
3063 *
3064 * Note we never include the current xact's vxid in the result array,
3065 * since an xact never blocks itself.
3066 */
3068GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
3069{
3073 LOCK *lock;
3076 PROCLOCK *proclock;
3077 uint32 hashcode;
3079 int count = 0;
3080 int fast_count = 0;
3081
3083 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
3086 elog(ERROR, "unrecognized lock mode: %d", lockmode);
3087
3088 /*
3089 * Allocate memory to store results, and fill with InvalidVXID. We only
3090 * need enough space for MaxBackends + max_prepared_xacts + a terminator.
3091 * InHotStandby allocate once in TopMemoryContext.
3092 */
3093 if (InHotStandby)
3094 {
3095 if (vxids == NULL)
3098 sizeof(VirtualTransactionId) *
3100 }
3101 else
3103
3104 /* Compute hash code and partition lock, and look up conflicting modes. */
3105 hashcode = LockTagHashCode(locktag);
3107 conflictMask = lockMethodTable->conflictTab[lockmode];
3108
3109 /*
3110 * Fast path locks might not have been entered in the primary lock table.
3111 * If the lock we're dealing with could conflict with such a lock, we must
3112 * examine each backend's fast-path array for conflicts.
3113 */
3114 if (ConflictsWithRelationFastPath(locktag, lockmode))
3115 {
3116 int i;
3117 Oid relid = locktag->locktag_field2;
3119
3120 /* fast-path group the lock belongs to */
3121 uint32 group = FAST_PATH_REL_GROUP(relid);
3122
3123 /*
3124 * Iterate over relevant PGPROCs. Anything held by a prepared
3125 * transaction will have been transferred to the primary lock table,
3126 * so we need not worry about those. This is all a bit fuzzy, because
3127 * new locks could be taken after we've visited a particular
3128 * partition, but the callers had better be prepared to deal with that
3129 * anyway, since the locks could equally well be taken between the
3130 * time we return the value and the time the caller does something
3131 * with it.
3132 */
3133 for (i = 0; i < ProcGlobal->allProcCount; i++)
3134 {
3135 PGPROC *proc = GetPGProcByNumber(i);
3136 uint32 j;
3137
3138 /* A backend never blocks itself */
3139 if (proc == MyProc)
3140 continue;
3141
3143
3144 /*
3145 * If the target backend isn't referencing the same database as
3146 * the lock, then we needn't examine the individual relation IDs
3147 * at all; none of them can be relevant.
3148 *
3149 * See FastPathTransferRelationLocks() for discussion of why we do
3150 * this test after acquiring the lock.
3151 *
3152 * Also skip groups without any registered fast-path locks.
3153 */
3154 if (proc->databaseId != locktag->locktag_field1 ||
3155 proc->fpLockBits[group] == 0)
3156 {
3157 LWLockRelease(&proc->fpInfoLock);
3158 continue;
3159 }
3160
3161 for (j = 0; j < FP_LOCK_SLOTS_PER_GROUP; j++)
3162 {
3164
3165 /* index into the whole per-backend array */
3166 uint32 f = FAST_PATH_SLOT(group, j);
3167
3168 /* Look for an allocated slot matching the given relid. */
3169 if (relid != proc->fpRelId[f])
3170 continue;
3171 lockmask = FAST_PATH_GET_BITS(proc, f);
3172 if (!lockmask)
3173 continue;
3175
3176 /*
3177 * There can only be one entry per relation, so if we found it
3178 * and it doesn't conflict, we can skip the rest of the slots.
3179 */
3180 if ((lockmask & conflictMask) == 0)
3181 break;
3182
3183 /* Conflict! */
3184 GET_VXID_FROM_PGPROC(vxid, *proc);
3185
3187 vxids[count++] = vxid;
3188 /* else, xact already committed or aborted */
3189
3190 /* No need to examine remaining slots. */
3191 break;
3192 }
3193
3194 LWLockRelease(&proc->fpInfoLock);
3195 }
3196 }
3197
3198 /* Remember how many fast-path conflicts we found. */
3199 fast_count = count;
3200
3201 /*
3202 * Look up the lock object matching the tag.
3203 */
3205
3207 locktag,
3208 hashcode,
3209 HASH_FIND,
3210 NULL);
3211 if (!lock)
3212 {
3213 /*
3214 * If the lock object doesn't exist, there is nothing holding a lock
3215 * on this lockable object.
3216 */
3218 vxids[count].procNumber = INVALID_PROC_NUMBER;
3219 vxids[count].localTransactionId = InvalidLocalTransactionId;
3220 if (countp)
3221 *countp = count;
3222 return vxids;
3223 }
3224
3225 /*
3226 * Examine each existing holder (or awaiter) of the lock.
3227 */
3229 {
3230 proclock = dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
3231
3232 if (conflictMask & proclock->holdMask)
3233 {
3234 PGPROC *proc = proclock->tag.myProc;
3235
3236 /* A backend never blocks itself */
3237 if (proc != MyProc)
3238 {
3240
3241 GET_VXID_FROM_PGPROC(vxid, *proc);
3242
3244 {
3245 int i;
3246
3247 /* Avoid duplicate entries. */
3248 for (i = 0; i < fast_count; ++i)
3250 break;
3251 if (i >= fast_count)
3252 vxids[count++] = vxid;
3253 }
3254 /* else, xact already committed or aborted */
3255 }
3256 }
3257 }
3258
3260
3261 if (count > MaxBackends + max_prepared_xacts) /* should never happen */
3262 elog(PANIC, "too many conflicting locks found");
3263
3264 vxids[count].procNumber = INVALID_PROC_NUMBER;
3265 vxids[count].localTransactionId = InvalidLocalTransactionId;
3266 if (countp)
3267 *countp = count;
3268 return vxids;
3269}
3270
3271/*
3272 * Find a lock in the shared lock table and release it. It is the caller's
3273 * responsibility to verify that this is a sane thing to do. (For example, it
3274 * would be bad to release a lock here if there might still be a LOCALLOCK
3275 * object with pointers to it.)
3276 *
3277 * We currently use this in two situations: first, to release locks held by
3278 * prepared transactions on commit (see lock_twophase_postcommit); and second,
3279 * to release locks taken via the fast-path, transferred to the main hash
3280 * table, and then released (see LockReleaseAll).
3281 */
3282static void
3284 LOCKTAG *locktag, LOCKMODE lockmode,
3286{
3287 LOCK *lock;
3288 PROCLOCK *proclock;
3290 uint32 hashcode;
3293 bool wakeupNeeded;
3294
3295 hashcode = LockTagHashCode(locktag);
3297
3299
3300 /*
3301 * Re-find the lock object (it had better be there).
3302 */
3304 locktag,
3305 hashcode,
3306 HASH_FIND,
3307 NULL);
3308 if (!lock)
3309 elog(PANIC, "failed to re-find shared lock object");
3310
3311 /*
3312 * Re-find the proclock object (ditto).
3313 */
3314 proclocktag.myLock = lock;
3315 proclocktag.myProc = proc;
3316
3318
3320 &proclocktag,
3322 HASH_FIND,
3323 NULL);
3324 if (!proclock)
3325 elog(PANIC, "failed to re-find shared proclock object");
3326
3327 /*
3328 * Double-check that we are actually holding a lock of the type we want to
3329 * release.
3330 */
3331 if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
3332 {
3333 PROCLOCK_PRINT("lock_twophase_postcommit: WRONGTYPE", proclock);
3335 elog(WARNING, "you don't own a lock of type %s",
3336 lockMethodTable->lockModeNames[lockmode]);
3337 return;
3338 }
3339
3340 /*
3341 * Do the releasing. CleanUpLock will waken any now-wakable waiters.
3342 */
3343 wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
3344
3345 CleanUpLock(lock, proclock,
3346 lockMethodTable, hashcode,
3347 wakeupNeeded);
3348
3350
3351 /*
3352 * Decrement strong lock count. This logic is needed only for 2PC.
3353 */
3355 && ConflictsWithRelationFastPath(locktag, lockmode))
3356 {
3358
3363 }
3364}
3365
3366/*
3367 * CheckForSessionAndXactLocks
3368 * Check to see if transaction holds both session-level and xact-level
3369 * locks on the same object; if so, throw an error.
3370 *
3371 * If we have both session- and transaction-level locks on the same object,
3372 * PREPARE TRANSACTION must fail. This should never happen with regular
3373 * locks, since we only take those at session level in some special operations
3374 * like VACUUM. It's possible to hit this with advisory locks, though.
3375 *
3376 * It would be nice if we could keep the session hold and give away the
3377 * transactional hold to the prepared xact. However, that would require two
3378 * PROCLOCK objects, and we cannot be sure that another PROCLOCK will be
3379 * available when it comes time for PostPrepare_Locks to do the deed.
3380 * So for now, we error out while we can still do so safely.
3381 *
3382 * Since the LOCALLOCK table stores a separate entry for each lockmode,
3383 * we can't implement this check by examining LOCALLOCK entries in isolation.
3384 * We must build a transient hashtable that is indexed by locktag only.
3385 */
3386static void
3388{
3389 typedef struct
3390 {
3391 LOCKTAG lock; /* identifies the lockable object */
3392 bool sessLock; /* is any lockmode held at session level? */
3393 bool xactLock; /* is any lockmode held at xact level? */
3395
3397 HTAB *lockhtab;
3398 HASH_SEQ_STATUS status;
3400
3401 /* Create a local hash table keyed by LOCKTAG only */
3402 hash_ctl.keysize = sizeof(LOCKTAG);
3403 hash_ctl.entrysize = sizeof(PerLockTagEntry);
3405
3406 lockhtab = hash_create("CheckForSessionAndXactLocks table",
3407 256, /* arbitrary initial size */
3408 &hash_ctl,
3410
3411 /* Scan local lock table to find entries for each LOCKTAG */
3413
3414 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3415 {
3416 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3418 bool found;
3419 int i;
3420
3421 /*
3422 * Ignore VXID locks. We don't want those to be held by prepared
3423 * transactions, since they aren't meaningful after a restart.
3424 */
3425 if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3426 continue;
3427
3428 /* Ignore it if we don't actually hold the lock */
3429 if (locallock->nLocks <= 0)
3430 continue;
3431
3432 /* Otherwise, find or make an entry in lockhtab */
3434 &locallock->tag.lock,
3435 HASH_ENTER, &found);
3436 if (!found) /* initialize, if newly created */
3437 hentry->sessLock = hentry->xactLock = false;
3438
3439 /* Scan to see if we hold lock at session or xact level or both */
3440 for (i = locallock->numLockOwners - 1; i >= 0; i--)
3441 {
3442 if (lockOwners[i].owner == NULL)
3443 hentry->sessLock = true;
3444 else
3445 hentry->xactLock = true;
3446 }
3447
3448 /*
3449 * We can throw error immediately when we see both types of locks; no
3450 * need to wait around to see if there are more violations.
3451 */
3452 if (hentry->sessLock && hentry->xactLock)
3453 ereport(ERROR,
3455 errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3456 }
3457
3458 /* Success, so clean up */
3460}
3461
3462/*
3463 * AtPrepare_Locks
3464 * Do the preparatory work for a PREPARE: make 2PC state file records
3465 * for all locks currently held.
3466 *
3467 * Session-level locks are ignored, as are VXID locks.
3468 *
3469 * For the most part, we don't need to touch shared memory for this ---
3470 * all the necessary state information is in the locallock table.
3471 * Fast-path locks are an exception, however: we move any such locks to
3472 * the main table before allowing PREPARE TRANSACTION to succeed.
3473 */
3474void
3475AtPrepare_Locks(void)
3476{
3477 HASH_SEQ_STATUS status;
3479
3480 /* First, verify there aren't locks of both xact and session level */
3482
3483 /* Now do the per-locallock cleanup work */
3485
3486 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3487 {
3488 TwoPhaseLockRecord record;
3489 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3490 bool haveSessionLock;
3491 bool haveXactLock;
3492 int i;
3493
3494 /*
3495 * Ignore VXID locks. We don't want those to be held by prepared
3496 * transactions, since they aren't meaningful after a restart.
3497 */
3498 if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3499 continue;
3500
3501 /* Ignore it if we don't actually hold the lock */
3502 if (locallock->nLocks <= 0)
3503 continue;
3504
3505 /* Scan to see whether we hold it at session or transaction level */
3506 haveSessionLock = haveXactLock = false;
3507 for (i = locallock->numLockOwners - 1; i >= 0; i--)
3508 {
3509 if (lockOwners[i].owner == NULL)
3510 haveSessionLock = true;
3511 else
3512 haveXactLock = true;
3513 }
3514
3515 /* Ignore it if we have only session lock */
3516 if (!haveXactLock)
3517 continue;
3518
3519 /* This can't happen, because we already checked it */
3520 if (haveSessionLock)
3521 ereport(ERROR,
3523 errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3524
3525 /*
3526 * If the local lock was taken via the fast-path, we need to move it
3527 * to the primary lock table, or just get a pointer to the existing
3528 * primary lock table entry if by chance it's already been
3529 * transferred.
3530 */
3531 if (locallock->proclock == NULL)
3532 {
3534 locallock->lock = locallock->proclock->tag.myLock;
3535 }
3536
3537 /*
3538 * Arrange to not release any strong lock count held by this lock
3539 * entry. We must retain the count until the prepared transaction is
3540 * committed or rolled back.
3541 */
3542 locallock->holdsStrongLockCount = false;
3543
3544 /*
3545 * Create a 2PC record.
3546 */
3547 memcpy(&(record.locktag), &(locallock->tag.lock), sizeof(LOCKTAG));
3548 record.lockmode = locallock->tag.mode;
3549
3551 &record, sizeof(TwoPhaseLockRecord));
3552 }
3553}
3554
3555/*
3556 * PostPrepare_Locks
3557 * Clean up after successful PREPARE
3558 *
3559 * Here, we want to transfer ownership of our locks to a dummy PGPROC
3560 * that's now associated with the prepared transaction, and we want to
3561 * clean out the corresponding entries in the LOCALLOCK table.
3562 *
3563 * Note: by removing the LOCALLOCK entries, we are leaving dangling
3564 * pointers in the transaction's resource owner. This is OK at the
3565 * moment since resowner.c doesn't try to free locks retail at a toplevel
3566 * transaction commit or abort. We could alternatively zero out nLocks
3567 * and leave the LOCALLOCK entries to be garbage-collected by LockReleaseAll,
3568 * but that probably costs more cycles.
3569 */
3570void
3572{
3573 PGPROC *newproc = TwoPhaseGetDummyProc(fxid, false);
3574 HASH_SEQ_STATUS status;
3576 LOCK *lock;
3577 PROCLOCK *proclock;
3579 int partition;
3580
3581 /* Can't prepare a lock group follower. */
3584
3585 /* This is a critical section: any error means big trouble */
3587
3588 /*
3589 * First we run through the locallock table and get rid of unwanted
3590 * entries, then we scan the process's proclocks and transfer them to the
3591 * target proc.
3592 *
3593 * We do this separately because we may have multiple locallock entries
3594 * pointing to the same proclock, and we daren't end up with any dangling
3595 * pointers.
3596 */
3598
3599 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3600 {
3601 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3602 bool haveSessionLock;
3603 bool haveXactLock;
3604 int i;
3605
3606 if (locallock->proclock == NULL || locallock->lock == NULL)
3607 {
3608 /*
3609 * We must've run out of shared memory while trying to set up this
3610 * lock. Just forget the local entry.
3611 */
3612 Assert(locallock->nLocks == 0);
3614 continue;
3615 }
3616
3617 /* Ignore VXID locks */
3618 if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3619 continue;
3620
3621 /* Scan to see whether we hold it at session or transaction level */
3622 haveSessionLock = haveXactLock = false;
3623 for (i = locallock->numLockOwners - 1; i >= 0; i--)
3624 {
3625 if (lockOwners[i].owner == NULL)
3626 haveSessionLock = true;
3627 else
3628 haveXactLock = true;
3629 }
3630
3631 /* Ignore it if we have only session lock */
3632 if (!haveXactLock)
3633 continue;
3634
3635 /* This can't happen, because we already checked it */
3636 if (haveSessionLock)
3637 ereport(PANIC,
3639 errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3640
3641 /* Mark the proclock to show we need to release this lockmode */
3642 if (locallock->nLocks > 0)
3643 locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
3644
3645 /* And remove the locallock hashtable entry */
3647 }
3648
3649 /*
3650 * Now, scan each lock partition separately.
3651 */
3653 {
3655 dlist_head *procLocks = &(MyProc->myProcLocks[partition]);
3657
3659
3660 /*
3661 * If the proclock list for this partition is empty, we can skip
3662 * acquiring the partition lock. This optimization is safer than the
3663 * situation in LockReleaseAll, because we got rid of any fast-path
3664 * locks during AtPrepare_Locks, so there cannot be any case where
3665 * another backend is adding something to our lists now. For safety,
3666 * though, we code this the same way as in LockReleaseAll.
3667 */
3668 if (dlist_is_empty(procLocks))
3669 continue; /* needn't examine this partition */
3670
3672
3674 {
3675 proclock = dlist_container(PROCLOCK, procLink, proclock_iter.cur);
3676
3677 Assert(proclock->tag.myProc == MyProc);
3678
3679 lock = proclock->tag.myLock;
3680
3681 /* Ignore VXID locks */
3683 continue;
3684
3685 PROCLOCK_PRINT("PostPrepare_Locks", proclock);
3686 LOCK_PRINT("PostPrepare_Locks", lock, 0);
3687 Assert(lock->nRequested >= 0);
3688 Assert(lock->nGranted >= 0);
3689 Assert(lock->nGranted <= lock->nRequested);
3690 Assert((proclock->holdMask & ~lock->grantMask) == 0);
3691
3692 /* Ignore it if nothing to release (must be a session lock) */
3693 if (proclock->releaseMask == 0)
3694 continue;
3695
3696 /* Else we should be releasing all locks */
3697 if (proclock->releaseMask != proclock->holdMask)
3698 elog(PANIC, "we seem to have dropped a bit somewhere");
3699
3700 /*
3701 * We cannot simply modify proclock->tag.myProc to reassign
3702 * ownership of the lock, because that's part of the hash key and
3703 * the proclock would then be in the wrong hash chain. Instead
3704 * use hash_update_hash_key. (We used to create a new hash entry,
3705 * but that risks out-of-memory failure if other processes are
3706 * busy making proclocks too.) We must unlink the proclock from
3707 * our procLink chain and put it into the new proc's chain, too.
3708 *
3709 * Note: the updated proclock hash key will still belong to the
3710 * same hash partition, cf proclock_hash(). So the partition lock
3711 * we already hold is sufficient for this.
3712 */
3713 dlist_delete(&proclock->procLink);
3714
3715 /*
3716 * Create the new hash key for the proclock.
3717 */
3718 proclocktag.myLock = lock;
3719 proclocktag.myProc = newproc;
3720
3721 /*
3722 * Update groupLeader pointer to point to the new proc. (We'd
3723 * better not be a member of somebody else's lock group!)
3724 */
3725 Assert(proclock->groupLeader == proclock->tag.myProc);
3726 proclock->groupLeader = newproc;
3727
3728 /*
3729 * Update the proclock. We should not find any existing entry for
3730 * the same hash key, since there can be only one entry for any
3731 * given lock with my own proc.
3732 */
3734 proclock,
3735 &proclocktag))
3736 elog(PANIC, "duplicate entry found while reassigning a prepared transaction's locks");
3737
3738 /* Re-link into the new proc's proclock list */
3739 dlist_push_tail(&newproc->myProcLocks[partition], &proclock->procLink);
3740
3741 PROCLOCK_PRINT("PostPrepare_Locks: updated", proclock);
3742 } /* loop over PROCLOCKs within this partition */
3743
3745 } /* loop over partitions */
3746
3748}
3749
3750
3751/*
3752 * Estimate shared-memory space used for lock tables
3753 */
3754Size
3756{
3757 Size size = 0;
3758 long max_table_size;
3759
3760 /* lock hash table */
3762 size = add_size(size, hash_estimate_size(max_table_size, sizeof(LOCK)));
3763
3764 /* proclock hash table */
3765 max_table_size *= 2;
3766 size = add_size(size, hash_estimate_size(max_table_size, sizeof(PROCLOCK)));
3767
3768 /*
3769 * Since NLOCKENTS is only an estimate, add 10% safety margin.
3770 */
3771 size = add_size(size, size / 10);
3772
3773 return size;
3774}
3775
3776/*
3777 * GetLockStatusData - Return a summary of the lock manager's internal
3778 * status, for use in a user-level reporting function.
3779 *
3780 * The return data consists of an array of LockInstanceData objects,
3781 * which are a lightly abstracted version of the PROCLOCK data structures,
3782 * i.e. there is one entry for each unique lock and interested PGPROC.
3783 * It is the caller's responsibility to match up related items (such as
3784 * references to the same lockable object or PGPROC) if wanted.
3785 *
3786 * The design goal is to hold the LWLocks for as short a time as possible;
3787 * thus, this function simply makes a copy of the necessary data and releases
3788 * the locks, allowing the caller to contemplate and format the data for as
3789 * long as it pleases.
3790 */
3791LockData *
3793{
3794 LockData *data;
3795 PROCLOCK *proclock;
3797 int els;
3798 int el;
3799 int i;
3800
3802
3803 /* Guess how much space we'll need. */
3804 els = MaxBackends;
3805 el = 0;
3807
3808 /*
3809 * First, we iterate through the per-backend fast-path arrays, locking
3810 * them one at a time. This might produce an inconsistent picture of the
3811 * system state, but taking all of those LWLocks at the same time seems
3812 * impractical (in particular, note MAX_SIMUL_LWLOCKS). It shouldn't
3813 * matter too much, because none of these locks can be involved in lock
3814 * conflicts anyway - anything that might must be present in the main lock
3815 * table. (For the same reason, we don't sweat about making leaderPid
3816 * completely valid. We cannot safely dereference another backend's
3817 * lockGroupLeader field without holding all lock partition locks, and
3818 * it's not worth that.)
3819 */
3820 for (i = 0; i < ProcGlobal->allProcCount; ++i)
3821 {
3822 PGPROC *proc = GetPGProcByNumber(i);
3823
3824 /* Skip backends with pid=0, as they don't hold fast-path locks */
3825 if (proc->pid == 0)
3826 continue;
3827
3829
3830 for (uint32 g = 0; g < FastPathLockGroupsPerBackend; g++)
3831 {
3832 /* Skip groups without registered fast-path locks */
3833 if (proc->fpLockBits[g] == 0)
3834 continue;
3835
3836 for (int j = 0; j < FP_LOCK_SLOTS_PER_GROUP; j++)
3837 {
3839 uint32 f = FAST_PATH_SLOT(g, j);
3841
3842 /* Skip unallocated slots */
3843 if (!lockbits)
3844 continue;
3845
3846 if (el >= els)
3847 {
3848 els += MaxBackends;
3849 data->locks = (LockInstanceData *)
3850 repalloc(data->locks, sizeof(LockInstanceData) * els);
3851 }
3852
3853 instance = &data->locks[el];
3855 proc->fpRelId[f]);
3857 instance->waitLockMode = NoLock;
3858 instance->vxid.procNumber = proc->vxid.procNumber;
3859 instance->vxid.localTransactionId = proc->vxid.lxid;
3860 instance->pid = proc->pid;
3861 instance->leaderPid = proc->pid;
3862 instance->fastpath = true;
3863
3864 /*
3865 * Successfully taking fast path lock means there were no
3866 * conflicting locks.
3867 */
3868 instance->waitStart = 0;
3869
3870 el++;
3871 }
3872 }
3873
3874 if (proc->fpVXIDLock)
3875 {
3878
3879 if (el >= els)
3880 {
3881 els += MaxBackends;
3882 data->locks = (LockInstanceData *)
3883 repalloc(data->locks, sizeof(LockInstanceData) * els);
3884 }
3885
3886 vxid.procNumber = proc->vxid.procNumber;
3888
3889 instance = &data->locks[el];
3891 instance->holdMask = LOCKBIT_ON(ExclusiveLock);
3892 instance->waitLockMode = NoLock;
3893 instance->vxid.procNumber = proc->vxid.procNumber;
3894 instance->vxid.localTransactionId = proc->vxid.lxid;
3895 instance->pid = proc->pid;
3896 instance->leaderPid = proc->pid;
3897 instance->fastpath = true;
3898 instance->waitStart = 0;
3899
3900 el++;
3901 }
3902
3903 LWLockRelease(&proc->fpInfoLock);
3904 }
3905
3906 /*
3907 * Next, acquire lock on the entire shared lock data structure. We do
3908 * this so that, at least for locks in the primary lock table, the state
3909 * will be self-consistent.
3910 *
3911 * Since this is a read-only operation, we take shared instead of
3912 * exclusive lock. There's not a whole lot of point to this, because all
3913 * the normal operations require exclusive lock, but it doesn't hurt
3914 * anything either. It will at least allow two backends to do
3915 * GetLockStatusData in parallel.
3916 *
3917 * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
3918 */
3919 for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
3921
3922 /* Now we can safely count the number of proclocks */
3924 if (data->nelements > els)
3925 {
3926 els = data->nelements;
3927 data->locks = (LockInstanceData *)
3928 repalloc(data->locks, sizeof(LockInstanceData) * els);
3929 }
3930
3931 /* Now scan the tables to copy the data */
3933
3934 while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
3935 {
3936 PGPROC *proc = proclock->tag.myProc;
3937 LOCK *lock = proclock->tag.myLock;
3938 LockInstanceData *instance = &data->locks[el];
3939
3940 memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
3941 instance->holdMask = proclock->holdMask;
3942 if (proc->waitLock == proclock->tag.myLock)
3943 instance->waitLockMode = proc->waitLockMode;
3944 else
3945 instance->waitLockMode = NoLock;
3946 instance->vxid.procNumber = proc->vxid.procNumber;
3947 instance->vxid.localTransactionId = proc->vxid.lxid;
3948 instance->pid = proc->pid;
3949 instance->leaderPid = proclock->groupLeader->pid;
3950 instance->fastpath = false;
3951 instance->waitStart = (TimestampTz) pg_atomic_read_u64(&proc->waitStart);
3952
3953 el++;
3954 }
3955
3956 /*
3957 * And release locks. We do this in reverse order for two reasons: (1)
3958 * Anyone else who needs more than one of the locks will be trying to lock
3959 * them in increasing order; we don't want to release the other process
3960 * until it can get all the locks it needs. (2) This avoids O(N^2)
3961 * behavior inside LWLockRelease.
3962 */
3963 for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
3965
3966 Assert(el == data->nelements);
3967
3968 return data;
3969}
3970
3971/*
3972 * GetBlockerStatusData - Return a summary of the lock manager's state
3973 * concerning locks that are blocking the specified PID or any member of
3974 * the PID's lock group, for use in a user-level reporting function.
3975 *
3976 * For each PID within the lock group that is awaiting some heavyweight lock,
3977 * the return data includes an array of LockInstanceData objects, which are
3978 * the same data structure used by GetLockStatusData; but unlike that function,
3979 * this one reports only the PROCLOCKs associated with the lock that that PID
3980 * is blocked on. (Hence, all the locktags should be the same for any one
3981 * blocked PID.) In addition, we return an array of the PIDs of those backends
3982 * that are ahead of the blocked PID in the lock's wait queue. These can be
3983 * compared with the PIDs in the LockInstanceData objects to determine which
3984 * waiters are ahead of or behind the blocked PID in the queue.
3985 *
3986 * If blocked_pid isn't a valid backend PID or nothing in its lock group is
3987 * waiting on any heavyweight lock, return empty arrays.
3988 *
3989 * The design goal is to hold the LWLocks for as short a time as possible;
3990 * thus, this function simply makes a copy of the necessary data and releases
3991 * the locks, allowing the caller to contemplate and format the data for as
3992 * long as it pleases.
3993 */
3996{
3998 PGPROC *proc;
3999 int i;
4000
4002
4003 /*
4004 * Guess how much space we'll need, and preallocate. Most of the time
4005 * this will avoid needing to do repalloc while holding the LWLocks. (We
4006 * assume, but check with an Assert, that MaxBackends is enough entries
4007 * for the procs[] array; the other two could need enlargement, though.)
4008 */
4009 data->nprocs = data->nlocks = data->npids = 0;
4010 data->maxprocs = data->maxlocks = data->maxpids = MaxBackends;
4011 data->procs = palloc_array(BlockedProcData, data->maxprocs);
4012 data->locks = palloc_array(LockInstanceData, data->maxlocks);
4013 data->waiter_pids = palloc_array(int, data->maxpids);
4014
4015 /*
4016 * In order to search the ProcArray for blocked_pid and assume that that
4017 * entry won't immediately disappear under us, we must hold ProcArrayLock.
4018 * In addition, to examine the lock grouping fields of any other backend,
4019 * we must hold all the hash partition locks. (Only one of those locks is
4020 * actually relevant for any one lock group, but we can't know which one
4021 * ahead of time.) It's fairly annoying to hold all those locks
4022 * throughout this, but it's no worse than GetLockStatusData(), and it
4023 * does have the advantage that we're guaranteed to return a
4024 * self-consistent instantaneous state.
4025 */
4027
4029
4030 /* Nothing to do if it's gone */
4031 if (proc != NULL)
4032 {
4033 /*
4034 * Acquire lock on the entire shared lock data structure. See notes
4035 * in GetLockStatusData().
4036 */
4037 for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
4039
4040 if (proc->lockGroupLeader == NULL)
4041 {
4042 /* Easy case, proc is not a lock group member */
4044 }
4045 else
4046 {
4047 /* Examine all procs in proc's lock group */
4048 dlist_iter iter;
4049
4051 {
4053
4054 memberProc = dlist_container(PGPROC, lockGroupLink, iter.cur);
4056 }
4057 }
4058
4059 /*
4060 * And release locks. See notes in GetLockStatusData().
4061 */
4062 for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
4064
4065 Assert(data->nprocs <= data->maxprocs);
4066 }
4067
4069
4070 return data;
4071}
4072
4073/* Accumulate data about one possibly-blocked proc for GetBlockerStatusData */
4074static void
4076{
4077 LOCK *theLock = blocked_proc->waitLock;
4082 int queue_size;
4083
4084 /* Nothing to do if this proc is not blocked */
4085 if (theLock == NULL)
4086 return;
4087
4088 /* Set up a procs[] element */
4089 bproc = &data->procs[data->nprocs++];
4090 bproc->pid = blocked_proc->pid;
4091 bproc->first_lock = data->nlocks;
4092 bproc->first_waiter = data->npids;
4093
4094 /*
4095 * We may ignore the proc's fast-path arrays, since nothing in those could
4096 * be related to a contended lock.
4097 */
4098
4099 /* Collect all PROCLOCKs associated with theLock */
4100 dlist_foreach(proclock_iter, &theLock->procLocks)
4101 {
4102 PROCLOCK *proclock =
4103 dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
4104 PGPROC *proc = proclock->tag.myProc;
4105 LOCK *lock = proclock->tag.myLock;
4107
4108 if (data->nlocks >= data->maxlocks)
4109 {
4110 data->maxlocks += MaxBackends;
4111 data->locks = (LockInstanceData *)
4112 repalloc(data->locks, sizeof(LockInstanceData) * data->maxlocks);
4113 }
4114
4115 instance = &data->locks[data->nlocks];
4116 memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
4117 instance->holdMask = proclock->holdMask;
4118 if (proc->waitLock == lock)
4119 instance->waitLockMode = proc->waitLockMode;
4120 else
4121 instance->waitLockMode = NoLock;
4122 instance->vxid.procNumber = proc->vxid.procNumber;
4123 instance->vxid.localTransactionId = proc->vxid.lxid;
4124 instance->pid = proc->pid;
4125 instance->leaderPid = proclock->groupLeader->pid;
4126 instance->fastpath = false;
4127 data->nlocks++;
4128 }
4129
4130 /* Enlarge waiter_pids[] if it's too small to hold all wait queue PIDs */
4131 waitQueue = &(theLock->waitProcs);
4132 queue_size = dclist_count(waitQueue);
4133
4134 if (queue_size > data->maxpids - data->npids)
4135 {
4136 data->maxpids = Max(data->maxpids + MaxBackends,
4137 data->npids + queue_size);
4138 data->waiter_pids = (int *) repalloc(data->waiter_pids,
4139 sizeof(int) * data->maxpids);
4140 }
4141
4142 /* Collect PIDs from the lock's wait queue, stopping at blocked_proc */
4144 {
4146
4148 break;
4149 data->waiter_pids[data->npids++] = queued_proc->pid;
4150 queued_proc = (PGPROC *) queued_proc->links.next;
4151 }
4152
4153 bproc->num_locks = data->nlocks - bproc->first_lock;
4154 bproc->num_waiters = data->npids - bproc->first_waiter;
4155}
4156
4157/*
4158 * Returns a list of currently held AccessExclusiveLocks, for use by
4159 * LogStandbySnapshot(). The result is a palloc'd array,
4160 * with the number of elements returned into *nlocks.
4161 *
4162 * XXX This currently takes a lock on all partitions of the lock table,
4163 * but it's possible to do better. By reference counting locks and storing
4164 * the value in the ProcArray entry for each backend we could tell if any
4165 * locks need recording without having to acquire the partition locks and
4166 * scan the lock table. Whether that's worth the additional overhead
4167 * is pretty dubious though.
4168 */
4170GetRunningTransactionLocks(int *nlocks)
4171{
4173 PROCLOCK *proclock;
4175 int i;
4176 int index;
4177 int els;
4178
4179 /*
4180 * Acquire lock on the entire shared lock data structure.
4181 *
4182 * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
4183 */
4184 for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
4186
4187 /* Now we can safely count the number of proclocks */
4189
4190 /*
4191 * Allocating enough space for all locks in the lock table is overkill,
4192 * but it's more convenient and faster than having to enlarge the array.
4193 */
4195
4196 /* Now scan the tables to copy the data */
4198
4199 /*
4200 * If lock is a currently granted AccessExclusiveLock then it will have
4201 * just one proclock holder, so locks are never accessed twice in this
4202 * particular case. Don't copy this code for use elsewhere because in the
4203 * general case this will give you duplicate locks when looking at
4204 * non-exclusive lock types.
4205 */
4206 index = 0;
4207 while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
4208 {
4209 /* make sure this definition matches the one used in LockAcquire */
4210 if ((proclock->holdMask & LOCKBIT_ON(AccessExclusiveLock)) &&
4212 {
4213 PGPROC *proc = proclock->tag.myProc;
4214 LOCK *lock = proclock->tag.myLock;
4215 TransactionId xid = proc->xid;
4216
4217 /*
4218 * Don't record locks for transactions if we know they have
4219 * already issued their WAL record for commit but not yet released
4220 * lock. It is still possible that we see locks held by already
4221 * complete transactions, if they haven't yet zeroed their xids.
4222 */
4223 if (!TransactionIdIsValid(xid))
4224 continue;
4225
4226 accessExclusiveLocks[index].xid = xid;
4229
4230 index++;
4231 }
4232 }
4233
4234 Assert(index <= els);
4235
4236 /*
4237 * And release locks. We do this in reverse order for two reasons: (1)
4238 * Anyone else who needs more than one of the locks will be trying to lock
4239 * them in increasing order; we don't want to release the other process
4240 * until it can get all the locks it needs. (2) This avoids O(N^2)
4241 * behavior inside LWLockRelease.
4242 */
4243 for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
4245
4246 *nlocks = index;
4247 return accessExclusiveLocks;
4248}
4249
4250/* Provide the textual name of any lock mode */
4251const char *
4253{
4255 Assert(mode > 0 && mode <= LockMethods[lockmethodid]->numLockModes);
4257}
4258
4259#ifdef LOCK_DEBUG
4260/*
4261 * Dump all locks in the given proc's myProcLocks lists.
4262 *
4263 * Caller is responsible for having acquired appropriate LWLocks.
4264 */
4265void
4266DumpLocks(PGPROC *proc)
4267{
4268 int i;
4269
4270 if (proc == NULL)
4271 return;
4272
4273 if (proc->waitLock)
4274 LOCK_PRINT("DumpLocks: waiting on", proc->waitLock, 0);
4275
4276 for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
4277 {
4278 dlist_head *procLocks = &proc->myProcLocks[i];
4279 dlist_iter iter;
4280
4281 dlist_foreach(iter, procLocks)
4282 {
4283 PROCLOCK *proclock = dlist_container(PROCLOCK, procLink, iter.cur);
4284 LOCK *lock = proclock->tag.myLock;
4285
4286 Assert(proclock->tag.myProc == proc);
4287 PROCLOCK_PRINT("DumpLocks", proclock);
4288 LOCK_PRINT("DumpLocks", lock, 0);
4289 }
4290 }
4291}
4292
4293/*
4294 * Dump all lmgr locks.
4295 *
4296 * Caller is responsible for having acquired appropriate LWLocks.
4297 */
4298void
4299DumpAllLocks(void)
4300{
4301 PGPROC *proc;
4302 PROCLOCK *proclock;
4303 LOCK *lock;
4304 HASH_SEQ_STATUS status;
4305
4306 proc = MyProc;
4307
4308 if (proc && proc->waitLock)
4309 LOCK_PRINT("DumpAllLocks: waiting on", proc->waitLock, 0);
4310
4312
4313 while ((proclock = (PROCLOCK *) hash_seq_search(&status)) != NULL)
4314 {
4315 PROCLOCK_PRINT("DumpAllLocks", proclock);
4316
4317 lock = proclock->tag.myLock;
4318 if (lock)
4319 LOCK_PRINT("DumpAllLocks", lock, 0);
4320 else
4321 elog(LOG, "DumpAllLocks: proclock->tag.myLock = NULL");
4322 }
4323}
4324#endif /* LOCK_DEBUG */
4325
4326/*
4327 * LOCK 2PC resource manager's routines
4328 */
4329
4330/*
4331 * Re-acquire a lock belonging to a transaction that was prepared.
4332 *
4333 * Because this function is run at db startup, re-acquiring the locks should
4334 * never conflict with running transactions because there are none. We
4335 * assume that the lock state represented by the stored 2PC files is legal.
4336 *
4337 * When switching from Hot Standby mode to normal operation, the locks will
4338 * be already held by the startup process. The locks are acquired for the new
4339 * procs without checking for conflicts, so we don't get a conflict between the
4340 * startup process and the dummy procs, even though we will momentarily have
4341 * a situation where two procs are holding the same AccessExclusiveLock,
4342 * which isn't normally possible because the conflict. If we're in standby
4343 * mode, but a recovery snapshot hasn't been established yet, it's possible
4344 * that some but not all of the locks are already held by the startup process.
4345 *
4346 * This approach is simple, but also a bit dangerous, because if there isn't
4347 * enough shared memory to acquire the locks, an error will be thrown, which
4348 * is promoted to FATAL and recovery will abort, bringing down postmaster.
4349 * A safer approach would be to transfer the locks like we do in
4350 * AtPrepare_Locks, but then again, in hot standby mode it's possible for
4351 * read-only backends to use up all the shared lock memory anyway, so that
4352 * replaying the WAL record that needs to acquire a lock will throw an error
4353 * and PANIC anyway.
4354 */
4355void
4357 void *recdata, uint32 len)
4358{
4360 PGPROC *proc = TwoPhaseGetDummyProc(fxid, false);
4361 LOCKTAG *locktag;
4362 LOCKMODE lockmode;
4364 LOCK *lock;
4365 PROCLOCK *proclock;
4367 bool found;
4368 uint32 hashcode;
4370 int partition;
4373
4374 Assert(len == sizeof(TwoPhaseLockRecord));
4375 locktag = &rec->locktag;
4376 lockmode = rec->lockmode;
4378
4380 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4382
4383 hashcode = LockTagHashCode(locktag);
4384 partition = LockHashPartition(hashcode);
4386
4388
4389 /*
4390 * Find or create a lock with this tag.
4391 */
4393 locktag,
4394 hashcode,
4396 &found);
4397 if (!lock)
4398 {
4400 ereport(ERROR,
4402 errmsg("out of shared memory"),
4403 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
4404 }
4405
4406 /*
4407 * if it's a new lock object, initialize it
4408 */
4409 if (!found)
4410 {
4411 lock->grantMask = 0;
4412 lock->waitMask = 0;
4413 dlist_init(&lock->procLocks);
4414 dclist_init(&lock->waitProcs);
4415 lock->nRequested = 0;
4416 lock->nGranted = 0;
4417 MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
4418 MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
4419 LOCK_PRINT("lock_twophase_recover: new", lock, lockmode);
4420 }
4421 else
4422 {
4423 LOCK_PRINT("lock_twophase_recover: found", lock, lockmode);
4424 Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
4425 Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
4426 Assert(lock->nGranted <= lock->nRequested);
4427 }
4428
4429 /*
4430 * Create the hash key for the proclock table.
4431 */
4432 proclocktag.myLock = lock;
4433 proclocktag.myProc = proc;
4434
4436
4437 /*
4438 * Find or create a proclock entry with this tag
4439 */
4441 &proclocktag,
4444 &found);
4445 if (!proclock)
4446 {
4447 /* Oops, not enough shmem for the proclock */
4448 if (lock->nRequested == 0)
4449 {
4450 /*
4451 * There are no other requestors of this lock, so garbage-collect
4452 * the lock object. We *must* do this to avoid a permanent leak
4453 * of shared memory, because there won't be anything to cause
4454 * anyone to release the lock object later.
4455 */
4458 &(lock->tag),
4459 hashcode,
4461 NULL))
4462 elog(PANIC, "lock table corrupted");
4463 }
4465 ereport(ERROR,
4467 errmsg("out of shared memory"),
4468 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
4469 }
4470
4471 /*
4472 * If new, initialize the new entry
4473 */
4474 if (!found)
4475 {
4476 Assert(proc->lockGroupLeader == NULL);
4477 proclock->groupLeader = proc;
4478 proclock->holdMask = 0;
4479 proclock->releaseMask = 0;
4480 /* Add proclock to appropriate lists */
4481 dlist_push_tail(&lock->procLocks, &proclock->lockLink);
4483 &proclock->procLink);
4484 PROCLOCK_PRINT("lock_twophase_recover: new", proclock);
4485 }
4486 else
4487 {
4488 PROCLOCK_PRINT("lock_twophase_recover: found", proclock);
4489 Assert((proclock->holdMask & ~lock->grantMask) == 0);
4490 }
4491
4492 /*
4493 * lock->nRequested and lock->requested[] count the total number of
4494 * requests, whether granted or waiting, so increment those immediately.
4495 */
4496 lock->nRequested++;
4497 lock->requested[lockmode]++;
4498 Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
4499
4500 /*
4501 * We shouldn't already hold the desired lock.
4502 */
4503 if (proclock->holdMask & LOCKBIT_ON(lockmode))
4504 elog(ERROR, "lock %s on object %u/%u/%u is already held",
4505 lockMethodTable->lockModeNames[lockmode],
4506 lock->tag.locktag_field1, lock->tag.locktag_field2,
4507 lock->tag.locktag_field3);
4508
4509 /*
4510 * We ignore any possible conflicts and just grant ourselves the lock. Not
4511 * only because we don't bother, but also to avoid deadlocks when
4512 * switching from standby to normal mode. See function comment.
4513 */
4514 GrantLock(lock, proclock, lockmode);
4515
4516 /*
4517 * Bump strong lock count, to make sure any fast-path lock requests won't
4518 * be granted without consulting the primary lock table.
4519 */
4520 if (ConflictsWithRelationFastPath(&lock->tag, lockmode))
4521 {
4523
4527 }
4528
4530}
4531
4532/*
4533 * Re-acquire a lock belonging to a transaction that was prepared, when
4534 * starting up into hot standby mode.
4535 */
4536void
4538 void *recdata, uint32 len)
4539{
4541 LOCKTAG *locktag;
4542 LOCKMODE lockmode;
4544
4545 Assert(len == sizeof(TwoPhaseLockRecord));
4546 locktag = &rec->locktag;
4547 lockmode = rec->lockmode;
4549
4551 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4552
4553 if (lockmode == AccessExclusiveLock &&
4554 locktag->locktag_type == LOCKTAG_RELATION)
4555 {
4557 locktag->locktag_field1 /* dboid */ ,
4558 locktag->locktag_field2 /* reloid */ );
4559 }
4560}
4561
4562
4563/*
4564 * 2PC processing routine for COMMIT PREPARED case.
4565 *
4566 * Find and release the lock indicated by the 2PC record.
4567 */
4568void
4570 void *recdata, uint32 len)
4571{
4573 PGPROC *proc = TwoPhaseGetDummyProc(fxid, true);
4574 LOCKTAG *locktag;
4577
4578 Assert(len == sizeof(TwoPhaseLockRecord));
4579 locktag = &rec->locktag;
4581
4583 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4585
4586 LockRefindAndRelease(lockMethodTable, proc, locktag, rec->lockmode, true);
4587}
4588
4589/*
4590 * 2PC processing routine for ROLLBACK PREPARED case.
4591 *
4592 * This is actually just the same as the COMMIT case.
4593 */
4594void
4596 void *recdata, uint32 len)
4597{
4598 lock_twophase_postcommit(fxid, info, recdata, len);
4599}
4600
4601/*
4602 * VirtualXactLockTableInsert
4603 *
4604 * Take vxid lock via the fast-path. There can't be any pre-existing
4605 * lockers, as we haven't advertised this vxid via the ProcArray yet.
4606 *
4607 * Since MyProc->fpLocalTransactionId will normally contain the same data
4608 * as MyProc->vxid.lxid, you might wonder if we really need both. The
4609 * difference is that MyProc->vxid.lxid is set and cleared unlocked, and
4610 * examined by procarray.c, while fpLocalTransactionId is protected by
4611 * fpInfoLock and is used only by the locking subsystem. Doing it this
4612 * way makes it easier to verify that there are no funny race conditions.
4613 *
4614 * We don't bother recording this lock in the local lock table, since it's
4615 * only ever released at the end of a transaction. Instead,
4616 * LockReleaseAll() calls VirtualXactLockTableCleanup().
4617 */
4618void
4620{
4622
4624
4627 Assert(MyProc->fpVXIDLock == false);
4628
4629 MyProc->fpVXIDLock = true;
4631
4633}
4634
4635/*
4636 * VirtualXactLockTableCleanup
4637 *
4638 * Check whether a VXID lock has been materialized; if so, release it,
4639 * unblocking waiters.
4640 */
4641void
4643{
4644 bool fastpath;
4645 LocalTransactionId lxid;
4646
4648
4649 /*
4650 * Clean up shared memory state.
4651 */
4653
4654 fastpath = MyProc->fpVXIDLock;
4656 MyProc->fpVXIDLock = false;
4658
4660
4661 /*
4662 * If fpVXIDLock has been cleared without touching fpLocalTransactionId,
4663 * that means someone transferred the lock to the main lock table.
4664 */
4665 if (!fastpath && LocalTransactionIdIsValid(lxid))
4666 {
4668 LOCKTAG locktag;
4669
4670 vxid.procNumber = MyProcNumber;
4671 vxid.localTransactionId = lxid;
4672 SET_LOCKTAG_VIRTUALTRANSACTION(locktag, vxid);
4673
4675 &locktag, ExclusiveLock, false);
4676 }
4677}
4678
4679/*
4680 * XactLockForVirtualXact
4681 *
4682 * If TransactionIdIsValid(xid), this is essentially XactLockTableWait(xid,
4683 * NULL, NULL, XLTW_None) or ConditionalXactLockTableWait(xid). Unlike those
4684 * functions, it assumes "xid" is never a subtransaction and that "xid" is
4685 * prepared, committed, or aborted.
4686 *
4687 * If !TransactionIdIsValid(xid), this locks every prepared XID having been
4688 * known as "vxid" before its PREPARE TRANSACTION.
4689 */
4690static bool
4692 TransactionId xid, bool wait)
4693{
4694 bool more = false;
4695
4696 /* There is no point to wait for 2PCs if you have no 2PCs. */
4697 if (max_prepared_xacts == 0)
4698 return true;
4699
4700 do
4701 {
4703 LOCKTAG tag;
4704
4705 /* Clear state from previous iterations. */
4706 if (more)
4707 {
4709 more = false;
4710 }
4711
4712 /* If we have no xid, try to find one. */
4713 if (!TransactionIdIsValid(xid))
4714 xid = TwoPhaseGetXidByVirtualXID(vxid, &more);
4715 if (!TransactionIdIsValid(xid))
4716 {
4717 Assert(!more);
4718 return true;
4719 }
4720
4721 /* Check or wait for XID completion. */
4722 SET_LOCKTAG_TRANSACTION(tag, xid);
4723 lar = LockAcquire(&tag, ShareLock, false, !wait);
4725 return false;
4726 LockRelease(&tag, ShareLock, false);
4727 } while (more);
4728
4729 return true;
4730}
4731
4732/*
4733 * VirtualXactLock
4734 *
4735 * If wait = true, wait as long as the given VXID or any XID acquired by the
4736 * same transaction is still running. Then, return true.
4737 *
4738 * If wait = false, just check whether that VXID or one of those XIDs is still
4739 * running, and return true or false.
4740 */
4741bool
4743{
4744 LOCKTAG tag;
4745 PGPROC *proc;
4747
4749
4751 /* no vxid lock; localTransactionId is a normal, locked XID */
4752 return XactLockForVirtualXact(vxid, vxid.localTransactionId, wait);
4753
4755
4756 /*
4757 * If a lock table entry must be made, this is the PGPROC on whose behalf
4758 * it must be done. Note that the transaction might end or the PGPROC
4759 * might be reassigned to a new backend before we get around to examining
4760 * it, but it doesn't matter. If we find upon examination that the
4761 * relevant lxid is no longer running here, that's enough to prove that
4762 * it's no longer running anywhere.
4763 */
4764 proc = ProcNumberGetProc(vxid.procNumber);
4765 if (proc == NULL)
4766 return XactLockForVirtualXact(vxid, InvalidTransactionId, wait);
4767
4768 /*
4769 * We must acquire this lock before checking the procNumber and lxid
4770 * against the ones we're waiting for. The target backend will only set
4771 * or clear lxid while holding this lock.
4772 */
4774
4775 if (proc->vxid.procNumber != vxid.procNumber
4777 {
4778 /* VXID ended */
4779 LWLockRelease(&proc->fpInfoLock);
4780 return XactLockForVirtualXact(vxid, InvalidTransactionId, wait);
4781 }
4782
4783 /*
4784 * If we aren't asked to wait, there's no need to set up a lock table
4785 * entry. The transaction is still in progress, so just return false.
4786 */
4787 if (!wait)
4788 {
4789 LWLockRelease(&proc->fpInfoLock);
4790 return false;
4791 }
4792
4793 /*
4794 * OK, we're going to need to sleep on the VXID. But first, we must set
4795 * up the primary lock table entry, if needed (ie, convert the proc's
4796 * fast-path lock on its VXID to a regular lock).
4797 */
4798 if (proc->fpVXIDLock)
4799 {
4800 PROCLOCK *proclock;
4801 uint32 hashcode;
4803
4804 hashcode = LockTagHashCode(&tag);
4805
4808
4810 &tag, hashcode, ExclusiveLock);
4811 if (!proclock)
4812 {
4814 LWLockRelease(&proc->fpInfoLock);
4815 ereport(ERROR,
4817 errmsg("out of shared memory"),
4818 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
4819 }
4820 GrantLock(proclock->tag.myLock, proclock, ExclusiveLock);
4821
4823
4824 proc->fpVXIDLock = false;
4825 }
4826
4827 /*
4828 * If the proc has an XID now, we'll avoid a TwoPhaseGetXidByVirtualXID()
4829 * search. The proc might have assigned this XID but not yet locked it,
4830 * in which case the proc will lock this XID before releasing the VXID.
4831 * The fpInfoLock critical section excludes VirtualXactLockTableCleanup(),
4832 * so we won't save an XID of a different VXID. It doesn't matter whether
4833 * we save this before or after setting up the primary lock table entry.
4834 */
4835 xid = proc->xid;
4836
4837 /* Done with proc->fpLockBits */
4838 LWLockRelease(&proc->fpInfoLock);
4839
4840 /* Time to wait. */
4841 (void) LockAcquire(&tag, ShareLock, false, false);
4842
4843 LockRelease(&tag, ShareLock, false);
4844 return XactLockForVirtualXact(vxid, xid, wait);
4845}
4846
4847/*
4848 * LockWaiterCount
4849 *
4850 * Find the number of lock requester on this locktag
4851 */
4852int
4853LockWaiterCount(const LOCKTAG *locktag)
4854{
4856 LOCK *lock;
4857 bool found;
4858 uint32 hashcode;
4860 int waiters = 0;
4861
4863 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4864
4865 hashcode = LockTagHashCode(locktag);
4868
4870 locktag,
4871 hashcode,
4872 HASH_FIND,
4873 &found);
4874 if (found)
4875 {
4876 Assert(lock != NULL);
4877 waiters = lock->nRequested;
4878 }
4880
4881 return waiters;
4882}
static uint64 pg_atomic_read_u64(volatile pg_atomic_uint64 *ptr)
Definition atomics.h:467
#define Max(x, y)
Definition c.h:991
#define Assert(condition)
Definition c.h:873
int64_t int64
Definition c.h:543
uint16_t uint16
Definition c.h:545
uint32_t uint32
Definition c.h:546
#define lengthof(array)
Definition c.h:803
uint32 LocalTransactionId
Definition c.h:668
#define MemSet(start, val, len)
Definition c.h:1013
uint32 TransactionId
Definition c.h:666
size_t Size
Definition c.h:619
int64 TimestampTz
Definition timestamp.h:39
void DeadLockReport(void)
Definition deadlock.c:1075
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition dynahash.c:952
Size hash_estimate_size(int64 num_entries, Size entrysize)
Definition dynahash.c:783
HTAB * hash_create(const char *tabname, int64 nelem, const HASHCTL *info, int flags)
Definition dynahash.c:358
void hash_destroy(HTAB *hashp)
Definition dynahash.c:865
void * hash_search_with_hash_value(HTAB *hashp, const void *keyPtr, uint32 hashvalue, HASHACTION action, bool *foundPtr)
Definition dynahash.c:965
void * hash_seq_search(HASH_SEQ_STATUS *status)
Definition dynahash.c:1415
int64 hash_get_num_entries(HTAB *hashp)
Definition dynahash.c:1336
bool hash_update_hash_key(HTAB *hashp, void *existingEntry, const void *newKeyPtr)
Definition dynahash.c:1140
uint32 get_hash_value(HTAB *hashp, const void *keyPtr)
Definition dynahash.c:908
void hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp)
Definition dynahash.c:1380
ErrorContextCallback * error_context_stack
Definition elog.c:95
int errhint(const char *fmt,...)
Definition elog.c:1330
int errcode(int sqlerrcode)
Definition elog.c:863
int errdetail_log_plural(const char *fmt_singular, const char *fmt_plural, unsigned long n,...)
Definition elog.c:1285
int errmsg(const char *fmt,...)
Definition elog.c:1080
#define LOG
Definition elog.h:31
#define PG_RE_THROW()
Definition elog.h:405
#define errcontext
Definition elog.h:198
#define PG_TRY(...)
Definition elog.h:372
#define WARNING
Definition elog.h:36
#define PG_END_TRY(...)
Definition elog.h:397
#define PANIC
Definition elog.h:42
#define ERROR
Definition elog.h:39
#define PG_CATCH(...)
Definition elog.h:382
#define elog(elevel,...)
Definition elog.h:226
#define ereport(elevel,...)
Definition elog.h:150
#define palloc_object(type)
Definition fe_memutils.h:74
#define palloc_array(type, count)
Definition fe_memutils.h:76
#define palloc0_array(type, count)
Definition fe_memutils.h:77
int MyProcPid
Definition globals.c:47
ProcNumber MyProcNumber
Definition globals.c:90
int MaxBackends
Definition globals.c:146
@ HASH_FIND
Definition hsearch.h:113
@ HASH_REMOVE
Definition hsearch.h:115
@ HASH_ENTER
Definition hsearch.h:114
@ HASH_ENTER_NULL
Definition hsearch.h:116
#define HASH_CONTEXT
Definition hsearch.h:102
#define HASH_ELEM
Definition hsearch.h:95
#define HASH_FUNCTION
Definition hsearch.h:98
#define HASH_BLOBS
Definition hsearch.h:97
#define HASH_PARTITION
Definition hsearch.h:92
#define dlist_foreach(iter, lhead)
Definition ilist.h:623
static void dlist_init(dlist_head *head)
Definition ilist.h:314
static void dlist_delete(dlist_node *node)
Definition ilist.h:405
static uint32 dclist_count(const dclist_head *head)
Definition ilist.h:932
static bool dclist_is_empty(const dclist_head *head)
Definition ilist.h:682
#define dlist_foreach_modify(iter, lhead)
Definition ilist.h:640
static bool dlist_is_empty(const dlist_head *head)
Definition ilist.h:336
static void dlist_push_tail(dlist_head *head, dlist_node *node)
Definition ilist.h:364
static void dclist_delete_from_thoroughly(dclist_head *head, dlist_node *node)
Definition ilist.h:776
static void dclist_init(dclist_head *head)
Definition ilist.h:671
#define dlist_container(type, membername, ptr)
Definition ilist.h:593
#define dclist_foreach(iter, lhead)
Definition ilist.h:970
int j
Definition isn.c:78
int i
Definition isn.c:77
void DescribeLockTag(StringInfo buf, const LOCKTAG *tag)
Definition lmgr.c:1249
static bool XactLockForVirtualXact(VirtualTransactionId vxid, TransactionId xid, bool wait)
Definition lock.c:4692
LockAcquireResult LockAcquire(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock, bool dontWait)
Definition lock.c:809
static LOCALLOCK * awaitedLock
Definition lock.c:328
static void RemoveLocalLock(LOCALLOCK *locallock)
Definition lock.c:1476
static void LockReassignOwner(LOCALLOCK *locallock, ResourceOwner parent)
Definition lock.c:2736
bool LockHeldByMe(const LOCKTAG *locktag, LOCKMODE lockmode, bool orstronger)
Definition lock.c:643
void lock_twophase_postabort(FullTransactionId fxid, uint16 info, void *recdata, uint32 len)
Definition lock.c:4596
#define LOCK_PRINT(where, lock, type)
Definition lock.c:405
void PostPrepare_Locks(FullTransactionId fxid)
Definition lock.c:3572
void lock_twophase_standby_recover(FullTransactionId fxid, uint16 info, void *recdata, uint32 len)
Definition lock.c:4538
bool DoLockModesConflict(LOCKMODE mode1, LOCKMODE mode2)
Definition lock.c:623
static PROCLOCK * SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc, const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode)
Definition lock.c:1283
static PROCLOCK * FastPathGetRelationLockEntry(LOCALLOCK *locallock)
Definition lock.c:2958
void VirtualXactLockTableInsert(VirtualTransactionId vxid)
Definition lock.c:4620
#define NLOCKENTS()
Definition lock.c:56
#define FastPathStrongLockHashPartition(hashcode)
Definition lock.c:303
static uint32 ProcLockHashCode(const PROCLOCKTAG *proclocktag, uint32 hashcode)
Definition lock.c:605
#define FAST_PATH_CHECK_LOCKMODE(proc, n, l)
Definition lock.c:256
void GrantAwaitedLock(void)
Definition lock.c:1889
int LockWaiterCount(const LOCKTAG *locktag)
Definition lock.c:4854
void AtPrepare_Locks(void)
Definition lock.c:3476
bool LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
Definition lock.c:2102
#define FAST_PATH_LOCKNUMBER_OFFSET
Definition lock.c:242
Size LockManagerShmemSize(void)
Definition lock.c:3756
#define FAST_PATH_REL_GROUP(rel)
Definition lock.c:217
void InitLockManagerAccess(void)
Definition lock.c:505
void GrantLock(LOCK *lock, PROCLOCK *proclock, LOCKMODE lockmode)
Definition lock.c:1658
void VirtualXactLockTableCleanup(void)
Definition lock.c:4643
bool VirtualXactLock(VirtualTransactionId vxid, bool wait)
Definition lock.c:4743
VirtualTransactionId * GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
Definition lock.c:3069
static volatile FastPathStrongRelationLockData * FastPathStrongRelationLocks
Definition lock.c:312
void RemoveFromWaitQueue(PGPROC *proc, uint32 hashcode)
Definition lock.c:2046
LockAcquireResult LockAcquireExtended(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock, bool dontWait, bool reportMemoryError, LOCALLOCK **locallockp, bool logLockFailure)
Definition lock.c:836
void LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
Definition lock.c:2307
#define FAST_PATH_SLOT(group, index)
Definition lock.c:224
static void CheckAndSetLockHeld(LOCALLOCK *locallock, bool acquired)
Definition lock.c:1464
#define ConflictsWithRelationFastPath(locktag, mode)
Definition lock.c:273
void ResetAwaitedLock(void)
Definition lock.c:1907
static bool FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag, uint32 hashcode)
Definition lock.c:2861
static HTAB * LockMethodLocalHash
Definition lock.c:323
void LockReassignCurrentOwner(LOCALLOCK **locallocks, int nlocks)
Definition lock.c:2706
static bool UnGrantLock(LOCK *lock, LOCKMODE lockmode, PROCLOCK *proclock, LockMethod lockMethodTable)
Definition lock.c:1681
#define FAST_PATH_SET_LOCKMODE(proc, n, l)
Definition lock.c:252
#define PROCLOCK_PRINT(where, proclockP)
Definition lock.c:406
static void CleanUpLock(LOCK *lock, PROCLOCK *proclock, LockMethod lockMethodTable, uint32 hashcode, bool wakeupNeeded)
Definition lock.c:1738
static uint32 proclock_hash(const void *key, Size keysize)
Definition lock.c:574
static bool FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode)
Definition lock.c:2825
void AbortStrongLockAcquire(void)
Definition lock.c:1860
static bool FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode)
Definition lock.c:2782
static int FastPathLocalUseCounts[FP_LOCK_GROUPS_PER_BACKEND_MAX]
Definition lock.c:176
static HTAB * LockMethodLockHash
Definition lock.c:321
static ResourceOwner awaitedOwner
Definition lock.c:329
BlockedProcsData * GetBlockerStatusData(int blocked_pid)
Definition lock.c:3996
void LockManagerShmemInit(void)
Definition lock.c:444
static ProcWaitStatus WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner)
Definition lock.c:1932
bool LockHasWaiters(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
Definition lock.c:696
const char * GetLockmodeName(LOCKMETHODID lockmethodid, LOCKMODE mode)
Definition lock.c:4253
static void GetSingleProcBlockerStatusData(PGPROC *blocked_proc, BlockedProcsData *data)
Definition lock.c:4076
#define FAST_PATH_CLEAR_LOCKMODE(proc, n, l)
Definition lock.c:254
void lock_twophase_postcommit(FullTransactionId fxid, uint16 info, void *recdata, uint32 len)
Definition lock.c:4570
static const LockMethod LockMethods[]
Definition lock.c:150
static void waitonlock_error_callback(void *arg)
Definition lock.c:2020
void LockReleaseCurrentOwner(LOCALLOCK **locallocks, int nlocks)
Definition lock.c:2611
LOCALLOCK * GetAwaitedLock(void)
Definition lock.c:1898
void LockReleaseSession(LOCKMETHODID lockmethodid)
Definition lock.c:2581
void MarkLockClear(LOCALLOCK *locallock)
Definition lock.c:1920
LockData * GetLockStatusData(void)
Definition lock.c:3793
#define FAST_PATH_GET_BITS(proc, n)
Definition lock.c:245
static LOCALLOCK * StrongLockInProgress
Definition lock.c:327
#define FAST_PATH_BITS_PER_SLOT
Definition lock.c:241
int FastPathLockGroupsPerBackend
Definition lock.c:202
#define EligibleForRelationFastPath(locktag, mode)
Definition lock.c:267
uint32 LockTagHashCode(const LOCKTAG *locktag)
Definition lock.c:557
static void BeginStrongLockAcquire(LOCALLOCK *locallock, uint32 fasthashcode)
Definition lock.c:1824
bool LockCheckConflicts(LockMethod lockMethodTable, LOCKMODE lockmode, LOCK *lock, PROCLOCK *proclock)
Definition lock.c:1529
void lock_twophase_recover(FullTransactionId fxid, uint16 info, void *recdata, uint32 len)
Definition lock.c:4357
static void GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner)
Definition lock.c:1792
static void ReleaseLockIfHeld(LOCALLOCK *locallock, bool sessionLock)
Definition lock.c:2646
LockMethod GetLocksMethodTable(const LOCK *lock)
Definition lock.c:527
static void FinishStrongLockAcquire(void)
Definition lock.c:1850
#define FAST_PATH_STRONG_LOCK_HASH_PARTITIONS
Definition lock.c:301
xl_standby_lock * GetRunningTransactionLocks(int *nlocks)
Definition lock.c:4171
static void LockRefindAndRelease(LockMethod lockMethodTable, PGPROC *proc, LOCKTAG *locktag, LOCKMODE lockmode, bool decrement_strong_lock_count)
Definition lock.c:3284
static void CheckForSessionAndXactLocks(void)
Definition lock.c:3388
static HTAB * LockMethodProcLockHash
Definition lock.c:322
LockMethod GetLockTagsMethodTable(const LOCKTAG *locktag)
Definition lock.c:539
uint16 LOCKMETHODID
Definition lock.h:124
#define LOCK_LOCKTAG(lock)
Definition lock.h:327
#define SET_LOCKTAG_VIRTUALTRANSACTION(locktag, vxid)
Definition lock.h:237
@ LOCKTAG_OBJECT
Definition lock.h:147
@ LOCKTAG_RELATION_EXTEND
Definition lock.h:140
@ LOCKTAG_TUPLE
Definition lock.h:143
@ LOCKTAG_VIRTUALTRANSACTION
Definition lock.h:145
#define VirtualTransactionIdIsValid(vxid)
Definition lock.h:69
#define LockHashPartitionLock(hashcode)
Definition lock.h:528
#define GET_VXID_FROM_PGPROC(vxid_dst, proc)
Definition lock.h:79
#define LOCK_LOCKMETHOD(lock)
Definition lock.h:326
#define LOCKBIT_OFF(lockmode)
Definition lock.h:87
#define LOCALLOCK_LOCKMETHOD(llock)
Definition lock.h:445
#define InvalidLocalTransactionId
Definition lock.h:67
#define SET_LOCKTAG_TRANSACTION(locktag, xid)
Definition lock.h:228
#define SET_LOCKTAG_RELATION(locktag, dboid, reloid)
Definition lock.h:183
#define MAX_LOCKMODES
Definition lock.h:84
#define LOCKBIT_ON(lockmode)
Definition lock.h:86
#define LocalTransactionIdIsValid(lxid)
Definition lock.h:68
#define LOCALLOCK_LOCKTAG(llock)
Definition lock.h:446
#define LockHashPartition(hashcode)
Definition lock.h:526
#define VirtualTransactionIdEquals(vxid1, vxid2)
Definition lock.h:73
#define PROCLOCK_LOCKMETHOD(proclock)
Definition lock.h:384
#define LockHashPartitionLockByIndex(i)
Definition lock.h:531
LockAcquireResult
Definition lock.h:502
@ LOCKACQUIRE_ALREADY_CLEAR
Definition lock.h:506
@ LOCKACQUIRE_OK
Definition lock.h:504
@ LOCKACQUIRE_ALREADY_HELD
Definition lock.h:505
@ LOCKACQUIRE_NOT_AVAIL
Definition lock.h:503
#define VirtualTransactionIdIsRecoveredPreparedXact(vxid)
Definition lock.h:71
int LOCKMODE
Definition lockdefs.h:26
#define NoLock
Definition lockdefs.h:34
#define AccessExclusiveLock
Definition lockdefs.h:43
int LOCKMASK
Definition lockdefs.h:25
#define ExclusiveLock
Definition lockdefs.h:42
#define ShareLock
Definition lockdefs.h:40
#define MaxLockMode
Definition lockdefs.h:45
#define RowExclusiveLock
Definition lockdefs.h:38
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
Definition lwlock.c:1176
void LWLockRelease(LWLock *lock)
Definition lwlock.c:1793
#define NUM_LOCK_PARTITIONS
Definition lwlock.h:95
#define LOG2_NUM_LOCK_PARTITIONS
Definition lwlock.h:94
@ LW_SHARED
Definition lwlock.h:113
@ LW_EXCLUSIVE
Definition lwlock.h:112
void * MemoryContextAlloc(MemoryContext context, Size size)
Definition mcxt.c:1232
void * repalloc(void *pointer, Size size)
Definition mcxt.c:1632
void pfree(void *pointer)
Definition mcxt.c:1616
MemoryContext TopMemoryContext
Definition mcxt.c:166
void * palloc(Size size)
Definition mcxt.c:1387
MemoryContext CurrentMemoryContext
Definition mcxt.c:160
#define START_CRIT_SECTION()
Definition miscadmin.h:150
#define END_CRIT_SECTION()
Definition miscadmin.h:152
void * arg
const void size_t len
const void * data
static char buf[DEFAULT_XLOG_SEG_SIZE]
static uint32 DatumGetUInt32(Datum X)
Definition postgres.h:232
static Datum PointerGetDatum(const void *X)
Definition postgres.h:352
uint64_t Datum
Definition postgres.h:70
unsigned int Oid
static int fb(int x)
#define FastPathLockSlotsPerBackend()
Definition proc.h:93
#define GetPGProcByNumber(n)
Definition proc.h:446
#define FP_LOCK_SLOTS_PER_GROUP
Definition proc.h:92
ProcWaitStatus
Definition proc.h:140
@ PROC_WAIT_STATUS_OK
Definition proc.h:141
@ PROC_WAIT_STATUS_WAITING
Definition proc.h:142
@ PROC_WAIT_STATUS_ERROR
Definition proc.h:143
PGPROC * BackendPidGetProcWithLock(int pid)
Definition procarray.c:3182
PGPROC * ProcNumberGetProc(ProcNumber procNumber)
Definition procarray.c:3101
#define INVALID_PROC_NUMBER
Definition procnumber.h:26
void set_ps_display_remove_suffix(void)
Definition ps_status.c:439
void set_ps_display_suffix(const char *suffix)
Definition ps_status.c:387
void ResourceOwnerRememberLock(ResourceOwner owner, LOCALLOCK *locallock)
Definition resowner.c:1059
ResourceOwner ResourceOwnerGetParent(ResourceOwner owner)
Definition resowner.c:902
ResourceOwner CurrentResourceOwner
Definition resowner.c:173
void ResourceOwnerForgetLock(ResourceOwner owner, LOCALLOCK *locallock)
Definition resowner.c:1079
Size add_size(Size s1, Size s2)
Definition shmem.c:495
HTAB * ShmemInitHash(const char *name, int64 init_size, int64 max_size, HASHCTL *infoP, int hash_flags)
Definition shmem.c:334
void * ShmemInitStruct(const char *name, Size size, bool *foundPtr)
Definition shmem.c:389
#define SpinLockInit(lock)
Definition spin.h:57
#define SpinLockRelease(lock)
Definition spin.h:61
#define SpinLockAcquire(lock)
Definition spin.h:59
ProcWaitStatus JoinWaitQueue(LOCALLOCK *locallock, LockMethod lockMethodTable, bool dontWait)
Definition proc.c:1146
PGPROC * MyProc
Definition proc.c:67
void GetLockHoldersAndWaiters(LOCALLOCK *locallock, StringInfo lock_holders_sbuf, StringInfo lock_waiters_sbuf, int *lockHoldersNum)
Definition proc.c:1906
ProcWaitStatus ProcSleep(LOCALLOCK *locallock)
Definition proc.c:1315
void ProcLockWakeup(LockMethod lockMethodTable, LOCK *lock)
Definition proc.c:1745
PROC_HDR * ProcGlobal
Definition proc.c:79
void LogAccessExclusiveLockPrepare(void)
Definition standby.c:1449
void StandbyAcquireAccessExclusiveLock(TransactionId xid, Oid dbOid, Oid relOid)
Definition standby.c:986
void LogAccessExclusiveLock(Oid dbOid, Oid relOid)
Definition standby.c:1432
void initStringInfo(StringInfo str)
Definition stringinfo.c:97
struct ErrorContextCallback * previous
Definition elog.h:297
uint32 count[FAST_PATH_STRONG_LOCK_HASH_PARTITIONS]
Definition lock.c:309
Size keysize
Definition hsearch.h:75
HashValueFunc hash
Definition hsearch.h:78
Size entrysize
Definition hsearch.h:76
int64 num_partitions
Definition hsearch.h:68
int64 nLocks
Definition lock.h:425
struct ResourceOwnerData * owner
Definition lock.h:424
uint8 locktag_type
Definition lock.h:172
uint32 locktag_field3
Definition lock.h:170
uint32 locktag_field1
Definition lock.h:168
uint8 locktag_lockmethodid
Definition lock.h:173
uint16 locktag_field4
Definition lock.h:171
uint32 locktag_field2
Definition lock.h:169
Definition lock.h:311
int nRequested
Definition lock.h:321
LOCKTAG tag
Definition lock.h:313
int requested[MAX_LOCKMODES]
Definition lock.h:320
dclist_head waitProcs
Definition lock.h:319
int granted[MAX_LOCKMODES]
Definition lock.h:322
LOCKMASK grantMask
Definition lock.h:316
LOCKMASK waitMask
Definition lock.h:317
int nGranted
Definition lock.h:323
dlist_head procLocks
Definition lock.h:318
const bool * trace_flag
Definition lock.h:115
const char *const * lockModeNames
Definition lock.h:114
Definition proc.h:179
LWLock fpInfoLock
Definition proc.h:316
LocalTransactionId lxid
Definition proc.h:217
PROCLOCK * waitProcLock
Definition proc.h:256
dlist_head lockGroupMembers
Definition proc.h:328
Oid * fpRelId
Definition proc.h:318
Oid databaseId
Definition proc.h:224
uint64 * fpLockBits
Definition proc.h:317
pg_atomic_uint64 waitStart
Definition proc.h:260
bool fpVXIDLock
Definition proc.h:319
ProcNumber procNumber
Definition proc.h:212
int pid
Definition proc.h:199
struct PGPROC::@131 vxid
LOCK * waitLock
Definition proc.h:255
TransactionId xid
Definition proc.h:189
LOCKMODE waitLockMode
Definition proc.h:257
PGPROC * lockGroupLeader
Definition proc.h:327
LocalTransactionId fpLocalTransactionId
Definition proc.h:320
dlist_head myProcLocks[NUM_LOCK_PARTITIONS]
Definition proc.h:284
ProcWaitStatus waitStatus
Definition proc.h:184
dlist_node links
Definition proc.h:180
LOCK * myLock
Definition lock.h:367
PGPROC * myProc
Definition lock.h:368
LOCKMASK holdMask
Definition lock.h:378
dlist_node lockLink
Definition lock.h:380
PGPROC * groupLeader
Definition lock.h:377
LOCKMASK releaseMask
Definition lock.h:379
PROCLOCKTAG tag
Definition lock.h:374
dlist_node procLink
Definition lock.h:381
uint32 allProcCount
Definition proc.h:412
LOCKTAG locktag
Definition lock.c:160
LOCKMODE lockmode
Definition lock.c:161
LocalTransactionId localTransactionId
Definition lock.h:64
ProcNumber procNumber
Definition lock.h:63
dlist_node * cur
Definition ilist.h:179
dlist_node * next
Definition ilist.h:140
Definition type.h:96
#define InvalidTransactionId
Definition transam.h:31
#define XidFromFullTransactionId(x)
Definition transam.h:48
#define FirstNormalObjectId
Definition transam.h:197
#define TransactionIdIsValid(xid)
Definition transam.h:41
void RegisterTwoPhaseRecord(TwoPhaseRmgrId rmid, uint16 info, const void *data, uint32 len)
Definition twophase.c:1271
int max_prepared_xacts
Definition twophase.c:116
TransactionId TwoPhaseGetXidByVirtualXID(VirtualTransactionId vxid, bool *have_more)
Definition twophase.c:857
PGPROC * TwoPhaseGetDummyProc(FullTransactionId fxid, bool lock_held)
Definition twophase.c:923
#define TWOPHASE_RM_LOCK_ID
const char * type
bool RecoveryInProgress(void)
Definition xlog.c:6460
#define XLogStandbyInfoActive()
Definition xlog.h:125
bool InRecovery
Definition xlogutils.c:50
#define InHotStandby
Definition xlogutils.h:60
static struct link * links
Definition zic.c:302

◆ EligibleForRelationFastPath

#define EligibleForRelationFastPath (   locktag,
  mode 
)
Value:
((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
(locktag)->locktag_type == LOCKTAG_RELATION && \
(locktag)->locktag_field1 == MyDatabaseId && \
Oid MyDatabaseId
Definition globals.c:94

Definition at line 267 of file lock.c.

◆ FAST_PATH_BIT_POSITION

#define FAST_PATH_BIT_POSITION (   n,
 
)
Value:

Definition at line 247 of file lock.c.

◆ FAST_PATH_BITS

#define FAST_PATH_BITS (   proc,
 
)    (proc)->fpLockBits[FAST_PATH_GROUP(n)]

Definition at line 244 of file lock.c.

◆ FAST_PATH_BITS_PER_SLOT

#define FAST_PATH_BITS_PER_SLOT   3

Definition at line 241 of file lock.c.

◆ FAST_PATH_CHECK_LOCKMODE

#define FAST_PATH_CHECK_LOCKMODE (   proc,
  n,
 
)     (FAST_PATH_BITS(proc, n) & (UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)))

Definition at line 256 of file lock.c.

◆ FAST_PATH_CLEAR_LOCKMODE

#define FAST_PATH_CLEAR_LOCKMODE (   proc,
  n,
 
)     FAST_PATH_BITS(proc, n) &= ~(UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l))

Definition at line 254 of file lock.c.

◆ FAST_PATH_GET_BITS

#define FAST_PATH_GET_BITS (   proc,
 
)     ((FAST_PATH_BITS(proc, n) >> (FAST_PATH_BITS_PER_SLOT * FAST_PATH_INDEX(n))) & FAST_PATH_MASK)

Definition at line 245 of file lock.c.

◆ FAST_PATH_GROUP

#define FAST_PATH_GROUP (   index)
Value:

Definition at line 233 of file lock.c.

◆ FAST_PATH_INDEX

#define FAST_PATH_INDEX (   index)
Value:

Definition at line 236 of file lock.c.

◆ FAST_PATH_LOCKNUMBER_OFFSET

#define FAST_PATH_LOCKNUMBER_OFFSET   1

Definition at line 242 of file lock.c.

◆ FAST_PATH_MASK

#define FAST_PATH_MASK   ((1 << FAST_PATH_BITS_PER_SLOT) - 1)

Definition at line 243 of file lock.c.

◆ FAST_PATH_REL_GROUP

#define FAST_PATH_REL_GROUP (   rel)     (((uint64) (rel) * 49157) & (FastPathLockGroupsPerBackend - 1))

Definition at line 217 of file lock.c.

◆ FAST_PATH_SET_LOCKMODE

#define FAST_PATH_SET_LOCKMODE (   proc,
  n,
 
)     FAST_PATH_BITS(proc, n) |= UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)

Definition at line 252 of file lock.c.

◆ FAST_PATH_SLOT

#define FAST_PATH_SLOT (   group,
  index 
)
Value:

Definition at line 224 of file lock.c.

◆ FAST_PATH_STRONG_LOCK_HASH_BITS

#define FAST_PATH_STRONG_LOCK_HASH_BITS   10

Definition at line 300 of file lock.c.

◆ FAST_PATH_STRONG_LOCK_HASH_PARTITIONS

#define FAST_PATH_STRONG_LOCK_HASH_PARTITIONS    (1 << FAST_PATH_STRONG_LOCK_HASH_BITS)

Definition at line 301 of file lock.c.

◆ FastPathStrongLockHashPartition

#define FastPathStrongLockHashPartition (   hashcode)     ((hashcode) % FAST_PATH_STRONG_LOCK_HASH_PARTITIONS)

Definition at line 303 of file lock.c.

◆ LOCK_PRINT

#define LOCK_PRINT (   where,
  lock,
  type 
)    ((void) 0)

Definition at line 405 of file lock.c.

◆ NLOCKENTS

Definition at line 56 of file lock.c.

◆ PROCLOCK_PRINT

#define PROCLOCK_PRINT (   where,
  proclockP 
)    ((void) 0)

Definition at line 406 of file lock.c.

Typedef Documentation

◆ TwoPhaseLockRecord

Function Documentation

◆ AbortStrongLockAcquire()

◆ AtPrepare_Locks()

void AtPrepare_Locks ( void  )

Definition at line 3476 of file lock.c.

3477{
3478 HASH_SEQ_STATUS status;
3480
3481 /* First, verify there aren't locks of both xact and session level */
3483
3484 /* Now do the per-locallock cleanup work */
3486
3487 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3488 {
3489 TwoPhaseLockRecord record;
3490 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3491 bool haveSessionLock;
3492 bool haveXactLock;
3493 int i;
3494
3495 /*
3496 * Ignore VXID locks. We don't want those to be held by prepared
3497 * transactions, since they aren't meaningful after a restart.
3498 */
3499 if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3500 continue;
3501
3502 /* Ignore it if we don't actually hold the lock */
3503 if (locallock->nLocks <= 0)
3504 continue;
3505
3506 /* Scan to see whether we hold it at session or transaction level */
3507 haveSessionLock = haveXactLock = false;
3508 for (i = locallock->numLockOwners - 1; i >= 0; i--)
3509 {
3510 if (lockOwners[i].owner == NULL)
3511 haveSessionLock = true;
3512 else
3513 haveXactLock = true;
3514 }
3515
3516 /* Ignore it if we have only session lock */
3517 if (!haveXactLock)
3518 continue;
3519
3520 /* This can't happen, because we already checked it */
3521 if (haveSessionLock)
3522 ereport(ERROR,
3524 errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3525
3526 /*
3527 * If the local lock was taken via the fast-path, we need to move it
3528 * to the primary lock table, or just get a pointer to the existing
3529 * primary lock table entry if by chance it's already been
3530 * transferred.
3531 */
3532 if (locallock->proclock == NULL)
3533 {
3535 locallock->lock = locallock->proclock->tag.myLock;
3536 }
3537
3538 /*
3539 * Arrange to not release any strong lock count held by this lock
3540 * entry. We must retain the count until the prepared transaction is
3541 * committed or rolled back.
3542 */
3543 locallock->holdsStrongLockCount = false;
3544
3545 /*
3546 * Create a 2PC record.
3547 */
3548 memcpy(&(record.locktag), &(locallock->tag.lock), sizeof(LOCKTAG));
3549 record.lockmode = locallock->tag.mode;
3550
3552 &record, sizeof(TwoPhaseLockRecord));
3553 }
3554}

References CheckForSessionAndXactLocks(), ereport, errcode(), errmsg(), ERROR, FastPathGetRelationLockEntry(), fb(), hash_seq_init(), hash_seq_search(), i, LockMethodLocalHash, TwoPhaseLockRecord::lockmode, TwoPhaseLockRecord::locktag, LOCKTAG_VIRTUALTRANSACTION, RegisterTwoPhaseRecord(), and TWOPHASE_RM_LOCK_ID.

Referenced by PrepareTransaction().

◆ BeginStrongLockAcquire()

static void BeginStrongLockAcquire ( LOCALLOCK locallock,
uint32  fasthashcode 
)
static

Definition at line 1824 of file lock.c.

1825{
1827 Assert(locallock->holdsStrongLockCount == false);
1828
1829 /*
1830 * Adding to a memory location is not atomic, so we take a spinlock to
1831 * ensure we don't collide with someone else trying to bump the count at
1832 * the same time.
1833 *
1834 * XXX: It might be worth considering using an atomic fetch-and-add
1835 * instruction here, on architectures where that is supported.
1836 */
1837
1840 locallock->holdsStrongLockCount = true;
1843}

References Assert, FastPathStrongRelationLockData::count, FastPathStrongRelationLocks, fb(), FastPathStrongRelationLockData::mutex, SpinLockAcquire, SpinLockRelease, and StrongLockInProgress.

Referenced by LockAcquireExtended().

◆ CheckAndSetLockHeld()

static void CheckAndSetLockHeld ( LOCALLOCK locallock,
bool  acquired 
)
inlinestatic

Definition at line 1464 of file lock.c.

1465{
1466#ifdef USE_ASSERT_CHECKING
1469#endif
1470}

References fb(), LOCALLOCK_LOCKTAG, and LOCKTAG_RELATION_EXTEND.

Referenced by GrantLockLocal(), and RemoveLocalLock().

◆ CheckForSessionAndXactLocks()

static void CheckForSessionAndXactLocks ( void  )
static

Definition at line 3388 of file lock.c.

3389{
3390 typedef struct
3391 {
3392 LOCKTAG lock; /* identifies the lockable object */
3393 bool sessLock; /* is any lockmode held at session level? */
3394 bool xactLock; /* is any lockmode held at xact level? */
3396
3398 HTAB *lockhtab;
3399 HASH_SEQ_STATUS status;
3401
3402 /* Create a local hash table keyed by LOCKTAG only */
3403 hash_ctl.keysize = sizeof(LOCKTAG);
3404 hash_ctl.entrysize = sizeof(PerLockTagEntry);
3406
3407 lockhtab = hash_create("CheckForSessionAndXactLocks table",
3408 256, /* arbitrary initial size */
3409 &hash_ctl,
3411
3412 /* Scan local lock table to find entries for each LOCKTAG */
3414
3415 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3416 {
3417 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3419 bool found;
3420 int i;
3421
3422 /*
3423 * Ignore VXID locks. We don't want those to be held by prepared
3424 * transactions, since they aren't meaningful after a restart.
3425 */
3426 if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3427 continue;
3428
3429 /* Ignore it if we don't actually hold the lock */
3430 if (locallock->nLocks <= 0)
3431 continue;
3432
3433 /* Otherwise, find or make an entry in lockhtab */
3435 &locallock->tag.lock,
3436 HASH_ENTER, &found);
3437 if (!found) /* initialize, if newly created */
3438 hentry->sessLock = hentry->xactLock = false;
3439
3440 /* Scan to see if we hold lock at session or xact level or both */
3441 for (i = locallock->numLockOwners - 1; i >= 0; i--)
3442 {
3443 if (lockOwners[i].owner == NULL)
3444 hentry->sessLock = true;
3445 else
3446 hentry->xactLock = true;
3447 }
3448
3449 /*
3450 * We can throw error immediately when we see both types of locks; no
3451 * need to wait around to see if there are more violations.
3452 */
3453 if (hentry->sessLock && hentry->xactLock)
3454 ereport(ERROR,
3456 errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3457 }
3458
3459 /* Success, so clean up */
3461}

References CurrentMemoryContext, ereport, errcode(), errmsg(), ERROR, fb(), HASH_BLOBS, HASH_CONTEXT, hash_create(), hash_destroy(), HASH_ELEM, HASH_ENTER, hash_search(), hash_seq_init(), hash_seq_search(), i, LockMethodLocalHash, and LOCKTAG_VIRTUALTRANSACTION.

Referenced by AtPrepare_Locks().

◆ CleanUpLock()

static void CleanUpLock ( LOCK lock,
PROCLOCK proclock,
LockMethod  lockMethodTable,
uint32  hashcode,
bool  wakeupNeeded 
)
static

Definition at line 1738 of file lock.c.

1741{
1742 /*
1743 * If this was my last hold on this lock, delete my entry in the proclock
1744 * table.
1745 */
1746 if (proclock->holdMask == 0)
1747 {
1749
1750 PROCLOCK_PRINT("CleanUpLock: deleting", proclock);
1751 dlist_delete(&proclock->lockLink);
1752 dlist_delete(&proclock->procLink);
1753 proclock_hashcode = ProcLockHashCode(&proclock->tag, hashcode);
1755 &(proclock->tag),
1758 NULL))
1759 elog(PANIC, "proclock table corrupted");
1760 }
1761
1762 if (lock->nRequested == 0)
1763 {
1764 /*
1765 * The caller just released the last lock, so garbage-collect the lock
1766 * object.
1767 */
1768 LOCK_PRINT("CleanUpLock: deleting", lock, 0);
1771 &(lock->tag),
1772 hashcode,
1774 NULL))
1775 elog(PANIC, "lock table corrupted");
1776 }
1777 else if (wakeupNeeded)
1778 {
1779 /* There are waiters on this lock, so wake them up. */
1781 }
1782}

References Assert, dlist_delete(), dlist_is_empty(), elog, fb(), HASH_REMOVE, hash_search_with_hash_value(), PROCLOCK::holdMask, LOCK_PRINT, PROCLOCK::lockLink, LockMethodLockHash, LockMethodProcLockHash, LOCK::nRequested, PANIC, PROCLOCK::procLink, PROCLOCK_PRINT, ProcLockHashCode(), LOCK::procLocks, ProcLockWakeup(), LOCK::tag, and PROCLOCK::tag.

Referenced by LockRefindAndRelease(), LockRelease(), LockReleaseAll(), and RemoveFromWaitQueue().

◆ DoLockModesConflict()

bool DoLockModesConflict ( LOCKMODE  mode1,
LOCKMODE  mode2 
)

Definition at line 623 of file lock.c.

624{
626
627 if (lockMethodTable->conflictTab[mode1] & LOCKBIT_ON(mode2))
628 return true;
629
630 return false;
631}

References DEFAULT_LOCKMETHOD, fb(), LOCKBIT_ON, and LockMethods.

Referenced by Do_MultiXactIdWait(), DoesMultiXactIdConflict(), initialize_reloptions(), and test_lockmode_for_conflict().

◆ FastPathGetRelationLockEntry()

static PROCLOCK * FastPathGetRelationLockEntry ( LOCALLOCK locallock)
static

Definition at line 2958 of file lock.c.

2959{
2961 LOCKTAG *locktag = &locallock->tag.lock;
2962 PROCLOCK *proclock = NULL;
2964 Oid relid = locktag->locktag_field2;
2965 uint32 i,
2966 group;
2967
2968 /* fast-path group the lock belongs to */
2969 group = FAST_PATH_REL_GROUP(relid);
2970
2972
2973 for (i = 0; i < FP_LOCK_SLOTS_PER_GROUP; i++)
2974 {
2975 uint32 lockmode;
2976
2977 /* index into the whole per-backend array */
2978 uint32 f = FAST_PATH_SLOT(group, i);
2979
2980 /* Look for an allocated slot matching the given relid. */
2981 if (relid != MyProc->fpRelId[f] || FAST_PATH_GET_BITS(MyProc, f) == 0)
2982 continue;
2983
2984 /* If we don't have a lock of the given mode, forget it! */
2985 lockmode = locallock->tag.mode;
2986 if (!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
2987 break;
2988
2989 /* Find or create lock object. */
2991
2992 proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
2993 locallock->hashcode, lockmode);
2994 if (!proclock)
2995 {
2998 ereport(ERROR,
3000 errmsg("out of shared memory"),
3001 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
3002 }
3003 GrantLock(proclock->tag.myLock, proclock, lockmode);
3004 FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
3005
3007
3008 /* No need to examine remaining slots. */
3009 break;
3010 }
3011
3013
3014 /* Lock may have already been transferred by some other backend. */
3015 if (proclock == NULL)
3016 {
3017 LOCK *lock;
3020
3022
3024 locktag,
3025 locallock->hashcode,
3026 HASH_FIND,
3027 NULL);
3028 if (!lock)
3029 elog(ERROR, "failed to re-find shared lock object");
3030
3031 proclocktag.myLock = lock;
3032 proclocktag.myProc = MyProc;
3033
3035 proclock = (PROCLOCK *)
3037 &proclocktag,
3039 HASH_FIND,
3040 NULL);
3041 if (!proclock)
3042 elog(ERROR, "failed to re-find shared proclock object");
3044 }
3045
3046 return proclock;
3047}

References DEFAULT_LOCKMETHOD, elog, ereport, errcode(), errhint(), errmsg(), ERROR, FAST_PATH_CHECK_LOCKMODE, FAST_PATH_CLEAR_LOCKMODE, FAST_PATH_GET_BITS, FAST_PATH_REL_GROUP, FAST_PATH_SLOT, fb(), FP_LOCK_SLOTS_PER_GROUP, PGPROC::fpInfoLock, PGPROC::fpRelId, GrantLock(), HASH_FIND, hash_search_with_hash_value(), i, LockHashPartitionLock, LockMethodLockHash, LockMethodProcLockHash, LockMethods, LOCKTAG::locktag_field2, LW_EXCLUSIVE, LW_SHARED, LWLockAcquire(), LWLockRelease(), PROCLOCKTAG::myLock, MyProc, ProcLockHashCode(), SetupLockInTable(), and PROCLOCK::tag.

Referenced by AtPrepare_Locks().

◆ FastPathGrantRelationLock()

static bool FastPathGrantRelationLock ( Oid  relid,
LOCKMODE  lockmode 
)
static

Definition at line 2782 of file lock.c.

2783{
2784 uint32 i;
2786
2787 /* fast-path group the lock belongs to */
2788 uint32 group = FAST_PATH_REL_GROUP(relid);
2789
2790 /* Scan for existing entry for this relid, remembering empty slot. */
2791 for (i = 0; i < FP_LOCK_SLOTS_PER_GROUP; i++)
2792 {
2793 /* index into the whole per-backend array */
2794 uint32 f = FAST_PATH_SLOT(group, i);
2795
2796 if (FAST_PATH_GET_BITS(MyProc, f) == 0)
2797 unused_slot = f;
2798 else if (MyProc->fpRelId[f] == relid)
2799 {
2800 Assert(!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode));
2801 FAST_PATH_SET_LOCKMODE(MyProc, f, lockmode);
2802 return true;
2803 }
2804 }
2805
2806 /* If no existing entry, use any empty slot. */
2808 {
2809 MyProc->fpRelId[unused_slot] = relid;
2811 ++FastPathLocalUseCounts[group];
2812 return true;
2813 }
2814
2815 /* No existing entry, and no empty slot. */
2816 return false;
2817}

References Assert, FAST_PATH_CHECK_LOCKMODE, FAST_PATH_GET_BITS, FAST_PATH_REL_GROUP, FAST_PATH_SET_LOCKMODE, FAST_PATH_SLOT, FastPathLocalUseCounts, FastPathLockSlotsPerBackend, fb(), FP_LOCK_SLOTS_PER_GROUP, PGPROC::fpRelId, i, and MyProc.

Referenced by LockAcquireExtended().

◆ FastPathTransferRelationLocks()

static bool FastPathTransferRelationLocks ( LockMethod  lockMethodTable,
const LOCKTAG locktag,
uint32  hashcode 
)
static

Definition at line 2861 of file lock.c.

2863{
2865 Oid relid = locktag->locktag_field2;
2866 uint32 i;
2867
2868 /* fast-path group the lock belongs to */
2869 uint32 group = FAST_PATH_REL_GROUP(relid);
2870
2871 /*
2872 * Every PGPROC that can potentially hold a fast-path lock is present in
2873 * ProcGlobal->allProcs. Prepared transactions are not, but any
2874 * outstanding fast-path locks held by prepared transactions are
2875 * transferred to the main lock table.
2876 */
2877 for (i = 0; i < ProcGlobal->allProcCount; i++)
2878 {
2879 PGPROC *proc = GetPGProcByNumber(i);
2880 uint32 j;
2881
2883
2884 /*
2885 * If the target backend isn't referencing the same database as the
2886 * lock, then we needn't examine the individual relation IDs at all;
2887 * none of them can be relevant.
2888 *
2889 * proc->databaseId is set at backend startup time and never changes
2890 * thereafter, so it might be safe to perform this test before
2891 * acquiring &proc->fpInfoLock. In particular, it's certainly safe to
2892 * assume that if the target backend holds any fast-path locks, it
2893 * must have performed a memory-fencing operation (in particular, an
2894 * LWLock acquisition) since setting proc->databaseId. However, it's
2895 * less clear that our backend is certain to have performed a memory
2896 * fencing operation since the other backend set proc->databaseId. So
2897 * for now, we test it after acquiring the LWLock just to be safe.
2898 *
2899 * Also skip groups without any registered fast-path locks.
2900 */
2901 if (proc->databaseId != locktag->locktag_field1 ||
2902 proc->fpLockBits[group] == 0)
2903 {
2904 LWLockRelease(&proc->fpInfoLock);
2905 continue;
2906 }
2907
2908 for (j = 0; j < FP_LOCK_SLOTS_PER_GROUP; j++)
2909 {
2910 uint32 lockmode;
2911
2912 /* index into the whole per-backend array */
2913 uint32 f = FAST_PATH_SLOT(group, j);
2914
2915 /* Look for an allocated slot matching the given relid. */
2916 if (relid != proc->fpRelId[f] || FAST_PATH_GET_BITS(proc, f) == 0)
2917 continue;
2918
2919 /* Find or create lock object. */
2921 for (lockmode = FAST_PATH_LOCKNUMBER_OFFSET;
2923 ++lockmode)
2924 {
2925 PROCLOCK *proclock;
2926
2927 if (!FAST_PATH_CHECK_LOCKMODE(proc, f, lockmode))
2928 continue;
2929 proclock = SetupLockInTable(lockMethodTable, proc, locktag,
2930 hashcode, lockmode);
2931 if (!proclock)
2932 {
2934 LWLockRelease(&proc->fpInfoLock);
2935 return false;
2936 }
2937 GrantLock(proclock->tag.myLock, proclock, lockmode);
2938 FAST_PATH_CLEAR_LOCKMODE(proc, f, lockmode);
2939 }
2941
2942 /* No need to examine remaining slots. */
2943 break;
2944 }
2945 LWLockRelease(&proc->fpInfoLock);
2946 }
2947 return true;
2948}

References PROC_HDR::allProcCount, PGPROC::databaseId, FAST_PATH_BITS_PER_SLOT, FAST_PATH_CHECK_LOCKMODE, FAST_PATH_CLEAR_LOCKMODE, FAST_PATH_GET_BITS, FAST_PATH_LOCKNUMBER_OFFSET, FAST_PATH_REL_GROUP, FAST_PATH_SLOT, fb(), FP_LOCK_SLOTS_PER_GROUP, PGPROC::fpInfoLock, PGPROC::fpLockBits, PGPROC::fpRelId, GetPGProcByNumber, GrantLock(), i, j, LockHashPartitionLock, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), PROCLOCKTAG::myLock, ProcGlobal, SetupLockInTable(), and PROCLOCK::tag.

Referenced by LockAcquireExtended().

◆ FastPathUnGrantRelationLock()

static bool FastPathUnGrantRelationLock ( Oid  relid,
LOCKMODE  lockmode 
)
static

Definition at line 2825 of file lock.c.

2826{
2827 uint32 i;
2828 bool result = false;
2829
2830 /* fast-path group the lock belongs to */
2831 uint32 group = FAST_PATH_REL_GROUP(relid);
2832
2833 FastPathLocalUseCounts[group] = 0;
2834 for (i = 0; i < FP_LOCK_SLOTS_PER_GROUP; i++)
2835 {
2836 /* index into the whole per-backend array */
2837 uint32 f = FAST_PATH_SLOT(group, i);
2838
2839 if (MyProc->fpRelId[f] == relid
2840 && FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
2841 {
2842 Assert(!result);
2843 FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
2844 result = true;
2845 /* we continue iterating so as to update FastPathLocalUseCount */
2846 }
2847 if (FAST_PATH_GET_BITS(MyProc, f) != 0)
2848 ++FastPathLocalUseCounts[group];
2849 }
2850 return result;
2851}

References Assert, FAST_PATH_CHECK_LOCKMODE, FAST_PATH_CLEAR_LOCKMODE, FAST_PATH_GET_BITS, FAST_PATH_REL_GROUP, FAST_PATH_SLOT, FastPathLocalUseCounts, FP_LOCK_SLOTS_PER_GROUP, PGPROC::fpRelId, i, and MyProc.

Referenced by LockRelease(), and LockReleaseAll().

◆ FinishStrongLockAcquire()

static void FinishStrongLockAcquire ( void  )
static

Definition at line 1850 of file lock.c.

1851{
1853}

References fb(), and StrongLockInProgress.

Referenced by LockAcquireExtended().

◆ GetAwaitedLock()

LOCALLOCK * GetAwaitedLock ( void  )

Definition at line 1898 of file lock.c.

1899{
1900 return awaitedLock;
1901}

References awaitedLock.

Referenced by LockErrorCleanup(), ProcessRecoveryConflictInterrupt(), and ProcSleep().

◆ GetBlockerStatusData()

BlockedProcsData * GetBlockerStatusData ( int  blocked_pid)

Definition at line 3996 of file lock.c.

3997{
3999 PGPROC *proc;
4000 int i;
4001
4003
4004 /*
4005 * Guess how much space we'll need, and preallocate. Most of the time
4006 * this will avoid needing to do repalloc while holding the LWLocks. (We
4007 * assume, but check with an Assert, that MaxBackends is enough entries
4008 * for the procs[] array; the other two could need enlargement, though.)
4009 */
4010 data->nprocs = data->nlocks = data->npids = 0;
4011 data->maxprocs = data->maxlocks = data->maxpids = MaxBackends;
4012 data->procs = palloc_array(BlockedProcData, data->maxprocs);
4013 data->locks = palloc_array(LockInstanceData, data->maxlocks);
4014 data->waiter_pids = palloc_array(int, data->maxpids);
4015
4016 /*
4017 * In order to search the ProcArray for blocked_pid and assume that that
4018 * entry won't immediately disappear under us, we must hold ProcArrayLock.
4019 * In addition, to examine the lock grouping fields of any other backend,
4020 * we must hold all the hash partition locks. (Only one of those locks is
4021 * actually relevant for any one lock group, but we can't know which one
4022 * ahead of time.) It's fairly annoying to hold all those locks
4023 * throughout this, but it's no worse than GetLockStatusData(), and it
4024 * does have the advantage that we're guaranteed to return a
4025 * self-consistent instantaneous state.
4026 */
4028
4030
4031 /* Nothing to do if it's gone */
4032 if (proc != NULL)
4033 {
4034 /*
4035 * Acquire lock on the entire shared lock data structure. See notes
4036 * in GetLockStatusData().
4037 */
4038 for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
4040
4041 if (proc->lockGroupLeader == NULL)
4042 {
4043 /* Easy case, proc is not a lock group member */
4045 }
4046 else
4047 {
4048 /* Examine all procs in proc's lock group */
4049 dlist_iter iter;
4050
4052 {
4054
4055 memberProc = dlist_container(PGPROC, lockGroupLink, iter.cur);
4057 }
4058 }
4059
4060 /*
4061 * And release locks. See notes in GetLockStatusData().
4062 */
4063 for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
4065
4066 Assert(data->nprocs <= data->maxprocs);
4067 }
4068
4070
4071 return data;
4072}

References Assert, BackendPidGetProcWithLock(), dlist_iter::cur, data, dlist_container, dlist_foreach, fb(), GetSingleProcBlockerStatusData(), i, PGPROC::lockGroupLeader, PGPROC::lockGroupMembers, LockHashPartitionLockByIndex, LW_SHARED, LWLockAcquire(), LWLockRelease(), MaxBackends, NUM_LOCK_PARTITIONS, palloc_array, and palloc_object.

Referenced by pg_blocking_pids().

◆ GetLockConflicts()

VirtualTransactionId * GetLockConflicts ( const LOCKTAG locktag,
LOCKMODE  lockmode,
int countp 
)

Definition at line 3069 of file lock.c.

3070{
3074 LOCK *lock;
3077 PROCLOCK *proclock;
3078 uint32 hashcode;
3080 int count = 0;
3081 int fast_count = 0;
3082
3084 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
3087 elog(ERROR, "unrecognized lock mode: %d", lockmode);
3088
3089 /*
3090 * Allocate memory to store results, and fill with InvalidVXID. We only
3091 * need enough space for MaxBackends + max_prepared_xacts + a terminator.
3092 * InHotStandby allocate once in TopMemoryContext.
3093 */
3094 if (InHotStandby)
3095 {
3096 if (vxids == NULL)
3099 sizeof(VirtualTransactionId) *
3101 }
3102 else
3104
3105 /* Compute hash code and partition lock, and look up conflicting modes. */
3106 hashcode = LockTagHashCode(locktag);
3108 conflictMask = lockMethodTable->conflictTab[lockmode];
3109
3110 /*
3111 * Fast path locks might not have been entered in the primary lock table.
3112 * If the lock we're dealing with could conflict with such a lock, we must
3113 * examine each backend's fast-path array for conflicts.
3114 */
3115 if (ConflictsWithRelationFastPath(locktag, lockmode))
3116 {
3117 int i;
3118 Oid relid = locktag->locktag_field2;
3120
3121 /* fast-path group the lock belongs to */
3122 uint32 group = FAST_PATH_REL_GROUP(relid);
3123
3124 /*
3125 * Iterate over relevant PGPROCs. Anything held by a prepared
3126 * transaction will have been transferred to the primary lock table,
3127 * so we need not worry about those. This is all a bit fuzzy, because
3128 * new locks could be taken after we've visited a particular
3129 * partition, but the callers had better be prepared to deal with that
3130 * anyway, since the locks could equally well be taken between the
3131 * time we return the value and the time the caller does something
3132 * with it.
3133 */
3134 for (i = 0; i < ProcGlobal->allProcCount; i++)
3135 {
3136 PGPROC *proc = GetPGProcByNumber(i);
3137 uint32 j;
3138
3139 /* A backend never blocks itself */
3140 if (proc == MyProc)
3141 continue;
3142
3144
3145 /*
3146 * If the target backend isn't referencing the same database as
3147 * the lock, then we needn't examine the individual relation IDs
3148 * at all; none of them can be relevant.
3149 *
3150 * See FastPathTransferRelationLocks() for discussion of why we do
3151 * this test after acquiring the lock.
3152 *
3153 * Also skip groups without any registered fast-path locks.
3154 */
3155 if (proc->databaseId != locktag->locktag_field1 ||
3156 proc->fpLockBits[group] == 0)
3157 {
3158 LWLockRelease(&proc->fpInfoLock);
3159 continue;
3160 }
3161
3162 for (j = 0; j < FP_LOCK_SLOTS_PER_GROUP; j++)
3163 {
3165
3166 /* index into the whole per-backend array */
3167 uint32 f = FAST_PATH_SLOT(group, j);
3168
3169 /* Look for an allocated slot matching the given relid. */
3170 if (relid != proc->fpRelId[f])
3171 continue;
3172 lockmask = FAST_PATH_GET_BITS(proc, f);
3173 if (!lockmask)
3174 continue;
3176
3177 /*
3178 * There can only be one entry per relation, so if we found it
3179 * and it doesn't conflict, we can skip the rest of the slots.
3180 */
3181 if ((lockmask & conflictMask) == 0)
3182 break;
3183
3184 /* Conflict! */
3185 GET_VXID_FROM_PGPROC(vxid, *proc);
3186
3188 vxids[count++] = vxid;
3189 /* else, xact already committed or aborted */
3190
3191 /* No need to examine remaining slots. */
3192 break;
3193 }
3194
3195 LWLockRelease(&proc->fpInfoLock);
3196 }
3197 }
3198
3199 /* Remember how many fast-path conflicts we found. */
3200 fast_count = count;
3201
3202 /*
3203 * Look up the lock object matching the tag.
3204 */
3206
3208 locktag,
3209 hashcode,
3210 HASH_FIND,
3211 NULL);
3212 if (!lock)
3213 {
3214 /*
3215 * If the lock object doesn't exist, there is nothing holding a lock
3216 * on this lockable object.
3217 */
3219 vxids[count].procNumber = INVALID_PROC_NUMBER;
3220 vxids[count].localTransactionId = InvalidLocalTransactionId;
3221 if (countp)
3222 *countp = count;
3223 return vxids;
3224 }
3225
3226 /*
3227 * Examine each existing holder (or awaiter) of the lock.
3228 */
3230 {
3231 proclock = dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
3232
3233 if (conflictMask & proclock->holdMask)
3234 {
3235 PGPROC *proc = proclock->tag.myProc;
3236
3237 /* A backend never blocks itself */
3238 if (proc != MyProc)
3239 {
3241
3242 GET_VXID_FROM_PGPROC(vxid, *proc);
3243
3245 {
3246 int i;
3247
3248 /* Avoid duplicate entries. */
3249 for (i = 0; i < fast_count; ++i)
3251 break;
3252 if (i >= fast_count)
3253 vxids[count++] = vxid;
3254 }
3255 /* else, xact already committed or aborted */
3256 }
3257 }
3258 }
3259
3261
3262 if (count > MaxBackends + max_prepared_xacts) /* should never happen */
3263 elog(PANIC, "too many conflicting locks found");
3264
3265 vxids[count].procNumber = INVALID_PROC_NUMBER;
3266 vxids[count].localTransactionId = InvalidLocalTransactionId;
3267 if (countp)
3268 *countp = count;
3269 return vxids;
3270}

References PROC_HDR::allProcCount, ConflictsWithRelationFastPath, PGPROC::databaseId, dlist_container, dlist_foreach, elog, ERROR, FAST_PATH_GET_BITS, FAST_PATH_LOCKNUMBER_OFFSET, FAST_PATH_REL_GROUP, FAST_PATH_SLOT, fb(), FP_LOCK_SLOTS_PER_GROUP, PGPROC::fpInfoLock, PGPROC::fpLockBits, PGPROC::fpRelId, GET_VXID_FROM_PGPROC, GetPGProcByNumber, HASH_FIND, hash_search_with_hash_value(), PROCLOCK::holdMask, i, InHotStandby, INVALID_PROC_NUMBER, InvalidLocalTransactionId, j, lengthof, LockHashPartitionLock, LockMethodLockHash, LockMethods, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_lockmethodid, LockTagHashCode(), LW_SHARED, LWLockAcquire(), LWLockRelease(), max_prepared_xacts, MaxBackends, MemoryContextAlloc(), MyProc, PROCLOCKTAG::myProc, palloc0_array, PANIC, ProcGlobal, LOCK::procLocks, PROCLOCK::tag, TopMemoryContext, VirtualTransactionIdEquals, and VirtualTransactionIdIsValid.

Referenced by ProcSleep(), ResolveRecoveryConflictWithLock(), and WaitForLockersMultiple().

◆ GetLockmodeName()

const char * GetLockmodeName ( LOCKMETHODID  lockmethodid,
LOCKMODE  mode 
)

◆ GetLocksMethodTable()

LockMethod GetLocksMethodTable ( const LOCK lock)

Definition at line 527 of file lock.c.

References Assert, fb(), lengthof, LOCK_LOCKMETHOD, and LockMethods.

Referenced by DeadLockCheck(), and FindLockCycleRecurseMember().

◆ GetLockStatusData()

LockData * GetLockStatusData ( void  )

Definition at line 3793 of file lock.c.

3794{
3795 LockData *data;
3796 PROCLOCK *proclock;
3798 int els;
3799 int el;
3800 int i;
3801
3803
3804 /* Guess how much space we'll need. */
3805 els = MaxBackends;
3806 el = 0;
3808
3809 /*
3810 * First, we iterate through the per-backend fast-path arrays, locking
3811 * them one at a time. This might produce an inconsistent picture of the
3812 * system state, but taking all of those LWLocks at the same time seems
3813 * impractical (in particular, note MAX_SIMUL_LWLOCKS). It shouldn't
3814 * matter too much, because none of these locks can be involved in lock
3815 * conflicts anyway - anything that might must be present in the main lock
3816 * table. (For the same reason, we don't sweat about making leaderPid
3817 * completely valid. We cannot safely dereference another backend's
3818 * lockGroupLeader field without holding all lock partition locks, and
3819 * it's not worth that.)
3820 */
3821 for (i = 0; i < ProcGlobal->allProcCount; ++i)
3822 {
3823 PGPROC *proc = GetPGProcByNumber(i);
3824
3825 /* Skip backends with pid=0, as they don't hold fast-path locks */
3826 if (proc->pid == 0)
3827 continue;
3828
3830
3831 for (uint32 g = 0; g < FastPathLockGroupsPerBackend; g++)
3832 {
3833 /* Skip groups without registered fast-path locks */
3834 if (proc->fpLockBits[g] == 0)
3835 continue;
3836
3837 for (int j = 0; j < FP_LOCK_SLOTS_PER_GROUP; j++)
3838 {
3840 uint32 f = FAST_PATH_SLOT(g, j);
3842
3843 /* Skip unallocated slots */
3844 if (!lockbits)
3845 continue;
3846
3847 if (el >= els)
3848 {
3849 els += MaxBackends;
3850 data->locks = (LockInstanceData *)
3851 repalloc(data->locks, sizeof(LockInstanceData) * els);
3852 }
3853
3854 instance = &data->locks[el];
3856 proc->fpRelId[f]);
3858 instance->waitLockMode = NoLock;
3859 instance->vxid.procNumber = proc->vxid.procNumber;
3860 instance->vxid.localTransactionId = proc->vxid.lxid;
3861 instance->pid = proc->pid;
3862 instance->leaderPid = proc->pid;
3863 instance->fastpath = true;
3864
3865 /*
3866 * Successfully taking fast path lock means there were no
3867 * conflicting locks.
3868 */
3869 instance->waitStart = 0;
3870
3871 el++;
3872 }
3873 }
3874
3875 if (proc->fpVXIDLock)
3876 {
3879
3880 if (el >= els)
3881 {
3882 els += MaxBackends;
3883 data->locks = (LockInstanceData *)
3884 repalloc(data->locks, sizeof(LockInstanceData) * els);
3885 }
3886
3887 vxid.procNumber = proc->vxid.procNumber;
3889
3890 instance = &data->locks[el];
3892 instance->holdMask = LOCKBIT_ON(ExclusiveLock);
3893 instance->waitLockMode = NoLock;
3894 instance->vxid.procNumber = proc->vxid.procNumber;
3895 instance->vxid.localTransactionId = proc->vxid.lxid;
3896 instance->pid = proc->pid;
3897 instance->leaderPid = proc->pid;
3898 instance->fastpath = true;
3899 instance->waitStart = 0;
3900
3901 el++;
3902 }
3903
3904 LWLockRelease(&proc->fpInfoLock);
3905 }
3906
3907 /*
3908 * Next, acquire lock on the entire shared lock data structure. We do
3909 * this so that, at least for locks in the primary lock table, the state
3910 * will be self-consistent.
3911 *
3912 * Since this is a read-only operation, we take shared instead of
3913 * exclusive lock. There's not a whole lot of point to this, because all
3914 * the normal operations require exclusive lock, but it doesn't hurt
3915 * anything either. It will at least allow two backends to do
3916 * GetLockStatusData in parallel.
3917 *
3918 * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
3919 */
3920 for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
3922
3923 /* Now we can safely count the number of proclocks */
3925 if (data->nelements > els)
3926 {
3927 els = data->nelements;
3928 data->locks = (LockInstanceData *)
3929 repalloc(data->locks, sizeof(LockInstanceData) * els);
3930 }
3931
3932 /* Now scan the tables to copy the data */
3934
3935 while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
3936 {
3937 PGPROC *proc = proclock->tag.myProc;
3938 LOCK *lock = proclock->tag.myLock;
3939 LockInstanceData *instance = &data->locks[el];
3940
3941 memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
3942 instance->holdMask = proclock->holdMask;
3943 if (proc->waitLock == proclock->tag.myLock)
3944 instance->waitLockMode = proc->waitLockMode;
3945 else
3946 instance->waitLockMode = NoLock;
3947 instance->vxid.procNumber = proc->vxid.procNumber;
3948 instance->vxid.localTransactionId = proc->vxid.lxid;
3949 instance->pid = proc->pid;
3950 instance->leaderPid = proclock->groupLeader->pid;
3951 instance->fastpath = false;
3952 instance->waitStart = (TimestampTz) pg_atomic_read_u64(&proc->waitStart);
3953
3954 el++;
3955 }
3956
3957 /*
3958 * And release locks. We do this in reverse order for two reasons: (1)
3959 * Anyone else who needs more than one of the locks will be trying to lock
3960 * them in increasing order; we don't want to release the other process
3961 * until it can get all the locks it needs. (2) This avoids O(N^2)
3962 * behavior inside LWLockRelease.
3963 */
3964 for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
3966
3967 Assert(el == data->nelements);
3968
3969 return data;
3970}

References PROC_HDR::allProcCount, Assert, data, PGPROC::databaseId, ExclusiveLock, FAST_PATH_GET_BITS, FAST_PATH_LOCKNUMBER_OFFSET, FAST_PATH_SLOT, FastPathLockGroupsPerBackend, fb(), FP_LOCK_SLOTS_PER_GROUP, PGPROC::fpInfoLock, PGPROC::fpLocalTransactionId, PGPROC::fpLockBits, PGPROC::fpRelId, PGPROC::fpVXIDLock, GetPGProcByNumber, PROCLOCK::groupLeader, hash_get_num_entries(), hash_seq_init(), hash_seq_search(), PROCLOCK::holdMask, i, j, VirtualTransactionId::localTransactionId, LOCKBIT_ON, LockHashPartitionLockByIndex, LockMethodProcLockHash, LW_SHARED, LWLockAcquire(), LWLockRelease(), PGPROC::lxid, MaxBackends, PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, NoLock, NUM_LOCK_PARTITIONS, palloc_array, palloc_object, pg_atomic_read_u64(), PGPROC::pid, ProcGlobal, VirtualTransactionId::procNumber, PGPROC::procNumber, repalloc(), SET_LOCKTAG_RELATION, SET_LOCKTAG_VIRTUALTRANSACTION, LOCK::tag, PROCLOCK::tag, PGPROC::vxid, PGPROC::waitLock, PGPROC::waitLockMode, and PGPROC::waitStart.

Referenced by pg_lock_status().

◆ GetLockTagsMethodTable()

LockMethod GetLockTagsMethodTable ( const LOCKTAG locktag)

Definition at line 539 of file lock.c.

References Assert, fb(), lengthof, LockMethods, and LOCKTAG::locktag_lockmethodid.

Referenced by pg_blocking_pids().

◆ GetRunningTransactionLocks()

xl_standby_lock * GetRunningTransactionLocks ( int nlocks)

Definition at line 4171 of file lock.c.

4172{
4174 PROCLOCK *proclock;
4176 int i;
4177 int index;
4178 int els;
4179
4180 /*
4181 * Acquire lock on the entire shared lock data structure.
4182 *
4183 * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
4184 */
4185 for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
4187
4188 /* Now we can safely count the number of proclocks */
4190
4191 /*
4192 * Allocating enough space for all locks in the lock table is overkill,
4193 * but it's more convenient and faster than having to enlarge the array.
4194 */
4196
4197 /* Now scan the tables to copy the data */
4199
4200 /*
4201 * If lock is a currently granted AccessExclusiveLock then it will have
4202 * just one proclock holder, so locks are never accessed twice in this
4203 * particular case. Don't copy this code for use elsewhere because in the
4204 * general case this will give you duplicate locks when looking at
4205 * non-exclusive lock types.
4206 */
4207 index = 0;
4208 while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
4209 {
4210 /* make sure this definition matches the one used in LockAcquire */
4211 if ((proclock->holdMask & LOCKBIT_ON(AccessExclusiveLock)) &&
4213 {
4214 PGPROC *proc = proclock->tag.myProc;
4215 LOCK *lock = proclock->tag.myLock;
4216 TransactionId xid = proc->xid;
4217
4218 /*
4219 * Don't record locks for transactions if we know they have
4220 * already issued their WAL record for commit but not yet released
4221 * lock. It is still possible that we see locks held by already
4222 * complete transactions, if they haven't yet zeroed their xids.
4223 */
4224 if (!TransactionIdIsValid(xid))
4225 continue;
4226
4227 accessExclusiveLocks[index].xid = xid;
4230
4231 index++;
4232 }
4233 }
4234
4235 Assert(index <= els);
4236
4237 /*
4238 * And release locks. We do this in reverse order for two reasons: (1)
4239 * Anyone else who needs more than one of the locks will be trying to lock
4240 * them in increasing order; we don't want to release the other process
4241 * until it can get all the locks it needs. (2) This avoids O(N^2)
4242 * behavior inside LWLockRelease.
4243 */
4244 for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
4246
4247 *nlocks = index;
4248 return accessExclusiveLocks;
4249}

References AccessExclusiveLock, Assert, fb(), hash_get_num_entries(), hash_seq_init(), hash_seq_search(), PROCLOCK::holdMask, i, LOCKBIT_ON, LockHashPartitionLockByIndex, LockMethodProcLockHash, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG_RELATION, LOCKTAG::locktag_type, LW_SHARED, LWLockAcquire(), LWLockRelease(), PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, NUM_LOCK_PARTITIONS, palloc(), LOCK::tag, PROCLOCK::tag, TransactionIdIsValid, and PGPROC::xid.

Referenced by LogStandbySnapshot().

◆ GetSingleProcBlockerStatusData()

static void GetSingleProcBlockerStatusData ( PGPROC blocked_proc,
BlockedProcsData data 
)
static

Definition at line 4076 of file lock.c.

4077{
4078 LOCK *theLock = blocked_proc->waitLock;
4083 int queue_size;
4084
4085 /* Nothing to do if this proc is not blocked */
4086 if (theLock == NULL)
4087 return;
4088
4089 /* Set up a procs[] element */
4090 bproc = &data->procs[data->nprocs++];
4091 bproc->pid = blocked_proc->pid;
4092 bproc->first_lock = data->nlocks;
4093 bproc->first_waiter = data->npids;
4094
4095 /*
4096 * We may ignore the proc's fast-path arrays, since nothing in those could
4097 * be related to a contended lock.
4098 */
4099
4100 /* Collect all PROCLOCKs associated with theLock */
4101 dlist_foreach(proclock_iter, &theLock->procLocks)
4102 {
4103 PROCLOCK *proclock =
4104 dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
4105 PGPROC *proc = proclock->tag.myProc;
4106 LOCK *lock = proclock->tag.myLock;
4108
4109 if (data->nlocks >= data->maxlocks)
4110 {
4111 data->maxlocks += MaxBackends;
4112 data->locks = (LockInstanceData *)
4113 repalloc(data->locks, sizeof(LockInstanceData) * data->maxlocks);
4114 }
4115
4116 instance = &data->locks[data->nlocks];
4117 memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
4118 instance->holdMask = proclock->holdMask;
4119 if (proc->waitLock == lock)
4120 instance->waitLockMode = proc->waitLockMode;
4121 else
4122 instance->waitLockMode = NoLock;
4123 instance->vxid.procNumber = proc->vxid.procNumber;
4124 instance->vxid.localTransactionId = proc->vxid.lxid;
4125 instance->pid = proc->pid;
4126 instance->leaderPid = proclock->groupLeader->pid;
4127 instance->fastpath = false;
4128 data->nlocks++;
4129 }
4130
4131 /* Enlarge waiter_pids[] if it's too small to hold all wait queue PIDs */
4132 waitQueue = &(theLock->waitProcs);
4133 queue_size = dclist_count(waitQueue);
4134
4135 if (queue_size > data->maxpids - data->npids)
4136 {
4137 data->maxpids = Max(data->maxpids + MaxBackends,
4138 data->npids + queue_size);
4139 data->waiter_pids = (int *) repalloc(data->waiter_pids,
4140 sizeof(int) * data->maxpids);
4141 }
4142
4143 /* Collect PIDs from the lock's wait queue, stopping at blocked_proc */
4145 {
4147
4149 break;
4150 data->waiter_pids[data->npids++] = queued_proc->pid;
4151 queued_proc = (PGPROC *) queued_proc->links.next;
4152 }
4153
4154 bproc->num_locks = data->nlocks - bproc->first_lock;
4155 bproc->num_waiters = data->npids - bproc->first_waiter;
4156}

References data, dclist_count(), dclist_foreach, dlist_container, dlist_foreach, fb(), PROCLOCK::groupLeader, PROCLOCK::holdMask, links, PGPROC::lxid, Max, MaxBackends, PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, NoLock, PGPROC::pid, PGPROC::procNumber, repalloc(), LOCK::tag, PROCLOCK::tag, PGPROC::vxid, PGPROC::waitLock, and PGPROC::waitLockMode.

Referenced by GetBlockerStatusData().

◆ GrantAwaitedLock()

void GrantAwaitedLock ( void  )

Definition at line 1889 of file lock.c.

References awaitedLock, awaitedOwner, and GrantLockLocal().

Referenced by LockErrorCleanup().

◆ GrantLock()

void GrantLock ( LOCK lock,
PROCLOCK proclock,
LOCKMODE  lockmode 
)

Definition at line 1658 of file lock.c.

1659{
1660 lock->nGranted++;
1661 lock->granted[lockmode]++;
1662 lock->grantMask |= LOCKBIT_ON(lockmode);
1663 if (lock->granted[lockmode] == lock->requested[lockmode])
1664 lock->waitMask &= LOCKBIT_OFF(lockmode);
1665 proclock->holdMask |= LOCKBIT_ON(lockmode);
1666 LOCK_PRINT("GrantLock", lock, lockmode);
1667 Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
1668 Assert(lock->nGranted <= lock->nRequested);
1669}

References Assert, LOCK::granted, LOCK::grantMask, PROCLOCK::holdMask, LOCK_PRINT, LOCKBIT_OFF, LOCKBIT_ON, LOCK::nGranted, LOCK::nRequested, LOCK::requested, and LOCK::waitMask.

Referenced by FastPathGetRelationLockEntry(), FastPathTransferRelationLocks(), JoinWaitQueue(), lock_twophase_recover(), LockAcquireExtended(), ProcLockWakeup(), and VirtualXactLock().

◆ GrantLockLocal()

static void GrantLockLocal ( LOCALLOCK locallock,
ResourceOwner  owner 
)
static

Definition at line 1792 of file lock.c.

1793{
1794 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
1795 int i;
1796
1797 Assert(locallock->numLockOwners < locallock->maxLockOwners);
1798 /* Count the total */
1799 locallock->nLocks++;
1800 /* Count the per-owner lock */
1801 for (i = 0; i < locallock->numLockOwners; i++)
1802 {
1803 if (lockOwners[i].owner == owner)
1804 {
1805 lockOwners[i].nLocks++;
1806 return;
1807 }
1808 }
1809 lockOwners[i].owner = owner;
1810 lockOwners[i].nLocks = 1;
1811 locallock->numLockOwners++;
1812 if (owner != NULL)
1814
1815 /* Indicate that the lock is acquired for certain types of locks. */
1817}

References Assert, CheckAndSetLockHeld(), fb(), i, LOCALLOCKOWNER::nLocks, LOCALLOCKOWNER::owner, and ResourceOwnerRememberLock().

Referenced by GrantAwaitedLock(), and LockAcquireExtended().

◆ InitLockManagerAccess()

void InitLockManagerAccess ( void  )

Definition at line 505 of file lock.c.

506{
507 /*
508 * Allocate non-shared hash table for LOCALLOCK structs. This stores lock
509 * counts and resource owner information.
510 */
511 HASHCTL info;
512
513 info.keysize = sizeof(LOCALLOCKTAG);
514 info.entrysize = sizeof(LOCALLOCK);
515
516 LockMethodLocalHash = hash_create("LOCALLOCK hash",
517 16,
518 &info,
520}

References HASHCTL::entrysize, HASH_BLOBS, hash_create(), HASH_ELEM, HASHCTL::keysize, and LockMethodLocalHash.

Referenced by BaseInit().

◆ lock_twophase_postabort()

void lock_twophase_postabort ( FullTransactionId  fxid,
uint16  info,
void recdata,
uint32  len 
)

Definition at line 4596 of file lock.c.

4598{
4599 lock_twophase_postcommit(fxid, info, recdata, len);
4600}

References fb(), len, and lock_twophase_postcommit().

◆ lock_twophase_postcommit()

void lock_twophase_postcommit ( FullTransactionId  fxid,
uint16  info,
void recdata,
uint32  len 
)

◆ lock_twophase_recover()

void lock_twophase_recover ( FullTransactionId  fxid,
uint16  info,
void recdata,
uint32  len 
)

Definition at line 4357 of file lock.c.

4359{
4361 PGPROC *proc = TwoPhaseGetDummyProc(fxid, false);
4362 LOCKTAG *locktag;
4363 LOCKMODE lockmode;
4365 LOCK *lock;
4366 PROCLOCK *proclock;
4368 bool found;
4369 uint32 hashcode;
4371 int partition;
4374
4375 Assert(len == sizeof(TwoPhaseLockRecord));
4376 locktag = &rec->locktag;
4377 lockmode = rec->lockmode;
4379
4381 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4383
4384 hashcode = LockTagHashCode(locktag);
4385 partition = LockHashPartition(hashcode);
4387
4389
4390 /*
4391 * Find or create a lock with this tag.
4392 */
4394 locktag,
4395 hashcode,
4397 &found);
4398 if (!lock)
4399 {
4401 ereport(ERROR,
4403 errmsg("out of shared memory"),
4404 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
4405 }
4406
4407 /*
4408 * if it's a new lock object, initialize it
4409 */
4410 if (!found)
4411 {
4412 lock->grantMask = 0;
4413 lock->waitMask = 0;
4414 dlist_init(&lock->procLocks);
4415 dclist_init(&lock->waitProcs);
4416 lock->nRequested = 0;
4417 lock->nGranted = 0;
4418 MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
4419 MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
4420 LOCK_PRINT("lock_twophase_recover: new", lock, lockmode);
4421 }
4422 else
4423 {
4424 LOCK_PRINT("lock_twophase_recover: found", lock, lockmode);
4425 Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
4426 Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
4427 Assert(lock->nGranted <= lock->nRequested);
4428 }
4429
4430 /*
4431 * Create the hash key for the proclock table.
4432 */
4433 proclocktag.myLock = lock;
4434 proclocktag.myProc = proc;
4435
4437
4438 /*
4439 * Find or create a proclock entry with this tag
4440 */
4442 &proclocktag,
4445 &found);
4446 if (!proclock)
4447 {
4448 /* Oops, not enough shmem for the proclock */
4449 if (lock->nRequested == 0)
4450 {
4451 /*
4452 * There are no other requestors of this lock, so garbage-collect
4453 * the lock object. We *must* do this to avoid a permanent leak
4454 * of shared memory, because there won't be anything to cause
4455 * anyone to release the lock object later.
4456 */
4459 &(lock->tag),
4460 hashcode,
4462 NULL))
4463 elog(PANIC, "lock table corrupted");
4464 }
4466 ereport(ERROR,
4468 errmsg("out of shared memory"),
4469 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
4470 }
4471
4472 /*
4473 * If new, initialize the new entry
4474 */
4475 if (!found)
4476 {
4477 Assert(proc->lockGroupLeader == NULL);
4478 proclock->groupLeader = proc;
4479 proclock->holdMask = 0;
4480 proclock->releaseMask = 0;
4481 /* Add proclock to appropriate lists */
4482 dlist_push_tail(&lock->procLocks, &proclock->lockLink);
4484 &proclock->procLink);
4485 PROCLOCK_PRINT("lock_twophase_recover: new", proclock);
4486 }
4487 else
4488 {
4489 PROCLOCK_PRINT("lock_twophase_recover: found", proclock);
4490 Assert((proclock->holdMask & ~lock->grantMask) == 0);
4491 }
4492
4493 /*
4494 * lock->nRequested and lock->requested[] count the total number of
4495 * requests, whether granted or waiting, so increment those immediately.
4496 */
4497 lock->nRequested++;
4498 lock->requested[lockmode]++;
4499 Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
4500
4501 /*
4502 * We shouldn't already hold the desired lock.
4503 */
4504 if (proclock->holdMask & LOCKBIT_ON(lockmode))
4505 elog(ERROR, "lock %s on object %u/%u/%u is already held",
4506 lockMethodTable->lockModeNames[lockmode],
4507 lock->tag.locktag_field1, lock->tag.locktag_field2,
4508 lock->tag.locktag_field3);
4509
4510 /*
4511 * We ignore any possible conflicts and just grant ourselves the lock. Not
4512 * only because we don't bother, but also to avoid deadlocks when
4513 * switching from standby to normal mode. See function comment.
4514 */
4515 GrantLock(lock, proclock, lockmode);
4516
4517 /*
4518 * Bump strong lock count, to make sure any fast-path lock requests won't
4519 * be granted without consulting the primary lock table.
4520 */
4521 if (ConflictsWithRelationFastPath(&lock->tag, lockmode))
4522 {
4524
4528 }
4529
4531}

References Assert, ConflictsWithRelationFastPath, FastPathStrongRelationLockData::count, dclist_init(), dlist_init(), dlist_is_empty(), dlist_push_tail(), elog, ereport, errcode(), errhint(), errmsg(), ERROR, FastPathStrongLockHashPartition, FastPathStrongRelationLocks, fb(), LOCK::granted, GrantLock(), LOCK::grantMask, PROCLOCK::groupLeader, HASH_ENTER_NULL, HASH_REMOVE, hash_search_with_hash_value(), PROCLOCK::holdMask, len, lengthof, LOCK_PRINT, LOCKBIT_ON, PGPROC::lockGroupLeader, LockHashPartition, LockHashPartitionLock, PROCLOCK::lockLink, LockMethodLockHash, LockMethodProcLockHash, LockMethods, TwoPhaseLockRecord::lockmode, TwoPhaseLockRecord::locktag, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_field3, LOCKTAG::locktag_lockmethodid, LockTagHashCode(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), MAX_LOCKMODES, MemSet, FastPathStrongRelationLockData::mutex, PGPROC::myProcLocks, LOCK::nGranted, LOCK::nRequested, PANIC, PROCLOCK::procLink, PROCLOCK_PRINT, ProcLockHashCode(), LOCK::procLocks, PROCLOCK::releaseMask, LOCK::requested, SpinLockAcquire, SpinLockRelease, LOCK::tag, TwoPhaseGetDummyProc(), LOCK::waitMask, and LOCK::waitProcs.

◆ lock_twophase_standby_recover()

void lock_twophase_standby_recover ( FullTransactionId  fxid,
uint16  info,
void recdata,
uint32  len 
)

Definition at line 4538 of file lock.c.

4540{
4542 LOCKTAG *locktag;
4543 LOCKMODE lockmode;
4545
4546 Assert(len == sizeof(TwoPhaseLockRecord));
4547 locktag = &rec->locktag;
4548 lockmode = rec->lockmode;
4550
4552 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4553
4554 if (lockmode == AccessExclusiveLock &&
4555 locktag->locktag_type == LOCKTAG_RELATION)
4556 {
4558 locktag->locktag_field1 /* dboid */ ,
4559 locktag->locktag_field2 /* reloid */ );
4560 }
4561}

References AccessExclusiveLock, Assert, elog, ERROR, fb(), len, lengthof, LockMethods, TwoPhaseLockRecord::lockmode, TwoPhaseLockRecord::locktag, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_lockmethodid, LOCKTAG_RELATION, LOCKTAG::locktag_type, StandbyAcquireAccessExclusiveLock(), and XidFromFullTransactionId.

◆ LockAcquire()

◆ LockAcquireExtended()

LockAcquireResult LockAcquireExtended ( const LOCKTAG locktag,
LOCKMODE  lockmode,
bool  sessionLock,
bool  dontWait,
bool  reportMemoryError,
LOCALLOCK **  locallockp,
bool  logLockFailure 
)

Definition at line 836 of file lock.c.

843{
848 LOCK *lock;
849 PROCLOCK *proclock;
850 bool found;
851 ResourceOwner owner;
852 uint32 hashcode;
854 bool found_conflict;
856 bool log_lock = false;
857
859 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
862 elog(ERROR, "unrecognized lock mode: %d", lockmode);
863
864 if (RecoveryInProgress() && !InRecovery &&
865 (locktag->locktag_type == LOCKTAG_OBJECT ||
866 locktag->locktag_type == LOCKTAG_RELATION) &&
867 lockmode > RowExclusiveLock)
870 errmsg("cannot acquire lock mode %s on database objects while recovery is in progress",
871 lockMethodTable->lockModeNames[lockmode]),
872 errhint("Only RowExclusiveLock or less can be acquired on database objects during recovery.")));
873
874#ifdef LOCK_DEBUG
875 if (LOCK_DEBUG_ENABLED(locktag))
876 elog(LOG, "LockAcquire: lock [%u,%u] %s",
877 locktag->locktag_field1, locktag->locktag_field2,
878 lockMethodTable->lockModeNames[lockmode]);
879#endif
880
881 /* Identify owner for lock */
882 if (sessionLock)
883 owner = NULL;
884 else
885 owner = CurrentResourceOwner;
886
887 /*
888 * Find or create a LOCALLOCK entry for this lock and lockmode
889 */
890 MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
891 localtag.lock = *locktag;
892 localtag.mode = lockmode;
893
895 &localtag,
896 HASH_ENTER, &found);
897
898 /*
899 * if it's a new locallock object, initialize it
900 */
901 if (!found)
902 {
903 locallock->lock = NULL;
904 locallock->proclock = NULL;
905 locallock->hashcode = LockTagHashCode(&(localtag.lock));
906 locallock->nLocks = 0;
907 locallock->holdsStrongLockCount = false;
908 locallock->lockCleared = false;
909 locallock->numLockOwners = 0;
910 locallock->maxLockOwners = 8;
911 locallock->lockOwners = NULL; /* in case next line fails */
912 locallock->lockOwners = (LOCALLOCKOWNER *)
914 locallock->maxLockOwners * sizeof(LOCALLOCKOWNER));
915 }
916 else
917 {
918 /* Make sure there will be room to remember the lock */
919 if (locallock->numLockOwners >= locallock->maxLockOwners)
920 {
921 int newsize = locallock->maxLockOwners * 2;
922
923 locallock->lockOwners = (LOCALLOCKOWNER *)
924 repalloc(locallock->lockOwners,
925 newsize * sizeof(LOCALLOCKOWNER));
926 locallock->maxLockOwners = newsize;
927 }
928 }
929 hashcode = locallock->hashcode;
930
931 if (locallockp)
933
934 /*
935 * If we already hold the lock, we can just increase the count locally.
936 *
937 * If lockCleared is already set, caller need not worry about absorbing
938 * sinval messages related to the lock's object.
939 */
940 if (locallock->nLocks > 0)
941 {
943 if (locallock->lockCleared)
945 else
947 }
948
949 /*
950 * We don't acquire any other heavyweight lock while holding the relation
951 * extension lock. We do allow to acquire the same relation extension
952 * lock more than once but that case won't reach here.
953 */
955
956 /*
957 * Prepare to emit a WAL record if acquisition of this lock needs to be
958 * replayed in a standby server.
959 *
960 * Here we prepare to log; after lock is acquired we'll issue log record.
961 * This arrangement simplifies error recovery in case the preparation step
962 * fails.
963 *
964 * Only AccessExclusiveLocks can conflict with lock types that read-only
965 * transactions can acquire in a standby server. Make sure this definition
966 * matches the one in GetRunningTransactionLocks().
967 */
968 if (lockmode >= AccessExclusiveLock &&
969 locktag->locktag_type == LOCKTAG_RELATION &&
972 {
974 log_lock = true;
975 }
976
977 /*
978 * Attempt to take lock via fast path, if eligible. But if we remember
979 * having filled up the fast path array, we don't attempt to make any
980 * further use of it until we release some locks. It's possible that some
981 * other backend has transferred some of those locks to the shared hash
982 * table, leaving space free, but it's not worth acquiring the LWLock just
983 * to check. It's also possible that we're acquiring a second or third
984 * lock type on a relation we have already locked using the fast-path, but
985 * for now we don't worry about that case either.
986 */
987 if (EligibleForRelationFastPath(locktag, lockmode) &&
989 {
991 bool acquired;
992
993 /*
994 * LWLockAcquire acts as a memory sequencing point, so it's safe to
995 * assume that any strong locker whose increment to
996 * FastPathStrongRelationLocks->counts becomes visible after we test
997 * it has yet to begin to transfer fast-path locks.
998 */
1001 acquired = false;
1002 else
1004 lockmode);
1006 if (acquired)
1007 {
1008 /*
1009 * The locallock might contain stale pointers to some old shared
1010 * objects; we MUST reset these to null before considering the
1011 * lock to be acquired via fast-path.
1012 */
1013 locallock->lock = NULL;
1014 locallock->proclock = NULL;
1015 GrantLockLocal(locallock, owner);
1016 return LOCKACQUIRE_OK;
1017 }
1018 }
1019
1020 /*
1021 * If this lock could potentially have been taken via the fast-path by
1022 * some other backend, we must (temporarily) disable further use of the
1023 * fast-path for this lock tag, and migrate any locks already taken via
1024 * this method to the main lock table.
1025 */
1026 if (ConflictsWithRelationFastPath(locktag, lockmode))
1027 {
1029
1032 hashcode))
1033 {
1035 if (locallock->nLocks == 0)
1037 if (locallockp)
1038 *locallockp = NULL;
1040 ereport(ERROR,
1042 errmsg("out of shared memory"),
1043 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
1044 else
1045 return LOCKACQUIRE_NOT_AVAIL;
1046 }
1047 }
1048
1049 /*
1050 * We didn't find the lock in our LOCALLOCK table, and we didn't manage to
1051 * take it via the fast-path, either, so we've got to mess with the shared
1052 * lock table.
1053 */
1055
1057
1058 /*
1059 * Find or create lock and proclock entries with this tag
1060 *
1061 * Note: if the locallock object already existed, it might have a pointer
1062 * to the lock already ... but we should not assume that that pointer is
1063 * valid, since a lock object with zero hold and request counts can go
1064 * away anytime. So we have to use SetupLockInTable() to recompute the
1065 * lock and proclock pointers, even if they're already set.
1066 */
1067 proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
1068 hashcode, lockmode);
1069 if (!proclock)
1070 {
1073 if (locallock->nLocks == 0)
1075 if (locallockp)
1076 *locallockp = NULL;
1078 ereport(ERROR,
1080 errmsg("out of shared memory"),
1081 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
1082 else
1083 return LOCKACQUIRE_NOT_AVAIL;
1084 }
1085 locallock->proclock = proclock;
1086 lock = proclock->tag.myLock;
1087 locallock->lock = lock;
1088
1089 /*
1090 * If lock requested conflicts with locks requested by waiters, must join
1091 * wait queue. Otherwise, check for conflict with already-held locks.
1092 * (That's last because most complex check.)
1093 */
1094 if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
1095 found_conflict = true;
1096 else
1098 lock, proclock);
1099
1100 if (!found_conflict)
1101 {
1102 /* No conflict with held or previously requested locks */
1103 GrantLock(lock, proclock, lockmode);
1105 }
1106 else
1107 {
1108 /*
1109 * Join the lock's wait queue. We call this even in the dontWait
1110 * case, because JoinWaitQueue() may discover that we can acquire the
1111 * lock immediately after all.
1112 */
1114 }
1115
1117 {
1118 /*
1119 * We're not getting the lock because a deadlock was detected already
1120 * while trying to join the wait queue, or because we would have to
1121 * wait but the caller requested no blocking.
1122 *
1123 * Undo the changes to shared entries before releasing the partition
1124 * lock.
1125 */
1127
1128 if (proclock->holdMask == 0)
1129 {
1131
1133 hashcode);
1134 dlist_delete(&proclock->lockLink);
1135 dlist_delete(&proclock->procLink);
1137 &(proclock->tag),
1140 NULL))
1141 elog(PANIC, "proclock table corrupted");
1142 }
1143 else
1144 PROCLOCK_PRINT("LockAcquire: did not join wait queue", proclock);
1145 lock->nRequested--;
1146 lock->requested[lockmode]--;
1147 LOCK_PRINT("LockAcquire: did not join wait queue",
1148 lock, lockmode);
1149 Assert((lock->nRequested > 0) &&
1150 (lock->requested[lockmode] >= 0));
1151 Assert(lock->nGranted <= lock->nRequested);
1153 if (locallock->nLocks == 0)
1155
1156 if (dontWait)
1157 {
1158 /*
1159 * Log lock holders and waiters as a detail log message if
1160 * logLockFailure = true and lock acquisition fails with dontWait
1161 * = true
1162 */
1163 if (logLockFailure)
1164 {
1168 const char *modename;
1169 int lockHoldersNum = 0;
1170
1174
1175 DescribeLockTag(&buf, &locallock->tag.lock);
1176 modename = GetLockmodeName(locallock->tag.lock.locktag_lockmethodid,
1177 lockmode);
1178
1179 /* Gather a list of all lock holders and waiters */
1184
1185 ereport(LOG,
1186 (errmsg("process %d could not obtain %s on %s",
1187 MyProcPid, modename, buf.data),
1189 "Process holding the lock: %s, Wait queue: %s.",
1190 "Processes holding the lock: %s, Wait queue: %s.",
1192 lock_holders_sbuf.data,
1193 lock_waiters_sbuf.data)));
1194
1195 pfree(buf.data);
1198 }
1199 if (locallockp)
1200 *locallockp = NULL;
1201 return LOCKACQUIRE_NOT_AVAIL;
1202 }
1203 else
1204 {
1206 /* DeadLockReport() will not return */
1207 }
1208 }
1209
1210 /*
1211 * We are now in the lock queue, or the lock was already granted. If
1212 * queued, go to sleep.
1213 */
1215 {
1216 Assert(!dontWait);
1217 PROCLOCK_PRINT("LockAcquire: sleeping on lock", proclock);
1218 LOCK_PRINT("LockAcquire: sleeping on lock", lock, lockmode);
1220
1222
1223 /*
1224 * NOTE: do not do any material change of state between here and
1225 * return. All required changes in locktable state must have been
1226 * done when the lock was granted to us --- see notes in WaitOnLock.
1227 */
1228
1230 {
1231 /*
1232 * We failed as a result of a deadlock, see CheckDeadLock(). Quit
1233 * now.
1234 */
1235 Assert(!dontWait);
1237 /* DeadLockReport() will not return */
1238 }
1239 }
1240 else
1243
1244 /* The lock was granted to us. Update the local lock entry accordingly */
1245 Assert((proclock->holdMask & LOCKBIT_ON(lockmode)) != 0);
1246 GrantLockLocal(locallock, owner);
1247
1248 /*
1249 * Lock state is fully up-to-date now; if we error out after this, no
1250 * special error cleanup is required.
1251 */
1253
1254 /*
1255 * Emit a WAL record if acquisition of this lock needs to be replayed in a
1256 * standby server.
1257 */
1258 if (log_lock)
1259 {
1260 /*
1261 * Decode the locktag back to the original values, to avoid sending
1262 * lots of empty bytes with every message. See lock.h to check how a
1263 * locktag is defined for LOCKTAG_RELATION
1264 */
1266 locktag->locktag_field2);
1267 }
1268
1269 return LOCKACQUIRE_OK;
1270}

References AbortStrongLockAcquire(), AccessExclusiveLock, Assert, BeginStrongLockAcquire(), buf, ConflictsWithRelationFastPath, FastPathStrongRelationLockData::count, CurrentResourceOwner, DeadLockReport(), DescribeLockTag(), dlist_delete(), EligibleForRelationFastPath, elog, ereport, errcode(), errdetail_log_plural(), errhint(), errmsg(), ERROR, FAST_PATH_REL_GROUP, FastPathGrantRelationLock(), FastPathLocalUseCounts, FastPathStrongLockHashPartition, FastPathStrongRelationLocks, FastPathTransferRelationLocks(), fb(), FinishStrongLockAcquire(), FP_LOCK_SLOTS_PER_GROUP, PGPROC::fpInfoLock, GetLockHoldersAndWaiters(), GetLockmodeName(), GrantLock(), GrantLockLocal(), HASH_ENTER, HASH_REMOVE, hash_search(), hash_search_with_hash_value(), PROCLOCK::holdMask, initStringInfo(), InRecovery, JoinWaitQueue(), lengthof, LOCK_PRINT, LOCKACQUIRE_ALREADY_CLEAR, LOCKACQUIRE_ALREADY_HELD, LOCKACQUIRE_NOT_AVAIL, LOCKACQUIRE_OK, LOCKBIT_ON, LockCheckConflicts(), LockHashPartitionLock, PROCLOCK::lockLink, LockMethodLocalHash, LockMethodProcLockHash, LockMethods, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_lockmethodid, LOCKTAG_OBJECT, LOCKTAG_RELATION, LOCKTAG::locktag_type, LockTagHashCode(), LOG, LogAccessExclusiveLock(), LogAccessExclusiveLockPrepare(), LW_EXCLUSIVE, LW_SHARED, LWLockAcquire(), LWLockRelease(), MemoryContextAlloc(), MemSet, PROCLOCKTAG::myLock, MyProc, MyProcPid, LOCK::nGranted, LOCK::nRequested, PANIC, pfree(), PROC_WAIT_STATUS_ERROR, PROC_WAIT_STATUS_OK, PROC_WAIT_STATUS_WAITING, PROCLOCK::procLink, PROCLOCK_PRINT, ProcLockHashCode(), RecoveryInProgress(), RemoveLocalLock(), repalloc(), LOCK::requested, RowExclusiveLock, SetupLockInTable(), PROCLOCK::tag, TopMemoryContext, LOCK::waitMask, WaitOnLock(), and XLogStandbyInfoActive.

Referenced by ConditionalLockDatabaseObject(), ConditionalLockRelation(), ConditionalLockRelationOid(), ConditionalLockSharedObject(), ConditionalLockTuple(), ConditionalXactLockTableWait(), LockAcquire(), LockRelation(), LockRelationId(), and LockRelationOid().

◆ LockCheckConflicts()

bool LockCheckConflicts ( LockMethod  lockMethodTable,
LOCKMODE  lockmode,
LOCK lock,
PROCLOCK proclock 
)

Definition at line 1529 of file lock.c.

1533{
1534 int numLockModes = lockMethodTable->numLockModes;
1536 int conflictMask = lockMethodTable->conflictTab[lockmode];
1540 int i;
1541
1542 /*
1543 * first check for global conflicts: If no locks conflict with my request,
1544 * then I get the lock.
1545 *
1546 * Checking for conflict: lock->grantMask represents the types of
1547 * currently held locks. conflictTable[lockmode] has a bit set for each
1548 * type of lock that conflicts with request. Bitwise compare tells if
1549 * there is a conflict.
1550 */
1551 if (!(conflictMask & lock->grantMask))
1552 {
1553 PROCLOCK_PRINT("LockCheckConflicts: no conflict", proclock);
1554 return false;
1555 }
1556
1557 /*
1558 * Rats. Something conflicts. But it could still be my own lock, or a
1559 * lock held by another member of my locking group. First, figure out how
1560 * many conflicts remain after subtracting out any locks I hold myself.
1561 */
1562 myLocks = proclock->holdMask;
1563 for (i = 1; i <= numLockModes; i++)
1564 {
1565 if ((conflictMask & LOCKBIT_ON(i)) == 0)
1566 {
1567 conflictsRemaining[i] = 0;
1568 continue;
1569 }
1570 conflictsRemaining[i] = lock->granted[i];
1571 if (myLocks & LOCKBIT_ON(i))
1574 }
1575
1576 /* If no conflicts remain, we get the lock. */
1577 if (totalConflictsRemaining == 0)
1578 {
1579 PROCLOCK_PRINT("LockCheckConflicts: resolved (simple)", proclock);
1580 return false;
1581 }
1582
1583 /* If no group locking, it's definitely a conflict. */
1584 if (proclock->groupLeader == MyProc && MyProc->lockGroupLeader == NULL)
1585 {
1586 Assert(proclock->tag.myProc == MyProc);
1587 PROCLOCK_PRINT("LockCheckConflicts: conflicting (simple)",
1588 proclock);
1589 return true;
1590 }
1591
1592 /*
1593 * The relation extension lock conflict even between the group members.
1594 */
1596 {
1597 PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)",
1598 proclock);
1599 return true;
1600 }
1601
1602 /*
1603 * Locks held in conflicting modes by members of our own lock group are
1604 * not real conflicts; we can subtract those out and see if we still have
1605 * a conflict. This is O(N) in the number of processes holding or
1606 * awaiting locks on this object. We could improve that by making the
1607 * shared memory state more complex (and larger) but it doesn't seem worth
1608 * it.
1609 */
1611 {
1613 dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
1614
1615 if (proclock != otherproclock &&
1616 proclock->groupLeader == otherproclock->groupLeader &&
1617 (otherproclock->holdMask & conflictMask) != 0)
1618 {
1619 int intersectMask = otherproclock->holdMask & conflictMask;
1620
1621 for (i = 1; i <= numLockModes; i++)
1622 {
1623 if ((intersectMask & LOCKBIT_ON(i)) != 0)
1624 {
1625 if (conflictsRemaining[i] <= 0)
1626 elog(PANIC, "proclocks held do not match lock");
1629 }
1630 }
1631
1632 if (totalConflictsRemaining == 0)
1633 {
1634 PROCLOCK_PRINT("LockCheckConflicts: resolved (group)",
1635 proclock);
1636 return false;
1637 }
1638 }
1639 }
1640
1641 /* Nope, it's a real conflict. */
1642 PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)", proclock);
1643 return true;
1644}

References Assert, dlist_container, dlist_foreach, elog, fb(), LOCK::granted, LOCK::grantMask, PROCLOCK::groupLeader, PROCLOCK::holdMask, i, LOCK_LOCKTAG, LOCKBIT_ON, PGPROC::lockGroupLeader, LOCKTAG_RELATION_EXTEND, MAX_LOCKMODES, MyProc, PROCLOCKTAG::myProc, PANIC, PROCLOCK_PRINT, LOCK::procLocks, and PROCLOCK::tag.

Referenced by JoinWaitQueue(), LockAcquireExtended(), and ProcLockWakeup().

◆ LockHasWaiters()

bool LockHasWaiters ( const LOCKTAG locktag,
LOCKMODE  lockmode,
bool  sessionLock 
)

Definition at line 696 of file lock.c.

697{
702 LOCK *lock;
703 PROCLOCK *proclock;
705 bool hasWaiters = false;
706
708 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
711 elog(ERROR, "unrecognized lock mode: %d", lockmode);
712
713#ifdef LOCK_DEBUG
714 if (LOCK_DEBUG_ENABLED(locktag))
715 elog(LOG, "LockHasWaiters: lock [%u,%u] %s",
716 locktag->locktag_field1, locktag->locktag_field2,
717 lockMethodTable->lockModeNames[lockmode]);
718#endif
719
720 /*
721 * Find the LOCALLOCK entry for this lock and lockmode
722 */
723 MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
724 localtag.lock = *locktag;
725 localtag.mode = lockmode;
726
728 &localtag,
729 HASH_FIND, NULL);
730
731 /*
732 * let the caller print its own error message, too. Do not ereport(ERROR).
733 */
734 if (!locallock || locallock->nLocks <= 0)
735 {
736 elog(WARNING, "you don't own a lock of type %s",
737 lockMethodTable->lockModeNames[lockmode]);
738 return false;
739 }
740
741 /*
742 * Check the shared lock table.
743 */
745
747
748 /*
749 * We don't need to re-find the lock or proclock, since we kept their
750 * addresses in the locallock table, and they couldn't have been removed
751 * while we were holding a lock on them.
752 */
753 lock = locallock->lock;
754 LOCK_PRINT("LockHasWaiters: found", lock, lockmode);
755 proclock = locallock->proclock;
756 PROCLOCK_PRINT("LockHasWaiters: found", proclock);
757
758 /*
759 * Double-check that we are actually holding a lock of the type we want to
760 * release.
761 */
762 if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
763 {
764 PROCLOCK_PRINT("LockHasWaiters: WRONGTYPE", proclock);
766 elog(WARNING, "you don't own a lock of type %s",
767 lockMethodTable->lockModeNames[lockmode]);
769 return false;
770 }
771
772 /*
773 * Do the checking.
774 */
775 if ((lockMethodTable->conflictTab[lockmode] & lock->waitMask) != 0)
776 hasWaiters = true;
777
779
780 return hasWaiters;
781}

References elog, ERROR, fb(), HASH_FIND, hash_search(), PROCLOCK::holdMask, lengthof, LOCK_PRINT, LOCKBIT_ON, LockHashPartitionLock, LockMethodLocalHash, LockMethods, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_lockmethodid, LOG, LW_SHARED, LWLockAcquire(), LWLockRelease(), MemSet, PROCLOCK_PRINT, RemoveLocalLock(), LOCK::waitMask, and WARNING.

Referenced by LockHasWaitersRelation().

◆ LockHeldByMe()

bool LockHeldByMe ( const LOCKTAG locktag,
LOCKMODE  lockmode,
bool  orstronger 
)

Definition at line 643 of file lock.c.

645{
648
649 /*
650 * See if there is a LOCALLOCK entry for this lock and lockmode
651 */
652 MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
653 localtag.lock = *locktag;
654 localtag.mode = lockmode;
655
657 &localtag,
658 HASH_FIND, NULL);
659
660 if (locallock && locallock->nLocks > 0)
661 return true;
662
663 if (orstronger)
664 {
666
667 for (slockmode = lockmode + 1;
669 slockmode++)
670 {
671 if (LockHeldByMe(locktag, slockmode, false))
672 return true;
673 }
674 }
675
676 return false;
677}

References fb(), HASH_FIND, hash_search(), LockHeldByMe(), LockMethodLocalHash, MaxLockMode, and MemSet.

Referenced by CheckRelationLockedByMe(), CheckRelationOidLockedByMe(), LockHeldByMe(), and UpdateSubscriptionRelState().

◆ LockManagerShmemInit()

void LockManagerShmemInit ( void  )

Definition at line 444 of file lock.c.

445{
446 HASHCTL info;
449 bool found;
450
451 /*
452 * Compute init/max size to request for lock hashtables. Note these
453 * calculations must agree with LockManagerShmemSize!
454 */
457
458 /*
459 * Allocate hash table for LOCK structs. This stores per-locked-object
460 * information.
461 */
462 info.keysize = sizeof(LOCKTAG);
463 info.entrysize = sizeof(LOCK);
465
466 LockMethodLockHash = ShmemInitHash("LOCK hash",
469 &info,
471
472 /* Assume an average of 2 holders per lock */
473 max_table_size *= 2;
474 init_table_size *= 2;
475
476 /*
477 * Allocate hash table for PROCLOCK structs. This stores
478 * per-lock-per-holder information.
479 */
480 info.keysize = sizeof(PROCLOCKTAG);
481 info.entrysize = sizeof(PROCLOCK);
482 info.hash = proclock_hash;
484
485 LockMethodProcLockHash = ShmemInitHash("PROCLOCK hash",
488 &info,
490
491 /*
492 * Allocate fast-path structures.
493 */
495 ShmemInitStruct("Fast Path Strong Relation Lock Data",
496 sizeof(FastPathStrongRelationLockData), &found);
497 if (!found)
499}

References HASHCTL::entrysize, FastPathStrongRelationLocks, fb(), HASHCTL::hash, HASH_BLOBS, HASH_ELEM, HASH_FUNCTION, HASH_PARTITION, HASHCTL::keysize, LockMethodLockHash, LockMethodProcLockHash, FastPathStrongRelationLockData::mutex, NLOCKENTS, NUM_LOCK_PARTITIONS, HASHCTL::num_partitions, proclock_hash(), ShmemInitHash(), ShmemInitStruct(), and SpinLockInit.

Referenced by CreateOrAttachShmemStructs().

◆ LockManagerShmemSize()

Size LockManagerShmemSize ( void  )

Definition at line 3756 of file lock.c.

3757{
3758 Size size = 0;
3759 long max_table_size;
3760
3761 /* lock hash table */
3763 size = add_size(size, hash_estimate_size(max_table_size, sizeof(LOCK)));
3764
3765 /* proclock hash table */
3766 max_table_size *= 2;
3767 size = add_size(size, hash_estimate_size(max_table_size, sizeof(PROCLOCK)));
3768
3769 /*
3770 * Since NLOCKENTS is only an estimate, add 10% safety margin.
3771 */
3772 size = add_size(size, size / 10);
3773
3774 return size;
3775}

References add_size(), fb(), hash_estimate_size(), and NLOCKENTS.

Referenced by CalculateShmemSize().

◆ LockReassignCurrentOwner()

void LockReassignCurrentOwner ( LOCALLOCK **  locallocks,
int  nlocks 
)

Definition at line 2706 of file lock.c.

2707{
2709
2710 Assert(parent != NULL);
2711
2712 if (locallocks == NULL)
2713 {
2714 HASH_SEQ_STATUS status;
2716
2718
2719 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2721 }
2722 else
2723 {
2724 int i;
2725
2726 for (i = nlocks - 1; i >= 0; i--)
2727 LockReassignOwner(locallocks[i], parent);
2728 }
2729}

References Assert, CurrentResourceOwner, fb(), hash_seq_init(), hash_seq_search(), i, LockMethodLocalHash, LockReassignOwner(), and ResourceOwnerGetParent().

Referenced by ResourceOwnerReleaseInternal().

◆ LockReassignOwner()

static void LockReassignOwner ( LOCALLOCK locallock,
ResourceOwner  parent 
)
static

Definition at line 2736 of file lock.c.

2737{
2738 LOCALLOCKOWNER *lockOwners;
2739 int i;
2740 int ic = -1;
2741 int ip = -1;
2742
2743 /*
2744 * Scan to see if there are any locks belonging to current owner or its
2745 * parent
2746 */
2747 lockOwners = locallock->lockOwners;
2748 for (i = locallock->numLockOwners - 1; i >= 0; i--)
2749 {
2750 if (lockOwners[i].owner == CurrentResourceOwner)
2751 ic = i;
2752 else if (lockOwners[i].owner == parent)
2753 ip = i;
2754 }
2755
2756 if (ic < 0)
2757 return; /* no current locks */
2758
2759 if (ip < 0)
2760 {
2761 /* Parent has no slot, so just give it the child's slot */
2762 lockOwners[ic].owner = parent;
2764 }
2765 else
2766 {
2767 /* Merge child's count with parent's */
2768 lockOwners[ip].nLocks += lockOwners[ic].nLocks;
2769 /* compact out unused slot */
2770 locallock->numLockOwners--;
2771 if (ic < locallock->numLockOwners)
2772 lockOwners[ic] = lockOwners[locallock->numLockOwners];
2773 }
2775}

References CurrentResourceOwner, fb(), i, LOCALLOCKOWNER::nLocks, LOCALLOCKOWNER::owner, ResourceOwnerForgetLock(), and ResourceOwnerRememberLock().

Referenced by LockReassignCurrentOwner().

◆ LockRefindAndRelease()

static void LockRefindAndRelease ( LockMethod  lockMethodTable,
PGPROC proc,
LOCKTAG locktag,
LOCKMODE  lockmode,
bool  decrement_strong_lock_count 
)
static

Definition at line 3284 of file lock.c.

3287{
3288 LOCK *lock;
3289 PROCLOCK *proclock;
3291 uint32 hashcode;
3294 bool wakeupNeeded;
3295
3296 hashcode = LockTagHashCode(locktag);
3298
3300
3301 /*
3302 * Re-find the lock object (it had better be there).
3303 */
3305 locktag,
3306 hashcode,
3307 HASH_FIND,
3308 NULL);
3309 if (!lock)
3310 elog(PANIC, "failed to re-find shared lock object");
3311
3312 /*
3313 * Re-find the proclock object (ditto).
3314 */
3315 proclocktag.myLock = lock;
3316 proclocktag.myProc = proc;
3317
3319
3321 &proclocktag,
3323 HASH_FIND,
3324 NULL);
3325 if (!proclock)
3326 elog(PANIC, "failed to re-find shared proclock object");
3327
3328 /*
3329 * Double-check that we are actually holding a lock of the type we want to
3330 * release.
3331 */
3332 if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
3333 {
3334 PROCLOCK_PRINT("lock_twophase_postcommit: WRONGTYPE", proclock);
3336 elog(WARNING, "you don't own a lock of type %s",
3337 lockMethodTable->lockModeNames[lockmode]);
3338 return;
3339 }
3340
3341 /*
3342 * Do the releasing. CleanUpLock will waken any now-wakable waiters.
3343 */
3344 wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
3345
3346 CleanUpLock(lock, proclock,
3347 lockMethodTable, hashcode,
3348 wakeupNeeded);
3349
3351
3352 /*
3353 * Decrement strong lock count. This logic is needed only for 2PC.
3354 */
3356 && ConflictsWithRelationFastPath(locktag, lockmode))
3357 {
3359
3364 }
3365}

References Assert, CleanUpLock(), ConflictsWithRelationFastPath, FastPathStrongRelationLockData::count, elog, FastPathStrongLockHashPartition, FastPathStrongRelationLocks, fb(), HASH_FIND, hash_search_with_hash_value(), PROCLOCK::holdMask, LOCKBIT_ON, LockHashPartitionLock, LockMethodLockHash, LockMethodProcLockHash, LockTagHashCode(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), FastPathStrongRelationLockData::mutex, PANIC, PROCLOCK_PRINT, ProcLockHashCode(), SpinLockAcquire, SpinLockRelease, UnGrantLock(), and WARNING.

Referenced by lock_twophase_postcommit(), LockReleaseAll(), and VirtualXactLockTableCleanup().

◆ LockRelease()

bool LockRelease ( const LOCKTAG locktag,
LOCKMODE  lockmode,
bool  sessionLock 
)

Definition at line 2102 of file lock.c.

2103{
2108 LOCK *lock;
2109 PROCLOCK *proclock;
2111 bool wakeupNeeded;
2112
2114 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2117 elog(ERROR, "unrecognized lock mode: %d", lockmode);
2118
2119#ifdef LOCK_DEBUG
2120 if (LOCK_DEBUG_ENABLED(locktag))
2121 elog(LOG, "LockRelease: lock [%u,%u] %s",
2122 locktag->locktag_field1, locktag->locktag_field2,
2123 lockMethodTable->lockModeNames[lockmode]);
2124#endif
2125
2126 /*
2127 * Find the LOCALLOCK entry for this lock and lockmode
2128 */
2129 MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
2130 localtag.lock = *locktag;
2131 localtag.mode = lockmode;
2132
2134 &localtag,
2135 HASH_FIND, NULL);
2136
2137 /*
2138 * let the caller print its own error message, too. Do not ereport(ERROR).
2139 */
2140 if (!locallock || locallock->nLocks <= 0)
2141 {
2142 elog(WARNING, "you don't own a lock of type %s",
2143 lockMethodTable->lockModeNames[lockmode]);
2144 return false;
2145 }
2146
2147 /*
2148 * Decrease the count for the resource owner.
2149 */
2150 {
2151 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
2152 ResourceOwner owner;
2153 int i;
2154
2155 /* Identify owner for lock */
2156 if (sessionLock)
2157 owner = NULL;
2158 else
2159 owner = CurrentResourceOwner;
2160
2161 for (i = locallock->numLockOwners - 1; i >= 0; i--)
2162 {
2163 if (lockOwners[i].owner == owner)
2164 {
2165 Assert(lockOwners[i].nLocks > 0);
2166 if (--lockOwners[i].nLocks == 0)
2167 {
2168 if (owner != NULL)
2170 /* compact out unused slot */
2171 locallock->numLockOwners--;
2172 if (i < locallock->numLockOwners)
2173 lockOwners[i] = lockOwners[locallock->numLockOwners];
2174 }
2175 break;
2176 }
2177 }
2178 if (i < 0)
2179 {
2180 /* don't release a lock belonging to another owner */
2181 elog(WARNING, "you don't own a lock of type %s",
2182 lockMethodTable->lockModeNames[lockmode]);
2183 return false;
2184 }
2185 }
2186
2187 /*
2188 * Decrease the total local count. If we're still holding the lock, we're
2189 * done.
2190 */
2191 locallock->nLocks--;
2192
2193 if (locallock->nLocks > 0)
2194 return true;
2195
2196 /*
2197 * At this point we can no longer suppose we are clear of invalidation
2198 * messages related to this lock. Although we'll delete the LOCALLOCK
2199 * object before any intentional return from this routine, it seems worth
2200 * the trouble to explicitly reset lockCleared right now, just in case
2201 * some error prevents us from deleting the LOCALLOCK.
2202 */
2203 locallock->lockCleared = false;
2204
2205 /* Attempt fast release of any lock eligible for the fast path. */
2206 if (EligibleForRelationFastPath(locktag, lockmode) &&
2208 {
2209 bool released;
2210
2211 /*
2212 * We might not find the lock here, even if we originally entered it
2213 * here. Another backend may have moved it to the main table.
2214 */
2217 lockmode);
2219 if (released)
2220 {
2222 return true;
2223 }
2224 }
2225
2226 /*
2227 * Otherwise we've got to mess with the shared lock table.
2228 */
2230
2232
2233 /*
2234 * Normally, we don't need to re-find the lock or proclock, since we kept
2235 * their addresses in the locallock table, and they couldn't have been
2236 * removed while we were holding a lock on them. But it's possible that
2237 * the lock was taken fast-path and has since been moved to the main hash
2238 * table by another backend, in which case we will need to look up the
2239 * objects here. We assume the lock field is NULL if so.
2240 */
2241 lock = locallock->lock;
2242 if (!lock)
2243 {
2245
2246 Assert(EligibleForRelationFastPath(locktag, lockmode));
2248 locktag,
2249 locallock->hashcode,
2250 HASH_FIND,
2251 NULL);
2252 if (!lock)
2253 elog(ERROR, "failed to re-find shared lock object");
2254 locallock->lock = lock;
2255
2256 proclocktag.myLock = lock;
2257 proclocktag.myProc = MyProc;
2259 &proclocktag,
2260 HASH_FIND,
2261 NULL);
2262 if (!locallock->proclock)
2263 elog(ERROR, "failed to re-find shared proclock object");
2264 }
2265 LOCK_PRINT("LockRelease: found", lock, lockmode);
2266 proclock = locallock->proclock;
2267 PROCLOCK_PRINT("LockRelease: found", proclock);
2268
2269 /*
2270 * Double-check that we are actually holding a lock of the type we want to
2271 * release.
2272 */
2273 if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
2274 {
2275 PROCLOCK_PRINT("LockRelease: WRONGTYPE", proclock);
2277 elog(WARNING, "you don't own a lock of type %s",
2278 lockMethodTable->lockModeNames[lockmode]);
2280 return false;
2281 }
2282
2283 /*
2284 * Do the releasing. CleanUpLock will waken any now-wakable waiters.
2285 */
2286 wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
2287
2288 CleanUpLock(lock, proclock,
2289 lockMethodTable, locallock->hashcode,
2290 wakeupNeeded);
2291
2293
2295 return true;
2296}

References Assert, CleanUpLock(), CurrentResourceOwner, EligibleForRelationFastPath, elog, ERROR, FAST_PATH_REL_GROUP, FastPathLocalUseCounts, FastPathUnGrantRelationLock(), fb(), PGPROC::fpInfoLock, HASH_FIND, hash_search(), hash_search_with_hash_value(), PROCLOCK::holdMask, i, lengthof, LOCK_PRINT, LOCKBIT_ON, LockHashPartitionLock, LockMethodLocalHash, LockMethodLockHash, LockMethodProcLockHash, LockMethods, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_lockmethodid, LOG, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), MemSet, MyProc, PROCLOCK_PRINT, RemoveLocalLock(), ResourceOwnerForgetLock(), UnGrantLock(), and WARNING.

Referenced by ConditionalXactLockTableWait(), pg_advisory_unlock_int4(), pg_advisory_unlock_int8(), pg_advisory_unlock_shared_int4(), pg_advisory_unlock_shared_int8(), ReleaseLockIfHeld(), SearchSysCacheLocked1(), SpeculativeInsertionLockRelease(), SpeculativeInsertionWait(), StandbyReleaseXidEntryLocks(), UnlockApplyTransactionForSession(), UnlockDatabaseObject(), UnlockPage(), UnlockRelation(), UnlockRelationForExtension(), UnlockRelationId(), UnlockRelationIdForSession(), UnlockRelationOid(), UnlockSharedObject(), UnlockSharedObjectForSession(), UnlockTuple(), VirtualXactLock(), XactLockForVirtualXact(), XactLockTableDelete(), and XactLockTableWait().

◆ LockReleaseAll()

void LockReleaseAll ( LOCKMETHODID  lockmethodid,
bool  allLocks 
)

Definition at line 2307 of file lock.c.

2308{
2309 HASH_SEQ_STATUS status;
2311 int i,
2312 numLockModes;
2314 LOCK *lock;
2315 int partition;
2316 bool have_fast_path_lwlock = false;
2317
2319 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2321
2322#ifdef LOCK_DEBUG
2323 if (*(lockMethodTable->trace_flag))
2324 elog(LOG, "LockReleaseAll: lockmethod=%d", lockmethodid);
2325#endif
2326
2327 /*
2328 * Get rid of our fast-path VXID lock, if appropriate. Note that this is
2329 * the only way that the lock we hold on our own VXID can ever get
2330 * released: it is always and only released when a toplevel transaction
2331 * ends.
2332 */
2335
2336 numLockModes = lockMethodTable->numLockModes;
2337
2338 /*
2339 * First we run through the locallock table and get rid of unwanted
2340 * entries, then we scan the process's proclocks and get rid of those. We
2341 * do this separately because we may have multiple locallock entries
2342 * pointing to the same proclock, and we daren't end up with any dangling
2343 * pointers. Fast-path locks are cleaned up during the locallock table
2344 * scan, though.
2345 */
2347
2348 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2349 {
2350 /*
2351 * If the LOCALLOCK entry is unused, something must've gone wrong
2352 * while trying to acquire this lock. Just forget the local entry.
2353 */
2354 if (locallock->nLocks == 0)
2355 {
2357 continue;
2358 }
2359
2360 /* Ignore items that are not of the lockmethod to be removed */
2362 continue;
2363
2364 /*
2365 * If we are asked to release all locks, we can just zap the entry.
2366 * Otherwise, must scan to see if there are session locks. We assume
2367 * there is at most one lockOwners entry for session locks.
2368 */
2369 if (!allLocks)
2370 {
2371 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
2372
2373 /* If session lock is above array position 0, move it down to 0 */
2374 for (i = 0; i < locallock->numLockOwners; i++)
2375 {
2376 if (lockOwners[i].owner == NULL)
2377 lockOwners[0] = lockOwners[i];
2378 else
2379 ResourceOwnerForgetLock(lockOwners[i].owner, locallock);
2380 }
2381
2382 if (locallock->numLockOwners > 0 &&
2383 lockOwners[0].owner == NULL &&
2384 lockOwners[0].nLocks > 0)
2385 {
2386 /* Fix the locallock to show just the session locks */
2387 locallock->nLocks = lockOwners[0].nLocks;
2388 locallock->numLockOwners = 1;
2389 /* We aren't deleting this locallock, so done */
2390 continue;
2391 }
2392 else
2393 locallock->numLockOwners = 0;
2394 }
2395
2396#ifdef USE_ASSERT_CHECKING
2397
2398 /*
2399 * Tuple locks are currently held only for short durations within a
2400 * transaction. Check that we didn't forget to release one.
2401 */
2403 elog(WARNING, "tuple lock held at commit");
2404#endif
2405
2406 /*
2407 * If the lock or proclock pointers are NULL, this lock was taken via
2408 * the relation fast-path (and is not known to have been transferred).
2409 */
2410 if (locallock->proclock == NULL || locallock->lock == NULL)
2411 {
2412 LOCKMODE lockmode = locallock->tag.mode;
2413 Oid relid;
2414
2415 /* Verify that a fast-path lock is what we've got. */
2416 if (!EligibleForRelationFastPath(&locallock->tag.lock, lockmode))
2417 elog(PANIC, "locallock table corrupted");
2418
2419 /*
2420 * If we don't currently hold the LWLock that protects our
2421 * fast-path data structures, we must acquire it before attempting
2422 * to release the lock via the fast-path. We will continue to
2423 * hold the LWLock until we're done scanning the locallock table,
2424 * unless we hit a transferred fast-path lock. (XXX is this
2425 * really such a good idea? There could be a lot of entries ...)
2426 */
2428 {
2430 have_fast_path_lwlock = true;
2431 }
2432
2433 /* Attempt fast-path release. */
2434 relid = locallock->tag.lock.locktag_field2;
2435 if (FastPathUnGrantRelationLock(relid, lockmode))
2436 {
2438 continue;
2439 }
2440
2441 /*
2442 * Our lock, originally taken via the fast path, has been
2443 * transferred to the main lock table. That's going to require
2444 * some extra work, so release our fast-path lock before starting.
2445 */
2447 have_fast_path_lwlock = false;
2448
2449 /*
2450 * Now dump the lock. We haven't got a pointer to the LOCK or
2451 * PROCLOCK in this case, so we have to handle this a bit
2452 * differently than a normal lock release. Unfortunately, this
2453 * requires an extra LWLock acquire-and-release cycle on the
2454 * partitionLock, but hopefully it shouldn't happen often.
2455 */
2457 &locallock->tag.lock, lockmode, false);
2459 continue;
2460 }
2461
2462 /* Mark the proclock to show we need to release this lockmode */
2463 if (locallock->nLocks > 0)
2464 locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
2465
2466 /* And remove the locallock hashtable entry */
2468 }
2469
2470 /* Done with the fast-path data structures */
2473
2474 /*
2475 * Now, scan each lock partition separately.
2476 */
2478 {
2480 dlist_head *procLocks = &MyProc->myProcLocks[partition];
2482
2484
2485 /*
2486 * If the proclock list for this partition is empty, we can skip
2487 * acquiring the partition lock. This optimization is trickier than
2488 * it looks, because another backend could be in process of adding
2489 * something to our proclock list due to promoting one of our
2490 * fast-path locks. However, any such lock must be one that we
2491 * decided not to delete above, so it's okay to skip it again now;
2492 * we'd just decide not to delete it again. We must, however, be
2493 * careful to re-fetch the list header once we've acquired the
2494 * partition lock, to be sure we have a valid, up-to-date pointer.
2495 * (There is probably no significant risk if pointer fetch/store is
2496 * atomic, but we don't wish to assume that.)
2497 *
2498 * XXX This argument assumes that the locallock table correctly
2499 * represents all of our fast-path locks. While allLocks mode
2500 * guarantees to clean up all of our normal locks regardless of the
2501 * locallock situation, we lose that guarantee for fast-path locks.
2502 * This is not ideal.
2503 */
2504 if (dlist_is_empty(procLocks))
2505 continue; /* needn't examine this partition */
2506
2508
2510 {
2511 PROCLOCK *proclock = dlist_container(PROCLOCK, procLink, proclock_iter.cur);
2512 bool wakeupNeeded = false;
2513
2514 Assert(proclock->tag.myProc == MyProc);
2515
2516 lock = proclock->tag.myLock;
2517
2518 /* Ignore items that are not of the lockmethod to be removed */
2519 if (LOCK_LOCKMETHOD(*lock) != lockmethodid)
2520 continue;
2521
2522 /*
2523 * In allLocks mode, force release of all locks even if locallock
2524 * table had problems
2525 */
2526 if (allLocks)
2527 proclock->releaseMask = proclock->holdMask;
2528 else
2529 Assert((proclock->releaseMask & ~proclock->holdMask) == 0);
2530
2531 /*
2532 * Ignore items that have nothing to be released, unless they have
2533 * holdMask == 0 and are therefore recyclable
2534 */
2535 if (proclock->releaseMask == 0 && proclock->holdMask != 0)
2536 continue;
2537
2538 PROCLOCK_PRINT("LockReleaseAll", proclock);
2539 LOCK_PRINT("LockReleaseAll", lock, 0);
2540 Assert(lock->nRequested >= 0);
2541 Assert(lock->nGranted >= 0);
2542 Assert(lock->nGranted <= lock->nRequested);
2543 Assert((proclock->holdMask & ~lock->grantMask) == 0);
2544
2545 /*
2546 * Release the previously-marked lock modes
2547 */
2548 for (i = 1; i <= numLockModes; i++)
2549 {
2550 if (proclock->releaseMask & LOCKBIT_ON(i))
2551 wakeupNeeded |= UnGrantLock(lock, i, proclock,
2553 }
2554 Assert((lock->nRequested >= 0) && (lock->nGranted >= 0));
2555 Assert(lock->nGranted <= lock->nRequested);
2556 LOCK_PRINT("LockReleaseAll: updated", lock, 0);
2557
2558 proclock->releaseMask = 0;
2559
2560 /* CleanUpLock will wake up waiters if needed. */
2561 CleanUpLock(lock, proclock,
2563 LockTagHashCode(&lock->tag),
2564 wakeupNeeded);
2565 } /* loop over PROCLOCKs within this partition */
2566
2568 } /* loop over partitions */
2569
2570#ifdef LOCK_DEBUG
2571 if (*(lockMethodTable->trace_flag))
2572 elog(LOG, "LockReleaseAll done");
2573#endif
2574}

References Assert, CleanUpLock(), DEFAULT_LOCKMETHOD, dlist_container, dlist_foreach_modify, dlist_is_empty(), EligibleForRelationFastPath, elog, ERROR, FastPathUnGrantRelationLock(), fb(), PGPROC::fpInfoLock, LOCK::grantMask, hash_seq_init(), hash_seq_search(), PROCLOCK::holdMask, i, lengthof, LOCALLOCK_LOCKMETHOD, LOCALLOCK_LOCKTAG, LOCK_LOCKMETHOD, LOCK_PRINT, LOCKBIT_ON, LockHashPartitionLockByIndex, LockMethodLocalHash, LockMethods, LockRefindAndRelease(), LOCKTAG_TUPLE, LockTagHashCode(), LOG, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), PROCLOCKTAG::myLock, MyProc, PROCLOCKTAG::myProc, PGPROC::myProcLocks, LOCK::nGranted, LOCALLOCKOWNER::nLocks, LOCK::nRequested, NUM_LOCK_PARTITIONS, LOCALLOCKOWNER::owner, PANIC, PROCLOCK_PRINT, PROCLOCK::releaseMask, RemoveLocalLock(), ResourceOwnerForgetLock(), LOCK::tag, PROCLOCK::tag, UnGrantLock(), VirtualXactLockTableCleanup(), and WARNING.

Referenced by DiscardAll(), logicalrep_worker_onexit(), ProcReleaseLocks(), and ShutdownPostgres().

◆ LockReleaseCurrentOwner()

void LockReleaseCurrentOwner ( LOCALLOCK **  locallocks,
int  nlocks 
)

Definition at line 2611 of file lock.c.

2612{
2613 if (locallocks == NULL)
2614 {
2615 HASH_SEQ_STATUS status;
2617
2619
2620 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2622 }
2623 else
2624 {
2625 int i;
2626
2627 for (i = nlocks - 1; i >= 0; i--)
2629 }
2630}

References fb(), hash_seq_init(), hash_seq_search(), i, LockMethodLocalHash, and ReleaseLockIfHeld().

Referenced by ResourceOwnerReleaseInternal().

◆ LockReleaseSession()

void LockReleaseSession ( LOCKMETHODID  lockmethodid)

Definition at line 2581 of file lock.c.

2582{
2583 HASH_SEQ_STATUS status;
2585
2587 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2588
2590
2591 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2592 {
2593 /* Ignore items that are not of the specified lock method */
2595 continue;
2596
2598 }
2599}

References elog, ERROR, fb(), hash_seq_init(), hash_seq_search(), lengthof, LOCALLOCK_LOCKMETHOD, LockMethodLocalHash, LockMethods, and ReleaseLockIfHeld().

Referenced by pg_advisory_unlock_all().

◆ LockTagHashCode()

◆ LockWaiterCount()

int LockWaiterCount ( const LOCKTAG locktag)

Definition at line 4854 of file lock.c.

4855{
4857 LOCK *lock;
4858 bool found;
4859 uint32 hashcode;
4861 int waiters = 0;
4862
4864 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4865
4866 hashcode = LockTagHashCode(locktag);
4869
4871 locktag,
4872 hashcode,
4873 HASH_FIND,
4874 &found);
4875 if (found)
4876 {
4877 Assert(lock != NULL);
4878 waiters = lock->nRequested;
4879 }
4881
4882 return waiters;
4883}

References Assert, elog, ERROR, fb(), HASH_FIND, hash_search_with_hash_value(), lengthof, LockHashPartitionLock, LockMethodLockHash, LockMethods, LOCKTAG::locktag_lockmethodid, LockTagHashCode(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), and LOCK::nRequested.

Referenced by RelationExtensionLockWaiterCount().

◆ MarkLockClear()

void MarkLockClear ( LOCALLOCK locallock)

Definition at line 1920 of file lock.c.

1921{
1922 Assert(locallock->nLocks > 0);
1923 locallock->lockCleared = true;
1924}

References Assert, and fb().

Referenced by ConditionalLockDatabaseObject(), ConditionalLockRelation(), ConditionalLockRelationOid(), ConditionalLockSharedObject(), LockRelation(), LockRelationId(), and LockRelationOid().

◆ PostPrepare_Locks()

void PostPrepare_Locks ( FullTransactionId  fxid)

Definition at line 3572 of file lock.c.

3573{
3574 PGPROC *newproc = TwoPhaseGetDummyProc(fxid, false);
3575 HASH_SEQ_STATUS status;
3577 LOCK *lock;
3578 PROCLOCK *proclock;
3580 int partition;
3581
3582 /* Can't prepare a lock group follower. */
3585
3586 /* This is a critical section: any error means big trouble */
3588
3589 /*
3590 * First we run through the locallock table and get rid of unwanted
3591 * entries, then we scan the process's proclocks and transfer them to the
3592 * target proc.
3593 *
3594 * We do this separately because we may have multiple locallock entries
3595 * pointing to the same proclock, and we daren't end up with any dangling
3596 * pointers.
3597 */
3599
3600 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3601 {
3602 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3603 bool haveSessionLock;
3604 bool haveXactLock;
3605 int i;
3606
3607 if (locallock->proclock == NULL || locallock->lock == NULL)
3608 {
3609 /*
3610 * We must've run out of shared memory while trying to set up this
3611 * lock. Just forget the local entry.
3612 */
3613 Assert(locallock->nLocks == 0);
3615 continue;
3616 }
3617
3618 /* Ignore VXID locks */
3619 if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3620 continue;
3621
3622 /* Scan to see whether we hold it at session or transaction level */
3623 haveSessionLock = haveXactLock = false;
3624 for (i = locallock->numLockOwners - 1; i >= 0; i--)
3625 {
3626 if (lockOwners[i].owner == NULL)
3627 haveSessionLock = true;
3628 else
3629 haveXactLock = true;
3630 }
3631
3632 /* Ignore it if we have only session lock */
3633 if (!haveXactLock)
3634 continue;
3635
3636 /* This can't happen, because we already checked it */
3637 if (haveSessionLock)
3638 ereport(PANIC,
3640 errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3641
3642 /* Mark the proclock to show we need to release this lockmode */
3643 if (locallock->nLocks > 0)
3644 locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
3645
3646 /* And remove the locallock hashtable entry */
3648 }
3649
3650 /*
3651 * Now, scan each lock partition separately.
3652 */
3654 {
3656 dlist_head *procLocks = &(MyProc->myProcLocks[partition]);
3658
3660
3661 /*
3662 * If the proclock list for this partition is empty, we can skip
3663 * acquiring the partition lock. This optimization is safer than the
3664 * situation in LockReleaseAll, because we got rid of any fast-path
3665 * locks during AtPrepare_Locks, so there cannot be any case where
3666 * another backend is adding something to our lists now. For safety,
3667 * though, we code this the same way as in LockReleaseAll.
3668 */
3669 if (dlist_is_empty(procLocks))
3670 continue; /* needn't examine this partition */
3671
3673
3675 {
3676 proclock = dlist_container(PROCLOCK, procLink, proclock_iter.cur);
3677
3678 Assert(proclock->tag.myProc == MyProc);
3679
3680 lock = proclock->tag.myLock;
3681
3682 /* Ignore VXID locks */
3684 continue;
3685
3686 PROCLOCK_PRINT("PostPrepare_Locks", proclock);
3687 LOCK_PRINT("PostPrepare_Locks", lock, 0);
3688 Assert(lock->nRequested >= 0);
3689 Assert(lock->nGranted >= 0);
3690 Assert(lock->nGranted <= lock->nRequested);
3691 Assert((proclock->holdMask & ~lock->grantMask) == 0);
3692
3693 /* Ignore it if nothing to release (must be a session lock) */
3694 if (proclock->releaseMask == 0)
3695 continue;
3696
3697 /* Else we should be releasing all locks */
3698 if (proclock->releaseMask != proclock->holdMask)
3699 elog(PANIC, "we seem to have dropped a bit somewhere");
3700
3701 /*
3702 * We cannot simply modify proclock->tag.myProc to reassign
3703 * ownership of the lock, because that's part of the hash key and
3704 * the proclock would then be in the wrong hash chain. Instead
3705 * use hash_update_hash_key. (We used to create a new hash entry,
3706 * but that risks out-of-memory failure if other processes are
3707 * busy making proclocks too.) We must unlink the proclock from
3708 * our procLink chain and put it into the new proc's chain, too.
3709 *
3710 * Note: the updated proclock hash key will still belong to the
3711 * same hash partition, cf proclock_hash(). So the partition lock
3712 * we already hold is sufficient for this.
3713 */
3714 dlist_delete(&proclock->procLink);
3715
3716 /*
3717 * Create the new hash key for the proclock.
3718 */
3719 proclocktag.myLock = lock;
3720 proclocktag.myProc = newproc;
3721
3722 /*
3723 * Update groupLeader pointer to point to the new proc. (We'd
3724 * better not be a member of somebody else's lock group!)
3725 */
3726 Assert(proclock->groupLeader == proclock->tag.myProc);
3727 proclock->groupLeader = newproc;
3728
3729 /*
3730 * Update the proclock. We should not find any existing entry for
3731 * the same hash key, since there can be only one entry for any
3732 * given lock with my own proc.
3733 */
3735 proclock,
3736 &proclocktag))
3737 elog(PANIC, "duplicate entry found while reassigning a prepared transaction's locks");
3738
3739 /* Re-link into the new proc's proclock list */
3740 dlist_push_tail(&newproc->myProcLocks[partition], &proclock->procLink);
3741
3742 PROCLOCK_PRINT("PostPrepare_Locks: updated", proclock);
3743 } /* loop over PROCLOCKs within this partition */
3744
3746 } /* loop over partitions */
3747
3749}

References Assert, dlist_container, dlist_delete(), dlist_foreach_modify, dlist_is_empty(), dlist_push_tail(), elog, END_CRIT_SECTION, ereport, errcode(), errmsg(), fb(), LOCK::grantMask, PROCLOCK::groupLeader, hash_seq_init(), hash_seq_search(), hash_update_hash_key(), PROCLOCK::holdMask, i, LOCK_PRINT, LOCKBIT_ON, PGPROC::lockGroupLeader, LockHashPartitionLockByIndex, LockMethodLocalHash, LockMethodProcLockHash, LOCKTAG::locktag_type, LOCKTAG_VIRTUALTRANSACTION, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), PROCLOCKTAG::myLock, MyProc, PROCLOCKTAG::myProc, PGPROC::myProcLocks, LOCK::nGranted, LOCK::nRequested, NUM_LOCK_PARTITIONS, PANIC, PROCLOCK::procLink, PROCLOCK_PRINT, PROCLOCK::releaseMask, RemoveLocalLock(), START_CRIT_SECTION, LOCK::tag, PROCLOCK::tag, and TwoPhaseGetDummyProc().

Referenced by PrepareTransaction().

◆ proclock_hash()

static uint32 proclock_hash ( const void key,
Size  keysize 
)
static

Definition at line 574 of file lock.c.

575{
576 const PROCLOCKTAG *proclocktag = (const PROCLOCKTAG *) key;
579
580 Assert(keysize == sizeof(PROCLOCKTAG));
581
582 /* Look into the associated LOCK object, and compute its hash code */
583 lockhash = LockTagHashCode(&proclocktag->myLock->tag);
584
585 /*
586 * To make the hash code also depend on the PGPROC, we xor the proc
587 * struct's address into the hash code, left-shifted so that the
588 * partition-number bits don't change. Since this is only a hash, we
589 * don't care if we lose high-order bits of the address; use an
590 * intermediate variable to suppress cast-pointer-to-int warnings.
591 */
594
595 return lockhash;
596}

References Assert, DatumGetUInt32(), fb(), LockTagHashCode(), LOG2_NUM_LOCK_PARTITIONS, and PointerGetDatum().

Referenced by LockManagerShmemInit().

◆ ProcLockHashCode()

static uint32 ProcLockHashCode ( const PROCLOCKTAG proclocktag,
uint32  hashcode 
)
inlinestatic

Definition at line 605 of file lock.c.

606{
607 uint32 lockhash = hashcode;
609
610 /*
611 * This must match proclock_hash()!
612 */
615
616 return lockhash;
617}

References DatumGetUInt32(), fb(), LOG2_NUM_LOCK_PARTITIONS, and PointerGetDatum().

Referenced by CleanUpLock(), FastPathGetRelationLockEntry(), lock_twophase_recover(), LockAcquireExtended(), LockRefindAndRelease(), and SetupLockInTable().

◆ ReleaseLockIfHeld()

static void ReleaseLockIfHeld ( LOCALLOCK locallock,
bool  sessionLock 
)
static

Definition at line 2646 of file lock.c.

2647{
2648 ResourceOwner owner;
2649 LOCALLOCKOWNER *lockOwners;
2650 int i;
2651
2652 /* Identify owner for lock (must match LockRelease!) */
2653 if (sessionLock)
2654 owner = NULL;
2655 else
2656 owner = CurrentResourceOwner;
2657
2658 /* Scan to see if there are any locks belonging to the target owner */
2659 lockOwners = locallock->lockOwners;
2660 for (i = locallock->numLockOwners - 1; i >= 0; i--)
2661 {
2662 if (lockOwners[i].owner == owner)
2663 {
2664 Assert(lockOwners[i].nLocks > 0);
2665 if (lockOwners[i].nLocks < locallock->nLocks)
2666 {
2667 /*
2668 * We will still hold this lock after forgetting this
2669 * ResourceOwner.
2670 */
2671 locallock->nLocks -= lockOwners[i].nLocks;
2672 /* compact out unused slot */
2673 locallock->numLockOwners--;
2674 if (owner != NULL)
2676 if (i < locallock->numLockOwners)
2677 lockOwners[i] = lockOwners[locallock->numLockOwners];
2678 }
2679 else
2680 {
2681 Assert(lockOwners[i].nLocks == locallock->nLocks);
2682 /* We want to call LockRelease just once */
2683 lockOwners[i].nLocks = 1;
2684 locallock->nLocks = 1;
2685 if (!LockRelease(&locallock->tag.lock,
2686 locallock->tag.mode,
2687 sessionLock))
2688 elog(WARNING, "ReleaseLockIfHeld: failed??");
2689 }
2690 break;
2691 }
2692 }
2693}

References Assert, CurrentResourceOwner, elog, fb(), i, LockRelease(), LOCALLOCKOWNER::nLocks, ResourceOwnerForgetLock(), and WARNING.

Referenced by LockReleaseCurrentOwner(), and LockReleaseSession().

◆ RemoveFromWaitQueue()

void RemoveFromWaitQueue ( PGPROC proc,
uint32  hashcode 
)

Definition at line 2046 of file lock.c.

2047{
2048 LOCK *waitLock = proc->waitLock;
2049 PROCLOCK *proclock = proc->waitProcLock;
2050 LOCKMODE lockmode = proc->waitLockMode;
2052
2053 /* Make sure proc is waiting */
2055 Assert(proc->links.next != NULL);
2056 Assert(waitLock);
2057 Assert(!dclist_is_empty(&waitLock->waitProcs));
2059
2060 /* Remove proc from lock's wait queue */
2061 dclist_delete_from_thoroughly(&waitLock->waitProcs, &proc->links);
2062
2063 /* Undo increments of request counts by waiting process */
2064 Assert(waitLock->nRequested > 0);
2065 Assert(waitLock->nRequested > proc->waitLock->nGranted);
2066 waitLock->nRequested--;
2067 Assert(waitLock->requested[lockmode] > 0);
2068 waitLock->requested[lockmode]--;
2069 /* don't forget to clear waitMask bit if appropriate */
2070 if (waitLock->granted[lockmode] == waitLock->requested[lockmode])
2071 waitLock->waitMask &= LOCKBIT_OFF(lockmode);
2072
2073 /* Clean up the proc's own state, and pass it the ok/fail signal */
2074 proc->waitLock = NULL;
2075 proc->waitProcLock = NULL;
2077
2078 /*
2079 * Delete the proclock immediately if it represents no already-held locks.
2080 * (This must happen now because if the owner of the lock decides to
2081 * release it, and the requested/granted counts then go to zero,
2082 * LockRelease expects there to be no remaining proclocks.) Then see if
2083 * any other waiters for the lock can be woken up now.
2084 */
2085 CleanUpLock(waitLock, proclock,
2086 LockMethods[lockmethodid], hashcode,
2087 true);
2088}

References Assert, CleanUpLock(), dclist_delete_from_thoroughly(), dclist_is_empty(), fb(), LOCK::granted, lengthof, PGPROC::links, LOCK_LOCKMETHOD, LOCKBIT_OFF, LockMethods, dlist_node::next, LOCK::nGranted, LOCK::nRequested, PROC_WAIT_STATUS_ERROR, PROC_WAIT_STATUS_WAITING, LOCK::requested, PGPROC::waitLock, PGPROC::waitLockMode, LOCK::waitMask, PGPROC::waitProcLock, LOCK::waitProcs, and PGPROC::waitStatus.

Referenced by CheckDeadLock(), and LockErrorCleanup().

◆ RemoveLocalLock()

static void RemoveLocalLock ( LOCALLOCK locallock)
static

Definition at line 1476 of file lock.c.

1477{
1478 int i;
1479
1480 for (i = locallock->numLockOwners - 1; i >= 0; i--)
1481 {
1482 if (locallock->lockOwners[i].owner != NULL)
1483 ResourceOwnerForgetLock(locallock->lockOwners[i].owner, locallock);
1484 }
1485 locallock->numLockOwners = 0;
1486 if (locallock->lockOwners != NULL)
1487 pfree(locallock->lockOwners);
1488 locallock->lockOwners = NULL;
1489
1490 if (locallock->holdsStrongLockCount)
1491 {
1493
1495
1499 locallock->holdsStrongLockCount = false;
1501 }
1502
1504 &(locallock->tag),
1505 HASH_REMOVE, NULL))
1506 elog(WARNING, "locallock table corrupted");
1507
1508 /*
1509 * Indicate that the lock is released for certain types of locks
1510 */
1512}

References Assert, CheckAndSetLockHeld(), FastPathStrongRelationLockData::count, elog, FastPathStrongLockHashPartition, FastPathStrongRelationLocks, fb(), HASH_REMOVE, hash_search(), i, LockMethodLocalHash, FastPathStrongRelationLockData::mutex, pfree(), ResourceOwnerForgetLock(), SpinLockAcquire, SpinLockRelease, and WARNING.

Referenced by LockAcquireExtended(), LockHasWaiters(), LockRelease(), LockReleaseAll(), and PostPrepare_Locks().

◆ ResetAwaitedLock()

void ResetAwaitedLock ( void  )

Definition at line 1907 of file lock.c.

1908{
1909 awaitedLock = NULL;
1910}

References awaitedLock, and fb().

Referenced by LockErrorCleanup().

◆ SetupLockInTable()

static PROCLOCK * SetupLockInTable ( LockMethod  lockMethodTable,
PGPROC proc,
const LOCKTAG locktag,
uint32  hashcode,
LOCKMODE  lockmode 
)
static

Definition at line 1283 of file lock.c.

1285{
1286 LOCK *lock;
1287 PROCLOCK *proclock;
1290 bool found;
1291
1292 /*
1293 * Find or create a lock with this tag.
1294 */
1296 locktag,
1297 hashcode,
1299 &found);
1300 if (!lock)
1301 return NULL;
1302
1303 /*
1304 * if it's a new lock object, initialize it
1305 */
1306 if (!found)
1307 {
1308 lock->grantMask = 0;
1309 lock->waitMask = 0;
1310 dlist_init(&lock->procLocks);
1311 dclist_init(&lock->waitProcs);
1312 lock->nRequested = 0;
1313 lock->nGranted = 0;
1314 MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
1315 MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
1316 LOCK_PRINT("LockAcquire: new", lock, lockmode);
1317 }
1318 else
1319 {
1320 LOCK_PRINT("LockAcquire: found", lock, lockmode);
1321 Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
1322 Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
1323 Assert(lock->nGranted <= lock->nRequested);
1324 }
1325
1326 /*
1327 * Create the hash key for the proclock table.
1328 */
1329 proclocktag.myLock = lock;
1330 proclocktag.myProc = proc;
1331
1333
1334 /*
1335 * Find or create a proclock entry with this tag
1336 */
1338 &proclocktag,
1341 &found);
1342 if (!proclock)
1343 {
1344 /* Oops, not enough shmem for the proclock */
1345 if (lock->nRequested == 0)
1346 {
1347 /*
1348 * There are no other requestors of this lock, so garbage-collect
1349 * the lock object. We *must* do this to avoid a permanent leak
1350 * of shared memory, because there won't be anything to cause
1351 * anyone to release the lock object later.
1352 */
1353 Assert(dlist_is_empty(&(lock->procLocks)));
1355 &(lock->tag),
1356 hashcode,
1358 NULL))
1359 elog(PANIC, "lock table corrupted");
1360 }
1361 return NULL;
1362 }
1363
1364 /*
1365 * If new, initialize the new entry
1366 */
1367 if (!found)
1368 {
1370
1371 /*
1372 * It might seem unsafe to access proclock->groupLeader without a
1373 * lock, but it's not really. Either we are initializing a proclock
1374 * on our own behalf, in which case our group leader isn't changing
1375 * because the group leader for a process can only ever be changed by
1376 * the process itself; or else we are transferring a fast-path lock to
1377 * the main lock table, in which case that process can't change its
1378 * lock group leader without first releasing all of its locks (and in
1379 * particular the one we are currently transferring).
1380 */
1381 proclock->groupLeader = proc->lockGroupLeader != NULL ?
1382 proc->lockGroupLeader : proc;
1383 proclock->holdMask = 0;
1384 proclock->releaseMask = 0;
1385 /* Add proclock to appropriate lists */
1386 dlist_push_tail(&lock->procLocks, &proclock->lockLink);
1387 dlist_push_tail(&proc->myProcLocks[partition], &proclock->procLink);
1388 PROCLOCK_PRINT("LockAcquire: new", proclock);
1389 }
1390 else
1391 {
1392 PROCLOCK_PRINT("LockAcquire: found", proclock);
1393 Assert((proclock->holdMask & ~lock->grantMask) == 0);
1394
1395#ifdef CHECK_DEADLOCK_RISK
1396
1397 /*
1398 * Issue warning if we already hold a lower-level lock on this object
1399 * and do not hold a lock of the requested level or higher. This
1400 * indicates a deadlock-prone coding practice (eg, we'd have a
1401 * deadlock if another backend were following the same code path at
1402 * about the same time).
1403 *
1404 * This is not enabled by default, because it may generate log entries
1405 * about user-level coding practices that are in fact safe in context.
1406 * It can be enabled to help find system-level problems.
1407 *
1408 * XXX Doing numeric comparison on the lockmodes is a hack; it'd be
1409 * better to use a table. For now, though, this works.
1410 */
1411 {
1412 int i;
1413
1414 for (i = lockMethodTable->numLockModes; i > 0; i--)
1415 {
1416 if (proclock->holdMask & LOCKBIT_ON(i))
1417 {
1418 if (i >= (int) lockmode)
1419 break; /* safe: we have a lock >= req level */
1420 elog(LOG, "deadlock risk: raising lock level"
1421 " from %s to %s on object %u/%u/%u",
1422 lockMethodTable->lockModeNames[i],
1423 lockMethodTable->lockModeNames[lockmode],
1424 lock->tag.locktag_field1, lock->tag.locktag_field2,
1425 lock->tag.locktag_field3);
1426 break;
1427 }
1428 }
1429 }
1430#endif /* CHECK_DEADLOCK_RISK */
1431 }
1432
1433 /*
1434 * lock->nRequested and lock->requested[] count the total number of
1435 * requests, whether granted or waiting, so increment those immediately.
1436 * The other counts don't increment till we get the lock.
1437 */
1438 lock->nRequested++;
1439 lock->requested[lockmode]++;
1440 Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
1441
1442 /*
1443 * We shouldn't already hold the desired lock; else locallock table is
1444 * broken.
1445 */
1446 if (proclock->holdMask & LOCKBIT_ON(lockmode))
1447 elog(ERROR, "lock %s on object %u/%u/%u is already held",
1448 lockMethodTable->lockModeNames[lockmode],
1449 lock->tag.locktag_field1, lock->tag.locktag_field2,
1450 lock->tag.locktag_field3);
1451
1452 return proclock;
1453}

References Assert, dclist_init(), dlist_init(), dlist_is_empty(), dlist_push_tail(), elog, ERROR, fb(), LOCK::granted, LOCK::grantMask, PROCLOCK::groupLeader, HASH_ENTER_NULL, HASH_REMOVE, hash_search_with_hash_value(), PROCLOCK::holdMask, i, LOCK_PRINT, LOCKBIT_ON, PGPROC::lockGroupLeader, LockHashPartition, PROCLOCK::lockLink, LockMethodLockHash, LockMethodProcLockHash, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_field3, LOG, MAX_LOCKMODES, MemSet, PGPROC::myProcLocks, LOCK::nGranted, LOCK::nRequested, PANIC, PROCLOCK::procLink, PROCLOCK_PRINT, ProcLockHashCode(), LOCK::procLocks, PROCLOCK::releaseMask, LOCK::requested, LOCK::tag, LOCK::waitMask, and LOCK::waitProcs.

Referenced by FastPathGetRelationLockEntry(), FastPathTransferRelationLocks(), LockAcquireExtended(), and VirtualXactLock().

◆ UnGrantLock()

static bool UnGrantLock ( LOCK lock,
LOCKMODE  lockmode,
PROCLOCK proclock,
LockMethod  lockMethodTable 
)
static

Definition at line 1681 of file lock.c.

1683{
1684 bool wakeupNeeded = false;
1685
1686 Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
1687 Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
1688 Assert(lock->nGranted <= lock->nRequested);
1689
1690 /*
1691 * fix the general lock stats
1692 */
1693 lock->nRequested--;
1694 lock->requested[lockmode]--;
1695 lock->nGranted--;
1696 lock->granted[lockmode]--;
1697
1698 if (lock->granted[lockmode] == 0)
1699 {
1700 /* change the conflict mask. No more of this lock type. */
1701 lock->grantMask &= LOCKBIT_OFF(lockmode);
1702 }
1703
1704 LOCK_PRINT("UnGrantLock: updated", lock, lockmode);
1705
1706 /*
1707 * We need only run ProcLockWakeup if the released lock conflicts with at
1708 * least one of the lock types requested by waiter(s). Otherwise whatever
1709 * conflict made them wait must still exist. NOTE: before MVCC, we could
1710 * skip wakeup if lock->granted[lockmode] was still positive. But that's
1711 * not true anymore, because the remaining granted locks might belong to
1712 * some waiter, who could now be awakened because he doesn't conflict with
1713 * his own locks.
1714 */
1715 if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
1716 wakeupNeeded = true;
1717
1718 /*
1719 * Now fix the per-proclock state.
1720 */
1721 proclock->holdMask &= LOCKBIT_OFF(lockmode);
1722 PROCLOCK_PRINT("UnGrantLock: updated", proclock);
1723
1724 return wakeupNeeded;
1725}

References Assert, fb(), LOCK::granted, LOCK::grantMask, PROCLOCK::holdMask, LOCK_PRINT, LOCKBIT_OFF, LOCK::nGranted, LOCK::nRequested, PROCLOCK_PRINT, LOCK::requested, and LOCK::waitMask.

Referenced by LockRefindAndRelease(), LockRelease(), and LockReleaseAll().

◆ VirtualXactLock()

bool VirtualXactLock ( VirtualTransactionId  vxid,
bool  wait 
)

Definition at line 4743 of file lock.c.

4744{
4745 LOCKTAG tag;
4746 PGPROC *proc;
4748
4750
4752 /* no vxid lock; localTransactionId is a normal, locked XID */
4753 return XactLockForVirtualXact(vxid, vxid.localTransactionId, wait);
4754
4756
4757 /*
4758 * If a lock table entry must be made, this is the PGPROC on whose behalf
4759 * it must be done. Note that the transaction might end or the PGPROC
4760 * might be reassigned to a new backend before we get around to examining
4761 * it, but it doesn't matter. If we find upon examination that the
4762 * relevant lxid is no longer running here, that's enough to prove that
4763 * it's no longer running anywhere.
4764 */
4765 proc = ProcNumberGetProc(vxid.procNumber);
4766 if (proc == NULL)
4767 return XactLockForVirtualXact(vxid, InvalidTransactionId, wait);
4768
4769 /*
4770 * We must acquire this lock before checking the procNumber and lxid
4771 * against the ones we're waiting for. The target backend will only set
4772 * or clear lxid while holding this lock.
4773 */
4775
4776 if (proc->vxid.procNumber != vxid.procNumber
4778 {
4779 /* VXID ended */
4780 LWLockRelease(&proc->fpInfoLock);
4781 return XactLockForVirtualXact(vxid, InvalidTransactionId, wait);
4782 }
4783
4784 /*
4785 * If we aren't asked to wait, there's no need to set up a lock table
4786 * entry. The transaction is still in progress, so just return false.
4787 */
4788 if (!wait)
4789 {
4790 LWLockRelease(&proc->fpInfoLock);
4791 return false;
4792 }
4793
4794 /*
4795 * OK, we're going to need to sleep on the VXID. But first, we must set
4796 * up the primary lock table entry, if needed (ie, convert the proc's
4797 * fast-path lock on its VXID to a regular lock).
4798 */
4799 if (proc->fpVXIDLock)
4800 {
4801 PROCLOCK *proclock;
4802 uint32 hashcode;
4804
4805 hashcode = LockTagHashCode(&tag);
4806
4809
4811 &tag, hashcode, ExclusiveLock);
4812 if (!proclock)
4813 {
4815 LWLockRelease(&proc->fpInfoLock);
4816 ereport(ERROR,
4818 errmsg("out of shared memory"),
4819 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
4820 }
4821 GrantLock(proclock->tag.myLock, proclock, ExclusiveLock);
4822
4824
4825 proc->fpVXIDLock = false;
4826 }
4827
4828 /*
4829 * If the proc has an XID now, we'll avoid a TwoPhaseGetXidByVirtualXID()
4830 * search. The proc might have assigned this XID but not yet locked it,
4831 * in which case the proc will lock this XID before releasing the VXID.
4832 * The fpInfoLock critical section excludes VirtualXactLockTableCleanup(),
4833 * so we won't save an XID of a different VXID. It doesn't matter whether
4834 * we save this before or after setting up the primary lock table entry.
4835 */
4836 xid = proc->xid;
4837
4838 /* Done with proc->fpLockBits */
4839 LWLockRelease(&proc->fpInfoLock);
4840
4841 /* Time to wait. */
4842 (void) LockAcquire(&tag, ShareLock, false, false);
4843
4844 LockRelease(&tag, ShareLock, false);
4845 return XactLockForVirtualXact(vxid, xid, wait);
4846}

References Assert, DEFAULT_LOCKMETHOD, ereport, errcode(), errhint(), errmsg(), ERROR, ExclusiveLock, fb(), PGPROC::fpInfoLock, PGPROC::fpLocalTransactionId, PGPROC::fpVXIDLock, GrantLock(), InvalidTransactionId, VirtualTransactionId::localTransactionId, LockAcquire(), LockHashPartitionLock, LockMethods, LockRelease(), LockTagHashCode(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), PROCLOCKTAG::myLock, VirtualTransactionId::procNumber, PGPROC::procNumber, ProcNumberGetProc(), SET_LOCKTAG_VIRTUALTRANSACTION, SetupLockInTable(), ShareLock, PROCLOCK::tag, VirtualTransactionIdIsRecoveredPreparedXact, VirtualTransactionIdIsValid, PGPROC::vxid, XactLockForVirtualXact(), and PGPROC::xid.

Referenced by ResolveRecoveryConflictWithVirtualXIDs(), WaitForLockersMultiple(), and WaitForOlderSnapshots().

◆ VirtualXactLockTableCleanup()

void VirtualXactLockTableCleanup ( void  )

Definition at line 4643 of file lock.c.

4644{
4645 bool fastpath;
4646 LocalTransactionId lxid;
4647
4649
4650 /*
4651 * Clean up shared memory state.
4652 */
4654
4655 fastpath = MyProc->fpVXIDLock;
4657 MyProc->fpVXIDLock = false;
4659
4661
4662 /*
4663 * If fpVXIDLock has been cleared without touching fpLocalTransactionId,
4664 * that means someone transferred the lock to the main lock table.
4665 */
4666 if (!fastpath && LocalTransactionIdIsValid(lxid))
4667 {
4669 LOCKTAG locktag;
4670
4671 vxid.procNumber = MyProcNumber;
4672 vxid.localTransactionId = lxid;
4673 SET_LOCKTAG_VIRTUALTRANSACTION(locktag, vxid);
4674
4676 &locktag, ExclusiveLock, false);
4677 }
4678}

References Assert, DEFAULT_LOCKMETHOD, ExclusiveLock, PGPROC::fpInfoLock, PGPROC::fpLocalTransactionId, PGPROC::fpVXIDLock, INVALID_PROC_NUMBER, InvalidLocalTransactionId, VirtualTransactionId::localTransactionId, LocalTransactionIdIsValid, LockMethods, LockRefindAndRelease(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), MyProc, MyProcNumber, VirtualTransactionId::procNumber, PGPROC::procNumber, SET_LOCKTAG_VIRTUALTRANSACTION, and PGPROC::vxid.

Referenced by LockReleaseAll(), and ShutdownRecoveryTransactionEnvironment().

◆ VirtualXactLockTableInsert()

◆ WaitOnLock()

static ProcWaitStatus WaitOnLock ( LOCALLOCK locallock,
ResourceOwner  owner 
)
static

Definition at line 1932 of file lock.c.

1933{
1934 ProcWaitStatus result;
1936
1937 TRACE_POSTGRESQL_LOCK_WAIT_START(locallock->tag.lock.locktag_field1,
1938 locallock->tag.lock.locktag_field2,
1939 locallock->tag.lock.locktag_field3,
1940 locallock->tag.lock.locktag_field4,
1941 locallock->tag.lock.locktag_type,
1942 locallock->tag.mode);
1943
1944 /* Setup error traceback support for ereport() */
1949
1950 /* adjust the process title to indicate that it's waiting */
1951 set_ps_display_suffix("waiting");
1952
1953 /*
1954 * Record the fact that we are waiting for a lock, so that
1955 * LockErrorCleanup will clean up if cancel/die happens.
1956 */
1958 awaitedOwner = owner;
1959
1960 /*
1961 * NOTE: Think not to put any shared-state cleanup after the call to
1962 * ProcSleep, in either the normal or failure path. The lock state must
1963 * be fully set by the lock grantor, or by CheckDeadLock if we give up
1964 * waiting for the lock. This is necessary because of the possibility
1965 * that a cancel/die interrupt will interrupt ProcSleep after someone else
1966 * grants us the lock, but before we've noticed it. Hence, after granting,
1967 * the locktable state must fully reflect the fact that we own the lock;
1968 * we can't do additional work on return.
1969 *
1970 * We can and do use a PG_TRY block to try to clean up after failure, but
1971 * this still has a major limitation: elog(FATAL) can occur while waiting
1972 * (eg, a "die" interrupt), and then control won't come back here. So all
1973 * cleanup of essential state should happen in LockErrorCleanup, not here.
1974 * We can use PG_TRY to clear the "waiting" status flags, since doing that
1975 * is unimportant if the process exits.
1976 */
1977 PG_TRY();
1978 {
1979 result = ProcSleep(locallock);
1980 }
1981 PG_CATCH();
1982 {
1983 /* In this path, awaitedLock remains set until LockErrorCleanup */
1984
1985 /* reset ps display to remove the suffix */
1987
1988 /* and propagate the error */
1989 PG_RE_THROW();
1990 }
1991 PG_END_TRY();
1992
1993 /*
1994 * We no longer want LockErrorCleanup to do anything.
1995 */
1996 awaitedLock = NULL;
1997
1998 /* reset ps display to remove the suffix */
2000
2002
2003 TRACE_POSTGRESQL_LOCK_WAIT_DONE(locallock->tag.lock.locktag_field1,
2004 locallock->tag.lock.locktag_field2,
2005 locallock->tag.lock.locktag_field3,
2006 locallock->tag.lock.locktag_field4,
2007 locallock->tag.lock.locktag_type,
2008 locallock->tag.mode);
2009
2010 return result;
2011}

References awaitedLock, awaitedOwner, error_context_stack, fb(), PG_CATCH, PG_END_TRY, PG_RE_THROW, PG_TRY, ErrorContextCallback::previous, ProcSleep(), set_ps_display_remove_suffix(), set_ps_display_suffix(), and waitonlock_error_callback().

Referenced by LockAcquireExtended().

◆ waitonlock_error_callback()

static void waitonlock_error_callback ( void arg)
static

Definition at line 2020 of file lock.c.

2021{
2023 const LOCKTAG *tag = &locallock->tag.lock;
2024 LOCKMODE mode = locallock->tag.mode;
2026
2029
2030 errcontext("waiting for %s on %s",
2032 locktagbuf.data);
2033}

References arg, DescribeLockTag(), errcontext, fb(), GetLockmodeName(), initStringInfo(), LOCKTAG::locktag_lockmethodid, and mode.

Referenced by WaitOnLock().

◆ XactLockForVirtualXact()

static bool XactLockForVirtualXact ( VirtualTransactionId  vxid,
TransactionId  xid,
bool  wait 
)
static

Definition at line 4692 of file lock.c.

4694{
4695 bool more = false;
4696
4697 /* There is no point to wait for 2PCs if you have no 2PCs. */
4698 if (max_prepared_xacts == 0)
4699 return true;
4700
4701 do
4702 {
4704 LOCKTAG tag;
4705
4706 /* Clear state from previous iterations. */
4707 if (more)
4708 {
4710 more = false;
4711 }
4712
4713 /* If we have no xid, try to find one. */
4714 if (!TransactionIdIsValid(xid))
4715 xid = TwoPhaseGetXidByVirtualXID(vxid, &more);
4716 if (!TransactionIdIsValid(xid))
4717 {
4718 Assert(!more);
4719 return true;
4720 }
4721
4722 /* Check or wait for XID completion. */
4723 SET_LOCKTAG_TRANSACTION(tag, xid);
4724 lar = LockAcquire(&tag, ShareLock, false, !wait);
4726 return false;
4727 LockRelease(&tag, ShareLock, false);
4728 } while (more);
4729
4730 return true;
4731}

References Assert, fb(), InvalidTransactionId, LockAcquire(), LOCKACQUIRE_NOT_AVAIL, LockRelease(), max_prepared_xacts, SET_LOCKTAG_TRANSACTION, ShareLock, TransactionIdIsValid, and TwoPhaseGetXidByVirtualXID().

Referenced by VirtualXactLock().

Variable Documentation

◆ awaitedLock

LOCALLOCK* awaitedLock
static

Definition at line 328 of file lock.c.

Referenced by GetAwaitedLock(), GrantAwaitedLock(), ResetAwaitedLock(), and WaitOnLock().

◆ awaitedOwner

ResourceOwner awaitedOwner
static

Definition at line 329 of file lock.c.

Referenced by GrantAwaitedLock(), and WaitOnLock().

◆ default_lockmethod

const LockMethodData default_lockmethod
static
Initial value:
= {
}
static bool Dummy_trace
Definition lock.c:122
static const char *const lock_mode_names[]
Definition lock.c:108
static const LOCKMASK LockConflicts[]
Definition lock.c:65

Definition at line 125 of file lock.c.

125 {
129#ifdef LOCK_DEBUG
131#else
133#endif
134};

◆ Dummy_trace

bool Dummy_trace = false
static

Definition at line 122 of file lock.c.

◆ FastPathLocalUseCounts

int FastPathLocalUseCounts[FP_LOCK_GROUPS_PER_BACKEND_MAX]
static

◆ FastPathLockGroupsPerBackend

int FastPathLockGroupsPerBackend = 0

◆ FastPathStrongRelationLocks

◆ lock_mode_names

const char* const lock_mode_names[]
static
Initial value:
=
{
"INVALID",
"AccessShareLock",
"RowShareLock",
"RowExclusiveLock",
"ShareUpdateExclusiveLock",
"ShareLock",
"ShareRowExclusiveLock",
"ExclusiveLock",
"AccessExclusiveLock"
}

Definition at line 108 of file lock.c.

109{
110 "INVALID",
111 "AccessShareLock",
112 "RowShareLock",
113 "RowExclusiveLock",
114 "ShareUpdateExclusiveLock",
115 "ShareLock",
116 "ShareRowExclusiveLock",
117 "ExclusiveLock",
118 "AccessExclusiveLock"
119};

◆ LockConflicts

const LOCKMASK LockConflicts[]
static

Definition at line 65 of file lock.c.

65 {
66 0,
67
68 /* AccessShareLock */
70
71 /* RowShareLock */
73
74 /* RowExclusiveLock */
77
78 /* ShareUpdateExclusiveLock */
82
83 /* ShareLock */
87
88 /* ShareRowExclusiveLock */
92
93 /* ExclusiveLock */
98
99 /* AccessExclusiveLock */
104
105};
#define ShareRowExclusiveLock
Definition lockdefs.h:41
#define AccessShareLock
Definition lockdefs.h:36
#define RowShareLock
Definition lockdefs.h:37

◆ LockMethodLocalHash

◆ LockMethodLockHash

◆ LockMethodProcLockHash

◆ LockMethods

◆ log_lock_failures

bool log_lock_failures = false

Definition at line 54 of file lock.c.

Referenced by heap_acquire_tuplock(), heap_lock_tuple(), and heapam_tuple_lock().

◆ max_locks_per_xact

int max_locks_per_xact

◆ PG_USED_FOR_ASSERTS_ONLY

bool IsRelationExtensionLockHeld PG_USED_FOR_ASSERTS_ONLY = false
static

Definition at line 191 of file lock.c.

◆ StrongLockInProgress

LOCALLOCK* StrongLockInProgress
static

Definition at line 327 of file lock.c.

Referenced by AbortStrongLockAcquire(), BeginStrongLockAcquire(), and FinishStrongLockAcquire().

◆ user_lockmethod

const LockMethodData user_lockmethod
static
Initial value:

Definition at line 136 of file lock.c.

136 {
140#ifdef LOCK_DEBUG
142#else
144#endif
145};