PostgreSQL Source Code git master
Loading...
Searching...
No Matches
lock.c File Reference
#include "postgres.h"
#include <signal.h>
#include <unistd.h>
#include "access/transam.h"
#include "access/twophase.h"
#include "access/twophase_rmgr.h"
#include "access/xlog.h"
#include "access/xlogutils.h"
#include "miscadmin.h"
#include "pg_trace.h"
#include "pgstat.h"
#include "storage/lmgr.h"
#include "storage/proc.h"
#include "storage/procarray.h"
#include "storage/shmem.h"
#include "storage/spin.h"
#include "storage/standby.h"
#include "storage/subsystems.h"
#include "utils/memutils.h"
#include "utils/ps_status.h"
#include "utils/resowner.h"
Include dependency graph for lock.c:

Go to the source code of this file.

Data Structures

struct  TwoPhaseLockRecord
 
struct  FastPathStrongRelationLockData
 

Macros

#define NLOCKENTS()    mul_size(max_locks_per_xact, add_size(MaxBackends, max_prepared_xacts))
 
#define FAST_PATH_REL_GROUP(rel)    (((uint64) (rel) * 49157) & (FastPathLockGroupsPerBackend - 1))
 
#define FAST_PATH_SLOT(group, index)
 
#define FAST_PATH_GROUP(index)
 
#define FAST_PATH_INDEX(index)
 
#define FAST_PATH_BITS_PER_SLOT   3
 
#define FAST_PATH_LOCKNUMBER_OFFSET   1
 
#define FAST_PATH_MASK   ((1 << FAST_PATH_BITS_PER_SLOT) - 1)
 
#define FAST_PATH_BITS(proc, n)   (proc)->fpLockBits[FAST_PATH_GROUP(n)]
 
#define FAST_PATH_GET_BITS(proc, n)    ((FAST_PATH_BITS(proc, n) >> (FAST_PATH_BITS_PER_SLOT * FAST_PATH_INDEX(n))) & FAST_PATH_MASK)
 
#define FAST_PATH_BIT_POSITION(n, l)
 
#define FAST_PATH_SET_LOCKMODE(proc, n, l)    FAST_PATH_BITS(proc, n) |= UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)
 
#define FAST_PATH_CLEAR_LOCKMODE(proc, n, l)    FAST_PATH_BITS(proc, n) &= ~(UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l))
 
#define FAST_PATH_CHECK_LOCKMODE(proc, n, l)    (FAST_PATH_BITS(proc, n) & (UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)))
 
#define EligibleForRelationFastPath(locktag, mode)
 
#define ConflictsWithRelationFastPath(locktag, mode)
 
#define FAST_PATH_STRONG_LOCK_HASH_BITS   10
 
#define FAST_PATH_STRONG_LOCK_HASH_PARTITIONS    (1 << FAST_PATH_STRONG_LOCK_HASH_BITS)
 
#define FastPathStrongLockHashPartition(hashcode)    ((hashcode) % FAST_PATH_STRONG_LOCK_HASH_PARTITIONS)
 
#define LOCK_PRINT(where, lock, type)   ((void) 0)
 
#define PROCLOCK_PRINT(where, proclockP)   ((void) 0)
 

Typedefs

typedef struct TwoPhaseLockRecord TwoPhaseLockRecord
 

Functions

static bool FastPathGrantRelationLock (Oid relid, LOCKMODE lockmode)
 
static bool FastPathUnGrantRelationLock (Oid relid, LOCKMODE lockmode)
 
static bool FastPathTransferRelationLocks (LockMethod lockMethodTable, const LOCKTAG *locktag, uint32 hashcode)
 
static PROCLOCK * FastPathGetRelationLockEntry (LOCALLOCK *locallock)
 
static void LockManagerShmemRequest (void *arg)
 
static void LockManagerShmemInit (void *arg)
 
static uint32 proclock_hash (const void *key, Size keysize)
 
static void RemoveLocalLock (LOCALLOCK *locallock)
 
static PROCLOCK * SetupLockInTable (LockMethod lockMethodTable, PGPROC *proc, const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode)
 
static void GrantLockLocal (LOCALLOCK *locallock, ResourceOwner owner)
 
static void BeginStrongLockAcquire (LOCALLOCK *locallock, uint32 fasthashcode)
 
static void FinishStrongLockAcquire (void)
 
static ProcWaitStatus WaitOnLock (LOCALLOCK *locallock, ResourceOwner owner)
 
static void waitonlock_error_callback (void *arg)
 
static void ReleaseLockIfHeld (LOCALLOCK *locallock, bool sessionLock)
 
static void LockReassignOwner (LOCALLOCK *locallock, ResourceOwner parent)
 
static bool UnGrantLock (LOCK *lock, LOCKMODE lockmode, PROCLOCK *proclock, LockMethod lockMethodTable)
 
static void CleanUpLock (LOCK *lock, PROCLOCK *proclock, LockMethod lockMethodTable, uint32 hashcode, bool wakeupNeeded)
 
static void LockRefindAndRelease (LockMethod lockMethodTable, PGPROC *proc, LOCKTAG *locktag, LOCKMODE lockmode, bool decrement_strong_lock_count)
 
static void GetSingleProcBlockerStatusData (PGPROC *blocked_proc, BlockedProcsData *data)
 
void InitLockManagerAccess (void)
 
LockMethod GetLocksMethodTable (const LOCK *lock)
 
LockMethod GetLockTagsMethodTable (const LOCKTAG *locktag)
 
uint32 LockTagHashCode (const LOCKTAG *locktag)
 
static uint32 ProcLockHashCode (const PROCLOCKTAG *proclocktag, uint32 hashcode)
 
bool DoLockModesConflict (LOCKMODE mode1, LOCKMODE mode2)
 
bool LockHeldByMe (const LOCKTAG *locktag, LOCKMODE lockmode, bool orstronger)
 
bool LockHasWaiters (const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
 
LockAcquireResult LockAcquire (const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock, bool dontWait)
 
LockAcquireResult LockAcquireExtended (const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock, bool dontWait, bool reportMemoryError, LOCALLOCK **locallockp, bool logLockFailure)
 
static void CheckAndSetLockHeld (LOCALLOCK *locallock, bool acquired)
 
bool LockCheckConflicts (LockMethod lockMethodTable, LOCKMODE lockmode, LOCK *lock, PROCLOCK *proclock)
 
void GrantLock (LOCK *lock, PROCLOCK *proclock, LOCKMODE lockmode)
 
void AbortStrongLockAcquire (void)
 
void GrantAwaitedLock (void)
 
LOCALLOCK * GetAwaitedLock (void)
 
void ResetAwaitedLock (void)
 
void MarkLockClear (LOCALLOCK *locallock)
 
void RemoveFromWaitQueue (PGPROC *proc, uint32 hashcode)
 
bool LockRelease (const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
 
void LockReleaseAll (LOCKMETHODID lockmethodid, bool allLocks)
 
void LockReleaseSession (LOCKMETHODID lockmethodid)
 
void LockReleaseCurrentOwner (LOCALLOCK **locallocks, int nlocks)
 
void LockReassignCurrentOwner (LOCALLOCK **locallocks, int nlocks)
 
VirtualTransactionId * GetLockConflicts (const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
 
static void CheckForSessionAndXactLocks (void)
 
void AtPrepare_Locks (void)
 
void PostPrepare_Locks (FullTransactionId fxid)
 
LockData * GetLockStatusData (void)
 
BlockedProcsData * GetBlockerStatusData (int blocked_pid)
 
xl_standby_lock * GetRunningTransactionLocks (int *nlocks)
 
const char * GetLockmodeName (LOCKMETHODID lockmethodid, LOCKMODE mode)
 
void lock_twophase_recover (FullTransactionId fxid, uint16 info, void *recdata, uint32 len)
 
void lock_twophase_standby_recover (FullTransactionId fxid, uint16 info, void *recdata, uint32 len)
 
void lock_twophase_postcommit (FullTransactionId fxid, uint16 info, void *recdata, uint32 len)
 
void lock_twophase_postabort (FullTransactionId fxid, uint16 info, void *recdata, uint32 len)
 
void VirtualXactLockTableInsert (VirtualTransactionId vxid)
 
void VirtualXactLockTableCleanup (void)
 
static bool XactLockForVirtualXact (VirtualTransactionId vxid, TransactionId xid, bool wait)
 
bool VirtualXactLock (VirtualTransactionId vxid, bool wait)
 
int LockWaiterCount (const LOCKTAG *locktag)
 

Variables

int max_locks_per_xact
 
bool log_lock_failures = false
 
static const LOCKMASK LockConflicts []
 
static const char *const lock_mode_names []
 
static bool Dummy_trace = false
 
static const LockMethodData default_lockmethod
 
static const LockMethodData user_lockmethod
 
static const LockMethod LockMethods []
 
static int FastPathLocalUseCounts [FP_LOCK_GROUPS_PER_BACKEND_MAX]
 
static bool IsRelationExtensionLockHeld PG_USED_FOR_ASSERTS_ONLY = false
 
int FastPathLockGroupsPerBackend = 0
 
static volatile FastPathStrongRelationLockData * FastPathStrongRelationLocks
 
const ShmemCallbacks LockManagerShmemCallbacks
 
static HTAB * LockMethodLockHash
 
static HTAB * LockMethodProcLockHash
 
static HTAB * LockMethodLocalHash
 
static LOCALLOCK * StrongLockInProgress
 
static LOCALLOCK * awaitedLock
 
static ResourceOwner awaitedOwner
 

Macro Definition Documentation

◆ ConflictsWithRelationFastPath

#define ConflictsWithRelationFastPath (   locktag,
  mode 
)
Value:
((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
(locktag)->locktag_type == LOCKTAG_RELATION && \
(locktag)->locktag_field1 != InvalidOid && \
(mode) > ShareUpdateExclusiveLock)
#define ShareUpdateExclusiveLock
Definition lockdefs.h:39
#define DEFAULT_LOCKMETHOD
Definition locktag.h:25
@ LOCKTAG_RELATION
Definition locktag.h:37
static PgChecksumMode mode
#define InvalidOid

Definition at line 276 of file lock.c.

309{
310 slock_t mutex;
313
315
316static void LockManagerShmemRequest(void *arg);
317static void LockManagerShmemInit(void *arg);
318
321 .init_fn = LockManagerShmemInit,
322};
323
324
325/*
326 * Pointers to hash tables containing lock state
327 *
328 * The LockMethodLockHash and LockMethodProcLockHash hash tables are in
329 * shared memory; LockMethodLocalHash is local to each backend.
330 */
334
335
336/* private state for error cleanup */
338static LOCALLOCK *awaitedLock;
340
341
342#ifdef LOCK_DEBUG
343
344/*------
345 * The following configuration options are available for lock debugging:
346 *
347 * TRACE_LOCKS -- give a bunch of output what's going on in this file
348 * TRACE_USERLOCKS -- same but for user locks
349 * TRACE_LOCK_OIDMIN-- do not trace locks for tables below this oid
350 * (use to avoid output on system tables)
351 * TRACE_LOCK_TABLE -- trace locks on this table (oid) unconditionally
352 * DEBUG_DEADLOCKS -- currently dumps locks at untimely occasions ;)
353 *
354 * Furthermore, but in storage/lmgr/lwlock.c:
355 * TRACE_LWLOCKS -- trace lightweight locks (pretty useless)
356 *
357 * Define LOCK_DEBUG at compile time to get all these enabled.
358 * --------
359 */
360
362bool Trace_locks = false;
363bool Trace_userlocks = false;
364int Trace_lock_table = 0;
365bool Debug_deadlocks = false;
366
367
368inline static bool
369LOCK_DEBUG_ENABLED(const LOCKTAG *tag)
370{
371 return
374 || (Trace_lock_table &&
376}
377
378
379inline static void
380LOCK_PRINT(const char *where, const LOCK *lock, LOCKMODE type)
381{
382 if (LOCK_DEBUG_ENABLED(&lock->tag))
383 elog(LOG,
384 "%s: lock(%p) id(%u,%u,%u,%u,%u,%u) grantMask(%x) "
385 "req(%d,%d,%d,%d,%d,%d,%d)=%d "
386 "grant(%d,%d,%d,%d,%d,%d,%d)=%d wait(%d) type(%s)",
387 where, lock,
391 lock->grantMask,
392 lock->requested[1], lock->requested[2], lock->requested[3],
393 lock->requested[4], lock->requested[5], lock->requested[6],
394 lock->requested[7], lock->nRequested,
395 lock->granted[1], lock->granted[2], lock->granted[3],
396 lock->granted[4], lock->granted[5], lock->granted[6],
397 lock->granted[7], lock->nGranted,
398 dclist_count(&lock->waitProcs),
399 LockMethods[LOCK_LOCKMETHOD(*lock)]->lockModeNames[type]);
400}
401
402
403inline static void
404PROCLOCK_PRINT(const char *where, const PROCLOCK *proclockP)
405{
406 if (LOCK_DEBUG_ENABLED(&proclockP->tag.myLock->tag))
407 elog(LOG,
408 "%s: proclock(%p) lock(%p) method(%u) proc(%p) hold(%x)",
409 where, proclockP, proclockP->tag.myLock,
411 proclockP->tag.myProc, (int) proclockP->holdMask);
412}
413#else /* not LOCK_DEBUG */
414
415#define LOCK_PRINT(where, lock, type) ((void) 0)
416#define PROCLOCK_PRINT(where, proclockP) ((void) 0)
417#endif /* not LOCK_DEBUG */
418
419
420static uint32 proclock_hash(const void *key, Size keysize);
423 const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode);
426static void FinishStrongLockAcquire(void);
428static void waitonlock_error_callback(void *arg);
431static bool UnGrantLock(LOCK *lock, LOCKMODE lockmode,
433static void CleanUpLock(LOCK *lock, PROCLOCK *proclock,
435 bool wakeupNeeded);
437 LOCKTAG *locktag, LOCKMODE lockmode,
441
442
443/*
444 * Register the lock manager's shmem data structures.
445 *
446 * In addition to this, each backend must also call InitLockManagerAccess() to
447 * create the locallock hash table.
448 */
449static void
451{
453
454 /*
455 * Compute sizes for lock hashtables. Note that these calculations must
456 * agree with LockManagerShmemSize!
457 */
459
460 /*
461 * Hash table for LOCK structs. This stores per-locked-object
462 * information.
463 */
464 ShmemRequestHash(.name = "LOCK hash",
465 .nelems = max_table_size,
466 .ptr = &LockMethodLockHash,
467 .hash_info.keysize = sizeof(LOCKTAG),
468 .hash_info.entrysize = sizeof(LOCK),
469 .hash_info.num_partitions = NUM_LOCK_PARTITIONS,
470 .hash_flags = HASH_ELEM | HASH_BLOBS | HASH_PARTITION,
471 );
472
473 /* Assume an average of 2 holders per lock */
474 max_table_size *= 2;
475
476 ShmemRequestHash(.name = "PROCLOCK hash",
477 .nelems = max_table_size,
479 .hash_info.keysize = sizeof(PROCLOCKTAG),
480 .hash_info.entrysize = sizeof(PROCLOCK),
481 .hash_info.hash = proclock_hash,
482 .hash_info.num_partitions = NUM_LOCK_PARTITIONS,
483 .hash_flags = HASH_ELEM | HASH_FUNCTION | HASH_PARTITION,
484 );
485
486 ShmemRequestStruct(.name = "Fast Path Strong Relation Lock Data",
487 .size = sizeof(FastPathStrongRelationLockData),
488 .ptr = (void **) (void *) &FastPathStrongRelationLocks,
489 );
490}
491
492static void
494{
496}
497
498/*
499 * Initialize the lock manager's backend-private data structures.
500 */
501void
503{
504 /*
505 * Allocate non-shared hash table for LOCALLOCK structs. This stores lock
506 * counts and resource owner information.
507 */
508 HASHCTL info;
509
510 info.keysize = sizeof(LOCALLOCKTAG);
511 info.entrysize = sizeof(LOCALLOCK);
512
513 LockMethodLocalHash = hash_create("LOCALLOCK hash",
514 16,
515 &info,
517}
518
519
520/*
521 * Fetch the lock method table associated with a given lock
522 */
524GetLocksMethodTable(const LOCK *lock)
525{
527
530}
531
532/*
533 * Fetch the lock method table associated with a given locktag
534 */
536GetLockTagsMethodTable(const LOCKTAG *locktag)
537{
539
542}
543
544
545/*
546 * Compute the hash code associated with a LOCKTAG.
547 *
548 * To avoid unnecessary recomputations of the hash code, we try to do this
549 * just once per function, and then pass it around as needed. Aside from
550 * passing the hashcode to hash_search_with_hash_value(), we can extract
551 * the lock partition number from the hashcode.
552 */
553uint32
554LockTagHashCode(const LOCKTAG *locktag)
555{
556 return get_hash_value(LockMethodLockHash, locktag);
557}
558
559/*
560 * Compute the hash code associated with a PROCLOCKTAG.
561 *
562 * Because we want to use just one set of partition locks for both the
563 * LOCK and PROCLOCK hash tables, we have to make sure that PROCLOCKs
564 * fall into the same partition number as their associated LOCKs.
565 * dynahash.c expects the partition number to be the low-order bits of
566 * the hash code, and therefore a PROCLOCKTAG's hash code must have the
567 * same low-order bits as the associated LOCKTAG's hash code. We achieve
568 * this with this specialized hash function.
569 */
570static uint32
571proclock_hash(const void *key, Size keysize)
572{
573 const PROCLOCKTAG *proclocktag = (const PROCLOCKTAG *) key;
576
577 Assert(keysize == sizeof(PROCLOCKTAG));
578
579 /* Look into the associated LOCK object, and compute its hash code */
580 lockhash = LockTagHashCode(&proclocktag->myLock->tag);
581
582 /*
583 * To make the hash code also depend on the PGPROC, we xor the proc
584 * struct's address into the hash code, left-shifted so that the
585 * partition-number bits don't change. Since this is only a hash, we
586 * don't care if we lose high-order bits of the address; use an
587 * intermediate variable to suppress cast-pointer-to-int warnings.
588 */
591
592 return lockhash;
593}
594
595/*
596 * Compute the hash code associated with a PROCLOCKTAG, given the hashcode
597 * for its underlying LOCK.
598 *
599 * We use this just to avoid redundant calls of LockTagHashCode().
600 */
601static inline uint32
603{
604 uint32 lockhash = hashcode;
606
607 /*
608 * This must match proclock_hash()!
609 */
612
613 return lockhash;
614}
615
616/*
617 * Given two lock modes, return whether they would conflict.
618 */
619bool
621{
623
624 if (lockMethodTable->conflictTab[mode1] & LOCKBIT_ON(mode2))
625 return true;
626
627 return false;
628}
629
630/*
631 * LockHeldByMe -- test whether lock 'locktag' is held by the current
632 * transaction
633 *
634 * Returns true if current transaction holds a lock on 'tag' of mode
635 * 'lockmode'. If 'orstronger' is true, a stronger lockmode is also OK.
636 * ("Stronger" is defined as "numerically higher", which is a bit
637 * semantically dubious but is OK for the purposes we use this for.)
638 */
639bool
640LockHeldByMe(const LOCKTAG *locktag,
641 LOCKMODE lockmode, bool orstronger)
642{
645
646 /*
647 * See if there is a LOCALLOCK entry for this lock and lockmode
648 */
649 MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
650 localtag.lock = *locktag;
651 localtag.mode = lockmode;
652
654 &localtag,
655 HASH_FIND, NULL);
656
657 if (locallock && locallock->nLocks > 0)
658 return true;
659
660 if (orstronger)
661 {
663
664 for (slockmode = lockmode + 1;
666 slockmode++)
667 {
668 if (LockHeldByMe(locktag, slockmode, false))
669 return true;
670 }
671 }
672
673 return false;
674}
675
676#ifdef USE_ASSERT_CHECKING
677/*
678 * GetLockMethodLocalHash -- return the hash of local locks, for modules that
679 * evaluate assertions based on all locks held.
680 */
681HTAB *
683{
684 return LockMethodLocalHash;
685}
686#endif
687
688/*
689 * LockHasWaiters -- look up 'locktag' and check if releasing this
690 * lock would wake up other processes waiting for it.
691 */
692bool
693LockHasWaiters(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
694{
699 LOCK *lock;
700 PROCLOCK *proclock;
702 bool hasWaiters = false;
703
705 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
708 elog(ERROR, "unrecognized lock mode: %d", lockmode);
709
710#ifdef LOCK_DEBUG
711 if (LOCK_DEBUG_ENABLED(locktag))
712 elog(LOG, "LockHasWaiters: lock [%u,%u] %s",
713 locktag->locktag_field1, locktag->locktag_field2,
714 lockMethodTable->lockModeNames[lockmode]);
715#endif
716
717 /*
718 * Find the LOCALLOCK entry for this lock and lockmode
719 */
720 MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
721 localtag.lock = *locktag;
722 localtag.mode = lockmode;
723
725 &localtag,
726 HASH_FIND, NULL);
727
728 /*
729 * let the caller print its own error message, too. Do not ereport(ERROR).
730 */
731 if (!locallock || locallock->nLocks <= 0)
732 {
733 elog(WARNING, "you don't own a lock of type %s",
734 lockMethodTable->lockModeNames[lockmode]);
735 return false;
736 }
737
738 /*
739 * Check the shared lock table.
740 */
742
744
745 /*
746 * We don't need to re-find the lock or proclock, since we kept their
747 * addresses in the locallock table, and they couldn't have been removed
748 * while we were holding a lock on them.
749 */
750 lock = locallock->lock;
751 LOCK_PRINT("LockHasWaiters: found", lock, lockmode);
752 proclock = locallock->proclock;
753 PROCLOCK_PRINT("LockHasWaiters: found", proclock);
754
755 /*
756 * Double-check that we are actually holding a lock of the type we want to
757 * release.
758 */
759 if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
760 {
761 PROCLOCK_PRINT("LockHasWaiters: WRONGTYPE", proclock);
763 elog(WARNING, "you don't own a lock of type %s",
764 lockMethodTable->lockModeNames[lockmode]);
766 return false;
767 }
768
769 /*
770 * Do the checking.
771 */
772 if ((lockMethodTable->conflictTab[lockmode] & lock->waitMask) != 0)
773 hasWaiters = true;
774
776
777 return hasWaiters;
778}
779
780/*
781 * LockAcquire -- Check for lock conflicts, sleep if conflict found,
782 * set lock if/when no conflicts.
783 *
784 * Inputs:
785 * locktag: unique identifier for the lockable object
786 * lockmode: lock mode to acquire
787 * sessionLock: if true, acquire lock for session not current transaction
788 * dontWait: if true, don't wait to acquire lock
789 *
790 * Returns one of:
791 * LOCKACQUIRE_NOT_AVAIL lock not available, and dontWait=true
792 * LOCKACQUIRE_OK lock successfully acquired
793 * LOCKACQUIRE_ALREADY_HELD incremented count for lock already held
794 * LOCKACQUIRE_ALREADY_CLEAR incremented count for lock already clear
795 *
796 * In the normal case where dontWait=false and the caller doesn't need to
797 * distinguish a freshly acquired lock from one already taken earlier in
798 * this same transaction, there is no need to examine the return value.
799 *
800 * Side Effects: The lock is acquired and recorded in lock tables.
801 *
802 * NOTE: if we wait for the lock, there is no way to abort the wait
803 * short of aborting the transaction.
804 */
806LockAcquire(const LOCKTAG *locktag,
807 LOCKMODE lockmode,
808 bool sessionLock,
809 bool dontWait)
810{
811 return LockAcquireExtended(locktag, lockmode, sessionLock, dontWait,
812 true, NULL, false);
813}
814
815/*
816 * LockAcquireExtended - allows us to specify additional options
817 *
818 * reportMemoryError specifies whether a lock request that fills the lock
819 * table should generate an ERROR or not. Passing "false" allows the caller
820 * to attempt to recover from lock-table-full situations, perhaps by forcibly
821 * canceling other lock holders and then retrying. Note, however, that the
822 * return code for that is LOCKACQUIRE_NOT_AVAIL, so that it's unsafe to use
823 * in combination with dontWait = true, as the cause of failure couldn't be
824 * distinguished.
825 *
826 * If locallockp isn't NULL, *locallockp receives a pointer to the LOCALLOCK
827 * table entry if a lock is successfully acquired, or NULL if not.
828 *
829 * logLockFailure indicates whether to log details when a lock acquisition
830 * fails with dontWait = true.
831 */
833LockAcquireExtended(const LOCKTAG *locktag,
834 LOCKMODE lockmode,
835 bool sessionLock,
836 bool dontWait,
839 bool logLockFailure)
840{
845 LOCK *lock;
846 PROCLOCK *proclock;
847 bool found;
848 ResourceOwner owner;
849 uint32 hashcode;
851 bool found_conflict;
853 bool log_lock = false;
854
856 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
859 elog(ERROR, "unrecognized lock mode: %d", lockmode);
860
861 if (RecoveryInProgress() && !InRecovery &&
862 (locktag->locktag_type == LOCKTAG_OBJECT ||
863 locktag->locktag_type == LOCKTAG_RELATION) &&
864 lockmode > RowExclusiveLock)
867 errmsg("cannot acquire lock mode %s on database objects while recovery is in progress",
868 lockMethodTable->lockModeNames[lockmode]),
869 errhint("Only RowExclusiveLock or less can be acquired on database objects during recovery.")));
870
871#ifdef LOCK_DEBUG
872 if (LOCK_DEBUG_ENABLED(locktag))
873 elog(LOG, "LockAcquire: lock [%u,%u] %s",
874 locktag->locktag_field1, locktag->locktag_field2,
875 lockMethodTable->lockModeNames[lockmode]);
876#endif
877
878 /* Identify owner for lock */
879 if (sessionLock)
880 owner = NULL;
881 else
882 owner = CurrentResourceOwner;
883
884 /*
885 * Find or create a LOCALLOCK entry for this lock and lockmode
886 */
887 MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
888 localtag.lock = *locktag;
889 localtag.mode = lockmode;
890
892 &localtag,
893 HASH_ENTER, &found);
894
895 /*
896 * if it's a new locallock object, initialize it
897 */
898 if (!found)
899 {
900 locallock->lock = NULL;
901 locallock->proclock = NULL;
902 locallock->hashcode = LockTagHashCode(&(localtag.lock));
903 locallock->nLocks = 0;
904 locallock->holdsStrongLockCount = false;
905 locallock->lockCleared = false;
906 locallock->numLockOwners = 0;
907 locallock->maxLockOwners = 8;
908 locallock->lockOwners = NULL; /* in case next line fails */
909 locallock->lockOwners = (LOCALLOCKOWNER *)
911 locallock->maxLockOwners * sizeof(LOCALLOCKOWNER));
912 }
913 else
914 {
915 /* Make sure there will be room to remember the lock */
916 if (locallock->numLockOwners >= locallock->maxLockOwners)
917 {
918 int newsize = locallock->maxLockOwners * 2;
919
920 locallock->lockOwners = (LOCALLOCKOWNER *)
921 repalloc(locallock->lockOwners,
922 newsize * sizeof(LOCALLOCKOWNER));
923 locallock->maxLockOwners = newsize;
924 }
925 }
926 hashcode = locallock->hashcode;
927
928 if (locallockp)
930
931 /*
932 * If we already hold the lock, we can just increase the count locally.
933 *
934 * If lockCleared is already set, caller need not worry about absorbing
935 * sinval messages related to the lock's object.
936 */
937 if (locallock->nLocks > 0)
938 {
940 if (locallock->lockCleared)
942 else
944 }
945
946 /*
947 * We don't acquire any other heavyweight lock while holding the relation
948 * extension lock. We do allow to acquire the same relation extension
949 * lock more than once but that case won't reach here.
950 */
952
953 /*
954 * Prepare to emit a WAL record if acquisition of this lock needs to be
955 * replayed in a standby server.
956 *
957 * Here we prepare to log; after lock is acquired we'll issue log record.
958 * This arrangement simplifies error recovery in case the preparation step
959 * fails.
960 *
961 * Only AccessExclusiveLocks can conflict with lock types that read-only
962 * transactions can acquire in a standby server. Make sure this definition
963 * matches the one in GetRunningTransactionLocks().
964 */
965 if (lockmode >= AccessExclusiveLock &&
966 locktag->locktag_type == LOCKTAG_RELATION &&
969 {
971 log_lock = true;
972 }
973
974 /*
975 * Attempt to take lock via fast path, if eligible. But if we remember
976 * having filled up the fast path array, we don't attempt to make any
977 * further use of it until we release some locks. It's possible that some
978 * other backend has transferred some of those locks to the shared hash
979 * table, leaving space free, but it's not worth acquiring the LWLock just
980 * to check. It's also possible that we're acquiring a second or third
981 * lock type on a relation we have already locked using the fast-path, but
982 * for now we don't worry about that case either.
983 */
984 if (EligibleForRelationFastPath(locktag, lockmode))
985 {
988 {
990 bool acquired;
991
992 /*
993 * LWLockAcquire acts as a memory sequencing point, so it's safe
994 * to assume that any strong locker whose increment to
995 * FastPathStrongRelationLocks->counts becomes visible after we
996 * test it has yet to begin to transfer fast-path locks.
997 */
1000 acquired = false;
1001 else
1003 lockmode);
1005 if (acquired)
1006 {
1007 /*
1008 * The locallock might contain stale pointers to some old
1009 * shared objects; we MUST reset these to null before
1010 * considering the lock to be acquired via fast-path.
1011 */
1012 locallock->lock = NULL;
1013 locallock->proclock = NULL;
1014 GrantLockLocal(locallock, owner);
1015 return LOCKACQUIRE_OK;
1016 }
1017 }
1018 else
1019 {
1020 /*
1021 * Increment the lock statistics counter if lock could not be
1022 * acquired via the fast-path.
1023 */
1024 pgstat_count_lock_fastpath_exceeded(locallock->tag.lock.locktag_type);
1025 }
1026 }
1027
1028 /*
1029 * If this lock could potentially have been taken via the fast-path by
1030 * some other backend, we must (temporarily) disable further use of the
1031 * fast-path for this lock tag, and migrate any locks already taken via
1032 * this method to the main lock table.
1033 */
1034 if (ConflictsWithRelationFastPath(locktag, lockmode))
1035 {
1037
1040 hashcode))
1041 {
1043 if (locallock->nLocks == 0)
1045 if (locallockp)
1046 *locallockp = NULL;
1048 ereport(ERROR,
1050 errmsg("out of shared memory"),
1051 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
1052 else
1053 return LOCKACQUIRE_NOT_AVAIL;
1054 }
1055 }
1056
1057 /*
1058 * We didn't find the lock in our LOCALLOCK table, and we didn't manage to
1059 * take it via the fast-path, either, so we've got to mess with the shared
1060 * lock table.
1061 */
1063
1065
1066 /*
1067 * Find or create lock and proclock entries with this tag
1068 *
1069 * Note: if the locallock object already existed, it might have a pointer
1070 * to the lock already ... but we should not assume that that pointer is
1071 * valid, since a lock object with zero hold and request counts can go
1072 * away anytime. So we have to use SetupLockInTable() to recompute the
1073 * lock and proclock pointers, even if they're already set.
1074 */
1075 proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
1076 hashcode, lockmode);
1077 if (!proclock)
1078 {
1081 if (locallock->nLocks == 0)
1083 if (locallockp)
1084 *locallockp = NULL;
1086 ereport(ERROR,
1088 errmsg("out of shared memory"),
1089 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
1090 else
1091 return LOCKACQUIRE_NOT_AVAIL;
1092 }
1093 locallock->proclock = proclock;
1094 lock = proclock->tag.myLock;
1095 locallock->lock = lock;
1096
1097 /*
1098 * If lock requested conflicts with locks requested by waiters, must join
1099 * wait queue. Otherwise, check for conflict with already-held locks.
1100 * (That's last because most complex check.)
1101 */
1102 if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
1103 found_conflict = true;
1104 else
1106 lock, proclock);
1107
1108 if (!found_conflict)
1109 {
1110 /* No conflict with held or previously requested locks */
1111 GrantLock(lock, proclock, lockmode);
1113 }
1114 else
1115 {
1116 /*
1117 * Join the lock's wait queue. We call this even in the dontWait
1118 * case, because JoinWaitQueue() may discover that we can acquire the
1119 * lock immediately after all.
1120 */
1122 }
1123
1125 {
1126 /*
1127 * We're not getting the lock because a deadlock was detected already
1128 * while trying to join the wait queue, or because we would have to
1129 * wait but the caller requested no blocking.
1130 *
1131 * Undo the changes to shared entries before releasing the partition
1132 * lock.
1133 */
1135
1136 if (proclock->holdMask == 0)
1137 {
1139
1141 hashcode);
1142 dlist_delete(&proclock->lockLink);
1143 dlist_delete(&proclock->procLink);
1145 &(proclock->tag),
1148 NULL))
1149 elog(PANIC, "proclock table corrupted");
1150 }
1151 else
1152 PROCLOCK_PRINT("LockAcquire: did not join wait queue", proclock);
1153 lock->nRequested--;
1154 lock->requested[lockmode]--;
1155 LOCK_PRINT("LockAcquire: did not join wait queue",
1156 lock, lockmode);
1157 Assert((lock->nRequested > 0) &&
1158 (lock->requested[lockmode] >= 0));
1159 Assert(lock->nGranted <= lock->nRequested);
1161 if (locallock->nLocks == 0)
1163
1164 if (dontWait)
1165 {
1166 /*
1167 * Log lock holders and waiters as a detail log message if
1168 * logLockFailure = true and lock acquisition fails with dontWait
1169 * = true
1170 */
1171 if (logLockFailure)
1172 {
1176 const char *modename;
1177 int lockHoldersNum = 0;
1178
1182
1183 DescribeLockTag(&buf, &locallock->tag.lock);
1184 modename = GetLockmodeName(locallock->tag.lock.locktag_lockmethodid,
1185 lockmode);
1186
1187 /* Gather a list of all lock holders and waiters */
1192
1193 ereport(LOG,
1194 (errmsg("process %d could not obtain %s on %s",
1195 MyProcPid, modename, buf.data),
1197 "Process holding the lock: %s, Wait queue: %s.",
1198 "Processes holding the lock: %s, Wait queue: %s.",
1200 lock_holders_sbuf.data,
1201 lock_waiters_sbuf.data)));
1202
1203 pfree(buf.data);
1206 }
1207 if (locallockp)
1208 *locallockp = NULL;
1209 return LOCKACQUIRE_NOT_AVAIL;
1210 }
1211 else
1212 {
1214 /* DeadLockReport() will not return */
1215 }
1216 }
1217
1218 /*
1219 * We are now in the lock queue, or the lock was already granted. If
1220 * queued, go to sleep.
1221 */
1223 {
1224 Assert(!dontWait);
1225 PROCLOCK_PRINT("LockAcquire: sleeping on lock", proclock);
1226 LOCK_PRINT("LockAcquire: sleeping on lock", lock, lockmode);
1228
1230
1231 /*
1232 * NOTE: do not do any material change of state between here and
1233 * return. All required changes in locktable state must have been
1234 * done when the lock was granted to us --- see notes in WaitOnLock.
1235 */
1236
1238 {
1239 /*
1240 * We failed as a result of a deadlock, see CheckDeadLock(). Quit
1241 * now.
1242 */
1243 Assert(!dontWait);
1245 /* DeadLockReport() will not return */
1246 }
1247 }
1248 else
1251
1252 /* The lock was granted to us. Update the local lock entry accordingly */
1253 Assert((proclock->holdMask & LOCKBIT_ON(lockmode)) != 0);
1254 GrantLockLocal(locallock, owner);
1255
1256 /*
1257 * Lock state is fully up-to-date now; if we error out after this, no
1258 * special error cleanup is required.
1259 */
1261
1262 /*
1263 * Emit a WAL record if acquisition of this lock needs to be replayed in a
1264 * standby server.
1265 */
1266 if (log_lock)
1267 {
1268 /*
1269 * Decode the locktag back to the original values, to avoid sending
1270 * lots of empty bytes with every message. See lock.h to check how a
1271 * locktag is defined for LOCKTAG_RELATION
1272 */
1274 locktag->locktag_field2);
1275 }
1276
1277 return LOCKACQUIRE_OK;
1278}
1279
1280/*
1281 * Find or create LOCK and PROCLOCK objects as needed for a new lock
1282 * request.
1283 *
1284 * Returns the PROCLOCK object, or NULL if we failed to create the objects
1285 * for lack of shared memory.
1286 *
1287 * The appropriate partition lock must be held at entry, and will be
1288 * held at exit.
1289 */
1290static PROCLOCK *
/*
 * NOTE(review): the line carrying the function name and the first parameters
 * (lockMethodTable, proc) appears to be missing from this excerpt; also
 * several hash_search_with_hash_value() call headers and local declarations
 * below — verify against upstream lock.c.
 */
1292 const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode)
1293{
1294 LOCK *lock;
1295 PROCLOCK *proclock;
1298 bool found;
1299
1300 /*
1301 * Find or create a lock with this tag.
1302 */
1304 locktag,
1305 hashcode,
1307 &found);
1308 if (!lock)
 /* out of shared memory: caller treats NULL as acquisition failure */
1309 return NULL;
1310
1311 /*
1312 * if it's a new lock object, initialize it
1313 */
1314 if (!found)
1315 {
1316 lock->grantMask = 0;
1317 lock->waitMask = 0;
1318 dlist_init(&lock->procLocks);
1319 dclist_init(&lock->waitProcs);
1320 lock->nRequested = 0;
1321 lock->nGranted = 0;
1322 MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
1323 MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
1324 LOCK_PRINT("LockAcquire: new", lock, lockmode);
1325 }
1326 else
1327 {
1328 LOCK_PRINT("LockAcquire: found", lock, lockmode);
1329 Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
1330 Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
1331 Assert(lock->nGranted <= lock->nRequested);
1332 }
1333
1334 /*
1335 * Create the hash key for the proclock table.
1336 */
1337 proclocktag.myLock = lock;
1338 proclocktag.myProc = proc;
1339
1341
1342 /*
1343 * Find or create a proclock entry with this tag
1344 */
1346 &proclocktag,
1349 &found);
1350 if (!proclock)
1351 {
1352 /* Oops, not enough shmem for the proclock */
1353 if (lock->nRequested == 0)
1354 {
1355 /*
1356 * There are no other requestors of this lock, so garbage-collect
1357 * the lock object. We *must* do this to avoid a permanent leak
1358 * of shared memory, because there won't be anything to cause
1359 * anyone to release the lock object later.
1360 */
1361 Assert(dlist_is_empty(&(lock->procLocks)));
1363 &(lock->tag),
1364 hashcode,
1366 NULL))
1367 elog(PANIC, "lock table corrupted")
1368 }
1369 return NULL;
1370 }
1371
1372 /*
1373 * If new, initialize the new entry
1374 */
1375 if (!found)
1376 {
1378
1379 /*
1380 * It might seem unsafe to access proclock->groupLeader without a
1381 * lock, but it's not really. Either we are initializing a proclock
1382 * on our own behalf, in which case our group leader isn't changing
1383 * because the group leader for a process can only ever be changed by
1384 * the process itself; or else we are transferring a fast-path lock to
1385 * the main lock table, in which case that process can't change its
1386 * lock group leader without first releasing all of its locks (and in
1387 * particular the one we are currently transferring).
1388 */
1389 proclock->groupLeader = proc->lockGroupLeader != NULL ?
1390 proc->lockGroupLeader : proc;
1391 proclock->holdMask = 0;
1392 proclock->releaseMask = 0;
1393 /* Add proclock to appropriate lists */
1394 dlist_push_tail(&lock->procLocks, &proclock->lockLink);
1395 dlist_push_tail(&proc->myProcLocks[partition], &proclock->procLink);
1396 PROCLOCK_PRINT("LockAcquire: new", proclock);
1397 }
1398 else
1399 {
1400 PROCLOCK_PRINT("LockAcquire: found", proclock);
1401 Assert((proclock->holdMask & ~lock->grantMask) == 0);
1402
1403#ifdef CHECK_DEADLOCK_RISK
1404
1405 /*
1406 * Issue warning if we already hold a lower-level lock on this object
1407 * and do not hold a lock of the requested level or higher. This
1408 * indicates a deadlock-prone coding practice (eg, we'd have a
1409 * deadlock if another backend were following the same code path at
1410 * about the same time).
1411 *
1412 * This is not enabled by default, because it may generate log entries
1413 * about user-level coding practices that are in fact safe in context.
1414 * It can be enabled to help find system-level problems.
1415 *
1416 * XXX Doing numeric comparison on the lockmodes is a hack; it'd be
1417 * better to use a table. For now, though, this works.
1418 */
1419 {
1420 int i;
1421
1422 for (i = lockMethodTable->numLockModes; i > 0; i--)
1423 {
1424 if (proclock->holdMask & LOCKBIT_ON(i))
1425 {
1426 if (i >= (int) lockmode)
1427 break; /* safe: we have a lock >= req level */
1428 elog(LOG, "deadlock risk: raising lock level"
1429 " from %s to %s on object %u/%u/%u",
1430 lockMethodTable->lockModeNames[i],
1431 lockMethodTable->lockModeNames[lockmode],
1432 lock->tag.locktag_field1, lock->tag.locktag_field2,
1433 lock->tag.locktag_field3);
1434 break;
1435 }
1436 }
1437 }
1438#endif /* CHECK_DEADLOCK_RISK */
1439 }
1440
1441 /*
1442 * lock->nRequested and lock->requested[] count the total number of
1443 * requests, whether granted or waiting, so increment those immediately.
1444 * The other counts don't increment till we get the lock.
1445 */
1446 lock->nRequested++;
1447 lock->requested[lockmode]++;
1448 Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
1449
1450 /*
1451 * We shouldn't already hold the desired lock; else locallock table is
1452 * broken.
1453 */
1454 if (proclock->holdMask & LOCKBIT_ON(lockmode))
1455 elog(ERROR, "lock %s on object %u/%u/%u is already held",
1456 lockMethodTable->lockModeNames[lockmode],
1457 lock->tag.locktag_field1, lock->tag.locktag_field2,
1458 lock->tag.locktag_field3);
1459
1460 return proclock;
1461}
1462
1463/*
1464 * Check and set/reset the flag that we hold the relation extension lock.
1465 *
1466 * It is the caller's responsibility that this function is called after
1467 * acquiring/releasing the relation extension lock.
1468 *
1469 * Pass acquired as true if lock is acquired, false otherwise.
1470 */
1471static inline void
/*
 * Record (assert-builds only) whether we hold the relation extension lock;
 * per the header comment above, callers invoke this right after acquiring or
 * releasing that lock. NOTE(review): the parameter-list line and the
 * USE_ASSERT_CHECKING body statements are missing from this excerpt —
 * verify against upstream lock.c.
 */
1473{
1474#ifdef USE_ASSERT_CHECKING
1477#endif
1478}
1479
1480/*
1481 * Subroutine to free a locallock entry
1482 */
1483static void
/*
 * Free a LOCALLOCK entry: detach it from all resource owners, undo any
 * strong-lock count it holds, and remove it from the locallock hash table.
 * NOTE(review): the function-name line and the strong-lock-count decrement
 * statements are missing from this excerpt — verify against upstream lock.c.
 */
1485{
1486 int i;
1487
 /* Forget this lock in every resource owner that still references it. */
1488 for (i = locallock->numLockOwners - 1; i >= 0; i--)
1489 {
1490 if (locallock->lockOwners[i].owner != NULL)
1491 ResourceOwnerForgetLock(locallock->lockOwners[i].owner, locallock);
1492 }
1493 locallock->numLockOwners = 0;
1494 if (locallock->lockOwners != NULL)
1495 pfree(locallock->lockOwners);
1496 locallock->lockOwners = NULL;
1497
1498 if (locallock->holdsStrongLockCount)
1499 {
1501
1503
1507 locallock->holdsStrongLockCount = false;
1509 }
1510
1512 &(locallock->tag),
1513 HASH_REMOVE, NULL))
 /* WARNING, not PANIC: local-memory table corruption is survivable */
1514 elog(WARNING, "locallock table corrupted");
1515
1516 /*
1517 * Indicate that the lock is released for certain types of locks
1518 */
1520}
1521
1522/*
1523 * LockCheckConflicts -- test whether requested lock conflicts
1524 * with those already granted
1525 *
1526 * Returns true if conflict, false if no conflict.
1527 *
1528 * NOTES:
1529 * Here's what makes this complicated: one process's locks don't
1530 * conflict with one another, no matter what purpose they are held for
1531 * (eg, session and transaction locks do not conflict). Nor do the locks
1532 * of one process in a lock group conflict with those of another process in
1533 * the same group. So, we must subtract off these locks when determining
1534 * whether the requested new lock conflicts with those already held.
1535 */
1536bool
/*
 * NOTE(review): the function-name line and the declarations of myLocks,
 * conflictsRemaining[] and totalConflictsRemaining (plus the statements that
 * subtract our own/group holds from them) are missing from this excerpt —
 * verify against upstream lock.c.
 */
1538 LOCKMODE lockmode,
1539 LOCK *lock,
1540 PROCLOCK *proclock)
1541{
1542 int numLockModes = lockMethodTable->numLockModes;
1544 int conflictMask = lockMethodTable->conflictTab[lockmode];
1548 int i;
1549
1550 /*
1551 * first check for global conflicts: If no locks conflict with my request,
1552 * then I get the lock.
1553 *
1554 * Checking for conflict: lock->grantMask represents the types of
1555 * currently held locks. conflictTable[lockmode] has a bit set for each
1556 * type of lock that conflicts with request. Bitwise compare tells if
1557 * there is a conflict.
1558 */
1559 if (!(conflictMask & lock->grantMask))
1560 {
1561 PROCLOCK_PRINT("LockCheckConflicts: no conflict", proclock);
1562 return false;
1563 }
1564
1565 /*
1566 * Rats. Something conflicts. But it could still be my own lock, or a
1567 * lock held by another member of my locking group. First, figure out how
1568 * many conflicts remain after subtracting out any locks I hold myself.
1569 */
1570 myLocks = proclock->holdMask;
1571 for (i = 1; i <= numLockModes; i++)
1572 {
1573 if ((conflictMask & LOCKBIT_ON(i)) == 0)
1574 {
1575 conflictsRemaining[i] = 0;
1576 continue;
1577 }
1578 conflictsRemaining[i] = lock->granted[i];
1579 if (myLocks & LOCKBIT_ON(i))
1582 }
1583
1584 /* If no conflicts remain, we get the lock. */
1585 if (totalConflictsRemaining == 0)
1586 {
1587 PROCLOCK_PRINT("LockCheckConflicts: resolved (simple)", proclock);
1588 return false;
1589 }
1590
1591 /* If no group locking, it's definitely a conflict. */
1592 if (proclock->groupLeader == MyProc && MyProc->lockGroupLeader == NULL)
1593 {
1594 Assert(proclock->tag.myProc == MyProc);
1595 PROCLOCK_PRINT("LockCheckConflicts: conflicting (simple)",
1596 proclock);
1597 return true;
1598 }
1599
1600 /*
1601 * The relation extension lock conflicts even between members of the same
1602 * lock group.
1602 */
1604 {
1605 PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)",
1606 proclock);
1607 return true;
1608 }
1609
1610 /*
1611 * Locks held in conflicting modes by members of our own lock group are
1612 * not real conflicts; we can subtract those out and see if we still have
1613 * a conflict. This is O(N) in the number of processes holding or
1614 * awaiting locks on this object. We could improve that by making the
1615 * shared memory state more complex (and larger) but it doesn't seem worth
1616 * it.
1617 */
1619 {
1621 dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
1622
1623 if (proclock != otherproclock &&
1624 proclock->groupLeader == otherproclock->groupLeader &&
1625 (otherproclock->holdMask & conflictMask) != 0)
1626 {
1627 int intersectMask = otherproclock->holdMask & conflictMask;
1628
1629 for (i = 1; i <= numLockModes; i++)
1630 {
1631 if ((intersectMask & LOCKBIT_ON(i)) != 0)
1632 {
1633 if (conflictsRemaining[i] <= 0)
1634 elog(PANIC, "proclocks held do not match lock");
1637 }
1638 }
1639
1640 if (totalConflictsRemaining == 0)
1641 {
1642 PROCLOCK_PRINT("LockCheckConflicts: resolved (group)",
1643 proclock);
1644 return false;
1645 }
1646 }
1647 }
1648
1649 /* Nope, it's a real conflict. */
1650 PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)", proclock);
1651 return true;
1652}
1653
1654/*
1655 * GrantLock -- update the lock and proclock data structures to show
1656 * the lock request has been granted.
1657 *
1658 * NOTE: if proc was blocked, it also needs to be removed from the wait list
1659 * and have its waitLock/waitProcLock fields cleared. That's not done here.
1660 *
1661 * NOTE: the lock grant also has to be recorded in the associated LOCALLOCK
1662 * table entry; but since we may be awaking some other process, we can't do
1663 * that here; it's done by GrantLockLocal, instead.
1664 */
1665void
1666GrantLock(LOCK *lock, PROCLOCK *proclock, LOCKMODE lockmode)
1667{
1668 lock->nGranted++;
1669 lock->granted[lockmode]++;
1670 lock->grantMask |= LOCKBIT_ON(lockmode);
1671 if (lock->granted[lockmode] == lock->requested[lockmode])
1672 lock->waitMask &= LOCKBIT_OFF(lockmode);
1673 proclock->holdMask |= LOCKBIT_ON(lockmode);
1674 LOCK_PRINT("GrantLock", lock, lockmode);
1675 Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
1676 Assert(lock->nGranted <= lock->nRequested);
1677}
1678
1679/*
1680 * UnGrantLock -- opposite of GrantLock.
1681 *
1682 * Updates the lock and proclock data structures to show that the lock
1683 * is no longer held nor requested by the current holder.
1684 *
1685 * Returns true if there were any waiters waiting on the lock that
1686 * should now be woken up with ProcLockWakeup.
1687 */
1688static bool
1689UnGrantLock(LOCK *lock, LOCKMODE lockmode,
/* NOTE(review): the continuation line with the proclock/lockMethodTable
 * parameters is missing from this excerpt — verify against upstream lock.c. */
1691{
1692 bool wakeupNeeded = false;
1693
1694 Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
1695 Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
1696 Assert(lock->nGranted <= lock->nRequested);
1697
1698 /*
1699 * fix the general lock stats
1700 */
1701 lock->nRequested--;
1702 lock->requested[lockmode]--;
1703 lock->nGranted--;
1704 lock->granted[lockmode]--;
1705
1706 if (lock->granted[lockmode] == 0)
1707 {
1708 /* change the conflict mask. No more of this lock type. */
1709 lock->grantMask &= LOCKBIT_OFF(lockmode);
1710 }
1711
1712 LOCK_PRINT("UnGrantLock: updated", lock, lockmode);
1713
1714 /*
1715 * We need only run ProcLockWakeup if the released lock conflicts with at
1716 * least one of the lock types requested by waiter(s). Otherwise whatever
1717 * conflict made them wait must still exist. NOTE: before MVCC, we could
1718 * skip wakeup if lock->granted[lockmode] was still positive. But that's
1719 * not true anymore, because the remaining granted locks might belong to
1720 * some waiter, who could now be awakened because he doesn't conflict with
1721 * his own locks.
1722 */
1723 if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
1724 wakeupNeeded = true;
1725
1726 /*
1727 * Now fix the per-proclock state.
1728 */
1729 proclock->holdMask &= LOCKBIT_OFF(lockmode);
1730 PROCLOCK_PRINT("UnGrantLock: updated", proclock);
1731
 /* Caller is expected to pass this to CleanUpLock as wakeupNeeded. */
1732 return wakeupNeeded;
1733}
1734
1735/*
1736 * CleanUpLock -- clean up after releasing a lock. We garbage-collect the
1737 * proclock and lock objects if possible, and call ProcLockWakeup if there
1738 * are remaining requests and the caller says it's OK. (Normally, this
1739 * should be called after UnGrantLock, and wakeupNeeded is the result from
1740 * UnGrantLock.)
1741 *
1742 * The appropriate partition lock must be held at entry, and will be
1743 * held at exit.
1744 */
1745static void
1746CleanUpLock(LOCK *lock, PROCLOCK *proclock,
/* NOTE(review): the continuation line with the lockMethodTable/hashcode
 * parameters, the proclock_hashcode declaration, and the hash_search call
 * headers are missing from this excerpt — verify against upstream lock.c. */
1748 bool wakeupNeeded)
1749{
1750 /*
1751 * If this was my last hold on this lock, delete my entry in the proclock
1752 * table.
1753 */
1754 if (proclock->holdMask == 0)
1755 {
1757
1758 PROCLOCK_PRINT("CleanUpLock: deleting", proclock);
1759 dlist_delete(&proclock->lockLink);
1760 dlist_delete(&proclock->procLink);
1761 proclock_hashcode = ProcLockHashCode(&proclock->tag, hashcode);
1763 &(proclock->tag),
1766 NULL))
 /* Shared-memory table corruption is unrecoverable: PANIC. */
1767 elog(PANIC, "proclock table corrupted");
1768 }
1769
1770 if (lock->nRequested == 0)
1771 {
1772 /*
1773 * The caller just released the last lock, so garbage-collect the lock
1774 * object.
1775 */
1776 LOCK_PRINT("CleanUpLock: deleting", lock, 0);
1779 &(lock->tag),
1780 hashcode,
1782 NULL))
1783 elog(PANIC, "lock table corrupted");
1784 }
1785 else if (wakeupNeeded)
1786 {
1787 /* There are waiters on this lock, so wake them up. */
1789 }
1790}
1791
1792/*
1793 * GrantLockLocal -- update the locallock data structures to show
1794 * the lock request has been granted.
1795 *
1796 * We expect that LockAcquire made sure there is room to add a new
1797 * ResourceOwner entry.
1798 */
1799static void
/* NOTE(review): the function-name line (taking locallock and owner) is
 * missing from this excerpt — verify against upstream lock.c. */
1801{
1802 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
1803 int i;
1804
 /* LockAcquire guaranteed room for one more owner entry. */
1805 Assert(locallock->numLockOwners < locallock->maxLockOwners);
1806 /* Count the total */
1807 locallock->nLocks++;
1808 /* Count the per-owner lock */
1809 for (i = 0; i < locallock->numLockOwners; i++)
1810 {
1811 if (lockOwners[i].owner == owner)
1812 {
1813 lockOwners[i].nLocks++;
1814 return;
1815 }
1816 }
 /* Owner not seen before: append a new entry (i == numLockOwners here). */
1817 lockOwners[i].owner = owner;
1818 lockOwners[i].nLocks = 1;
1819 locallock->numLockOwners++;
1820 if (owner != NULL)
1822
1823 /* Indicate that the lock is acquired for certain types of locks. */
1825}
1826
1827/*
1828 * BeginStrongLockAcquire - inhibit use of fastpath for a given LOCALLOCK,
1829 * and arrange for error cleanup if it fails
1830 */
1831static void
/* NOTE(review): the function-name line, the StrongLockInProgress assertion,
 * and the spinlock acquire/release plus counter-increment statements are
 * missing from this excerpt — verify against upstream lock.c. */
1833{
1835 Assert(locallock->holdsStrongLockCount == false);
1836
1837 /*
1838 * Adding to a memory location is not atomic, so we take a spinlock to
1839 * ensure we don't collide with someone else trying to bump the count at
1840 * the same time.
1841 *
1842 * XXX: It might be worth considering using an atomic fetch-and-add
1843 * instruction here, on architectures where that is supported.
1844 */
1845
1848 locallock->holdsStrongLockCount = true;
1851}
1852
1853/*
1854 * FinishStrongLockAcquire - cancel pending cleanup for a strong lock
1855 * acquisition once it's no longer needed
1856 */
1857static void
/* Cancel the pending cleanup registered by BeginStrongLockAcquire (per the
 * header comment above). NOTE(review): the name line and the single body
 * statement are missing from this excerpt — verify against upstream lock.c. */
1859{
1861}
1862
1863/*
1864 * AbortStrongLockAcquire - undo strong lock state changes performed by
1865 * BeginStrongLockAcquire.
1866 */
1867void
/* NOTE(review): the name line, the local declarations, and the spinlock
 * acquire/decrement/release statements are missing from this excerpt —
 * verify against upstream lock.c. */
1869{
1872
 /* No strong-lock acquisition in progress: nothing to undo. */
1873 if (locallock == NULL)
1874 return;
1875
1877 Assert(locallock->holdsStrongLockCount == true);
1881 locallock->holdsStrongLockCount = false;
1884}
1885
1886/*
1887 * GrantAwaitedLock -- call GrantLockLocal for the lock we are doing
1888 * WaitOnLock on.
1889 *
1890 * proc.c needs this for the case where we are booted off the lock by
1891 * timeout, but discover that someone granted us the lock anyway.
1892 *
1893 * We could just export GrantLockLocal, but that would require including
1894 * resowner.h in lock.h, which creates circularity.
1895 */
1896void
1897GrantAwaitedLock(void)
1898{
 /* Record the granted wait in local state (see header comment above).
  * NOTE(review): the body statement is missing from this excerpt —
  * verify against upstream lock.c. */
1900}
1901
1902/*
1903 * GetAwaitedLock -- Return the lock we're currently doing WaitOnLock on.
1904 */
1905LOCALLOCK *
1906GetAwaitedLock(void)
1907{
1908 return awaitedLock;
1909}
1910
1911/*
1912 * ResetAwaitedLock -- Forget that we are waiting on a lock.
1913 */
1914void
1915ResetAwaitedLock(void)
1916{
1917 awaitedLock = NULL;
1918}
1919
1920/*
1921 * MarkLockClear -- mark an acquired lock as "clear"
1922 *
1923 * This means that we know we have absorbed all sinval messages that other
1924 * sessions generated before we acquired this lock, and so we can confidently
1925 * assume we know about any catalog changes protected by this lock.
1926 */
1927void
/* NOTE(review): the function-name line (taking the LOCALLOCK to mark) is
 * missing from this excerpt — verify against upstream lock.c. */
1929{
 /* Only a lock we actually hold can be marked clear. */
1930 Assert(locallock->nLocks > 0);
1931 locallock->lockCleared = true;
1932}
1933
1934/*
1935 * WaitOnLock -- wait to acquire a lock
1936 *
1937 * This is a wrapper around ProcSleep, with extra tracing and bookkeeping.
1938 */
1939static ProcWaitStatus
/* NOTE(review): the signature line, local declarations, error-context setup,
 * the awaitedLock assignment, the ProcSleep call, and the ps-display reset
 * statements are missing from this excerpt — verify against upstream lock.c. */
1941{
1944
1945 TRACE_POSTGRESQL_LOCK_WAIT_START(locallock->tag.lock.locktag_field1,
1946 locallock->tag.lock.locktag_field2,
1947 locallock->tag.lock.locktag_field3,
1948 locallock->tag.lock.locktag_field4,
1949 locallock->tag.lock.locktag_type,
1950 locallock->tag.mode);
1951
1952 /* Setup error traceback support for ereport() */
1957
1958 /* adjust the process title to indicate that it's waiting */
1959 set_ps_display_suffix("waiting");
1960
1961 /*
1962 * Record the fact that we are waiting for a lock, so that
1963 * LockErrorCleanup will clean up if cancel/die happens.
1964 */
1966 awaitedOwner = owner;
1967
1968 /*
1969 * NOTE: Think not to put any shared-state cleanup after the call to
1970 * ProcSleep, in either the normal or failure path. The lock state must
1971 * be fully set by the lock grantor, or by CheckDeadLock if we give up
1972 * waiting for the lock. This is necessary because of the possibility
1973 * that a cancel/die interrupt will interrupt ProcSleep after someone else
1974 * grants us the lock, but before we've noticed it. Hence, after granting,
1975 * the locktable state must fully reflect the fact that we own the lock;
1976 * we can't do additional work on return.
1977 *
1978 * We can and do use a PG_TRY block to try to clean up after failure, but
1979 * this still has a major limitation: elog(FATAL) can occur while waiting
1980 * (eg, a "die" interrupt), and then control won't come back here. So all
1981 * cleanup of essential state should happen in LockErrorCleanup, not here.
1982 * We can use PG_TRY to clear the "waiting" status flags, since doing that
1983 * is unimportant if the process exits.
1984 */
1985 PG_TRY();
1986 {
1988 }
1989 PG_CATCH();
1990 {
1991 /* In this path, awaitedLock remains set until LockErrorCleanup */
1992
1993 /* reset ps display to remove the suffix */
1995
1996 /* and propagate the error */
1997 PG_RE_THROW();
1998 }
1999 PG_END_TRY();
2000
2001 /*
2002 * We no longer want LockErrorCleanup to do anything.
2003 */
2004 awaitedLock = NULL;
2005
2006 /* reset ps display to remove the suffix */
2008
2010
2011 TRACE_POSTGRESQL_LOCK_WAIT_DONE(locallock->tag.lock.locktag_field1,
2012 locallock->tag.lock.locktag_field2,
2013 locallock->tag.lock.locktag_field3,
2014 locallock->tag.lock.locktag_field4,
2015 locallock->tag.lock.locktag_type,
2016 locallock->tag.mode);
2017
2018 return result;
2019}
2020
2021/*
2022 * error context callback for failures in WaitOnLock
2023 *
2024 * We report which lock was being waited on, in the same style used in
2025 * deadlock reports. This helps with lock timeout errors in particular.
2026 */
2027static void
/* Error context callback installed by WaitOnLock (see header comment above):
 * reports which lock was being waited for, e.g. on lock-timeout errors.
 * NOTE(review): the name line and the locallock/locktagbuf declarations are
 * missing from this excerpt — verify against upstream lock.c. */
2029{
2031 const LOCKTAG *tag = &locallock->tag.lock;
2032 LOCKMODE mode = locallock->tag.mode;
2034
2037
2038 errcontext("waiting for %s on %s",
2040 locktagbuf.data);
2041}
2042
2043/*
2044 * Remove a proc from the wait-queue it is on (caller must know it is on one).
2045 * This is only used when the proc has failed to get the lock, so we set its
2046 * waitStatus to PROC_WAIT_STATUS_ERROR.
2047 *
2048 * Appropriate partition lock must be held by caller. Also, caller is
2049 * responsible for signaling the proc if needed.
2050 *
2051 * NB: this does not clean up any locallock object that may exist for the lock.
2052 */
2053void
2054RemoveFromWaitQueue(PGPROC *proc, uint32 hashcode)
{
}
/* NOTE(review): the lockmethodid declaration, the waiting-state asserts, the
 * dclist_delete call, and the waitStatus assignment are missing from this
 * excerpt — verify against upstream lock.c. The stray braces above preserve
 * nothing; see corrected transcription below. */
2097
2098/*
2099 * LockRelease -- look up 'locktag' and release one 'lockmode' lock on it.
2100 * Release a session lock if 'sessionLock' is true, else release a
2101 * regular transaction lock.
2102 *
2103 * Side Effects: find any waiting processes that are now wakable,
2104 * grant them their requested locks and awaken them.
2105 * (We have to grant the lock here to avoid a race between
2106 * the waking process and any new process to
2107 * come along and request the lock.)
2108 */
2109bool
2110LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
2111{
/* NOTE(review): several local declarations (lockmethodid, lockMethodTable,
 * localtag, locallock, partitionLock) and a number of call-header lines are
 * missing from this excerpt — verify against upstream lock.c. */
2116 LOCK *lock;
2117 PROCLOCK *proclock;
2119 bool wakeupNeeded;
2120
2122 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2125 elog(ERROR, "unrecognized lock mode: %d", lockmode);
2126
2127#ifdef LOCK_DEBUG
2128 if (LOCK_DEBUG_ENABLED(locktag))
2129 elog(LOG, "LockRelease: lock [%u,%u] %s",
2130 locktag->locktag_field1, locktag->locktag_field2,
2131 lockMethodTable->lockModeNames[lockmode]);
2132#endif
2133
2134 /*
2135 * Find the LOCALLOCK entry for this lock and lockmode
2136 */
2137 MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
2138 localtag.lock = *locktag;
2139 localtag.mode = lockmode;
2140
2142 &localtag,
2143 HASH_FIND, NULL);
2144
2145 /*
2146 * let the caller print its own error message, too. Do not ereport(ERROR).
2147 */
2148 if (!locallock || locallock->nLocks <= 0)
2149 {
2150 elog(WARNING, "you don't own a lock of type %s",
2151 lockMethodTable->lockModeNames[lockmode]);
2152 return false;
2153 }
2154
2155 /*
2156 * Decrease the count for the resource owner.
2157 */
2158 {
2159 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
2160 ResourceOwner owner;
2161 int i;
2162
2163 /* Identify owner for lock */
2164 if (sessionLock)
2165 owner = NULL;
2166 else
2167 owner = CurrentResourceOwner;
2168
2169 for (i = locallock->numLockOwners - 1; i >= 0; i--)
2170 {
2171 if (lockOwners[i].owner == owner)
2172 {
2173 Assert(lockOwners[i].nLocks > 0);
2174 if (--lockOwners[i].nLocks == 0)
2175 {
2176 if (owner != NULL)
2178 /* compact out unused slot */
2179 locallock->numLockOwners--;
2180 if (i < locallock->numLockOwners)
2181 lockOwners[i] = lockOwners[locallock->numLockOwners];
2182 }
2183 break;
2184 }
2185 }
2186 if (i < 0)
2187 {
2188 /* don't release a lock belonging to another owner */
2189 elog(WARNING, "you don't own a lock of type %s",
2190 lockMethodTable->lockModeNames[lockmode]);
2191 return false;
2192 }
2193 }
2194
2195 /*
2196 * Decrease the total local count. If we're still holding the lock, we're
2197 * done.
2198 */
2199 locallock->nLocks--;
2200
2201 if (locallock->nLocks > 0)
2202 return true;
2203
2204 /*
2205 * At this point we can no longer suppose we are clear of invalidation
2206 * messages related to this lock. Although we'll delete the LOCALLOCK
2207 * object before any intentional return from this routine, it seems worth
2208 * the trouble to explicitly reset lockCleared right now, just in case
2209 * some error prevents us from deleting the LOCALLOCK.
2210 */
2211 locallock->lockCleared = false;
2212
2213 /* Attempt fast release of any lock eligible for the fast path. */
2214 if (EligibleForRelationFastPath(locktag, lockmode) &&
2216 {
2217 bool released;
2218
2219 /*
2220 * We might not find the lock here, even if we originally entered it
2221 * here. Another backend may have moved it to the main table.
2222 */
2225 lockmode);
2227 if (released)
2228 {
2230 return true;
2231 }
2232 }
2233
2234 /*
2235 * Otherwise we've got to mess with the shared lock table.
2236 */
2238
2240
2241 /*
2242 * Normally, we don't need to re-find the lock or proclock, since we kept
2243 * their addresses in the locallock table, and they couldn't have been
2244 * removed while we were holding a lock on them. But it's possible that
2245 * the lock was taken fast-path and has since been moved to the main hash
2246 * table by another backend, in which case we will need to look up the
2247 * objects here. We assume the lock field is NULL if so.
2248 */
2249 lock = locallock->lock;
2250 if (!lock)
2251 {
2253
2254 Assert(EligibleForRelationFastPath(locktag, lockmode));
2256 locktag,
2257 locallock->hashcode,
2258 HASH_FIND,
2259 NULL);
2260 if (!lock)
2261 elog(ERROR, "failed to re-find shared lock object");
2262 locallock->lock = lock;
2263
2264 proclocktag.myLock = lock;
2265 proclocktag.myProc = MyProc;
2267 &proclocktag,
2268 HASH_FIND,
2269 NULL);
2270 if (!locallock->proclock)
2271 elog(ERROR, "failed to re-find shared proclock object");
2272 }
2273 LOCK_PRINT("LockRelease: found", lock, lockmode);
2274 proclock = locallock->proclock;
2275 PROCLOCK_PRINT("LockRelease: found", proclock);
2276
2277 /*
2278 * Double-check that we are actually holding a lock of the type we want to
2279 * release.
2280 */
2281 if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
2282 {
2283 PROCLOCK_PRINT("LockRelease: WRONGTYPE", proclock);
2285 elog(WARNING, "you don't own a lock of type %s",
2286 lockMethodTable->lockModeNames[lockmode]);
2288 return false;
2289 }
2290
2291 /*
2292 * Do the releasing. CleanUpLock will waken any now-wakable waiters.
2293 */
2294 wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
2295
2296 CleanUpLock(lock, proclock,
2297 lockMethodTable, locallock->hashcode,
2298 wakeupNeeded);
2299
2301
2303 return true;
2304}
2305
2306/*
2307 * LockReleaseAll -- Release all locks of the specified lock method that
2308 * are held by the current process.
2309 *
2310 * Well, not necessarily *all* locks. The available behaviors are:
2311 * allLocks == true: release all locks including session locks.
2312 * allLocks == false: release all non-session locks.
2313 */
2314void
2316{
2317 HASH_SEQ_STATUS status;
2319 int i,
2320 numLockModes;
2322 LOCK *lock;
2323 int partition;
2324 bool have_fast_path_lwlock = false;
2325
2327 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2329
2330#ifdef LOCK_DEBUG
2331 if (*(lockMethodTable->trace_flag))
2332 elog(LOG, "LockReleaseAll: lockmethod=%d", lockmethodid);
2333#endif
2334
2335 /*
2336 * Get rid of our fast-path VXID lock, if appropriate. Note that this is
2337 * the only way that the lock we hold on our own VXID can ever get
2338 * released: it is always and only released when a toplevel transaction
2339 * ends.
2340 */
2343
2344 numLockModes = lockMethodTable->numLockModes;
2345
2346 /*
2347 * First we run through the locallock table and get rid of unwanted
2348 * entries, then we scan the process's proclocks and get rid of those. We
2349 * do this separately because we may have multiple locallock entries
2350 * pointing to the same proclock, and we daren't end up with any dangling
2351 * pointers. Fast-path locks are cleaned up during the locallock table
2352 * scan, though.
2353 */
2355
2356 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2357 {
2358 /*
2359 * If the LOCALLOCK entry is unused, something must've gone wrong
2360 * while trying to acquire this lock. Just forget the local entry.
2361 */
2362 if (locallock->nLocks == 0)
2363 {
2365 continue;
2366 }
2367
2368 /* Ignore items that are not of the lockmethod to be removed */
2370 continue;
2371
2372 /*
2373 * If we are asked to release all locks, we can just zap the entry.
2374 * Otherwise, must scan to see if there are session locks. We assume
2375 * there is at most one lockOwners entry for session locks.
2376 */
2377 if (!allLocks)
2378 {
2379 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
2380
2381 /* If session lock is above array position 0, move it down to 0 */
2382 for (i = 0; i < locallock->numLockOwners; i++)
2383 {
2384 if (lockOwners[i].owner == NULL)
2385 lockOwners[0] = lockOwners[i];
2386 else
2387 ResourceOwnerForgetLock(lockOwners[i].owner, locallock);
2388 }
2389
2390 if (locallock->numLockOwners > 0 &&
2391 lockOwners[0].owner == NULL &&
2392 lockOwners[0].nLocks > 0)
2393 {
2394 /* Fix the locallock to show just the session locks */
2395 locallock->nLocks = lockOwners[0].nLocks;
2396 locallock->numLockOwners = 1;
2397 /* We aren't deleting this locallock, so done */
2398 continue;
2399 }
2400 else
2401 locallock->numLockOwners = 0;
2402 }
2403
2404#ifdef USE_ASSERT_CHECKING
2405
2406 /*
2407 * Tuple locks are currently held only for short durations within a
2408 * transaction. Check that we didn't forget to release one.
2409 */
2411 elog(WARNING, "tuple lock held at commit");
2412#endif
2413
2414 /*
2415 * If the lock or proclock pointers are NULL, this lock was taken via
2416 * the relation fast-path (and is not known to have been transferred).
2417 */
2418 if (locallock->proclock == NULL || locallock->lock == NULL)
2419 {
2420 LOCKMODE lockmode = locallock->tag.mode;
2421 Oid relid;
2422
2423 /* Verify that a fast-path lock is what we've got. */
2424 if (!EligibleForRelationFastPath(&locallock->tag.lock, lockmode))
2425 elog(PANIC, "locallock table corrupted");
2426
2427 /*
2428 * If we don't currently hold the LWLock that protects our
2429 * fast-path data structures, we must acquire it before attempting
2430 * to release the lock via the fast-path. We will continue to
2431 * hold the LWLock until we're done scanning the locallock table,
2432 * unless we hit a transferred fast-path lock. (XXX is this
2433 * really such a good idea? There could be a lot of entries ...)
2434 */
2436 {
2438 have_fast_path_lwlock = true;
2439 }
2440
2441 /* Attempt fast-path release. */
2442 relid = locallock->tag.lock.locktag_field2;
2443 if (FastPathUnGrantRelationLock(relid, lockmode))
2444 {
2446 continue;
2447 }
2448
2449 /*
2450 * Our lock, originally taken via the fast path, has been
2451 * transferred to the main lock table. That's going to require
2452 * some extra work, so release our fast-path lock before starting.
2453 */
2455 have_fast_path_lwlock = false;
2456
2457 /*
2458 * Now dump the lock. We haven't got a pointer to the LOCK or
2459 * PROCLOCK in this case, so we have to handle this a bit
2460 * differently than a normal lock release. Unfortunately, this
2461 * requires an extra LWLock acquire-and-release cycle on the
2462 * partitionLock, but hopefully it shouldn't happen often.
2463 */
2465 &locallock->tag.lock, lockmode, false);
2467 continue;
2468 }
2469
2470 /* Mark the proclock to show we need to release this lockmode */
2471 if (locallock->nLocks > 0)
2472 locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
2473
2474 /* And remove the locallock hashtable entry */
2476 }
2477
2478 /* Done with the fast-path data structures */
2481
2482 /*
2483 * Now, scan each lock partition separately.
2484 */
2486 {
2488 dlist_head *procLocks = &MyProc->myProcLocks[partition];
2490
2492
2493 /*
2494 * If the proclock list for this partition is empty, we can skip
2495 * acquiring the partition lock. This optimization is trickier than
2496 * it looks, because another backend could be in process of adding
2497 * something to our proclock list due to promoting one of our
2498 * fast-path locks. However, any such lock must be one that we
2499 * decided not to delete above, so it's okay to skip it again now;
2500 * we'd just decide not to delete it again. We must, however, be
2501 * careful to re-fetch the list header once we've acquired the
2502 * partition lock, to be sure we have a valid, up-to-date pointer.
2503 * (There is probably no significant risk if pointer fetch/store is
2504 * atomic, but we don't wish to assume that.)
2505 *
2506 * XXX This argument assumes that the locallock table correctly
2507 * represents all of our fast-path locks. While allLocks mode
2508 * guarantees to clean up all of our normal locks regardless of the
2509 * locallock situation, we lose that guarantee for fast-path locks.
2510 * This is not ideal.
2511 */
2512 if (dlist_is_empty(procLocks))
2513 continue; /* needn't examine this partition */
2514
2516
2518 {
2519 PROCLOCK *proclock = dlist_container(PROCLOCK, procLink, proclock_iter.cur);
2520 bool wakeupNeeded = false;
2521
2522 Assert(proclock->tag.myProc == MyProc);
2523
2524 lock = proclock->tag.myLock;
2525
2526 /* Ignore items that are not of the lockmethod to be removed */
2527 if (LOCK_LOCKMETHOD(*lock) != lockmethodid)
2528 continue;
2529
2530 /*
2531 * In allLocks mode, force release of all locks even if locallock
2532 * table had problems
2533 */
2534 if (allLocks)
2535 proclock->releaseMask = proclock->holdMask;
2536 else
2537 Assert((proclock->releaseMask & ~proclock->holdMask) == 0);
2538
2539 /*
2540 * Ignore items that have nothing to be released, unless they have
2541 * holdMask == 0 and are therefore recyclable
2542 */
2543 if (proclock->releaseMask == 0 && proclock->holdMask != 0)
2544 continue;
2545
2546 PROCLOCK_PRINT("LockReleaseAll", proclock);
2547 LOCK_PRINT("LockReleaseAll", lock, 0);
2548 Assert(lock->nRequested >= 0);
2549 Assert(lock->nGranted >= 0);
2550 Assert(lock->nGranted <= lock->nRequested);
2551 Assert((proclock->holdMask & ~lock->grantMask) == 0);
2552
2553 /*
2554 * Release the previously-marked lock modes
2555 */
2556 for (i = 1; i <= numLockModes; i++)
2557 {
2558 if (proclock->releaseMask & LOCKBIT_ON(i))
2559 wakeupNeeded |= UnGrantLock(lock, i, proclock,
2561 }
2562 Assert((lock->nRequested >= 0) && (lock->nGranted >= 0));
2563 Assert(lock->nGranted <= lock->nRequested);
2564 LOCK_PRINT("LockReleaseAll: updated", lock, 0);
2565
2566 proclock->releaseMask = 0;
2567
2568 /* CleanUpLock will wake up waiters if needed. */
2569 CleanUpLock(lock, proclock,
2571 LockTagHashCode(&lock->tag),
2572 wakeupNeeded);
2573 } /* loop over PROCLOCKs within this partition */
2574
2576 } /* loop over partitions */
2577
2578#ifdef LOCK_DEBUG
2579 if (*(lockMethodTable->trace_flag))
2580 elog(LOG, "LockReleaseAll done");
2581#endif
2582}
2583
2584/*
 2585 * LockReleaseSession -- Release all session locks of the specified lock method
 2586 * that are held by the current process.
 2587 */
2588void
/* NOTE(review): extraction gap — the signature line (which declares the
 * lockmethodid parameter used below) and every line where the embedded
 * numbering jumps are missing from this extract; compare upstream lock.c. */
2590{
 2591 HASH_SEQ_STATUS status;
 2593
 /* NOTE(review): the "if" validating lockmethodid that guards this elog
  * appears to be one of the missing lines. */
 2595 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
 2596
 2598
 2599 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
 2600 {
 2601 /* Ignore items that are not of the specified lock method */
 /* NOTE(review): the lock-method comparison guarding this continue is a
  * missing line; the per-entry release call after it is also absent. */
 2603 continue;
 2604
 2606 }
 2607}
2608
2609/*
 2610 * LockReleaseCurrentOwner
 2611 * Release all locks belonging to CurrentResourceOwner
 2612 *
 2613 * If the caller knows what those locks are, it can pass them as an array.
 2614 * That speeds up the call significantly, when a lot of locks are held.
 2615 * Otherwise, pass NULL for locallocks, and we'll traverse through our hash
 2616 * table to find them.
 2617 */
2618void
/* NOTE(review): extraction gap — the signature line (declaring the
 * locallocks array and nlocks count used below) is missing, as are the
 * hash_seq_init call and the per-lock release calls forming the bodies of
 * the while and for loops. Compare upstream before relying on this text. */
2620{
 2621 if (locallocks == NULL)
 2622 {
 2623 HASH_SEQ_STATUS status;
 2625
 2627
 2628 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
 2630 }
 2631 else
 2632 {
 2633 int i;
 2634
 2635 for (i = nlocks - 1; i >= 0; i--)
 2637 }
 2638}
2639
2640/*
 2641 * ReleaseLockIfHeld
 2642 * Release any session-level locks on this lockable object if sessionLock
 2643 * is true; else, release any locks held by CurrentResourceOwner.
 2644 *
 2645 * It is tempting to pass this a ResourceOwner pointer (or NULL for session
 2646 * locks), but without refactoring LockRelease() we cannot support releasing
 2647 * locks belonging to resource owners other than CurrentResourceOwner.
 2648 * If we were to refactor, it'd be a good idea to fix it so we don't have to
 2649 * do a hashtable lookup of the locallock, too. However, currently this
 2650 * function isn't used heavily enough to justify refactoring for its
 2651 * convenience.
 2652 */
2653static void
/* NOTE(review): extraction gap — the signature line (declaring the
 * locallock pointer and sessionLock flag used below) is missing. */
2655{
 2656 ResourceOwner owner;
 2657 LOCALLOCKOWNER *lockOwners;
 2658 int i;
 2659
 2660 /* Identify owner for lock (must match LockRelease!) */
 2661 if (sessionLock)
 2662 owner = NULL;
 2663 else
 2664 owner = CurrentResourceOwner;
 2665
 2666 /* Scan to see if there are any locks belonging to the target owner */
 2667 lockOwners = locallock->lockOwners;
 2668 for (i = locallock->numLockOwners - 1; i >= 0; i--)
 2669 {
 2670 if (lockOwners[i].owner == owner)
 2671 {
 2672 Assert(lockOwners[i].nLocks > 0);
 2673 if (lockOwners[i].nLocks < locallock->nLocks)
 2674 {
 2675 /*
 2676 * We will still hold this lock after forgetting this
 2677 * ResourceOwner.
 2678 */
 2679 locallock->nLocks -= lockOwners[i].nLocks;
 2680 /* compact out unused slot */
 2681 locallock->numLockOwners--;
 2682 if (owner != NULL)
 /* NOTE(review): the statement body of the "if (owner != NULL)" above
  * (line 2683, presumably the resource-owner forget call) is a missing
  * line; as shown, the following "if" would wrongly become its body. */
 2684 if (i < locallock->numLockOwners)
 2685 lockOwners[i] = lockOwners[locallock->numLockOwners];
 2686 }
 2687 else
 2688 {
 2689 Assert(lockOwners[i].nLocks == locallock->nLocks);
 2690 /* We want to call LockRelease just once */
 2691 lockOwners[i].nLocks = 1;
 2692 locallock->nLocks = 1;
 2693 if (!LockRelease(&locallock->tag.lock,
 2694 locallock->tag.mode,
 2695 sessionLock))
 2696 elog(WARNING, "ReleaseLockIfHeld: failed??");
 2697 }
 2698 break;
 2699 }
 2700 }
 2701}
2702
2703/*
 2704 * LockReassignCurrentOwner
 2705 * Reassign all locks belonging to CurrentResourceOwner to belong
 2706 * to its parent resource owner.
 2707 *
 2708 * If the caller knows what those locks are, it can pass them as an array.
 2709 * That speeds up the call significantly, when a lot of locks are held
 2710 * (e.g pg_dump with a large schema). Otherwise, pass NULL for locallocks,
 2711 * and we'll traverse through our hash table to find them.
 2712 */
2713void
/* NOTE(review): extraction gap — the signature line (declaring locallocks
 * and nlocks) and the line computing the parent resource owner used below
 * are missing; the while-loop body (the per-lock LockReassignOwner call)
 * is also absent. Compare upstream. */
2715{
 2717
 2718 Assert(parent != NULL);
 2719
 2720 if (locallocks == NULL)
 2721 {
 2722 HASH_SEQ_STATUS status;
 2724
 2726
 2727 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
 2729 }
 2730 else
 2731 {
 2732 int i;
 2733
 2734 for (i = nlocks - 1; i >= 0; i--)
 2735 LockReassignOwner(locallocks[i], parent);
 2736 }
 2737}
2738
2739/*
 2740 * Subroutine of LockReassignCurrentOwner. Reassigns a given lock belonging to
 2741 * CurrentResourceOwner to its parent.
 2742 */
2743static void
/* NOTE(review): extraction gap — the signature line (declaring the
 * locallock and parent parameters used below) is missing. */
2745{
 2746 LOCALLOCKOWNER *lockOwners;
 2747 int i;
 2748 int ic = -1;
 2749 int ip = -1;
 2750
 2751 /*
 2752 * Scan to see if there are any locks belonging to current owner or its
 2753 * parent
 2754 */
 2755 lockOwners = locallock->lockOwners;
 2756 for (i = locallock->numLockOwners - 1; i >= 0; i--)
 2757 {
 2758 if (lockOwners[i].owner == CurrentResourceOwner)
 2759 ic = i;
 2760 else if (lockOwners[i].owner == parent)
 2761 ip = i;
 2762 }
 2763
 2764 if (ic < 0)
 2765 return; /* no current locks */
 2766
 2767 if (ip < 0)
 2768 {
 2769 /* Parent has no slot, so just give it the child's slot */
 2770 lockOwners[ic].owner = parent;
 /* NOTE(review): a line is missing here (numbering jumps over 2771);
  * presumably the resource-owner bookkeeping call for the parent. */
 2772 }
 2773 else
 2774 {
 2775 /* Merge child's count with parent's */
 2776 lockOwners[ip].nLocks += lockOwners[ic].nLocks;
 2777 /* compact out unused slot */
 2778 locallock->numLockOwners--;
 2779 if (ic < locallock->numLockOwners)
 2780 lockOwners[ic] = lockOwners[locallock->numLockOwners];
 2781 }
 /* NOTE(review): a trailing line (2782) is missing before the close brace;
  * presumably the forget call for the child resource owner. */
 2783}
2784
2785/*
 2786 * FastPathGrantRelationLock
 2787 * Grant lock using per-backend fast-path array, if there is space.
 2788 */
2789static bool
2790FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode)
2791{
 2792 uint32 i;
 /* NOTE(review): the declaration/initialization of unused_slot (line 2793)
  * is missing from this extract. */
 2794
 2795 /* fast-path group the lock belongs to */
 2796 uint32 group = FAST_PATH_REL_GROUP(relid);
 2797
 2798 /* Scan for existing entry for this relid, remembering empty slot. */
 2799 for (i = 0; i < FP_LOCK_SLOTS_PER_GROUP; i++)
 2800 {
 2801 /* index into the whole per-backend array */
 2802 uint32 f = FAST_PATH_SLOT(group, i);
 2803
 2804 if (FAST_PATH_GET_BITS(MyProc, f) == 0)
 2805 unused_slot = f;
 2806 else if (MyProc->fpRelId[f] == relid)
 2807 {
 2808 Assert(!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode));
 2809 FAST_PATH_SET_LOCKMODE(MyProc, f, lockmode);
 2810 return true;
 2811 }
 2812 }
 2813
 2814 /* If no existing entry, use any empty slot. */
 /* NOTE(review): the condition testing whether unused_slot was found
  * (line 2815) and the FAST_PATH_SET_LOCKMODE call (line 2818) are
  * missing lines in this extract. */
 2816 {
 2817 MyProc->fpRelId[unused_slot] = relid;
 2819 ++FastPathLocalUseCounts[group];
 2820 return true;
 2821 }
 2822
 2823 /* No existing entry, and no empty slot. */
 2824 return false;
 2825}
2826
2827/*
 2828 * FastPathUnGrantRelationLock
 2829 * Release fast-path lock, if present. Update backend-private local
 2830 * use count, while we're at it.
 2831 */
2832static bool
/* NOTE(review): extraction gap — the signature line (declaring the relid
 * and lockmode parameters used below) is missing. */
2834{
 2835 uint32 i;
 2836 bool result = false;
 2837
 2838 /* fast-path group the lock belongs to */
 2839 uint32 group = FAST_PATH_REL_GROUP(relid);
 2840
 /* The group's use count is rebuilt from scratch while scanning all slots. */
 2841 FastPathLocalUseCounts[group] = 0;
 2842 for (i = 0; i < FP_LOCK_SLOTS_PER_GROUP; i++)
 2843 {
 2844 /* index into the whole per-backend array */
 2845 uint32 f = FAST_PATH_SLOT(group, i);
 2846
 2847 if (MyProc->fpRelId[f] == relid
 2848 && FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
 2849 {
 2850 Assert(!result);
 2851 FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
 2852 result = true;
 2853 /* we continue iterating so as to update FastPathLocalUseCount */
 2854 }
 2855 if (FAST_PATH_GET_BITS(MyProc, f) != 0)
 2856 ++FastPathLocalUseCounts[group];
 2857 }
 2858 return result;
 2859}
2860
2861/*
 2862 * FastPathTransferRelationLocks
 2863 * Transfer locks matching the given lock tag from per-backend fast-path
 2864 * arrays to the shared hash table.
 2865 *
 2866 * Returns true if successful, false if ran out of shared memory.
 2867 */
2868static bool
/* NOTE(review): extraction gap — the first line of the signature (which
 * should declare the lockMethodTable and locktag parameters used below)
 * is missing; lines are also absent wherever the numbering jumps,
 * including the fpInfoLock LWLockAcquire before the databaseId test and
 * the partition-lock release in the out-of-memory path. */
2870 uint32 hashcode)
2871{
 2873 Oid relid = locktag->locktag_field2;
 2874 uint32 i;
 2875
 2876 /* fast-path group the lock belongs to */
 2877 uint32 group = FAST_PATH_REL_GROUP(relid);
 2878
 2879 /*
 2880 * Every PGPROC that can potentially hold a fast-path lock is present in
 2881 * ProcGlobal->allProcs. Prepared transactions are not, but any
 2882 * outstanding fast-path locks held by prepared transactions are
 2883 * transferred to the main lock table.
 2884 */
 2885 for (i = 0; i < ProcGlobal->allProcCount; i++)
 2886 {
 2887 PGPROC *proc = GetPGProcByNumber(i);
 2888 uint32 j;
 2889
 2891
 2892 /*
 2893 * If the target backend isn't referencing the same database as the
 2894 * lock, then we needn't examine the individual relation IDs at all;
 2895 * none of them can be relevant.
 2896 *
 2897 * proc->databaseId is set at backend startup time and never changes
 2898 * thereafter, so it might be safe to perform this test before
 2899 * acquiring &proc->fpInfoLock. In particular, it's certainly safe to
 2900 * assume that if the target backend holds any fast-path locks, it
 2901 * must have performed a memory-fencing operation (in particular, an
 2902 * LWLock acquisition) since setting proc->databaseId. However, it's
 2903 * less clear that our backend is certain to have performed a memory
 2904 * fencing operation since the other backend set proc->databaseId. So
 2905 * for now, we test it after acquiring the LWLock just to be safe.
 2906 *
 2907 * Also skip groups without any registered fast-path locks.
 2908 */
 2909 if (proc->databaseId != locktag->locktag_field1 ||
 2910 proc->fpLockBits[group] == 0)
 2911 {
 2912 LWLockRelease(&proc->fpInfoLock);
 2913 continue;
 2914 }
 2915
 2916 for (j = 0; j < FP_LOCK_SLOTS_PER_GROUP; j++)
 2917 {
 2918 uint32 lockmode;
 2919
 2920 /* index into the whole per-backend array */
 2921 uint32 f = FAST_PATH_SLOT(group, j);
 2922
 2923 /* Look for an allocated slot matching the given relid. */
 2924 if (relid != proc->fpRelId[f] || FAST_PATH_GET_BITS(proc, f) == 0)
 2925 continue;
 2926
 2927 /* Find or create lock object. */
 /* NOTE(review): the partition-lock LWLockAcquire (line 2928) and the
  * loop-bound expression on the next line (2930) are missing. */
 2929 for (lockmode = FAST_PATH_LOCKNUMBER_OFFSET;
 2931 ++lockmode)
 2932 {
 2933 PROCLOCK *proclock;
 2934
 2935 if (!FAST_PATH_CHECK_LOCKMODE(proc, f, lockmode))
 2936 continue;
 2937 proclock = SetupLockInTable(lockMethodTable, proc, locktag,
 2938 hashcode, lockmode);
 2939 if (!proclock)
 2940 {
 2942 LWLockRelease(&proc->fpInfoLock);
 2943 return false;
 2944 }
 2945 GrantLock(proclock->tag.myLock, proclock, lockmode);
 2946 FAST_PATH_CLEAR_LOCKMODE(proc, f, lockmode);
 2947 }
 /* NOTE(review): the partition-lock LWLockRelease (line 2948) is a
  * missing line. */
 2949
 2950 /* No need to examine remaining slots. */
 2951 break;
 2952 }
 2953 LWLockRelease(&proc->fpInfoLock);
 2954 }
 2955 return true;
 2956}
2957
2958/*
 2959 * FastPathGetRelationLockEntry
 2960 * Return the PROCLOCK for a lock originally taken via the fast-path,
 2961 * transferring it to the primary lock table if necessary.
 2962 *
 2963 * Note: caller takes care of updating the locallock object.
 2964 */
2965static PROCLOCK *
/* NOTE(review): extraction gap — the signature line (declaring the
 * locallock parameter used below) is missing, as are several declaration
 * lines (lockMethodTable, partitionLock, proclocktag) and the paired
 * LWLock acquire/release calls wherever the numbering jumps. */
2967{
 2969 LOCKTAG *locktag = &locallock->tag.lock;
 2970 PROCLOCK *proclock = NULL;
 2972 Oid relid = locktag->locktag_field2;
 2973 uint32 i,
 2974 group;
 2975
 2976 /* fast-path group the lock belongs to */
 2977 group = FAST_PATH_REL_GROUP(relid);
 2978
 2980
 2981 for (i = 0; i < FP_LOCK_SLOTS_PER_GROUP; i++)
 2982 {
 2983 uint32 lockmode;
 2984
 2985 /* index into the whole per-backend array */
 2986 uint32 f = FAST_PATH_SLOT(group, i);
 2987
 2988 /* Look for an allocated slot matching the given relid. */
 2989 if (relid != MyProc->fpRelId[f] || FAST_PATH_GET_BITS(MyProc, f) == 0)
 2990 continue;
 2991
 2992 /* If we don't have a lock of the given mode, forget it! */
 2993 lockmode = locallock->tag.mode;
 2994 if (!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
 2995 break;
 2996
 2997 /* Find or create lock object. */
 2999
 3000 proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
 3001 locallock->hashcode, lockmode);
 3002 if (!proclock)
 3003 {
 /* NOTE(review): the LWLock releases that precede this error exit
  * (lines 3004-3005) are missing from the extract. */
 3006 ereport(ERROR,
 3008 errmsg("out of shared memory"),
 3009 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
 3010 }
 3011 GrantLock(proclock->tag.myLock, proclock, lockmode);
 3012 FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
 3013
 3015
 3016 /* No need to examine remaining slots. */
 3017 break;
 3018 }
 3019
 3021
 3022 /* Lock may have already been transferred by some other backend. */
 3023 if (proclock == NULL)
 3024 {
 3025 LOCK *lock;
 3028
 3030
 3032 locktag,
 3033 locallock->hashcode,
 3034 HASH_FIND,
 3035 NULL);
 3036 if (!lock)
 3037 elog(ERROR, "failed to re-find shared lock object");
 3038
 3039 proclocktag.myLock = lock;
 3040 proclocktag.myProc = MyProc;
 3041
 3043 proclock = (PROCLOCK *)
 3045 &proclocktag,
 3047 HASH_FIND,
 3048 NULL);
 3049 if (!proclock)
 3050 elog(ERROR, "failed to re-find shared proclock object");
 3052 }
 3053
 3054 return proclock;
 3055}
3056
3057/*
 3058 * GetLockConflicts
 3059 * Get an array of VirtualTransactionIds of xacts currently holding locks
 3060 * that would conflict with the specified lock/lockmode.
 3061 * xacts merely awaiting such a lock are NOT reported.
 3062 *
 3063 * The result array is palloc'd and is terminated with an invalid VXID.
 3064 * *countp, if not null, is updated to the number of items set.
 3065 *
 3066 * Of course, the result could be out of date by the time it's returned, so
 3067 * use of this function has to be thought about carefully. Similarly, a
 3068 * PGPROC with no "lxid" will be considered non-conflicting regardless of any
 3069 * lock it holds. Existing callers don't care about a locker after that
 3070 * locker's pg_xact updates complete. CommitTransaction() clears "lxid" after
 3071 * pg_xact updates and before releasing locks.
 3072 *
 3073 * Note we never include the current xact's vxid in the result array,
 3074 * since an xact never blocks itself.
 3075 */
/* NOTE(review): extraction gap — the return-type line (3076) above the
 * function name is missing, as are many declaration lines (vxids,
 * lockMethodTable, conflictMask, partition lock, iterators) and several
 * statement lines wherever the embedded numbering jumps. */
3077GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
3078{
 3082 LOCK *lock;
 3085 PROCLOCK *proclock;
 3086 uint32 hashcode;
 3088 int count = 0;
 3089 int fast_count = 0;
 3090
 /* NOTE(review): the "if" guards for both elog calls below (lock method
  * and lock mode validation) are missing lines. */
 3092 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
 3095 elog(ERROR, "unrecognized lock mode: %d", lockmode);
 3096
 3097 /*
 3098 * Allocate memory to store results, and fill with InvalidVXID. We only
 3099 * need enough space for MaxBackends + max_prepared_xacts + a terminator.
 3100 * InHotStandby allocate once in TopMemoryContext.
 3101 */
 3102 if (InHotStandby)
 3103 {
 3104 if (vxids == NULL)
 3107 sizeof(VirtualTransactionId) *
 3109 }
 3110 else
 3112
 3113 /* Compute hash code and partition lock, and look up conflicting modes. */
 3114 hashcode = LockTagHashCode(locktag);
 3116 conflictMask = lockMethodTable->conflictTab[lockmode];
 3117
 3118 /*
 3119 * Fast path locks might not have been entered in the primary lock table.
 3120 * If the lock we're dealing with could conflict with such a lock, we must
 3121 * examine each backend's fast-path array for conflicts.
 3122 */
 3123 if (ConflictsWithRelationFastPath(locktag, lockmode))
 3124 {
 3125 int i;
 3126 Oid relid = locktag->locktag_field2;
 3128
 3129 /* fast-path group the lock belongs to */
 3130 uint32 group = FAST_PATH_REL_GROUP(relid);
 3131
 3132 /*
 3133 * Iterate over relevant PGPROCs. Anything held by a prepared
 3134 * transaction will have been transferred to the primary lock table,
 3135 * so we need not worry about those. This is all a bit fuzzy, because
 3136 * new locks could be taken after we've visited a particular
 3137 * partition, but the callers had better be prepared to deal with that
 3138 * anyway, since the locks could equally well be taken between the
 3139 * time we return the value and the time the caller does something
 3140 * with it.
 3141 */
 3142 for (i = 0; i < ProcGlobal->allProcCount; i++)
 3143 {
 3144 PGPROC *proc = GetPGProcByNumber(i);
 3145 uint32 j;
 3146
 3147 /* A backend never blocks itself */
 3148 if (proc == MyProc)
 3149 continue;
 3150
 3152
 3153 /*
 3154 * If the target backend isn't referencing the same database as
 3155 * the lock, then we needn't examine the individual relation IDs
 3156 * at all; none of them can be relevant.
 3157 *
 3158 * See FastPathTransferRelationLocks() for discussion of why we do
 3159 * this test after acquiring the lock.
 3160 *
 3161 * Also skip groups without any registered fast-path locks.
 3162 */
 3163 if (proc->databaseId != locktag->locktag_field1 ||
 3164 proc->fpLockBits[group] == 0)
 3165 {
 3166 LWLockRelease(&proc->fpInfoLock);
 3167 continue;
 3168 }
 3169
 3170 for (j = 0; j < FP_LOCK_SLOTS_PER_GROUP; j++)
 3171 {
 3173
 3174 /* index into the whole per-backend array */
 3175 uint32 f = FAST_PATH_SLOT(group, j);
 3176
 3177 /* Look for an allocated slot matching the given relid. */
 3178 if (relid != proc->fpRelId[f])
 3179 continue;
 3180 lockmask = FAST_PATH_GET_BITS(proc, f);
 3181 if (!lockmask)
 3182 continue;
 3184
 3185 /*
 3186 * There can only be one entry per relation, so if we found it
 3187 * and it doesn't conflict, we can skip the rest of the slots.
 3188 */
 3189 if ((lockmask & conflictMask) == 0)
 3190 break;
 3191
 3192 /* Conflict! */
 3193 GET_VXID_FROM_PGPROC(vxid, *proc);
 3194
 /* NOTE(review): the validity test guarding this append (line 3195)
  * is a missing line; cf. the comment on the next line. */
 3196 vxids[count++] = vxid;
 3197 /* else, xact already committed or aborted */
 3198
 3199 /* No need to examine remaining slots. */
 3200 break;
 3201 }
 3202
 3203 LWLockRelease(&proc->fpInfoLock);
 3204 }
 3205 }
 3206
 3207 /* Remember how many fast-path conflicts we found. */
 3208 fast_count = count;
 3209
 3210 /*
 3211 * Look up the lock object matching the tag.
 3212 */
 3214
 3216 locktag,
 3217 hashcode,
 3218 HASH_FIND,
 3219 NULL);
 3220 if (!lock)
 3221 {
 3222 /*
 3223 * If the lock object doesn't exist, there is nothing holding a lock
 3224 * on this lockable object.
 3225 */
 3227 vxids[count].procNumber = INVALID_PROC_NUMBER;
 3228 vxids[count].localTransactionId = InvalidLocalTransactionId;
 3229 if (countp)
 3230 *countp = count;
 3231 return vxids;
 3232 }
 3233
 3234 /*
 3235 * Examine each existing holder (or awaiter) of the lock.
 3236 */
 3238 {
 3239 proclock = dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
 3240
 3241 if (conflictMask & proclock->holdMask)
 3242 {
 3243 PGPROC *proc = proclock->tag.myProc;
 3244
 3245 /* A backend never blocks itself */
 3246 if (proc != MyProc)
 3247 {
 3249
 3250 GET_VXID_FROM_PGPROC(vxid, *proc);
 3251
 3253 {
 3254 int i;
 3255
 3256 /* Avoid duplicate entries. */
 3257 for (i = 0; i < fast_count; ++i)
 /* NOTE(review): the duplicate-comparison body of this for loop
  * (line 3258) is a missing line. */
 3259 break;
 3260 if (i >= fast_count)
 3261 vxids[count++] = vxid;
 3262 }
 3263 /* else, xact already committed or aborted */
 3264 }
 3265 }
 3266 }
 3267
 3269
 3270 if (count > MaxBackends + max_prepared_xacts) /* should never happen */
 3271 elog(PANIC, "too many conflicting locks found");
 3272
 3273 vxids[count].procNumber = INVALID_PROC_NUMBER;
 3274 vxids[count].localTransactionId = InvalidLocalTransactionId;
 3275 if (countp)
 3276 *countp = count;
 3277 return vxids;
 3278}
3279
3280/*
 3281 * Find a lock in the shared lock table and release it. It is the caller's
 3282 * responsibility to verify that this is a sane thing to do. (For example, it
 3283 * would be bad to release a lock here if there might still be a LOCALLOCK
 3284 * object with pointers to it.)
 3285 *
 3286 * We currently use this in two situations: first, to release locks held by
 3287 * prepared transactions on commit (see lock_twophase_postcommit); and second,
 3288 * to release locks taken via the fast-path, transferred to the main hash
 3289 * table, and then released (see LockReleaseAll).
 3290 */
3291static void
/* NOTE(review): extraction gap — the first signature line (which should
 * name this function and declare the lockMethodTable/proc parameters used
 * below) and the final parameter line are missing; several declaration
 * lines and paired LWLock acquire/release calls are also absent wherever
 * the numbering jumps. */
 3293 LOCKTAG *locktag, LOCKMODE lockmode,
3295{
 3296 LOCK *lock;
 3297 PROCLOCK *proclock;
 3299 uint32 hashcode;
 3302 bool wakeupNeeded;
 3303
 3304 hashcode = LockTagHashCode(locktag);
 3306
 3308
 3309 /*
 3310 * Re-find the lock object (it had better be there).
 3311 */
 3313 locktag,
 3314 hashcode,
 3315 HASH_FIND,
 3316 NULL);
 3317 if (!lock)
 3318 elog(PANIC, "failed to re-find shared lock object");
 3319
 3320 /*
 3321 * Re-find the proclock object (ditto).
 3322 */
 3323 proclocktag.myLock = lock;
 3324 proclocktag.myProc = proc;
 3325
 3327
 3329 &proclocktag,
 3331 HASH_FIND,
 3332 NULL);
 3333 if (!proclock)
 3334 elog(PANIC, "failed to re-find shared proclock object");
 3335
 3336 /*
 3337 * Double-check that we are actually holding a lock of the type we want to
 3338 * release.
 3339 */
 3340 if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
 3341 {
 3342 PROCLOCK_PRINT("lock_twophase_postcommit: WRONGTYPE", proclock);
 3344 elog(WARNING, "you don't own a lock of type %s",
 3345 lockMethodTable->lockModeNames[lockmode]);
 3346 return;
 3347 }
 3348
 3349 /*
 3350 * Do the releasing. CleanUpLock will waken any now-wakable waiters.
 3351 */
 3352 wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
 3353
 3354 CleanUpLock(lock, proclock,
 3355 lockMethodTable, hashcode,
 3356 wakeupNeeded);
 3357
 3359
 3360 /*
 3361 * Decrement strong lock count. This logic is needed only for 2PC.
 3362 */
 /* NOTE(review): the first half of this compound condition (line 3363,
  * presumably the decrement_strong_lock_count flag) and the body of the
  * block (lines 3366-3371) are missing. */
 3364 && ConflictsWithRelationFastPath(locktag, lockmode))
 3365 {
 3367
 3372 }
 3373}
3374
3375/*
 3376 * CheckForSessionAndXactLocks
 3377 * Check to see if transaction holds both session-level and xact-level
 3378 * locks on the same object; if so, throw an error.
 3379 *
 3380 * If we have both session- and transaction-level locks on the same object,
 3381 * PREPARE TRANSACTION must fail. This should never happen with regular
 3382 * locks, since we only take those at session level in some special operations
 3383 * like VACUUM. It's possible to hit this with advisory locks, though.
 3384 *
 3385 * It would be nice if we could keep the session hold and give away the
 3386 * transactional hold to the prepared xact. However, that would require two
 3387 * PROCLOCK objects, and we cannot be sure that another PROCLOCK will be
 3388 * available when it comes time for PostPrepare_Locks to do the deed.
 3389 * So for now, we error out while we can still do so safely.
 3390 *
 3391 * Since the LOCALLOCK table stores a separate entry for each lockmode,
 3392 * we can't implement this check by examining LOCALLOCK entries in isolation.
 3393 * We must build a transient hashtable that is indexed by locktag only.
 3394 */
3395static void
/* NOTE(review): extraction gap — lines are missing wherever the numbering
 * jumps, including the typedef name line, declarations (hash_ctl,
 * locallock), hash_create flag arguments, hash_seq_init, and the final
 * hash_destroy. Compare upstream before relying on this text. */
3397{
 3398 typedef struct
 3399 {
 3400 LOCKTAG lock; /* identifies the lockable object */
 3401 bool sessLock; /* is any lockmode held at session level? */
 3402 bool xactLock; /* is any lockmode held at xact level? */
 3404
 3406 HTAB *lockhtab;
 3407 HASH_SEQ_STATUS status;
 3409
 3410 /* Create a local hash table keyed by LOCKTAG only */
 3411 hash_ctl.keysize = sizeof(LOCKTAG);
 3412 hash_ctl.entrysize = sizeof(PerLockTagEntry);
 3414
 3415 lockhtab = hash_create("CheckForSessionAndXactLocks table",
 3416 256, /* arbitrary initial size */
 3417 &hash_ctl,
 3419
 3420 /* Scan local lock table to find entries for each LOCKTAG */
 3422
 3423 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
 3424 {
 3425 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
 3427 bool found;
 3428 int i;
 3429
 3430 /*
 3431 * Ignore VXID locks. We don't want those to be held by prepared
 3432 * transactions, since they aren't meaningful after a restart.
 3433 */
 3434 if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
 3435 continue;
 3436
 3437 /* Ignore it if we don't actually hold the lock */
 3438 if (locallock->nLocks <= 0)
 3439 continue;
 3440
 3441 /* Otherwise, find or make an entry in lockhtab */
 /* NOTE(review): the hash_search call assigning hentry (line 3442) is a
  * missing line. */
 3443 &locallock->tag.lock,
 3444 HASH_ENTER, &found);
 3445 if (!found) /* initialize, if newly created */
 3446 hentry->sessLock = hentry->xactLock = false;
 3447
 3448 /* Scan to see if we hold lock at session or xact level or both */
 3449 for (i = locallock->numLockOwners - 1; i >= 0; i--)
 3450 {
 3451 if (lockOwners[i].owner == NULL)
 3452 hentry->sessLock = true;
 3453 else
 3454 hentry->xactLock = true;
 3455 }
 3456
 3457 /*
 3458 * We can throw error immediately when we see both types of locks; no
 3459 * need to wait around to see if there are more violations.
 3460 */
 3461 if (hentry->sessLock && hentry->xactLock)
 3462 ereport(ERROR,
 3464 errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
 3465 }
 3466
 3467 /* Success, so clean up */
 3469}
3470
3471/*
 3472 * AtPrepare_Locks
 3473 * Do the preparatory work for a PREPARE: make 2PC state file records
 3474 * for all locks currently held.
 3475 *
 3476 * Session-level locks are ignored, as are VXID locks.
 3477 *
 3478 * For the most part, we don't need to touch shared memory for this ---
 3479 * all the necessary state information is in the locallock table.
 3480 * Fast-path locks are an exception, however: we move any such locks to
 3481 * the main table before allowing PREPARE TRANSACTION to succeed.
 3482 */
3483void
3484AtPrepare_Locks(void)
3485{
 3486 HASH_SEQ_STATUS status;
 /* NOTE(review): extraction gap — lines are missing wherever the numbering
  * jumps, including the locallock declaration, the calls to
  * CheckForSessionAndXactLocks and hash_seq_init, and the fast-path
  * transfer call inside the "if (locallock->proclock == NULL)" block. */
 3488
 3489 /* First, verify there aren't locks of both xact and session level */
 3491
 3492 /* Now do the per-locallock cleanup work */
 3494
 3495 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
 3496 {
 3497 TwoPhaseLockRecord record;
 3498 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
 3499 bool haveSessionLock;
 3500 bool haveXactLock;
 3501 int i;
 3502
 3503 /*
 3504 * Ignore VXID locks. We don't want those to be held by prepared
 3505 * transactions, since they aren't meaningful after a restart.
 3506 */
 3507 if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
 3508 continue;
 3509
 3510 /* Ignore it if we don't actually hold the lock */
 3511 if (locallock->nLocks <= 0)
 3512 continue;
 3513
 3514 /* Scan to see whether we hold it at session or transaction level */
 3515 haveSessionLock = haveXactLock = false;
 3516 for (i = locallock->numLockOwners - 1; i >= 0; i--)
 3517 {
 3518 if (lockOwners[i].owner == NULL)
 3519 haveSessionLock = true;
 3520 else
 3521 haveXactLock = true;
 3522 }
 3523
 3524 /* Ignore it if we have only session lock */
 3525 if (!haveXactLock)
 3526 continue;
 3527
 3528 /* This can't happen, because we already checked it */
 3529 if (haveSessionLock)
 3530 ereport(ERROR,
 3532 errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
 3533
 3534 /*
 3535 * If the local lock was taken via the fast-path, we need to move it
 3536 * to the primary lock table, or just get a pointer to the existing
 3537 * primary lock table entry if by chance it's already been
 3538 * transferred.
 3539 */
 3540 if (locallock->proclock == NULL)
 3541 {
 /* NOTE(review): the assignment of locallock->proclock (line 3542,
  * presumably via the fast-path transfer helper) is a missing line;
  * the next line dereferences it. */
 3543 locallock->lock = locallock->proclock->tag.myLock;
 3544 }
 3545
 3546 /*
 3547 * Arrange to not release any strong lock count held by this lock
 3548 * entry. We must retain the count until the prepared transaction is
 3549 * committed or rolled back.
 3550 */
 3551 locallock->holdsStrongLockCount = false;
 3552
 3553 /*
 3554 * Create a 2PC record.
 3555 */
 3556 memcpy(&(record.locktag), &(locallock->tag.lock), sizeof(LOCKTAG));
 3557 record.lockmode = locallock->tag.mode;
 3558
 /* NOTE(review): the first line of the 2PC record-registration call
  * (line 3559) is missing; only its continuation line survives. */
 3560 &record, sizeof(TwoPhaseLockRecord));
 3561 }
 3562}
3563
3564/*
3565 * PostPrepare_Locks
3566 * Clean up after successful PREPARE
3567 *
3568 * Here, we want to transfer ownership of our locks to a dummy PGPROC
3569 * that's now associated with the prepared transaction, and we want to
3570 * clean out the corresponding entries in the LOCALLOCK table.
3571 *
3572 * Note: by removing the LOCALLOCK entries, we are leaving dangling
3573 * pointers in the transaction's resource owner. This is OK at the
3574 * moment since resowner.c doesn't try to free locks retail at a toplevel
3575 * transaction commit or abort. We could alternatively zero out nLocks
3576 * and leave the LOCALLOCK entries to be garbage-collected by LockReleaseAll,
3577 * but that probably costs more cycles.
3578 */
3579void
3581{
3582 PGPROC *newproc = TwoPhaseGetDummyProc(fxid, false);
3583 HASH_SEQ_STATUS status;
3585 LOCK *lock;
3586 PROCLOCK *proclock;
3588 int partition;
3589
3590 /* Can't prepare a lock group follower. */
3593
3594 /* This is a critical section: any error means big trouble */
3596
3597 /*
3598 * First we run through the locallock table and get rid of unwanted
3599 * entries, then we scan the process's proclocks and transfer them to the
3600 * target proc.
3601 *
3602 * We do this separately because we may have multiple locallock entries
3603 * pointing to the same proclock, and we daren't end up with any dangling
3604 * pointers.
3605 */
3607
3608 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3609 {
3610 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3611 bool haveSessionLock;
3612 bool haveXactLock;
3613 int i;
3614
3615 if (locallock->proclock == NULL || locallock->lock == NULL)
3616 {
3617 /*
3618 * We must've run out of shared memory while trying to set up this
3619 * lock. Just forget the local entry.
3620 */
3621 Assert(locallock->nLocks == 0);
3623 continue;
3624 }
3625
3626 /* Ignore VXID locks */
3627 if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3628 continue;
3629
3630 /* Scan to see whether we hold it at session or transaction level */
3631 haveSessionLock = haveXactLock = false;
3632 for (i = locallock->numLockOwners - 1; i >= 0; i--)
3633 {
3634 if (lockOwners[i].owner == NULL)
3635 haveSessionLock = true;
3636 else
3637 haveXactLock = true;
3638 }
3639
3640 /* Ignore it if we have only session lock */
3641 if (!haveXactLock)
3642 continue;
3643
3644 /* This can't happen, because we already checked it */
3645 if (haveSessionLock)
3646 ereport(PANIC,
3648 errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3649
3650 /* Mark the proclock to show we need to release this lockmode */
3651 if (locallock->nLocks > 0)
3652 locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
3653
3654 /* And remove the locallock hashtable entry */
3656 }
3657
3658 /*
3659 * Now, scan each lock partition separately.
3660 */
3662 {
3664 dlist_head *procLocks = &(MyProc->myProcLocks[partition]);
3666
3668
3669 /*
3670 * If the proclock list for this partition is empty, we can skip
3671 * acquiring the partition lock. This optimization is safer than the
3672 * situation in LockReleaseAll, because we got rid of any fast-path
3673 * locks during AtPrepare_Locks, so there cannot be any case where
3674 * another backend is adding something to our lists now. For safety,
3675 * though, we code this the same way as in LockReleaseAll.
3676 */
3677 if (dlist_is_empty(procLocks))
3678 continue; /* needn't examine this partition */
3679
3681
3683 {
3684 proclock = dlist_container(PROCLOCK, procLink, proclock_iter.cur);
3685
3686 Assert(proclock->tag.myProc == MyProc);
3687
3688 lock = proclock->tag.myLock;
3689
3690 /* Ignore VXID locks */
3692 continue;
3693
3694 PROCLOCK_PRINT("PostPrepare_Locks", proclock);
3695 LOCK_PRINT("PostPrepare_Locks", lock, 0);
3696 Assert(lock->nRequested >= 0);
3697 Assert(lock->nGranted >= 0);
3698 Assert(lock->nGranted <= lock->nRequested);
3699 Assert((proclock->holdMask & ~lock->grantMask) == 0);
3700
3701 /* Ignore it if nothing to release (must be a session lock) */
3702 if (proclock->releaseMask == 0)
3703 continue;
3704
3705 /* Else we should be releasing all locks */
3706 if (proclock->releaseMask != proclock->holdMask)
3707 elog(PANIC, "we seem to have dropped a bit somewhere");
3708
3709 /*
3710 * We cannot simply modify proclock->tag.myProc to reassign
3711 * ownership of the lock, because that's part of the hash key and
3712 * the proclock would then be in the wrong hash chain. Instead
3713 * use hash_update_hash_key. (We used to create a new hash entry,
3714 * but that risks out-of-memory failure if other processes are
3715 * busy making proclocks too.) We must unlink the proclock from
3716 * our procLink chain and put it into the new proc's chain, too.
3717 *
3718 * Note: the updated proclock hash key will still belong to the
3719 * same hash partition, cf proclock_hash(). So the partition lock
3720 * we already hold is sufficient for this.
3721 */
3722 dlist_delete(&proclock->procLink);
3723
3724 /*
3725 * Create the new hash key for the proclock.
3726 */
3727 proclocktag.myLock = lock;
3728 proclocktag.myProc = newproc;
3729
3730 /*
3731 * Update groupLeader pointer to point to the new proc. (We'd
3732 * better not be a member of somebody else's lock group!)
3733 */
3734 Assert(proclock->groupLeader == proclock->tag.myProc);
3735 proclock->groupLeader = newproc;
3736
3737 /*
3738 * Update the proclock. We should not find any existing entry for
3739 * the same hash key, since there can be only one entry for any
3740 * given lock with my own proc.
3741 */
3743 proclock,
3744 &proclocktag))
3745 elog(PANIC, "duplicate entry found while reassigning a prepared transaction's locks");
3746
3747 /* Re-link into the new proc's proclock list */
3748 dlist_push_tail(&newproc->myProcLocks[partition], &proclock->procLink);
3749
3750 PROCLOCK_PRINT("PostPrepare_Locks: updated", proclock);
3751 } /* loop over PROCLOCKs within this partition */
3752
3754 } /* loop over partitions */
3755
3757}
3758
3759
3760/*
3761 * GetLockStatusData - Return a summary of the lock manager's internal
3762 * status, for use in a user-level reporting function.
3763 *
3764 * The return data consists of an array of LockInstanceData objects,
3765 * which are a lightly abstracted version of the PROCLOCK data structures,
3766 * i.e. there is one entry for each unique lock and interested PGPROC.
3767 * It is the caller's responsibility to match up related items (such as
3768 * references to the same lockable object or PGPROC) if wanted.
3769 *
3770 * The design goal is to hold the LWLocks for as short a time as possible;
3771 * thus, this function simply makes a copy of the necessary data and releases
3772 * the locks, allowing the caller to contemplate and format the data for as
3773 * long as it pleases.
3774 */
3775LockData *
3777{
3778 LockData *data;
3779 PROCLOCK *proclock;
3781 int els;
3782 int el;
3783 int i;
3784
3786
3787 /* Guess how much space we'll need. */
3788 els = MaxBackends;
3789 el = 0;
3791
3792 /*
3793 * First, we iterate through the per-backend fast-path arrays, locking
3794 * them one at a time. This might produce an inconsistent picture of the
3795 * system state, but taking all of those LWLocks at the same time seems
3796 * impractical (in particular, note MAX_SIMUL_LWLOCKS). It shouldn't
3797 * matter too much, because none of these locks can be involved in lock
3798 * conflicts anyway - anything that might must be present in the main lock
3799 * table. (For the same reason, we don't sweat about making leaderPid
3800 * completely valid. We cannot safely dereference another backend's
3801 * lockGroupLeader field without holding all lock partition locks, and
3802 * it's not worth that.)
3803 */
3804 for (i = 0; i < ProcGlobal->allProcCount; ++i)
3805 {
3806 PGPROC *proc = GetPGProcByNumber(i);
3807
3808 /* Skip backends with pid=0, as they don't hold fast-path locks */
3809 if (proc->pid == 0)
3810 continue;
3811
3813
3814 for (uint32 g = 0; g < FastPathLockGroupsPerBackend; g++)
3815 {
3816 /* Skip groups without registered fast-path locks */
3817 if (proc->fpLockBits[g] == 0)
3818 continue;
3819
3820 for (int j = 0; j < FP_LOCK_SLOTS_PER_GROUP; j++)
3821 {
3823 uint32 f = FAST_PATH_SLOT(g, j);
3825
3826 /* Skip unallocated slots */
3827 if (!lockbits)
3828 continue;
3829
3830 if (el >= els)
3831 {
3832 els += MaxBackends;
3833 data->locks = (LockInstanceData *)
3834 repalloc(data->locks, sizeof(LockInstanceData) * els);
3835 }
3836
3837 instance = &data->locks[el];
3839 proc->fpRelId[f]);
3841 instance->waitLockMode = NoLock;
3842 instance->vxid.procNumber = proc->vxid.procNumber;
3843 instance->vxid.localTransactionId = proc->vxid.lxid;
3844 instance->pid = proc->pid;
3845 instance->leaderPid = proc->pid;
3846 instance->fastpath = true;
3847
3848 /*
3849 * Successfully taking fast path lock means there were no
3850 * conflicting locks.
3851 */
3852 instance->waitStart = 0;
3853
3854 el++;
3855 }
3856 }
3857
3858 if (proc->fpVXIDLock)
3859 {
3862
3863 if (el >= els)
3864 {
3865 els += MaxBackends;
3866 data->locks = (LockInstanceData *)
3867 repalloc(data->locks, sizeof(LockInstanceData) * els);
3868 }
3869
3870 vxid.procNumber = proc->vxid.procNumber;
3872
3873 instance = &data->locks[el];
3875 instance->holdMask = LOCKBIT_ON(ExclusiveLock);
3876 instance->waitLockMode = NoLock;
3877 instance->vxid.procNumber = proc->vxid.procNumber;
3878 instance->vxid.localTransactionId = proc->vxid.lxid;
3879 instance->pid = proc->pid;
3880 instance->leaderPid = proc->pid;
3881 instance->fastpath = true;
3882 instance->waitStart = 0;
3883
3884 el++;
3885 }
3886
3887 LWLockRelease(&proc->fpInfoLock);
3888 }
3889
3890 /*
3891 * Next, acquire lock on the entire shared lock data structure. We do
3892 * this so that, at least for locks in the primary lock table, the state
3893 * will be self-consistent.
3894 *
3895 * Since this is a read-only operation, we take shared instead of
3896 * exclusive lock. There's not a whole lot of point to this, because all
3897 * the normal operations require exclusive lock, but it doesn't hurt
3898 * anything either. It will at least allow two backends to do
3899 * GetLockStatusData in parallel.
3900 *
3901 * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
3902 */
3903 for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
3905
3906 /* Now we can safely count the number of proclocks */
3908 if (data->nelements > els)
3909 {
3910 els = data->nelements;
3911 data->locks = (LockInstanceData *)
3912 repalloc(data->locks, sizeof(LockInstanceData) * els);
3913 }
3914
3915 /* Now scan the tables to copy the data */
3917
3918 while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
3919 {
3920 PGPROC *proc = proclock->tag.myProc;
3921 LOCK *lock = proclock->tag.myLock;
3922 LockInstanceData *instance = &data->locks[el];
3923
3924 memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
3925 instance->holdMask = proclock->holdMask;
3926 if (proc->waitLock == proclock->tag.myLock)
3927 instance->waitLockMode = proc->waitLockMode;
3928 else
3929 instance->waitLockMode = NoLock;
3930 instance->vxid.procNumber = proc->vxid.procNumber;
3931 instance->vxid.localTransactionId = proc->vxid.lxid;
3932 instance->pid = proc->pid;
3933 instance->leaderPid = proclock->groupLeader->pid;
3934 instance->fastpath = false;
3935 instance->waitStart = (TimestampTz) pg_atomic_read_u64(&proc->waitStart);
3936
3937 el++;
3938 }
3939
3940 /*
3941 * And release locks. We do this in reverse order for two reasons: (1)
3942 * Anyone else who needs more than one of the locks will be trying to lock
3943 * them in increasing order; we don't want to release the other process
3944 * until it can get all the locks it needs. (2) This avoids O(N^2)
3945 * behavior inside LWLockRelease.
3946 */
3947 for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
3949
3950 Assert(el == data->nelements);
3951
3952 return data;
3953}
3954
3955/*
3956 * GetBlockerStatusData - Return a summary of the lock manager's state
3957 * concerning locks that are blocking the specified PID or any member of
3958 * the PID's lock group, for use in a user-level reporting function.
3959 *
3960 * For each PID within the lock group that is awaiting some heavyweight lock,
3961 * the return data includes an array of LockInstanceData objects, which are
3962 * the same data structure used by GetLockStatusData; but unlike that function,
3963 * this one reports only the PROCLOCKs associated with the lock that that PID
3964 * is blocked on. (Hence, all the locktags should be the same for any one
3965 * blocked PID.) In addition, we return an array of the PIDs of those backends
3966 * that are ahead of the blocked PID in the lock's wait queue. These can be
3967 * compared with the PIDs in the LockInstanceData objects to determine which
3968 * waiters are ahead of or behind the blocked PID in the queue.
3969 *
3970 * If blocked_pid isn't a valid backend PID or nothing in its lock group is
3971 * waiting on any heavyweight lock, return empty arrays.
3972 *
3973 * The design goal is to hold the LWLocks for as short a time as possible;
3974 * thus, this function simply makes a copy of the necessary data and releases
3975 * the locks, allowing the caller to contemplate and format the data for as
3976 * long as it pleases.
3977 */
3980{
3982 PGPROC *proc;
3983 int i;
3984
3986
3987 /*
3988 * Guess how much space we'll need, and preallocate. Most of the time
3989 * this will avoid needing to do repalloc while holding the LWLocks. (We
3990 * assume, but check with an Assert, that MaxBackends is enough entries
3991 * for the procs[] array; the other two could need enlargement, though.)
3992 */
3993 data->nprocs = data->nlocks = data->npids = 0;
3994 data->maxprocs = data->maxlocks = data->maxpids = MaxBackends;
3995 data->procs = palloc_array(BlockedProcData, data->maxprocs);
3996 data->locks = palloc_array(LockInstanceData, data->maxlocks);
3997 data->waiter_pids = palloc_array(int, data->maxpids);
3998
3999 /*
4000 * In order to search the ProcArray for blocked_pid and assume that that
4001 * entry won't immediately disappear under us, we must hold ProcArrayLock.
4002 * In addition, to examine the lock grouping fields of any other backend,
4003 * we must hold all the hash partition locks. (Only one of those locks is
4004 * actually relevant for any one lock group, but we can't know which one
4005 * ahead of time.) It's fairly annoying to hold all those locks
4006 * throughout this, but it's no worse than GetLockStatusData(), and it
4007 * does have the advantage that we're guaranteed to return a
4008 * self-consistent instantaneous state.
4009 */
4011
4013
4014 /* Nothing to do if it's gone */
4015 if (proc != NULL)
4016 {
4017 /*
4018 * Acquire lock on the entire shared lock data structure. See notes
4019 * in GetLockStatusData().
4020 */
4021 for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
4023
4024 if (proc->lockGroupLeader == NULL)
4025 {
4026 /* Easy case, proc is not a lock group member */
4028 }
4029 else
4030 {
4031 /* Examine all procs in proc's lock group */
4032 dlist_iter iter;
4033
4035 {
4037
4038 memberProc = dlist_container(PGPROC, lockGroupLink, iter.cur);
4040 }
4041 }
4042
4043 /*
4044 * And release locks. See notes in GetLockStatusData().
4045 */
4046 for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
4048
4049 Assert(data->nprocs <= data->maxprocs);
4050 }
4051
4053
4054 return data;
4055}
4056
4057/* Accumulate data about one possibly-blocked proc for GetBlockerStatusData */
4058static void
4060{
4061 LOCK *theLock = blocked_proc->waitLock;
4066 int queue_size;
4067
4068 /* Nothing to do if this proc is not blocked */
4069 if (theLock == NULL)
4070 return;
4071
4072 /* Set up a procs[] element */
4073 bproc = &data->procs[data->nprocs++];
4074 bproc->pid = blocked_proc->pid;
4075 bproc->first_lock = data->nlocks;
4076 bproc->first_waiter = data->npids;
4077
4078 /*
4079 * We may ignore the proc's fast-path arrays, since nothing in those could
4080 * be related to a contended lock.
4081 */
4082
4083 /* Collect all PROCLOCKs associated with theLock */
4084 dlist_foreach(proclock_iter, &theLock->procLocks)
4085 {
4086 PROCLOCK *proclock =
4087 dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
4088 PGPROC *proc = proclock->tag.myProc;
4089 LOCK *lock = proclock->tag.myLock;
4091
4092 if (data->nlocks >= data->maxlocks)
4093 {
4094 data->maxlocks += MaxBackends;
4095 data->locks = (LockInstanceData *)
4096 repalloc(data->locks, sizeof(LockInstanceData) * data->maxlocks);
4097 }
4098
4099 instance = &data->locks[data->nlocks];
4100 memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
4101 instance->holdMask = proclock->holdMask;
4102 if (proc->waitLock == lock)
4103 instance->waitLockMode = proc->waitLockMode;
4104 else
4105 instance->waitLockMode = NoLock;
4106 instance->vxid.procNumber = proc->vxid.procNumber;
4107 instance->vxid.localTransactionId = proc->vxid.lxid;
4108 instance->pid = proc->pid;
4109 instance->leaderPid = proclock->groupLeader->pid;
4110 instance->fastpath = false;
4111 data->nlocks++;
4112 }
4113
4114 /* Enlarge waiter_pids[] if it's too small to hold all wait queue PIDs */
4115 waitQueue = &(theLock->waitProcs);
4116 queue_size = dclist_count(waitQueue);
4117
4118 if (queue_size > data->maxpids - data->npids)
4119 {
4120 data->maxpids = Max(data->maxpids + MaxBackends,
4121 data->npids + queue_size);
4122 data->waiter_pids = (int *) repalloc(data->waiter_pids,
4123 sizeof(int) * data->maxpids);
4124 }
4125
4126 /* Collect PIDs from the lock's wait queue, stopping at blocked_proc */
4128 {
4130
4132 break;
4133 data->waiter_pids[data->npids++] = queued_proc->pid;
4134 }
4135
4136 bproc->num_locks = data->nlocks - bproc->first_lock;
4137 bproc->num_waiters = data->npids - bproc->first_waiter;
4138}
4139
4140/*
4141 * Returns a list of currently held AccessExclusiveLocks, for use by
4142 * LogStandbySnapshot(). The result is a palloc'd array,
4143 * with the number of elements returned into *nlocks.
4144 *
4145 * XXX This currently takes a lock on all partitions of the lock table,
4146 * but it's possible to do better. By reference counting locks and storing
4147 * the value in the ProcArray entry for each backend we could tell if any
4148 * locks need recording without having to acquire the partition locks and
4149 * scan the lock table. Whether that's worth the additional overhead
4150 * is pretty dubious though.
4151 */
4153GetRunningTransactionLocks(int *nlocks)
4154{
4156 PROCLOCK *proclock;
4158 int i;
4159 int index;
4160 int els;
4161
4162 /*
4163 * Acquire lock on the entire shared lock data structure.
4164 *
4165 * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
4166 */
4167 for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
4169
4170 /* Now we can safely count the number of proclocks */
4172
4173 /*
4174 * Allocating enough space for all locks in the lock table is overkill,
4175 * but it's more convenient and faster than having to enlarge the array.
4176 */
4178
4179 /* Now scan the tables to copy the data */
4181
4182 /*
4183 * If lock is a currently granted AccessExclusiveLock then it will have
4184 * just one proclock holder, so locks are never accessed twice in this
4185 * particular case. Don't copy this code for use elsewhere because in the
4186 * general case this will give you duplicate locks when looking at
4187 * non-exclusive lock types.
4188 */
4189 index = 0;
4190 while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
4191 {
4192 /* make sure this definition matches the one used in LockAcquire */
4193 if ((proclock->holdMask & LOCKBIT_ON(AccessExclusiveLock)) &&
4195 {
4196 PGPROC *proc = proclock->tag.myProc;
4197 LOCK *lock = proclock->tag.myLock;
4198 TransactionId xid = proc->xid;
4199
4200 /*
4201 * Don't record locks for transactions if we know they have
4202 * already issued their WAL record for commit but not yet released
4203 * lock. It is still possible that we see locks held by already
4204 * complete transactions, if they haven't yet zeroed their xids.
4205 */
4206 if (!TransactionIdIsValid(xid))
4207 continue;
4208
4209 accessExclusiveLocks[index].xid = xid;
4212
4213 index++;
4214 }
4215 }
4216
4217 Assert(index <= els);
4218
4219 /*
4220 * And release locks. We do this in reverse order for two reasons: (1)
4221 * Anyone else who needs more than one of the locks will be trying to lock
4222 * them in increasing order; we don't want to release the other process
4223 * until it can get all the locks it needs. (2) This avoids O(N^2)
4224 * behavior inside LWLockRelease.
4225 */
4226 for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
4228
4229 *nlocks = index;
4230 return accessExclusiveLocks;
4231}
4232
4233/* Provide the textual name of any lock mode */
4234const char *
4236{
4238 Assert(mode > 0 && mode <= LockMethods[lockmethodid]->numLockModes);
4240}
4241
4242#ifdef LOCK_DEBUG
4243/*
4244 * Dump all locks in the given proc's myProcLocks lists.
4245 *
4246 * Caller is responsible for having acquired appropriate LWLocks.
4247 */
4248void
4249DumpLocks(PGPROC *proc)
4250{
4251 int i;
4252
4253 if (proc == NULL)
4254 return;
4255
4256 if (proc->waitLock)
4257 LOCK_PRINT("DumpLocks: waiting on", proc->waitLock, 0);
4258
4259 for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
4260 {
4261 dlist_head *procLocks = &proc->myProcLocks[i];
4262 dlist_iter iter;
4263
4264 dlist_foreach(iter, procLocks)
4265 {
4266 PROCLOCK *proclock = dlist_container(PROCLOCK, procLink, iter.cur);
4267 LOCK *lock = proclock->tag.myLock;
4268
4269 Assert(proclock->tag.myProc == proc);
4270 PROCLOCK_PRINT("DumpLocks", proclock);
4271 LOCK_PRINT("DumpLocks", lock, 0);
4272 }
4273 }
4274}
4275
4276/*
4277 * Dump all lmgr locks.
4278 *
4279 * Caller is responsible for having acquired appropriate LWLocks.
4280 */
4281void
4282DumpAllLocks(void)
4283{
4284 PGPROC *proc;
4285 PROCLOCK *proclock;
4286 LOCK *lock;
4287 HASH_SEQ_STATUS status;
4288
4289 proc = MyProc;
4290
4291 if (proc && proc->waitLock)
4292 LOCK_PRINT("DumpAllLocks: waiting on", proc->waitLock, 0);
4293
4295
4296 while ((proclock = (PROCLOCK *) hash_seq_search(&status)) != NULL)
4297 {
4298 PROCLOCK_PRINT("DumpAllLocks", proclock);
4299
4300 lock = proclock->tag.myLock;
4301 if (lock)
4302 LOCK_PRINT("DumpAllLocks", lock, 0);
4303 else
4304 elog(LOG, "DumpAllLocks: proclock->tag.myLock = NULL");
4305 }
4306}
4307#endif /* LOCK_DEBUG */
4308
4309/*
4310 * LOCK 2PC resource manager's routines
4311 */
4312
4313/*
4314 * Re-acquire a lock belonging to a transaction that was prepared.
4315 *
4316 * Because this function is run at db startup, re-acquiring the locks should
4317 * never conflict with running transactions because there are none. We
4318 * assume that the lock state represented by the stored 2PC files is legal.
4319 *
4320 * When switching from Hot Standby mode to normal operation, the locks will
4321 * be already held by the startup process. The locks are acquired for the new
4322 * procs without checking for conflicts, so we don't get a conflict between the
4323 * startup process and the dummy procs, even though we will momentarily have
4324 * a situation where two procs are holding the same AccessExclusiveLock,
4325 * which isn't normally possible because the conflict. If we're in standby
4326 * mode, but a recovery snapshot hasn't been established yet, it's possible
4327 * that some but not all of the locks are already held by the startup process.
4328 *
4329 * This approach is simple, but also a bit dangerous, because if there isn't
4330 * enough shared memory to acquire the locks, an error will be thrown, which
4331 * is promoted to FATAL and recovery will abort, bringing down postmaster.
4332 * A safer approach would be to transfer the locks like we do in
4333 * AtPrepare_Locks, but then again, in hot standby mode it's possible for
4334 * read-only backends to use up all the shared lock memory anyway, so that
4335 * replaying the WAL record that needs to acquire a lock will throw an error
4336 * and PANIC anyway.
4337 */
4338void
4340 void *recdata, uint32 len)
4341{
4343 PGPROC *proc = TwoPhaseGetDummyProc(fxid, false);
4344 LOCKTAG *locktag;
4345 LOCKMODE lockmode;
4347 LOCK *lock;
4348 PROCLOCK *proclock;
4350 bool found;
4351 uint32 hashcode;
4353 int partition;
4356
4357 Assert(len == sizeof(TwoPhaseLockRecord));
4358 locktag = &rec->locktag;
4359 lockmode = rec->lockmode;
4361
4363 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4365
4366 hashcode = LockTagHashCode(locktag);
4367 partition = LockHashPartition(hashcode);
4369
4371
4372 /*
4373 * Find or create a lock with this tag.
4374 */
4376 locktag,
4377 hashcode,
4379 &found);
4380 if (!lock)
4381 {
4383 ereport(ERROR,
4385 errmsg("out of shared memory"),
4386 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
4387 }
4388
4389 /*
4390 * if it's a new lock object, initialize it
4391 */
4392 if (!found)
4393 {
4394 lock->grantMask = 0;
4395 lock->waitMask = 0;
4396 dlist_init(&lock->procLocks);
4397 dclist_init(&lock->waitProcs);
4398 lock->nRequested = 0;
4399 lock->nGranted = 0;
4400 MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
4401 MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
4402 LOCK_PRINT("lock_twophase_recover: new", lock, lockmode);
4403 }
4404 else
4405 {
4406 LOCK_PRINT("lock_twophase_recover: found", lock, lockmode);
4407 Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
4408 Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
4409 Assert(lock->nGranted <= lock->nRequested);
4410 }
4411
4412 /*
4413 * Create the hash key for the proclock table.
4414 */
4415 proclocktag.myLock = lock;
4416 proclocktag.myProc = proc;
4417
4419
4420 /*
4421 * Find or create a proclock entry with this tag
4422 */
4424 &proclocktag,
4427 &found);
4428 if (!proclock)
4429 {
4430 /* Oops, not enough shmem for the proclock */
4431 if (lock->nRequested == 0)
4432 {
4433 /*
4434 * There are no other requestors of this lock, so garbage-collect
4435 * the lock object. We *must* do this to avoid a permanent leak
4436 * of shared memory, because there won't be anything to cause
4437 * anyone to release the lock object later.
4438 */
4441 &(lock->tag),
4442 hashcode,
4444 NULL))
4445 elog(PANIC, "lock table corrupted");
4446 }
4448 ereport(ERROR,
4450 errmsg("out of shared memory"),
4451 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
4452 }
4453
4454 /*
4455 * If new, initialize the new entry
4456 */
4457 if (!found)
4458 {
4459 Assert(proc->lockGroupLeader == NULL);
4460 proclock->groupLeader = proc;
4461 proclock->holdMask = 0;
4462 proclock->releaseMask = 0;
4463 /* Add proclock to appropriate lists */
4464 dlist_push_tail(&lock->procLocks, &proclock->lockLink);
4466 &proclock->procLink);
4467 PROCLOCK_PRINT("lock_twophase_recover: new", proclock);
4468 }
4469 else
4470 {
4471 PROCLOCK_PRINT("lock_twophase_recover: found", proclock);
4472 Assert((proclock->holdMask & ~lock->grantMask) == 0);
4473 }
4474
4475 /*
4476 * lock->nRequested and lock->requested[] count the total number of
4477 * requests, whether granted or waiting, so increment those immediately.
4478 */
4479 lock->nRequested++;
4480 lock->requested[lockmode]++;
4481 Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
4482
4483 /*
4484 * We shouldn't already hold the desired lock.
4485 */
4486 if (proclock->holdMask & LOCKBIT_ON(lockmode))
4487 elog(ERROR, "lock %s on object %u/%u/%u is already held",
4488 lockMethodTable->lockModeNames[lockmode],
4489 lock->tag.locktag_field1, lock->tag.locktag_field2,
4490 lock->tag.locktag_field3);
4491
4492 /*
4493 * We ignore any possible conflicts and just grant ourselves the lock. Not
4494 * only because we don't bother, but also to avoid deadlocks when
4495 * switching from standby to normal mode. See function comment.
4496 */
4497 GrantLock(lock, proclock, lockmode);
4498
4499 /*
4500 * Bump strong lock count, to make sure any fast-path lock requests won't
4501 * be granted without consulting the primary lock table.
4502 */
4503 if (ConflictsWithRelationFastPath(&lock->tag, lockmode))
4504 {
4506
4510 }
4511
4513}
4514
4515/*
4516 * Re-acquire a lock belonging to a transaction that was prepared, when
4517 * starting up into hot standby mode.
4518 */
4519void
4521 void *recdata, uint32 len)
4522{
4524 LOCKTAG *locktag;
4525 LOCKMODE lockmode;
4527
4528 Assert(len == sizeof(TwoPhaseLockRecord));
4529 locktag = &rec->locktag;
4530 lockmode = rec->lockmode;
4532
4534 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4535
4536 if (lockmode == AccessExclusiveLock &&
4537 locktag->locktag_type == LOCKTAG_RELATION)
4538 {
4540 locktag->locktag_field1 /* dboid */ ,
4541 locktag->locktag_field2 /* reloid */ );
4542 }
4543}
4544
4545
4546/*
4547 * 2PC processing routine for COMMIT PREPARED case.
4548 *
4549 * Find and release the lock indicated by the 2PC record.
4550 */
4551void
4553 void *recdata, uint32 len)
4554{
4556 PGPROC *proc = TwoPhaseGetDummyProc(fxid, true);
4557 LOCKTAG *locktag;
4560
4561 Assert(len == sizeof(TwoPhaseLockRecord));
4562 locktag = &rec->locktag;
4564
4566 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4568
4569 LockRefindAndRelease(lockMethodTable, proc, locktag, rec->lockmode, true);
4570}
4571
4572/*
4573 * 2PC processing routine for ROLLBACK PREPARED case.
4574 *
4575 * This is actually just the same as the COMMIT case.
4576 */
4577void
4579 void *recdata, uint32 len)
4580{
4581 lock_twophase_postcommit(fxid, info, recdata, len);
4582}
4583
4584/*
4585 * VirtualXactLockTableInsert
4586 *
4587 * Take vxid lock via the fast-path. There can't be any pre-existing
4588 * lockers, as we haven't advertised this vxid via the ProcArray yet.
4589 *
4590 * Since MyProc->fpLocalTransactionId will normally contain the same data
4591 * as MyProc->vxid.lxid, you might wonder if we really need both. The
4592 * difference is that MyProc->vxid.lxid is set and cleared unlocked, and
4593 * examined by procarray.c, while fpLocalTransactionId is protected by
4594 * fpInfoLock and is used only by the locking subsystem. Doing it this
4595 * way makes it easier to verify that there are no funny race conditions.
4596 *
4597 * We don't bother recording this lock in the local lock table, since it's
4598 * only ever released at the end of a transaction. Instead,
4599 * LockReleaseAll() calls VirtualXactLockTableCleanup().
4600 */
4601void
4603{
4605
4607
4610 Assert(MyProc->fpVXIDLock == false);
4611
4612 MyProc->fpVXIDLock = true;
4614
4616}
4617
4618/*
4619 * VirtualXactLockTableCleanup
4620 *
4621 * Check whether a VXID lock has been materialized; if so, release it,
4622 * unblocking waiters.
4623 */
4624void
4626{
4627 bool fastpath;
4628 LocalTransactionId lxid;
4629
4631
4632 /*
4633 * Clean up shared memory state.
4634 */
4636
4637 fastpath = MyProc->fpVXIDLock;
4639 MyProc->fpVXIDLock = false;
4641
4643
4644 /*
4645 * If fpVXIDLock has been cleared without touching fpLocalTransactionId,
4646 * that means someone transferred the lock to the main lock table.
4647 */
4648 if (!fastpath && LocalTransactionIdIsValid(lxid))
4649 {
4651 LOCKTAG locktag;
4652
4653 vxid.procNumber = MyProcNumber;
4654 vxid.localTransactionId = lxid;
4655 SET_LOCKTAG_VIRTUALTRANSACTION(locktag, vxid);
4656
4658 &locktag, ExclusiveLock, false);
4659 }
4660}
4661
4662/*
4663 * XactLockForVirtualXact
4664 *
4665 * If TransactionIdIsValid(xid), this is essentially XactLockTableWait(xid,
4666 * NULL, NULL, XLTW_None) or ConditionalXactLockTableWait(xid). Unlike those
4667 * functions, it assumes "xid" is never a subtransaction and that "xid" is
4668 * prepared, committed, or aborted.
4669 *
4670 * If !TransactionIdIsValid(xid), this locks every prepared XID having been
4671 * known as "vxid" before its PREPARE TRANSACTION.
4672 */
4673static bool
4675 TransactionId xid, bool wait)
4676{
4677 bool more = false;
4678
4679 /* There is no point to wait for 2PCs if you have no 2PCs. */
4680 if (max_prepared_xacts == 0)
4681 return true;
4682
4683 do
4684 {
4686 LOCKTAG tag;
4687
4688 /* Clear state from previous iterations. */
4689 if (more)
4690 {
4692 more = false;
4693 }
4694
4695 /* If we have no xid, try to find one. */
4696 if (!TransactionIdIsValid(xid))
4697 xid = TwoPhaseGetXidByVirtualXID(vxid, &more);
4698 if (!TransactionIdIsValid(xid))
4699 {
4700 Assert(!more);
4701 return true;
4702 }
4703
4704 /* Check or wait for XID completion. */
4705 SET_LOCKTAG_TRANSACTION(tag, xid);
4706 lar = LockAcquire(&tag, ShareLock, false, !wait);
4708 return false;
4709 LockRelease(&tag, ShareLock, false);
4710 } while (more);
4711
4712 return true;
4713}
4714
4715/*
4716 * VirtualXactLock
4717 *
4718 * If wait = true, wait as long as the given VXID or any XID acquired by the
4719 * same transaction is still running. Then, return true.
4720 *
4721 * If wait = false, just check whether that VXID or one of those XIDs is still
4722 * running, and return true or false.
4723 */
4724bool
4726{
4727 LOCKTAG tag;
4728 PGPROC *proc;
4730
4732
4734 /* no vxid lock; localTransactionId is a normal, locked XID */
4735 return XactLockForVirtualXact(vxid, vxid.localTransactionId, wait);
4736
4738
4739 /*
4740 * If a lock table entry must be made, this is the PGPROC on whose behalf
4741 * it must be done. Note that the transaction might end or the PGPROC
4742 * might be reassigned to a new backend before we get around to examining
4743 * it, but it doesn't matter. If we find upon examination that the
4744 * relevant lxid is no longer running here, that's enough to prove that
4745 * it's no longer running anywhere.
4746 */
4747 proc = ProcNumberGetProc(vxid.procNumber);
4748 if (proc == NULL)
4749 return XactLockForVirtualXact(vxid, InvalidTransactionId, wait);
4750
4751 /*
4752 * We must acquire this lock before checking the procNumber and lxid
4753 * against the ones we're waiting for. The target backend will only set
4754 * or clear lxid while holding this lock.
4755 */
4757
4758 if (proc->vxid.procNumber != vxid.procNumber
4760 {
4761 /* VXID ended */
4762 LWLockRelease(&proc->fpInfoLock);
4763 return XactLockForVirtualXact(vxid, InvalidTransactionId, wait);
4764 }
4765
4766 /*
4767 * If we aren't asked to wait, there's no need to set up a lock table
4768 * entry. The transaction is still in progress, so just return false.
4769 */
4770 if (!wait)
4771 {
4772 LWLockRelease(&proc->fpInfoLock);
4773 return false;
4774 }
4775
4776 /*
4777 * OK, we're going to need to sleep on the VXID. But first, we must set
4778 * up the primary lock table entry, if needed (ie, convert the proc's
4779 * fast-path lock on its VXID to a regular lock).
4780 */
4781 if (proc->fpVXIDLock)
4782 {
4783 PROCLOCK *proclock;
4784 uint32 hashcode;
4786
4787 hashcode = LockTagHashCode(&tag);
4788
4791
4793 &tag, hashcode, ExclusiveLock);
4794 if (!proclock)
4795 {
4797 LWLockRelease(&proc->fpInfoLock);
4798 ereport(ERROR,
4800 errmsg("out of shared memory"),
4801 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
4802 }
4803 GrantLock(proclock->tag.myLock, proclock, ExclusiveLock);
4804
4806
4807 proc->fpVXIDLock = false;
4808 }
4809
4810 /*
4811 * If the proc has an XID now, we'll avoid a TwoPhaseGetXidByVirtualXID()
4812 * search. The proc might have assigned this XID but not yet locked it,
4813 * in which case the proc will lock this XID before releasing the VXID.
4814 * The fpInfoLock critical section excludes VirtualXactLockTableCleanup(),
4815 * so we won't save an XID of a different VXID. It doesn't matter whether
4816 * we save this before or after setting up the primary lock table entry.
4817 */
4818 xid = proc->xid;
4819
4820 /* Done with proc->fpLockBits */
4821 LWLockRelease(&proc->fpInfoLock);
4822
4823 /* Time to wait. */
4824 (void) LockAcquire(&tag, ShareLock, false, false);
4825
4826 LockRelease(&tag, ShareLock, false);
4827 return XactLockForVirtualXact(vxid, xid, wait);
4828}
4829
4830/*
4831 * LockWaiterCount
4832 *
4833 * Find the number of lock requester on this locktag
4834 */
4835int
4836LockWaiterCount(const LOCKTAG *locktag)
4837{
4839 LOCK *lock;
4840 bool found;
4841 uint32 hashcode;
4843 int waiters = 0;
4844
4846 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4847
4848 hashcode = LockTagHashCode(locktag);
4851
4853 locktag,
4854 hashcode,
4855 HASH_FIND,
4856 &found);
4857 if (found)
4858 {
4859 Assert(lock != NULL);
4860 waiters = lock->nRequested;
4861 }
4863
4864 return waiters;
4865}
static uint64 pg_atomic_read_u64(volatile pg_atomic_uint64 *ptr)
Definition atomics.h:467
#define Max(x, y)
Definition c.h:1085
#define Assert(condition)
Definition c.h:943
int64_t int64
Definition c.h:621
uint16_t uint16
Definition c.h:623
uint32_t uint32
Definition c.h:624
#define lengthof(array)
Definition c.h:873
uint32 LocalTransactionId
Definition c.h:738
#define MemSet(start, val, len)
Definition c.h:1107
uint32 TransactionId
Definition c.h:736
size_t Size
Definition c.h:689
uint32 result
memcpy(sums, checksumBaseOffsets, sizeof(checksumBaseOffsets))
int64 TimestampTz
Definition timestamp.h:39
void DeadLockReport(void)
Definition deadlock.c:1075
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition dynahash.c:889
HTAB * hash_create(const char *tabname, int64 nelem, const HASHCTL *info, int flags)
Definition dynahash.c:360
void hash_destroy(HTAB *hashp)
Definition dynahash.c:802
void * hash_search_with_hash_value(HTAB *hashp, const void *keyPtr, uint32 hashvalue, HASHACTION action, bool *foundPtr)
Definition dynahash.c:902
void * hash_seq_search(HASH_SEQ_STATUS *status)
Definition dynahash.c:1352
int64 hash_get_num_entries(HTAB *hashp)
Definition dynahash.c:1273
bool hash_update_hash_key(HTAB *hashp, void *existingEntry, const void *newKeyPtr)
Definition dynahash.c:1077
uint32 get_hash_value(HTAB *hashp, const void *keyPtr)
Definition dynahash.c:845
void hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp)
Definition dynahash.c:1317
Datum arg
Definition elog.c:1322
ErrorContextCallback * error_context_stack
Definition elog.c:99
int errcode(int sqlerrcode)
Definition elog.c:874
#define LOG
Definition elog.h:32
#define PG_RE_THROW()
Definition elog.h:407
#define errcontext
Definition elog.h:200
int errhint(const char *fmt,...) pg_attribute_printf(1
#define PG_TRY(...)
Definition elog.h:374
#define WARNING
Definition elog.h:37
#define PG_END_TRY(...)
Definition elog.h:399
#define PANIC
Definition elog.h:44
#define ERROR
Definition elog.h:40
#define PG_CATCH(...)
Definition elog.h:384
#define elog(elevel,...)
Definition elog.h:228
#define ereport(elevel,...)
Definition elog.h:152
int int int int errdetail_log_plural(const char *fmt_singular, const char *fmt_plural, unsigned long n,...) pg_attribute_printf(1
#define palloc_object(type)
Definition fe_memutils.h:74
#define palloc_array(type, count)
Definition fe_memutils.h:76
#define palloc0_array(type, count)
Definition fe_memutils.h:77
int MyProcPid
Definition globals.c:49
ProcNumber MyProcNumber
Definition globals.c:92
int MaxBackends
Definition globals.c:149
@ HASH_FIND
Definition hsearch.h:108
@ HASH_REMOVE
Definition hsearch.h:110
@ HASH_ENTER
Definition hsearch.h:109
@ HASH_ENTER_NULL
Definition hsearch.h:111
#define HASH_CONTEXT
Definition hsearch.h:97
#define HASH_ELEM
Definition hsearch.h:90
#define HASH_FUNCTION
Definition hsearch.h:93
#define HASH_BLOBS
Definition hsearch.h:92
#define HASH_PARTITION
Definition hsearch.h:87
#define dlist_foreach(iter, lhead)
Definition ilist.h:623
static void dlist_init(dlist_head *head)
Definition ilist.h:314
static void dlist_delete(dlist_node *node)
Definition ilist.h:405
static uint32 dclist_count(const dclist_head *head)
Definition ilist.h:932
static bool dclist_is_empty(const dclist_head *head)
Definition ilist.h:682
static bool dlist_node_is_detached(const dlist_node *node)
Definition ilist.h:525
#define dlist_foreach_modify(iter, lhead)
Definition ilist.h:640
static bool dlist_is_empty(const dlist_head *head)
Definition ilist.h:336
static void dlist_push_tail(dlist_head *head, dlist_node *node)
Definition ilist.h:364
static void dclist_delete_from_thoroughly(dclist_head *head, dlist_node *node)
Definition ilist.h:776
static void dclist_init(dclist_head *head)
Definition ilist.h:671
#define dlist_container(type, membername, ptr)
Definition ilist.h:593
#define dclist_foreach(iter, lhead)
Definition ilist.h:970
int j
Definition isn.c:78
int i
Definition isn.c:77
void DescribeLockTag(StringInfo buf, const LOCKTAG *tag)
Definition lmgr.c:1249
static bool XactLockForVirtualXact(VirtualTransactionId vxid, TransactionId xid, bool wait)
Definition lock.c:4675
LockAcquireResult LockAcquire(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock, bool dontWait)
Definition lock.c:807
static LOCALLOCK * awaitedLock
Definition lock.c:339
static void RemoveLocalLock(LOCALLOCK *locallock)
Definition lock.c:1485
static void LockReassignOwner(LOCALLOCK *locallock, ResourceOwner parent)
Definition lock.c:2745
bool LockHeldByMe(const LOCKTAG *locktag, LOCKMODE lockmode, bool orstronger)
Definition lock.c:641
void lock_twophase_postabort(FullTransactionId fxid, uint16 info, void *recdata, uint32 len)
Definition lock.c:4579
#define LOCK_PRINT(where, lock, type)
Definition lock.c:416
void PostPrepare_Locks(FullTransactionId fxid)
Definition lock.c:3581
void lock_twophase_standby_recover(FullTransactionId fxid, uint16 info, void *recdata, uint32 len)
Definition lock.c:4521
bool DoLockModesConflict(LOCKMODE mode1, LOCKMODE mode2)
Definition lock.c:621
static PROCLOCK * SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc, const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode)
Definition lock.c:1292
static PROCLOCK * FastPathGetRelationLockEntry(LOCALLOCK *locallock)
Definition lock.c:2967
const ShmemCallbacks LockManagerShmemCallbacks
Definition lock.c:320
void VirtualXactLockTableInsert(VirtualTransactionId vxid)
Definition lock.c:4603
#define NLOCKENTS()
Definition lock.c:59
#define FastPathStrongLockHashPartition(hashcode)
Definition lock.c:306
static uint32 ProcLockHashCode(const PROCLOCKTAG *proclocktag, uint32 hashcode)
Definition lock.c:603
#define FAST_PATH_CHECK_LOCKMODE(proc, n, l)
Definition lock.c:259
void GrantAwaitedLock(void)
Definition lock.c:1898
int LockWaiterCount(const LOCKTAG *locktag)
Definition lock.c:4837
void AtPrepare_Locks(void)
Definition lock.c:3485
bool LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
Definition lock.c:2111
#define FAST_PATH_LOCKNUMBER_OFFSET
Definition lock.c:245
#define FAST_PATH_REL_GROUP(rel)
Definition lock.c:220
void InitLockManagerAccess(void)
Definition lock.c:503
void GrantLock(LOCK *lock, PROCLOCK *proclock, LOCKMODE lockmode)
Definition lock.c:1667
void VirtualXactLockTableCleanup(void)
Definition lock.c:4626
bool VirtualXactLock(VirtualTransactionId vxid, bool wait)
Definition lock.c:4726
VirtualTransactionId * GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
Definition lock.c:3078
static volatile FastPathStrongRelationLockData * FastPathStrongRelationLocks
Definition lock.c:315
void RemoveFromWaitQueue(PGPROC *proc, uint32 hashcode)
Definition lock.c:2055
LockAcquireResult LockAcquireExtended(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock, bool dontWait, bool reportMemoryError, LOCALLOCK **locallockp, bool logLockFailure)
Definition lock.c:834
void LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
Definition lock.c:2316
#define FAST_PATH_SLOT(group, index)
Definition lock.c:227
static void CheckAndSetLockHeld(LOCALLOCK *locallock, bool acquired)
Definition lock.c:1473
#define ConflictsWithRelationFastPath(locktag, mode)
Definition lock.c:276
void ResetAwaitedLock(void)
Definition lock.c:1916
static bool FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag, uint32 hashcode)
Definition lock.c:2870
static HTAB * LockMethodLocalHash
Definition lock.c:334
void LockReassignCurrentOwner(LOCALLOCK **locallocks, int nlocks)
Definition lock.c:2715
static bool UnGrantLock(LOCK *lock, LOCKMODE lockmode, PROCLOCK *proclock, LockMethod lockMethodTable)
Definition lock.c:1690
#define FAST_PATH_SET_LOCKMODE(proc, n, l)
Definition lock.c:255
#define PROCLOCK_PRINT(where, proclockP)
Definition lock.c:417
static void CleanUpLock(LOCK *lock, PROCLOCK *proclock, LockMethod lockMethodTable, uint32 hashcode, bool wakeupNeeded)
Definition lock.c:1747
static uint32 proclock_hash(const void *key, Size keysize)
Definition lock.c:572
static bool FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode)
Definition lock.c:2834
void AbortStrongLockAcquire(void)
Definition lock.c:1869
static bool FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode)
Definition lock.c:2791
static int FastPathLocalUseCounts[FP_LOCK_GROUPS_PER_BACKEND_MAX]
Definition lock.c:179
static HTAB * LockMethodLockHash
Definition lock.c:332
static ResourceOwner awaitedOwner
Definition lock.c:340
BlockedProcsData * GetBlockerStatusData(int blocked_pid)
Definition lock.c:3980
static ProcWaitStatus WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner)
Definition lock.c:1941
bool LockHasWaiters(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
Definition lock.c:694
const char * GetLockmodeName(LOCKMETHODID lockmethodid, LOCKMODE mode)
Definition lock.c:4236
static void GetSingleProcBlockerStatusData(PGPROC *blocked_proc, BlockedProcsData *data)
Definition lock.c:4060
#define FAST_PATH_CLEAR_LOCKMODE(proc, n, l)
Definition lock.c:257
void lock_twophase_postcommit(FullTransactionId fxid, uint16 info, void *recdata, uint32 len)
Definition lock.c:4553
static const LockMethod LockMethods[]
Definition lock.c:153
static void waitonlock_error_callback(void *arg)
Definition lock.c:2029
void LockReleaseCurrentOwner(LOCALLOCK **locallocks, int nlocks)
Definition lock.c:2620
LOCALLOCK * GetAwaitedLock(void)
Definition lock.c:1907
void LockReleaseSession(LOCKMETHODID lockmethodid)
Definition lock.c:2590
void MarkLockClear(LOCALLOCK *locallock)
Definition lock.c:1929
LockData * GetLockStatusData(void)
Definition lock.c:3777
#define FAST_PATH_GET_BITS(proc, n)
Definition lock.c:248
static LOCALLOCK * StrongLockInProgress
Definition lock.c:338
#define FAST_PATH_BITS_PER_SLOT
Definition lock.c:244
int FastPathLockGroupsPerBackend
Definition lock.c:205
#define EligibleForRelationFastPath(locktag, mode)
Definition lock.c:270
uint32 LockTagHashCode(const LOCKTAG *locktag)
Definition lock.c:555
static void BeginStrongLockAcquire(LOCALLOCK *locallock, uint32 fasthashcode)
Definition lock.c:1833
bool LockCheckConflicts(LockMethod lockMethodTable, LOCKMODE lockmode, LOCK *lock, PROCLOCK *proclock)
Definition lock.c:1538
static void LockManagerShmemRequest(void *arg)
Definition lock.c:451
void lock_twophase_recover(FullTransactionId fxid, uint16 info, void *recdata, uint32 len)
Definition lock.c:4340
static void GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner)
Definition lock.c:1801
static void ReleaseLockIfHeld(LOCALLOCK *locallock, bool sessionLock)
Definition lock.c:2655
LockMethod GetLocksMethodTable(const LOCK *lock)
Definition lock.c:525
static void FinishStrongLockAcquire(void)
Definition lock.c:1859
#define FAST_PATH_STRONG_LOCK_HASH_PARTITIONS
Definition lock.c:304
xl_standby_lock * GetRunningTransactionLocks(int *nlocks)
Definition lock.c:4154
static void LockRefindAndRelease(LockMethod lockMethodTable, PGPROC *proc, LOCKTAG *locktag, LOCKMODE lockmode, bool decrement_strong_lock_count)
Definition lock.c:3293
static void CheckForSessionAndXactLocks(void)
Definition lock.c:3397
static HTAB * LockMethodProcLockHash
Definition lock.c:333
static void LockManagerShmemInit(void *arg)
Definition lock.c:494
LockMethod GetLockTagsMethodTable(const LOCKTAG *locktag)
Definition lock.c:537
#define LOCK_LOCKTAG(lock)
Definition lock.h:156
#define VirtualTransactionIdIsValid(vxid)
Definition lock.h:70
#define LockHashPartitionLock(hashcode)
Definition lock.h:357
#define GET_VXID_FROM_PGPROC(vxid_dst, proc)
Definition lock.h:80
#define LOCK_LOCKMETHOD(lock)
Definition lock.h:155
#define LOCKBIT_OFF(lockmode)
Definition lock.h:88
#define LOCALLOCK_LOCKMETHOD(llock)
Definition lock.h:274
#define InvalidLocalTransactionId
Definition lock.h:68
#define MAX_LOCKMODES
Definition lock.h:85
#define LOCKBIT_ON(lockmode)
Definition lock.h:87
#define LocalTransactionIdIsValid(lxid)
Definition lock.h:69
#define LOCALLOCK_LOCKTAG(llock)
Definition lock.h:275
#define LockHashPartition(hashcode)
Definition lock.h:355
#define VirtualTransactionIdEquals(vxid1, vxid2)
Definition lock.h:74
#define PROCLOCK_LOCKMETHOD(proclock)
Definition lock.h:213
#define LockHashPartitionLockByIndex(i)
Definition lock.h:360
LockAcquireResult
Definition lock.h:331
@ LOCKACQUIRE_ALREADY_CLEAR
Definition lock.h:335
@ LOCKACQUIRE_OK
Definition lock.h:333
@ LOCKACQUIRE_ALREADY_HELD
Definition lock.h:334
@ LOCKACQUIRE_NOT_AVAIL
Definition lock.h:332
#define VirtualTransactionIdIsRecoveredPreparedXact(vxid)
Definition lock.h:72
int LOCKMODE
Definition lockdefs.h:26
#define NoLock
Definition lockdefs.h:34
#define AccessExclusiveLock
Definition lockdefs.h:43
int LOCKMASK
Definition lockdefs.h:25
#define ExclusiveLock
Definition lockdefs.h:42
#define ShareLock
Definition lockdefs.h:40
#define MaxLockMode
Definition lockdefs.h:45
#define RowExclusiveLock
Definition lockdefs.h:38
uint16 LOCKMETHODID
Definition locktag.h:22
#define SET_LOCKTAG_VIRTUALTRANSACTION(locktag, vxid)
Definition locktag.h:135
@ LOCKTAG_OBJECT
Definition locktag.h:45
@ LOCKTAG_RELATION_EXTEND
Definition locktag.h:38
@ LOCKTAG_TUPLE
Definition locktag.h:41
@ LOCKTAG_VIRTUALTRANSACTION
Definition locktag.h:43
#define SET_LOCKTAG_TRANSACTION(locktag, xid)
Definition locktag.h:126
#define SET_LOCKTAG_RELATION(locktag, dboid, reloid)
Definition locktag.h:81
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
Definition lwlock.c:1150
void LWLockRelease(LWLock *lock)
Definition lwlock.c:1767
#define NUM_LOCK_PARTITIONS
Definition lwlock.h:87
#define LOG2_NUM_LOCK_PARTITIONS
Definition lwlock.h:86
@ LW_SHARED
Definition lwlock.h:105
@ LW_EXCLUSIVE
Definition lwlock.h:104
void * MemoryContextAlloc(MemoryContext context, Size size)
Definition mcxt.c:1232
void * repalloc(void *pointer, Size size)
Definition mcxt.c:1632
void pfree(void *pointer)
Definition mcxt.c:1616
MemoryContext TopMemoryContext
Definition mcxt.c:166
void * palloc(Size size)
Definition mcxt.c:1387
MemoryContext CurrentMemoryContext
Definition mcxt.c:160
#define START_CRIT_SECTION()
Definition miscadmin.h:152
#define END_CRIT_SECTION()
Definition miscadmin.h:154
static char * errmsg
const void size_t len
const void * data
static char buf[DEFAULT_XLOG_SEG_SIZE]
void pgstat_count_lock_fastpath_exceeded(uint8 locktag_type)
static uint32 DatumGetUInt32(Datum X)
Definition postgres.h:222
static Datum PointerGetDatum(const void *X)
Definition postgres.h:342
uint64_t Datum
Definition postgres.h:70
unsigned int Oid
static int fb(int x)
#define FastPathLockSlotsPerBackend()
Definition proc.h:97
#define GetPGProcByNumber(n)
Definition proc.h:504
#define FP_LOCK_SLOTS_PER_GROUP
Definition proc.h:96
ProcWaitStatus
Definition proc.h:144
@ PROC_WAIT_STATUS_OK
Definition proc.h:145
@ PROC_WAIT_STATUS_WAITING
Definition proc.h:146
@ PROC_WAIT_STATUS_ERROR
Definition proc.h:147
PGPROC * BackendPidGetProcWithLock(int pid)
Definition procarray.c:3192
PGPROC * ProcNumberGetProc(ProcNumber procNumber)
Definition procarray.c:3111
#define INVALID_PROC_NUMBER
Definition procnumber.h:26
void set_ps_display_remove_suffix(void)
Definition ps_status.c:440
void set_ps_display_suffix(const char *suffix)
Definition ps_status.c:388
void ResourceOwnerRememberLock(ResourceOwner owner, LOCALLOCK *locallock)
Definition resowner.c:1059
ResourceOwner ResourceOwnerGetParent(ResourceOwner owner)
Definition resowner.c:902
ResourceOwner CurrentResourceOwner
Definition resowner.c:173
void ResourceOwnerForgetLock(ResourceOwner owner, LOCALLOCK *locallock)
Definition resowner.c:1079
#define ShmemRequestHash(...)
Definition shmem.h:179
#define ShmemRequestStruct(...)
Definition shmem.h:176
static void SpinLockRelease(volatile slock_t *lock)
Definition spin.h:62
static void SpinLockAcquire(volatile slock_t *lock)
Definition spin.h:56
static void SpinLockInit(volatile slock_t *lock)
Definition spin.h:50
ProcWaitStatus JoinWaitQueue(LOCALLOCK *locallock, LockMethod lockMethodTable, bool dontWait)
Definition proc.c:1146
PGPROC * MyProc
Definition proc.c:71
void GetLockHoldersAndWaiters(LOCALLOCK *locallock, StringInfo lock_holders_sbuf, StringInfo lock_waiters_sbuf, int *lockHoldersNum)
Definition proc.c:1941
ProcWaitStatus ProcSleep(LOCALLOCK *locallock)
Definition proc.c:1315
void ProcLockWakeup(LockMethod lockMethodTable, LOCK *lock)
Definition proc.c:1776
PROC_HDR * ProcGlobal
Definition proc.c:74
void LogAccessExclusiveLockPrepare(void)
Definition standby.c:1471
void StandbyAcquireAccessExclusiveLock(TransactionId xid, Oid dbOid, Oid relOid)
Definition standby.c:988
void LogAccessExclusiveLock(Oid dbOid, Oid relOid)
Definition standby.c:1454
void initStringInfo(StringInfo str)
Definition stringinfo.c:97
struct ErrorContextCallback * previous
Definition elog.h:299
uint32 count[FAST_PATH_STRONG_LOCK_HASH_PARTITIONS]
Definition lock.c:312
Size keysize
Definition hsearch.h:69
Size entrysize
Definition hsearch.h:70
Size keysize
Definition dynahash.c:241
int64 nLocks
Definition lock.h:254
struct ResourceOwnerData * owner
Definition lock.h:253
uint8 locktag_type
Definition locktag.h:70
uint32 locktag_field3
Definition locktag.h:68
uint32 locktag_field1
Definition locktag.h:66
uint8 locktag_lockmethodid
Definition locktag.h:71
uint16 locktag_field4
Definition locktag.h:69
uint32 locktag_field2
Definition locktag.h:67
Definition lock.h:140
int nRequested
Definition lock.h:150
LOCKTAG tag
Definition lock.h:142
int requested[MAX_LOCKMODES]
Definition lock.h:149
dclist_head waitProcs
Definition lock.h:148
int granted[MAX_LOCKMODES]
Definition lock.h:151
LOCKMASK grantMask
Definition lock.h:145
LOCKMASK waitMask
Definition lock.h:146
int nGranted
Definition lock.h:152
dlist_head procLocks
Definition lock.h:147
const bool * trace_flag
Definition lock.h:116
const char *const * lockModeNames
Definition lock.h:115
Definition proc.h:179
LWLock fpInfoLock
Definition proc.h:324
LocalTransactionId lxid
Definition proc.h:231
PROCLOCK * waitProcLock
Definition proc.h:306
dlist_head lockGroupMembers
Definition proc.h:299
Oid * fpRelId
Definition proc.h:326
Oid databaseId
Definition proc.h:201
uint64 * fpLockBits
Definition proc.h:325
pg_atomic_uint64 waitStart
Definition proc.h:311
bool fpVXIDLock
Definition proc.h:327
ProcNumber procNumber
Definition proc.h:226
int pid
Definition proc.h:197
struct PGPROC::@136 vxid
LOCK * waitLock
Definition proc.h:304
TransactionId xid
Definition proc.h:237
LOCKMODE waitLockMode
Definition proc.h:307
dlist_node waitLink
Definition proc.h:305
PGPROC * lockGroupLeader
Definition proc.h:298
LocalTransactionId fpLocalTransactionId
Definition proc.h:328
dlist_head myProcLocks[NUM_LOCK_PARTITIONS]
Definition proc.h:321
ProcWaitStatus waitStatus
Definition proc.h:314
LOCK * myLock
Definition lock.h:196
PGPROC * myProc
Definition lock.h:197
LOCKMASK holdMask
Definition lock.h:207
dlist_node lockLink
Definition lock.h:209
PGPROC * groupLeader
Definition lock.h:206
LOCKMASK releaseMask
Definition lock.h:208
PROCLOCKTAG tag
Definition lock.h:203
dlist_node procLink
Definition lock.h:210
uint32 allProcCount
Definition proc.h:459
ShmemRequestCallback request_fn
Definition shmem.h:133
LOCKTAG locktag
Definition lock.c:163
LOCKMODE lockmode
Definition lock.c:164
LocalTransactionId localTransactionId
Definition lock.h:65
ProcNumber procNumber
Definition lock.h:64
dlist_node * cur
Definition ilist.h:179
Definition type.h:96
#define InvalidTransactionId
Definition transam.h:31
#define XidFromFullTransactionId(x)
Definition transam.h:48
#define FirstNormalObjectId
Definition transam.h:197
#define TransactionIdIsValid(xid)
Definition transam.h:41
void RegisterTwoPhaseRecord(TwoPhaseRmgrId rmid, uint16 info, const void *data, uint32 len)
Definition twophase.c:1277
int max_prepared_xacts
Definition twophase.c:118
TransactionId TwoPhaseGetXidByVirtualXID(VirtualTransactionId vxid, bool *have_more)
Definition twophase.c:862
PGPROC * TwoPhaseGetDummyProc(FullTransactionId fxid, bool lock_held)
Definition twophase.c:929
#define TWOPHASE_RM_LOCK_ID
const char * type
const char * name
bool RecoveryInProgress(void)
Definition xlog.c:6830
#define XLogStandbyInfoActive()
Definition xlog.h:126
bool InRecovery
Definition xlogutils.c:50
#define InHotStandby
Definition xlogutils.h:60

◆ EligibleForRelationFastPath

#define EligibleForRelationFastPath (   locktag,
  mode 
)
Value:
((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
(locktag)->locktag_type == LOCKTAG_RELATION && \
(locktag)->locktag_field1 == MyDatabaseId && \
(mode) < ShareUpdateExclusiveLock)
Oid MyDatabaseId
Definition globals.c:96

Definition at line 270 of file lock.c.

◆ FAST_PATH_BIT_POSITION

#define FAST_PATH_BIT_POSITION (   n,
  l 
)
Value:
(AssertMacro((l) >= FAST_PATH_LOCKNUMBER_OFFSET), \
 AssertMacro((l) < FAST_PATH_BITS_PER_SLOT + FAST_PATH_LOCKNUMBER_OFFSET), \
 AssertMacro((n) < FastPathLockSlotsPerBackend()), \
 ((l) - FAST_PATH_LOCKNUMBER_OFFSET + FAST_PATH_BITS_PER_SLOT * FAST_PATH_INDEX(n)))

Definition at line 250 of file lock.c.

◆ FAST_PATH_BITS

#define FAST_PATH_BITS (   proc,
 
)    (proc)->fpLockBits[FAST_PATH_GROUP(n)]

Definition at line 247 of file lock.c.

◆ FAST_PATH_BITS_PER_SLOT

#define FAST_PATH_BITS_PER_SLOT   3

Definition at line 244 of file lock.c.

◆ FAST_PATH_CHECK_LOCKMODE

#define FAST_PATH_CHECK_LOCKMODE (   proc,
  n,
 
)     (FAST_PATH_BITS(proc, n) & (UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)))

Definition at line 259 of file lock.c.

◆ FAST_PATH_CLEAR_LOCKMODE

#define FAST_PATH_CLEAR_LOCKMODE (   proc,
  n,
 
)     FAST_PATH_BITS(proc, n) &= ~(UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l))

Definition at line 257 of file lock.c.

◆ FAST_PATH_GET_BITS

#define FAST_PATH_GET_BITS (   proc,
 
)     ((FAST_PATH_BITS(proc, n) >> (FAST_PATH_BITS_PER_SLOT * FAST_PATH_INDEX(n))) & FAST_PATH_MASK)

Definition at line 248 of file lock.c.

◆ FAST_PATH_GROUP

#define FAST_PATH_GROUP (   index)
Value:

Definition at line 236 of file lock.c.

◆ FAST_PATH_INDEX

#define FAST_PATH_INDEX (   index)
Value:

Definition at line 239 of file lock.c.

◆ FAST_PATH_LOCKNUMBER_OFFSET

#define FAST_PATH_LOCKNUMBER_OFFSET   1

Definition at line 245 of file lock.c.

◆ FAST_PATH_MASK

#define FAST_PATH_MASK   ((1 << FAST_PATH_BITS_PER_SLOT) - 1)

Definition at line 246 of file lock.c.

◆ FAST_PATH_REL_GROUP

#define FAST_PATH_REL_GROUP (   rel)     (((uint64) (rel) * 49157) & (FastPathLockGroupsPerBackend - 1))

Definition at line 220 of file lock.c.

◆ FAST_PATH_SET_LOCKMODE

#define FAST_PATH_SET_LOCKMODE (   proc,
  n,
 
)     FAST_PATH_BITS(proc, n) |= UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)

Definition at line 255 of file lock.c.

◆ FAST_PATH_SLOT

#define FAST_PATH_SLOT (   group,
  index 
)
Value:

Definition at line 227 of file lock.c.

◆ FAST_PATH_STRONG_LOCK_HASH_BITS

#define FAST_PATH_STRONG_LOCK_HASH_BITS   10

Definition at line 303 of file lock.c.

◆ FAST_PATH_STRONG_LOCK_HASH_PARTITIONS

#define FAST_PATH_STRONG_LOCK_HASH_PARTITIONS    (1 << FAST_PATH_STRONG_LOCK_HASH_BITS)

Definition at line 304 of file lock.c.

◆ FastPathStrongLockHashPartition

#define FastPathStrongLockHashPartition (   hashcode)     ((hashcode) % FAST_PATH_STRONG_LOCK_HASH_PARTITIONS)

Definition at line 306 of file lock.c.

◆ LOCK_PRINT

#define LOCK_PRINT (   where,
  lock,
  type 
)    ((void) 0)

Definition at line 416 of file lock.c.

◆ NLOCKENTS

Definition at line 59 of file lock.c.

◆ PROCLOCK_PRINT

#define PROCLOCK_PRINT (   where,
  proclockP 
)    ((void) 0)

Definition at line 417 of file lock.c.

Typedef Documentation

◆ TwoPhaseLockRecord

Function Documentation

◆ AbortStrongLockAcquire()

◆ AtPrepare_Locks()

void AtPrepare_Locks ( void  )

Definition at line 3485 of file lock.c.

3486{
3487 HASH_SEQ_STATUS status;
3489
3490 /* First, verify there aren't locks of both xact and session level */
3492
3493 /* Now do the per-locallock cleanup work */
3495
3496 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3497 {
3498 TwoPhaseLockRecord record;
3499 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3500 bool haveSessionLock;
3501 bool haveXactLock;
3502 int i;
3503
3504 /*
3505 * Ignore VXID locks. We don't want those to be held by prepared
3506 * transactions, since they aren't meaningful after a restart.
3507 */
3508 if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3509 continue;
3510
3511 /* Ignore it if we don't actually hold the lock */
3512 if (locallock->nLocks <= 0)
3513 continue;
3514
3515 /* Scan to see whether we hold it at session or transaction level */
3516 haveSessionLock = haveXactLock = false;
3517 for (i = locallock->numLockOwners - 1; i >= 0; i--)
3518 {
3519 if (lockOwners[i].owner == NULL)
3520 haveSessionLock = true;
3521 else
3522 haveXactLock = true;
3523 }
3524
3525 /* Ignore it if we have only session lock */
3526 if (!haveXactLock)
3527 continue;
3528
3529 /* This can't happen, because we already checked it */
3530 if (haveSessionLock)
3531 ereport(ERROR,
3533 errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3534
3535 /*
3536 * If the local lock was taken via the fast-path, we need to move it
3537 * to the primary lock table, or just get a pointer to the existing
3538 * primary lock table entry if by chance it's already been
3539 * transferred.
3540 */
3541 if (locallock->proclock == NULL)
3542 {
3544 locallock->lock = locallock->proclock->tag.myLock;
3545 }
3546
3547 /*
3548 * Arrange to not release any strong lock count held by this lock
3549 * entry. We must retain the count until the prepared transaction is
3550 * committed or rolled back.
3551 */
3552 locallock->holdsStrongLockCount = false;
3553
3554 /*
3555 * Create a 2PC record.
3556 */
3557 memcpy(&(record.locktag), &(locallock->tag.lock), sizeof(LOCKTAG));
3558 record.lockmode = locallock->tag.mode;
3559
3561 &record, sizeof(TwoPhaseLockRecord));
3562 }
3563}

References CheckForSessionAndXactLocks(), ereport, errcode(), errmsg, ERROR, FastPathGetRelationLockEntry(), fb(), hash_seq_init(), hash_seq_search(), i, LockMethodLocalHash, TwoPhaseLockRecord::lockmode, TwoPhaseLockRecord::locktag, LOCKTAG_VIRTUALTRANSACTION, memcpy(), RegisterTwoPhaseRecord(), and TWOPHASE_RM_LOCK_ID.

Referenced by PrepareTransaction().

◆ BeginStrongLockAcquire()

static void BeginStrongLockAcquire ( LOCALLOCK locallock,
uint32  fasthashcode 
)
static

Definition at line 1833 of file lock.c.

1834{
1836 Assert(locallock->holdsStrongLockCount == false);
1837
1838 /*
1839 * Adding to a memory location is not atomic, so we take a spinlock to
1840 * ensure we don't collide with someone else trying to bump the count at
1841 * the same time.
1842 *
1843 * XXX: It might be worth considering using an atomic fetch-and-add
1844 * instruction here, on architectures where that is supported.
1845 */
1846
1849 locallock->holdsStrongLockCount = true;
1852}

References Assert, FastPathStrongRelationLockData::count, FastPathStrongRelationLocks, fb(), FastPathStrongRelationLockData::mutex, SpinLockAcquire(), SpinLockRelease(), and StrongLockInProgress.

Referenced by LockAcquireExtended().

◆ CheckAndSetLockHeld()

static void CheckAndSetLockHeld ( LOCALLOCK locallock,
bool  acquired 
)
inlinestatic

Definition at line 1473 of file lock.c.

1474{
1475#ifdef USE_ASSERT_CHECKING
1478#endif
1479}

References fb(), LOCALLOCK_LOCKTAG, and LOCKTAG_RELATION_EXTEND.

Referenced by GrantLockLocal(), and RemoveLocalLock().

◆ CheckForSessionAndXactLocks()

static void CheckForSessionAndXactLocks ( void  )
static

Definition at line 3397 of file lock.c.

3398{
3399 typedef struct
3400 {
3401 LOCKTAG lock; /* identifies the lockable object */
3402 bool sessLock; /* is any lockmode held at session level? */
3403 bool xactLock; /* is any lockmode held at xact level? */
3405
3407 HTAB *lockhtab;
3408 HASH_SEQ_STATUS status;
3410
3411 /* Create a local hash table keyed by LOCKTAG only */
3412 hash_ctl.keysize = sizeof(LOCKTAG);
3413 hash_ctl.entrysize = sizeof(PerLockTagEntry);
3415
3416 lockhtab = hash_create("CheckForSessionAndXactLocks table",
3417 256, /* arbitrary initial size */
3418 &hash_ctl,
3420
3421 /* Scan local lock table to find entries for each LOCKTAG */
3423
3424 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3425 {
3426 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3428 bool found;
3429 int i;
3430
3431 /*
3432 * Ignore VXID locks. We don't want those to be held by prepared
3433 * transactions, since they aren't meaningful after a restart.
3434 */
3435 if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3436 continue;
3437
3438 /* Ignore it if we don't actually hold the lock */
3439 if (locallock->nLocks <= 0)
3440 continue;
3441
3442 /* Otherwise, find or make an entry in lockhtab */
3444 &locallock->tag.lock,
3445 HASH_ENTER, &found);
3446 if (!found) /* initialize, if newly created */
3447 hentry->sessLock = hentry->xactLock = false;
3448
3449 /* Scan to see if we hold lock at session or xact level or both */
3450 for (i = locallock->numLockOwners - 1; i >= 0; i--)
3451 {
3452 if (lockOwners[i].owner == NULL)
3453 hentry->sessLock = true;
3454 else
3455 hentry->xactLock = true;
3456 }
3457
3458 /*
3459 * We can throw error immediately when we see both types of locks; no
3460 * need to wait around to see if there are more violations.
3461 */
3462 if (hentry->sessLock && hentry->xactLock)
3463 ereport(ERROR,
3465 errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3466 }
3467
3468 /* Success, so clean up */
3470}

References CurrentMemoryContext, ereport, errcode(), errmsg, ERROR, fb(), HASH_BLOBS, HASH_CONTEXT, hash_create(), hash_destroy(), HASH_ELEM, HASH_ENTER, hash_search(), hash_seq_init(), hash_seq_search(), i, LockMethodLocalHash, and LOCKTAG_VIRTUALTRANSACTION.

Referenced by AtPrepare_Locks().

◆ CleanUpLock()

static void CleanUpLock ( LOCK lock,
PROCLOCK proclock,
LockMethod  lockMethodTable,
uint32  hashcode,
bool  wakeupNeeded 
)
static

Definition at line 1747 of file lock.c.

1750{
1751 /*
1752 * If this was my last hold on this lock, delete my entry in the proclock
1753 * table.
1754 */
1755 if (proclock->holdMask == 0)
1756 {
1758
1759 PROCLOCK_PRINT("CleanUpLock: deleting", proclock);
1760 dlist_delete(&proclock->lockLink);
1761 dlist_delete(&proclock->procLink);
1762 proclock_hashcode = ProcLockHashCode(&proclock->tag, hashcode);
1764 &(proclock->tag),
1767 NULL))
1768 elog(PANIC, "proclock table corrupted");
1769 }
1770
1771 if (lock->nRequested == 0)
1772 {
1773 /*
1774 * The caller just released the last lock, so garbage-collect the lock
1775 * object.
1776 */
1777 LOCK_PRINT("CleanUpLock: deleting", lock, 0);
1780 &(lock->tag),
1781 hashcode,
1783 NULL))
1784 elog(PANIC, "lock table corrupted");
1785 }
1786 else if (wakeupNeeded)
1787 {
1788 /* There are waiters on this lock, so wake them up. */
1790 }
1791}

References Assert, dlist_delete(), dlist_is_empty(), elog, fb(), HASH_REMOVE, hash_search_with_hash_value(), PROCLOCK::holdMask, LOCK_PRINT, PROCLOCK::lockLink, LockMethodLockHash, LockMethodProcLockHash, LOCK::nRequested, PANIC, PROCLOCK::procLink, PROCLOCK_PRINT, ProcLockHashCode(), LOCK::procLocks, ProcLockWakeup(), LOCK::tag, and PROCLOCK::tag.

Referenced by LockRefindAndRelease(), LockRelease(), LockReleaseAll(), and RemoveFromWaitQueue().

◆ DoLockModesConflict()

bool DoLockModesConflict ( LOCKMODE  mode1,
LOCKMODE  mode2 
)

Definition at line 621 of file lock.c.

622{
624
625 if (lockMethodTable->conflictTab[mode1] & LOCKBIT_ON(mode2))
626 return true;
627
628 return false;
629}

References DEFAULT_LOCKMETHOD, fb(), LOCKBIT_ON, and LockMethods.

Referenced by Do_MultiXactIdWait(), DoesMultiXactIdConflict(), initialize_reloptions(), and test_lockmode_for_conflict().

◆ FastPathGetRelationLockEntry()

static PROCLOCK * FastPathGetRelationLockEntry ( LOCALLOCK locallock)
static

Definition at line 2967 of file lock.c.

2968{
2970 LOCKTAG *locktag = &locallock->tag.lock;
2971 PROCLOCK *proclock = NULL;
2973 Oid relid = locktag->locktag_field2;
2974 uint32 i,
2975 group;
2976
2977 /* fast-path group the lock belongs to */
2978 group = FAST_PATH_REL_GROUP(relid);
2979
2981
2982 for (i = 0; i < FP_LOCK_SLOTS_PER_GROUP; i++)
2983 {
2984 uint32 lockmode;
2985
2986 /* index into the whole per-backend array */
2987 uint32 f = FAST_PATH_SLOT(group, i);
2988
2989 /* Look for an allocated slot matching the given relid. */
2990 if (relid != MyProc->fpRelId[f] || FAST_PATH_GET_BITS(MyProc, f) == 0)
2991 continue;
2992
2993 /* If we don't have a lock of the given mode, forget it! */
2994 lockmode = locallock->tag.mode;
2995 if (!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
2996 break;
2997
2998 /* Find or create lock object. */
3000
3001 proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
3002 locallock->hashcode, lockmode);
3003 if (!proclock)
3004 {
3007 ereport(ERROR,
3009 errmsg("out of shared memory"),
3010 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
3011 }
3012 GrantLock(proclock->tag.myLock, proclock, lockmode);
3013 FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
3014
3016
3017 /* No need to examine remaining slots. */
3018 break;
3019 }
3020
3022
3023 /* Lock may have already been transferred by some other backend. */
3024 if (proclock == NULL)
3025 {
3026 LOCK *lock;
3029
3031
3033 locktag,
3034 locallock->hashcode,
3035 HASH_FIND,
3036 NULL);
3037 if (!lock)
3038 elog(ERROR, "failed to re-find shared lock object");
3039
3040 proclocktag.myLock = lock;
3041 proclocktag.myProc = MyProc;
3042
3044 proclock = (PROCLOCK *)
3046 &proclocktag,
3048 HASH_FIND,
3049 NULL);
3050 if (!proclock)
3051 elog(ERROR, "failed to re-find shared proclock object");
3053 }
3054
3055 return proclock;
3056}

References DEFAULT_LOCKMETHOD, elog, ereport, errcode(), errhint(), errmsg, ERROR, FAST_PATH_CHECK_LOCKMODE, FAST_PATH_CLEAR_LOCKMODE, FAST_PATH_GET_BITS, FAST_PATH_REL_GROUP, FAST_PATH_SLOT, fb(), FP_LOCK_SLOTS_PER_GROUP, PGPROC::fpInfoLock, PGPROC::fpRelId, GrantLock(), HASH_FIND, hash_search_with_hash_value(), i, LockHashPartitionLock, LockMethodLockHash, LockMethodProcLockHash, LockMethods, LOCKTAG::locktag_field2, LW_EXCLUSIVE, LW_SHARED, LWLockAcquire(), LWLockRelease(), PROCLOCKTAG::myLock, MyProc, ProcLockHashCode(), SetupLockInTable(), and PROCLOCK::tag.

Referenced by AtPrepare_Locks().

◆ FastPathGrantRelationLock()

static bool FastPathGrantRelationLock ( Oid  relid,
LOCKMODE  lockmode 
)
static

Definition at line 2791 of file lock.c.

2792{
2793 uint32 i;
2795
2796 /* fast-path group the lock belongs to */
2797 uint32 group = FAST_PATH_REL_GROUP(relid);
2798
2799 /* Scan for existing entry for this relid, remembering empty slot. */
2800 for (i = 0; i < FP_LOCK_SLOTS_PER_GROUP; i++)
2801 {
2802 /* index into the whole per-backend array */
2803 uint32 f = FAST_PATH_SLOT(group, i);
2804
2805 if (FAST_PATH_GET_BITS(MyProc, f) == 0)
2806 unused_slot = f;
2807 else if (MyProc->fpRelId[f] == relid)
2808 {
2809 Assert(!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode));
2810 FAST_PATH_SET_LOCKMODE(MyProc, f, lockmode);
2811 return true;
2812 }
2813 }
2814
2815 /* If no existing entry, use any empty slot. */
2817 {
2818 MyProc->fpRelId[unused_slot] = relid;
2820 ++FastPathLocalUseCounts[group];
2821 return true;
2822 }
2823
2824 /* No existing entry, and no empty slot. */
2825 return false;
2826}

References Assert, FAST_PATH_CHECK_LOCKMODE, FAST_PATH_GET_BITS, FAST_PATH_REL_GROUP, FAST_PATH_SET_LOCKMODE, FAST_PATH_SLOT, FastPathLocalUseCounts, FastPathLockSlotsPerBackend, fb(), FP_LOCK_SLOTS_PER_GROUP, PGPROC::fpRelId, i, and MyProc.

Referenced by LockAcquireExtended().

◆ FastPathTransferRelationLocks()

static bool FastPathTransferRelationLocks ( LockMethod  lockMethodTable,
const LOCKTAG locktag,
uint32  hashcode 
)
static

Definition at line 2870 of file lock.c.

2872{
2874 Oid relid = locktag->locktag_field2;
2875 uint32 i;
2876
2877 /* fast-path group the lock belongs to */
2878 uint32 group = FAST_PATH_REL_GROUP(relid);
2879
2880 /*
2881 * Every PGPROC that can potentially hold a fast-path lock is present in
2882 * ProcGlobal->allProcs. Prepared transactions are not, but any
2883 * outstanding fast-path locks held by prepared transactions are
2884 * transferred to the main lock table.
2885 */
2886 for (i = 0; i < ProcGlobal->allProcCount; i++)
2887 {
2888 PGPROC *proc = GetPGProcByNumber(i);
2889 uint32 j;
2890
2892
2893 /*
2894 * If the target backend isn't referencing the same database as the
2895 * lock, then we needn't examine the individual relation IDs at all;
2896 * none of them can be relevant.
2897 *
2898 * proc->databaseId is set at backend startup time and never changes
2899 * thereafter, so it might be safe to perform this test before
2900 * acquiring &proc->fpInfoLock. In particular, it's certainly safe to
2901 * assume that if the target backend holds any fast-path locks, it
2902 * must have performed a memory-fencing operation (in particular, an
2903 * LWLock acquisition) since setting proc->databaseId. However, it's
2904 * less clear that our backend is certain to have performed a memory
2905 * fencing operation since the other backend set proc->databaseId. So
2906 * for now, we test it after acquiring the LWLock just to be safe.
2907 *
2908 * Also skip groups without any registered fast-path locks.
2909 */
2910 if (proc->databaseId != locktag->locktag_field1 ||
2911 proc->fpLockBits[group] == 0)
2912 {
2913 LWLockRelease(&proc->fpInfoLock);
2914 continue;
2915 }
2916
2917 for (j = 0; j < FP_LOCK_SLOTS_PER_GROUP; j++)
2918 {
2919 uint32 lockmode;
2920
2921 /* index into the whole per-backend array */
2922 uint32 f = FAST_PATH_SLOT(group, j);
2923
2924 /* Look for an allocated slot matching the given relid. */
2925 if (relid != proc->fpRelId[f] || FAST_PATH_GET_BITS(proc, f) == 0)
2926 continue;
2927
2928 /* Find or create lock object. */
2930 for (lockmode = FAST_PATH_LOCKNUMBER_OFFSET;
2932 ++lockmode)
2933 {
2934 PROCLOCK *proclock;
2935
2936 if (!FAST_PATH_CHECK_LOCKMODE(proc, f, lockmode))
2937 continue;
2938 proclock = SetupLockInTable(lockMethodTable, proc, locktag,
2939 hashcode, lockmode);
2940 if (!proclock)
2941 {
2943 LWLockRelease(&proc->fpInfoLock);
2944 return false;
2945 }
2946 GrantLock(proclock->tag.myLock, proclock, lockmode);
2947 FAST_PATH_CLEAR_LOCKMODE(proc, f, lockmode);
2948 }
2950
2951 /* No need to examine remaining slots. */
2952 break;
2953 }
2954 LWLockRelease(&proc->fpInfoLock);
2955 }
2956 return true;
2957}

References PROC_HDR::allProcCount, PGPROC::databaseId, FAST_PATH_BITS_PER_SLOT, FAST_PATH_CHECK_LOCKMODE, FAST_PATH_CLEAR_LOCKMODE, FAST_PATH_GET_BITS, FAST_PATH_LOCKNUMBER_OFFSET, FAST_PATH_REL_GROUP, FAST_PATH_SLOT, fb(), FP_LOCK_SLOTS_PER_GROUP, PGPROC::fpInfoLock, PGPROC::fpLockBits, PGPROC::fpRelId, GetPGProcByNumber, GrantLock(), i, j, LockHashPartitionLock, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), PROCLOCKTAG::myLock, ProcGlobal, SetupLockInTable(), and PROCLOCK::tag.

Referenced by LockAcquireExtended().

◆ FastPathUnGrantRelationLock()

static bool FastPathUnGrantRelationLock ( Oid  relid,
LOCKMODE  lockmode 
)
static

Definition at line 2834 of file lock.c.

2835{
2836 uint32 i;
2837 bool result = false;
2838
2839 /* fast-path group the lock belongs to */
2840 uint32 group = FAST_PATH_REL_GROUP(relid);
2841
2842 FastPathLocalUseCounts[group] = 0;
2843 for (i = 0; i < FP_LOCK_SLOTS_PER_GROUP; i++)
2844 {
2845 /* index into the whole per-backend array */
2846 uint32 f = FAST_PATH_SLOT(group, i);
2847
2848 if (MyProc->fpRelId[f] == relid
2849 && FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
2850 {
2851 Assert(!result);
2852 FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
2853 result = true;
2854 /* we continue iterating so as to update FastPathLocalUseCount */
2855 }
2856 if (FAST_PATH_GET_BITS(MyProc, f) != 0)
2857 ++FastPathLocalUseCounts[group];
2858 }
2859 return result;
2860}

References Assert, FAST_PATH_CHECK_LOCKMODE, FAST_PATH_CLEAR_LOCKMODE, FAST_PATH_GET_BITS, FAST_PATH_REL_GROUP, FAST_PATH_SLOT, FastPathLocalUseCounts, FP_LOCK_SLOTS_PER_GROUP, PGPROC::fpRelId, i, MyProc, and result.

Referenced by LockRelease(), and LockReleaseAll().

◆ FinishStrongLockAcquire()

static void FinishStrongLockAcquire ( void  )
static

Definition at line 1859 of file lock.c.

1860{
1862}

References fb(), and StrongLockInProgress.

Referenced by LockAcquireExtended().

◆ GetAwaitedLock()

LOCALLOCK * GetAwaitedLock ( void  )

Definition at line 1907 of file lock.c.

1908{
1909 return awaitedLock;
1910}

References awaitedLock.

Referenced by LockErrorCleanup(), ProcessRecoveryConflictInterrupt(), and ProcSleep().

◆ GetBlockerStatusData()

BlockedProcsData * GetBlockerStatusData ( int  blocked_pid)

Definition at line 3980 of file lock.c.

3981{
3983 PGPROC *proc;
3984 int i;
3985
3987
3988 /*
3989 * Guess how much space we'll need, and preallocate. Most of the time
3990 * this will avoid needing to do repalloc while holding the LWLocks. (We
3991 * assume, but check with an Assert, that MaxBackends is enough entries
3992 * for the procs[] array; the other two could need enlargement, though.)
3993 */
3994 data->nprocs = data->nlocks = data->npids = 0;
3995 data->maxprocs = data->maxlocks = data->maxpids = MaxBackends;
3996 data->procs = palloc_array(BlockedProcData, data->maxprocs);
3997 data->locks = palloc_array(LockInstanceData, data->maxlocks);
3998 data->waiter_pids = palloc_array(int, data->maxpids);
3999
4000 /*
4001 * In order to search the ProcArray for blocked_pid and assume that that
4002 * entry won't immediately disappear under us, we must hold ProcArrayLock.
4003 * In addition, to examine the lock grouping fields of any other backend,
4004 * we must hold all the hash partition locks. (Only one of those locks is
4005 * actually relevant for any one lock group, but we can't know which one
4006 * ahead of time.) It's fairly annoying to hold all those locks
4007 * throughout this, but it's no worse than GetLockStatusData(), and it
4008 * does have the advantage that we're guaranteed to return a
4009 * self-consistent instantaneous state.
4010 */
4012
4014
4015 /* Nothing to do if it's gone */
4016 if (proc != NULL)
4017 {
4018 /*
4019 * Acquire lock on the entire shared lock data structure. See notes
4020 * in GetLockStatusData().
4021 */
4022 for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
4024
4025 if (proc->lockGroupLeader == NULL)
4026 {
4027 /* Easy case, proc is not a lock group member */
4029 }
4030 else
4031 {
4032 /* Examine all procs in proc's lock group */
4033 dlist_iter iter;
4034
4036 {
4038
4039 memberProc = dlist_container(PGPROC, lockGroupLink, iter.cur);
4041 }
4042 }
4043
4044 /*
4045 * And release locks. See notes in GetLockStatusData().
4046 */
4047 for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
4049
4050 Assert(data->nprocs <= data->maxprocs);
4051 }
4052
4054
4055 return data;
4056}

References Assert, BackendPidGetProcWithLock(), dlist_iter::cur, data, dlist_container, dlist_foreach, fb(), GetSingleProcBlockerStatusData(), i, PGPROC::lockGroupLeader, PGPROC::lockGroupMembers, LockHashPartitionLockByIndex, LW_SHARED, LWLockAcquire(), LWLockRelease(), MaxBackends, NUM_LOCK_PARTITIONS, palloc_array, and palloc_object.

Referenced by pg_blocking_pids().

◆ GetLockConflicts()

VirtualTransactionId * GetLockConflicts ( const LOCKTAG locktag,
LOCKMODE  lockmode,
int countp 
)

Definition at line 3078 of file lock.c.

3079{
3083 LOCK *lock;
3086 PROCLOCK *proclock;
3087 uint32 hashcode;
3089 int count = 0;
3090 int fast_count = 0;
3091
3093 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
3096 elog(ERROR, "unrecognized lock mode: %d", lockmode);
3097
3098 /*
3099 * Allocate memory to store results, and fill with InvalidVXID. We only
3100 * need enough space for MaxBackends + max_prepared_xacts + a terminator.
3101 * InHotStandby allocate once in TopMemoryContext.
3102 */
3103 if (InHotStandby)
3104 {
3105 if (vxids == NULL)
3108 sizeof(VirtualTransactionId) *
3110 }
3111 else
3113
3114 /* Compute hash code and partition lock, and look up conflicting modes. */
3115 hashcode = LockTagHashCode(locktag);
3117 conflictMask = lockMethodTable->conflictTab[lockmode];
3118
3119 /*
3120 * Fast path locks might not have been entered in the primary lock table.
3121 * If the lock we're dealing with could conflict with such a lock, we must
3122 * examine each backend's fast-path array for conflicts.
3123 */
3124 if (ConflictsWithRelationFastPath(locktag, lockmode))
3125 {
3126 int i;
3127 Oid relid = locktag->locktag_field2;
3129
3130 /* fast-path group the lock belongs to */
3131 uint32 group = FAST_PATH_REL_GROUP(relid);
3132
3133 /*
3134 * Iterate over relevant PGPROCs. Anything held by a prepared
3135 * transaction will have been transferred to the primary lock table,
3136 * so we need not worry about those. This is all a bit fuzzy, because
3137 * new locks could be taken after we've visited a particular
3138 * partition, but the callers had better be prepared to deal with that
3139 * anyway, since the locks could equally well be taken between the
3140 * time we return the value and the time the caller does something
3141 * with it.
3142 */
3143 for (i = 0; i < ProcGlobal->allProcCount; i++)
3144 {
3145 PGPROC *proc = GetPGProcByNumber(i);
3146 uint32 j;
3147
3148 /* A backend never blocks itself */
3149 if (proc == MyProc)
3150 continue;
3151
3153
3154 /*
3155 * If the target backend isn't referencing the same database as
3156 * the lock, then we needn't examine the individual relation IDs
3157 * at all; none of them can be relevant.
3158 *
3159 * See FastPathTransferRelationLocks() for discussion of why we do
3160 * this test after acquiring the lock.
3161 *
3162 * Also skip groups without any registered fast-path locks.
3163 */
3164 if (proc->databaseId != locktag->locktag_field1 ||
3165 proc->fpLockBits[group] == 0)
3166 {
3167 LWLockRelease(&proc->fpInfoLock);
3168 continue;
3169 }
3170
3171 for (j = 0; j < FP_LOCK_SLOTS_PER_GROUP; j++)
3172 {
3174
3175 /* index into the whole per-backend array */
3176 uint32 f = FAST_PATH_SLOT(group, j);
3177
3178 /* Look for an allocated slot matching the given relid. */
3179 if (relid != proc->fpRelId[f])
3180 continue;
3181 lockmask = FAST_PATH_GET_BITS(proc, f);
3182 if (!lockmask)
3183 continue;
3185
3186 /*
3187 * There can only be one entry per relation, so if we found it
3188 * and it doesn't conflict, we can skip the rest of the slots.
3189 */
3190 if ((lockmask & conflictMask) == 0)
3191 break;
3192
3193 /* Conflict! */
3194 GET_VXID_FROM_PGPROC(vxid, *proc);
3195
3197 vxids[count++] = vxid;
3198 /* else, xact already committed or aborted */
3199
3200 /* No need to examine remaining slots. */
3201 break;
3202 }
3203
3204 LWLockRelease(&proc->fpInfoLock);
3205 }
3206 }
3207
3208 /* Remember how many fast-path conflicts we found. */
3209 fast_count = count;
3210
3211 /*
3212 * Look up the lock object matching the tag.
3213 */
3215
3217 locktag,
3218 hashcode,
3219 HASH_FIND,
3220 NULL);
3221 if (!lock)
3222 {
3223 /*
3224 * If the lock object doesn't exist, there is nothing holding a lock
3225 * on this lockable object.
3226 */
3228 vxids[count].procNumber = INVALID_PROC_NUMBER;
3229 vxids[count].localTransactionId = InvalidLocalTransactionId;
3230 if (countp)
3231 *countp = count;
3232 return vxids;
3233 }
3234
3235 /*
3236 * Examine each existing holder (or awaiter) of the lock.
3237 */
3239 {
3240 proclock = dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
3241
3242 if (conflictMask & proclock->holdMask)
3243 {
3244 PGPROC *proc = proclock->tag.myProc;
3245
3246 /* A backend never blocks itself */
3247 if (proc != MyProc)
3248 {
3250
3251 GET_VXID_FROM_PGPROC(vxid, *proc);
3252
3254 {
3255 int i;
3256
3257 /* Avoid duplicate entries. */
3258 for (i = 0; i < fast_count; ++i)
3260 break;
3261 if (i >= fast_count)
3262 vxids[count++] = vxid;
3263 }
3264 /* else, xact already committed or aborted */
3265 }
3266 }
3267 }
3268
3270
3271 if (count > MaxBackends + max_prepared_xacts) /* should never happen */
3272 elog(PANIC, "too many conflicting locks found");
3273
3274 vxids[count].procNumber = INVALID_PROC_NUMBER;
3275 vxids[count].localTransactionId = InvalidLocalTransactionId;
3276 if (countp)
3277 *countp = count;
3278 return vxids;
3279}

References PROC_HDR::allProcCount, ConflictsWithRelationFastPath, PGPROC::databaseId, dlist_container, dlist_foreach, elog, ERROR, FAST_PATH_GET_BITS, FAST_PATH_LOCKNUMBER_OFFSET, FAST_PATH_REL_GROUP, FAST_PATH_SLOT, fb(), FP_LOCK_SLOTS_PER_GROUP, PGPROC::fpInfoLock, PGPROC::fpLockBits, PGPROC::fpRelId, GET_VXID_FROM_PGPROC, GetPGProcByNumber, HASH_FIND, hash_search_with_hash_value(), PROCLOCK::holdMask, i, InHotStandby, INVALID_PROC_NUMBER, InvalidLocalTransactionId, j, lengthof, LockHashPartitionLock, LockMethodLockHash, LockMethods, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_lockmethodid, LockTagHashCode(), LW_SHARED, LWLockAcquire(), LWLockRelease(), max_prepared_xacts, MaxBackends, MemoryContextAlloc(), MyProc, PROCLOCKTAG::myProc, palloc0_array, PANIC, ProcGlobal, LOCK::procLocks, PROCLOCK::tag, TopMemoryContext, VirtualTransactionIdEquals, and VirtualTransactionIdIsValid.

Referenced by ProcSleep(), ResolveRecoveryConflictWithLock(), and WaitForLockersMultiple().

◆ GetLockmodeName()

const char * GetLockmodeName ( LOCKMETHODID  lockmethodid,
LOCKMODE  mode 
)

◆ GetLocksMethodTable()

LockMethod GetLocksMethodTable ( const LOCK lock)

Definition at line 525 of file lock.c.

References Assert, fb(), lengthof, LOCK_LOCKMETHOD, and LockMethods.

Referenced by DeadLockCheck(), and FindLockCycleRecurseMember().

◆ GetLockStatusData()

LockData * GetLockStatusData ( void  )

Definition at line 3777 of file lock.c.

3778{
3779 LockData *data;
3780 PROCLOCK *proclock;
3782 int els;
3783 int el;
3784 int i;
3785
3787
3788 /* Guess how much space we'll need. */
3789 els = MaxBackends;
3790 el = 0;
3792
3793 /*
3794 * First, we iterate through the per-backend fast-path arrays, locking
3795 * them one at a time. This might produce an inconsistent picture of the
3796 * system state, but taking all of those LWLocks at the same time seems
3797 * impractical (in particular, note MAX_SIMUL_LWLOCKS). It shouldn't
3798 * matter too much, because none of these locks can be involved in lock
3799 * conflicts anyway - anything that might must be present in the main lock
3800 * table. (For the same reason, we don't sweat about making leaderPid
3801 * completely valid. We cannot safely dereference another backend's
3802 * lockGroupLeader field without holding all lock partition locks, and
3803 * it's not worth that.)
3804 */
3805 for (i = 0; i < ProcGlobal->allProcCount; ++i)
3806 {
3807 PGPROC *proc = GetPGProcByNumber(i);
3808
3809 /* Skip backends with pid=0, as they don't hold fast-path locks */
3810 if (proc->pid == 0)
3811 continue;
3812
3814
3815 for (uint32 g = 0; g < FastPathLockGroupsPerBackend; g++)
3816 {
3817 /* Skip groups without registered fast-path locks */
3818 if (proc->fpLockBits[g] == 0)
3819 continue;
3820
3821 for (int j = 0; j < FP_LOCK_SLOTS_PER_GROUP; j++)
3822 {
3824 uint32 f = FAST_PATH_SLOT(g, j);
3826
3827 /* Skip unallocated slots */
3828 if (!lockbits)
3829 continue;
3830
3831 if (el >= els)
3832 {
3833 els += MaxBackends;
3834 data->locks = (LockInstanceData *)
3835 repalloc(data->locks, sizeof(LockInstanceData) * els);
3836 }
3837
3838 instance = &data->locks[el];
3840 proc->fpRelId[f]);
3842 instance->waitLockMode = NoLock;
3843 instance->vxid.procNumber = proc->vxid.procNumber;
3844 instance->vxid.localTransactionId = proc->vxid.lxid;
3845 instance->pid = proc->pid;
3846 instance->leaderPid = proc->pid;
3847 instance->fastpath = true;
3848
3849 /*
3850 * Successfully taking fast path lock means there were no
3851 * conflicting locks.
3852 */
3853 instance->waitStart = 0;
3854
3855 el++;
3856 }
3857 }
3858
3859 if (proc->fpVXIDLock)
3860 {
3863
3864 if (el >= els)
3865 {
3866 els += MaxBackends;
3867 data->locks = (LockInstanceData *)
3868 repalloc(data->locks, sizeof(LockInstanceData) * els);
3869 }
3870
3871 vxid.procNumber = proc->vxid.procNumber;
3873
3874 instance = &data->locks[el];
3876 instance->holdMask = LOCKBIT_ON(ExclusiveLock);
3877 instance->waitLockMode = NoLock;
3878 instance->vxid.procNumber = proc->vxid.procNumber;
3879 instance->vxid.localTransactionId = proc->vxid.lxid;
3880 instance->pid = proc->pid;
3881 instance->leaderPid = proc->pid;
3882 instance->fastpath = true;
3883 instance->waitStart = 0;
3884
3885 el++;
3886 }
3887
3888 LWLockRelease(&proc->fpInfoLock);
3889 }
3890
3891 /*
3892 * Next, acquire lock on the entire shared lock data structure. We do
3893 * this so that, at least for locks in the primary lock table, the state
3894 * will be self-consistent.
3895 *
3896 * Since this is a read-only operation, we take shared instead of
3897 * exclusive lock. There's not a whole lot of point to this, because all
3898 * the normal operations require exclusive lock, but it doesn't hurt
3899 * anything either. It will at least allow two backends to do
3900 * GetLockStatusData in parallel.
3901 *
3902 * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
3903 */
3904 for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
3906
3907 /* Now we can safely count the number of proclocks */
3909 if (data->nelements > els)
3910 {
3911 els = data->nelements;
3912 data->locks = (LockInstanceData *)
3913 repalloc(data->locks, sizeof(LockInstanceData) * els);
3914 }
3915
3916 /* Now scan the tables to copy the data */
3918
3919 while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
3920 {
3921 PGPROC *proc = proclock->tag.myProc;
3922 LOCK *lock = proclock->tag.myLock;
3923 LockInstanceData *instance = &data->locks[el];
3924
3925 memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
3926 instance->holdMask = proclock->holdMask;
3927 if (proc->waitLock == proclock->tag.myLock)
3928 instance->waitLockMode = proc->waitLockMode;
3929 else
3930 instance->waitLockMode = NoLock;
3931 instance->vxid.procNumber = proc->vxid.procNumber;
3932 instance->vxid.localTransactionId = proc->vxid.lxid;
3933 instance->pid = proc->pid;
3934 instance->leaderPid = proclock->groupLeader->pid;
3935 instance->fastpath = false;
3936 instance->waitStart = (TimestampTz) pg_atomic_read_u64(&proc->waitStart);
3937
3938 el++;
3939 }
3940
3941 /*
3942 * And release locks. We do this in reverse order for two reasons: (1)
3943 * Anyone else who needs more than one of the locks will be trying to lock
3944 * them in increasing order; we don't want to release the other process
3945 * until it can get all the locks it needs. (2) This avoids O(N^2)
3946 * behavior inside LWLockRelease.
3947 */
3948 for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
3950
3951 Assert(el == data->nelements);
3952
3953 return data;
3954}

References PROC_HDR::allProcCount, Assert, data, PGPROC::databaseId, ExclusiveLock, FAST_PATH_GET_BITS, FAST_PATH_LOCKNUMBER_OFFSET, FAST_PATH_SLOT, FastPathLockGroupsPerBackend, fb(), FP_LOCK_SLOTS_PER_GROUP, PGPROC::fpInfoLock, PGPROC::fpLocalTransactionId, PGPROC::fpLockBits, PGPROC::fpRelId, PGPROC::fpVXIDLock, GetPGProcByNumber, PROCLOCK::groupLeader, hash_get_num_entries(), hash_seq_init(), hash_seq_search(), PROCLOCK::holdMask, i, j, VirtualTransactionId::localTransactionId, LOCKBIT_ON, LockHashPartitionLockByIndex, LockMethodProcLockHash, LW_SHARED, LWLockAcquire(), LWLockRelease(), PGPROC::lxid, MaxBackends, memcpy(), PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, NoLock, NUM_LOCK_PARTITIONS, palloc_array, palloc_object, pg_atomic_read_u64(), PGPROC::pid, ProcGlobal, VirtualTransactionId::procNumber, PGPROC::procNumber, repalloc(), SET_LOCKTAG_RELATION, SET_LOCKTAG_VIRTUALTRANSACTION, LOCK::tag, PROCLOCK::tag, PGPROC::vxid, PGPROC::waitLock, PGPROC::waitLockMode, and PGPROC::waitStart.

Referenced by pg_lock_status().

◆ GetLockTagsMethodTable()

LockMethod GetLockTagsMethodTable ( const LOCKTAG locktag)

Definition at line 537 of file lock.c.

References Assert, fb(), lengthof, LockMethods, and LOCKTAG::locktag_lockmethodid.

Referenced by pg_blocking_pids().

◆ GetRunningTransactionLocks()

xl_standby_lock * GetRunningTransactionLocks ( int nlocks)

Definition at line 4154 of file lock.c.

4155{
4157 PROCLOCK *proclock;
4159 int i;
4160 int index;
4161 int els;
4162
4163 /*
4164 * Acquire lock on the entire shared lock data structure.
4165 *
4166 * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
4167 */
4168 for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
4170
4171 /* Now we can safely count the number of proclocks */
4173
4174 /*
4175 * Allocating enough space for all locks in the lock table is overkill,
4176 * but it's more convenient and faster than having to enlarge the array.
4177 */
4179
4180 /* Now scan the tables to copy the data */
4182
4183 /*
4184 * If lock is a currently granted AccessExclusiveLock then it will have
4185 * just one proclock holder, so locks are never accessed twice in this
4186 * particular case. Don't copy this code for use elsewhere because in the
4187 * general case this will give you duplicate locks when looking at
4188 * non-exclusive lock types.
4189 */
4190 index = 0;
4191 while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
4192 {
4193 /* make sure this definition matches the one used in LockAcquire */
4194 if ((proclock->holdMask & LOCKBIT_ON(AccessExclusiveLock)) &&
4196 {
4197 PGPROC *proc = proclock->tag.myProc;
4198 LOCK *lock = proclock->tag.myLock;
4199 TransactionId xid = proc->xid;
4200
4201 /*
4202 * Don't record locks for transactions if we know they have
4203 * already issued their WAL record for commit but not yet released
4204 * lock. It is still possible that we see locks held by already
4205 * complete transactions, if they haven't yet zeroed their xids.
4206 */
4207 if (!TransactionIdIsValid(xid))
4208 continue;
4209
4210 accessExclusiveLocks[index].xid = xid;
4213
4214 index++;
4215 }
4216 }
4217
4218 Assert(index <= els);
4219
4220 /*
4221 * And release locks. We do this in reverse order for two reasons: (1)
4222 * Anyone else who needs more than one of the locks will be trying to lock
4223 * them in increasing order; we don't want to release the other process
4224 * until it can get all the locks it needs. (2) This avoids O(N^2)
4225 * behavior inside LWLockRelease.
4226 */
4227 for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
4229
4230 *nlocks = index;
4231 return accessExclusiveLocks;
4232}

References AccessExclusiveLock, Assert, fb(), hash_get_num_entries(), hash_seq_init(), hash_seq_search(), PROCLOCK::holdMask, i, LOCKBIT_ON, LockHashPartitionLockByIndex, LockMethodProcLockHash, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG_RELATION, LOCKTAG::locktag_type, LW_SHARED, LWLockAcquire(), LWLockRelease(), PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, NUM_LOCK_PARTITIONS, palloc(), LOCK::tag, PROCLOCK::tag, TransactionIdIsValid, and PGPROC::xid.

Referenced by LogStandbySnapshot().

◆ GetSingleProcBlockerStatusData()

static void GetSingleProcBlockerStatusData ( PGPROC blocked_proc,
BlockedProcsData data 
)
static

Definition at line 4060 of file lock.c.

4061{
4062 LOCK *theLock = blocked_proc->waitLock;
4067 int queue_size;
4068
4069 /* Nothing to do if this proc is not blocked */
4070 if (theLock == NULL)
4071 return;
4072
4073 /* Set up a procs[] element */
4074 bproc = &data->procs[data->nprocs++];
4075 bproc->pid = blocked_proc->pid;
4076 bproc->first_lock = data->nlocks;
4077 bproc->first_waiter = data->npids;
4078
4079 /*
4080 * We may ignore the proc's fast-path arrays, since nothing in those could
4081 * be related to a contended lock.
4082 */
4083
4084 /* Collect all PROCLOCKs associated with theLock */
4085 dlist_foreach(proclock_iter, &theLock->procLocks)
4086 {
4087 PROCLOCK *proclock =
4088 dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
4089 PGPROC *proc = proclock->tag.myProc;
4090 LOCK *lock = proclock->tag.myLock;
4092
4093 if (data->nlocks >= data->maxlocks)
4094 {
4095 data->maxlocks += MaxBackends;
4096 data->locks = (LockInstanceData *)
4097 repalloc(data->locks, sizeof(LockInstanceData) * data->maxlocks);
4098 }
4099
4100 instance = &data->locks[data->nlocks];
4101 memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
4102 instance->holdMask = proclock->holdMask;
4103 if (proc->waitLock == lock)
4104 instance->waitLockMode = proc->waitLockMode;
4105 else
4106 instance->waitLockMode = NoLock;
4107 instance->vxid.procNumber = proc->vxid.procNumber;
4108 instance->vxid.localTransactionId = proc->vxid.lxid;
4109 instance->pid = proc->pid;
4110 instance->leaderPid = proclock->groupLeader->pid;
4111 instance->fastpath = false;
4112 data->nlocks++;
4113 }
4114
4115 /* Enlarge waiter_pids[] if it's too small to hold all wait queue PIDs */
4116 waitQueue = &(theLock->waitProcs);
4117 queue_size = dclist_count(waitQueue);
4118
4119 if (queue_size > data->maxpids - data->npids)
4120 {
4121 data->maxpids = Max(data->maxpids + MaxBackends,
4122 data->npids + queue_size);
4123 data->waiter_pids = (int *) repalloc(data->waiter_pids,
4124 sizeof(int) * data->maxpids);
4125 }
4126
4127 /* Collect PIDs from the lock's wait queue, stopping at blocked_proc */
4129 {
4131
4133 break;
4134 data->waiter_pids[data->npids++] = queued_proc->pid;
4135 }
4136
4137 bproc->num_locks = data->nlocks - bproc->first_lock;
4138 bproc->num_waiters = data->npids - bproc->first_waiter;
4139}

References data, dclist_count(), dclist_foreach, dlist_container, dlist_foreach, fb(), PROCLOCK::groupLeader, PROCLOCK::holdMask, PGPROC::lxid, Max, MaxBackends, memcpy(), PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, NoLock, PGPROC::pid, PGPROC::procNumber, repalloc(), LOCK::tag, PROCLOCK::tag, PGPROC::vxid, PGPROC::waitLock, and PGPROC::waitLockMode.

Referenced by GetBlockerStatusData().

◆ GrantAwaitedLock()

void GrantAwaitedLock ( void  )

Definition at line 1898 of file lock.c.

References awaitedLock, awaitedOwner, and GrantLockLocal().

Referenced by LockErrorCleanup().

◆ GrantLock()

void GrantLock ( LOCK lock,
PROCLOCK proclock,
LOCKMODE  lockmode 
)

Definition at line 1667 of file lock.c.

1668{
1669 lock->nGranted++;
1670 lock->granted[lockmode]++;
1671 lock->grantMask |= LOCKBIT_ON(lockmode);
1672 if (lock->granted[lockmode] == lock->requested[lockmode])
1673 lock->waitMask &= LOCKBIT_OFF(lockmode);
1674 proclock->holdMask |= LOCKBIT_ON(lockmode);
1675 LOCK_PRINT("GrantLock", lock, lockmode);
1676 Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
1677 Assert(lock->nGranted <= lock->nRequested);
1678}

References Assert, LOCK::granted, LOCK::grantMask, PROCLOCK::holdMask, LOCK_PRINT, LOCKBIT_OFF, LOCKBIT_ON, LOCK::nGranted, LOCK::nRequested, LOCK::requested, and LOCK::waitMask.

Referenced by FastPathGetRelationLockEntry(), FastPathTransferRelationLocks(), JoinWaitQueue(), lock_twophase_recover(), LockAcquireExtended(), ProcLockWakeup(), and VirtualXactLock().

◆ GrantLockLocal()

static void GrantLockLocal ( LOCALLOCK locallock,
ResourceOwner  owner 
)
static

Definition at line 1801 of file lock.c.

1802{
1803 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
1804 int i;
1805
1806 Assert(locallock->numLockOwners < locallock->maxLockOwners);
1807 /* Count the total */
1808 locallock->nLocks++;
1809 /* Count the per-owner lock */
1810 for (i = 0; i < locallock->numLockOwners; i++)
1811 {
1812 if (lockOwners[i].owner == owner)
1813 {
1814 lockOwners[i].nLocks++;
1815 return;
1816 }
1817 }
1818 lockOwners[i].owner = owner;
1819 lockOwners[i].nLocks = 1;
1820 locallock->numLockOwners++;
1821 if (owner != NULL)
1823
1824 /* Indicate that the lock is acquired for certain types of locks. */
1826}

References Assert, CheckAndSetLockHeld(), fb(), i, LOCALLOCKOWNER::nLocks, LOCALLOCKOWNER::owner, and ResourceOwnerRememberLock().

Referenced by GrantAwaitedLock(), and LockAcquireExtended().

◆ InitLockManagerAccess()

void InitLockManagerAccess ( void  )

Definition at line 503 of file lock.c.

504{
505 /*
506 * Allocate non-shared hash table for LOCALLOCK structs. This stores lock
507 * counts and resource owner information.
508 */
509 HASHCTL info;
510
511 info.keysize = sizeof(LOCALLOCKTAG);
512 info.entrysize = sizeof(LOCALLOCK);
513
514 LockMethodLocalHash = hash_create("LOCALLOCK hash",
515 16,
516 &info,
518}

References HASHCTL::entrysize, HASH_BLOBS, hash_create(), HASH_ELEM, HASHCTL::keysize, and LockMethodLocalHash.

Referenced by BaseInit().

◆ lock_twophase_postabort()

void lock_twophase_postabort ( FullTransactionId  fxid,
uint16  info,
void * recdata,
uint32  len 
)

Definition at line 4579 of file lock.c.

4581{
4582 lock_twophase_postcommit(fxid, info, recdata, len);
4583}

References fb(), len, and lock_twophase_postcommit().

◆ lock_twophase_postcommit()

void lock_twophase_postcommit ( FullTransactionId  fxid,
uint16  info,
void * recdata,
uint32  len 
)

◆ lock_twophase_recover()

void lock_twophase_recover ( FullTransactionId  fxid,
uint16  info,
void * recdata,
uint32  len 
)

Definition at line 4340 of file lock.c.

4342{
4344 PGPROC *proc = TwoPhaseGetDummyProc(fxid, false);
4345 LOCKTAG *locktag;
4346 LOCKMODE lockmode;
4348 LOCK *lock;
4349 PROCLOCK *proclock;
4351 bool found;
4352 uint32 hashcode;
4354 int partition;
4357
4358 Assert(len == sizeof(TwoPhaseLockRecord));
4359 locktag = &rec->locktag;
4360 lockmode = rec->lockmode;
4362
4364 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4366
4367 hashcode = LockTagHashCode(locktag);
4368 partition = LockHashPartition(hashcode);
4370
4372
4373 /*
4374 * Find or create a lock with this tag.
4375 */
4377 locktag,
4378 hashcode,
4380 &found);
4381 if (!lock)
4382 {
4384 ereport(ERROR,
4386 errmsg("out of shared memory"),
4387 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
4388 }
4389
4390 /*
4391 * if it's a new lock object, initialize it
4392 */
4393 if (!found)
4394 {
4395 lock->grantMask = 0;
4396 lock->waitMask = 0;
4397 dlist_init(&lock->procLocks);
4398 dclist_init(&lock->waitProcs);
4399 lock->nRequested = 0;
4400 lock->nGranted = 0;
4401 MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
4402 MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
4403 LOCK_PRINT("lock_twophase_recover: new", lock, lockmode);
4404 }
4405 else
4406 {
4407 LOCK_PRINT("lock_twophase_recover: found", lock, lockmode);
4408 Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
4409 Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
4410 Assert(lock->nGranted <= lock->nRequested);
4411 }
4412
4413 /*
4414 * Create the hash key for the proclock table.
4415 */
4416 proclocktag.myLock = lock;
4417 proclocktag.myProc = proc;
4418
4420
4421 /*
4422 * Find or create a proclock entry with this tag
4423 */
4425 &proclocktag,
4428 &found);
4429 if (!proclock)
4430 {
4431 /* Oops, not enough shmem for the proclock */
4432 if (lock->nRequested == 0)
4433 {
4434 /*
4435 * There are no other requestors of this lock, so garbage-collect
4436 * the lock object. We *must* do this to avoid a permanent leak
4437 * of shared memory, because there won't be anything to cause
4438 * anyone to release the lock object later.
4439 */
4442 &(lock->tag),
4443 hashcode,
4445 NULL))
4446 elog(PANIC, "lock table corrupted");
4447 }
4449 ereport(ERROR,
4451 errmsg("out of shared memory"),
4452 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
4453 }
4454
4455 /*
4456 * If new, initialize the new entry
4457 */
4458 if (!found)
4459 {
4460 Assert(proc->lockGroupLeader == NULL);
4461 proclock->groupLeader = proc;
4462 proclock->holdMask = 0;
4463 proclock->releaseMask = 0;
4464 /* Add proclock to appropriate lists */
4465 dlist_push_tail(&lock->procLocks, &proclock->lockLink);
4467 &proclock->procLink);
4468 PROCLOCK_PRINT("lock_twophase_recover: new", proclock);
4469 }
4470 else
4471 {
4472 PROCLOCK_PRINT("lock_twophase_recover: found", proclock);
4473 Assert((proclock->holdMask & ~lock->grantMask) == 0);
4474 }
4475
4476 /*
4477 * lock->nRequested and lock->requested[] count the total number of
4478 * requests, whether granted or waiting, so increment those immediately.
4479 */
4480 lock->nRequested++;
4481 lock->requested[lockmode]++;
4482 Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
4483
4484 /*
4485 * We shouldn't already hold the desired lock.
4486 */
4487 if (proclock->holdMask & LOCKBIT_ON(lockmode))
4488 elog(ERROR, "lock %s on object %u/%u/%u is already held",
4489 lockMethodTable->lockModeNames[lockmode],
4490 lock->tag.locktag_field1, lock->tag.locktag_field2,
4491 lock->tag.locktag_field3);
4492
4493 /*
4494 * We ignore any possible conflicts and just grant ourselves the lock. Not
4495 * only because we don't bother, but also to avoid deadlocks when
4496 * switching from standby to normal mode. See function comment.
4497 */
4498 GrantLock(lock, proclock, lockmode);
4499
4500 /*
4501 * Bump strong lock count, to make sure any fast-path lock requests won't
4502 * be granted without consulting the primary lock table.
4503 */
4504 if (ConflictsWithRelationFastPath(&lock->tag, lockmode))
4505 {
4507
4511 }
4512
4514}

References Assert, ConflictsWithRelationFastPath, FastPathStrongRelationLockData::count, dclist_init(), dlist_init(), dlist_is_empty(), dlist_push_tail(), elog, ereport, errcode(), errhint(), errmsg, ERROR, FastPathStrongLockHashPartition, FastPathStrongRelationLocks, fb(), LOCK::granted, GrantLock(), LOCK::grantMask, PROCLOCK::groupLeader, HASH_ENTER_NULL, HASH_REMOVE, hash_search_with_hash_value(), PROCLOCK::holdMask, len, lengthof, LOCK_PRINT, LOCKBIT_ON, PGPROC::lockGroupLeader, LockHashPartition, LockHashPartitionLock, PROCLOCK::lockLink, LockMethodLockHash, LockMethodProcLockHash, LockMethods, TwoPhaseLockRecord::lockmode, TwoPhaseLockRecord::locktag, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_field3, LOCKTAG::locktag_lockmethodid, LockTagHashCode(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), MAX_LOCKMODES, MemSet, FastPathStrongRelationLockData::mutex, PGPROC::myProcLocks, LOCK::nGranted, LOCK::nRequested, PANIC, PROCLOCK::procLink, PROCLOCK_PRINT, ProcLockHashCode(), LOCK::procLocks, PROCLOCK::releaseMask, LOCK::requested, SpinLockAcquire(), SpinLockRelease(), LOCK::tag, TwoPhaseGetDummyProc(), LOCK::waitMask, and LOCK::waitProcs.

◆ lock_twophase_standby_recover()

void lock_twophase_standby_recover ( FullTransactionId  fxid,
uint16  info,
void * recdata,
uint32  len 
)

Definition at line 4521 of file lock.c.

4523{
4525 LOCKTAG *locktag;
4526 LOCKMODE lockmode;
4528
4529 Assert(len == sizeof(TwoPhaseLockRecord));
4530 locktag = &rec->locktag;
4531 lockmode = rec->lockmode;
4533
4535 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4536
4537 if (lockmode == AccessExclusiveLock &&
4538 locktag->locktag_type == LOCKTAG_RELATION)
4539 {
4541 locktag->locktag_field1 /* dboid */ ,
4542 locktag->locktag_field2 /* reloid */ );
4543 }
4544}

References AccessExclusiveLock, Assert, elog, ERROR, fb(), len, lengthof, LockMethods, TwoPhaseLockRecord::lockmode, TwoPhaseLockRecord::locktag, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_lockmethodid, LOCKTAG_RELATION, LOCKTAG::locktag_type, StandbyAcquireAccessExclusiveLock(), and XidFromFullTransactionId.

◆ LockAcquire()

◆ LockAcquireExtended()

LockAcquireResult LockAcquireExtended ( const LOCKTAG * locktag,
LOCKMODE  lockmode,
bool  sessionLock,
bool  dontWait,
bool  reportMemoryError,
LOCALLOCK **  locallockp,
bool  logLockFailure 
)

Definition at line 834 of file lock.c.

841{
846 LOCK *lock;
847 PROCLOCK *proclock;
848 bool found;
849 ResourceOwner owner;
850 uint32 hashcode;
852 bool found_conflict;
854 bool log_lock = false;
855
857 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
860 elog(ERROR, "unrecognized lock mode: %d", lockmode);
861
862 if (RecoveryInProgress() && !InRecovery &&
863 (locktag->locktag_type == LOCKTAG_OBJECT ||
864 locktag->locktag_type == LOCKTAG_RELATION) &&
865 lockmode > RowExclusiveLock)
868 errmsg("cannot acquire lock mode %s on database objects while recovery is in progress",
869 lockMethodTable->lockModeNames[lockmode]),
870 errhint("Only RowExclusiveLock or less can be acquired on database objects during recovery.")));
871
872#ifdef LOCK_DEBUG
873 if (LOCK_DEBUG_ENABLED(locktag))
874 elog(LOG, "LockAcquire: lock [%u,%u] %s",
875 locktag->locktag_field1, locktag->locktag_field2,
876 lockMethodTable->lockModeNames[lockmode]);
877#endif
878
879 /* Identify owner for lock */
880 if (sessionLock)
881 owner = NULL;
882 else
883 owner = CurrentResourceOwner;
884
885 /*
886 * Find or create a LOCALLOCK entry for this lock and lockmode
887 */
888 MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
889 localtag.lock = *locktag;
890 localtag.mode = lockmode;
891
893 &localtag,
894 HASH_ENTER, &found);
895
896 /*
897 * if it's a new locallock object, initialize it
898 */
899 if (!found)
900 {
901 locallock->lock = NULL;
902 locallock->proclock = NULL;
903 locallock->hashcode = LockTagHashCode(&(localtag.lock));
904 locallock->nLocks = 0;
905 locallock->holdsStrongLockCount = false;
906 locallock->lockCleared = false;
907 locallock->numLockOwners = 0;
908 locallock->maxLockOwners = 8;
909 locallock->lockOwners = NULL; /* in case next line fails */
910 locallock->lockOwners = (LOCALLOCKOWNER *)
912 locallock->maxLockOwners * sizeof(LOCALLOCKOWNER));
913 }
914 else
915 {
916 /* Make sure there will be room to remember the lock */
917 if (locallock->numLockOwners >= locallock->maxLockOwners)
918 {
919 int newsize = locallock->maxLockOwners * 2;
920
921 locallock->lockOwners = (LOCALLOCKOWNER *)
922 repalloc(locallock->lockOwners,
923 newsize * sizeof(LOCALLOCKOWNER));
924 locallock->maxLockOwners = newsize;
925 }
926 }
927 hashcode = locallock->hashcode;
928
929 if (locallockp)
931
932 /*
933 * If we already hold the lock, we can just increase the count locally.
934 *
935 * If lockCleared is already set, caller need not worry about absorbing
936 * sinval messages related to the lock's object.
937 */
938 if (locallock->nLocks > 0)
939 {
941 if (locallock->lockCleared)
943 else
945 }
946
947 /*
948 * We don't acquire any other heavyweight lock while holding the relation
949 * extension lock. We do allow to acquire the same relation extension
950 * lock more than once but that case won't reach here.
951 */
953
954 /*
955 * Prepare to emit a WAL record if acquisition of this lock needs to be
956 * replayed in a standby server.
957 *
958 * Here we prepare to log; after lock is acquired we'll issue log record.
959 * This arrangement simplifies error recovery in case the preparation step
960 * fails.
961 *
962 * Only AccessExclusiveLocks can conflict with lock types that read-only
963 * transactions can acquire in a standby server. Make sure this definition
964 * matches the one in GetRunningTransactionLocks().
965 */
966 if (lockmode >= AccessExclusiveLock &&
967 locktag->locktag_type == LOCKTAG_RELATION &&
970 {
972 log_lock = true;
973 }
974
975 /*
976 * Attempt to take lock via fast path, if eligible. But if we remember
977 * having filled up the fast path array, we don't attempt to make any
978 * further use of it until we release some locks. It's possible that some
979 * other backend has transferred some of those locks to the shared hash
980 * table, leaving space free, but it's not worth acquiring the LWLock just
981 * to check. It's also possible that we're acquiring a second or third
982 * lock type on a relation we have already locked using the fast-path, but
983 * for now we don't worry about that case either.
984 */
985 if (EligibleForRelationFastPath(locktag, lockmode))
986 {
989 {
991 bool acquired;
992
993 /*
994 * LWLockAcquire acts as a memory sequencing point, so it's safe
995 * to assume that any strong locker whose increment to
996 * FastPathStrongRelationLocks->counts becomes visible after we
997 * test it has yet to begin to transfer fast-path locks.
998 */
1001 acquired = false;
1002 else
1004 lockmode);
1006 if (acquired)
1007 {
1008 /*
1009 * The locallock might contain stale pointers to some old
1010 * shared objects; we MUST reset these to null before
1011 * considering the lock to be acquired via fast-path.
1012 */
1013 locallock->lock = NULL;
1014 locallock->proclock = NULL;
1015 GrantLockLocal(locallock, owner);
1016 return LOCKACQUIRE_OK;
1017 }
1018 }
1019 else
1020 {
1021 /*
1022 * Increment the lock statistics counter if lock could not be
1023 * acquired via the fast-path.
1024 */
1025 pgstat_count_lock_fastpath_exceeded(locallock->tag.lock.locktag_type);
1026 }
1027 }
1028
1029 /*
1030 * If this lock could potentially have been taken via the fast-path by
1031 * some other backend, we must (temporarily) disable further use of the
1032 * fast-path for this lock tag, and migrate any locks already taken via
1033 * this method to the main lock table.
1034 */
1035 if (ConflictsWithRelationFastPath(locktag, lockmode))
1036 {
1038
1041 hashcode))
1042 {
1044 if (locallock->nLocks == 0)
1046 if (locallockp)
1047 *locallockp = NULL;
1049 ereport(ERROR,
1051 errmsg("out of shared memory"),
1052 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
1053 else
1054 return LOCKACQUIRE_NOT_AVAIL;
1055 }
1056 }
1057
1058 /*
1059 * We didn't find the lock in our LOCALLOCK table, and we didn't manage to
1060 * take it via the fast-path, either, so we've got to mess with the shared
1061 * lock table.
1062 */
1064
1066
1067 /*
1068 * Find or create lock and proclock entries with this tag
1069 *
1070 * Note: if the locallock object already existed, it might have a pointer
1071 * to the lock already ... but we should not assume that that pointer is
1072 * valid, since a lock object with zero hold and request counts can go
1073 * away anytime. So we have to use SetupLockInTable() to recompute the
1074 * lock and proclock pointers, even if they're already set.
1075 */
1076 proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
1077 hashcode, lockmode);
1078 if (!proclock)
1079 {
1082 if (locallock->nLocks == 0)
1084 if (locallockp)
1085 *locallockp = NULL;
1087 ereport(ERROR,
1089 errmsg("out of shared memory"),
1090 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
1091 else
1092 return LOCKACQUIRE_NOT_AVAIL;
1093 }
1094 locallock->proclock = proclock;
1095 lock = proclock->tag.myLock;
1096 locallock->lock = lock;
1097
1098 /*
1099 * If lock requested conflicts with locks requested by waiters, must join
1100 * wait queue. Otherwise, check for conflict with already-held locks.
1101 * (That's last because most complex check.)
1102 */
1103 if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
1104 found_conflict = true;
1105 else
1107 lock, proclock);
1108
1109 if (!found_conflict)
1110 {
1111 /* No conflict with held or previously requested locks */
1112 GrantLock(lock, proclock, lockmode);
1114 }
1115 else
1116 {
1117 /*
1118 * Join the lock's wait queue. We call this even in the dontWait
1119 * case, because JoinWaitQueue() may discover that we can acquire the
1120 * lock immediately after all.
1121 */
1123 }
1124
1126 {
1127 /*
1128 * We're not getting the lock because a deadlock was detected already
1129 * while trying to join the wait queue, or because we would have to
1130 * wait but the caller requested no blocking.
1131 *
1132 * Undo the changes to shared entries before releasing the partition
1133 * lock.
1134 */
1136
1137 if (proclock->holdMask == 0)
1138 {
1140
1142 hashcode);
1143 dlist_delete(&proclock->lockLink);
1144 dlist_delete(&proclock->procLink);
1146 &(proclock->tag),
1149 NULL))
1150 elog(PANIC, "proclock table corrupted");
1151 }
1152 else
1153 PROCLOCK_PRINT("LockAcquire: did not join wait queue", proclock);
1154 lock->nRequested--;
1155 lock->requested[lockmode]--;
1156 LOCK_PRINT("LockAcquire: did not join wait queue",
1157 lock, lockmode);
1158 Assert((lock->nRequested > 0) &&
1159 (lock->requested[lockmode] >= 0));
1160 Assert(lock->nGranted <= lock->nRequested);
1162 if (locallock->nLocks == 0)
1164
1165 if (dontWait)
1166 {
1167 /*
1168 * Log lock holders and waiters as a detail log message if
1169 * logLockFailure = true and lock acquisition fails with dontWait
1170 * = true
1171 */
1172 if (logLockFailure)
1173 {
1177 const char *modename;
1178 int lockHoldersNum = 0;
1179
1183
1184 DescribeLockTag(&buf, &locallock->tag.lock);
1185 modename = GetLockmodeName(locallock->tag.lock.locktag_lockmethodid,
1186 lockmode);
1187
1188 /* Gather a list of all lock holders and waiters */
1193
1194 ereport(LOG,
1195 (errmsg("process %d could not obtain %s on %s",
1196 MyProcPid, modename, buf.data),
1198 "Process holding the lock: %s, Wait queue: %s.",
1199 "Processes holding the lock: %s, Wait queue: %s.",
1201 lock_holders_sbuf.data,
1202 lock_waiters_sbuf.data)));
1203
1204 pfree(buf.data);
1207 }
1208 if (locallockp)
1209 *locallockp = NULL;
1210 return LOCKACQUIRE_NOT_AVAIL;
1211 }
1212 else
1213 {
1215 /* DeadLockReport() will not return */
1216 }
1217 }
1218
1219 /*
1220 * We are now in the lock queue, or the lock was already granted. If
1221 * queued, go to sleep.
1222 */
1224 {
1225 Assert(!dontWait);
1226 PROCLOCK_PRINT("LockAcquire: sleeping on lock", proclock);
1227 LOCK_PRINT("LockAcquire: sleeping on lock", lock, lockmode);
1229
1231
1232 /*
1233 * NOTE: do not do any material change of state between here and
1234 * return. All required changes in locktable state must have been
1235 * done when the lock was granted to us --- see notes in WaitOnLock.
1236 */
1237
1239 {
1240 /*
1241 * We failed as a result of a deadlock, see CheckDeadLock(). Quit
1242 * now.
1243 */
1244 Assert(!dontWait);
1246 /* DeadLockReport() will not return */
1247 }
1248 }
1249 else
1252
1253 /* The lock was granted to us. Update the local lock entry accordingly */
1254 Assert((proclock->holdMask & LOCKBIT_ON(lockmode)) != 0);
1255 GrantLockLocal(locallock, owner);
1256
1257 /*
1258 * Lock state is fully up-to-date now; if we error out after this, no
1259 * special error cleanup is required.
1260 */
1262
1263 /*
1264 * Emit a WAL record if acquisition of this lock needs to be replayed in a
1265 * standby server.
1266 */
1267 if (log_lock)
1268 {
1269 /*
1270 * Decode the locktag back to the original values, to avoid sending
1271 * lots of empty bytes with every message. See lock.h to check how a
1272 * locktag is defined for LOCKTAG_RELATION
1273 */
1275 locktag->locktag_field2);
1276 }
1277
1278 return LOCKACQUIRE_OK;
1279}

References AbortStrongLockAcquire(), AccessExclusiveLock, Assert, BeginStrongLockAcquire(), buf, ConflictsWithRelationFastPath, FastPathStrongRelationLockData::count, CurrentResourceOwner, DeadLockReport(), DescribeLockTag(), dlist_delete(), EligibleForRelationFastPath, elog, ereport, errcode(), errdetail_log_plural(), errhint(), errmsg, ERROR, FAST_PATH_REL_GROUP, FastPathGrantRelationLock(), FastPathLocalUseCounts, FastPathStrongLockHashPartition, FastPathStrongRelationLocks, FastPathTransferRelationLocks(), fb(), FinishStrongLockAcquire(), FP_LOCK_SLOTS_PER_GROUP, PGPROC::fpInfoLock, GetLockHoldersAndWaiters(), GetLockmodeName(), GrantLock(), GrantLockLocal(), HASH_ENTER, HASH_REMOVE, hash_search(), hash_search_with_hash_value(), PROCLOCK::holdMask, initStringInfo(), InRecovery, JoinWaitQueue(), lengthof, LOCK_PRINT, LOCKACQUIRE_ALREADY_CLEAR, LOCKACQUIRE_ALREADY_HELD, LOCKACQUIRE_NOT_AVAIL, LOCKACQUIRE_OK, LOCKBIT_ON, LockCheckConflicts(), LockHashPartitionLock, PROCLOCK::lockLink, LockMethodLocalHash, LockMethodProcLockHash, LockMethods, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_lockmethodid, LOCKTAG_OBJECT, LOCKTAG_RELATION, LOCKTAG::locktag_type, LockTagHashCode(), LOG, LogAccessExclusiveLock(), LogAccessExclusiveLockPrepare(), LW_EXCLUSIVE, LW_SHARED, LWLockAcquire(), LWLockRelease(), MemoryContextAlloc(), MemSet, PROCLOCKTAG::myLock, MyProc, MyProcPid, LOCK::nGranted, LOCK::nRequested, PANIC, pfree(), pgstat_count_lock_fastpath_exceeded(), PROC_WAIT_STATUS_ERROR, PROC_WAIT_STATUS_OK, PROC_WAIT_STATUS_WAITING, PROCLOCK::procLink, PROCLOCK_PRINT, ProcLockHashCode(), RecoveryInProgress(), RemoveLocalLock(), repalloc(), LOCK::requested, RowExclusiveLock, SetupLockInTable(), PROCLOCK::tag, TopMemoryContext, LOCK::waitMask, WaitOnLock(), and XLogStandbyInfoActive.

Referenced by ConditionalLockDatabaseObject(), ConditionalLockRelation(), ConditionalLockRelationOid(), ConditionalLockSharedObject(), ConditionalLockTuple(), ConditionalXactLockTableWait(), LockAcquire(), LockRelation(), LockRelationId(), and LockRelationOid().

◆ LockCheckConflicts()

bool LockCheckConflicts ( LockMethod  lockMethodTable,
LOCKMODE  lockmode,
LOCK * lock,
PROCLOCK * proclock 
)

Definition at line 1538 of file lock.c.

1542{
1543 int numLockModes = lockMethodTable->numLockModes;
1545 int conflictMask = lockMethodTable->conflictTab[lockmode];
1549 int i;
1550
1551 /*
1552 * first check for global conflicts: If no locks conflict with my request,
1553 * then I get the lock.
1554 *
1555 * Checking for conflict: lock->grantMask represents the types of
1556 * currently held locks. conflictTable[lockmode] has a bit set for each
1557 * type of lock that conflicts with request. Bitwise compare tells if
1558 * there is a conflict.
1559 */
1560 if (!(conflictMask & lock->grantMask))
1561 {
1562 PROCLOCK_PRINT("LockCheckConflicts: no conflict", proclock);
1563 return false;
1564 }
1565
1566 /*
1567 * Rats. Something conflicts. But it could still be my own lock, or a
1568 * lock held by another member of my locking group. First, figure out how
1569 * many conflicts remain after subtracting out any locks I hold myself.
1570 */
1571 myLocks = proclock->holdMask;
1572 for (i = 1; i <= numLockModes; i++)
1573 {
1574 if ((conflictMask & LOCKBIT_ON(i)) == 0)
1575 {
1576 conflictsRemaining[i] = 0;
1577 continue;
1578 }
1579 conflictsRemaining[i] = lock->granted[i];
1580 if (myLocks & LOCKBIT_ON(i))
1583 }
1584
1585 /* If no conflicts remain, we get the lock. */
1586 if (totalConflictsRemaining == 0)
1587 {
1588 PROCLOCK_PRINT("LockCheckConflicts: resolved (simple)", proclock);
1589 return false;
1590 }
1591
1592 /* If no group locking, it's definitely a conflict. */
1593 if (proclock->groupLeader == MyProc && MyProc->lockGroupLeader == NULL)
1594 {
1595 Assert(proclock->tag.myProc == MyProc);
1596 PROCLOCK_PRINT("LockCheckConflicts: conflicting (simple)",
1597 proclock);
1598 return true;
1599 }
1600
1601 /*
1602 * The relation extension lock conflict even between the group members.
1603 */
1605 {
1606 PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)",
1607 proclock);
1608 return true;
1609 }
1610
1611 /*
1612 * Locks held in conflicting modes by members of our own lock group are
1613 * not real conflicts; we can subtract those out and see if we still have
1614 * a conflict. This is O(N) in the number of processes holding or
1615 * awaiting locks on this object. We could improve that by making the
1616 * shared memory state more complex (and larger) but it doesn't seem worth
1617 * it.
1618 */
1620 {
1622 dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
1623
1624 if (proclock != otherproclock &&
1625 proclock->groupLeader == otherproclock->groupLeader &&
1626 (otherproclock->holdMask & conflictMask) != 0)
1627 {
1628 int intersectMask = otherproclock->holdMask & conflictMask;
1629
1630 for (i = 1; i <= numLockModes; i++)
1631 {
1632 if ((intersectMask & LOCKBIT_ON(i)) != 0)
1633 {
1634 if (conflictsRemaining[i] <= 0)
1635 elog(PANIC, "proclocks held do not match lock");
1638 }
1639 }
1640
1641 if (totalConflictsRemaining == 0)
1642 {
1643 PROCLOCK_PRINT("LockCheckConflicts: resolved (group)",
1644 proclock);
1645 return false;
1646 }
1647 }
1648 }
1649
1650 /* Nope, it's a real conflict. */
1651 PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)", proclock);
1652 return true;
1653}

References Assert, dlist_container, dlist_foreach, elog, fb(), LOCK::granted, LOCK::grantMask, PROCLOCK::groupLeader, PROCLOCK::holdMask, i, LOCK_LOCKTAG, LOCKBIT_ON, PGPROC::lockGroupLeader, LOCKTAG_RELATION_EXTEND, MAX_LOCKMODES, MyProc, PROCLOCKTAG::myProc, PANIC, PROCLOCK_PRINT, LOCK::procLocks, and PROCLOCK::tag.

Referenced by JoinWaitQueue(), LockAcquireExtended(), and ProcLockWakeup().

◆ LockHasWaiters()

bool LockHasWaiters ( const LOCKTAG * locktag,
LOCKMODE  lockmode,
bool  sessionLock 
)

Definition at line 694 of file lock.c.

695{
700 LOCK *lock;
701 PROCLOCK *proclock;
703 bool hasWaiters = false;
704
706 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
709 elog(ERROR, "unrecognized lock mode: %d", lockmode);
710
711#ifdef LOCK_DEBUG
712 if (LOCK_DEBUG_ENABLED(locktag))
713 elog(LOG, "LockHasWaiters: lock [%u,%u] %s",
714 locktag->locktag_field1, locktag->locktag_field2,
715 lockMethodTable->lockModeNames[lockmode]);
716#endif
717
718 /*
719 * Find the LOCALLOCK entry for this lock and lockmode
720 */
721 MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
722 localtag.lock = *locktag;
723 localtag.mode = lockmode;
724
726 &localtag,
727 HASH_FIND, NULL);
728
729 /*
730 * let the caller print its own error message, too. Do not ereport(ERROR).
731 */
732 if (!locallock || locallock->nLocks <= 0)
733 {
734 elog(WARNING, "you don't own a lock of type %s",
735 lockMethodTable->lockModeNames[lockmode]);
736 return false;
737 }
738
739 /*
740 * Check the shared lock table.
741 */
743
745
746 /*
747 * We don't need to re-find the lock or proclock, since we kept their
748 * addresses in the locallock table, and they couldn't have been removed
749 * while we were holding a lock on them.
750 */
751 lock = locallock->lock;
752 LOCK_PRINT("LockHasWaiters: found", lock, lockmode);
753 proclock = locallock->proclock;
754 PROCLOCK_PRINT("LockHasWaiters: found", proclock);
755
756 /*
757 * Double-check that we are actually holding a lock of the type we want to
758 * release.
759 */
760 if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
761 {
762 PROCLOCK_PRINT("LockHasWaiters: WRONGTYPE", proclock);
764 elog(WARNING, "you don't own a lock of type %s",
765 lockMethodTable->lockModeNames[lockmode]);
767 return false;
768 }
769
770 /*
771 * Do the checking.
772 */
773 if ((lockMethodTable->conflictTab[lockmode] & lock->waitMask) != 0)
774 hasWaiters = true;
775
777
778 return hasWaiters;
779}

References elog, ERROR, fb(), HASH_FIND, hash_search(), PROCLOCK::holdMask, lengthof, LOCK_PRINT, LOCKBIT_ON, LockHashPartitionLock, LockMethodLocalHash, LockMethods, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_lockmethodid, LOG, LW_SHARED, LWLockAcquire(), LWLockRelease(), MemSet, PROCLOCK_PRINT, RemoveLocalLock(), LOCK::waitMask, and WARNING.

Referenced by LockHasWaitersRelation().

◆ LockHeldByMe()

bool LockHeldByMe ( const LOCKTAG * locktag,
LOCKMODE  lockmode,
bool  orstronger 
)

Definition at line 641 of file lock.c.

643{
646
647 /*
648 * See if there is a LOCALLOCK entry for this lock and lockmode
649 */
650 MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
651 localtag.lock = *locktag;
652 localtag.mode = lockmode;
653
655 &localtag,
656 HASH_FIND, NULL);
657
658 if (locallock && locallock->nLocks > 0)
659 return true;
660
661 if (orstronger)
662 {
664
665 for (slockmode = lockmode + 1;
667 slockmode++)
668 {
669 if (LockHeldByMe(locktag, slockmode, false))
670 return true;
671 }
672 }
673
674 return false;
675}

References fb(), HASH_FIND, hash_search(), LockHeldByMe(), LockMethodLocalHash, MaxLockMode, and MemSet.

Referenced by CheckRelationLockedByMe(), CheckRelationOidLockedByMe(), LockHeldByMe(), and UpdateSubscriptionRelState().

◆ LockManagerShmemInit()

static void LockManagerShmemInit ( void * arg)
static

◆ LockManagerShmemRequest()

static void LockManagerShmemRequest ( void * arg)
static

Definition at line 451 of file lock.c.

452{
454
455 /*
456 * Compute sizes for lock hashtables. Note that these calculations must
457 * agree with LockManagerShmemSize!
458 */
460
461 /*
462 * Hash table for LOCK structs. This stores per-locked-object
463 * information.
464 */
465 ShmemRequestHash(.name = "LOCK hash",
466 .nelems = max_table_size,
467 .ptr = &LockMethodLockHash,
468 .hash_info.keysize = sizeof(LOCKTAG),
469 .hash_info.entrysize = sizeof(LOCK),
470 .hash_info.num_partitions = NUM_LOCK_PARTITIONS,
471 .hash_flags = HASH_ELEM | HASH_BLOBS | HASH_PARTITION,
472 );
473
474 /* Assume an average of 2 holders per lock */
475 max_table_size *= 2;
476
477 ShmemRequestHash(.name = "PROCLOCK hash",
478 .nelems = max_table_size,
480 .hash_info.keysize = sizeof(PROCLOCKTAG),
481 .hash_info.entrysize = sizeof(PROCLOCK),
482 .hash_info.hash = proclock_hash,
483 .hash_info.num_partitions = NUM_LOCK_PARTITIONS,
484 .hash_flags = HASH_ELEM | HASH_FUNCTION | HASH_PARTITION,
485 );
486
487 ShmemRequestStruct(.name = "Fast Path Strong Relation Lock Data",
488 .size = sizeof(FastPathStrongRelationLockData),
489 .ptr = (void **) (void *) &FastPathStrongRelationLocks,
490 );
491}

References FastPathStrongRelationLocks, fb(), HASH_BLOBS, HASH_ELEM, HASH_FUNCTION, HASH_PARTITION, HTAB::keysize, LockMethodLockHash, LockMethodProcLockHash, name, NLOCKENTS, NUM_LOCK_PARTITIONS, proclock_hash(), ShmemRequestHash, and ShmemRequestStruct.

◆ LockReassignCurrentOwner()

void LockReassignCurrentOwner ( LOCALLOCK **  locallocks,
int  nlocks 
)

Definition at line 2715 of file lock.c.

2716{
2718
2719 Assert(parent != NULL);
2720
2721 if (locallocks == NULL)
2722 {
2723 HASH_SEQ_STATUS status;
2725
2727
2728 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2730 }
2731 else
2732 {
2733 int i;
2734
2735 for (i = nlocks - 1; i >= 0; i--)
2736 LockReassignOwner(locallocks[i], parent);
2737 }
2738}

References Assert, CurrentResourceOwner, fb(), hash_seq_init(), hash_seq_search(), i, LockMethodLocalHash, LockReassignOwner(), and ResourceOwnerGetParent().

Referenced by ResourceOwnerReleaseInternal().

◆ LockReassignOwner()

static void LockReassignOwner ( LOCALLOCK * locallock,
ResourceOwner  parent 
)
static

Definition at line 2745 of file lock.c.

2746{
2747 LOCALLOCKOWNER *lockOwners;
2748 int i;
2749 int ic = -1;
2750 int ip = -1;
2751
2752 /*
2753 * Scan to see if there are any locks belonging to current owner or its
2754 * parent
2755 */
2756 lockOwners = locallock->lockOwners;
2757 for (i = locallock->numLockOwners - 1; i >= 0; i--)
2758 {
2759 if (lockOwners[i].owner == CurrentResourceOwner)
2760 ic = i;
2761 else if (lockOwners[i].owner == parent)
2762 ip = i;
2763 }
2764
2765 if (ic < 0)
2766 return; /* no current locks */
2767
2768 if (ip < 0)
2769 {
2770 /* Parent has no slot, so just give it the child's slot */
2771 lockOwners[ic].owner = parent;
2773 }
2774 else
2775 {
2776 /* Merge child's count with parent's */
2777 lockOwners[ip].nLocks += lockOwners[ic].nLocks;
2778 /* compact out unused slot */
2779 locallock->numLockOwners--;
2780 if (ic < locallock->numLockOwners)
2781 lockOwners[ic] = lockOwners[locallock->numLockOwners];
2782 }
2784}

References CurrentResourceOwner, fb(), i, LOCALLOCKOWNER::nLocks, LOCALLOCKOWNER::owner, ResourceOwnerForgetLock(), and ResourceOwnerRememberLock().

Referenced by LockReassignCurrentOwner().

◆ LockRefindAndRelease()

static void LockRefindAndRelease ( LockMethod  lockMethodTable,
PGPROC proc,
LOCKTAG locktag,
LOCKMODE  lockmode,
bool  decrement_strong_lock_count 
)
static

Definition at line 3293 of file lock.c.

3296{
3297 LOCK *lock;
3298 PROCLOCK *proclock;
3300 uint32 hashcode;
3303 bool wakeupNeeded;
3304
3305 hashcode = LockTagHashCode(locktag);
3307
3309
3310 /*
3311 * Re-find the lock object (it had better be there).
3312 */
3314 locktag,
3315 hashcode,
3316 HASH_FIND,
3317 NULL);
3318 if (!lock)
3319 elog(PANIC, "failed to re-find shared lock object");
3320
3321 /*
3322 * Re-find the proclock object (ditto).
3323 */
3324 proclocktag.myLock = lock;
3325 proclocktag.myProc = proc;
3326
3328
3330 &proclocktag,
3332 HASH_FIND,
3333 NULL);
3334 if (!proclock)
3335 elog(PANIC, "failed to re-find shared proclock object");
3336
3337 /*
3338 * Double-check that we are actually holding a lock of the type we want to
3339 * release.
3340 */
3341 if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
3342 {
3343 PROCLOCK_PRINT("lock_twophase_postcommit: WRONGTYPE", proclock);
3345 elog(WARNING, "you don't own a lock of type %s",
3346 lockMethodTable->lockModeNames[lockmode]);
3347 return;
3348 }
3349
3350 /*
3351 * Do the releasing. CleanUpLock will waken any now-wakable waiters.
3352 */
3353 wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
3354
3355 CleanUpLock(lock, proclock,
3356 lockMethodTable, hashcode,
3357 wakeupNeeded);
3358
3360
3361 /*
3362 * Decrement strong lock count. This logic is needed only for 2PC.
3363 */
3365 && ConflictsWithRelationFastPath(locktag, lockmode))
3366 {
3368
3373 }
3374}

References Assert, CleanUpLock(), ConflictsWithRelationFastPath, FastPathStrongRelationLockData::count, elog, FastPathStrongLockHashPartition, FastPathStrongRelationLocks, fb(), HASH_FIND, hash_search_with_hash_value(), PROCLOCK::holdMask, LOCKBIT_ON, LockHashPartitionLock, LockMethodLockHash, LockMethodProcLockHash, LockTagHashCode(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), FastPathStrongRelationLockData::mutex, PANIC, PROCLOCK_PRINT, ProcLockHashCode(), SpinLockAcquire(), SpinLockRelease(), UnGrantLock(), and WARNING.

Referenced by lock_twophase_postcommit(), LockReleaseAll(), and VirtualXactLockTableCleanup().

◆ LockRelease()

bool LockRelease ( const LOCKTAG locktag,
LOCKMODE  lockmode,
bool  sessionLock 
)

Definition at line 2111 of file lock.c.

2112{
2117 LOCK *lock;
2118 PROCLOCK *proclock;
2120 bool wakeupNeeded;
2121
2123 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2126 elog(ERROR, "unrecognized lock mode: %d", lockmode);
2127
2128#ifdef LOCK_DEBUG
2129 if (LOCK_DEBUG_ENABLED(locktag))
2130 elog(LOG, "LockRelease: lock [%u,%u] %s",
2131 locktag->locktag_field1, locktag->locktag_field2,
2132 lockMethodTable->lockModeNames[lockmode]);
2133#endif
2134
2135 /*
2136 * Find the LOCALLOCK entry for this lock and lockmode
2137 */
2138 MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
2139 localtag.lock = *locktag;
2140 localtag.mode = lockmode;
2141
2143 &localtag,
2144 HASH_FIND, NULL);
2145
2146 /*
2147 * let the caller print its own error message, too. Do not ereport(ERROR).
2148 */
2149 if (!locallock || locallock->nLocks <= 0)
2150 {
2151 elog(WARNING, "you don't own a lock of type %s",
2152 lockMethodTable->lockModeNames[lockmode]);
2153 return false;
2154 }
2155
2156 /*
2157 * Decrease the count for the resource owner.
2158 */
2159 {
2160 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
2161 ResourceOwner owner;
2162 int i;
2163
2164 /* Identify owner for lock */
2165 if (sessionLock)
2166 owner = NULL;
2167 else
2168 owner = CurrentResourceOwner;
2169
2170 for (i = locallock->numLockOwners - 1; i >= 0; i--)
2171 {
2172 if (lockOwners[i].owner == owner)
2173 {
2174 Assert(lockOwners[i].nLocks > 0);
2175 if (--lockOwners[i].nLocks == 0)
2176 {
2177 if (owner != NULL)
2179 /* compact out unused slot */
2180 locallock->numLockOwners--;
2181 if (i < locallock->numLockOwners)
2182 lockOwners[i] = lockOwners[locallock->numLockOwners];
2183 }
2184 break;
2185 }
2186 }
2187 if (i < 0)
2188 {
2189 /* don't release a lock belonging to another owner */
2190 elog(WARNING, "you don't own a lock of type %s",
2191 lockMethodTable->lockModeNames[lockmode]);
2192 return false;
2193 }
2194 }
2195
2196 /*
2197 * Decrease the total local count. If we're still holding the lock, we're
2198 * done.
2199 */
2200 locallock->nLocks--;
2201
2202 if (locallock->nLocks > 0)
2203 return true;
2204
2205 /*
2206 * At this point we can no longer suppose we are clear of invalidation
2207 * messages related to this lock. Although we'll delete the LOCALLOCK
2208 * object before any intentional return from this routine, it seems worth
2209 * the trouble to explicitly reset lockCleared right now, just in case
2210 * some error prevents us from deleting the LOCALLOCK.
2211 */
2212 locallock->lockCleared = false;
2213
2214 /* Attempt fast release of any lock eligible for the fast path. */
2215 if (EligibleForRelationFastPath(locktag, lockmode) &&
2217 {
2218 bool released;
2219
2220 /*
2221 * We might not find the lock here, even if we originally entered it
2222 * here. Another backend may have moved it to the main table.
2223 */
2226 lockmode);
2228 if (released)
2229 {
2231 return true;
2232 }
2233 }
2234
2235 /*
2236 * Otherwise we've got to mess with the shared lock table.
2237 */
2239
2241
2242 /*
2243 * Normally, we don't need to re-find the lock or proclock, since we kept
2244 * their addresses in the locallock table, and they couldn't have been
2245 * removed while we were holding a lock on them. But it's possible that
2246 * the lock was taken fast-path and has since been moved to the main hash
2247 * table by another backend, in which case we will need to look up the
2248 * objects here. We assume the lock field is NULL if so.
2249 */
2250 lock = locallock->lock;
2251 if (!lock)
2252 {
2254
2255 Assert(EligibleForRelationFastPath(locktag, lockmode));
2257 locktag,
2258 locallock->hashcode,
2259 HASH_FIND,
2260 NULL);
2261 if (!lock)
2262 elog(ERROR, "failed to re-find shared lock object");
2263 locallock->lock = lock;
2264
2265 proclocktag.myLock = lock;
2266 proclocktag.myProc = MyProc;
2268 &proclocktag,
2269 HASH_FIND,
2270 NULL);
2271 if (!locallock->proclock)
2272 elog(ERROR, "failed to re-find shared proclock object");
2273 }
2274 LOCK_PRINT("LockRelease: found", lock, lockmode);
2275 proclock = locallock->proclock;
2276 PROCLOCK_PRINT("LockRelease: found", proclock);
2277
2278 /*
2279 * Double-check that we are actually holding a lock of the type we want to
2280 * release.
2281 */
2282 if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
2283 {
2284 PROCLOCK_PRINT("LockRelease: WRONGTYPE", proclock);
2286 elog(WARNING, "you don't own a lock of type %s",
2287 lockMethodTable->lockModeNames[lockmode]);
2289 return false;
2290 }
2291
2292 /*
2293 * Do the releasing. CleanUpLock will waken any now-wakable waiters.
2294 */
2295 wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
2296
2297 CleanUpLock(lock, proclock,
2298 lockMethodTable, locallock->hashcode,
2299 wakeupNeeded);
2300
2302
2304 return true;
2305}

References Assert, CleanUpLock(), CurrentResourceOwner, EligibleForRelationFastPath, elog, ERROR, FAST_PATH_REL_GROUP, FastPathLocalUseCounts, FastPathUnGrantRelationLock(), fb(), PGPROC::fpInfoLock, HASH_FIND, hash_search(), hash_search_with_hash_value(), PROCLOCK::holdMask, i, lengthof, LOCK_PRINT, LOCKBIT_ON, LockHashPartitionLock, LockMethodLocalHash, LockMethodLockHash, LockMethodProcLockHash, LockMethods, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_lockmethodid, LOG, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), MemSet, MyProc, PROCLOCK_PRINT, RemoveLocalLock(), ResourceOwnerForgetLock(), UnGrantLock(), and WARNING.

Referenced by ConditionalXactLockTableWait(), pg_advisory_unlock_int4(), pg_advisory_unlock_int8(), pg_advisory_unlock_shared_int4(), pg_advisory_unlock_shared_int8(), ReleaseLockIfHeld(), SearchSysCacheLocked1(), SpeculativeInsertionLockRelease(), SpeculativeInsertionWait(), StandbyReleaseXidEntryLocks(), UnlockApplyTransactionForSession(), UnlockDatabaseObject(), UnlockPage(), UnlockRelation(), UnlockRelationForExtension(), UnlockRelationId(), UnlockRelationIdForSession(), UnlockRelationOid(), UnlockSharedObject(), UnlockSharedObjectForSession(), UnlockTuple(), VirtualXactLock(), XactLockForVirtualXact(), XactLockTableDelete(), and XactLockTableWait().

◆ LockReleaseAll()

void LockReleaseAll ( LOCKMETHODID  lockmethodid,
bool  allLocks 
)

Definition at line 2316 of file lock.c.

2317{
2318 HASH_SEQ_STATUS status;
2320 int i,
2321 numLockModes;
2323 LOCK *lock;
2324 int partition;
2325 bool have_fast_path_lwlock = false;
2326
2328 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2330
2331#ifdef LOCK_DEBUG
2332 if (*(lockMethodTable->trace_flag))
2333 elog(LOG, "LockReleaseAll: lockmethod=%d", lockmethodid);
2334#endif
2335
2336 /*
2337 * Get rid of our fast-path VXID lock, if appropriate. Note that this is
2338 * the only way that the lock we hold on our own VXID can ever get
2339 * released: it is always and only released when a toplevel transaction
2340 * ends.
2341 */
2344
2345 numLockModes = lockMethodTable->numLockModes;
2346
2347 /*
2348 * First we run through the locallock table and get rid of unwanted
2349 * entries, then we scan the process's proclocks and get rid of those. We
2350 * do this separately because we may have multiple locallock entries
2351 * pointing to the same proclock, and we daren't end up with any dangling
2352 * pointers. Fast-path locks are cleaned up during the locallock table
2353 * scan, though.
2354 */
2356
2357 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2358 {
2359 /*
2360 * If the LOCALLOCK entry is unused, something must've gone wrong
2361 * while trying to acquire this lock. Just forget the local entry.
2362 */
2363 if (locallock->nLocks == 0)
2364 {
2366 continue;
2367 }
2368
2369 /* Ignore items that are not of the lockmethod to be removed */
2371 continue;
2372
2373 /*
2374 * If we are asked to release all locks, we can just zap the entry.
2375 * Otherwise, must scan to see if there are session locks. We assume
2376 * there is at most one lockOwners entry for session locks.
2377 */
2378 if (!allLocks)
2379 {
2380 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
2381
2382 /* If session lock is above array position 0, move it down to 0 */
2383 for (i = 0; i < locallock->numLockOwners; i++)
2384 {
2385 if (lockOwners[i].owner == NULL)
2386 lockOwners[0] = lockOwners[i];
2387 else
2388 ResourceOwnerForgetLock(lockOwners[i].owner, locallock);
2389 }
2390
2391 if (locallock->numLockOwners > 0 &&
2392 lockOwners[0].owner == NULL &&
2393 lockOwners[0].nLocks > 0)
2394 {
2395 /* Fix the locallock to show just the session locks */
2396 locallock->nLocks = lockOwners[0].nLocks;
2397 locallock->numLockOwners = 1;
2398 /* We aren't deleting this locallock, so done */
2399 continue;
2400 }
2401 else
2402 locallock->numLockOwners = 0;
2403 }
2404
2405#ifdef USE_ASSERT_CHECKING
2406
2407 /*
2408 * Tuple locks are currently held only for short durations within a
2409 * transaction. Check that we didn't forget to release one.
2410 */
2412 elog(WARNING, "tuple lock held at commit");
2413#endif
2414
2415 /*
2416 * If the lock or proclock pointers are NULL, this lock was taken via
2417 * the relation fast-path (and is not known to have been transferred).
2418 */
2419 if (locallock->proclock == NULL || locallock->lock == NULL)
2420 {
2421 LOCKMODE lockmode = locallock->tag.mode;
2422 Oid relid;
2423
2424 /* Verify that a fast-path lock is what we've got. */
2425 if (!EligibleForRelationFastPath(&locallock->tag.lock, lockmode))
2426 elog(PANIC, "locallock table corrupted");
2427
2428 /*
2429 * If we don't currently hold the LWLock that protects our
2430 * fast-path data structures, we must acquire it before attempting
2431 * to release the lock via the fast-path. We will continue to
2432 * hold the LWLock until we're done scanning the locallock table,
2433 * unless we hit a transferred fast-path lock. (XXX is this
2434 * really such a good idea? There could be a lot of entries ...)
2435 */
2437 {
2439 have_fast_path_lwlock = true;
2440 }
2441
2442 /* Attempt fast-path release. */
2443 relid = locallock->tag.lock.locktag_field2;
2444 if (FastPathUnGrantRelationLock(relid, lockmode))
2445 {
2447 continue;
2448 }
2449
2450 /*
2451 * Our lock, originally taken via the fast path, has been
2452 * transferred to the main lock table. That's going to require
2453 * some extra work, so release our fast-path lock before starting.
2454 */
2456 have_fast_path_lwlock = false;
2457
2458 /*
2459 * Now dump the lock. We haven't got a pointer to the LOCK or
2460 * PROCLOCK in this case, so we have to handle this a bit
2461 * differently than a normal lock release. Unfortunately, this
2462 * requires an extra LWLock acquire-and-release cycle on the
2463 * partitionLock, but hopefully it shouldn't happen often.
2464 */
2466 &locallock->tag.lock, lockmode, false);
2468 continue;
2469 }
2470
2471 /* Mark the proclock to show we need to release this lockmode */
2472 if (locallock->nLocks > 0)
2473 locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
2474
2475 /* And remove the locallock hashtable entry */
2477 }
2478
2479 /* Done with the fast-path data structures */
2482
2483 /*
2484 * Now, scan each lock partition separately.
2485 */
2487 {
2489 dlist_head *procLocks = &MyProc->myProcLocks[partition];
2491
2493
2494 /*
2495 * If the proclock list for this partition is empty, we can skip
2496 * acquiring the partition lock. This optimization is trickier than
2497 * it looks, because another backend could be in process of adding
2498 * something to our proclock list due to promoting one of our
2499 * fast-path locks. However, any such lock must be one that we
2500 * decided not to delete above, so it's okay to skip it again now;
2501 * we'd just decide not to delete it again. We must, however, be
2502 * careful to re-fetch the list header once we've acquired the
2503 * partition lock, to be sure we have a valid, up-to-date pointer.
2504 * (There is probably no significant risk if pointer fetch/store is
2505 * atomic, but we don't wish to assume that.)
2506 *
2507 * XXX This argument assumes that the locallock table correctly
2508 * represents all of our fast-path locks. While allLocks mode
2509 * guarantees to clean up all of our normal locks regardless of the
2510 * locallock situation, we lose that guarantee for fast-path locks.
2511 * This is not ideal.
2512 */
2513 if (dlist_is_empty(procLocks))
2514 continue; /* needn't examine this partition */
2515
2517
2519 {
2520 PROCLOCK *proclock = dlist_container(PROCLOCK, procLink, proclock_iter.cur);
2521 bool wakeupNeeded = false;
2522
2523 Assert(proclock->tag.myProc == MyProc);
2524
2525 lock = proclock->tag.myLock;
2526
2527 /* Ignore items that are not of the lockmethod to be removed */
2528 if (LOCK_LOCKMETHOD(*lock) != lockmethodid)
2529 continue;
2530
2531 /*
2532 * In allLocks mode, force release of all locks even if locallock
2533 * table had problems
2534 */
2535 if (allLocks)
2536 proclock->releaseMask = proclock->holdMask;
2537 else
2538 Assert((proclock->releaseMask & ~proclock->holdMask) == 0);
2539
2540 /*
2541 * Ignore items that have nothing to be released, unless they have
2542 * holdMask == 0 and are therefore recyclable
2543 */
2544 if (proclock->releaseMask == 0 && proclock->holdMask != 0)
2545 continue;
2546
2547 PROCLOCK_PRINT("LockReleaseAll", proclock);
2548 LOCK_PRINT("LockReleaseAll", lock, 0);
2549 Assert(lock->nRequested >= 0);
2550 Assert(lock->nGranted >= 0);
2551 Assert(lock->nGranted <= lock->nRequested);
2552 Assert((proclock->holdMask & ~lock->grantMask) == 0);
2553
2554 /*
2555 * Release the previously-marked lock modes
2556 */
2557 for (i = 1; i <= numLockModes; i++)
2558 {
2559 if (proclock->releaseMask & LOCKBIT_ON(i))
2560 wakeupNeeded |= UnGrantLock(lock, i, proclock,
2562 }
2563 Assert((lock->nRequested >= 0) && (lock->nGranted >= 0));
2564 Assert(lock->nGranted <= lock->nRequested);
2565 LOCK_PRINT("LockReleaseAll: updated", lock, 0);
2566
2567 proclock->releaseMask = 0;
2568
2569 /* CleanUpLock will wake up waiters if needed. */
2570 CleanUpLock(lock, proclock,
2572 LockTagHashCode(&lock->tag),
2573 wakeupNeeded);
2574 } /* loop over PROCLOCKs within this partition */
2575
2577 } /* loop over partitions */
2578
2579#ifdef LOCK_DEBUG
2580 if (*(lockMethodTable->trace_flag))
2581 elog(LOG, "LockReleaseAll done");
2582#endif
2583}

References Assert, CleanUpLock(), DEFAULT_LOCKMETHOD, dlist_container, dlist_foreach_modify, dlist_is_empty(), EligibleForRelationFastPath, elog, ERROR, FastPathUnGrantRelationLock(), fb(), PGPROC::fpInfoLock, LOCK::grantMask, hash_seq_init(), hash_seq_search(), PROCLOCK::holdMask, i, lengthof, LOCALLOCK_LOCKMETHOD, LOCALLOCK_LOCKTAG, LOCK_LOCKMETHOD, LOCK_PRINT, LOCKBIT_ON, LockHashPartitionLockByIndex, LockMethodLocalHash, LockMethods, LockRefindAndRelease(), LOCKTAG_TUPLE, LockTagHashCode(), LOG, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), PROCLOCKTAG::myLock, MyProc, PROCLOCKTAG::myProc, PGPROC::myProcLocks, LOCK::nGranted, LOCALLOCKOWNER::nLocks, LOCK::nRequested, NUM_LOCK_PARTITIONS, LOCALLOCKOWNER::owner, PANIC, PROCLOCK_PRINT, PROCLOCK::releaseMask, RemoveLocalLock(), ResourceOwnerForgetLock(), LOCK::tag, PROCLOCK::tag, UnGrantLock(), VirtualXactLockTableCleanup(), and WARNING.

Referenced by DiscardAll(), logicalrep_worker_onexit(), ProcReleaseLocks(), and ShutdownPostgres().

◆ LockReleaseCurrentOwner()

void LockReleaseCurrentOwner ( LOCALLOCK **  locallocks,
int  nlocks 
)

Definition at line 2620 of file lock.c.

2621{
2622 if (locallocks == NULL)
2623 {
2624 HASH_SEQ_STATUS status;
2626
2628
2629 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2631 }
2632 else
2633 {
2634 int i;
2635
2636 for (i = nlocks - 1; i >= 0; i--)
2638 }
2639}

References fb(), hash_seq_init(), hash_seq_search(), i, LockMethodLocalHash, and ReleaseLockIfHeld().

Referenced by ResourceOwnerReleaseInternal().

◆ LockReleaseSession()

void LockReleaseSession ( LOCKMETHODID  lockmethodid)

Definition at line 2590 of file lock.c.

2591{
2592 HASH_SEQ_STATUS status;
2594
2596 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2597
2599
2600 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2601 {
2602 /* Ignore items that are not of the specified lock method */
2604 continue;
2605
2607 }
2608}

References elog, ERROR, fb(), hash_seq_init(), hash_seq_search(), lengthof, LOCALLOCK_LOCKMETHOD, LockMethodLocalHash, LockMethods, and ReleaseLockIfHeld().

Referenced by pg_advisory_unlock_all().

◆ LockTagHashCode()

◆ LockWaiterCount()

int LockWaiterCount ( const LOCKTAG locktag)

Definition at line 4837 of file lock.c.

4838{
4840 LOCK *lock;
4841 bool found;
4842 uint32 hashcode;
4844 int waiters = 0;
4845
4847 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4848
4849 hashcode = LockTagHashCode(locktag);
4852
4854 locktag,
4855 hashcode,
4856 HASH_FIND,
4857 &found);
4858 if (found)
4859 {
4860 Assert(lock != NULL);
4861 waiters = lock->nRequested;
4862 }
4864
4865 return waiters;
4866}

References Assert, elog, ERROR, fb(), HASH_FIND, hash_search_with_hash_value(), lengthof, LockHashPartitionLock, LockMethodLockHash, LockMethods, LOCKTAG::locktag_lockmethodid, LockTagHashCode(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), and LOCK::nRequested.

Referenced by RelationExtensionLockWaiterCount().

◆ MarkLockClear()

void MarkLockClear ( LOCALLOCK locallock)

Definition at line 1929 of file lock.c.

1930{
1931 Assert(locallock->nLocks > 0);
1932 locallock->lockCleared = true;
1933}

References Assert, and fb().

Referenced by ConditionalLockDatabaseObject(), ConditionalLockRelation(), ConditionalLockRelationOid(), ConditionalLockSharedObject(), LockRelation(), LockRelationId(), and LockRelationOid().

◆ PostPrepare_Locks()

void PostPrepare_Locks ( FullTransactionId  fxid)

Definition at line 3581 of file lock.c.

3582{
3583 PGPROC *newproc = TwoPhaseGetDummyProc(fxid, false);
3584 HASH_SEQ_STATUS status;
3586 LOCK *lock;
3587 PROCLOCK *proclock;
3589 int partition;
3590
3591 /* Can't prepare a lock group follower. */
3594
3595 /* This is a critical section: any error means big trouble */
3597
3598 /*
3599 * First we run through the locallock table and get rid of unwanted
3600 * entries, then we scan the process's proclocks and transfer them to the
3601 * target proc.
3602 *
3603 * We do this separately because we may have multiple locallock entries
3604 * pointing to the same proclock, and we daren't end up with any dangling
3605 * pointers.
3606 */
3608
3609 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3610 {
3611 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3612 bool haveSessionLock;
3613 bool haveXactLock;
3614 int i;
3615
3616 if (locallock->proclock == NULL || locallock->lock == NULL)
3617 {
3618 /*
3619 * We must've run out of shared memory while trying to set up this
3620 * lock. Just forget the local entry.
3621 */
3622 Assert(locallock->nLocks == 0);
3624 continue;
3625 }
3626
3627 /* Ignore VXID locks */
3628 if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3629 continue;
3630
3631 /* Scan to see whether we hold it at session or transaction level */
3632 haveSessionLock = haveXactLock = false;
3633 for (i = locallock->numLockOwners - 1; i >= 0; i--)
3634 {
3635 if (lockOwners[i].owner == NULL)
3636 haveSessionLock = true;
3637 else
3638 haveXactLock = true;
3639 }
3640
3641 /* Ignore it if we have only session lock */
3642 if (!haveXactLock)
3643 continue;
3644
3645 /* This can't happen, because we already checked it */
3646 if (haveSessionLock)
3647 ereport(PANIC,
3649 errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3650
3651 /* Mark the proclock to show we need to release this lockmode */
3652 if (locallock->nLocks > 0)
3653 locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
3654
3655 /* And remove the locallock hashtable entry */
3657 }
3658
3659 /*
3660 * Now, scan each lock partition separately.
3661 */
3663 {
3665 dlist_head *procLocks = &(MyProc->myProcLocks[partition]);
3667
3669
3670 /*
3671 * If the proclock list for this partition is empty, we can skip
3672 * acquiring the partition lock. This optimization is safer than the
3673 * situation in LockReleaseAll, because we got rid of any fast-path
3674 * locks during AtPrepare_Locks, so there cannot be any case where
3675 * another backend is adding something to our lists now. For safety,
3676 * though, we code this the same way as in LockReleaseAll.
3677 */
3678 if (dlist_is_empty(procLocks))
3679 continue; /* needn't examine this partition */
3680
3682
3684 {
3685 proclock = dlist_container(PROCLOCK, procLink, proclock_iter.cur);
3686
3687 Assert(proclock->tag.myProc == MyProc);
3688
3689 lock = proclock->tag.myLock;
3690
3691 /* Ignore VXID locks */
3693 continue;
3694
3695 PROCLOCK_PRINT("PostPrepare_Locks", proclock);
3696 LOCK_PRINT("PostPrepare_Locks", lock, 0);
3697 Assert(lock->nRequested >= 0);
3698 Assert(lock->nGranted >= 0);
3699 Assert(lock->nGranted <= lock->nRequested);
3700 Assert((proclock->holdMask & ~lock->grantMask) == 0);
3701
3702 /* Ignore it if nothing to release (must be a session lock) */
3703 if (proclock->releaseMask == 0)
3704 continue;
3705
3706 /* Else we should be releasing all locks */
3707 if (proclock->releaseMask != proclock->holdMask)
3708 elog(PANIC, "we seem to have dropped a bit somewhere");
3709
3710 /*
3711 * We cannot simply modify proclock->tag.myProc to reassign
3712 * ownership of the lock, because that's part of the hash key and
3713 * the proclock would then be in the wrong hash chain. Instead
3714 * use hash_update_hash_key. (We used to create a new hash entry,
3715 * but that risks out-of-memory failure if other processes are
3716 * busy making proclocks too.) We must unlink the proclock from
3717 * our procLink chain and put it into the new proc's chain, too.
3718 *
3719 * Note: the updated proclock hash key will still belong to the
3720 * same hash partition, cf proclock_hash(). So the partition lock
3721 * we already hold is sufficient for this.
3722 */
3723 dlist_delete(&proclock->procLink);
3724
3725 /*
3726 * Create the new hash key for the proclock.
3727 */
3728 proclocktag.myLock = lock;
3729 proclocktag.myProc = newproc;
3730
3731 /*
3732 * Update groupLeader pointer to point to the new proc. (We'd
3733 * better not be a member of somebody else's lock group!)
3734 */
3735 Assert(proclock->groupLeader == proclock->tag.myProc);
3736 proclock->groupLeader = newproc;
3737
3738 /*
3739 * Update the proclock. We should not find any existing entry for
3740 * the same hash key, since there can be only one entry for any
3741 * given lock with my own proc.
3742 */
3744 proclock,
3745 &proclocktag))
3746 elog(PANIC, "duplicate entry found while reassigning a prepared transaction's locks");
3747
3748 /* Re-link into the new proc's proclock list */
3749 dlist_push_tail(&newproc->myProcLocks[partition], &proclock->procLink);
3750
3751 PROCLOCK_PRINT("PostPrepare_Locks: updated", proclock);
3752 } /* loop over PROCLOCKs within this partition */
3753
3755 } /* loop over partitions */
3756
3758}

References Assert, dlist_container, dlist_delete(), dlist_foreach_modify, dlist_is_empty(), dlist_push_tail(), elog, END_CRIT_SECTION, ereport, errcode(), errmsg, fb(), LOCK::grantMask, PROCLOCK::groupLeader, hash_seq_init(), hash_seq_search(), hash_update_hash_key(), PROCLOCK::holdMask, i, LOCK_PRINT, LOCKBIT_ON, PGPROC::lockGroupLeader, LockHashPartitionLockByIndex, LockMethodLocalHash, LockMethodProcLockHash, LOCKTAG::locktag_type, LOCKTAG_VIRTUALTRANSACTION, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), PROCLOCKTAG::myLock, MyProc, PROCLOCKTAG::myProc, PGPROC::myProcLocks, LOCK::nGranted, LOCK::nRequested, NUM_LOCK_PARTITIONS, PANIC, PROCLOCK::procLink, PROCLOCK_PRINT, PROCLOCK::releaseMask, RemoveLocalLock(), START_CRIT_SECTION, LOCK::tag, PROCLOCK::tag, and TwoPhaseGetDummyProc().

Referenced by PrepareTransaction().

◆ proclock_hash()

static uint32 proclock_hash ( const void key,
Size  keysize 
)
static

Definition at line 572 of file lock.c.

573{
574 const PROCLOCKTAG *proclocktag = (const PROCLOCKTAG *) key;
577
578 Assert(keysize == sizeof(PROCLOCKTAG));
579
580 /* Look into the associated LOCK object, and compute its hash code */
581 lockhash = LockTagHashCode(&proclocktag->myLock->tag);
582
583 /*
584 * To make the hash code also depend on the PGPROC, we xor the proc
585 * struct's address into the hash code, left-shifted so that the
586 * partition-number bits don't change. Since this is only a hash, we
587 * don't care if we lose high-order bits of the address; use an
588 * intermediate variable to suppress cast-pointer-to-int warnings.
589 */
592
593 return lockhash;
594}

References Assert, DatumGetUInt32(), fb(), LockTagHashCode(), LOG2_NUM_LOCK_PARTITIONS, and PointerGetDatum().

Referenced by LockManagerShmemRequest().

◆ ProcLockHashCode()

static uint32 ProcLockHashCode ( const PROCLOCKTAG proclocktag,
uint32  hashcode 
)
inlinestatic

Definition at line 603 of file lock.c.

604{
605 uint32 lockhash = hashcode;
607
608 /*
609 * This must match proclock_hash()!
610 */
613
614 return lockhash;
615}

References DatumGetUInt32(), fb(), LOG2_NUM_LOCK_PARTITIONS, and PointerGetDatum().

Referenced by CleanUpLock(), FastPathGetRelationLockEntry(), lock_twophase_recover(), LockAcquireExtended(), LockRefindAndRelease(), and SetupLockInTable().

◆ ReleaseLockIfHeld()

static void ReleaseLockIfHeld ( LOCALLOCK locallock,
bool  sessionLock 
)
static

Definition at line 2655 of file lock.c.

2656{
2657 ResourceOwner owner;
2658 LOCALLOCKOWNER *lockOwners;
2659 int i;
2660
2661 /* Identify owner for lock (must match LockRelease!) */
2662 if (sessionLock)
2663 owner = NULL;
2664 else
2665 owner = CurrentResourceOwner;
2666
2667 /* Scan to see if there are any locks belonging to the target owner */
2668 lockOwners = locallock->lockOwners;
2669 for (i = locallock->numLockOwners - 1; i >= 0; i--)
2670 {
2671 if (lockOwners[i].owner == owner)
2672 {
2673 Assert(lockOwners[i].nLocks > 0);
2674 if (lockOwners[i].nLocks < locallock->nLocks)
2675 {
2676 /*
2677 * We will still hold this lock after forgetting this
2678 * ResourceOwner.
2679 */
2680 locallock->nLocks -= lockOwners[i].nLocks;
2681 /* compact out unused slot */
2682 locallock->numLockOwners--;
2683 if (owner != NULL)
2685 if (i < locallock->numLockOwners)
2686 lockOwners[i] = lockOwners[locallock->numLockOwners];
2687 }
2688 else
2689 {
2690 Assert(lockOwners[i].nLocks == locallock->nLocks);
2691 /* We want to call LockRelease just once */
2692 lockOwners[i].nLocks = 1;
2693 locallock->nLocks = 1;
2694 if (!LockRelease(&locallock->tag.lock,
2695 locallock->tag.mode,
2696 sessionLock))
2697 elog(WARNING, "ReleaseLockIfHeld: failed??");
2698 }
2699 break;
2700 }
2701 }
2702}

References Assert, CurrentResourceOwner, elog, fb(), i, LockRelease(), LOCALLOCKOWNER::nLocks, ResourceOwnerForgetLock(), and WARNING.

Referenced by LockReleaseCurrentOwner(), and LockReleaseSession().

◆ RemoveFromWaitQueue()

void RemoveFromWaitQueue ( PGPROC proc,
uint32  hashcode 
)

Definition at line 2055 of file lock.c.

2056{
2057 LOCK *waitLock = proc->waitLock;
2058 PROCLOCK *proclock = proc->waitProcLock;
2059 LOCKMODE lockmode = proc->waitLockMode;
2061
2062 /* Make sure proc is waiting */
2065 Assert(waitLock);
2066 Assert(!dclist_is_empty(&waitLock->waitProcs));
2068
2069 /* Remove proc from lock's wait queue */
2071
2072 /* Undo increments of request counts by waiting process */
2073 Assert(waitLock->nRequested > 0);
2074 Assert(waitLock->nRequested > proc->waitLock->nGranted);
2075 waitLock->nRequested--;
2076 Assert(waitLock->requested[lockmode] > 0);
2077 waitLock->requested[lockmode]--;
2078 /* don't forget to clear waitMask bit if appropriate */
2079 if (waitLock->granted[lockmode] == waitLock->requested[lockmode])
2080 waitLock->waitMask &= LOCKBIT_OFF(lockmode);
2081
2082 /* Clean up the proc's own state, and pass it the ok/fail signal */
2083 proc->waitLock = NULL;
2084 proc->waitProcLock = NULL;
2086
2087 /*
2088 * Delete the proclock immediately if it represents no already-held locks.
2089 * (This must happen now because if the owner of the lock decides to
2090 * release it, and the requested/granted counts then go to zero,
2091 * LockRelease expects there to be no remaining proclocks.) Then see if
2092 * any other waiters for the lock can be woken up now.
2093 */
2094 CleanUpLock(waitLock, proclock,
2095 LockMethods[lockmethodid], hashcode,
2096 true);
2097}

References Assert, CleanUpLock(), dclist_delete_from_thoroughly(), dclist_is_empty(), dlist_node_is_detached(), fb(), LOCK::granted, lengthof, LOCK_LOCKMETHOD, LOCKBIT_OFF, LockMethods, LOCK::nGranted, LOCK::nRequested, PROC_WAIT_STATUS_ERROR, PROC_WAIT_STATUS_WAITING, LOCK::requested, PGPROC::waitLink, PGPROC::waitLock, PGPROC::waitLockMode, LOCK::waitMask, PGPROC::waitProcLock, LOCK::waitProcs, and PGPROC::waitStatus.

Referenced by CheckDeadLock(), and LockErrorCleanup().

◆ RemoveLocalLock()

static void RemoveLocalLock ( LOCALLOCK locallock)
static

Definition at line 1485 of file lock.c.

1486{
1487 int i;
1488
1489 for (i = locallock->numLockOwners - 1; i >= 0; i--)
1490 {
1491 if (locallock->lockOwners[i].owner != NULL)
1492 ResourceOwnerForgetLock(locallock->lockOwners[i].owner, locallock);
1493 }
1494 locallock->numLockOwners = 0;
1495 if (locallock->lockOwners != NULL)
1496 pfree(locallock->lockOwners);
1497 locallock->lockOwners = NULL;
1498
1499 if (locallock->holdsStrongLockCount)
1500 {
1502
1504
1508 locallock->holdsStrongLockCount = false;
1510 }
1511
1513 &(locallock->tag),
1514 HASH_REMOVE, NULL))
1515 elog(WARNING, "locallock table corrupted");
1516
1517 /*
1518 * Indicate that the lock is released for certain types of locks
1519 */
1521}

References Assert, CheckAndSetLockHeld(), FastPathStrongRelationLockData::count, elog, FastPathStrongLockHashPartition, FastPathStrongRelationLocks, fb(), HASH_REMOVE, hash_search(), i, LockMethodLocalHash, FastPathStrongRelationLockData::mutex, pfree(), ResourceOwnerForgetLock(), SpinLockAcquire(), SpinLockRelease(), and WARNING.

Referenced by LockAcquireExtended(), LockHasWaiters(), LockRelease(), LockReleaseAll(), and PostPrepare_Locks().

◆ ResetAwaitedLock()

void ResetAwaitedLock ( void  )

Definition at line 1916 of file lock.c.

1917{
1918 awaitedLock = NULL;
1919}

References awaitedLock, and fb().

Referenced by LockErrorCleanup().

◆ SetupLockInTable()

static PROCLOCK * SetupLockInTable ( LockMethod  lockMethodTable,
PGPROC proc,
const LOCKTAG locktag,
uint32  hashcode,
LOCKMODE  lockmode 
)
static

Definition at line 1292 of file lock.c.

1294{
1295 LOCK *lock;
1296 PROCLOCK *proclock;
1299 bool found;
1300
1301 /*
1302 * Find or create a lock with this tag.
1303 */
1305 locktag,
1306 hashcode,
1308 &found);
1309 if (!lock)
1310 return NULL;
1311
1312 /*
1313 * if it's a new lock object, initialize it
1314 */
1315 if (!found)
1316 {
1317 lock->grantMask = 0;
1318 lock->waitMask = 0;
1319 dlist_init(&lock->procLocks);
1320 dclist_init(&lock->waitProcs);
1321 lock->nRequested = 0;
1322 lock->nGranted = 0;
1323 MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
1324 MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
1325 LOCK_PRINT("LockAcquire: new", lock, lockmode);
1326 }
1327 else
1328 {
1329 LOCK_PRINT("LockAcquire: found", lock, lockmode);
1330 Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
1331 Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
1332 Assert(lock->nGranted <= lock->nRequested);
1333 }
1334
1335 /*
1336 * Create the hash key for the proclock table.
1337 */
1338 proclocktag.myLock = lock;
1339 proclocktag.myProc = proc;
1340
1342
1343 /*
1344 * Find or create a proclock entry with this tag
1345 */
1347 &proclocktag,
1350 &found);
1351 if (!proclock)
1352 {
1353 /* Oops, not enough shmem for the proclock */
1354 if (lock->nRequested == 0)
1355 {
1356 /*
1357 * There are no other requestors of this lock, so garbage-collect
1358 * the lock object. We *must* do this to avoid a permanent leak
1359 * of shared memory, because there won't be anything to cause
1360 * anyone to release the lock object later.
1361 */
1362 Assert(dlist_is_empty(&(lock->procLocks)));
1364 &(lock->tag),
1365 hashcode,
1367 NULL))
1368 elog(PANIC, "lock table corrupted");
1369 }
1370 return NULL;
1371 }
1372
1373 /*
1374 * If new, initialize the new entry
1375 */
1376 if (!found)
1377 {
1379
1380 /*
1381 * It might seem unsafe to access proclock->groupLeader without a
1382 * lock, but it's not really. Either we are initializing a proclock
1383 * on our own behalf, in which case our group leader isn't changing
1384 * because the group leader for a process can only ever be changed by
1385 * the process itself; or else we are transferring a fast-path lock to
1386 * the main lock table, in which case that process can't change its
1387 * lock group leader without first releasing all of its locks (and in
1388 * particular the one we are currently transferring).
1389 */
1390 proclock->groupLeader = proc->lockGroupLeader != NULL ?
1391 proc->lockGroupLeader : proc;
1392 proclock->holdMask = 0;
1393 proclock->releaseMask = 0;
1394 /* Add proclock to appropriate lists */
1395 dlist_push_tail(&lock->procLocks, &proclock->lockLink);
1396 dlist_push_tail(&proc->myProcLocks[partition], &proclock->procLink);
1397 PROCLOCK_PRINT("LockAcquire: new", proclock);
1398 }
1399 else
1400 {
1401 PROCLOCK_PRINT("LockAcquire: found", proclock);
1402 Assert((proclock->holdMask & ~lock->grantMask) == 0);
1403
1404#ifdef CHECK_DEADLOCK_RISK
1405
1406 /*
1407 * Issue warning if we already hold a lower-level lock on this object
1408 * and do not hold a lock of the requested level or higher. This
1409 * indicates a deadlock-prone coding practice (eg, we'd have a
1410 * deadlock if another backend were following the same code path at
1411 * about the same time).
1412 *
1413 * This is not enabled by default, because it may generate log entries
1414 * about user-level coding practices that are in fact safe in context.
1415 * It can be enabled to help find system-level problems.
1416 *
1417 * XXX Doing numeric comparison on the lockmodes is a hack; it'd be
1418 * better to use a table. For now, though, this works.
1419 */
1420 {
1421 int i;
1422
1423 for (i = lockMethodTable->numLockModes; i > 0; i--)
1424 {
1425 if (proclock->holdMask & LOCKBIT_ON(i))
1426 {
1427 if (i >= (int) lockmode)
1428 break; /* safe: we have a lock >= req level */
1429 elog(LOG, "deadlock risk: raising lock level"
1430 " from %s to %s on object %u/%u/%u",
1431 lockMethodTable->lockModeNames[i],
1432 lockMethodTable->lockModeNames[lockmode],
1433 lock->tag.locktag_field1, lock->tag.locktag_field2,
1434 lock->tag.locktag_field3);
1435 break;
1436 }
1437 }
1438 }
1439#endif /* CHECK_DEADLOCK_RISK */
1440 }
1441
1442 /*
1443 * lock->nRequested and lock->requested[] count the total number of
1444 * requests, whether granted or waiting, so increment those immediately.
1445 * The other counts don't increment till we get the lock.
1446 */
1447 lock->nRequested++;
1448 lock->requested[lockmode]++;
1449 Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
1450
1451 /*
1452 * We shouldn't already hold the desired lock; else locallock table is
1453 * broken.
1454 */
1455 if (proclock->holdMask & LOCKBIT_ON(lockmode))
1456 elog(ERROR, "lock %s on object %u/%u/%u is already held",
1457 lockMethodTable->lockModeNames[lockmode],
1458 lock->tag.locktag_field1, lock->tag.locktag_field2,
1459 lock->tag.locktag_field3);
1460
1461 return proclock;
1462}

References Assert, dclist_init(), dlist_init(), dlist_is_empty(), dlist_push_tail(), elog, ERROR, fb(), LOCK::granted, LOCK::grantMask, PROCLOCK::groupLeader, HASH_ENTER_NULL, HASH_REMOVE, hash_search_with_hash_value(), PROCLOCK::holdMask, i, LOCK_PRINT, LOCKBIT_ON, PGPROC::lockGroupLeader, LockHashPartition, PROCLOCK::lockLink, LockMethodLockHash, LockMethodProcLockHash, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_field3, LOG, MAX_LOCKMODES, MemSet, PGPROC::myProcLocks, LOCK::nGranted, LOCK::nRequested, PANIC, PROCLOCK::procLink, PROCLOCK_PRINT, ProcLockHashCode(), LOCK::procLocks, PROCLOCK::releaseMask, LOCK::requested, LOCK::tag, LOCK::waitMask, and LOCK::waitProcs.

Referenced by FastPathGetRelationLockEntry(), FastPathTransferRelationLocks(), LockAcquireExtended(), and VirtualXactLock().

◆ UnGrantLock()

static bool UnGrantLock ( LOCK lock,
LOCKMODE  lockmode,
PROCLOCK proclock,
LockMethod  lockMethodTable 
)
static

Definition at line 1690 of file lock.c.

1692{
1693 bool wakeupNeeded = false;
1694
1695 Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
1696 Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
1697 Assert(lock->nGranted <= lock->nRequested);
1698
1699 /*
1700 * fix the general lock stats
1701 */
1702 lock->nRequested--;
1703 lock->requested[lockmode]--;
1704 lock->nGranted--;
1705 lock->granted[lockmode]--;
1706
1707 if (lock->granted[lockmode] == 0)
1708 {
1709 /* change the conflict mask. No more of this lock type. */
1710 lock->grantMask &= LOCKBIT_OFF(lockmode);
1711 }
1712
1713 LOCK_PRINT("UnGrantLock: updated", lock, lockmode);
1714
1715 /*
1716 * We need only run ProcLockWakeup if the released lock conflicts with at
1717 * least one of the lock types requested by waiter(s). Otherwise whatever
1718 * conflict made them wait must still exist. NOTE: before MVCC, we could
1719 * skip wakeup if lock->granted[lockmode] was still positive. But that's
1720 * not true anymore, because the remaining granted locks might belong to
1721 * some waiter, who could now be awakened because he doesn't conflict with
1722 * his own locks.
1723 */
1724 if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
1725 wakeupNeeded = true;
1726
1727 /*
1728 * Now fix the per-proclock state.
1729 */
1730 proclock->holdMask &= LOCKBIT_OFF(lockmode);
1731 PROCLOCK_PRINT("UnGrantLock: updated", proclock);
1732
1733 return wakeupNeeded;
1734}

References Assert, fb(), LOCK::granted, LOCK::grantMask, PROCLOCK::holdMask, LOCK_PRINT, LOCKBIT_OFF, LOCK::nGranted, LOCK::nRequested, PROCLOCK_PRINT, LOCK::requested, and LOCK::waitMask.

Referenced by LockRefindAndRelease(), LockRelease(), and LockReleaseAll().

◆ VirtualXactLock()

bool VirtualXactLock ( VirtualTransactionId  vxid,
bool  wait 
)

Definition at line 4726 of file lock.c.

4727{
4728 LOCKTAG tag;
4729 PGPROC *proc;
4731
4733
4735 /* no vxid lock; localTransactionId is a normal, locked XID */
4736 return XactLockForVirtualXact(vxid, vxid.localTransactionId, wait);
4737
4739
4740 /*
4741 * If a lock table entry must be made, this is the PGPROC on whose behalf
4742 * it must be done. Note that the transaction might end or the PGPROC
4743 * might be reassigned to a new backend before we get around to examining
4744 * it, but it doesn't matter. If we find upon examination that the
4745 * relevant lxid is no longer running here, that's enough to prove that
4746 * it's no longer running anywhere.
4747 */
4748 proc = ProcNumberGetProc(vxid.procNumber);
4749 if (proc == NULL)
4750 return XactLockForVirtualXact(vxid, InvalidTransactionId, wait);
4751
4752 /*
4753 * We must acquire this lock before checking the procNumber and lxid
4754 * against the ones we're waiting for. The target backend will only set
4755 * or clear lxid while holding this lock.
4756 */
4758
4759 if (proc->vxid.procNumber != vxid.procNumber
4761 {
4762 /* VXID ended */
4763 LWLockRelease(&proc->fpInfoLock);
4764 return XactLockForVirtualXact(vxid, InvalidTransactionId, wait);
4765 }
4766
4767 /*
4768 * If we aren't asked to wait, there's no need to set up a lock table
4769 * entry. The transaction is still in progress, so just return false.
4770 */
4771 if (!wait)
4772 {
4773 LWLockRelease(&proc->fpInfoLock);
4774 return false;
4775 }
4776
4777 /*
4778 * OK, we're going to need to sleep on the VXID. But first, we must set
4779 * up the primary lock table entry, if needed (ie, convert the proc's
4780 * fast-path lock on its VXID to a regular lock).
4781 */
4782 if (proc->fpVXIDLock)
4783 {
4784 PROCLOCK *proclock;
4785 uint32 hashcode;
4787
4788 hashcode = LockTagHashCode(&tag);
4789
4792
4794 &tag, hashcode, ExclusiveLock);
4795 if (!proclock)
4796 {
4798 LWLockRelease(&proc->fpInfoLock);
4799 ereport(ERROR,
4801 errmsg("out of shared memory"),
4802 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
4803 }
4804 GrantLock(proclock->tag.myLock, proclock, ExclusiveLock);
4805
4807
4808 proc->fpVXIDLock = false;
4809 }
4810
4811 /*
4812 * If the proc has an XID now, we'll avoid a TwoPhaseGetXidByVirtualXID()
4813 * search. The proc might have assigned this XID but not yet locked it,
4814 * in which case the proc will lock this XID before releasing the VXID.
4815 * The fpInfoLock critical section excludes VirtualXactLockTableCleanup(),
4816 * so we won't save an XID of a different VXID. It doesn't matter whether
4817 * we save this before or after setting up the primary lock table entry.
4818 */
4819 xid = proc->xid;
4820
4821 /* Done with proc->fpLockBits */
4822 LWLockRelease(&proc->fpInfoLock);
4823
4824 /* Time to wait. */
4825 (void) LockAcquire(&tag, ShareLock, false, false);
4826
4827 LockRelease(&tag, ShareLock, false);
4828 return XactLockForVirtualXact(vxid, xid, wait);
4829}

References Assert, DEFAULT_LOCKMETHOD, ereport, errcode(), errhint(), errmsg, ERROR, ExclusiveLock, fb(), PGPROC::fpInfoLock, PGPROC::fpLocalTransactionId, PGPROC::fpVXIDLock, GrantLock(), InvalidTransactionId, VirtualTransactionId::localTransactionId, LockAcquire(), LockHashPartitionLock, LockMethods, LockRelease(), LockTagHashCode(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), PROCLOCKTAG::myLock, VirtualTransactionId::procNumber, PGPROC::procNumber, ProcNumberGetProc(), SET_LOCKTAG_VIRTUALTRANSACTION, SetupLockInTable(), ShareLock, PROCLOCK::tag, VirtualTransactionIdIsRecoveredPreparedXact, VirtualTransactionIdIsValid, PGPROC::vxid, XactLockForVirtualXact(), and PGPROC::xid.

Referenced by ResolveRecoveryConflictWithVirtualXIDs(), WaitForLockersMultiple(), and WaitForOlderSnapshots().

◆ VirtualXactLockTableCleanup()

void VirtualXactLockTableCleanup ( void  )

Definition at line 4626 of file lock.c.

4627{
4628 bool fastpath;
4629 LocalTransactionId lxid;
4630
4632
4633 /*
4634 * Clean up shared memory state.
4635 */
4637
4638 fastpath = MyProc->fpVXIDLock;
4640 MyProc->fpVXIDLock = false;
4642
4644
4645 /*
4646 * If fpVXIDLock has been cleared without touching fpLocalTransactionId,
4647 * that means someone transferred the lock to the main lock table.
4648 */
4649 if (!fastpath && LocalTransactionIdIsValid(lxid))
4650 {
4652 LOCKTAG locktag;
4653
4654 vxid.procNumber = MyProcNumber;
4655 vxid.localTransactionId = lxid;
4656 SET_LOCKTAG_VIRTUALTRANSACTION(locktag, vxid);
4657
4659 &locktag, ExclusiveLock, false);
4660 }
4661}

References Assert, DEFAULT_LOCKMETHOD, ExclusiveLock, PGPROC::fpInfoLock, PGPROC::fpLocalTransactionId, PGPROC::fpVXIDLock, INVALID_PROC_NUMBER, InvalidLocalTransactionId, VirtualTransactionId::localTransactionId, LocalTransactionIdIsValid, LockMethods, LockRefindAndRelease(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), MyProc, MyProcNumber, VirtualTransactionId::procNumber, PGPROC::procNumber, SET_LOCKTAG_VIRTUALTRANSACTION, and PGPROC::vxid.

Referenced by LockReleaseAll(), and ShutdownRecoveryTransactionEnvironment().

◆ VirtualXactLockTableInsert()

◆ WaitOnLock()

static ProcWaitStatus WaitOnLock ( LOCALLOCK locallock,
ResourceOwner  owner 
)
static

Definition at line 1941 of file lock.c.

1942{
1945
1946 TRACE_POSTGRESQL_LOCK_WAIT_START(locallock->tag.lock.locktag_field1,
1947 locallock->tag.lock.locktag_field2,
1948 locallock->tag.lock.locktag_field3,
1949 locallock->tag.lock.locktag_field4,
1950 locallock->tag.lock.locktag_type,
1951 locallock->tag.mode);
1952
1953 /* Setup error traceback support for ereport() */
1958
1959 /* adjust the process title to indicate that it's waiting */
1960 set_ps_display_suffix("waiting");
1961
1962 /*
1963 * Record the fact that we are waiting for a lock, so that
1964 * LockErrorCleanup will clean up if cancel/die happens.
1965 */
1967 awaitedOwner = owner;
1968
1969 /*
1970 * NOTE: Think not to put any shared-state cleanup after the call to
1971 * ProcSleep, in either the normal or failure path. The lock state must
1972 * be fully set by the lock grantor, or by CheckDeadLock if we give up
1973 * waiting for the lock. This is necessary because of the possibility
1974 * that a cancel/die interrupt will interrupt ProcSleep after someone else
1975 * grants us the lock, but before we've noticed it. Hence, after granting,
1976 * the locktable state must fully reflect the fact that we own the lock;
1977 * we can't do additional work on return.
1978 *
1979 * We can and do use a PG_TRY block to try to clean up after failure, but
1980 * this still has a major limitation: elog(FATAL) can occur while waiting
1981 * (eg, a "die" interrupt), and then control won't come back here. So all
1982 * cleanup of essential state should happen in LockErrorCleanup, not here.
1983 * We can use PG_TRY to clear the "waiting" status flags, since doing that
1984 * is unimportant if the process exits.
1985 */
1986 PG_TRY();
1987 {
1989 }
1990 PG_CATCH();
1991 {
1992 /* In this path, awaitedLock remains set until LockErrorCleanup */
1993
1994 /* reset ps display to remove the suffix */
1996
1997 /* and propagate the error */
1998 PG_RE_THROW();
1999 }
2000 PG_END_TRY();
2001
2002 /*
2003 * We no longer want LockErrorCleanup to do anything.
2004 */
2005 awaitedLock = NULL;
2006
2007 /* reset ps display to remove the suffix */
2009
2011
2012 TRACE_POSTGRESQL_LOCK_WAIT_DONE(locallock->tag.lock.locktag_field1,
2013 locallock->tag.lock.locktag_field2,
2014 locallock->tag.lock.locktag_field3,
2015 locallock->tag.lock.locktag_field4,
2016 locallock->tag.lock.locktag_type,
2017 locallock->tag.mode);
2018
2019 return result;
2020}

References awaitedLock, awaitedOwner, error_context_stack, fb(), PG_CATCH, PG_END_TRY, PG_RE_THROW, PG_TRY, ErrorContextCallback::previous, ProcSleep(), result, set_ps_display_remove_suffix(), set_ps_display_suffix(), and waitonlock_error_callback().

Referenced by LockAcquireExtended().

◆ waitonlock_error_callback()

static void waitonlock_error_callback ( void arg)
static

Definition at line 2029 of file lock.c.

2030{
2032 const LOCKTAG *tag = &locallock->tag.lock;
2033 LOCKMODE mode = locallock->tag.mode;
2035
2038
2039 errcontext("waiting for %s on %s",
2041 locktagbuf.data);
2042}

References arg, DescribeLockTag(), errcontext, fb(), GetLockmodeName(), initStringInfo(), LOCKTAG::locktag_lockmethodid, and mode.

Referenced by WaitOnLock().

◆ XactLockForVirtualXact()

static bool XactLockForVirtualXact ( VirtualTransactionId  vxid,
TransactionId  xid,
bool  wait 
)
static

Definition at line 4675 of file lock.c.

4677{
4678 bool more = false;
4679
4680 /* There is no point to wait for 2PCs if you have no 2PCs. */
4681 if (max_prepared_xacts == 0)
4682 return true;
4683
4684 do
4685 {
4687 LOCKTAG tag;
4688
4689 /* Clear state from previous iterations. */
4690 if (more)
4691 {
4693 more = false;
4694 }
4695
4696 /* If we have no xid, try to find one. */
4697 if (!TransactionIdIsValid(xid))
4698 xid = TwoPhaseGetXidByVirtualXID(vxid, &more);
4699 if (!TransactionIdIsValid(xid))
4700 {
4701 Assert(!more);
4702 return true;
4703 }
4704
4705 /* Check or wait for XID completion. */
4706 SET_LOCKTAG_TRANSACTION(tag, xid);
4707 lar = LockAcquire(&tag, ShareLock, false, !wait);
4709 return false;
4710 LockRelease(&tag, ShareLock, false);
4711 } while (more);
4712
4713 return true;
4714}

References Assert, fb(), InvalidTransactionId, LockAcquire(), LOCKACQUIRE_NOT_AVAIL, LockRelease(), max_prepared_xacts, SET_LOCKTAG_TRANSACTION, ShareLock, TransactionIdIsValid, and TwoPhaseGetXidByVirtualXID().

Referenced by VirtualXactLock().

Variable Documentation

◆ awaitedLock

LOCALLOCK* awaitedLock
static

Definition at line 339 of file lock.c.

Referenced by GetAwaitedLock(), GrantAwaitedLock(), ResetAwaitedLock(), and WaitOnLock().

◆ awaitedOwner

ResourceOwner awaitedOwner
static

Definition at line 340 of file lock.c.

Referenced by GrantAwaitedLock(), and WaitOnLock().

◆ default_lockmethod

const LockMethodData default_lockmethod
static
Initial value:
= {
}
static bool Dummy_trace
Definition lock.c:125
static const char *const lock_mode_names[]
Definition lock.c:111
static const LOCKMASK LockConflicts[]
Definition lock.c:68

Definition at line 128 of file lock.c.

128 {
132#ifdef LOCK_DEBUG
134#else
136#endif
137};

◆ Dummy_trace

bool Dummy_trace = false
static

Definition at line 125 of file lock.c.

◆ FastPathLocalUseCounts

int FastPathLocalUseCounts[FP_LOCK_GROUPS_PER_BACKEND_MAX]
static

◆ FastPathLockGroupsPerBackend

int FastPathLockGroupsPerBackend = 0

◆ FastPathStrongRelationLocks

◆ lock_mode_names

const char* const lock_mode_names[]
static
Initial value:
=
{
"INVALID",
"AccessShareLock",
"RowShareLock",
"RowExclusiveLock",
"ShareUpdateExclusiveLock",
"ShareLock",
"ShareRowExclusiveLock",
"ExclusiveLock",
"AccessExclusiveLock"
}

Definition at line 111 of file lock.c.

112{
113 "INVALID",
114 "AccessShareLock",
115 "RowShareLock",
116 "RowExclusiveLock",
117 "ShareUpdateExclusiveLock",
118 "ShareLock",
119 "ShareRowExclusiveLock",
120 "ExclusiveLock",
121 "AccessExclusiveLock"
122};

◆ LockConflicts

const LOCKMASK LockConflicts[]
static

Definition at line 68 of file lock.c.

68 {
69 0,
70
71 /* AccessShareLock */
73
74 /* RowShareLock */
76
77 /* RowExclusiveLock */
80
81 /* ShareUpdateExclusiveLock */
85
86 /* ShareLock */
90
91 /* ShareRowExclusiveLock */
95
96 /* ExclusiveLock */
101
102 /* AccessExclusiveLock */
107
108};
#define ShareRowExclusiveLock
Definition lockdefs.h:41
#define AccessShareLock
Definition lockdefs.h:36
#define RowShareLock
Definition lockdefs.h:37

◆ LockManagerShmemCallbacks

const ShmemCallbacks LockManagerShmemCallbacks
Initial value:
= {
.request_fn = LockManagerShmemRequest,
}

Definition at line 320 of file lock.c.

320 {
321 .request_fn = LockManagerShmemRequest,
322 .init_fn = LockManagerShmemInit,
323};

◆ LockMethodLocalHash

◆ LockMethodLockHash

◆ LockMethodProcLockHash

◆ LockMethods

◆ log_lock_failures

bool log_lock_failures = false

Definition at line 57 of file lock.c.

Referenced by heap_acquire_tuplock(), heap_lock_tuple(), and heapam_tuple_lock().

◆ max_locks_per_xact

int max_locks_per_xact

◆ PG_USED_FOR_ASSERTS_ONLY

bool IsRelationExtensionLockHeld PG_USED_FOR_ASSERTS_ONLY = false
static

Definition at line 194 of file lock.c.

◆ StrongLockInProgress

LOCALLOCK* StrongLockInProgress
static

Definition at line 338 of file lock.c.

Referenced by AbortStrongLockAcquire(), BeginStrongLockAcquire(), and FinishStrongLockAcquire().

◆ user_lockmethod

const LockMethodData user_lockmethod
static
Initial value:

Definition at line 139 of file lock.c.

139 {
143#ifdef LOCK_DEBUG
145#else
147#endif
148};