PostgreSQL Source Code git master
Loading...
Searching...
No Matches
lock.c File Reference
#include "postgres.h"
#include <signal.h>
#include <unistd.h>
#include "access/transam.h"
#include "access/twophase.h"
#include "access/twophase_rmgr.h"
#include "access/xlog.h"
#include "access/xlogutils.h"
#include "miscadmin.h"
#include "pg_trace.h"
#include "pgstat.h"
#include "storage/lmgr.h"
#include "storage/proc.h"
#include "storage/procarray.h"
#include "storage/shmem.h"
#include "storage/spin.h"
#include "storage/standby.h"
#include "storage/subsystems.h"
#include "utils/memutils.h"
#include "utils/ps_status.h"
#include "utils/resowner.h"
Include dependency graph for lock.c:

Go to the source code of this file.

Data Structures

struct  TwoPhaseLockRecord
 
struct  FastPathStrongRelationLockData
 

Macros

#define NLOCKENTS()    mul_size(max_locks_per_xact, add_size(MaxBackends, max_prepared_xacts))
 
#define FAST_PATH_REL_GROUP(rel)    (((uint64) (rel) * 49157) & (FastPathLockGroupsPerBackend - 1))
 
#define FAST_PATH_SLOT(group, index)
 
#define FAST_PATH_GROUP(index)
 
#define FAST_PATH_INDEX(index)
 
#define FAST_PATH_BITS_PER_SLOT   3
 
#define FAST_PATH_LOCKNUMBER_OFFSET   1
 
#define FAST_PATH_MASK   ((1 << FAST_PATH_BITS_PER_SLOT) - 1)
 
#define FAST_PATH_BITS(proc, n)   (proc)->fpLockBits[FAST_PATH_GROUP(n)]
 
#define FAST_PATH_GET_BITS(proc, n)    ((FAST_PATH_BITS(proc, n) >> (FAST_PATH_BITS_PER_SLOT * FAST_PATH_INDEX(n))) & FAST_PATH_MASK)
 
#define FAST_PATH_BIT_POSITION(n, l)
 
#define FAST_PATH_SET_LOCKMODE(proc, n, l)    FAST_PATH_BITS(proc, n) |= UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)
 
#define FAST_PATH_CLEAR_LOCKMODE(proc, n, l)    FAST_PATH_BITS(proc, n) &= ~(UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l))
 
#define FAST_PATH_CHECK_LOCKMODE(proc, n, l)    (FAST_PATH_BITS(proc, n) & (UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)))
 
#define EligibleForRelationFastPath(locktag, mode)
 
#define ConflictsWithRelationFastPath(locktag, mode)
 
#define FAST_PATH_STRONG_LOCK_HASH_BITS   10
 
#define FAST_PATH_STRONG_LOCK_HASH_PARTITIONS    (1 << FAST_PATH_STRONG_LOCK_HASH_BITS)
 
#define FastPathStrongLockHashPartition(hashcode)    ((hashcode) % FAST_PATH_STRONG_LOCK_HASH_PARTITIONS)
 
#define LOCK_PRINT(where, lock, type)   ((void) 0)
 
#define PROCLOCK_PRINT(where, proclockP)   ((void) 0)
 

Typedefs

typedef struct TwoPhaseLockRecord TwoPhaseLockRecord
 

Functions

static bool FastPathGrantRelationLock (Oid relid, LOCKMODE lockmode)
 
static bool FastPathUnGrantRelationLock (Oid relid, LOCKMODE lockmode)
 
static bool FastPathTransferRelationLocks (LockMethod lockMethodTable, const LOCKTAG *locktag, uint32 hashcode)
 
static PROCLOCKFastPathGetRelationLockEntry (LOCALLOCK *locallock)
 
static void LockManagerShmemRequest (void *arg)
 
static void LockManagerShmemInit (void *arg)
 
static uint32 proclock_hash (const void *key, Size keysize)
 
static void RemoveLocalLock (LOCALLOCK *locallock)
 
static PROCLOCKSetupLockInTable (LockMethod lockMethodTable, PGPROC *proc, const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode)
 
static void GrantLockLocal (LOCALLOCK *locallock, ResourceOwner owner)
 
static void BeginStrongLockAcquire (LOCALLOCK *locallock, uint32 fasthashcode)
 
static void FinishStrongLockAcquire (void)
 
static ProcWaitStatus WaitOnLock (LOCALLOCK *locallock, ResourceOwner owner)
 
static void waitonlock_error_callback (void *arg)
 
static void ReleaseLockIfHeld (LOCALLOCK *locallock, bool sessionLock)
 
static void LockReassignOwner (LOCALLOCK *locallock, ResourceOwner parent)
 
static bool UnGrantLock (LOCK *lock, LOCKMODE lockmode, PROCLOCK *proclock, LockMethod lockMethodTable)
 
static void CleanUpLock (LOCK *lock, PROCLOCK *proclock, LockMethod lockMethodTable, uint32 hashcode, bool wakeupNeeded)
 
static void LockRefindAndRelease (LockMethod lockMethodTable, PGPROC *proc, LOCKTAG *locktag, LOCKMODE lockmode, bool decrement_strong_lock_count)
 
static void GetSingleProcBlockerStatusData (PGPROC *blocked_proc, BlockedProcsData *data)
 
void InitLockManagerAccess (void)
 
LockMethod GetLocksMethodTable (const LOCK *lock)
 
LockMethod GetLockTagsMethodTable (const LOCKTAG *locktag)
 
uint32 LockTagHashCode (const LOCKTAG *locktag)
 
static uint32 ProcLockHashCode (const PROCLOCKTAG *proclocktag, uint32 hashcode)
 
bool DoLockModesConflict (LOCKMODE mode1, LOCKMODE mode2)
 
bool LockHeldByMe (const LOCKTAG *locktag, LOCKMODE lockmode, bool orstronger)
 
bool LockHasWaiters (const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
 
LockAcquireResult LockAcquire (const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock, bool dontWait)
 
LockAcquireResult LockAcquireExtended (const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock, bool dontWait, bool reportMemoryError, LOCALLOCK **locallockp, bool logLockFailure)
 
static void CheckAndSetLockHeld (LOCALLOCK *locallock, bool acquired)
 
bool LockCheckConflicts (LockMethod lockMethodTable, LOCKMODE lockmode, LOCK *lock, PROCLOCK *proclock)
 
void GrantLock (LOCK *lock, PROCLOCK *proclock, LOCKMODE lockmode)
 
void AbortStrongLockAcquire (void)
 
void GrantAwaitedLock (void)
 
LOCALLOCKGetAwaitedLock (void)
 
void ResetAwaitedLock (void)
 
void MarkLockClear (LOCALLOCK *locallock)
 
void RemoveFromWaitQueue (PGPROC *proc, uint32 hashcode)
 
bool LockRelease (const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
 
void LockReleaseAll (LOCKMETHODID lockmethodid, bool allLocks)
 
void LockReleaseSession (LOCKMETHODID lockmethodid)
 
void LockReleaseCurrentOwner (LOCALLOCK **locallocks, int nlocks)
 
void LockReassignCurrentOwner (LOCALLOCK **locallocks, int nlocks)
 
VirtualTransactionIdGetLockConflicts (const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
 
static void CheckForSessionAndXactLocks (void)
 
void AtPrepare_Locks (void)
 
void PostPrepare_Locks (FullTransactionId fxid)
 
LockDataGetLockStatusData (void)
 
BlockedProcsDataGetBlockerStatusData (int blocked_pid)
 
xl_standby_lockGetRunningTransactionLocks (int *nlocks)
 
const charGetLockmodeName (LOCKMETHODID lockmethodid, LOCKMODE mode)
 
void lock_twophase_recover (FullTransactionId fxid, uint16 info, void *recdata, uint32 len)
 
void lock_twophase_standby_recover (FullTransactionId fxid, uint16 info, void *recdata, uint32 len)
 
void lock_twophase_postcommit (FullTransactionId fxid, uint16 info, void *recdata, uint32 len)
 
void lock_twophase_postabort (FullTransactionId fxid, uint16 info, void *recdata, uint32 len)
 
void VirtualXactLockTableInsert (VirtualTransactionId vxid)
 
void VirtualXactLockTableCleanup (void)
 
static bool XactLockForVirtualXact (VirtualTransactionId vxid, TransactionId xid, bool wait)
 
bool VirtualXactLock (VirtualTransactionId vxid, bool wait)
 
int LockWaiterCount (const LOCKTAG *locktag)
 

Variables

int max_locks_per_xact
 
bool log_lock_failures = false
 
static const LOCKMASK LockConflicts []
 
static const char *const lock_mode_names []
 
static bool Dummy_trace = false
 
static const LockMethodData default_lockmethod
 
static const LockMethodData user_lockmethod
 
static const LockMethod LockMethods []
 
static int FastPathLocalUseCounts [FP_LOCK_GROUPS_PER_BACKEND_MAX]
 
static bool IsRelationExtensionLockHeld PG_USED_FOR_ASSERTS_ONLY = false
 
int FastPathLockGroupsPerBackend = 0
 
static volatile FastPathStrongRelationLockDataFastPathStrongRelationLocks
 
const ShmemCallbacks LockManagerShmemCallbacks
 
static HTABLockMethodLockHash
 
static HTABLockMethodProcLockHash
 
static HTABLockMethodLocalHash
 
static LOCALLOCKStrongLockInProgress
 
static LOCALLOCKawaitedLock
 
static ResourceOwner awaitedOwner
 

Macro Definition Documentation

◆ ConflictsWithRelationFastPath

#define ConflictsWithRelationFastPath (   locktag,
  mode 
)
Value:
((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
(locktag)->locktag_type == LOCKTAG_RELATION && \
(locktag)->locktag_field1 != InvalidOid && \
#define ShareUpdateExclusiveLock
Definition lockdefs.h:39
#define DEFAULT_LOCKMETHOD
Definition locktag.h:25
@ LOCKTAG_RELATION
Definition locktag.h:37
static PgChecksumMode mode
#define InvalidOid

Definition at line 276 of file lock.c.

309{
310 slock_t mutex;
313
315
316static void LockManagerShmemRequest(void *arg);
317static void LockManagerShmemInit(void *arg);
318
321 .init_fn = LockManagerShmemInit,
322};
323
324
325/*
326 * Pointers to hash tables containing lock state
327 *
328 * The LockMethodLockHash and LockMethodProcLockHash hash tables are in
329 * shared memory; LockMethodLocalHash is local to each backend.
330 */
334
335
336/* private state for error cleanup */
338static LOCALLOCK *awaitedLock;
340
341
342#ifdef LOCK_DEBUG
343
344/*------
345 * The following configuration options are available for lock debugging:
346 *
347 * TRACE_LOCKS -- give a bunch of output what's going on in this file
348 * TRACE_USERLOCKS -- same but for user locks
349 * TRACE_LOCK_OIDMIN-- do not trace locks for tables below this oid
350 * (use to avoid output on system tables)
351 * TRACE_LOCK_TABLE -- trace locks on this table (oid) unconditionally
352 * DEBUG_DEADLOCKS -- currently dumps locks at untimely occasions ;)
353 *
354 * Furthermore, but in storage/lmgr/lwlock.c:
355 * TRACE_LWLOCKS -- trace lightweight locks (pretty useless)
356 *
357 * Define LOCK_DEBUG at compile time to get all these enabled.
358 * --------
359 */
360
362bool Trace_locks = false;
363bool Trace_userlocks = false;
364int Trace_lock_table = 0;
365bool Debug_deadlocks = false;
366
367
368inline static bool
369LOCK_DEBUG_ENABLED(const LOCKTAG *tag)
370{
371 return
374 || (Trace_lock_table &&
376}
377
378
379inline static void
380LOCK_PRINT(const char *where, const LOCK *lock, LOCKMODE type)
381{
382 if (LOCK_DEBUG_ENABLED(&lock->tag))
383 elog(LOG,
384 "%s: lock(%p) id(%u,%u,%u,%u,%u,%u) grantMask(%x) "
385 "req(%d,%d,%d,%d,%d,%d,%d)=%d "
386 "grant(%d,%d,%d,%d,%d,%d,%d)=%d wait(%d) type(%s)",
387 where, lock,
391 lock->grantMask,
392 lock->requested[1], lock->requested[2], lock->requested[3],
393 lock->requested[4], lock->requested[5], lock->requested[6],
394 lock->requested[7], lock->nRequested,
395 lock->granted[1], lock->granted[2], lock->granted[3],
396 lock->granted[4], lock->granted[5], lock->granted[6],
397 lock->granted[7], lock->nGranted,
398 dclist_count(&lock->waitProcs),
399 LockMethods[LOCK_LOCKMETHOD(*lock)]->lockModeNames[type]);
400}
401
402
403inline static void
404PROCLOCK_PRINT(const char *where, const PROCLOCK *proclockP)
405{
406 if (LOCK_DEBUG_ENABLED(&proclockP->tag.myLock->tag))
407 elog(LOG,
408 "%s: proclock(%p) lock(%p) method(%u) proc(%p) hold(%x)",
409 where, proclockP, proclockP->tag.myLock,
411 proclockP->tag.myProc, (int) proclockP->holdMask);
412}
413#else /* not LOCK_DEBUG */
414
415#define LOCK_PRINT(where, lock, type) ((void) 0)
416#define PROCLOCK_PRINT(where, proclockP) ((void) 0)
417#endif /* not LOCK_DEBUG */
418
419
420static uint32 proclock_hash(const void *key, Size keysize);
423 const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode);
426static void FinishStrongLockAcquire(void);
428static void waitonlock_error_callback(void *arg);
431static bool UnGrantLock(LOCK *lock, LOCKMODE lockmode,
433static void CleanUpLock(LOCK *lock, PROCLOCK *proclock,
435 bool wakeupNeeded);
437 LOCKTAG *locktag, LOCKMODE lockmode,
441
442
443/*
444 * Register the lock manager's shmem data structures.
445 *
446 * In addition to this, each backend must also call InitLockManagerAccess() to
447 * create the locallock hash table.
448 */
449static void
451{
453
454 /*
455 * Compute sizes for lock hashtables.
456 */
458
459 /*
460 * Hash table for LOCK structs. This stores per-locked-object
461 * information.
462 */
463 ShmemRequestHash(.name = "LOCK hash",
464 .nelems = max_table_size,
465 .ptr = &LockMethodLockHash,
466 .hash_info.keysize = sizeof(LOCKTAG),
467 .hash_info.entrysize = sizeof(LOCK),
468 .hash_info.num_partitions = NUM_LOCK_PARTITIONS,
469 .hash_flags = HASH_ELEM | HASH_BLOBS | HASH_PARTITION,
470 );
471
472 /* Assume an average of 2 holders per lock */
473 max_table_size *= 2;
474
475 ShmemRequestHash(.name = "PROCLOCK hash",
476 .nelems = max_table_size,
478 .hash_info.keysize = sizeof(PROCLOCKTAG),
479 .hash_info.entrysize = sizeof(PROCLOCK),
480 .hash_info.hash = proclock_hash,
481 .hash_info.num_partitions = NUM_LOCK_PARTITIONS,
482 .hash_flags = HASH_ELEM | HASH_FUNCTION | HASH_PARTITION,
483 );
484
485 ShmemRequestStruct(.name = "Fast Path Strong Relation Lock Data",
486 .size = sizeof(FastPathStrongRelationLockData),
487 .ptr = (void **) (void *) &FastPathStrongRelationLocks,
488 );
489}
490
491static void
493{
495}
496
497/*
498 * Initialize the lock manager's backend-private data structures.
499 */
500void
502{
503 /*
504 * Allocate non-shared hash table for LOCALLOCK structs. This stores lock
505 * counts and resource owner information.
506 */
507 HASHCTL info;
508
509 info.keysize = sizeof(LOCALLOCKTAG);
510 info.entrysize = sizeof(LOCALLOCK);
511
512 LockMethodLocalHash = hash_create("LOCALLOCK hash",
513 16,
514 &info,
516}
517
518
519/*
520 * Fetch the lock method table associated with a given lock
521 */
523GetLocksMethodTable(const LOCK *lock)
524{
526
529}
530
531/*
532 * Fetch the lock method table associated with a given locktag
533 */
535GetLockTagsMethodTable(const LOCKTAG *locktag)
536{
538
541}
542
543
544/*
545 * Compute the hash code associated with a LOCKTAG.
546 *
547 * To avoid unnecessary recomputations of the hash code, we try to do this
548 * just once per function, and then pass it around as needed. Aside from
549 * passing the hashcode to hash_search_with_hash_value(), we can extract
550 * the lock partition number from the hashcode.
551 */
552uint32
553LockTagHashCode(const LOCKTAG *locktag)
554{
555 return get_hash_value(LockMethodLockHash, locktag);
556}
557
558/*
559 * Compute the hash code associated with a PROCLOCKTAG.
560 *
561 * Because we want to use just one set of partition locks for both the
562 * LOCK and PROCLOCK hash tables, we have to make sure that PROCLOCKs
563 * fall into the same partition number as their associated LOCKs.
564 * dynahash.c expects the partition number to be the low-order bits of
565 * the hash code, and therefore a PROCLOCKTAG's hash code must have the
566 * same low-order bits as the associated LOCKTAG's hash code. We achieve
567 * this with this specialized hash function.
568 */
569static uint32
570proclock_hash(const void *key, Size keysize)
571{
572 const PROCLOCKTAG *proclocktag = (const PROCLOCKTAG *) key;
575
576 Assert(keysize == sizeof(PROCLOCKTAG));
577
578 /* Look into the associated LOCK object, and compute its hash code */
579 lockhash = LockTagHashCode(&proclocktag->myLock->tag);
580
581 /*
582 * To make the hash code also depend on the PGPROC, we xor the proc
583 * struct's address into the hash code, left-shifted so that the
584 * partition-number bits don't change. Since this is only a hash, we
585 * don't care if we lose high-order bits of the address; use an
586 * intermediate variable to suppress cast-pointer-to-int warnings.
587 */
590
591 return lockhash;
592}
593
594/*
595 * Compute the hash code associated with a PROCLOCKTAG, given the hashcode
596 * for its underlying LOCK.
597 *
598 * We use this just to avoid redundant calls of LockTagHashCode().
599 */
600static inline uint32
602{
603 uint32 lockhash = hashcode;
605
606 /*
607 * This must match proclock_hash()!
608 */
611
612 return lockhash;
613}
614
615/*
616 * Given two lock modes, return whether they would conflict.
617 */
618bool
620{
622
623 if (lockMethodTable->conflictTab[mode1] & LOCKBIT_ON(mode2))
624 return true;
625
626 return false;
627}
628
629/*
630 * LockHeldByMe -- test whether lock 'locktag' is held by the current
631 * transaction
632 *
633 * Returns true if current transaction holds a lock on 'tag' of mode
634 * 'lockmode'. If 'orstronger' is true, a stronger lockmode is also OK.
635 * ("Stronger" is defined as "numerically higher", which is a bit
636 * semantically dubious but is OK for the purposes we use this for.)
637 */
638bool
639LockHeldByMe(const LOCKTAG *locktag,
640 LOCKMODE lockmode, bool orstronger)
641{
644
645 /*
646 * See if there is a LOCALLOCK entry for this lock and lockmode
647 */
648 MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
649 localtag.lock = *locktag;
650 localtag.mode = lockmode;
651
653 &localtag,
654 HASH_FIND, NULL);
655
656 if (locallock && locallock->nLocks > 0)
657 return true;
658
659 if (orstronger)
660 {
662
663 for (slockmode = lockmode + 1;
665 slockmode++)
666 {
667 if (LockHeldByMe(locktag, slockmode, false))
668 return true;
669 }
670 }
671
672 return false;
673}
674
675#ifdef USE_ASSERT_CHECKING
676/*
677 * GetLockMethodLocalHash -- return the hash of local locks, for modules that
678 * evaluate assertions based on all locks held.
679 */
680HTAB *
682{
683 return LockMethodLocalHash;
684}
685#endif
686
687/*
688 * LockHasWaiters -- look up 'locktag' and check if releasing this
689 * lock would wake up other processes waiting for it.
690 */
691bool
692LockHasWaiters(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
693{
698 LOCK *lock;
699 PROCLOCK *proclock;
701 bool hasWaiters = false;
702
704 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
707 elog(ERROR, "unrecognized lock mode: %d", lockmode);
708
709#ifdef LOCK_DEBUG
710 if (LOCK_DEBUG_ENABLED(locktag))
711 elog(LOG, "LockHasWaiters: lock [%u,%u] %s",
712 locktag->locktag_field1, locktag->locktag_field2,
713 lockMethodTable->lockModeNames[lockmode]);
714#endif
715
716 /*
717 * Find the LOCALLOCK entry for this lock and lockmode
718 */
719 MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
720 localtag.lock = *locktag;
721 localtag.mode = lockmode;
722
724 &localtag,
725 HASH_FIND, NULL);
726
727 /*
728 * let the caller print its own error message, too. Do not ereport(ERROR).
729 */
730 if (!locallock || locallock->nLocks <= 0)
731 {
732 elog(WARNING, "you don't own a lock of type %s",
733 lockMethodTable->lockModeNames[lockmode]);
734 return false;
735 }
736
737 /*
738 * Check the shared lock table.
739 */
741
743
744 /*
745 * We don't need to re-find the lock or proclock, since we kept their
746 * addresses in the locallock table, and they couldn't have been removed
747 * while we were holding a lock on them.
748 */
749 lock = locallock->lock;
750 LOCK_PRINT("LockHasWaiters: found", lock, lockmode);
751 proclock = locallock->proclock;
752 PROCLOCK_PRINT("LockHasWaiters: found", proclock);
753
754 /*
755 * Double-check that we are actually holding a lock of the type we want to
756 * release.
757 */
758 if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
759 {
760 PROCLOCK_PRINT("LockHasWaiters: WRONGTYPE", proclock);
762 elog(WARNING, "you don't own a lock of type %s",
763 lockMethodTable->lockModeNames[lockmode]);
765 return false;
766 }
767
768 /*
769 * Do the checking.
770 */
771 if ((lockMethodTable->conflictTab[lockmode] & lock->waitMask) != 0)
772 hasWaiters = true;
773
775
776 return hasWaiters;
777}
778
779/*
780 * LockAcquire -- Check for lock conflicts, sleep if conflict found,
781 * set lock if/when no conflicts.
782 *
783 * Inputs:
784 * locktag: unique identifier for the lockable object
785 * lockmode: lock mode to acquire
786 * sessionLock: if true, acquire lock for session not current transaction
787 * dontWait: if true, don't wait to acquire lock
788 *
789 * Returns one of:
790 * LOCKACQUIRE_NOT_AVAIL lock not available, and dontWait=true
791 * LOCKACQUIRE_OK lock successfully acquired
792 * LOCKACQUIRE_ALREADY_HELD incremented count for lock already held
793 * LOCKACQUIRE_ALREADY_CLEAR incremented count for lock already clear
794 *
795 * In the normal case where dontWait=false and the caller doesn't need to
796 * distinguish a freshly acquired lock from one already taken earlier in
797 * this same transaction, there is no need to examine the return value.
798 *
799 * Side Effects: The lock is acquired and recorded in lock tables.
800 *
801 * NOTE: if we wait for the lock, there is no way to abort the wait
802 * short of aborting the transaction.
803 */
805LockAcquire(const LOCKTAG *locktag,
806 LOCKMODE lockmode,
807 bool sessionLock,
808 bool dontWait)
809{
810 return LockAcquireExtended(locktag, lockmode, sessionLock, dontWait,
811 true, NULL, false);
812}
813
814/*
815 * LockAcquireExtended - allows us to specify additional options
816 *
817 * reportMemoryError specifies whether a lock request that fills the lock
818 * table should generate an ERROR or not. Passing "false" allows the caller
819 * to attempt to recover from lock-table-full situations, perhaps by forcibly
820 * canceling other lock holders and then retrying. Note, however, that the
821 * return code for that is LOCKACQUIRE_NOT_AVAIL, so that it's unsafe to use
822 * in combination with dontWait = true, as the cause of failure couldn't be
823 * distinguished.
824 *
825 * If locallockp isn't NULL, *locallockp receives a pointer to the LOCALLOCK
826 * table entry if a lock is successfully acquired, or NULL if not.
827 *
828 * logLockFailure indicates whether to log details when a lock acquisition
829 * fails with dontWait = true.
830 */
832LockAcquireExtended(const LOCKTAG *locktag,
833 LOCKMODE lockmode,
834 bool sessionLock,
835 bool dontWait,
838 bool logLockFailure)
839{
844 LOCK *lock;
845 PROCLOCK *proclock;
846 bool found;
847 ResourceOwner owner;
848 uint32 hashcode;
850 bool found_conflict;
852 bool log_lock = false;
853
855 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
858 elog(ERROR, "unrecognized lock mode: %d", lockmode);
859
860 if (RecoveryInProgress() && !InRecovery &&
861 (locktag->locktag_type == LOCKTAG_OBJECT ||
862 locktag->locktag_type == LOCKTAG_RELATION) &&
863 lockmode > RowExclusiveLock)
866 errmsg("cannot acquire lock mode %s on database objects while recovery is in progress",
867 lockMethodTable->lockModeNames[lockmode]),
868 errhint("Only RowExclusiveLock or less can be acquired on database objects during recovery.")));
869
870#ifdef LOCK_DEBUG
871 if (LOCK_DEBUG_ENABLED(locktag))
872 elog(LOG, "LockAcquire: lock [%u,%u] %s",
873 locktag->locktag_field1, locktag->locktag_field2,
874 lockMethodTable->lockModeNames[lockmode]);
875#endif
876
877 /* Identify owner for lock */
878 if (sessionLock)
879 owner = NULL;
880 else
881 owner = CurrentResourceOwner;
882
883 /*
884 * Find or create a LOCALLOCK entry for this lock and lockmode
885 */
886 MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
887 localtag.lock = *locktag;
888 localtag.mode = lockmode;
889
891 &localtag,
892 HASH_ENTER, &found);
893
894 /*
895 * if it's a new locallock object, initialize it
896 */
897 if (!found)
898 {
899 locallock->lock = NULL;
900 locallock->proclock = NULL;
901 locallock->hashcode = LockTagHashCode(&(localtag.lock));
902 locallock->nLocks = 0;
903 locallock->holdsStrongLockCount = false;
904 locallock->lockCleared = false;
905 locallock->numLockOwners = 0;
906 locallock->maxLockOwners = 8;
907 locallock->lockOwners = NULL; /* in case next line fails */
908 locallock->lockOwners = (LOCALLOCKOWNER *)
910 locallock->maxLockOwners * sizeof(LOCALLOCKOWNER));
911 }
912 else
913 {
914 /* Make sure there will be room to remember the lock */
915 if (locallock->numLockOwners >= locallock->maxLockOwners)
916 {
917 int newsize = locallock->maxLockOwners * 2;
918
919 locallock->lockOwners = (LOCALLOCKOWNER *)
920 repalloc(locallock->lockOwners,
921 newsize * sizeof(LOCALLOCKOWNER));
922 locallock->maxLockOwners = newsize;
923 }
924 }
925 hashcode = locallock->hashcode;
926
927 if (locallockp)
929
930 /*
931 * If we already hold the lock, we can just increase the count locally.
932 *
933 * If lockCleared is already set, caller need not worry about absorbing
934 * sinval messages related to the lock's object.
935 */
936 if (locallock->nLocks > 0)
937 {
939 if (locallock->lockCleared)
941 else
943 }
944
945 /*
946 * We don't acquire any other heavyweight lock while holding the relation
947 * extension lock. We do allow to acquire the same relation extension
948 * lock more than once but that case won't reach here.
949 */
951
952 /*
953 * Prepare to emit a WAL record if acquisition of this lock needs to be
954 * replayed in a standby server.
955 *
956 * Here we prepare to log; after lock is acquired we'll issue log record.
957 * This arrangement simplifies error recovery in case the preparation step
958 * fails.
959 *
960 * Only AccessExclusiveLocks can conflict with lock types that read-only
961 * transactions can acquire in a standby server. Make sure this definition
962 * matches the one in GetRunningTransactionLocks().
963 */
964 if (lockmode >= AccessExclusiveLock &&
965 locktag->locktag_type == LOCKTAG_RELATION &&
968 {
970 log_lock = true;
971 }
972
973 /*
974 * Attempt to take lock via fast path, if eligible. But if we remember
975 * having filled up the fast path array, we don't attempt to make any
976 * further use of it until we release some locks. It's possible that some
977 * other backend has transferred some of those locks to the shared hash
978 * table, leaving space free, but it's not worth acquiring the LWLock just
979 * to check. It's also possible that we're acquiring a second or third
980 * lock type on a relation we have already locked using the fast-path, but
981 * for now we don't worry about that case either.
982 */
983 if (EligibleForRelationFastPath(locktag, lockmode))
984 {
987 {
989 bool acquired;
990
991 /*
992 * LWLockAcquire acts as a memory sequencing point, so it's safe
993 * to assume that any strong locker whose increment to
994 * FastPathStrongRelationLocks->counts becomes visible after we
995 * test it has yet to begin to transfer fast-path locks.
996 */
999 acquired = false;
1000 else
1002 lockmode);
1004 if (acquired)
1005 {
1006 /*
1007 * The locallock might contain stale pointers to some old
1008 * shared objects; we MUST reset these to null before
1009 * considering the lock to be acquired via fast-path.
1010 */
1011 locallock->lock = NULL;
1012 locallock->proclock = NULL;
1013 GrantLockLocal(locallock, owner);
1014 return LOCKACQUIRE_OK;
1015 }
1016 }
1017 else
1018 {
1019 /*
1020 * Increment the lock statistics counter if lock could not be
1021 * acquired via the fast-path.
1022 */
1023 pgstat_count_lock_fastpath_exceeded(locallock->tag.lock.locktag_type);
1024 }
1025 }
1026
1027 /*
1028 * If this lock could potentially have been taken via the fast-path by
1029 * some other backend, we must (temporarily) disable further use of the
1030 * fast-path for this lock tag, and migrate any locks already taken via
1031 * this method to the main lock table.
1032 */
1033 if (ConflictsWithRelationFastPath(locktag, lockmode))
1034 {
1036
1039 hashcode))
1040 {
1042 if (locallock->nLocks == 0)
1044 if (locallockp)
1045 *locallockp = NULL;
1047 ereport(ERROR,
1049 errmsg("out of shared memory"),
1050 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
1051 else
1052 return LOCKACQUIRE_NOT_AVAIL;
1053 }
1054 }
1055
1056 /*
1057 * We didn't find the lock in our LOCALLOCK table, and we didn't manage to
1058 * take it via the fast-path, either, so we've got to mess with the shared
1059 * lock table.
1060 */
1062
1064
1065 /*
1066 * Find or create lock and proclock entries with this tag
1067 *
1068 * Note: if the locallock object already existed, it might have a pointer
1069 * to the lock already ... but we should not assume that that pointer is
1070 * valid, since a lock object with zero hold and request counts can go
1071 * away anytime. So we have to use SetupLockInTable() to recompute the
1072 * lock and proclock pointers, even if they're already set.
1073 */
1074 proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
1075 hashcode, lockmode);
1076 if (!proclock)
1077 {
1080 if (locallock->nLocks == 0)
1082 if (locallockp)
1083 *locallockp = NULL;
1085 ereport(ERROR,
1087 errmsg("out of shared memory"),
1088 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
1089 else
1090 return LOCKACQUIRE_NOT_AVAIL;
1091 }
1092 locallock->proclock = proclock;
1093 lock = proclock->tag.myLock;
1094 locallock->lock = lock;
1095
1096 /*
1097 * If lock requested conflicts with locks requested by waiters, must join
1098 * wait queue. Otherwise, check for conflict with already-held locks.
1099 * (That's last because most complex check.)
1100 */
1101 if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
1102 found_conflict = true;
1103 else
1105 lock, proclock);
1106
1107 if (!found_conflict)
1108 {
1109 /* No conflict with held or previously requested locks */
1110 GrantLock(lock, proclock, lockmode);
1112 }
1113 else
1114 {
1115 /*
1116 * Join the lock's wait queue. We call this even in the dontWait
1117 * case, because JoinWaitQueue() may discover that we can acquire the
1118 * lock immediately after all.
1119 */
1121 }
1122
1124 {
1125 /*
1126 * We're not getting the lock because a deadlock was detected already
1127 * while trying to join the wait queue, or because we would have to
1128 * wait but the caller requested no blocking.
1129 *
1130 * Undo the changes to shared entries before releasing the partition
1131 * lock.
1132 */
1134
1135 if (proclock->holdMask == 0)
1136 {
1138
1140 hashcode);
1141 dlist_delete(&proclock->lockLink);
1142 dlist_delete(&proclock->procLink);
1144 &(proclock->tag),
1147 NULL))
1148 elog(PANIC, "proclock table corrupted");
1149 }
1150 else
1151 PROCLOCK_PRINT("LockAcquire: did not join wait queue", proclock);
1152 lock->nRequested--;
1153 lock->requested[lockmode]--;
1154 LOCK_PRINT("LockAcquire: did not join wait queue",
1155 lock, lockmode);
1156 Assert((lock->nRequested > 0) &&
1157 (lock->requested[lockmode] >= 0));
1158 Assert(lock->nGranted <= lock->nRequested);
1160 if (locallock->nLocks == 0)
1162
1163 if (dontWait)
1164 {
1165 /*
1166 * Log lock holders and waiters as a detail log message if
1167 * logLockFailure = true and lock acquisition fails with dontWait
1168 * = true
1169 */
1170 if (logLockFailure)
1171 {
1175 const char *modename;
1176 int lockHoldersNum = 0;
1177
1181
1182 DescribeLockTag(&buf, &locallock->tag.lock);
1183 modename = GetLockmodeName(locallock->tag.lock.locktag_lockmethodid,
1184 lockmode);
1185
1186 /* Gather a list of all lock holders and waiters */
1191
1192 ereport(LOG,
1193 (errmsg("process %d could not obtain %s on %s",
1194 MyProcPid, modename, buf.data),
1196 "Process holding the lock: %s, Wait queue: %s.",
1197 "Processes holding the lock: %s, Wait queue: %s.",
1199 lock_holders_sbuf.data,
1200 lock_waiters_sbuf.data)));
1201
1202 pfree(buf.data);
1205 }
1206 if (locallockp)
1207 *locallockp = NULL;
1208 return LOCKACQUIRE_NOT_AVAIL;
1209 }
1210 else
1211 {
1213 /* DeadLockReport() will not return */
1214 }
1215 }
1216
1217 /*
1218 * We are now in the lock queue, or the lock was already granted. If
1219 * queued, go to sleep.
1220 */
1222 {
1223 Assert(!dontWait);
1224 PROCLOCK_PRINT("LockAcquire: sleeping on lock", proclock);
1225 LOCK_PRINT("LockAcquire: sleeping on lock", lock, lockmode);
1227
1229
1230 /*
1231 * NOTE: do not do any material change of state between here and
1232 * return. All required changes in locktable state must have been
1233 * done when the lock was granted to us --- see notes in WaitOnLock.
1234 */
1235
1237 {
1238 /*
1239 * We failed as a result of a deadlock, see CheckDeadLock(). Quit
1240 * now.
1241 */
1242 Assert(!dontWait);
1244 /* DeadLockReport() will not return */
1245 }
1246 }
1247 else
1250
1251 /* The lock was granted to us. Update the local lock entry accordingly */
1252 Assert((proclock->holdMask & LOCKBIT_ON(lockmode)) != 0);
1253 GrantLockLocal(locallock, owner);
1254
1255 /*
1256 * Lock state is fully up-to-date now; if we error out after this, no
1257 * special error cleanup is required.
1258 */
1260
1261 /*
1262 * Emit a WAL record if acquisition of this lock needs to be replayed in a
1263 * standby server.
1264 */
1265 if (log_lock)
1266 {
1267 /*
1268 * Decode the locktag back to the original values, to avoid sending
1269 * lots of empty bytes with every message. See lock.h to check how a
1270 * locktag is defined for LOCKTAG_RELATION
1271 */
1273 locktag->locktag_field2);
1274 }
1275
1276 return LOCKACQUIRE_OK;
1277}
1278
/*
 * NOTE(review): this listing is a doxygen extraction; the embedded source
 * line numbers jump inside this function (1289 -> 1291, 1294 -> 1297, ...),
 * so several original lines (including the function-name/parameter line and
 * some local declarations and hash_search_with_hash_value() call lines) are
 * missing here.  Verify against upstream lock.c before relying on this copy.
 */
1279/*
1280 * Find or create LOCK and PROCLOCK objects as needed for a new lock
1281 * request.
1282 *
1283 * Returns the PROCLOCK object, or NULL if we failed to create the objects
1284 * for lack of shared memory.
1285 *
1286 * The appropriate partition lock must be held at entry, and will be
1287 * held at exit.
1288 */
1289static PROCLOCK *
/* NOTE(review): line 1290 elided -- presumably "SetupLockInTable(LockMethod
 * lockMethodTable, PGPROC *proc," -- confirm against upstream. */
1291 const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode)
1292{
1293 LOCK *lock;
1294 PROCLOCK *proclock;
/* NOTE(review): lines 1295-1296 elided -- likely PROCLOCKTAG/hashcode locals. */
1297 bool found;
1298
1299 /*
1300 * Find or create a lock with this tag.
1301 */
1303 locktag,
1304 hashcode,
1306 &found);
1307 if (!lock)
1308 return NULL;
1309
1310 /*
1311 * if it's a new lock object, initialize it
1312 */
1313 if (!found)
1314 {
1315 lock->grantMask = 0;
1316 lock->waitMask = 0;
1317 dlist_init(&lock->procLocks);
1318 dclist_init(&lock->waitProcs);
1319 lock->nRequested = 0;
1320 lock->nGranted = 0;
1321 MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
1322 MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
1323 LOCK_PRINT("LockAcquire: new", lock, lockmode);
1324 }
1325 else
1326 {
1327 LOCK_PRINT("LockAcquire: found", lock, lockmode);
1328 Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
1329 Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
1330 Assert(lock->nGranted <= lock->nRequested);
1331 }
1332
1333 /*
1334 * Create the hash key for the proclock table.
1335 */
1336 proclocktag.myLock = lock;
1337 proclocktag.myProc = proc;
1338
1340
1341 /*
1342 * Find or create a proclock entry with this tag
1343 */
1345 &proclocktag,
1348 &found);
1349 if (!proclock)
1350 {
1351 /* Oops, not enough shmem for the proclock */
1352 if (lock->nRequested == 0)
1353 {
1354 /*
1355 * There are no other requestors of this lock, so garbage-collect
1356 * the lock object. We *must* do this to avoid a permanent leak
1357 * of shared memory, because there won't be anything to cause
1358 * anyone to release the lock object later.
1359 */
1360 Assert(dlist_is_empty(&(lock->procLocks)));
1362 &(lock->tag),
1363 hashcode,
1365 NULL))
1366 elog(PANIC, "lock table corrupted");
1367 }
1368 return NULL;
1369 }
1370
1371 /*
1372 * If new, initialize the new entry
1373 */
1374 if (!found)
1375 {
1377
1378 /*
1379 * It might seem unsafe to access proclock->groupLeader without a
1380 * lock, but it's not really. Either we are initializing a proclock
1381 * on our own behalf, in which case our group leader isn't changing
1382 * because the group leader for a process can only ever be changed by
1383 * the process itself; or else we are transferring a fast-path lock to
1384 * the main lock table, in which case that process can't change its
1385 * lock group leader without first releasing all of its locks (and in
1386 * particular the one we are currently transferring).
1387 */
1388 proclock->groupLeader = proc->lockGroupLeader != NULL ?
1389 proc->lockGroupLeader : proc;
1390 proclock->holdMask = 0;
1391 proclock->releaseMask = 0;
1392 /* Add proclock to appropriate lists */
1393 dlist_push_tail(&lock->procLocks, &proclock->lockLink);
1394 dlist_push_tail(&proc->myProcLocks[partition], &proclock->procLink);
1395 PROCLOCK_PRINT("LockAcquire: new", proclock);
1396 }
1397 else
1398 {
1399 PROCLOCK_PRINT("LockAcquire: found", proclock);
1400 Assert((proclock->holdMask & ~lock->grantMask) == 0);
1401
1402#ifdef CHECK_DEADLOCK_RISK
1403
1404 /*
1405 * Issue warning if we already hold a lower-level lock on this object
1406 * and do not hold a lock of the requested level or higher. This
1407 * indicates a deadlock-prone coding practice (eg, we'd have a
1408 * deadlock if another backend were following the same code path at
1409 * about the same time).
1410 *
1411 * This is not enabled by default, because it may generate log entries
1412 * about user-level coding practices that are in fact safe in context.
1413 * It can be enabled to help find system-level problems.
1414 *
1415 * XXX Doing numeric comparison on the lockmodes is a hack; it'd be
1416 * better to use a table. For now, though, this works.
1417 */
1418 {
1419 int i;
1420
1421 for (i = lockMethodTable->numLockModes; i > 0; i--)
1422 {
1423 if (proclock->holdMask & LOCKBIT_ON(i))
1424 {
1425 if (i >= (int) lockmode)
1426 break; /* safe: we have a lock >= req level */
1427 elog(LOG, "deadlock risk: raising lock level"
1428 " from %s to %s on object %u/%u/%u",
1429 lockMethodTable->lockModeNames[i],
1430 lockMethodTable->lockModeNames[lockmode],
1431 lock->tag.locktag_field1, lock->tag.locktag_field2,
1432 lock->tag.locktag_field3);
1433 break;
1434 }
1435 }
1436 }
1437#endif /* CHECK_DEADLOCK_RISK */
1438 }
1439
1440 /*
1441 * lock->nRequested and lock->requested[] count the total number of
1442 * requests, whether granted or waiting, so increment those immediately.
1443 * The other counts don't increment till we get the lock.
1444 */
1445 lock->nRequested++;
1446 lock->requested[lockmode]++;
1447 Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
1448
1449 /*
1450 * We shouldn't already hold the desired lock; else locallock table is
1451 * broken.
1452 */
1453 if (proclock->holdMask & LOCKBIT_ON(lockmode))
1454 elog(ERROR, "lock %s on object %u/%u/%u is already held",
1455 lockMethodTable->lockModeNames[lockmode],
1456 lock->tag.locktag_field1, lock->tag.locktag_field2,
1457 lock->tag.locktag_field3);
1458
1459 return proclock;
1460}
1461
1462/*
1463 * Check and set/reset the flag that we hold the relation extension lock.
1464 *
1465 * It is callers responsibility that this function is called after
1466 * acquiring/releasing the relation extension lock.
1467 *
1468 * Pass acquired as true if lock is acquired, false otherwise.
1469 */
1470static inline void
/* NOTE(review): line 1471 elided by the extraction -- presumably
 * "CheckAndSetLockHeld(LOCALLOCK *locallock, bool acquired)"; confirm. */
1472{
1473#ifdef USE_ASSERT_CHECKING
/* NOTE(review): lines 1474-1475 elided -- the assert-build-only body that
 * updates the held-extension-lock flag is not visible in this listing. */
1476#endif
1477}
1478
1479/*
1480 * Subroutine to free a locallock entry
1481 *
1482 * NOTE(review): doxygen extraction dropped several lines in this function
1483 * (1483 signature, 1499-1507, 1510, 1518); verify against upstream lock.c.
1484 */
1482static void
/* NOTE(review): line 1483 elided -- presumably
 * "RemoveLocalLock(LOCALLOCK *locallock)"; confirm upstream. */
1484{
1485 int i;
1486
/* Detach this locallock from every resource owner still tracking it. */
1487 for (i = locallock->numLockOwners - 1; i >= 0; i--)
1488 {
1489 if (locallock->lockOwners[i].owner != NULL)
1490 ResourceOwnerForgetLock(locallock->lockOwners[i].owner, locallock);
1491 }
1492 locallock->numLockOwners = 0;
1493 if (locallock->lockOwners != NULL)
1494 pfree(locallock->lockOwners);
1495 locallock->lockOwners = NULL;
1496
1497 if (locallock->holdsStrongLockCount)
1498 {
/* NOTE(review): lines 1499-1507 partially elided -- presumably the
 * fast-path strong-lock counter decrement under its spinlock; confirm. */
1500
1502
1506 locallock->holdsStrongLockCount = false;
1508 }
1509
1511 &(locallock->tag),
1512 HASH_REMOVE, NULL))
1513 elog(WARNING, "locallock table corrupted");
1514
1515 /*
1516 * Indicate that the lock is released for certain types of locks
1517 */
1519}
1520
/*
 * NOTE(review): several source lines are elided by the doxygen extraction in
 * this function (1536 name line, 1542, 1544-1546 locals, 1579-1580, 1617,
 * 1619, 1634-1635); verify against upstream lock.c before relying on it.
 */
1521/*
1522 * LockCheckConflicts -- test whether requested lock conflicts
1523 * with those already granted
1524 *
1525 * Returns true if conflict, false if no conflict.
1526 *
1527 * NOTES:
1528 * Here's what makes this complicated: one process's locks don't
1529 * conflict with one another, no matter what purpose they are held for
1530 * (eg, session and transaction locks do not conflict). Nor do the locks
1531 * of one process in a lock group conflict with those of another process in
1532 * the same group. So, we must subtract off these locks when determining
1533 * whether the requested new lock conflicts with those already held.
1534 */
1535bool
1537 LOCKMODE lockmode,
1538 LOCK *lock,
1539 PROCLOCK *proclock)
1540{
1541 int numLockModes = lockMethodTable->numLockModes;
1543 int conflictMask = lockMethodTable->conflictTab[lockmode];
/* NOTE(review): elided locals here likely include myLocks,
 * conflictsRemaining[] and totalConflictsRemaining used below. */
1547 int i;
1548
1549 /*
1550 * first check for global conflicts: If no locks conflict with my request,
1551 * then I get the lock.
1552 *
1553 * Checking for conflict: lock->grantMask represents the types of
1554 * currently held locks. conflictTable[lockmode] has a bit set for each
1555 * type of lock that conflicts with request. Bitwise compare tells if
1556 * there is a conflict.
1557 */
1558 if (!(conflictMask & lock->grantMask))
1559 {
1560 PROCLOCK_PRINT("LockCheckConflicts: no conflict", proclock);
1561 return false;
1562 }
1563
1564 /*
1565 * Rats. Something conflicts. But it could still be my own lock, or a
1566 * lock held by another member of my locking group. First, figure out how
1567 * many conflicts remain after subtracting out any locks I hold myself.
1568 */
1569 myLocks = proclock->holdMask;
1570 for (i = 1; i <= numLockModes; i++)
1571 {
1572 if ((conflictMask & LOCKBIT_ON(i)) == 0)
1573 {
1574 conflictsRemaining[i] = 0;
1575 continue;
1576 }
1577 conflictsRemaining[i] = lock->granted[i];
1578 if (myLocks & LOCKBIT_ON(i))
/* NOTE(review): lines 1579-1580 elided -- presumably decrementing
 * conflictsRemaining[i] and updating totalConflictsRemaining; confirm. */
1581 }
1582
1583 /* If no conflicts remain, we get the lock. */
1584 if (totalConflictsRemaining == 0)
1585 {
1586 PROCLOCK_PRINT("LockCheckConflicts: resolved (simple)", proclock);
1587 return false;
1588 }
1589
1590 /* If no group locking, it's definitely a conflict. */
1591 if (proclock->groupLeader == MyProc && MyProc->lockGroupLeader == NULL)
1592 {
1593 Assert(proclock->tag.myProc == MyProc);
1594 PROCLOCK_PRINT("LockCheckConflicts: conflicting (simple)",
1595 proclock);
1596 return true;
1597 }
1598
1599 /*
1600 * The relation extension lock conflict even between the group members.
1601 */
/* NOTE(review): line 1602 elided -- presumably the LOCK_LOCKTAG(...) ==
 * LOCKTAG_RELATION_EXTEND test guarding this branch; confirm. */
1603 {
1604 PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)",
1605 proclock);
1606 return true;
1607 }
1608
1609 /*
1610 * Locks held in conflicting modes by members of our own lock group are
1611 * not real conflicts; we can subtract those out and see if we still have
1612 * a conflict. This is O(N) in the number of processes holding or
1613 * awaiting locks on this object. We could improve that by making the
1614 * shared memory state more complex (and larger) but it doesn't seem worth
1615 * it.
1616 */
/* NOTE(review): line 1617 elided -- presumably the dlist_foreach over
 * lock->procLocks; confirm. */
1618 {
1620 dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
1621
1622 if (proclock != otherproclock &&
1623 proclock->groupLeader == otherproclock->groupLeader &&
1624 (otherproclock->holdMask & conflictMask) != 0)
1625 {
1626 int intersectMask = otherproclock->holdMask & conflictMask;
1627
1628 for (i = 1; i <= numLockModes; i++)
1629 {
1630 if ((intersectMask & LOCKBIT_ON(i)) != 0)
1631 {
1632 if (conflictsRemaining[i] <= 0)
1633 elog(PANIC, "proclocks held do not match lock");
/* NOTE(review): lines 1634-1635 elided -- presumably decrementing
 * conflictsRemaining[i] and totalConflictsRemaining; confirm. */
1636 }
1637 }
1638
1639 if (totalConflictsRemaining == 0)
1640 {
1641 PROCLOCK_PRINT("LockCheckConflicts: resolved (group)",
1642 proclock);
1643 return false;
1644 }
1645 }
1646 }
1647
1648 /* Nope, it's a real conflict. */
1649 PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)", proclock);
1650 return true;
1651}
1652
1653/*
1654 * GrantLock -- update the lock and proclock data structures to show
1655 * the lock request has been granted.
1656 *
1657 * NOTE: if proc was blocked, it also needs to be removed from the wait list
1658 * and have its waitLock/waitProcLock fields cleared. That's not done here.
1659 *
1660 * NOTE: the lock grant also has to be recorded in the associated LOCALLOCK
1661 * table entry; but since we may be awaking some other process, we can't do
1662 * that here; it's done by GrantLockLocal, instead.
1663 */
1664void
1665GrantLock(LOCK *lock, PROCLOCK *proclock, LOCKMODE lockmode)
1666{
1667 lock->nGranted++;
1668 lock->granted[lockmode]++;
1669 lock->grantMask |= LOCKBIT_ON(lockmode);
1670 if (lock->granted[lockmode] == lock->requested[lockmode])
1671 lock->waitMask &= LOCKBIT_OFF(lockmode);
1672 proclock->holdMask |= LOCKBIT_ON(lockmode);
1673 LOCK_PRINT("GrantLock", lock, lockmode);
1674 Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
1675 Assert(lock->nGranted <= lock->nRequested);
1676}
1677
1678/*
1679 * UnGrantLock -- opposite of GrantLock.
1680 *
1681 * Updates the lock and proclock data structures to show that the lock
1682 * is no longer held nor requested by the current holder.
1683 *
1684 * Returns true if there were any waiters waiting on the lock that
1685 * should now be woken up with ProcLockWakeup.
1686 */
1687static bool
1688UnGrantLock(LOCK *lock, LOCKMODE lockmode,
/* NOTE(review): line 1689 elided by the extraction -- presumably the rest of
 * the parameter list "PROCLOCK *proclock, LockMethod lockMethodTable)". */
1690{
1691 bool wakeupNeeded = false;
1692
1693 Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
1694 Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
1695 Assert(lock->nGranted <= lock->nRequested);
1696
1697 /*
1698 * fix the general lock stats
1699 */
1700 lock->nRequested--;
1701 lock->requested[lockmode]--;
1702 lock->nGranted--;
1703 lock->granted[lockmode]--;
1704
1705 if (lock->granted[lockmode] == 0)
1706 {
1707 /* change the conflict mask. No more of this lock type. */
1708 lock->grantMask &= LOCKBIT_OFF(lockmode);
1709 }
1710
1711 LOCK_PRINT("UnGrantLock: updated", lock, lockmode);
1712
1713 /*
1714 * We need only run ProcLockWakeup if the released lock conflicts with at
1715 * least one of the lock types requested by waiter(s). Otherwise whatever
1716 * conflict made them wait must still exist. NOTE: before MVCC, we could
1717 * skip wakeup if lock->granted[lockmode] was still positive. But that's
1718 * not true anymore, because the remaining granted locks might belong to
1719 * some waiter, who could now be awakened because he doesn't conflict with
1720 * his own locks.
1721 */
1722 if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
1723 wakeupNeeded = true;
1724
1725 /*
1726 * Now fix the per-proclock state.
1727 */
1728 proclock->holdMask &= LOCKBIT_OFF(lockmode);
1729 PROCLOCK_PRINT("UnGrantLock: updated", proclock);
1730
1731 return wakeupNeeded;
1732}
1733
/*
 * NOTE(review): the doxygen extraction dropped lines 1746 (tail of the
 * parameter list), 1755, 1761, 1763-1764, 1776-1777, 1780 and 1787 from this
 * function; verify against upstream lock.c.
 */
1734/*
1735 * CleanUpLock -- clean up after releasing a lock. We garbage-collect the
1736 * proclock and lock objects if possible, and call ProcLockWakeup if there
1737 * are remaining requests and the caller says it's OK. (Normally, this
1738 * should be called after UnGrantLock, and wakeupNeeded is the result from
1739 * UnGrantLock.)
1740 *
1741 * The appropriate partition lock must be held at entry, and will be
1742 * held at exit.
1743 */
1744static void
1745CleanUpLock(LOCK *lock, PROCLOCK *proclock,
1747 bool wakeupNeeded)
1748{
1749 /*
1750 * If this was my last hold on this lock, delete my entry in the proclock
1751 * table.
1752 */
1753 if (proclock->holdMask == 0)
1754 {
1756
1757 PROCLOCK_PRINT("CleanUpLock: deleting", proclock);
1758 dlist_delete(&proclock->lockLink);
1759 dlist_delete(&proclock->procLink);
1760 proclock_hashcode = ProcLockHashCode(&proclock->tag, hashcode);
1762 &(proclock->tag),
1765 NULL))
1766 elog(PANIC, "proclock table corrupted");
1767 }
1768
1769 if (lock->nRequested == 0)
1770 {
1771 /*
1772 * The caller just released the last lock, so garbage-collect the lock
1773 * object.
1774 */
1775 LOCK_PRINT("CleanUpLock: deleting", lock, 0);
1778 &(lock->tag),
1779 hashcode,
1781 NULL))
1782 elog(PANIC, "lock table corrupted");
1783 }
1784 else if (wakeupNeeded)
1785 {
1786 /* There are waiters on this lock, so wake them up. */
1788 }
1789}
1790
1791/*
1792 * GrantLockLocal -- update the locallock data structures to show
1793 * the lock request has been granted.
1794 *
1795 * We expect that LockAcquire made sure there is room to add a new
1796 * ResourceOwner entry.
1797 */
1798static void
/* NOTE(review): line 1799 elided -- presumably
 * "GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner)"; confirm. */
1800{
1801 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
1802 int i;
1803
1804 Assert(locallock->numLockOwners < locallock->maxLockOwners);
1805 /* Count the total */
1806 locallock->nLocks++;
1807 /* Count the per-owner lock */
1808 for (i = 0; i < locallock->numLockOwners; i++)
1809 {
1810 if (lockOwners[i].owner == owner)
1811 {
1812 lockOwners[i].nLocks++;
1813 return;
1814 }
1815 }
/* No existing entry for this owner: append one (room asserted above). */
1816 lockOwners[i].owner = owner;
1817 lockOwners[i].nLocks = 1;
1818 locallock->numLockOwners++;
1819 if (owner != NULL)
/* NOTE(review): line 1820 elided -- presumably the
 * ResourceOwnerRememberLock(owner, locallock) call; confirm. */
1821
1822 /* Indicate that the lock is acquired for certain types of locks. */
1824}
1825
1826/*
1827 * BeginStrongLockAcquire - inhibit use of fastpath for a given LOCALLOCK,
1828 * and arrange for error cleanup if it fails
1829 */
1830static void
/* NOTE(review): lines 1831 (signature) and 1833 elided by the extraction;
 * verify against upstream lock.c. */
1832{
1834 Assert(locallock->holdsStrongLockCount == false);
1835
1836 /*
1837 * Adding to a memory location is not atomic, so we take a spinlock to
1838 * ensure we don't collide with someone else trying to bump the count at
1839 * the same time.
1840 *
1841 * XXX: It might be worth considering using an atomic fetch-and-add
1842 * instruction here, on architectures where that is supported.
1843 */
1844
/* NOTE(review): lines 1845-1846 and 1848-1849 elided -- presumably the
 * spinlock acquire, counter increment, and spinlock release; confirm. */
1847 locallock->holdsStrongLockCount = true;
1850}
1851
1852/*
1853 * FinishStrongLockAcquire - cancel pending cleanup for a strong lock
1854 * acquisition once it's no longer needed
1855 */
1856static void
/* NOTE(review): lines 1857 (signature) and 1859 (body) elided by the
 * extraction; the one-line body is not visible here -- confirm upstream. */
1858{
1860}
1861
1862/*
1863 * AbortStrongLockAcquire - undo strong lock state changes performed by
1864 * BeginStrongLockAcquire.
1865 */
1866void
/* NOTE(review): line 1867 (signature, presumably "AbortStrongLockAcquire(void)")
 * and lines 1869-1870 (locals fetching the pending locallock) elided. */
1868{
1871
1872 if (locallock == NULL)
1873 return;
1874
/* NOTE(review): lines 1875, 1877-1879 and 1881-1882 elided -- presumably the
 * fastpath-hash assertion and the spinlocked counter decrement; confirm. */
1876 Assert(locallock->holdsStrongLockCount == true);
1880 locallock->holdsStrongLockCount = false;
1883}
1884
1885/*
1886 * GrantAwaitedLock -- call GrantLockLocal for the lock we are doing
1887 * WaitOnLock on.
1888 *
1889 * proc.c needs this for the case where we are booted off the lock by
1890 * timeout, but discover that someone granted us the lock anyway.
1891 *
1892 * We could just export GrantLockLocal, but that would require including
1893 * resowner.h in lock.h, which creates circularity.
1894 */
1895void
1896GrantAwaitedLock(void)
1897{
/* NOTE(review): line 1898 elided -- per the header comment, presumably
 * "GrantLockLocal(awaitedLock, awaitedOwner);" -- confirm upstream. */
1899}
1900
1901/*
1902 * GetAwaitedLock -- Return the lock we're currently doing WaitOnLock on.
1903 */
1904LOCALLOCK *
1905GetAwaitedLock(void)
1906{
1907 return awaitedLock;
1908}
1909
1910/*
1911 * ResetAwaitedLock -- Forget that we are waiting on a lock.
1912 */
1913void
1914ResetAwaitedLock(void)
1915{
1916 awaitedLock = NULL;
1917}
1918
1919/*
1920 * MarkLockClear -- mark an acquired lock as "clear"
1921 *
1922 * This means that we know we have absorbed all sinval messages that other
1923 * sessions generated before we acquired this lock, and so we can confidently
1924 * assume we know about any catalog changes protected by this lock.
1925 */
1926void
/* NOTE(review): line 1927 elided -- presumably
 * "MarkLockClear(LOCALLOCK *locallock)"; confirm upstream. */
1928{
1929 Assert(locallock->nLocks > 0);
1930 locallock->lockCleared = true;
1931}
1932
/*
 * NOTE(review): the doxygen extraction dropped several lines in this function
 * (1939 signature, 1941-1942 locals, 1952-1955 error-context setup, 1964,
 * 1986 the ProcSleep call, 1993, 2006, 2008); verify against upstream lock.c.
 */
1933/*
1934 * WaitOnLock -- wait to acquire a lock
1935 *
1936 * This is a wrapper around ProcSleep, with extra tracing and bookkeeping.
1937 */
1938static ProcWaitStatus
1940{
1943
1944 TRACE_POSTGRESQL_LOCK_WAIT_START(locallock->tag.lock.locktag_field1,
1945 locallock->tag.lock.locktag_field2,
1946 locallock->tag.lock.locktag_field3,
1947 locallock->tag.lock.locktag_field4,
1948 locallock->tag.lock.locktag_type,
1949 locallock->tag.mode);
1950
1951 /* Setup error traceback support for ereport() */
1956
1957 /* adjust the process title to indicate that it's waiting */
1958 set_ps_display_suffix("waiting");
1959
1960 /*
1961 * Record the fact that we are waiting for a lock, so that
1962 * LockErrorCleanup will clean up if cancel/die happens.
1963 */
1965 awaitedOwner = owner;
1966
1967 /*
1968 * NOTE: Think not to put any shared-state cleanup after the call to
1969 * ProcSleep, in either the normal or failure path. The lock state must
1970 * be fully set by the lock grantor, or by CheckDeadLock if we give up
1971 * waiting for the lock. This is necessary because of the possibility
1972 * that a cancel/die interrupt will interrupt ProcSleep after someone else
1973 * grants us the lock, but before we've noticed it. Hence, after granting,
1974 * the locktable state must fully reflect the fact that we own the lock;
1975 * we can't do additional work on return.
1976 *
1977 * We can and do use a PG_TRY block to try to clean up after failure, but
1978 * this still has a major limitation: elog(FATAL) can occur while waiting
1979 * (eg, a "die" interrupt), and then control won't come back here. So all
1980 * cleanup of essential state should happen in LockErrorCleanup, not here.
1981 * We can use PG_TRY to clear the "waiting" status flags, since doing that
1982 * is unimportant if the process exits.
1983 */
1984 PG_TRY();
1985 {
1987 }
1988 PG_CATCH();
1989 {
1990 /* In this path, awaitedLock remains set until LockErrorCleanup */
1991
1992 /* reset ps display to remove the suffix */
1994
1995 /* and propagate the error */
1996 PG_RE_THROW();
1997 }
1998 PG_END_TRY();
1999
2000 /*
2001 * We no longer want LockErrorCleanup to do anything.
2002 */
2003 awaitedLock = NULL;
2004
2005 /* reset ps display to remove the suffix */
2007
2009
2010 TRACE_POSTGRESQL_LOCK_WAIT_DONE(locallock->tag.lock.locktag_field1,
2011 locallock->tag.lock.locktag_field2,
2012 locallock->tag.lock.locktag_field3,
2013 locallock->tag.lock.locktag_field4,
2014 locallock->tag.lock.locktag_type,
2015 locallock->tag.mode);
2016
2017 return result;
2018}
2019
2020/*
2021 * error context callback for failures in WaitOnLock
2022 *
2023 * We report which lock was being waited on, in the same style used in
2024 * deadlock reports. This helps with lock timeout errors in particular.
2025 */
2026static void
/* NOTE(review): lines 2027 (signature), 2029, 2032, 2034-2035 and 2038 elided
 * by the extraction -- presumably the callback arg, the locallock local, the
 * StringInfoData declaration, its initialization/DescribeLockTag call, and
 * the GetLockmodeName argument; confirm against upstream lock.c. */
2028{
2030 const LOCKTAG *tag = &locallock->tag.lock;
2031 LOCKMODE mode = locallock->tag.mode;
2033
2036
2037 errcontext("waiting for %s on %s",
2039 locktagbuf.data);
2040}
2041
/*
 * NOTE(review): the doxygen extraction dropped lines 2058, 2061-2062, 2065,
 * 2068 and 2083 from this function; verify against upstream lock.c.
 */
2042/*
2043 * Remove a proc from the wait-queue it is on (caller must know it is on one).
2044 * This is only used when the proc has failed to get the lock, so we set its
2045 * waitStatus to PROC_WAIT_STATUS_ERROR.
2046 *
2047 * Appropriate partition lock must be held by caller. Also, caller is
2048 * responsible for signaling the proc if needed.
2049 *
2050 * NB: this does not clean up any locallock object that may exist for the lock.
2051 */
2052void
2053RemoveFromWaitQueue(PGPROC *proc, uint32 hashcode)
2054{
2055 LOCK *waitLock = proc->waitLock;
2056 PROCLOCK *proclock = proc->waitProcLock;
2057 LOCKMODE lockmode = proc->waitLockMode;
/* NOTE(review): line 2058 elided -- presumably the lockmethodid local used
 * in the CleanUpLock call below; confirm. */
2059
2060 /* Make sure proc is waiting */
2063 Assert(waitLock);
2064 Assert(!dclist_is_empty(&waitLock->waitProcs));
2066
2067 /* Remove proc from lock's wait queue */
/* NOTE(review): line 2068 elided -- presumably the dclist_delete_from(...)
 * call that unlinks proc from waitLock->waitProcs; confirm. */
2069
2070 /* Undo increments of request counts by waiting process */
2071 Assert(waitLock->nRequested > 0);
2072 Assert(waitLock->nRequested > proc->waitLock->nGranted);
2073 waitLock->nRequested--;
2074 Assert(waitLock->requested[lockmode] > 0);
2075 waitLock->requested[lockmode]--;
2076 /* don't forget to clear waitMask bit if appropriate */
2077 if (waitLock->granted[lockmode] == waitLock->requested[lockmode])
2078 waitLock->waitMask &= LOCKBIT_OFF(lockmode);
2079
2080 /* Clean up the proc's own state, and pass it the ok/fail signal */
2081 proc->waitLock = NULL;
2082 proc->waitProcLock = NULL;
2084
2085 /*
2086 * Delete the proclock immediately if it represents no already-held locks.
2087 * (This must happen now because if the owner of the lock decides to
2088 * release it, and the requested/granted counts then go to zero,
2089 * LockRelease expects there to be no remaining proclocks.) Then see if
2090 * any other waiters for the lock can be woken up now.
2091 */
2092 CleanUpLock(waitLock, proclock,
2093 LockMethods[lockmethodid], hashcode,
2094 true);
2095}
2096
/*
 * NOTE(review): many source lines are elided by the doxygen extraction in
 * this function (2111-2114 locals, 2120/2123 lockmethod validity tests,
 * 2140 locallock hash_search, 2214, 2222-2225 fast-path release, 2236-2238
 * partition-lock acquire, 2251/2254/2265 re-find lookups, 2283/2286, 2299 and
 * 2301); verify against upstream lock.c before relying on this listing.
 */
2097/*
2098 * LockRelease -- look up 'locktag' and release one 'lockmode' lock on it.
2099 * Release a session lock if 'sessionLock' is true, else release a
2100 * regular transaction lock.
2101 *
2102 * Side Effects: find any waiting processes that are now wakable,
2103 * grant them their requested locks and awaken them.
2104 * (We have to grant the lock here to avoid a race between
2105 * the waking process and any new process to
2106 * come along and request the lock.)
2107 */
2108bool
2109LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
2110{
2115 LOCK *lock;
2116 PROCLOCK *proclock;
2118 bool wakeupNeeded;
2119
2121 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2124 elog(ERROR, "unrecognized lock mode: %d", lockmode);
2125
2126#ifdef LOCK_DEBUG
2127 if (LOCK_DEBUG_ENABLED(locktag))
2128 elog(LOG, "LockRelease: lock [%u,%u] %s",
2129 locktag->locktag_field1, locktag->locktag_field2,
2130 lockMethodTable->lockModeNames[lockmode]);
2131#endif
2132
2133 /*
2134 * Find the LOCALLOCK entry for this lock and lockmode
2135 */
2136 MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
2137 localtag.lock = *locktag;
2138 localtag.mode = lockmode;
2139
2141 &localtag,
2142 HASH_FIND, NULL);
2143
2144 /*
2145 * let the caller print its own error message, too. Do not ereport(ERROR).
2146 */
2147 if (!locallock || locallock->nLocks <= 0)
2148 {
2149 elog(WARNING, "you don't own a lock of type %s",
2150 lockMethodTable->lockModeNames[lockmode]);
2151 return false;
2152 }
2153
2154 /*
2155 * Decrease the count for the resource owner.
2156 */
2157 {
2158 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
2159 ResourceOwner owner;
2160 int i;
2161
2162 /* Identify owner for lock */
2163 if (sessionLock)
2164 owner = NULL;
2165 else
2166 owner = CurrentResourceOwner;
2167
2168 for (i = locallock->numLockOwners - 1; i >= 0; i--)
2169 {
2170 if (lockOwners[i].owner == owner)
2171 {
2172 Assert(lockOwners[i].nLocks > 0);
2173 if (--lockOwners[i].nLocks == 0)
2174 {
2175 if (owner != NULL)
/* NOTE(review): line 2176 elided -- presumably the
 * ResourceOwnerForgetLock(owner, locallock) call; confirm. */
2177 /* compact out unused slot */
2178 locallock->numLockOwners--;
2179 if (i < locallock->numLockOwners)
2180 lockOwners[i] = lockOwners[locallock->numLockOwners];
2181 }
2182 break;
2183 }
2184 }
2185 if (i < 0)
2186 {
2187 /* don't release a lock belonging to another owner */
2188 elog(WARNING, "you don't own a lock of type %s",
2189 lockMethodTable->lockModeNames[lockmode]);
2190 return false;
2191 }
2192 }
2193
2194 /*
2195 * Decrease the total local count. If we're still holding the lock, we're
2196 * done.
2197 */
2198 locallock->nLocks--;
2199
2200 if (locallock->nLocks > 0)
2201 return true;
2202
2203 /*
2204 * At this point we can no longer suppose we are clear of invalidation
2205 * messages related to this lock. Although we'll delete the LOCALLOCK
2206 * object before any intentional return from this routine, it seems worth
2207 * the trouble to explicitly reset lockCleared right now, just in case
2208 * some error prevents us from deleting the LOCALLOCK.
2209 */
2210 locallock->lockCleared = false;
2211
2212 /* Attempt fast release of any lock eligible for the fast path. */
2213 if (EligibleForRelationFastPath(locktag, lockmode) &&
2215 {
2216 bool released;
2217
2218 /*
2219 * We might not find the lock here, even if we originally entered it
2220 * here. Another backend may have moved it to the main table.
2221 */
2224 lockmode);
2226 if (released)
2227 {
2229 return true;
2230 }
2231 }
2232
2233 /*
2234 * Otherwise we've got to mess with the shared lock table.
2235 */
2237
2239
2240 /*
2241 * Normally, we don't need to re-find the lock or proclock, since we kept
2242 * their addresses in the locallock table, and they couldn't have been
2243 * removed while we were holding a lock on them. But it's possible that
2244 * the lock was taken fast-path and has since been moved to the main hash
2245 * table by another backend, in which case we will need to look up the
2246 * objects here. We assume the lock field is NULL if so.
2247 */
2248 lock = locallock->lock;
2249 if (!lock)
2250 {
2252
2253 Assert(EligibleForRelationFastPath(locktag, lockmode));
2255 locktag,
2256 locallock->hashcode,
2257 HASH_FIND,
2258 NULL);
2259 if (!lock)
2260 elog(ERROR, "failed to re-find shared lock object");
2261 locallock->lock = lock;
2262
2263 proclocktag.myLock = lock;
2264 proclocktag.myProc = MyProc;
2266 &proclocktag,
2267 HASH_FIND,
2268 NULL);
2269 if (!locallock->proclock)
2270 elog(ERROR, "failed to re-find shared proclock object");
2271 }
2272 LOCK_PRINT("LockRelease: found", lock, lockmode);
2273 proclock = locallock->proclock;
2274 PROCLOCK_PRINT("LockRelease: found", proclock);
2275
2276 /*
2277 * Double-check that we are actually holding a lock of the type we want to
2278 * release.
2279 */
2280 if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
2281 {
2282 PROCLOCK_PRINT("LockRelease: WRONGTYPE", proclock);
2284 elog(WARNING, "you don't own a lock of type %s",
2285 lockMethodTable->lockModeNames[lockmode]);
2287 return false;
2288 }
2289
2290 /*
2291 * Do the releasing. CleanUpLock will waken any now-wakable waiters.
2292 */
2293 wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
2294
2295 CleanUpLock(lock, proclock,
2296 lockMethodTable, locallock->hashcode,
2297 wakeupNeeded);
2298
2300
2302 return true;
2303}
2304
2305/*
2306 * LockReleaseAll -- Release all locks of the specified lock method that
2307 * are held by the current process.
2308 *
2309 * Well, not necessarily *all* locks. The available behaviors are:
2310 * allLocks == true: release all locks including session locks.
2311 * allLocks == false: release all non-session locks.
2312 */
2313void
2315{
2316 HASH_SEQ_STATUS status;
2318 int i,
2319 numLockModes;
2321 LOCK *lock;
2322 int partition;
2323 bool have_fast_path_lwlock = false;
2324
2326 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2328
2329#ifdef LOCK_DEBUG
2330 if (*(lockMethodTable->trace_flag))
2331 elog(LOG, "LockReleaseAll: lockmethod=%d", lockmethodid);
2332#endif
2333
2334 /*
2335 * Get rid of our fast-path VXID lock, if appropriate. Note that this is
2336 * the only way that the lock we hold on our own VXID can ever get
2337 * released: it is always and only released when a toplevel transaction
2338 * ends.
2339 */
2342
2343 numLockModes = lockMethodTable->numLockModes;
2344
2345 /*
2346 * First we run through the locallock table and get rid of unwanted
2347 * entries, then we scan the process's proclocks and get rid of those. We
2348 * do this separately because we may have multiple locallock entries
2349 * pointing to the same proclock, and we daren't end up with any dangling
2350 * pointers. Fast-path locks are cleaned up during the locallock table
2351 * scan, though.
2352 */
2354
2355 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2356 {
2357 /*
2358 * If the LOCALLOCK entry is unused, something must've gone wrong
2359 * while trying to acquire this lock. Just forget the local entry.
2360 */
2361 if (locallock->nLocks == 0)
2362 {
2364 continue;
2365 }
2366
2367 /* Ignore items that are not of the lockmethod to be removed */
2369 continue;
2370
2371 /*
2372 * If we are asked to release all locks, we can just zap the entry.
2373 * Otherwise, must scan to see if there are session locks. We assume
2374 * there is at most one lockOwners entry for session locks.
2375 */
2376 if (!allLocks)
2377 {
2378 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
2379
2380 /* If session lock is above array position 0, move it down to 0 */
2381 for (i = 0; i < locallock->numLockOwners; i++)
2382 {
2383 if (lockOwners[i].owner == NULL)
2384 lockOwners[0] = lockOwners[i];
2385 else
2386 ResourceOwnerForgetLock(lockOwners[i].owner, locallock);
2387 }
2388
2389 if (locallock->numLockOwners > 0 &&
2390 lockOwners[0].owner == NULL &&
2391 lockOwners[0].nLocks > 0)
2392 {
2393 /* Fix the locallock to show just the session locks */
2394 locallock->nLocks = lockOwners[0].nLocks;
2395 locallock->numLockOwners = 1;
2396 /* We aren't deleting this locallock, so done */
2397 continue;
2398 }
2399 else
2400 locallock->numLockOwners = 0;
2401 }
2402
2403#ifdef USE_ASSERT_CHECKING
2404
2405 /*
2406 * Tuple locks are currently held only for short durations within a
2407 * transaction. Check that we didn't forget to release one.
2408 */
2410 elog(WARNING, "tuple lock held at commit");
2411#endif
2412
2413 /*
2414 * If the lock or proclock pointers are NULL, this lock was taken via
2415 * the relation fast-path (and is not known to have been transferred).
2416 */
2417 if (locallock->proclock == NULL || locallock->lock == NULL)
2418 {
2419 LOCKMODE lockmode = locallock->tag.mode;
2420 Oid relid;
2421
2422 /* Verify that a fast-path lock is what we've got. */
2423 if (!EligibleForRelationFastPath(&locallock->tag.lock, lockmode))
2424 elog(PANIC, "locallock table corrupted");
2425
2426 /*
2427 * If we don't currently hold the LWLock that protects our
2428 * fast-path data structures, we must acquire it before attempting
2429 * to release the lock via the fast-path. We will continue to
2430 * hold the LWLock until we're done scanning the locallock table,
2431 * unless we hit a transferred fast-path lock. (XXX is this
2432 * really such a good idea? There could be a lot of entries ...)
2433 */
2435 {
2437 have_fast_path_lwlock = true;
2438 }
2439
2440 /* Attempt fast-path release. */
2441 relid = locallock->tag.lock.locktag_field2;
2442 if (FastPathUnGrantRelationLock(relid, lockmode))
2443 {
2445 continue;
2446 }
2447
2448 /*
2449 * Our lock, originally taken via the fast path, has been
2450 * transferred to the main lock table. That's going to require
2451 * some extra work, so release our fast-path lock before starting.
2452 */
2454 have_fast_path_lwlock = false;
2455
2456 /*
2457 * Now dump the lock. We haven't got a pointer to the LOCK or
2458 * PROCLOCK in this case, so we have to handle this a bit
2459 * differently than a normal lock release. Unfortunately, this
2460 * requires an extra LWLock acquire-and-release cycle on the
2461 * partitionLock, but hopefully it shouldn't happen often.
2462 */
2464 &locallock->tag.lock, lockmode, false);
2466 continue;
2467 }
2468
2469 /* Mark the proclock to show we need to release this lockmode */
2470 if (locallock->nLocks > 0)
2471 locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
2472
2473 /* And remove the locallock hashtable entry */
2475 }
2476
2477 /* Done with the fast-path data structures */
2480
2481 /*
2482 * Now, scan each lock partition separately.
2483 */
2485 {
2487 dlist_head *procLocks = &MyProc->myProcLocks[partition];
2489
2491
2492 /*
2493 * If the proclock list for this partition is empty, we can skip
2494 * acquiring the partition lock. This optimization is trickier than
2495 * it looks, because another backend could be in process of adding
2496 * something to our proclock list due to promoting one of our
2497 * fast-path locks. However, any such lock must be one that we
2498 * decided not to delete above, so it's okay to skip it again now;
2499 * we'd just decide not to delete it again. We must, however, be
2500 * careful to re-fetch the list header once we've acquired the
2501 * partition lock, to be sure we have a valid, up-to-date pointer.
2502 * (There is probably no significant risk if pointer fetch/store is
2503 * atomic, but we don't wish to assume that.)
2504 *
2505 * XXX This argument assumes that the locallock table correctly
2506 * represents all of our fast-path locks. While allLocks mode
2507 * guarantees to clean up all of our normal locks regardless of the
2508 * locallock situation, we lose that guarantee for fast-path locks.
2509 * This is not ideal.
2510 */
2511 if (dlist_is_empty(procLocks))
2512 continue; /* needn't examine this partition */
2513
2515
2517 {
2518 PROCLOCK *proclock = dlist_container(PROCLOCK, procLink, proclock_iter.cur);
2519 bool wakeupNeeded = false;
2520
2521 Assert(proclock->tag.myProc == MyProc);
2522
2523 lock = proclock->tag.myLock;
2524
2525 /* Ignore items that are not of the lockmethod to be removed */
2526 if (LOCK_LOCKMETHOD(*lock) != lockmethodid)
2527 continue;
2528
2529 /*
2530 * In allLocks mode, force release of all locks even if locallock
2531 * table had problems
2532 */
2533 if (allLocks)
2534 proclock->releaseMask = proclock->holdMask;
2535 else
2536 Assert((proclock->releaseMask & ~proclock->holdMask) == 0);
2537
2538 /*
2539 * Ignore items that have nothing to be released, unless they have
2540 * holdMask == 0 and are therefore recyclable
2541 */
2542 if (proclock->releaseMask == 0 && proclock->holdMask != 0)
2543 continue;
2544
2545 PROCLOCK_PRINT("LockReleaseAll", proclock);
2546 LOCK_PRINT("LockReleaseAll", lock, 0);
2547 Assert(lock->nRequested >= 0);
2548 Assert(lock->nGranted >= 0);
2549 Assert(lock->nGranted <= lock->nRequested);
2550 Assert((proclock->holdMask & ~lock->grantMask) == 0);
2551
2552 /*
2553 * Release the previously-marked lock modes
2554 */
2555 for (i = 1; i <= numLockModes; i++)
2556 {
2557 if (proclock->releaseMask & LOCKBIT_ON(i))
2558 wakeupNeeded |= UnGrantLock(lock, i, proclock,
2560 }
2561 Assert((lock->nRequested >= 0) && (lock->nGranted >= 0));
2562 Assert(lock->nGranted <= lock->nRequested);
2563 LOCK_PRINT("LockReleaseAll: updated", lock, 0);
2564
2565 proclock->releaseMask = 0;
2566
2567 /* CleanUpLock will wake up waiters if needed. */
2568 CleanUpLock(lock, proclock,
2570 LockTagHashCode(&lock->tag),
2571 wakeupNeeded);
2572 } /* loop over PROCLOCKs within this partition */
2573
2575 } /* loop over partitions */
2576
2577#ifdef LOCK_DEBUG
2578 if (*(lockMethodTable->trace_flag))
2579 elog(LOG, "LockReleaseAll done");
2580#endif
2581}
2582
 2583/*
 2584 * LockReleaseSession -- Release all session locks of the specified lock method
 2585 * that are held by the current process.
 2586 *
 2587 * Scans the backend-local LOCALLOCK hash table and releases each matching
 2588 * entry as a session-level lock (no ResourceOwner involved).
 2589 */
 2587void
/* NOTE(review): the parameter line (LOCKMETHODID lockmethodid) and several body
 * lines (locallock declaration, lockmethodid validity check, hash_seq_init,
 * lockmethod filter, the LockRelease/ReleaseLockIfHeld call) were dropped by
 * the extraction of this listing — verify against upstream lock.c. */
 2589{
 2590 HASH_SEQ_STATUS status;
 2592
 2594 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
 2595
 2597
 2598 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
 2599 {
 2600 /* Ignore items that are not of the specified lock method */
 2602 continue;
 2603
 2605 }
 2606}
2607
 2608/*
 2609 * LockReleaseCurrentOwner
 2610 * Release all locks belonging to CurrentResourceOwner
 2611 *
 2612 * If the caller knows what those locks are, it can pass them as an array.
 2613 * That speeds up the call significantly, when a lot of locks are held.
 2614 * Otherwise, pass NULL for locallocks, and we'll traverse through our hash
 2615 * table to find them.
 2616 */
 2617void
/* NOTE(review): the parameter line (LOCALLOCK **locallocks, int nlocks) and a
 * few body lines (locallock declaration, hash_seq_init, the per-entry
 * ReleaseLockIfHeld calls) are missing from this extracted listing — confirm
 * against upstream lock.c. */
 2619{
 2620 if (locallocks == NULL)
 2621 {
 2622 HASH_SEQ_STATUS status;
 2624
 2626
 2627 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
 2629 }
 2630 else
 2631 {
 2632 int i;
 2633
 2634 /* Walk the caller-supplied array backwards; order is immaterial here. */
 2635 for (i = nlocks - 1; i >= 0; i--)
 2636 }
 2637}
2638
 2639/*
 2640 * ReleaseLockIfHeld
 2641 * Release any session-level locks on this lockable object if sessionLock
 2642 * is true; else, release any locks held by CurrentResourceOwner.
 2643 *
 2644 * It is tempting to pass this a ResourceOwner pointer (or NULL for session
 2645 * locks), but without refactoring LockRelease() we cannot support releasing
 2646 * locks belonging to resource owners other than CurrentResourceOwner.
 2647 * If we were to refactor, it'd be a good idea to fix it so we don't have to
 2648 * do a hashtable lookup of the locallock, too. However, currently this
 2649 * function isn't used heavily enough to justify refactoring for its
 2650 * convenience.
 2651 */
 2652static void
/* NOTE(review): the signature line (LOCALLOCK *locallock, bool sessionLock)
 * and at least one body line (the ResourceOwnerForgetLock call under the
 * "owner != NULL" branch) were dropped by this extracted listing — verify
 * against upstream lock.c. */
 2654{
 2655 ResourceOwner owner;
 2656 LOCALLOCKOWNER *lockOwners;
 2657 int i;
 2658
 2659 /* Identify owner for lock (must match LockRelease!) */
 2660 if (sessionLock)
 2661 owner = NULL;
 2662 else
 2663 owner = CurrentResourceOwner;
 2664
 2665 /* Scan to see if there are any locks belonging to the target owner */
 2666 lockOwners = locallock->lockOwners;
 2667 for (i = locallock->numLockOwners - 1; i >= 0; i--)
 2668 {
 2669 if (lockOwners[i].owner == owner)
 2670 {
 2671 Assert(lockOwners[i].nLocks > 0);
 2672 if (lockOwners[i].nLocks < locallock->nLocks)
 2673 {
 2674 /*
 2675 * We will still hold this lock after forgetting this
 2676 * ResourceOwner.
 2677 */
 2678 locallock->nLocks -= lockOwners[i].nLocks;
 2679 /* compact out unused slot */
 2680 locallock->numLockOwners--;
 2681 if (owner != NULL)
 2683 if (i < locallock->numLockOwners)
 2684 lockOwners[i] = lockOwners[locallock->numLockOwners];
 2685 }
 2686 else
 2687 {
 2688 Assert(lockOwners[i].nLocks == locallock->nLocks);
 2689 /* We want to call LockRelease just once */
 2690 lockOwners[i].nLocks = 1;
 2691 locallock->nLocks = 1;
 2692 if (!LockRelease(&locallock->tag.lock,
 2693 locallock->tag.mode,
 2694 sessionLock))
 2695 elog(WARNING, "ReleaseLockIfHeld: failed??");
 2696 }
 2697 /* At most one lockOwners entry can match the target owner. */
 2698 break;
 2699 }
 2700 }
 2701}
2701
 2702/*
 2703 * LockReassignCurrentOwner
 2704 * Reassign all locks belonging to CurrentResourceOwner to belong
 2705 * to its parent resource owner.
 2706 *
 2707 * If the caller knows what those locks are, it can pass them as an array.
 2708 * That speeds up the call significantly, when a lot of locks are held
 2709 * (e.g pg_dump with a large schema). Otherwise, pass NULL for locallocks,
 2710 * and we'll traverse through our hash table to find them.
 2711 */
 2712void
/* NOTE(review): the parameter line (LOCALLOCK **locallocks, int nlocks), the
 * parent-owner lookup, the locallock declaration, hash_seq_init, and the
 * per-entry LockReassignOwner call in the hash-scan branch are missing from
 * this extracted listing — confirm against upstream lock.c. */
 2714{
 2716
 2717 Assert(parent != NULL);
 2718
 2719 if (locallocks == NULL)
 2720 {
 2721 HASH_SEQ_STATUS status;
 2723
 2725
 2726 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
 2728 }
 2729 else
 2730 {
 2731 int i;
 2732
 2733 for (i = nlocks - 1; i >= 0; i--)
 2734 LockReassignOwner(locallocks[i], parent);
 2735 }
 2736}
2737
 2738/*
 2739 * Subroutine of LockReassignCurrentOwner. Reassigns a given lock belonging to
 2740 * CurrentResourceOwner to its parent.
 2741 */
 2742static void
/* NOTE(review): the signature line (LOCALLOCK *locallock, ResourceOwner parent)
 * and a few body lines (ResourceOwnerForgetLock / ResourceOwnerRememberLock
 * bookkeeping around the slot handoff and at function end) were dropped by
 * this extracted listing — verify against upstream lock.c. */
 2744{
 2745 LOCALLOCKOWNER *lockOwners;
 2746 int i;
 2747 int ic = -1;
 2748 int ip = -1;
 2749
 2750 /*
 2751 * Scan to see if there are any locks belonging to current owner or its
 2752 * parent
 2753 */
 2754 lockOwners = locallock->lockOwners;
 2755 for (i = locallock->numLockOwners - 1; i >= 0; i--)
 2756 {
 2757 if (lockOwners[i].owner == CurrentResourceOwner)
 2758 ic = i;
 2759 else if (lockOwners[i].owner == parent)
 2760 ip = i;
 2761 }
 2762
 2763 if (ic < 0)
 2764 return; /* no current locks */
 2765
 2766 if (ip < 0)
 2767 {
 2768 /* Parent has no slot, so just give it the child's slot */
 2769 lockOwners[ic].owner = parent;
 2771 }
 2772 else
 2773 {
 2774 /* Merge child's count with parent's */
 2775 lockOwners[ip].nLocks += lockOwners[ic].nLocks;
 2776 /* compact out unused slot */
 2777 locallock->numLockOwners--;
 2778 if (ic < locallock->numLockOwners)
 2779 lockOwners[ic] = lockOwners[locallock->numLockOwners];
 2780 }
 2782}
2783
 2784/*
 2785 * FastPathGrantRelationLock
 2786 * Grant lock using per-backend fast-path array, if there is space.
 2787 *
 2788 * Returns true if the lock was recorded in MyProc's fast-path array
 2789 * (either in an existing slot for this relid or a fresh empty slot),
 2790 * false if the relevant group is full and the caller must fall back
 2791 * to the main lock table.
 2792 */
 2788static bool
 2789FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode)
 2790{
 2791 uint32 i;
/* NOTE(review): the declaration/initialization of unused_slot (the "no empty
 * slot found" sentinel), the "unused_slot is valid" condition before the
 * claim-empty-slot branch, and the FAST_PATH_SET_LOCKMODE call inside it were
 * dropped by this extracted listing — verify against upstream lock.c. */
 2793
 2794 /* fast-path group the lock belongs to */
 2795 uint32 group = FAST_PATH_REL_GROUP(relid);
 2796
 2797 /* Scan for existing entry for this relid, remembering empty slot. */
 2798 for (i = 0; i < FP_LOCK_SLOTS_PER_GROUP; i++)
 2799 {
 2800 /* index into the whole per-backend array */
 2801 uint32 f = FAST_PATH_SLOT(group, i);
 2802
 2803 if (FAST_PATH_GET_BITS(MyProc, f) == 0)
 2804 unused_slot = f;
 2805 else if (MyProc->fpRelId[f] == relid)
 2806 {
 2807 Assert(!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode));
 2808 FAST_PATH_SET_LOCKMODE(MyProc, f, lockmode);
 2809 return true;
 2810 }
 2811 }
 2812
 2813 /* If no existing entry, use any empty slot. */
 2815 {
 2816 MyProc->fpRelId[unused_slot] = relid;
 2818 ++FastPathLocalUseCounts[group];
 2819 return true;
 2820 }
 2821
 2822 /* No existing entry, and no empty slot. */
 2823 return false;
 2824}
2825
 2826/*
 2827 * FastPathUnGrantRelationLock
 2828 * Release fast-path lock, if present. Update backend-private local
 2829 * use count, while we're at it.
 2830 *
 2831 * Returns true if a matching fast-path lock was found and cleared,
 2832 * false if this backend holds no such fast-path lock (e.g. it was
 2833 * transferred to the main lock table by another backend).
 2834 */
 2831static bool
/* NOTE(review): the signature line (Oid relid, LOCKMODE lockmode) was dropped
 * by this extracted listing — verify against upstream lock.c. */
 2833{
 2834 uint32 i;
 2835 bool result = false;
 2836
 2837 /* fast-path group the lock belongs to */
 2838 uint32 group = FAST_PATH_REL_GROUP(relid);
 2839
 2840 /* Recompute the group's use count from scratch as we scan. */
 2840 FastPathLocalUseCounts[group] = 0;
 2841 for (i = 0; i < FP_LOCK_SLOTS_PER_GROUP; i++)
 2842 {
 2843 /* index into the whole per-backend array */
 2844 uint32 f = FAST_PATH_SLOT(group, i);
 2845
 2846 if (MyProc->fpRelId[f] == relid
 2847 && FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
 2848 {
 2849 Assert(!result);
 2850 FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
 2851 result = true;
 2852 /* we continue iterating so as to update FastPathLocalUseCount */
 2853 }
 2854 if (FAST_PATH_GET_BITS(MyProc, f) != 0)
 2855 ++FastPathLocalUseCounts[group];
 2856 }
 2857 return result;
 2858}
2859
 2860/*
 2861 * FastPathTransferRelationLocks
 2862 * Transfer locks matching the given lock tag from per-backend fast-path
 2863 * arrays to the shared hash table.
 2864 *
 2865 * Returns true if successful, false if ran out of shared memory.
 2866 */
 2867static bool
/* NOTE(review): the first signature line (LockMethod lockMethodTable,
 * const LOCKTAG *locktag, ...) and several body lines (the per-proc
 * LWLockAcquire of proc->fpInfoLock, the partition-lock acquire/release
 * around the per-slot transfer, the upper bound of the lockmode loop) were
 * dropped by this extracted listing — verify against upstream lock.c. */
 2869 uint32 hashcode)
 2870{
 2872 Oid relid = locktag->locktag_field2;
 2873 uint32 i;
 2874
 2875 /* fast-path group the lock belongs to */
 2876 uint32 group = FAST_PATH_REL_GROUP(relid);
 2877
 2878 /*
 2879 * Every PGPROC that can potentially hold a fast-path lock is present in
 2880 * ProcGlobal->allProcs. Prepared transactions are not, but any
 2881 * outstanding fast-path locks held by prepared transactions are
 2882 * transferred to the main lock table.
 2883 */
 2884 for (i = 0; i < ProcGlobal->allProcCount; i++)
 2885 {
 2886 PGPROC *proc = GetPGProcByNumber(i);
 2887 uint32 j;
 2888
 2890
 2891 /*
 2892 * If the target backend isn't referencing the same database as the
 2893 * lock, then we needn't examine the individual relation IDs at all;
 2894 * none of them can be relevant.
 2895 *
 2896 * proc->databaseId is set at backend startup time and never changes
 2897 * thereafter, so it might be safe to perform this test before
 2898 * acquiring &proc->fpInfoLock. In particular, it's certainly safe to
 2899 * assume that if the target backend holds any fast-path locks, it
 2900 * must have performed a memory-fencing operation (in particular, an
 2901 * LWLock acquisition) since setting proc->databaseId. However, it's
 2902 * less clear that our backend is certain to have performed a memory
 2903 * fencing operation since the other backend set proc->databaseId. So
 2904 * for now, we test it after acquiring the LWLock just to be safe.
 2905 *
 2906 * Also skip groups without any registered fast-path locks.
 2907 */
 2908 if (proc->databaseId != locktag->locktag_field1 ||
 2909 proc->fpLockBits[group] == 0)
 2910 {
 2911 LWLockRelease(&proc->fpInfoLock);
 2912 continue;
 2913 }
 2914
 2915 for (j = 0; j < FP_LOCK_SLOTS_PER_GROUP; j++)
 2916 {
 2917 uint32 lockmode;
 2918
 2919 /* index into the whole per-backend array */
 2920 uint32 f = FAST_PATH_SLOT(group, j);
 2921
 2922 /* Look for an allocated slot matching the given relid. */
 2923 if (relid != proc->fpRelId[f] || FAST_PATH_GET_BITS(proc, f) == 0)
 2924 continue;
 2925
 2926 /* Find or create lock object. */
 2928 for (lockmode = FAST_PATH_LOCKNUMBER_OFFSET;
 2930 ++lockmode)
 2931 {
 2932 PROCLOCK *proclock;
 2933
 2934 if (!FAST_PATH_CHECK_LOCKMODE(proc, f, lockmode))
 2935 continue;
 2936 proclock = SetupLockInTable(lockMethodTable, proc, locktag,
 2937 hashcode, lockmode);
 2938 if (!proclock)
 2939 {
 2941 LWLockRelease(&proc->fpInfoLock);
 2942 return false;
 2943 }
 2944 GrantLock(proclock->tag.myLock, proclock, lockmode);
 2945 FAST_PATH_CLEAR_LOCKMODE(proc, f, lockmode);
 2946 }
 2948
 2949 /* No need to examine remaining slots. */
 2950 break;
 2951 }
 2952 LWLockRelease(&proc->fpInfoLock);
 2953 }
 2954 return true;
 2955}
2956
 2957/*
 2958 * FastPathGetRelationLockEntry
 2959 * Return the PROCLOCK for a lock originally taken via the fast-path,
 2960 * transferring it to the primary lock table if necessary.
 2961 *
 2962 * Note: caller takes care of updating the locallock object.
 2963 */
 2964static PROCLOCK *
/* NOTE(review): the signature line (LOCALLOCK *locallock), various local
 * declarations (lockMethodTable, partitionLock, proclocktag), the
 * LWLockAcquire/LWLockRelease pairs on MyProc->fpInfoLock and on the
 * partition lock, and the hash_search_with_hash_value call targets were
 * dropped by this extracted listing — verify against upstream lock.c. */
 2966{
 2968 LOCKTAG *locktag = &locallock->tag.lock;
 2969 PROCLOCK *proclock = NULL;
 2971 Oid relid = locktag->locktag_field2;
 2972 uint32 i,
 2973 group;
 2974
 2975 /* fast-path group the lock belongs to */
 2976 group = FAST_PATH_REL_GROUP(relid);
 2977
 2979
 2980 for (i = 0; i < FP_LOCK_SLOTS_PER_GROUP; i++)
 2981 {
 2982 uint32 lockmode;
 2983
 2984 /* index into the whole per-backend array */
 2985 uint32 f = FAST_PATH_SLOT(group, i);
 2986
 2987 /* Look for an allocated slot matching the given relid. */
 2988 if (relid != MyProc->fpRelId[f] || FAST_PATH_GET_BITS(MyProc, f) == 0)
 2989 continue;
 2990
 2991 /* If we don't have a lock of the given mode, forget it! */
 2992 lockmode = locallock->tag.mode;
 2993 if (!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
 2994 break;
 2995
 2996 /* Find or create lock object. */
 2998
 2999 proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
 3000 locallock->hashcode, lockmode);
 3001 if (!proclock)
 3002 {
 3005 ereport(ERROR,
 3007 errmsg("out of shared memory"),
 3008 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
 3009 }
 3010 GrantLock(proclock->tag.myLock, proclock, lockmode);
 3011 FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
 3012
 3014
 3015 /* No need to examine remaining slots. */
 3016 break;
 3017 }
 3018
 3020
 3021 /* Lock may have already been transferred by some other backend. */
 3022 if (proclock == NULL)
 3023 {
 3024 LOCK *lock;
 3027
 3029
 3031 locktag,
 3032 locallock->hashcode,
 3033 HASH_FIND,
 3034 NULL);
 3035 if (!lock)
 3036 elog(ERROR, "failed to re-find shared lock object");
 3037
 3038 proclocktag.myLock = lock;
 3039 proclocktag.myProc = MyProc;
 3040
 3042 proclock = (PROCLOCK *)
 3044 &proclocktag,
 3046 HASH_FIND,
 3047 NULL);
 3048 if (!proclock)
 3049 elog(ERROR, "failed to re-find shared proclock object");
 3051 }
 3052
 3053 return proclock;
 3054}
3055
 3056/*
 3057 * GetLockConflicts
 3058 * Get an array of VirtualTransactionIds of xacts currently holding locks
 3059 * that would conflict with the specified lock/lockmode.
 3060 * xacts merely awaiting such a lock are NOT reported.
 3061 *
 3062 * The result array is palloc'd and is terminated with an invalid VXID.
 3063 * *countp, if not null, is updated to the number of items set.
 3064 *
 3065 * Of course, the result could be out of date by the time it's returned, so
 3066 * use of this function has to be thought about carefully. Similarly, a
 3067 * PGPROC with no "lxid" will be considered non-conflicting regardless of any
 3068 * lock it holds. Existing callers don't care about a locker after that
 3069 * locker's pg_xact updates complete. CommitTransaction() clears "lxid" after
 3070 * pg_xact updates and before releasing locks.
 3071 *
 3072 * Note we never include the current xact's vxid in the result array,
 3073 * since an xact never blocks itself.
 3074 */
/* NOTE(review): this extracted listing dropped many lines from this function:
 * the return type + signature split, several local declarations (vxids,
 * lockMethodTable, lockmethodid, conflictMask, partitionLock, proclock_iter,
 * vxid, lockmask), the lockmethodid/lockmode validity checks, the palloc /
 * MemoryContextAlloc allocations, the partition LWLockAcquire, the
 * hash_search_with_hash_value target, the dlist iteration header, and the
 * VirtualTransactionIdIsValid guards. Verify against upstream lock.c. */
 3076GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
 3077{
 3081 LOCK *lock;
 3084 PROCLOCK *proclock;
 3085 uint32 hashcode;
 3087 int count = 0;
 3088 int fast_count = 0;
 3089
 3091 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
 3094 elog(ERROR, "unrecognized lock mode: %d", lockmode);
 3095
 3096 /*
 3097 * Allocate memory to store results, and fill with InvalidVXID. We only
 3098 * need enough space for MaxBackends + max_prepared_xacts + a terminator.
 3099 * InHotStandby allocate once in TopMemoryContext.
 3100 */
 3101 if (InHotStandby)
 3102 {
 3103 if (vxids == NULL)
 3106 sizeof(VirtualTransactionId) *
 3108 }
 3109 else
 3111
 3112 /* Compute hash code and partition lock, and look up conflicting modes. */
 3113 hashcode = LockTagHashCode(locktag);
 3115 conflictMask = lockMethodTable->conflictTab[lockmode];
 3116
 3117 /*
 3118 * Fast path locks might not have been entered in the primary lock table.
 3119 * If the lock we're dealing with could conflict with such a lock, we must
 3120 * examine each backend's fast-path array for conflicts.
 3121 */
 3122 if (ConflictsWithRelationFastPath(locktag, lockmode))
 3123 {
 3124 int i;
 3125 Oid relid = locktag->locktag_field2;
 3127
 3128 /* fast-path group the lock belongs to */
 3129 uint32 group = FAST_PATH_REL_GROUP(relid);
 3130
 3131 /*
 3132 * Iterate over relevant PGPROCs. Anything held by a prepared
 3133 * transaction will have been transferred to the primary lock table,
 3134 * so we need not worry about those. This is all a bit fuzzy, because
 3135 * new locks could be taken after we've visited a particular
 3136 * partition, but the callers had better be prepared to deal with that
 3137 * anyway, since the locks could equally well be taken between the
 3138 * time we return the value and the time the caller does something
 3139 * with it.
 3140 */
 3141 for (i = 0; i < ProcGlobal->allProcCount; i++)
 3142 {
 3143 PGPROC *proc = GetPGProcByNumber(i);
 3144 uint32 j;
 3145
 3146 /* A backend never blocks itself */
 3147 if (proc == MyProc)
 3148 continue;
 3149
 3151
 3152 /*
 3153 * If the target backend isn't referencing the same database as
 3154 * the lock, then we needn't examine the individual relation IDs
 3155 * at all; none of them can be relevant.
 3156 *
 3157 * See FastPathTransferRelationLocks() for discussion of why we do
 3158 * this test after acquiring the lock.
 3159 *
 3160 * Also skip groups without any registered fast-path locks.
 3161 */
 3162 if (proc->databaseId != locktag->locktag_field1 ||
 3163 proc->fpLockBits[group] == 0)
 3164 {
 3165 LWLockRelease(&proc->fpInfoLock);
 3166 continue;
 3167 }
 3168
 3169 for (j = 0; j < FP_LOCK_SLOTS_PER_GROUP; j++)
 3170 {
 3172
 3173 /* index into the whole per-backend array */
 3174 uint32 f = FAST_PATH_SLOT(group, j);
 3175
 3176 /* Look for an allocated slot matching the given relid. */
 3177 if (relid != proc->fpRelId[f])
 3178 continue;
 3179 lockmask = FAST_PATH_GET_BITS(proc, f);
 3180 if (!lockmask)
 3181 continue;
 3183
 3184 /*
 3185 * There can only be one entry per relation, so if we found it
 3186 * and it doesn't conflict, we can skip the rest of the slots.
 3187 */
 3188 if ((lockmask & conflictMask) == 0)
 3189 break;
 3190
 3191 /* Conflict! */
 3192 GET_VXID_FROM_PGPROC(vxid, *proc);
 3193
 3195 vxids[count++] = vxid;
 3196 /* else, xact already committed or aborted */
 3197
 3198 /* No need to examine remaining slots. */
 3199 break;
 3200 }
 3201
 3202 LWLockRelease(&proc->fpInfoLock);
 3203 }
 3204 }
 3205
 3206 /* Remember how many fast-path conflicts we found. */
 3207 fast_count = count;
 3208
 3209 /*
 3210 * Look up the lock object matching the tag.
 3211 */
 3213
 3215 locktag,
 3216 hashcode,
 3217 HASH_FIND,
 3218 NULL);
 3219 if (!lock)
 3220 {
 3221 /*
 3222 * If the lock object doesn't exist, there is nothing holding a lock
 3223 * on this lockable object.
 3224 */
 3226 vxids[count].procNumber = INVALID_PROC_NUMBER;
 3227 vxids[count].localTransactionId = InvalidLocalTransactionId;
 3228 if (countp)
 3229 *countp = count;
 3230 return vxids;
 3231 }
 3232
 3233 /*
 3234 * Examine each existing holder (or awaiter) of the lock.
 3235 */
 3237 {
 3238 proclock = dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
 3239
 3240 if (conflictMask & proclock->holdMask)
 3241 {
 3242 PGPROC *proc = proclock->tag.myProc;
 3243
 3244 /* A backend never blocks itself */
 3245 if (proc != MyProc)
 3246 {
 3248
 3249 GET_VXID_FROM_PGPROC(vxid, *proc);
 3250
 3252 {
 3253 int i;
 3254
 3255 /* Avoid duplicate entries. */
 3256 for (i = 0; i < fast_count; ++i)
 3258 break;
 3259 if (i >= fast_count)
 3260 vxids[count++] = vxid;
 3261 }
 3262 /* else, xact already committed or aborted */
 3263 }
 3264 }
 3265 }
 3266
 3268
 3269 if (count > MaxBackends + max_prepared_xacts) /* should never happen */
 3270 elog(PANIC, "too many conflicting locks found");
 3271
 3272 /* Terminate the array with an invalid VXID, as documented above. */
 3272 vxids[count].procNumber = INVALID_PROC_NUMBER;
 3273 vxids[count].localTransactionId = InvalidLocalTransactionId;
 3274 if (countp)
 3275 *countp = count;
 3276 return vxids;
 3277}
3278
 3279/*
 3280 * Find a lock in the shared lock table and release it. It is the caller's
 3281 * responsibility to verify that this is a sane thing to do. (For example, it
 3282 * would be bad to release a lock here if there might still be a LOCALLOCK
 3283 * object with pointers to it.)
 3284 *
 3285 * We currently use this in two situations: first, to release locks held by
 3286 * prepared transactions on commit (see lock_twophase_postcommit); and second,
 3287 * to release locks taken via the fast-path, transferred to the main hash
 3288 * table, and then released (see LockReleaseAll).
 3289 */
 3290static void
/* NOTE(review): the line carrying this function's name (presumably the
 * LockRefindAndRelease signature — TODO confirm against upstream), the
 * remaining parameter line, several local declarations (proclocktag,
 * partitionLock), the partition LWLockAcquire/Release, the
 * hash_search_with_hash_value / hash_search targets, and the strong-lock
 * count decrement body were dropped by this extracted listing. */
 3292 LOCKTAG *locktag, LOCKMODE lockmode,
 3294{
 3295 LOCK *lock;
 3296 PROCLOCK *proclock;
 3298 uint32 hashcode;
 3301 bool wakeupNeeded;
 3302
 3303 hashcode = LockTagHashCode(locktag);
 3305
 3307
 3308 /*
 3309 * Re-find the lock object (it had better be there).
 3310 */
 3312 locktag,
 3313 hashcode,
 3314 HASH_FIND,
 3315 NULL);
 3316 if (!lock)
 3317 elog(PANIC, "failed to re-find shared lock object");
 3318
 3319 /*
 3320 * Re-find the proclock object (ditto).
 3321 */
 3322 proclocktag.myLock = lock;
 3323 proclocktag.myProc = proc;
 3324
 3326
 3328 &proclocktag,
 3330 HASH_FIND,
 3331 NULL);
 3332 if (!proclock)
 3333 elog(PANIC, "failed to re-find shared proclock object");
 3334
 3335 /*
 3336 * Double-check that we are actually holding a lock of the type we want to
 3337 * release.
 3338 */
 3339 if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
 3340 {
 3341 PROCLOCK_PRINT("lock_twophase_postcommit: WRONGTYPE", proclock);
 3343 elog(WARNING, "you don't own a lock of type %s",
 3344 lockMethodTable->lockModeNames[lockmode]);
 3345 return;
 3346 }
 3347
 3348 /*
 3349 * Do the releasing. CleanUpLock will waken any now-wakable waiters.
 3350 */
 3351 wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
 3352
 3353 CleanUpLock(lock, proclock,
 3354 lockMethodTable, hashcode,
 3355 wakeupNeeded);
 3356
 3358
 3359 /*
 3360 * Decrement strong lock count. This logic is needed only for 2PC.
 3361 */
 3363 && ConflictsWithRelationFastPath(locktag, lockmode))
 3364 {
 3366
 3371 }
 3372}
3373
 3374/*
 3375 * CheckForSessionAndXactLocks
 3376 * Check to see if transaction holds both session-level and xact-level
 3377 * locks on the same object; if so, throw an error.
 3378 *
 3379 * If we have both session- and transaction-level locks on the same object,
 3380 * PREPARE TRANSACTION must fail. This should never happen with regular
 3381 * locks, since we only take those at session level in some special operations
 3382 * like VACUUM. It's possible to hit this with advisory locks, though.
 3383 *
 3384 * It would be nice if we could keep the session hold and give away the
 3385 * transactional hold to the prepared xact. However, that would require two
 3386 * PROCLOCK objects, and we cannot be sure that another PROCLOCK will be
 3387 * available when it comes time for PostPrepare_Locks to do the deed.
 3388 * So for now, we error out while we can still do so safely.
 3389 *
 3390 * Since the LOCALLOCK table stores a separate entry for each lockmode,
 3391 * we can't implement this check by examining LOCALLOCK entries in isolation.
 3392 * We must build a transient hashtable that is indexed by locktag only.
 3393 */
 3394static void
/* NOTE(review): this extracted listing dropped the (void) parameter line,
 * the PerLockTagEntry typedef-name line, some declarations (hash_ctl,
 * locallock, hentry), the hash_create flag argument, the hash_seq_init and
 * hash_search calls' first lines, the errcode line in the ereport, and the
 * final hash_destroy. Verify against upstream lock.c. */
 3396{
 3397 typedef struct
 3398 {
 3399 LOCKTAG lock; /* identifies the lockable object */
 3400 bool sessLock; /* is any lockmode held at session level? */
 3401 bool xactLock; /* is any lockmode held at xact level? */
 3403
 3405 HTAB *lockhtab;
 3406 HASH_SEQ_STATUS status;
 3408
 3409 /* Create a local hash table keyed by LOCKTAG only */
 3410 hash_ctl.keysize = sizeof(LOCKTAG);
 3411 hash_ctl.entrysize = sizeof(PerLockTagEntry);
 3413
 3414 lockhtab = hash_create("CheckForSessionAndXactLocks table",
 3415 256, /* arbitrary initial size */
 3416 &hash_ctl,
 3418
 3419 /* Scan local lock table to find entries for each LOCKTAG */
 3421
 3422 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
 3423 {
 3424 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
 3426 bool found;
 3427 int i;
 3428
 3429 /*
 3430 * Ignore VXID locks. We don't want those to be held by prepared
 3431 * transactions, since they aren't meaningful after a restart.
 3432 */
 3433 if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
 3434 continue;
 3435
 3436 /* Ignore it if we don't actually hold the lock */
 3437 if (locallock->nLocks <= 0)
 3438 continue;
 3439
 3440 /* Otherwise, find or make an entry in lockhtab */
 3442 &locallock->tag.lock,
 3443 HASH_ENTER, &found);
 3444 if (!found) /* initialize, if newly created */
 3445 hentry->sessLock = hentry->xactLock = false;
 3446
 3447 /* Scan to see if we hold lock at session or xact level or both */
 3448 for (i = locallock->numLockOwners - 1; i >= 0; i--)
 3449 {
 3450 if (lockOwners[i].owner == NULL)
 3451 hentry->sessLock = true;
 3452 else
 3453 hentry->xactLock = true;
 3454 }
 3455
 3456 /*
 3457 * We can throw error immediately when we see both types of locks; no
 3458 * need to wait around to see if there are more violations.
 3459 */
 3460 if (hentry->sessLock && hentry->xactLock)
 3461 ereport(ERROR,
 3463 errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
 3464 }
 3465
 3466 /* Success, so clean up */
 3468}
3469
 3470/*
 3471 * AtPrepare_Locks
 3472 * Do the preparatory work for a PREPARE: make 2PC state file records
 3473 * for all locks currently held.
 3474 *
 3475 * Session-level locks are ignored, as are VXID locks.
 3476 *
 3477 * For the most part, we don't need to touch shared memory for this ---
 3478 * all the necessary state information is in the locallock table.
 3479 * Fast-path locks are an exception, however: we move any such locks to
 3480 * the main table before allowing PREPARE TRANSACTION to succeed.
 3481 */
 3482void
 3483AtPrepare_Locks(void)
 3484{
 3485 HASH_SEQ_STATUS status;
/* NOTE(review): this extracted listing dropped a few lines here: the
 * locallock declaration, the CheckForSessionAndXactLocks() call, the
 * hash_seq_init call, the errcode line in the ereport, the
 * FastPathGetRelationLockEntry assignment inside the fast-path branch, and
 * the RegisterTwoPhaseRecord first line. Verify against upstream lock.c. */
 3488 /* First, verify there aren't locks of both xact and session level */
 3490
 3491 /* Now do the per-locallock cleanup work */
 3493
 3494 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
 3495 {
 3496 TwoPhaseLockRecord record;
 3497 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
 3498 bool haveSessionLock;
 3499 bool haveXactLock;
 3500 int i;
 3501
 3502 /*
 3503 * Ignore VXID locks. We don't want those to be held by prepared
 3504 * transactions, since they aren't meaningful after a restart.
 3505 */
 3506 if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
 3507 continue;
 3508
 3509 /* Ignore it if we don't actually hold the lock */
 3510 if (locallock->nLocks <= 0)
 3511 continue;
 3512
 3513 /* Scan to see whether we hold it at session or transaction level */
 3514 haveSessionLock = haveXactLock = false;
 3515 for (i = locallock->numLockOwners - 1; i >= 0; i--)
 3516 {
 3517 if (lockOwners[i].owner == NULL)
 3518 haveSessionLock = true;
 3519 else
 3520 haveXactLock = true;
 3521 }
 3522
 3523 /* Ignore it if we have only session lock */
 3524 if (!haveXactLock)
 3525 continue;
 3526
 3527 /* This can't happen, because we already checked it */
 3528 if (haveSessionLock)
 3529 ereport(ERROR,
 3531 errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
 3532
 3533 /*
 3534 * If the local lock was taken via the fast-path, we need to move it
 3535 * to the primary lock table, or just get a pointer to the existing
 3536 * primary lock table entry if by chance it's already been
 3537 * transferred.
 3538 */
 3539 if (locallock->proclock == NULL)
 3540 {
 3542 locallock->lock = locallock->proclock->tag.myLock;
 3543 }
 3544
 3545 /*
 3546 * Arrange to not release any strong lock count held by this lock
 3547 * entry. We must retain the count until the prepared transaction is
 3548 * committed or rolled back.
 3549 */
 3550 locallock->holdsStrongLockCount = false;
 3551
 3552 /*
 3553 * Create a 2PC record.
 3554 */
 3555 memcpy(&(record.locktag), &(locallock->tag.lock), sizeof(LOCKTAG));
 3556 record.lockmode = locallock->tag.mode;
 3557
 3559 &record, sizeof(TwoPhaseLockRecord));
 3560 }
 3561}
3562
3563/*
3564 * PostPrepare_Locks
3565 * Clean up after successful PREPARE
3566 *
3567 * Here, we want to transfer ownership of our locks to a dummy PGPROC
3568 * that's now associated with the prepared transaction, and we want to
3569 * clean out the corresponding entries in the LOCALLOCK table.
3570 *
3571 * Note: by removing the LOCALLOCK entries, we are leaving dangling
3572 * pointers in the transaction's resource owner. This is OK at the
3573 * moment since resowner.c doesn't try to free locks retail at a toplevel
3574 * transaction commit or abort. We could alternatively zero out nLocks
3575 * and leave the LOCALLOCK entries to be garbage-collected by LockReleaseAll,
3576 * but that probably costs more cycles.
3577 */
3578void
3580{
3581 PGPROC *newproc = TwoPhaseGetDummyProc(fxid, false);
3582 HASH_SEQ_STATUS status;
3584 LOCK *lock;
3585 PROCLOCK *proclock;
3587 int partition;
3588
3589 /* Can't prepare a lock group follower. */
3592
3593 /* This is a critical section: any error means big trouble */
3595
3596 /*
3597 * First we run through the locallock table and get rid of unwanted
3598 * entries, then we scan the process's proclocks and transfer them to the
3599 * target proc.
3600 *
3601 * We do this separately because we may have multiple locallock entries
3602 * pointing to the same proclock, and we daren't end up with any dangling
3603 * pointers.
3604 */
3606
3607 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3608 {
3609 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3610 bool haveSessionLock;
3611 bool haveXactLock;
3612 int i;
3613
3614 if (locallock->proclock == NULL || locallock->lock == NULL)
3615 {
3616 /*
3617 * We must've run out of shared memory while trying to set up this
3618 * lock. Just forget the local entry.
3619 */
3620 Assert(locallock->nLocks == 0);
3622 continue;
3623 }
3624
3625 /* Ignore VXID locks */
3626 if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3627 continue;
3628
3629 /* Scan to see whether we hold it at session or transaction level */
3630 haveSessionLock = haveXactLock = false;
3631 for (i = locallock->numLockOwners - 1; i >= 0; i--)
3632 {
3633 if (lockOwners[i].owner == NULL)
3634 haveSessionLock = true;
3635 else
3636 haveXactLock = true;
3637 }
3638
3639 /* Ignore it if we have only session lock */
3640 if (!haveXactLock)
3641 continue;
3642
3643 /* This can't happen, because we already checked it */
3644 if (haveSessionLock)
3645 ereport(PANIC,
3647 errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3648
3649 /* Mark the proclock to show we need to release this lockmode */
3650 if (locallock->nLocks > 0)
3651 locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
3652
3653 /* And remove the locallock hashtable entry */
3655 }
3656
3657 /*
3658 * Now, scan each lock partition separately.
3659 */
3661 {
3663 dlist_head *procLocks = &(MyProc->myProcLocks[partition]);
3665
3667
3668 /*
3669 * If the proclock list for this partition is empty, we can skip
3670 * acquiring the partition lock. This optimization is safer than the
3671 * situation in LockReleaseAll, because we got rid of any fast-path
3672 * locks during AtPrepare_Locks, so there cannot be any case where
3673 * another backend is adding something to our lists now. For safety,
3674 * though, we code this the same way as in LockReleaseAll.
3675 */
3676 if (dlist_is_empty(procLocks))
3677 continue; /* needn't examine this partition */
3678
3680
3682 {
3683 proclock = dlist_container(PROCLOCK, procLink, proclock_iter.cur);
3684
3685 Assert(proclock->tag.myProc == MyProc);
3686
3687 lock = proclock->tag.myLock;
3688
3689 /* Ignore VXID locks */
3691 continue;
3692
3693 PROCLOCK_PRINT("PostPrepare_Locks", proclock);
3694 LOCK_PRINT("PostPrepare_Locks", lock, 0);
3695 Assert(lock->nRequested >= 0);
3696 Assert(lock->nGranted >= 0);
3697 Assert(lock->nGranted <= lock->nRequested);
3698 Assert((proclock->holdMask & ~lock->grantMask) == 0);
3699
3700 /* Ignore it if nothing to release (must be a session lock) */
3701 if (proclock->releaseMask == 0)
3702 continue;
3703
3704 /* Else we should be releasing all locks */
3705 if (proclock->releaseMask != proclock->holdMask)
3706 elog(PANIC, "we seem to have dropped a bit somewhere");
3707
3708 /*
3709 * We cannot simply modify proclock->tag.myProc to reassign
3710 * ownership of the lock, because that's part of the hash key and
3711 * the proclock would then be in the wrong hash chain. Instead
3712 * use hash_update_hash_key. (We used to create a new hash entry,
3713 * but that risks out-of-memory failure if other processes are
3714 * busy making proclocks too.) We must unlink the proclock from
3715 * our procLink chain and put it into the new proc's chain, too.
3716 *
3717 * Note: the updated proclock hash key will still belong to the
3718 * same hash partition, cf proclock_hash(). So the partition lock
3719 * we already hold is sufficient for this.
3720 */
3721 dlist_delete(&proclock->procLink);
3722
3723 /*
3724 * Create the new hash key for the proclock.
3725 */
3726 proclocktag.myLock = lock;
3727 proclocktag.myProc = newproc;
3728
3729 /*
3730 * Update groupLeader pointer to point to the new proc. (We'd
3731 * better not be a member of somebody else's lock group!)
3732 */
3733 Assert(proclock->groupLeader == proclock->tag.myProc);
3734 proclock->groupLeader = newproc;
3735
3736 /*
3737 * Update the proclock. We should not find any existing entry for
3738 * the same hash key, since there can be only one entry for any
3739 * given lock with my own proc.
3740 */
3742 proclock,
3743 &proclocktag))
3744 elog(PANIC, "duplicate entry found while reassigning a prepared transaction's locks");
3745
3746 /* Re-link into the new proc's proclock list */
3747 dlist_push_tail(&newproc->myProcLocks[partition], &proclock->procLink);
3748
3749 PROCLOCK_PRINT("PostPrepare_Locks: updated", proclock);
3750 } /* loop over PROCLOCKs within this partition */
3751
3753 } /* loop over partitions */
3754
3756}
3757
3758
3759/*
3760 * GetLockStatusData - Return a summary of the lock manager's internal
3761 * status, for use in a user-level reporting function.
3762 *
3763 * The return data consists of an array of LockInstanceData objects,
3764 * which are a lightly abstracted version of the PROCLOCK data structures,
3765 * i.e. there is one entry for each unique lock and interested PGPROC.
3766 * It is the caller's responsibility to match up related items (such as
3767 * references to the same lockable object or PGPROC) if wanted.
3768 *
3769 * The design goal is to hold the LWLocks for as short a time as possible;
3770 * thus, this function simply makes a copy of the necessary data and releases
3771 * the locks, allowing the caller to contemplate and format the data for as
3772 * long as it pleases.
3773 */
3774LockData *
3776{
3777 LockData *data;
3778 PROCLOCK *proclock;
3780 int els;
3781 int el;
3782 int i;
3783
3785
3786 /* Guess how much space we'll need. */
3787 els = MaxBackends;
3788 el = 0;
3790
3791 /*
3792 * First, we iterate through the per-backend fast-path arrays, locking
3793 * them one at a time. This might produce an inconsistent picture of the
3794 * system state, but taking all of those LWLocks at the same time seems
3795 * impractical (in particular, note MAX_SIMUL_LWLOCKS). It shouldn't
3796 * matter too much, because none of these locks can be involved in lock
3797 * conflicts anyway - anything that might must be present in the main lock
3798 * table. (For the same reason, we don't sweat about making leaderPid
3799 * completely valid. We cannot safely dereference another backend's
3800 * lockGroupLeader field without holding all lock partition locks, and
3801 * it's not worth that.)
3802 */
3803 for (i = 0; i < ProcGlobal->allProcCount; ++i)
3804 {
3805 PGPROC *proc = GetPGProcByNumber(i);
3806
3807 /* Skip backends with pid=0, as they don't hold fast-path locks */
3808 if (proc->pid == 0)
3809 continue;
3810
3812
3813 for (uint32 g = 0; g < FastPathLockGroupsPerBackend; g++)
3814 {
3815 /* Skip groups without registered fast-path locks */
3816 if (proc->fpLockBits[g] == 0)
3817 continue;
3818
3819 for (int j = 0; j < FP_LOCK_SLOTS_PER_GROUP; j++)
3820 {
3822 uint32 f = FAST_PATH_SLOT(g, j);
3824
3825 /* Skip unallocated slots */
3826 if (!lockbits)
3827 continue;
3828
3829 if (el >= els)
3830 {
3831 els += MaxBackends;
3832 data->locks = (LockInstanceData *)
3833 repalloc(data->locks, sizeof(LockInstanceData) * els);
3834 }
3835
3836 instance = &data->locks[el];
3838 proc->fpRelId[f]);
3840 instance->waitLockMode = NoLock;
3841 instance->vxid.procNumber = proc->vxid.procNumber;
3842 instance->vxid.localTransactionId = proc->vxid.lxid;
3843 instance->pid = proc->pid;
3844 instance->leaderPid = proc->pid;
3845 instance->fastpath = true;
3846
3847 /*
3848 * Successfully taking fast path lock means there were no
3849 * conflicting locks.
3850 */
3851 instance->waitStart = 0;
3852
3853 el++;
3854 }
3855 }
3856
3857 if (proc->fpVXIDLock)
3858 {
3861
3862 if (el >= els)
3863 {
3864 els += MaxBackends;
3865 data->locks = (LockInstanceData *)
3866 repalloc(data->locks, sizeof(LockInstanceData) * els);
3867 }
3868
3869 vxid.procNumber = proc->vxid.procNumber;
3871
3872 instance = &data->locks[el];
3874 instance->holdMask = LOCKBIT_ON(ExclusiveLock);
3875 instance->waitLockMode = NoLock;
3876 instance->vxid.procNumber = proc->vxid.procNumber;
3877 instance->vxid.localTransactionId = proc->vxid.lxid;
3878 instance->pid = proc->pid;
3879 instance->leaderPid = proc->pid;
3880 instance->fastpath = true;
3881 instance->waitStart = 0;
3882
3883 el++;
3884 }
3885
3886 LWLockRelease(&proc->fpInfoLock);
3887 }
3888
3889 /*
3890 * Next, acquire lock on the entire shared lock data structure. We do
3891 * this so that, at least for locks in the primary lock table, the state
3892 * will be self-consistent.
3893 *
3894 * Since this is a read-only operation, we take shared instead of
3895 * exclusive lock. There's not a whole lot of point to this, because all
3896 * the normal operations require exclusive lock, but it doesn't hurt
3897 * anything either. It will at least allow two backends to do
3898 * GetLockStatusData in parallel.
3899 *
3900 * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
3901 */
3902 for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
3904
3905 /* Now we can safely count the number of proclocks */
3907 if (data->nelements > els)
3908 {
3909 els = data->nelements;
3910 data->locks = (LockInstanceData *)
3911 repalloc(data->locks, sizeof(LockInstanceData) * els);
3912 }
3913
3914 /* Now scan the tables to copy the data */
3916
3917 while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
3918 {
3919 PGPROC *proc = proclock->tag.myProc;
3920 LOCK *lock = proclock->tag.myLock;
3921 LockInstanceData *instance = &data->locks[el];
3922
3923 memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
3924 instance->holdMask = proclock->holdMask;
3925 if (proc->waitLock == proclock->tag.myLock)
3926 instance->waitLockMode = proc->waitLockMode;
3927 else
3928 instance->waitLockMode = NoLock;
3929 instance->vxid.procNumber = proc->vxid.procNumber;
3930 instance->vxid.localTransactionId = proc->vxid.lxid;
3931 instance->pid = proc->pid;
3932 instance->leaderPid = proclock->groupLeader->pid;
3933 instance->fastpath = false;
3934 instance->waitStart = (TimestampTz) pg_atomic_read_u64(&proc->waitStart);
3935
3936 el++;
3937 }
3938
3939 /*
3940 * And release locks. We do this in reverse order for two reasons: (1)
3941 * Anyone else who needs more than one of the locks will be trying to lock
3942 * them in increasing order; we don't want to release the other process
3943 * until it can get all the locks it needs. (2) This avoids O(N^2)
3944 * behavior inside LWLockRelease.
3945 */
3946 for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
3948
3949 Assert(el == data->nelements);
3950
3951 return data;
3952}
3953
3954/*
3955 * GetBlockerStatusData - Return a summary of the lock manager's state
3956 * concerning locks that are blocking the specified PID or any member of
3957 * the PID's lock group, for use in a user-level reporting function.
3958 *
3959 * For each PID within the lock group that is awaiting some heavyweight lock,
3960 * the return data includes an array of LockInstanceData objects, which are
3961 * the same data structure used by GetLockStatusData; but unlike that function,
3962 * this one reports only the PROCLOCKs associated with the lock that that PID
3963 * is blocked on. (Hence, all the locktags should be the same for any one
3964 * blocked PID.) In addition, we return an array of the PIDs of those backends
3965 * that are ahead of the blocked PID in the lock's wait queue. These can be
3966 * compared with the PIDs in the LockInstanceData objects to determine which
3967 * waiters are ahead of or behind the blocked PID in the queue.
3968 *
3969 * If blocked_pid isn't a valid backend PID or nothing in its lock group is
3970 * waiting on any heavyweight lock, return empty arrays.
3971 *
3972 * The design goal is to hold the LWLocks for as short a time as possible;
3973 * thus, this function simply makes a copy of the necessary data and releases
3974 * the locks, allowing the caller to contemplate and format the data for as
3975 * long as it pleases.
3976 */
3979{
3981 PGPROC *proc;
3982 int i;
3983
3985
3986 /*
3987 * Guess how much space we'll need, and preallocate. Most of the time
3988 * this will avoid needing to do repalloc while holding the LWLocks. (We
3989 * assume, but check with an Assert, that MaxBackends is enough entries
3990 * for the procs[] array; the other two could need enlargement, though.)
3991 */
3992 data->nprocs = data->nlocks = data->npids = 0;
3993 data->maxprocs = data->maxlocks = data->maxpids = MaxBackends;
3994 data->procs = palloc_array(BlockedProcData, data->maxprocs);
3995 data->locks = palloc_array(LockInstanceData, data->maxlocks);
3996 data->waiter_pids = palloc_array(int, data->maxpids);
3997
3998 /*
3999 * In order to search the ProcArray for blocked_pid and assume that that
4000 * entry won't immediately disappear under us, we must hold ProcArrayLock.
4001 * In addition, to examine the lock grouping fields of any other backend,
4002 * we must hold all the hash partition locks. (Only one of those locks is
4003 * actually relevant for any one lock group, but we can't know which one
4004 * ahead of time.) It's fairly annoying to hold all those locks
4005 * throughout this, but it's no worse than GetLockStatusData(), and it
4006 * does have the advantage that we're guaranteed to return a
4007 * self-consistent instantaneous state.
4008 */
4010
4012
4013 /* Nothing to do if it's gone */
4014 if (proc != NULL)
4015 {
4016 /*
4017 * Acquire lock on the entire shared lock data structure. See notes
4018 * in GetLockStatusData().
4019 */
4020 for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
4022
4023 if (proc->lockGroupLeader == NULL)
4024 {
4025 /* Easy case, proc is not a lock group member */
4027 }
4028 else
4029 {
4030 /* Examine all procs in proc's lock group */
4031 dlist_iter iter;
4032
4034 {
4036
4037 memberProc = dlist_container(PGPROC, lockGroupLink, iter.cur);
4039 }
4040 }
4041
4042 /*
4043 * And release locks. See notes in GetLockStatusData().
4044 */
4045 for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
4047
4048 Assert(data->nprocs <= data->maxprocs);
4049 }
4050
4052
4053 return data;
4054}
4055
4056/* Accumulate data about one possibly-blocked proc for GetBlockerStatusData */
4057static void
4059{
4060 LOCK *theLock = blocked_proc->waitLock;
4065 int queue_size;
4066
4067 /* Nothing to do if this proc is not blocked */
4068 if (theLock == NULL)
4069 return;
4070
4071 /* Set up a procs[] element */
4072 bproc = &data->procs[data->nprocs++];
4073 bproc->pid = blocked_proc->pid;
4074 bproc->first_lock = data->nlocks;
4075 bproc->first_waiter = data->npids;
4076
4077 /*
4078 * We may ignore the proc's fast-path arrays, since nothing in those could
4079 * be related to a contended lock.
4080 */
4081
4082 /* Collect all PROCLOCKs associated with theLock */
4083 dlist_foreach(proclock_iter, &theLock->procLocks)
4084 {
4085 PROCLOCK *proclock =
4086 dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
4087 PGPROC *proc = proclock->tag.myProc;
4088 LOCK *lock = proclock->tag.myLock;
4090
4091 if (data->nlocks >= data->maxlocks)
4092 {
4093 data->maxlocks += MaxBackends;
4094 data->locks = (LockInstanceData *)
4095 repalloc(data->locks, sizeof(LockInstanceData) * data->maxlocks);
4096 }
4097
4098 instance = &data->locks[data->nlocks];
4099 memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
4100 instance->holdMask = proclock->holdMask;
4101 if (proc->waitLock == lock)
4102 instance->waitLockMode = proc->waitLockMode;
4103 else
4104 instance->waitLockMode = NoLock;
4105 instance->vxid.procNumber = proc->vxid.procNumber;
4106 instance->vxid.localTransactionId = proc->vxid.lxid;
4107 instance->pid = proc->pid;
4108 instance->leaderPid = proclock->groupLeader->pid;
4109 instance->fastpath = false;
4110 data->nlocks++;
4111 }
4112
4113 /* Enlarge waiter_pids[] if it's too small to hold all wait queue PIDs */
4114 waitQueue = &(theLock->waitProcs);
4115 queue_size = dclist_count(waitQueue);
4116
4117 if (queue_size > data->maxpids - data->npids)
4118 {
4119 data->maxpids = Max(data->maxpids + MaxBackends,
4120 data->npids + queue_size);
4121 data->waiter_pids = (int *) repalloc(data->waiter_pids,
4122 sizeof(int) * data->maxpids);
4123 }
4124
4125 /* Collect PIDs from the lock's wait queue, stopping at blocked_proc */
4127 {
4129
4131 break;
4132 data->waiter_pids[data->npids++] = queued_proc->pid;
4133 }
4134
4135 bproc->num_locks = data->nlocks - bproc->first_lock;
4136 bproc->num_waiters = data->npids - bproc->first_waiter;
4137}
4138
4139/*
4140 * Returns a list of currently held AccessExclusiveLocks, for use by
4141 * LogStandbySnapshot(). The result is a palloc'd array,
4142 * with the number of elements returned into *nlocks.
4143 *
4144 * XXX This currently takes a lock on all partitions of the lock table,
4145 * but it's possible to do better. By reference counting locks and storing
4146 * the value in the ProcArray entry for each backend we could tell if any
4147 * locks need recording without having to acquire the partition locks and
4148 * scan the lock table. Whether that's worth the additional overhead
4149 * is pretty dubious though.
4150 */
4152GetRunningTransactionLocks(int *nlocks)
4153{
4155 PROCLOCK *proclock;
4157 int i;
4158 int index;
4159 int els;
4160
4161 /*
4162 * Acquire lock on the entire shared lock data structure.
4163 *
4164 * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
4165 */
4166 for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
4168
4169 /* Now we can safely count the number of proclocks */
4171
4172 /*
4173 * Allocating enough space for all locks in the lock table is overkill,
4174 * but it's more convenient and faster than having to enlarge the array.
4175 */
4177
4178 /* Now scan the tables to copy the data */
4180
4181 /*
4182 * If lock is a currently granted AccessExclusiveLock then it will have
4183 * just one proclock holder, so locks are never accessed twice in this
4184 * particular case. Don't copy this code for use elsewhere because in the
4185 * general case this will give you duplicate locks when looking at
4186 * non-exclusive lock types.
4187 */
4188 index = 0;
4189 while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
4190 {
4191 /* make sure this definition matches the one used in LockAcquire */
4192 if ((proclock->holdMask & LOCKBIT_ON(AccessExclusiveLock)) &&
4194 {
4195 PGPROC *proc = proclock->tag.myProc;
4196 LOCK *lock = proclock->tag.myLock;
4197 TransactionId xid = proc->xid;
4198
4199 /*
4200 * Don't record locks for transactions if we know they have
4201 * already issued their WAL record for commit but not yet released
4202 * lock. It is still possible that we see locks held by already
4203 * complete transactions, if they haven't yet zeroed their xids.
4204 */
4205 if (!TransactionIdIsValid(xid))
4206 continue;
4207
4208 accessExclusiveLocks[index].xid = xid;
4211
4212 index++;
4213 }
4214 }
4215
4216 Assert(index <= els);
4217
4218 /*
4219 * And release locks. We do this in reverse order for two reasons: (1)
4220 * Anyone else who needs more than one of the locks will be trying to lock
4221 * them in increasing order; we don't want to release the other process
4222 * until it can get all the locks it needs. (2) This avoids O(N^2)
4223 * behavior inside LWLockRelease.
4224 */
4225 for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
4227
4228 *nlocks = index;
4229 return accessExclusiveLocks;
4230}
4231
4232/* Provide the textual name of any lock mode */
4233const char *
4235{
4237 Assert(mode > 0 && mode <= LockMethods[lockmethodid]->numLockModes);
4239}
4240
4241#ifdef LOCK_DEBUG
4242/*
4243 * Dump all locks in the given proc's myProcLocks lists.
4244 *
4245 * Caller is responsible for having acquired appropriate LWLocks.
4246 */
4247void
4248DumpLocks(PGPROC *proc)
4249{
4250 int i;
4251
4252 if (proc == NULL)
4253 return;
4254
4255 if (proc->waitLock)
4256 LOCK_PRINT("DumpLocks: waiting on", proc->waitLock, 0);
4257
4258 for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
4259 {
4260 dlist_head *procLocks = &proc->myProcLocks[i];
4261 dlist_iter iter;
4262
4263 dlist_foreach(iter, procLocks)
4264 {
4265 PROCLOCK *proclock = dlist_container(PROCLOCK, procLink, iter.cur);
4266 LOCK *lock = proclock->tag.myLock;
4267
4268 Assert(proclock->tag.myProc == proc);
4269 PROCLOCK_PRINT("DumpLocks", proclock);
4270 LOCK_PRINT("DumpLocks", lock, 0);
4271 }
4272 }
4273}
4274
4275/*
4276 * Dump all lmgr locks.
4277 *
4278 * Caller is responsible for having acquired appropriate LWLocks.
4279 */
4280void
4281DumpAllLocks(void)
4282{
4283 PGPROC *proc;
4284 PROCLOCK *proclock;
4285 LOCK *lock;
4286 HASH_SEQ_STATUS status;
4287
4288 proc = MyProc;
4289
4290 if (proc && proc->waitLock)
4291 LOCK_PRINT("DumpAllLocks: waiting on", proc->waitLock, 0);
4292
4294
4295 while ((proclock = (PROCLOCK *) hash_seq_search(&status)) != NULL)
4296 {
4297 PROCLOCK_PRINT("DumpAllLocks", proclock);
4298
4299 lock = proclock->tag.myLock;
4300 if (lock)
4301 LOCK_PRINT("DumpAllLocks", lock, 0);
4302 else
4303 elog(LOG, "DumpAllLocks: proclock->tag.myLock = NULL");
4304 }
4305}
4306#endif /* LOCK_DEBUG */
4307
4308/*
4309 * LOCK 2PC resource manager's routines
4310 */
4311
4312/*
4313 * Re-acquire a lock belonging to a transaction that was prepared.
4314 *
4315 * Because this function is run at db startup, re-acquiring the locks should
4316 * never conflict with running transactions because there are none. We
4317 * assume that the lock state represented by the stored 2PC files is legal.
4318 *
4319 * When switching from Hot Standby mode to normal operation, the locks will
4320 * be already held by the startup process. The locks are acquired for the new
4321 * procs without checking for conflicts, so we don't get a conflict between the
4322 * startup process and the dummy procs, even though we will momentarily have
4323 * a situation where two procs are holding the same AccessExclusiveLock,
4324 * which isn't normally possible because the conflict. If we're in standby
4325 * mode, but a recovery snapshot hasn't been established yet, it's possible
4326 * that some but not all of the locks are already held by the startup process.
4327 *
4328 * This approach is simple, but also a bit dangerous, because if there isn't
4329 * enough shared memory to acquire the locks, an error will be thrown, which
4330 * is promoted to FATAL and recovery will abort, bringing down postmaster.
4331 * A safer approach would be to transfer the locks like we do in
4332 * AtPrepare_Locks, but then again, in hot standby mode it's possible for
4333 * read-only backends to use up all the shared lock memory anyway, so that
4334 * replaying the WAL record that needs to acquire a lock will throw an error
4335 * and PANIC anyway.
4336 */
4337void
4339 void *recdata, uint32 len)
4340{
4342 PGPROC *proc = TwoPhaseGetDummyProc(fxid, false);
4343 LOCKTAG *locktag;
4344 LOCKMODE lockmode;
4346 LOCK *lock;
4347 PROCLOCK *proclock;
4349 bool found;
4350 uint32 hashcode;
4352 int partition;
4355
4356 Assert(len == sizeof(TwoPhaseLockRecord));
4357 locktag = &rec->locktag;
4358 lockmode = rec->lockmode;
4360
4362 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4364
4365 hashcode = LockTagHashCode(locktag);
4366 partition = LockHashPartition(hashcode);
4368
4370
4371 /*
4372 * Find or create a lock with this tag.
4373 */
4375 locktag,
4376 hashcode,
4378 &found);
4379 if (!lock)
4380 {
4382 ereport(ERROR,
4384 errmsg("out of shared memory"),
4385 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
4386 }
4387
4388 /*
4389 * if it's a new lock object, initialize it
4390 */
4391 if (!found)
4392 {
4393 lock->grantMask = 0;
4394 lock->waitMask = 0;
4395 dlist_init(&lock->procLocks);
4396 dclist_init(&lock->waitProcs);
4397 lock->nRequested = 0;
4398 lock->nGranted = 0;
4399 MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
4400 MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
4401 LOCK_PRINT("lock_twophase_recover: new", lock, lockmode);
4402 }
4403 else
4404 {
4405 LOCK_PRINT("lock_twophase_recover: found", lock, lockmode);
4406 Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
4407 Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
4408 Assert(lock->nGranted <= lock->nRequested);
4409 }
4410
4411 /*
4412 * Create the hash key for the proclock table.
4413 */
4414 proclocktag.myLock = lock;
4415 proclocktag.myProc = proc;
4416
4418
4419 /*
4420 * Find or create a proclock entry with this tag
4421 */
4423 &proclocktag,
4426 &found);
4427 if (!proclock)
4428 {
4429 /* Oops, not enough shmem for the proclock */
4430 if (lock->nRequested == 0)
4431 {
4432 /*
4433 * There are no other requestors of this lock, so garbage-collect
4434 * the lock object. We *must* do this to avoid a permanent leak
4435 * of shared memory, because there won't be anything to cause
4436 * anyone to release the lock object later.
4437 */
4440 &(lock->tag),
4441 hashcode,
4443 NULL))
4444 elog(PANIC, "lock table corrupted");
4445 }
4447 ereport(ERROR,
4449 errmsg("out of shared memory"),
4450 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
4451 }
4452
4453 /*
4454 * If new, initialize the new entry
4455 */
4456 if (!found)
4457 {
4458 Assert(proc->lockGroupLeader == NULL);
4459 proclock->groupLeader = proc;
4460 proclock->holdMask = 0;
4461 proclock->releaseMask = 0;
4462 /* Add proclock to appropriate lists */
4463 dlist_push_tail(&lock->procLocks, &proclock->lockLink);
4465 &proclock->procLink);
4466 PROCLOCK_PRINT("lock_twophase_recover: new", proclock);
4467 }
4468 else
4469 {
4470 PROCLOCK_PRINT("lock_twophase_recover: found", proclock);
4471 Assert((proclock->holdMask & ~lock->grantMask) == 0);
4472 }
4473
4474 /*
4475 * lock->nRequested and lock->requested[] count the total number of
4476 * requests, whether granted or waiting, so increment those immediately.
4477 */
4478 lock->nRequested++;
4479 lock->requested[lockmode]++;
4480 Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
4481
4482 /*
4483 * We shouldn't already hold the desired lock.
4484 */
4485 if (proclock->holdMask & LOCKBIT_ON(lockmode))
4486 elog(ERROR, "lock %s on object %u/%u/%u is already held",
4487 lockMethodTable->lockModeNames[lockmode],
4488 lock->tag.locktag_field1, lock->tag.locktag_field2,
4489 lock->tag.locktag_field3);
4490
4491 /*
4492 * We ignore any possible conflicts and just grant ourselves the lock. Not
4493 * only because we don't bother, but also to avoid deadlocks when
4494 * switching from standby to normal mode. See function comment.
4495 */
4496 GrantLock(lock, proclock, lockmode);
4497
4498 /*
4499 * Bump strong lock count, to make sure any fast-path lock requests won't
4500 * be granted without consulting the primary lock table.
4501 */
4502 if (ConflictsWithRelationFastPath(&lock->tag, lockmode))
4503 {
4505
4509 }
4510
4512}
4513
4514/*
4515 * Re-acquire a lock belonging to a transaction that was prepared, when
4516 * starting up into hot standby mode.
4517 */
4518void
4520 void *recdata, uint32 len)
4521{
4523 LOCKTAG *locktag;
4524 LOCKMODE lockmode;
4526
4527 Assert(len == sizeof(TwoPhaseLockRecord));
4528 locktag = &rec->locktag;
4529 lockmode = rec->lockmode;
4531
4533 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4534
4535 if (lockmode == AccessExclusiveLock &&
4536 locktag->locktag_type == LOCKTAG_RELATION)
4537 {
4539 locktag->locktag_field1 /* dboid */ ,
4540 locktag->locktag_field2 /* reloid */ );
4541 }
4542}
4543
4544
4545/*
4546 * 2PC processing routine for COMMIT PREPARED case.
4547 *
4548 * Find and release the lock indicated by the 2PC record.
4549 */
4550void
4552 void *recdata, uint32 len)
4553{
4555 PGPROC *proc = TwoPhaseGetDummyProc(fxid, true);
4556 LOCKTAG *locktag;
4559
4560 Assert(len == sizeof(TwoPhaseLockRecord));
4561 locktag = &rec->locktag;
4563
4565 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4567
4568 LockRefindAndRelease(lockMethodTable, proc, locktag, rec->lockmode, true);
4569}
4570
4571/*
4572 * 2PC processing routine for ROLLBACK PREPARED case.
4573 *
4574 * This is actually just the same as the COMMIT case.
4575 */
4576void
4578 void *recdata, uint32 len)
4579{
4580 lock_twophase_postcommit(fxid, info, recdata, len);
4581}
4582
4583/*
4584 * VirtualXactLockTableInsert
4585 *
4586 * Take vxid lock via the fast-path. There can't be any pre-existing
4587 * lockers, as we haven't advertised this vxid via the ProcArray yet.
4588 *
4589 * Since MyProc->fpLocalTransactionId will normally contain the same data
4590 * as MyProc->vxid.lxid, you might wonder if we really need both. The
4591 * difference is that MyProc->vxid.lxid is set and cleared unlocked, and
4592 * examined by procarray.c, while fpLocalTransactionId is protected by
4593 * fpInfoLock and is used only by the locking subsystem. Doing it this
4594 * way makes it easier to verify that there are no funny race conditions.
4595 *
4596 * We don't bother recording this lock in the local lock table, since it's
4597 * only ever released at the end of a transaction. Instead,
4598 * LockReleaseAll() calls VirtualXactLockTableCleanup().
4599 */
4600void
4602{
4604
4606
4609 Assert(MyProc->fpVXIDLock == false);
4610
4611 MyProc->fpVXIDLock = true;
4613
4615}
4616
4617/*
4618 * VirtualXactLockTableCleanup
4619 *
4620 * Check whether a VXID lock has been materialized; if so, release it,
4621 * unblocking waiters.
4622 */
4623void
4625{
4626 bool fastpath;
4627 LocalTransactionId lxid;
4628
4630
4631 /*
4632 * Clean up shared memory state.
4633 */
4635
4636 fastpath = MyProc->fpVXIDLock;
4638 MyProc->fpVXIDLock = false;
4640
4642
4643 /*
4644 * If fpVXIDLock has been cleared without touching fpLocalTransactionId,
4645 * that means someone transferred the lock to the main lock table.
4646 */
4647 if (!fastpath && LocalTransactionIdIsValid(lxid))
4648 {
4650 LOCKTAG locktag;
4651
4652 vxid.procNumber = MyProcNumber;
4653 vxid.localTransactionId = lxid;
4654 SET_LOCKTAG_VIRTUALTRANSACTION(locktag, vxid);
4655
4657 &locktag, ExclusiveLock, false);
4658 }
4659}
4660
4661/*
4662 * XactLockForVirtualXact
4663 *
4664 * If TransactionIdIsValid(xid), this is essentially XactLockTableWait(xid,
4665 * NULL, NULL, XLTW_None) or ConditionalXactLockTableWait(xid). Unlike those
4666 * functions, it assumes "xid" is never a subtransaction and that "xid" is
4667 * prepared, committed, or aborted.
4668 *
4669 * If !TransactionIdIsValid(xid), this locks every prepared XID having been
4670 * known as "vxid" before its PREPARE TRANSACTION.
4671 */
4672static bool
4674 TransactionId xid, bool wait)
4675{
4676 bool more = false;
4677
4678 /* There is no point to wait for 2PCs if you have no 2PCs. */
4679 if (max_prepared_xacts == 0)
4680 return true;
4681
4682 do
4683 {
4685 LOCKTAG tag;
4686
4687 /* Clear state from previous iterations. */
4688 if (more)
4689 {
4691 more = false;
4692 }
4693
4694 /* If we have no xid, try to find one. */
4695 if (!TransactionIdIsValid(xid))
4696 xid = TwoPhaseGetXidByVirtualXID(vxid, &more);
4697 if (!TransactionIdIsValid(xid))
4698 {
4699 Assert(!more);
4700 return true;
4701 }
4702
4703 /* Check or wait for XID completion. */
4704 SET_LOCKTAG_TRANSACTION(tag, xid);
4705 lar = LockAcquire(&tag, ShareLock, false, !wait);
4707 return false;
4708 LockRelease(&tag, ShareLock, false);
4709 } while (more);
4710
4711 return true;
4712}
4713
4714/*
4715 * VirtualXactLock
4716 *
4717 * If wait = true, wait as long as the given VXID or any XID acquired by the
4718 * same transaction is still running. Then, return true.
4719 *
4720 * If wait = false, just check whether that VXID or one of those XIDs is still
4721 * running, and return true or false.
4722 */
4723bool
4725{
4726 LOCKTAG tag;
4727 PGPROC *proc;
4729
4731
4733 /* no vxid lock; localTransactionId is a normal, locked XID */
4734 return XactLockForVirtualXact(vxid, vxid.localTransactionId, wait);
4735
4737
4738 /*
4739 * If a lock table entry must be made, this is the PGPROC on whose behalf
4740 * it must be done. Note that the transaction might end or the PGPROC
4741 * might be reassigned to a new backend before we get around to examining
4742 * it, but it doesn't matter. If we find upon examination that the
4743 * relevant lxid is no longer running here, that's enough to prove that
4744 * it's no longer running anywhere.
4745 */
4746 proc = ProcNumberGetProc(vxid.procNumber);
4747 if (proc == NULL)
4748 return XactLockForVirtualXact(vxid, InvalidTransactionId, wait);
4749
4750 /*
4751 * We must acquire this lock before checking the procNumber and lxid
4752 * against the ones we're waiting for. The target backend will only set
4753 * or clear lxid while holding this lock.
4754 */
4756
4757 if (proc->vxid.procNumber != vxid.procNumber
4759 {
4760 /* VXID ended */
4761 LWLockRelease(&proc->fpInfoLock);
4762 return XactLockForVirtualXact(vxid, InvalidTransactionId, wait);
4763 }
4764
4765 /*
4766 * If we aren't asked to wait, there's no need to set up a lock table
4767 * entry. The transaction is still in progress, so just return false.
4768 */
4769 if (!wait)
4770 {
4771 LWLockRelease(&proc->fpInfoLock);
4772 return false;
4773 }
4774
4775 /*
4776 * OK, we're going to need to sleep on the VXID. But first, we must set
4777 * up the primary lock table entry, if needed (ie, convert the proc's
4778 * fast-path lock on its VXID to a regular lock).
4779 */
4780 if (proc->fpVXIDLock)
4781 {
4782 PROCLOCK *proclock;
4783 uint32 hashcode;
4785
4786 hashcode = LockTagHashCode(&tag);
4787
4790
4792 &tag, hashcode, ExclusiveLock);
4793 if (!proclock)
4794 {
4796 LWLockRelease(&proc->fpInfoLock);
4797 ereport(ERROR,
4799 errmsg("out of shared memory"),
4800 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
4801 }
4802 GrantLock(proclock->tag.myLock, proclock, ExclusiveLock);
4803
4805
4806 proc->fpVXIDLock = false;
4807 }
4808
4809 /*
4810 * If the proc has an XID now, we'll avoid a TwoPhaseGetXidByVirtualXID()
4811 * search. The proc might have assigned this XID but not yet locked it,
4812 * in which case the proc will lock this XID before releasing the VXID.
4813 * The fpInfoLock critical section excludes VirtualXactLockTableCleanup(),
4814 * so we won't save an XID of a different VXID. It doesn't matter whether
4815 * we save this before or after setting up the primary lock table entry.
4816 */
4817 xid = proc->xid;
4818
4819 /* Done with proc->fpLockBits */
4820 LWLockRelease(&proc->fpInfoLock);
4821
4822 /* Time to wait. */
4823 (void) LockAcquire(&tag, ShareLock, false, false);
4824
4825 LockRelease(&tag, ShareLock, false);
4826 return XactLockForVirtualXact(vxid, xid, wait);
4827}
4828
4829/*
4830 * LockWaiterCount
4831 *
4832 * Find the number of lock requester on this locktag
4833 */
4834int
4835LockWaiterCount(const LOCKTAG *locktag)
4836{
4838 LOCK *lock;
4839 bool found;
4840 uint32 hashcode;
4842 int waiters = 0;
4843
4845 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4846
4847 hashcode = LockTagHashCode(locktag);
4850
4852 locktag,
4853 hashcode,
4854 HASH_FIND,
4855 &found);
4856 if (found)
4857 {
4858 Assert(lock != NULL);
4859 waiters = lock->nRequested;
4860 }
4862
4863 return waiters;
4864}
static uint64 pg_atomic_read_u64(volatile pg_atomic_uint64 *ptr)
Definition atomics.h:467
#define Max(x, y)
Definition c.h:1085
#define Assert(condition)
Definition c.h:943
int64_t int64
Definition c.h:621
uint16_t uint16
Definition c.h:623
uint32_t uint32
Definition c.h:624
#define lengthof(array)
Definition c.h:873
uint32 LocalTransactionId
Definition c.h:738
#define MemSet(start, val, len)
Definition c.h:1107
uint32 TransactionId
Definition c.h:736
size_t Size
Definition c.h:689
uint32 result
memcpy(sums, checksumBaseOffsets, sizeof(checksumBaseOffsets))
int64 TimestampTz
Definition timestamp.h:39
void DeadLockReport(void)
Definition deadlock.c:1075
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition dynahash.c:889
HTAB * hash_create(const char *tabname, int64 nelem, const HASHCTL *info, int flags)
Definition dynahash.c:360
void hash_destroy(HTAB *hashp)
Definition dynahash.c:802
void * hash_search_with_hash_value(HTAB *hashp, const void *keyPtr, uint32 hashvalue, HASHACTION action, bool *foundPtr)
Definition dynahash.c:902
void * hash_seq_search(HASH_SEQ_STATUS *status)
Definition dynahash.c:1352
int64 hash_get_num_entries(HTAB *hashp)
Definition dynahash.c:1273
bool hash_update_hash_key(HTAB *hashp, void *existingEntry, const void *newKeyPtr)
Definition dynahash.c:1077
uint32 get_hash_value(HTAB *hashp, const void *keyPtr)
Definition dynahash.c:845
void hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp)
Definition dynahash.c:1317
Datum arg
Definition elog.c:1323
ErrorContextCallback * error_context_stack
Definition elog.c:100
int errcode(int sqlerrcode)
Definition elog.c:875
#define LOG
Definition elog.h:32
#define PG_RE_THROW()
Definition elog.h:407
#define errcontext
Definition elog.h:200
int errhint(const char *fmt,...) pg_attribute_printf(1
#define PG_TRY(...)
Definition elog.h:374
#define WARNING
Definition elog.h:37
#define PG_END_TRY(...)
Definition elog.h:399
#define PANIC
Definition elog.h:44
#define ERROR
Definition elog.h:40
#define PG_CATCH(...)
Definition elog.h:384
#define elog(elevel,...)
Definition elog.h:228
#define ereport(elevel,...)
Definition elog.h:152
int int int int errdetail_log_plural(const char *fmt_singular, const char *fmt_plural, unsigned long n,...) pg_attribute_printf(1
#define palloc_object(type)
Definition fe_memutils.h:74
#define palloc_array(type, count)
Definition fe_memutils.h:76
#define palloc0_array(type, count)
Definition fe_memutils.h:77
int MyProcPid
Definition globals.c:49
ProcNumber MyProcNumber
Definition globals.c:92
int MaxBackends
Definition globals.c:149
@ HASH_FIND
Definition hsearch.h:108
@ HASH_REMOVE
Definition hsearch.h:110
@ HASH_ENTER
Definition hsearch.h:109
@ HASH_ENTER_NULL
Definition hsearch.h:111
#define HASH_CONTEXT
Definition hsearch.h:97
#define HASH_ELEM
Definition hsearch.h:90
#define HASH_FUNCTION
Definition hsearch.h:93
#define HASH_BLOBS
Definition hsearch.h:92
#define HASH_PARTITION
Definition hsearch.h:87
#define dlist_foreach(iter, lhead)
Definition ilist.h:623
static void dlist_init(dlist_head *head)
Definition ilist.h:314
static void dlist_delete(dlist_node *node)
Definition ilist.h:405
static uint32 dclist_count(const dclist_head *head)
Definition ilist.h:932
static bool dclist_is_empty(const dclist_head *head)
Definition ilist.h:682
static bool dlist_node_is_detached(const dlist_node *node)
Definition ilist.h:525
#define dlist_foreach_modify(iter, lhead)
Definition ilist.h:640
static bool dlist_is_empty(const dlist_head *head)
Definition ilist.h:336
static void dlist_push_tail(dlist_head *head, dlist_node *node)
Definition ilist.h:364
static void dclist_delete_from_thoroughly(dclist_head *head, dlist_node *node)
Definition ilist.h:776
static void dclist_init(dclist_head *head)
Definition ilist.h:671
#define dlist_container(type, membername, ptr)
Definition ilist.h:593
#define dclist_foreach(iter, lhead)
Definition ilist.h:970
int j
Definition isn.c:78
int i
Definition isn.c:77
void DescribeLockTag(StringInfo buf, const LOCKTAG *tag)
Definition lmgr.c:1249
static bool XactLockForVirtualXact(VirtualTransactionId vxid, TransactionId xid, bool wait)
Definition lock.c:4674
LockAcquireResult LockAcquire(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock, bool dontWait)
Definition lock.c:806
static LOCALLOCK * awaitedLock
Definition lock.c:339
static void RemoveLocalLock(LOCALLOCK *locallock)
Definition lock.c:1484
static void LockReassignOwner(LOCALLOCK *locallock, ResourceOwner parent)
Definition lock.c:2744
bool LockHeldByMe(const LOCKTAG *locktag, LOCKMODE lockmode, bool orstronger)
Definition lock.c:640
void lock_twophase_postabort(FullTransactionId fxid, uint16 info, void *recdata, uint32 len)
Definition lock.c:4578
#define LOCK_PRINT(where, lock, type)
Definition lock.c:416
void PostPrepare_Locks(FullTransactionId fxid)
Definition lock.c:3580
void lock_twophase_standby_recover(FullTransactionId fxid, uint16 info, void *recdata, uint32 len)
Definition lock.c:4520
bool DoLockModesConflict(LOCKMODE mode1, LOCKMODE mode2)
Definition lock.c:620
static PROCLOCK * SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc, const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode)
Definition lock.c:1291
static PROCLOCK * FastPathGetRelationLockEntry(LOCALLOCK *locallock)
Definition lock.c:2966
const ShmemCallbacks LockManagerShmemCallbacks
Definition lock.c:320
void VirtualXactLockTableInsert(VirtualTransactionId vxid)
Definition lock.c:4602
#define NLOCKENTS()
Definition lock.c:59
#define FastPathStrongLockHashPartition(hashcode)
Definition lock.c:306
static uint32 ProcLockHashCode(const PROCLOCKTAG *proclocktag, uint32 hashcode)
Definition lock.c:602
#define FAST_PATH_CHECK_LOCKMODE(proc, n, l)
Definition lock.c:259
void GrantAwaitedLock(void)
Definition lock.c:1897
int LockWaiterCount(const LOCKTAG *locktag)
Definition lock.c:4836
void AtPrepare_Locks(void)
Definition lock.c:3484
bool LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
Definition lock.c:2110
#define FAST_PATH_LOCKNUMBER_OFFSET
Definition lock.c:245
#define FAST_PATH_REL_GROUP(rel)
Definition lock.c:220
void InitLockManagerAccess(void)
Definition lock.c:502
void GrantLock(LOCK *lock, PROCLOCK *proclock, LOCKMODE lockmode)
Definition lock.c:1666
void VirtualXactLockTableCleanup(void)
Definition lock.c:4625
bool VirtualXactLock(VirtualTransactionId vxid, bool wait)
Definition lock.c:4725
VirtualTransactionId * GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
Definition lock.c:3077
static volatile FastPathStrongRelationLockData * FastPathStrongRelationLocks
Definition lock.c:315
void RemoveFromWaitQueue(PGPROC *proc, uint32 hashcode)
Definition lock.c:2054
LockAcquireResult LockAcquireExtended(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock, bool dontWait, bool reportMemoryError, LOCALLOCK **locallockp, bool logLockFailure)
Definition lock.c:833
void LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
Definition lock.c:2315
#define FAST_PATH_SLOT(group, index)
Definition lock.c:227
static void CheckAndSetLockHeld(LOCALLOCK *locallock, bool acquired)
Definition lock.c:1472
#define ConflictsWithRelationFastPath(locktag, mode)
Definition lock.c:276
void ResetAwaitedLock(void)
Definition lock.c:1915
static bool FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag, uint32 hashcode)
Definition lock.c:2869
static HTAB * LockMethodLocalHash
Definition lock.c:334
void LockReassignCurrentOwner(LOCALLOCK **locallocks, int nlocks)
Definition lock.c:2714
static bool UnGrantLock(LOCK *lock, LOCKMODE lockmode, PROCLOCK *proclock, LockMethod lockMethodTable)
Definition lock.c:1689
#define FAST_PATH_SET_LOCKMODE(proc, n, l)
Definition lock.c:255
#define PROCLOCK_PRINT(where, proclockP)
Definition lock.c:417
static void CleanUpLock(LOCK *lock, PROCLOCK *proclock, LockMethod lockMethodTable, uint32 hashcode, bool wakeupNeeded)
Definition lock.c:1746
static uint32 proclock_hash(const void *key, Size keysize)
Definition lock.c:571
static bool FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode)
Definition lock.c:2833
void AbortStrongLockAcquire(void)
Definition lock.c:1868
static bool FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode)
Definition lock.c:2790
static int FastPathLocalUseCounts[FP_LOCK_GROUPS_PER_BACKEND_MAX]
Definition lock.c:179
static HTAB * LockMethodLockHash
Definition lock.c:332
static ResourceOwner awaitedOwner
Definition lock.c:340
BlockedProcsData * GetBlockerStatusData(int blocked_pid)
Definition lock.c:3979
static ProcWaitStatus WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner)
Definition lock.c:1940
bool LockHasWaiters(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
Definition lock.c:693
const char * GetLockmodeName(LOCKMETHODID lockmethodid, LOCKMODE mode)
Definition lock.c:4235
static void GetSingleProcBlockerStatusData(PGPROC *blocked_proc, BlockedProcsData *data)
Definition lock.c:4059
#define FAST_PATH_CLEAR_LOCKMODE(proc, n, l)
Definition lock.c:257
void lock_twophase_postcommit(FullTransactionId fxid, uint16 info, void *recdata, uint32 len)
Definition lock.c:4552
static const LockMethod LockMethods[]
Definition lock.c:153
static void waitonlock_error_callback(void *arg)
Definition lock.c:2028
void LockReleaseCurrentOwner(LOCALLOCK **locallocks, int nlocks)
Definition lock.c:2619
LOCALLOCK * GetAwaitedLock(void)
Definition lock.c:1906
void LockReleaseSession(LOCKMETHODID lockmethodid)
Definition lock.c:2589
void MarkLockClear(LOCALLOCK *locallock)
Definition lock.c:1928
LockData * GetLockStatusData(void)
Definition lock.c:3776
#define FAST_PATH_GET_BITS(proc, n)
Definition lock.c:248
static LOCALLOCK * StrongLockInProgress
Definition lock.c:338
#define FAST_PATH_BITS_PER_SLOT
Definition lock.c:244
int FastPathLockGroupsPerBackend
Definition lock.c:205
#define EligibleForRelationFastPath(locktag, mode)
Definition lock.c:270
uint32 LockTagHashCode(const LOCKTAG *locktag)
Definition lock.c:554
static void BeginStrongLockAcquire(LOCALLOCK *locallock, uint32 fasthashcode)
Definition lock.c:1832
bool LockCheckConflicts(LockMethod lockMethodTable, LOCKMODE lockmode, LOCK *lock, PROCLOCK *proclock)
Definition lock.c:1537
static void LockManagerShmemRequest(void *arg)
Definition lock.c:451
void lock_twophase_recover(FullTransactionId fxid, uint16 info, void *recdata, uint32 len)
Definition lock.c:4339
static void GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner)
Definition lock.c:1800
static void ReleaseLockIfHeld(LOCALLOCK *locallock, bool sessionLock)
Definition lock.c:2654
LockMethod GetLocksMethodTable(const LOCK *lock)
Definition lock.c:524
static void FinishStrongLockAcquire(void)
Definition lock.c:1858
#define FAST_PATH_STRONG_LOCK_HASH_PARTITIONS
Definition lock.c:304
xl_standby_lock * GetRunningTransactionLocks(int *nlocks)
Definition lock.c:4153
static void LockRefindAndRelease(LockMethod lockMethodTable, PGPROC *proc, LOCKTAG *locktag, LOCKMODE lockmode, bool decrement_strong_lock_count)
Definition lock.c:3292
static void CheckForSessionAndXactLocks(void)
Definition lock.c:3396
static HTAB * LockMethodProcLockHash
Definition lock.c:333
static void LockManagerShmemInit(void *arg)
Definition lock.c:493
LockMethod GetLockTagsMethodTable(const LOCKTAG *locktag)
Definition lock.c:536
#define LOCK_LOCKTAG(lock)
Definition lock.h:156
#define VirtualTransactionIdIsValid(vxid)
Definition lock.h:70
#define LockHashPartitionLock(hashcode)
Definition lock.h:357
#define GET_VXID_FROM_PGPROC(vxid_dst, proc)
Definition lock.h:80
#define LOCK_LOCKMETHOD(lock)
Definition lock.h:155
#define LOCKBIT_OFF(lockmode)
Definition lock.h:88
#define LOCALLOCK_LOCKMETHOD(llock)
Definition lock.h:274
#define InvalidLocalTransactionId
Definition lock.h:68
#define MAX_LOCKMODES
Definition lock.h:85
#define LOCKBIT_ON(lockmode)
Definition lock.h:87
#define LocalTransactionIdIsValid(lxid)
Definition lock.h:69
#define LOCALLOCK_LOCKTAG(llock)
Definition lock.h:275
#define LockHashPartition(hashcode)
Definition lock.h:355
#define VirtualTransactionIdEquals(vxid1, vxid2)
Definition lock.h:74
#define PROCLOCK_LOCKMETHOD(proclock)
Definition lock.h:213
#define LockHashPartitionLockByIndex(i)
Definition lock.h:360
LockAcquireResult
Definition lock.h:331
@ LOCKACQUIRE_ALREADY_CLEAR
Definition lock.h:335
@ LOCKACQUIRE_OK
Definition lock.h:333
@ LOCKACQUIRE_ALREADY_HELD
Definition lock.h:334
@ LOCKACQUIRE_NOT_AVAIL
Definition lock.h:332
#define VirtualTransactionIdIsRecoveredPreparedXact(vxid)
Definition lock.h:72
int LOCKMODE
Definition lockdefs.h:26
#define NoLock
Definition lockdefs.h:34
#define AccessExclusiveLock
Definition lockdefs.h:43
int LOCKMASK
Definition lockdefs.h:25
#define ExclusiveLock
Definition lockdefs.h:42
#define ShareLock
Definition lockdefs.h:40
#define MaxLockMode
Definition lockdefs.h:45
#define RowExclusiveLock
Definition lockdefs.h:38
uint16 LOCKMETHODID
Definition locktag.h:22
#define SET_LOCKTAG_VIRTUALTRANSACTION(locktag, vxid)
Definition locktag.h:135
@ LOCKTAG_OBJECT
Definition locktag.h:45
@ LOCKTAG_RELATION_EXTEND
Definition locktag.h:38
@ LOCKTAG_TUPLE
Definition locktag.h:41
@ LOCKTAG_VIRTUALTRANSACTION
Definition locktag.h:43
#define SET_LOCKTAG_TRANSACTION(locktag, xid)
Definition locktag.h:126
#define SET_LOCKTAG_RELATION(locktag, dboid, reloid)
Definition locktag.h:81
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
Definition lwlock.c:1150
void LWLockRelease(LWLock *lock)
Definition lwlock.c:1767
#define NUM_LOCK_PARTITIONS
Definition lwlock.h:87
#define LOG2_NUM_LOCK_PARTITIONS
Definition lwlock.h:86
@ LW_SHARED
Definition lwlock.h:105
@ LW_EXCLUSIVE
Definition lwlock.h:104
void * MemoryContextAlloc(MemoryContext context, Size size)
Definition mcxt.c:1232
void * repalloc(void *pointer, Size size)
Definition mcxt.c:1632
void pfree(void *pointer)
Definition mcxt.c:1616
MemoryContext TopMemoryContext
Definition mcxt.c:166
void * palloc(Size size)
Definition mcxt.c:1387
MemoryContext CurrentMemoryContext
Definition mcxt.c:160
#define START_CRIT_SECTION()
Definition miscadmin.h:152
#define END_CRIT_SECTION()
Definition miscadmin.h:154
static char * errmsg
const void size_t len
const void * data
static char buf[DEFAULT_XLOG_SEG_SIZE]
void pgstat_count_lock_fastpath_exceeded(uint8 locktag_type)
static uint32 DatumGetUInt32(Datum X)
Definition postgres.h:222
uint64_t Datum
Definition postgres.h:70
#define PointerGetDatum(X)
Definition postgres.h:354
unsigned int Oid
static int fb(int x)
#define FastPathLockSlotsPerBackend()
Definition proc.h:97
#define GetPGProcByNumber(n)
Definition proc.h:504
#define FP_LOCK_SLOTS_PER_GROUP
Definition proc.h:96
ProcWaitStatus
Definition proc.h:144
@ PROC_WAIT_STATUS_OK
Definition proc.h:145
@ PROC_WAIT_STATUS_WAITING
Definition proc.h:146
@ PROC_WAIT_STATUS_ERROR
Definition proc.h:147
PGPROC * BackendPidGetProcWithLock(int pid)
Definition procarray.c:3192
PGPROC * ProcNumberGetProc(ProcNumber procNumber)
Definition procarray.c:3111
#define INVALID_PROC_NUMBER
Definition procnumber.h:26
void set_ps_display_remove_suffix(void)
Definition ps_status.c:440
void set_ps_display_suffix(const char *suffix)
Definition ps_status.c:388
void ResourceOwnerRememberLock(ResourceOwner owner, LOCALLOCK *locallock)
Definition resowner.c:1059
ResourceOwner ResourceOwnerGetParent(ResourceOwner owner)
Definition resowner.c:902
ResourceOwner CurrentResourceOwner
Definition resowner.c:173
void ResourceOwnerForgetLock(ResourceOwner owner, LOCALLOCK *locallock)
Definition resowner.c:1079
#define ShmemRequestHash(...)
Definition shmem.h:179
#define ShmemRequestStruct(...)
Definition shmem.h:176
static void SpinLockRelease(volatile slock_t *lock)
Definition spin.h:62
static void SpinLockAcquire(volatile slock_t *lock)
Definition spin.h:56
static void SpinLockInit(volatile slock_t *lock)
Definition spin.h:50
ProcWaitStatus JoinWaitQueue(LOCALLOCK *locallock, LockMethod lockMethodTable, bool dontWait)
Definition proc.c:1146
PGPROC * MyProc
Definition proc.c:71
void GetLockHoldersAndWaiters(LOCALLOCK *locallock, StringInfo lock_holders_sbuf, StringInfo lock_waiters_sbuf, int *lockHoldersNum)
Definition proc.c:1941
ProcWaitStatus ProcSleep(LOCALLOCK *locallock)
Definition proc.c:1315
void ProcLockWakeup(LockMethod lockMethodTable, LOCK *lock)
Definition proc.c:1776
PROC_HDR * ProcGlobal
Definition proc.c:74
void LogAccessExclusiveLockPrepare(void)
Definition standby.c:1471
void StandbyAcquireAccessExclusiveLock(TransactionId xid, Oid dbOid, Oid relOid)
Definition standby.c:988
void LogAccessExclusiveLock(Oid dbOid, Oid relOid)
Definition standby.c:1454
void initStringInfo(StringInfo str)
Definition stringinfo.c:97
struct ErrorContextCallback * previous
Definition elog.h:299
uint32 count[FAST_PATH_STRONG_LOCK_HASH_PARTITIONS]
Definition lock.c:312
Size keysize
Definition hsearch.h:69
Size entrysize
Definition hsearch.h:70
Size keysize
Definition dynahash.c:241
int64 nLocks
Definition lock.h:254
struct ResourceOwnerData * owner
Definition lock.h:253
uint8 locktag_type
Definition locktag.h:70
uint32 locktag_field3
Definition locktag.h:68
uint32 locktag_field1
Definition locktag.h:66
uint8 locktag_lockmethodid
Definition locktag.h:71
uint16 locktag_field4
Definition locktag.h:69
uint32 locktag_field2
Definition locktag.h:67
Definition lock.h:140
int nRequested
Definition lock.h:150
LOCKTAG tag
Definition lock.h:142
int requested[MAX_LOCKMODES]
Definition lock.h:149
dclist_head waitProcs
Definition lock.h:148
int granted[MAX_LOCKMODES]
Definition lock.h:151
LOCKMASK grantMask
Definition lock.h:145
LOCKMASK waitMask
Definition lock.h:146
int nGranted
Definition lock.h:152
dlist_head procLocks
Definition lock.h:147
const bool * trace_flag
Definition lock.h:116
const char *const * lockModeNames
Definition lock.h:115
Definition proc.h:179
LWLock fpInfoLock
Definition proc.h:324
LocalTransactionId lxid
Definition proc.h:231
PROCLOCK * waitProcLock
Definition proc.h:306
dlist_head lockGroupMembers
Definition proc.h:299
Oid * fpRelId
Definition proc.h:326
Oid databaseId
Definition proc.h:201
uint64 * fpLockBits
Definition proc.h:325
pg_atomic_uint64 waitStart
Definition proc.h:311
bool fpVXIDLock
Definition proc.h:327
ProcNumber procNumber
Definition proc.h:226
int pid
Definition proc.h:197
struct PGPROC::@136 vxid
LOCK * waitLock
Definition proc.h:304
TransactionId xid
Definition proc.h:237
LOCKMODE waitLockMode
Definition proc.h:307
dlist_node waitLink
Definition proc.h:305
PGPROC * lockGroupLeader
Definition proc.h:298
LocalTransactionId fpLocalTransactionId
Definition proc.h:328
dlist_head myProcLocks[NUM_LOCK_PARTITIONS]
Definition proc.h:321
ProcWaitStatus waitStatus
Definition proc.h:314
LOCK * myLock
Definition lock.h:196
PGPROC * myProc
Definition lock.h:197
LOCKMASK holdMask
Definition lock.h:207
dlist_node lockLink
Definition lock.h:209
PGPROC * groupLeader
Definition lock.h:206
LOCKMASK releaseMask
Definition lock.h:208
PROCLOCKTAG tag
Definition lock.h:203
dlist_node procLink
Definition lock.h:210
uint32 allProcCount
Definition proc.h:459
ShmemRequestCallback request_fn
Definition shmem.h:133
LOCKTAG locktag
Definition lock.c:163
LOCKMODE lockmode
Definition lock.c:164
LocalTransactionId localTransactionId
Definition lock.h:65
ProcNumber procNumber
Definition lock.h:64
dlist_node * cur
Definition ilist.h:179
Definition type.h:96
#define InvalidTransactionId
Definition transam.h:31
#define XidFromFullTransactionId(x)
Definition transam.h:48
#define FirstNormalObjectId
Definition transam.h:197
#define TransactionIdIsValid(xid)
Definition transam.h:41
void RegisterTwoPhaseRecord(TwoPhaseRmgrId rmid, uint16 info, const void *data, uint32 len)
Definition twophase.c:1277
int max_prepared_xacts
Definition twophase.c:118
TransactionId TwoPhaseGetXidByVirtualXID(VirtualTransactionId vxid, bool *have_more)
Definition twophase.c:862
PGPROC * TwoPhaseGetDummyProc(FullTransactionId fxid, bool lock_held)
Definition twophase.c:929
#define TWOPHASE_RM_LOCK_ID
const char * type
const char * name
bool RecoveryInProgress(void)
Definition xlog.c:6836
#define XLogStandbyInfoActive()
Definition xlog.h:126
bool InRecovery
Definition xlogutils.c:50
#define InHotStandby
Definition xlogutils.h:60

◆ EligibleForRelationFastPath

#define EligibleForRelationFastPath (   locktag,
  mode 
)
Value:
((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
(locktag)->locktag_type == LOCKTAG_RELATION && \
(locktag)->locktag_field1 == MyDatabaseId && \
Oid MyDatabaseId
Definition globals.c:96

Definition at line 270 of file lock.c.

◆ FAST_PATH_BIT_POSITION

#define FAST_PATH_BIT_POSITION (   n,
 
)
Value:

Definition at line 250 of file lock.c.

◆ FAST_PATH_BITS

#define FAST_PATH_BITS (   proc,
 
)    (proc)->fpLockBits[FAST_PATH_GROUP(n)]

Definition at line 247 of file lock.c.

◆ FAST_PATH_BITS_PER_SLOT

#define FAST_PATH_BITS_PER_SLOT   3

Definition at line 244 of file lock.c.

◆ FAST_PATH_CHECK_LOCKMODE

#define FAST_PATH_CHECK_LOCKMODE (   proc,
  n,
 
)     (FAST_PATH_BITS(proc, n) & (UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)))

Definition at line 259 of file lock.c.

◆ FAST_PATH_CLEAR_LOCKMODE

#define FAST_PATH_CLEAR_LOCKMODE (   proc,
  n,
 
)     FAST_PATH_BITS(proc, n) &= ~(UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l))

Definition at line 257 of file lock.c.

◆ FAST_PATH_GET_BITS

#define FAST_PATH_GET_BITS (   proc,
 
)     ((FAST_PATH_BITS(proc, n) >> (FAST_PATH_BITS_PER_SLOT * FAST_PATH_INDEX(n))) & FAST_PATH_MASK)

Definition at line 248 of file lock.c.

◆ FAST_PATH_GROUP

#define FAST_PATH_GROUP (   index)
Value:

Definition at line 236 of file lock.c.

◆ FAST_PATH_INDEX

#define FAST_PATH_INDEX (   index)
Value:

Definition at line 239 of file lock.c.

◆ FAST_PATH_LOCKNUMBER_OFFSET

#define FAST_PATH_LOCKNUMBER_OFFSET   1

Definition at line 245 of file lock.c.

◆ FAST_PATH_MASK

#define FAST_PATH_MASK   ((1 << FAST_PATH_BITS_PER_SLOT) - 1)

Definition at line 246 of file lock.c.

◆ FAST_PATH_REL_GROUP

#define FAST_PATH_REL_GROUP (   rel)     (((uint64) (rel) * 49157) & (FastPathLockGroupsPerBackend - 1))

Definition at line 220 of file lock.c.

◆ FAST_PATH_SET_LOCKMODE

#define FAST_PATH_SET_LOCKMODE (   proc,
  n,
 
)     FAST_PATH_BITS(proc, n) |= UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)

Definition at line 255 of file lock.c.

◆ FAST_PATH_SLOT

#define FAST_PATH_SLOT (   group,
  index 
)
Value:

Definition at line 227 of file lock.c.

◆ FAST_PATH_STRONG_LOCK_HASH_BITS

#define FAST_PATH_STRONG_LOCK_HASH_BITS   10

Definition at line 303 of file lock.c.

◆ FAST_PATH_STRONG_LOCK_HASH_PARTITIONS

#define FAST_PATH_STRONG_LOCK_HASH_PARTITIONS    (1 << FAST_PATH_STRONG_LOCK_HASH_BITS)

Definition at line 304 of file lock.c.

◆ FastPathStrongLockHashPartition

#define FastPathStrongLockHashPartition (   hashcode)     ((hashcode) % FAST_PATH_STRONG_LOCK_HASH_PARTITIONS)

Definition at line 306 of file lock.c.

◆ LOCK_PRINT

#define LOCK_PRINT (   where,
  lock,
  type 
)    ((void) 0)

Definition at line 416 of file lock.c.

◆ NLOCKENTS

Definition at line 59 of file lock.c.

◆ PROCLOCK_PRINT

#define PROCLOCK_PRINT (   where,
  proclockP 
)    ((void) 0)

Definition at line 417 of file lock.c.

Typedef Documentation

◆ TwoPhaseLockRecord

Function Documentation

◆ AbortStrongLockAcquire()

◆ AtPrepare_Locks()

void AtPrepare_Locks ( void  )

Definition at line 3484 of file lock.c.

3485{
3486 HASH_SEQ_STATUS status;
3488
3489 /* First, verify there aren't locks of both xact and session level */
3491
3492 /* Now do the per-locallock cleanup work */
3494
3495 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3496 {
3497 TwoPhaseLockRecord record;
3498 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3499 bool haveSessionLock;
3500 bool haveXactLock;
3501 int i;
3502
3503 /*
3504 * Ignore VXID locks. We don't want those to be held by prepared
3505 * transactions, since they aren't meaningful after a restart.
3506 */
3507 if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3508 continue;
3509
3510 /* Ignore it if we don't actually hold the lock */
3511 if (locallock->nLocks <= 0)
3512 continue;
3513
3514 /* Scan to see whether we hold it at session or transaction level */
3515 haveSessionLock = haveXactLock = false;
3516 for (i = locallock->numLockOwners - 1; i >= 0; i--)
3517 {
3518 if (lockOwners[i].owner == NULL)
3519 haveSessionLock = true;
3520 else
3521 haveXactLock = true;
3522 }
3523
3524 /* Ignore it if we have only session lock */
3525 if (!haveXactLock)
3526 continue;
3527
3528 /* This can't happen, because we already checked it */
3529 if (haveSessionLock)
3530 ereport(ERROR,
3532 errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3533
3534 /*
3535 * If the local lock was taken via the fast-path, we need to move it
3536 * to the primary lock table, or just get a pointer to the existing
3537 * primary lock table entry if by chance it's already been
3538 * transferred.
3539 */
3540 if (locallock->proclock == NULL)
3541 {
3543 locallock->lock = locallock->proclock->tag.myLock;
3544 }
3545
3546 /*
3547 * Arrange to not release any strong lock count held by this lock
3548 * entry. We must retain the count until the prepared transaction is
3549 * committed or rolled back.
3550 */
3551 locallock->holdsStrongLockCount = false;
3552
3553 /*
3554 * Create a 2PC record.
3555 */
3556 memcpy(&(record.locktag), &(locallock->tag.lock), sizeof(LOCKTAG));
3557 record.lockmode = locallock->tag.mode;
3558
3560 &record, sizeof(TwoPhaseLockRecord));
3561 }
3562}

References CheckForSessionAndXactLocks(), ereport, errcode(), errmsg, ERROR, FastPathGetRelationLockEntry(), fb(), hash_seq_init(), hash_seq_search(), i, LockMethodLocalHash, TwoPhaseLockRecord::lockmode, TwoPhaseLockRecord::locktag, LOCKTAG_VIRTUALTRANSACTION, memcpy(), RegisterTwoPhaseRecord(), and TWOPHASE_RM_LOCK_ID.

Referenced by PrepareTransaction().

◆ BeginStrongLockAcquire()

static void BeginStrongLockAcquire ( LOCALLOCK locallock,
uint32  fasthashcode 
)
static

Definition at line 1832 of file lock.c.

1833{
1835 Assert(locallock->holdsStrongLockCount == false);
1836
1837 /*
1838 * Adding to a memory location is not atomic, so we take a spinlock to
1839 * ensure we don't collide with someone else trying to bump the count at
1840 * the same time.
1841 *
1842 * XXX: It might be worth considering using an atomic fetch-and-add
1843 * instruction here, on architectures where that is supported.
1844 */
1845
1848 locallock->holdsStrongLockCount = true;
1851}

References Assert, FastPathStrongRelationLockData::count, FastPathStrongRelationLocks, fb(), FastPathStrongRelationLockData::mutex, SpinLockAcquire(), SpinLockRelease(), and StrongLockInProgress.

Referenced by LockAcquireExtended().

◆ CheckAndSetLockHeld()

static void CheckAndSetLockHeld ( LOCALLOCK locallock,
bool  acquired 
)
inlinestatic

Definition at line 1472 of file lock.c.

1473{
1474#ifdef USE_ASSERT_CHECKING
1477#endif
1478}

References fb(), LOCALLOCK_LOCKTAG, and LOCKTAG_RELATION_EXTEND.

Referenced by GrantLockLocal(), and RemoveLocalLock().

◆ CheckForSessionAndXactLocks()

static void CheckForSessionAndXactLocks ( void  )
static

Definition at line 3396 of file lock.c.

3397{
3398 typedef struct
3399 {
3400 LOCKTAG lock; /* identifies the lockable object */
3401 bool sessLock; /* is any lockmode held at session level? */
3402 bool xactLock; /* is any lockmode held at xact level? */
3404
3406 HTAB *lockhtab;
3407 HASH_SEQ_STATUS status;
3409
3410 /* Create a local hash table keyed by LOCKTAG only */
3411 hash_ctl.keysize = sizeof(LOCKTAG);
3412 hash_ctl.entrysize = sizeof(PerLockTagEntry);
3414
3415 lockhtab = hash_create("CheckForSessionAndXactLocks table",
3416 256, /* arbitrary initial size */
3417 &hash_ctl,
3419
3420 /* Scan local lock table to find entries for each LOCKTAG */
3422
3423 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3424 {
3425 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3427 bool found;
3428 int i;
3429
3430 /*
3431 * Ignore VXID locks. We don't want those to be held by prepared
3432 * transactions, since they aren't meaningful after a restart.
3433 */
3434 if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3435 continue;
3436
3437 /* Ignore it if we don't actually hold the lock */
3438 if (locallock->nLocks <= 0)
3439 continue;
3440
3441 /* Otherwise, find or make an entry in lockhtab */
3443 &locallock->tag.lock,
3444 HASH_ENTER, &found);
3445 if (!found) /* initialize, if newly created */
3446 hentry->sessLock = hentry->xactLock = false;
3447
3448 /* Scan to see if we hold lock at session or xact level or both */
3449 for (i = locallock->numLockOwners - 1; i >= 0; i--)
3450 {
3451 if (lockOwners[i].owner == NULL)
3452 hentry->sessLock = true;
3453 else
3454 hentry->xactLock = true;
3455 }
3456
3457 /*
3458 * We can throw error immediately when we see both types of locks; no
3459 * need to wait around to see if there are more violations.
3460 */
3461 if (hentry->sessLock && hentry->xactLock)
3462 ereport(ERROR,
3464 errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3465 }
3466
3467 /* Success, so clean up */
3469}

References CurrentMemoryContext, ereport, errcode(), errmsg, ERROR, fb(), HASH_BLOBS, HASH_CONTEXT, hash_create(), hash_destroy(), HASH_ELEM, HASH_ENTER, hash_search(), hash_seq_init(), hash_seq_search(), i, LockMethodLocalHash, and LOCKTAG_VIRTUALTRANSACTION.

Referenced by AtPrepare_Locks().

◆ CleanUpLock()

static void CleanUpLock ( LOCK lock,
PROCLOCK proclock,
LockMethod  lockMethodTable,
uint32  hashcode,
bool  wakeupNeeded 
)
static

Definition at line 1746 of file lock.c.

1749{
1750 /*
1751 * If this was my last hold on this lock, delete my entry in the proclock
1752 * table.
1753 */
1754 if (proclock->holdMask == 0)
1755 {
1757
1758 PROCLOCK_PRINT("CleanUpLock: deleting", proclock);
1759 dlist_delete(&proclock->lockLink);
1760 dlist_delete(&proclock->procLink);
1761 proclock_hashcode = ProcLockHashCode(&proclock->tag, hashcode);
1763 &(proclock->tag),
1766 NULL))
1767 elog(PANIC, "proclock table corrupted");
1768 }
1769
1770 if (lock->nRequested == 0)
1771 {
1772 /*
1773 * The caller just released the last lock, so garbage-collect the lock
1774 * object.
1775 */
1776 LOCK_PRINT("CleanUpLock: deleting", lock, 0);
1779 &(lock->tag),
1780 hashcode,
1782 NULL))
1783 elog(PANIC, "lock table corrupted");
1784 }
1785 else if (wakeupNeeded)
1786 {
1787 /* There are waiters on this lock, so wake them up. */
1789 }
1790}

References Assert, dlist_delete(), dlist_is_empty(), elog, fb(), HASH_REMOVE, hash_search_with_hash_value(), PROCLOCK::holdMask, LOCK_PRINT, PROCLOCK::lockLink, LockMethodLockHash, LockMethodProcLockHash, LOCK::nRequested, PANIC, PROCLOCK::procLink, PROCLOCK_PRINT, ProcLockHashCode(), LOCK::procLocks, ProcLockWakeup(), LOCK::tag, and PROCLOCK::tag.

Referenced by LockRefindAndRelease(), LockRelease(), LockReleaseAll(), and RemoveFromWaitQueue().

◆ DoLockModesConflict()

bool DoLockModesConflict ( LOCKMODE  mode1,
LOCKMODE  mode2 
)

Definition at line 620 of file lock.c.

621{
623
624 if (lockMethodTable->conflictTab[mode1] & LOCKBIT_ON(mode2))
625 return true;
626
627 return false;
628}

References DEFAULT_LOCKMETHOD, fb(), LOCKBIT_ON, and LockMethods.

Referenced by Do_MultiXactIdWait(), DoesMultiXactIdConflict(), initialize_reloptions(), and test_lockmode_for_conflict().

◆ FastPathGetRelationLockEntry()

static PROCLOCK * FastPathGetRelationLockEntry ( LOCALLOCK locallock)
static

Definition at line 2966 of file lock.c.

2967{
2969 LOCKTAG *locktag = &locallock->tag.lock;
2970 PROCLOCK *proclock = NULL;
2972 Oid relid = locktag->locktag_field2;
2973 uint32 i,
2974 group;
2975
2976 /* fast-path group the lock belongs to */
2977 group = FAST_PATH_REL_GROUP(relid);
2978
2980
2981 for (i = 0; i < FP_LOCK_SLOTS_PER_GROUP; i++)
2982 {
2983 uint32 lockmode;
2984
2985 /* index into the whole per-backend array */
2986 uint32 f = FAST_PATH_SLOT(group, i);
2987
2988 /* Look for an allocated slot matching the given relid. */
2989 if (relid != MyProc->fpRelId[f] || FAST_PATH_GET_BITS(MyProc, f) == 0)
2990 continue;
2991
2992 /* If we don't have a lock of the given mode, forget it! */
2993 lockmode = locallock->tag.mode;
2994 if (!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
2995 break;
2996
2997 /* Find or create lock object. */
2999
3000 proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
3001 locallock->hashcode, lockmode);
3002 if (!proclock)
3003 {
3006 ereport(ERROR,
3008 errmsg("out of shared memory"),
3009 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
3010 }
3011 GrantLock(proclock->tag.myLock, proclock, lockmode);
3012 FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
3013
3015
3016 /* No need to examine remaining slots. */
3017 break;
3018 }
3019
3021
3022 /* Lock may have already been transferred by some other backend. */
3023 if (proclock == NULL)
3024 {
3025 LOCK *lock;
3028
3030
3032 locktag,
3033 locallock->hashcode,
3034 HASH_FIND,
3035 NULL);
3036 if (!lock)
3037 elog(ERROR, "failed to re-find shared lock object");
3038
3039 proclocktag.myLock = lock;
3040 proclocktag.myProc = MyProc;
3041
3043 proclock = (PROCLOCK *)
3045 &proclocktag,
3047 HASH_FIND,
3048 NULL);
3049 if (!proclock)
3050 elog(ERROR, "failed to re-find shared proclock object");
3052 }
3053
3054 return proclock;
3055}

References DEFAULT_LOCKMETHOD, elog, ereport, errcode(), errhint(), errmsg, ERROR, FAST_PATH_CHECK_LOCKMODE, FAST_PATH_CLEAR_LOCKMODE, FAST_PATH_GET_BITS, FAST_PATH_REL_GROUP, FAST_PATH_SLOT, fb(), FP_LOCK_SLOTS_PER_GROUP, PGPROC::fpInfoLock, PGPROC::fpRelId, GrantLock(), HASH_FIND, hash_search_with_hash_value(), i, LockHashPartitionLock, LockMethodLockHash, LockMethodProcLockHash, LockMethods, LOCKTAG::locktag_field2, LW_EXCLUSIVE, LW_SHARED, LWLockAcquire(), LWLockRelease(), PROCLOCKTAG::myLock, MyProc, ProcLockHashCode(), SetupLockInTable(), and PROCLOCK::tag.

Referenced by AtPrepare_Locks().

◆ FastPathGrantRelationLock()

static bool FastPathGrantRelationLock ( Oid  relid,
LOCKMODE  lockmode 
)
static

Definition at line 2790 of file lock.c.

2791{
2792 uint32 i;
2794
2795 /* fast-path group the lock belongs to */
2796 uint32 group = FAST_PATH_REL_GROUP(relid);
2797
2798 /* Scan for existing entry for this relid, remembering empty slot. */
2799 for (i = 0; i < FP_LOCK_SLOTS_PER_GROUP; i++)
2800 {
2801 /* index into the whole per-backend array */
2802 uint32 f = FAST_PATH_SLOT(group, i);
2803
2804 if (FAST_PATH_GET_BITS(MyProc, f) == 0)
2805 unused_slot = f;
2806 else if (MyProc->fpRelId[f] == relid)
2807 {
2808 Assert(!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode));
2809 FAST_PATH_SET_LOCKMODE(MyProc, f, lockmode);
2810 return true;
2811 }
2812 }
2813
2814 /* If no existing entry, use any empty slot. */
2816 {
2817 MyProc->fpRelId[unused_slot] = relid;
2819 ++FastPathLocalUseCounts[group];
2820 return true;
2821 }
2822
2823 /* No existing entry, and no empty slot. */
2824 return false;
2825}

References Assert, FAST_PATH_CHECK_LOCKMODE, FAST_PATH_GET_BITS, FAST_PATH_REL_GROUP, FAST_PATH_SET_LOCKMODE, FAST_PATH_SLOT, FastPathLocalUseCounts, FastPathLockSlotsPerBackend, fb(), FP_LOCK_SLOTS_PER_GROUP, PGPROC::fpRelId, i, and MyProc.

Referenced by LockAcquireExtended().

◆ FastPathTransferRelationLocks()

static bool FastPathTransferRelationLocks ( LockMethod  lockMethodTable,
const LOCKTAG locktag,
uint32  hashcode 
)
static

Definition at line 2869 of file lock.c.

2871{
2873 Oid relid = locktag->locktag_field2;
2874 uint32 i;
2875
2876 /* fast-path group the lock belongs to */
2877 uint32 group = FAST_PATH_REL_GROUP(relid);
2878
2879 /*
2880 * Every PGPROC that can potentially hold a fast-path lock is present in
2881 * ProcGlobal->allProcs. Prepared transactions are not, but any
2882 * outstanding fast-path locks held by prepared transactions are
2883 * transferred to the main lock table.
2884 */
2885 for (i = 0; i < ProcGlobal->allProcCount; i++)
2886 {
2887 PGPROC *proc = GetPGProcByNumber(i);
2888 uint32 j;
2889
2891
2892 /*
2893 * If the target backend isn't referencing the same database as the
2894 * lock, then we needn't examine the individual relation IDs at all;
2895 * none of them can be relevant.
2896 *
2897 * proc->databaseId is set at backend startup time and never changes
2898 * thereafter, so it might be safe to perform this test before
2899 * acquiring &proc->fpInfoLock. In particular, it's certainly safe to
2900 * assume that if the target backend holds any fast-path locks, it
2901 * must have performed a memory-fencing operation (in particular, an
2902 * LWLock acquisition) since setting proc->databaseId. However, it's
2903 * less clear that our backend is certain to have performed a memory
2904 * fencing operation since the other backend set proc->databaseId. So
2905 * for now, we test it after acquiring the LWLock just to be safe.
2906 *
2907 * Also skip groups without any registered fast-path locks.
2908 */
2909 if (proc->databaseId != locktag->locktag_field1 ||
2910 proc->fpLockBits[group] == 0)
2911 {
2912 LWLockRelease(&proc->fpInfoLock);
2913 continue;
2914 }
2915
2916 for (j = 0; j < FP_LOCK_SLOTS_PER_GROUP; j++)
2917 {
2918 uint32 lockmode;
2919
2920 /* index into the whole per-backend array */
2921 uint32 f = FAST_PATH_SLOT(group, j);
2922
2923 /* Look for an allocated slot matching the given relid. */
2924 if (relid != proc->fpRelId[f] || FAST_PATH_GET_BITS(proc, f) == 0)
2925 continue;
2926
2927 /* Find or create lock object. */
2929 for (lockmode = FAST_PATH_LOCKNUMBER_OFFSET;
2931 ++lockmode)
2932 {
2933 PROCLOCK *proclock;
2934
2935 if (!FAST_PATH_CHECK_LOCKMODE(proc, f, lockmode))
2936 continue;
2937 proclock = SetupLockInTable(lockMethodTable, proc, locktag,
2938 hashcode, lockmode);
2939 if (!proclock)
2940 {
2942 LWLockRelease(&proc->fpInfoLock);
2943 return false;
2944 }
2945 GrantLock(proclock->tag.myLock, proclock, lockmode);
2946 FAST_PATH_CLEAR_LOCKMODE(proc, f, lockmode);
2947 }
2949
2950 /* No need to examine remaining slots. */
2951 break;
2952 }
2953 LWLockRelease(&proc->fpInfoLock);
2954 }
2955 return true;
2956}

References PROC_HDR::allProcCount, PGPROC::databaseId, FAST_PATH_BITS_PER_SLOT, FAST_PATH_CHECK_LOCKMODE, FAST_PATH_CLEAR_LOCKMODE, FAST_PATH_GET_BITS, FAST_PATH_LOCKNUMBER_OFFSET, FAST_PATH_REL_GROUP, FAST_PATH_SLOT, fb(), FP_LOCK_SLOTS_PER_GROUP, PGPROC::fpInfoLock, PGPROC::fpLockBits, PGPROC::fpRelId, GetPGProcByNumber, GrantLock(), i, j, LockHashPartitionLock, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), PROCLOCKTAG::myLock, ProcGlobal, SetupLockInTable(), and PROCLOCK::tag.

Referenced by LockAcquireExtended().

◆ FastPathUnGrantRelationLock()

static bool FastPathUnGrantRelationLock ( Oid  relid,
LOCKMODE  lockmode 
)
static

Definition at line 2833 of file lock.c.

2834{
2835 uint32 i;
2836 bool result = false;
2837
2838 /* fast-path group the lock belongs to */
2839 uint32 group = FAST_PATH_REL_GROUP(relid);
2840
2841 FastPathLocalUseCounts[group] = 0;
2842 for (i = 0; i < FP_LOCK_SLOTS_PER_GROUP; i++)
2843 {
2844 /* index into the whole per-backend array */
2845 uint32 f = FAST_PATH_SLOT(group, i);
2846
2847 if (MyProc->fpRelId[f] == relid
2848 && FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
2849 {
2850 Assert(!result);
2851 FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
2852 result = true;
2853 /* we continue iterating so as to update FastPathLocalUseCounts[group] */
2854 }
2855 if (FAST_PATH_GET_BITS(MyProc, f) != 0)
2856 ++FastPathLocalUseCounts[group];
2857 }
2858 return result;
2859}

References Assert, FAST_PATH_CHECK_LOCKMODE, FAST_PATH_CLEAR_LOCKMODE, FAST_PATH_GET_BITS, FAST_PATH_REL_GROUP, FAST_PATH_SLOT, FastPathLocalUseCounts, FP_LOCK_SLOTS_PER_GROUP, PGPROC::fpRelId, i, MyProc, and result.

Referenced by LockRelease(), and LockReleaseAll().

◆ FinishStrongLockAcquire()

static void FinishStrongLockAcquire ( void  )
static

Definition at line 1858 of file lock.c.

1859{
1861}

References fb(), and StrongLockInProgress.

Referenced by LockAcquireExtended().

◆ GetAwaitedLock()

LOCALLOCK * GetAwaitedLock ( void  )

Definition at line 1906 of file lock.c.

1907{
1908 return awaitedLock;
1909}

References awaitedLock.

Referenced by LockErrorCleanup(), ProcessRecoveryConflictInterrupt(), and ProcSleep().

◆ GetBlockerStatusData()

BlockedProcsData * GetBlockerStatusData ( int  blocked_pid)

Definition at line 3979 of file lock.c.

3980{
3982 PGPROC *proc;
3983 int i;
3984
3986
3987 /*
3988 * Guess how much space we'll need, and preallocate. Most of the time
3989 * this will avoid needing to do repalloc while holding the LWLocks. (We
3990 * assume, but check with an Assert, that MaxBackends is enough entries
3991 * for the procs[] array; the other two could need enlargement, though.)
3992 */
3993 data->nprocs = data->nlocks = data->npids = 0;
3994 data->maxprocs = data->maxlocks = data->maxpids = MaxBackends;
3995 data->procs = palloc_array(BlockedProcData, data->maxprocs);
3996 data->locks = palloc_array(LockInstanceData, data->maxlocks);
3997 data->waiter_pids = palloc_array(int, data->maxpids);
3998
3999 /*
4000 * In order to search the ProcArray for blocked_pid and assume that that
4001 * entry won't immediately disappear under us, we must hold ProcArrayLock.
4002 * In addition, to examine the lock grouping fields of any other backend,
4003 * we must hold all the hash partition locks. (Only one of those locks is
4004 * actually relevant for any one lock group, but we can't know which one
4005 * ahead of time.) It's fairly annoying to hold all those locks
4006 * throughout this, but it's no worse than GetLockStatusData(), and it
4007 * does have the advantage that we're guaranteed to return a
4008 * self-consistent instantaneous state.
4009 */
4011
4013
4014 /* Nothing to do if it's gone */
4015 if (proc != NULL)
4016 {
4017 /*
4018 * Acquire lock on the entire shared lock data structure. See notes
4019 * in GetLockStatusData().
4020 */
4021 for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
4023
4024 if (proc->lockGroupLeader == NULL)
4025 {
4026 /* Easy case, proc is not a lock group member */
4028 }
4029 else
4030 {
4031 /* Examine all procs in proc's lock group */
4032 dlist_iter iter;
4033
4035 {
4037
4038 memberProc = dlist_container(PGPROC, lockGroupLink, iter.cur);
4040 }
4041 }
4042
4043 /*
4044 * And release locks. See notes in GetLockStatusData().
4045 */
4046 for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
4048
4049 Assert(data->nprocs <= data->maxprocs);
4050 }
4051
4053
4054 return data;
4055}

References Assert, BackendPidGetProcWithLock(), dlist_iter::cur, data, dlist_container, dlist_foreach, fb(), GetSingleProcBlockerStatusData(), i, PGPROC::lockGroupLeader, PGPROC::lockGroupMembers, LockHashPartitionLockByIndex, LW_SHARED, LWLockAcquire(), LWLockRelease(), MaxBackends, NUM_LOCK_PARTITIONS, palloc_array, and palloc_object.

Referenced by pg_blocking_pids().

◆ GetLockConflicts()

VirtualTransactionId * GetLockConflicts ( const LOCKTAG locktag,
LOCKMODE  lockmode,
int countp 
)

Definition at line 3077 of file lock.c.

3078{
3082 LOCK *lock;
3085 PROCLOCK *proclock;
3086 uint32 hashcode;
3088 int count = 0;
3089 int fast_count = 0;
3090
3092 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
3095 elog(ERROR, "unrecognized lock mode: %d", lockmode);
3096
3097 /*
3098 * Allocate memory to store results, and fill with InvalidVXID. We only
3099 * need enough space for MaxBackends + max_prepared_xacts + a terminator.
3100 * When InHotStandby, allocate once in TopMemoryContext and reuse.
3101 */
3102 if (InHotStandby)
3103 {
3104 if (vxids == NULL)
3107 sizeof(VirtualTransactionId) *
3109 }
3110 else
3112
3113 /* Compute hash code and partition lock, and look up conflicting modes. */
3114 hashcode = LockTagHashCode(locktag);
3116 conflictMask = lockMethodTable->conflictTab[lockmode];
3117
3118 /*
3119 * Fast path locks might not have been entered in the primary lock table.
3120 * If the lock we're dealing with could conflict with such a lock, we must
3121 * examine each backend's fast-path array for conflicts.
3122 */
3123 if (ConflictsWithRelationFastPath(locktag, lockmode))
3124 {
3125 int i;
3126 Oid relid = locktag->locktag_field2;
3128
3129 /* fast-path group the lock belongs to */
3130 uint32 group = FAST_PATH_REL_GROUP(relid);
3131
3132 /*
3133 * Iterate over relevant PGPROCs. Anything held by a prepared
3134 * transaction will have been transferred to the primary lock table,
3135 * so we need not worry about those. This is all a bit fuzzy, because
3136 * new locks could be taken after we've visited a particular
3137 * partition, but the callers had better be prepared to deal with that
3138 * anyway, since the locks could equally well be taken between the
3139 * time we return the value and the time the caller does something
3140 * with it.
3141 */
3142 for (i = 0; i < ProcGlobal->allProcCount; i++)
3143 {
3144 PGPROC *proc = GetPGProcByNumber(i);
3145 uint32 j;
3146
3147 /* A backend never blocks itself */
3148 if (proc == MyProc)
3149 continue;
3150
3152
3153 /*
3154 * If the target backend isn't referencing the same database as
3155 * the lock, then we needn't examine the individual relation IDs
3156 * at all; none of them can be relevant.
3157 *
3158 * See FastPathTransferRelationLocks() for discussion of why we do
3159 * this test after acquiring the lock.
3160 *
3161 * Also skip groups without any registered fast-path locks.
3162 */
3163 if (proc->databaseId != locktag->locktag_field1 ||
3164 proc->fpLockBits[group] == 0)
3165 {
3166 LWLockRelease(&proc->fpInfoLock);
3167 continue;
3168 }
3169
3170 for (j = 0; j < FP_LOCK_SLOTS_PER_GROUP; j++)
3171 {
3173
3174 /* index into the whole per-backend array */
3175 uint32 f = FAST_PATH_SLOT(group, j);
3176
3177 /* Look for an allocated slot matching the given relid. */
3178 if (relid != proc->fpRelId[f])
3179 continue;
3180 lockmask = FAST_PATH_GET_BITS(proc, f);
3181 if (!lockmask)
3182 continue;
3184
3185 /*
3186 * There can only be one entry per relation, so if we found it
3187 * and it doesn't conflict, we can skip the rest of the slots.
3188 */
3189 if ((lockmask & conflictMask) == 0)
3190 break;
3191
3192 /* Conflict! */
3193 GET_VXID_FROM_PGPROC(vxid, *proc);
3194
3196 vxids[count++] = vxid;
3197 /* else, xact already committed or aborted */
3198
3199 /* No need to examine remaining slots. */
3200 break;
3201 }
3202
3203 LWLockRelease(&proc->fpInfoLock);
3204 }
3205 }
3206
3207 /* Remember how many fast-path conflicts we found. */
3208 fast_count = count;
3209
3210 /*
3211 * Look up the lock object matching the tag.
3212 */
3214
3216 locktag,
3217 hashcode,
3218 HASH_FIND,
3219 NULL);
3220 if (!lock)
3221 {
3222 /*
3223 * If the lock object doesn't exist, there is nothing holding a lock
3224 * on this lockable object.
3225 */
3227 vxids[count].procNumber = INVALID_PROC_NUMBER;
3228 vxids[count].localTransactionId = InvalidLocalTransactionId;
3229 if (countp)
3230 *countp = count;
3231 return vxids;
3232 }
3233
3234 /*
3235 * Examine each existing holder (or awaiter) of the lock.
3236 */
3238 {
3239 proclock = dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
3240
3241 if (conflictMask & proclock->holdMask)
3242 {
3243 PGPROC *proc = proclock->tag.myProc;
3244
3245 /* A backend never blocks itself */
3246 if (proc != MyProc)
3247 {
3249
3250 GET_VXID_FROM_PGPROC(vxid, *proc);
3251
3253 {
3254 int i;
3255
3256 /* Avoid duplicate entries. */
3257 for (i = 0; i < fast_count; ++i)
3259 break;
3260 if (i >= fast_count)
3261 vxids[count++] = vxid;
3262 }
3263 /* else, xact already committed or aborted */
3264 }
3265 }
3266 }
3267
3269
3270 if (count > MaxBackends + max_prepared_xacts) /* should never happen */
3271 elog(PANIC, "too many conflicting locks found");
3272
3273 vxids[count].procNumber = INVALID_PROC_NUMBER;
3274 vxids[count].localTransactionId = InvalidLocalTransactionId;
3275 if (countp)
3276 *countp = count;
3277 return vxids;
3278}

References PROC_HDR::allProcCount, ConflictsWithRelationFastPath, PGPROC::databaseId, dlist_container, dlist_foreach, elog, ERROR, FAST_PATH_GET_BITS, FAST_PATH_LOCKNUMBER_OFFSET, FAST_PATH_REL_GROUP, FAST_PATH_SLOT, fb(), FP_LOCK_SLOTS_PER_GROUP, PGPROC::fpInfoLock, PGPROC::fpLockBits, PGPROC::fpRelId, GET_VXID_FROM_PGPROC, GetPGProcByNumber, HASH_FIND, hash_search_with_hash_value(), PROCLOCK::holdMask, i, InHotStandby, INVALID_PROC_NUMBER, InvalidLocalTransactionId, j, lengthof, LockHashPartitionLock, LockMethodLockHash, LockMethods, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_lockmethodid, LockTagHashCode(), LW_SHARED, LWLockAcquire(), LWLockRelease(), max_prepared_xacts, MaxBackends, MemoryContextAlloc(), MyProc, PROCLOCKTAG::myProc, palloc0_array, PANIC, ProcGlobal, LOCK::procLocks, PROCLOCK::tag, TopMemoryContext, VirtualTransactionIdEquals, and VirtualTransactionIdIsValid.

Referenced by ProcSleep(), ResolveRecoveryConflictWithLock(), and WaitForLockersMultiple().

◆ GetLockmodeName()

const char * GetLockmodeName ( LOCKMETHODID  lockmethodid,
LOCKMODE  mode 
)

◆ GetLocksMethodTable()

LockMethod GetLocksMethodTable ( const LOCK lock)

Definition at line 524 of file lock.c.

References Assert, fb(), lengthof, LOCK_LOCKMETHOD, and LockMethods.

Referenced by DeadLockCheck(), and FindLockCycleRecurseMember().

◆ GetLockStatusData()

LockData * GetLockStatusData ( void  )

Definition at line 3776 of file lock.c.

3777{
3778 LockData *data;
3779 PROCLOCK *proclock;
3781 int els;
3782 int el;
3783 int i;
3784
3786
3787 /* Guess how much space we'll need. */
3788 els = MaxBackends;
3789 el = 0;
3791
3792 /*
3793 * First, we iterate through the per-backend fast-path arrays, locking
3794 * them one at a time. This might produce an inconsistent picture of the
3795 * system state, but taking all of those LWLocks at the same time seems
3796 * impractical (in particular, note MAX_SIMUL_LWLOCKS). It shouldn't
3797 * matter too much, because none of these locks can be involved in lock
3798 * conflicts anyway - anything that might must be present in the main lock
3799 * table. (For the same reason, we don't sweat about making leaderPid
3800 * completely valid. We cannot safely dereference another backend's
3801 * lockGroupLeader field without holding all lock partition locks, and
3802 * it's not worth that.)
3803 */
3804 for (i = 0; i < ProcGlobal->allProcCount; ++i)
3805 {
3806 PGPROC *proc = GetPGProcByNumber(i);
3807
3808 /* Skip backends with pid=0, as they don't hold fast-path locks */
3809 if (proc->pid == 0)
3810 continue;
3811
3813
3814 for (uint32 g = 0; g < FastPathLockGroupsPerBackend; g++)
3815 {
3816 /* Skip groups without registered fast-path locks */
3817 if (proc->fpLockBits[g] == 0)
3818 continue;
3819
3820 for (int j = 0; j < FP_LOCK_SLOTS_PER_GROUP; j++)
3821 {
3823 uint32 f = FAST_PATH_SLOT(g, j);
3825
3826 /* Skip unallocated slots */
3827 if (!lockbits)
3828 continue;
3829
3830 if (el >= els)
3831 {
3832 els += MaxBackends;
3833 data->locks = (LockInstanceData *)
3834 repalloc(data->locks, sizeof(LockInstanceData) * els);
3835 }
3836
3837 instance = &data->locks[el];
3839 proc->fpRelId[f]);
3841 instance->waitLockMode = NoLock;
3842 instance->vxid.procNumber = proc->vxid.procNumber;
3843 instance->vxid.localTransactionId = proc->vxid.lxid;
3844 instance->pid = proc->pid;
3845 instance->leaderPid = proc->pid;
3846 instance->fastpath = true;
3847
3848 /*
3849 * Successfully taking fast path lock means there were no
3850 * conflicting locks.
3851 */
3852 instance->waitStart = 0;
3853
3854 el++;
3855 }
3856 }
3857
3858 if (proc->fpVXIDLock)
3859 {
3862
3863 if (el >= els)
3864 {
3865 els += MaxBackends;
3866 data->locks = (LockInstanceData *)
3867 repalloc(data->locks, sizeof(LockInstanceData) * els);
3868 }
3869
3870 vxid.procNumber = proc->vxid.procNumber;
3872
3873 instance = &data->locks[el];
3875 instance->holdMask = LOCKBIT_ON(ExclusiveLock);
3876 instance->waitLockMode = NoLock;
3877 instance->vxid.procNumber = proc->vxid.procNumber;
3878 instance->vxid.localTransactionId = proc->vxid.lxid;
3879 instance->pid = proc->pid;
3880 instance->leaderPid = proc->pid;
3881 instance->fastpath = true;
3882 instance->waitStart = 0;
3883
3884 el++;
3885 }
3886
3887 LWLockRelease(&proc->fpInfoLock);
3888 }
3889
3890 /*
3891 * Next, acquire lock on the entire shared lock data structure. We do
3892 * this so that, at least for locks in the primary lock table, the state
3893 * will be self-consistent.
3894 *
3895 * Since this is a read-only operation, we take shared instead of
3896 * exclusive lock. There's not a whole lot of point to this, because all
3897 * the normal operations require exclusive lock, but it doesn't hurt
3898 * anything either. It will at least allow two backends to do
3899 * GetLockStatusData in parallel.
3900 *
3901 * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
3902 */
3903 for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
3905
3906 /* Now we can safely count the number of proclocks */
3908 if (data->nelements > els)
3909 {
3910 els = data->nelements;
3911 data->locks = (LockInstanceData *)
3912 repalloc(data->locks, sizeof(LockInstanceData) * els);
3913 }
3914
3915 /* Now scan the tables to copy the data */
3917
3918 while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
3919 {
3920 PGPROC *proc = proclock->tag.myProc;
3921 LOCK *lock = proclock->tag.myLock;
3922 LockInstanceData *instance = &data->locks[el];
3923
3924 memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
3925 instance->holdMask = proclock->holdMask;
3926 if (proc->waitLock == proclock->tag.myLock)
3927 instance->waitLockMode = proc->waitLockMode;
3928 else
3929 instance->waitLockMode = NoLock;
3930 instance->vxid.procNumber = proc->vxid.procNumber;
3931 instance->vxid.localTransactionId = proc->vxid.lxid;
3932 instance->pid = proc->pid;
3933 instance->leaderPid = proclock->groupLeader->pid;
3934 instance->fastpath = false;
3935 instance->waitStart = (TimestampTz) pg_atomic_read_u64(&proc->waitStart);
3936
3937 el++;
3938 }
3939
3940 /*
3941 * And release locks. We do this in reverse order for two reasons: (1)
3942 * Anyone else who needs more than one of the locks will be trying to lock
3943 * them in increasing order; we don't want to release the other process
3944 * until it can get all the locks it needs. (2) This avoids O(N^2)
3945 * behavior inside LWLockRelease.
3946 */
3947 for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
3949
3950 Assert(el == data->nelements);
3951
3952 return data;
3953}

References PROC_HDR::allProcCount, Assert, data, PGPROC::databaseId, ExclusiveLock, FAST_PATH_GET_BITS, FAST_PATH_LOCKNUMBER_OFFSET, FAST_PATH_SLOT, FastPathLockGroupsPerBackend, fb(), FP_LOCK_SLOTS_PER_GROUP, PGPROC::fpInfoLock, PGPROC::fpLocalTransactionId, PGPROC::fpLockBits, PGPROC::fpRelId, PGPROC::fpVXIDLock, GetPGProcByNumber, PROCLOCK::groupLeader, hash_get_num_entries(), hash_seq_init(), hash_seq_search(), PROCLOCK::holdMask, i, j, VirtualTransactionId::localTransactionId, LOCKBIT_ON, LockHashPartitionLockByIndex, LockMethodProcLockHash, LW_SHARED, LWLockAcquire(), LWLockRelease(), PGPROC::lxid, MaxBackends, memcpy(), PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, NoLock, NUM_LOCK_PARTITIONS, palloc_array, palloc_object, pg_atomic_read_u64(), PGPROC::pid, ProcGlobal, VirtualTransactionId::procNumber, PGPROC::procNumber, repalloc(), SET_LOCKTAG_RELATION, SET_LOCKTAG_VIRTUALTRANSACTION, LOCK::tag, PROCLOCK::tag, PGPROC::vxid, PGPROC::waitLock, PGPROC::waitLockMode, and PGPROC::waitStart.

Referenced by pg_lock_status().

◆ GetLockTagsMethodTable()

LockMethod GetLockTagsMethodTable ( const LOCKTAG locktag)

Definition at line 536 of file lock.c.

References Assert, fb(), lengthof, LockMethods, and LOCKTAG::locktag_lockmethodid.

Referenced by pg_blocking_pids().

◆ GetRunningTransactionLocks()

xl_standby_lock * GetRunningTransactionLocks ( int nlocks)

Definition at line 4153 of file lock.c.

4154{
4156 PROCLOCK *proclock;
4158 int i;
4159 int index;
4160 int els;
4161
4162 /*
4163 * Acquire lock on the entire shared lock data structure.
4164 *
4165 * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
4166 */
4167 for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
4169
4170 /* Now we can safely count the number of proclocks */
4172
4173 /*
4174 * Allocating enough space for all locks in the lock table is overkill,
4175 * but it's more convenient and faster than having to enlarge the array.
4176 */
4178
4179 /* Now scan the tables to copy the data */
4181
4182 /*
4183 * If lock is a currently granted AccessExclusiveLock then it will have
4184 * just one proclock holder, so locks are never accessed twice in this
4185 * particular case. Don't copy this code for use elsewhere because in the
4186 * general case this will give you duplicate locks when looking at
4187 * non-exclusive lock types.
4188 */
4189 index = 0;
4190 while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
4191 {
4192 /* make sure this definition matches the one used in LockAcquire */
4193 if ((proclock->holdMask & LOCKBIT_ON(AccessExclusiveLock)) &&
4195 {
4196 PGPROC *proc = proclock->tag.myProc;
4197 LOCK *lock = proclock->tag.myLock;
4198 TransactionId xid = proc->xid;
4199
4200 /*
4201 * Don't record locks for transactions if we know they have
4202 * already issued their WAL record for commit but not yet released
4203 * lock. It is still possible that we see locks held by already
4204 * complete transactions, if they haven't yet zeroed their xids.
4205 */
4206 if (!TransactionIdIsValid(xid))
4207 continue;
4208
4209 accessExclusiveLocks[index].xid = xid;
4212
4213 index++;
4214 }
4215 }
4216
4217 Assert(index <= els);
4218
4219 /*
4220 * And release locks. We do this in reverse order for two reasons: (1)
4221 * Anyone else who needs more than one of the locks will be trying to lock
4222 * them in increasing order; we don't want to release the other process
4223 * until it can get all the locks it needs. (2) This avoids O(N^2)
4224 * behavior inside LWLockRelease.
4225 */
4226 for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
4228
4229 *nlocks = index;
4230 return accessExclusiveLocks;
4231}

References AccessExclusiveLock, Assert, fb(), hash_get_num_entries(), hash_seq_init(), hash_seq_search(), PROCLOCK::holdMask, i, LOCKBIT_ON, LockHashPartitionLockByIndex, LockMethodProcLockHash, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG_RELATION, LOCKTAG::locktag_type, LW_SHARED, LWLockAcquire(), LWLockRelease(), PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, NUM_LOCK_PARTITIONS, palloc(), LOCK::tag, PROCLOCK::tag, TransactionIdIsValid, and PGPROC::xid.

Referenced by LogStandbySnapshot().

◆ GetSingleProcBlockerStatusData()

static void GetSingleProcBlockerStatusData ( PGPROC blocked_proc,
BlockedProcsData data 
)
static

Definition at line 4059 of file lock.c.

4060{
4061 LOCK *theLock = blocked_proc->waitLock;
4066 int queue_size;
4067
4068 /* Nothing to do if this proc is not blocked */
4069 if (theLock == NULL)
4070 return;
4071
4072 /* Set up a procs[] element */
4073 bproc = &data->procs[data->nprocs++];
4074 bproc->pid = blocked_proc->pid;
4075 bproc->first_lock = data->nlocks;
4076 bproc->first_waiter = data->npids;
4077
4078 /*
4079 * We may ignore the proc's fast-path arrays, since nothing in those could
4080 * be related to a contended lock.
4081 */
4082
4083 /* Collect all PROCLOCKs associated with theLock */
4084 dlist_foreach(proclock_iter, &theLock->procLocks)
4085 {
4086 PROCLOCK *proclock =
4087 dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
4088 PGPROC *proc = proclock->tag.myProc;
4089 LOCK *lock = proclock->tag.myLock;
4091
4092 if (data->nlocks >= data->maxlocks)
4093 {
4094 data->maxlocks += MaxBackends;
4095 data->locks = (LockInstanceData *)
4096 repalloc(data->locks, sizeof(LockInstanceData) * data->maxlocks);
4097 }
4098
4099 instance = &data->locks[data->nlocks];
4100 memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
4101 instance->holdMask = proclock->holdMask;
4102 if (proc->waitLock == lock)
4103 instance->waitLockMode = proc->waitLockMode;
4104 else
4105 instance->waitLockMode = NoLock;
4106 instance->vxid.procNumber = proc->vxid.procNumber;
4107 instance->vxid.localTransactionId = proc->vxid.lxid;
4108 instance->pid = proc->pid;
4109 instance->leaderPid = proclock->groupLeader->pid;
4110 instance->fastpath = false;
4111 data->nlocks++;
4112 }
4113
4114 /* Enlarge waiter_pids[] if it's too small to hold all wait queue PIDs */
4115 waitQueue = &(theLock->waitProcs);
4116 queue_size = dclist_count(waitQueue);
4117
4118 if (queue_size > data->maxpids - data->npids)
4119 {
4120 data->maxpids = Max(data->maxpids + MaxBackends,
4121 data->npids + queue_size);
4122 data->waiter_pids = (int *) repalloc(data->waiter_pids,
4123 sizeof(int) * data->maxpids);
4124 }
4125
4126 /* Collect PIDs from the lock's wait queue, stopping at blocked_proc */
4128 {
4130
4132 break;
4133 data->waiter_pids[data->npids++] = queued_proc->pid;
4134 }
4135
4136 bproc->num_locks = data->nlocks - bproc->first_lock;
4137 bproc->num_waiters = data->npids - bproc->first_waiter;
4138}

References data, dclist_count(), dclist_foreach, dlist_container, dlist_foreach, fb(), PROCLOCK::groupLeader, PROCLOCK::holdMask, PGPROC::lxid, Max, MaxBackends, memcpy(), PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, NoLock, PGPROC::pid, PGPROC::procNumber, repalloc(), LOCK::tag, PROCLOCK::tag, PGPROC::vxid, PGPROC::waitLock, and PGPROC::waitLockMode.

Referenced by GetBlockerStatusData().

◆ GrantAwaitedLock()

void GrantAwaitedLock ( void  )

Definition at line 1897 of file lock.c.

References awaitedLock, awaitedOwner, and GrantLockLocal().

Referenced by LockErrorCleanup().

◆ GrantLock()

void GrantLock ( LOCK lock,
PROCLOCK proclock,
LOCKMODE  lockmode 
)

Definition at line 1666 of file lock.c.

1667{
1668 lock->nGranted++;
1669 lock->granted[lockmode]++;
1670 lock->grantMask |= LOCKBIT_ON(lockmode);
1671 if (lock->granted[lockmode] == lock->requested[lockmode])
1672 lock->waitMask &= LOCKBIT_OFF(lockmode);
1673 proclock->holdMask |= LOCKBIT_ON(lockmode);
1674 LOCK_PRINT("GrantLock", lock, lockmode);
1675 Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
1676 Assert(lock->nGranted <= lock->nRequested);
1677}

References Assert, LOCK::granted, LOCK::grantMask, PROCLOCK::holdMask, LOCK_PRINT, LOCKBIT_OFF, LOCKBIT_ON, LOCK::nGranted, LOCK::nRequested, LOCK::requested, and LOCK::waitMask.

Referenced by FastPathGetRelationLockEntry(), FastPathTransferRelationLocks(), JoinWaitQueue(), lock_twophase_recover(), LockAcquireExtended(), ProcLockWakeup(), and VirtualXactLock().

◆ GrantLockLocal()

static void GrantLockLocal ( LOCALLOCK locallock,
ResourceOwner  owner 
)
static

Definition at line 1800 of file lock.c.

1801{
1802 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
1803 int i;
1804
1805 Assert(locallock->numLockOwners < locallock->maxLockOwners);
1806 /* Count the total */
1807 locallock->nLocks++;
1808 /* Count the per-owner lock */
1809 for (i = 0; i < locallock->numLockOwners; i++)
1810 {
1811 if (lockOwners[i].owner == owner)
1812 {
1813 lockOwners[i].nLocks++;
1814 return;
1815 }
1816 }
1817 lockOwners[i].owner = owner;
1818 lockOwners[i].nLocks = 1;
1819 locallock->numLockOwners++;
1820 if (owner != NULL)
1822
1823 /* Indicate that the lock is acquired for certain types of locks. */
1825}

References Assert, CheckAndSetLockHeld(), fb(), i, LOCALLOCKOWNER::nLocks, LOCALLOCKOWNER::owner, and ResourceOwnerRememberLock().

Referenced by GrantAwaitedLock(), and LockAcquireExtended().

◆ InitLockManagerAccess()

void InitLockManagerAccess ( void  )

Definition at line 502 of file lock.c.

503{
504 /*
505 * Allocate non-shared hash table for LOCALLOCK structs. This stores lock
506 * counts and resource owner information.
507 */
508 HASHCTL info;
509
510 info.keysize = sizeof(LOCALLOCKTAG);
511 info.entrysize = sizeof(LOCALLOCK);
512
513 LockMethodLocalHash = hash_create("LOCALLOCK hash",
514 16,
515 &info,
517}

References HASHCTL::entrysize, HASH_BLOBS, hash_create(), HASH_ELEM, HASHCTL::keysize, and LockMethodLocalHash.

Referenced by BaseInit().

◆ lock_twophase_postabort()

void lock_twophase_postabort ( FullTransactionId  fxid,
uint16  info,
void *  recdata,
uint32  len 
)

Definition at line 4578 of file lock.c.

4580{
4581 lock_twophase_postcommit(fxid, info, recdata, len);
4582}

References fb(), len, and lock_twophase_postcommit().

◆ lock_twophase_postcommit()

void lock_twophase_postcommit ( FullTransactionId  fxid,
uint16  info,
void *  recdata,
uint32  len 
)

◆ lock_twophase_recover()

void lock_twophase_recover ( FullTransactionId  fxid,
uint16  info,
void *  recdata,
uint32  len 
)

Definition at line 4339 of file lock.c.

4341{
4343 PGPROC *proc = TwoPhaseGetDummyProc(fxid, false);
4344 LOCKTAG *locktag;
4345 LOCKMODE lockmode;
4347 LOCK *lock;
4348 PROCLOCK *proclock;
4350 bool found;
4351 uint32 hashcode;
4353 int partition;
4356
4357 Assert(len == sizeof(TwoPhaseLockRecord));
4358 locktag = &rec->locktag;
4359 lockmode = rec->lockmode;
4361
4363 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4365
4366 hashcode = LockTagHashCode(locktag);
4367 partition = LockHashPartition(hashcode);
4369
4371
4372 /*
4373 * Find or create a lock with this tag.
4374 */
4376 locktag,
4377 hashcode,
4379 &found);
4380 if (!lock)
4381 {
4383 ereport(ERROR,
4385 errmsg("out of shared memory"),
4386 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
4387 }
4388
4389 /*
4390 * if it's a new lock object, initialize it
4391 */
4392 if (!found)
4393 {
4394 lock->grantMask = 0;
4395 lock->waitMask = 0;
4396 dlist_init(&lock->procLocks);
4397 dclist_init(&lock->waitProcs);
4398 lock->nRequested = 0;
4399 lock->nGranted = 0;
4400 MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
4401 MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
4402 LOCK_PRINT("lock_twophase_recover: new", lock, lockmode);
4403 }
4404 else
4405 {
4406 LOCK_PRINT("lock_twophase_recover: found", lock, lockmode);
4407 Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
4408 Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
4409 Assert(lock->nGranted <= lock->nRequested);
4410 }
4411
4412 /*
4413 * Create the hash key for the proclock table.
4414 */
4415 proclocktag.myLock = lock;
4416 proclocktag.myProc = proc;
4417
4419
4420 /*
4421 * Find or create a proclock entry with this tag
4422 */
4424 &proclocktag,
4427 &found);
4428 if (!proclock)
4429 {
4430 /* Oops, not enough shmem for the proclock */
4431 if (lock->nRequested == 0)
4432 {
4433 /*
4434 * There are no other requestors of this lock, so garbage-collect
4435 * the lock object. We *must* do this to avoid a permanent leak
4436 * of shared memory, because there won't be anything to cause
4437 * anyone to release the lock object later.
4438 */
4441 &(lock->tag),
4442 hashcode,
4444 NULL))
4445 elog(PANIC, "lock table corrupted");
4446 }
4448 ereport(ERROR,
4450 errmsg("out of shared memory"),
4451 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
4452 }
4453
4454 /*
4455 * If new, initialize the new entry
4456 */
4457 if (!found)
4458 {
4459 Assert(proc->lockGroupLeader == NULL);
4460 proclock->groupLeader = proc;
4461 proclock->holdMask = 0;
4462 proclock->releaseMask = 0;
4463 /* Add proclock to appropriate lists */
4464 dlist_push_tail(&lock->procLocks, &proclock->lockLink);
4466 &proclock->procLink);
4467 PROCLOCK_PRINT("lock_twophase_recover: new", proclock);
4468 }
4469 else
4470 {
4471 PROCLOCK_PRINT("lock_twophase_recover: found", proclock);
4472 Assert((proclock->holdMask & ~lock->grantMask) == 0);
4473 }
4474
4475 /*
4476 * lock->nRequested and lock->requested[] count the total number of
4477 * requests, whether granted or waiting, so increment those immediately.
4478 */
4479 lock->nRequested++;
4480 lock->requested[lockmode]++;
4481 Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
4482
4483 /*
4484 * We shouldn't already hold the desired lock.
4485 */
4486 if (proclock->holdMask & LOCKBIT_ON(lockmode))
4487 elog(ERROR, "lock %s on object %u/%u/%u is already held",
4488 lockMethodTable->lockModeNames[lockmode],
4489 lock->tag.locktag_field1, lock->tag.locktag_field2,
4490 lock->tag.locktag_field3);
4491
4492 /*
4493 * We ignore any possible conflicts and just grant ourselves the lock. Not
4494 * only because we don't bother, but also to avoid deadlocks when
4495 * switching from standby to normal mode. See function comment.
4496 */
4497 GrantLock(lock, proclock, lockmode);
4498
4499 /*
4500 * Bump strong lock count, to make sure any fast-path lock requests won't
4501 * be granted without consulting the primary lock table.
4502 */
4503 if (ConflictsWithRelationFastPath(&lock->tag, lockmode))
4504 {
4506
4510 }
4511
4513}

References Assert, ConflictsWithRelationFastPath, FastPathStrongRelationLockData::count, dclist_init(), dlist_init(), dlist_is_empty(), dlist_push_tail(), elog, ereport, errcode(), errhint(), errmsg, ERROR, FastPathStrongLockHashPartition, FastPathStrongRelationLocks, fb(), LOCK::granted, GrantLock(), LOCK::grantMask, PROCLOCK::groupLeader, HASH_ENTER_NULL, HASH_REMOVE, hash_search_with_hash_value(), PROCLOCK::holdMask, len, lengthof, LOCK_PRINT, LOCKBIT_ON, PGPROC::lockGroupLeader, LockHashPartition, LockHashPartitionLock, PROCLOCK::lockLink, LockMethodLockHash, LockMethodProcLockHash, LockMethods, TwoPhaseLockRecord::lockmode, TwoPhaseLockRecord::locktag, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_field3, LOCKTAG::locktag_lockmethodid, LockTagHashCode(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), MAX_LOCKMODES, MemSet, FastPathStrongRelationLockData::mutex, PGPROC::myProcLocks, LOCK::nGranted, LOCK::nRequested, PANIC, PROCLOCK::procLink, PROCLOCK_PRINT, ProcLockHashCode(), LOCK::procLocks, PROCLOCK::releaseMask, LOCK::requested, SpinLockAcquire(), SpinLockRelease(), LOCK::tag, TwoPhaseGetDummyProc(), LOCK::waitMask, and LOCK::waitProcs.

◆ lock_twophase_standby_recover()

void lock_twophase_standby_recover ( FullTransactionId  fxid,
uint16  info,
void *  recdata,
uint32  len 
)

Definition at line 4520 of file lock.c.

4522{
4524 LOCKTAG *locktag;
4525 LOCKMODE lockmode;
4527
4528 Assert(len == sizeof(TwoPhaseLockRecord));
4529 locktag = &rec->locktag;
4530 lockmode = rec->lockmode;
4532
4534 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4535
4536 if (lockmode == AccessExclusiveLock &&
4537 locktag->locktag_type == LOCKTAG_RELATION)
4538 {
4540 locktag->locktag_field1 /* dboid */ ,
4541 locktag->locktag_field2 /* reloid */ );
4542 }
4543}

References AccessExclusiveLock, Assert, elog, ERROR, fb(), len, lengthof, LockMethods, TwoPhaseLockRecord::lockmode, TwoPhaseLockRecord::locktag, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_lockmethodid, LOCKTAG_RELATION, LOCKTAG::locktag_type, StandbyAcquireAccessExclusiveLock(), and XidFromFullTransactionId.

◆ LockAcquire()

◆ LockAcquireExtended()

LockAcquireResult LockAcquireExtended ( const LOCKTAG *  locktag,
LOCKMODE  lockmode,
bool  sessionLock,
bool  dontWait,
bool  reportMemoryError,
LOCALLOCK **  locallockp,
bool  logLockFailure 
)

Definition at line 833 of file lock.c.

840{
845 LOCK *lock;
846 PROCLOCK *proclock;
847 bool found;
848 ResourceOwner owner;
849 uint32 hashcode;
851 bool found_conflict;
853 bool log_lock = false;
854
856 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
859 elog(ERROR, "unrecognized lock mode: %d", lockmode);
860
861 if (RecoveryInProgress() && !InRecovery &&
862 (locktag->locktag_type == LOCKTAG_OBJECT ||
863 locktag->locktag_type == LOCKTAG_RELATION) &&
864 lockmode > RowExclusiveLock)
867 errmsg("cannot acquire lock mode %s on database objects while recovery is in progress",
868 lockMethodTable->lockModeNames[lockmode]),
869 errhint("Only RowExclusiveLock or less can be acquired on database objects during recovery.")));
870
871#ifdef LOCK_DEBUG
872 if (LOCK_DEBUG_ENABLED(locktag))
873 elog(LOG, "LockAcquire: lock [%u,%u] %s",
874 locktag->locktag_field1, locktag->locktag_field2,
875 lockMethodTable->lockModeNames[lockmode]);
876#endif
877
878 /* Identify owner for lock */
879 if (sessionLock)
880 owner = NULL;
881 else
882 owner = CurrentResourceOwner;
883
884 /*
885 * Find or create a LOCALLOCK entry for this lock and lockmode
886 */
887 MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
888 localtag.lock = *locktag;
889 localtag.mode = lockmode;
890
892 &localtag,
893 HASH_ENTER, &found);
894
895 /*
896 * if it's a new locallock object, initialize it
897 */
898 if (!found)
899 {
900 locallock->lock = NULL;
901 locallock->proclock = NULL;
902 locallock->hashcode = LockTagHashCode(&(localtag.lock));
903 locallock->nLocks = 0;
904 locallock->holdsStrongLockCount = false;
905 locallock->lockCleared = false;
906 locallock->numLockOwners = 0;
907 locallock->maxLockOwners = 8;
908 locallock->lockOwners = NULL; /* in case next line fails */
909 locallock->lockOwners = (LOCALLOCKOWNER *)
911 locallock->maxLockOwners * sizeof(LOCALLOCKOWNER));
912 }
913 else
914 {
915 /* Make sure there will be room to remember the lock */
916 if (locallock->numLockOwners >= locallock->maxLockOwners)
917 {
918 int newsize = locallock->maxLockOwners * 2;
919
920 locallock->lockOwners = (LOCALLOCKOWNER *)
921 repalloc(locallock->lockOwners,
922 newsize * sizeof(LOCALLOCKOWNER));
923 locallock->maxLockOwners = newsize;
924 }
925 }
926 hashcode = locallock->hashcode;
927
928 if (locallockp)
930
931 /*
932 * If we already hold the lock, we can just increase the count locally.
933 *
934 * If lockCleared is already set, caller need not worry about absorbing
935 * sinval messages related to the lock's object.
936 */
937 if (locallock->nLocks > 0)
938 {
940 if (locallock->lockCleared)
942 else
944 }
945
946 /*
947 * We don't acquire any other heavyweight lock while holding the relation
948 * extension lock. We do allow to acquire the same relation extension
949 * lock more than once but that case won't reach here.
950 */
952
953 /*
954 * Prepare to emit a WAL record if acquisition of this lock needs to be
955 * replayed in a standby server.
956 *
957 * Here we prepare to log; after lock is acquired we'll issue log record.
958 * This arrangement simplifies error recovery in case the preparation step
959 * fails.
960 *
961 * Only AccessExclusiveLocks can conflict with lock types that read-only
962 * transactions can acquire in a standby server. Make sure this definition
963 * matches the one in GetRunningTransactionLocks().
964 */
965 if (lockmode >= AccessExclusiveLock &&
966 locktag->locktag_type == LOCKTAG_RELATION &&
969 {
971 log_lock = true;
972 }
973
974 /*
975 * Attempt to take lock via fast path, if eligible. But if we remember
976 * having filled up the fast path array, we don't attempt to make any
977 * further use of it until we release some locks. It's possible that some
978 * other backend has transferred some of those locks to the shared hash
979 * table, leaving space free, but it's not worth acquiring the LWLock just
980 * to check. It's also possible that we're acquiring a second or third
981 * lock type on a relation we have already locked using the fast-path, but
982 * for now we don't worry about that case either.
983 */
984 if (EligibleForRelationFastPath(locktag, lockmode))
985 {
988 {
990 bool acquired;
991
992 /*
993 * LWLockAcquire acts as a memory sequencing point, so it's safe
994 * to assume that any strong locker whose increment to
995 * FastPathStrongRelationLocks->counts becomes visible after we
996 * test it has yet to begin to transfer fast-path locks.
997 */
1000 acquired = false;
1001 else
1003 lockmode);
1005 if (acquired)
1006 {
1007 /*
1008 * The locallock might contain stale pointers to some old
1009 * shared objects; we MUST reset these to null before
1010 * considering the lock to be acquired via fast-path.
1011 */
1012 locallock->lock = NULL;
1013 locallock->proclock = NULL;
1014 GrantLockLocal(locallock, owner);
1015 return LOCKACQUIRE_OK;
1016 }
1017 }
1018 else
1019 {
1020 /*
1021 * Increment the lock statistics counter if lock could not be
1022 * acquired via the fast-path.
1023 */
1024 pgstat_count_lock_fastpath_exceeded(locallock->tag.lock.locktag_type);
1025 }
1026 }
1027
1028 /*
1029 * If this lock could potentially have been taken via the fast-path by
1030 * some other backend, we must (temporarily) disable further use of the
1031 * fast-path for this lock tag, and migrate any locks already taken via
1032 * this method to the main lock table.
1033 */
1034 if (ConflictsWithRelationFastPath(locktag, lockmode))
1035 {
1037
1040 hashcode))
1041 {
1043 if (locallock->nLocks == 0)
1045 if (locallockp)
1046 *locallockp = NULL;
1048 ereport(ERROR,
1050 errmsg("out of shared memory"),
1051 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
1052 else
1053 return LOCKACQUIRE_NOT_AVAIL;
1054 }
1055 }
1056
1057 /*
1058 * We didn't find the lock in our LOCALLOCK table, and we didn't manage to
1059 * take it via the fast-path, either, so we've got to mess with the shared
1060 * lock table.
1061 */
1063
1065
1066 /*
1067 * Find or create lock and proclock entries with this tag
1068 *
1069 * Note: if the locallock object already existed, it might have a pointer
1070 * to the lock already ... but we should not assume that that pointer is
1071 * valid, since a lock object with zero hold and request counts can go
1072 * away anytime. So we have to use SetupLockInTable() to recompute the
1073 * lock and proclock pointers, even if they're already set.
1074 */
1075 proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
1076 hashcode, lockmode);
1077 if (!proclock)
1078 {
1081 if (locallock->nLocks == 0)
1083 if (locallockp)
1084 *locallockp = NULL;
1086 ereport(ERROR,
1088 errmsg("out of shared memory"),
1089 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
1090 else
1091 return LOCKACQUIRE_NOT_AVAIL;
1092 }
1093 locallock->proclock = proclock;
1094 lock = proclock->tag.myLock;
1095 locallock->lock = lock;
1096
1097 /*
1098 * If lock requested conflicts with locks requested by waiters, must join
1099 * wait queue. Otherwise, check for conflict with already-held locks.
1100 * (That's last because most complex check.)
1101 */
1102 if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
1103 found_conflict = true;
1104 else
1106 lock, proclock);
1107
1108 if (!found_conflict)
1109 {
1110 /* No conflict with held or previously requested locks */
1111 GrantLock(lock, proclock, lockmode);
1113 }
1114 else
1115 {
1116 /*
1117 * Join the lock's wait queue. We call this even in the dontWait
1118 * case, because JoinWaitQueue() may discover that we can acquire the
1119 * lock immediately after all.
1120 */
1122 }
1123
1125 {
1126 /*
1127 * We're not getting the lock because a deadlock was detected already
1128 * while trying to join the wait queue, or because we would have to
1129 * wait but the caller requested no blocking.
1130 *
1131 * Undo the changes to shared entries before releasing the partition
1132 * lock.
1133 */
1135
1136 if (proclock->holdMask == 0)
1137 {
1139
1141 hashcode);
1142 dlist_delete(&proclock->lockLink);
1143 dlist_delete(&proclock->procLink);
1145 &(proclock->tag),
1148 NULL))
1149 elog(PANIC, "proclock table corrupted");
1150 }
1151 else
1152 PROCLOCK_PRINT("LockAcquire: did not join wait queue", proclock);
1153 lock->nRequested--;
1154 lock->requested[lockmode]--;
1155 LOCK_PRINT("LockAcquire: did not join wait queue",
1156 lock, lockmode);
1157 Assert((lock->nRequested > 0) &&
1158 (lock->requested[lockmode] >= 0));
1159 Assert(lock->nGranted <= lock->nRequested);
1161 if (locallock->nLocks == 0)
1163
1164 if (dontWait)
1165 {
1166 /*
1167 * Log lock holders and waiters as a detail log message if
1168 * logLockFailure = true and lock acquisition fails with dontWait
1169 * = true
1170 */
1171 if (logLockFailure)
1172 {
1176 const char *modename;
1177 int lockHoldersNum = 0;
1178
1182
1183 DescribeLockTag(&buf, &locallock->tag.lock);
1184 modename = GetLockmodeName(locallock->tag.lock.locktag_lockmethodid,
1185 lockmode);
1186
1187 /* Gather a list of all lock holders and waiters */
1192
1193 ereport(LOG,
1194 (errmsg("process %d could not obtain %s on %s",
1195 MyProcPid, modename, buf.data),
1197 "Process holding the lock: %s, Wait queue: %s.",
1198 "Processes holding the lock: %s, Wait queue: %s.",
1200 lock_holders_sbuf.data,
1201 lock_waiters_sbuf.data)));
1202
1203 pfree(buf.data);
1206 }
1207 if (locallockp)
1208 *locallockp = NULL;
1209 return LOCKACQUIRE_NOT_AVAIL;
1210 }
1211 else
1212 {
1214 /* DeadLockReport() will not return */
1215 }
1216 }
1217
1218 /*
1219 * We are now in the lock queue, or the lock was already granted. If
1220 * queued, go to sleep.
1221 */
1223 {
1224 Assert(!dontWait);
1225 PROCLOCK_PRINT("LockAcquire: sleeping on lock", proclock);
1226 LOCK_PRINT("LockAcquire: sleeping on lock", lock, lockmode);
1228
1230
1231 /*
1232 * NOTE: do not do any material change of state between here and
1233 * return. All required changes in locktable state must have been
1234 * done when the lock was granted to us --- see notes in WaitOnLock.
1235 */
1236
1238 {
1239 /*
1240 * We failed as a result of a deadlock, see CheckDeadLock(). Quit
1241 * now.
1242 */
1243 Assert(!dontWait);
1245 /* DeadLockReport() will not return */
1246 }
1247 }
1248 else
1251
1252 /* The lock was granted to us. Update the local lock entry accordingly */
1253 Assert((proclock->holdMask & LOCKBIT_ON(lockmode)) != 0);
1254 GrantLockLocal(locallock, owner);
1255
1256 /*
1257 * Lock state is fully up-to-date now; if we error out after this, no
1258 * special error cleanup is required.
1259 */
1261
1262 /*
1263 * Emit a WAL record if acquisition of this lock needs to be replayed in a
1264 * standby server.
1265 */
1266 if (log_lock)
1267 {
1268 /*
1269 * Decode the locktag back to the original values, to avoid sending
1270 * lots of empty bytes with every message. See lock.h to check how a
1271 * locktag is defined for LOCKTAG_RELATION
1272 */
1274 locktag->locktag_field2);
1275 }
1276
1277 return LOCKACQUIRE_OK;
1278}

References AbortStrongLockAcquire(), AccessExclusiveLock, Assert, BeginStrongLockAcquire(), buf, ConflictsWithRelationFastPath, FastPathStrongRelationLockData::count, CurrentResourceOwner, DeadLockReport(), DescribeLockTag(), dlist_delete(), EligibleForRelationFastPath, elog, ereport, errcode(), errdetail_log_plural(), errhint(), errmsg, ERROR, FAST_PATH_REL_GROUP, FastPathGrantRelationLock(), FastPathLocalUseCounts, FastPathStrongLockHashPartition, FastPathStrongRelationLocks, FastPathTransferRelationLocks(), fb(), FinishStrongLockAcquire(), FP_LOCK_SLOTS_PER_GROUP, PGPROC::fpInfoLock, GetLockHoldersAndWaiters(), GetLockmodeName(), GrantLock(), GrantLockLocal(), HASH_ENTER, HASH_REMOVE, hash_search(), hash_search_with_hash_value(), PROCLOCK::holdMask, initStringInfo(), InRecovery, JoinWaitQueue(), lengthof, LOCK_PRINT, LOCKACQUIRE_ALREADY_CLEAR, LOCKACQUIRE_ALREADY_HELD, LOCKACQUIRE_NOT_AVAIL, LOCKACQUIRE_OK, LOCKBIT_ON, LockCheckConflicts(), LockHashPartitionLock, PROCLOCK::lockLink, LockMethodLocalHash, LockMethodProcLockHash, LockMethods, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_lockmethodid, LOCKTAG_OBJECT, LOCKTAG_RELATION, LOCKTAG::locktag_type, LockTagHashCode(), LOG, LogAccessExclusiveLock(), LogAccessExclusiveLockPrepare(), LW_EXCLUSIVE, LW_SHARED, LWLockAcquire(), LWLockRelease(), MemoryContextAlloc(), MemSet, PROCLOCKTAG::myLock, MyProc, MyProcPid, LOCK::nGranted, LOCK::nRequested, PANIC, pfree(), pgstat_count_lock_fastpath_exceeded(), PROC_WAIT_STATUS_ERROR, PROC_WAIT_STATUS_OK, PROC_WAIT_STATUS_WAITING, PROCLOCK::procLink, PROCLOCK_PRINT, ProcLockHashCode(), RecoveryInProgress(), RemoveLocalLock(), repalloc(), LOCK::requested, RowExclusiveLock, SetupLockInTable(), PROCLOCK::tag, TopMemoryContext, LOCK::waitMask, WaitOnLock(), and XLogStandbyInfoActive.

Referenced by ConditionalLockDatabaseObject(), ConditionalLockRelation(), ConditionalLockRelationOid(), ConditionalLockSharedObject(), ConditionalLockTuple(), ConditionalXactLockTableWait(), LockAcquire(), LockRelation(), LockRelationId(), and LockRelationOid().

◆ LockCheckConflicts()

bool LockCheckConflicts ( LockMethod  lockMethodTable,
LOCKMODE  lockmode,
LOCK *  lock,
PROCLOCK *  proclock 
)

Definition at line 1537 of file lock.c.

1541{
1542 int numLockModes = lockMethodTable->numLockModes;
1544 int conflictMask = lockMethodTable->conflictTab[lockmode];
1548 int i;
1549
1550 /*
1551 * first check for global conflicts: If no locks conflict with my request,
1552 * then I get the lock.
1553 *
1554 * Checking for conflict: lock->grantMask represents the types of
1555 * currently held locks. conflictTable[lockmode] has a bit set for each
1556 * type of lock that conflicts with request. Bitwise compare tells if
1557 * there is a conflict.
1558 */
1559 if (!(conflictMask & lock->grantMask))
1560 {
1561 PROCLOCK_PRINT("LockCheckConflicts: no conflict", proclock);
1562 return false;
1563 }
1564
1565 /*
1566 * Rats. Something conflicts. But it could still be my own lock, or a
1567 * lock held by another member of my locking group. First, figure out how
1568 * many conflicts remain after subtracting out any locks I hold myself.
1569 */
1570 myLocks = proclock->holdMask;
1571 for (i = 1; i <= numLockModes; i++)
1572 {
1573 if ((conflictMask & LOCKBIT_ON(i)) == 0)
1574 {
1575 conflictsRemaining[i] = 0;
1576 continue;
1577 }
1578 conflictsRemaining[i] = lock->granted[i];
1579 if (myLocks & LOCKBIT_ON(i))
1582 }
1583
1584 /* If no conflicts remain, we get the lock. */
1585 if (totalConflictsRemaining == 0)
1586 {
1587 PROCLOCK_PRINT("LockCheckConflicts: resolved (simple)", proclock);
1588 return false;
1589 }
1590
1591 /* If no group locking, it's definitely a conflict. */
1592 if (proclock->groupLeader == MyProc && MyProc->lockGroupLeader == NULL)
1593 {
1594 Assert(proclock->tag.myProc == MyProc);
1595 PROCLOCK_PRINT("LockCheckConflicts: conflicting (simple)",
1596 proclock);
1597 return true;
1598 }
1599
1600 /*
1601 * The relation extension lock conflict even between the group members.
1602 */
1604 {
1605 PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)",
1606 proclock);
1607 return true;
1608 }
1609
1610 /*
1611 * Locks held in conflicting modes by members of our own lock group are
1612 * not real conflicts; we can subtract those out and see if we still have
1613 * a conflict. This is O(N) in the number of processes holding or
1614 * awaiting locks on this object. We could improve that by making the
1615 * shared memory state more complex (and larger) but it doesn't seem worth
1616 * it.
1617 */
1619 {
1621 dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
1622
1623 if (proclock != otherproclock &&
1624 proclock->groupLeader == otherproclock->groupLeader &&
1625 (otherproclock->holdMask & conflictMask) != 0)
1626 {
1627 int intersectMask = otherproclock->holdMask & conflictMask;
1628
1629 for (i = 1; i <= numLockModes; i++)
1630 {
1631 if ((intersectMask & LOCKBIT_ON(i)) != 0)
1632 {
1633 if (conflictsRemaining[i] <= 0)
1634 elog(PANIC, "proclocks held do not match lock");
1637 }
1638 }
1639
1640 if (totalConflictsRemaining == 0)
1641 {
1642 PROCLOCK_PRINT("LockCheckConflicts: resolved (group)",
1643 proclock);
1644 return false;
1645 }
1646 }
1647 }
1648
1649 /* Nope, it's a real conflict. */
1650 PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)", proclock);
1651 return true;
1652}

References Assert, dlist_container, dlist_foreach, elog, fb(), LOCK::granted, LOCK::grantMask, PROCLOCK::groupLeader, PROCLOCK::holdMask, i, LOCK_LOCKTAG, LOCKBIT_ON, PGPROC::lockGroupLeader, LOCKTAG_RELATION_EXTEND, MAX_LOCKMODES, MyProc, PROCLOCKTAG::myProc, PANIC, PROCLOCK_PRINT, LOCK::procLocks, and PROCLOCK::tag.

Referenced by JoinWaitQueue(), LockAcquireExtended(), and ProcLockWakeup().

◆ LockHasWaiters()

bool LockHasWaiters ( const LOCKTAG *  locktag,
LOCKMODE  lockmode,
bool  sessionLock 
)

Definition at line 693 of file lock.c.

694{
699 LOCK *lock;
700 PROCLOCK *proclock;
702 bool hasWaiters = false;
703
705 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
708 elog(ERROR, "unrecognized lock mode: %d", lockmode);
709
710#ifdef LOCK_DEBUG
711 if (LOCK_DEBUG_ENABLED(locktag))
712 elog(LOG, "LockHasWaiters: lock [%u,%u] %s",
713 locktag->locktag_field1, locktag->locktag_field2,
714 lockMethodTable->lockModeNames[lockmode]);
715#endif
716
717 /*
718 * Find the LOCALLOCK entry for this lock and lockmode
719 */
720 MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
721 localtag.lock = *locktag;
722 localtag.mode = lockmode;
723
725 &localtag,
726 HASH_FIND, NULL);
727
728 /*
729 * let the caller print its own error message, too. Do not ereport(ERROR).
730 */
731 if (!locallock || locallock->nLocks <= 0)
732 {
733 elog(WARNING, "you don't own a lock of type %s",
734 lockMethodTable->lockModeNames[lockmode]);
735 return false;
736 }
737
738 /*
739 * Check the shared lock table.
740 */
742
744
745 /*
746 * We don't need to re-find the lock or proclock, since we kept their
747 * addresses in the locallock table, and they couldn't have been removed
748 * while we were holding a lock on them.
749 */
750 lock = locallock->lock;
751 LOCK_PRINT("LockHasWaiters: found", lock, lockmode);
752 proclock = locallock->proclock;
753 PROCLOCK_PRINT("LockHasWaiters: found", proclock);
754
755 /*
756 * Double-check that we are actually holding a lock of the type we want to
757 * release.
758 */
759 if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
760 {
761 PROCLOCK_PRINT("LockHasWaiters: WRONGTYPE", proclock);
763 elog(WARNING, "you don't own a lock of type %s",
764 lockMethodTable->lockModeNames[lockmode]);
766 return false;
767 }
768
769 /*
770 * Do the checking.
771 */
772 if ((lockMethodTable->conflictTab[lockmode] & lock->waitMask) != 0)
773 hasWaiters = true;
774
776
777 return hasWaiters;
778}

References elog, ERROR, fb(), HASH_FIND, hash_search(), PROCLOCK::holdMask, lengthof, LOCK_PRINT, LOCKBIT_ON, LockHashPartitionLock, LockMethodLocalHash, LockMethods, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_lockmethodid, LOG, LW_SHARED, LWLockAcquire(), LWLockRelease(), MemSet, PROCLOCK_PRINT, RemoveLocalLock(), LOCK::waitMask, and WARNING.

Referenced by LockHasWaitersRelation().

◆ LockHeldByMe()

bool LockHeldByMe ( const LOCKTAG *  locktag,
LOCKMODE  lockmode,
bool  orstronger 
)

Definition at line 640 of file lock.c.

642{
645
646 /*
647 * See if there is a LOCALLOCK entry for this lock and lockmode
648 */
649 MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
650 localtag.lock = *locktag;
651 localtag.mode = lockmode;
652
654 &localtag,
655 HASH_FIND, NULL);
656
657 if (locallock && locallock->nLocks > 0)
658 return true;
659
660 if (orstronger)
661 {
663
664 for (slockmode = lockmode + 1;
666 slockmode++)
667 {
668 if (LockHeldByMe(locktag, slockmode, false))
669 return true;
670 }
671 }
672
673 return false;
674}

References fb(), HASH_FIND, hash_search(), LockHeldByMe(), LockMethodLocalHash, MaxLockMode, and MemSet.

Referenced by CheckRelationLockedByMe(), CheckRelationOidLockedByMe(), LockHeldByMe(), and UpdateSubscriptionRelState().

◆ LockManagerShmemInit()

static void LockManagerShmemInit ( void *  arg)
static

◆ LockManagerShmemRequest()

static void LockManagerShmemRequest ( void *  arg)
static

Definition at line 451 of file lock.c.

452{
454
455 /*
456 * Compute sizes for lock hashtables.
457 */
459
460 /*
461 * Hash table for LOCK structs. This stores per-locked-object
462 * information.
463 */
464 ShmemRequestHash(.name = "LOCK hash",
465 .nelems = max_table_size,
466 .ptr = &LockMethodLockHash,
467 .hash_info.keysize = sizeof(LOCKTAG),
468 .hash_info.entrysize = sizeof(LOCK),
469 .hash_info.num_partitions = NUM_LOCK_PARTITIONS,
470 .hash_flags = HASH_ELEM | HASH_BLOBS | HASH_PARTITION,
471 );
472
473 /* Assume an average of 2 holders per lock */
474 max_table_size *= 2;
475
476 ShmemRequestHash(.name = "PROCLOCK hash",
477 .nelems = max_table_size,
479 .hash_info.keysize = sizeof(PROCLOCKTAG),
480 .hash_info.entrysize = sizeof(PROCLOCK),
481 .hash_info.hash = proclock_hash,
482 .hash_info.num_partitions = NUM_LOCK_PARTITIONS,
483 .hash_flags = HASH_ELEM | HASH_FUNCTION | HASH_PARTITION,
484 );
485
486 ShmemRequestStruct(.name = "Fast Path Strong Relation Lock Data",
487 .size = sizeof(FastPathStrongRelationLockData),
488 .ptr = (void **) (void *) &FastPathStrongRelationLocks,
489 );
490}

References FastPathStrongRelationLocks, fb(), HASH_BLOBS, HASH_ELEM, HASH_FUNCTION, HASH_PARTITION, HTAB::keysize, LockMethodLockHash, LockMethodProcLockHash, name, NLOCKENTS, NUM_LOCK_PARTITIONS, proclock_hash(), ShmemRequestHash, and ShmemRequestStruct.

◆ LockReassignCurrentOwner()

void LockReassignCurrentOwner ( LOCALLOCK **  locallocks,
int  nlocks 
)

Definition at line 2714 of file lock.c.

2715{
2717
2718 Assert(parent != NULL);
2719
2720 if (locallocks == NULL)
2721 {
2722 HASH_SEQ_STATUS status;
2724
2726
2727 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2729 }
2730 else
2731 {
2732 int i;
2733
2734 for (i = nlocks - 1; i >= 0; i--)
2735 LockReassignOwner(locallocks[i], parent);
2736 }
2737}

References Assert, CurrentResourceOwner, fb(), hash_seq_init(), hash_seq_search(), i, LockMethodLocalHash, LockReassignOwner(), and ResourceOwnerGetParent().

Referenced by ResourceOwnerReleaseInternal().

◆ LockReassignOwner()

static void LockReassignOwner ( LOCALLOCK *  locallock,
ResourceOwner  parent 
)
static

Definition at line 2744 of file lock.c.

2745{
2746 LOCALLOCKOWNER *lockOwners;
2747 int i;
2748 int ic = -1;
2749 int ip = -1;
2750
2751 /*
2752 * Scan to see if there are any locks belonging to current owner or its
2753 * parent
2754 */
2755 lockOwners = locallock->lockOwners;
2756 for (i = locallock->numLockOwners - 1; i >= 0; i--)
2757 {
2758 if (lockOwners[i].owner == CurrentResourceOwner)
2759 ic = i;
2760 else if (lockOwners[i].owner == parent)
2761 ip = i;
2762 }
2763
2764 if (ic < 0)
2765 return; /* no current locks */
2766
2767 if (ip < 0)
2768 {
2769 /* Parent has no slot, so just give it the child's slot */
2770 lockOwners[ic].owner = parent;
2772 }
2773 else
2774 {
2775 /* Merge child's count with parent's */
2776 lockOwners[ip].nLocks += lockOwners[ic].nLocks;
2777 /* compact out unused slot */
2778 locallock->numLockOwners--;
2779 if (ic < locallock->numLockOwners)
2780 lockOwners[ic] = lockOwners[locallock->numLockOwners];
2781 }
2783}

References CurrentResourceOwner, fb(), i, LOCALLOCKOWNER::nLocks, LOCALLOCKOWNER::owner, ResourceOwnerForgetLock(), and ResourceOwnerRememberLock().

Referenced by LockReassignCurrentOwner().

◆ LockRefindAndRelease()

static void LockRefindAndRelease ( LockMethod  lockMethodTable,
PGPROC proc,
LOCKTAG locktag,
LOCKMODE  lockmode,
bool  decrement_strong_lock_count 
)
static

Definition at line 3292 of file lock.c.

3295{
3296 LOCK *lock;
3297 PROCLOCK *proclock;
3299 uint32 hashcode;
3302 bool wakeupNeeded;
3303
3304 hashcode = LockTagHashCode(locktag);
3306
3308
3309 /*
3310 * Re-find the lock object (it had better be there).
3311 */
3313 locktag,
3314 hashcode,
3315 HASH_FIND,
3316 NULL);
3317 if (!lock)
3318 elog(PANIC, "failed to re-find shared lock object");
3319
3320 /*
3321 * Re-find the proclock object (ditto).
3322 */
3323 proclocktag.myLock = lock;
3324 proclocktag.myProc = proc;
3325
3327
3329 &proclocktag,
3331 HASH_FIND,
3332 NULL);
3333 if (!proclock)
3334 elog(PANIC, "failed to re-find shared proclock object");
3335
3336 /*
3337 * Double-check that we are actually holding a lock of the type we want to
3338 * release.
3339 */
3340 if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
3341 {
3342 PROCLOCK_PRINT("lock_twophase_postcommit: WRONGTYPE", proclock);
3344 elog(WARNING, "you don't own a lock of type %s",
3345 lockMethodTable->lockModeNames[lockmode]);
3346 return;
3347 }
3348
3349 /*
3350 * Do the releasing. CleanUpLock will waken any now-wakable waiters.
3351 */
3352 wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
3353
3354 CleanUpLock(lock, proclock,
3355 lockMethodTable, hashcode,
3356 wakeupNeeded);
3357
3359
3360 /*
3361 * Decrement strong lock count. This logic is needed only for 2PC.
3362 */
3364 && ConflictsWithRelationFastPath(locktag, lockmode))
3365 {
3367
3372 }
3373}

References Assert, CleanUpLock(), ConflictsWithRelationFastPath, FastPathStrongRelationLockData::count, elog, FastPathStrongLockHashPartition, FastPathStrongRelationLocks, fb(), HASH_FIND, hash_search_with_hash_value(), PROCLOCK::holdMask, LOCKBIT_ON, LockHashPartitionLock, LockMethodLockHash, LockMethodProcLockHash, LockTagHashCode(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), FastPathStrongRelationLockData::mutex, PANIC, PROCLOCK_PRINT, ProcLockHashCode(), SpinLockAcquire(), SpinLockRelease(), UnGrantLock(), and WARNING.

Referenced by lock_twophase_postcommit(), LockReleaseAll(), and VirtualXactLockTableCleanup().

◆ LockRelease()

bool LockRelease ( const LOCKTAG locktag,
LOCKMODE  lockmode,
bool  sessionLock 
)

Definition at line 2110 of file lock.c.

2111{
2116 LOCK *lock;
2117 PROCLOCK *proclock;
2119 bool wakeupNeeded;
2120
2122 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2125 elog(ERROR, "unrecognized lock mode: %d", lockmode);
2126
2127#ifdef LOCK_DEBUG
2128 if (LOCK_DEBUG_ENABLED(locktag))
2129 elog(LOG, "LockRelease: lock [%u,%u] %s",
2130 locktag->locktag_field1, locktag->locktag_field2,
2131 lockMethodTable->lockModeNames[lockmode]);
2132#endif
2133
2134 /*
2135 * Find the LOCALLOCK entry for this lock and lockmode
2136 */
2137 MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
2138 localtag.lock = *locktag;
2139 localtag.mode = lockmode;
2140
2142 &localtag,
2143 HASH_FIND, NULL);
2144
2145 /*
2146 * let the caller print its own error message, too. Do not ereport(ERROR).
2147 */
2148 if (!locallock || locallock->nLocks <= 0)
2149 {
2150 elog(WARNING, "you don't own a lock of type %s",
2151 lockMethodTable->lockModeNames[lockmode]);
2152 return false;
2153 }
2154
2155 /*
2156 * Decrease the count for the resource owner.
2157 */
2158 {
2159 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
2160 ResourceOwner owner;
2161 int i;
2162
2163 /* Identify owner for lock */
2164 if (sessionLock)
2165 owner = NULL;
2166 else
2167 owner = CurrentResourceOwner;
2168
2169 for (i = locallock->numLockOwners - 1; i >= 0; i--)
2170 {
2171 if (lockOwners[i].owner == owner)
2172 {
2173 Assert(lockOwners[i].nLocks > 0);
2174 if (--lockOwners[i].nLocks == 0)
2175 {
2176 if (owner != NULL)
2178 /* compact out unused slot */
2179 locallock->numLockOwners--;
2180 if (i < locallock->numLockOwners)
2181 lockOwners[i] = lockOwners[locallock->numLockOwners];
2182 }
2183 break;
2184 }
2185 }
2186 if (i < 0)
2187 {
2188 /* don't release a lock belonging to another owner */
2189 elog(WARNING, "you don't own a lock of type %s",
2190 lockMethodTable->lockModeNames[lockmode]);
2191 return false;
2192 }
2193 }
2194
2195 /*
2196 * Decrease the total local count. If we're still holding the lock, we're
2197 * done.
2198 */
2199 locallock->nLocks--;
2200
2201 if (locallock->nLocks > 0)
2202 return true;
2203
2204 /*
2205 * At this point we can no longer suppose we are clear of invalidation
2206 * messages related to this lock. Although we'll delete the LOCALLOCK
2207 * object before any intentional return from this routine, it seems worth
2208 * the trouble to explicitly reset lockCleared right now, just in case
2209 * some error prevents us from deleting the LOCALLOCK.
2210 */
2211 locallock->lockCleared = false;
2212
2213 /* Attempt fast release of any lock eligible for the fast path. */
2214 if (EligibleForRelationFastPath(locktag, lockmode) &&
2216 {
2217 bool released;
2218
2219 /*
2220 * We might not find the lock here, even if we originally entered it
2221 * here. Another backend may have moved it to the main table.
2222 */
2225 lockmode);
2227 if (released)
2228 {
2230 return true;
2231 }
2232 }
2233
2234 /*
2235 * Otherwise we've got to mess with the shared lock table.
2236 */
2238
2240
2241 /*
2242 * Normally, we don't need to re-find the lock or proclock, since we kept
2243 * their addresses in the locallock table, and they couldn't have been
2244 * removed while we were holding a lock on them. But it's possible that
2245 * the lock was taken fast-path and has since been moved to the main hash
2246 * table by another backend, in which case we will need to look up the
2247 * objects here. We assume the lock field is NULL if so.
2248 */
2249 lock = locallock->lock;
2250 if (!lock)
2251 {
2253
2254 Assert(EligibleForRelationFastPath(locktag, lockmode));
2256 locktag,
2257 locallock->hashcode,
2258 HASH_FIND,
2259 NULL);
2260 if (!lock)
2261 elog(ERROR, "failed to re-find shared lock object");
2262 locallock->lock = lock;
2263
2264 proclocktag.myLock = lock;
2265 proclocktag.myProc = MyProc;
2267 &proclocktag,
2268 HASH_FIND,
2269 NULL);
2270 if (!locallock->proclock)
2271 elog(ERROR, "failed to re-find shared proclock object");
2272 }
2273 LOCK_PRINT("LockRelease: found", lock, lockmode);
2274 proclock = locallock->proclock;
2275 PROCLOCK_PRINT("LockRelease: found", proclock);
2276
2277 /*
2278 * Double-check that we are actually holding a lock of the type we want to
2279 * release.
2280 */
2281 if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
2282 {
2283 PROCLOCK_PRINT("LockRelease: WRONGTYPE", proclock);
2285 elog(WARNING, "you don't own a lock of type %s",
2286 lockMethodTable->lockModeNames[lockmode]);
2288 return false;
2289 }
2290
2291 /*
2292 * Do the releasing. CleanUpLock will waken any now-wakable waiters.
2293 */
2294 wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
2295
2296 CleanUpLock(lock, proclock,
2297 lockMethodTable, locallock->hashcode,
2298 wakeupNeeded);
2299
2301
2303 return true;
2304}

References Assert, CleanUpLock(), CurrentResourceOwner, EligibleForRelationFastPath, elog, ERROR, FAST_PATH_REL_GROUP, FastPathLocalUseCounts, FastPathUnGrantRelationLock(), fb(), PGPROC::fpInfoLock, HASH_FIND, hash_search(), hash_search_with_hash_value(), PROCLOCK::holdMask, i, lengthof, LOCK_PRINT, LOCKBIT_ON, LockHashPartitionLock, LockMethodLocalHash, LockMethodLockHash, LockMethodProcLockHash, LockMethods, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_lockmethodid, LOG, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), MemSet, MyProc, PROCLOCK_PRINT, RemoveLocalLock(), ResourceOwnerForgetLock(), UnGrantLock(), and WARNING.

Referenced by ConditionalXactLockTableWait(), pg_advisory_unlock_int4(), pg_advisory_unlock_int8(), pg_advisory_unlock_shared_int4(), pg_advisory_unlock_shared_int8(), ReleaseLockIfHeld(), SearchSysCacheLocked1(), SpeculativeInsertionLockRelease(), SpeculativeInsertionWait(), StandbyReleaseXidEntryLocks(), UnlockApplyTransactionForSession(), UnlockDatabaseObject(), UnlockPage(), UnlockRelation(), UnlockRelationForExtension(), UnlockRelationId(), UnlockRelationIdForSession(), UnlockRelationOid(), UnlockSharedObject(), UnlockSharedObjectForSession(), UnlockTuple(), VirtualXactLock(), XactLockForVirtualXact(), XactLockTableDelete(), and XactLockTableWait().

◆ LockReleaseAll()

void LockReleaseAll ( LOCKMETHODID  lockmethodid,
bool  allLocks 
)

Definition at line 2315 of file lock.c.

2316{
2317 HASH_SEQ_STATUS status;
2319 int i,
2320 numLockModes;
2322 LOCK *lock;
2323 int partition;
2324 bool have_fast_path_lwlock = false;
2325
2327 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2329
2330#ifdef LOCK_DEBUG
2331 if (*(lockMethodTable->trace_flag))
2332 elog(LOG, "LockReleaseAll: lockmethod=%d", lockmethodid);
2333#endif
2334
2335 /*
2336 * Get rid of our fast-path VXID lock, if appropriate. Note that this is
2337 * the only way that the lock we hold on our own VXID can ever get
2338 * released: it is always and only released when a toplevel transaction
2339 * ends.
2340 */
2343
2344 numLockModes = lockMethodTable->numLockModes;
2345
2346 /*
2347 * First we run through the locallock table and get rid of unwanted
2348 * entries, then we scan the process's proclocks and get rid of those. We
2349 * do this separately because we may have multiple locallock entries
2350 * pointing to the same proclock, and we daren't end up with any dangling
2351 * pointers. Fast-path locks are cleaned up during the locallock table
2352 * scan, though.
2353 */
2355
2356 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2357 {
2358 /*
2359 * If the LOCALLOCK entry is unused, something must've gone wrong
2360 * while trying to acquire this lock. Just forget the local entry.
2361 */
2362 if (locallock->nLocks == 0)
2363 {
2365 continue;
2366 }
2367
2368 /* Ignore items that are not of the lockmethod to be removed */
2370 continue;
2371
2372 /*
2373 * If we are asked to release all locks, we can just zap the entry.
2374 * Otherwise, must scan to see if there are session locks. We assume
2375 * there is at most one lockOwners entry for session locks.
2376 */
2377 if (!allLocks)
2378 {
2379 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
2380
2381 /* If session lock is above array position 0, move it down to 0 */
2382 for (i = 0; i < locallock->numLockOwners; i++)
2383 {
2384 if (lockOwners[i].owner == NULL)
2385 lockOwners[0] = lockOwners[i];
2386 else
2387 ResourceOwnerForgetLock(lockOwners[i].owner, locallock);
2388 }
2389
2390 if (locallock->numLockOwners > 0 &&
2391 lockOwners[0].owner == NULL &&
2392 lockOwners[0].nLocks > 0)
2393 {
2394 /* Fix the locallock to show just the session locks */
2395 locallock->nLocks = lockOwners[0].nLocks;
2396 locallock->numLockOwners = 1;
2397 /* We aren't deleting this locallock, so done */
2398 continue;
2399 }
2400 else
2401 locallock->numLockOwners = 0;
2402 }
2403
2404#ifdef USE_ASSERT_CHECKING
2405
2406 /*
2407 * Tuple locks are currently held only for short durations within a
2408 * transaction. Check that we didn't forget to release one.
2409 */
2411 elog(WARNING, "tuple lock held at commit");
2412#endif
2413
2414 /*
2415 * If the lock or proclock pointers are NULL, this lock was taken via
2416 * the relation fast-path (and is not known to have been transferred).
2417 */
2418 if (locallock->proclock == NULL || locallock->lock == NULL)
2419 {
2420 LOCKMODE lockmode = locallock->tag.mode;
2421 Oid relid;
2422
2423 /* Verify that a fast-path lock is what we've got. */
2424 if (!EligibleForRelationFastPath(&locallock->tag.lock, lockmode))
2425 elog(PANIC, "locallock table corrupted");
2426
2427 /*
2428 * If we don't currently hold the LWLock that protects our
2429 * fast-path data structures, we must acquire it before attempting
2430 * to release the lock via the fast-path. We will continue to
2431 * hold the LWLock until we're done scanning the locallock table,
2432 * unless we hit a transferred fast-path lock. (XXX is this
2433 * really such a good idea? There could be a lot of entries ...)
2434 */
2436 {
2438 have_fast_path_lwlock = true;
2439 }
2440
2441 /* Attempt fast-path release. */
2442 relid = locallock->tag.lock.locktag_field2;
2443 if (FastPathUnGrantRelationLock(relid, lockmode))
2444 {
2446 continue;
2447 }
2448
2449 /*
2450 * Our lock, originally taken via the fast path, has been
2451 * transferred to the main lock table. That's going to require
2452 * some extra work, so release our fast-path lock before starting.
2453 */
2455 have_fast_path_lwlock = false;
2456
2457 /*
2458 * Now dump the lock. We haven't got a pointer to the LOCK or
2459 * PROCLOCK in this case, so we have to handle this a bit
2460 * differently than a normal lock release. Unfortunately, this
2461 * requires an extra LWLock acquire-and-release cycle on the
2462 * partitionLock, but hopefully it shouldn't happen often.
2463 */
2465 &locallock->tag.lock, lockmode, false);
2467 continue;
2468 }
2469
2470 /* Mark the proclock to show we need to release this lockmode */
2471 if (locallock->nLocks > 0)
2472 locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
2473
2474 /* And remove the locallock hashtable entry */
2476 }
2477
2478 /* Done with the fast-path data structures */
2481
2482 /*
2483 * Now, scan each lock partition separately.
2484 */
2486 {
2488 dlist_head *procLocks = &MyProc->myProcLocks[partition];
2490
2492
2493 /*
2494 * If the proclock list for this partition is empty, we can skip
2495 * acquiring the partition lock. This optimization is trickier than
2496 * it looks, because another backend could be in process of adding
2497 * something to our proclock list due to promoting one of our
2498 * fast-path locks. However, any such lock must be one that we
2499 * decided not to delete above, so it's okay to skip it again now;
2500 * we'd just decide not to delete it again. We must, however, be
2501 * careful to re-fetch the list header once we've acquired the
2502 * partition lock, to be sure we have a valid, up-to-date pointer.
2503 * (There is probably no significant risk if pointer fetch/store is
2504 * atomic, but we don't wish to assume that.)
2505 *
2506 * XXX This argument assumes that the locallock table correctly
2507 * represents all of our fast-path locks. While allLocks mode
2508 * guarantees to clean up all of our normal locks regardless of the
2509 * locallock situation, we lose that guarantee for fast-path locks.
2510 * This is not ideal.
2511 */
2512 if (dlist_is_empty(procLocks))
2513 continue; /* needn't examine this partition */
2514
2516
2518 {
2519 PROCLOCK *proclock = dlist_container(PROCLOCK, procLink, proclock_iter.cur);
2520 bool wakeupNeeded = false;
2521
2522 Assert(proclock->tag.myProc == MyProc);
2523
2524 lock = proclock->tag.myLock;
2525
2526 /* Ignore items that are not of the lockmethod to be removed */
2527 if (LOCK_LOCKMETHOD(*lock) != lockmethodid)
2528 continue;
2529
2530 /*
2531 * In allLocks mode, force release of all locks even if locallock
2532 * table had problems
2533 */
2534 if (allLocks)
2535 proclock->releaseMask = proclock->holdMask;
2536 else
2537 Assert((proclock->releaseMask & ~proclock->holdMask) == 0);
2538
2539 /*
2540 * Ignore items that have nothing to be released, unless they have
2541 * holdMask == 0 and are therefore recyclable
2542 */
2543 if (proclock->releaseMask == 0 && proclock->holdMask != 0)
2544 continue;
2545
2546 PROCLOCK_PRINT("LockReleaseAll", proclock);
2547 LOCK_PRINT("LockReleaseAll", lock, 0);
2548 Assert(lock->nRequested >= 0);
2549 Assert(lock->nGranted >= 0);
2550 Assert(lock->nGranted <= lock->nRequested);
2551 Assert((proclock->holdMask & ~lock->grantMask) == 0);
2552
2553 /*
2554 * Release the previously-marked lock modes
2555 */
2556 for (i = 1; i <= numLockModes; i++)
2557 {
2558 if (proclock->releaseMask & LOCKBIT_ON(i))
2559 wakeupNeeded |= UnGrantLock(lock, i, proclock,
2561 }
2562 Assert((lock->nRequested >= 0) && (lock->nGranted >= 0));
2563 Assert(lock->nGranted <= lock->nRequested);
2564 LOCK_PRINT("LockReleaseAll: updated", lock, 0);
2565
2566 proclock->releaseMask = 0;
2567
2568 /* CleanUpLock will wake up waiters if needed. */
2569 CleanUpLock(lock, proclock,
2571 LockTagHashCode(&lock->tag),
2572 wakeupNeeded);
2573 } /* loop over PROCLOCKs within this partition */
2574
2576 } /* loop over partitions */
2577
2578#ifdef LOCK_DEBUG
2579 if (*(lockMethodTable->trace_flag))
2580 elog(LOG, "LockReleaseAll done");
2581#endif
2582}

References Assert, CleanUpLock(), DEFAULT_LOCKMETHOD, dlist_container, dlist_foreach_modify, dlist_is_empty(), EligibleForRelationFastPath, elog, ERROR, FastPathUnGrantRelationLock(), fb(), PGPROC::fpInfoLock, LOCK::grantMask, hash_seq_init(), hash_seq_search(), PROCLOCK::holdMask, i, lengthof, LOCALLOCK_LOCKMETHOD, LOCALLOCK_LOCKTAG, LOCK_LOCKMETHOD, LOCK_PRINT, LOCKBIT_ON, LockHashPartitionLockByIndex, LockMethodLocalHash, LockMethods, LockRefindAndRelease(), LOCKTAG_TUPLE, LockTagHashCode(), LOG, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), PROCLOCKTAG::myLock, MyProc, PROCLOCKTAG::myProc, PGPROC::myProcLocks, LOCK::nGranted, LOCALLOCKOWNER::nLocks, LOCK::nRequested, NUM_LOCK_PARTITIONS, LOCALLOCKOWNER::owner, PANIC, PROCLOCK_PRINT, PROCLOCK::releaseMask, RemoveLocalLock(), ResourceOwnerForgetLock(), LOCK::tag, PROCLOCK::tag, UnGrantLock(), VirtualXactLockTableCleanup(), and WARNING.

Referenced by DiscardAll(), logicalrep_worker_onexit(), ProcReleaseLocks(), and ShutdownPostgres().

◆ LockReleaseCurrentOwner()

void LockReleaseCurrentOwner ( LOCALLOCK **  locallocks,
int  nlocks 
)

Definition at line 2619 of file lock.c.

2620{
2621 if (locallocks == NULL)
2622 {
2623 HASH_SEQ_STATUS status;
2625
2627
2628 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2630 }
2631 else
2632 {
2633 int i;
2634
2635 for (i = nlocks - 1; i >= 0; i--)
2637 }
2638}

References fb(), hash_seq_init(), hash_seq_search(), i, LockMethodLocalHash, and ReleaseLockIfHeld().

Referenced by ResourceOwnerReleaseInternal().

◆ LockReleaseSession()

void LockReleaseSession ( LOCKMETHODID  lockmethodid)

Definition at line 2589 of file lock.c.

2590{
2591 HASH_SEQ_STATUS status;
2593
2595 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2596
2598
2599 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2600 {
2601 /* Ignore items that are not of the specified lock method */
2603 continue;
2604
2606 }
2607}

References elog, ERROR, fb(), hash_seq_init(), hash_seq_search(), lengthof, LOCALLOCK_LOCKMETHOD, LockMethodLocalHash, LockMethods, and ReleaseLockIfHeld().

Referenced by pg_advisory_unlock_all().

◆ LockTagHashCode()

◆ LockWaiterCount()

int LockWaiterCount ( const LOCKTAG locktag)

Definition at line 4836 of file lock.c.

4837{
4839 LOCK *lock;
4840 bool found;
4841 uint32 hashcode;
4843 int waiters = 0;
4844
4846 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4847
4848 hashcode = LockTagHashCode(locktag);
4851
4853 locktag,
4854 hashcode,
4855 HASH_FIND,
4856 &found);
4857 if (found)
4858 {
4859 Assert(lock != NULL);
4860 waiters = lock->nRequested;
4861 }
4863
4864 return waiters;
4865}

References Assert, elog, ERROR, fb(), HASH_FIND, hash_search_with_hash_value(), lengthof, LockHashPartitionLock, LockMethodLockHash, LockMethods, LOCKTAG::locktag_lockmethodid, LockTagHashCode(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), and LOCK::nRequested.

Referenced by RelationExtensionLockWaiterCount().

◆ MarkLockClear()

void MarkLockClear ( LOCALLOCK locallock)

Definition at line 1928 of file lock.c.

1929{
1930 Assert(locallock->nLocks > 0);
1931 locallock->lockCleared = true;
1932}

References Assert, and fb().

Referenced by ConditionalLockDatabaseObject(), ConditionalLockRelation(), ConditionalLockRelationOid(), ConditionalLockSharedObject(), LockRelation(), LockRelationId(), and LockRelationOid().

◆ PostPrepare_Locks()

void PostPrepare_Locks ( FullTransactionId  fxid)

Definition at line 3580 of file lock.c.

3581{
3582 PGPROC *newproc = TwoPhaseGetDummyProc(fxid, false);
3583 HASH_SEQ_STATUS status;
3585 LOCK *lock;
3586 PROCLOCK *proclock;
3588 int partition;
3589
3590 /* Can't prepare a lock group follower. */
3593
3594 /* This is a critical section: any error means big trouble */
3596
3597 /*
3598 * First we run through the locallock table and get rid of unwanted
3599 * entries, then we scan the process's proclocks and transfer them to the
3600 * target proc.
3601 *
3602 * We do this separately because we may have multiple locallock entries
3603 * pointing to the same proclock, and we daren't end up with any dangling
3604 * pointers.
3605 */
3607
3608 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3609 {
3610 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3611 bool haveSessionLock;
3612 bool haveXactLock;
3613 int i;
3614
3615 if (locallock->proclock == NULL || locallock->lock == NULL)
3616 {
3617 /*
3618 * We must've run out of shared memory while trying to set up this
3619 * lock. Just forget the local entry.
3620 */
3621 Assert(locallock->nLocks == 0);
3623 continue;
3624 }
3625
3626 /* Ignore VXID locks */
3627 if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3628 continue;
3629
3630 /* Scan to see whether we hold it at session or transaction level */
3631 haveSessionLock = haveXactLock = false;
3632 for (i = locallock->numLockOwners - 1; i >= 0; i--)
3633 {
3634 if (lockOwners[i].owner == NULL)
3635 haveSessionLock = true;
3636 else
3637 haveXactLock = true;
3638 }
3639
3640 /* Ignore it if we have only session lock */
3641 if (!haveXactLock)
3642 continue;
3643
3644 /* This can't happen, because we already checked it */
3645 if (haveSessionLock)
3646 ereport(PANIC,
3648 errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3649
3650 /* Mark the proclock to show we need to release this lockmode */
3651 if (locallock->nLocks > 0)
3652 locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
3653
3654 /* And remove the locallock hashtable entry */
3656 }
3657
3658 /*
3659 * Now, scan each lock partition separately.
3660 */
3662 {
3664 dlist_head *procLocks = &(MyProc->myProcLocks[partition]);
3666
3668
3669 /*
3670 * If the proclock list for this partition is empty, we can skip
3671 * acquiring the partition lock. This optimization is safer than the
3672 * situation in LockReleaseAll, because we got rid of any fast-path
3673 * locks during AtPrepare_Locks, so there cannot be any case where
3674 * another backend is adding something to our lists now. For safety,
3675 * though, we code this the same way as in LockReleaseAll.
3676 */
3677 if (dlist_is_empty(procLocks))
3678 continue; /* needn't examine this partition */
3679
3681
3683 {
3684 proclock = dlist_container(PROCLOCK, procLink, proclock_iter.cur);
3685
3686 Assert(proclock->tag.myProc == MyProc);
3687
3688 lock = proclock->tag.myLock;
3689
3690 /* Ignore VXID locks */
3692 continue;
3693
3694 PROCLOCK_PRINT("PostPrepare_Locks", proclock);
3695 LOCK_PRINT("PostPrepare_Locks", lock, 0);
3696 Assert(lock->nRequested >= 0);
3697 Assert(lock->nGranted >= 0);
3698 Assert(lock->nGranted <= lock->nRequested);
3699 Assert((proclock->holdMask & ~lock->grantMask) == 0);
3700
3701 /* Ignore it if nothing to release (must be a session lock) */
3702 if (proclock->releaseMask == 0)
3703 continue;
3704
3705 /* Else we should be releasing all locks */
3706 if (proclock->releaseMask != proclock->holdMask)
3707 elog(PANIC, "we seem to have dropped a bit somewhere");
3708
3709 /*
3710 * We cannot simply modify proclock->tag.myProc to reassign
3711 * ownership of the lock, because that's part of the hash key and
3712 * the proclock would then be in the wrong hash chain. Instead
3713 * use hash_update_hash_key. (We used to create a new hash entry,
3714 * but that risks out-of-memory failure if other processes are
3715 * busy making proclocks too.) We must unlink the proclock from
3716 * our procLink chain and put it into the new proc's chain, too.
3717 *
3718 * Note: the updated proclock hash key will still belong to the
3719 * same hash partition, cf proclock_hash(). So the partition lock
3720 * we already hold is sufficient for this.
3721 */
3722 dlist_delete(&proclock->procLink);
3723
3724 /*
3725 * Create the new hash key for the proclock.
3726 */
3727 proclocktag.myLock = lock;
3728 proclocktag.myProc = newproc;
3729
3730 /*
3731 * Update groupLeader pointer to point to the new proc. (We'd
3732 * better not be a member of somebody else's lock group!)
3733 */
3734 Assert(proclock->groupLeader == proclock->tag.myProc);
3735 proclock->groupLeader = newproc;
3736
3737 /*
3738 * Update the proclock. We should not find any existing entry for
3739 * the same hash key, since there can be only one entry for any
3740 * given lock with my own proc.
3741 */
3743 proclock,
3744 &proclocktag))
3745 elog(PANIC, "duplicate entry found while reassigning a prepared transaction's locks");
3746
3747 /* Re-link into the new proc's proclock list */
3748 dlist_push_tail(&newproc->myProcLocks[partition], &proclock->procLink);
3749
3750 PROCLOCK_PRINT("PostPrepare_Locks: updated", proclock);
3751 } /* loop over PROCLOCKs within this partition */
3752
3754 } /* loop over partitions */
3755
3757}

References Assert, dlist_container, dlist_delete(), dlist_foreach_modify, dlist_is_empty(), dlist_push_tail(), elog, END_CRIT_SECTION, ereport, errcode(), errmsg, fb(), LOCK::grantMask, PROCLOCK::groupLeader, hash_seq_init(), hash_seq_search(), hash_update_hash_key(), PROCLOCK::holdMask, i, LOCK_PRINT, LOCKBIT_ON, PGPROC::lockGroupLeader, LockHashPartitionLockByIndex, LockMethodLocalHash, LockMethodProcLockHash, LOCKTAG::locktag_type, LOCKTAG_VIRTUALTRANSACTION, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), PROCLOCKTAG::myLock, MyProc, PROCLOCKTAG::myProc, PGPROC::myProcLocks, LOCK::nGranted, LOCK::nRequested, NUM_LOCK_PARTITIONS, PANIC, PROCLOCK::procLink, PROCLOCK_PRINT, PROCLOCK::releaseMask, RemoveLocalLock(), START_CRIT_SECTION, LOCK::tag, PROCLOCK::tag, and TwoPhaseGetDummyProc().

Referenced by PrepareTransaction().

◆ proclock_hash()

static uint32 proclock_hash ( const void key,
Size  keysize 
)
static

Definition at line 571 of file lock.c.

572{
573 const PROCLOCKTAG *proclocktag = (const PROCLOCKTAG *) key;
576
577 Assert(keysize == sizeof(PROCLOCKTAG));
578
579 /* Look into the associated LOCK object, and compute its hash code */
580 lockhash = LockTagHashCode(&proclocktag->myLock->tag);
581
582 /*
583 * To make the hash code also depend on the PGPROC, we xor the proc
584 * struct's address into the hash code, left-shifted so that the
585 * partition-number bits don't change. Since this is only a hash, we
586 * don't care if we lose high-order bits of the address; use an
587 * intermediate variable to suppress cast-pointer-to-int warnings.
588 */
591
592 return lockhash;
593}

References Assert, DatumGetUInt32(), fb(), LockTagHashCode(), LOG2_NUM_LOCK_PARTITIONS, and PointerGetDatum.

Referenced by LockManagerShmemRequest().

◆ ProcLockHashCode()

static uint32 ProcLockHashCode ( const PROCLOCKTAG proclocktag,
uint32  hashcode 
)
inlinestatic

Definition at line 602 of file lock.c.

603{
604 uint32 lockhash = hashcode;
606
607 /*
608 * This must match proclock_hash()!
609 */
612
613 return lockhash;
614}

References DatumGetUInt32(), fb(), LOG2_NUM_LOCK_PARTITIONS, and PointerGetDatum.

Referenced by CleanUpLock(), FastPathGetRelationLockEntry(), lock_twophase_recover(), LockAcquireExtended(), LockRefindAndRelease(), and SetupLockInTable().

◆ ReleaseLockIfHeld()

static void ReleaseLockIfHeld ( LOCALLOCK locallock,
bool  sessionLock 
)
static

Definition at line 2654 of file lock.c.

2655{
2656 ResourceOwner owner;
2657 LOCALLOCKOWNER *lockOwners;
2658 int i;
2659
2660 /* Identify owner for lock (must match LockRelease!) */
2661 if (sessionLock)
2662 owner = NULL;
2663 else
2664 owner = CurrentResourceOwner;
2665
2666 /* Scan to see if there are any locks belonging to the target owner */
2667 lockOwners = locallock->lockOwners;
2668 for (i = locallock->numLockOwners - 1; i >= 0; i--)
2669 {
2670 if (lockOwners[i].owner == owner)
2671 {
2672 Assert(lockOwners[i].nLocks > 0);
2673 if (lockOwners[i].nLocks < locallock->nLocks)
2674 {
2675 /*
2676 * We will still hold this lock after forgetting this
2677 * ResourceOwner.
2678 */
2679 locallock->nLocks -= lockOwners[i].nLocks;
2680 /* compact out unused slot */
2681 locallock->numLockOwners--;
2682 if (owner != NULL)
2684 if (i < locallock->numLockOwners)
2685 lockOwners[i] = lockOwners[locallock->numLockOwners];
2686 }
2687 else
2688 {
2689 Assert(lockOwners[i].nLocks == locallock->nLocks);
2690 /* We want to call LockRelease just once */
2691 lockOwners[i].nLocks = 1;
2692 locallock->nLocks = 1;
2693 if (!LockRelease(&locallock->tag.lock,
2694 locallock->tag.mode,
2695 sessionLock))
2696 elog(WARNING, "ReleaseLockIfHeld: failed??");
2697 }
2698 break;
2699 }
2700 }
2701}

References Assert, CurrentResourceOwner, elog, fb(), i, LockRelease(), LOCALLOCKOWNER::nLocks, ResourceOwnerForgetLock(), and WARNING.

Referenced by LockReleaseCurrentOwner(), and LockReleaseSession().

◆ RemoveFromWaitQueue()

void RemoveFromWaitQueue ( PGPROC proc,
uint32  hashcode 
)

Definition at line 2054 of file lock.c.

2055{
2056 LOCK *waitLock = proc->waitLock;
2057 PROCLOCK *proclock = proc->waitProcLock;
2058 LOCKMODE lockmode = proc->waitLockMode;
2060
2061 /* Make sure proc is waiting */
2064 Assert(waitLock);
2065 Assert(!dclist_is_empty(&waitLock->waitProcs));
2067
2068 /* Remove proc from lock's wait queue */
2070
2071 /* Undo increments of request counts by waiting process */
2072 Assert(waitLock->nRequested > 0);
2073 Assert(waitLock->nRequested > proc->waitLock->nGranted);
2074 waitLock->nRequested--;
2075 Assert(waitLock->requested[lockmode] > 0);
2076 waitLock->requested[lockmode]--;
2077 /* don't forget to clear waitMask bit if appropriate */
2078 if (waitLock->granted[lockmode] == waitLock->requested[lockmode])
2079 waitLock->waitMask &= LOCKBIT_OFF(lockmode);
2080
2081 /* Clean up the proc's own state, and pass it the ok/fail signal */
2082 proc->waitLock = NULL;
2083 proc->waitProcLock = NULL;
2085
2086 /*
2087 * Delete the proclock immediately if it represents no already-held locks.
2088 * (This must happen now because if the owner of the lock decides to
2089 * release it, and the requested/granted counts then go to zero,
2090 * LockRelease expects there to be no remaining proclocks.) Then see if
2091 * any other waiters for the lock can be woken up now.
2092 */
2093 CleanUpLock(waitLock, proclock,
2094 LockMethods[lockmethodid], hashcode,
2095 true);
2096}

References Assert, CleanUpLock(), dclist_delete_from_thoroughly(), dclist_is_empty(), dlist_node_is_detached(), fb(), LOCK::granted, lengthof, LOCK_LOCKMETHOD, LOCKBIT_OFF, LockMethods, LOCK::nGranted, LOCK::nRequested, PROC_WAIT_STATUS_ERROR, PROC_WAIT_STATUS_WAITING, LOCK::requested, PGPROC::waitLink, PGPROC::waitLock, PGPROC::waitLockMode, LOCK::waitMask, PGPROC::waitProcLock, LOCK::waitProcs, and PGPROC::waitStatus.

Referenced by CheckDeadLock(), and LockErrorCleanup().

◆ RemoveLocalLock()

static void RemoveLocalLock ( LOCALLOCK locallock)
static

Definition at line 1484 of file lock.c.

1485{
1486 int i;
1487
1488 for (i = locallock->numLockOwners - 1; i >= 0; i--)
1489 {
1490 if (locallock->lockOwners[i].owner != NULL)
1491 ResourceOwnerForgetLock(locallock->lockOwners[i].owner, locallock);
1492 }
1493 locallock->numLockOwners = 0;
1494 if (locallock->lockOwners != NULL)
1495 pfree(locallock->lockOwners);
1496 locallock->lockOwners = NULL;
1497
1498 if (locallock->holdsStrongLockCount)
1499 {
1501
1503
1507 locallock->holdsStrongLockCount = false;
1509 }
1510
1512 &(locallock->tag),
1513 HASH_REMOVE, NULL))
1514 elog(WARNING, "locallock table corrupted");
1515
1516 /*
1517 * Indicate that the lock is released for certain types of locks
1518 */
1520}

References Assert, CheckAndSetLockHeld(), FastPathStrongRelationLockData::count, elog, FastPathStrongLockHashPartition, FastPathStrongRelationLocks, fb(), HASH_REMOVE, hash_search(), i, LockMethodLocalHash, FastPathStrongRelationLockData::mutex, pfree(), ResourceOwnerForgetLock(), SpinLockAcquire(), SpinLockRelease(), and WARNING.

Referenced by LockAcquireExtended(), LockHasWaiters(), LockRelease(), LockReleaseAll(), and PostPrepare_Locks().

◆ ResetAwaitedLock()

void ResetAwaitedLock ( void  )

Definition at line 1915 of file lock.c.

1916{
1917 awaitedLock = NULL;
1918}

References awaitedLock, and fb().

Referenced by LockErrorCleanup().

◆ SetupLockInTable()

static PROCLOCK * SetupLockInTable ( LockMethod  lockMethodTable,
PGPROC proc,
const LOCKTAG locktag,
uint32  hashcode,
LOCKMODE  lockmode 
)
static

Definition at line 1291 of file lock.c.

1293{
1294 LOCK *lock;
1295 PROCLOCK *proclock;
1298 bool found;
1299
1300 /*
1301 * Find or create a lock with this tag.
1302 */
1304 locktag,
1305 hashcode,
1307 &found);
1308 if (!lock)
1309 return NULL;
1310
1311 /*
1312 * if it's a new lock object, initialize it
1313 */
1314 if (!found)
1315 {
1316 lock->grantMask = 0;
1317 lock->waitMask = 0;
1318 dlist_init(&lock->procLocks);
1319 dclist_init(&lock->waitProcs);
1320 lock->nRequested = 0;
1321 lock->nGranted = 0;
1322 MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
1323 MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
1324 LOCK_PRINT("LockAcquire: new", lock, lockmode);
1325 }
1326 else
1327 {
1328 LOCK_PRINT("LockAcquire: found", lock, lockmode);
1329 Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
1330 Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
1331 Assert(lock->nGranted <= lock->nRequested);
1332 }
1333
1334 /*
1335 * Create the hash key for the proclock table.
1336 */
1337 proclocktag.myLock = lock;
1338 proclocktag.myProc = proc;
1339
1341
1342 /*
1343 * Find or create a proclock entry with this tag
1344 */
1346 &proclocktag,
1349 &found);
1350 if (!proclock)
1351 {
1352 /* Oops, not enough shmem for the proclock */
1353 if (lock->nRequested == 0)
1354 {
1355 /*
1356 * There are no other requestors of this lock, so garbage-collect
1357 * the lock object. We *must* do this to avoid a permanent leak
1358 * of shared memory, because there won't be anything to cause
1359 * anyone to release the lock object later.
1360 */
1361 Assert(dlist_is_empty(&(lock->procLocks)));
1363 &(lock->tag),
1364 hashcode,
1366 NULL))
1367 elog(PANIC, "lock table corrupted");
1368 }
1369 return NULL;
1370 }
1371
1372 /*
1373 * If new, initialize the new entry
1374 */
1375 if (!found)
1376 {
1378
1379 /*
1380 * It might seem unsafe to access proclock->groupLeader without a
1381 * lock, but it's not really. Either we are initializing a proclock
1382 * on our own behalf, in which case our group leader isn't changing
1383 * because the group leader for a process can only ever be changed by
1384 * the process itself; or else we are transferring a fast-path lock to
1385 * the main lock table, in which case that process can't change its
1386 * lock group leader without first releasing all of its locks (and in
1387 * particular the one we are currently transferring).
1388 */
1389 proclock->groupLeader = proc->lockGroupLeader != NULL ?
1390 proc->lockGroupLeader : proc;
1391 proclock->holdMask = 0;
1392 proclock->releaseMask = 0;
1393 /* Add proclock to appropriate lists */
1394 dlist_push_tail(&lock->procLocks, &proclock->lockLink);
1395 dlist_push_tail(&proc->myProcLocks[partition], &proclock->procLink);
1396 PROCLOCK_PRINT("LockAcquire: new", proclock);
1397 }
1398 else
1399 {
1400 PROCLOCK_PRINT("LockAcquire: found", proclock);
1401 Assert((proclock->holdMask & ~lock->grantMask) == 0);
1402
1403#ifdef CHECK_DEADLOCK_RISK
1404
1405 /*
1406 * Issue warning if we already hold a lower-level lock on this object
1407 * and do not hold a lock of the requested level or higher. This
1408 * indicates a deadlock-prone coding practice (eg, we'd have a
1409 * deadlock if another backend were following the same code path at
1410 * about the same time).
1411 *
1412 * This is not enabled by default, because it may generate log entries
1413 * about user-level coding practices that are in fact safe in context.
1414 * It can be enabled to help find system-level problems.
1415 *
1416 * XXX Doing numeric comparison on the lockmodes is a hack; it'd be
1417 * better to use a table. For now, though, this works.
1418 */
1419 {
1420 int i;
1421
1422 for (i = lockMethodTable->numLockModes; i > 0; i--)
1423 {
1424 if (proclock->holdMask & LOCKBIT_ON(i))
1425 {
1426 if (i >= (int) lockmode)
1427 break; /* safe: we have a lock >= req level */
1428 elog(LOG, "deadlock risk: raising lock level"
1429 " from %s to %s on object %u/%u/%u",
1430 lockMethodTable->lockModeNames[i],
1431 lockMethodTable->lockModeNames[lockmode],
1432 lock->tag.locktag_field1, lock->tag.locktag_field2,
1433 lock->tag.locktag_field3);
1434 break;
1435 }
1436 }
1437 }
1438#endif /* CHECK_DEADLOCK_RISK */
1439 }
1440
1441 /*
1442 * lock->nRequested and lock->requested[] count the total number of
1443 * requests, whether granted or waiting, so increment those immediately.
1444 * The other counts don't increment till we get the lock.
1445 */
1446 lock->nRequested++;
1447 lock->requested[lockmode]++;
1448 Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
1449
1450 /*
1451 * We shouldn't already hold the desired lock; else locallock table is
1452 * broken.
1453 */
1454 if (proclock->holdMask & LOCKBIT_ON(lockmode))
1455 elog(ERROR, "lock %s on object %u/%u/%u is already held",
1456 lockMethodTable->lockModeNames[lockmode],
1457 lock->tag.locktag_field1, lock->tag.locktag_field2,
1458 lock->tag.locktag_field3);
1459
1460 return proclock;
1461}

References Assert, dclist_init(), dlist_init(), dlist_is_empty(), dlist_push_tail(), elog, ERROR, fb(), LOCK::granted, LOCK::grantMask, PROCLOCK::groupLeader, HASH_ENTER_NULL, HASH_REMOVE, hash_search_with_hash_value(), PROCLOCK::holdMask, i, LOCK_PRINT, LOCKBIT_ON, PGPROC::lockGroupLeader, LockHashPartition, PROCLOCK::lockLink, LockMethodLockHash, LockMethodProcLockHash, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_field3, LOG, MAX_LOCKMODES, MemSet, PGPROC::myProcLocks, LOCK::nGranted, LOCK::nRequested, PANIC, PROCLOCK::procLink, PROCLOCK_PRINT, ProcLockHashCode(), LOCK::procLocks, PROCLOCK::releaseMask, LOCK::requested, LOCK::tag, LOCK::waitMask, and LOCK::waitProcs.

Referenced by FastPathGetRelationLockEntry(), FastPathTransferRelationLocks(), LockAcquireExtended(), and VirtualXactLock().

◆ UnGrantLock()

static bool UnGrantLock ( LOCK lock,
LOCKMODE  lockmode,
PROCLOCK proclock,
LockMethod  lockMethodTable 
)
static

Definition at line 1689 of file lock.c.

1691{
1692 bool wakeupNeeded = false;
1693
1694 Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
1695 Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
1696 Assert(lock->nGranted <= lock->nRequested);
1697
1698 /*
1699 * fix the general lock stats
1700 */
1701 lock->nRequested--;
1702 lock->requested[lockmode]--;
1703 lock->nGranted--;
1704 lock->granted[lockmode]--;
1705
1706 if (lock->granted[lockmode] == 0)
1707 {
1708 /* change the conflict mask. No more of this lock type. */
1709 lock->grantMask &= LOCKBIT_OFF(lockmode);
1710 }
1711
1712 LOCK_PRINT("UnGrantLock: updated", lock, lockmode);
1713
1714 /*
1715 * We need only run ProcLockWakeup if the released lock conflicts with at
1716 * least one of the lock types requested by waiter(s). Otherwise whatever
1717 * conflict made them wait must still exist. NOTE: before MVCC, we could
1718 * skip wakeup if lock->granted[lockmode] was still positive. But that's
1719 * not true anymore, because the remaining granted locks might belong to
1720 * some waiter, who could now be awakened because he doesn't conflict with
1721 * his own locks.
1722 */
1723 if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
1724 wakeupNeeded = true;
1725
1726 /*
1727 * Now fix the per-proclock state.
1728 */
1729 proclock->holdMask &= LOCKBIT_OFF(lockmode);
1730 PROCLOCK_PRINT("UnGrantLock: updated", proclock);
1731
1732 return wakeupNeeded;
1733}

References Assert, fb(), LOCK::granted, LOCK::grantMask, PROCLOCK::holdMask, LOCK_PRINT, LOCKBIT_OFF, LOCK::nGranted, LOCK::nRequested, PROCLOCK_PRINT, LOCK::requested, and LOCK::waitMask.

Referenced by LockRefindAndRelease(), LockRelease(), and LockReleaseAll().

◆ VirtualXactLock()

bool VirtualXactLock ( VirtualTransactionId  vxid,
bool  wait 
)

Definition at line 4725 of file lock.c.

4726{
4727 LOCKTAG tag;
4728 PGPROC *proc;
4730
4732
4734 /* no vxid lock; localTransactionId is a normal, locked XID */
4735 return XactLockForVirtualXact(vxid, vxid.localTransactionId, wait);
4736
4738
4739 /*
4740 * If a lock table entry must be made, this is the PGPROC on whose behalf
4741 * it must be done. Note that the transaction might end or the PGPROC
4742 * might be reassigned to a new backend before we get around to examining
4743 * it, but it doesn't matter. If we find upon examination that the
4744 * relevant lxid is no longer running here, that's enough to prove that
4745 * it's no longer running anywhere.
4746 */
4747 proc = ProcNumberGetProc(vxid.procNumber);
4748 if (proc == NULL)
4749 return XactLockForVirtualXact(vxid, InvalidTransactionId, wait);
4750
4751 /*
4752 * We must acquire this lock before checking the procNumber and lxid
4753 * against the ones we're waiting for. The target backend will only set
4754 * or clear lxid while holding this lock.
4755 */
4757
4758 if (proc->vxid.procNumber != vxid.procNumber
4760 {
4761 /* VXID ended */
4762 LWLockRelease(&proc->fpInfoLock);
4763 return XactLockForVirtualXact(vxid, InvalidTransactionId, wait);
4764 }
4765
4766 /*
4767 * If we aren't asked to wait, there's no need to set up a lock table
4768 * entry. The transaction is still in progress, so just return false.
4769 */
4770 if (!wait)
4771 {
4772 LWLockRelease(&proc->fpInfoLock);
4773 return false;
4774 }
4775
4776 /*
4777 * OK, we're going to need to sleep on the VXID. But first, we must set
4778 * up the primary lock table entry, if needed (ie, convert the proc's
4779 * fast-path lock on its VXID to a regular lock).
4780 */
4781 if (proc->fpVXIDLock)
4782 {
4783 PROCLOCK *proclock;
4784 uint32 hashcode;
4786
4787 hashcode = LockTagHashCode(&tag);
4788
4791
4793 &tag, hashcode, ExclusiveLock);
4794 if (!proclock)
4795 {
4797 LWLockRelease(&proc->fpInfoLock);
4798 ereport(ERROR,
4800 errmsg("out of shared memory"),
4801 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
4802 }
4803 GrantLock(proclock->tag.myLock, proclock, ExclusiveLock);
4804
4806
4807 proc->fpVXIDLock = false;
4808 }
4809
4810 /*
4811 * If the proc has an XID now, we'll avoid a TwoPhaseGetXidByVirtualXID()
4812 * search. The proc might have assigned this XID but not yet locked it,
4813 * in which case the proc will lock this XID before releasing the VXID.
4814 * The fpInfoLock critical section excludes VirtualXactLockTableCleanup(),
4815 * so we won't save an XID of a different VXID. It doesn't matter whether
4816 * we save this before or after setting up the primary lock table entry.
4817 */
4818 xid = proc->xid;
4819
4820 /* Done with proc->fpLockBits */
4821 LWLockRelease(&proc->fpInfoLock);
4822
4823 /* Time to wait. */
4824 (void) LockAcquire(&tag, ShareLock, false, false);
4825
4826 LockRelease(&tag, ShareLock, false);
4827 return XactLockForVirtualXact(vxid, xid, wait);
4828}

References Assert, DEFAULT_LOCKMETHOD, ereport, errcode(), errhint(), errmsg, ERROR, ExclusiveLock, fb(), PGPROC::fpInfoLock, PGPROC::fpLocalTransactionId, PGPROC::fpVXIDLock, GrantLock(), InvalidTransactionId, VirtualTransactionId::localTransactionId, LockAcquire(), LockHashPartitionLock, LockMethods, LockRelease(), LockTagHashCode(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), PROCLOCKTAG::myLock, VirtualTransactionId::procNumber, PGPROC::procNumber, ProcNumberGetProc(), SET_LOCKTAG_VIRTUALTRANSACTION, SetupLockInTable(), ShareLock, PROCLOCK::tag, VirtualTransactionIdIsRecoveredPreparedXact, VirtualTransactionIdIsValid, PGPROC::vxid, XactLockForVirtualXact(), and PGPROC::xid.

Referenced by ResolveRecoveryConflictWithVirtualXIDs(), WaitForLockersMultiple(), and WaitForOlderSnapshots().

◆ VirtualXactLockTableCleanup()

void VirtualXactLockTableCleanup ( void  )

Definition at line 4625 of file lock.c.

4626{
4627 bool fastpath;
4628 LocalTransactionId lxid;
4629
4631
4632 /*
4633 * Clean up shared memory state.
4634 */
4636
4637 fastpath = MyProc->fpVXIDLock;
4639 MyProc->fpVXIDLock = false;
4641
4643
4644 /*
4645 * If fpVXIDLock has been cleared without touching fpLocalTransactionId,
4646 * that means someone transferred the lock to the main lock table.
4647 */
4648 if (!fastpath && LocalTransactionIdIsValid(lxid))
4649 {
4651 LOCKTAG locktag;
4652
4653 vxid.procNumber = MyProcNumber;
4654 vxid.localTransactionId = lxid;
4655 SET_LOCKTAG_VIRTUALTRANSACTION(locktag, vxid);
4656
4658 &locktag, ExclusiveLock, false);
4659 }
4660}

References Assert, DEFAULT_LOCKMETHOD, ExclusiveLock, PGPROC::fpInfoLock, PGPROC::fpLocalTransactionId, PGPROC::fpVXIDLock, INVALID_PROC_NUMBER, InvalidLocalTransactionId, VirtualTransactionId::localTransactionId, LocalTransactionIdIsValid, LockMethods, LockRefindAndRelease(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), MyProc, MyProcNumber, VirtualTransactionId::procNumber, PGPROC::procNumber, SET_LOCKTAG_VIRTUALTRANSACTION, and PGPROC::vxid.

Referenced by LockReleaseAll(), and ShutdownRecoveryTransactionEnvironment().

◆ VirtualXactLockTableInsert()

◆ WaitOnLock()

static ProcWaitStatus WaitOnLock ( LOCALLOCK locallock,
ResourceOwner  owner 
)
static

Definition at line 1940 of file lock.c.

1941{
1944
1945 TRACE_POSTGRESQL_LOCK_WAIT_START(locallock->tag.lock.locktag_field1,
1946 locallock->tag.lock.locktag_field2,
1947 locallock->tag.lock.locktag_field3,
1948 locallock->tag.lock.locktag_field4,
1949 locallock->tag.lock.locktag_type,
1950 locallock->tag.mode);
1951
1952 /* Setup error traceback support for ereport() */
1957
1958 /* adjust the process title to indicate that it's waiting */
1959 set_ps_display_suffix("waiting");
1960
1961 /*
1962 * Record the fact that we are waiting for a lock, so that
1963 * LockErrorCleanup will clean up if cancel/die happens.
1964 */
1966 awaitedOwner = owner;
1967
1968 /*
1969 * NOTE: Think not to put any shared-state cleanup after the call to
1970 * ProcSleep, in either the normal or failure path. The lock state must
1971 * be fully set by the lock grantor, or by CheckDeadLock if we give up
1972 * waiting for the lock. This is necessary because of the possibility
1973 * that a cancel/die interrupt will interrupt ProcSleep after someone else
1974 * grants us the lock, but before we've noticed it. Hence, after granting,
1975 * the locktable state must fully reflect the fact that we own the lock;
1976 * we can't do additional work on return.
1977 *
1978 * We can and do use a PG_TRY block to try to clean up after failure, but
1979 * this still has a major limitation: elog(FATAL) can occur while waiting
1980 * (eg, a "die" interrupt), and then control won't come back here. So all
1981 * cleanup of essential state should happen in LockErrorCleanup, not here.
1982 * We can use PG_TRY to clear the "waiting" status flags, since doing that
1983 * is unimportant if the process exits.
1984 */
1985 PG_TRY();
1986 {
1988 }
1989 PG_CATCH();
1990 {
1991 /* In this path, awaitedLock remains set until LockErrorCleanup */
1992
1993 /* reset ps display to remove the suffix */
1995
1996 /* and propagate the error */
1997 PG_RE_THROW();
1998 }
1999 PG_END_TRY();
2000
2001 /*
2002 * We no longer want LockErrorCleanup to do anything.
2003 */
2004 awaitedLock = NULL;
2005
2006 /* reset ps display to remove the suffix */
2008
2010
2011 TRACE_POSTGRESQL_LOCK_WAIT_DONE(locallock->tag.lock.locktag_field1,
2012 locallock->tag.lock.locktag_field2,
2013 locallock->tag.lock.locktag_field3,
2014 locallock->tag.lock.locktag_field4,
2015 locallock->tag.lock.locktag_type,
2016 locallock->tag.mode);
2017
2018 return result;
2019}

References awaitedLock, awaitedOwner, error_context_stack, fb(), PG_CATCH, PG_END_TRY, PG_RE_THROW, PG_TRY, ErrorContextCallback::previous, ProcSleep(), result, set_ps_display_remove_suffix(), set_ps_display_suffix(), and waitonlock_error_callback().

Referenced by LockAcquireExtended().

◆ waitonlock_error_callback()

static void waitonlock_error_callback ( void arg)
static

Definition at line 2028 of file lock.c.

2029{
2031 const LOCKTAG *tag = &locallock->tag.lock;
2032 LOCKMODE mode = locallock->tag.mode;
2034
2037
2038 errcontext("waiting for %s on %s",
2040 locktagbuf.data);
2041}

References arg, DescribeLockTag(), errcontext, fb(), GetLockmodeName(), initStringInfo(), LOCKTAG::locktag_lockmethodid, and mode.

Referenced by WaitOnLock().

◆ XactLockForVirtualXact()

static bool XactLockForVirtualXact ( VirtualTransactionId  vxid,
TransactionId  xid,
bool  wait 
)
static

Definition at line 4674 of file lock.c.

4676{
4677 bool more = false;
4678
4679 /* There is no point to wait for 2PCs if you have no 2PCs. */
4680 if (max_prepared_xacts == 0)
4681 return true;
4682
4683 do
4684 {
4686 LOCKTAG tag;
4687
4688 /* Clear state from previous iterations. */
4689 if (more)
4690 {
4692 more = false;
4693 }
4694
4695 /* If we have no xid, try to find one. */
4696 if (!TransactionIdIsValid(xid))
4697 xid = TwoPhaseGetXidByVirtualXID(vxid, &more);
4698 if (!TransactionIdIsValid(xid))
4699 {
4700 Assert(!more);
4701 return true;
4702 }
4703
4704 /* Check or wait for XID completion. */
4705 SET_LOCKTAG_TRANSACTION(tag, xid);
4706 lar = LockAcquire(&tag, ShareLock, false, !wait);
4708 return false;
4709 LockRelease(&tag, ShareLock, false);
4710 } while (more);
4711
4712 return true;
4713}

References Assert, fb(), InvalidTransactionId, LockAcquire(), LOCKACQUIRE_NOT_AVAIL, LockRelease(), max_prepared_xacts, SET_LOCKTAG_TRANSACTION, ShareLock, TransactionIdIsValid, and TwoPhaseGetXidByVirtualXID().

Referenced by VirtualXactLock().

Variable Documentation

◆ awaitedLock

LOCALLOCK* awaitedLock
static

Definition at line 339 of file lock.c.

Referenced by GetAwaitedLock(), GrantAwaitedLock(), ResetAwaitedLock(), and WaitOnLock().

◆ awaitedOwner

ResourceOwner awaitedOwner
static

Definition at line 340 of file lock.c.

Referenced by GrantAwaitedLock(), and WaitOnLock().

◆ default_lockmethod

const LockMethodData default_lockmethod
static
Initial value:
= {
}
static bool Dummy_trace
Definition lock.c:125
static const char *const lock_mode_names[]
Definition lock.c:111
static const LOCKMASK LockConflicts[]
Definition lock.c:68

Definition at line 128 of file lock.c.

128 {
132#ifdef LOCK_DEBUG
134#else
136#endif
137};

◆ Dummy_trace

bool Dummy_trace = false
static

Definition at line 125 of file lock.c.

◆ FastPathLocalUseCounts

int FastPathLocalUseCounts[FP_LOCK_GROUPS_PER_BACKEND_MAX]
static

◆ FastPathLockGroupsPerBackend

int FastPathLockGroupsPerBackend = 0

◆ FastPathStrongRelationLocks

◆ lock_mode_names

const char* const lock_mode_names[]
static
Initial value:
=
{
"INVALID",
"AccessShareLock",
"RowShareLock",
"RowExclusiveLock",
"ShareUpdateExclusiveLock",
"ShareLock",
"ShareRowExclusiveLock",
"ExclusiveLock",
"AccessExclusiveLock"
}

Definition at line 111 of file lock.c.

112{
113 "INVALID",
114 "AccessShareLock",
115 "RowShareLock",
116 "RowExclusiveLock",
117 "ShareUpdateExclusiveLock",
118 "ShareLock",
119 "ShareRowExclusiveLock",
120 "ExclusiveLock",
121 "AccessExclusiveLock"
122};

◆ LockConflicts

const LOCKMASK LockConflicts[]
static

Definition at line 68 of file lock.c.

68 {
69 0,
70
71 /* AccessShareLock */
73
74 /* RowShareLock */
76
77 /* RowExclusiveLock */
80
81 /* ShareUpdateExclusiveLock */
85
86 /* ShareLock */
90
91 /* ShareRowExclusiveLock */
95
96 /* ExclusiveLock */
101
102 /* AccessExclusiveLock */
107
108};
#define ShareRowExclusiveLock
Definition lockdefs.h:41
#define AccessShareLock
Definition lockdefs.h:36
#define RowShareLock
Definition lockdefs.h:37

◆ LockManagerShmemCallbacks

const ShmemCallbacks LockManagerShmemCallbacks
Initial value:
= {
.request_fn = LockManagerShmemRequest,
}

Definition at line 320 of file lock.c.

320 {
321 .request_fn = LockManagerShmemRequest,
322 .init_fn = LockManagerShmemInit,
323};

◆ LockMethodLocalHash

◆ LockMethodLockHash

◆ LockMethodProcLockHash

◆ LockMethods

◆ log_lock_failures

bool log_lock_failures = false

Definition at line 57 of file lock.c.

Referenced by heap_acquire_tuplock(), heap_lock_tuple(), and heapam_tuple_lock().

◆ max_locks_per_xact

int max_locks_per_xact

◆ PG_USED_FOR_ASSERTS_ONLY

bool IsRelationExtensionLockHeld PG_USED_FOR_ASSERTS_ONLY = false
static

Definition at line 194 of file lock.c.

◆ StrongLockInProgress

LOCALLOCK* StrongLockInProgress
static

Definition at line 338 of file lock.c.

Referenced by AbortStrongLockAcquire(), BeginStrongLockAcquire(), and FinishStrongLockAcquire().

◆ user_lockmethod

const LockMethodData user_lockmethod
static
Initial value:

Definition at line 139 of file lock.c.

139 {
143#ifdef LOCK_DEBUG
145#else
147#endif
148};