PostgreSQL Source Code git master
All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Pages
lock.c File Reference
#include "postgres.h"
#include <signal.h>
#include <unistd.h>
#include "access/transam.h"
#include "access/twophase.h"
#include "access/twophase_rmgr.h"
#include "access/xlog.h"
#include "access/xlogutils.h"
#include "miscadmin.h"
#include "pg_trace.h"
#include "storage/lmgr.h"
#include "storage/proc.h"
#include "storage/procarray.h"
#include "storage/spin.h"
#include "storage/standby.h"
#include "utils/memutils.h"
#include "utils/ps_status.h"
#include "utils/resowner.h"
Include dependency graph for lock.c:

Go to the source code of this file.

Data Structures

struct  TwoPhaseLockRecord
 
struct  FastPathStrongRelationLockData
 

Macros

#define NLOCKENTS()    mul_size(max_locks_per_xact, add_size(MaxBackends, max_prepared_xacts))
 
#define FAST_PATH_REL_GROUP(rel)    (((uint64) (rel) * 49157) % FastPathLockGroupsPerBackend)
 
#define FAST_PATH_SLOT(group, index)
 
#define FAST_PATH_GROUP(index)
 
#define FAST_PATH_INDEX(index)
 
#define FAST_PATH_BITS_PER_SLOT   3
 
#define FAST_PATH_LOCKNUMBER_OFFSET   1
 
#define FAST_PATH_MASK   ((1 << FAST_PATH_BITS_PER_SLOT) - 1)
 
#define FAST_PATH_BITS(proc, n)   (proc)->fpLockBits[FAST_PATH_GROUP(n)]
 
#define FAST_PATH_GET_BITS(proc, n)    ((FAST_PATH_BITS(proc, n) >> (FAST_PATH_BITS_PER_SLOT * FAST_PATH_INDEX(n))) & FAST_PATH_MASK)
 
#define FAST_PATH_BIT_POSITION(n, l)
 
#define FAST_PATH_SET_LOCKMODE(proc, n, l)    FAST_PATH_BITS(proc, n) |= UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)
 
#define FAST_PATH_CLEAR_LOCKMODE(proc, n, l)    FAST_PATH_BITS(proc, n) &= ~(UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l))
 
#define FAST_PATH_CHECK_LOCKMODE(proc, n, l)    (FAST_PATH_BITS(proc, n) & (UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)))
 
#define EligibleForRelationFastPath(locktag, mode)
 
#define ConflictsWithRelationFastPath(locktag, mode)
 
#define FAST_PATH_STRONG_LOCK_HASH_BITS   10
 
#define FAST_PATH_STRONG_LOCK_HASH_PARTITIONS    (1 << FAST_PATH_STRONG_LOCK_HASH_BITS)
 
#define FastPathStrongLockHashPartition(hashcode)    ((hashcode) % FAST_PATH_STRONG_LOCK_HASH_PARTITIONS)
 
#define LOCK_PRINT(where, lock, type)   ((void) 0)
 
#define PROCLOCK_PRINT(where, proclockP)   ((void) 0)
 

Typedefs

typedef struct TwoPhaseLockRecord TwoPhaseLockRecord
 

Functions

static bool FastPathGrantRelationLock (Oid relid, LOCKMODE lockmode)
 
static bool FastPathUnGrantRelationLock (Oid relid, LOCKMODE lockmode)
 
static bool FastPathTransferRelationLocks (LockMethod lockMethodTable, const LOCKTAG *locktag, uint32 hashcode)
 
static PROCLOCK * FastPathGetRelationLockEntry (LOCALLOCK *locallock)
 
static uint32 proclock_hash (const void *key, Size keysize)
 
static void RemoveLocalLock (LOCALLOCK *locallock)
 
static PROCLOCK * SetupLockInTable (LockMethod lockMethodTable, PGPROC *proc, const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode)
 
static void GrantLockLocal (LOCALLOCK *locallock, ResourceOwner owner)
 
static void BeginStrongLockAcquire (LOCALLOCK *locallock, uint32 fasthashcode)
 
static void FinishStrongLockAcquire (void)
 
static ProcWaitStatus WaitOnLock (LOCALLOCK *locallock, ResourceOwner owner)
 
static void ReleaseLockIfHeld (LOCALLOCK *locallock, bool sessionLock)
 
static void LockReassignOwner (LOCALLOCK *locallock, ResourceOwner parent)
 
static bool UnGrantLock (LOCK *lock, LOCKMODE lockmode, PROCLOCK *proclock, LockMethod lockMethodTable)
 
static void CleanUpLock (LOCK *lock, PROCLOCK *proclock, LockMethod lockMethodTable, uint32 hashcode, bool wakeupNeeded)
 
static void LockRefindAndRelease (LockMethod lockMethodTable, PGPROC *proc, LOCKTAG *locktag, LOCKMODE lockmode, bool decrement_strong_lock_count)
 
static void GetSingleProcBlockerStatusData (PGPROC *blocked_proc, BlockedProcsData *data)
 
void LockManagerShmemInit (void)
 
void InitLockManagerAccess (void)
 
LockMethod GetLocksMethodTable (const LOCK *lock)
 
LockMethod GetLockTagsMethodTable (const LOCKTAG *locktag)
 
uint32 LockTagHashCode (const LOCKTAG *locktag)
 
static uint32 ProcLockHashCode (const PROCLOCKTAG *proclocktag, uint32 hashcode)
 
bool DoLockModesConflict (LOCKMODE mode1, LOCKMODE mode2)
 
bool LockHeldByMe (const LOCKTAG *locktag, LOCKMODE lockmode, bool orstronger)
 
bool LockHasWaiters (const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
 
LockAcquireResult LockAcquire (const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock, bool dontWait)
 
LockAcquireResult LockAcquireExtended (const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock, bool dontWait, bool reportMemoryError, LOCALLOCK **locallockp, bool logLockFailure)
 
static void CheckAndSetLockHeld (LOCALLOCK *locallock, bool acquired)
 
bool LockCheckConflicts (LockMethod lockMethodTable, LOCKMODE lockmode, LOCK *lock, PROCLOCK *proclock)
 
void GrantLock (LOCK *lock, PROCLOCK *proclock, LOCKMODE lockmode)
 
void AbortStrongLockAcquire (void)
 
void GrantAwaitedLock (void)
 
LOCALLOCK * GetAwaitedLock (void)
 
void ResetAwaitedLock (void)
 
void MarkLockClear (LOCALLOCK *locallock)
 
void RemoveFromWaitQueue (PGPROC *proc, uint32 hashcode)
 
bool LockRelease (const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
 
void LockReleaseAll (LOCKMETHODID lockmethodid, bool allLocks)
 
void LockReleaseSession (LOCKMETHODID lockmethodid)
 
void LockReleaseCurrentOwner (LOCALLOCK **locallocks, int nlocks)
 
void LockReassignCurrentOwner (LOCALLOCK **locallocks, int nlocks)
 
VirtualTransactionId * GetLockConflicts (const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
 
static void CheckForSessionAndXactLocks (void)
 
void AtPrepare_Locks (void)
 
void PostPrepare_Locks (TransactionId xid)
 
Size LockManagerShmemSize (void)
 
LockData * GetLockStatusData (void)
 
BlockedProcsData * GetBlockerStatusData (int blocked_pid)
 
xl_standby_lock * GetRunningTransactionLocks (int *nlocks)
 
const char * GetLockmodeName (LOCKMETHODID lockmethodid, LOCKMODE mode)
 
void lock_twophase_recover (TransactionId xid, uint16 info, void *recdata, uint32 len)
 
void lock_twophase_standby_recover (TransactionId xid, uint16 info, void *recdata, uint32 len)
 
void lock_twophase_postcommit (TransactionId xid, uint16 info, void *recdata, uint32 len)
 
void lock_twophase_postabort (TransactionId xid, uint16 info, void *recdata, uint32 len)
 
void VirtualXactLockTableInsert (VirtualTransactionId vxid)
 
void VirtualXactLockTableCleanup (void)
 
static bool XactLockForVirtualXact (VirtualTransactionId vxid, TransactionId xid, bool wait)
 
bool VirtualXactLock (VirtualTransactionId vxid, bool wait)
 
int LockWaiterCount (const LOCKTAG *locktag)
 

Variables

int max_locks_per_xact
 
bool log_lock_failure = false
 
static const LOCKMASK LockConflicts []
 
static const char *const lock_mode_names []
 
static bool Dummy_trace = false
 
static const LockMethodData default_lockmethod
 
static const LockMethodData user_lockmethod
 
static const LockMethod LockMethods []
 
static int FastPathLocalUseCounts [FP_LOCK_GROUPS_PER_BACKEND_MAX]
 
static bool IsRelationExtensionLockHeld PG_USED_FOR_ASSERTS_ONLY = false
 
int FastPathLockGroupsPerBackend = 0
 
static volatile FastPathStrongRelationLockData * FastPathStrongRelationLocks
 
static HTAB * LockMethodLockHash
 
static HTAB * LockMethodProcLockHash
 
static HTAB * LockMethodLocalHash
 
static LOCALLOCK * StrongLockInProgress
 
static LOCALLOCK * awaitedLock
 
static ResourceOwner awaitedOwner
 

Macro Definition Documentation

◆ ConflictsWithRelationFastPath

#define ConflictsWithRelationFastPath (   locktag,
  mode 
)
Value:
((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
(locktag)->locktag_type == LOCKTAG_RELATION && \
(locktag)->locktag_field1 != InvalidOid && \
#define DEFAULT_LOCKMETHOD
Definition: lock.h:126
@ LOCKTAG_RELATION
Definition: lock.h:138
#define ShareUpdateExclusiveLock
Definition: lockdefs.h:39
static PgChecksumMode mode
Definition: pg_checksums.c:55
#define InvalidOid
Definition: postgres_ext.h:35

Definition at line 270 of file lock.c.

◆ EligibleForRelationFastPath

#define EligibleForRelationFastPath (   locktag,
  mode 
)
Value:
((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
(locktag)->locktag_type == LOCKTAG_RELATION && \
(locktag)->locktag_field1 == MyDatabaseId && \
Oid MyDatabaseId
Definition: globals.c:95

Definition at line 264 of file lock.c.

◆ FAST_PATH_BIT_POSITION

#define FAST_PATH_BIT_POSITION (   n,
 
)
Value:
AssertMacro((n) < FastPathLockSlotsPerBackend()), \
#define AssertMacro(condition)
Definition: c.h:830
#define FAST_PATH_LOCKNUMBER_OFFSET
Definition: lock.c:239
#define FAST_PATH_INDEX(index)
Definition: lock.c:233
#define FAST_PATH_BITS_PER_SLOT
Definition: lock.c:238
#define FastPathLockSlotsPerBackend()
Definition: proc.h:85

Definition at line 244 of file lock.c.

◆ FAST_PATH_BITS

#define FAST_PATH_BITS (   proc,
 
)    (proc)->fpLockBits[FAST_PATH_GROUP(n)]

Definition at line 241 of file lock.c.

◆ FAST_PATH_BITS_PER_SLOT

#define FAST_PATH_BITS_PER_SLOT   3

Definition at line 238 of file lock.c.

◆ FAST_PATH_CHECK_LOCKMODE

#define FAST_PATH_CHECK_LOCKMODE (   proc,
  n,
 
)     (FAST_PATH_BITS(proc, n) & (UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)))

Definition at line 253 of file lock.c.

◆ FAST_PATH_CLEAR_LOCKMODE

#define FAST_PATH_CLEAR_LOCKMODE (   proc,
  n,
 
)     FAST_PATH_BITS(proc, n) &= ~(UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l))

Definition at line 251 of file lock.c.

◆ FAST_PATH_GET_BITS

#define FAST_PATH_GET_BITS (   proc,
 
)     ((FAST_PATH_BITS(proc, n) >> (FAST_PATH_BITS_PER_SLOT * FAST_PATH_INDEX(n))) & FAST_PATH_MASK)

Definition at line 242 of file lock.c.

◆ FAST_PATH_GROUP

#define FAST_PATH_GROUP (   index)
Value:
uint32_t uint32
Definition: c.h:502
#define FP_LOCK_SLOTS_PER_GROUP
Definition: proc.h:84
Definition: type.h:96

Definition at line 230 of file lock.c.

◆ FAST_PATH_INDEX

#define FAST_PATH_INDEX (   index)
Value:

Definition at line 233 of file lock.c.

◆ FAST_PATH_LOCKNUMBER_OFFSET

#define FAST_PATH_LOCKNUMBER_OFFSET   1

Definition at line 239 of file lock.c.

◆ FAST_PATH_MASK

#define FAST_PATH_MASK   ((1 << FAST_PATH_BITS_PER_SLOT) - 1)

Definition at line 240 of file lock.c.

◆ FAST_PATH_REL_GROUP

#define FAST_PATH_REL_GROUP (   rel)     (((uint64) (rel) * 49157) % FastPathLockGroupsPerBackend)

Definition at line 214 of file lock.c.

◆ FAST_PATH_SET_LOCKMODE

#define FAST_PATH_SET_LOCKMODE (   proc,
  n,
 
)     FAST_PATH_BITS(proc, n) |= UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)

Definition at line 249 of file lock.c.

◆ FAST_PATH_SLOT

#define FAST_PATH_SLOT (   group,
  index 
)
Value:
AssertMacro((uint32) (index) < FP_LOCK_SLOTS_PER_GROUP), \
((group) * FP_LOCK_SLOTS_PER_GROUP + (index)))
int FastPathLockGroupsPerBackend
Definition: lock.c:202

Definition at line 221 of file lock.c.

◆ FAST_PATH_STRONG_LOCK_HASH_BITS

#define FAST_PATH_STRONG_LOCK_HASH_BITS   10

Definition at line 297 of file lock.c.

◆ FAST_PATH_STRONG_LOCK_HASH_PARTITIONS

#define FAST_PATH_STRONG_LOCK_HASH_PARTITIONS    (1 << FAST_PATH_STRONG_LOCK_HASH_BITS)

Definition at line 298 of file lock.c.

◆ FastPathStrongLockHashPartition

#define FastPathStrongLockHashPartition (   hashcode)     ((hashcode) % FAST_PATH_STRONG_LOCK_HASH_PARTITIONS)

Definition at line 300 of file lock.c.

◆ LOCK_PRINT

#define LOCK_PRINT (   where,
  lock,
  type 
)    ((void) 0)

Definition at line 402 of file lock.c.

◆ NLOCKENTS

Definition at line 56 of file lock.c.

◆ PROCLOCK_PRINT

#define PROCLOCK_PRINT (   where,
  proclockP 
)    ((void) 0)

Definition at line 403 of file lock.c.

Typedef Documentation

◆ TwoPhaseLockRecord

Function Documentation

◆ AbortStrongLockAcquire()

void AbortStrongLockAcquire ( void  )

Definition at line 1856 of file lock.c.

1857{
1858 uint32 fasthashcode;
1859 LOCALLOCK *locallock = StrongLockInProgress;
1860
1861 if (locallock == NULL)
1862 return;
1863
1864 fasthashcode = FastPathStrongLockHashPartition(locallock->hashcode);
1865 Assert(locallock->holdsStrongLockCount == true);
1867 Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
1868 FastPathStrongRelationLocks->count[fasthashcode]--;
1869 locallock->holdsStrongLockCount = false;
1870 StrongLockInProgress = NULL;
1872}
Assert(PointerIsAligned(start, uint64))
#define FastPathStrongLockHashPartition(hashcode)
Definition: lock.c:300
static volatile FastPathStrongRelationLockData * FastPathStrongRelationLocks
Definition: lock.c:309
static LOCALLOCK * StrongLockInProgress
Definition: lock.c:324
#define SpinLockRelease(lock)
Definition: spin.h:61
#define SpinLockAcquire(lock)
Definition: spin.h:59
uint32 count[FAST_PATH_STRONG_LOCK_HASH_PARTITIONS]
Definition: lock.c:306
uint32 hashcode
Definition: lock.h:433
bool holdsStrongLockCount
Definition: lock.h:440

References Assert(), FastPathStrongRelationLockData::count, FastPathStrongLockHashPartition, FastPathStrongRelationLocks, LOCALLOCK::hashcode, LOCALLOCK::holdsStrongLockCount, FastPathStrongRelationLockData::mutex, SpinLockAcquire, SpinLockRelease, and StrongLockInProgress.

Referenced by LockAcquireExtended(), and LockErrorCleanup().

◆ AtPrepare_Locks()

void AtPrepare_Locks ( void  )

Definition at line 3443 of file lock.c.

3444{
3445 HASH_SEQ_STATUS status;
3446 LOCALLOCK *locallock;
3447
3448 /* First, verify there aren't locks of both xact and session level */
3450
3451 /* Now do the per-locallock cleanup work */
3453
3454 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3455 {
3456 TwoPhaseLockRecord record;
3457 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3458 bool haveSessionLock;
3459 bool haveXactLock;
3460 int i;
3461
3462 /*
3463 * Ignore VXID locks. We don't want those to be held by prepared
3464 * transactions, since they aren't meaningful after a restart.
3465 */
3467 continue;
3468
3469 /* Ignore it if we don't actually hold the lock */
3470 if (locallock->nLocks <= 0)
3471 continue;
3472
3473 /* Scan to see whether we hold it at session or transaction level */
3474 haveSessionLock = haveXactLock = false;
3475 for (i = locallock->numLockOwners - 1; i >= 0; i--)
3476 {
3477 if (lockOwners[i].owner == NULL)
3478 haveSessionLock = true;
3479 else
3480 haveXactLock = true;
3481 }
3482
3483 /* Ignore it if we have only session lock */
3484 if (!haveXactLock)
3485 continue;
3486
3487 /* This can't happen, because we already checked it */
3488 if (haveSessionLock)
3489 ereport(ERROR,
3490 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3491 errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3492
3493 /*
3494 * If the local lock was taken via the fast-path, we need to move it
3495 * to the primary lock table, or just get a pointer to the existing
3496 * primary lock table entry if by chance it's already been
3497 * transferred.
3498 */
3499 if (locallock->proclock == NULL)
3500 {
3501 locallock->proclock = FastPathGetRelationLockEntry(locallock);
3502 locallock->lock = locallock->proclock->tag.myLock;
3503 }
3504
3505 /*
3506 * Arrange to not release any strong lock count held by this lock
3507 * entry. We must retain the count until the prepared transaction is
3508 * committed or rolled back.
3509 */
3510 locallock->holdsStrongLockCount = false;
3511
3512 /*
3513 * Create a 2PC record.
3514 */
3515 memcpy(&(record.locktag), &(locallock->tag.lock), sizeof(LOCKTAG));
3516 record.lockmode = locallock->tag.mode;
3517
3519 &record, sizeof(TwoPhaseLockRecord));
3520 }
3521}
void * hash_seq_search(HASH_SEQ_STATUS *status)
Definition: dynahash.c:1420
void hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp)
Definition: dynahash.c:1385
int errcode(int sqlerrcode)
Definition: elog.c:854
int errmsg(const char *fmt,...)
Definition: elog.c:1071
#define ERROR
Definition: elog.h:39
#define ereport(elevel,...)
Definition: elog.h:149
int i
Definition: isn.c:77
static PROCLOCK * FastPathGetRelationLockEntry(LOCALLOCK *locallock)
Definition: lock.c:2923
static HTAB * LockMethodLocalHash
Definition: lock.c:320
static void CheckForSessionAndXactLocks(void)
Definition: lock.c:3355
@ LOCKTAG_VIRTUALTRANSACTION
Definition: lock.h:144
LOCKTAG lock
Definition: lock.h:411
LOCKMODE mode
Definition: lock.h:412
LOCALLOCKOWNER * lockOwners
Definition: lock.h:439
LOCK * lock
Definition: lock.h:434
int64 nLocks
Definition: lock.h:436
int numLockOwners
Definition: lock.h:437
PROCLOCK * proclock
Definition: lock.h:435
LOCALLOCKTAG tag
Definition: lock.h:430
Definition: lock.h:166
uint8 locktag_type
Definition: lock.h:171
LOCK * myLock
Definition: lock.h:366
PROCLOCKTAG tag
Definition: lock.h:373
LOCKTAG locktag
Definition: lock.c:160
LOCKMODE lockmode
Definition: lock.c:161
void RegisterTwoPhaseRecord(TwoPhaseRmgrId rmid, uint16 info, const void *data, uint32 len)
Definition: twophase.c:1264
#define TWOPHASE_RM_LOCK_ID
Definition: twophase_rmgr.h:25

References CheckForSessionAndXactLocks(), ereport, errcode(), errmsg(), ERROR, FastPathGetRelationLockEntry(), hash_seq_init(), hash_seq_search(), LOCALLOCK::holdsStrongLockCount, i, LOCALLOCKTAG::lock, LOCALLOCK::lock, LockMethodLocalHash, TwoPhaseLockRecord::lockmode, LOCALLOCK::lockOwners, TwoPhaseLockRecord::locktag, LOCKTAG::locktag_type, LOCKTAG_VIRTUALTRANSACTION, LOCALLOCKTAG::mode, PROCLOCKTAG::myLock, LOCALLOCK::nLocks, LOCALLOCK::numLockOwners, LOCALLOCK::proclock, RegisterTwoPhaseRecord(), PROCLOCK::tag, LOCALLOCK::tag, and TWOPHASE_RM_LOCK_ID.

Referenced by PrepareTransaction().

◆ BeginStrongLockAcquire()

static void BeginStrongLockAcquire ( LOCALLOCK locallock,
uint32  fasthashcode 
)
static

Definition at line 1820 of file lock.c.

1821{
1823 Assert(locallock->holdsStrongLockCount == false);
1824
1825 /*
1826 * Adding to a memory location is not atomic, so we take a spinlock to
1827 * ensure we don't collide with someone else trying to bump the count at
1828 * the same time.
1829 *
1830 * XXX: It might be worth considering using an atomic fetch-and-add
1831 * instruction here, on architectures where that is supported.
1832 */
1833
1835 FastPathStrongRelationLocks->count[fasthashcode]++;
1836 locallock->holdsStrongLockCount = true;
1837 StrongLockInProgress = locallock;
1839}

References Assert(), FastPathStrongRelationLockData::count, FastPathStrongRelationLocks, LOCALLOCK::holdsStrongLockCount, FastPathStrongRelationLockData::mutex, SpinLockAcquire, SpinLockRelease, and StrongLockInProgress.

Referenced by LockAcquireExtended().

◆ CheckAndSetLockHeld()

static void CheckAndSetLockHeld ( LOCALLOCK locallock,
bool  acquired 
)
inlinestatic

Definition at line 1460 of file lock.c.

1461{
1462#ifdef USE_ASSERT_CHECKING
1463 if (LOCALLOCK_LOCKTAG(*locallock) == LOCKTAG_RELATION_EXTEND)
1464 IsRelationExtensionLockHeld = acquired;
1465#endif
1466}
@ LOCKTAG_RELATION_EXTEND
Definition: lock.h:139
#define LOCALLOCK_LOCKTAG(llock)
Definition: lock.h:445

References LOCALLOCK_LOCKTAG, and LOCKTAG_RELATION_EXTEND.

Referenced by GrantLockLocal(), and RemoveLocalLock().

◆ CheckForSessionAndXactLocks()

static void CheckForSessionAndXactLocks ( void  )
static

Definition at line 3355 of file lock.c.

3356{
3357 typedef struct
3358 {
3359 LOCKTAG lock; /* identifies the lockable object */
3360 bool sessLock; /* is any lockmode held at session level? */
3361 bool xactLock; /* is any lockmode held at xact level? */
3362 } PerLockTagEntry;
3363
3364 HASHCTL hash_ctl;
3365 HTAB *lockhtab;
3366 HASH_SEQ_STATUS status;
3367 LOCALLOCK *locallock;
3368
3369 /* Create a local hash table keyed by LOCKTAG only */
3370 hash_ctl.keysize = sizeof(LOCKTAG);
3371 hash_ctl.entrysize = sizeof(PerLockTagEntry);
3372 hash_ctl.hcxt = CurrentMemoryContext;
3373
3374 lockhtab = hash_create("CheckForSessionAndXactLocks table",
3375 256, /* arbitrary initial size */
3376 &hash_ctl,
3378
3379 /* Scan local lock table to find entries for each LOCKTAG */
3381
3382 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3383 {
3384 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3385 PerLockTagEntry *hentry;
3386 bool found;
3387 int i;
3388
3389 /*
3390 * Ignore VXID locks. We don't want those to be held by prepared
3391 * transactions, since they aren't meaningful after a restart.
3392 */
3394 continue;
3395
3396 /* Ignore it if we don't actually hold the lock */
3397 if (locallock->nLocks <= 0)
3398 continue;
3399
3400 /* Otherwise, find or make an entry in lockhtab */
3401 hentry = (PerLockTagEntry *) hash_search(lockhtab,
3402 &locallock->tag.lock,
3403 HASH_ENTER, &found);
3404 if (!found) /* initialize, if newly created */
3405 hentry->sessLock = hentry->xactLock = false;
3406
3407 /* Scan to see if we hold lock at session or xact level or both */
3408 for (i = locallock->numLockOwners - 1; i >= 0; i--)
3409 {
3410 if (lockOwners[i].owner == NULL)
3411 hentry->sessLock = true;
3412 else
3413 hentry->xactLock = true;
3414 }
3415
3416 /*
3417 * We can throw error immediately when we see both types of locks; no
3418 * need to wait around to see if there are more violations.
3419 */
3420 if (hentry->sessLock && hentry->xactLock)
3421 ereport(ERROR,
3422 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3423 errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3424 }
3425
3426 /* Success, so clean up */
3427 hash_destroy(lockhtab);
3428}
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:955
void hash_destroy(HTAB *hashp)
Definition: dynahash.c:865
HTAB * hash_create(const char *tabname, long nelem, const HASHCTL *info, int flags)
Definition: dynahash.c:352
@ HASH_ENTER
Definition: hsearch.h:114
#define HASH_CONTEXT
Definition: hsearch.h:102
#define HASH_ELEM
Definition: hsearch.h:95
#define HASH_BLOBS
Definition: hsearch.h:97
struct LOCKTAG LOCKTAG
MemoryContext CurrentMemoryContext
Definition: mcxt.c:159
Size keysize
Definition: hsearch.h:75
Size entrysize
Definition: hsearch.h:76
MemoryContext hcxt
Definition: hsearch.h:86
Definition: dynahash.c:220

References CurrentMemoryContext, HASHCTL::entrysize, ereport, errcode(), errmsg(), ERROR, HASH_BLOBS, HASH_CONTEXT, hash_create(), hash_destroy(), HASH_ELEM, HASH_ENTER, hash_search(), hash_seq_init(), hash_seq_search(), HASHCTL::hcxt, i, HASHCTL::keysize, LOCALLOCKTAG::lock, LockMethodLocalHash, LOCALLOCK::lockOwners, LOCKTAG::locktag_type, LOCKTAG_VIRTUALTRANSACTION, LOCALLOCK::nLocks, LOCALLOCK::numLockOwners, and LOCALLOCK::tag.

Referenced by AtPrepare_Locks().

◆ CleanUpLock()

static void CleanUpLock ( LOCK lock,
PROCLOCK proclock,
LockMethod  lockMethodTable,
uint32  hashcode,
bool  wakeupNeeded 
)
static

Definition at line 1734 of file lock.c.

1737{
1738 /*
1739 * If this was my last hold on this lock, delete my entry in the proclock
1740 * table.
1741 */
1742 if (proclock->holdMask == 0)
1743 {
1744 uint32 proclock_hashcode;
1745
1746 PROCLOCK_PRINT("CleanUpLock: deleting", proclock);
1747 dlist_delete(&proclock->lockLink);
1748 dlist_delete(&proclock->procLink);
1749 proclock_hashcode = ProcLockHashCode(&proclock->tag, hashcode);
1751 &(proclock->tag),
1752 proclock_hashcode,
1754 NULL))
1755 elog(PANIC, "proclock table corrupted");
1756 }
1757
1758 if (lock->nRequested == 0)
1759 {
1760 /*
1761 * The caller just released the last lock, so garbage-collect the lock
1762 * object.
1763 */
1764 LOCK_PRINT("CleanUpLock: deleting", lock, 0);
1767 &(lock->tag),
1768 hashcode,
1770 NULL))
1771 elog(PANIC, "lock table corrupted");
1772 }
1773 else if (wakeupNeeded)
1774 {
1775 /* There are waiters on this lock, so wake them up. */
1776 ProcLockWakeup(lockMethodTable, lock);
1777 }
1778}
void * hash_search_with_hash_value(HTAB *hashp, const void *keyPtr, uint32 hashvalue, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:968
#define PANIC
Definition: elog.h:42
#define elog(elevel,...)
Definition: elog.h:226
@ HASH_REMOVE
Definition: hsearch.h:115
static void dlist_delete(dlist_node *node)
Definition: ilist.h:405
static bool dlist_is_empty(const dlist_head *head)
Definition: ilist.h:336
#define LOCK_PRINT(where, lock, type)
Definition: lock.c:402
static uint32 ProcLockHashCode(const PROCLOCKTAG *proclocktag, uint32 hashcode)
Definition: lock.c:601
#define PROCLOCK_PRINT(where, proclockP)
Definition: lock.c:403
static HTAB * LockMethodLockHash
Definition: lock.c:318
static HTAB * LockMethodProcLockHash
Definition: lock.c:319
void ProcLockWakeup(LockMethod lockMethodTable, LOCK *lock)
Definition: proc.c:1740
int nRequested
Definition: lock.h:320
LOCKTAG tag
Definition: lock.h:312
dlist_head procLocks
Definition: lock.h:317
LOCKMASK holdMask
Definition: lock.h:377
dlist_node lockLink
Definition: lock.h:379
dlist_node procLink
Definition: lock.h:380

References Assert(), dlist_delete(), dlist_is_empty(), elog, HASH_REMOVE, hash_search_with_hash_value(), PROCLOCK::holdMask, LOCK_PRINT, PROCLOCK::lockLink, LockMethodLockHash, LockMethodProcLockHash, LOCK::nRequested, PANIC, PROCLOCK::procLink, PROCLOCK_PRINT, ProcLockHashCode(), LOCK::procLocks, ProcLockWakeup(), LOCK::tag, and PROCLOCK::tag.

Referenced by LockRefindAndRelease(), LockRelease(), LockReleaseAll(), and RemoveFromWaitQueue().

◆ DoLockModesConflict()

bool DoLockModesConflict ( LOCKMODE  mode1,
LOCKMODE  mode2 
)

Definition at line 619 of file lock.c.

620{
621 LockMethod lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];
622
623 if (lockMethodTable->conflictTab[mode1] & LOCKBIT_ON(mode2))
624 return true;
625
626 return false;
627}
static const LockMethod LockMethods[]
Definition: lock.c:150
#define LOCKBIT_ON(lockmode)
Definition: lock.h:85
const LOCKMASK * conflictTab
Definition: lock.h:112

References LockMethodData::conflictTab, DEFAULT_LOCKMETHOD, LOCKBIT_ON, and LockMethods.

Referenced by Do_MultiXactIdWait(), DoesMultiXactIdConflict(), initialize_reloptions(), and test_lockmode_for_conflict().

◆ FastPathGetRelationLockEntry()

static PROCLOCK * FastPathGetRelationLockEntry ( LOCALLOCK locallock)
static

Definition at line 2923 of file lock.c.

2924{
2925 LockMethod lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];
2926 LOCKTAG *locktag = &locallock->tag.lock;
2927 PROCLOCK *proclock = NULL;
2928 LWLock *partitionLock = LockHashPartitionLock(locallock->hashcode);
2929 Oid relid = locktag->locktag_field2;
2930 uint32 i,
2931 group;
2932
2933 /* fast-path group the lock belongs to */
2934 group = FAST_PATH_REL_GROUP(relid);
2935
2937
2938 for (i = 0; i < FP_LOCK_SLOTS_PER_GROUP; i++)
2939 {
2940 uint32 lockmode;
2941
2942 /* index into the whole per-backend array */
2943 uint32 f = FAST_PATH_SLOT(group, i);
2944
2945 /* Look for an allocated slot matching the given relid. */
2946 if (relid != MyProc->fpRelId[f] || FAST_PATH_GET_BITS(MyProc, f) == 0)
2947 continue;
2948
2949 /* If we don't have a lock of the given mode, forget it! */
2950 lockmode = locallock->tag.mode;
2951 if (!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
2952 break;
2953
2954 /* Find or create lock object. */
2955 LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2956
2957 proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
2958 locallock->hashcode, lockmode);
2959 if (!proclock)
2960 {
2961 LWLockRelease(partitionLock);
2963 ereport(ERROR,
2964 (errcode(ERRCODE_OUT_OF_MEMORY),
2965 errmsg("out of shared memory"),
2966 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
2967 }
2968 GrantLock(proclock->tag.myLock, proclock, lockmode);
2969 FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
2970
2971 LWLockRelease(partitionLock);
2972
2973 /* No need to examine remaining slots. */
2974 break;
2975 }
2976
2978
2979 /* Lock may have already been transferred by some other backend. */
2980 if (proclock == NULL)
2981 {
2982 LOCK *lock;
2983 PROCLOCKTAG proclocktag;
2984 uint32 proclock_hashcode;
2985
2986 LWLockAcquire(partitionLock, LW_SHARED);
2987
2989 locktag,
2990 locallock->hashcode,
2991 HASH_FIND,
2992 NULL);
2993 if (!lock)
2994 elog(ERROR, "failed to re-find shared lock object");
2995
2996 proclocktag.myLock = lock;
2997 proclocktag.myProc = MyProc;
2998
2999 proclock_hashcode = ProcLockHashCode(&proclocktag, locallock->hashcode);
3000 proclock = (PROCLOCK *)
3002 &proclocktag,
3003 proclock_hashcode,
3004 HASH_FIND,
3005 NULL);
3006 if (!proclock)
3007 elog(ERROR, "failed to re-find shared proclock object");
3008 LWLockRelease(partitionLock);
3009 }
3010
3011 return proclock;
3012}
int errhint(const char *fmt,...)
Definition: elog.c:1318
@ HASH_FIND
Definition: hsearch.h:113
static PROCLOCK * SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc, const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode)
Definition: lock.c:1279
#define FAST_PATH_CHECK_LOCKMODE(proc, n, l)
Definition: lock.c:253
#define FAST_PATH_REL_GROUP(rel)
Definition: lock.c:214
void GrantLock(LOCK *lock, PROCLOCK *proclock, LOCKMODE lockmode)
Definition: lock.c:1654
#define FAST_PATH_SLOT(group, index)
Definition: lock.c:221
#define FAST_PATH_CLEAR_LOCKMODE(proc, n, l)
Definition: lock.c:251
#define FAST_PATH_GET_BITS(proc, n)
Definition: lock.c:242
#define LockHashPartitionLock(hashcode)
Definition: lock.h:527
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1182
void LWLockRelease(LWLock *lock)
Definition: lwlock.c:1902
@ LW_SHARED
Definition: lwlock.h:115
@ LW_EXCLUSIVE
Definition: lwlock.h:114
unsigned int Oid
Definition: postgres_ext.h:30
PGPROC * MyProc
Definition: proc.c:67
uint32 locktag_field2
Definition: lock.h:168
Definition: lock.h:310
Definition: lwlock.h:42
LWLock fpInfoLock
Definition: proc.h:294
Oid * fpRelId
Definition: proc.h:296
PGPROC * myProc
Definition: lock.h:367
Definition: lock.h:371

References DEFAULT_LOCKMETHOD, elog, ereport, errcode(), errhint(), errmsg(), ERROR, FAST_PATH_CHECK_LOCKMODE, FAST_PATH_CLEAR_LOCKMODE, FAST_PATH_GET_BITS, FAST_PATH_REL_GROUP, FAST_PATH_SLOT, FP_LOCK_SLOTS_PER_GROUP, PGPROC::fpInfoLock, PGPROC::fpRelId, GrantLock(), HASH_FIND, hash_search_with_hash_value(), LOCALLOCK::hashcode, i, LOCALLOCKTAG::lock, LockHashPartitionLock, LockMethodLockHash, LockMethodProcLockHash, LockMethods, LOCKTAG::locktag_field2, LW_EXCLUSIVE, LW_SHARED, LWLockAcquire(), LWLockRelease(), LOCALLOCKTAG::mode, PROCLOCKTAG::myLock, MyProc, PROCLOCKTAG::myProc, ProcLockHashCode(), SetupLockInTable(), PROCLOCK::tag, and LOCALLOCK::tag.

Referenced by AtPrepare_Locks().

◆ FastPathGrantRelationLock()

static bool FastPathGrantRelationLock ( Oid  relid,
LOCKMODE  lockmode 
)
static

Definition at line 2747 of file lock.c.

2748{
2749 uint32 i;
2750 uint32 unused_slot = FastPathLockSlotsPerBackend();
2751
2752 /* fast-path group the lock belongs to */
2753 uint32 group = FAST_PATH_REL_GROUP(relid);
2754
2755 /* Scan for existing entry for this relid, remembering empty slot. */
2756 for (i = 0; i < FP_LOCK_SLOTS_PER_GROUP; i++)
2757 {
2758 /* index into the whole per-backend array */
2759 uint32 f = FAST_PATH_SLOT(group, i);
2760
2761 if (FAST_PATH_GET_BITS(MyProc, f) == 0)
2762 unused_slot = f;
2763 else if (MyProc->fpRelId[f] == relid)
2764 {
2765 Assert(!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode));
2766 FAST_PATH_SET_LOCKMODE(MyProc, f, lockmode);
2767 return true;
2768 }
2769 }
2770
2771 /* If no existing entry, use any empty slot. */
2772 if (unused_slot < FastPathLockSlotsPerBackend())
2773 {
2774 MyProc->fpRelId[unused_slot] = relid;
2775 FAST_PATH_SET_LOCKMODE(MyProc, unused_slot, lockmode);
2776 ++FastPathLocalUseCounts[group];
2777 return true;
2778 }
2779
2780 /* No existing entry, and no empty slot. */
2781 return false;
2782}
#define FAST_PATH_SET_LOCKMODE(proc, n, l)
Definition: lock.c:249
static int FastPathLocalUseCounts[FP_LOCK_GROUPS_PER_BACKEND_MAX]
Definition: lock.c:176

References Assert(), FAST_PATH_CHECK_LOCKMODE, FAST_PATH_GET_BITS, FAST_PATH_REL_GROUP, FAST_PATH_SET_LOCKMODE, FAST_PATH_SLOT, FastPathLocalUseCounts, FastPathLockSlotsPerBackend, FP_LOCK_SLOTS_PER_GROUP, PGPROC::fpRelId, i, and MyProc.

Referenced by LockAcquireExtended().

◆ FastPathTransferRelationLocks()

static bool FastPathTransferRelationLocks ( LockMethod  lockMethodTable,
const LOCKTAG locktag,
uint32  hashcode 
)
static

Definition at line 2826 of file lock.c.

2828{
2829 LWLock *partitionLock = LockHashPartitionLock(hashcode);
2830 Oid relid = locktag->locktag_field2;
2831 uint32 i;
2832
2833 /* fast-path group the lock belongs to */
2834 uint32 group = FAST_PATH_REL_GROUP(relid);
2835
2836 /*
2837 * Every PGPROC that can potentially hold a fast-path lock is present in
2838 * ProcGlobal->allProcs. Prepared transactions are not, but any
2839 * outstanding fast-path locks held by prepared transactions are
2840 * transferred to the main lock table.
2841 */
2842 for (i = 0; i < ProcGlobal->allProcCount; i++)
2843 {
2844 PGPROC *proc = &ProcGlobal->allProcs[i];
2845 uint32 j;
2846
2848
2849 /*
2850 * If the target backend isn't referencing the same database as the
2851 * lock, then we needn't examine the individual relation IDs at all;
2852 * none of them can be relevant.
2853 *
2854 * proc->databaseId is set at backend startup time and never changes
2855 * thereafter, so it might be safe to perform this test before
2856 * acquiring &proc->fpInfoLock. In particular, it's certainly safe to
2857 * assume that if the target backend holds any fast-path locks, it
2858 * must have performed a memory-fencing operation (in particular, an
2859 * LWLock acquisition) since setting proc->databaseId. However, it's
2860 * less clear that our backend is certain to have performed a memory
2861 * fencing operation since the other backend set proc->databaseId. So
2862 * for now, we test it after acquiring the LWLock just to be safe.
2863 *
2864 * Also skip groups without any registered fast-path locks.
2865 */
2866 if (proc->databaseId != locktag->locktag_field1 ||
2867 proc->fpLockBits[group] == 0)
2868 {
2869 LWLockRelease(&proc->fpInfoLock);
2870 continue;
2871 }
2872
2873 for (j = 0; j < FP_LOCK_SLOTS_PER_GROUP; j++)
2874 {
2875 uint32 lockmode;
2876
2877 /* index into the whole per-backend array */
2878 uint32 f = FAST_PATH_SLOT(group, j);
2879
2880 /* Look for an allocated slot matching the given relid. */
2881 if (relid != proc->fpRelId[f] || FAST_PATH_GET_BITS(proc, f) == 0)
2882 continue;
2883
2884 /* Find or create lock object. */
2885 LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2886 for (lockmode = FAST_PATH_LOCKNUMBER_OFFSET;
2888 ++lockmode)
2889 {
2890 PROCLOCK *proclock;
2891
2892 if (!FAST_PATH_CHECK_LOCKMODE(proc, f, lockmode))
2893 continue;
2894 proclock = SetupLockInTable(lockMethodTable, proc, locktag,
2895 hashcode, lockmode);
2896 if (!proclock)
2897 {
2898 LWLockRelease(partitionLock);
2899 LWLockRelease(&proc->fpInfoLock);
2900 return false;
2901 }
2902 GrantLock(proclock->tag.myLock, proclock, lockmode);
2903 FAST_PATH_CLEAR_LOCKMODE(proc, f, lockmode);
2904 }
2905 LWLockRelease(partitionLock);
2906
2907 /* No need to examine remaining slots. */
2908 break;
2909 }
2910 LWLockRelease(&proc->fpInfoLock);
2911 }
2912 return true;
2913}
int j
Definition: isn.c:78
PROC_HDR * ProcGlobal
Definition: proc.c:79
uint32 locktag_field1
Definition: lock.h:167
Definition: proc.h:163
Oid databaseId
Definition: proc.h:208
uint64 * fpLockBits
Definition: proc.h:295
PGPROC * allProcs
Definition: proc.h:372
uint32 allProcCount
Definition: proc.h:390

References PROC_HDR::allProcCount, PROC_HDR::allProcs, PGPROC::databaseId, FAST_PATH_BITS_PER_SLOT, FAST_PATH_CHECK_LOCKMODE, FAST_PATH_CLEAR_LOCKMODE, FAST_PATH_GET_BITS, FAST_PATH_LOCKNUMBER_OFFSET, FAST_PATH_REL_GROUP, FAST_PATH_SLOT, FP_LOCK_SLOTS_PER_GROUP, PGPROC::fpInfoLock, PGPROC::fpLockBits, PGPROC::fpRelId, GrantLock(), i, j, LockHashPartitionLock, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), PROCLOCKTAG::myLock, ProcGlobal, SetupLockInTable(), and PROCLOCK::tag.

Referenced by LockAcquireExtended().

◆ FastPathUnGrantRelationLock()

static bool FastPathUnGrantRelationLock ( Oid  relid,
LOCKMODE  lockmode 
)
static

Definition at line 2790 of file lock.c.

2791{
2792 uint32 i;
2793 bool result = false;
2794
2795 /* fast-path group the lock belongs to */
2796 uint32 group = FAST_PATH_REL_GROUP(relid);
2797
2798 FastPathLocalUseCounts[group] = 0;
2799 for (i = 0; i < FP_LOCK_SLOTS_PER_GROUP; i++)
2800 {
2801 /* index into the whole per-backend array */
2802 uint32 f = FAST_PATH_SLOT(group, i);
2803
2804 if (MyProc->fpRelId[f] == relid
2805 && FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
2806 {
2807 Assert(!result);
2808 FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
2809 result = true;
2810 /* we continue iterating so as to update FastPathLocalUseCount */
2811 }
2812 if (FAST_PATH_GET_BITS(MyProc, f) != 0)
2813 ++FastPathLocalUseCounts[group];
2814 }
2815 return result;
2816}

References Assert(), FAST_PATH_CHECK_LOCKMODE, FAST_PATH_CLEAR_LOCKMODE, FAST_PATH_GET_BITS, FAST_PATH_REL_GROUP, FAST_PATH_SLOT, FastPathLocalUseCounts, FP_LOCK_SLOTS_PER_GROUP, PGPROC::fpRelId, i, and MyProc.

Referenced by LockRelease(), and LockReleaseAll().

◆ FinishStrongLockAcquire()

static void FinishStrongLockAcquire ( void  )
static

Definition at line 1846 of file lock.c.

1847{
1848 StrongLockInProgress = NULL;
1849}

References StrongLockInProgress.

Referenced by LockAcquireExtended().

◆ GetAwaitedLock()

LOCALLOCK * GetAwaitedLock ( void  )

Definition at line 1894 of file lock.c.

1895{
1896 return awaitedLock;
1897}
static LOCALLOCK * awaitedLock
Definition: lock.c:325

References awaitedLock.

Referenced by LockErrorCleanup(), ProcessRecoveryConflictInterrupt(), and ProcSleep().

◆ GetBlockerStatusData()

BlockedProcsData * GetBlockerStatusData ( int  blocked_pid)

Definition at line 3963 of file lock.c.

3964{
3966 PGPROC *proc;
3967 int i;
3968
3970
3971 /*
3972 * Guess how much space we'll need, and preallocate. Most of the time
3973 * this will avoid needing to do repalloc while holding the LWLocks. (We
3974 * assume, but check with an Assert, that MaxBackends is enough entries
3975 * for the procs[] array; the other two could need enlargement, though.)
3976 */
3977 data->nprocs = data->nlocks = data->npids = 0;
3978 data->maxprocs = data->maxlocks = data->maxpids = MaxBackends;
3979 data->procs = (BlockedProcData *) palloc(sizeof(BlockedProcData) * data->maxprocs);
3980 data->locks = (LockInstanceData *) palloc(sizeof(LockInstanceData) * data->maxlocks);
3981 data->waiter_pids = (int *) palloc(sizeof(int) * data->maxpids);
3982
3983 /*
3984 * In order to search the ProcArray for blocked_pid and assume that that
3985 * entry won't immediately disappear under us, we must hold ProcArrayLock.
3986 * In addition, to examine the lock grouping fields of any other backend,
3987 * we must hold all the hash partition locks. (Only one of those locks is
3988 * actually relevant for any one lock group, but we can't know which one
3989 * ahead of time.) It's fairly annoying to hold all those locks
3990 * throughout this, but it's no worse than GetLockStatusData(), and it
3991 * does have the advantage that we're guaranteed to return a
3992 * self-consistent instantaneous state.
3993 */
3994 LWLockAcquire(ProcArrayLock, LW_SHARED);
3995
3996 proc = BackendPidGetProcWithLock(blocked_pid);
3997
3998 /* Nothing to do if it's gone */
3999 if (proc != NULL)
4000 {
4001 /*
4002 * Acquire lock on the entire shared lock data structure. See notes
4003 * in GetLockStatusData().
4004 */
4005 for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
4007
4008 if (proc->lockGroupLeader == NULL)
4009 {
4010 /* Easy case, proc is not a lock group member */
4012 }
4013 else
4014 {
4015 /* Examine all procs in proc's lock group */
4016 dlist_iter iter;
4017
4019 {
4020 PGPROC *memberProc;
4021
4022 memberProc = dlist_container(PGPROC, lockGroupLink, iter.cur);
4024 }
4025 }
4026
4027 /*
4028 * And release locks. See notes in GetLockStatusData().
4029 */
4030 for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
4032
4033 Assert(data->nprocs <= data->maxprocs);
4034 }
4035
4036 LWLockRelease(ProcArrayLock);
4037
4038 return data;
4039}
int MaxBackends
Definition: globals.c:147
#define dlist_foreach(iter, lhead)
Definition: ilist.h:623
#define dlist_container(type, membername, ptr)
Definition: ilist.h:593
static void GetSingleProcBlockerStatusData(PGPROC *blocked_proc, BlockedProcsData *data)
Definition: lock.c:4043
#define LockHashPartitionLockByIndex(i)
Definition: lock.h:530
#define NUM_LOCK_PARTITIONS
Definition: lwlock.h:97
void * palloc(Size size)
Definition: mcxt.c:1940
const void * data
PGPROC * BackendPidGetProcWithLock(int pid)
Definition: procarray.c:3219
dlist_head lockGroupMembers
Definition: proc.h:306
PGPROC * lockGroupLeader
Definition: proc.h:305
dlist_node * cur
Definition: ilist.h:179

References Assert(), BackendPidGetProcWithLock(), dlist_iter::cur, data, dlist_container, dlist_foreach, GetSingleProcBlockerStatusData(), i, PGPROC::lockGroupLeader, PGPROC::lockGroupMembers, LockHashPartitionLockByIndex, LW_SHARED, LWLockAcquire(), LWLockRelease(), MaxBackends, NUM_LOCK_PARTITIONS, and palloc().

Referenced by pg_blocking_pids().

◆ GetLockConflicts()

VirtualTransactionId * GetLockConflicts ( const LOCKTAG locktag,
LOCKMODE  lockmode,
int *  countp 
)

Definition at line 3034 of file lock.c.

3035{
3036 static VirtualTransactionId *vxids;
3037 LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
3038 LockMethod lockMethodTable;
3039 LOCK *lock;
3040 LOCKMASK conflictMask;
3041 dlist_iter proclock_iter;
3042 PROCLOCK *proclock;
3043 uint32 hashcode;
3044 LWLock *partitionLock;
3045 int count = 0;
3046 int fast_count = 0;
3047
3048 if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
3049 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
3050 lockMethodTable = LockMethods[lockmethodid];
3051 if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
3052 elog(ERROR, "unrecognized lock mode: %d", lockmode);
3053
3054 /*
3055 * Allocate memory to store results, and fill with InvalidVXID. We only
3056 * need enough space for MaxBackends + max_prepared_xacts + a terminator.
3057 * InHotStandby allocate once in TopMemoryContext.
3058 */
3059 if (InHotStandby)
3060 {
3061 if (vxids == NULL)
3062 vxids = (VirtualTransactionId *)
3064 sizeof(VirtualTransactionId) *
3066 }
3067 else
3068 vxids = (VirtualTransactionId *)
3071
3072 /* Compute hash code and partition lock, and look up conflicting modes. */
3073 hashcode = LockTagHashCode(locktag);
3074 partitionLock = LockHashPartitionLock(hashcode);
3075 conflictMask = lockMethodTable->conflictTab[lockmode];
3076
3077 /*
3078 * Fast path locks might not have been entered in the primary lock table.
3079 * If the lock we're dealing with could conflict with such a lock, we must
3080 * examine each backend's fast-path array for conflicts.
3081 */
3082 if (ConflictsWithRelationFastPath(locktag, lockmode))
3083 {
3084 int i;
3085 Oid relid = locktag->locktag_field2;
3087
3088 /* fast-path group the lock belongs to */
3089 uint32 group = FAST_PATH_REL_GROUP(relid);
3090
3091 /*
3092 * Iterate over relevant PGPROCs. Anything held by a prepared
3093 * transaction will have been transferred to the primary lock table,
3094 * so we need not worry about those. This is all a bit fuzzy, because
3095 * new locks could be taken after we've visited a particular
3096 * partition, but the callers had better be prepared to deal with that
3097 * anyway, since the locks could equally well be taken between the
3098 * time we return the value and the time the caller does something
3099 * with it.
3100 */
3101 for (i = 0; i < ProcGlobal->allProcCount; i++)
3102 {
3103 PGPROC *proc = &ProcGlobal->allProcs[i];
3104 uint32 j;
3105
3106 /* A backend never blocks itself */
3107 if (proc == MyProc)
3108 continue;
3109
3111
3112 /*
3113 * If the target backend isn't referencing the same database as
3114 * the lock, then we needn't examine the individual relation IDs
3115 * at all; none of them can be relevant.
3116 *
3117 * See FastPathTransferRelationLocks() for discussion of why we do
3118 * this test after acquiring the lock.
3119 *
3120 * Also skip groups without any registered fast-path locks.
3121 */
3122 if (proc->databaseId != locktag->locktag_field1 ||
3123 proc->fpLockBits[group] == 0)
3124 {
3125 LWLockRelease(&proc->fpInfoLock);
3126 continue;
3127 }
3128
3129 for (j = 0; j < FP_LOCK_SLOTS_PER_GROUP; j++)
3130 {
3131 uint32 lockmask;
3132
3133 /* index into the whole per-backend array */
3134 uint32 f = FAST_PATH_SLOT(group, j);
3135
3136 /* Look for an allocated slot matching the given relid. */
3137 if (relid != proc->fpRelId[f])
3138 continue;
3139 lockmask = FAST_PATH_GET_BITS(proc, f);
3140 if (!lockmask)
3141 continue;
3142 lockmask <<= FAST_PATH_LOCKNUMBER_OFFSET;
3143
3144 /*
3145 * There can only be one entry per relation, so if we found it
3146 * and it doesn't conflict, we can skip the rest of the slots.
3147 */
3148 if ((lockmask & conflictMask) == 0)
3149 break;
3150
3151 /* Conflict! */
3152 GET_VXID_FROM_PGPROC(vxid, *proc);
3153
3155 vxids[count++] = vxid;
3156 /* else, xact already committed or aborted */
3157
3158 /* No need to examine remaining slots. */
3159 break;
3160 }
3161
3162 LWLockRelease(&proc->fpInfoLock);
3163 }
3164 }
3165
3166 /* Remember how many fast-path conflicts we found. */
3167 fast_count = count;
3168
3169 /*
3170 * Look up the lock object matching the tag.
3171 */
3172 LWLockAcquire(partitionLock, LW_SHARED);
3173
3175 locktag,
3176 hashcode,
3177 HASH_FIND,
3178 NULL);
3179 if (!lock)
3180 {
3181 /*
3182 * If the lock object doesn't exist, there is nothing holding a lock
3183 * on this lockable object.
3184 */
3185 LWLockRelease(partitionLock);
3186 vxids[count].procNumber = INVALID_PROC_NUMBER;
3188 if (countp)
3189 *countp = count;
3190 return vxids;
3191 }
3192
3193 /*
3194 * Examine each existing holder (or awaiter) of the lock.
3195 */
3196 dlist_foreach(proclock_iter, &lock->procLocks)
3197 {
3198 proclock = dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
3199
3200 if (conflictMask & proclock->holdMask)
3201 {
3202 PGPROC *proc = proclock->tag.myProc;
3203
3204 /* A backend never blocks itself */
3205 if (proc != MyProc)
3206 {
3208
3209 GET_VXID_FROM_PGPROC(vxid, *proc);
3210
3212 {
3213 int i;
3214
3215 /* Avoid duplicate entries. */
3216 for (i = 0; i < fast_count; ++i)
3217 if (VirtualTransactionIdEquals(vxids[i], vxid))
3218 break;
3219 if (i >= fast_count)
3220 vxids[count++] = vxid;
3221 }
3222 /* else, xact already committed or aborted */
3223 }
3224 }
3225 }
3226
3227 LWLockRelease(partitionLock);
3228
3229 if (count > MaxBackends + max_prepared_xacts) /* should never happen */
3230 elog(PANIC, "too many conflicting locks found");
3231
3232 vxids[count].procNumber = INVALID_PROC_NUMBER;
3234 if (countp)
3235 *countp = count;
3236 return vxids;
3237}
#define lengthof(array)
Definition: c.h:759
#define ConflictsWithRelationFastPath(locktag, mode)
Definition: lock.c:270
uint32 LockTagHashCode(const LOCKTAG *locktag)
Definition: lock.c:553
uint16 LOCKMETHODID
Definition: lock.h:123
#define VirtualTransactionIdIsValid(vxid)
Definition: lock.h:68
#define GET_VXID_FROM_PGPROC(vxid_dst, proc)
Definition: lock.h:78
#define InvalidLocalTransactionId
Definition: lock.h:66
#define VirtualTransactionIdEquals(vxid1, vxid2)
Definition: lock.h:72
int LOCKMASK
Definition: lockdefs.h:25
void * MemoryContextAlloc(MemoryContext context, Size size)
Definition: mcxt.c:1256
void * palloc0(Size size)
Definition: mcxt.c:1970
MemoryContext TopMemoryContext
Definition: mcxt.c:165
#define INVALID_PROC_NUMBER
Definition: procnumber.h:26
uint8 locktag_lockmethodid
Definition: lock.h:172
int numLockModes
Definition: lock.h:111
LocalTransactionId localTransactionId
Definition: lock.h:63
ProcNumber procNumber
Definition: lock.h:62
int max_prepared_xacts
Definition: twophase.c:115
#define InHotStandby
Definition: xlogutils.h:60

References PROC_HDR::allProcCount, PROC_HDR::allProcs, ConflictsWithRelationFastPath, LockMethodData::conflictTab, dlist_iter::cur, PGPROC::databaseId, dlist_container, dlist_foreach, elog, ERROR, FAST_PATH_GET_BITS, FAST_PATH_LOCKNUMBER_OFFSET, FAST_PATH_REL_GROUP, FAST_PATH_SLOT, FP_LOCK_SLOTS_PER_GROUP, PGPROC::fpInfoLock, PGPROC::fpLockBits, PGPROC::fpRelId, GET_VXID_FROM_PGPROC, HASH_FIND, hash_search_with_hash_value(), PROCLOCK::holdMask, i, InHotStandby, INVALID_PROC_NUMBER, InvalidLocalTransactionId, j, lengthof, VirtualTransactionId::localTransactionId, LockHashPartitionLock, LockMethodLockHash, LockMethods, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_lockmethodid, LockTagHashCode(), LW_SHARED, LWLockAcquire(), LWLockRelease(), max_prepared_xacts, MaxBackends, MemoryContextAlloc(), MyProc, PROCLOCKTAG::myProc, LockMethodData::numLockModes, palloc0(), PANIC, ProcGlobal, LOCK::procLocks, VirtualTransactionId::procNumber, PROCLOCK::tag, TopMemoryContext, VirtualTransactionIdEquals, and VirtualTransactionIdIsValid.

Referenced by ProcSleep(), ResolveRecoveryConflictWithLock(), and WaitForLockersMultiple().

◆ GetLockmodeName()

const char * GetLockmodeName ( LOCKMETHODID  lockmethodid,
LOCKMODE  mode 
)

Definition at line 4220 of file lock.c.

4221{
4222 Assert(lockmethodid > 0 && lockmethodid < lengthof(LockMethods));
4223 Assert(mode > 0 && mode <= LockMethods[lockmethodid]->numLockModes);
4224 return LockMethods[lockmethodid]->lockModeNames[mode];
4225}
const char *const * lockModeNames
Definition: lock.h:113

References Assert(), lengthof, LockMethods, LockMethodData::lockModeNames, and mode.

Referenced by DeadLockReport(), LockAcquireExtended(), overexplain_range_table(), pg_lock_status(), and ProcSleep().

◆ GetLocksMethodTable()

LockMethod GetLocksMethodTable ( const LOCK lock)

Definition at line 523 of file lock.c.

524{
525 LOCKMETHODID lockmethodid = LOCK_LOCKMETHOD(*lock);
526
527 Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
528 return LockMethods[lockmethodid];
529}
#define LOCK_LOCKMETHOD(lock)
Definition: lock.h:325

References Assert(), lengthof, LOCK_LOCKMETHOD, and LockMethods.

Referenced by DeadLockCheck(), and FindLockCycleRecurseMember().

◆ GetLockStatusData()

LockData * GetLockStatusData ( void  )

Definition at line 3760 of file lock.c.

3761{
3762 LockData *data;
3763 PROCLOCK *proclock;
3764 HASH_SEQ_STATUS seqstat;
3765 int els;
3766 int el;
3767 int i;
3768
3769 data = (LockData *) palloc(sizeof(LockData));
3770
3771 /* Guess how much space we'll need. */
3772 els = MaxBackends;
3773 el = 0;
3774 data->locks = (LockInstanceData *) palloc(sizeof(LockInstanceData) * els);
3775
3776 /*
3777 * First, we iterate through the per-backend fast-path arrays, locking
3778 * them one at a time. This might produce an inconsistent picture of the
3779 * system state, but taking all of those LWLocks at the same time seems
3780 * impractical (in particular, note MAX_SIMUL_LWLOCKS). It shouldn't
3781 * matter too much, because none of these locks can be involved in lock
3782 * conflicts anyway - anything that might must be present in the main lock
3783 * table. (For the same reason, we don't sweat about making leaderPid
3784 * completely valid. We cannot safely dereference another backend's
3785 * lockGroupLeader field without holding all lock partition locks, and
3786 * it's not worth that.)
3787 */
3788 for (i = 0; i < ProcGlobal->allProcCount; ++i)
3789 {
3790 PGPROC *proc = &ProcGlobal->allProcs[i];
3791
3792 /* Skip backends with pid=0, as they don't hold fast-path locks */
3793 if (proc->pid == 0)
3794 continue;
3795
3797
3798 for (uint32 g = 0; g < FastPathLockGroupsPerBackend; g++)
3799 {
3800 /* Skip groups without registered fast-path locks */
3801 if (proc->fpLockBits[g] == 0)
3802 continue;
3803
3804 for (int j = 0; j < FP_LOCK_SLOTS_PER_GROUP; j++)
3805 {
3806 LockInstanceData *instance;
3807 uint32 f = FAST_PATH_SLOT(g, j);
3808 uint32 lockbits = FAST_PATH_GET_BITS(proc, f);
3809
3810 /* Skip unallocated slots */
3811 if (!lockbits)
3812 continue;
3813
3814 if (el >= els)
3815 {
3816 els += MaxBackends;
3817 data->locks = (LockInstanceData *)
3818 repalloc(data->locks, sizeof(LockInstanceData) * els);
3819 }
3820
3821 instance = &data->locks[el];
3822 SET_LOCKTAG_RELATION(instance->locktag, proc->databaseId,
3823 proc->fpRelId[f]);
3824 instance->holdMask = lockbits << FAST_PATH_LOCKNUMBER_OFFSET;
3825 instance->waitLockMode = NoLock;
3826 instance->vxid.procNumber = proc->vxid.procNumber;
3827 instance->vxid.localTransactionId = proc->vxid.lxid;
3828 instance->pid = proc->pid;
3829 instance->leaderPid = proc->pid;
3830 instance->fastpath = true;
3831
3832 /*
3833 * Successfully taking fast path lock means there were no
3834 * conflicting locks.
3835 */
3836 instance->waitStart = 0;
3837
3838 el++;
3839 }
3840 }
3841
3842 if (proc->fpVXIDLock)
3843 {
3845 LockInstanceData *instance;
3846
3847 if (el >= els)
3848 {
3849 els += MaxBackends;
3850 data->locks = (LockInstanceData *)
3851 repalloc(data->locks, sizeof(LockInstanceData) * els);
3852 }
3853
3854 vxid.procNumber = proc->vxid.procNumber;
3856
3857 instance = &data->locks[el];
3858 SET_LOCKTAG_VIRTUALTRANSACTION(instance->locktag, vxid);
3859 instance->holdMask = LOCKBIT_ON(ExclusiveLock);
3860 instance->waitLockMode = NoLock;
3861 instance->vxid.procNumber = proc->vxid.procNumber;
3862 instance->vxid.localTransactionId = proc->vxid.lxid;
3863 instance->pid = proc->pid;
3864 instance->leaderPid = proc->pid;
3865 instance->fastpath = true;
3866 instance->waitStart = 0;
3867
3868 el++;
3869 }
3870
3871 LWLockRelease(&proc->fpInfoLock);
3872 }
3873
3874 /*
3875 * Next, acquire lock on the entire shared lock data structure. We do
3876 * this so that, at least for locks in the primary lock table, the state
3877 * will be self-consistent.
3878 *
3879 * Since this is a read-only operation, we take shared instead of
3880 * exclusive lock. There's not a whole lot of point to this, because all
3881 * the normal operations require exclusive lock, but it doesn't hurt
3882 * anything either. It will at least allow two backends to do
3883 * GetLockStatusData in parallel.
3884 *
3885 * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
3886 */
3887 for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
3889
3890 /* Now we can safely count the number of proclocks */
3892 if (data->nelements > els)
3893 {
3894 els = data->nelements;
3895 data->locks = (LockInstanceData *)
3896 repalloc(data->locks, sizeof(LockInstanceData) * els);
3897 }
3898
3899 /* Now scan the tables to copy the data */
3901
3902 while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
3903 {
3904 PGPROC *proc = proclock->tag.myProc;
3905 LOCK *lock = proclock->tag.myLock;
3906 LockInstanceData *instance = &data->locks[el];
3907
3908 memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
3909 instance->holdMask = proclock->holdMask;
3910 if (proc->waitLock == proclock->tag.myLock)
3911 instance->waitLockMode = proc->waitLockMode;
3912 else
3913 instance->waitLockMode = NoLock;
3914 instance->vxid.procNumber = proc->vxid.procNumber;
3915 instance->vxid.localTransactionId = proc->vxid.lxid;
3916 instance->pid = proc->pid;
3917 instance->leaderPid = proclock->groupLeader->pid;
3918 instance->fastpath = false;
3919 instance->waitStart = (TimestampTz) pg_atomic_read_u64(&proc->waitStart);
3920
3921 el++;
3922 }
3923
3924 /*
3925 * And release locks. We do this in reverse order for two reasons: (1)
3926 * Anyone else who needs more than one of the locks will be trying to lock
3927 * them in increasing order; we don't want to release the other process
3928 * until it can get all the locks it needs. (2) This avoids O(N^2)
3929 * behavior inside LWLockRelease.
3930 */
3931 for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
3933
3934 Assert(el == data->nelements);
3935
3936 return data;
3937}
static uint64 pg_atomic_read_u64(volatile pg_atomic_uint64 *ptr)
Definition: atomics.h:467
int64 TimestampTz
Definition: timestamp.h:39
long hash_get_num_entries(HTAB *hashp)
Definition: dynahash.c:1341
#define SET_LOCKTAG_VIRTUALTRANSACTION(locktag, vxid)
Definition: lock.h:236
#define SET_LOCKTAG_RELATION(locktag, dboid, reloid)
Definition: lock.h:182
#define NoLock
Definition: lockdefs.h:34
#define ExclusiveLock
Definition: lockdefs.h:42
void * repalloc(void *pointer, Size size)
Definition: mcxt.c:2167
Definition: lock.h:467
LOCKMASK holdMask
Definition: lock.h:456
LOCKMODE waitLockMode
Definition: lock.h:457
bool fastpath
Definition: lock.h:463
LOCKTAG locktag
Definition: lock.h:455
TimestampTz waitStart
Definition: lock.h:459
int leaderPid
Definition: lock.h:462
VirtualTransactionId vxid
Definition: lock.h:458
struct PGPROC::@127 vxid
LocalTransactionId lxid
Definition: proc.h:201
pg_atomic_uint64 waitStart
Definition: proc.h:238
bool fpVXIDLock
Definition: proc.h:297
ProcNumber procNumber
Definition: proc.h:196
int pid
Definition: proc.h:183
LOCK * waitLock
Definition: proc.h:233
LOCKMODE waitLockMode
Definition: proc.h:235
LocalTransactionId fpLocalTransactionId
Definition: proc.h:298
PGPROC * groupLeader
Definition: lock.h:376

References PROC_HDR::allProcCount, PROC_HDR::allProcs, Assert(), data, PGPROC::databaseId, ExclusiveLock, FAST_PATH_GET_BITS, FAST_PATH_LOCKNUMBER_OFFSET, FAST_PATH_SLOT, LockInstanceData::fastpath, FastPathLockGroupsPerBackend, FP_LOCK_SLOTS_PER_GROUP, PGPROC::fpInfoLock, PGPROC::fpLocalTransactionId, PGPROC::fpLockBits, PGPROC::fpRelId, PGPROC::fpVXIDLock, PROCLOCK::groupLeader, hash_get_num_entries(), hash_seq_init(), hash_seq_search(), PROCLOCK::holdMask, LockInstanceData::holdMask, i, j, LockInstanceData::leaderPid, VirtualTransactionId::localTransactionId, LOCKBIT_ON, LockHashPartitionLockByIndex, LockMethodProcLockHash, LockInstanceData::locktag, LW_SHARED, LWLockAcquire(), LWLockRelease(), PGPROC::lxid, MaxBackends, PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, NoLock, NUM_LOCK_PARTITIONS, palloc(), pg_atomic_read_u64(), LockInstanceData::pid, PGPROC::pid, ProcGlobal, VirtualTransactionId::procNumber, PGPROC::procNumber, repalloc(), SET_LOCKTAG_RELATION, SET_LOCKTAG_VIRTUALTRANSACTION, LOCK::tag, PROCLOCK::tag, LockInstanceData::vxid, PGPROC::vxid, PGPROC::waitLock, LockInstanceData::waitLockMode, PGPROC::waitLockMode, LockInstanceData::waitStart, and PGPROC::waitStart.

Referenced by pg_lock_status().

◆ GetLockTagsMethodTable()

LockMethod GetLockTagsMethodTable ( const LOCKTAG locktag)

Definition at line 535 of file lock.c.

536{
537 LOCKMETHODID lockmethodid = (LOCKMETHODID) locktag->locktag_lockmethodid;
538
539 Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
540 return LockMethods[lockmethodid];
541}

References Assert(), lengthof, LockMethods, and LOCKTAG::locktag_lockmethodid.

Referenced by pg_blocking_pids().

◆ GetRunningTransactionLocks()

xl_standby_lock * GetRunningTransactionLocks ( int *  nlocks)

Definition at line 4138 of file lock.c.

4139{
4140 xl_standby_lock *accessExclusiveLocks;
4141 PROCLOCK *proclock;
4142 HASH_SEQ_STATUS seqstat;
4143 int i;
4144 int index;
4145 int els;
4146
4147 /*
4148 * Acquire lock on the entire shared lock data structure.
4149 *
4150 * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
4151 */
4152 for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
4154
4155 /* Now we can safely count the number of proclocks */
4157
4158 /*
4159 * Allocating enough space for all locks in the lock table is overkill,
4160 * but it's more convenient and faster than having to enlarge the array.
4161 */
4162 accessExclusiveLocks = palloc(els * sizeof(xl_standby_lock));
4163
4164 /* Now scan the tables to copy the data */
4166
4167 /*
4168 * If lock is a currently granted AccessExclusiveLock then it will have
4169 * just one proclock holder, so locks are never accessed twice in this
4170 * particular case. Don't copy this code for use elsewhere because in the
4171 * general case this will give you duplicate locks when looking at
4172 * non-exclusive lock types.
4173 */
4174 index = 0;
4175 while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
4176 {
4177 /* make sure this definition matches the one used in LockAcquire */
4178 if ((proclock->holdMask & LOCKBIT_ON(AccessExclusiveLock)) &&
4180 {
4181 PGPROC *proc = proclock->tag.myProc;
4182 LOCK *lock = proclock->tag.myLock;
4183 TransactionId xid = proc->xid;
4184
4185 /*
4186 * Don't record locks for transactions if we know they have
4187 * already issued their WAL record for commit but not yet released
4188 * lock. It is still possible that we see locks held by already
4189 * complete transactions, if they haven't yet zeroed their xids.
4190 */
4191 if (!TransactionIdIsValid(xid))
4192 continue;
4193
4194 accessExclusiveLocks[index].xid = xid;
4195 accessExclusiveLocks[index].dbOid = lock->tag.locktag_field1;
4196 accessExclusiveLocks[index].relOid = lock->tag.locktag_field2;
4197
4198 index++;
4199 }
4200 }
4201
4202 Assert(index <= els);
4203
4204 /*
4205 * And release locks. We do this in reverse order for two reasons: (1)
4206 * Anyone else who needs more than one of the locks will be trying to lock
4207 * them in increasing order; we don't want to release the other process
4208 * until it can get all the locks it needs. (2) This avoids O(N^2)
4209 * behavior inside LWLockRelease.
4210 */
4211 for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
4213
4214 *nlocks = index;
4215 return accessExclusiveLocks;
4216}
uint32 TransactionId
Definition: c.h:623
#define AccessExclusiveLock
Definition: lockdefs.h:43
TransactionId xid
Definition: proc.h:173
TransactionId xid
Definition: lockdefs.h:53
#define TransactionIdIsValid(xid)
Definition: transam.h:41

References AccessExclusiveLock, Assert(), xl_standby_lock::dbOid, hash_get_num_entries(), hash_seq_init(), hash_seq_search(), PROCLOCK::holdMask, i, LOCKBIT_ON, LockHashPartitionLockByIndex, LockMethodProcLockHash, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG_RELATION, LOCKTAG::locktag_type, LW_SHARED, LWLockAcquire(), LWLockRelease(), PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, NUM_LOCK_PARTITIONS, palloc(), xl_standby_lock::relOid, LOCK::tag, PROCLOCK::tag, TransactionIdIsValid, xl_standby_lock::xid, and PGPROC::xid.

Referenced by LogStandbySnapshot().

◆ GetSingleProcBlockerStatusData()

static void GetSingleProcBlockerStatusData ( PGPROC blocked_proc,
BlockedProcsData data 
)
static

Definition at line 4043 of file lock.c.

4044{
4045 LOCK *theLock = blocked_proc->waitLock;
4046 BlockedProcData *bproc;
4047 dlist_iter proclock_iter;
4048 dlist_iter proc_iter;
4049 dclist_head *waitQueue;
4050 int queue_size;
4051
4052 /* Nothing to do if this proc is not blocked */
4053 if (theLock == NULL)
4054 return;
4055
4056 /* Set up a procs[] element */
4057 bproc = &data->procs[data->nprocs++];
4058 bproc->pid = blocked_proc->pid;
4059 bproc->first_lock = data->nlocks;
4060 bproc->first_waiter = data->npids;
4061
4062 /*
4063 * We may ignore the proc's fast-path arrays, since nothing in those could
4064 * be related to a contended lock.
4065 */
4066
4067 /* Collect all PROCLOCKs associated with theLock */
4068 dlist_foreach(proclock_iter, &theLock->procLocks)
4069 {
4070 PROCLOCK *proclock =
4071 dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
4072 PGPROC *proc = proclock->tag.myProc;
4073 LOCK *lock = proclock->tag.myLock;
4074 LockInstanceData *instance;
4075
4076 if (data->nlocks >= data->maxlocks)
4077 {
4078 data->maxlocks += MaxBackends;
4079 data->locks = (LockInstanceData *)
4080 repalloc(data->locks, sizeof(LockInstanceData) * data->maxlocks);
4081 }
4082
4083 instance = &data->locks[data->nlocks];
4084 memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
4085 instance->holdMask = proclock->holdMask;
4086 if (proc->waitLock == lock)
4087 instance->waitLockMode = proc->waitLockMode;
4088 else
4089 instance->waitLockMode = NoLock;
4090 instance->vxid.procNumber = proc->vxid.procNumber;
4091 instance->vxid.localTransactionId = proc->vxid.lxid;
4092 instance->pid = proc->pid;
4093 instance->leaderPid = proclock->groupLeader->pid;
4094 instance->fastpath = false;
4095 data->nlocks++;
4096 }
4097
4098 /* Enlarge waiter_pids[] if it's too small to hold all wait queue PIDs */
4099 waitQueue = &(theLock->waitProcs);
4100 queue_size = dclist_count(waitQueue);
4101
4102 if (queue_size > data->maxpids - data->npids)
4103 {
4104 data->maxpids = Max(data->maxpids + MaxBackends,
4105 data->npids + queue_size);
4106 data->waiter_pids = (int *) repalloc(data->waiter_pids,
4107 sizeof(int) * data->maxpids);
4108 }
4109
4110 /* Collect PIDs from the lock's wait queue, stopping at blocked_proc */
4111 dclist_foreach(proc_iter, waitQueue)
4112 {
4113 PGPROC *queued_proc = dlist_container(PGPROC, links, proc_iter.cur);
4114
4115 if (queued_proc == blocked_proc)
4116 break;
4117 data->waiter_pids[data->npids++] = queued_proc->pid;
4118 queued_proc = (PGPROC *) queued_proc->links.next;
4119 }
4120
4121 bproc->num_locks = data->nlocks - bproc->first_lock;
4122 bproc->num_waiters = data->npids - bproc->first_waiter;
4123}
#define Max(x, y)
Definition: c.h:969
static uint32 dclist_count(const dclist_head *head)
Definition: ilist.h:932
#define dclist_foreach(iter, lhead)
Definition: ilist.h:970
int first_lock
Definition: lock.h:477
int first_waiter
Definition: lock.h:481
int num_waiters
Definition: lock.h:482
int num_locks
Definition: lock.h:478
dclist_head waitProcs
Definition: lock.h:318
dlist_node links
Definition: proc.h:164
dlist_node * next
Definition: ilist.h:140
static struct link * links
Definition: zic.c:299

References dlist_iter::cur, data, dclist_count(), dclist_foreach, dlist_container, dlist_foreach, LockInstanceData::fastpath, BlockedProcData::first_lock, BlockedProcData::first_waiter, PROCLOCK::groupLeader, PROCLOCK::holdMask, LockInstanceData::holdMask, LockInstanceData::leaderPid, PGPROC::links, links, VirtualTransactionId::localTransactionId, LockInstanceData::locktag, PGPROC::lxid, Max, MaxBackends, PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, dlist_node::next, NoLock, BlockedProcData::num_locks, BlockedProcData::num_waiters, LockInstanceData::pid, BlockedProcData::pid, PGPROC::pid, LOCK::procLocks, VirtualTransactionId::procNumber, PGPROC::procNumber, repalloc(), LOCK::tag, PROCLOCK::tag, LockInstanceData::vxid, PGPROC::vxid, PGPROC::waitLock, LockInstanceData::waitLockMode, PGPROC::waitLockMode, and LOCK::waitProcs.

Referenced by GetBlockerStatusData().

◆ GrantAwaitedLock()

void GrantAwaitedLock ( void  )

Definition at line 1885 of file lock.c.

1886{
1888}
static ResourceOwner awaitedOwner
Definition: lock.c:326
static void GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner)
Definition: lock.c:1788

References awaitedLock, awaitedOwner, and GrantLockLocal().

Referenced by LockErrorCleanup().

◆ GrantLock()

void GrantLock ( LOCK *  lock,
PROCLOCK *  proclock,
LOCKMODE  lockmode 
)

Definition at line 1654 of file lock.c.

1655{
1656 lock->nGranted++;
1657 lock->granted[lockmode]++;
1658 lock->grantMask |= LOCKBIT_ON(lockmode);
1659 if (lock->granted[lockmode] == lock->requested[lockmode])
1660 lock->waitMask &= LOCKBIT_OFF(lockmode);
1661 proclock->holdMask |= LOCKBIT_ON(lockmode);
1662 LOCK_PRINT("GrantLock", lock, lockmode);
1663 Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
1664 Assert(lock->nGranted <= lock->nRequested);
1665}
#define LOCKBIT_OFF(lockmode)
Definition: lock.h:86
int requested[MAX_LOCKMODES]
Definition: lock.h:319
int granted[MAX_LOCKMODES]
Definition: lock.h:321
LOCKMASK grantMask
Definition: lock.h:315
LOCKMASK waitMask
Definition: lock.h:316
int nGranted
Definition: lock.h:322

References Assert(), LOCK::granted, LOCK::grantMask, PROCLOCK::holdMask, LOCK_PRINT, LOCKBIT_OFF, LOCKBIT_ON, LOCK::nGranted, LOCK::nRequested, LOCK::requested, and LOCK::waitMask.

Referenced by FastPathGetRelationLockEntry(), FastPathTransferRelationLocks(), JoinWaitQueue(), lock_twophase_recover(), LockAcquireExtended(), ProcLockWakeup(), and VirtualXactLock().

◆ GrantLockLocal()

static void GrantLockLocal ( LOCALLOCK *  locallock,
ResourceOwner  owner 
)
static

Definition at line 1788 of file lock.c.

1789{
1790 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
1791 int i;
1792
1793 Assert(locallock->numLockOwners < locallock->maxLockOwners);
1794 /* Count the total */
1795 locallock->nLocks++;
1796 /* Count the per-owner lock */
1797 for (i = 0; i < locallock->numLockOwners; i++)
1798 {
1799 if (lockOwners[i].owner == owner)
1800 {
1801 lockOwners[i].nLocks++;
1802 return;
1803 }
1804 }
1805 lockOwners[i].owner = owner;
1806 lockOwners[i].nLocks = 1;
1807 locallock->numLockOwners++;
1808 if (owner != NULL)
1809 ResourceOwnerRememberLock(owner, locallock);
1810
1811 /* Indicate that the lock is acquired for certain types of locks. */
1812 CheckAndSetLockHeld(locallock, true);
1813}
static void CheckAndSetLockHeld(LOCALLOCK *locallock, bool acquired)
Definition: lock.c:1460
void ResourceOwnerRememberLock(ResourceOwner owner, LOCALLOCK *locallock)
Definition: resowner.c:1062
int64 nLocks
Definition: lock.h:424
struct ResourceOwnerData * owner
Definition: lock.h:423
int maxLockOwners
Definition: lock.h:438

References Assert(), CheckAndSetLockHeld(), i, LOCALLOCK::lockOwners, LOCALLOCK::maxLockOwners, LOCALLOCKOWNER::nLocks, LOCALLOCK::nLocks, LOCALLOCK::numLockOwners, LOCALLOCKOWNER::owner, and ResourceOwnerRememberLock().

Referenced by GrantAwaitedLock(), and LockAcquireExtended().

◆ InitLockManagerAccess()

void InitLockManagerAccess ( void  )

Definition at line 501 of file lock.c.

502{
503 /*
504 * Allocate non-shared hash table for LOCALLOCK structs. This stores lock
505 * counts and resource owner information.
506 */
507 HASHCTL info;
508
509 info.keysize = sizeof(LOCALLOCKTAG);
510 info.entrysize = sizeof(LOCALLOCK);
511
512 LockMethodLocalHash = hash_create("LOCALLOCK hash",
513 16,
514 &info,
516}
struct LOCALLOCK LOCALLOCK
struct LOCALLOCKTAG LOCALLOCKTAG

References HASHCTL::entrysize, HASH_BLOBS, hash_create(), HASH_ELEM, HASHCTL::keysize, and LockMethodLocalHash.

Referenced by BaseInit().

◆ lock_twophase_postabort()

void lock_twophase_postabort ( TransactionId  xid,
uint16  info,
void *  recdata,
uint32  len 
)

Definition at line 4563 of file lock.c.

4565{
4566 lock_twophase_postcommit(xid, info, recdata, len);
4567}
void lock_twophase_postcommit(TransactionId xid, uint16 info, void *recdata, uint32 len)
Definition: lock.c:4537
const void size_t len

References len, and lock_twophase_postcommit().

◆ lock_twophase_postcommit()

void lock_twophase_postcommit ( TransactionId  xid,
uint16  info,
void *  recdata,
uint32  len 
)

Definition at line 4537 of file lock.c.

4539{
4540 TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4541 PGPROC *proc = TwoPhaseGetDummyProc(xid, true);
4542 LOCKTAG *locktag;
4543 LOCKMETHODID lockmethodid;
4544 LockMethod lockMethodTable;
4545
4546 Assert(len == sizeof(TwoPhaseLockRecord));
4547 locktag = &rec->locktag;
4548 lockmethodid = locktag->locktag_lockmethodid;
4549
4550 if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4551 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4552 lockMethodTable = LockMethods[lockmethodid];
4553
4554 LockRefindAndRelease(lockMethodTable, proc, locktag, rec->lockmode, true);
4555}
static void LockRefindAndRelease(LockMethod lockMethodTable, PGPROC *proc, LOCKTAG *locktag, LOCKMODE lockmode, bool decrement_strong_lock_count)
Definition: lock.c:3251
PGPROC * TwoPhaseGetDummyProc(TransactionId xid, bool lock_held)
Definition: twophase.c:918

References Assert(), elog, ERROR, len, lengthof, LockMethods, TwoPhaseLockRecord::lockmode, LockRefindAndRelease(), TwoPhaseLockRecord::locktag, LOCKTAG::locktag_lockmethodid, and TwoPhaseGetDummyProc().

Referenced by lock_twophase_postabort().

◆ lock_twophase_recover()

void lock_twophase_recover ( TransactionId  xid,
uint16  info,
void *  recdata,
uint32  len 
)

Definition at line 4324 of file lock.c.

4326{
4327 TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4328 PGPROC *proc = TwoPhaseGetDummyProc(xid, false);
4329 LOCKTAG *locktag;
4330 LOCKMODE lockmode;
4331 LOCKMETHODID lockmethodid;
4332 LOCK *lock;
4333 PROCLOCK *proclock;
4334 PROCLOCKTAG proclocktag;
4335 bool found;
4336 uint32 hashcode;
4337 uint32 proclock_hashcode;
4338 int partition;
4339 LWLock *partitionLock;
4340 LockMethod lockMethodTable;
4341
4342 Assert(len == sizeof(TwoPhaseLockRecord));
4343 locktag = &rec->locktag;
4344 lockmode = rec->lockmode;
4345 lockmethodid = locktag->locktag_lockmethodid;
4346
4347 if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4348 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4349 lockMethodTable = LockMethods[lockmethodid];
4350
4351 hashcode = LockTagHashCode(locktag);
4352 partition = LockHashPartition(hashcode);
4353 partitionLock = LockHashPartitionLock(hashcode);
4354
4355 LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4356
4357 /*
4358 * Find or create a lock with this tag.
4359 */
4361 locktag,
4362 hashcode,
4364 &found);
4365 if (!lock)
4366 {
4367 LWLockRelease(partitionLock);
4368 ereport(ERROR,
4369 (errcode(ERRCODE_OUT_OF_MEMORY),
4370 errmsg("out of shared memory"),
4371 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
4372 }
4373
4374 /*
4375 * if it's a new lock object, initialize it
4376 */
4377 if (!found)
4378 {
4379 lock->grantMask = 0;
4380 lock->waitMask = 0;
4381 dlist_init(&lock->procLocks);
4382 dclist_init(&lock->waitProcs);
4383 lock->nRequested = 0;
4384 lock->nGranted = 0;
4385 MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
4386 MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
4387 LOCK_PRINT("lock_twophase_recover: new", lock, lockmode);
4388 }
4389 else
4390 {
4391 LOCK_PRINT("lock_twophase_recover: found", lock, lockmode);
4392 Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
4393 Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
4394 Assert(lock->nGranted <= lock->nRequested);
4395 }
4396
4397 /*
4398 * Create the hash key for the proclock table.
4399 */
4400 proclocktag.myLock = lock;
4401 proclocktag.myProc = proc;
4402
4403 proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
4404
4405 /*
4406 * Find or create a proclock entry with this tag
4407 */
4409 &proclocktag,
4410 proclock_hashcode,
4412 &found);
4413 if (!proclock)
4414 {
4415 /* Oops, not enough shmem for the proclock */
4416 if (lock->nRequested == 0)
4417 {
4418 /*
4419 * There are no other requestors of this lock, so garbage-collect
4420 * the lock object. We *must* do this to avoid a permanent leak
4421 * of shared memory, because there won't be anything to cause
4422 * anyone to release the lock object later.
4423 */
4426 &(lock->tag),
4427 hashcode,
4429 NULL))
4430 elog(PANIC, "lock table corrupted");
4431 }
4432 LWLockRelease(partitionLock);
4433 ereport(ERROR,
4434 (errcode(ERRCODE_OUT_OF_MEMORY),
4435 errmsg("out of shared memory"),
4436 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
4437 }
4438
4439 /*
4440 * If new, initialize the new entry
4441 */
4442 if (!found)
4443 {
4444 Assert(proc->lockGroupLeader == NULL);
4445 proclock->groupLeader = proc;
4446 proclock->holdMask = 0;
4447 proclock->releaseMask = 0;
4448 /* Add proclock to appropriate lists */
4449 dlist_push_tail(&lock->procLocks, &proclock->lockLink);
4450 dlist_push_tail(&proc->myProcLocks[partition],
4451 &proclock->procLink);
4452 PROCLOCK_PRINT("lock_twophase_recover: new", proclock);
4453 }
4454 else
4455 {
4456 PROCLOCK_PRINT("lock_twophase_recover: found", proclock);
4457 Assert((proclock->holdMask & ~lock->grantMask) == 0);
4458 }
4459
4460 /*
4461 * lock->nRequested and lock->requested[] count the total number of
4462 * requests, whether granted or waiting, so increment those immediately.
4463 */
4464 lock->nRequested++;
4465 lock->requested[lockmode]++;
4466 Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
4467
4468 /*
4469 * We shouldn't already hold the desired lock.
4470 */
4471 if (proclock->holdMask & LOCKBIT_ON(lockmode))
4472 elog(ERROR, "lock %s on object %u/%u/%u is already held",
4473 lockMethodTable->lockModeNames[lockmode],
4474 lock->tag.locktag_field1, lock->tag.locktag_field2,
4475 lock->tag.locktag_field3);
4476
4477 /*
4478 * We ignore any possible conflicts and just grant ourselves the lock. Not
4479 * only because we don't bother, but also to avoid deadlocks when
4480 * switching from standby to normal mode. See function comment.
4481 */
4482 GrantLock(lock, proclock, lockmode);
4483
4484 /*
4485 * Bump strong lock count, to make sure any fast-path lock requests won't
4486 * be granted without consulting the primary lock table.
4487 */
4488 if (ConflictsWithRelationFastPath(&lock->tag, lockmode))
4489 {
4490 uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
4491
4493 FastPathStrongRelationLocks->count[fasthashcode]++;
4495 }
4496
4497 LWLockRelease(partitionLock);
4498}
#define MemSet(start, val, len)
Definition: c.h:991
@ HASH_ENTER_NULL
Definition: hsearch.h:116
static void dlist_init(dlist_head *head)
Definition: ilist.h:314
static void dlist_push_tail(dlist_head *head, dlist_node *node)
Definition: ilist.h:364
static void dclist_init(dclist_head *head)
Definition: ilist.h:671
#define MAX_LOCKMODES
Definition: lock.h:83
#define LockHashPartition(hashcode)
Definition: lock.h:525
int LOCKMODE
Definition: lockdefs.h:26
uint32 locktag_field3
Definition: lock.h:169
dlist_head myProcLocks[NUM_LOCK_PARTITIONS]
Definition: proc.h:262
LOCKMASK releaseMask
Definition: lock.h:378

References Assert(), ConflictsWithRelationFastPath, FastPathStrongRelationLockData::count, dclist_init(), dlist_init(), dlist_is_empty(), dlist_push_tail(), elog, ereport, errcode(), errhint(), errmsg(), ERROR, FastPathStrongLockHashPartition, FastPathStrongRelationLocks, LOCK::granted, GrantLock(), LOCK::grantMask, PROCLOCK::groupLeader, HASH_ENTER_NULL, HASH_REMOVE, hash_search_with_hash_value(), PROCLOCK::holdMask, len, lengthof, LOCK_PRINT, LOCKBIT_ON, PGPROC::lockGroupLeader, LockHashPartition, LockHashPartitionLock, PROCLOCK::lockLink, LockMethodLockHash, LockMethodProcLockHash, LockMethods, TwoPhaseLockRecord::lockmode, LockMethodData::lockModeNames, TwoPhaseLockRecord::locktag, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_field3, LOCKTAG::locktag_lockmethodid, LockTagHashCode(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), MAX_LOCKMODES, MemSet, FastPathStrongRelationLockData::mutex, PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, PGPROC::myProcLocks, LOCK::nGranted, LOCK::nRequested, PANIC, PROCLOCK::procLink, PROCLOCK_PRINT, ProcLockHashCode(), LOCK::procLocks, PROCLOCK::releaseMask, LOCK::requested, SpinLockAcquire, SpinLockRelease, LOCK::tag, TwoPhaseGetDummyProc(), LOCK::waitMask, and LOCK::waitProcs.

◆ lock_twophase_standby_recover()

void lock_twophase_standby_recover ( TransactionId  xid,
uint16  info,
void *  recdata,
uint32  len 
)

Definition at line 4505 of file lock.c.

4507{
4508 TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4509 LOCKTAG *locktag;
4510 LOCKMODE lockmode;
4511 LOCKMETHODID lockmethodid;
4512
4513 Assert(len == sizeof(TwoPhaseLockRecord));
4514 locktag = &rec->locktag;
4515 lockmode = rec->lockmode;
4516 lockmethodid = locktag->locktag_lockmethodid;
4517
4518 if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4519 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4520
4521 if (lockmode == AccessExclusiveLock &&
4522 locktag->locktag_type == LOCKTAG_RELATION)
4523 {
4525 locktag->locktag_field1 /* dboid */ ,
4526 locktag->locktag_field2 /* reloid */ );
4527 }
4528}
void StandbyAcquireAccessExclusiveLock(TransactionId xid, Oid dbOid, Oid relOid)
Definition: standby.c:986

References AccessExclusiveLock, Assert(), elog, ERROR, len, lengthof, LockMethods, TwoPhaseLockRecord::lockmode, TwoPhaseLockRecord::locktag, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_lockmethodid, LOCKTAG_RELATION, LOCKTAG::locktag_type, and StandbyAcquireAccessExclusiveLock().

◆ LockAcquire()

LockAcquireResult LockAcquire ( const LOCKTAG *  locktag,
LOCKMODE  lockmode,
bool  sessionLock,
bool  dontWait 
)

◆ LockAcquireExtended()

LockAcquireResult LockAcquireExtended ( const LOCKTAG *  locktag,
LOCKMODE  lockmode,
bool  sessionLock,
bool  dontWait,
bool  reportMemoryError,
LOCALLOCK **  locallockp,
bool  logLockFailure 
)

Definition at line 832 of file lock.c.

839{
840 LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
841 LockMethod lockMethodTable;
842 LOCALLOCKTAG localtag;
843 LOCALLOCK *locallock;
844 LOCK *lock;
845 PROCLOCK *proclock;
846 bool found;
847 ResourceOwner owner;
848 uint32 hashcode;
849 LWLock *partitionLock;
850 bool found_conflict;
851 ProcWaitStatus waitResult;
852 bool log_lock = false;
853
854 if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
855 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
856 lockMethodTable = LockMethods[lockmethodid];
857 if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
858 elog(ERROR, "unrecognized lock mode: %d", lockmode);
859
860 if (RecoveryInProgress() && !InRecovery &&
861 (locktag->locktag_type == LOCKTAG_OBJECT ||
862 locktag->locktag_type == LOCKTAG_RELATION) &&
863 lockmode > RowExclusiveLock)
865 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
866 errmsg("cannot acquire lock mode %s on database objects while recovery is in progress",
867 lockMethodTable->lockModeNames[lockmode]),
868 errhint("Only RowExclusiveLock or less can be acquired on database objects during recovery.")));
869
870#ifdef LOCK_DEBUG
871 if (LOCK_DEBUG_ENABLED(locktag))
872 elog(LOG, "LockAcquire: lock [%u,%u] %s",
873 locktag->locktag_field1, locktag->locktag_field2,
874 lockMethodTable->lockModeNames[lockmode]);
875#endif
876
877 /* Identify owner for lock */
878 if (sessionLock)
879 owner = NULL;
880 else
881 owner = CurrentResourceOwner;
882
883 /*
884 * Find or create a LOCALLOCK entry for this lock and lockmode
885 */
886 MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
887 localtag.lock = *locktag;
888 localtag.mode = lockmode;
889
891 &localtag,
892 HASH_ENTER, &found);
893
894 /*
895 * if it's a new locallock object, initialize it
896 */
897 if (!found)
898 {
899 locallock->lock = NULL;
900 locallock->proclock = NULL;
901 locallock->hashcode = LockTagHashCode(&(localtag.lock));
902 locallock->nLocks = 0;
903 locallock->holdsStrongLockCount = false;
904 locallock->lockCleared = false;
905 locallock->numLockOwners = 0;
906 locallock->maxLockOwners = 8;
907 locallock->lockOwners = NULL; /* in case next line fails */
908 locallock->lockOwners = (LOCALLOCKOWNER *)
910 locallock->maxLockOwners * sizeof(LOCALLOCKOWNER));
911 }
912 else
913 {
914 /* Make sure there will be room to remember the lock */
915 if (locallock->numLockOwners >= locallock->maxLockOwners)
916 {
917 int newsize = locallock->maxLockOwners * 2;
918
919 locallock->lockOwners = (LOCALLOCKOWNER *)
920 repalloc(locallock->lockOwners,
921 newsize * sizeof(LOCALLOCKOWNER));
922 locallock->maxLockOwners = newsize;
923 }
924 }
925 hashcode = locallock->hashcode;
926
927 if (locallockp)
928 *locallockp = locallock;
929
930 /*
931 * If we already hold the lock, we can just increase the count locally.
932 *
933 * If lockCleared is already set, caller need not worry about absorbing
934 * sinval messages related to the lock's object.
935 */
936 if (locallock->nLocks > 0)
937 {
938 GrantLockLocal(locallock, owner);
939 if (locallock->lockCleared)
941 else
943 }
944
945 /*
946 * We don't acquire any other heavyweight lock while holding the relation
947 * extension lock. We do allow to acquire the same relation extension
948 * lock more than once but that case won't reach here.
949 */
950 Assert(!IsRelationExtensionLockHeld);
951
952 /*
953 * Prepare to emit a WAL record if acquisition of this lock needs to be
954 * replayed in a standby server.
955 *
956 * Here we prepare to log; after lock is acquired we'll issue log record.
957 * This arrangement simplifies error recovery in case the preparation step
958 * fails.
959 *
960 * Only AccessExclusiveLocks can conflict with lock types that read-only
961 * transactions can acquire in a standby server. Make sure this definition
962 * matches the one in GetRunningTransactionLocks().
963 */
964 if (lockmode >= AccessExclusiveLock &&
965 locktag->locktag_type == LOCKTAG_RELATION &&
968 {
970 log_lock = true;
971 }
972
973 /*
974 * Attempt to take lock via fast path, if eligible. But if we remember
975 * having filled up the fast path array, we don't attempt to make any
976 * further use of it until we release some locks. It's possible that some
977 * other backend has transferred some of those locks to the shared hash
978 * table, leaving space free, but it's not worth acquiring the LWLock just
979 * to check. It's also possible that we're acquiring a second or third
980 * lock type on a relation we have already locked using the fast-path, but
981 * for now we don't worry about that case either.
982 */
983 if (EligibleForRelationFastPath(locktag, lockmode) &&
985 {
986 uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
987 bool acquired;
988
989 /*
990 * LWLockAcquire acts as a memory sequencing point, so it's safe to
991 * assume that any strong locker whose increment to
992 * FastPathStrongRelationLocks->counts becomes visible after we test
993 * it has yet to begin to transfer fast-path locks.
994 */
996 if (FastPathStrongRelationLocks->count[fasthashcode] != 0)
997 acquired = false;
998 else
999 acquired = FastPathGrantRelationLock(locktag->locktag_field2,
1000 lockmode);
1002 if (acquired)
1003 {
1004 /*
1005 * The locallock might contain stale pointers to some old shared
1006 * objects; we MUST reset these to null before considering the
1007 * lock to be acquired via fast-path.
1008 */
1009 locallock->lock = NULL;
1010 locallock->proclock = NULL;
1011 GrantLockLocal(locallock, owner);
1012 return LOCKACQUIRE_OK;
1013 }
1014 }
1015
1016 /*
1017 * If this lock could potentially have been taken via the fast-path by
1018 * some other backend, we must (temporarily) disable further use of the
1019 * fast-path for this lock tag, and migrate any locks already taken via
1020 * this method to the main lock table.
1021 */
1022 if (ConflictsWithRelationFastPath(locktag, lockmode))
1023 {
1024 uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
1025
1026 BeginStrongLockAcquire(locallock, fasthashcode);
1027 if (!FastPathTransferRelationLocks(lockMethodTable, locktag,
1028 hashcode))
1029 {
1031 if (locallock->nLocks == 0)
1032 RemoveLocalLock(locallock);
1033 if (locallockp)
1034 *locallockp = NULL;
1035 if (reportMemoryError)
1036 ereport(ERROR,
1037 (errcode(ERRCODE_OUT_OF_MEMORY),
1038 errmsg("out of shared memory"),
1039 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
1040 else
1041 return LOCKACQUIRE_NOT_AVAIL;
1042 }
1043 }
1044
1045 /*
1046 * We didn't find the lock in our LOCALLOCK table, and we didn't manage to
1047 * take it via the fast-path, either, so we've got to mess with the shared
1048 * lock table.
1049 */
1050 partitionLock = LockHashPartitionLock(hashcode);
1051
1052 LWLockAcquire(partitionLock, LW_EXCLUSIVE);
1053
1054 /*
1055 * Find or create lock and proclock entries with this tag
1056 *
1057 * Note: if the locallock object already existed, it might have a pointer
1058 * to the lock already ... but we should not assume that that pointer is
1059 * valid, since a lock object with zero hold and request counts can go
1060 * away anytime. So we have to use SetupLockInTable() to recompute the
1061 * lock and proclock pointers, even if they're already set.
1062 */
1063 proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
1064 hashcode, lockmode);
1065 if (!proclock)
1066 {
1068 LWLockRelease(partitionLock);
1069 if (locallock->nLocks == 0)
1070 RemoveLocalLock(locallock);
1071 if (locallockp)
1072 *locallockp = NULL;
1073 if (reportMemoryError)
1074 ereport(ERROR,
1075 (errcode(ERRCODE_OUT_OF_MEMORY),
1076 errmsg("out of shared memory"),
1077 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
1078 else
1079 return LOCKACQUIRE_NOT_AVAIL;
1080 }
1081 locallock->proclock = proclock;
1082 lock = proclock->tag.myLock;
1083 locallock->lock = lock;
1084
1085 /*
1086 * If lock requested conflicts with locks requested by waiters, must join
1087 * wait queue. Otherwise, check for conflict with already-held locks.
1088 * (That's last because most complex check.)
1089 */
1090 if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
1091 found_conflict = true;
1092 else
1093 found_conflict = LockCheckConflicts(lockMethodTable, lockmode,
1094 lock, proclock);
1095
1096 if (!found_conflict)
1097 {
1098 /* No conflict with held or previously requested locks */
1099 GrantLock(lock, proclock, lockmode);
1100 waitResult = PROC_WAIT_STATUS_OK;
1101 }
1102 else
1103 {
1104 /*
1105 * Join the lock's wait queue. We call this even in the dontWait
1106 * case, because JoinWaitQueue() may discover that we can acquire the
1107 * lock immediately after all.
1108 */
1109 waitResult = JoinWaitQueue(locallock, lockMethodTable, dontWait);
1110 }
1111
1112 if (waitResult == PROC_WAIT_STATUS_ERROR)
1113 {
1114 /*
1115 * We're not getting the lock because a deadlock was detected already
1116 * while trying to join the wait queue, or because we would have to
1117 * wait but the caller requested no blocking.
1118 *
1119 * Undo the changes to shared entries before releasing the partition
1120 * lock.
1121 */
1123
1124 if (proclock->holdMask == 0)
1125 {
1126 uint32 proclock_hashcode;
1127
1128 proclock_hashcode = ProcLockHashCode(&proclock->tag,
1129 hashcode);
1130 dlist_delete(&proclock->lockLink);
1131 dlist_delete(&proclock->procLink);
1133 &(proclock->tag),
1134 proclock_hashcode,
1136 NULL))
1137 elog(PANIC, "proclock table corrupted");
1138 }
1139 else
1140 PROCLOCK_PRINT("LockAcquire: did not join wait queue", proclock);
1141 lock->nRequested--;
1142 lock->requested[lockmode]--;
1143 LOCK_PRINT("LockAcquire: did not join wait queue",
1144 lock, lockmode);
1145 Assert((lock->nRequested > 0) &&
1146 (lock->requested[lockmode] >= 0));
1147 Assert(lock->nGranted <= lock->nRequested);
1148 LWLockRelease(partitionLock);
1149 if (locallock->nLocks == 0)
1150 RemoveLocalLock(locallock);
1151
1152 if (dontWait)
1153 {
1154 /*
1155 * Log lock holders and waiters as a detail log message if
1156 * logLockFailure = true and lock acquisition fails with dontWait
1157 * = true
1158 */
1159 if (logLockFailure)
1160 {
1162 lock_waiters_sbuf,
1163 lock_holders_sbuf;
1164 const char *modename;
1165 int lockHoldersNum = 0;
1166
1168 initStringInfo(&lock_waiters_sbuf);
1169 initStringInfo(&lock_holders_sbuf);
1170
1171 DescribeLockTag(&buf, &locallock->tag.lock);
1172 modename = GetLockmodeName(locallock->tag.lock.locktag_lockmethodid,
1173 lockmode);
1174
1175 /* Gather a list of all lock holders and waiters */
1176 LWLockAcquire(partitionLock, LW_SHARED);
1177 GetLockHoldersAndWaiters(locallock, &lock_holders_sbuf,
1178 &lock_waiters_sbuf, &lockHoldersNum);
1179 LWLockRelease(partitionLock);
1180
1181 ereport(LOG,
1182 (errmsg("process %d could not obtain %s on %s",
1183 MyProcPid, modename, buf.data),
1185 "Process holding the lock: %s, Wait queue: %s.",
1186 "Processes holding the lock: %s, Wait queue: %s.",
1187 lockHoldersNum,
1188 lock_holders_sbuf.data,
1189 lock_waiters_sbuf.data)));
1190
1191 pfree(buf.data);
1192 pfree(lock_holders_sbuf.data);
1193 pfree(lock_waiters_sbuf.data);
1194 }
1195 if (locallockp)
1196 *locallockp = NULL;
1197 return LOCKACQUIRE_NOT_AVAIL;
1198 }
1199 else
1200 {
1202 /* DeadLockReport() will not return */
1203 }
1204 }
1205
1206 /*
1207 * We are now in the lock queue, or the lock was already granted. If
1208 * queued, go to sleep.
1209 */
1210 if (waitResult == PROC_WAIT_STATUS_WAITING)
1211 {
1212 Assert(!dontWait);
1213 PROCLOCK_PRINT("LockAcquire: sleeping on lock", proclock);
1214 LOCK_PRINT("LockAcquire: sleeping on lock", lock, lockmode);
1215 LWLockRelease(partitionLock);
1216
1217 waitResult = WaitOnLock(locallock, owner);
1218
1219 /*
1220 * NOTE: do not do any material change of state between here and
1221 * return. All required changes in locktable state must have been
1222 * done when the lock was granted to us --- see notes in WaitOnLock.
1223 */
1224
1225 if (waitResult == PROC_WAIT_STATUS_ERROR)
1226 {
1227 /*
1228 * We failed as a result of a deadlock, see CheckDeadLock(). Quit
1229 * now.
1230 */
1231 Assert(!dontWait);
1233 /* DeadLockReport() will not return */
1234 }
1235 }
1236 else
1237 LWLockRelease(partitionLock);
1238 Assert(waitResult == PROC_WAIT_STATUS_OK);
1239
1240 /* The lock was granted to us. Update the local lock entry accordingly */
1241 Assert((proclock->holdMask & LOCKBIT_ON(lockmode)) != 0);
1242 GrantLockLocal(locallock, owner);
1243
1244 /*
1245 * Lock state is fully up-to-date now; if we error out after this, no
1246 * special error cleanup is required.
1247 */
1249
1250 /*
1251 * Emit a WAL record if acquisition of this lock needs to be replayed in a
1252 * standby server.
1253 */
1254 if (log_lock)
1255 {
1256 /*
1257 * Decode the locktag back to the original values, to avoid sending
1258 * lots of empty bytes with every message. See lock.h to check how a
1259 * locktag is defined for LOCKTAG_RELATION
1260 */
1262 locktag->locktag_field2);
1263 }
1264
1265 return LOCKACQUIRE_OK;
1266}
void DeadLockReport(void)
Definition: deadlock.c:1075
int errdetail_log_plural(const char *fmt_singular, const char *fmt_plural, unsigned long n,...)
Definition: elog.c:1273
#define LOG
Definition: elog.h:31
int MyProcPid
Definition: globals.c:48
void DescribeLockTag(StringInfo buf, const LOCKTAG *tag)
Definition: lmgr.c:1243
static void RemoveLocalLock(LOCALLOCK *locallock)
Definition: lock.c:1472
static bool FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag, uint32 hashcode)
Definition: lock.c:2826
void AbortStrongLockAcquire(void)
Definition: lock.c:1856
static bool FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode)
Definition: lock.c:2747
static ProcWaitStatus WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner)
Definition: lock.c:1928
const char * GetLockmodeName(LOCKMETHODID lockmethodid, LOCKMODE mode)
Definition: lock.c:4220
#define EligibleForRelationFastPath(locktag, mode)
Definition: lock.c:264
static void BeginStrongLockAcquire(LOCALLOCK *locallock, uint32 fasthashcode)
Definition: lock.c:1820
bool LockCheckConflicts(LockMethod lockMethodTable, LOCKMODE lockmode, LOCK *lock, PROCLOCK *proclock)
Definition: lock.c:1525
static void FinishStrongLockAcquire(void)
Definition: lock.c:1846
@ LOCKTAG_OBJECT
Definition: lock.h:146
@ LOCKACQUIRE_ALREADY_CLEAR
Definition: lock.h:505
@ LOCKACQUIRE_OK
Definition: lock.h:503
@ LOCKACQUIRE_ALREADY_HELD
Definition: lock.h:504
@ LOCKACQUIRE_NOT_AVAIL
Definition: lock.h:502
#define RowExclusiveLock
Definition: lockdefs.h:38
void pfree(void *pointer)
Definition: mcxt.c:2147
static char * buf
Definition: pg_test_fsync.c:72
ProcWaitStatus
Definition: proc.h:124
@ PROC_WAIT_STATUS_OK
Definition: proc.h:125
@ PROC_WAIT_STATUS_WAITING
Definition: proc.h:126
@ PROC_WAIT_STATUS_ERROR
Definition: proc.h:127
ResourceOwner CurrentResourceOwner
Definition: resowner.c:173
ProcWaitStatus JoinWaitQueue(LOCALLOCK *locallock, LockMethod lockMethodTable, bool dontWait)
Definition: proc.c:1141
void GetLockHoldersAndWaiters(LOCALLOCK *locallock, StringInfo lock_holders_sbuf, StringInfo lock_waiters_sbuf, int *lockHoldersNum)
Definition: proc.c:1901
void LogAccessExclusiveLockPrepare(void)
Definition: standby.c:1448
void LogAccessExclusiveLock(Oid dbOid, Oid relOid)
Definition: standby.c:1431
void initStringInfo(StringInfo str)
Definition: stringinfo.c:97
bool lockCleared
Definition: lock.h:441
bool RecoveryInProgress(void)
Definition: xlog.c:6522
#define XLogStandbyInfoActive()
Definition: xlog.h:123
bool InRecovery
Definition: xlogutils.c:50

References AbortStrongLockAcquire(), AccessExclusiveLock, Assert(), BeginStrongLockAcquire(), buf, ConflictsWithRelationFastPath, LockMethodData::conflictTab, FastPathStrongRelationLockData::count, CurrentResourceOwner, StringInfoData::data, DeadLockReport(), DescribeLockTag(), dlist_delete(), EligibleForRelationFastPath, elog, ereport, errcode(), errdetail_log_plural(), errhint(), errmsg(), ERROR, FAST_PATH_REL_GROUP, FastPathGrantRelationLock(), FastPathLocalUseCounts, FastPathStrongLockHashPartition, FastPathStrongRelationLocks, FastPathTransferRelationLocks(), FinishStrongLockAcquire(), FP_LOCK_SLOTS_PER_GROUP, PGPROC::fpInfoLock, GetLockHoldersAndWaiters(), GetLockmodeName(), GrantLock(), GrantLockLocal(), HASH_ENTER, HASH_REMOVE, hash_search(), hash_search_with_hash_value(), LOCALLOCK::hashcode, PROCLOCK::holdMask, LOCALLOCK::holdsStrongLockCount, initStringInfo(), InRecovery, JoinWaitQueue(), lengthof, LOCALLOCKTAG::lock, LOCALLOCK::lock, LOCK_PRINT, LOCKACQUIRE_ALREADY_CLEAR, LOCKACQUIRE_ALREADY_HELD, LOCKACQUIRE_NOT_AVAIL, LOCKACQUIRE_OK, LOCKBIT_ON, LockCheckConflicts(), LOCALLOCK::lockCleared, LockHashPartitionLock, PROCLOCK::lockLink, LockMethodLocalHash, LockMethodProcLockHash, LockMethods, LockMethodData::lockModeNames, LOCALLOCK::lockOwners, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_lockmethodid, LOCKTAG_OBJECT, LOCKTAG_RELATION, LOCKTAG::locktag_type, LockTagHashCode(), LOG, LogAccessExclusiveLock(), LogAccessExclusiveLockPrepare(), LW_EXCLUSIVE, LW_SHARED, LWLockAcquire(), LWLockRelease(), LOCALLOCK::maxLockOwners, MemoryContextAlloc(), MemSet, LOCALLOCKTAG::mode, PROCLOCKTAG::myLock, MyProc, MyProcPid, LOCK::nGranted, LOCALLOCK::nLocks, LOCK::nRequested, LockMethodData::numLockModes, LOCALLOCK::numLockOwners, PANIC, pfree(), PROC_WAIT_STATUS_ERROR, PROC_WAIT_STATUS_OK, PROC_WAIT_STATUS_WAITING, PROCLOCK::procLink, LOCALLOCK::proclock, PROCLOCK_PRINT, ProcLockHashCode(), RecoveryInProgress(), RemoveLocalLock(), repalloc(), 
LOCK::requested, RowExclusiveLock, SetupLockInTable(), PROCLOCK::tag, LOCALLOCK::tag, TopMemoryContext, LOCK::waitMask, WaitOnLock(), and XLogStandbyInfoActive.

Referenced by ConditionalLockDatabaseObject(), ConditionalLockRelation(), ConditionalLockRelationOid(), ConditionalLockSharedObject(), ConditionalLockTuple(), ConditionalXactLockTableWait(), LockAcquire(), LockRelation(), LockRelationId(), and LockRelationOid().

◆ LockCheckConflicts()

bool LockCheckConflicts ( LockMethod  lockMethodTable,
LOCKMODE  lockmode,
LOCK lock,
PROCLOCK proclock 
)

Definition at line 1525 of file lock.c.

1529{
1530 int numLockModes = lockMethodTable->numLockModes;
1531 LOCKMASK myLocks;
1532 int conflictMask = lockMethodTable->conflictTab[lockmode];
1533 int conflictsRemaining[MAX_LOCKMODES];
1534 int totalConflictsRemaining = 0;
1535 dlist_iter proclock_iter;
1536 int i;
1537
1538 /*
1539 * first check for global conflicts: If no locks conflict with my request,
1540 * then I get the lock.
1541 *
1542 * Checking for conflict: lock->grantMask represents the types of
1543 * currently held locks. conflictTable[lockmode] has a bit set for each
1544 * type of lock that conflicts with request. Bitwise compare tells if
1545 * there is a conflict.
1546 */
1547 if (!(conflictMask & lock->grantMask))
1548 {
1549 PROCLOCK_PRINT("LockCheckConflicts: no conflict", proclock);
1550 return false;
1551 }
1552
1553 /*
1554 * Rats. Something conflicts. But it could still be my own lock, or a
1555 * lock held by another member of my locking group. First, figure out how
1556 * many conflicts remain after subtracting out any locks I hold myself.
1557 */
1558 myLocks = proclock->holdMask;
1559 for (i = 1; i <= numLockModes; i++)
1560 {
1561 if ((conflictMask & LOCKBIT_ON(i)) == 0)
1562 {
1563 conflictsRemaining[i] = 0;
1564 continue;
1565 }
1566 conflictsRemaining[i] = lock->granted[i];
1567 if (myLocks & LOCKBIT_ON(i))
1568 --conflictsRemaining[i];
1569 totalConflictsRemaining += conflictsRemaining[i];
1570 }
1571
1572 /* If no conflicts remain, we get the lock. */
1573 if (totalConflictsRemaining == 0)
1574 {
1575 PROCLOCK_PRINT("LockCheckConflicts: resolved (simple)", proclock);
1576 return false;
1577 }
1578
1579 /* If no group locking, it's definitely a conflict. */
1580 if (proclock->groupLeader == MyProc && MyProc->lockGroupLeader == NULL)
1581 {
1582 Assert(proclock->tag.myProc == MyProc);
1583 PROCLOCK_PRINT("LockCheckConflicts: conflicting (simple)",
1584 proclock);
1585 return true;
1586 }
1587
 1588 /*
 1589 * The relation extension lock conflicts even between members of the
 1590 * same lock group.
 1591 */
1592 {
1593 PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)",
1594 proclock);
1595 return true;
1596 }
1597
1598 /*
1599 * Locks held in conflicting modes by members of our own lock group are
1600 * not real conflicts; we can subtract those out and see if we still have
1601 * a conflict. This is O(N) in the number of processes holding or
1602 * awaiting locks on this object. We could improve that by making the
1603 * shared memory state more complex (and larger) but it doesn't seem worth
1604 * it.
1605 */
1606 dlist_foreach(proclock_iter, &lock->procLocks)
1607 {
1608 PROCLOCK *otherproclock =
1609 dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
1610
1611 if (proclock != otherproclock &&
1612 proclock->groupLeader == otherproclock->groupLeader &&
1613 (otherproclock->holdMask & conflictMask) != 0)
1614 {
1615 int intersectMask = otherproclock->holdMask & conflictMask;
1616
1617 for (i = 1; i <= numLockModes; i++)
1618 {
1619 if ((intersectMask & LOCKBIT_ON(i)) != 0)
1620 {
1621 if (conflictsRemaining[i] <= 0)
1622 elog(PANIC, "proclocks held do not match lock");
1623 conflictsRemaining[i]--;
1624 totalConflictsRemaining--;
1625 }
1626 }
1627
1628 if (totalConflictsRemaining == 0)
1629 {
1630 PROCLOCK_PRINT("LockCheckConflicts: resolved (group)",
1631 proclock);
1632 return false;
1633 }
1634 }
1635 }
1636
1637 /* Nope, it's a real conflict. */
1638 PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)", proclock);
1639 return true;
1640}
#define LOCK_LOCKTAG(lock)
Definition: lock.h:326

References Assert(), LockMethodData::conflictTab, dlist_iter::cur, dlist_container, dlist_foreach, elog, LOCK::granted, LOCK::grantMask, PROCLOCK::groupLeader, PROCLOCK::holdMask, i, LOCK_LOCKTAG, LOCKBIT_ON, PGPROC::lockGroupLeader, LOCKTAG_RELATION_EXTEND, MAX_LOCKMODES, MyProc, PROCLOCKTAG::myProc, LockMethodData::numLockModes, PANIC, PROCLOCK_PRINT, LOCK::procLocks, and PROCLOCK::tag.

Referenced by JoinWaitQueue(), LockAcquireExtended(), and ProcLockWakeup().

◆ LockHasWaiters()

bool LockHasWaiters ( const LOCKTAG locktag,
LOCKMODE  lockmode,
bool  sessionLock 
)

Definition at line 692 of file lock.c.

693{
694 LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
695 LockMethod lockMethodTable;
696 LOCALLOCKTAG localtag;
697 LOCALLOCK *locallock;
698 LOCK *lock;
699 PROCLOCK *proclock;
700 LWLock *partitionLock;
701 bool hasWaiters = false;
702
703 if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
704 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
705 lockMethodTable = LockMethods[lockmethodid];
706 if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
707 elog(ERROR, "unrecognized lock mode: %d", lockmode);
708
709#ifdef LOCK_DEBUG
710 if (LOCK_DEBUG_ENABLED(locktag))
711 elog(LOG, "LockHasWaiters: lock [%u,%u] %s",
712 locktag->locktag_field1, locktag->locktag_field2,
713 lockMethodTable->lockModeNames[lockmode]);
714#endif
715
716 /*
717 * Find the LOCALLOCK entry for this lock and lockmode
718 */
719 MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
720 localtag.lock = *locktag;
721 localtag.mode = lockmode;
722
724 &localtag,
725 HASH_FIND, NULL);
726
727 /*
728 * let the caller print its own error message, too. Do not ereport(ERROR).
729 */
730 if (!locallock || locallock->nLocks <= 0)
731 {
732 elog(WARNING, "you don't own a lock of type %s",
733 lockMethodTable->lockModeNames[lockmode]);
734 return false;
735 }
736
737 /*
738 * Check the shared lock table.
739 */
740 partitionLock = LockHashPartitionLock(locallock->hashcode);
741
742 LWLockAcquire(partitionLock, LW_SHARED);
743
744 /*
745 * We don't need to re-find the lock or proclock, since we kept their
746 * addresses in the locallock table, and they couldn't have been removed
747 * while we were holding a lock on them.
748 */
749 lock = locallock->lock;
750 LOCK_PRINT("LockHasWaiters: found", lock, lockmode);
751 proclock = locallock->proclock;
752 PROCLOCK_PRINT("LockHasWaiters: found", proclock);
753
754 /*
755 * Double-check that we are actually holding a lock of the type we want to
756 * release.
757 */
758 if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
759 {
760 PROCLOCK_PRINT("LockHasWaiters: WRONGTYPE", proclock);
761 LWLockRelease(partitionLock);
762 elog(WARNING, "you don't own a lock of type %s",
763 lockMethodTable->lockModeNames[lockmode]);
764 RemoveLocalLock(locallock);
765 return false;
766 }
767
768 /*
769 * Do the checking.
770 */
771 if ((lockMethodTable->conflictTab[lockmode] & lock->waitMask) != 0)
772 hasWaiters = true;
773
774 LWLockRelease(partitionLock);
775
776 return hasWaiters;
777}
#define WARNING
Definition: elog.h:36

References LockMethodData::conflictTab, elog, ERROR, HASH_FIND, hash_search(), LOCALLOCK::hashcode, PROCLOCK::holdMask, lengthof, LOCALLOCKTAG::lock, LOCALLOCK::lock, LOCK_PRINT, LOCKBIT_ON, LockHashPartitionLock, LockMethodLocalHash, LockMethods, LockMethodData::lockModeNames, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_lockmethodid, LOG, LW_SHARED, LWLockAcquire(), LWLockRelease(), MemSet, LOCALLOCKTAG::mode, LOCALLOCK::nLocks, LockMethodData::numLockModes, LOCALLOCK::proclock, PROCLOCK_PRINT, RemoveLocalLock(), LOCK::waitMask, and WARNING.

Referenced by LockHasWaitersRelation().

◆ LockHeldByMe()

bool LockHeldByMe ( const LOCKTAG locktag,
LOCKMODE  lockmode,
bool  orstronger 
)

Definition at line 639 of file lock.c.

641{
642 LOCALLOCKTAG localtag;
643 LOCALLOCK *locallock;
644
645 /*
646 * See if there is a LOCALLOCK entry for this lock and lockmode
647 */
648 MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
649 localtag.lock = *locktag;
650 localtag.mode = lockmode;
651
653 &localtag,
654 HASH_FIND, NULL);
655
656 if (locallock && locallock->nLocks > 0)
657 return true;
658
659 if (orstronger)
660 {
661 LOCKMODE slockmode;
662
663 for (slockmode = lockmode + 1;
664 slockmode <= MaxLockMode;
665 slockmode++)
666 {
667 if (LockHeldByMe(locktag, slockmode, false))
668 return true;
669 }
670 }
671
672 return false;
673}
bool LockHeldByMe(const LOCKTAG *locktag, LOCKMODE lockmode, bool orstronger)
Definition: lock.c:639
#define MaxLockMode
Definition: lockdefs.h:45

References HASH_FIND, hash_search(), LOCALLOCKTAG::lock, LockHeldByMe(), LockMethodLocalHash, MaxLockMode, MemSet, LOCALLOCKTAG::mode, and LOCALLOCK::nLocks.

Referenced by CheckRelationLockedByMe(), CheckRelationOidLockedByMe(), and LockHeldByMe().

◆ LockManagerShmemInit()

void LockManagerShmemInit ( void  )

Definition at line 440 of file lock.c.

441{
442 HASHCTL info;
443 long init_table_size,
444 max_table_size;
445 bool found;
446
447 /*
448 * Compute init/max size to request for lock hashtables. Note these
449 * calculations must agree with LockManagerShmemSize!
450 */
451 max_table_size = NLOCKENTS();
452 init_table_size = max_table_size / 2;
453
454 /*
455 * Allocate hash table for LOCK structs. This stores per-locked-object
456 * information.
457 */
458 info.keysize = sizeof(LOCKTAG);
459 info.entrysize = sizeof(LOCK);
461
462 LockMethodLockHash = ShmemInitHash("LOCK hash",
463 init_table_size,
464 max_table_size,
465 &info,
467
468 /* Assume an average of 2 holders per lock */
469 max_table_size *= 2;
470 init_table_size *= 2;
471
472 /*
473 * Allocate hash table for PROCLOCK structs. This stores
474 * per-lock-per-holder information.
475 */
476 info.keysize = sizeof(PROCLOCKTAG);
477 info.entrysize = sizeof(PROCLOCK);
478 info.hash = proclock_hash;
480
481 LockMethodProcLockHash = ShmemInitHash("PROCLOCK hash",
482 init_table_size,
483 max_table_size,
484 &info,
486
487 /*
488 * Allocate fast-path structures.
489 */
491 ShmemInitStruct("Fast Path Strong Relation Lock Data",
492 sizeof(FastPathStrongRelationLockData), &found);
493 if (!found)
495}
#define HASH_FUNCTION
Definition: hsearch.h:98
#define HASH_PARTITION
Definition: hsearch.h:92
#define NLOCKENTS()
Definition: lock.c:56
static uint32 proclock_hash(const void *key, Size keysize)
Definition: lock.c:570
struct LOCK LOCK
struct PROCLOCK PROCLOCK
struct PROCLOCKTAG PROCLOCKTAG
HTAB * ShmemInitHash(const char *name, long init_size, long max_size, HASHCTL *infoP, int hash_flags)
Definition: shmem.c:332
void * ShmemInitStruct(const char *name, Size size, bool *foundPtr)
Definition: shmem.c:387
#define SpinLockInit(lock)
Definition: spin.h:57
HashValueFunc hash
Definition: hsearch.h:78
long num_partitions
Definition: hsearch.h:68

References HASHCTL::entrysize, FastPathStrongRelationLocks, HASHCTL::hash, HASH_BLOBS, HASH_ELEM, HASH_FUNCTION, HASH_PARTITION, HASHCTL::keysize, LockMethodLockHash, LockMethodProcLockHash, FastPathStrongRelationLockData::mutex, NLOCKENTS, NUM_LOCK_PARTITIONS, HASHCTL::num_partitions, proclock_hash(), ShmemInitHash(), ShmemInitStruct(), and SpinLockInit.

Referenced by CreateOrAttachShmemStructs().

◆ LockManagerShmemSize()

Size LockManagerShmemSize ( void  )

Definition at line 3723 of file lock.c.

3724{
3725 Size size = 0;
3726 long max_table_size;
3727
3728 /* lock hash table */
3729 max_table_size = NLOCKENTS();
3730 size = add_size(size, hash_estimate_size(max_table_size, sizeof(LOCK)));
3731
3732 /* proclock hash table */
3733 max_table_size *= 2;
3734 size = add_size(size, hash_estimate_size(max_table_size, sizeof(PROCLOCK)));
3735
3736 /*
3737 * Since NLOCKENTS is only an estimate, add 10% safety margin.
3738 */
3739 size = add_size(size, size / 10);
3740
3741 return size;
3742}
size_t Size
Definition: c.h:576
Size hash_estimate_size(long num_entries, Size entrysize)
Definition: dynahash.c:783
Size add_size(Size s1, Size s2)
Definition: shmem.c:493

References add_size(), hash_estimate_size(), and NLOCKENTS.

Referenced by CalculateShmemSize().

◆ LockReassignCurrentOwner()

void LockReassignCurrentOwner ( LOCALLOCK **  locallocks,
int  nlocks 
)

Definition at line 2671 of file lock.c.

2672{
2674
2675 Assert(parent != NULL);
2676
2677 if (locallocks == NULL)
2678 {
2679 HASH_SEQ_STATUS status;
2680 LOCALLOCK *locallock;
2681
2683
2684 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2685 LockReassignOwner(locallock, parent);
2686 }
2687 else
2688 {
2689 int i;
2690
2691 for (i = nlocks - 1; i >= 0; i--)
2692 LockReassignOwner(locallocks[i], parent);
2693 }
2694}
static void LockReassignOwner(LOCALLOCK *locallock, ResourceOwner parent)
Definition: lock.c:2701
ResourceOwner ResourceOwnerGetParent(ResourceOwner owner)
Definition: resowner.c:905

References Assert(), CurrentResourceOwner, hash_seq_init(), hash_seq_search(), i, LockMethodLocalHash, LockReassignOwner(), and ResourceOwnerGetParent().

Referenced by ResourceOwnerReleaseInternal().

◆ LockReassignOwner()

static void LockReassignOwner ( LOCALLOCK locallock,
ResourceOwner  parent 
)
static

Definition at line 2701 of file lock.c.

2702{
2703 LOCALLOCKOWNER *lockOwners;
2704 int i;
2705 int ic = -1;
2706 int ip = -1;
2707
2708 /*
2709 * Scan to see if there are any locks belonging to current owner or its
2710 * parent
2711 */
2712 lockOwners = locallock->lockOwners;
2713 for (i = locallock->numLockOwners - 1; i >= 0; i--)
2714 {
2715 if (lockOwners[i].owner == CurrentResourceOwner)
2716 ic = i;
2717 else if (lockOwners[i].owner == parent)
2718 ip = i;
2719 }
2720
2721 if (ic < 0)
2722 return; /* no current locks */
2723
2724 if (ip < 0)
2725 {
2726 /* Parent has no slot, so just give it the child's slot */
2727 lockOwners[ic].owner = parent;
2728 ResourceOwnerRememberLock(parent, locallock);
2729 }
2730 else
2731 {
2732 /* Merge child's count with parent's */
2733 lockOwners[ip].nLocks += lockOwners[ic].nLocks;
2734 /* compact out unused slot */
2735 locallock->numLockOwners--;
2736 if (ic < locallock->numLockOwners)
2737 lockOwners[ic] = lockOwners[locallock->numLockOwners];
2738 }
2740}
void ResourceOwnerForgetLock(ResourceOwner owner, LOCALLOCK *locallock)
Definition: resowner.c:1082

References CurrentResourceOwner, i, LOCALLOCK::lockOwners, LOCALLOCKOWNER::nLocks, LOCALLOCK::numLockOwners, LOCALLOCKOWNER::owner, ResourceOwnerForgetLock(), and ResourceOwnerRememberLock().

Referenced by LockReassignCurrentOwner().

◆ LockRefindAndRelease()

static void LockRefindAndRelease ( LockMethod  lockMethodTable,
PGPROC proc,
LOCKTAG locktag,
LOCKMODE  lockmode,
bool  decrement_strong_lock_count 
)
static

Definition at line 3251 of file lock.c.

3254{
3255 LOCK *lock;
3256 PROCLOCK *proclock;
3257 PROCLOCKTAG proclocktag;
3258 uint32 hashcode;
3259 uint32 proclock_hashcode;
3260 LWLock *partitionLock;
3261 bool wakeupNeeded;
3262
3263 hashcode = LockTagHashCode(locktag);
3264 partitionLock = LockHashPartitionLock(hashcode);
3265
3266 LWLockAcquire(partitionLock, LW_EXCLUSIVE);
3267
3268 /*
3269 * Re-find the lock object (it had better be there).
3270 */
3272 locktag,
3273 hashcode,
3274 HASH_FIND,
3275 NULL);
3276 if (!lock)
3277 elog(PANIC, "failed to re-find shared lock object");
3278
3279 /*
3280 * Re-find the proclock object (ditto).
3281 */
3282 proclocktag.myLock = lock;
3283 proclocktag.myProc = proc;
3284
3285 proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
3286
3288 &proclocktag,
3289 proclock_hashcode,
3290 HASH_FIND,
3291 NULL);
3292 if (!proclock)
3293 elog(PANIC, "failed to re-find shared proclock object");
3294
3295 /*
3296 * Double-check that we are actually holding a lock of the type we want to
3297 * release.
3298 */
3299 if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
3300 {
3301 PROCLOCK_PRINT("lock_twophase_postcommit: WRONGTYPE", proclock);
3302 LWLockRelease(partitionLock);
3303 elog(WARNING, "you don't own a lock of type %s",
3304 lockMethodTable->lockModeNames[lockmode]);
3305 return;
3306 }
3307
3308 /*
3309 * Do the releasing. CleanUpLock will waken any now-wakable waiters.
3310 */
3311 wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
3312
3313 CleanUpLock(lock, proclock,
3314 lockMethodTable, hashcode,
3315 wakeupNeeded);
3316
3317 LWLockRelease(partitionLock);
3318
3319 /*
3320 * Decrement strong lock count. This logic is needed only for 2PC.
3321 */
3322 if (decrement_strong_lock_count
3323 && ConflictsWithRelationFastPath(locktag, lockmode))
3324 {
3325 uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
3326
3328 Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
3329 FastPathStrongRelationLocks->count[fasthashcode]--;
3331 }
3332}
static bool UnGrantLock(LOCK *lock, LOCKMODE lockmode, PROCLOCK *proclock, LockMethod lockMethodTable)
Definition: lock.c:1677
static void CleanUpLock(LOCK *lock, PROCLOCK *proclock, LockMethod lockMethodTable, uint32 hashcode, bool wakeupNeeded)
Definition: lock.c:1734

References Assert(), CleanUpLock(), ConflictsWithRelationFastPath, FastPathStrongRelationLockData::count, elog, FastPathStrongLockHashPartition, FastPathStrongRelationLocks, HASH_FIND, hash_search_with_hash_value(), PROCLOCK::holdMask, LOCKBIT_ON, LockHashPartitionLock, LockMethodLockHash, LockMethodProcLockHash, LockMethodData::lockModeNames, LockTagHashCode(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), FastPathStrongRelationLockData::mutex, PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, PANIC, PROCLOCK_PRINT, ProcLockHashCode(), SpinLockAcquire, SpinLockRelease, UnGrantLock(), and WARNING.

Referenced by lock_twophase_postcommit(), LockReleaseAll(), and VirtualXactLockTableCleanup().

◆ LockRelease()

bool LockRelease ( const LOCKTAG locktag,
LOCKMODE  lockmode,
bool  sessionLock 
)

Definition at line 2067 of file lock.c.

2068{
2069 LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
2070 LockMethod lockMethodTable;
2071 LOCALLOCKTAG localtag;
2072 LOCALLOCK *locallock;
2073 LOCK *lock;
2074 PROCLOCK *proclock;
2075 LWLock *partitionLock;
2076 bool wakeupNeeded;
2077
2078 if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2079 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2080 lockMethodTable = LockMethods[lockmethodid];
2081 if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
2082 elog(ERROR, "unrecognized lock mode: %d", lockmode);
2083
2084#ifdef LOCK_DEBUG
2085 if (LOCK_DEBUG_ENABLED(locktag))
2086 elog(LOG, "LockRelease: lock [%u,%u] %s",
2087 locktag->locktag_field1, locktag->locktag_field2,
2088 lockMethodTable->lockModeNames[lockmode]);
2089#endif
2090
2091 /*
2092 * Find the LOCALLOCK entry for this lock and lockmode
2093 */
2094 MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
2095 localtag.lock = *locktag;
2096 localtag.mode = lockmode;
2097
2099 &localtag,
2100 HASH_FIND, NULL);
2101
2102 /*
2103 * let the caller print its own error message, too. Do not ereport(ERROR).
2104 */
2105 if (!locallock || locallock->nLocks <= 0)
2106 {
2107 elog(WARNING, "you don't own a lock of type %s",
2108 lockMethodTable->lockModeNames[lockmode]);
2109 return false;
2110 }
2111
2112 /*
2113 * Decrease the count for the resource owner.
2114 */
2115 {
2116 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
2117 ResourceOwner owner;
2118 int i;
2119
2120 /* Identify owner for lock */
2121 if (sessionLock)
2122 owner = NULL;
2123 else
2124 owner = CurrentResourceOwner;
2125
2126 for (i = locallock->numLockOwners - 1; i >= 0; i--)
2127 {
2128 if (lockOwners[i].owner == owner)
2129 {
2130 Assert(lockOwners[i].nLocks > 0);
2131 if (--lockOwners[i].nLocks == 0)
2132 {
2133 if (owner != NULL)
2134 ResourceOwnerForgetLock(owner, locallock);
2135 /* compact out unused slot */
2136 locallock->numLockOwners--;
2137 if (i < locallock->numLockOwners)
2138 lockOwners[i] = lockOwners[locallock->numLockOwners];
2139 }
2140 break;
2141 }
2142 }
2143 if (i < 0)
2144 {
2145 /* don't release a lock belonging to another owner */
2146 elog(WARNING, "you don't own a lock of type %s",
2147 lockMethodTable->lockModeNames[lockmode]);
2148 return false;
2149 }
2150 }
2151
2152 /*
2153 * Decrease the total local count. If we're still holding the lock, we're
2154 * done.
2155 */
2156 locallock->nLocks--;
2157
2158 if (locallock->nLocks > 0)
2159 return true;
2160
2161 /*
2162 * At this point we can no longer suppose we are clear of invalidation
2163 * messages related to this lock. Although we'll delete the LOCALLOCK
2164 * object before any intentional return from this routine, it seems worth
2165 * the trouble to explicitly reset lockCleared right now, just in case
2166 * some error prevents us from deleting the LOCALLOCK.
2167 */
2168 locallock->lockCleared = false;
2169
2170 /* Attempt fast release of any lock eligible for the fast path. */
2171 if (EligibleForRelationFastPath(locktag, lockmode) &&
2173 {
2174 bool released;
2175
2176 /*
2177 * We might not find the lock here, even if we originally entered it
2178 * here. Another backend may have moved it to the main table.
2179 */
2181 released = FastPathUnGrantRelationLock(locktag->locktag_field2,
2182 lockmode);
2184 if (released)
2185 {
2186 RemoveLocalLock(locallock);
2187 return true;
2188 }
2189 }
2190
2191 /*
2192 * Otherwise we've got to mess with the shared lock table.
2193 */
2194 partitionLock = LockHashPartitionLock(locallock->hashcode);
2195
2196 LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2197
2198 /*
2199 * Normally, we don't need to re-find the lock or proclock, since we kept
2200 * their addresses in the locallock table, and they couldn't have been
2201 * removed while we were holding a lock on them. But it's possible that
2202 * the lock was taken fast-path and has since been moved to the main hash
2203 * table by another backend, in which case we will need to look up the
2204 * objects here. We assume the lock field is NULL if so.
2205 */
2206 lock = locallock->lock;
2207 if (!lock)
2208 {
2209 PROCLOCKTAG proclocktag;
2210
2211 Assert(EligibleForRelationFastPath(locktag, lockmode));
2213 locktag,
2214 locallock->hashcode,
2215 HASH_FIND,
2216 NULL);
2217 if (!lock)
2218 elog(ERROR, "failed to re-find shared lock object");
2219 locallock->lock = lock;
2220
2221 proclocktag.myLock = lock;
2222 proclocktag.myProc = MyProc;
2224 &proclocktag,
2225 HASH_FIND,
2226 NULL);
2227 if (!locallock->proclock)
2228 elog(ERROR, "failed to re-find shared proclock object");
2229 }
2230 LOCK_PRINT("LockRelease: found", lock, lockmode);
2231 proclock = locallock->proclock;
2232 PROCLOCK_PRINT("LockRelease: found", proclock);
2233
2234 /*
2235 * Double-check that we are actually holding a lock of the type we want to
2236 * release.
2237 */
2238 if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
2239 {
2240 PROCLOCK_PRINT("LockRelease: WRONGTYPE", proclock);
2241 LWLockRelease(partitionLock);
2242 elog(WARNING, "you don't own a lock of type %s",
2243 lockMethodTable->lockModeNames[lockmode]);
2244 RemoveLocalLock(locallock);
2245 return false;
2246 }
2247
2248 /*
2249 * Do the releasing. CleanUpLock will waken any now-wakable waiters.
2250 */
2251 wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
2252
2253 CleanUpLock(lock, proclock,
2254 lockMethodTable, locallock->hashcode,
2255 wakeupNeeded);
2256
2257 LWLockRelease(partitionLock);
2258
2259 RemoveLocalLock(locallock);
2260 return true;
2261}
static bool FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode)
Definition: lock.c:2790

References Assert(), CleanUpLock(), CurrentResourceOwner, EligibleForRelationFastPath, elog, ERROR, FAST_PATH_REL_GROUP, FastPathLocalUseCounts, FastPathUnGrantRelationLock(), PGPROC::fpInfoLock, HASH_FIND, hash_search(), hash_search_with_hash_value(), LOCALLOCK::hashcode, PROCLOCK::holdMask, i, lengthof, LOCALLOCKTAG::lock, LOCALLOCK::lock, LOCK_PRINT, LOCKBIT_ON, LOCALLOCK::lockCleared, LockHashPartitionLock, LockMethodLocalHash, LockMethodLockHash, LockMethodProcLockHash, LockMethods, LockMethodData::lockModeNames, LOCALLOCK::lockOwners, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_lockmethodid, LOG, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), MemSet, LOCALLOCKTAG::mode, PROCLOCKTAG::myLock, MyProc, PROCLOCKTAG::myProc, LOCALLOCK::nLocks, LockMethodData::numLockModes, LOCALLOCK::numLockOwners, LOCALLOCK::proclock, PROCLOCK_PRINT, RemoveLocalLock(), ResourceOwnerForgetLock(), UnGrantLock(), and WARNING.

Referenced by ConditionalXactLockTableWait(), pg_advisory_unlock_int4(), pg_advisory_unlock_int8(), pg_advisory_unlock_shared_int4(), pg_advisory_unlock_shared_int8(), ReleaseLockIfHeld(), SearchSysCacheLocked1(), SpeculativeInsertionLockRelease(), SpeculativeInsertionWait(), StandbyReleaseXidEntryLocks(), UnlockApplyTransactionForSession(), UnlockDatabaseObject(), UnlockPage(), UnlockRelation(), UnlockRelationForExtension(), UnlockRelationId(), UnlockRelationIdForSession(), UnlockRelationOid(), UnlockSharedObject(), UnlockSharedObjectForSession(), UnlockTuple(), VirtualXactLock(), XactLockForVirtualXact(), XactLockTableDelete(), and XactLockTableWait().

◆ LockReleaseAll()

void LockReleaseAll ( LOCKMETHODID  lockmethodid,
bool  allLocks 
)

Definition at line 2272 of file lock.c.

2273{
2274 HASH_SEQ_STATUS status;
2275 LockMethod lockMethodTable;
2276 int i,
2277 numLockModes;
2278 LOCALLOCK *locallock;
2279 LOCK *lock;
2280 int partition;
2281 bool have_fast_path_lwlock = false;
2282
2283 if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2284 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2285 lockMethodTable = LockMethods[lockmethodid];
2286
2287#ifdef LOCK_DEBUG
2288 if (*(lockMethodTable->trace_flag))
2289 elog(LOG, "LockReleaseAll: lockmethod=%d", lockmethodid);
2290#endif
2291
2292 /*
2293 * Get rid of our fast-path VXID lock, if appropriate. Note that this is
2294 * the only way that the lock we hold on our own VXID can ever get
2295 * released: it is always and only released when a toplevel transaction
2296 * ends.
2297 */
2298 if (lockmethodid == DEFAULT_LOCKMETHOD)
2300
2301 numLockModes = lockMethodTable->numLockModes;
2302
2303 /*
2304 * First we run through the locallock table and get rid of unwanted
2305 * entries, then we scan the process's proclocks and get rid of those. We
2306 * do this separately because we may have multiple locallock entries
2307 * pointing to the same proclock, and we daren't end up with any dangling
2308 * pointers. Fast-path locks are cleaned up during the locallock table
2309 * scan, though.
2310 */
2312
2313 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2314 {
2315 /*
2316 * If the LOCALLOCK entry is unused, something must've gone wrong
2317 * while trying to acquire this lock. Just forget the local entry.
2318 */
2319 if (locallock->nLocks == 0)
2320 {
2321 RemoveLocalLock(locallock);
2322 continue;
2323 }
2324
2325 /* Ignore items that are not of the lockmethod to be removed */
2326 if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
2327 continue;
2328
2329 /*
2330 * If we are asked to release all locks, we can just zap the entry.
2331 * Otherwise, must scan to see if there are session locks. We assume
2332 * there is at most one lockOwners entry for session locks.
2333 */
2334 if (!allLocks)
2335 {
2336 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
2337
2338 /* If session lock is above array position 0, move it down to 0 */
2339 for (i = 0; i < locallock->numLockOwners; i++)
2340 {
2341 if (lockOwners[i].owner == NULL)
2342 lockOwners[0] = lockOwners[i];
2343 else
2344 ResourceOwnerForgetLock(lockOwners[i].owner, locallock);
2345 }
2346
2347 if (locallock->numLockOwners > 0 &&
2348 lockOwners[0].owner == NULL &&
2349 lockOwners[0].nLocks > 0)
2350 {
2351 /* Fix the locallock to show just the session locks */
2352 locallock->nLocks = lockOwners[0].nLocks;
2353 locallock->numLockOwners = 1;
2354 /* We aren't deleting this locallock, so done */
2355 continue;
2356 }
2357 else
2358 locallock->numLockOwners = 0;
2359 }
2360
2361#ifdef USE_ASSERT_CHECKING
2362
2363 /*
2364 * Tuple locks are currently held only for short durations within a
2365 * transaction. Check that we didn't forget to release one.
2366 */
2367 if (LOCALLOCK_LOCKTAG(*locallock) == LOCKTAG_TUPLE && !allLocks)
2368 elog(WARNING, "tuple lock held at commit");
2369#endif
2370
2371 /*
2372 * If the lock or proclock pointers are NULL, this lock was taken via
2373 * the relation fast-path (and is not known to have been transferred).
2374 */
2375 if (locallock->proclock == NULL || locallock->lock == NULL)
2376 {
2377 LOCKMODE lockmode = locallock->tag.mode;
2378 Oid relid;
2379
2380 /* Verify that a fast-path lock is what we've got. */
2381 if (!EligibleForRelationFastPath(&locallock->tag.lock, lockmode))
2382 elog(PANIC, "locallock table corrupted");
2383
2384 /*
2385 * If we don't currently hold the LWLock that protects our
2386 * fast-path data structures, we must acquire it before attempting
2387 * to release the lock via the fast-path. We will continue to
2388 * hold the LWLock until we're done scanning the locallock table,
2389 * unless we hit a transferred fast-path lock. (XXX is this
2390 * really such a good idea? There could be a lot of entries ...)
2391 */
2392 if (!have_fast_path_lwlock)
2393 {
2395 have_fast_path_lwlock = true;
2396 }
2397
2398 /* Attempt fast-path release. */
2399 relid = locallock->tag.lock.locktag_field2;
2400 if (FastPathUnGrantRelationLock(relid, lockmode))
2401 {
2402 RemoveLocalLock(locallock);
2403 continue;
2404 }
2405
2406 /*
2407 * Our lock, originally taken via the fast path, has been
2408 * transferred to the main lock table. That's going to require
2409 * some extra work, so release our fast-path lock before starting.
2410 */
2412 have_fast_path_lwlock = false;
2413
2414 /*
2415 * Now dump the lock. We haven't got a pointer to the LOCK or
2416 * PROCLOCK in this case, so we have to handle this a bit
2417 * differently than a normal lock release. Unfortunately, this
2418 * requires an extra LWLock acquire-and-release cycle on the
2419 * partitionLock, but hopefully it shouldn't happen often.
2420 */
2421 LockRefindAndRelease(lockMethodTable, MyProc,
2422 &locallock->tag.lock, lockmode, false);
2423 RemoveLocalLock(locallock);
2424 continue;
2425 }
2426
2427 /* Mark the proclock to show we need to release this lockmode */
2428 if (locallock->nLocks > 0)
2429 locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
2430
2431 /* And remove the locallock hashtable entry */
2432 RemoveLocalLock(locallock);
2433 }
2434
2435 /* Done with the fast-path data structures */
2436 if (have_fast_path_lwlock)
2438
2439 /*
2440 * Now, scan each lock partition separately.
2441 */
2442 for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
2443 {
2444 LWLock *partitionLock;
2445 dlist_head *procLocks = &MyProc->myProcLocks[partition];
2446 dlist_mutable_iter proclock_iter;
2447
2448 partitionLock = LockHashPartitionLockByIndex(partition);
2449
2450 /*
2451 * If the proclock list for this partition is empty, we can skip
2452 * acquiring the partition lock. This optimization is trickier than
2453 * it looks, because another backend could be in process of adding
2454 * something to our proclock list due to promoting one of our
2455 * fast-path locks. However, any such lock must be one that we
2456 * decided not to delete above, so it's okay to skip it again now;
2457 * we'd just decide not to delete it again. We must, however, be
2458 * careful to re-fetch the list header once we've acquired the
2459 * partition lock, to be sure we have a valid, up-to-date pointer.
2460 * (There is probably no significant risk if pointer fetch/store is
2461 * atomic, but we don't wish to assume that.)
2462 *
2463 * XXX This argument assumes that the locallock table correctly
2464 * represents all of our fast-path locks. While allLocks mode
2465 * guarantees to clean up all of our normal locks regardless of the
2466 * locallock situation, we lose that guarantee for fast-path locks.
2467 * This is not ideal.
2468 */
2469 if (dlist_is_empty(procLocks))
2470 continue; /* needn't examine this partition */
2471
2472 LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2473
2474 dlist_foreach_modify(proclock_iter, procLocks)
2475 {
2476 PROCLOCK *proclock = dlist_container(PROCLOCK, procLink, proclock_iter.cur);
2477 bool wakeupNeeded = false;
2478
2479 Assert(proclock->tag.myProc == MyProc);
2480
2481 lock = proclock->tag.myLock;
2482
2483 /* Ignore items that are not of the lockmethod to be removed */
2484 if (LOCK_LOCKMETHOD(*lock) != lockmethodid)
2485 continue;
2486
2487 /*
2488 * In allLocks mode, force release of all locks even if locallock
2489 * table had problems
2490 */
2491 if (allLocks)
2492 proclock->releaseMask = proclock->holdMask;
2493 else
2494 Assert((proclock->releaseMask & ~proclock->holdMask) == 0);
2495
2496 /*
2497 * Ignore items that have nothing to be released, unless they have
2498 * holdMask == 0 and are therefore recyclable
2499 */
2500 if (proclock->releaseMask == 0 && proclock->holdMask != 0)
2501 continue;
2502
2503 PROCLOCK_PRINT("LockReleaseAll", proclock);
2504 LOCK_PRINT("LockReleaseAll", lock, 0);
2505 Assert(lock->nRequested >= 0);
2506 Assert(lock->nGranted >= 0);
2507 Assert(lock->nGranted <= lock->nRequested);
2508 Assert((proclock->holdMask & ~lock->grantMask) == 0);
2509
2510 /*
2511 * Release the previously-marked lock modes
2512 */
2513 for (i = 1; i <= numLockModes; i++)
2514 {
2515 if (proclock->releaseMask & LOCKBIT_ON(i))
2516 wakeupNeeded |= UnGrantLock(lock, i, proclock,
2517 lockMethodTable);
2518 }
2519 Assert((lock->nRequested >= 0) && (lock->nGranted >= 0));
2520 Assert(lock->nGranted <= lock->nRequested);
2521 LOCK_PRINT("LockReleaseAll: updated", lock, 0);
2522
2523 proclock->releaseMask = 0;
2524
2525 /* CleanUpLock will wake up waiters if needed. */
2526 CleanUpLock(lock, proclock,
2527 lockMethodTable,
2528 LockTagHashCode(&lock->tag),
2529 wakeupNeeded);
2530 } /* loop over PROCLOCKs within this partition */
2531
2532 LWLockRelease(partitionLock);
2533 } /* loop over partitions */
2534
2535#ifdef LOCK_DEBUG
2536 if (*(lockMethodTable->trace_flag))
2537 elog(LOG, "LockReleaseAll done");
2538#endif
2539}
#define dlist_foreach_modify(iter, lhead)
Definition: ilist.h:640
void VirtualXactLockTableCleanup(void)
Definition: lock.c:4610
@ LOCKTAG_TUPLE
Definition: lock.h:142
#define LOCALLOCK_LOCKMETHOD(llock)
Definition: lock.h:444
const bool * trace_flag
Definition: lock.h:114
dlist_node * cur
Definition: ilist.h:200

References Assert(), CleanUpLock(), dlist_mutable_iter::cur, DEFAULT_LOCKMETHOD, dlist_container, dlist_foreach_modify, dlist_is_empty(), EligibleForRelationFastPath, elog, ERROR, FastPathUnGrantRelationLock(), PGPROC::fpInfoLock, LOCK::grantMask, hash_seq_init(), hash_seq_search(), PROCLOCK::holdMask, i, lengthof, LOCALLOCK_LOCKMETHOD, LOCALLOCK_LOCKTAG, LOCALLOCKTAG::lock, LOCALLOCK::lock, LOCK_LOCKMETHOD, LOCK_PRINT, LOCKBIT_ON, LockHashPartitionLockByIndex, LockMethodLocalHash, LockMethods, LOCALLOCK::lockOwners, LockRefindAndRelease(), LOCKTAG::locktag_field2, LOCKTAG_TUPLE, LockTagHashCode(), LOG, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), LOCALLOCKTAG::mode, PROCLOCKTAG::myLock, MyProc, PROCLOCKTAG::myProc, PGPROC::myProcLocks, LOCK::nGranted, LOCALLOCKOWNER::nLocks, LOCALLOCK::nLocks, LOCK::nRequested, NUM_LOCK_PARTITIONS, LockMethodData::numLockModes, LOCALLOCK::numLockOwners, LOCALLOCKOWNER::owner, PANIC, LOCALLOCK::proclock, PROCLOCK_PRINT, PROCLOCK::releaseMask, RemoveLocalLock(), ResourceOwnerForgetLock(), LOCK::tag, PROCLOCK::tag, LOCALLOCK::tag, LockMethodData::trace_flag, UnGrantLock(), VirtualXactLockTableCleanup(), and WARNING.

Referenced by DiscardAll(), logicalrep_worker_onexit(), ProcReleaseLocks(), and ShutdownPostgres().

◆ LockReleaseCurrentOwner()

void LockReleaseCurrentOwner ( LOCALLOCK **  locallocks,
int  nlocks 
)

Definition at line 2576 of file lock.c.

/*
 * LockReleaseCurrentOwner
 *		Release all locks belonging to the current resource owner.
 *
 * If the caller already knows the affected locks, it passes them as the
 * array (locallocks, nlocks); otherwise locallocks == NULL and the whole
 * backend-local locallock table is scanned.  Each candidate is handed to
 * ReleaseLockIfHeld() with sessionLock = false, i.e. only holds belonging
 * to CurrentResourceOwner are released (cf. ReleaseLockIfHeld below).
 *
 * NOTE(review): this Doxygen rendering dropped the hyperlinked statement at
 * original line 2583 — per the cross-reference list, presumably
 * hash_seq_init(&status, LockMethodLocalHash); confirm against lock.c.
 */
2577{
2578 if (locallocks == NULL)
2579 {
2580 HASH_SEQ_STATUS status;
2581 LOCALLOCK *locallock;
2582
2584
2585 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2586 ReleaseLockIfHeld(locallock, false);
2587 }
2588 else
2589 {
 /* Walk the caller-supplied array in reverse order. */
2590 int i;
2591
2592 for (i = nlocks - 1; i >= 0; i--)
2593 ReleaseLockIfHeld(locallocks[i], false);
2594 }
2595}
static void ReleaseLockIfHeld(LOCALLOCK *locallock, bool sessionLock)
Definition: lock.c:2611

References hash_seq_init(), hash_seq_search(), i, LockMethodLocalHash, and ReleaseLockIfHeld().

Referenced by ResourceOwnerReleaseInternal().

◆ LockReleaseSession()

void LockReleaseSession ( LOCKMETHODID  lockmethodid)

Definition at line 2546 of file lock.c.

/*
 * LockReleaseSession
 *		Release all session-level locks of the given lock method.
 *
 * Validates lockmethodid (elog(ERROR) if out of range), then scans the
 * backend-local locallock table and calls ReleaseLockIfHeld() with
 * sessionLock = true for every entry of the matching lock method.
 *
 * NOTE(review): this Doxygen rendering dropped the hyperlinked statement at
 * original line 2554 — per the cross-reference list, presumably
 * hash_seq_init(&status, LockMethodLocalHash); confirm against lock.c.
 */
2547{
2548 HASH_SEQ_STATUS status;
2549 LOCALLOCK *locallock;
2550
2551 if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2552 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2553
2555
2556 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2557 {
2558 /* Ignore items that are not of the specified lock method */
2559 if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
2560 continue;
2561
2562 ReleaseLockIfHeld(locallock, true);
2563 }
2564}

References elog, ERROR, hash_seq_init(), hash_seq_search(), lengthof, LOCALLOCK_LOCKMETHOD, LockMethodLocalHash, LockMethods, and ReleaseLockIfHeld().

Referenced by pg_advisory_unlock_all().

◆ LockTagHashCode()

uint32 LockTagHashCode ( const LOCKTAG locktag)

Definition at line 553 of file lock.c.

/*
 * LockTagHashCode
 *		Compute the hash code associated with a LOCKTAG.
 *
 * Delegates to dynahash's get_hash_value() on the shared LOCK hash table,
 * so the value matches that table's own bucket computation.  Callers (e.g.
 * LockWaiterCount above) feed the result to LockHashPartitionLock to select
 * a lock partition, and proclock_hash builds on it.
 */
554{
555 return get_hash_value(LockMethodLockHash, locktag);
556}
uint32 get_hash_value(HTAB *hashp, const void *keyPtr)
Definition: dynahash.c:911

References get_hash_value(), and LockMethodLockHash.

Referenced by CheckDeadLock(), GetLockConflicts(), lock_twophase_recover(), LockAcquireExtended(), LockRefindAndRelease(), LockReleaseAll(), LockWaiterCount(), proclock_hash(), and VirtualXactLock().

◆ LockWaiterCount()

int LockWaiterCount ( const LOCKTAG locktag)

Definition at line 4821 of file lock.c.

/*
 * LockWaiterCount
 *		Return the number of requesters of the given lock.
 *
 * Looks up the lock in the shared hash table under the partition LWLock and
 * returns lock->nRequested, or 0 if no such lock exists.  Note that
 * nRequested counts granted as well as still-waiting requests (cf. the
 * comment in SetupLockInTable), so despite the name this is not purely a
 * count of blocked waiters.
 *
 * NOTE(review): this Doxygen rendering dropped the hyperlinked statement at
 * original line 4837 — per the cross-reference list, presumably
 * lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash, ...);
 * confirm against lock.c.
 */
4822{
4823 LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
4824 LOCK *lock;
4825 bool found;
4826 uint32 hashcode;
4827 LWLock *partitionLock;
4828 int waiters = 0;
4829
4830 if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4831 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4832
4833 hashcode = LockTagHashCode(locktag);
4834 partitionLock = LockHashPartitionLock(hashcode);
4835 LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4836
4838 locktag,
4839 hashcode,
4840 HASH_FIND,
4841 &found);
4842 if (found)
4843 {
4844 Assert(lock != NULL);
4845 waiters = lock->nRequested;
4846 }
4847 LWLockRelease(partitionLock);
4848
4849 return waiters;
4850}

References Assert(), elog, ERROR, HASH_FIND, hash_search_with_hash_value(), lengthof, LockHashPartitionLock, LockMethodLockHash, LockMethods, LOCKTAG::locktag_lockmethodid, LockTagHashCode(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), and LOCK::nRequested.

Referenced by RelationExtensionLockWaiterCount().

◆ MarkLockClear()

void MarkLockClear ( LOCALLOCK locallock)

◆ PostPrepare_Locks()

void PostPrepare_Locks ( TransactionId  xid)

Definition at line 3539 of file lock.c.

/*
 * PostPrepare_Locks
 *		Transfer this backend's locks to the dummy PGPROC representing the
 *		prepared transaction identified by xid.
 *
 * Pass 1 scans the backend-local locallock table: entries never fully set
 * up are dropped, session-only holds are kept, holding the same object at
 * both session and transaction level is a PANIC, and each remaining
 * transaction-level hold is marked in its proclock's releaseMask before the
 * locallock entry is removed.  Pass 2 walks every lock partition and, for
 * each marked PROCLOCK, re-keys it (the PGPROC is part of the hash key, so
 * the entry must be rehashed rather than mutated in place) and relinks it
 * into the dummy proc's per-partition chain.
 *
 * NOTE(review): this Doxygen rendering dropped several hyperlinked
 * statements (original lines 3551, 3554, 3565, 3586, 3650, 3701, 3715) —
 * per the cross-reference list they include the lockGroupLeader Assert
 * continuation, START_CRIT_SECTION/END_CRIT_SECTION, hash_seq_init, the
 * LOCKTAG_VIRTUALTRANSACTION checks, and the hash_update_hash_key call.
 * Confirm against the original lock.c before relying on this listing.
 */
3540{
3541 PGPROC *newproc = TwoPhaseGetDummyProc(xid, false);
3542 HASH_SEQ_STATUS status;
3543 LOCALLOCK *locallock;
3544 LOCK *lock;
3545 PROCLOCK *proclock;
3546 PROCLOCKTAG proclocktag;
3547 int partition;
3548
3549 /* Can't prepare a lock group follower. */
3550 Assert(MyProc->lockGroupLeader == NULL ||
3552
3553 /* This is a critical section: any error means big trouble */
3555
3556 /*
3557 * First we run through the locallock table and get rid of unwanted
3558 * entries, then we scan the process's proclocks and transfer them to the
3559 * target proc.
3560 *
3561 * We do this separately because we may have multiple locallock entries
3562 * pointing to the same proclock, and we daren't end up with any dangling
3563 * pointers.
3564 */
3566
3567 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3568 {
3569 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3570 bool haveSessionLock;
3571 bool haveXactLock;
3572 int i;
3573
3574 if (locallock->proclock == NULL || locallock->lock == NULL)
3575 {
3576 /*
3577 * We must've run out of shared memory while trying to set up this
3578 * lock. Just forget the local entry.
3579 */
3580 Assert(locallock->nLocks == 0);
3581 RemoveLocalLock(locallock);
3582 continue;
3583 }
3584
3585 /* Ignore VXID locks */
3587 continue;
3588
3589 /* Scan to see whether we hold it at session or transaction level */
3590 haveSessionLock = haveXactLock = false;
3591 for (i = locallock->numLockOwners - 1; i >= 0; i--)
3592 {
3593 if (lockOwners[i].owner == NULL)
3594 haveSessionLock = true;
3595 else
3596 haveXactLock = true;
3597 }
3598
3599 /* Ignore it if we have only session lock */
3600 if (!haveXactLock)
3601 continue;
3602
3603 /* This can't happen, because we already checked it */
3604 if (haveSessionLock)
3605 ereport(PANIC,
3606 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3607 errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3608
3609 /* Mark the proclock to show we need to release this lockmode */
3610 if (locallock->nLocks > 0)
3611 locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
3612
3613 /* And remove the locallock hashtable entry */
3614 RemoveLocalLock(locallock);
3615 }
3616
3617 /*
3618 * Now, scan each lock partition separately.
3619 */
3620 for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
3621 {
3622 LWLock *partitionLock;
3623 dlist_head *procLocks = &(MyProc->myProcLocks[partition]);
3624 dlist_mutable_iter proclock_iter;
3625
3626 partitionLock = LockHashPartitionLockByIndex(partition);
3627
3628 /*
3629 * If the proclock list for this partition is empty, we can skip
3630 * acquiring the partition lock. This optimization is safer than the
3631 * situation in LockReleaseAll, because we got rid of any fast-path
3632 * locks during AtPrepare_Locks, so there cannot be any case where
3633 * another backend is adding something to our lists now. For safety,
3634 * though, we code this the same way as in LockReleaseAll.
3635 */
3636 if (dlist_is_empty(procLocks))
3637 continue; /* needn't examine this partition */
3638
3639 LWLockAcquire(partitionLock, LW_EXCLUSIVE);
3640
3641 dlist_foreach_modify(proclock_iter, procLocks)
3642 {
3643 proclock = dlist_container(PROCLOCK, procLink, proclock_iter.cur);
3644
3645 Assert(proclock->tag.myProc == MyProc);
3646
3647 lock = proclock->tag.myLock;
3648
3649 /* Ignore VXID locks */
3651 continue;
3652
3653 PROCLOCK_PRINT("PostPrepare_Locks", proclock);
3654 LOCK_PRINT("PostPrepare_Locks", lock, 0);
3655 Assert(lock->nRequested >= 0);
3656 Assert(lock->nGranted >= 0);
3657 Assert(lock->nGranted <= lock->nRequested);
3658 Assert((proclock->holdMask & ~lock->grantMask) == 0);
3659
3660 /* Ignore it if nothing to release (must be a session lock) */
3661 if (proclock->releaseMask == 0)
3662 continue;
3663
3664 /* Else we should be releasing all locks */
3665 if (proclock->releaseMask != proclock->holdMask)
3666 elog(PANIC, "we seem to have dropped a bit somewhere");
3667
3668 /*
3669 * We cannot simply modify proclock->tag.myProc to reassign
3670 * ownership of the lock, because that's part of the hash key and
3671 * the proclock would then be in the wrong hash chain. Instead
3672 * use hash_update_hash_key. (We used to create a new hash entry,
3673 * but that risks out-of-memory failure if other processes are
3674 * busy making proclocks too.) We must unlink the proclock from
3675 * our procLink chain and put it into the new proc's chain, too.
3676 *
3677 * Note: the updated proclock hash key will still belong to the
3678 * same hash partition, cf proclock_hash(). So the partition lock
3679 * we already hold is sufficient for this.
3680 */
3681 dlist_delete(&proclock->procLink);
3682
3683 /*
3684 * Create the new hash key for the proclock.
3685 */
3686 proclocktag.myLock = lock;
3687 proclocktag.myProc = newproc;
3688
3689 /*
3690 * Update groupLeader pointer to point to the new proc. (We'd
3691 * better not be a member of somebody else's lock group!)
3692 */
3693 Assert(proclock->groupLeader == proclock->tag.myProc);
3694 proclock->groupLeader = newproc;
3695
3696 /*
3697 * Update the proclock. We should not find any existing entry for
3698 * the same hash key, since there can be only one entry for any
3699 * given lock with my own proc.
3700 */
3702 proclock,
3703 &proclocktag))
3704 elog(PANIC, "duplicate entry found while reassigning a prepared transaction's locks");
3705
3706 /* Re-link into the new proc's proclock list */
3707 dlist_push_tail(&newproc->myProcLocks[partition], &proclock->procLink);
3708
3709 PROCLOCK_PRINT("PostPrepare_Locks: updated", proclock);
3710 } /* loop over PROCLOCKs within this partition */
3711
3712 LWLockRelease(partitionLock);
3713 } /* loop over partitions */
3714
3716}
bool hash_update_hash_key(HTAB *hashp, void *existingEntry, const void *newKeyPtr)
Definition: dynahash.c:1145
#define START_CRIT_SECTION()
Definition: miscadmin.h:150
#define END_CRIT_SECTION()
Definition: miscadmin.h:152

References Assert(), dlist_mutable_iter::cur, dlist_container, dlist_delete(), dlist_foreach_modify, dlist_is_empty(), dlist_push_tail(), elog, END_CRIT_SECTION, ereport, errcode(), errmsg(), LOCK::grantMask, PROCLOCK::groupLeader, hash_seq_init(), hash_seq_search(), hash_update_hash_key(), PROCLOCK::holdMask, i, LOCALLOCKTAG::lock, LOCALLOCK::lock, LOCK_PRINT, LOCKBIT_ON, PGPROC::lockGroupLeader, LockHashPartitionLockByIndex, LockMethodLocalHash, LockMethodProcLockHash, LOCALLOCK::lockOwners, LOCKTAG::locktag_type, LOCKTAG_VIRTUALTRANSACTION, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), LOCALLOCKTAG::mode, PROCLOCKTAG::myLock, MyProc, PROCLOCKTAG::myProc, PGPROC::myProcLocks, LOCK::nGranted, LOCALLOCK::nLocks, LOCK::nRequested, NUM_LOCK_PARTITIONS, LOCALLOCK::numLockOwners, PANIC, PROCLOCK::procLink, LOCALLOCK::proclock, PROCLOCK_PRINT, PROCLOCK::releaseMask, RemoveLocalLock(), START_CRIT_SECTION, LOCK::tag, PROCLOCK::tag, LOCALLOCK::tag, and TwoPhaseGetDummyProc().

Referenced by PrepareTransaction().

◆ proclock_hash()

static uint32 proclock_hash ( const void *  key,
Size  keysize 
)
static

Definition at line 570 of file lock.c.

/*
 * proclock_hash
 *		Dynahash hash function for the PROCLOCK hash table.
 *
 * A PROCLOCKTAG is (LOCK pointer, PGPROC pointer).  The hash is the
 * associated LOCKTAG's hash code XORed with the PGPROC address shifted left
 * by LOG2_NUM_LOCK_PARTITIONS, so the low-order (partition-selecting) bits
 * come solely from the LOCKTAG.  Any change here must be mirrored in
 * ProcLockHashCode() below.
 */
571{
572 const PROCLOCKTAG *proclocktag = (const PROCLOCKTAG *) key;
573 uint32 lockhash;
574 Datum procptr;
575
576 Assert(keysize == sizeof(PROCLOCKTAG));
577
578 /* Look into the associated LOCK object, and compute its hash code */
579 lockhash = LockTagHashCode(&proclocktag->myLock->tag);
580
581 /*
582 * To make the hash code also depend on the PGPROC, we xor the proc
583 * struct's address into the hash code, left-shifted so that the
584 * partition-number bits don't change. Since this is only a hash, we
585 * don't care if we lose high-order bits of the address; use an
586 * intermediate variable to suppress cast-pointer-to-int warnings.
587 */
588 procptr = PointerGetDatum(proclocktag->myProc);
589 lockhash ^= ((uint32) procptr) << LOG2_NUM_LOCK_PARTITIONS;
590
591 return lockhash;
592}
#define LOG2_NUM_LOCK_PARTITIONS
Definition: lwlock.h:96
static Datum PointerGetDatum(const void *X)
Definition: postgres.h:327
uintptr_t Datum
Definition: postgres.h:69

References Assert(), sort-test::key, LockTagHashCode(), LOG2_NUM_LOCK_PARTITIONS, PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, PointerGetDatum(), and LOCK::tag.

Referenced by LockManagerShmemInit().

◆ ProcLockHashCode()

static uint32 ProcLockHashCode ( const PROCLOCKTAG proclocktag,
uint32  hashcode 
)
inlinestatic

Definition at line 601 of file lock.c.

/*
 * ProcLockHashCode
 *		Compute a PROCLOCK hash code from an already-known LOCKTAG hash.
 *
 * Shortcut for callers that have the LOCKTAG's hash code in hand: applies
 * the same PGPROC-address mixing step as proclock_hash() without re-hashing
 * the LOCKTAG.  The two functions must stay in sync, as the comment below
 * warns.
 */
602{
603 uint32 lockhash = hashcode;
604 Datum procptr;
605
606 /*
607 * This must match proclock_hash()!
608 */
609 procptr = PointerGetDatum(proclocktag->myProc);
610 lockhash ^= ((uint32) procptr) << LOG2_NUM_LOCK_PARTITIONS;
611
612 return lockhash;
613}

References LOG2_NUM_LOCK_PARTITIONS, PROCLOCKTAG::myProc, and PointerGetDatum().

Referenced by CleanUpLock(), FastPathGetRelationLockEntry(), lock_twophase_recover(), LockAcquireExtended(), LockRefindAndRelease(), and SetupLockInTable().

◆ ReleaseLockIfHeld()

static void ReleaseLockIfHeld ( LOCALLOCK locallock,
bool  sessionLock 
)
static

Definition at line 2611 of file lock.c.

/*
 * ReleaseLockIfHeld
 *		Release any hold the target owner has on the given locallock.
 *
 * sessionLock = true targets session-level holds (owner == NULL);
 * otherwise the target is CurrentResourceOwner.  If the owner holds only
 * part of the total lock count, its share is subtracted and its owner slot
 * compacted away; if it accounts for the entire count, both counts are
 * collapsed to 1 so that a single LockRelease() call drops the lock for
 * real.  A failed LockRelease() is reported with elog(WARNING).
 */
2612{
2613 ResourceOwner owner;
2614 LOCALLOCKOWNER *lockOwners;
2615 int i;
2616
2617 /* Identify owner for lock (must match LockRelease!) */
2618 if (sessionLock)
2619 owner = NULL;
2620 else
2621 owner = CurrentResourceOwner;
2622
2623 /* Scan to see if there are any locks belonging to the target owner */
2624 lockOwners = locallock->lockOwners;
2625 for (i = locallock->numLockOwners - 1; i >= 0; i--)
2626 {
2627 if (lockOwners[i].owner == owner)
2628 {
2629 Assert(lockOwners[i].nLocks > 0);
2630 if (lockOwners[i].nLocks < locallock->nLocks)
2631 {
2632 /*
2633 * We will still hold this lock after forgetting this
2634 * ResourceOwner.
2635 */
2636 locallock->nLocks -= lockOwners[i].nLocks;
2637 /* compact out unused slot */
2638 locallock->numLockOwners--;
2639 if (owner != NULL)
2640 ResourceOwnerForgetLock(owner, locallock);
 /* move the last slot into the vacated position */
2641 if (i < locallock->numLockOwners)
2642 lockOwners[i] = lockOwners[locallock->numLockOwners];
2643 }
2644 else
2645 {
2646 Assert(lockOwners[i].nLocks == locallock->nLocks);
2647 /* We want to call LockRelease just once */
2648 lockOwners[i].nLocks = 1;
2649 locallock->nLocks = 1;
2650 if (!LockRelease(&locallock->tag.lock,
2651 locallock->tag.mode,
2652 sessionLock))
2653 elog(WARNING, "ReleaseLockIfHeld: failed??");
2654 }
2655 break;
2656 }
2657 }
2658}
bool LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
Definition: lock.c:2067

References Assert(), CurrentResourceOwner, elog, i, LOCALLOCKTAG::lock, LOCALLOCK::lockOwners, LockRelease(), LOCALLOCKTAG::mode, LOCALLOCKOWNER::nLocks, LOCALLOCK::nLocks, LOCALLOCK::numLockOwners, ResourceOwnerForgetLock(), LOCALLOCK::tag, and WARNING.

Referenced by LockReleaseCurrentOwner(), and LockReleaseSession().

◆ RemoveFromWaitQueue()

void RemoveFromWaitQueue ( PGPROC proc,
uint32  hashcode 
)

Definition at line 2011 of file lock.c.

/*
 * RemoveFromWaitQueue
 *		Detach a waiting process from the wait queue of the lock it was
 *		sleeping on, undoing its request-count bookkeeping.
 *
 * The caller must hold the lock's partition LWLock (hashcode identifies
 * the partition).  After unlinking the proc and decrementing the request
 * counts, CleanUpLock() is called to garbage-collect an unused proclock
 * and wake other waiters if possible.
 *
 * NOTE(review): this Doxygen rendering dropped the hyperlinked statements
 * at original lines 2019 and 2041 — per the cross-reference list,
 * presumably an Assert on proc->waitStatus == PROC_WAIT_STATUS_WAITING and
 * the assignment of PROC_WAIT_STATUS_ERROR to proc->waitStatus (the
 * "ok/fail signal" the comment below refers to); confirm against lock.c.
 */
2012{
2013 LOCK *waitLock = proc->waitLock;
2014 PROCLOCK *proclock = proc->waitProcLock;
2015 LOCKMODE lockmode = proc->waitLockMode;
2016 LOCKMETHODID lockmethodid = LOCK_LOCKMETHOD(*waitLock);
2017
2018 /* Make sure proc is waiting */
2020 Assert(proc->links.next != NULL);
2021 Assert(waitLock);
2022 Assert(!dclist_is_empty(&waitLock->waitProcs));
2023 Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
2024
2025 /* Remove proc from lock's wait queue */
2026 dclist_delete_from_thoroughly(&waitLock->waitProcs, &proc->links);
2027
2028 /* Undo increments of request counts by waiting process */
2029 Assert(waitLock->nRequested > 0);
2030 Assert(waitLock->nRequested > proc->waitLock->nGranted);
2031 waitLock->nRequested--;
2032 Assert(waitLock->requested[lockmode] > 0);
2033 waitLock->requested[lockmode]--;
2034 /* don't forget to clear waitMask bit if appropriate */
2035 if (waitLock->granted[lockmode] == waitLock->requested[lockmode])
2036 waitLock->waitMask &= LOCKBIT_OFF(lockmode);
2037
2038 /* Clean up the proc's own state, and pass it the ok/fail signal */
2039 proc->waitLock = NULL;
2040 proc->waitProcLock = NULL;
2042
2043 /*
2044 * Delete the proclock immediately if it represents no already-held locks.
2045 * (This must happen now because if the owner of the lock decides to
2046 * release it, and the requested/granted counts then go to zero,
2047 * LockRelease expects there to be no remaining proclocks.) Then see if
2048 * any other waiters for the lock can be woken up now.
2049 */
2050 CleanUpLock(waitLock, proclock,
2051 LockMethods[lockmethodid], hashcode,
2052 true);
2053}
static bool dclist_is_empty(const dclist_head *head)
Definition: ilist.h:682
static void dclist_delete_from_thoroughly(dclist_head *head, dlist_node *node)
Definition: ilist.h:776
PROCLOCK * waitProcLock
Definition: proc.h:234
ProcWaitStatus waitStatus
Definition: proc.h:168

References Assert(), CleanUpLock(), dclist_delete_from_thoroughly(), dclist_is_empty(), LOCK::granted, lengthof, PGPROC::links, LOCK_LOCKMETHOD, LOCKBIT_OFF, LockMethods, dlist_node::next, LOCK::nGranted, LOCK::nRequested, PROC_WAIT_STATUS_ERROR, PROC_WAIT_STATUS_WAITING, LOCK::requested, PGPROC::waitLock, PGPROC::waitLockMode, LOCK::waitMask, PGPROC::waitProcLock, LOCK::waitProcs, and PGPROC::waitStatus.

Referenced by CheckDeadLock(), and LockErrorCleanup().

◆ RemoveLocalLock()

static void RemoveLocalLock ( LOCALLOCK locallock)
static

Definition at line 1472 of file lock.c.

/*
 * RemoveLocalLock
 *		Delete a locallock entry from the backend-local hash table,
 *		forgetting it from every owning ResourceOwner.
 *
 * Also decrements the shared fast-path strong-lock counter if this entry
 * had incremented it (holdsStrongLockCount), and finally tells
 * CheckAndSetLockHeld() the lock is no longer held.
 *
 * NOTE(review): this Doxygen rendering dropped the hyperlinked statements
 * at original lines 1492, 1496 and 1499 — per the cross-reference list,
 * presumably SpinLockAcquire/SpinLockRelease on
 * FastPathStrongRelationLocks->mutex around the counter update, and the
 * hash_search(LockMethodLocalHash, ...) call whose continuation lines
 * (1500-1501) remain below; confirm against lock.c.
 */
1473{
1474 int i;
1475
1476 for (i = locallock->numLockOwners - 1; i >= 0; i--)
1477 {
1478 if (locallock->lockOwners[i].owner != NULL)
1479 ResourceOwnerForgetLock(locallock->lockOwners[i].owner, locallock);
1480 }
1481 locallock->numLockOwners = 0;
1482 if (locallock->lockOwners != NULL)
1483 pfree(locallock->lockOwners);
1484 locallock->lockOwners = NULL;
1485
1486 if (locallock->holdsStrongLockCount)
1487 {
1488 uint32 fasthashcode;
1489
1490 fasthashcode = FastPathStrongLockHashPartition(locallock->hashcode);
1491
1493 Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
1494 FastPathStrongRelationLocks->count[fasthashcode]--;
1495 locallock->holdsStrongLockCount = false;
1497 }
1498
1500 &(locallock->tag),
1501 HASH_REMOVE, NULL))
1502 elog(WARNING, "locallock table corrupted");
1503
1504 /*
1505 * Indicate that the lock is released for certain types of locks
1506 */
1507 CheckAndSetLockHeld(locallock, false);
1508}

References Assert(), CheckAndSetLockHeld(), FastPathStrongRelationLockData::count, elog, FastPathStrongLockHashPartition, FastPathStrongRelationLocks, HASH_REMOVE, hash_search(), LOCALLOCK::hashcode, LOCALLOCK::holdsStrongLockCount, i, LockMethodLocalHash, LOCALLOCK::lockOwners, FastPathStrongRelationLockData::mutex, LOCALLOCK::numLockOwners, LOCALLOCKOWNER::owner, pfree(), ResourceOwnerForgetLock(), SpinLockAcquire, SpinLockRelease, LOCALLOCK::tag, and WARNING.

Referenced by LockAcquireExtended(), LockHasWaiters(), LockRelease(), LockReleaseAll(), and PostPrepare_Locks().

◆ ResetAwaitedLock()

void ResetAwaitedLock ( void  )

Definition at line 1903 of file lock.c.

/*
 * ResetAwaitedLock
 *		Forget the lock we were last waiting for.
 *
 * Clears the file-static awaitedLock pointer; called from
 * LockErrorCleanup() per the cross-reference list.
 */
1904{
1905 awaitedLock = NULL;
1906}

References awaitedLock.

Referenced by LockErrorCleanup().

◆ SetupLockInTable()

static PROCLOCK * SetupLockInTable ( LockMethod  lockMethodTable,
PGPROC proc,
const LOCKTAG locktag,
uint32  hashcode,
LOCKMODE  lockmode 
)
static

Definition at line 1279 of file lock.c.

/*
 * SetupLockInTable
 *		Find or create the LOCK and PROCLOCK objects for a lock request,
 *		and bump the request counts.
 *
 * Returns the PROCLOCK, or NULL if shared memory ran out (in which case a
 * LOCK object created on this call is garbage-collected again so it cannot
 * leak).  New LOCK/PROCLOCK entries are zero-initialized and linked into
 * the appropriate chains.  On success, lock->nRequested and
 * lock->requested[lockmode] are incremented; the granted-side counters are
 * not touched here.  elog(ERROR) if the proclock already holds the
 * requested mode (locallock table corruption).
 *
 * NOTE(review): this Doxygen rendering dropped the hyperlinked statements
 * at original lines 1291, 1294, 1333, 1336, 1350 and 1353 — per the
 * cross-reference list these are the hash_search_with_hash_value() calls on
 * LockMethodLockHash (HASH_ENTER_NULL), on LockMethodProcLockHash
 * (HASH_ENTER_NULL), and the HASH_REMOVE lookup used for garbage
 * collection; their continuation lines remain below.  Confirm against the
 * original lock.c.
 */
1281{
1282 LOCK *lock;
1283 PROCLOCK *proclock;
1284 PROCLOCKTAG proclocktag;
1285 uint32 proclock_hashcode;
1286 bool found;
1287
1288 /*
1289 * Find or create a lock with this tag.
1290 */
1292 locktag,
1293 hashcode,
1295 &found);
1296 if (!lock)
1297 return NULL;
1298
1299 /*
1300 * if it's a new lock object, initialize it
1301 */
1302 if (!found)
1303 {
1304 lock->grantMask = 0;
1305 lock->waitMask = 0;
1306 dlist_init(&lock->procLocks);
1307 dclist_init(&lock->waitProcs);
1308 lock->nRequested = 0;
1309 lock->nGranted = 0;
1310 MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
1311 MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
1312 LOCK_PRINT("LockAcquire: new", lock, lockmode);
1313 }
1314 else
1315 {
1316 LOCK_PRINT("LockAcquire: found", lock, lockmode);
1317 Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
1318 Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
1319 Assert(lock->nGranted <= lock->nRequested);
1320 }
1321
1322 /*
1323 * Create the hash key for the proclock table.
1324 */
1325 proclocktag.myLock = lock;
1326 proclocktag.myProc = proc;
1327
1328 proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
1329
1330 /*
1331 * Find or create a proclock entry with this tag
1332 */
1334 &proclocktag,
1335 proclock_hashcode,
1337 &found);
1338 if (!proclock)
1339 {
1340 /* Oops, not enough shmem for the proclock */
1341 if (lock->nRequested == 0)
1342 {
1343 /*
1344 * There are no other requestors of this lock, so garbage-collect
1345 * the lock object. We *must* do this to avoid a permanent leak
1346 * of shared memory, because there won't be anything to cause
1347 * anyone to release the lock object later.
1348 */
1349 Assert(dlist_is_empty(&(lock->procLocks)));
1351 &(lock->tag),
1352 hashcode,
1354 NULL))
1355 elog(PANIC, "lock table corrupted");
1356 }
1357 return NULL;
1358 }
1359
1360 /*
1361 * If new, initialize the new entry
1362 */
1363 if (!found)
1364 {
1365 uint32 partition = LockHashPartition(hashcode);
1366
1367 /*
1368 * It might seem unsafe to access proclock->groupLeader without a
1369 * lock, but it's not really. Either we are initializing a proclock
1370 * on our own behalf, in which case our group leader isn't changing
1371 * because the group leader for a process can only ever be changed by
1372 * the process itself; or else we are transferring a fast-path lock to
1373 * the main lock table, in which case that process can't change its
1374 * lock group leader without first releasing all of its locks (and in
1375 * particular the one we are currently transferring).
1376 */
1377 proclock->groupLeader = proc->lockGroupLeader != NULL ?
1378 proc->lockGroupLeader : proc;
1379 proclock->holdMask = 0;
1380 proclock->releaseMask = 0;
1381 /* Add proclock to appropriate lists */
1382 dlist_push_tail(&lock->procLocks, &proclock->lockLink);
1383 dlist_push_tail(&proc->myProcLocks[partition], &proclock->procLink);
1384 PROCLOCK_PRINT("LockAcquire: new", proclock);
1385 }
1386 else
1387 {
1388 PROCLOCK_PRINT("LockAcquire: found", proclock);
1389 Assert((proclock->holdMask & ~lock->grantMask) == 0);
1390
1391#ifdef CHECK_DEADLOCK_RISK
1392
1393 /*
1394 * Issue warning if we already hold a lower-level lock on this object
1395 * and do not hold a lock of the requested level or higher. This
1396 * indicates a deadlock-prone coding practice (eg, we'd have a
1397 * deadlock if another backend were following the same code path at
1398 * about the same time).
1399 *
1400 * This is not enabled by default, because it may generate log entries
1401 * about user-level coding practices that are in fact safe in context.
1402 * It can be enabled to help find system-level problems.
1403 *
1404 * XXX Doing numeric comparison on the lockmodes is a hack; it'd be
1405 * better to use a table. For now, though, this works.
1406 */
1407 {
1408 int i;
1409
1410 for (i = lockMethodTable->numLockModes; i > 0; i--)
1411 {
1412 if (proclock->holdMask & LOCKBIT_ON(i))
1413 {
1414 if (i >= (int) lockmode)
1415 break; /* safe: we have a lock >= req level */
1416 elog(LOG, "deadlock risk: raising lock level"
1417 " from %s to %s on object %u/%u/%u",
1418 lockMethodTable->lockModeNames[i],
1419 lockMethodTable->lockModeNames[lockmode],
1420 lock->tag.locktag_field1, lock->tag.locktag_field2,
1421 lock->tag.locktag_field3);
1422 break;
1423 }
1424 }
1425 }
1426#endif /* CHECK_DEADLOCK_RISK */
1427 }
1428
1429 /*
1430 * lock->nRequested and lock->requested[] count the total number of
1431 * requests, whether granted or waiting, so increment those immediately.
1432 * The other counts don't increment till we get the lock.
1433 */
1434 lock->nRequested++;
1435 lock->requested[lockmode]++;
1436 Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
1437
1438 /*
1439 * We shouldn't already hold the desired lock; else locallock table is
1440 * broken.
1441 */
1442 if (proclock->holdMask & LOCKBIT_ON(lockmode))
1443 elog(ERROR, "lock %s on object %u/%u/%u is already held",
1444 lockMethodTable->lockModeNames[lockmode],
1445 lock->tag.locktag_field1, lock->tag.locktag_field2,
1446 lock->tag.locktag_field3);
1447
1448 return proclock;
1449}

References Assert(), dclist_init(), dlist_init(), dlist_is_empty(), dlist_push_tail(), elog, ERROR, LOCK::granted, LOCK::grantMask, PROCLOCK::groupLeader, HASH_ENTER_NULL, HASH_REMOVE, hash_search_with_hash_value(), PROCLOCK::holdMask, i, LOCK_PRINT, LOCKBIT_ON, PGPROC::lockGroupLeader, LockHashPartition, PROCLOCK::lockLink, LockMethodLockHash, LockMethodProcLockHash, LockMethodData::lockModeNames, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_field3, LOG, MAX_LOCKMODES, MemSet, PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, PGPROC::myProcLocks, LOCK::nGranted, LOCK::nRequested, LockMethodData::numLockModes, PANIC, PROCLOCK::procLink, PROCLOCK_PRINT, ProcLockHashCode(), LOCK::procLocks, PROCLOCK::releaseMask, LOCK::requested, LOCK::tag, LOCK::waitMask, and LOCK::waitProcs.

Referenced by FastPathGetRelationLockEntry(), FastPathTransferRelationLocks(), LockAcquireExtended(), and VirtualXactLock().

◆ UnGrantLock()

static bool UnGrantLock ( LOCK lock,
LOCKMODE  lockmode,
PROCLOCK proclock,
LockMethod  lockMethodTable 
)
static

Definition at line 1677 of file lock.c.

/*
 * UnGrantLock
 *		Undo the grant of one lock mode to a proclock, fixing the LOCK's
 *		shared counters and the proclock's holdMask.
 *
 * Decrements nRequested/requested[] and nGranted/granted[], clears the
 * lockmode's bit in grantMask when the last grant of that mode goes away,
 * and clears it in proclock->holdMask.  Returns true iff the released mode
 * conflicts with some mode waiters are blocked on, i.e. the caller should
 * run ProcLockWakeup (cf. CleanUpLock, per the cross-reference list).
 * Caller is expected to hold the lock's partition lock — TODO confirm
 * against the original header comment, which is outside this listing.
 */
1679{
1680 bool wakeupNeeded = false;
1681
1682 Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
1683 Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
1684 Assert(lock->nGranted <= lock->nRequested);
1685
1686 /*
1687 * fix the general lock stats
1688 */
1689 lock->nRequested--;
1690 lock->requested[lockmode]--;
1691 lock->nGranted--;
1692 lock->granted[lockmode]--;
1693
1694 if (lock->granted[lockmode] == 0)
1695 {
1696 /* change the conflict mask. No more of this lock type. */
1697 lock->grantMask &= LOCKBIT_OFF(lockmode);
1698 }
1699
1700 LOCK_PRINT("UnGrantLock: updated", lock, lockmode);
1701
1702 /*
1703 * We need only run ProcLockWakeup if the released lock conflicts with at
1704 * least one of the lock types requested by waiter(s). Otherwise whatever
1705 * conflict made them wait must still exist. NOTE: before MVCC, we could
1706 * skip wakeup if lock->granted[lockmode] was still positive. But that's
1707 * not true anymore, because the remaining granted locks might belong to
1708 * some waiter, who could now be awakened because he doesn't conflict with
1709 * his own locks.
1710 */
1711 if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
1712 wakeupNeeded = true;
1713
1714 /*
1715 * Now fix the per-proclock state.
1716 */
1717 proclock->holdMask &= LOCKBIT_OFF(lockmode);
1718 PROCLOCK_PRINT("UnGrantLock: updated", proclock);
1719
1720 return wakeupNeeded;
1721}

References Assert(), LockMethodData::conflictTab, LOCK::granted, LOCK::grantMask, PROCLOCK::holdMask, LOCK_PRINT, LOCKBIT_OFF, LOCK::nGranted, LOCK::nRequested, PROCLOCK_PRINT, LOCK::requested, and LOCK::waitMask.

Referenced by LockRefindAndRelease(), LockRelease(), and LockReleaseAll().

◆ VirtualXactLock()

bool VirtualXactLock ( VirtualTransactionId  vxid,
bool  wait 
)

Definition at line 4710 of file lock.c.

/*
 * NOTE(review): rendered body of VirtualXactLock(); embedded numbers are
 * original lock.c line numbers.  This Doxygen render dropped several
 * hyperlinked lines (e.g. 4714-4718, 4722, 4741, 4744, 4777) -- the local
 * TransactionId declaration, the SET_LOCKTAG_VIRTUALTRANSACTION/validity
 * checks, the LWLockAcquire(&proc->fpInfoLock, ...) call, and the
 * SetupLockInTable() call head are missing here.  Consult the actual
 * source before relying on this listing.
 *
 * Checks, or (if wait) waits for, completion of the transaction identified
 * by vxid.  Returns false only when !wait and the VXID is still running;
 * otherwise delegates final disposition to XactLockForVirtualXact().
 */
4711{
4712 LOCKTAG tag;
4713 PGPROC *proc;
4715
4717
4719 /* no vxid lock; localTransactionId is a normal, locked XID */
4720 return XactLockForVirtualXact(vxid, vxid.localTransactionId, wait);
4721
4723
4724 /*
4725 * If a lock table entry must be made, this is the PGPROC on whose behalf
4726 * it must be done. Note that the transaction might end or the PGPROC
4727 * might be reassigned to a new backend before we get around to examining
4728 * it, but it doesn't matter. If we find upon examination that the
4729 * relevant lxid is no longer running here, that's enough to prove that
4730 * it's no longer running anywhere.
4731 */
4732 proc = ProcNumberGetProc(vxid.procNumber);
4733 if (proc == NULL)
4734 return XactLockForVirtualXact(vxid, InvalidTransactionId, wait);
4735
4736 /*
4737 * We must acquire this lock before checking the procNumber and lxid
4738 * against the ones we're waiting for. The target backend will only set
4739 * or clear lxid while holding this lock.
4740 */
4742
4743 if (proc->vxid.procNumber != vxid.procNumber
4745 {
4746 /* VXID ended */
4747 LWLockRelease(&proc->fpInfoLock);
4748 return XactLockForVirtualXact(vxid, InvalidTransactionId, wait);
4749 }
4750
4751 /*
4752 * If we aren't asked to wait, there's no need to set up a lock table
4753 * entry. The transaction is still in progress, so just return false.
4754 */
4755 if (!wait)
4756 {
4757 LWLockRelease(&proc->fpInfoLock);
4758 return false;
4759 }
4760
4761 /*
4762 * OK, we're going to need to sleep on the VXID. But first, we must set
4763 * up the primary lock table entry, if needed (ie, convert the proc's
4764 * fast-path lock on its VXID to a regular lock).
4765 */
4766 if (proc->fpVXIDLock)
4767 {
4768 PROCLOCK *proclock;
4769 uint32 hashcode;
4770 LWLock *partitionLock;
4771
4772 hashcode = LockTagHashCode(&tag);
4773
4774 partitionLock = LockHashPartitionLock(hashcode);
4775 LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4776
/* NOTE(review): line 4777 (the SetupLockInTable(...) call head) is missing
 * from this render; 4778 below is its continuation arguments. */
4778 &tag, hashcode, ExclusiveLock);
4779 if (!proclock)
4780 {
4781 LWLockRelease(partitionLock);
4782 LWLockRelease(&proc->fpInfoLock);
4783 ereport(ERROR,
4784 (errcode(ERRCODE_OUT_OF_MEMORY),
4785 errmsg("out of shared memory"),
4786 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
4787 }
4788 GrantLock(proclock->tag.myLock, proclock, ExclusiveLock);
4789
4790 LWLockRelease(partitionLock);
4791
4792 proc->fpVXIDLock = false;
4793 }
4794
4795 /*
4796 * If the proc has an XID now, we'll avoid a TwoPhaseGetXidByVirtualXID()
4797 * search. The proc might have assigned this XID but not yet locked it,
4798 * in which case the proc will lock this XID before releasing the VXID.
4799 * The fpInfoLock critical section excludes VirtualXactLockTableCleanup(),
4800 * so we won't save an XID of a different VXID. It doesn't matter whether
4801 * we save this before or after setting up the primary lock table entry.
4802 */
4803 xid = proc->xid;
4804
4805 /* Done with proc->fpLockBits */
4806 LWLockRelease(&proc->fpInfoLock);
4807
4808 /* Time to wait. */
4809 (void) LockAcquire(&tag, ShareLock, false, false);
4810
4811 LockRelease(&tag, ShareLock, false);
4812 return XactLockForVirtualXact(vxid, xid, wait);
4813}
static bool XactLockForVirtualXact(VirtualTransactionId vxid, TransactionId xid, bool wait)
Definition: lock.c:4659
LockAcquireResult LockAcquire(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock, bool dontWait)
Definition: lock.c:805
#define VirtualTransactionIdIsRecoveredPreparedXact(vxid)
Definition: lock.h:70
#define ShareLock
Definition: lockdefs.h:40
PGPROC * ProcNumberGetProc(ProcNumber procNumber)
Definition: procarray.c:3138
#define InvalidTransactionId
Definition: transam.h:31

References Assert(), DEFAULT_LOCKMETHOD, ereport, errcode(), errhint(), errmsg(), ERROR, ExclusiveLock, PGPROC::fpInfoLock, PGPROC::fpLocalTransactionId, PGPROC::fpVXIDLock, GrantLock(), InvalidTransactionId, VirtualTransactionId::localTransactionId, LockAcquire(), LockHashPartitionLock, LockMethods, LockRelease(), LockTagHashCode(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), PROCLOCKTAG::myLock, VirtualTransactionId::procNumber, PGPROC::procNumber, ProcNumberGetProc(), SET_LOCKTAG_VIRTUALTRANSACTION, SetupLockInTable(), ShareLock, PROCLOCK::tag, VirtualTransactionIdIsRecoveredPreparedXact, VirtualTransactionIdIsValid, PGPROC::vxid, XactLockForVirtualXact(), and PGPROC::xid.

Referenced by ResolveRecoveryConflictWithVirtualXIDs(), WaitForLockersMultiple(), and WaitForOlderSnapshots().

◆ VirtualXactLockTableCleanup()

void VirtualXactLockTableCleanup ( void  )

Definition at line 4610 of file lock.c.

/*
 * NOTE(review): rendered body of VirtualXactLockTableCleanup(); embedded
 * numbers are original lock.c line numbers.  Several hyperlinked lines
 * were dropped by this render (4615, 4620, 4623, 4625, 4627, 4635, 4642) --
 * per the References list these include the Assert, the
 * LWLockAcquire/LWLockRelease pair on MyProc->fpInfoLock, the
 * fpLocalTransactionId capture/reset, the VirtualTransactionId declaration,
 * and the LockRefindAndRelease() call head.  Consult the actual source.
 *
 * Releases this backend's fast-path VXID lock state; if the lock had been
 * transferred to the main lock table, finds and releases it there instead.
 */
4611{
4612 bool fastpath;
4613 LocalTransactionId lxid;
4614
4616
4617 /*
4618 * Clean up shared memory state.
4619 */
4621
4622 fastpath = MyProc->fpVXIDLock;
4624 MyProc->fpVXIDLock = false;
4626
4628
4629 /*
4630 * If fpVXIDLock has been cleared without touching fpLocalTransactionId,
4631 * that means someone transferred the lock to the main lock table.
4632 */
4633 if (!fastpath && LocalTransactionIdIsValid(lxid))
4634 {
4636 LOCKTAG locktag;
4637
4638 vxid.procNumber = MyProcNumber;
4639 vxid.localTransactionId = lxid;
4640 SET_LOCKTAG_VIRTUALTRANSACTION(locktag, vxid);
4641
4643 &locktag, ExclusiveLock, false);
4644 }
4645}
uint32 LocalTransactionId
Definition: c.h:625
ProcNumber MyProcNumber
Definition: globals.c:91
#define LocalTransactionIdIsValid(lxid)
Definition: lock.h:67

References Assert(), DEFAULT_LOCKMETHOD, ExclusiveLock, PGPROC::fpInfoLock, PGPROC::fpLocalTransactionId, PGPROC::fpVXIDLock, INVALID_PROC_NUMBER, InvalidLocalTransactionId, VirtualTransactionId::localTransactionId, LocalTransactionIdIsValid, LockMethods, LockRefindAndRelease(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), MyProc, MyProcNumber, VirtualTransactionId::procNumber, PGPROC::procNumber, SET_LOCKTAG_VIRTUALTRANSACTION, and PGPROC::vxid.

Referenced by LockReleaseAll(), and ShutdownRecoveryTransactionEnvironment().

◆ VirtualXactLockTableInsert()

◆ WaitOnLock()

static ProcWaitStatus WaitOnLock ( LOCALLOCK locallock,
ResourceOwner  owner 
)
static

Definition at line 1928 of file lock.c.

/*
 * NOTE(review): rendered body of static WaitOnLock(); embedded numbers are
 * original lock.c line numbers.  Lines 1975 and 1988 (the
 * set_ps_display_remove_suffix() calls, per the References list) were
 * dropped by this render.
 *
 * Sleeps on a lock via ProcSleep(), bracketing the wait with lock-wait
 * trace probes and a "waiting" ps-title suffix.  Sets awaitedLock/
 * awaitedOwner beforehand so LockErrorCleanup can undo state on
 * cancel/die; clears awaitedLock on normal return.  Returns ProcSleep()'s
 * ProcWaitStatus.
 */
1929{
1930 ProcWaitStatus result;
1931
1932 TRACE_POSTGRESQL_LOCK_WAIT_START(locallock->tag.lock.locktag_field1,
1933 locallock->tag.lock.locktag_field2,
1934 locallock->tag.lock.locktag_field3,
1935 locallock->tag.lock.locktag_field4,
1936 locallock->tag.lock.locktag_type,
1937 locallock->tag.mode);
1938
1939 /* adjust the process title to indicate that it's waiting */
1940 set_ps_display_suffix("waiting");
1941
1942 /*
1943 * Record the fact that we are waiting for a lock, so that
1944 * LockErrorCleanup will clean up if cancel/die happens.
1945 */
1946 awaitedLock = locallock;
1947 awaitedOwner = owner;
1948
1949 /*
1950 * NOTE: Think not to put any shared-state cleanup after the call to
1951 * ProcSleep, in either the normal or failure path. The lock state must
1952 * be fully set by the lock grantor, or by CheckDeadLock if we give up
1953 * waiting for the lock. This is necessary because of the possibility
1954 * that a cancel/die interrupt will interrupt ProcSleep after someone else
1955 * grants us the lock, but before we've noticed it. Hence, after granting,
1956 * the locktable state must fully reflect the fact that we own the lock;
1957 * we can't do additional work on return.
1958 *
1959 * We can and do use a PG_TRY block to try to clean up after failure, but
1960 * this still has a major limitation: elog(FATAL) can occur while waiting
1961 * (eg, a "die" interrupt), and then control won't come back here. So all
1962 * cleanup of essential state should happen in LockErrorCleanup, not here.
1963 * We can use PG_TRY to clear the "waiting" status flags, since doing that
1964 * is unimportant if the process exits.
1965 */
1966 PG_TRY();
1967 {
1968 result = ProcSleep(locallock);
1969 }
1970 PG_CATCH();
1971 {
1972 /* In this path, awaitedLock remains set until LockErrorCleanup */
1973
1974 /* reset ps display to remove the suffix */
1976
1977 /* and propagate the error */
1978 PG_RE_THROW();
1979 }
1980 PG_END_TRY();
1981
1982 /*
1983 * We no longer want LockErrorCleanup to do anything.
1984 */
1985 awaitedLock = NULL;
1986
1987 /* reset ps display to remove the suffix */
1989
1990 TRACE_POSTGRESQL_LOCK_WAIT_DONE(locallock->tag.lock.locktag_field1,
1991 locallock->tag.lock.locktag_field2,
1992 locallock->tag.lock.locktag_field3,
1993 locallock->tag.lock.locktag_field4,
1994 locallock->tag.lock.locktag_type,
1995 locallock->tag.mode);
1996
1997 return result;
1998}
#define PG_RE_THROW()
Definition: elog.h:405
#define PG_TRY(...)
Definition: elog.h:372
#define PG_END_TRY(...)
Definition: elog.h:397
#define PG_CATCH(...)
Definition: elog.h:382
void set_ps_display_remove_suffix(void)
Definition: ps_status.c:423
void set_ps_display_suffix(const char *suffix)
Definition: ps_status.c:371
ProcWaitStatus ProcSleep(LOCALLOCK *locallock)
Definition: proc.c:1310
uint16 locktag_field4
Definition: lock.h:170

References awaitedLock, awaitedOwner, LOCALLOCKTAG::lock, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_field3, LOCKTAG::locktag_field4, LOCKTAG::locktag_type, LOCALLOCKTAG::mode, PG_CATCH, PG_END_TRY, PG_RE_THROW, PG_TRY, ProcSleep(), set_ps_display_remove_suffix(), set_ps_display_suffix(), and LOCALLOCK::tag.

Referenced by LockAcquireExtended().

◆ XactLockForVirtualXact()

static bool XactLockForVirtualXact ( VirtualTransactionId  vxid,
TransactionId  xid,
bool  wait 
)
static

Definition at line 4659 of file lock.c.

/*
 * NOTE(review): rendered body of static XactLockForVirtualXact(); embedded
 * numbers are original lock.c line numbers.  Lines 4670 (the
 * LockAcquireResult "lar" declaration, inferred from its use at 4691) and
 * 4676 were dropped by this render -- consult the actual source.
 *
 * Waits for (or, when !wait, merely tests) completion of the transaction
 * behind vxid by taking and releasing ShareLock on its XID.  When no xid
 * is supplied, looks it up via TwoPhaseGetXidByVirtualXID(), looping while
 * "more" prepared transactions map to the same VXID.  Returns true once no
 * relevant transaction remains running; returns false only in the !wait
 * case when the XID lock is not immediately available.  Short-circuits to
 * true when max_prepared_xacts == 0, since then no 2PC can outlive the
 * backend.
 */
4661{
4662 bool more = false;
4663
4664 /* There is no point to wait for 2PCs if you have no 2PCs. */
4665 if (max_prepared_xacts == 0)
4666 return true;
4667
4668 do
4669 {
4671 LOCKTAG tag;
4672
4673 /* Clear state from previous iterations. */
4674 if (more)
4675 {
4677 more = false;
4678 }
4679
4680 /* If we have no xid, try to find one. */
4681 if (!TransactionIdIsValid(xid))
4682 xid = TwoPhaseGetXidByVirtualXID(vxid, &more);
4683 if (!TransactionIdIsValid(xid))
4684 {
4685 Assert(!more);
4686 return true;
4687 }
4688
4689 /* Check or wait for XID completion. */
4690 SET_LOCKTAG_TRANSACTION(tag, xid);
4691 lar = LockAcquire(&tag, ShareLock, false, !wait);
4692 if (lar == LOCKACQUIRE_NOT_AVAIL)
4693 return false;
4694 LockRelease(&tag, ShareLock, false);
4695 } while (more);
4696
4697 return true;
4698}
#define SET_LOCKTAG_TRANSACTION(locktag, xid)
Definition: lock.h:227
LockAcquireResult
Definition: lock.h:501
TransactionId TwoPhaseGetXidByVirtualXID(VirtualTransactionId vxid, bool *have_more)
Definition: twophase.c:852

References Assert(), InvalidTransactionId, LockAcquire(), LOCKACQUIRE_NOT_AVAIL, LockRelease(), max_prepared_xacts, SET_LOCKTAG_TRANSACTION, ShareLock, TransactionIdIsValid, and TwoPhaseGetXidByVirtualXID().

Referenced by VirtualXactLock().

Variable Documentation

◆ awaitedLock

LOCALLOCK* awaitedLock
static

Definition at line 325 of file lock.c.

Referenced by GetAwaitedLock(), GrantAwaitedLock(), ResetAwaitedLock(), and WaitOnLock().

◆ awaitedOwner

ResourceOwner awaitedOwner
static

Definition at line 326 of file lock.c.

Referenced by GrantAwaitedLock(), and WaitOnLock().

◆ default_lockmethod

const LockMethodData default_lockmethod
static
Initial value:
= {
}
static bool Dummy_trace
Definition: lock.c:122
static const char *const lock_mode_names[]
Definition: lock.c:108
static const LOCKMASK LockConflicts[]
Definition: lock.c:65

Definition at line 125 of file lock.c.

◆ Dummy_trace

bool Dummy_trace = false
static

Definition at line 122 of file lock.c.

◆ FastPathLocalUseCounts

int FastPathLocalUseCounts[FP_LOCK_GROUPS_PER_BACKEND_MAX]
static

◆ FastPathLockGroupsPerBackend

int FastPathLockGroupsPerBackend = 0

◆ FastPathStrongRelationLocks

◆ lock_mode_names

const char* const lock_mode_names[]
static
Initial value:
=
{
"INVALID",
"AccessShareLock",
"RowShareLock",
"RowExclusiveLock",
"ShareUpdateExclusiveLock",
"ShareLock",
"ShareRowExclusiveLock",
"ExclusiveLock",
"AccessExclusiveLock"
}

Definition at line 108 of file lock.c.

◆ LockConflicts

const LOCKMASK LockConflicts[]
static

Definition at line 65 of file lock.c.

◆ LockMethodLocalHash

◆ LockMethodLockHash

◆ LockMethodProcLockHash

◆ LockMethods

◆ log_lock_failure

bool log_lock_failure = false

Definition at line 54 of file lock.c.

Referenced by heap_acquire_tuplock(), heap_lock_tuple(), and heapam_tuple_lock().

◆ max_locks_per_xact

int max_locks_per_xact

◆ PG_USED_FOR_ASSERTS_ONLY

bool IsRelationExtensionLockHeld PG_USED_FOR_ASSERTS_ONLY = false
static

Definition at line 191 of file lock.c.

◆ StrongLockInProgress

LOCALLOCK* StrongLockInProgress
static

Definition at line 324 of file lock.c.

Referenced by AbortStrongLockAcquire(), BeginStrongLockAcquire(), and FinishStrongLockAcquire().

◆ user_lockmethod

const LockMethodData user_lockmethod
static
Initial value:

Definition at line 136 of file lock.c.