PostgreSQL Source Code  git master
lock.c File Reference
#include "postgres.h"
#include <signal.h>
#include <unistd.h>
#include "access/transam.h"
#include "access/twophase.h"
#include "access/twophase_rmgr.h"
#include "access/xlog.h"
#include "access/xlogutils.h"
#include "miscadmin.h"
#include "pg_trace.h"
#include "storage/proc.h"
#include "storage/procarray.h"
#include "storage/sinvaladt.h"
#include "storage/spin.h"
#include "storage/standby.h"
#include "utils/memutils.h"
#include "utils/ps_status.h"
#include "utils/resowner.h"
Include dependency graph for lock.c:

Go to the source code of this file.

Data Structures

struct  TwoPhaseLockRecord
 
struct  FastPathStrongRelationLockData
 

Macros

#define NLOCKENTS()    mul_size(max_locks_per_xact, add_size(MaxBackends, max_prepared_xacts))
 
#define FAST_PATH_BITS_PER_SLOT   3
 
#define FAST_PATH_LOCKNUMBER_OFFSET   1
 
#define FAST_PATH_MASK   ((1 << FAST_PATH_BITS_PER_SLOT) - 1)
 
#define FAST_PATH_GET_BITS(proc, n)    (((proc)->fpLockBits >> (FAST_PATH_BITS_PER_SLOT * n)) & FAST_PATH_MASK)
 
#define FAST_PATH_BIT_POSITION(n, l)
 
#define FAST_PATH_SET_LOCKMODE(proc, n, l)    (proc)->fpLockBits |= UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)
 
#define FAST_PATH_CLEAR_LOCKMODE(proc, n, l)    (proc)->fpLockBits &= ~(UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l))
 
#define FAST_PATH_CHECK_LOCKMODE(proc, n, l)    ((proc)->fpLockBits & (UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)))
 
#define EligibleForRelationFastPath(locktag, mode)
 
#define ConflictsWithRelationFastPath(locktag, mode)
 
#define FAST_PATH_STRONG_LOCK_HASH_BITS   10
 
#define FAST_PATH_STRONG_LOCK_HASH_PARTITIONS    (1 << FAST_PATH_STRONG_LOCK_HASH_BITS)
 
#define FastPathStrongLockHashPartition(hashcode)    ((hashcode) % FAST_PATH_STRONG_LOCK_HASH_PARTITIONS)
 
#define LOCK_PRINT(where, lock, type)   ((void) 0)
 
#define PROCLOCK_PRINT(where, proclockP)   ((void) 0)
 

Typedefs

typedef struct TwoPhaseLockRecord TwoPhaseLockRecord
 

Functions

static bool FastPathGrantRelationLock (Oid relid, LOCKMODE lockmode)
 
static bool FastPathUnGrantRelationLock (Oid relid, LOCKMODE lockmode)
 
static bool FastPathTransferRelationLocks (LockMethod lockMethodTable, const LOCKTAG *locktag, uint32 hashcode)
 
static PROCLOCK * FastPathGetRelationLockEntry (LOCALLOCK *locallock)
 
static uint32 proclock_hash (const void *key, Size keysize)
 
static void RemoveLocalLock (LOCALLOCK *locallock)
 
static PROCLOCK * SetupLockInTable (LockMethod lockMethodTable, PGPROC *proc, const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode)
 
static void GrantLockLocal (LOCALLOCK *locallock, ResourceOwner owner)
 
static void BeginStrongLockAcquire (LOCALLOCK *locallock, uint32 fasthashcode)
 
static void FinishStrongLockAcquire (void)
 
static void WaitOnLock (LOCALLOCK *locallock, ResourceOwner owner, bool dontWait)
 
static void ReleaseLockIfHeld (LOCALLOCK *locallock, bool sessionLock)
 
static void LockReassignOwner (LOCALLOCK *locallock, ResourceOwner parent)
 
static bool UnGrantLock (LOCK *lock, LOCKMODE lockmode, PROCLOCK *proclock, LockMethod lockMethodTable)
 
static void CleanUpLock (LOCK *lock, PROCLOCK *proclock, LockMethod lockMethodTable, uint32 hashcode, bool wakeupNeeded)
 
static void LockRefindAndRelease (LockMethod lockMethodTable, PGPROC *proc, LOCKTAG *locktag, LOCKMODE lockmode, bool decrement_strong_lock_count)
 
static void GetSingleProcBlockerStatusData (PGPROC *blocked_proc, BlockedProcsData *data)
 
void InitLocks (void)
 
LockMethod GetLocksMethodTable (const LOCK *lock)
 
LockMethod GetLockTagsMethodTable (const LOCKTAG *locktag)
 
uint32 LockTagHashCode (const LOCKTAG *locktag)
 
static uint32 ProcLockHashCode (const PROCLOCKTAG *proclocktag, uint32 hashcode)
 
bool DoLockModesConflict (LOCKMODE mode1, LOCKMODE mode2)
 
bool LockHeldByMe (const LOCKTAG *locktag, LOCKMODE lockmode)
 
bool LockHasWaiters (const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
 
LockAcquireResult LockAcquire (const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock, bool dontWait)
 
LockAcquireResult LockAcquireExtended (const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock, bool dontWait, bool reportMemoryError, LOCALLOCK **locallockp)
 
static void CheckAndSetLockHeld (LOCALLOCK *locallock, bool acquired)
 
bool LockCheckConflicts (LockMethod lockMethodTable, LOCKMODE lockmode, LOCK *lock, PROCLOCK *proclock)
 
void GrantLock (LOCK *lock, PROCLOCK *proclock, LOCKMODE lockmode)
 
void AbortStrongLockAcquire (void)
 
void GrantAwaitedLock (void)
 
void MarkLockClear (LOCALLOCK *locallock)
 
void RemoveFromWaitQueue (PGPROC *proc, uint32 hashcode)
 
bool LockRelease (const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
 
void LockReleaseAll (LOCKMETHODID lockmethodid, bool allLocks)
 
void LockReleaseSession (LOCKMETHODID lockmethodid)
 
void LockReleaseCurrentOwner (LOCALLOCK **locallocks, int nlocks)
 
void LockReassignCurrentOwner (LOCALLOCK **locallocks, int nlocks)
 
VirtualTransactionId * GetLockConflicts (const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
 
static void CheckForSessionAndXactLocks (void)
 
void AtPrepare_Locks (void)
 
void PostPrepare_Locks (TransactionId xid)
 
Size LockShmemSize (void)
 
LockData * GetLockStatusData (void)
 
BlockedProcsData * GetBlockerStatusData (int blocked_pid)
 
xl_standby_lock * GetRunningTransactionLocks (int *nlocks)
 
const char * GetLockmodeName (LOCKMETHODID lockmethodid, LOCKMODE mode)
 
void lock_twophase_recover (TransactionId xid, uint16 info, void *recdata, uint32 len)
 
void lock_twophase_standby_recover (TransactionId xid, uint16 info, void *recdata, uint32 len)
 
void lock_twophase_postcommit (TransactionId xid, uint16 info, void *recdata, uint32 len)
 
void lock_twophase_postabort (TransactionId xid, uint16 info, void *recdata, uint32 len)
 
void VirtualXactLockTableInsert (VirtualTransactionId vxid)
 
void VirtualXactLockTableCleanup (void)
 
static bool XactLockForVirtualXact (VirtualTransactionId vxid, TransactionId xid, bool wait)
 
bool VirtualXactLock (VirtualTransactionId vxid, bool wait)
 
int LockWaiterCount (const LOCKTAG *locktag)
 

Variables

int max_locks_per_xact
 
static const LOCKMASK LockConflicts []
 
static const char *const lock_mode_names []
 
static bool Dummy_trace = false
 
static const LockMethodData default_lockmethod
 
static const LockMethodData user_lockmethod
 
static const LockMethod LockMethods []
 
static int FastPathLocalUseCount = 0
 
static bool IsRelationExtensionLockHeld PG_USED_FOR_ASSERTS_ONLY = false
 
static volatile FastPathStrongRelationLockData * FastPathStrongRelationLocks
 
static HTAB * LockMethodLockHash
 
static HTAB * LockMethodProcLockHash
 
static HTAB * LockMethodLocalHash
 
static LOCALLOCK * StrongLockInProgress
 
static LOCALLOCK * awaitedLock
 
static ResourceOwner awaitedOwner
 

Macro Definition Documentation

◆ ConflictsWithRelationFastPath

#define ConflictsWithRelationFastPath (   locktag,
  mode 
)
Value:
((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
(locktag)->locktag_type == LOCKTAG_RELATION && \
(locktag)->locktag_field1 != InvalidOid && \
(mode) > ShareUpdateExclusiveLock)
#define DEFAULT_LOCKMETHOD
Definition: lock.h:125
@ LOCKTAG_RELATION
Definition: lock.h:137
#define ShareUpdateExclusiveLock
Definition: lockdefs.h:39
static PgChecksumMode mode
Definition: pg_checksums.c:56
#define InvalidOid
Definition: postgres_ext.h:36

Definition at line 219 of file lock.c.

◆ EligibleForRelationFastPath

#define EligibleForRelationFastPath (   locktag,
  mode 
)
Value:
((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
(locktag)->locktag_type == LOCKTAG_RELATION && \
(locktag)->locktag_field1 == MyDatabaseId && \
MyDatabaseId != InvalidOid && \
(mode) < ShareUpdateExclusiveLock)
Oid MyDatabaseId
Definition: globals.c:91

Definition at line 213 of file lock.c.

◆ FAST_PATH_BIT_POSITION

#define FAST_PATH_BIT_POSITION (   n,
 
)
Value:
(AssertMacro((l) >= FAST_PATH_LOCKNUMBER_OFFSET), \
AssertMacro((l) < FAST_PATH_BITS_PER_SLOT + FAST_PATH_LOCKNUMBER_OFFSET), \
AssertMacro((n) < FP_LOCK_SLOTS_PER_BACKEND), \
((l) - FAST_PATH_LOCKNUMBER_OFFSET + FAST_PATH_BITS_PER_SLOT * (n)))
#define AssertMacro(condition)
Definition: c.h:859
#define FAST_PATH_LOCKNUMBER_OFFSET
Definition: lock.c:189
#define FAST_PATH_BITS_PER_SLOT
Definition: lock.c:188
#define FP_LOCK_SLOTS_PER_BACKEND
Definition: proc.h:80

Definition at line 193 of file lock.c.

◆ FAST_PATH_BITS_PER_SLOT

#define FAST_PATH_BITS_PER_SLOT   3

Definition at line 188 of file lock.c.

◆ FAST_PATH_CHECK_LOCKMODE

#define FAST_PATH_CHECK_LOCKMODE (   proc,
  n,
 
)     ((proc)->fpLockBits & (UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)))

Definition at line 202 of file lock.c.

◆ FAST_PATH_CLEAR_LOCKMODE

#define FAST_PATH_CLEAR_LOCKMODE (   proc,
  n,
 
)     (proc)->fpLockBits &= ~(UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l))

Definition at line 200 of file lock.c.

◆ FAST_PATH_GET_BITS

#define FAST_PATH_GET_BITS (   proc,
 
)     (((proc)->fpLockBits >> (FAST_PATH_BITS_PER_SLOT * n)) & FAST_PATH_MASK)

Definition at line 191 of file lock.c.

◆ FAST_PATH_LOCKNUMBER_OFFSET

#define FAST_PATH_LOCKNUMBER_OFFSET   1

Definition at line 189 of file lock.c.

◆ FAST_PATH_MASK

#define FAST_PATH_MASK   ((1 << FAST_PATH_BITS_PER_SLOT) - 1)

Definition at line 190 of file lock.c.

◆ FAST_PATH_SET_LOCKMODE

#define FAST_PATH_SET_LOCKMODE (   proc,
  n,
 
)     (proc)->fpLockBits |= UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)

Definition at line 198 of file lock.c.

◆ FAST_PATH_STRONG_LOCK_HASH_BITS

#define FAST_PATH_STRONG_LOCK_HASH_BITS   10

Definition at line 246 of file lock.c.

◆ FAST_PATH_STRONG_LOCK_HASH_PARTITIONS

#define FAST_PATH_STRONG_LOCK_HASH_PARTITIONS    (1 << FAST_PATH_STRONG_LOCK_HASH_BITS)

Definition at line 247 of file lock.c.

◆ FastPathStrongLockHashPartition

#define FastPathStrongLockHashPartition (   hashcode)     ((hashcode) % FAST_PATH_STRONG_LOCK_HASH_PARTITIONS)

Definition at line 249 of file lock.c.

◆ LOCK_PRINT

#define LOCK_PRINT (   where,
  lock,
  type 
)    ((void) 0)

Definition at line 351 of file lock.c.

◆ NLOCKENTS

Definition at line 55 of file lock.c.

◆ PROCLOCK_PRINT

#define PROCLOCK_PRINT (   where,
  proclockP 
)    ((void) 0)

Definition at line 352 of file lock.c.

Typedef Documentation

◆ TwoPhaseLockRecord

Function Documentation

◆ AbortStrongLockAcquire()

void AbortStrongLockAcquire ( void  )

Definition at line 1738 of file lock.c.

1739 {
1740  uint32 fasthashcode;
1741  LOCALLOCK *locallock = StrongLockInProgress;
1742 
1743  if (locallock == NULL)
1744  return;
1745 
1746  fasthashcode = FastPathStrongLockHashPartition(locallock->hashcode);
1747  Assert(locallock->holdsStrongLockCount == true);
1748  SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
1749  Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
1750  FastPathStrongRelationLocks->count[fasthashcode]--;
1751  locallock->holdsStrongLockCount = false;
1752  StrongLockInProgress = NULL;
1753  SpinLockRelease(&FastPathStrongRelationLocks->mutex);
1754 }
unsigned int uint32
Definition: c.h:506
#define Assert(condition)
Definition: c.h:858
#define FastPathStrongLockHashPartition(hashcode)
Definition: lock.c:249
static volatile FastPathStrongRelationLockData * FastPathStrongRelationLocks
Definition: lock.c:258
static LOCALLOCK * StrongLockInProgress
Definition: lock.c:273
#define SpinLockRelease(lock)
Definition: spin.h:64
#define SpinLockAcquire(lock)
Definition: spin.h:62
uint32 count[FAST_PATH_STRONG_LOCK_HASH_PARTITIONS]
Definition: lock.c:255
uint32 hashcode
Definition: lock.h:432
bool holdsStrongLockCount
Definition: lock.h:439

References Assert, FastPathStrongRelationLockData::count, FastPathStrongLockHashPartition, FastPathStrongRelationLocks, LOCALLOCK::hashcode, LOCALLOCK::holdsStrongLockCount, FastPathStrongRelationLockData::mutex, SpinLockAcquire, SpinLockRelease, and StrongLockInProgress.

Referenced by LockAcquireExtended(), and LockErrorCleanup().

◆ AtPrepare_Locks()

void AtPrepare_Locks ( void  )

Definition at line 3272 of file lock.c.

3273 {
3274  HASH_SEQ_STATUS status;
3275  LOCALLOCK *locallock;
3276 
3277  /* First, verify there aren't locks of both xact and session level */
3278  CheckForSessionAndXactLocks();
3279 
3280  /* Now do the per-locallock cleanup work */
3281  hash_seq_init(&status, LockMethodLocalHash);
3282 
3283  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3284  {
3285  TwoPhaseLockRecord record;
3286  LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3287  bool haveSessionLock;
3288  bool haveXactLock;
3289  int i;
3290 
3291  /*
3292  * Ignore VXID locks. We don't want those to be held by prepared
3293  * transactions, since they aren't meaningful after a restart.
3294  */
3295  if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3296  continue;
3297 
3298  /* Ignore it if we don't actually hold the lock */
3299  if (locallock->nLocks <= 0)
3300  continue;
3301 
3302  /* Scan to see whether we hold it at session or transaction level */
3303  haveSessionLock = haveXactLock = false;
3304  for (i = locallock->numLockOwners - 1; i >= 0; i--)
3305  {
3306  if (lockOwners[i].owner == NULL)
3307  haveSessionLock = true;
3308  else
3309  haveXactLock = true;
3310  }
3311 
3312  /* Ignore it if we have only session lock */
3313  if (!haveXactLock)
3314  continue;
3315 
3316  /* This can't happen, because we already checked it */
3317  if (haveSessionLock)
3318  ereport(ERROR,
3319  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3320  errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3321 
3322  /*
3323  * If the local lock was taken via the fast-path, we need to move it
3324  * to the primary lock table, or just get a pointer to the existing
3325  * primary lock table entry if by chance it's already been
3326  * transferred.
3327  */
3328  if (locallock->proclock == NULL)
3329  {
3330  locallock->proclock = FastPathGetRelationLockEntry(locallock);
3331  locallock->lock = locallock->proclock->tag.myLock;
3332  }
3333 
3334  /*
3335  * Arrange to not release any strong lock count held by this lock
3336  * entry. We must retain the count until the prepared transaction is
3337  * committed or rolled back.
3338  */
3339  locallock->holdsStrongLockCount = false;
3340 
3341  /*
3342  * Create a 2PC record.
3343  */
3344  memcpy(&(record.locktag), &(locallock->tag.lock), sizeof(LOCKTAG));
3345  record.lockmode = locallock->tag.mode;
3346 
3347  RegisterTwoPhaseRecord(TWOPHASE_RM_LOCK_ID, 0,
3348  &record, sizeof(TwoPhaseLockRecord));
3349  }
3350 }
void * hash_seq_search(HASH_SEQ_STATUS *status)
Definition: dynahash.c:1395
void hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp)
Definition: dynahash.c:1385
int errcode(int sqlerrcode)
Definition: elog.c:859
int errmsg(const char *fmt,...)
Definition: elog.c:1072
#define ERROR
Definition: elog.h:39
#define ereport(elevel,...)
Definition: elog.h:149
int i
Definition: isn.c:73
static PROCLOCK * FastPathGetRelationLockEntry(LOCALLOCK *locallock)
Definition: lock.c:2768
static HTAB * LockMethodLocalHash
Definition: lock.c:269
static void CheckForSessionAndXactLocks(void)
Definition: lock.c:3184
@ LOCKTAG_VIRTUALTRANSACTION
Definition: lock.h:143
LOCKTAG lock
Definition: lock.h:410
LOCKMODE mode
Definition: lock.h:411
LOCALLOCKOWNER * lockOwners
Definition: lock.h:438
LOCK * lock
Definition: lock.h:433
int64 nLocks
Definition: lock.h:435
int numLockOwners
Definition: lock.h:436
PROCLOCK * proclock
Definition: lock.h:434
LOCALLOCKTAG tag
Definition: lock.h:429
Definition: lock.h:165
uint8 locktag_type
Definition: lock.h:170
LOCK * myLock
Definition: lock.h:365
PROCLOCKTAG tag
Definition: lock.h:372
LOCKTAG locktag
Definition: lock.c:159
LOCKMODE lockmode
Definition: lock.c:160
void RegisterTwoPhaseRecord(TwoPhaseRmgrId rmid, uint16 info, const void *data, uint32 len)
Definition: twophase.c:1280
#define TWOPHASE_RM_LOCK_ID
Definition: twophase_rmgr.h:25

References CheckForSessionAndXactLocks(), ereport, errcode(), errmsg(), ERROR, FastPathGetRelationLockEntry(), hash_seq_init(), hash_seq_search(), LOCALLOCK::holdsStrongLockCount, i, LOCALLOCKTAG::lock, LOCALLOCK::lock, LockMethodLocalHash, TwoPhaseLockRecord::lockmode, LOCALLOCK::lockOwners, TwoPhaseLockRecord::locktag, LOCKTAG::locktag_type, LOCKTAG_VIRTUALTRANSACTION, LOCALLOCKTAG::mode, PROCLOCKTAG::myLock, LOCALLOCK::nLocks, LOCALLOCK::numLockOwners, LOCALLOCK::proclock, RegisterTwoPhaseRecord(), PROCLOCK::tag, LOCALLOCK::tag, and TWOPHASE_RM_LOCK_ID.

Referenced by PrepareTransaction().

◆ BeginStrongLockAcquire()

static void BeginStrongLockAcquire ( LOCALLOCK locallock,
uint32  fasthashcode 
)
static

Definition at line 1702 of file lock.c.

1703 {
1704  Assert(StrongLockInProgress == NULL);
1705  Assert(locallock->holdsStrongLockCount == false);
1706 
1707  /*
1708  * Adding to a memory location is not atomic, so we take a spinlock to
1709  * ensure we don't collide with someone else trying to bump the count at
1710  * the same time.
1711  *
1712  * XXX: It might be worth considering using an atomic fetch-and-add
1713  * instruction here, on architectures where that is supported.
1714  */
1715 
1716  SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
1717  FastPathStrongRelationLocks->count[fasthashcode]++;
1718  locallock->holdsStrongLockCount = true;
1719  StrongLockInProgress = locallock;
1720  SpinLockRelease(&FastPathStrongRelationLocks->mutex);
1721 }

References Assert, FastPathStrongRelationLockData::count, FastPathStrongRelationLocks, LOCALLOCK::holdsStrongLockCount, FastPathStrongRelationLockData::mutex, SpinLockAcquire, SpinLockRelease, and StrongLockInProgress.

Referenced by LockAcquireExtended().

◆ CheckAndSetLockHeld()

static void CheckAndSetLockHeld ( LOCALLOCK locallock,
bool  acquired 
)
inlinestatic

Definition at line 1342 of file lock.c.

1343 {
1344 #ifdef USE_ASSERT_CHECKING
1345  if (LOCALLOCK_LOCKTAG(*locallock) == LOCKTAG_RELATION_EXTEND)
1346  IsRelationExtensionLockHeld = acquired;
1347 #endif
1348 }
@ LOCKTAG_RELATION_EXTEND
Definition: lock.h:138
#define LOCALLOCK_LOCKTAG(llock)
Definition: lock.h:444

References LOCALLOCK_LOCKTAG, and LOCKTAG_RELATION_EXTEND.

Referenced by GrantLockLocal(), and RemoveLocalLock().

◆ CheckForSessionAndXactLocks()

static void CheckForSessionAndXactLocks ( void  )
static

Definition at line 3184 of file lock.c.

3185 {
3186  typedef struct
3187  {
3188  LOCKTAG lock; /* identifies the lockable object */
3189  bool sessLock; /* is any lockmode held at session level? */
3190  bool xactLock; /* is any lockmode held at xact level? */
3191  } PerLockTagEntry;
3192 
3193  HASHCTL hash_ctl;
3194  HTAB *lockhtab;
3195  HASH_SEQ_STATUS status;
3196  LOCALLOCK *locallock;
3197 
3198  /* Create a local hash table keyed by LOCKTAG only */
3199  hash_ctl.keysize = sizeof(LOCKTAG);
3200  hash_ctl.entrysize = sizeof(PerLockTagEntry);
3201  hash_ctl.hcxt = CurrentMemoryContext;
3202 
3203  lockhtab = hash_create("CheckForSessionAndXactLocks table",
3204  256, /* arbitrary initial size */
3205  &hash_ctl,
3206  HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
3207 
3208  /* Scan local lock table to find entries for each LOCKTAG */
3209  hash_seq_init(&status, LockMethodLocalHash);
3210 
3211  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3212  {
3213  LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3214  PerLockTagEntry *hentry;
3215  bool found;
3216  int i;
3217 
3218  /*
3219  * Ignore VXID locks. We don't want those to be held by prepared
3220  * transactions, since they aren't meaningful after a restart.
3221  */
3222  if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3223  continue;
3224 
3225  /* Ignore it if we don't actually hold the lock */
3226  if (locallock->nLocks <= 0)
3227  continue;
3228 
3229  /* Otherwise, find or make an entry in lockhtab */
3230  hentry = (PerLockTagEntry *) hash_search(lockhtab,
3231  &locallock->tag.lock,
3232  HASH_ENTER, &found);
3233  if (!found) /* initialize, if newly created */
3234  hentry->sessLock = hentry->xactLock = false;
3235 
3236  /* Scan to see if we hold lock at session or xact level or both */
3237  for (i = locallock->numLockOwners - 1; i >= 0; i--)
3238  {
3239  if (lockOwners[i].owner == NULL)
3240  hentry->sessLock = true;
3241  else
3242  hentry->xactLock = true;
3243  }
3244 
3245  /*
3246  * We can throw error immediately when we see both types of locks; no
3247  * need to wait around to see if there are more violations.
3248  */
3249  if (hentry->sessLock && hentry->xactLock)
3250  ereport(ERROR,
3251  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3252  errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3253  }
3254 
3255  /* Success, so clean up */
3256  hash_destroy(lockhtab);
3257 }
void hash_destroy(HTAB *hashp)
Definition: dynahash.c:865
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:955
HTAB * hash_create(const char *tabname, long nelem, const HASHCTL *info, int flags)
Definition: dynahash.c:352
@ HASH_ENTER
Definition: hsearch.h:114
#define HASH_CONTEXT
Definition: hsearch.h:102
#define HASH_ELEM
Definition: hsearch.h:95
#define HASH_BLOBS
Definition: hsearch.h:97
struct LOCKTAG LOCKTAG
MemoryContext CurrentMemoryContext
Definition: mcxt.c:143
Size keysize
Definition: hsearch.h:75
Size entrysize
Definition: hsearch.h:76
MemoryContext hcxt
Definition: hsearch.h:86
Definition: dynahash.c:220

References CurrentMemoryContext, HASHCTL::entrysize, ereport, errcode(), errmsg(), ERROR, HASH_BLOBS, HASH_CONTEXT, hash_create(), hash_destroy(), HASH_ELEM, HASH_ENTER, hash_search(), hash_seq_init(), hash_seq_search(), HASHCTL::hcxt, i, HASHCTL::keysize, LOCALLOCKTAG::lock, LockMethodLocalHash, LOCALLOCK::lockOwners, LOCKTAG::locktag_type, LOCKTAG_VIRTUALTRANSACTION, LOCALLOCK::nLocks, LOCALLOCK::numLockOwners, and LOCALLOCK::tag.

Referenced by AtPrepare_Locks().

◆ CleanUpLock()

static void CleanUpLock ( LOCK lock,
PROCLOCK proclock,
LockMethod  lockMethodTable,
uint32  hashcode,
bool  wakeupNeeded 
)
static

Definition at line 1616 of file lock.c.

1619 {
1620  /*
1621  * If this was my last hold on this lock, delete my entry in the proclock
1622  * table.
1623  */
1624  if (proclock->holdMask == 0)
1625  {
1626  uint32 proclock_hashcode;
1627 
1628  PROCLOCK_PRINT("CleanUpLock: deleting", proclock);
1629  dlist_delete(&proclock->lockLink);
1630  dlist_delete(&proclock->procLink);
1631  proclock_hashcode = ProcLockHashCode(&proclock->tag, hashcode);
1632  if (!hash_search_with_hash_value(LockMethodProcLockHash,
1633  &(proclock->tag),
1634  proclock_hashcode,
1635  HASH_REMOVE,
1636  NULL))
1637  elog(PANIC, "proclock table corrupted");
1638  }
1639 
1640  if (lock->nRequested == 0)
1641  {
1642  /*
1643  * The caller just released the last lock, so garbage-collect the lock
1644  * object.
1645  */
1646  LOCK_PRINT("CleanUpLock: deleting", lock, 0);
1647  Assert(dlist_is_empty(&lock->procLocks));
1648  if (!hash_search_with_hash_value(LockMethodLockHash,
1649  &(lock->tag),
1650  hashcode,
1651  HASH_REMOVE,
1652  NULL))
1653  elog(PANIC, "lock table corrupted");
1654  }
1655  else if (wakeupNeeded)
1656  {
1657  /* There are waiters on this lock, so wake them up. */
1658  ProcLockWakeup(lockMethodTable, lock);
1659  }
1660 }
void * hash_search_with_hash_value(HTAB *hashp, const void *keyPtr, uint32 hashvalue, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:968
#define PANIC
Definition: elog.h:42
#define elog(elevel,...)
Definition: elog.h:224
@ HASH_REMOVE
Definition: hsearch.h:115
static void dlist_delete(dlist_node *node)
Definition: ilist.h:405
static bool dlist_is_empty(const dlist_head *head)
Definition: ilist.h:336
#define LOCK_PRINT(where, lock, type)
Definition: lock.c:351
static uint32 ProcLockHashCode(const PROCLOCKTAG *proclocktag, uint32 hashcode)
Definition: lock.c:552
#define PROCLOCK_PRINT(where, proclockP)
Definition: lock.c:352
static HTAB * LockMethodLockHash
Definition: lock.c:267
static HTAB * LockMethodProcLockHash
Definition: lock.c:268
void ProcLockWakeup(LockMethod lockMethodTable, LOCK *lock)
Definition: proc.c:1706
int nRequested
Definition: lock.h:319
LOCKTAG tag
Definition: lock.h:311
dlist_head procLocks
Definition: lock.h:316
LOCKMASK holdMask
Definition: lock.h:376
dlist_node lockLink
Definition: lock.h:378
dlist_node procLink
Definition: lock.h:379

References Assert, dlist_delete(), dlist_is_empty(), elog, HASH_REMOVE, hash_search_with_hash_value(), PROCLOCK::holdMask, LOCK_PRINT, PROCLOCK::lockLink, LockMethodLockHash, LockMethodProcLockHash, LOCK::nRequested, PANIC, PROCLOCK::procLink, PROCLOCK_PRINT, ProcLockHashCode(), LOCK::procLocks, ProcLockWakeup(), LOCK::tag, and PROCLOCK::tag.

Referenced by LockRefindAndRelease(), LockRelease(), LockReleaseAll(), and RemoveFromWaitQueue().

◆ DoLockModesConflict()

bool DoLockModesConflict ( LOCKMODE  mode1,
LOCKMODE  mode2 
)

Definition at line 570 of file lock.c.

571 {
572  LockMethod lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];
573 
574  if (lockMethodTable->conflictTab[mode1] & LOCKBIT_ON(mode2))
575  return true;
576 
577  return false;
578 }
static const LockMethod LockMethods[]
Definition: lock.c:149
#define LOCKBIT_ON(lockmode)
Definition: lock.h:84
const LOCKMASK * conflictTab
Definition: lock.h:111

References LockMethodData::conflictTab, DEFAULT_LOCKMETHOD, LOCKBIT_ON, and LockMethods.

Referenced by Do_MultiXactIdWait(), DoesMultiXactIdConflict(), initialize_reloptions(), and test_lockmode_for_conflict().

◆ FastPathGetRelationLockEntry()

static PROCLOCK * FastPathGetRelationLockEntry ( LOCALLOCK locallock)
static

Definition at line 2768 of file lock.c.

2769 {
2770  LockMethod lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];
2771  LOCKTAG *locktag = &locallock->tag.lock;
2772  PROCLOCK *proclock = NULL;
2773  LWLock *partitionLock = LockHashPartitionLock(locallock->hashcode);
2774  Oid relid = locktag->locktag_field2;
2775  uint32 f;
2776 
2777  LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
2778 
2779  for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
2780  {
2781  uint32 lockmode;
2782 
2783  /* Look for an allocated slot matching the given relid. */
2784  if (relid != MyProc->fpRelId[f] || FAST_PATH_GET_BITS(MyProc, f) == 0)
2785  continue;
2786 
2787  /* If we don't have a lock of the given mode, forget it! */
2788  lockmode = locallock->tag.mode;
2789  if (!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
2790  break;
2791 
2792  /* Find or create lock object. */
2793  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2794 
2795  proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
2796  locallock->hashcode, lockmode);
2797  if (!proclock)
2798  {
2799  LWLockRelease(partitionLock);
2800  LWLockRelease(&MyProc->fpInfoLock);
2801  ereport(ERROR,
2802  (errcode(ERRCODE_OUT_OF_MEMORY),
2803  errmsg("out of shared memory"),
2804  errhint("You might need to increase %s.", "max_locks_per_transaction")));
2805  }
2806  GrantLock(proclock->tag.myLock, proclock, lockmode);
2807  FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
2808 
2809  LWLockRelease(partitionLock);
2810 
2811  /* No need to examine remaining slots. */
2812  break;
2813  }
2814 
2815  LWLockRelease(&MyProc->fpInfoLock);
2816 
2817  /* Lock may have already been transferred by some other backend. */
2818  if (proclock == NULL)
2819  {
2820  LOCK *lock;
2821  PROCLOCKTAG proclocktag;
2822  uint32 proclock_hashcode;
2823 
2824  LWLockAcquire(partitionLock, LW_SHARED);
2825 
2826  lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
2827  locktag,
2828  locallock->hashcode,
2829  HASH_FIND,
2830  NULL);
2831  if (!lock)
2832  elog(ERROR, "failed to re-find shared lock object");
2833 
2834  proclocktag.myLock = lock;
2835  proclocktag.myProc = MyProc;
2836 
2837  proclock_hashcode = ProcLockHashCode(&proclocktag, locallock->hashcode);
2838  proclock = (PROCLOCK *)
2839  hash_search_with_hash_value(LockMethodProcLockHash,
2840  &proclocktag,
2841  proclock_hashcode,
2842  HASH_FIND,
2843  NULL);
2844  if (!proclock)
2845  elog(ERROR, "failed to re-find shared proclock object");
2846  LWLockRelease(partitionLock);
2847  }
2848 
2849  return proclock;
2850 }
int errhint(const char *fmt,...)
Definition: elog.c:1319
@ HASH_FIND
Definition: hsearch.h:113
static PROCLOCK * SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc, const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode)
Definition: lock.c:1161
#define FAST_PATH_CHECK_LOCKMODE(proc, n, l)
Definition: lock.c:202
void GrantLock(LOCK *lock, PROCLOCK *proclock, LOCKMODE lockmode)
Definition: lock.c:1536
#define FAST_PATH_CLEAR_LOCKMODE(proc, n, l)
Definition: lock.c:200
#define FAST_PATH_GET_BITS(proc, n)
Definition: lock.c:191
#define LockHashPartitionLock(hashcode)
Definition: lock.h:526
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1170
void LWLockRelease(LWLock *lock)
Definition: lwlock.c:1783
@ LW_SHARED
Definition: lwlock.h:115
@ LW_EXCLUSIVE
Definition: lwlock.h:114
unsigned int Oid
Definition: postgres_ext.h:31
PGPROC * MyProc
Definition: proc.c:66
uint32 locktag_field2
Definition: lock.h:167
Definition: lock.h:309
Definition: lwlock.h:42
LWLock fpInfoLock
Definition: proc.h:289
Oid fpRelId[FP_LOCK_SLOTS_PER_BACKEND]
Definition: proc.h:291
PGPROC * myProc
Definition: lock.h:366
Definition: lock.h:370

References DEFAULT_LOCKMETHOD, elog, ereport, errcode(), errhint(), errmsg(), ERROR, FAST_PATH_CHECK_LOCKMODE, FAST_PATH_CLEAR_LOCKMODE, FAST_PATH_GET_BITS, FP_LOCK_SLOTS_PER_BACKEND, PGPROC::fpInfoLock, PGPROC::fpRelId, GrantLock(), HASH_FIND, hash_search_with_hash_value(), LOCALLOCK::hashcode, LOCALLOCKTAG::lock, LockHashPartitionLock, LockMethodLockHash, LockMethodProcLockHash, LockMethods, LOCKTAG::locktag_field2, LW_EXCLUSIVE, LW_SHARED, LWLockAcquire(), LWLockRelease(), LOCALLOCKTAG::mode, PROCLOCKTAG::myLock, MyProc, PROCLOCKTAG::myProc, ProcLockHashCode(), SetupLockInTable(), PROCLOCK::tag, and LOCALLOCK::tag.

Referenced by AtPrepare_Locks().

◆ FastPathGrantRelationLock()

static bool FastPathGrantRelationLock ( Oid  relid,
LOCKMODE  lockmode 
)
static

Definition at line 2613 of file lock.c.

2614 {
2615  uint32 f;
2616  uint32 unused_slot = FP_LOCK_SLOTS_PER_BACKEND;
2617 
2618  /* Scan for existing entry for this relid, remembering empty slot. */
2619  for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
2620  {
2621  if (FAST_PATH_GET_BITS(MyProc, f) == 0)
2622  unused_slot = f;
2623  else if (MyProc->fpRelId[f] == relid)
2624  {
2625  Assert(!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode));
2626  FAST_PATH_SET_LOCKMODE(MyProc, f, lockmode);
2627  return true;
2628  }
2629  }
2630 
2631  /* If no existing entry, use any empty slot. */
2632  if (unused_slot < FP_LOCK_SLOTS_PER_BACKEND)
2633  {
2634  MyProc->fpRelId[unused_slot] = relid;
2635  FAST_PATH_SET_LOCKMODE(MyProc, unused_slot, lockmode);
2636  ++FastPathLocalUseCount;
2637  return true;
2638  }
2639 
2640  /* No existing entry, and no empty slot. */
2641  return false;
2642 }
#define FAST_PATH_SET_LOCKMODE(proc, n, l)
Definition: lock.c:198
static int FastPathLocalUseCount
Definition: lock.c:170

References Assert, FAST_PATH_CHECK_LOCKMODE, FAST_PATH_GET_BITS, FAST_PATH_SET_LOCKMODE, FastPathLocalUseCount, FP_LOCK_SLOTS_PER_BACKEND, PGPROC::fpRelId, and MyProc.

Referenced by LockAcquireExtended().

◆ FastPathTransferRelationLocks()

static bool FastPathTransferRelationLocks ( LockMethod  lockMethodTable,
const LOCKTAG locktag,
uint32  hashcode 
)
static

Definition at line 2680 of file lock.c.

/*
 * FastPathTransferRelationLocks
 *		Transfer every backend's fast-path locks matching (locktag, relid)
 *		into the main shared lock table, so a strong locker can see them.
 *		Returns false if SetupLockInTable fails (shared memory exhausted).
 *
 * NOTE(review): two hyperlinked lines were dropped by the extraction:
 * line 2698 (presumably LWLockAcquire(&proc->fpInfoLock, LW_EXCLUSIVE),
 * given the matching LWLockRelease calls below) and line 2732 (the inner
 * loop's bound, presumably built from FAST_PATH_LOCKNUMBER_OFFSET +
 * FAST_PATH_BITS_PER_SLOT per the referenced-symbols list) -- verify
 * against lock.c.
 */
2682 {
2683  LWLock *partitionLock = LockHashPartitionLock(hashcode);
2684  Oid relid = locktag->locktag_field2;
2685  uint32 i;
2686 
2687  /*
2688  * Every PGPROC that can potentially hold a fast-path lock is present in
2689  * ProcGlobal->allProcs. Prepared transactions are not, but any
2690  * outstanding fast-path locks held by prepared transactions are
2691  * transferred to the main lock table.
2692  */
2693  for (i = 0; i < ProcGlobal->allProcCount; i++)
2694  {
2695  PGPROC *proc = &ProcGlobal->allProcs[i];
2696  uint32 f;
2697 
/* NOTE(review): source line 2698 (fpInfoLock acquisition) dropped here. */
2699 
2700  /*
2701  * If the target backend isn't referencing the same database as the
2702  * lock, then we needn't examine the individual relation IDs at all;
2703  * none of them can be relevant.
2704  *
2705  * proc->databaseId is set at backend startup time and never changes
2706  * thereafter, so it might be safe to perform this test before
2707  * acquiring &proc->fpInfoLock. In particular, it's certainly safe to
2708  * assume that if the target backend holds any fast-path locks, it
2709  * must have performed a memory-fencing operation (in particular, an
2710  * LWLock acquisition) since setting proc->databaseId. However, it's
2711  * less clear that our backend is certain to have performed a memory
2712  * fencing operation since the other backend set proc->databaseId. So
2713  * for now, we test it after acquiring the LWLock just to be safe.
2714  */
2715  if (proc->databaseId != locktag->locktag_field1)
2716  {
2717  LWLockRelease(&proc->fpInfoLock);
2718  continue;
2719  }
2720 
2721  for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
2722  {
2723  uint32 lockmode;
2724 
2725  /* Look for an allocated slot matching the given relid. */
2726  if (relid != proc->fpRelId[f] || FAST_PATH_GET_BITS(proc, f) == 0)
2727  continue;
2728 
2729  /* Find or create lock object. */
2730  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2731  for (lockmode = FAST_PATH_LOCKNUMBER_OFFSET;
/* NOTE(review): source line 2732 (loop bound condition) dropped here. */
2733  ++lockmode)
2734  {
2735  PROCLOCK *proclock;
2736 
2737  if (!FAST_PATH_CHECK_LOCKMODE(proc, f, lockmode))
2738  continue;
2739  proclock = SetupLockInTable(lockMethodTable, proc, locktag,
2740  hashcode, lockmode);
2741  if (!proclock)
2742  {
2743  LWLockRelease(partitionLock);
2744  LWLockRelease(&proc->fpInfoLock);
2745  return false;
2746  }
2747  GrantLock(proclock->tag.myLock, proclock, lockmode);
2748  FAST_PATH_CLEAR_LOCKMODE(proc, f, lockmode);
2749  }
2750  LWLockRelease(partitionLock);
2751 
2752  /* No need to examine remaining slots. */
2753  break;
2754  }
2755  LWLockRelease(&proc->fpInfoLock);
2756  }
2757  return true;
2758 }
PROC_HDR * ProcGlobal
Definition: proc.c:78
uint32 locktag_field1
Definition: lock.h:166
Definition: proc.h:157
Oid databaseId
Definition: proc.h:203
PGPROC * allProcs
Definition: proc.h:380
uint32 allProcCount
Definition: proc.h:398

References PROC_HDR::allProcCount, PROC_HDR::allProcs, PGPROC::databaseId, FAST_PATH_BITS_PER_SLOT, FAST_PATH_CHECK_LOCKMODE, FAST_PATH_CLEAR_LOCKMODE, FAST_PATH_GET_BITS, FAST_PATH_LOCKNUMBER_OFFSET, FP_LOCK_SLOTS_PER_BACKEND, PGPROC::fpInfoLock, PGPROC::fpRelId, GrantLock(), i, LockHashPartitionLock, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), PROCLOCKTAG::myLock, ProcGlobal, SetupLockInTable(), and PROCLOCK::tag.

Referenced by LockAcquireExtended().

◆ FastPathUnGrantRelationLock()

static bool FastPathUnGrantRelationLock ( Oid  relid,
LOCKMODE  lockmode 
)
static

Definition at line 2650 of file lock.c.

/*
 * FastPathUnGrantRelationLock
 *		Remove (relid, lockmode) from this backend's fast-path array.
 *		Returns true if the lock was previously held via fast path.
 *
 * NOTE(review): two hyperlinked lines were dropped by the extraction
 * (source lines 2655 and 2667); both involve FastPathLocalUseCount per
 * the referenced-symbols list and the in-code comment below (presumably
 * a reset before the scan and an increment per occupied slot) -- verify
 * against lock.c.
 */
2651 {
2652  uint32 f;
2653  bool result = false;
2654 
2656  for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
2657  {
2658  if (MyProc->fpRelId[f] == relid
2659  && FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
2660  {
2661  Assert(!result);
2662  FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
2663  result = true;
2664  /* we continue iterating so as to update FastPathLocalUseCount */
2665  }
2666  if (FAST_PATH_GET_BITS(MyProc, f) != 0)
2668  }
2669  return result;
2670 }

References Assert, FAST_PATH_CHECK_LOCKMODE, FAST_PATH_CLEAR_LOCKMODE, FAST_PATH_GET_BITS, FastPathLocalUseCount, FP_LOCK_SLOTS_PER_BACKEND, PGPROC::fpRelId, and MyProc.

Referenced by LockRelease(), and LockReleaseAll().

◆ FinishStrongLockAcquire()

static void FinishStrongLockAcquire ( void  )
static

Definition at line 1728 of file lock.c.

/*
 * FinishStrongLockAcquire
 *		Clear the in-progress marker: the strong-lock acquisition that was
 *		being tracked (for error-cleanup purposes) has completed.
 */
1729 {
1730  StrongLockInProgress = NULL;
1731 }

References StrongLockInProgress.

Referenced by LockAcquireExtended().

◆ GetBlockerStatusData()

BlockedProcsData* GetBlockerStatusData ( int  blocked_pid)

Definition at line 3781 of file lock.c.

/*
 * GetBlockerStatusData
 *		Return lock-wait information about the processes blocking
 *		blocked_pid, as a palloc'd BlockedProcsData.  Holds ProcArrayLock
 *		plus all lock partition locks so the result is a self-consistent
 *		snapshot (see the in-code comments below).
 *
 * NOTE(review): several hyperlinked lines were dropped by the extraction:
 * 3783 (presumably the declaration of "data"), 3787 (presumably its
 * palloc), 3824/3849 (the partition-lock acquire/release inside the two
 * index loops, matching the LockHashPartitionLockByIndex reference),
 * 3829 (the non-group-member call to GetSingleProcBlockerStatusData), and
 * 3836 (the dlist_foreach over lockGroupMembers) -- verify against lock.c.
 */
3782 {
3784  PGPROC *proc;
3785  int i;
3786 
3788 
3789  /*
3790  * Guess how much space we'll need, and preallocate. Most of the time
3791  * this will avoid needing to do repalloc while holding the LWLocks. (We
3792  * assume, but check with an Assert, that MaxBackends is enough entries
3793  * for the procs[] array; the other two could need enlargement, though.)
3794  */
3795  data->nprocs = data->nlocks = data->npids = 0;
3796  data->maxprocs = data->maxlocks = data->maxpids = MaxBackends;
3797  data->procs = (BlockedProcData *) palloc(sizeof(BlockedProcData) * data->maxprocs);
3798  data->locks = (LockInstanceData *) palloc(sizeof(LockInstanceData) * data->maxlocks);
3799  data->waiter_pids = (int *) palloc(sizeof(int) * data->maxpids);
3800 
3801  /*
3802  * In order to search the ProcArray for blocked_pid and assume that that
3803  * entry won't immediately disappear under us, we must hold ProcArrayLock.
3804  * In addition, to examine the lock grouping fields of any other backend,
3805  * we must hold all the hash partition locks. (Only one of those locks is
3806  * actually relevant for any one lock group, but we can't know which one
3807  * ahead of time.) It's fairly annoying to hold all those locks
3808  * throughout this, but it's no worse than GetLockStatusData(), and it
3809  * does have the advantage that we're guaranteed to return a
3810  * self-consistent instantaneous state.
3811  */
3812  LWLockAcquire(ProcArrayLock, LW_SHARED);
3813 
3814  proc = BackendPidGetProcWithLock(blocked_pid);
3815 
3816  /* Nothing to do if it's gone */
3817  if (proc != NULL)
3818  {
3819  /*
3820  * Acquire lock on the entire shared lock data structure. See notes
3821  * in GetLockStatusData().
3822  */
3823  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
3825 
3826  if (proc->lockGroupLeader == NULL)
3827  {
3828  /* Easy case, proc is not a lock group member */
3830  }
3831  else
3832  {
3833  /* Examine all procs in proc's lock group */
3834  dlist_iter iter;
3835 
3837  {
3838  PGPROC *memberProc;
3839 
3840  memberProc = dlist_container(PGPROC, lockGroupLink, iter.cur);
3841  GetSingleProcBlockerStatusData(memberProc, data);
3842  }
3843  }
3844 
3845  /*
3846  * And release locks. See notes in GetLockStatusData().
3847  */
3848  for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
3850 
3851  Assert(data->nprocs <= data->maxprocs);
3852  }
3853 
3854  LWLockRelease(ProcArrayLock);
3855 
3856  return data;
3857 }
int MaxBackends
Definition: globals.c:143
#define dlist_foreach(iter, lhead)
Definition: ilist.h:623
#define dlist_container(type, membername, ptr)
Definition: ilist.h:593
static void GetSingleProcBlockerStatusData(PGPROC *blocked_proc, BlockedProcsData *data)
Definition: lock.c:3861
#define LockHashPartitionLockByIndex(i)
Definition: lock.h:529
#define NUM_LOCK_PARTITIONS
Definition: lwlock.h:97
void * palloc(Size size)
Definition: mcxt.c:1316
const void * data
PGPROC * BackendPidGetProcWithLock(int pid)
Definition: procarray.c:3206
dlist_head lockGroupMembers
Definition: proc.h:301
PGPROC * lockGroupLeader
Definition: proc.h:300
dlist_node * cur
Definition: ilist.h:179

References Assert, BackendPidGetProcWithLock(), dlist_iter::cur, data, dlist_container, dlist_foreach, GetSingleProcBlockerStatusData(), i, PGPROC::lockGroupLeader, PGPROC::lockGroupMembers, LockHashPartitionLockByIndex, LW_SHARED, LWLockAcquire(), LWLockRelease(), MaxBackends, NUM_LOCK_PARTITIONS, and palloc().

Referenced by pg_blocking_pids().

◆ GetLockConflicts()

VirtualTransactionId* GetLockConflicts ( const LOCKTAG locktag,
LOCKMODE  lockmode,
int *  countp 
)

Definition at line 2872 of file lock.c.

/*
 * GetLockConflicts
 *		Return an InvalidVXID-terminated array of the VirtualTransactionIds
 *		of transactions holding (or awaiting, via the main table scan) locks
 *		that conflict with lockmode on locktag.  Optionally reports the
 *		count via *countp.  In hot standby the result array is allocated
 *		once in TopMemoryContext and reused.
 *
 * NOTE(review): several hyperlinked lines were dropped by the extraction:
 * 2901/2903 and 2908 (the allocation-size expressions, presumably
 * MemoryContextAlloc(TopMemoryContext, ...) and the
 * (MaxBackends + max_prepared_xacts + 1) factors per the surrounding
 * comment), 2945 (presumably LWLockAcquire(&proc->fpInfoLock, ...),
 * matching the releases below), 3003 (the hash_search_with_hash_value
 * call assigning "lock"), and 3016/3062 (presumably setting
 * vxids[count].localTransactionId = InvalidLocalTransactionId, per the
 * referenced-symbols list) -- verify against lock.c.
 */
2873 {
2874  static VirtualTransactionId *vxids;
2875  LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
2876  LockMethod lockMethodTable;
2877  LOCK *lock;
2878  LOCKMASK conflictMask;
2879  dlist_iter proclock_iter;
2880  PROCLOCK *proclock;
2881  uint32 hashcode;
2882  LWLock *partitionLock;
2883  int count = 0;
2884  int fast_count = 0;
2885 
2886  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2887  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2888  lockMethodTable = LockMethods[lockmethodid];
2889  if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
2890  elog(ERROR, "unrecognized lock mode: %d", lockmode);
2891 
2892  /*
2893  * Allocate memory to store results, and fill with InvalidVXID. We only
2894  * need enough space for MaxBackends + max_prepared_xacts + a terminator.
2895  * InHotStandby allocate once in TopMemoryContext.
2896  */
2897  if (InHotStandby)
2898  {
2899  if (vxids == NULL)
2900  vxids = (VirtualTransactionId *)
2902  sizeof(VirtualTransactionId) *
2904  }
2905  else
2906  vxids = (VirtualTransactionId *)
2907  palloc0(sizeof(VirtualTransactionId) *
2909 
2910  /* Compute hash code and partition lock, and look up conflicting modes. */
2911  hashcode = LockTagHashCode(locktag);
2912  partitionLock = LockHashPartitionLock(hashcode);
2913  conflictMask = lockMethodTable->conflictTab[lockmode];
2914 
2915  /*
2916  * Fast path locks might not have been entered in the primary lock table.
2917  * If the lock we're dealing with could conflict with such a lock, we must
2918  * examine each backend's fast-path array for conflicts.
2919  */
2920  if (ConflictsWithRelationFastPath(locktag, lockmode))
2921  {
2922  int i;
2923  Oid relid = locktag->locktag_field2;
2924  VirtualTransactionId vxid;
2925 
2926  /*
2927  * Iterate over relevant PGPROCs. Anything held by a prepared
2928  * transaction will have been transferred to the primary lock table,
2929  * so we need not worry about those. This is all a bit fuzzy, because
2930  * new locks could be taken after we've visited a particular
2931  * partition, but the callers had better be prepared to deal with that
2932  * anyway, since the locks could equally well be taken between the
2933  * time we return the value and the time the caller does something
2934  * with it.
2935  */
2936  for (i = 0; i < ProcGlobal->allProcCount; i++)
2937  {
2938  PGPROC *proc = &ProcGlobal->allProcs[i];
2939  uint32 f;
2940 
2941  /* A backend never blocks itself */
2942  if (proc == MyProc)
2943  continue;
2944 
/* NOTE(review): source line 2945 (fpInfoLock acquisition) dropped here. */
2946 
2947  /*
2948  * If the target backend isn't referencing the same database as
2949  * the lock, then we needn't examine the individual relation IDs
2950  * at all; none of them can be relevant.
2951  *
2952  * See FastPathTransferRelationLocks() for discussion of why we do
2953  * this test after acquiring the lock.
2954  */
2955  if (proc->databaseId != locktag->locktag_field1)
2956  {
2957  LWLockRelease(&proc->fpInfoLock);
2958  continue;
2959  }
2960 
2961  for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
2962  {
2963  uint32 lockmask;
2964 
2965  /* Look for an allocated slot matching the given relid. */
2966  if (relid != proc->fpRelId[f])
2967  continue;
2968  lockmask = FAST_PATH_GET_BITS(proc, f);
2969  if (!lockmask)
2970  continue;
2971  lockmask <<= FAST_PATH_LOCKNUMBER_OFFSET;
2972 
2973  /*
2974  * There can only be one entry per relation, so if we found it
2975  * and it doesn't conflict, we can skip the rest of the slots.
2976  */
2977  if ((lockmask & conflictMask) == 0)
2978  break;
2979 
2980  /* Conflict! */
2981  GET_VXID_FROM_PGPROC(vxid, *proc);
2982 
2983  if (VirtualTransactionIdIsValid(vxid))
2984  vxids[count++] = vxid;
2985  /* else, xact already committed or aborted */
2986 
2987  /* No need to examine remaining slots. */
2988  break;
2989  }
2990 
2991  LWLockRelease(&proc->fpInfoLock);
2992  }
2993  }
2994 
2995  /* Remember how many fast-path conflicts we found. */
2996  fast_count = count;
2997 
2998  /*
2999  * Look up the lock object matching the tag.
3000  */
3001  LWLockAcquire(partitionLock, LW_SHARED);
3002 
/* NOTE(review): source line 3003 (the hash lookup assigning "lock") dropped here. */
3004  locktag,
3005  hashcode,
3006  HASH_FIND,
3007  NULL);
3008  if (!lock)
3009  {
3010  /*
3011  * If the lock object doesn't exist, there is nothing holding a lock
3012  * on this lockable object.
3013  */
3014  LWLockRelease(partitionLock);
3015  vxids[count].procNumber = INVALID_PROC_NUMBER;
3017  if (countp)
3018  *countp = count;
3019  return vxids;
3020  }
3021 
3022  /*
3023  * Examine each existing holder (or awaiter) of the lock.
3024  */
3025  dlist_foreach(proclock_iter, &lock->procLocks)
3026  {
3027  proclock = dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
3028 
3029  if (conflictMask & proclock->holdMask)
3030  {
3031  PGPROC *proc = proclock->tag.myProc;
3032 
3033  /* A backend never blocks itself */
3034  if (proc != MyProc)
3035  {
3036  VirtualTransactionId vxid;
3037 
3038  GET_VXID_FROM_PGPROC(vxid, *proc);
3039 
3040  if (VirtualTransactionIdIsValid(vxid))
3041  {
3042  int i;
3043 
3044  /* Avoid duplicate entries. */
3045  for (i = 0; i < fast_count; ++i)
3046  if (VirtualTransactionIdEquals(vxids[i], vxid))
3047  break;
3048  if (i >= fast_count)
3049  vxids[count++] = vxid;
3050  }
3051  /* else, xact already committed or aborted */
3052  }
3053  }
3054  }
3055 
3056  LWLockRelease(partitionLock);
3057 
3058  if (count > MaxBackends + max_prepared_xacts) /* should never happen */
3059  elog(PANIC, "too many conflicting locks found");
3060 
3061  vxids[count].procNumber = INVALID_PROC_NUMBER;
3063  if (countp)
3064  *countp = count;
3065  return vxids;
3066 }
#define lengthof(array)
Definition: c.h:788
#define ConflictsWithRelationFastPath(locktag, mode)
Definition: lock.c:219
uint32 LockTagHashCode(const LOCKTAG *locktag)
Definition: lock.c:504
uint16 LOCKMETHODID
Definition: lock.h:122
#define VirtualTransactionIdIsValid(vxid)
Definition: lock.h:67
#define GET_VXID_FROM_PGPROC(vxid_dst, proc)
Definition: lock.h:77
#define InvalidLocalTransactionId
Definition: lock.h:65
#define VirtualTransactionIdEquals(vxid1, vxid2)
Definition: lock.h:71
int LOCKMASK
Definition: lockdefs.h:25
MemoryContext TopMemoryContext
Definition: mcxt.c:149
void * palloc0(Size size)
Definition: mcxt.c:1346
void * MemoryContextAlloc(MemoryContext context, Size size)
Definition: mcxt.c:1180
#define INVALID_PROC_NUMBER
Definition: procnumber.h:26
uint8 locktag_lockmethodid
Definition: lock.h:171
int numLockModes
Definition: lock.h:110
LocalTransactionId localTransactionId
Definition: lock.h:62
ProcNumber procNumber
Definition: lock.h:61
int max_prepared_xacts
Definition: twophase.c:115
#define InHotStandby
Definition: xlogutils.h:57

References PROC_HDR::allProcCount, PROC_HDR::allProcs, ConflictsWithRelationFastPath, LockMethodData::conflictTab, dlist_iter::cur, PGPROC::databaseId, dlist_container, dlist_foreach, elog, ERROR, FAST_PATH_GET_BITS, FAST_PATH_LOCKNUMBER_OFFSET, FP_LOCK_SLOTS_PER_BACKEND, PGPROC::fpInfoLock, PGPROC::fpRelId, GET_VXID_FROM_PGPROC, HASH_FIND, hash_search_with_hash_value(), PROCLOCK::holdMask, i, InHotStandby, INVALID_PROC_NUMBER, InvalidLocalTransactionId, lengthof, VirtualTransactionId::localTransactionId, LockHashPartitionLock, LockMethodLockHash, LockMethods, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_lockmethodid, LockTagHashCode(), LW_SHARED, LWLockAcquire(), LWLockRelease(), max_prepared_xacts, MaxBackends, MemoryContextAlloc(), MyProc, PROCLOCKTAG::myProc, LockMethodData::numLockModes, palloc0(), PANIC, ProcGlobal, LOCK::procLocks, VirtualTransactionId::procNumber, PROCLOCK::tag, TopMemoryContext, VirtualTransactionIdEquals, and VirtualTransactionIdIsValid.

Referenced by ProcSleep(), ResolveRecoveryConflictWithLock(), and WaitForLockersMultiple().

◆ GetLockmodeName()

const char* GetLockmodeName ( LOCKMETHODID  lockmethodid,
LOCKMODE  mode 
)

Definition at line 4038 of file lock.c.

/*
 * GetLockmodeName
 *		Return the human-readable name of the given lock mode within the
 *		given lock method's table.  Both arguments are Assert-validated
 *		against the LockMethods registry.
 */
4039 {
4040  Assert(lockmethodid > 0 && lockmethodid < lengthof(LockMethods));
4041  Assert(mode > 0 && mode <= LockMethods[lockmethodid]->numLockModes);
4042  return LockMethods[lockmethodid]->lockModeNames[mode];
4043 }
const char *const * lockModeNames
Definition: lock.h:112

References Assert, lengthof, LockMethods, LockMethodData::lockModeNames, and mode.

Referenced by CheckRelationLockedByMe(), DeadLockReport(), pg_lock_status(), and ProcSleep().

◆ GetLocksMethodTable()

LockMethod GetLocksMethodTable ( const LOCK lock)

Definition at line 474 of file lock.c.

/*
 * GetLocksMethodTable
 *		Look up the LockMethod table governing the given LOCK, using the
 *		method id embedded in the lock (LOCK_LOCKMETHOD).
 */
475 {
476  LOCKMETHODID lockmethodid = LOCK_LOCKMETHOD(*lock);
477 
478  Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
479  return LockMethods[lockmethodid];
480 }
#define LOCK_LOCKMETHOD(lock)
Definition: lock.h:324

References Assert, lengthof, LOCK_LOCKMETHOD, and LockMethods.

Referenced by DeadLockCheck(), and FindLockCycleRecurseMember().

◆ GetLockStatusData()

LockData* GetLockStatusData ( void  )

Definition at line 3589 of file lock.c.

/*
 * GetLockStatusData
 *		Build a palloc'd LockData array describing all locks in the system:
 *		first each backend's fast-path slots and fast-path VXID lock (taken
 *		one fpInfoLock at a time), then everything in the primary lock
 *		table under all partition locks held in shared mode.
 *
 * NOTE(review): several hyperlinked lines were dropped by the extraction:
 * 3622 (presumably LWLockAcquire(&proc->fpInfoLock, ...), matching the
 * release at 3689), 3673 (presumably vxid.localTransactionId =
 * proc->fpLocalTransactionId, per the referenced-symbols list), 3706/3750
 * (the partition-lock acquire/release in the two index loops), 3709
 * (presumably computing data->nelements from el plus
 * hash_get_num_entries(LockMethodProcLockHash)), and 3718 (the
 * hash_seq_init call for the proclock scan) -- verify against lock.c.
 */
3590 {
3591  LockData *data;
3592  PROCLOCK *proclock;
3593  HASH_SEQ_STATUS seqstat;
3594  int els;
3595  int el;
3596  int i;
3597 
3598  data = (LockData *) palloc(sizeof(LockData));
3599 
3600  /* Guess how much space we'll need. */
3601  els = MaxBackends;
3602  el = 0;
3603  data->locks = (LockInstanceData *) palloc(sizeof(LockInstanceData) * els);
3604 
3605  /*
3606  * First, we iterate through the per-backend fast-path arrays, locking
3607  * them one at a time. This might produce an inconsistent picture of the
3608  * system state, but taking all of those LWLocks at the same time seems
3609  * impractical (in particular, note MAX_SIMUL_LWLOCKS). It shouldn't
3610  * matter too much, because none of these locks can be involved in lock
3611  * conflicts anyway - anything that might must be present in the main lock
3612  * table. (For the same reason, we don't sweat about making leaderPid
3613  * completely valid. We cannot safely dereference another backend's
3614  * lockGroupLeader field without holding all lock partition locks, and
3615  * it's not worth that.)
3616  */
3617  for (i = 0; i < ProcGlobal->allProcCount; ++i)
3618  {
3619  PGPROC *proc = &ProcGlobal->allProcs[i];
3620  uint32 f;
3621 
3623 
3624  for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; ++f)
3625  {
3626  LockInstanceData *instance;
3627  uint32 lockbits = FAST_PATH_GET_BITS(proc, f);
3628 
3629  /* Skip unallocated slots. */
3630  if (!lockbits)
3631  continue;
3632 
3633  if (el >= els)
3634  {
3635  els += MaxBackends;
3636  data->locks = (LockInstanceData *)
3637  repalloc(data->locks, sizeof(LockInstanceData) * els);
3638  }
3639 
3640  instance = &data->locks[el];
3641  SET_LOCKTAG_RELATION(instance->locktag, proc->databaseId,
3642  proc->fpRelId[f]);
3643  instance->holdMask = lockbits << FAST_PATH_LOCKNUMBER_OFFSET;
3644  instance->waitLockMode = NoLock;
3645  instance->vxid.procNumber = proc->vxid.procNumber;
3646  instance->vxid.localTransactionId = proc->vxid.lxid;
3647  instance->pid = proc->pid;
3648  instance->leaderPid = proc->pid;
3649  instance->fastpath = true;
3650 
3651  /*
3652  * Successfully taking fast path lock means there were no
3653  * conflicting locks.
3654  */
3655  instance->waitStart = 0;
3656 
3657  el++;
3658  }
3659 
3660  if (proc->fpVXIDLock)
3661  {
3662  VirtualTransactionId vxid;
3663  LockInstanceData *instance;
3664 
3665  if (el >= els)
3666  {
3667  els += MaxBackends;
3668  data->locks = (LockInstanceData *)
3669  repalloc(data->locks, sizeof(LockInstanceData) * els);
3670  }
3671 
3672  vxid.procNumber = proc->vxid.procNumber;
/* NOTE(review): source line 3673 (setting vxid.localTransactionId) dropped here. */
3674 
3675  instance = &data->locks[el];
3676  SET_LOCKTAG_VIRTUALTRANSACTION(instance->locktag, vxid);
3677  instance->holdMask = LOCKBIT_ON(ExclusiveLock);
3678  instance->waitLockMode = NoLock;
3679  instance->vxid.procNumber = proc->vxid.procNumber;
3680  instance->vxid.localTransactionId = proc->vxid.lxid;
3681  instance->pid = proc->pid;
3682  instance->leaderPid = proc->pid;
3683  instance->fastpath = true;
3684  instance->waitStart = 0;
3685 
3686  el++;
3687  }
3688 
3689  LWLockRelease(&proc->fpInfoLock);
3690  }
3691 
3692  /*
3693  * Next, acquire lock on the entire shared lock data structure. We do
3694  * this so that, at least for locks in the primary lock table, the state
3695  * will be self-consistent.
3696  *
3697  * Since this is a read-only operation, we take shared instead of
3698  * exclusive lock. There's not a whole lot of point to this, because all
3699  * the normal operations require exclusive lock, but it doesn't hurt
3700  * anything either. It will at least allow two backends to do
3701  * GetLockStatusData in parallel.
3702  *
3703  * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
3704  */
3705  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
3707 
3708  /* Now we can safely count the number of proclocks */
3710  if (data->nelements > els)
3711  {
3712  els = data->nelements;
3713  data->locks = (LockInstanceData *)
3714  repalloc(data->locks, sizeof(LockInstanceData) * els);
3715  }
3716 
3717  /* Now scan the tables to copy the data */
3719 
3720  while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
3721  {
3722  PGPROC *proc = proclock->tag.myProc;
3723  LOCK *lock = proclock->tag.myLock;
3724  LockInstanceData *instance = &data->locks[el];
3725 
3726  memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
3727  instance->holdMask = proclock->holdMask;
3728  if (proc->waitLock == proclock->tag.myLock)
3729  instance->waitLockMode = proc->waitLockMode;
3730  else
3731  instance->waitLockMode = NoLock;
3732  instance->vxid.procNumber = proc->vxid.procNumber;
3733  instance->vxid.localTransactionId = proc->vxid.lxid;
3734  instance->pid = proc->pid;
3735  instance->leaderPid = proclock->groupLeader->pid;
3736  instance->fastpath = false;
3737  instance->waitStart = (TimestampTz) pg_atomic_read_u64(&proc->waitStart);
3738 
3739  el++;
3740  }
3741 
3742  /*
3743  * And release locks. We do this in reverse order for two reasons: (1)
3744  * Anyone else who needs more than one of the locks will be trying to lock
3745  * them in increasing order; we don't want to release the other process
3746  * until it can get all the locks it needs. (2) This avoids O(N^2)
3747  * behavior inside LWLockRelease.
3748  */
3749  for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
3751 
3752  Assert(el == data->nelements);
3753 
3754  return data;
3755 }
static uint64 pg_atomic_read_u64(volatile pg_atomic_uint64 *ptr)
Definition: atomics.h:462
int64 TimestampTz
Definition: timestamp.h:39
long hash_get_num_entries(HTAB *hashp)
Definition: dynahash.c:1341
#define SET_LOCKTAG_VIRTUALTRANSACTION(locktag, vxid)
Definition: lock.h:235
#define SET_LOCKTAG_RELATION(locktag, dboid, reloid)
Definition: lock.h:181
#define NoLock
Definition: lockdefs.h:34
#define ExclusiveLock
Definition: lockdefs.h:42
void * repalloc(void *pointer, Size size)
Definition: mcxt.c:1540
Definition: lock.h:466
LOCKMASK holdMask
Definition: lock.h:455
LOCKMODE waitLockMode
Definition: lock.h:456
bool fastpath
Definition: lock.h:462
LOCKTAG locktag
Definition: lock.h:454
TimestampTz waitStart
Definition: lock.h:458
int leaderPid
Definition: lock.h:461
VirtualTransactionId vxid
Definition: lock.h:457
LocalTransactionId lxid
Definition: proc.h:196
pg_atomic_uint64 waitStart
Definition: proc.h:233
bool fpVXIDLock
Definition: proc.h:292
ProcNumber procNumber
Definition: proc.h:191
int pid
Definition: proc.h:178
LOCK * waitLock
Definition: proc.h:228
LOCKMODE waitLockMode
Definition: proc.h:230
struct PGPROC::@117 vxid
LocalTransactionId fpLocalTransactionId
Definition: proc.h:293
PGPROC * groupLeader
Definition: lock.h:375

References PROC_HDR::allProcCount, PROC_HDR::allProcs, Assert, data, PGPROC::databaseId, ExclusiveLock, FAST_PATH_GET_BITS, FAST_PATH_LOCKNUMBER_OFFSET, LockInstanceData::fastpath, FP_LOCK_SLOTS_PER_BACKEND, PGPROC::fpInfoLock, PGPROC::fpLocalTransactionId, PGPROC::fpRelId, PGPROC::fpVXIDLock, PROCLOCK::groupLeader, hash_get_num_entries(), hash_seq_init(), hash_seq_search(), PROCLOCK::holdMask, LockInstanceData::holdMask, i, LockInstanceData::leaderPid, VirtualTransactionId::localTransactionId, LOCKBIT_ON, LockHashPartitionLockByIndex, LockMethodProcLockHash, LockInstanceData::locktag, LW_SHARED, LWLockAcquire(), LWLockRelease(), PGPROC::lxid, MaxBackends, PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, NoLock, NUM_LOCK_PARTITIONS, palloc(), pg_atomic_read_u64(), LockInstanceData::pid, PGPROC::pid, ProcGlobal, VirtualTransactionId::procNumber, PGPROC::procNumber, repalloc(), SET_LOCKTAG_RELATION, SET_LOCKTAG_VIRTUALTRANSACTION, LOCK::tag, PROCLOCK::tag, LockInstanceData::vxid, PGPROC::vxid, PGPROC::waitLock, LockInstanceData::waitLockMode, PGPROC::waitLockMode, LockInstanceData::waitStart, and PGPROC::waitStart.

Referenced by pg_lock_status().

◆ GetLockTagsMethodTable()

LockMethod GetLockTagsMethodTable ( const LOCKTAG locktag)

Definition at line 486 of file lock.c.

/*
 * GetLockTagsMethodTable
 *		Look up the LockMethod table governing the given LOCKTAG, using
 *		the method id stored in the tag itself.
 */
487 {
488  LOCKMETHODID lockmethodid = (LOCKMETHODID) locktag->locktag_lockmethodid;
489 
490  Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
491  return LockMethods[lockmethodid];
492 }

References Assert, lengthof, LockMethods, and LOCKTAG::locktag_lockmethodid.

Referenced by pg_blocking_pids().

◆ GetRunningTransactionLocks()

xl_standby_lock* GetRunningTransactionLocks ( int *  nlocks)

Definition at line 3956 of file lock.c.

/*
 * GetRunningTransactionLocks
 *		Return a palloc'd array of xl_standby_lock entries, one per granted
 *		relation-level AccessExclusiveLock held by a transaction with a
 *		valid xid, for inclusion in a standby snapshot WAL record.  Sets
 *		*nlocks to the number of entries.
 *
 * NOTE(review): several hyperlinked lines were dropped by the extraction:
 * 3971/4030 (the partition-lock acquire/release inside the two index
 * loops, matching LockHashPartitionLockByIndex in the referenced-symbols
 * list), 3974 (presumably els = hash_get_num_entries(
 * LockMethodProcLockHash), given the Assert(index <= els) below), and
 * 3983 (the hash_seq_init call for the proclock scan) -- verify against
 * lock.c.
 */
3957 {
3958  xl_standby_lock *accessExclusiveLocks;
3959  PROCLOCK *proclock;
3960  HASH_SEQ_STATUS seqstat;
3961  int i;
3962  int index;
3963  int els;
3964 
3965  /*
3966  * Acquire lock on the entire shared lock data structure.
3967  *
3968  * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
3969  */
3970  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
3972 
3973  /* Now we can safely count the number of proclocks */
3975 
3976  /*
3977  * Allocating enough space for all locks in the lock table is overkill,
3978  * but it's more convenient and faster than having to enlarge the array.
3979  */
3980  accessExclusiveLocks = palloc(els * sizeof(xl_standby_lock));
3981 
3982  /* Now scan the tables to copy the data */
3984 
3985  /*
3986  * If lock is a currently granted AccessExclusiveLock then it will have
3987  * just one proclock holder, so locks are never accessed twice in this
3988  * particular case. Don't copy this code for use elsewhere because in the
3989  * general case this will give you duplicate locks when looking at
3990  * non-exclusive lock types.
3991  */
3992  index = 0;
3993  while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
3994  {
3995  /* make sure this definition matches the one used in LockAcquire */
3996  if ((proclock->holdMask & LOCKBIT_ON(AccessExclusiveLock)) &&
3997  proclock->tag.myLock->tag.locktag_type == LOCKTAG_RELATION)
3998  {
3999  PGPROC *proc = proclock->tag.myProc;
4000  LOCK *lock = proclock->tag.myLock;
4001  TransactionId xid = proc->xid;
4002 
4003  /*
4004  * Don't record locks for transactions if we know they have
4005  * already issued their WAL record for commit but not yet released
4006  * lock. It is still possible that we see locks held by already
4007  * complete transactions, if they haven't yet zeroed their xids.
4008  */
4009  if (!TransactionIdIsValid(xid))
4010  continue;
4011 
4012  accessExclusiveLocks[index].xid = xid;
4013  accessExclusiveLocks[index].dbOid = lock->tag.locktag_field1;
4014  accessExclusiveLocks[index].relOid = lock->tag.locktag_field2;
4015 
4016  index++;
4017  }
4018  }
4019 
4020  Assert(index <= els);
4021 
4022  /*
4023  * And release locks. We do this in reverse order for two reasons: (1)
4024  * Anyone else who needs more than one of the locks will be trying to lock
4025  * them in increasing order; we don't want to release the other process
4026  * until it can get all the locks it needs. (2) This avoids O(N^2)
4027  * behavior inside LWLockRelease.
4028  */
4029  for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
4031 
4032  *nlocks = index;
4033  return accessExclusiveLocks;
4034 }
uint32 TransactionId
Definition: c.h:652
#define AccessExclusiveLock
Definition: lockdefs.h:43
TransactionId xid
Definition: proc.h:168
Definition: type.h:95
TransactionId xid
Definition: lockdefs.h:51
#define TransactionIdIsValid(xid)
Definition: transam.h:41

References AccessExclusiveLock, Assert, xl_standby_lock::dbOid, hash_get_num_entries(), hash_seq_init(), hash_seq_search(), PROCLOCK::holdMask, i, LOCKBIT_ON, LockHashPartitionLockByIndex, LockMethodProcLockHash, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG_RELATION, LOCKTAG::locktag_type, LW_SHARED, LWLockAcquire(), LWLockRelease(), PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, NUM_LOCK_PARTITIONS, palloc(), xl_standby_lock::relOid, LOCK::tag, PROCLOCK::tag, TransactionIdIsValid, xl_standby_lock::xid, and PGPROC::xid.

Referenced by LogStandbySnapshot().

◆ GetSingleProcBlockerStatusData()

static void GetSingleProcBlockerStatusData ( PGPROC blocked_proc,
BlockedProcsData data 
)
static

Definition at line 3861 of file lock.c.

/*
 * GetSingleProcBlockerStatusData
 *		Append to *data one procs[] entry for blocked_proc, plus one
 *		locks[] entry per PROCLOCK on the lock it is waiting for, plus the
 *		PIDs queued ahead of it on that lock's wait queue.  Caller must
 *		hold all the lock partition locks (see GetBlockerStatusData).
 */
3862 {
3863  LOCK *theLock = blocked_proc->waitLock;
3864  BlockedProcData *bproc;
3865  dlist_iter proclock_iter;
3866  dlist_iter proc_iter;
3867  dclist_head *waitQueue;
3868  int queue_size;
3869 
3870  /* Nothing to do if this proc is not blocked */
3871  if (theLock == NULL)
3872  return;
3873 
3874  /* Set up a procs[] element */
3875  bproc = &data->procs[data->nprocs++];
3876  bproc->pid = blocked_proc->pid;
3877  bproc->first_lock = data->nlocks;
3878  bproc->first_waiter = data->npids;
3879 
3880  /*
3881  * We may ignore the proc's fast-path arrays, since nothing in those could
3882  * be related to a contended lock.
3883  */
3884 
3885  /* Collect all PROCLOCKs associated with theLock */
3886  dlist_foreach(proclock_iter, &theLock->procLocks)
3887  {
3888  PROCLOCK *proclock =
3889  dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
3890  PGPROC *proc = proclock->tag.myProc;
3891  LOCK *lock = proclock->tag.myLock;
3892  LockInstanceData *instance;
3893 
3894  if (data->nlocks >= data->maxlocks)
3895  {
3896  data->maxlocks += MaxBackends;
3897  data->locks = (LockInstanceData *)
3898  repalloc(data->locks, sizeof(LockInstanceData) * data->maxlocks);
3899  }
3900 
3901  instance = &data->locks[data->nlocks];
3902  memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
3903  instance->holdMask = proclock->holdMask;
3904  if (proc->waitLock == lock)
3905  instance->waitLockMode = proc->waitLockMode;
3906  else
3907  instance->waitLockMode = NoLock;
3908  instance->vxid.procNumber = proc->vxid.procNumber;
3909  instance->vxid.localTransactionId = proc->vxid.lxid;
3910  instance->pid = proc->pid;
3911  instance->leaderPid = proclock->groupLeader->pid;
3912  instance->fastpath = false;
3913  data->nlocks++;
3914  }
3915 
3916  /* Enlarge waiter_pids[] if it's too small to hold all wait queue PIDs */
3917  waitQueue = &(theLock->waitProcs);
3918  queue_size = dclist_count(waitQueue);
3919 
3920  if (queue_size > data->maxpids - data->npids)
3921  {
3922  data->maxpids = Max(data->maxpids + MaxBackends,
3923  data->npids + queue_size);
3924  data->waiter_pids = (int *) repalloc(data->waiter_pids,
3925  sizeof(int) * data->maxpids);
3926  }
3927 
3928  /* Collect PIDs from the lock's wait queue, stopping at blocked_proc */
3929  dclist_foreach(proc_iter, waitQueue)
3930  {
3931  PGPROC *queued_proc = dlist_container(PGPROC, links, proc_iter.cur);
3932 
3933  if (queued_proc == blocked_proc)
3934  break;
3935  data->waiter_pids[data->npids++] = queued_proc->pid;
/* NOTE(review): the next assignment is a dead store -- queued_proc is
 * re-derived from proc_iter.cur at the top of each dclist_foreach
 * iteration, so this value is never read. */
3936  queued_proc = (PGPROC *) queued_proc->links.next;
3937  }
3938 
3939  bproc->num_locks = data->nlocks - bproc->first_lock;
3940  bproc->num_waiters = data->npids - bproc->first_waiter;
3941 }
#define Max(x, y)
Definition: c.h:998
static uint32 dclist_count(const dclist_head *head)
Definition: ilist.h:932
#define dclist_foreach(iter, lhead)
Definition: ilist.h:970
int first_lock
Definition: lock.h:476
int first_waiter
Definition: lock.h:480
int num_waiters
Definition: lock.h:481
int num_locks
Definition: lock.h:477
dclist_head waitProcs
Definition: lock.h:317
dlist_node links
Definition: proc.h:159
dlist_node * next
Definition: ilist.h:140
static struct link * links
Definition: zic.c:299

References dlist_iter::cur, data, dclist_count(), dclist_foreach, dlist_container, dlist_foreach, LockInstanceData::fastpath, BlockedProcData::first_lock, BlockedProcData::first_waiter, PROCLOCK::groupLeader, PROCLOCK::holdMask, LockInstanceData::holdMask, LockInstanceData::leaderPid, PGPROC::links, links, VirtualTransactionId::localTransactionId, LockInstanceData::locktag, PGPROC::lxid, Max, MaxBackends, PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, dlist_node::next, NoLock, BlockedProcData::num_locks, BlockedProcData::num_waiters, LockInstanceData::pid, BlockedProcData::pid, PGPROC::pid, LOCK::procLocks, VirtualTransactionId::procNumber, PGPROC::procNumber, repalloc(), LOCK::tag, PROCLOCK::tag, LockInstanceData::vxid, PGPROC::vxid, PGPROC::waitLock, LockInstanceData::waitLockMode, PGPROC::waitLockMode, and LOCK::waitProcs.

Referenced by GetBlockerStatusData().

◆ GrantAwaitedLock()

void GrantAwaitedLock ( void  )

Definition at line 1767 of file lock.c.

1768 {
1770 }
static LOCALLOCK * awaitedLock
Definition: lock.c:274
static ResourceOwner awaitedOwner
Definition: lock.c:275
static void GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner)
Definition: lock.c:1670

References awaitedLock, awaitedOwner, and GrantLockLocal().

Referenced by LockErrorCleanup(), and ProcSleep().

◆ GrantLock()

void GrantLock ( LOCK lock,
PROCLOCK proclock,
LOCKMODE  lockmode 
)

Definition at line 1536 of file lock.c.

1537 {
1538  lock->nGranted++;
1539  lock->granted[lockmode]++;
1540  lock->grantMask |= LOCKBIT_ON(lockmode);
1541  if (lock->granted[lockmode] == lock->requested[lockmode])
1542  lock->waitMask &= LOCKBIT_OFF(lockmode);
1543  proclock->holdMask |= LOCKBIT_ON(lockmode);
1544  LOCK_PRINT("GrantLock", lock, lockmode);
1545  Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
1546  Assert(lock->nGranted <= lock->nRequested);
1547 }
#define LOCKBIT_OFF(lockmode)
Definition: lock.h:85
int requested[MAX_LOCKMODES]
Definition: lock.h:318
int granted[MAX_LOCKMODES]
Definition: lock.h:320
LOCKMASK grantMask
Definition: lock.h:314
LOCKMASK waitMask
Definition: lock.h:315
int nGranted
Definition: lock.h:321

References Assert, LOCK::granted, LOCK::grantMask, PROCLOCK::holdMask, LOCK_PRINT, LOCKBIT_OFF, LOCKBIT_ON, LOCK::nGranted, LOCK::nRequested, LOCK::requested, and LOCK::waitMask.

Referenced by FastPathGetRelationLockEntry(), FastPathTransferRelationLocks(), lock_twophase_recover(), LockAcquireExtended(), ProcLockWakeup(), ProcSleep(), and VirtualXactLock().

◆ GrantLockLocal()

static void GrantLockLocal ( LOCALLOCK locallock,
ResourceOwner  owner 
)
static

Definition at line 1670 of file lock.c.

1671 {
1672  LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
1673  int i;
1674 
1675  Assert(locallock->numLockOwners < locallock->maxLockOwners);
1676  /* Count the total */
1677  locallock->nLocks++;
1678  /* Count the per-owner lock */
1679  for (i = 0; i < locallock->numLockOwners; i++)
1680  {
1681  if (lockOwners[i].owner == owner)
1682  {
1683  lockOwners[i].nLocks++;
1684  return;
1685  }
1686  }
1687  lockOwners[i].owner = owner;
1688  lockOwners[i].nLocks = 1;
1689  locallock->numLockOwners++;
1690  if (owner != NULL)
1691  ResourceOwnerRememberLock(owner, locallock);
1692 
1693  /* Indicate that the lock is acquired for certain types of locks. */
1694  CheckAndSetLockHeld(locallock, true);
1695 }
static void CheckAndSetLockHeld(LOCALLOCK *locallock, bool acquired)
Definition: lock.c:1342
void ResourceOwnerRememberLock(ResourceOwner owner, LOCALLOCK *locallock)
Definition: resowner.c:1045
int64 nLocks
Definition: lock.h:423
struct ResourceOwnerData * owner
Definition: lock.h:422
int maxLockOwners
Definition: lock.h:437

References Assert, CheckAndSetLockHeld(), i, LOCALLOCK::lockOwners, LOCALLOCK::maxLockOwners, LOCALLOCKOWNER::nLocks, LOCALLOCK::nLocks, LOCALLOCK::numLockOwners, LOCALLOCKOWNER::owner, and ResourceOwnerRememberLock().

Referenced by GrantAwaitedLock(), and LockAcquireExtended().

◆ InitLocks()

void InitLocks ( void  )

Definition at line 392 of file lock.c.

393 {
394  HASHCTL info;
395  long init_table_size,
396  max_table_size;
397  bool found;
398 
399  /*
400  * Compute init/max size to request for lock hashtables. Note these
401  * calculations must agree with LockShmemSize!
402  */
403  max_table_size = NLOCKENTS();
404  init_table_size = max_table_size / 2;
405 
406  /*
407  * Allocate hash table for LOCK structs. This stores per-locked-object
408  * information.
409  */
410  info.keysize = sizeof(LOCKTAG);
411  info.entrysize = sizeof(LOCK);
413 
414  LockMethodLockHash = ShmemInitHash("LOCK hash",
415  init_table_size,
416  max_table_size,
417  &info,
419 
420  /* Assume an average of 2 holders per lock */
421  max_table_size *= 2;
422  init_table_size *= 2;
423 
424  /*
425  * Allocate hash table for PROCLOCK structs. This stores
426  * per-lock-per-holder information.
427  */
428  info.keysize = sizeof(PROCLOCKTAG);
429  info.entrysize = sizeof(PROCLOCK);
430  info.hash = proclock_hash;
432 
433  LockMethodProcLockHash = ShmemInitHash("PROCLOCK hash",
434  init_table_size,
435  max_table_size,
436  &info,
438 
439  /*
440  * Allocate fast-path structures.
441  */
443  ShmemInitStruct("Fast Path Strong Relation Lock Data",
444  sizeof(FastPathStrongRelationLockData), &found);
445  if (!found)
447 
448  /*
449  * Allocate non-shared hash table for LOCALLOCK structs. This stores lock
450  * counts and resource owner information.
451  *
452  * The non-shared table could already exist in this process (this occurs
453  * when the postmaster is recreating shared memory after a backend crash).
454  * If so, delete and recreate it. (We could simply leave it, since it
455  * ought to be empty in the postmaster, but for safety let's zap it.)
456  */
459 
460  info.keysize = sizeof(LOCALLOCKTAG);
461  info.entrysize = sizeof(LOCALLOCK);
462 
463  LockMethodLocalHash = hash_create("LOCALLOCK hash",
464  16,
465  &info,
467 }
#define HASH_FUNCTION
Definition: hsearch.h:98
#define HASH_PARTITION
Definition: hsearch.h:92
#define NLOCKENTS()
Definition: lock.c:55
static uint32 proclock_hash(const void *key, Size keysize)
Definition: lock.c:521
struct LOCALLOCK LOCALLOCK
struct LOCK LOCK
struct PROCLOCK PROCLOCK
struct PROCLOCKTAG PROCLOCKTAG
struct LOCALLOCKTAG LOCALLOCKTAG
void * ShmemInitStruct(const char *name, Size size, bool *foundPtr)
Definition: shmem.c:387
HTAB * ShmemInitHash(const char *name, long init_size, long max_size, HASHCTL *infoP, int hash_flags)
Definition: shmem.c:332
#define SpinLockInit(lock)
Definition: spin.h:60
HashValueFunc hash
Definition: hsearch.h:78
long num_partitions
Definition: hsearch.h:68

References HASHCTL::entrysize, FastPathStrongRelationLocks, HASHCTL::hash, HASH_BLOBS, hash_create(), hash_destroy(), HASH_ELEM, HASH_FUNCTION, HASH_PARTITION, HASHCTL::keysize, LockMethodLocalHash, LockMethodLockHash, LockMethodProcLockHash, FastPathStrongRelationLockData::mutex, NLOCKENTS, NUM_LOCK_PARTITIONS, HASHCTL::num_partitions, proclock_hash(), ShmemInitHash(), ShmemInitStruct(), and SpinLockInit.

Referenced by CreateOrAttachShmemStructs().

◆ lock_twophase_postabort()

void lock_twophase_postabort ( TransactionId  xid,
uint16  info,
void *  recdata,
uint32  len 
)

Definition at line 4381 of file lock.c.

4383 {
4384  lock_twophase_postcommit(xid, info, recdata, len);
4385 }
void lock_twophase_postcommit(TransactionId xid, uint16 info, void *recdata, uint32 len)
Definition: lock.c:4355
const void size_t len

References len, and lock_twophase_postcommit().

◆ lock_twophase_postcommit()

void lock_twophase_postcommit ( TransactionId  xid,
uint16  info,
void *  recdata,
uint32  len 
)

Definition at line 4355 of file lock.c.

4357 {
4358  TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4359  PGPROC *proc = TwoPhaseGetDummyProc(xid, true);
4360  LOCKTAG *locktag;
4361  LOCKMETHODID lockmethodid;
4362  LockMethod lockMethodTable;
4363 
4364  Assert(len == sizeof(TwoPhaseLockRecord));
4365  locktag = &rec->locktag;
4366  lockmethodid = locktag->locktag_lockmethodid;
4367 
4368  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4369  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4370  lockMethodTable = LockMethods[lockmethodid];
4371 
4372  LockRefindAndRelease(lockMethodTable, proc, locktag, rec->lockmode, true);
4373 }
static void LockRefindAndRelease(LockMethod lockMethodTable, PGPROC *proc, LOCKTAG *locktag, LOCKMODE lockmode, bool decrement_strong_lock_count)
Definition: lock.c:3080
PGPROC * TwoPhaseGetDummyProc(TransactionId xid, bool lock_held)
Definition: twophase.c:918

References Assert, elog, ERROR, len, lengthof, LockMethods, TwoPhaseLockRecord::lockmode, LockRefindAndRelease(), TwoPhaseLockRecord::locktag, LOCKTAG::locktag_lockmethodid, and TwoPhaseGetDummyProc().

Referenced by lock_twophase_postabort().

◆ lock_twophase_recover()

void lock_twophase_recover ( TransactionId  xid,
uint16  info,
void *  recdata,
uint32  len 
)

Definition at line 4142 of file lock.c.

4144 {
4145  TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4146  PGPROC *proc = TwoPhaseGetDummyProc(xid, false);
4147  LOCKTAG *locktag;
4148  LOCKMODE lockmode;
4149  LOCKMETHODID lockmethodid;
4150  LOCK *lock;
4151  PROCLOCK *proclock;
4152  PROCLOCKTAG proclocktag;
4153  bool found;
4154  uint32 hashcode;
4155  uint32 proclock_hashcode;
4156  int partition;
4157  LWLock *partitionLock;
4158  LockMethod lockMethodTable;
4159 
4160  Assert(len == sizeof(TwoPhaseLockRecord));
4161  locktag = &rec->locktag;
4162  lockmode = rec->lockmode;
4163  lockmethodid = locktag->locktag_lockmethodid;
4164 
4165  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4166  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4167  lockMethodTable = LockMethods[lockmethodid];
4168 
4169  hashcode = LockTagHashCode(locktag);
4170  partition = LockHashPartition(hashcode);
4171  partitionLock = LockHashPartitionLock(hashcode);
4172 
4173  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4174 
4175  /*
4176  * Find or create a lock with this tag.
4177  */
4179  locktag,
4180  hashcode,
4182  &found);
4183  if (!lock)
4184  {
4185  LWLockRelease(partitionLock);
4186  ereport(ERROR,
4187  (errcode(ERRCODE_OUT_OF_MEMORY),
4188  errmsg("out of shared memory"),
4189  errhint("You might need to increase %s.", "max_locks_per_transaction")));
4190  }
4191 
4192  /*
4193  * if it's a new lock object, initialize it
4194  */
4195  if (!found)
4196  {
4197  lock->grantMask = 0;
4198  lock->waitMask = 0;
4199  dlist_init(&lock->procLocks);
4200  dclist_init(&lock->waitProcs);
4201  lock->nRequested = 0;
4202  lock->nGranted = 0;
4203  MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
4204  MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
4205  LOCK_PRINT("lock_twophase_recover: new", lock, lockmode);
4206  }
4207  else
4208  {
4209  LOCK_PRINT("lock_twophase_recover: found", lock, lockmode);
4210  Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
4211  Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
4212  Assert(lock->nGranted <= lock->nRequested);
4213  }
4214 
4215  /*
4216  * Create the hash key for the proclock table.
4217  */
4218  proclocktag.myLock = lock;
4219  proclocktag.myProc = proc;
4220 
4221  proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
4222 
4223  /*
4224  * Find or create a proclock entry with this tag
4225  */
4227  &proclocktag,
4228  proclock_hashcode,
4230  &found);
4231  if (!proclock)
4232  {
4233  /* Oops, not enough shmem for the proclock */
4234  if (lock->nRequested == 0)
4235  {
4236  /*
4237  * There are no other requestors of this lock, so garbage-collect
4238  * the lock object. We *must* do this to avoid a permanent leak
4239  * of shared memory, because there won't be anything to cause
4240  * anyone to release the lock object later.
4241  */
4242  Assert(dlist_is_empty(&lock->procLocks));
4244  &(lock->tag),
4245  hashcode,
4246  HASH_REMOVE,
4247  NULL))
4248  elog(PANIC, "lock table corrupted");
4249  }
4250  LWLockRelease(partitionLock);
4251  ereport(ERROR,
4252  (errcode(ERRCODE_OUT_OF_MEMORY),
4253  errmsg("out of shared memory"),
4254  errhint("You might need to increase %s.", "max_locks_per_transaction")));
4255  }
4256 
4257  /*
4258  * If new, initialize the new entry
4259  */
4260  if (!found)
4261  {
4262  Assert(proc->lockGroupLeader == NULL);
4263  proclock->groupLeader = proc;
4264  proclock->holdMask = 0;
4265  proclock->releaseMask = 0;
4266  /* Add proclock to appropriate lists */
4267  dlist_push_tail(&lock->procLocks, &proclock->lockLink);
4268  dlist_push_tail(&proc->myProcLocks[partition],
4269  &proclock->procLink);
4270  PROCLOCK_PRINT("lock_twophase_recover: new", proclock);
4271  }
4272  else
4273  {
4274  PROCLOCK_PRINT("lock_twophase_recover: found", proclock);
4275  Assert((proclock->holdMask & ~lock->grantMask) == 0);
4276  }
4277 
4278  /*
4279  * lock->nRequested and lock->requested[] count the total number of
4280  * requests, whether granted or waiting, so increment those immediately.
4281  */
4282  lock->nRequested++;
4283  lock->requested[lockmode]++;
4284  Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
4285 
4286  /*
4287  * We shouldn't already hold the desired lock.
4288  */
4289  if (proclock->holdMask & LOCKBIT_ON(lockmode))
4290  elog(ERROR, "lock %s on object %u/%u/%u is already held",
4291  lockMethodTable->lockModeNames[lockmode],
4292  lock->tag.locktag_field1, lock->tag.locktag_field2,
4293  lock->tag.locktag_field3);
4294 
4295  /*
4296  * We ignore any possible conflicts and just grant ourselves the lock. Not
4297  * only because we don't bother, but also to avoid deadlocks when
4298  * switching from standby to normal mode. See function comment.
4299  */
4300  GrantLock(lock, proclock, lockmode);
4301 
4302  /*
4303  * Bump strong lock count, to make sure any fast-path lock requests won't
4304  * be granted without consulting the primary lock table.
4305  */
4306  if (ConflictsWithRelationFastPath(&lock->tag, lockmode))
4307  {
4308  uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
4309 
4311  FastPathStrongRelationLocks->count[fasthashcode]++;
4313  }
4314 
4315  LWLockRelease(partitionLock);
4316 }
#define MemSet(start, val, len)
Definition: c.h:1020
@ HASH_ENTER_NULL
Definition: hsearch.h:116
static void dlist_init(dlist_head *head)
Definition: ilist.h:314
static void dlist_push_tail(dlist_head *head, dlist_node *node)
Definition: ilist.h:364
static void dclist_init(dclist_head *head)
Definition: ilist.h:671
#define MAX_LOCKMODES
Definition: lock.h:82
#define LockHashPartition(hashcode)
Definition: lock.h:524
int LOCKMODE
Definition: lockdefs.h:26
uint32 locktag_field3
Definition: lock.h:168
dlist_head myProcLocks[NUM_LOCK_PARTITIONS]
Definition: proc.h:257
LOCKMASK releaseMask
Definition: lock.h:377

References Assert, ConflictsWithRelationFastPath, FastPathStrongRelationLockData::count, dclist_init(), dlist_init(), dlist_is_empty(), dlist_push_tail(), elog, ereport, errcode(), errhint(), errmsg(), ERROR, FastPathStrongLockHashPartition, FastPathStrongRelationLocks, LOCK::granted, GrantLock(), LOCK::grantMask, PROCLOCK::groupLeader, HASH_ENTER_NULL, HASH_REMOVE, hash_search_with_hash_value(), PROCLOCK::holdMask, len, lengthof, LOCK_PRINT, LOCKBIT_ON, PGPROC::lockGroupLeader, LockHashPartition, LockHashPartitionLock, PROCLOCK::lockLink, LockMethodLockHash, LockMethodProcLockHash, LockMethods, TwoPhaseLockRecord::lockmode, LockMethodData::lockModeNames, TwoPhaseLockRecord::locktag, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_field3, LOCKTAG::locktag_lockmethodid, LockTagHashCode(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), MAX_LOCKMODES, MemSet, FastPathStrongRelationLockData::mutex, PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, PGPROC::myProcLocks, LOCK::nGranted, LOCK::nRequested, PANIC, PROCLOCK::procLink, PROCLOCK_PRINT, ProcLockHashCode(), LOCK::procLocks, PROCLOCK::releaseMask, LOCK::requested, SpinLockAcquire, SpinLockRelease, LOCK::tag, TwoPhaseGetDummyProc(), LOCK::waitMask, and LOCK::waitProcs.

◆ lock_twophase_standby_recover()

void lock_twophase_standby_recover ( TransactionId  xid,
uint16  info,
void *  recdata,
uint32  len 
)

Definition at line 4323 of file lock.c.

4325 {
4326  TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4327  LOCKTAG *locktag;
4328  LOCKMODE lockmode;
4329  LOCKMETHODID lockmethodid;
4330 
4331  Assert(len == sizeof(TwoPhaseLockRecord));
4332  locktag = &rec->locktag;
4333  lockmode = rec->lockmode;
4334  lockmethodid = locktag->locktag_lockmethodid;
4335 
4336  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4337  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4338 
4339  if (lockmode == AccessExclusiveLock &&
4340  locktag->locktag_type == LOCKTAG_RELATION)
4341  {
4343  locktag->locktag_field1 /* dboid */ ,
4344  locktag->locktag_field2 /* reloid */ );
4345  }
4346 }
void StandbyAcquireAccessExclusiveLock(TransactionId xid, Oid dbOid, Oid relOid)
Definition: standby.c:985

References AccessExclusiveLock, Assert, elog, ERROR, len, lengthof, LockMethods, TwoPhaseLockRecord::lockmode, TwoPhaseLockRecord::locktag, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_lockmethodid, LOCKTAG_RELATION, LOCKTAG::locktag_type, and StandbyAcquireAccessExclusiveLock().

◆ LockAcquire()

LockAcquireResult LockAcquire ( const LOCKTAG locktag,
LOCKMODE  lockmode,
bool  sessionLock,
bool  dontWait 
)

◆ LockAcquireExtended()

LockAcquireResult LockAcquireExtended ( const LOCKTAG locktag,
LOCKMODE  lockmode,
bool  sessionLock,
bool  dontWait,
bool  reportMemoryError,
LOCALLOCK **  locallockp 
)

Definition at line 758 of file lock.c.

764 {
765  LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
766  LockMethod lockMethodTable;
767  LOCALLOCKTAG localtag;
768  LOCALLOCK *locallock;
769  LOCK *lock;
770  PROCLOCK *proclock;
771  bool found;
772  ResourceOwner owner;
773  uint32 hashcode;
774  LWLock *partitionLock;
775  bool found_conflict;
776  bool log_lock = false;
777 
778  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
779  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
780  lockMethodTable = LockMethods[lockmethodid];
781  if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
782  elog(ERROR, "unrecognized lock mode: %d", lockmode);
783 
784  if (RecoveryInProgress() && !InRecovery &&
785  (locktag->locktag_type == LOCKTAG_OBJECT ||
786  locktag->locktag_type == LOCKTAG_RELATION) &&
787  lockmode > RowExclusiveLock)
788  ereport(ERROR,
789  (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
790  errmsg("cannot acquire lock mode %s on database objects while recovery is in progress",
791  lockMethodTable->lockModeNames[lockmode]),
792  errhint("Only RowExclusiveLock or less can be acquired on database objects during recovery.")));
793 
794 #ifdef LOCK_DEBUG
795  if (LOCK_DEBUG_ENABLED(locktag))
796  elog(LOG, "LockAcquire: lock [%u,%u] %s",
797  locktag->locktag_field1, locktag->locktag_field2,
798  lockMethodTable->lockModeNames[lockmode]);
799 #endif
800 
801  /* Identify owner for lock */
802  if (sessionLock)
803  owner = NULL;
804  else
805  owner = CurrentResourceOwner;
806 
807  /*
808  * Find or create a LOCALLOCK entry for this lock and lockmode
809  */
810  MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
811  localtag.lock = *locktag;
812  localtag.mode = lockmode;
813 
814  locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
815  &localtag,
816  HASH_ENTER, &found);
817 
818  /*
819  * if it's a new locallock object, initialize it
820  */
821  if (!found)
822  {
823  locallock->lock = NULL;
824  locallock->proclock = NULL;
825  locallock->hashcode = LockTagHashCode(&(localtag.lock));
826  locallock->nLocks = 0;
827  locallock->holdsStrongLockCount = false;
828  locallock->lockCleared = false;
829  locallock->numLockOwners = 0;
830  locallock->maxLockOwners = 8;
831  locallock->lockOwners = NULL; /* in case next line fails */
832  locallock->lockOwners = (LOCALLOCKOWNER *)
834  locallock->maxLockOwners * sizeof(LOCALLOCKOWNER));
835  }
836  else
837  {
838  /* Make sure there will be room to remember the lock */
839  if (locallock->numLockOwners >= locallock->maxLockOwners)
840  {
841  int newsize = locallock->maxLockOwners * 2;
842 
843  locallock->lockOwners = (LOCALLOCKOWNER *)
844  repalloc(locallock->lockOwners,
845  newsize * sizeof(LOCALLOCKOWNER));
846  locallock->maxLockOwners = newsize;
847  }
848  }
849  hashcode = locallock->hashcode;
850 
851  if (locallockp)
852  *locallockp = locallock;
853 
854  /*
855  * If we already hold the lock, we can just increase the count locally.
856  *
857  * If lockCleared is already set, caller need not worry about absorbing
858  * sinval messages related to the lock's object.
859  */
860  if (locallock->nLocks > 0)
861  {
862  GrantLockLocal(locallock, owner);
863  if (locallock->lockCleared)
865  else
867  }
868 
869  /*
870  * We don't acquire any other heavyweight lock while holding the relation
871  * extension lock. We do allow to acquire the same relation extension
872  * lock more than once but that case won't reach here.
873  */
874  Assert(!IsRelationExtensionLockHeld);
875 
876  /*
877  * Prepare to emit a WAL record if acquisition of this lock needs to be
878  * replayed in a standby server.
879  *
880  * Here we prepare to log; after lock is acquired we'll issue log record.
881  * This arrangement simplifies error recovery in case the preparation step
882  * fails.
883  *
884  * Only AccessExclusiveLocks can conflict with lock types that read-only
885  * transactions can acquire in a standby server. Make sure this definition
886  * matches the one in GetRunningTransactionLocks().
887  */
888  if (lockmode >= AccessExclusiveLock &&
889  locktag->locktag_type == LOCKTAG_RELATION &&
890  !RecoveryInProgress() &&
892  {
894  log_lock = true;
895  }
896 
897  /*
898  * Attempt to take lock via fast path, if eligible. But if we remember
899  * having filled up the fast path array, we don't attempt to make any
900  * further use of it until we release some locks. It's possible that some
901  * other backend has transferred some of those locks to the shared hash
902  * table, leaving space free, but it's not worth acquiring the LWLock just
903  * to check. It's also possible that we're acquiring a second or third
904  * lock type on a relation we have already locked using the fast-path, but
905  * for now we don't worry about that case either.
906  */
907  if (EligibleForRelationFastPath(locktag, lockmode) &&
909  {
910  uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
911  bool acquired;
912 
913  /*
914  * LWLockAcquire acts as a memory sequencing point, so it's safe to
915  * assume that any strong locker whose increment to
916  * FastPathStrongRelationLocks->counts becomes visible after we test
917  * it has yet to begin to transfer fast-path locks.
918  */
920  if (FastPathStrongRelationLocks->count[fasthashcode] != 0)
921  acquired = false;
922  else
923  acquired = FastPathGrantRelationLock(locktag->locktag_field2,
924  lockmode);
926  if (acquired)
927  {
928  /*
929  * The locallock might contain stale pointers to some old shared
930  * objects; we MUST reset these to null before considering the
931  * lock to be acquired via fast-path.
932  */
933  locallock->lock = NULL;
934  locallock->proclock = NULL;
935  GrantLockLocal(locallock, owner);
936  return LOCKACQUIRE_OK;
937  }
938  }
939 
940  /*
941  * If this lock could potentially have been taken via the fast-path by
942  * some other backend, we must (temporarily) disable further use of the
943  * fast-path for this lock tag, and migrate any locks already taken via
944  * this method to the main lock table.
945  */
946  if (ConflictsWithRelationFastPath(locktag, lockmode))
947  {
948  uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
949 
950  BeginStrongLockAcquire(locallock, fasthashcode);
951  if (!FastPathTransferRelationLocks(lockMethodTable, locktag,
952  hashcode))
953  {
955  if (locallock->nLocks == 0)
956  RemoveLocalLock(locallock);
957  if (locallockp)
958  *locallockp = NULL;
959  if (reportMemoryError)
960  ereport(ERROR,
961  (errcode(ERRCODE_OUT_OF_MEMORY),
962  errmsg("out of shared memory"),
963  errhint("You might need to increase %s.", "max_locks_per_transaction")));
964  else
965  return LOCKACQUIRE_NOT_AVAIL;
966  }
967  }
968 
969  /*
970  * We didn't find the lock in our LOCALLOCK table, and we didn't manage to
971  * take it via the fast-path, either, so we've got to mess with the shared
972  * lock table.
973  */
974  partitionLock = LockHashPartitionLock(hashcode);
975 
976  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
977 
978  /*
979  * Find or create lock and proclock entries with this tag
980  *
981  * Note: if the locallock object already existed, it might have a pointer
982  * to the lock already ... but we should not assume that that pointer is
983  * valid, since a lock object with zero hold and request counts can go
984  * away anytime. So we have to use SetupLockInTable() to recompute the
985  * lock and proclock pointers, even if they're already set.
986  */
987  proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
988  hashcode, lockmode);
989  if (!proclock)
990  {
992  LWLockRelease(partitionLock);
993  if (locallock->nLocks == 0)
994  RemoveLocalLock(locallock);
995  if (locallockp)
996  *locallockp = NULL;
997  if (reportMemoryError)
998  ereport(ERROR,
999  (errcode(ERRCODE_OUT_OF_MEMORY),
1000  errmsg("out of shared memory"),
1001  errhint("You might need to increase %s.", "max_locks_per_transaction")));
1002  else
1003  return LOCKACQUIRE_NOT_AVAIL;
1004  }
1005  locallock->proclock = proclock;
1006  lock = proclock->tag.myLock;
1007  locallock->lock = lock;
1008 
1009  /*
1010  * If lock requested conflicts with locks requested by waiters, must join
1011  * wait queue. Otherwise, check for conflict with already-held locks.
1012  * (That's last because most complex check.)
1013  */
1014  if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
1015  found_conflict = true;
1016  else
1017  found_conflict = LockCheckConflicts(lockMethodTable, lockmode,
1018  lock, proclock);
1019 
1020  if (!found_conflict)
1021  {
1022  /* No conflict with held or previously requested locks */
1023  GrantLock(lock, proclock, lockmode);
1024  GrantLockLocal(locallock, owner);
1025  }
1026  else
1027  {
1028  /*
1029  * Set bitmask of locks this process already holds on this object.
1030  */
1031  MyProc->heldLocks = proclock->holdMask;
1032 
1033  /*
1034  * Sleep till someone wakes me up. We do this even in the dontWait
1035  * case, because while trying to go to sleep, we may discover that we
1036  * can acquire the lock immediately after all.
1037  */
1038 
1039  TRACE_POSTGRESQL_LOCK_WAIT_START(locktag->locktag_field1,
1040  locktag->locktag_field2,
1041  locktag->locktag_field3,
1042  locktag->locktag_field4,
1043  locktag->locktag_type,
1044  lockmode);
1045 
1046  WaitOnLock(locallock, owner, dontWait);
1047 
1048  TRACE_POSTGRESQL_LOCK_WAIT_DONE(locktag->locktag_field1,
1049  locktag->locktag_field2,
1050  locktag->locktag_field3,
1051  locktag->locktag_field4,
1052  locktag->locktag_type,
1053  lockmode);
1054 
1055  /*
1056  * NOTE: do not do any material change of state between here and
1057  * return. All required changes in locktable state must have been
1058  * done when the lock was granted to us --- see notes in WaitOnLock.
1059  */
1060 
1061  /*
1062  * Check the proclock entry status. If dontWait = true, this is an
1063  * expected case; otherwise, it will open happen if something in the
1064  * ipc communication doesn't work correctly.
1065  */
1066  if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
1067  {
1069 
1070  if (dontWait)
1071  {
1072  /*
1073  * We can't acquire the lock immediately. If caller specified
1074  * no blocking, remove useless table entries and return
1075  * LOCKACQUIRE_NOT_AVAIL without waiting.
1076  */
1077  if (proclock->holdMask == 0)
1078  {
1079  uint32 proclock_hashcode;
1080 
1081  proclock_hashcode = ProcLockHashCode(&proclock->tag,
1082  hashcode);
1083  dlist_delete(&proclock->lockLink);
1084  dlist_delete(&proclock->procLink);
1086  &(proclock->tag),
1087  proclock_hashcode,
1088  HASH_REMOVE,
1089  NULL))
1090  elog(PANIC, "proclock table corrupted");
1091  }
1092  else
1093  PROCLOCK_PRINT("LockAcquire: NOWAIT", proclock);
1094  lock->nRequested--;
1095  lock->requested[lockmode]--;
1096  LOCK_PRINT("LockAcquire: conditional lock failed",
1097  lock, lockmode);
1098  Assert((lock->nRequested > 0) &&
1099  (lock->requested[lockmode] >= 0));
1100  Assert(lock->nGranted <= lock->nRequested);
1101  LWLockRelease(partitionLock);
1102  if (locallock->nLocks == 0)
1103  RemoveLocalLock(locallock);
1104  if (locallockp)
1105  *locallockp = NULL;
1106  return LOCKACQUIRE_NOT_AVAIL;
1107  }
1108  else
1109  {
1110  /*
1111  * We should have gotten the lock, but somehow that didn't
1112  * happen. If we get here, it's a bug.
1113  */
1114  PROCLOCK_PRINT("LockAcquire: INCONSISTENT", proclock);
1115  LOCK_PRINT("LockAcquire: INCONSISTENT", lock, lockmode);
1116  LWLockRelease(partitionLock);
1117  elog(ERROR, "LockAcquire failed");
1118  }
1119  }
1120  PROCLOCK_PRINT("LockAcquire: granted", proclock);
1121  LOCK_PRINT("LockAcquire: granted", lock, lockmode);
1122  }
1123 
1124  /*
1125  * Lock state is fully up-to-date now; if we error out after this, no
1126  * special error cleanup is required.
1127  */
1129 
1130  LWLockRelease(partitionLock);
1131 
1132  /*
1133  * Emit a WAL record if acquisition of this lock needs to be replayed in a
1134  * standby server.
1135  */
1136  if (log_lock)
1137  {
1138  /*
1139  * Decode the locktag back to the original values, to avoid sending
1140  * lots of empty bytes with every message. See lock.h to check how a
1141  * locktag is defined for LOCKTAG_RELATION
1142  */
1144  locktag->locktag_field2);
1145  }
1146 
1147  return LOCKACQUIRE_OK;
1148 }
#define LOG
Definition: elog.h:31
static void RemoveLocalLock(LOCALLOCK *locallock)
Definition: lock.c:1354
static bool FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag, uint32 hashcode)
Definition: lock.c:2680
static void WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner, bool dontWait)
Definition: lock.c:1796
void AbortStrongLockAcquire(void)
Definition: lock.c:1738
static bool FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode)
Definition: lock.c:2613
#define EligibleForRelationFastPath(locktag, mode)
Definition: lock.c:213
static void BeginStrongLockAcquire(LOCALLOCK *locallock, uint32 fasthashcode)
Definition: lock.c:1702
bool LockCheckConflicts(LockMethod lockMethodTable, LOCKMODE lockmode, LOCK *lock, PROCLOCK *proclock)
Definition: lock.c:1407
static void FinishStrongLockAcquire(void)
Definition: lock.c:1728
@ LOCKTAG_OBJECT
Definition: lock.h:145
@ LOCKACQUIRE_ALREADY_CLEAR
Definition: lock.h:504
@ LOCKACQUIRE_OK
Definition: lock.h:502
@ LOCKACQUIRE_ALREADY_HELD
Definition: lock.h:503
@ LOCKACQUIRE_NOT_AVAIL
Definition: lock.h:501
#define RowExclusiveLock
Definition: lockdefs.h:38
ResourceOwner CurrentResourceOwner
Definition: resowner.c:165
void LogAccessExclusiveLockPrepare(void)
Definition: standby.c:1440
void LogAccessExclusiveLock(Oid dbOid, Oid relOid)
Definition: standby.c:1423
bool lockCleared
Definition: lock.h:440
uint16 locktag_field4
Definition: lock.h:169
LOCKMASK heldLocks
Definition: proc.h:231
bool RecoveryInProgress(void)
Definition: xlog.c:6290
#define XLogStandbyInfoActive()
Definition: xlog.h:121
bool InRecovery
Definition: xlogutils.c:50

References AbortStrongLockAcquire(), AccessExclusiveLock, Assert, BeginStrongLockAcquire(), ConflictsWithRelationFastPath, LockMethodData::conflictTab, FastPathStrongRelationLockData::count, CurrentResourceOwner, dlist_delete(), EligibleForRelationFastPath, elog, ereport, errcode(), errhint(), errmsg(), ERROR, FastPathGrantRelationLock(), FastPathLocalUseCount, FastPathStrongLockHashPartition, FastPathStrongRelationLocks, FastPathTransferRelationLocks(), FinishStrongLockAcquire(), FP_LOCK_SLOTS_PER_BACKEND, PGPROC::fpInfoLock, GrantLock(), GrantLockLocal(), HASH_ENTER, HASH_REMOVE, hash_search(), hash_search_with_hash_value(), LOCALLOCK::hashcode, PGPROC::heldLocks, PROCLOCK::holdMask, LOCALLOCK::holdsStrongLockCount, InRecovery, lengthof, LOCALLOCKTAG::lock, LOCALLOCK::lock, LOCK_PRINT, LOCKACQUIRE_ALREADY_CLEAR, LOCKACQUIRE_ALREADY_HELD, LOCKACQUIRE_NOT_AVAIL, LOCKACQUIRE_OK, LOCKBIT_ON, LockCheckConflicts(), LOCALLOCK::lockCleared, LockHashPartitionLock, PROCLOCK::lockLink, LockMethodLocalHash, LockMethodProcLockHash, LockMethods, LockMethodData::lockModeNames, LOCALLOCK::lockOwners, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_field3, LOCKTAG::locktag_field4, LOCKTAG::locktag_lockmethodid, LOCKTAG_OBJECT, LOCKTAG_RELATION, LOCKTAG::locktag_type, LockTagHashCode(), LOG, LogAccessExclusiveLock(), LogAccessExclusiveLockPrepare(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), LOCALLOCK::maxLockOwners, MemoryContextAlloc(), MemSet, LOCALLOCKTAG::mode, PROCLOCKTAG::myLock, MyProc, LOCK::nGranted, LOCALLOCK::nLocks, LOCK::nRequested, LockMethodData::numLockModes, LOCALLOCK::numLockOwners, PANIC, PROCLOCK::procLink, LOCALLOCK::proclock, PROCLOCK_PRINT, ProcLockHashCode(), RecoveryInProgress(), RemoveLocalLock(), repalloc(), LOCK::requested, RowExclusiveLock, SetupLockInTable(), PROCLOCK::tag, TopMemoryContext, LOCK::waitMask, WaitOnLock(), and XLogStandbyInfoActive.

Referenced by ConditionalLockDatabaseObject(), ConditionalLockRelation(), ConditionalLockRelationOid(), ConditionalLockSharedObject(), LockAcquire(), LockRelation(), LockRelationId(), and LockRelationOid().

◆ LockCheckConflicts()

bool LockCheckConflicts ( LockMethod  lockMethodTable,
LOCKMODE  lockmode,
LOCK lock,
PROCLOCK proclock 
)

Definition at line 1407 of file lock.c.

 1407 LockCheckConflicts(LockMethod lockMethodTable,
 1408  LOCKMODE lockmode,
 1409  LOCK *lock,
 1410  PROCLOCK *proclock)
 1411 {
1412  int numLockModes = lockMethodTable->numLockModes;
1413  LOCKMASK myLocks;
1414  int conflictMask = lockMethodTable->conflictTab[lockmode];
1415  int conflictsRemaining[MAX_LOCKMODES];
1416  int totalConflictsRemaining = 0;
1417  dlist_iter proclock_iter;
1418  int i;
1419 
1420  /*
1421  * first check for global conflicts: If no locks conflict with my request,
1422  * then I get the lock.
1423  *
1424  * Checking for conflict: lock->grantMask represents the types of
1425  * currently held locks. conflictTable[lockmode] has a bit set for each
1426  * type of lock that conflicts with request. Bitwise compare tells if
1427  * there is a conflict.
1428  */
1429  if (!(conflictMask & lock->grantMask))
1430  {
1431  PROCLOCK_PRINT("LockCheckConflicts: no conflict", proclock);
1432  return false;
1433  }
1434 
1435  /*
1436  * Rats. Something conflicts. But it could still be my own lock, or a
1437  * lock held by another member of my locking group. First, figure out how
1438  * many conflicts remain after subtracting out any locks I hold myself.
1439  */
1440  myLocks = proclock->holdMask;
1441  for (i = 1; i <= numLockModes; i++)
1442  {
1443  if ((conflictMask & LOCKBIT_ON(i)) == 0)
1444  {
1445  conflictsRemaining[i] = 0;
1446  continue;
1447  }
1448  conflictsRemaining[i] = lock->granted[i];
1449  if (myLocks & LOCKBIT_ON(i))
1450  --conflictsRemaining[i];
1451  totalConflictsRemaining += conflictsRemaining[i];
1452  }
1453 
1454  /* If no conflicts remain, we get the lock. */
1455  if (totalConflictsRemaining == 0)
1456  {
1457  PROCLOCK_PRINT("LockCheckConflicts: resolved (simple)", proclock);
1458  return false;
1459  }
1460 
1461  /* If no group locking, it's definitely a conflict. */
1462  if (proclock->groupLeader == MyProc && MyProc->lockGroupLeader == NULL)
1463  {
1464  Assert(proclock->tag.myProc == MyProc);
1465  PROCLOCK_PRINT("LockCheckConflicts: conflicting (simple)",
1466  proclock);
1467  return true;
1468  }
1469 
1470  /*
1471  * The relation extension lock conflict even between the group members.
1472  */
1473  if (LOCK_LOCKTAG(*lock) == LOCKTAG_RELATION_EXTEND)
1474  {
1475  PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)",
1476  proclock);
1477  return true;
1478  }
1479 
1480  /*
1481  * Locks held in conflicting modes by members of our own lock group are
1482  * not real conflicts; we can subtract those out and see if we still have
1483  * a conflict. This is O(N) in the number of processes holding or
1484  * awaiting locks on this object. We could improve that by making the
1485  * shared memory state more complex (and larger) but it doesn't seem worth
1486  * it.
1487  */
1488  dlist_foreach(proclock_iter, &lock->procLocks)
1489  {
1490  PROCLOCK *otherproclock =
1491  dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
1492 
1493  if (proclock != otherproclock &&
1494  proclock->groupLeader == otherproclock->groupLeader &&
1495  (otherproclock->holdMask & conflictMask) != 0)
1496  {
1497  int intersectMask = otherproclock->holdMask & conflictMask;
1498 
1499  for (i = 1; i <= numLockModes; i++)
1500  {
1501  if ((intersectMask & LOCKBIT_ON(i)) != 0)
1502  {
1503  if (conflictsRemaining[i] <= 0)
1504  elog(PANIC, "proclocks held do not match lock");
1505  conflictsRemaining[i]--;
1506  totalConflictsRemaining--;
1507  }
1508  }
1509 
1510  if (totalConflictsRemaining == 0)
1511  {
1512  PROCLOCK_PRINT("LockCheckConflicts: resolved (group)",
1513  proclock);
1514  return false;
1515  }
1516  }
1517  }
1518 
1519  /* Nope, it's a real conflict. */
1520  PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)", proclock);
1521  return true;
1522 }
#define LOCK_LOCKTAG(lock)
Definition: lock.h:325

References Assert, LockMethodData::conflictTab, dlist_iter::cur, dlist_container, dlist_foreach, elog, LOCK::granted, LOCK::grantMask, PROCLOCK::groupLeader, PROCLOCK::holdMask, i, LOCK_LOCKTAG, LOCKBIT_ON, PGPROC::lockGroupLeader, LOCKTAG_RELATION_EXTEND, MAX_LOCKMODES, MyProc, PROCLOCKTAG::myProc, LockMethodData::numLockModes, PANIC, PROCLOCK_PRINT, LOCK::procLocks, and PROCLOCK::tag.

Referenced by LockAcquireExtended(), ProcLockWakeup(), and ProcSleep().

◆ LockHasWaiters()

bool LockHasWaiters ( const LOCKTAG locktag,
LOCKMODE  lockmode,
bool  sessionLock 
)

Definition at line 621 of file lock.c.

 621 LockHasWaiters(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
 622 {
623  LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
624  LockMethod lockMethodTable;
625  LOCALLOCKTAG localtag;
626  LOCALLOCK *locallock;
627  LOCK *lock;
628  PROCLOCK *proclock;
629  LWLock *partitionLock;
630  bool hasWaiters = false;
631 
632  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
633  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
634  lockMethodTable = LockMethods[lockmethodid];
635  if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
636  elog(ERROR, "unrecognized lock mode: %d", lockmode);
637 
638 #ifdef LOCK_DEBUG
639  if (LOCK_DEBUG_ENABLED(locktag))
640  elog(LOG, "LockHasWaiters: lock [%u,%u] %s",
641  locktag->locktag_field1, locktag->locktag_field2,
642  lockMethodTable->lockModeNames[lockmode]);
643 #endif
644 
645  /*
646  * Find the LOCALLOCK entry for this lock and lockmode
647  */
648  MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
649  localtag.lock = *locktag;
650  localtag.mode = lockmode;
651 
652  locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
653  &localtag,
654  HASH_FIND, NULL);
655 
656  /*
657  * let the caller print its own error message, too. Do not ereport(ERROR).
658  */
659  if (!locallock || locallock->nLocks <= 0)
660  {
661  elog(WARNING, "you don't own a lock of type %s",
662  lockMethodTable->lockModeNames[lockmode]);
663  return false;
664  }
665 
666  /*
667  * Check the shared lock table.
668  */
669  partitionLock = LockHashPartitionLock(locallock->hashcode);
670 
671  LWLockAcquire(partitionLock, LW_SHARED);
672 
673  /*
674  * We don't need to re-find the lock or proclock, since we kept their
675  * addresses in the locallock table, and they couldn't have been removed
676  * while we were holding a lock on them.
677  */
678  lock = locallock->lock;
679  LOCK_PRINT("LockHasWaiters: found", lock, lockmode);
680  proclock = locallock->proclock;
681  PROCLOCK_PRINT("LockHasWaiters: found", proclock);
682 
683  /*
684  * Double-check that we are actually holding a lock of the type we want to
685  * release.
686  */
687  if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
688  {
689  PROCLOCK_PRINT("LockHasWaiters: WRONGTYPE", proclock);
690  LWLockRelease(partitionLock);
691  elog(WARNING, "you don't own a lock of type %s",
692  lockMethodTable->lockModeNames[lockmode]);
693  RemoveLocalLock(locallock);
694  return false;
695  }
696 
697  /*
698  * Do the checking.
699  */
700  if ((lockMethodTable->conflictTab[lockmode] & lock->waitMask) != 0)
701  hasWaiters = true;
702 
703  LWLockRelease(partitionLock);
704 
705  return hasWaiters;
706 }
#define WARNING
Definition: elog.h:36

References LockMethodData::conflictTab, elog, ERROR, HASH_FIND, hash_search(), LOCALLOCK::hashcode, PROCLOCK::holdMask, lengthof, LOCALLOCKTAG::lock, LOCALLOCK::lock, LOCK_PRINT, LOCKBIT_ON, LockHashPartitionLock, LockMethodLocalHash, LockMethods, LockMethodData::lockModeNames, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_lockmethodid, LOG, LW_SHARED, LWLockAcquire(), LWLockRelease(), MemSet, LOCALLOCKTAG::mode, LOCALLOCK::nLocks, LockMethodData::numLockModes, LOCALLOCK::proclock, PROCLOCK_PRINT, RemoveLocalLock(), LOCK::waitMask, and WARNING.

Referenced by LockHasWaitersRelation().

◆ LockHeldByMe()

bool LockHeldByMe ( const LOCKTAG locktag,
LOCKMODE  lockmode 
)

Definition at line 585 of file lock.c.

 585 LockHeldByMe(const LOCKTAG *locktag, LOCKMODE lockmode)
 586 {
587  LOCALLOCKTAG localtag;
588  LOCALLOCK *locallock;
589 
590  /*
591  * See if there is a LOCALLOCK entry for this lock and lockmode
592  */
593  MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
594  localtag.lock = *locktag;
595  localtag.mode = lockmode;
596 
597  locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
598  &localtag,
599  HASH_FIND, NULL);
600 
601  return (locallock && locallock->nLocks > 0);
602 }

References HASH_FIND, hash_search(), LOCALLOCKTAG::lock, LockMethodLocalHash, MemSet, LOCALLOCKTAG::mode, and LOCALLOCK::nLocks.

Referenced by CheckRelationLockedByMe().

◆ LockReassignCurrentOwner()

void LockReassignCurrentOwner ( LOCALLOCK **  locallocks,
int  nlocks 
)

Definition at line 2537 of file lock.c.

 2537 LockReassignCurrentOwner(LOCALLOCK **locallocks, int nlocks)
 2538 {
 2539  ResourceOwner parent = ResourceOwnerGetParent(CurrentResourceOwner);
 2540 
2541  Assert(parent != NULL);
2542 
2543  if (locallocks == NULL)
2544  {
2545  HASH_SEQ_STATUS status;
2546  LOCALLOCK *locallock;
2547 
 2548  hash_seq_init(&status, LockMethodLocalHash);
 2549 
2550  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2551  LockReassignOwner(locallock, parent);
2552  }
2553  else
2554  {
2555  int i;
2556 
2557  for (i = nlocks - 1; i >= 0; i--)
2558  LockReassignOwner(locallocks[i], parent);
2559  }
2560 }
static void LockReassignOwner(LOCALLOCK *locallock, ResourceOwner parent)
Definition: lock.c:2567
ResourceOwner ResourceOwnerGetParent(ResourceOwner owner)
Definition: resowner.c:888

References Assert, CurrentResourceOwner, hash_seq_init(), hash_seq_search(), i, LockMethodLocalHash, LockReassignOwner(), and ResourceOwnerGetParent().

Referenced by ResourceOwnerReleaseInternal().

◆ LockReassignOwner()

static void LockReassignOwner ( LOCALLOCK locallock,
ResourceOwner  parent 
)
static

Definition at line 2567 of file lock.c.

 2567 LockReassignOwner(LOCALLOCK *locallock, ResourceOwner parent)
 2568 {
2569  LOCALLOCKOWNER *lockOwners;
2570  int i;
2571  int ic = -1;
2572  int ip = -1;
2573 
2574  /*
2575  * Scan to see if there are any locks belonging to current owner or its
2576  * parent
2577  */
2578  lockOwners = locallock->lockOwners;
2579  for (i = locallock->numLockOwners - 1; i >= 0; i--)
2580  {
2581  if (lockOwners[i].owner == CurrentResourceOwner)
2582  ic = i;
2583  else if (lockOwners[i].owner == parent)
2584  ip = i;
2585  }
2586 
2587  if (ic < 0)
2588  return; /* no current locks */
2589 
2590  if (ip < 0)
2591  {
2592  /* Parent has no slot, so just give it the child's slot */
2593  lockOwners[ic].owner = parent;
2594  ResourceOwnerRememberLock(parent, locallock);
2595  }
2596  else
2597  {
2598  /* Merge child's count with parent's */
2599  lockOwners[ip].nLocks += lockOwners[ic].nLocks;
2600  /* compact out unused slot */
2601  locallock->numLockOwners--;
2602  if (ic < locallock->numLockOwners)
2603  lockOwners[ic] = lockOwners[locallock->numLockOwners];
2604  }
 2605  ResourceOwnerForgetLock(CurrentResourceOwner, locallock);
 2606 }
void ResourceOwnerForgetLock(ResourceOwner owner, LOCALLOCK *locallock)
Definition: resowner.c:1065

References CurrentResourceOwner, i, LOCALLOCK::lockOwners, LOCALLOCKOWNER::nLocks, LOCALLOCK::numLockOwners, LOCALLOCKOWNER::owner, ResourceOwnerForgetLock(), and ResourceOwnerRememberLock().

Referenced by LockReassignCurrentOwner().

◆ LockRefindAndRelease()

static void LockRefindAndRelease ( LockMethod  lockMethodTable,
PGPROC proc,
LOCKTAG locktag,
LOCKMODE  lockmode,
bool  decrement_strong_lock_count 
)
static

Definition at line 3080 of file lock.c.

 3080 LockRefindAndRelease(LockMethod lockMethodTable, PGPROC *proc,
 3081  LOCKTAG *locktag, LOCKMODE lockmode,
 3082  bool decrement_strong_lock_count)
 3083 {
3084  LOCK *lock;
3085  PROCLOCK *proclock;
3086  PROCLOCKTAG proclocktag;
3087  uint32 hashcode;
3088  uint32 proclock_hashcode;
3089  LWLock *partitionLock;
3090  bool wakeupNeeded;
3091 
3092  hashcode = LockTagHashCode(locktag);
3093  partitionLock = LockHashPartitionLock(hashcode);
3094 
3095  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
3096 
3097  /*
3098  * Re-find the lock object (it had better be there).
3099  */
 3100  lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
 3101  locktag,
3102  hashcode,
3103  HASH_FIND,
3104  NULL);
3105  if (!lock)
3106  elog(PANIC, "failed to re-find shared lock object");
3107 
3108  /*
3109  * Re-find the proclock object (ditto).
3110  */
3111  proclocktag.myLock = lock;
3112  proclocktag.myProc = proc;
3113 
3114  proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
3115 
 3116  proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
 3117  &proclocktag,
3118  proclock_hashcode,
3119  HASH_FIND,
3120  NULL);
3121  if (!proclock)
3122  elog(PANIC, "failed to re-find shared proclock object");
3123 
3124  /*
3125  * Double-check that we are actually holding a lock of the type we want to
3126  * release.
3127  */
3128  if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
3129  {
3130  PROCLOCK_PRINT("lock_twophase_postcommit: WRONGTYPE", proclock);
3131  LWLockRelease(partitionLock);
3132  elog(WARNING, "you don't own a lock of type %s",
3133  lockMethodTable->lockModeNames[lockmode]);
3134  return;
3135  }
3136 
3137  /*
3138  * Do the releasing. CleanUpLock will waken any now-wakable waiters.
3139  */
3140  wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
3141 
3142  CleanUpLock(lock, proclock,
3143  lockMethodTable, hashcode,
3144  wakeupNeeded);
3145 
3146  LWLockRelease(partitionLock);
3147 
3148  /*
3149  * Decrement strong lock count. This logic is needed only for 2PC.
3150  */
3151  if (decrement_strong_lock_count
3152  && ConflictsWithRelationFastPath(locktag, lockmode))
3153  {
3154  uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
3155 
 3156  SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
 3157  Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
 3158  FastPathStrongRelationLocks->count[fasthashcode]--;
 3159  SpinLockRelease(&FastPathStrongRelationLocks->mutex);
3160  }
3161 }
static bool UnGrantLock(LOCK *lock, LOCKMODE lockmode, PROCLOCK *proclock, LockMethod lockMethodTable)
Definition: lock.c:1559
static void CleanUpLock(LOCK *lock, PROCLOCK *proclock, LockMethod lockMethodTable, uint32 hashcode, bool wakeupNeeded)
Definition: lock.c:1616

References Assert, CleanUpLock(), ConflictsWithRelationFastPath, FastPathStrongRelationLockData::count, elog, FastPathStrongLockHashPartition, FastPathStrongRelationLocks, HASH_FIND, hash_search_with_hash_value(), PROCLOCK::holdMask, LOCKBIT_ON, LockHashPartitionLock, LockMethodLockHash, LockMethodProcLockHash, LockMethodData::lockModeNames, LockTagHashCode(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), FastPathStrongRelationLockData::mutex, PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, PANIC, PROCLOCK_PRINT, ProcLockHashCode(), SpinLockAcquire, SpinLockRelease, UnGrantLock(), and WARNING.

Referenced by lock_twophase_postcommit(), LockReleaseAll(), and VirtualXactLockTableCleanup().

◆ LockRelease()

bool LockRelease ( const LOCKTAG locktag,
LOCKMODE  lockmode,
bool  sessionLock 
)

Definition at line 1942 of file lock.c.

 1942 LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
 1943 {
1944  LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
1945  LockMethod lockMethodTable;
1946  LOCALLOCKTAG localtag;
1947  LOCALLOCK *locallock;
1948  LOCK *lock;
1949  PROCLOCK *proclock;
1950  LWLock *partitionLock;
1951  bool wakeupNeeded;
1952 
1953  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
1954  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
1955  lockMethodTable = LockMethods[lockmethodid];
1956  if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
1957  elog(ERROR, "unrecognized lock mode: %d", lockmode);
1958 
1959 #ifdef LOCK_DEBUG
1960  if (LOCK_DEBUG_ENABLED(locktag))
1961  elog(LOG, "LockRelease: lock [%u,%u] %s",
1962  locktag->locktag_field1, locktag->locktag_field2,
1963  lockMethodTable->lockModeNames[lockmode]);
1964 #endif
1965 
1966  /*
1967  * Find the LOCALLOCK entry for this lock and lockmode
1968  */
1969  MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
1970  localtag.lock = *locktag;
1971  localtag.mode = lockmode;
1972 
1973  locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
1974  &localtag,
1975  HASH_FIND, NULL);
1976 
1977  /*
1978  * let the caller print its own error message, too. Do not ereport(ERROR).
1979  */
1980  if (!locallock || locallock->nLocks <= 0)
1981  {
1982  elog(WARNING, "you don't own a lock of type %s",
1983  lockMethodTable->lockModeNames[lockmode]);
1984  return false;
1985  }
1986 
1987  /*
1988  * Decrease the count for the resource owner.
1989  */
1990  {
1991  LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
1992  ResourceOwner owner;
1993  int i;
1994 
1995  /* Identify owner for lock */
1996  if (sessionLock)
1997  owner = NULL;
1998  else
1999  owner = CurrentResourceOwner;
2000 
2001  for (i = locallock->numLockOwners - 1; i >= 0; i--)
2002  {
2003  if (lockOwners[i].owner == owner)
2004  {
2005  Assert(lockOwners[i].nLocks > 0);
2006  if (--lockOwners[i].nLocks == 0)
2007  {
2008  if (owner != NULL)
2009  ResourceOwnerForgetLock(owner, locallock);
2010  /* compact out unused slot */
2011  locallock->numLockOwners--;
2012  if (i < locallock->numLockOwners)
2013  lockOwners[i] = lockOwners[locallock->numLockOwners];
2014  }
2015  break;
2016  }
2017  }
2018  if (i < 0)
2019  {
2020  /* don't release a lock belonging to another owner */
2021  elog(WARNING, "you don't own a lock of type %s",
2022  lockMethodTable->lockModeNames[lockmode]);
2023  return false;
2024  }
2025  }
2026 
2027  /*
2028  * Decrease the total local count. If we're still holding the lock, we're
2029  * done.
2030  */
2031  locallock->nLocks--;
2032 
2033  if (locallock->nLocks > 0)
2034  return true;
2035 
2036  /*
2037  * At this point we can no longer suppose we are clear of invalidation
2038  * messages related to this lock. Although we'll delete the LOCALLOCK
2039  * object before any intentional return from this routine, it seems worth
2040  * the trouble to explicitly reset lockCleared right now, just in case
2041  * some error prevents us from deleting the LOCALLOCK.
2042  */
2043  locallock->lockCleared = false;
2044 
2045  /* Attempt fast release of any lock eligible for the fast path. */
2046  if (EligibleForRelationFastPath(locktag, lockmode) &&
 2047  FastPathLocalUseCount > 0)
 2048  {
2049  bool released;
2050 
2051  /*
2052  * We might not find the lock here, even if we originally entered it
2053  * here. Another backend may have moved it to the main table.
2054  */
 2055  LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
 2056  released = FastPathUnGrantRelationLock(locktag->locktag_field2,
2057  lockmode);
 2058  LWLockRelease(&MyProc->fpInfoLock);
 2059  if (released)
2060  {
2061  RemoveLocalLock(locallock);
2062  return true;
2063  }
2064  }
2065 
2066  /*
2067  * Otherwise we've got to mess with the shared lock table.
2068  */
2069  partitionLock = LockHashPartitionLock(locallock->hashcode);
2070 
2071  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2072 
2073  /*
2074  * Normally, we don't need to re-find the lock or proclock, since we kept
2075  * their addresses in the locallock table, and they couldn't have been
2076  * removed while we were holding a lock on them. But it's possible that
2077  * the lock was taken fast-path and has since been moved to the main hash
2078  * table by another backend, in which case we will need to look up the
2079  * objects here. We assume the lock field is NULL if so.
2080  */
2081  lock = locallock->lock;
2082  if (!lock)
2083  {
2084  PROCLOCKTAG proclocktag;
2085 
2086  Assert(EligibleForRelationFastPath(locktag, lockmode));
 2087  lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
 2088  locktag,
2089  locallock->hashcode,
2090  HASH_FIND,
2091  NULL);
2092  if (!lock)
2093  elog(ERROR, "failed to re-find shared lock object");
2094  locallock->lock = lock;
2095 
2096  proclocktag.myLock = lock;
2097  proclocktag.myProc = MyProc;
 2098  locallock->proclock = (PROCLOCK *) hash_search(LockMethodProcLockHash,
 2099  &proclocktag,
2100  HASH_FIND,
2101  NULL);
2102  if (!locallock->proclock)
2103  elog(ERROR, "failed to re-find shared proclock object");
2104  }
2105  LOCK_PRINT("LockRelease: found", lock, lockmode);
2106  proclock = locallock->proclock;
2107  PROCLOCK_PRINT("LockRelease: found", proclock);
2108 
2109  /*
2110  * Double-check that we are actually holding a lock of the type we want to
2111  * release.
2112  */
2113  if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
2114  {
2115  PROCLOCK_PRINT("LockRelease: WRONGTYPE", proclock);
2116  LWLockRelease(partitionLock);
2117  elog(WARNING, "you don't own a lock of type %s",
2118  lockMethodTable->lockModeNames[lockmode]);
2119  RemoveLocalLock(locallock);
2120  return false;
2121  }
2122 
2123  /*
2124  * Do the releasing. CleanUpLock will waken any now-wakable waiters.
2125  */
2126  wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
2127 
2128  CleanUpLock(lock, proclock,
2129  lockMethodTable, locallock->hashcode,
2130  wakeupNeeded);
2131 
2132  LWLockRelease(partitionLock);
2133 
2134  RemoveLocalLock(locallock);
2135  return true;
2136 }
static bool FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode)
Definition: lock.c:2650

References Assert, CleanUpLock(), CurrentResourceOwner, EligibleForRelationFastPath, elog, ERROR, FastPathLocalUseCount, FastPathUnGrantRelationLock(), PGPROC::fpInfoLock, HASH_FIND, hash_search(), hash_search_with_hash_value(), LOCALLOCK::hashcode, PROCLOCK::holdMask, i, lengthof, LOCALLOCKTAG::lock, LOCALLOCK::lock, LOCK_PRINT, LOCKBIT_ON, LOCALLOCK::lockCleared, LockHashPartitionLock, LockMethodLocalHash, LockMethodLockHash, LockMethodProcLockHash, LockMethods, LockMethodData::lockModeNames, LOCALLOCK::lockOwners, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_lockmethodid, LOG, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), MemSet, LOCALLOCKTAG::mode, PROCLOCKTAG::myLock, MyProc, PROCLOCKTAG::myProc, LOCALLOCK::nLocks, LockMethodData::numLockModes, LOCALLOCK::numLockOwners, LOCALLOCK::proclock, PROCLOCK_PRINT, RemoveLocalLock(), ResourceOwnerForgetLock(), UnGrantLock(), and WARNING.

Referenced by ConditionalXactLockTableWait(), pg_advisory_unlock_int4(), pg_advisory_unlock_int8(), pg_advisory_unlock_shared_int4(), pg_advisory_unlock_shared_int8(), ReleaseLockIfHeld(), SpeculativeInsertionLockRelease(), SpeculativeInsertionWait(), StandbyReleaseXidEntryLocks(), UnlockApplyTransactionForSession(), UnlockDatabaseObject(), UnlockPage(), UnlockRelation(), UnlockRelationForExtension(), UnlockRelationId(), UnlockRelationIdForSession(), UnlockRelationOid(), UnlockSharedObject(), UnlockSharedObjectForSession(), UnlockTuple(), VirtualXactLock(), XactLockForVirtualXact(), XactLockTableDelete(), and XactLockTableWait().

◆ LockReleaseAll()

void LockReleaseAll ( LOCKMETHODID  lockmethodid,
bool  allLocks 
)

Definition at line 2147 of file lock.c.

 2147 LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
 2148 {
2149  HASH_SEQ_STATUS status;
2150  LockMethod lockMethodTable;
2151  int i,
2152  numLockModes;
2153  LOCALLOCK *locallock;
2154  LOCK *lock;
2155  int partition;
2156  bool have_fast_path_lwlock = false;
2157 
2158  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2159  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2160  lockMethodTable = LockMethods[lockmethodid];
2161 
2162 #ifdef LOCK_DEBUG
2163  if (*(lockMethodTable->trace_flag))
2164  elog(LOG, "LockReleaseAll: lockmethod=%d", lockmethodid);
2165 #endif
2166 
2167  /*
2168  * Get rid of our fast-path VXID lock, if appropriate. Note that this is
2169  * the only way that the lock we hold on our own VXID can ever get
2170  * released: it is always and only released when a toplevel transaction
2171  * ends.
2172  */
2173  if (lockmethodid == DEFAULT_LOCKMETHOD)
 2174  VirtualXactLockTableCleanup();
 2175 
2176  numLockModes = lockMethodTable->numLockModes;
2177 
2178  /*
2179  * First we run through the locallock table and get rid of unwanted
2180  * entries, then we scan the process's proclocks and get rid of those. We
2181  * do this separately because we may have multiple locallock entries
2182  * pointing to the same proclock, and we daren't end up with any dangling
2183  * pointers. Fast-path locks are cleaned up during the locallock table
2184  * scan, though.
2185  */
 2186  hash_seq_init(&status, LockMethodLocalHash);
 2187 
2188  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2189  {
2190  /*
2191  * If the LOCALLOCK entry is unused, we must've run out of shared
2192  * memory while trying to set up this lock. Just forget the local
2193  * entry.
2194  */
2195  if (locallock->nLocks == 0)
2196  {
2197  RemoveLocalLock(locallock);
2198  continue;
2199  }
2200 
2201  /* Ignore items that are not of the lockmethod to be removed */
2202  if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
2203  continue;
2204 
2205  /*
2206  * If we are asked to release all locks, we can just zap the entry.
2207  * Otherwise, must scan to see if there are session locks. We assume
2208  * there is at most one lockOwners entry for session locks.
2209  */
2210  if (!allLocks)
2211  {
2212  LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
2213 
2214  /* If session lock is above array position 0, move it down to 0 */
2215  for (i = 0; i < locallock->numLockOwners; i++)
2216  {
2217  if (lockOwners[i].owner == NULL)
2218  lockOwners[0] = lockOwners[i];
2219  else
2220  ResourceOwnerForgetLock(lockOwners[i].owner, locallock);
2221  }
2222 
2223  if (locallock->numLockOwners > 0 &&
2224  lockOwners[0].owner == NULL &&
2225  lockOwners[0].nLocks > 0)
2226  {
2227  /* Fix the locallock to show just the session locks */
2228  locallock->nLocks = lockOwners[0].nLocks;
2229  locallock->numLockOwners = 1;
2230  /* We aren't deleting this locallock, so done */
2231  continue;
2232  }
2233  else
2234  locallock->numLockOwners = 0;
2235  }
2236 
2237  /*
2238  * If the lock or proclock pointers are NULL, this lock was taken via
2239  * the relation fast-path (and is not known to have been transferred).
2240  */
2241  if (locallock->proclock == NULL || locallock->lock == NULL)
2242  {
2243  LOCKMODE lockmode = locallock->tag.mode;
2244  Oid relid;
2245 
2246  /* Verify that a fast-path lock is what we've got. */
2247  if (!EligibleForRelationFastPath(&locallock->tag.lock, lockmode))
2248  elog(PANIC, "locallock table corrupted");
2249 
2250  /*
2251  * If we don't currently hold the LWLock that protects our
2252  * fast-path data structures, we must acquire it before attempting
2253  * to release the lock via the fast-path. We will continue to
2254  * hold the LWLock until we're done scanning the locallock table,
2255  * unless we hit a transferred fast-path lock. (XXX is this
2256  * really such a good idea? There could be a lot of entries ...)
2257  */
2258  if (!have_fast_path_lwlock)
2259  {
 2260  LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
 2261  have_fast_path_lwlock = true;
2262  }
2263 
2264  /* Attempt fast-path release. */
2265  relid = locallock->tag.lock.locktag_field2;
2266  if (FastPathUnGrantRelationLock(relid, lockmode))
2267  {
2268  RemoveLocalLock(locallock);
2269  continue;
2270  }
2271 
2272  /*
2273  * Our lock, originally taken via the fast path, has been
2274  * transferred to the main lock table. That's going to require
2275  * some extra work, so release our fast-path lock before starting.
2276  */
 2277  LWLockRelease(&MyProc->fpInfoLock);
 2278  have_fast_path_lwlock = false;
2279 
2280  /*
2281  * Now dump the lock. We haven't got a pointer to the LOCK or
2282  * PROCLOCK in this case, so we have to handle this a bit
2283  * differently than a normal lock release. Unfortunately, this
2284  * requires an extra LWLock acquire-and-release cycle on the
2285  * partitionLock, but hopefully it shouldn't happen often.
2286  */
2287  LockRefindAndRelease(lockMethodTable, MyProc,
2288  &locallock->tag.lock, lockmode, false);
2289  RemoveLocalLock(locallock);
2290  continue;
2291  }
2292 
2293  /* Mark the proclock to show we need to release this lockmode */
2294  if (locallock->nLocks > 0)
2295  locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
2296 
2297  /* And remove the locallock hashtable entry */
2298  RemoveLocalLock(locallock);
2299  }
2300 
2301  /* Done with the fast-path data structures */
2302  if (have_fast_path_lwlock)
 2303  LWLockRelease(&MyProc->fpInfoLock);
 2304 
2305  /*
2306  * Now, scan each lock partition separately.
2307  */
2308  for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
2309  {
2310  LWLock *partitionLock;
2311  dlist_head *procLocks = &MyProc->myProcLocks[partition];
2312  dlist_mutable_iter proclock_iter;
2313 
2314  partitionLock = LockHashPartitionLockByIndex(partition);
2315 
2316  /*
2317  * If the proclock list for this partition is empty, we can skip
2318  * acquiring the partition lock. This optimization is trickier than
2319  * it looks, because another backend could be in process of adding
2320  * something to our proclock list due to promoting one of our
2321  * fast-path locks. However, any such lock must be one that we
2322  * decided not to delete above, so it's okay to skip it again now;
2323  * we'd just decide not to delete it again. We must, however, be
2324  * careful to re-fetch the list header once we've acquired the
2325  * partition lock, to be sure we have a valid, up-to-date pointer.
2326  * (There is probably no significant risk if pointer fetch/store is
2327  * atomic, but we don't wish to assume that.)
2328  *
2329  * XXX This argument assumes that the locallock table correctly
2330  * represents all of our fast-path locks. While allLocks mode
2331  * guarantees to clean up all of our normal locks regardless of the
2332  * locallock situation, we lose that guarantee for fast-path locks.
2333  * This is not ideal.
2334  */
2335  if (dlist_is_empty(procLocks))
2336  continue; /* needn't examine this partition */
2337 
2338  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2339 
2340  dlist_foreach_modify(proclock_iter, procLocks)
2341  {
2342  PROCLOCK *proclock = dlist_container(PROCLOCK, procLink, proclock_iter.cur);
2343  bool wakeupNeeded = false;
2344 
2345  Assert(proclock->tag.myProc == MyProc);
2346 
2347  lock = proclock->tag.myLock;
2348 
2349  /* Ignore items that are not of the lockmethod to be removed */
2350  if (LOCK_LOCKMETHOD(*lock) != lockmethodid)
2351  continue;
2352 
2353  /*
2354  * In allLocks mode, force release of all locks even if locallock
2355  * table had problems
2356  */
2357  if (allLocks)
2358  proclock->releaseMask = proclock->holdMask;
2359  else
2360  Assert((proclock->releaseMask & ~proclock->holdMask) == 0);
2361 
2362  /*
2363  * Ignore items that have nothing to be released, unless they have
2364  * holdMask == 0 and are therefore recyclable
2365  */
2366  if (proclock->releaseMask == 0 && proclock->holdMask != 0)
2367  continue;
2368 
2369  PROCLOCK_PRINT("LockReleaseAll", proclock);
2370  LOCK_PRINT("LockReleaseAll", lock, 0);
2371  Assert(lock->nRequested >= 0);
2372  Assert(lock->nGranted >= 0);
2373  Assert(lock->nGranted <= lock->nRequested);
2374  Assert((proclock->holdMask & ~lock->grantMask) == 0);
2375 
2376  /*
2377  * Release the previously-marked lock modes
2378  */
2379  for (i = 1; i <= numLockModes; i++)
2380  {
2381  if (proclock->releaseMask & LOCKBIT_ON(i))
2382  wakeupNeeded |= UnGrantLock(lock, i, proclock,
2383  lockMethodTable);
2384  }
2385  Assert((lock->nRequested >= 0) && (lock->nGranted >= 0));
2386  Assert(lock->nGranted <= lock->nRequested);
2387  LOCK_PRINT("LockReleaseAll: updated", lock, 0);
2388 
2389  proclock->releaseMask = 0;
2390 
2391  /* CleanUpLock will wake up waiters if needed. */
2392  CleanUpLock(lock, proclock,
2393  lockMethodTable,
2394  LockTagHashCode(&lock->tag),
2395  wakeupNeeded);
2396  } /* loop over PROCLOCKs within this partition */
2397 
2398  LWLockRelease(partitionLock);
2399  } /* loop over partitions */
2400 
2401 #ifdef LOCK_DEBUG
2402  if (*(lockMethodTable->trace_flag))
2403  elog(LOG, "LockReleaseAll done");
2404 #endif
2405 }
#define dlist_foreach_modify(iter, lhead)
Definition: ilist.h:640
void VirtualXactLockTableCleanup(void)
Definition: lock.c:4428
#define LOCALLOCK_LOCKMETHOD(llock)
Definition: lock.h:443
const bool * trace_flag
Definition: lock.h:113
dlist_node * cur
Definition: ilist.h:200

References Assert, CleanUpLock(), dlist_mutable_iter::cur, DEFAULT_LOCKMETHOD, dlist_container, dlist_foreach_modify, dlist_is_empty(), EligibleForRelationFastPath, elog, ERROR, FastPathUnGrantRelationLock(), PGPROC::fpInfoLock, LOCK::grantMask, hash_seq_init(), hash_seq_search(), PROCLOCK::holdMask, i, lengthof, LOCALLOCK_LOCKMETHOD, LOCALLOCKTAG::lock, LOCALLOCK::lock, LOCK_LOCKMETHOD, LOCK_PRINT, LOCKBIT_ON, LockHashPartitionLockByIndex, LockMethodLocalHash, LockMethods, LOCALLOCK::lockOwners, LockRefindAndRelease(), LOCKTAG::locktag_field2, LockTagHashCode(), LOG, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), LOCALLOCKTAG::mode, PROCLOCKTAG::myLock, MyProc, PROCLOCKTAG::myProc, PGPROC::myProcLocks, LOCK::nGranted, LOCALLOCKOWNER::nLocks, LOCALLOCK::nLocks, LOCK::nRequested, NUM_LOCK_PARTITIONS, LockMethodData::numLockModes, LOCALLOCK::numLockOwners, LOCALLOCKOWNER::owner, PANIC, LOCALLOCK::proclock, PROCLOCK_PRINT, PROCLOCK::releaseMask, RemoveLocalLock(), ResourceOwnerForgetLock(), LOCK::tag, PROCLOCK::tag, LOCALLOCK::tag, LockMethodData::trace_flag, UnGrantLock(), and VirtualXactLockTableCleanup().

Referenced by DiscardAll(), logicalrep_worker_onexit(), ProcReleaseLocks(), and ShutdownPostgres().

◆ LockReleaseCurrentOwner()

void LockReleaseCurrentOwner ( LOCALLOCK **  locallocks,
int  nlocks 
)

Definition at line 2442 of file lock.c.

/*
 * LockReleaseCurrentOwner
 *		Release all locks belonging to CurrentResourceOwner.
 *
 * If the caller knows which locks those are, it passes them as the array
 * locallocks[0..nlocks-1]; passing locallocks == NULL means "scan the whole
 * LockMethodLocalHash table instead".  Each candidate is handed to
 * ReleaseLockIfHeld() with sessionLock = false (transaction-level release).
 */
2443 {
2444  if (locallocks == NULL)
2445  {
2446  HASH_SEQ_STATUS status;
2447  LOCALLOCK *locallock;
2448 
/* NOTE(review): the hash_seq_init(&status, LockMethodLocalHash) call at
 * original line 2449 was dropped by the documentation extraction. */
2450 
2451  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2452  ReleaseLockIfHeld(locallock, false);
2453  }
2454  else
2455  {
2456  int i;
2457 
2458  for (i = nlocks - 1; i >= 0; i--)
2459  ReleaseLockIfHeld(locallocks[i], false);
2460  }
2461 }
static void ReleaseLockIfHeld(LOCALLOCK *locallock, bool sessionLock)
Definition: lock.c:2477

References hash_seq_init(), hash_seq_search(), i, LockMethodLocalHash, and ReleaseLockIfHeld().

Referenced by ResourceOwnerReleaseInternal().

◆ LockReleaseSession()

void LockReleaseSession ( LOCKMETHODID  lockmethodid)

Definition at line 2412 of file lock.c.

/*
 * LockReleaseSession
 *		Release all session-level locks of the given lock method.
 *
 * Scans the backend-local lock table and calls ReleaseLockIfHeld() with
 * sessionLock = true for every entry of the requested lock method; entries
 * of other lock methods are skipped.  Raises ERROR for an out-of-range
 * lockmethodid.
 */
2413 {
2414  HASH_SEQ_STATUS status;
2415  LOCALLOCK *locallock;
2416 
2417  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2418  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2419 
/* NOTE(review): the hash_seq_init(&status, LockMethodLocalHash) call at
 * original line 2420 was dropped by the documentation extraction. */
2421 
2422  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2423  {
2424  /* Ignore items that are not of the specified lock method */
2425  if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
2426  continue;
2427 
2428  ReleaseLockIfHeld(locallock, true);
2429  }
2430 }

References elog, ERROR, hash_seq_init(), hash_seq_search(), lengthof, LOCALLOCK_LOCKMETHOD, LockMethodLocalHash, LockMethods, and ReleaseLockIfHeld().

Referenced by pg_advisory_unlock_all().

◆ LockShmemSize()

Size LockShmemSize ( void  )

Definition at line 3552 of file lock.c.

3553 {
3554  Size size = 0;
3555  long max_table_size;
3556 
3557  /* lock hash table */
3558  max_table_size = NLOCKENTS();
3559  size = add_size(size, hash_estimate_size(max_table_size, sizeof(LOCK)));
3560 
3561  /* proclock hash table */
3562  max_table_size *= 2;
3563  size = add_size(size, hash_estimate_size(max_table_size, sizeof(PROCLOCK)));
3564 
3565  /*
3566  * Since NLOCKENTS is only an estimate, add 10% safety margin.
3567  */
3568  size = add_size(size, size / 10);
3569 
3570  return size;
3571 }
size_t Size
Definition: c.h:605
Size hash_estimate_size(long num_entries, Size entrysize)
Definition: dynahash.c:783
Size add_size(Size s1, Size s2)
Definition: shmem.c:493
static pg_noinline void Size size
Definition: slab.c:607

References add_size(), hash_estimate_size(), NLOCKENTS, and size.

Referenced by CalculateShmemSize().

◆ LockTagHashCode()

uint32 LockTagHashCode ( const LOCKTAG locktag)

Definition at line 504 of file lock.c.

505 {
506  return get_hash_value(LockMethodLockHash, (const void *) locktag);
507 }
uint32 get_hash_value(HTAB *hashp, const void *keyPtr)
Definition: dynahash.c:911

References get_hash_value(), and LockMethodLockHash.

Referenced by CheckDeadLock(), GetLockConflicts(), lock_twophase_recover(), LockAcquireExtended(), LockRefindAndRelease(), LockReleaseAll(), LockWaiterCount(), proclock_hash(), and VirtualXactLock().

◆ LockWaiterCount()

int LockWaiterCount ( const LOCKTAG locktag)

Definition at line 4639 of file lock.c.

/*
 * LockWaiterCount
 *		Look up the shared LOCK entry for the given tag and report its
 *		nRequested count (0 if no entry exists).
 *
 * Takes the relevant lock-table partition lock in exclusive mode for the
 * duration of the lookup.  NOTE(review): nRequested counts granted holders
 * as well as waiters, so "waiters" here is the total request count — verify
 * against callers' expectations.
 */
4640 {
4641  LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
4642  LOCK *lock;
4643  bool found;
4644  uint32 hashcode;
4645  LWLock *partitionLock;
4646  int waiters = 0;
4647 
4648  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4649  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4650 
4651  hashcode = LockTagHashCode(locktag);
4652  partitionLock = LockHashPartitionLock(hashcode);
4653  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4654 
/* NOTE(review): the "lock = (LOCK *) hash_search_with_hash_value(..." call
 * head at original line 4655 was dropped by the documentation extraction. */
4656  locktag,
4657  hashcode,
4658  HASH_FIND,
4659  &found);
4660  if (found)
4661  {
4662  Assert(lock != NULL);
4663  waiters = lock->nRequested;
4664  }
4665  LWLockRelease(partitionLock);
4666 
4667  return waiters;
4668 }

References Assert, elog, ERROR, HASH_FIND, hash_search_with_hash_value(), lengthof, LockHashPartitionLock, LockMethodLockHash, LockMethods, LOCKTAG::locktag_lockmethodid, LockTagHashCode(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), and LOCK::nRequested.

Referenced by RelationExtensionLockWaiterCount().

◆ MarkLockClear()

void MarkLockClear ( LOCALLOCK locallock)

◆ PostPrepare_Locks()

void PostPrepare_Locks ( TransactionId  xid)

Definition at line 3368 of file lock.c.

/*
 * PostPrepare_Locks
 *		Transfer this backend's transaction-level locks to the dummy PGPROC
 *		that represents the prepared transaction `xid`.
 *
 * Phase 1 cleans the backend-local lock table: forgets incompletely set-up
 * entries, skips VXID locks and pure session locks, PANICs on mixed
 * session+xact holds, marks each surviving proclock's releaseMask, and
 * removes the locallock entry.  Phase 2 walks every lock partition and
 * re-keys each marked proclock (via hash_update_hash_key) so it is owned by
 * the dummy proc, relinking it into that proc's myProcLocks chains.
 * Runs inside a critical section: any error here is fatal.
 */
3369 {
3370  PGPROC *newproc = TwoPhaseGetDummyProc(xid, false);
3371  HASH_SEQ_STATUS status;
3372  LOCALLOCK *locallock;
3373  LOCK *lock;
3374  PROCLOCK *proclock;
3375  PROCLOCKTAG proclocktag;
3376  int partition;
3377 
3378  /* Can't prepare a lock group follower. */
3379  Assert(MyProc->lockGroupLeader == NULL ||
/* NOTE(review): the Assert's second operand ("MyProc->lockGroupLeader ==
 * MyProc);") at original line 3380 was dropped by the doc extraction. */
3381 
3382  /* This is a critical section: any error means big trouble */
/* NOTE(review): START_CRIT_SECTION(); at original line 3383 was dropped by
 * the doc extraction. */
3384 
3385  /*
3386  * First we run through the locallock table and get rid of unwanted
3387  * entries, then we scan the process's proclocks and transfer them to the
3388  * target proc.
3389  *
3390  * We do this separately because we may have multiple locallock entries
3391  * pointing to the same proclock, and we daren't end up with any dangling
3392  * pointers.
3393  */
/* NOTE(review): hash_seq_init(&status, LockMethodLocalHash); at original
 * line 3394 was dropped by the doc extraction. */
3395 
3396  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3397  {
3398  LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3399  bool haveSessionLock;
3400  bool haveXactLock;
3401  int i;
3402 
3403  if (locallock->proclock == NULL || locallock->lock == NULL)
3404  {
3405  /*
3406  * We must've run out of shared memory while trying to set up this
3407  * lock. Just forget the local entry.
3408  */
3409  Assert(locallock->nLocks == 0);
3410  RemoveLocalLock(locallock);
3411  continue;
3412  }
3413 
3414  /* Ignore VXID locks */
3415  if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3416  continue;
3417 
3418  /* Scan to see whether we hold it at session or transaction level */
3419  haveSessionLock = haveXactLock = false;
3420  for (i = locallock->numLockOwners - 1; i >= 0; i--)
3421  {
3422  if (lockOwners[i].owner == NULL)
3423  haveSessionLock = true;
3424  else
3425  haveXactLock = true;
3426  }
3427 
3428  /* Ignore it if we have only session lock */
3429  if (!haveXactLock)
3430  continue;
3431 
3432  /* This can't happen, because we already checked it */
3433  if (haveSessionLock)
3434  ereport(PANIC,
3435  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3436  errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3437 
3438  /* Mark the proclock to show we need to release this lockmode */
3439  if (locallock->nLocks > 0)
3440  locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
3441 
3442  /* And remove the locallock hashtable entry */
3443  RemoveLocalLock(locallock);
3444  }
3445 
3446  /*
3447  * Now, scan each lock partition separately.
3448  */
3449  for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
3450  {
3451  LWLock *partitionLock;
3452  dlist_head *procLocks = &(MyProc->myProcLocks[partition]);
3453  dlist_mutable_iter proclock_iter;
3454 
3455  partitionLock = LockHashPartitionLockByIndex(partition);
3456 
3457  /*
3458  * If the proclock list for this partition is empty, we can skip
3459  * acquiring the partition lock. This optimization is safer than the
3460  * situation in LockReleaseAll, because we got rid of any fast-path
3461  * locks during AtPrepare_Locks, so there cannot be any case where
3462  * another backend is adding something to our lists now. For safety,
3463  * though, we code this the same way as in LockReleaseAll.
3464  */
3465  if (dlist_is_empty(procLocks))
3466  continue; /* needn't examine this partition */
3467 
3468  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
3469 
3470  dlist_foreach_modify(proclock_iter, procLocks)
3471  {
3472  proclock = dlist_container(PROCLOCK, procLink, proclock_iter.cur);
3473 
3474  Assert(proclock->tag.myProc == MyProc);
3475 
3476  lock = proclock->tag.myLock;
3477 
3478  /* Ignore VXID locks */
/* NOTE(review): the if-condition testing the lock's locktag_type at
 * original line 3479 was dropped by the doc extraction. */
3480  continue;
3481 
3482  PROCLOCK_PRINT("PostPrepare_Locks", proclock);
3483  LOCK_PRINT("PostPrepare_Locks", lock, 0);
3484  Assert(lock->nRequested >= 0);
3485  Assert(lock->nGranted >= 0);
3486  Assert(lock->nGranted <= lock->nRequested);
3487  Assert((proclock->holdMask & ~lock->grantMask) == 0);
3488 
3489  /* Ignore it if nothing to release (must be a session lock) */
3490  if (proclock->releaseMask == 0)
3491  continue;
3492 
3493  /* Else we should be releasing all locks */
3494  if (proclock->releaseMask != proclock->holdMask)
3495  elog(PANIC, "we seem to have dropped a bit somewhere");
3496 
3497  /*
3498  * We cannot simply modify proclock->tag.myProc to reassign
3499  * ownership of the lock, because that's part of the hash key and
3500  * the proclock would then be in the wrong hash chain. Instead
3501  * use hash_update_hash_key. (We used to create a new hash entry,
3502  * but that risks out-of-memory failure if other processes are
3503  * busy making proclocks too.) We must unlink the proclock from
3504  * our procLink chain and put it into the new proc's chain, too.
3505  *
3506  * Note: the updated proclock hash key will still belong to the
3507  * same hash partition, cf proclock_hash(). So the partition lock
3508  * we already hold is sufficient for this.
3509  */
3510  dlist_delete(&proclock->procLink);
3511 
3512  /*
3513  * Create the new hash key for the proclock.
3514  */
3515  proclocktag.myLock = lock;
3516  proclocktag.myProc = newproc;
3517 
3518  /*
3519  * Update groupLeader pointer to point to the new proc. (We'd
3520  * better not be a member of somebody else's lock group!)
3521  */
3522  Assert(proclock->groupLeader == proclock->tag.myProc);
3523  proclock->groupLeader = newproc;
3524 
3525  /*
3526  * Update the proclock. We should not find any existing entry for
3527  * the same hash key, since there can be only one entry for any
3528  * given lock with my own proc.
3529  */
/* NOTE(review): the "if (!hash_update_hash_key(LockMethodProcLockHash,"
 * call head at original line 3530 was dropped by the doc extraction. */
3531  proclock,
3532  &proclocktag))
3533  elog(PANIC, "duplicate entry found while reassigning a prepared transaction's locks");
3534 
3535  /* Re-link into the new proc's proclock list */
3536  dlist_push_tail(&newproc->myProcLocks[partition], &proclock->procLink);
3537 
3538  PROCLOCK_PRINT("PostPrepare_Locks: updated", proclock);
3539  } /* loop over PROCLOCKs within this partition */
3540 
3541  LWLockRelease(partitionLock);
3542  } /* loop over partitions */
3543 
3544  END_CRIT_SECTION();
3545 }
bool hash_update_hash_key(HTAB *hashp, void *existingEntry, const void *newKeyPtr)
Definition: dynahash.c:1145
#define START_CRIT_SECTION()
Definition: miscadmin.h:149
#define END_CRIT_SECTION()
Definition: miscadmin.h:151

References Assert, dlist_mutable_iter::cur, dlist_container, dlist_delete(), dlist_foreach_modify, dlist_is_empty(), dlist_push_tail(), elog, END_CRIT_SECTION, ereport, errcode(), errmsg(), LOCK::grantMask, PROCLOCK::groupLeader, hash_seq_init(), hash_seq_search(), hash_update_hash_key(), PROCLOCK::holdMask, i, LOCALLOCKTAG::lock, LOCALLOCK::lock, LOCK_PRINT, LOCKBIT_ON, PGPROC::lockGroupLeader, LockHashPartitionLockByIndex, LockMethodLocalHash, LockMethodProcLockHash, LOCALLOCK::lockOwners, LOCKTAG::locktag_type, LOCKTAG_VIRTUALTRANSACTION, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), LOCALLOCKTAG::mode, PROCLOCKTAG::myLock, MyProc, PROCLOCKTAG::myProc, PGPROC::myProcLocks, LOCK::nGranted, LOCALLOCK::nLocks, LOCK::nRequested, NUM_LOCK_PARTITIONS, LOCALLOCK::numLockOwners, PANIC, PROCLOCK::procLink, LOCALLOCK::proclock, PROCLOCK_PRINT, PROCLOCK::releaseMask, RemoveLocalLock(), START_CRIT_SECTION, LOCK::tag, PROCLOCK::tag, LOCALLOCK::tag, and TwoPhaseGetDummyProc().

Referenced by PrepareTransaction().

◆ proclock_hash()

static uint32 proclock_hash ( const void *  key,
Size  keysize 
)
static

Definition at line 521 of file lock.c.

522 {
523  const PROCLOCKTAG *proclocktag = (const PROCLOCKTAG *) key;
524  uint32 lockhash;
525  Datum procptr;
526 
527  Assert(keysize == sizeof(PROCLOCKTAG));
528 
529  /* Look into the associated LOCK object, and compute its hash code */
530  lockhash = LockTagHashCode(&proclocktag->myLock->tag);
531 
532  /*
533  * To make the hash code also depend on the PGPROC, we xor the proc
534  * struct's address into the hash code, left-shifted so that the
535  * partition-number bits don't change. Since this is only a hash, we
536  * don't care if we lose high-order bits of the address; use an
537  * intermediate variable to suppress cast-pointer-to-int warnings.
538  */
539  procptr = PointerGetDatum(proclocktag->myProc);
540  lockhash ^= ((uint32) procptr) << LOG2_NUM_LOCK_PARTITIONS;
541 
542  return lockhash;
543 }
#define LOG2_NUM_LOCK_PARTITIONS
Definition: lwlock.h:96
static Datum PointerGetDatum(const void *X)
Definition: postgres.h:322
uintptr_t Datum
Definition: postgres.h:64

References Assert, key, LockTagHashCode(), LOG2_NUM_LOCK_PARTITIONS, PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, PointerGetDatum(), and LOCK::tag.

Referenced by InitLocks().

◆ ProcLockHashCode()

static uint32 ProcLockHashCode ( const PROCLOCKTAG proclocktag,
uint32  hashcode 
)
inlinestatic

Definition at line 552 of file lock.c.

553 {
554  uint32 lockhash = hashcode;
555  Datum procptr;
556 
557  /*
558  * This must match proclock_hash()!
559  */
560  procptr = PointerGetDatum(proclocktag->myProc);
561  lockhash ^= ((uint32) procptr) << LOG2_NUM_LOCK_PARTITIONS;
562 
563  return lockhash;
564 }

References LOG2_NUM_LOCK_PARTITIONS, PROCLOCKTAG::myProc, and PointerGetDatum().

Referenced by CleanUpLock(), FastPathGetRelationLockEntry(), lock_twophase_recover(), LockAcquireExtended(), LockRefindAndRelease(), and SetupLockInTable().

◆ ReleaseLockIfHeld()

static void ReleaseLockIfHeld ( LOCALLOCK locallock,
bool  sessionLock 
)
static

Definition at line 2477 of file lock.c.

2478 {
2479  ResourceOwner owner;
2480  LOCALLOCKOWNER *lockOwners;
2481  int i;
2482 
2483  /* Identify owner for lock (must match LockRelease!) */
2484  if (sessionLock)
2485  owner = NULL;
2486  else
2487  owner = CurrentResourceOwner;
2488 
2489  /* Scan to see if there are any locks belonging to the target owner */
2490  lockOwners = locallock->lockOwners;
2491  for (i = locallock->numLockOwners - 1; i >= 0; i--)
2492  {
2493  if (lockOwners[i].owner == owner)
2494  {
2495  Assert(lockOwners[i].nLocks > 0);
2496  if (lockOwners[i].nLocks < locallock->nLocks)
2497  {
2498  /*
2499  * We will still hold this lock after forgetting this
2500  * ResourceOwner.
2501  */
2502  locallock->nLocks -= lockOwners[i].nLocks;
2503  /* compact out unused slot */
2504  locallock->numLockOwners--;
2505  if (owner != NULL)
2506  ResourceOwnerForgetLock(owner, locallock);
2507  if (i < locallock->numLockOwners)
2508  lockOwners[i] = lockOwners[locallock->numLockOwners];
2509  }
2510  else
2511  {
2512  Assert(lockOwners[i].nLocks == locallock->nLocks);
2513  /* We want to call LockRelease just once */
2514  lockOwners[i].nLocks = 1;
2515  locallock->nLocks = 1;
2516  if (!LockRelease(&locallock->tag.lock,
2517  locallock->tag.mode,
2518  sessionLock))
2519  elog(WARNING, "ReleaseLockIfHeld: failed??");
2520  }
2521  break;
2522  }
2523  }
2524 }
bool LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
Definition: lock.c:1942

References Assert, CurrentResourceOwner, elog, i, LOCALLOCKTAG::lock, LOCALLOCK::lockOwners, LockRelease(), LOCALLOCKTAG::mode, LOCALLOCKOWNER::nLocks, LOCALLOCK::nLocks, LOCALLOCK::numLockOwners, ResourceOwnerForgetLock(), LOCALLOCK::tag, and WARNING.

Referenced by LockReleaseCurrentOwner(), and LockReleaseSession().

◆ RemoveFromWaitQueue()

void RemoveFromWaitQueue ( PGPROC proc,
uint32  hashcode 
)

Definition at line 1886 of file lock.c.

/*
 * RemoveFromWaitQueue
 *		Remove `proc` from the wait queue of the lock it is sleeping on,
 *		undo its request-count increments, mark it as having failed to get
 *		the lock, and clean up the lock/proclock (waking waiters if needed).
 *
 * Caller must hold the lock-table partition lock for `hashcode`.
 */
1887 {
1888  LOCK *waitLock = proc->waitLock;
1889  PROCLOCK *proclock = proc->waitProcLock;
1890  LOCKMODE lockmode = proc->waitLockMode;
1891  LOCKMETHODID lockmethodid = LOCK_LOCKMETHOD(*waitLock);
1892 
1893  /* Make sure proc is waiting */
/* NOTE(review): an Assert on proc->waitStatus at original line 1894 was
 * dropped by the documentation extraction. */
1895  Assert(proc->links.next != NULL);
1896  Assert(waitLock);
1897  Assert(!dclist_is_empty(&waitLock->waitProcs));
1898  Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
1899 
1900  /* Remove proc from lock's wait queue */
1901  dclist_delete_from_thoroughly(&waitLock->waitProcs, &proc->links);
1902 
1903  /* Undo increments of request counts by waiting process */
1904  Assert(waitLock->nRequested > 0);
1905  Assert(waitLock->nRequested > proc->waitLock->nGranted);
1906  waitLock->nRequested--;
1907  Assert(waitLock->requested[lockmode] > 0);
1908  waitLock->requested[lockmode]--;
1909  /* don't forget to clear waitMask bit if appropriate */
1910  if (waitLock->granted[lockmode] == waitLock->requested[lockmode])
1911  waitLock->waitMask &= LOCKBIT_OFF(lockmode);
1912 
1913  /* Clean up the proc's own state, and pass it the ok/fail signal */
1914  proc->waitLock = NULL;
1915  proc->waitProcLock = NULL;
/* NOTE(review): the assignment of proc->waitStatus (to the error status) at
 * original line 1916 was dropped by the documentation extraction. */
1917 
1918  /*
1919  * Delete the proclock immediately if it represents no already-held locks.
1920  * (This must happen now because if the owner of the lock decides to
1921  * release it, and the requested/granted counts then go to zero,
1922  * LockRelease expects there to be no remaining proclocks.) Then see if
1923  * any other waiters for the lock can be woken up now.
1924  */
1925  CleanUpLock(waitLock, proclock,
1926  LockMethods[lockmethodid], hashcode,
1927  true);
1928 }
static bool dclist_is_empty(const dclist_head *head)
Definition: ilist.h:682
static void dclist_delete_from_thoroughly(dclist_head *head, dlist_node *node)
Definition: ilist.h:776
@ PROC_WAIT_STATUS_WAITING
Definition: proc.h:120
@ PROC_WAIT_STATUS_ERROR
Definition: proc.h:121
PROCLOCK * waitProcLock
Definition: proc.h:229
ProcWaitStatus waitStatus
Definition: proc.h:163

References Assert, CleanUpLock(), dclist_delete_from_thoroughly(), dclist_is_empty(), LOCK::granted, lengthof, PGPROC::links, LOCK_LOCKMETHOD, LOCKBIT_OFF, LockMethods, dlist_node::next, LOCK::nGranted, LOCK::nRequested, PROC_WAIT_STATUS_ERROR, PROC_WAIT_STATUS_WAITING, LOCK::requested, PGPROC::waitLock, PGPROC::waitLockMode, LOCK::waitMask, PGPROC::waitProcLock, LOCK::waitProcs, and PGPROC::waitStatus.

Referenced by CheckDeadLock(), LockErrorCleanup(), and ProcSleep().

◆ RemoveLocalLock()

static void RemoveLocalLock ( LOCALLOCK locallock)
static

Definition at line 1354 of file lock.c.

/*
 * RemoveLocalLock
 *		Drop a LOCALLOCK entry from the backend-local lock table.
 *
 * Forgets every resource-owner association, frees the owner array, and if
 * this entry was counted as a strong relation lock, decrements the shared
 * fast-path strong-lock counter for its partition.  Finally removes the
 * hashtable entry and updates the "lock held" session flags.
 */
1355 {
1356  int i;
1357 
1358  for (i = locallock->numLockOwners - 1; i >= 0; i--)
1359  {
1360  if (locallock->lockOwners[i].owner != NULL)
1361  ResourceOwnerForgetLock(locallock->lockOwners[i].owner, locallock);
1362  }
1363  locallock->numLockOwners = 0;
1364  if (locallock->lockOwners != NULL)
1365  pfree(locallock->lockOwners);
1366  locallock->lockOwners = NULL;
1367 
1368  if (locallock->holdsStrongLockCount)
1369  {
1370  uint32 fasthashcode;
1371 
1372  fasthashcode = FastPathStrongLockHashPartition(locallock->hashcode);
1373 
/* NOTE(review): SpinLockAcquire(&FastPathStrongRelationLocks->mutex); at
 * original line 1374 was dropped by the documentation extraction. */
1375  Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
1376  FastPathStrongRelationLocks->count[fasthashcode]--;
1377  locallock->holdsStrongLockCount = false;
/* NOTE(review): SpinLockRelease(&FastPathStrongRelationLocks->mutex); at
 * original line 1378 was dropped by the documentation extraction. */
1379  }
1380 
/* NOTE(review): the "if (!hash_search(LockMethodLocalHash," call head at
 * original line 1381 was dropped by the documentation extraction. */
1382  &(locallock->tag),
1383  HASH_REMOVE, NULL))
1384  elog(WARNING, "locallock table corrupted");
1385 
1386  /*
1387  * Indicate that the lock is released for certain types of locks
1388  */
1389  CheckAndSetLockHeld(locallock, false);
1390 }
void pfree(void *pointer)
Definition: mcxt.c:1520

References Assert, CheckAndSetLockHeld(), FastPathStrongRelationLockData::count, elog, FastPathStrongLockHashPartition, FastPathStrongRelationLocks, HASH_REMOVE, hash_search(), LOCALLOCK::hashcode, LOCALLOCK::holdsStrongLockCount, i, LockMethodLocalHash, LOCALLOCK::lockOwners, FastPathStrongRelationLockData::mutex, LOCALLOCK::numLockOwners, LOCALLOCKOWNER::owner, pfree(), ResourceOwnerForgetLock(), SpinLockAcquire, SpinLockRelease, LOCALLOCK::tag, and WARNING.

Referenced by LockAcquireExtended(), LockHasWaiters(), LockRelease(), LockReleaseAll(), and PostPrepare_Locks().

◆ SetupLockInTable()

static PROCLOCK * SetupLockInTable ( LockMethod  lockMethodTable,
PGPROC proc,
const LOCKTAG locktag,
uint32  hashcode,
LOCKMODE  lockmode 
)
static

Definition at line 1161 of file lock.c.

/*
 * SetupLockInTable
 *		Find or create the shared LOCK and PROCLOCK entries for `locktag`
 *		on behalf of `proc`, and bump the lock's request counts.
 *
 * Returns the PROCLOCK, or NULL if shared memory was exhausted (in which
 * case a LOCK entry created here with no other requestors is
 * garbage-collected to avoid a permanent shmem leak).  Caller must hold the
 * partition lock for `hashcode`.  Raises ERROR if the proclock already
 * holds the requested mode (locallock table corruption).
 */
1163 {
1164  LOCK *lock;
1165  PROCLOCK *proclock;
1166  PROCLOCKTAG proclocktag;
1167  uint32 proclock_hashcode;
1168  bool found;
1169 
1170  /*
1171  * Find or create a lock with this tag.
1172  */
/* NOTE(review): the "lock = (LOCK *) hash_search_with_hash_value(
 * LockMethodLockHash," call head at original line 1173 was dropped by the
 * documentation extraction; the HASH_ENTER_NULL argument line (1176) is
 * likewise missing below. */
1174  locktag,
1175  hashcode,
1177  &found);
1178  if (!lock)
1179  return NULL;
1180 
1181  /*
1182  * if it's a new lock object, initialize it
1183  */
1184  if (!found)
1185  {
1186  lock->grantMask = 0;
1187  lock->waitMask = 0;
1188  dlist_init(&lock->procLocks);
1189  dclist_init(&lock->waitProcs);
1190  lock->nRequested = 0;
1191  lock->nGranted = 0;
1192  MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
1193  MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
1194  LOCK_PRINT("LockAcquire: new", lock, lockmode);
1195  }
1196  else
1197  {
1198  LOCK_PRINT("LockAcquire: found", lock, lockmode);
1199  Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
1200  Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
1201  Assert(lock->nGranted <= lock->nRequested);
1202  }
1203 
1204  /*
1205  * Create the hash key for the proclock table.
1206  */
1207  proclocktag.myLock = lock;
1208  proclocktag.myProc = proc;
1209 
1210  proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
1211 
1212  /*
1213  * Find or create a proclock entry with this tag
1214  */
/* NOTE(review): the "proclock = (PROCLOCK *) hash_search_with_hash_value(
 * LockMethodProcLockHash," call head at original line 1215 and the
 * HASH_ENTER_NULL argument line (1218) were dropped by the extraction. */
1216  &proclocktag,
1217  proclock_hashcode,
1219  &found);
1220  if (!proclock)
1221  {
1222  /* Oops, not enough shmem for the proclock */
1223  if (lock->nRequested == 0)
1224  {
1225  /*
1226  * There are no other requestors of this lock, so garbage-collect
1227  * the lock object. We *must* do this to avoid a permanent leak
1228  * of shared memory, because there won't be anything to cause
1229  * anyone to release the lock object later.
1230  */
1231  Assert(dlist_is_empty(&(lock->procLocks)));
/* NOTE(review): the "if (!hash_search_with_hash_value(LockMethodLockHash,"
 * call head at original line 1232 was dropped by the extraction. */
1233  &(lock->tag),
1234  hashcode,
1235  HASH_REMOVE,
1236  NULL))
1237  elog(PANIC, "lock table corrupted");
1238  }
1239  return NULL;
1240  }
1241 
1242  /*
1243  * If new, initialize the new entry
1244  */
1245  if (!found)
1246  {
1247  uint32 partition = LockHashPartition(hashcode);
1248 
1249  /*
1250  * It might seem unsafe to access proclock->groupLeader without a
1251  * lock, but it's not really. Either we are initializing a proclock
1252  * on our own behalf, in which case our group leader isn't changing
1253  * because the group leader for a process can only ever be changed by
1254  * the process itself; or else we are transferring a fast-path lock to
1255  * the main lock table, in which case that process can't change its
1256  * lock group leader without first releasing all of its locks (and in
1257  * particular the one we are currently transferring).
1258  */
1259  proclock->groupLeader = proc->lockGroupLeader != NULL ?
1260  proc->lockGroupLeader : proc;
1261  proclock->holdMask = 0;
1262  proclock->releaseMask = 0;
1263  /* Add proclock to appropriate lists */
1264  dlist_push_tail(&lock->procLocks, &proclock->lockLink);
1265  dlist_push_tail(&proc->myProcLocks[partition], &proclock->procLink);
1266  PROCLOCK_PRINT("LockAcquire: new", proclock);
1267  }
1268  else
1269  {
1270  PROCLOCK_PRINT("LockAcquire: found", proclock);
1271  Assert((proclock->holdMask & ~lock->grantMask) == 0);
1272 
1273 #ifdef CHECK_DEADLOCK_RISK
1274 
1275  /*
1276  * Issue warning if we already hold a lower-level lock on this object
1277  * and do not hold a lock of the requested level or higher. This
1278  * indicates a deadlock-prone coding practice (eg, we'd have a
1279  * deadlock if another backend were following the same code path at
1280  * about the same time).
1281  *
1282  * This is not enabled by default, because it may generate log entries
1283  * about user-level coding practices that are in fact safe in context.
1284  * It can be enabled to help find system-level problems.
1285  *
1286  * XXX Doing numeric comparison on the lockmodes is a hack; it'd be
1287  * better to use a table. For now, though, this works.
1288  */
1289  {
1290  int i;
1291 
1292  for (i = lockMethodTable->numLockModes; i > 0; i--)
1293  {
1294  if (proclock->holdMask & LOCKBIT_ON(i))
1295  {
1296  if (i >= (int) lockmode)
1297  break; /* safe: we have a lock >= req level */
1298  elog(LOG, "deadlock risk: raising lock level"
1299  " from %s to %s on object %u/%u/%u",
1300  lockMethodTable->lockModeNames[i],
1301  lockMethodTable->lockModeNames[lockmode],
1302  lock->tag.locktag_field1, lock->tag.locktag_field2,
1303  lock->tag.locktag_field3);
1304  break;
1305  }
1306  }
1307  }
1308 #endif /* CHECK_DEADLOCK_RISK */
1309  }
1310 
1311  /*
1312  * lock->nRequested and lock->requested[] count the total number of
1313  * requests, whether granted or waiting, so increment those immediately.
1314  * The other counts don't increment till we get the lock.
1315  */
1316  lock->nRequested++;
1317  lock->requested[lockmode]++;
1318  Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
1319 
1320  /*
1321  * We shouldn't already hold the desired lock; else locallock table is
1322  * broken.
1323  */
1324  if (proclock->holdMask & LOCKBIT_ON(lockmode))
1325  elog(ERROR, "lock %s on object %u/%u/%u is already held",
1326  lockMethodTable->lockModeNames[lockmode],
1327  lock->tag.locktag_field1, lock->tag.locktag_field2,
1328  lock->tag.locktag_field3);
1329 
1330  return proclock;
1331 }

References Assert, dclist_init(), dlist_init(), dlist_is_empty(), dlist_push_tail(), elog, ERROR, LOCK::granted, LOCK::grantMask, PROCLOCK::groupLeader, HASH_ENTER_NULL, HASH_REMOVE, hash_search_with_hash_value(), PROCLOCK::holdMask, i, LOCK_PRINT, LOCKBIT_ON, PGPROC::lockGroupLeader, LockHashPartition, PROCLOCK::lockLink, LockMethodLockHash, LockMethodProcLockHash, LockMethodData::lockModeNames, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_field3, LOG, MAX_LOCKMODES, MemSet, PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, PGPROC::myProcLocks, LOCK::nGranted, LOCK::nRequested, LockMethodData::numLockModes, PANIC, PROCLOCK::procLink, PROCLOCK_PRINT, ProcLockHashCode(), LOCK::procLocks, PROCLOCK::releaseMask, LOCK::requested, LOCK::tag, LOCK::waitMask, and LOCK::waitProcs.

Referenced by FastPathGetRelationLockEntry(), FastPathTransferRelationLocks(), LockAcquireExtended(), and VirtualXactLock().

◆ UnGrantLock()

static bool UnGrantLock ( LOCK lock,
LOCKMODE  lockmode,
PROCLOCK proclock,
LockMethod  lockMethodTable 
)
static

Definition at line 1559 of file lock.c.

1561 {
1562  bool wakeupNeeded = false;
1563 
1564  Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
1565  Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
1566  Assert(lock->nGranted <= lock->nRequested);
1567 
1568  /*
1569  * fix the general lock stats
1570  */
1571  lock->nRequested--;
1572  lock->requested[lockmode]--;
1573  lock->nGranted--;
1574  lock->granted[lockmode]--;
1575 
1576  if (lock->granted[lockmode] == 0)
1577  {
1578  /* change the conflict mask. No more of this lock type. */
1579  lock->grantMask &= LOCKBIT_OFF(lockmode);
1580  }
1581 
1582  LOCK_PRINT("UnGrantLock: updated", lock, lockmode);
1583 
1584  /*
1585  * We need only run ProcLockWakeup if the released lock conflicts with at
1586  * least one of the lock types requested by waiter(s). Otherwise whatever
1587  * conflict made them wait must still exist. NOTE: before MVCC, we could
1588  * skip wakeup if lock->granted[lockmode] was still positive. But that's
1589  * not true anymore, because the remaining granted locks might belong to
1590  * some waiter, who could now be awakened because he doesn't conflict with
1591  * his own locks.
1592  */
1593  if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
1594  wakeupNeeded = true;
1595 
1596  /*
1597  * Now fix the per-proclock state.
1598  */
1599  proclock->holdMask &= LOCKBIT_OFF(lockmode);
1600  PROCLOCK_PRINT("UnGrantLock: updated", proclock);
1601 
1602  return wakeupNeeded;
1603 }

References Assert, LockMethodData::conflictTab, LOCK::granted, LOCK::grantMask, PROCLOCK::holdMask, LOCK_PRINT, LOCKBIT_OFF, LOCK::nGranted, LOCK::nRequested, PROCLOCK_PRINT, LOCK::requested, and LOCK::waitMask.

Referenced by LockRefindAndRelease(), LockRelease(), and LockReleaseAll().

◆ VirtualXactLock()

bool VirtualXactLock ( VirtualTransactionId  vxid,
bool  wait 
)

Definition at line 4528 of file lock.c.

/*
 * VirtualXactLock
 *		Wait for (or test for) the end of the transaction identified by
 *		`vxid`; returns true once it is no longer running, false if
 *		wait == false and it is still in progress.
 *
 * If the target backend still holds its VXID as a fast-path lock, that lock
 * is first promoted to a regular entry in the shared lock table so we can
 * block on it with LockAcquire(ShareLock).
 */
4529 {
4530  LOCKTAG tag;
4531  PGPROC *proc;
/* NOTE(review): the "TransactionId xid = InvalidTransactionId;" declaration
 * at original line 4532 was dropped by the documentation extraction. */
4533 
/* NOTE(review): an Assert(VirtualTransactionIdIsValid(vxid)) at original
 * line 4534 was dropped by the extraction, as was the
 * "if (VirtualTransactionIdIsRecoveredPreparedXact(vxid))" guard (4536)
 * that the comment and return below belong to. */
4535 
4537  /* no vxid lock; localTransactionId is a normal, locked XID */
4538  return XactLockForVirtualXact(vxid, vxid.localTransactionId, wait);
4539 
4540  SET_LOCKTAG_VIRTUALTRANSACTION(tag, vxid);
4541 
4542  /*
4543  * If a lock table entry must be made, this is the PGPROC on whose behalf
4544  * it must be done. Note that the transaction might end or the PGPROC
4545  * might be reassigned to a new backend before we get around to examining
4546  * it, but it doesn't matter. If we find upon examination that the
4547  * relevant lxid is no longer running here, that's enough to prove that
4548  * it's no longer running anywhere.
4549  */
4550  proc = ProcNumberGetProc(vxid.procNumber);
4551  if (proc == NULL)
4552  return XactLockForVirtualXact(vxid, InvalidTransactionId, wait);
4553 
4554  /*
4555  * We must acquire this lock before checking the procNumber and lxid
4556  * against the ones we're waiting for. The target backend will only set
4557  * or clear lxid while holding this lock.
4558  */
/* NOTE(review): LWLockAcquire(&proc->fpInfoLock, LW_EXCLUSIVE); at original
 * line 4559 was dropped by the documentation extraction. */
4560 
4561  if (proc->vxid.procNumber != vxid.procNumber
4562  || proc->fpLocalTransactionId != vxid.localTransactionId)
4563  {
4564  /* VXID ended */
4565  LWLockRelease(&proc->fpInfoLock);
4566  return XactLockForVirtualXact(vxid, InvalidTransactionId, wait);
4567  }
4568 
4569  /*
4570  * If we aren't asked to wait, there's no need to set up a lock table
4571  * entry. The transaction is still in progress, so just return false.
4572  */
4573  if (!wait)
4574  {
4575  LWLockRelease(&proc->fpInfoLock);
4576  return false;
4577  }
4578 
4579  /*
4580  * OK, we're going to need to sleep on the VXID. But first, we must set
4581  * up the primary lock table entry, if needed (ie, convert the proc's
4582  * fast-path lock on its VXID to a regular lock).
4583  */
4584  if (proc->fpVXIDLock)
4585  {
4586  PROCLOCK *proclock;
4587  uint32 hashcode;
4588  LWLock *partitionLock;
4589 
4590  hashcode = LockTagHashCode(&tag);
4591 
4592  partitionLock = LockHashPartitionLock(hashcode);
4593  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4594 
/* NOTE(review): the "proclock = SetupLockInTable(..." call head at original
 * line 4595 was dropped by the documentation extraction. */
4596  &tag, hashcode, ExclusiveLock);
4597  if (!proclock)
4598  {
4599  LWLockRelease(partitionLock);
4600  LWLockRelease(&proc->fpInfoLock);
4601  ereport(ERROR,
4602  (errcode(ERRCODE_OUT_OF_MEMORY),
4603  errmsg("out of shared memory"),
4604  errhint("You might need to increase %s.", "max_locks_per_transaction")));
4605  }
4606  GrantLock(proclock->tag.myLock, proclock, ExclusiveLock);
4607 
4608  LWLockRelease(partitionLock);
4609 
4610  proc->fpVXIDLock = false;
4611  }
4612 
4613  /*
4614  * If the proc has an XID now, we'll avoid a TwoPhaseGetXidByVirtualXID()
4615  * search. The proc might have assigned this XID but not yet locked it,
4616  * in which case the proc will lock this XID before releasing the VXID.
4617  * The fpInfoLock critical section excludes VirtualXactLockTableCleanup(),
4618  * so we won't save an XID of a different VXID. It doesn't matter whether
4619  * we save this before or after setting up the primary lock table entry.
4620  */
4621  xid = proc->xid;
4622 
4623  /* Done with proc->fpLockBits */
4624  LWLockRelease(&proc->fpInfoLock);
4625 
4626  /* Time to wait. */
4627  (void) LockAcquire(&tag, ShareLock, false, false);
4628 
4629  LockRelease(&tag, ShareLock, false);
4630  return XactLockForVirtualXact(vxid, xid, wait);
4631 }
static bool XactLockForVirtualXact(VirtualTransactionId vxid, TransactionId xid, bool wait)
Definition: lock.c:4477
LockAcquireResult LockAcquire(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock, bool dontWait)
Definition: lock.c:734
#define VirtualTransactionIdIsRecoveredPreparedXact(vxid)
Definition: lock.h:69
#define ShareLock
Definition: lockdefs.h:40
PGPROC * ProcNumberGetProc(ProcNumber procNumber)
Definition: procarray.c:3125
#define InvalidTransactionId
Definition: transam.h:31

References Assert, DEFAULT_LOCKMETHOD, ereport, errcode(), errhint(), errmsg(), ERROR, ExclusiveLock, PGPROC::fpInfoLock, PGPROC::fpLocalTransactionId, PGPROC::fpVXIDLock, GrantLock(), InvalidTransactionId, VirtualTransactionId::localTransactionId, LockAcquire(), LockHashPartitionLock, LockMethods, LockRelease(), LockTagHashCode(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), PROCLOCKTAG::myLock, VirtualTransactionId::procNumber, PGPROC::procNumber, ProcNumberGetProc(), SET_LOCKTAG_VIRTUALTRANSACTION, SetupLockInTable(), ShareLock, PROCLOCK::tag, VirtualTransactionIdIsRecoveredPreparedXact, VirtualTransactionIdIsValid, PGPROC::vxid, XactLockForVirtualXact(), and PGPROC::xid.

Referenced by ResolveRecoveryConflictWithVirtualXIDs(), WaitForLockersMultiple(), and WaitForOlderSnapshots().

◆ VirtualXactLockTableCleanup()

void VirtualXactLockTableCleanup ( void  )

Definition at line 4428 of file lock.c.

4429 {
4430  bool fastpath;
4431  LocalTransactionId lxid;
4432 
4433  Assert(MyProc->vxid.procNumber != INVALID_PROC_NUMBER);
4434 
4435  /*
4436  * Clean up shared memory state.
4437  */
4438  LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
4439 
4440  fastpath = MyProc->fpVXIDLock;
4441  lxid = MyProc->fpLocalTransactionId;
4442  MyProc->fpVXIDLock = false;
4443  MyProc->fpLocalTransactionId = InvalidLocalTransactionId;
4444 
4445  LWLockRelease(&MyProc->fpInfoLock);
4446 
4447  /*
4448  * If fpVXIDLock has been cleared without touching fpLocalTransactionId,
4449  * that means someone transferred the lock to the main lock table.
4450  */
4451  if (!fastpath && LocalTransactionIdIsValid(lxid))
4452  {
4453  VirtualTransactionId vxid;
4454  LOCKTAG locktag;
4455 
4456  vxid.procNumber = MyProcNumber;
4457  vxid.localTransactionId = lxid;
4458  SET_LOCKTAG_VIRTUALTRANSACTION(locktag, vxid);
4459 
4460  LockRefindAndRelease(LockMethods[DEFAULT_LOCKMETHOD], MyProc,
4461  &locktag, ExclusiveLock, false);
4462  }
4463 }
uint32 LocalTransactionId
Definition: c.h:654
ProcNumber MyProcNumber
Definition: globals.c:87
#define LocalTransactionIdIsValid(lxid)
Definition: lock.h:66

References Assert, DEFAULT_LOCKMETHOD, ExclusiveLock, PGPROC::fpInfoLock, PGPROC::fpLocalTransactionId, PGPROC::fpVXIDLock, INVALID_PROC_NUMBER, InvalidLocalTransactionId, VirtualTransactionId::localTransactionId, LocalTransactionIdIsValid, LockMethods, LockRefindAndRelease(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), MyProc, MyProcNumber, VirtualTransactionId::procNumber, PGPROC::procNumber, SET_LOCKTAG_VIRTUALTRANSACTION, and PGPROC::vxid.

Referenced by LockReleaseAll(), and ShutdownRecoveryTransactionEnvironment().

◆ VirtualXactLockTableInsert()

◆ WaitOnLock()

static void WaitOnLock ( LOCALLOCK locallock,
ResourceOwner  owner,
bool  dontWait 
)
static

Definition at line 1796 of file lock.c.

1797 {
1798  LOCKMETHODID lockmethodid = LOCALLOCK_LOCKMETHOD(*locallock);
1799  LockMethod lockMethodTable = LockMethods[lockmethodid];
1800 
1801  LOCK_PRINT("WaitOnLock: sleeping on lock",
1802  locallock->lock, locallock->tag.mode);
1803 
1804  /* adjust the process title to indicate that it's waiting */
1805  set_ps_display_suffix("waiting");
1806 
1807  awaitedLock = locallock;
1808  awaitedOwner = owner;
1809 
1810  /*
1811  * NOTE: Think not to put any shared-state cleanup after the call to
1812  * ProcSleep, in either the normal or failure path. The lock state must
1813  * be fully set by the lock grantor, or by CheckDeadLock if we give up
1814  * waiting for the lock. This is necessary because of the possibility
1815  * that a cancel/die interrupt will interrupt ProcSleep after someone else
1816  * grants us the lock, but before we've noticed it. Hence, after granting,
1817  * the locktable state must fully reflect the fact that we own the lock;
1818  * we can't do additional work on return.
1819  *
1820  * We can and do use a PG_TRY block to try to clean up after failure, but
1821  * this still has a major limitation: elog(FATAL) can occur while waiting
1822  * (eg, a "die" interrupt), and then control won't come back here. So all
1823  * cleanup of essential state should happen in LockErrorCleanup, not here.
1824  * We can use PG_TRY to clear the "waiting" status flags, since doing that
1825  * is unimportant if the process exits.
1826  */
1827  PG_TRY();
1828  {
1829  /*
1830  * If dontWait = true, we handle success and failure in the same way
1831  * here. The caller will be able to sort out what has happened.
1832  */
1833  if (ProcSleep(locallock, lockMethodTable, dontWait) != PROC_WAIT_STATUS_OK
1834  && !dontWait)
1835  {
1836 
1837  /*
1838  * We failed as a result of a deadlock, see CheckDeadLock(). Quit
1839  * now.
1840  */
1841  awaitedLock = NULL;
1842  LOCK_PRINT("WaitOnLock: aborting on lock",
1843  locallock->lock, locallock->tag.mode);
1844  LWLockRelease(LockHashPartitionLock(locallock->hashcode));
1845 
1846  /*
1847  * Now that we aren't holding the partition lock, we can give an
1848  * error report including details about the detected deadlock.
1849  */
1850  DeadLockReport();
1851  /* not reached */
1852  }
1853  }
1854  PG_CATCH();
1855  {
1856  /* In this path, awaitedLock remains set until LockErrorCleanup */
1857 
1858  /* reset ps display to remove the suffix */
1859  set_ps_display_remove_suffix();
1860 
1861  /* and propagate the error */
1862  PG_RE_THROW();
1863  }
1864  PG_END_TRY();
1865 
1866  awaitedLock = NULL;
1867 
1868  /* reset ps display to remove the suffix */
1869  set_ps_display_remove_suffix();
1870 
1871  LOCK_PRINT("WaitOnLock: wakeup on lock",
1872  locallock->lock, locallock->tag.mode);
1873 }
void DeadLockReport(void)
Definition: deadlock.c:1072
#define PG_RE_THROW()
Definition: elog.h:411
#define PG_TRY(...)
Definition: elog.h:370
#define PG_END_TRY(...)
Definition: elog.h:395
#define PG_CATCH(...)
Definition: elog.h:380
@ PROC_WAIT_STATUS_OK
Definition: proc.h:119
void set_ps_display_remove_suffix(void)
Definition: ps_status.c:421
void set_ps_display_suffix(const char *suffix)
Definition: ps_status.c:369
ProcWaitStatus ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable, bool dontWait)
Definition: proc.c:1066

References awaitedLock, awaitedOwner, DeadLockReport(), LOCALLOCK::hashcode, LOCALLOCK_LOCKMETHOD, LOCALLOCK::lock, LOCK_PRINT, LockHashPartitionLock, LockMethods, LWLockRelease(), LOCALLOCKTAG::mode, PG_CATCH, PG_END_TRY, PG_RE_THROW, PG_TRY, PROC_WAIT_STATUS_OK, ProcSleep(), set_ps_display_remove_suffix(), set_ps_display_suffix(), and LOCALLOCK::tag.

Referenced by LockAcquireExtended().

◆ XactLockForVirtualXact()

static bool XactLockForVirtualXact ( VirtualTransactionId  vxid,
TransactionId  xid,
bool  wait 
)
static

Definition at line 4477 of file lock.c.

4479 {
4480  bool more = false;
4481 
4482  /* There is no point to wait for 2PCs if you have no 2PCs. */
4483  if (max_prepared_xacts == 0)
4484  return true;
4485 
4486  do
4487  {
4488  LockAcquireResult lar;
4489  LOCKTAG tag;
4490 
4491  /* Clear state from previous iterations. */
4492  if (more)
4493  {
4494  xid = InvalidTransactionId;
4495  more = false;
4496  }
4497 
4498  /* If we have no xid, try to find one. */
4499  if (!TransactionIdIsValid(xid))
4500  xid = TwoPhaseGetXidByVirtualXID(vxid, &more);
4501  if (!TransactionIdIsValid(xid))
4502  {
4503  Assert(!more);
4504  return true;
4505  }
4506 
4507  /* Check or wait for XID completion. */
4508  SET_LOCKTAG_TRANSACTION(tag, xid);
4509  lar = LockAcquire(&tag, ShareLock, false, !wait);
4510  if (lar == LOCKACQUIRE_NOT_AVAIL)
4511  return false;
4512  LockRelease(&tag, ShareLock, false);
4513  } while (more);
4514 
4515  return true;
4516 }
#define SET_LOCKTAG_TRANSACTION(locktag, xid)
Definition: lock.h:226
LockAcquireResult
Definition: lock.h:500
TransactionId TwoPhaseGetXidByVirtualXID(VirtualTransactionId vxid, bool *have_more)
Definition: twophase.c:852

References Assert, InvalidTransactionId, LockAcquire(), LOCKACQUIRE_NOT_AVAIL, LockRelease(), max_prepared_xacts, SET_LOCKTAG_TRANSACTION, ShareLock, TransactionIdIsValid, and TwoPhaseGetXidByVirtualXID().

Referenced by VirtualXactLock().

Variable Documentation

◆ awaitedLock

LOCALLOCK* awaitedLock
static

Definition at line 274 of file lock.c.

Referenced by GrantAwaitedLock(), and WaitOnLock().

◆ awaitedOwner

ResourceOwner awaitedOwner
static

Definition at line 275 of file lock.c.

Referenced by GrantAwaitedLock(), and WaitOnLock().

◆ default_lockmethod

const LockMethodData default_lockmethod
static
Initial value:
= {
MaxLockMode,
LockConflicts,
lock_mode_names,
&Dummy_trace
}
static bool Dummy_trace
Definition: lock.c:121
static const char *const lock_mode_names[]
Definition: lock.c:107
static const LOCKMASK LockConflicts[]
Definition: lock.c:64
#define MaxLockMode
Definition: lockdefs.h:45

Definition at line 124 of file lock.c.

◆ Dummy_trace

bool Dummy_trace = false
static

Definition at line 121 of file lock.c.

◆ FastPathLocalUseCount

int FastPathLocalUseCount = 0
static

◆ FastPathStrongRelationLocks

◆ lock_mode_names

const char* const lock_mode_names[]
static
Initial value:
=
{
"INVALID",
"AccessShareLock",
"RowShareLock",
"RowExclusiveLock",
"ShareUpdateExclusiveLock",
"ShareLock",
"ShareRowExclusiveLock",
"ExclusiveLock",
"AccessExclusiveLock"
}

Definition at line 107 of file lock.c.

◆ LockConflicts

const LOCKMASK LockConflicts[]
static

Definition at line 64 of file lock.c.

◆ LockMethodLocalHash

◆ LockMethodLockHash

◆ LockMethodProcLockHash

◆ LockMethods

◆ max_locks_per_xact

int max_locks_per_xact

Definition at line 53 of file lock.c.

Referenced by CheckRequiredParameterValues(), InitControlFile(), and XLogReportParameters().

◆ PG_USED_FOR_ASSERTS_ONLY

bool IsRelationExtensionLockHeld PG_USED_FOR_ASSERTS_ONLY = false
static

Definition at line 185 of file lock.c.

◆ StrongLockInProgress

LOCALLOCK* StrongLockInProgress
static

Definition at line 273 of file lock.c.

Referenced by AbortStrongLockAcquire(), BeginStrongLockAcquire(), and FinishStrongLockAcquire().

◆ user_lockmethod

const LockMethodData user_lockmethod
static
Initial value:

Definition at line 135 of file lock.c.