PostgreSQL Source Code  git master
lock.c File Reference
#include "postgres.h"
#include <signal.h>
#include <unistd.h>
#include "access/transam.h"
#include "access/twophase.h"
#include "access/twophase_rmgr.h"
#include "access/xact.h"
#include "access/xlog.h"
#include "access/xlogutils.h"
#include "miscadmin.h"
#include "pg_trace.h"
#include "pgstat.h"
#include "storage/proc.h"
#include "storage/procarray.h"
#include "storage/sinvaladt.h"
#include "storage/spin.h"
#include "storage/standby.h"
#include "utils/memutils.h"
#include "utils/ps_status.h"
#include "utils/resowner_private.h"
Include dependency graph for lock.c:

Go to the source code of this file.

Data Structures

struct  TwoPhaseLockRecord
 
struct  FastPathStrongRelationLockData
 

Macros

#define NLOCKENTS()    mul_size(max_locks_per_xact, add_size(MaxBackends, max_prepared_xacts))
 
#define FAST_PATH_BITS_PER_SLOT   3
 
#define FAST_PATH_LOCKNUMBER_OFFSET   1
 
#define FAST_PATH_MASK   ((1 << FAST_PATH_BITS_PER_SLOT) - 1)
 
#define FAST_PATH_GET_BITS(proc, n)    (((proc)->fpLockBits >> (FAST_PATH_BITS_PER_SLOT * n)) & FAST_PATH_MASK)
 
#define FAST_PATH_BIT_POSITION(n, l)
 
#define FAST_PATH_SET_LOCKMODE(proc, n, l)    (proc)->fpLockBits |= UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)
 
#define FAST_PATH_CLEAR_LOCKMODE(proc, n, l)    (proc)->fpLockBits &= ~(UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l))
 
#define FAST_PATH_CHECK_LOCKMODE(proc, n, l)    ((proc)->fpLockBits & (UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)))
 
#define EligibleForRelationFastPath(locktag, mode)
 
#define ConflictsWithRelationFastPath(locktag, mode)
 
#define FAST_PATH_STRONG_LOCK_HASH_BITS   10
 
#define FAST_PATH_STRONG_LOCK_HASH_PARTITIONS    (1 << FAST_PATH_STRONG_LOCK_HASH_BITS)
 
#define FastPathStrongLockHashPartition(hashcode)    ((hashcode) % FAST_PATH_STRONG_LOCK_HASH_PARTITIONS)
 
#define LOCK_PRINT(where, lock, type)   ((void) 0)
 
#define PROCLOCK_PRINT(where, proclockP)   ((void) 0)
 

Typedefs

typedef struct TwoPhaseLockRecord TwoPhaseLockRecord
 

Functions

static bool FastPathGrantRelationLock (Oid relid, LOCKMODE lockmode)
 
static bool FastPathUnGrantRelationLock (Oid relid, LOCKMODE lockmode)
 
static bool FastPathTransferRelationLocks (LockMethod lockMethodTable, const LOCKTAG *locktag, uint32 hashcode)
 
static PROCLOCK * FastPathGetRelationLockEntry (LOCALLOCK *locallock)
 
static uint32 proclock_hash (const void *key, Size keysize)
 
static void RemoveLocalLock (LOCALLOCK *locallock)
 
static PROCLOCK * SetupLockInTable (LockMethod lockMethodTable, PGPROC *proc, const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode)
 
static void GrantLockLocal (LOCALLOCK *locallock, ResourceOwner owner)
 
static void BeginStrongLockAcquire (LOCALLOCK *locallock, uint32 fasthashcode)
 
static void FinishStrongLockAcquire (void)
 
static void WaitOnLock (LOCALLOCK *locallock, ResourceOwner owner)
 
static void ReleaseLockIfHeld (LOCALLOCK *locallock, bool sessionLock)
 
static void LockReassignOwner (LOCALLOCK *locallock, ResourceOwner parent)
 
static bool UnGrantLock (LOCK *lock, LOCKMODE lockmode, PROCLOCK *proclock, LockMethod lockMethodTable)
 
static void CleanUpLock (LOCK *lock, PROCLOCK *proclock, LockMethod lockMethodTable, uint32 hashcode, bool wakeupNeeded)
 
static void LockRefindAndRelease (LockMethod lockMethodTable, PGPROC *proc, LOCKTAG *locktag, LOCKMODE lockmode, bool decrement_strong_lock_count)
 
static void GetSingleProcBlockerStatusData (PGPROC *blocked_proc, BlockedProcsData *data)
 
void InitLocks (void)
 
LockMethod GetLocksMethodTable (const LOCK *lock)
 
LockMethod GetLockTagsMethodTable (const LOCKTAG *locktag)
 
uint32 LockTagHashCode (const LOCKTAG *locktag)
 
static uint32 ProcLockHashCode (const PROCLOCKTAG *proclocktag, uint32 hashcode)
 
bool DoLockModesConflict (LOCKMODE mode1, LOCKMODE mode2)
 
bool LockHeldByMe (const LOCKTAG *locktag, LOCKMODE lockmode)
 
bool LockHasWaiters (const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
 
LockAcquireResult LockAcquire (const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock, bool dontWait)
 
LockAcquireResult LockAcquireExtended (const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock, bool dontWait, bool reportMemoryError, LOCALLOCK **locallockp)
 
static void CheckAndSetLockHeld (LOCALLOCK *locallock, bool acquired)
 
bool LockCheckConflicts (LockMethod lockMethodTable, LOCKMODE lockmode, LOCK *lock, PROCLOCK *proclock)
 
void GrantLock (LOCK *lock, PROCLOCK *proclock, LOCKMODE lockmode)
 
void AbortStrongLockAcquire (void)
 
void GrantAwaitedLock (void)
 
void MarkLockClear (LOCALLOCK *locallock)
 
void RemoveFromWaitQueue (PGPROC *proc, uint32 hashcode)
 
bool LockRelease (const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
 
void LockReleaseAll (LOCKMETHODID lockmethodid, bool allLocks)
 
void LockReleaseSession (LOCKMETHODID lockmethodid)
 
void LockReleaseCurrentOwner (LOCALLOCK **locallocks, int nlocks)
 
void LockReassignCurrentOwner (LOCALLOCK **locallocks, int nlocks)
 
VirtualTransactionId * GetLockConflicts (const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
 
static void CheckForSessionAndXactLocks (void)
 
void AtPrepare_Locks (void)
 
void PostPrepare_Locks (TransactionId xid)
 
Size LockShmemSize (void)
 
LockData * GetLockStatusData (void)
 
BlockedProcsData * GetBlockerStatusData (int blocked_pid)
 
xl_standby_lock * GetRunningTransactionLocks (int *nlocks)
 
const char * GetLockmodeName (LOCKMETHODID lockmethodid, LOCKMODE mode)
 
void lock_twophase_recover (TransactionId xid, uint16 info, void *recdata, uint32 len)
 
void lock_twophase_standby_recover (TransactionId xid, uint16 info, void *recdata, uint32 len)
 
void lock_twophase_postcommit (TransactionId xid, uint16 info, void *recdata, uint32 len)
 
void lock_twophase_postabort (TransactionId xid, uint16 info, void *recdata, uint32 len)
 
void VirtualXactLockTableInsert (VirtualTransactionId vxid)
 
void VirtualXactLockTableCleanup (void)
 
static bool XactLockForVirtualXact (VirtualTransactionId vxid, TransactionId xid, bool wait)
 
bool VirtualXactLock (VirtualTransactionId vxid, bool wait)
 
int LockWaiterCount (const LOCKTAG *locktag)
 

Variables

int max_locks_per_xact
 
static const LOCKMASK LockConflicts []
 
static const char *const lock_mode_names []
 
static bool Dummy_trace = false
 
static const LockMethodData default_lockmethod
 
static const LockMethodData user_lockmethod
 
static const LockMethod LockMethods []
 
static int FastPathLocalUseCount = 0
 
static bool IsRelationExtensionLockHeld PG_USED_FOR_ASSERTS_ONLY = false
 
static volatile FastPathStrongRelationLockData * FastPathStrongRelationLocks
 
static HTAB * LockMethodLockHash
 
static HTAB * LockMethodProcLockHash
 
static HTAB * LockMethodLocalHash
 
static LOCALLOCK * StrongLockInProgress
 
static LOCALLOCK * awaitedLock
 
static ResourceOwner awaitedOwner
 

Macro Definition Documentation

◆ ConflictsWithRelationFastPath

#define ConflictsWithRelationFastPath (   locktag,
  mode 
)
Value:
((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
(locktag)->locktag_type == LOCKTAG_RELATION && \
(locktag)->locktag_field1 != InvalidOid && \
(mode) > ShareUpdateExclusiveLock)
#define DEFAULT_LOCKMETHOD
Definition: lock.h:125
@ LOCKTAG_RELATION
Definition: lock.h:137
#define ShareUpdateExclusiveLock
Definition: lockdefs.h:39
static PgChecksumMode mode
Definition: pg_checksums.c:56
#define InvalidOid
Definition: postgres_ext.h:36

Definition at line 221 of file lock.c.

◆ EligibleForRelationFastPath

#define EligibleForRelationFastPath (   locktag,
  mode 
)
Value:
((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
(locktag)->locktag_type == LOCKTAG_RELATION && \
(locktag)->locktag_field1 == MyDatabaseId && \
(mode) < ShareUpdateExclusiveLock)
Oid MyDatabaseId
Definition: globals.c:89

Definition at line 215 of file lock.c.

◆ FAST_PATH_BIT_POSITION

#define FAST_PATH_BIT_POSITION (   n,
  l 
)
Value:
(AssertMacro((l) >= FAST_PATH_LOCKNUMBER_OFFSET), \
AssertMacro((l) < FAST_PATH_BITS_PER_SLOT + FAST_PATH_LOCKNUMBER_OFFSET), \
AssertMacro((n) < FP_LOCK_SLOTS_PER_BACKEND), \
((l) - FAST_PATH_LOCKNUMBER_OFFSET + FAST_PATH_BITS_PER_SLOT * (n)))
#define AssertMacro(condition)
Definition: c.h:848
#define FAST_PATH_LOCKNUMBER_OFFSET
Definition: lock.c:191
#define FAST_PATH_BITS_PER_SLOT
Definition: lock.c:190
#define FP_LOCK_SLOTS_PER_BACKEND
Definition: proc.h:79

Definition at line 195 of file lock.c.

◆ FAST_PATH_BITS_PER_SLOT

#define FAST_PATH_BITS_PER_SLOT   3

Definition at line 190 of file lock.c.

◆ FAST_PATH_CHECK_LOCKMODE

#define FAST_PATH_CHECK_LOCKMODE (   proc,
  n,
  l 
)     ((proc)->fpLockBits & (UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)))

Definition at line 204 of file lock.c.

◆ FAST_PATH_CLEAR_LOCKMODE

#define FAST_PATH_CLEAR_LOCKMODE (   proc,
  n,
  l 
)     (proc)->fpLockBits &= ~(UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l))

Definition at line 202 of file lock.c.

◆ FAST_PATH_GET_BITS

#define FAST_PATH_GET_BITS (   proc,
  n 
)     (((proc)->fpLockBits >> (FAST_PATH_BITS_PER_SLOT * n)) & FAST_PATH_MASK)

Definition at line 193 of file lock.c.

◆ FAST_PATH_LOCKNUMBER_OFFSET

#define FAST_PATH_LOCKNUMBER_OFFSET   1

Definition at line 191 of file lock.c.

◆ FAST_PATH_MASK

#define FAST_PATH_MASK   ((1 << FAST_PATH_BITS_PER_SLOT) - 1)

Definition at line 192 of file lock.c.

◆ FAST_PATH_SET_LOCKMODE

#define FAST_PATH_SET_LOCKMODE (   proc,
  n,
  l 
)     (proc)->fpLockBits |= UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)

Definition at line 200 of file lock.c.

◆ FAST_PATH_STRONG_LOCK_HASH_BITS

#define FAST_PATH_STRONG_LOCK_HASH_BITS   10

Definition at line 248 of file lock.c.

◆ FAST_PATH_STRONG_LOCK_HASH_PARTITIONS

#define FAST_PATH_STRONG_LOCK_HASH_PARTITIONS    (1 << FAST_PATH_STRONG_LOCK_HASH_BITS)

Definition at line 249 of file lock.c.

◆ FastPathStrongLockHashPartition

#define FastPathStrongLockHashPartition (   hashcode)     ((hashcode) % FAST_PATH_STRONG_LOCK_HASH_PARTITIONS)

Definition at line 251 of file lock.c.

◆ LOCK_PRINT

#define LOCK_PRINT (   where,
  lock,
  type 
)    ((void) 0)

Definition at line 353 of file lock.c.

◆ NLOCKENTS

Definition at line 57 of file lock.c.

◆ PROCLOCK_PRINT

#define PROCLOCK_PRINT (   where,
  proclockP 
)    ((void) 0)

Definition at line 354 of file lock.c.

Typedef Documentation

◆ TwoPhaseLockRecord

Function Documentation

◆ AbortStrongLockAcquire()

void AbortStrongLockAcquire ( void  )

Definition at line 1728 of file lock.c.

1729 {
1730  uint32 fasthashcode;
1731  LOCALLOCK *locallock = StrongLockInProgress;
1732 
1733  if (locallock == NULL)
1734  return;
1735 
1736  fasthashcode = FastPathStrongLockHashPartition(locallock->hashcode);
1737  Assert(locallock->holdsStrongLockCount == true);
1738  SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
1739  Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
1740  FastPathStrongRelationLocks->count[fasthashcode]--;
1741  locallock->holdsStrongLockCount = false;
1742  StrongLockInProgress = NULL;
1743  SpinLockRelease(&FastPathStrongRelationLocks->mutex);
1744 }
unsigned int uint32
Definition: c.h:495
Assert(fmt[strlen(fmt) - 1] !='\n')
#define FastPathStrongLockHashPartition(hashcode)
Definition: lock.c:251
static volatile FastPathStrongRelationLockData * FastPathStrongRelationLocks
Definition: lock.c:260
static LOCALLOCK * StrongLockInProgress
Definition: lock.c:275
#define SpinLockRelease(lock)
Definition: spin.h:64
#define SpinLockAcquire(lock)
Definition: spin.h:62
uint32 count[FAST_PATH_STRONG_LOCK_HASH_PARTITIONS]
Definition: lock.c:257
uint32 hashcode
Definition: lock.h:432
bool holdsStrongLockCount
Definition: lock.h:439

References Assert(), FastPathStrongRelationLockData::count, FastPathStrongLockHashPartition, FastPathStrongRelationLocks, LOCALLOCK::hashcode, LOCALLOCK::holdsStrongLockCount, FastPathStrongRelationLockData::mutex, SpinLockAcquire, SpinLockRelease, and StrongLockInProgress.

Referenced by LockAcquireExtended(), and LockErrorCleanup().

◆ AtPrepare_Locks()

void AtPrepare_Locks ( void  )

Definition at line 3255 of file lock.c.

3256 {
3257  HASH_SEQ_STATUS status;
3258  LOCALLOCK *locallock;
3259 
3260  /* First, verify there aren't locks of both xact and session level */
3261  CheckForSessionAndXactLocks();
3262 
3263  /* Now do the per-locallock cleanup work */
3264  hash_seq_init(&status, LockMethodLocalHash);
3265 
3266  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3267  {
3268  TwoPhaseLockRecord record;
3269  LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3270  bool haveSessionLock;
3271  bool haveXactLock;
3272  int i;
3273 
3274  /*
3275  * Ignore VXID locks. We don't want those to be held by prepared
3276  * transactions, since they aren't meaningful after a restart.
3277  */
3278  if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3279  continue;
3280 
3281  /* Ignore it if we don't actually hold the lock */
3282  if (locallock->nLocks <= 0)
3283  continue;
3284 
3285  /* Scan to see whether we hold it at session or transaction level */
3286  haveSessionLock = haveXactLock = false;
3287  for (i = locallock->numLockOwners - 1; i >= 0; i--)
3288  {
3289  if (lockOwners[i].owner == NULL)
3290  haveSessionLock = true;
3291  else
3292  haveXactLock = true;
3293  }
3294 
3295  /* Ignore it if we have only session lock */
3296  if (!haveXactLock)
3297  continue;
3298 
3299  /* This can't happen, because we already checked it */
3300  if (haveSessionLock)
3301  ereport(ERROR,
3302  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3303  errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3304 
3305  /*
3306  * If the local lock was taken via the fast-path, we need to move it
3307  * to the primary lock table, or just get a pointer to the existing
3308  * primary lock table entry if by chance it's already been
3309  * transferred.
3310  */
3311  if (locallock->proclock == NULL)
3312  {
3313  locallock->proclock = FastPathGetRelationLockEntry(locallock);
3314  locallock->lock = locallock->proclock->tag.myLock;
3315  }
3316 
3317  /*
3318  * Arrange to not release any strong lock count held by this lock
3319  * entry. We must retain the count until the prepared transaction is
3320  * committed or rolled back.
3321  */
3322  locallock->holdsStrongLockCount = false;
3323 
3324  /*
3325  * Create a 2PC record.
3326  */
3327  memcpy(&(record.locktag), &(locallock->tag.lock), sizeof(LOCKTAG));
3328  record.lockmode = locallock->tag.mode;
3329 
3330  RegisterTwoPhaseRecord(TWOPHASE_RM_LOCK_ID, 0,
3331  &record, sizeof(TwoPhaseLockRecord));
3332  }
3333 }
void * hash_seq_search(HASH_SEQ_STATUS *status)
Definition: dynahash.c:1431
void hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp)
Definition: dynahash.c:1421
int errcode(int sqlerrcode)
Definition: elog.c:858
int errmsg(const char *fmt,...)
Definition: elog.c:1069
#define ERROR
Definition: elog.h:39
#define ereport(elevel,...)
Definition: elog.h:149
int i
Definition: isn.c:73
static PROCLOCK * FastPathGetRelationLockEntry(LOCALLOCK *locallock)
Definition: lock.c:2751
static HTAB * LockMethodLocalHash
Definition: lock.c:271
static void CheckForSessionAndXactLocks(void)
Definition: lock.c:3167
@ LOCKTAG_VIRTUALTRANSACTION
Definition: lock.h:143
LOCKTAG lock
Definition: lock.h:410
LOCKMODE mode
Definition: lock.h:411
LOCALLOCKOWNER * lockOwners
Definition: lock.h:438
LOCK * lock
Definition: lock.h:433
int64 nLocks
Definition: lock.h:435
int numLockOwners
Definition: lock.h:436
PROCLOCK * proclock
Definition: lock.h:434
LOCALLOCKTAG tag
Definition: lock.h:429
Definition: lock.h:165
uint8 locktag_type
Definition: lock.h:170
LOCK * myLock
Definition: lock.h:365
PROCLOCKTAG tag
Definition: lock.h:372
LOCKTAG locktag
Definition: lock.c:161
LOCKMODE lockmode
Definition: lock.c:162
void RegisterTwoPhaseRecord(TwoPhaseRmgrId rmid, uint16 info, const void *data, uint32 len)
Definition: twophase.c:1258
#define TWOPHASE_RM_LOCK_ID
Definition: twophase_rmgr.h:25

References CheckForSessionAndXactLocks(), ereport, errcode(), errmsg(), ERROR, FastPathGetRelationLockEntry(), hash_seq_init(), hash_seq_search(), LOCALLOCK::holdsStrongLockCount, i, LOCALLOCKTAG::lock, LOCALLOCK::lock, LockMethodLocalHash, TwoPhaseLockRecord::lockmode, LOCALLOCK::lockOwners, TwoPhaseLockRecord::locktag, LOCKTAG::locktag_type, LOCKTAG_VIRTUALTRANSACTION, LOCALLOCKTAG::mode, PROCLOCKTAG::myLock, LOCALLOCK::nLocks, LOCALLOCK::numLockOwners, LOCALLOCK::proclock, RegisterTwoPhaseRecord(), PROCLOCK::tag, LOCALLOCK::tag, and TWOPHASE_RM_LOCK_ID.

Referenced by PrepareTransaction().

◆ BeginStrongLockAcquire()

static void BeginStrongLockAcquire ( LOCALLOCK locallock,
uint32  fasthashcode 
)
static

Definition at line 1692 of file lock.c.

1693 {
1694  Assert(StrongLockInProgress == NULL);
1695  Assert(locallock->holdsStrongLockCount == false);
1696 
1697  /*
1698  * Adding to a memory location is not atomic, so we take a spinlock to
1699  * ensure we don't collide with someone else trying to bump the count at
1700  * the same time.
1701  *
1702  * XXX: It might be worth considering using an atomic fetch-and-add
1703  * instruction here, on architectures where that is supported.
1704  */
1705 
1706  SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
1707  FastPathStrongRelationLocks->count[fasthashcode]++;
1708  locallock->holdsStrongLockCount = true;
1709  StrongLockInProgress = locallock;
1710  SpinLockRelease(&FastPathStrongRelationLocks->mutex);
1711 }

References Assert(), FastPathStrongRelationLockData::count, FastPathStrongRelationLocks, LOCALLOCK::holdsStrongLockCount, FastPathStrongRelationLockData::mutex, SpinLockAcquire, SpinLockRelease, and StrongLockInProgress.

Referenced by LockAcquireExtended().

◆ CheckAndSetLockHeld()

static void CheckAndSetLockHeld ( LOCALLOCK locallock,
bool  acquired 
)
inlinestatic

Definition at line 1332 of file lock.c.

1333 {
1334 #ifdef USE_ASSERT_CHECKING
1335  if (LOCALLOCK_LOCKTAG(*locallock) == LOCKTAG_RELATION_EXTEND)
1336  IsRelationExtensionLockHeld = acquired;
1337 #endif
1338 }
@ LOCKTAG_RELATION_EXTEND
Definition: lock.h:138
#define LOCALLOCK_LOCKTAG(llock)
Definition: lock.h:444

References LOCALLOCK_LOCKTAG, and LOCKTAG_RELATION_EXTEND.

Referenced by GrantLockLocal(), and RemoveLocalLock().

◆ CheckForSessionAndXactLocks()

static void CheckForSessionAndXactLocks ( void  )
static

Definition at line 3167 of file lock.c.

3168 {
3169  typedef struct
3170  {
3171  LOCKTAG lock; /* identifies the lockable object */
3172  bool sessLock; /* is any lockmode held at session level? */
3173  bool xactLock; /* is any lockmode held at xact level? */
3174  } PerLockTagEntry;
3175 
3176  HASHCTL hash_ctl;
3177  HTAB *lockhtab;
3178  HASH_SEQ_STATUS status;
3179  LOCALLOCK *locallock;
3180 
3181  /* Create a local hash table keyed by LOCKTAG only */
3182  hash_ctl.keysize = sizeof(LOCKTAG);
3183  hash_ctl.entrysize = sizeof(PerLockTagEntry);
3184  hash_ctl.hcxt = CurrentMemoryContext;
3185 
3186  lockhtab = hash_create("CheckForSessionAndXactLocks table",
3187  256, /* arbitrary initial size */
3188  &hash_ctl,
3189  HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
3190 
3191  /* Scan local lock table to find entries for each LOCKTAG */
3192  hash_seq_init(&status, LockMethodLocalHash);
3193 
3194  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3195  {
3196  LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3197  PerLockTagEntry *hentry;
3198  bool found;
3199  int i;
3200 
3201  /*
3202  * Ignore VXID locks. We don't want those to be held by prepared
3203  * transactions, since they aren't meaningful after a restart.
3204  */
3205  if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3206  continue;
3207 
3208  /* Ignore it if we don't actually hold the lock */
3209  if (locallock->nLocks <= 0)
3210  continue;
3211 
3212  /* Otherwise, find or make an entry in lockhtab */
3213  hentry = (PerLockTagEntry *) hash_search(lockhtab,
3214  &locallock->tag.lock,
3215  HASH_ENTER, &found);
3216  if (!found) /* initialize, if newly created */
3217  hentry->sessLock = hentry->xactLock = false;
3218 
3219  /* Scan to see if we hold lock at session or xact level or both */
3220  for (i = locallock->numLockOwners - 1; i >= 0; i--)
3221  {
3222  if (lockOwners[i].owner == NULL)
3223  hentry->sessLock = true;
3224  else
3225  hentry->xactLock = true;
3226  }
3227 
3228  /*
3229  * We can throw error immediately when we see both types of locks; no
3230  * need to wait around to see if there are more violations.
3231  */
3232  if (hentry->sessLock && hentry->xactLock)
3233  ereport(ERROR,
3234  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3235  errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3236  }
3237 
3238  /* Success, so clean up */
3239  hash_destroy(lockhtab);
3240 }
void hash_destroy(HTAB *hashp)
Definition: dynahash.c:863
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:953
HTAB * hash_create(const char *tabname, long nelem, const HASHCTL *info, int flags)
Definition: dynahash.c:350
@ HASH_ENTER
Definition: hsearch.h:114
#define HASH_CONTEXT
Definition: hsearch.h:102
#define HASH_ELEM
Definition: hsearch.h:95
#define HASH_BLOBS
Definition: hsearch.h:97
struct LOCKTAG LOCKTAG
MemoryContext CurrentMemoryContext
Definition: mcxt.c:135
Size keysize
Definition: hsearch.h:75
Size entrysize
Definition: hsearch.h:76
MemoryContext hcxt
Definition: hsearch.h:86
Definition: dynahash.c:220

References CurrentMemoryContext, HASHCTL::entrysize, ereport, errcode(), errmsg(), ERROR, HASH_BLOBS, HASH_CONTEXT, hash_create(), hash_destroy(), HASH_ELEM, HASH_ENTER, hash_search(), hash_seq_init(), hash_seq_search(), HASHCTL::hcxt, i, HASHCTL::keysize, LOCALLOCKTAG::lock, LockMethodLocalHash, LOCALLOCK::lockOwners, LOCKTAG::locktag_type, LOCKTAG_VIRTUALTRANSACTION, LOCALLOCK::nLocks, LOCALLOCK::numLockOwners, and LOCALLOCK::tag.

Referenced by AtPrepare_Locks().

◆ CleanUpLock()

static void CleanUpLock ( LOCK lock,
PROCLOCK proclock,
LockMethod  lockMethodTable,
uint32  hashcode,
bool  wakeupNeeded 
)
static

Definition at line 1606 of file lock.c.

1609 {
1610  /*
1611  * If this was my last hold on this lock, delete my entry in the proclock
1612  * table.
1613  */
1614  if (proclock->holdMask == 0)
1615  {
1616  uint32 proclock_hashcode;
1617 
1618  PROCLOCK_PRINT("CleanUpLock: deleting", proclock);
1619  dlist_delete(&proclock->lockLink);
1620  dlist_delete(&proclock->procLink);
1621  proclock_hashcode = ProcLockHashCode(&proclock->tag, hashcode);
1622  if (!hash_search_with_hash_value(LockMethodProcLockHash,
1623  &(proclock->tag),
1624  proclock_hashcode,
1625  HASH_REMOVE,
1626  NULL))
1627  elog(PANIC, "proclock table corrupted");
1628  }
1629 
1630  if (lock->nRequested == 0)
1631  {
1632  /*
1633  * The caller just released the last lock, so garbage-collect the lock
1634  * object.
1635  */
1636  LOCK_PRINT("CleanUpLock: deleting", lock, 0);
1637  Assert(dlist_is_empty(&lock->procLocks));
1638  if (!hash_search_with_hash_value(LockMethodLockHash,
1639  &(lock->tag),
1640  hashcode,
1641  HASH_REMOVE,
1642  NULL))
1643  elog(PANIC, "lock table corrupted");
1644  }
1645  else if (wakeupNeeded)
1646  {
1647  /* There are waiters on this lock, so wake them up. */
1648  ProcLockWakeup(lockMethodTable, lock);
1649  }
1650 }
void * hash_search_with_hash_value(HTAB *hashp, const void *keyPtr, uint32 hashvalue, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:966
#define PANIC
Definition: elog.h:42
@ HASH_REMOVE
Definition: hsearch.h:115
static void dlist_delete(dlist_node *node)
Definition: ilist.h:405
static bool dlist_is_empty(const dlist_head *head)
Definition: ilist.h:336
#define LOCK_PRINT(where, lock, type)
Definition: lock.c:353
static uint32 ProcLockHashCode(const PROCLOCKTAG *proclocktag, uint32 hashcode)
Definition: lock.c:553
#define PROCLOCK_PRINT(where, proclockP)
Definition: lock.c:354
static HTAB * LockMethodLockHash
Definition: lock.c:269
static HTAB * LockMethodProcLockHash
Definition: lock.c:270
void ProcLockWakeup(LockMethod lockMethodTable, LOCK *lock)
Definition: proc.c:1637
int nRequested
Definition: lock.h:319
LOCKTAG tag
Definition: lock.h:311
dlist_head procLocks
Definition: lock.h:316
LOCKMASK holdMask
Definition: lock.h:376
dlist_node lockLink
Definition: lock.h:378
dlist_node procLink
Definition: lock.h:379

References Assert(), dlist_delete(), dlist_is_empty(), elog(), HASH_REMOVE, hash_search_with_hash_value(), PROCLOCK::holdMask, LOCK_PRINT, PROCLOCK::lockLink, LockMethodLockHash, LockMethodProcLockHash, LOCK::nRequested, PANIC, PROCLOCK::procLink, PROCLOCK_PRINT, ProcLockHashCode(), LOCK::procLocks, ProcLockWakeup(), LOCK::tag, and PROCLOCK::tag.

Referenced by LockRefindAndRelease(), LockRelease(), LockReleaseAll(), and RemoveFromWaitQueue().

◆ DoLockModesConflict()

bool DoLockModesConflict ( LOCKMODE  mode1,
LOCKMODE  mode2 
)

Definition at line 571 of file lock.c.

572 {
573  LockMethod lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];
574 
575  if (lockMethodTable->conflictTab[mode1] & LOCKBIT_ON(mode2))
576  return true;
577 
578  return false;
579 }
static const LockMethod LockMethods[]
Definition: lock.c:151
#define LOCKBIT_ON(lockmode)
Definition: lock.h:84
const LOCKMASK * conflictTab
Definition: lock.h:111

References LockMethodData::conflictTab, DEFAULT_LOCKMETHOD, LOCKBIT_ON, and LockMethods.

Referenced by Do_MultiXactIdWait(), DoesMultiXactIdConflict(), initialize_reloptions(), and test_lockmode_for_conflict().

◆ FastPathGetRelationLockEntry()

static PROCLOCK * FastPathGetRelationLockEntry ( LOCALLOCK locallock)
static

Definition at line 2751 of file lock.c.

2752 {
2753  LockMethod lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];
2754  LOCKTAG *locktag = &locallock->tag.lock;
2755  PROCLOCK *proclock = NULL;
2756  LWLock *partitionLock = LockHashPartitionLock(locallock->hashcode);
2757  Oid relid = locktag->locktag_field2;
2758  uint32 f;
2759 
2760  LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
2761 
2762  for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
2763  {
2764  uint32 lockmode;
2765 
2766  /* Look for an allocated slot matching the given relid. */
2767  if (relid != MyProc->fpRelId[f] || FAST_PATH_GET_BITS(MyProc, f) == 0)
2768  continue;
2769 
2770  /* If we don't have a lock of the given mode, forget it! */
2771  lockmode = locallock->tag.mode;
2772  if (!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
2773  break;
2774 
2775  /* Find or create lock object. */
2776  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2777 
2778  proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
2779  locallock->hashcode, lockmode);
2780  if (!proclock)
2781  {
2782  LWLockRelease(partitionLock);
2783  LWLockRelease(&MyProc->fpInfoLock);
2784  ereport(ERROR,
2785  (errcode(ERRCODE_OUT_OF_MEMORY),
2786  errmsg("out of shared memory"),
2787  errhint("You might need to increase %s.", "max_locks_per_transaction")));
2788  }
2789  GrantLock(proclock->tag.myLock, proclock, lockmode);
2790  FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
2791 
2792  LWLockRelease(partitionLock);
2793 
2794  /* No need to examine remaining slots. */
2795  break;
2796  }
2797 
2798  LWLockRelease(&MyProc->fpInfoLock);
2799 
2800  /* Lock may have already been transferred by some other backend. */
2801  if (proclock == NULL)
2802  {
2803  LOCK *lock;
2804  PROCLOCKTAG proclocktag;
2805  uint32 proclock_hashcode;
2806 
2807  LWLockAcquire(partitionLock, LW_SHARED);
2808 
2809  lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
2810  locktag,
2811  locallock->hashcode,
2812  HASH_FIND,
2813  NULL);
2814  if (!lock)
2815  elog(ERROR, "failed to re-find shared lock object");
2816 
2817  proclocktag.myLock = lock;
2818  proclocktag.myProc = MyProc;
2819 
2820  proclock_hashcode = ProcLockHashCode(&proclocktag, locallock->hashcode);
2821  proclock = (PROCLOCK *)
2822  hash_search_with_hash_value(LockMethodProcLockHash,
2823  &proclocktag,
2824  proclock_hashcode,
2825  HASH_FIND,
2826  NULL);
2827  if (!proclock)
2828  elog(ERROR, "failed to re-find shared proclock object");
2829  LWLockRelease(partitionLock);
2830  }
2831 
2832  return proclock;
2833 }
int errhint(const char *fmt,...)
Definition: elog.c:1316
@ HASH_FIND
Definition: hsearch.h:113
static PROCLOCK * SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc, const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode)
Definition: lock.c:1151
#define FAST_PATH_CHECK_LOCKMODE(proc, n, l)
Definition: lock.c:204
void GrantLock(LOCK *lock, PROCLOCK *proclock, LOCKMODE lockmode)
Definition: lock.c:1526
#define FAST_PATH_CLEAR_LOCKMODE(proc, n, l)
Definition: lock.c:202
#define FAST_PATH_GET_BITS(proc, n)
Definition: lock.c:193
#define LockHashPartitionLock(hashcode)
Definition: lock.h:527
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1195
void LWLockRelease(LWLock *lock)
Definition: lwlock.c:1808
@ LW_SHARED
Definition: lwlock.h:117
@ LW_EXCLUSIVE
Definition: lwlock.h:116
unsigned int Oid
Definition: postgres_ext.h:31
PGPROC * MyProc
Definition: proc.c:66
uint32 locktag_field2
Definition: lock.h:167
Definition: lock.h:309
Definition: lwlock.h:41
LWLock fpInfoLock
Definition: proc.h:284
Oid fpRelId[FP_LOCK_SLOTS_PER_BACKEND]
Definition: proc.h:286
PGPROC * myProc
Definition: lock.h:366
Definition: lock.h:370

References DEFAULT_LOCKMETHOD, elog(), ereport, errcode(), errhint(), errmsg(), ERROR, FAST_PATH_CHECK_LOCKMODE, FAST_PATH_CLEAR_LOCKMODE, FAST_PATH_GET_BITS, FP_LOCK_SLOTS_PER_BACKEND, PGPROC::fpInfoLock, PGPROC::fpRelId, GrantLock(), HASH_FIND, hash_search_with_hash_value(), LOCALLOCK::hashcode, LOCALLOCKTAG::lock, LockHashPartitionLock, LockMethodLockHash, LockMethodProcLockHash, LockMethods, LOCKTAG::locktag_field2, LW_EXCLUSIVE, LW_SHARED, LWLockAcquire(), LWLockRelease(), LOCALLOCKTAG::mode, PROCLOCKTAG::myLock, MyProc, PROCLOCKTAG::myProc, ProcLockHashCode(), SetupLockInTable(), PROCLOCK::tag, and LOCALLOCK::tag.

Referenced by AtPrepare_Locks().

◆ FastPathGrantRelationLock()

static bool FastPathGrantRelationLock ( Oid  relid,
LOCKMODE  lockmode 
)
static

Definition at line 2596 of file lock.c.

2597 {
2598  uint32 f;
2599  uint32 unused_slot = FP_LOCK_SLOTS_PER_BACKEND;
2600 
2601  /* Scan for existing entry for this relid, remembering empty slot. */
2602  for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
2603  {
2604  if (FAST_PATH_GET_BITS(MyProc, f) == 0)
2605  unused_slot = f;
2606  else if (MyProc->fpRelId[f] == relid)
2607  {
2608  Assert(!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode));
2609  FAST_PATH_SET_LOCKMODE(MyProc, f, lockmode);
2610  return true;
2611  }
2612  }
2613 
2614  /* If no existing entry, use any empty slot. */
2615  if (unused_slot < FP_LOCK_SLOTS_PER_BACKEND)
2616  {
2617  MyProc->fpRelId[unused_slot] = relid;
2618  FAST_PATH_SET_LOCKMODE(MyProc, unused_slot, lockmode);
2619  ++FastPathLocalUseCount;
2620  return true;
2621  }
2622 
2623  /* No existing entry, and no empty slot. */
2624  return false;
2625 }
#define FAST_PATH_SET_LOCKMODE(proc, n, l)
Definition: lock.c:200
static int FastPathLocalUseCount
Definition: lock.c:172

References Assert(), FAST_PATH_CHECK_LOCKMODE, FAST_PATH_GET_BITS, FAST_PATH_SET_LOCKMODE, FastPathLocalUseCount, FP_LOCK_SLOTS_PER_BACKEND, PGPROC::fpRelId, and MyProc.

Referenced by LockAcquireExtended().

◆ FastPathTransferRelationLocks()

static bool FastPathTransferRelationLocks ( LockMethod  lockMethodTable,
const LOCKTAG locktag,
uint32  hashcode 
)
static

Definition at line 2663 of file lock.c.

2665 {
2666  LWLock *partitionLock = LockHashPartitionLock(hashcode);
2667  Oid relid = locktag->locktag_field2;
2668  uint32 i;
2669 
2670  /*
2671  * Every PGPROC that can potentially hold a fast-path lock is present in
2672  * ProcGlobal->allProcs. Prepared transactions are not, but any
2673  * outstanding fast-path locks held by prepared transactions are
2674  * transferred to the main lock table.
2675  */
2676  for (i = 0; i < ProcGlobal->allProcCount; i++)
2677  {
2678  PGPROC *proc = &ProcGlobal->allProcs[i];
2679  uint32 f;
2680 
2681  LWLockAcquire(&proc->fpInfoLock, LW_SHARED);
2682 
2683  /*
2684  * If the target backend isn't referencing the same database as the
2685  * lock, then we needn't examine the individual relation IDs at all;
2686  * none of them can be relevant.
2687  *
2688  * proc->databaseId is set at backend startup time and never changes
2689  * thereafter, so it might be safe to perform this test before
2690  * acquiring &proc->fpInfoLock. In particular, it's certainly safe to
2691  * assume that if the target backend holds any fast-path locks, it
2692  * must have performed a memory-fencing operation (in particular, an
2693  * LWLock acquisition) since setting proc->databaseId. However, it's
2694  * less clear that our backend is certain to have performed a memory
2695  * fencing operation since the other backend set proc->databaseId. So
2696  * for now, we test it after acquiring the LWLock just to be safe.
2697  */
2698  if (proc->databaseId != locktag->locktag_field1)
2699  {
2700  LWLockRelease(&proc->fpInfoLock);
2701  continue;
2702  }
2703 
2704  for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
2705  {
2706  uint32 lockmode;
2707 
2708  /* Look for an allocated slot matching the given relid. */
2709  if (relid != proc->fpRelId[f] || FAST_PATH_GET_BITS(proc, f) == 0)
2710  continue;
2711 
2712  /* Find or create lock object. */
2713  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2714  for (lockmode = FAST_PATH_LOCKNUMBER_OFFSET;
2715  lockmode < FAST_PATH_LOCKNUMBER_OFFSET + FAST_PATH_BITS_PER_SLOT;
2716  ++lockmode)
2717  {
2718  PROCLOCK *proclock;
2719 
2720  if (!FAST_PATH_CHECK_LOCKMODE(proc, f, lockmode))
2721  continue;
2722  proclock = SetupLockInTable(lockMethodTable, proc, locktag,
2723  hashcode, lockmode);
2724  if (!proclock)
2725  {
2726  LWLockRelease(partitionLock);
2727  LWLockRelease(&proc->fpInfoLock);
2728  return false;
2729  }
2730  GrantLock(proclock->tag.myLock, proclock, lockmode);
2731  FAST_PATH_CLEAR_LOCKMODE(proc, f, lockmode);
2732  }
2733  LWLockRelease(partitionLock);
2734 
2735  /* No need to examine remaining slots. */
2736  break;
2737  }
2738  LWLockRelease(&proc->fpInfoLock);
2739  }
2740  return true;
2741 }
PROC_HDR * ProcGlobal
Definition: proc.c:78
uint32 locktag_field1
Definition: lock.h:166
Definition: proc.h:162
Oid databaseId
Definition: proc.h:198
PGPROC * allProcs
Definition: proc.h:362
uint32 allProcCount
Definition: proc.h:380

References PROC_HDR::allProcCount, PROC_HDR::allProcs, PGPROC::databaseId, FAST_PATH_BITS_PER_SLOT, FAST_PATH_CHECK_LOCKMODE, FAST_PATH_CLEAR_LOCKMODE, FAST_PATH_GET_BITS, FAST_PATH_LOCKNUMBER_OFFSET, FP_LOCK_SLOTS_PER_BACKEND, PGPROC::fpInfoLock, PGPROC::fpRelId, GrantLock(), i, LockHashPartitionLock, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), PROCLOCKTAG::myLock, ProcGlobal, SetupLockInTable(), and PROCLOCK::tag.

Referenced by LockAcquireExtended().

◆ FastPathUnGrantRelationLock()

static bool FastPathUnGrantRelationLock ( Oid  relid,
LOCKMODE  lockmode 
)
static

Definition at line 2633 of file lock.c.

2634 {
2635  uint32 f;
2636  bool result = false;
2637 
2638  FastPathLocalUseCount = 0;
2639  for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
2640  {
2641  if (MyProc->fpRelId[f] == relid
2642  && FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
2643  {
2644  Assert(!result);
2645  FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
2646  result = true;
2647  /* we continue iterating so as to update FastPathLocalUseCount */
2648  }
2649  if (FAST_PATH_GET_BITS(MyProc, f) != 0)
2650  ++FastPathLocalUseCount;
2651  }
2652  return result;
2653 }

References Assert(), FAST_PATH_CHECK_LOCKMODE, FAST_PATH_CLEAR_LOCKMODE, FAST_PATH_GET_BITS, FastPathLocalUseCount, FP_LOCK_SLOTS_PER_BACKEND, PGPROC::fpRelId, and MyProc.

Referenced by LockRelease(), and LockReleaseAll().

◆ FinishStrongLockAcquire()

static void FinishStrongLockAcquire ( void  )
static

Definition at line 1718 of file lock.c.

1719 {
1720  StrongLockInProgress = NULL;
1721 }

References StrongLockInProgress.

Referenced by LockAcquireExtended().

◆ GetBlockerStatusData()

BlockedProcsData* GetBlockerStatusData ( int  blocked_pid)

Definition at line 3764 of file lock.c.

3765 {
3766  BlockedProcsData *data;
3767  PGPROC *proc;
3768  int i;
3769 
3770  data = (BlockedProcsData *) palloc(sizeof(BlockedProcsData));
3771 
3772  /*
3773  * Guess how much space we'll need, and preallocate. Most of the time
3774  * this will avoid needing to do repalloc while holding the LWLocks. (We
3775  * assume, but check with an Assert, that MaxBackends is enough entries
3776  * for the procs[] array; the other two could need enlargement, though.)
3777  */
3778  data->nprocs = data->nlocks = data->npids = 0;
3779  data->maxprocs = data->maxlocks = data->maxpids = MaxBackends;
3780  data->procs = (BlockedProcData *) palloc(sizeof(BlockedProcData) * data->maxprocs);
3781  data->locks = (LockInstanceData *) palloc(sizeof(LockInstanceData) * data->maxlocks);
3782  data->waiter_pids = (int *) palloc(sizeof(int) * data->maxpids);
3783 
3784  /*
3785  * In order to search the ProcArray for blocked_pid and assume that that
3786  * entry won't immediately disappear under us, we must hold ProcArrayLock.
3787  * In addition, to examine the lock grouping fields of any other backend,
3788  * we must hold all the hash partition locks. (Only one of those locks is
3789  * actually relevant for any one lock group, but we can't know which one
3790  * ahead of time.) It's fairly annoying to hold all those locks
3791  * throughout this, but it's no worse than GetLockStatusData(), and it
3792  * does have the advantage that we're guaranteed to return a
3793  * self-consistent instantaneous state.
3794  */
3795  LWLockAcquire(ProcArrayLock, LW_SHARED);
3796 
3797  proc = BackendPidGetProcWithLock(blocked_pid);
3798 
3799  /* Nothing to do if it's gone */
3800  if (proc != NULL)
3801  {
3802  /*
3803  * Acquire lock on the entire shared lock data structure. See notes
3804  * in GetLockStatusData().
3805  */
3806  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
3807  LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);
3808 
3809  if (proc->lockGroupLeader == NULL)
3810  {
3811  /* Easy case, proc is not a lock group member */
3812  GetSingleProcBlockerStatusData(proc, data);
3813  }
3814  else
3815  {
3816  /* Examine all procs in proc's lock group */
3817  dlist_iter iter;
3818 
3819  dlist_foreach(iter, &proc->lockGroupMembers)
3820  {
3821  PGPROC *memberProc;
3822 
3823  memberProc = dlist_container(PGPROC, lockGroupLink, iter.cur);
3824  GetSingleProcBlockerStatusData(memberProc, data);
3825  }
3826  }
3827 
3828  /*
3829  * And release locks. See notes in GetLockStatusData().
3830  */
3831  for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
3832  LWLockRelease(LockHashPartitionLockByIndex(i));
3833 
3834  Assert(data->nprocs <= data->maxprocs);
3835  }
3836 
3837  LWLockRelease(ProcArrayLock);
3838 
3839  return data;
3840 }
int MaxBackends
Definition: globals.c:140
#define dlist_foreach(iter, lhead)
Definition: ilist.h:623
#define dlist_container(type, membername, ptr)
Definition: ilist.h:593
static void GetSingleProcBlockerStatusData(PGPROC *blocked_proc, BlockedProcsData *data)
Definition: lock.c:3844
#define LockHashPartitionLockByIndex(i)
Definition: lock.h:530
#define NUM_LOCK_PARTITIONS
Definition: lwlock.h:99
void * palloc(Size size)
Definition: mcxt.c:1226
const void * data
PGPROC * BackendPidGetProcWithLock(int pid)
Definition: procarray.c:3126
dlist_head lockGroupMembers
Definition: proc.h:296
PGPROC * lockGroupLeader
Definition: proc.h:295
dlist_node * cur
Definition: ilist.h:179

References Assert(), BackendPidGetProcWithLock(), dlist_iter::cur, data, dlist_container, dlist_foreach, GetSingleProcBlockerStatusData(), i, PGPROC::lockGroupLeader, PGPROC::lockGroupMembers, LockHashPartitionLockByIndex, LW_SHARED, LWLockAcquire(), LWLockRelease(), MaxBackends, NUM_LOCK_PARTITIONS, and palloc().

Referenced by pg_blocking_pids().

◆ GetLockConflicts()

VirtualTransactionId* GetLockConflicts ( const LOCKTAG locktag,
LOCKMODE  lockmode,
int *  countp 
)

Definition at line 2855 of file lock.c.

2856 {
2857  static VirtualTransactionId *vxids;
2858  LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
2859  LockMethod lockMethodTable;
2860  LOCK *lock;
2861  LOCKMASK conflictMask;
2862  dlist_iter proclock_iter;
2863  PROCLOCK *proclock;
2864  uint32 hashcode;
2865  LWLock *partitionLock;
2866  int count = 0;
2867  int fast_count = 0;
2868 
2869  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2870  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2871  lockMethodTable = LockMethods[lockmethodid];
2872  if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
2873  elog(ERROR, "unrecognized lock mode: %d", lockmode);
2874 
2875  /*
2876  * Allocate memory to store results, and fill with InvalidVXID. We only
2877  * need enough space for MaxBackends + max_prepared_xacts + a terminator.
2878  * InHotStandby allocate once in TopMemoryContext.
2879  */
2880  if (InHotStandby)
2881  {
2882  if (vxids == NULL)
2883  vxids = (VirtualTransactionId *)
2884  MemoryContextAlloc(TopMemoryContext,
2885  sizeof(VirtualTransactionId) *
2886  (MaxBackends + max_prepared_xacts + 1));
2887  }
2888  else
2889  vxids = (VirtualTransactionId *)
2890  palloc0(sizeof(VirtualTransactionId) *
2891  (MaxBackends + max_prepared_xacts + 1));
2892 
2893  /* Compute hash code and partition lock, and look up conflicting modes. */
2894  hashcode = LockTagHashCode(locktag);
2895  partitionLock = LockHashPartitionLock(hashcode);
2896  conflictMask = lockMethodTable->conflictTab[lockmode];
2897 
2898  /*
2899  * Fast path locks might not have been entered in the primary lock table.
2900  * If the lock we're dealing with could conflict with such a lock, we must
2901  * examine each backend's fast-path array for conflicts.
2902  */
2903  if (ConflictsWithRelationFastPath(locktag, lockmode))
2904  {
2905  int i;
2906  Oid relid = locktag->locktag_field2;
2907  VirtualTransactionId vxid;
2908 
2909  /*
2910  * Iterate over relevant PGPROCs. Anything held by a prepared
2911  * transaction will have been transferred to the primary lock table,
2912  * so we need not worry about those. This is all a bit fuzzy, because
2913  * new locks could be taken after we've visited a particular
2914  * partition, but the callers had better be prepared to deal with that
2915  * anyway, since the locks could equally well be taken between the
2916  * time we return the value and the time the caller does something
2917  * with it.
2918  */
2919  for (i = 0; i < ProcGlobal->allProcCount; i++)
2920  {
2921  PGPROC *proc = &ProcGlobal->allProcs[i];
2922  uint32 f;
2923 
2924  /* A backend never blocks itself */
2925  if (proc == MyProc)
2926  continue;
2927 
2927 
2928  LWLockAcquire(&proc->fpInfoLock, LW_SHARED);
2929 
2930  /*
2931  * If the target backend isn't referencing the same database as
2932  * the lock, then we needn't examine the individual relation IDs
2933  * at all; none of them can be relevant.
2934  *
2935  * See FastPathTransferRelationLocks() for discussion of why we do
2936  * this test after acquiring the lock.
2937  */
2938  if (proc->databaseId != locktag->locktag_field1)
2939  {
2940  LWLockRelease(&proc->fpInfoLock);
2941  continue;
2942  }
2943 
2944  for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
2945  {
2946  uint32 lockmask;
2947 
2948  /* Look for an allocated slot matching the given relid. */
2949  if (relid != proc->fpRelId[f])
2950  continue;
2951  lockmask = FAST_PATH_GET_BITS(proc, f);
2952  if (!lockmask)
2953  continue;
2954  lockmask <<= FAST_PATH_LOCKNUMBER_OFFSET;
2955 
2956  /*
2957  * There can only be one entry per relation, so if we found it
2958  * and it doesn't conflict, we can skip the rest of the slots.
2959  */
2960  if ((lockmask & conflictMask) == 0)
2961  break;
2962 
2963  /* Conflict! */
2964  GET_VXID_FROM_PGPROC(vxid, *proc);
2965 
2966  if (VirtualTransactionIdIsValid(vxid))
2967  vxids[count++] = vxid;
2968  /* else, xact already committed or aborted */
2969 
2970  /* No need to examine remaining slots. */
2971  break;
2972  }
2973 
2974  LWLockRelease(&proc->fpInfoLock);
2975  }
2976  }
2977 
2978  /* Remember how many fast-path conflicts we found. */
2979  fast_count = count;
2980 
2981  /*
2982  * Look up the lock object matching the tag.
2983  */
2984  LWLockAcquire(partitionLock, LW_SHARED);
2985 
2986  lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
2987  locktag,
2988  hashcode,
2989  HASH_FIND,
2990  NULL);
2991  if (!lock)
2992  {
2993  /*
2994  * If the lock object doesn't exist, there is nothing holding a lock
2995  * on this lockable object.
2996  */
2997  LWLockRelease(partitionLock);
2998  vxids[count].backendId = InvalidBackendId;
2999  vxids[count].localTransactionId = InvalidLocalTransactionId;
3000  if (countp)
3001  *countp = count;
3002  return vxids;
3003  }
3004 
3005  /*
3006  * Examine each existing holder (or awaiter) of the lock.
3007  */
3008  dlist_foreach(proclock_iter, &lock->procLocks)
3009  {
3010  proclock = dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
3011 
3012  if (conflictMask & proclock->holdMask)
3013  {
3014  PGPROC *proc = proclock->tag.myProc;
3015 
3016  /* A backend never blocks itself */
3017  if (proc != MyProc)
3018  {
3019  VirtualTransactionId vxid;
3020 
3021  GET_VXID_FROM_PGPROC(vxid, *proc);
3022 
3023  if (VirtualTransactionIdIsValid(vxid))
3024  {
3025  int i;
3026 
3027  /* Avoid duplicate entries. */
3028  for (i = 0; i < fast_count; ++i)
3029  if (VirtualTransactionIdEquals(vxids[i], vxid))
3030  break;
3031  if (i >= fast_count)
3032  vxids[count++] = vxid;
3033  }
3034  /* else, xact already committed or aborted */
3035  }
3036  }
3037  }
3038 
3039  LWLockRelease(partitionLock);
3040 
3041  if (count > MaxBackends + max_prepared_xacts) /* should never happen */
3042  elog(PANIC, "too many conflicting locks found");
3043 
3044  vxids[count].backendId = InvalidBackendId;
3045  vxids[count].localTransactionId = InvalidLocalTransactionId;
3046  if (countp)
3047  *countp = count;
3048  return vxids;
3049 }
#define InvalidBackendId
Definition: backendid.h:23
#define lengthof(array)
Definition: c.h:777
#define ConflictsWithRelationFastPath(locktag, mode)
Definition: lock.c:221
uint32 LockTagHashCode(const LOCKTAG *locktag)
Definition: lock.c:505
uint16 LOCKMETHODID
Definition: lock.h:122
#define VirtualTransactionIdIsValid(vxid)
Definition: lock.h:67
#define InvalidLocalTransactionId
Definition: lock.h:65
#define VirtualTransactionIdEquals(vxid1, vxid2)
Definition: lock.h:71
#define GET_VXID_FROM_PGPROC(vxid, proc)
Definition: lock.h:77
int LOCKMASK
Definition: lockdefs.h:25
MemoryContext TopMemoryContext
Definition: mcxt.c:141
void * palloc0(Size size)
Definition: mcxt.c:1257
void * MemoryContextAlloc(MemoryContext context, Size size)
Definition: mcxt.c:1021
uint8 locktag_lockmethodid
Definition: lock.h:171
int numLockModes
Definition: lock.h:110
LocalTransactionId localTransactionId
Definition: lock.h:62
BackendId backendId
Definition: lock.h:61
int max_prepared_xacts
Definition: twophase.c:118
#define InHotStandby
Definition: xlogutils.h:57

References PROC_HDR::allProcCount, PROC_HDR::allProcs, VirtualTransactionId::backendId, ConflictsWithRelationFastPath, LockMethodData::conflictTab, dlist_iter::cur, PGPROC::databaseId, dlist_container, dlist_foreach, elog(), ERROR, FAST_PATH_GET_BITS, FAST_PATH_LOCKNUMBER_OFFSET, FP_LOCK_SLOTS_PER_BACKEND, PGPROC::fpInfoLock, PGPROC::fpRelId, GET_VXID_FROM_PGPROC, HASH_FIND, hash_search_with_hash_value(), PROCLOCK::holdMask, i, InHotStandby, InvalidBackendId, InvalidLocalTransactionId, lengthof, VirtualTransactionId::localTransactionId, LockHashPartitionLock, LockMethodLockHash, LockMethods, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_lockmethodid, LockTagHashCode(), LW_SHARED, LWLockAcquire(), LWLockRelease(), max_prepared_xacts, MaxBackends, MemoryContextAlloc(), MyProc, PROCLOCKTAG::myProc, LockMethodData::numLockModes, palloc0(), PANIC, ProcGlobal, LOCK::procLocks, PROCLOCK::tag, TopMemoryContext, VirtualTransactionIdEquals, and VirtualTransactionIdIsValid.

Referenced by ProcSleep(), ResolveRecoveryConflictWithLock(), and WaitForLockersMultiple().

◆ GetLockmodeName()

const char* GetLockmodeName ( LOCKMETHODID  lockmethodid,
LOCKMODE  mode 
)

Definition at line 4021 of file lock.c.

4022 {
4023  Assert(lockmethodid > 0 && lockmethodid < lengthof(LockMethods));
4024  Assert(mode > 0 && mode <= LockMethods[lockmethodid]->numLockModes);
4025  return LockMethods[lockmethodid]->lockModeNames[mode];
4026 }
const char *const * lockModeNames
Definition: lock.h:112

References Assert(), lengthof, LockMethods, LockMethodData::lockModeNames, and mode.

Referenced by CheckRelationLockedByMe(), DeadLockReport(), pg_lock_status(), and ProcSleep().

◆ GetLocksMethodTable()

LockMethod GetLocksMethodTable ( const LOCK lock)

Definition at line 475 of file lock.c.

476 {
477  LOCKMETHODID lockmethodid = LOCK_LOCKMETHOD(*lock);
478 
479  Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
480  return LockMethods[lockmethodid];
481 }
#define LOCK_LOCKMETHOD(lock)
Definition: lock.h:324

References Assert(), lengthof, LOCK_LOCKMETHOD, and LockMethods.

Referenced by DeadLockCheck(), and FindLockCycleRecurseMember().

◆ GetLockStatusData()

LockData* GetLockStatusData ( void  )

Definition at line 3572 of file lock.c.

3573 {
3574  LockData *data;
3575  PROCLOCK *proclock;
3576  HASH_SEQ_STATUS seqstat;
3577  int els;
3578  int el;
3579  int i;
3580 
3581  data = (LockData *) palloc(sizeof(LockData));
3582 
3583  /* Guess how much space we'll need. */
3584  els = MaxBackends;
3585  el = 0;
3586  data->locks = (LockInstanceData *) palloc(sizeof(LockInstanceData) * els);
3587 
3588  /*
3589  * First, we iterate through the per-backend fast-path arrays, locking
3590  * them one at a time. This might produce an inconsistent picture of the
3591  * system state, but taking all of those LWLocks at the same time seems
3592  * impractical (in particular, note MAX_SIMUL_LWLOCKS). It shouldn't
3593  * matter too much, because none of these locks can be involved in lock
3594  * conflicts anyway - anything that might must be present in the main lock
3595  * table. (For the same reason, we don't sweat about making leaderPid
3596  * completely valid. We cannot safely dereference another backend's
3597  * lockGroupLeader field without holding all lock partition locks, and
3598  * it's not worth that.)
3599  */
3600  for (i = 0; i < ProcGlobal->allProcCount; ++i)
3601  {
3602  PGPROC *proc = &ProcGlobal->allProcs[i];
3603  uint32 f;
3604 
3605  LWLockAcquire(&proc->fpInfoLock, LW_SHARED);
3606 
3607  for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; ++f)
3608  {
3609  LockInstanceData *instance;
3610  uint32 lockbits = FAST_PATH_GET_BITS(proc, f);
3611 
3612  /* Skip unallocated slots. */
3613  if (!lockbits)
3614  continue;
3615 
3616  if (el >= els)
3617  {
3618  els += MaxBackends;
3619  data->locks = (LockInstanceData *)
3620  repalloc(data->locks, sizeof(LockInstanceData) * els);
3621  }
3622 
3623  instance = &data->locks[el];
3624  SET_LOCKTAG_RELATION(instance->locktag, proc->databaseId,
3625  proc->fpRelId[f]);
3626  instance->holdMask = lockbits << FAST_PATH_LOCKNUMBER_OFFSET;
3627  instance->waitLockMode = NoLock;
3628  instance->backend = proc->backendId;
3629  instance->lxid = proc->lxid;
3630  instance->pid = proc->pid;
3631  instance->leaderPid = proc->pid;
3632  instance->fastpath = true;
3633 
3634  /*
3635  * Successfully taking fast path lock means there were no
3636  * conflicting locks.
3637  */
3638  instance->waitStart = 0;
3639 
3640  el++;
3641  }
3642 
3643  if (proc->fpVXIDLock)
3644  {
3645  VirtualTransactionId vxid;
3646  LockInstanceData *instance;
3647 
3648  if (el >= els)
3649  {
3650  els += MaxBackends;
3651  data->locks = (LockInstanceData *)
3652  repalloc(data->locks, sizeof(LockInstanceData) * els);
3653  }
3654 
3655  vxid.backendId = proc->backendId;
3656  vxid.localTransactionId = proc->fpLocalTransactionId;
3657 
3658  instance = &data->locks[el];
3659  SET_LOCKTAG_VIRTUALTRANSACTION(instance->locktag, vxid);
3660  instance->holdMask = LOCKBIT_ON(ExclusiveLock);
3661  instance->waitLockMode = NoLock;
3662  instance->backend = proc->backendId;
3663  instance->lxid = proc->lxid;
3664  instance->pid = proc->pid;
3665  instance->leaderPid = proc->pid;
3666  instance->fastpath = true;
3667  instance->waitStart = 0;
3668 
3669  el++;
3670  }
3671 
3672  LWLockRelease(&proc->fpInfoLock);
3673  }
3674 
3675  /*
3676  * Next, acquire lock on the entire shared lock data structure. We do
3677  * this so that, at least for locks in the primary lock table, the state
3678  * will be self-consistent.
3679  *
3680  * Since this is a read-only operation, we take shared instead of
3681  * exclusive lock. There's not a whole lot of point to this, because all
3682  * the normal operations require exclusive lock, but it doesn't hurt
3683  * anything either. It will at least allow two backends to do
3684  * GetLockStatusData in parallel.
3685  *
3686  * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
3687  */
3688  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
3689  LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);
3690 
3691  /* Now we can safely count the number of proclocks */
3692  data->nelements = el + hash_get_num_entries(LockMethodProcLockHash);
3693  if (data->nelements > els)
3694  {
3695  els = data->nelements;
3696  data->locks = (LockInstanceData *)
3697  repalloc(data->locks, sizeof(LockInstanceData) * els);
3698  }
3699 
3700  /* Now scan the tables to copy the data */
3701  hash_seq_init(&seqstat, LockMethodProcLockHash);
3702 
3703  while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
3704  {
3705  PGPROC *proc = proclock->tag.myProc;
3706  LOCK *lock = proclock->tag.myLock;
3707  LockInstanceData *instance = &data->locks[el];
3708 
3709  memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
3710  instance->holdMask = proclock->holdMask;
3711  if (proc->waitLock == proclock->tag.myLock)
3712  instance->waitLockMode = proc->waitLockMode;
3713  else
3714  instance->waitLockMode = NoLock;
3715  instance->backend = proc->backendId;
3716  instance->lxid = proc->lxid;
3717  instance->pid = proc->pid;
3718  instance->leaderPid = proclock->groupLeader->pid;
3719  instance->fastpath = false;
3720  instance->waitStart = (TimestampTz) pg_atomic_read_u64(&proc->waitStart);
3721 
3722  el++;
3723  }
3724 
3725  /*
3726  * And release locks. We do this in reverse order for two reasons: (1)
3727  * Anyone else who needs more than one of the locks will be trying to lock
3728  * them in increasing order; we don't want to release the other process
3729  * until it can get all the locks it needs. (2) This avoids O(N^2)
3730  * behavior inside LWLockRelease.
3731  */
3732  for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
3733  LWLockRelease(LockHashPartitionLockByIndex(i));
3734 
3735  Assert(el == data->nelements);
3736 
3737  return data;
3738 }
static uint64 pg_atomic_read_u64(volatile pg_atomic_uint64 *ptr)
Definition: atomics.h:424
int64 TimestampTz
Definition: timestamp.h:39
long hash_get_num_entries(HTAB *hashp)
Definition: dynahash.c:1377
#define SET_LOCKTAG_VIRTUALTRANSACTION(locktag, vxid)
Definition: lock.h:235
#define SET_LOCKTAG_RELATION(locktag, dboid, reloid)
Definition: lock.h:181
#define NoLock
Definition: lockdefs.h:34
#define ExclusiveLock
Definition: lockdefs.h:42
void * repalloc(void *pointer, Size size)
Definition: mcxt.c:1476
Definition: lock.h:467
LOCKMASK holdMask
Definition: lock.h:455
LOCKMODE waitLockMode
Definition: lock.h:456
bool fastpath
Definition: lock.h:463
LOCKTAG locktag
Definition: lock.h:454
TimestampTz waitStart
Definition: lock.h:459
int leaderPid
Definition: lock.h:462
BackendId backend
Definition: lock.h:457
LocalTransactionId lxid
Definition: lock.h:458
LocalTransactionId lxid
Definition: proc.h:183
pg_atomic_uint64 waitStart
Definition: proc.h:228
bool fpVXIDLock
Definition: proc.h:287
BackendId backendId
Definition: proc.h:197
int pid
Definition: proc.h:186
LOCK * waitLock
Definition: proc.h:223
LOCKMODE waitLockMode
Definition: proc.h:225
LocalTransactionId fpLocalTransactionId
Definition: proc.h:288
PGPROC * groupLeader
Definition: lock.h:375

References PROC_HDR::allProcCount, PROC_HDR::allProcs, Assert(), LockInstanceData::backend, VirtualTransactionId::backendId, PGPROC::backendId, data, PGPROC::databaseId, ExclusiveLock, FAST_PATH_GET_BITS, FAST_PATH_LOCKNUMBER_OFFSET, LockInstanceData::fastpath, FP_LOCK_SLOTS_PER_BACKEND, PGPROC::fpInfoLock, PGPROC::fpLocalTransactionId, PGPROC::fpRelId, PGPROC::fpVXIDLock, PROCLOCK::groupLeader, hash_get_num_entries(), hash_seq_init(), hash_seq_search(), PROCLOCK::holdMask, LockInstanceData::holdMask, i, LockInstanceData::leaderPid, VirtualTransactionId::localTransactionId, LOCKBIT_ON, LockHashPartitionLockByIndex, LockMethodProcLockHash, LockInstanceData::locktag, LW_SHARED, LWLockAcquire(), LWLockRelease(), LockInstanceData::lxid, PGPROC::lxid, MaxBackends, PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, NoLock, NUM_LOCK_PARTITIONS, palloc(), pg_atomic_read_u64(), LockInstanceData::pid, PGPROC::pid, ProcGlobal, repalloc(), SET_LOCKTAG_RELATION, SET_LOCKTAG_VIRTUALTRANSACTION, LOCK::tag, PROCLOCK::tag, PGPROC::waitLock, LockInstanceData::waitLockMode, PGPROC::waitLockMode, LockInstanceData::waitStart, and PGPROC::waitStart.

Referenced by pg_lock_status().

◆ GetLockTagsMethodTable()

LockMethod GetLockTagsMethodTable ( const LOCKTAG locktag)

Definition at line 487 of file lock.c.

488 {
489  LOCKMETHODID lockmethodid = (LOCKMETHODID) locktag->locktag_lockmethodid;
490 
491  Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
492  return LockMethods[lockmethodid];
493 }

References Assert(), lengthof, LockMethods, and LOCKTAG::locktag_lockmethodid.

Referenced by pg_blocking_pids().

◆ GetRunningTransactionLocks()

xl_standby_lock* GetRunningTransactionLocks ( int *  nlocks)

Definition at line 3939 of file lock.c.

3940 {
3941  xl_standby_lock *accessExclusiveLocks;
3942  PROCLOCK *proclock;
3943  HASH_SEQ_STATUS seqstat;
3944  int i;
3945  int index;
3946  int els;
3947 
3948  /*
3949  * Acquire lock on the entire shared lock data structure.
3950  *
3951  * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
3952  */
3953  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
3954  LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);
3955 
3956  /* Now we can safely count the number of proclocks */
3957  els = hash_get_num_entries(LockMethodProcLockHash);
3958 
3959  /*
3960  * Allocating enough space for all locks in the lock table is overkill,
3961  * but it's more convenient and faster than having to enlarge the array.
3962  */
3963  accessExclusiveLocks = palloc(els * sizeof(xl_standby_lock));
3964 
3965  /* Now scan the tables to copy the data */
3966  hash_seq_init(&seqstat, LockMethodProcLockHash);
3967 
3968  /*
3969  * If lock is a currently granted AccessExclusiveLock then it will have
3970  * just one proclock holder, so locks are never accessed twice in this
3971  * particular case. Don't copy this code for use elsewhere because in the
3972  * general case this will give you duplicate locks when looking at
3973  * non-exclusive lock types.
3974  */
3975  index = 0;
3976  while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
3977  {
3978  /* make sure this definition matches the one used in LockAcquire */
3979  if ((proclock->holdMask & LOCKBIT_ON(AccessExclusiveLock)) &&
3980  proclock->tag.myLock->tag.locktag_type == LOCKTAG_RELATION)
3981  {
3982  PGPROC *proc = proclock->tag.myProc;
3983  LOCK *lock = proclock->tag.myLock;
3984  TransactionId xid = proc->xid;
3985 
3986  /*
3987  * Don't record locks for transactions if we know they have
3988  * already issued their WAL record for commit but not yet released
3989  * lock. It is still possible that we see locks held by already
3990  * complete transactions, if they haven't yet zeroed their xids.
3991  */
3992  if (!TransactionIdIsValid(xid))
3993  continue;
3994 
3995  accessExclusiveLocks[index].xid = xid;
3996  accessExclusiveLocks[index].dbOid = lock->tag.locktag_field1;
3997  accessExclusiveLocks[index].relOid = lock->tag.locktag_field2;
3998 
3999  index++;
4000  }
4001  }
4002 
4003  Assert(index <= els);
4004 
4005  /*
4006  * And release locks. We do this in reverse order for two reasons: (1)
4007  * Anyone else who needs more than one of the locks will be trying to lock
4008  * them in increasing order; we don't want to release the other process
4009  * until it can get all the locks it needs. (2) This avoids O(N^2)
4010  * behavior inside LWLockRelease.
4011  */
4012  for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
4013  LWLockRelease(LockHashPartitionLockByIndex(i));
4014 
4015  *nlocks = index;
4016  return accessExclusiveLocks;
4017 }
uint32 TransactionId
Definition: c.h:641
#define AccessExclusiveLock
Definition: lockdefs.h:43
TransactionId xid
Definition: proc.h:173
Definition: type.h:95
TransactionId xid
Definition: lockdefs.h:51
#define TransactionIdIsValid(xid)
Definition: transam.h:41

References AccessExclusiveLock, Assert(), xl_standby_lock::dbOid, hash_get_num_entries(), hash_seq_init(), hash_seq_search(), PROCLOCK::holdMask, i, LOCKBIT_ON, LockHashPartitionLockByIndex, LockMethodProcLockHash, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG_RELATION, LOCKTAG::locktag_type, LW_SHARED, LWLockAcquire(), LWLockRelease(), PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, NUM_LOCK_PARTITIONS, palloc(), xl_standby_lock::relOid, LOCK::tag, PROCLOCK::tag, TransactionIdIsValid, xl_standby_lock::xid, and PGPROC::xid.

Referenced by LogStandbySnapshot().

◆ GetSingleProcBlockerStatusData()

static void GetSingleProcBlockerStatusData ( PGPROC blocked_proc,
BlockedProcsData data 
)
static

Definition at line 3844 of file lock.c.

3845 {
3846  LOCK *theLock = blocked_proc->waitLock;
3847  BlockedProcData *bproc;
3848  dlist_iter proclock_iter;
3849  dlist_iter proc_iter;
3850  dclist_head *waitQueue;
3851  int queue_size;
3852 
3853  /* Nothing to do if this proc is not blocked */
3854  if (theLock == NULL)
3855  return;
3856 
3857  /* Set up a procs[] element */
3858  bproc = &data->procs[data->nprocs++];
3859  bproc->pid = blocked_proc->pid;
3860  bproc->first_lock = data->nlocks;
3861  bproc->first_waiter = data->npids;
3862 
3863  /*
3864  * We may ignore the proc's fast-path arrays, since nothing in those could
3865  * be related to a contended lock.
3866  */
3867 
3868  /* Collect all PROCLOCKs associated with theLock */
3869  dlist_foreach(proclock_iter, &theLock->procLocks)
3870  {
3871  PROCLOCK *proclock =
3872  dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
3873  PGPROC *proc = proclock->tag.myProc;
3874  LOCK *lock = proclock->tag.myLock;
3875  LockInstanceData *instance;
3876 
3877  if (data->nlocks >= data->maxlocks)
3878  {
3879  data->maxlocks += MaxBackends;
3880  data->locks = (LockInstanceData *)
3881  repalloc(data->locks, sizeof(LockInstanceData) * data->maxlocks);
3882  }
3883 
3884  instance = &data->locks[data->nlocks];
3885  memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
3886  instance->holdMask = proclock->holdMask;
3887  if (proc->waitLock == lock)
3888  instance->waitLockMode = proc->waitLockMode;
3889  else
3890  instance->waitLockMode = NoLock;
3891  instance->backend = proc->backendId;
3892  instance->lxid = proc->lxid;
3893  instance->pid = proc->pid;
3894  instance->leaderPid = proclock->groupLeader->pid;
3895  instance->fastpath = false;
3896  data->nlocks++;
3897  }
3898 
3899  /* Enlarge waiter_pids[] if it's too small to hold all wait queue PIDs */
3900  waitQueue = &(theLock->waitProcs);
3901  queue_size = dclist_count(waitQueue);
3902 
3903  if (queue_size > data->maxpids - data->npids)
3904  {
3905  data->maxpids = Max(data->maxpids + MaxBackends,
3906  data->npids + queue_size);
3907  data->waiter_pids = (int *) repalloc(data->waiter_pids,
3908  sizeof(int) * data->maxpids);
3909  }
3910 
3911  /* Collect PIDs from the lock's wait queue, stopping at blocked_proc */
3912  dclist_foreach(proc_iter, waitQueue)
3913  {
3914  PGPROC *queued_proc = dlist_container(PGPROC, links, proc_iter.cur);
3915 
3916  if (queued_proc == blocked_proc)
3917  break;
3918  data->waiter_pids[data->npids++] = queued_proc->pid;
3919  queued_proc = (PGPROC *) queued_proc->links.next;
3920  }
3921 
3922  bproc->num_locks = data->nlocks - bproc->first_lock;
3923  bproc->num_waiters = data->npids - bproc->first_waiter;
3924 }
#define Max(x, y)
Definition: c.h:987
static uint32 dclist_count(const dclist_head *head)
Definition: ilist.h:932
#define dclist_foreach(iter, lhead)
Definition: ilist.h:970
int first_lock
Definition: lock.h:477
int first_waiter
Definition: lock.h:481
int num_waiters
Definition: lock.h:482
int num_locks
Definition: lock.h:478
dclist_head waitProcs
Definition: lock.h:317
dlist_node links
Definition: proc.h:164
dlist_node * next
Definition: ilist.h:140
static struct link * links
Definition: zic.c:299

References LockInstanceData::backend, PGPROC::backendId, dlist_iter::cur, data, dclist_count(), dclist_foreach, dlist_container, dlist_foreach, LockInstanceData::fastpath, BlockedProcData::first_lock, BlockedProcData::first_waiter, PROCLOCK::groupLeader, PROCLOCK::holdMask, LockInstanceData::holdMask, LockInstanceData::leaderPid, PGPROC::links, links, LockInstanceData::locktag, LockInstanceData::lxid, PGPROC::lxid, Max, MaxBackends, PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, dlist_node::next, NoLock, BlockedProcData::num_locks, BlockedProcData::num_waiters, LockInstanceData::pid, BlockedProcData::pid, PGPROC::pid, LOCK::procLocks, repalloc(), LOCK::tag, PROCLOCK::tag, PGPROC::waitLock, LockInstanceData::waitLockMode, PGPROC::waitLockMode, and LOCK::waitProcs.

Referenced by GetBlockerStatusData().

◆ GrantAwaitedLock()

void GrantAwaitedLock ( void  )

Definition at line 1757 of file lock.c.

1758 {
1759  GrantLockLocal(awaitedLock, awaitedOwner);
1760 }
static LOCALLOCK * awaitedLock
Definition: lock.c:276
static ResourceOwner awaitedOwner
Definition: lock.c:277
static void GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner)
Definition: lock.c:1660

References awaitedLock, awaitedOwner, and GrantLockLocal().

Referenced by LockErrorCleanup(), and ProcSleep().

◆ GrantLock()

void GrantLock ( LOCK lock,
PROCLOCK proclock,
LOCKMODE  lockmode 
)

Definition at line 1526 of file lock.c.

/*
 * GrantLock -- update the shared LOCK and PROCLOCK structures to record
 * that 'proclock' has been granted the requested lockmode on 'lock'.
 */
1527 {
1528  lock->nGranted++;
1529  lock->granted[lockmode]++;
1530  lock->grantMask |= LOCKBIT_ON(lockmode);	/* mode is now held by someone */
 /* once every request for this mode is granted, nobody is waiting for it */
1531  if (lock->granted[lockmode] == lock->requested[lockmode])
1532  lock->waitMask &= LOCKBIT_OFF(lockmode);
1533  proclock->holdMask |= LOCKBIT_ON(lockmode);	/* record the holder's new mode */
1534  LOCK_PRINT("GrantLock", lock, lockmode);
 /* sanity: counts stay positive, and granted can never exceed requested */
1535  Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
1536  Assert(lock->nGranted <= lock->nRequested);
1537 }
#define LOCKBIT_OFF(lockmode)
Definition: lock.h:85
int requested[MAX_LOCKMODES]
Definition: lock.h:318
int granted[MAX_LOCKMODES]
Definition: lock.h:320
LOCKMASK grantMask
Definition: lock.h:314
LOCKMASK waitMask
Definition: lock.h:315
int nGranted
Definition: lock.h:321

References Assert(), LOCK::granted, LOCK::grantMask, PROCLOCK::holdMask, LOCK_PRINT, LOCKBIT_OFF, LOCKBIT_ON, LOCK::nGranted, LOCK::nRequested, LOCK::requested, and LOCK::waitMask.

Referenced by FastPathGetRelationLockEntry(), FastPathTransferRelationLocks(), lock_twophase_recover(), LockAcquireExtended(), ProcLockWakeup(), ProcSleep(), and VirtualXactLock().

◆ GrantLockLocal()

static void GrantLockLocal ( LOCALLOCK locallock,
ResourceOwner  owner 
)
static

Definition at line 1660 of file lock.c.

/*
 * GrantLockLocal -- bump the backend-local (LOCALLOCK) reference counts
 * after a lock has been granted, crediting it to the given resource owner.
 * A NULL owner is what LockAcquireExtended passes for session locks.
 */
1661 {
1662  LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
1663  int i;
1664 
 /* caller must have ensured there is room for one more owner entry */
1665  Assert(locallock->numLockOwners < locallock->maxLockOwners);
1666  /* Count the total */
1667  locallock->nLocks++;
1668  /* Count the per-owner lock */
1669  for (i = 0; i < locallock->numLockOwners; i++)
1670  {
1671  if (lockOwners[i].owner == owner)
1672  {
1673  lockOwners[i].nLocks++;
1674  return;
1675  }
1676  }
 /* owner not seen before: loop fell through, so i == numLockOwners here
  * and we append a fresh entry for it */
1677  lockOwners[i].owner = owner;
1678  lockOwners[i].nLocks = 1;
1679  locallock->numLockOwners++;
 /* session locks (owner == NULL) are not tracked by any resource owner */
1680  if (owner != NULL)
1681  ResourceOwnerRememberLock(owner, locallock);
1682 
1683  /* Indicate that the lock is acquired for certain types of locks. */
1684  CheckAndSetLockHeld(locallock, true);
1685 }
static void CheckAndSetLockHeld(LOCALLOCK *locallock, bool acquired)
Definition: lock.c:1332
void ResourceOwnerRememberLock(ResourceOwner owner, LOCALLOCK *locallock)
Definition: resowner.c:1050
int64 nLocks
Definition: lock.h:423
struct ResourceOwnerData * owner
Definition: lock.h:422
int maxLockOwners
Definition: lock.h:437

References Assert(), CheckAndSetLockHeld(), i, LOCALLOCK::lockOwners, LOCALLOCK::maxLockOwners, LOCALLOCKOWNER::nLocks, LOCALLOCK::nLocks, LOCALLOCK::numLockOwners, LOCALLOCKOWNER::owner, and ResourceOwnerRememberLock().

Referenced by GrantAwaitedLock(), and LockAcquireExtended().

◆ InitLocks()

void InitLocks ( void  )

Definition at line 393 of file lock.c.

394 {
395  HASHCTL info;
396  long init_table_size,
397  max_table_size;
398  bool found;
399 
400  /*
401  * Compute init/max size to request for lock hashtables. Note these
402  * calculations must agree with LockShmemSize!
403  */
404  max_table_size = NLOCKENTS();
405  init_table_size = max_table_size / 2;
406 
407  /*
408  * Allocate hash table for LOCK structs. This stores per-locked-object
409  * information.
410  */
411  info.keysize = sizeof(LOCKTAG);
412  info.entrysize = sizeof(LOCK);
413  info.num_partitions = NUM_LOCK_PARTITIONS;
414 
415  LockMethodLockHash = ShmemInitHash("LOCK hash",
416  init_table_size,
417  max_table_size,
418  &info,
420 
421  /* Assume an average of 2 holders per lock */
422  max_table_size *= 2;
423  init_table_size *= 2;
424 
425  /*
426  * Allocate hash table for PROCLOCK structs. This stores
427  * per-lock-per-holder information.
428  */
429  info.keysize = sizeof(PROCLOCKTAG);
430  info.entrysize = sizeof(PROCLOCK);
431  info.hash = proclock_hash;
433 
434  LockMethodProcLockHash = ShmemInitHash("PROCLOCK hash",
435  init_table_size,
436  max_table_size,
437  &info,
439 
440  /*
441  * Allocate fast-path structures.
442  */
444  ShmemInitStruct("Fast Path Strong Relation Lock Data",
445  sizeof(FastPathStrongRelationLockData), &found);
446  if (!found)
448 
449  /*
450  * Allocate non-shared hash table for LOCALLOCK structs. This stores lock
451  * counts and resource owner information.
452  *
453  * The non-shared table could already exist in this process (this occurs
454  * when the postmaster is recreating shared memory after a backend crash).
455  * If so, delete and recreate it. (We could simply leave it, since it
456  * ought to be empty in the postmaster, but for safety let's zap it.)
457  */
460 
461  info.keysize = sizeof(LOCALLOCKTAG);
462  info.entrysize = sizeof(LOCALLOCK);
463 
464  LockMethodLocalHash = hash_create("LOCALLOCK hash",
465  16,
466  &info,
468 }
#define HASH_FUNCTION
Definition: hsearch.h:98
#define HASH_PARTITION
Definition: hsearch.h:92
#define NLOCKENTS()
Definition: lock.c:57
static uint32 proclock_hash(const void *key, Size keysize)
Definition: lock.c:522
struct LOCALLOCK LOCALLOCK
struct LOCK LOCK
struct PROCLOCK PROCLOCK
struct PROCLOCKTAG PROCLOCKTAG
struct LOCALLOCKTAG LOCALLOCKTAG
void * ShmemInitStruct(const char *name, Size size, bool *foundPtr)
Definition: shmem.c:396
HTAB * ShmemInitHash(const char *name, long init_size, long max_size, HASHCTL *infoP, int hash_flags)
Definition: shmem.c:341
#define SpinLockInit(lock)
Definition: spin.h:60
HashValueFunc hash
Definition: hsearch.h:78
long num_partitions
Definition: hsearch.h:68

References HASHCTL::entrysize, FastPathStrongRelationLocks, HASHCTL::hash, HASH_BLOBS, hash_create(), hash_destroy(), HASH_ELEM, HASH_FUNCTION, HASH_PARTITION, HASHCTL::keysize, LockMethodLocalHash, LockMethodLockHash, LockMethodProcLockHash, FastPathStrongRelationLockData::mutex, NLOCKENTS, NUM_LOCK_PARTITIONS, HASHCTL::num_partitions, proclock_hash(), ShmemInitHash(), ShmemInitStruct(), and SpinLockInit.

Referenced by CreateSharedMemoryAndSemaphores().

◆ lock_twophase_postabort()

void lock_twophase_postabort ( TransactionId  xid,
uint16  info,
void *  recdata,
uint32  len 
)

Definition at line 4364 of file lock.c.

/*
 * lock_twophase_postabort -- two-phase-commit callback run when a prepared
 * transaction aborts.  Releasing the lock is identical to the commit case,
 * so simply delegate to lock_twophase_postcommit.
 */
4366  {
4367  lock_twophase_postcommit(xid, info, recdata, len);
4368  }
void lock_twophase_postcommit(TransactionId xid, uint16 info, void *recdata, uint32 len)
Definition: lock.c:4338
const void size_t len

References len, and lock_twophase_postcommit().

◆ lock_twophase_postcommit()

void lock_twophase_postcommit ( TransactionId  xid,
uint16  info,
void *  recdata,
uint32  len 
)

Definition at line 4338 of file lock.c.

/*
 * lock_twophase_postcommit -- two-phase-commit callback: release one lock
 * held by a prepared transaction after that transaction commits (also used
 * for abort via lock_twophase_postabort).
 */
4340 {
4341  TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4342  PGPROC *proc = TwoPhaseGetDummyProc(xid, true);	/* PGPROC standing in for the prepared xact */
4343  LOCKTAG *locktag;
4344  LOCKMETHODID lockmethodid;
4345  LockMethod lockMethodTable;
4346 
 /* each 2PC lock record holds exactly one TwoPhaseLockRecord */
4347  Assert(len == sizeof(TwoPhaseLockRecord));
4348  locktag = &rec->locktag;
4349  lockmethodid = locktag->locktag_lockmethodid;
4350 
 /* validate the lock method id before indexing the LockMethods table */
4351  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4352  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4353  lockMethodTable = LockMethods[lockmethodid];
4354 
 /* final 'true' = ask LockRefindAndRelease to decrement the strong-lock count */
4355  LockRefindAndRelease(lockMethodTable, proc, locktag, rec->lockmode, true);
4356 }
static void LockRefindAndRelease(LockMethod lockMethodTable, PGPROC *proc, LOCKTAG *locktag, LOCKMODE lockmode, bool decrement_strong_lock_count)
Definition: lock.c:3063
PGPROC * TwoPhaseGetDummyProc(TransactionId xid, bool lock_held)
Definition: twophase.c:934

References Assert(), elog(), ERROR, len, lengthof, LockMethods, TwoPhaseLockRecord::lockmode, LockRefindAndRelease(), TwoPhaseLockRecord::locktag, LOCKTAG::locktag_lockmethodid, and TwoPhaseGetDummyProc().

Referenced by lock_twophase_postabort().

◆ lock_twophase_recover()

void lock_twophase_recover ( TransactionId  xid,
uint16  info,
void *  recdata,
uint32  len 
)

Definition at line 4125 of file lock.c.

4127 {
4128  TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4129  PGPROC *proc = TwoPhaseGetDummyProc(xid, false);
4130  LOCKTAG *locktag;
4131  LOCKMODE lockmode;
4132  LOCKMETHODID lockmethodid;
4133  LOCK *lock;
4134  PROCLOCK *proclock;
4135  PROCLOCKTAG proclocktag;
4136  bool found;
4137  uint32 hashcode;
4138  uint32 proclock_hashcode;
4139  int partition;
4140  LWLock *partitionLock;
4141  LockMethod lockMethodTable;
4142 
4143  Assert(len == sizeof(TwoPhaseLockRecord));
4144  locktag = &rec->locktag;
4145  lockmode = rec->lockmode;
4146  lockmethodid = locktag->locktag_lockmethodid;
4147 
4148  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4149  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4150  lockMethodTable = LockMethods[lockmethodid];
4151 
4152  hashcode = LockTagHashCode(locktag);
4153  partition = LockHashPartition(hashcode);
4154  partitionLock = LockHashPartitionLock(hashcode);
4155 
4156  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4157 
4158  /*
4159  * Find or create a lock with this tag.
4160  */
4162  locktag,
4163  hashcode,
4165  &found);
4166  if (!lock)
4167  {
4168  LWLockRelease(partitionLock);
4169  ereport(ERROR,
4170  (errcode(ERRCODE_OUT_OF_MEMORY),
4171  errmsg("out of shared memory"),
4172  errhint("You might need to increase %s.", "max_locks_per_transaction")));
4173  }
4174 
4175  /*
4176  * if it's a new lock object, initialize it
4177  */
4178  if (!found)
4179  {
4180  lock->grantMask = 0;
4181  lock->waitMask = 0;
4182  dlist_init(&lock->procLocks);
4183  dclist_init(&lock->waitProcs);
4184  lock->nRequested = 0;
4185  lock->nGranted = 0;
4186  MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
4187  MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
4188  LOCK_PRINT("lock_twophase_recover: new", lock, lockmode);
4189  }
4190  else
4191  {
4192  LOCK_PRINT("lock_twophase_recover: found", lock, lockmode);
4193  Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
4194  Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
4195  Assert(lock->nGranted <= lock->nRequested);
4196  }
4197 
4198  /*
4199  * Create the hash key for the proclock table.
4200  */
4201  proclocktag.myLock = lock;
4202  proclocktag.myProc = proc;
4203 
4204  proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
4205 
4206  /*
4207  * Find or create a proclock entry with this tag
4208  */
4210  &proclocktag,
4211  proclock_hashcode,
4213  &found);
4214  if (!proclock)
4215  {
4216  /* Oops, not enough shmem for the proclock */
4217  if (lock->nRequested == 0)
4218  {
4219  /*
4220  * There are no other requestors of this lock, so garbage-collect
4221  * the lock object. We *must* do this to avoid a permanent leak
4222  * of shared memory, because there won't be anything to cause
4223  * anyone to release the lock object later.
4224  */
4225  Assert(dlist_is_empty(&lock->procLocks));
4227  &(lock->tag),
4228  hashcode,
4229  HASH_REMOVE,
4230  NULL))
4231  elog(PANIC, "lock table corrupted");
4232  }
4233  LWLockRelease(partitionLock);
4234  ereport(ERROR,
4235  (errcode(ERRCODE_OUT_OF_MEMORY),
4236  errmsg("out of shared memory"),
4237  errhint("You might need to increase %s.", "max_locks_per_transaction")));
4238  }
4239 
4240  /*
4241  * If new, initialize the new entry
4242  */
4243  if (!found)
4244  {
4245  Assert(proc->lockGroupLeader == NULL);
4246  proclock->groupLeader = proc;
4247  proclock->holdMask = 0;
4248  proclock->releaseMask = 0;
4249  /* Add proclock to appropriate lists */
4250  dlist_push_tail(&lock->procLocks, &proclock->lockLink);
4251  dlist_push_tail(&proc->myProcLocks[partition],
4252  &proclock->procLink);
4253  PROCLOCK_PRINT("lock_twophase_recover: new", proclock);
4254  }
4255  else
4256  {
4257  PROCLOCK_PRINT("lock_twophase_recover: found", proclock);
4258  Assert((proclock->holdMask & ~lock->grantMask) == 0);
4259  }
4260 
4261  /*
4262  * lock->nRequested and lock->requested[] count the total number of
4263  * requests, whether granted or waiting, so increment those immediately.
4264  */
4265  lock->nRequested++;
4266  lock->requested[lockmode]++;
4267  Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
4268 
4269  /*
4270  * We shouldn't already hold the desired lock.
4271  */
4272  if (proclock->holdMask & LOCKBIT_ON(lockmode))
4273  elog(ERROR, "lock %s on object %u/%u/%u is already held",
4274  lockMethodTable->lockModeNames[lockmode],
4275  lock->tag.locktag_field1, lock->tag.locktag_field2,
4276  lock->tag.locktag_field3);
4277 
4278  /*
4279  * We ignore any possible conflicts and just grant ourselves the lock. Not
4280  * only because we don't bother, but also to avoid deadlocks when
4281  * switching from standby to normal mode. See function comment.
4282  */
4283  GrantLock(lock, proclock, lockmode);
4284 
4285  /*
4286  * Bump strong lock count, to make sure any fast-path lock requests won't
4287  * be granted without consulting the primary lock table.
4288  */
4289  if (ConflictsWithRelationFastPath(&lock->tag, lockmode))
4290  {
4291  uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
4292 
4294  FastPathStrongRelationLocks->count[fasthashcode]++;
4296  }
4297 
4298  LWLockRelease(partitionLock);
4299 }
#define MemSet(start, val, len)
Definition: c.h:1009
@ HASH_ENTER_NULL
Definition: hsearch.h:116
static void dlist_init(dlist_head *head)
Definition: ilist.h:314
static void dlist_push_tail(dlist_head *head, dlist_node *node)
Definition: ilist.h:364
static void dclist_init(dclist_head *head)
Definition: ilist.h:671
#define MAX_LOCKMODES
Definition: lock.h:82
#define LockHashPartition(hashcode)
Definition: lock.h:525
int LOCKMODE
Definition: lockdefs.h:26
uint32 locktag_field3
Definition: lock.h:168
dlist_head myProcLocks[NUM_LOCK_PARTITIONS]
Definition: proc.h:252
LOCKMASK releaseMask
Definition: lock.h:377

References Assert(), ConflictsWithRelationFastPath, FastPathStrongRelationLockData::count, dclist_init(), dlist_init(), dlist_is_empty(), dlist_push_tail(), elog(), ereport, errcode(), errhint(), errmsg(), ERROR, FastPathStrongLockHashPartition, FastPathStrongRelationLocks, LOCK::granted, GrantLock(), LOCK::grantMask, PROCLOCK::groupLeader, HASH_ENTER_NULL, HASH_REMOVE, hash_search_with_hash_value(), PROCLOCK::holdMask, len, lengthof, LOCK_PRINT, LOCKBIT_ON, PGPROC::lockGroupLeader, LockHashPartition, LockHashPartitionLock, PROCLOCK::lockLink, LockMethodLockHash, LockMethodProcLockHash, LockMethods, TwoPhaseLockRecord::lockmode, LockMethodData::lockModeNames, TwoPhaseLockRecord::locktag, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_field3, LOCKTAG::locktag_lockmethodid, LockTagHashCode(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), MAX_LOCKMODES, MemSet, FastPathStrongRelationLockData::mutex, PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, PGPROC::myProcLocks, LOCK::nGranted, LOCK::nRequested, PANIC, PROCLOCK::procLink, PROCLOCK_PRINT, ProcLockHashCode(), LOCK::procLocks, PROCLOCK::releaseMask, LOCK::requested, SpinLockAcquire, SpinLockRelease, LOCK::tag, TwoPhaseGetDummyProc(), LOCK::waitMask, and LOCK::waitProcs.

◆ lock_twophase_standby_recover()

void lock_twophase_standby_recover ( TransactionId  xid,
uint16  info,
void *  recdata,
uint32  len 
)

Definition at line 4306 of file lock.c.

4308 {
4309  TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4310  LOCKTAG *locktag;
4311  LOCKMODE lockmode;
4312  LOCKMETHODID lockmethodid;
4313 
4314  Assert(len == sizeof(TwoPhaseLockRecord));
4315  locktag = &rec->locktag;
4316  lockmode = rec->lockmode;
4317  lockmethodid = locktag->locktag_lockmethodid;
4318 
4319  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4320  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4321 
4322  if (lockmode == AccessExclusiveLock &&
4323  locktag->locktag_type == LOCKTAG_RELATION)
4324  {
4326  locktag->locktag_field1 /* dboid */ ,
4327  locktag->locktag_field2 /* reloid */ );
4328  }
4329 }
void StandbyAcquireAccessExclusiveLock(TransactionId xid, Oid dbOid, Oid relOid)
Definition: standby.c:986

References AccessExclusiveLock, Assert(), elog(), ERROR, len, lengthof, LockMethods, TwoPhaseLockRecord::lockmode, TwoPhaseLockRecord::locktag, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_lockmethodid, LOCKTAG_RELATION, LOCKTAG::locktag_type, and StandbyAcquireAccessExclusiveLock().

◆ LockAcquire()

LockAcquireResult LockAcquire ( const LOCKTAG locktag,
LOCKMODE  lockmode,
bool  sessionLock,
bool  dontWait 
)

◆ LockAcquireExtended()

LockAcquireResult LockAcquireExtended ( const LOCKTAG locktag,
LOCKMODE  lockmode,
bool  sessionLock,
bool  dontWait,
bool  reportMemoryError,
LOCALLOCK **  locallockp 
)

Definition at line 759 of file lock.c.

765 {
766  LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
767  LockMethod lockMethodTable;
768  LOCALLOCKTAG localtag;
769  LOCALLOCK *locallock;
770  LOCK *lock;
771  PROCLOCK *proclock;
772  bool found;
773  ResourceOwner owner;
774  uint32 hashcode;
775  LWLock *partitionLock;
776  bool found_conflict;
777  bool log_lock = false;
778 
779  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
780  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
781  lockMethodTable = LockMethods[lockmethodid];
782  if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
783  elog(ERROR, "unrecognized lock mode: %d", lockmode);
784 
785  if (RecoveryInProgress() && !InRecovery &&
786  (locktag->locktag_type == LOCKTAG_OBJECT ||
787  locktag->locktag_type == LOCKTAG_RELATION) &&
788  lockmode > RowExclusiveLock)
789  ereport(ERROR,
790  (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
791  errmsg("cannot acquire lock mode %s on database objects while recovery is in progress",
792  lockMethodTable->lockModeNames[lockmode]),
793  errhint("Only RowExclusiveLock or less can be acquired on database objects during recovery.")));
794 
795 #ifdef LOCK_DEBUG
796  if (LOCK_DEBUG_ENABLED(locktag))
797  elog(LOG, "LockAcquire: lock [%u,%u] %s",
798  locktag->locktag_field1, locktag->locktag_field2,
799  lockMethodTable->lockModeNames[lockmode]);
800 #endif
801 
802  /* Identify owner for lock */
803  if (sessionLock)
804  owner = NULL;
805  else
806  owner = CurrentResourceOwner;
807 
808  /*
809  * Find or create a LOCALLOCK entry for this lock and lockmode
810  */
811  MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
812  localtag.lock = *locktag;
813  localtag.mode = lockmode;
814 
815  locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
816  &localtag,
817  HASH_ENTER, &found);
818 
819  /*
820  * if it's a new locallock object, initialize it
821  */
822  if (!found)
823  {
824  locallock->lock = NULL;
825  locallock->proclock = NULL;
826  locallock->hashcode = LockTagHashCode(&(localtag.lock));
827  locallock->nLocks = 0;
828  locallock->holdsStrongLockCount = false;
829  locallock->lockCleared = false;
830  locallock->numLockOwners = 0;
831  locallock->maxLockOwners = 8;
832  locallock->lockOwners = NULL; /* in case next line fails */
833  locallock->lockOwners = (LOCALLOCKOWNER *)
835  locallock->maxLockOwners * sizeof(LOCALLOCKOWNER));
836  }
837  else
838  {
839  /* Make sure there will be room to remember the lock */
840  if (locallock->numLockOwners >= locallock->maxLockOwners)
841  {
842  int newsize = locallock->maxLockOwners * 2;
843 
844  locallock->lockOwners = (LOCALLOCKOWNER *)
845  repalloc(locallock->lockOwners,
846  newsize * sizeof(LOCALLOCKOWNER));
847  locallock->maxLockOwners = newsize;
848  }
849  }
850  hashcode = locallock->hashcode;
851 
852  if (locallockp)
853  *locallockp = locallock;
854 
855  /*
856  * If we already hold the lock, we can just increase the count locally.
857  *
858  * If lockCleared is already set, caller need not worry about absorbing
859  * sinval messages related to the lock's object.
860  */
861  if (locallock->nLocks > 0)
862  {
863  GrantLockLocal(locallock, owner);
864  if (locallock->lockCleared)
865  return LOCKACQUIRE_ALREADY_CLEAR;
866  else
867  return LOCKACQUIRE_ALREADY_HELD;
868  }
869 
870  /*
871  * We don't acquire any other heavyweight lock while holding the relation
872  * extension lock. We do allow to acquire the same relation extension
873  * lock more than once but that case won't reach here.
874  */
875  Assert(!IsRelationExtensionLockHeld);
876 
877  /*
878  * Prepare to emit a WAL record if acquisition of this lock needs to be
879  * replayed in a standby server.
880  *
881  * Here we prepare to log; after lock is acquired we'll issue log record.
882  * This arrangement simplifies error recovery in case the preparation step
883  * fails.
884  *
885  * Only AccessExclusiveLocks can conflict with lock types that read-only
886  * transactions can acquire in a standby server. Make sure this definition
887  * matches the one in GetRunningTransactionLocks().
888  */
889  if (lockmode >= AccessExclusiveLock &&
890  locktag->locktag_type == LOCKTAG_RELATION &&
891  !RecoveryInProgress() &&
893  {
895  log_lock = true;
896  }
897 
898  /*
899  * Attempt to take lock via fast path, if eligible. But if we remember
900  * having filled up the fast path array, we don't attempt to make any
901  * further use of it until we release some locks. It's possible that some
902  * other backend has transferred some of those locks to the shared hash
903  * table, leaving space free, but it's not worth acquiring the LWLock just
904  * to check. It's also possible that we're acquiring a second or third
905  * lock type on a relation we have already locked using the fast-path, but
906  * for now we don't worry about that case either.
907  */
908  if (EligibleForRelationFastPath(locktag, lockmode) &&
910  {
911  uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
912  bool acquired;
913 
914  /*
915  * LWLockAcquire acts as a memory sequencing point, so it's safe to
916  * assume that any strong locker whose increment to
917  * FastPathStrongRelationLocks->counts becomes visible after we test
918  * it has yet to begin to transfer fast-path locks.
919  */
921  if (FastPathStrongRelationLocks->count[fasthashcode] != 0)
922  acquired = false;
923  else
924  acquired = FastPathGrantRelationLock(locktag->locktag_field2,
925  lockmode);
927  if (acquired)
928  {
929  /*
930  * The locallock might contain stale pointers to some old shared
931  * objects; we MUST reset these to null before considering the
932  * lock to be acquired via fast-path.
933  */
934  locallock->lock = NULL;
935  locallock->proclock = NULL;
936  GrantLockLocal(locallock, owner);
937  return LOCKACQUIRE_OK;
938  }
939  }
940 
941  /*
942  * If this lock could potentially have been taken via the fast-path by
943  * some other backend, we must (temporarily) disable further use of the
944  * fast-path for this lock tag, and migrate any locks already taken via
945  * this method to the main lock table.
946  */
947  if (ConflictsWithRelationFastPath(locktag, lockmode))
948  {
949  uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
950 
951  BeginStrongLockAcquire(locallock, fasthashcode);
952  if (!FastPathTransferRelationLocks(lockMethodTable, locktag,
953  hashcode))
954  {
956  if (locallock->nLocks == 0)
957  RemoveLocalLock(locallock);
958  if (locallockp)
959  *locallockp = NULL;
960  if (reportMemoryError)
961  ereport(ERROR,
962  (errcode(ERRCODE_OUT_OF_MEMORY),
963  errmsg("out of shared memory"),
964  errhint("You might need to increase %s.", "max_locks_per_transaction")));
965  else
966  return LOCKACQUIRE_NOT_AVAIL;
967  }
968  }
969 
970  /*
971  * We didn't find the lock in our LOCALLOCK table, and we didn't manage to
972  * take it via the fast-path, either, so we've got to mess with the shared
973  * lock table.
974  */
975  partitionLock = LockHashPartitionLock(hashcode);
976 
977  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
978 
979  /*
980  * Find or create lock and proclock entries with this tag
981  *
982  * Note: if the locallock object already existed, it might have a pointer
983  * to the lock already ... but we should not assume that that pointer is
984  * valid, since a lock object with zero hold and request counts can go
985  * away anytime. So we have to use SetupLockInTable() to recompute the
986  * lock and proclock pointers, even if they're already set.
987  */
988  proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
989  hashcode, lockmode);
990  if (!proclock)
991  {
993  LWLockRelease(partitionLock);
994  if (locallock->nLocks == 0)
995  RemoveLocalLock(locallock);
996  if (locallockp)
997  *locallockp = NULL;
998  if (reportMemoryError)
999  ereport(ERROR,
1000  (errcode(ERRCODE_OUT_OF_MEMORY),
1001  errmsg("out of shared memory"),
1002  errhint("You might need to increase %s.", "max_locks_per_transaction")));
1003  else
1004  return LOCKACQUIRE_NOT_AVAIL;
1005  }
1006  locallock->proclock = proclock;
1007  lock = proclock->tag.myLock;
1008  locallock->lock = lock;
1009 
1010  /*
1011  * If lock requested conflicts with locks requested by waiters, must join
1012  * wait queue. Otherwise, check for conflict with already-held locks.
1013  * (That's last because most complex check.)
1014  */
1015  if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
1016  found_conflict = true;
1017  else
1018  found_conflict = LockCheckConflicts(lockMethodTable, lockmode,
1019  lock, proclock);
1020 
1021  if (!found_conflict)
1022  {
1023  /* No conflict with held or previously requested locks */
1024  GrantLock(lock, proclock, lockmode);
1025  GrantLockLocal(locallock, owner);
1026  }
1027  else
1028  {
1029  /*
1030  * We can't acquire the lock immediately. If caller specified no
1031  * blocking, remove useless table entries and return
1032  * LOCKACQUIRE_NOT_AVAIL without waiting.
1033  */
1034  if (dontWait)
1035  {
1037  if (proclock->holdMask == 0)
1038  {
1039  uint32 proclock_hashcode;
1040 
1041  proclock_hashcode = ProcLockHashCode(&proclock->tag, hashcode);
1042  dlist_delete(&proclock->lockLink);
1043  dlist_delete(&proclock->procLink);
1045  &(proclock->tag),
1046  proclock_hashcode,
1047  HASH_REMOVE,
1048  NULL))
1049  elog(PANIC, "proclock table corrupted");
1050  }
1051  else
1052  PROCLOCK_PRINT("LockAcquire: NOWAIT", proclock);
1053  lock->nRequested--;
1054  lock->requested[lockmode]--;
1055  LOCK_PRINT("LockAcquire: conditional lock failed", lock, lockmode);
1056  Assert((lock->nRequested > 0) && (lock->requested[lockmode] >= 0));
1057  Assert(lock->nGranted <= lock->nRequested);
1058  LWLockRelease(partitionLock);
1059  if (locallock->nLocks == 0)
1060  RemoveLocalLock(locallock);
1061  if (locallockp)
1062  *locallockp = NULL;
1063  return LOCKACQUIRE_NOT_AVAIL;
1064  }
1065 
1066  /*
1067  * Set bitmask of locks this process already holds on this object.
1068  */
1069  MyProc->heldLocks = proclock->holdMask;
1070 
1071  /*
1072  * Sleep till someone wakes me up.
1073  */
1074 
1075  TRACE_POSTGRESQL_LOCK_WAIT_START(locktag->locktag_field1,
1076  locktag->locktag_field2,
1077  locktag->locktag_field3,
1078  locktag->locktag_field4,
1079  locktag->locktag_type,
1080  lockmode);
1081 
1082  WaitOnLock(locallock, owner);
1083 
1084  TRACE_POSTGRESQL_LOCK_WAIT_DONE(locktag->locktag_field1,
1085  locktag->locktag_field2,
1086  locktag->locktag_field3,
1087  locktag->locktag_field4,
1088  locktag->locktag_type,
1089  lockmode);
1090 
1091  /*
1092  * NOTE: do not do any material change of state between here and
1093  * return. All required changes in locktable state must have been
1094  * done when the lock was granted to us --- see notes in WaitOnLock.
1095  */
1096 
1097  /*
1098  * Check the proclock entry status, in case something in the ipc
1099  * communication doesn't work correctly.
1100  */
1101  if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
1102  {
1104  PROCLOCK_PRINT("LockAcquire: INCONSISTENT", proclock);
1105  LOCK_PRINT("LockAcquire: INCONSISTENT", lock, lockmode);
1106  /* Should we retry ? */
1107  LWLockRelease(partitionLock);
1108  elog(ERROR, "LockAcquire failed");
1109  }
1110  PROCLOCK_PRINT("LockAcquire: granted", proclock);
1111  LOCK_PRINT("LockAcquire: granted", lock, lockmode);
1112  }
1113 
1114  /*
1115  * Lock state is fully up-to-date now; if we error out after this, no
1116  * special error cleanup is required.
1117  */
1119 
1120  LWLockRelease(partitionLock);
1121 
1122  /*
1123  * Emit a WAL record if acquisition of this lock needs to be replayed in a
1124  * standby server.
1125  */
1126  if (log_lock)
1127  {
1128  /*
1129  * Decode the locktag back to the original values, to avoid sending
1130  * lots of empty bytes with every message. See lock.h to check how a
1131  * locktag is defined for LOCKTAG_RELATION
1132  */
1134  locktag->locktag_field2);
1135  }
1136 
1137  return LOCKACQUIRE_OK;
1138 }
#define LOG
Definition: elog.h:31
static void RemoveLocalLock(LOCALLOCK *locallock)
Definition: lock.c:1344
static bool FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag, uint32 hashcode)
Definition: lock.c:2663
void AbortStrongLockAcquire(void)
Definition: lock.c:1728
static bool FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode)
Definition: lock.c:2596
static void WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner)
Definition: lock.c:1785
#define EligibleForRelationFastPath(locktag, mode)
Definition: lock.c:215
static void BeginStrongLockAcquire(LOCALLOCK *locallock, uint32 fasthashcode)
Definition: lock.c:1692
bool LockCheckConflicts(LockMethod lockMethodTable, LOCKMODE lockmode, LOCK *lock, PROCLOCK *proclock)
Definition: lock.c:1397
static void FinishStrongLockAcquire(void)
Definition: lock.c:1718
@ LOCKTAG_OBJECT
Definition: lock.h:145
@ LOCKACQUIRE_ALREADY_CLEAR
Definition: lock.h:505
@ LOCKACQUIRE_OK
Definition: lock.h:503
@ LOCKACQUIRE_ALREADY_HELD
Definition: lock.h:504
@ LOCKACQUIRE_NOT_AVAIL
Definition: lock.h:502
#define RowExclusiveLock
Definition: lockdefs.h:38
ResourceOwner CurrentResourceOwner
Definition: resowner.c:147
void LogAccessExclusiveLockPrepare(void)
Definition: standby.c:1442
void LogAccessExclusiveLock(Oid dbOid, Oid relOid)
Definition: standby.c:1425
bool lockCleared
Definition: lock.h:440
uint16 locktag_field4
Definition: lock.h:169
LOCKMASK heldLocks
Definition: proc.h:226
bool RecoveryInProgress(void)
Definition: xlog.c:5948
#define XLogStandbyInfoActive()
Definition: xlog.h:118
bool InRecovery
Definition: xlogutils.c:53

References AbortStrongLockAcquire(), AccessExclusiveLock, Assert(), BeginStrongLockAcquire(), ConflictsWithRelationFastPath, LockMethodData::conflictTab, FastPathStrongRelationLockData::count, CurrentResourceOwner, dlist_delete(), EligibleForRelationFastPath, elog(), ereport, errcode(), errhint(), errmsg(), ERROR, FastPathGrantRelationLock(), FastPathLocalUseCount, FastPathStrongLockHashPartition, FastPathStrongRelationLocks, FastPathTransferRelationLocks(), FinishStrongLockAcquire(), FP_LOCK_SLOTS_PER_BACKEND, PGPROC::fpInfoLock, GrantLock(), GrantLockLocal(), HASH_ENTER, HASH_REMOVE, hash_search(), hash_search_with_hash_value(), LOCALLOCK::hashcode, PGPROC::heldLocks, PROCLOCK::holdMask, LOCALLOCK::holdsStrongLockCount, InRecovery, lengthof, LOCALLOCKTAG::lock, LOCALLOCK::lock, LOCK_PRINT, LOCKACQUIRE_ALREADY_CLEAR, LOCKACQUIRE_ALREADY_HELD, LOCKACQUIRE_NOT_AVAIL, LOCKACQUIRE_OK, LOCKBIT_ON, LockCheckConflicts(), LOCALLOCK::lockCleared, LockHashPartitionLock, PROCLOCK::lockLink, LockMethodLocalHash, LockMethodProcLockHash, LockMethods, LockMethodData::lockModeNames, LOCALLOCK::lockOwners, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_field3, LOCKTAG::locktag_field4, LOCKTAG::locktag_lockmethodid, LOCKTAG_OBJECT, LOCKTAG_RELATION, LOCKTAG::locktag_type, LockTagHashCode(), LOG, LogAccessExclusiveLock(), LogAccessExclusiveLockPrepare(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), LOCALLOCK::maxLockOwners, MemoryContextAlloc(), MemSet, LOCALLOCKTAG::mode, PROCLOCKTAG::myLock, MyProc, LOCK::nGranted, LOCALLOCK::nLocks, LOCK::nRequested, LockMethodData::numLockModes, LOCALLOCK::numLockOwners, PANIC, PROCLOCK::procLink, LOCALLOCK::proclock, PROCLOCK_PRINT, ProcLockHashCode(), RecoveryInProgress(), RemoveLocalLock(), repalloc(), LOCK::requested, RowExclusiveLock, SetupLockInTable(), PROCLOCK::tag, TopMemoryContext, LOCK::waitMask, WaitOnLock(), and XLogStandbyInfoActive.

Referenced by ConditionalLockRelation(), ConditionalLockRelationOid(), LockAcquire(), LockRelation(), LockRelationId(), and LockRelationOid().

◆ LockCheckConflicts()

bool LockCheckConflicts ( LockMethod  lockMethodTable,
LOCKMODE  lockmode,
LOCK lock,
PROCLOCK proclock 
)

Definition at line 1397 of file lock.c.

1401 {
1402  int numLockModes = lockMethodTable->numLockModes;
1403  LOCKMASK myLocks;
1404  int conflictMask = lockMethodTable->conflictTab[lockmode];
1405  int conflictsRemaining[MAX_LOCKMODES];
1406  int totalConflictsRemaining = 0;
1407  dlist_iter proclock_iter;
1408  int i;
1409 
1410  /*
1411  * first check for global conflicts: If no locks conflict with my request,
1412  * then I get the lock.
1413  *
1414  * Checking for conflict: lock->grantMask represents the types of
1415  * currently held locks. conflictTable[lockmode] has a bit set for each
1416  * type of lock that conflicts with request. Bitwise compare tells if
1417  * there is a conflict.
1418  */
1419  if (!(conflictMask & lock->grantMask))
1420  {
1421  PROCLOCK_PRINT("LockCheckConflicts: no conflict", proclock);
1422  return false;
1423  }
1424 
1425  /*
1426  * Rats. Something conflicts. But it could still be my own lock, or a
1427  * lock held by another member of my locking group. First, figure out how
1428  * many conflicts remain after subtracting out any locks I hold myself.
1429  */
1430  myLocks = proclock->holdMask;
1431  for (i = 1; i <= numLockModes; i++)
1432  {
1433  if ((conflictMask & LOCKBIT_ON(i)) == 0)
1434  {
1435  conflictsRemaining[i] = 0;
1436  continue;
1437  }
1438  conflictsRemaining[i] = lock->granted[i];
1439  if (myLocks & LOCKBIT_ON(i))
1440  --conflictsRemaining[i];
1441  totalConflictsRemaining += conflictsRemaining[i];
1442  }
1443 
1444  /* If no conflicts remain, we get the lock. */
1445  if (totalConflictsRemaining == 0)
1446  {
1447  PROCLOCK_PRINT("LockCheckConflicts: resolved (simple)", proclock);
1448  return false;
1449  }
1450 
1451  /* If no group locking, it's definitely a conflict. */
1452  if (proclock->groupLeader == MyProc && MyProc->lockGroupLeader == NULL)
1453  {
1454  Assert(proclock->tag.myProc == MyProc);
1455  PROCLOCK_PRINT("LockCheckConflicts: conflicting (simple)",
1456  proclock);
1457  return true;
1458  }
1459 
1460  /*
1461  * The relation extension lock conflict even between the group members.
1462  */
1463  if (LOCK_LOCKTAG(*lock) == LOCKTAG_RELATION_EXTEND)
1464  {
1465  PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)",
1466  proclock);
1467  return true;
1468  }
1469 
1470  /*
1471  * Locks held in conflicting modes by members of our own lock group are
1472  * not real conflicts; we can subtract those out and see if we still have
1473  * a conflict. This is O(N) in the number of processes holding or
1474  * awaiting locks on this object. We could improve that by making the
1475  * shared memory state more complex (and larger) but it doesn't seem worth
1476  * it.
1477  */
1478  dlist_foreach(proclock_iter, &lock->procLocks)
1479  {
1480  PROCLOCK *otherproclock =
1481  dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
1482 
1483  if (proclock != otherproclock &&
1484  proclock->groupLeader == otherproclock->groupLeader &&
1485  (otherproclock->holdMask & conflictMask) != 0)
1486  {
1487  int intersectMask = otherproclock->holdMask & conflictMask;
1488 
1489  for (i = 1; i <= numLockModes; i++)
1490  {
1491  if ((intersectMask & LOCKBIT_ON(i)) != 0)
1492  {
1493  if (conflictsRemaining[i] <= 0)
1494  elog(PANIC, "proclocks held do not match lock");
1495  conflictsRemaining[i]--;
1496  totalConflictsRemaining--;
1497  }
1498  }
1499 
1500  if (totalConflictsRemaining == 0)
1501  {
1502  PROCLOCK_PRINT("LockCheckConflicts: resolved (group)",
1503  proclock);
1504  return false;
1505  }
1506  }
1507  }
1508 
1509  /* Nope, it's a real conflict. */
1510  PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)", proclock);
1511  return true;
1512 }
#define LOCK_LOCKTAG(lock)
Definition: lock.h:325

References Assert(), LockMethodData::conflictTab, dlist_iter::cur, dlist_container, dlist_foreach, elog(), LOCK::granted, LOCK::grantMask, PROCLOCK::groupLeader, PROCLOCK::holdMask, i, LOCK_LOCKTAG, LOCKBIT_ON, PGPROC::lockGroupLeader, LOCKTAG_RELATION_EXTEND, MAX_LOCKMODES, MyProc, PROCLOCKTAG::myProc, LockMethodData::numLockModes, PANIC, PROCLOCK_PRINT, LOCK::procLocks, and PROCLOCK::tag.

Referenced by LockAcquireExtended(), ProcLockWakeup(), and ProcSleep().

◆ LockHasWaiters()

bool LockHasWaiters ( const LOCKTAG locktag,
LOCKMODE  lockmode,
bool  sessionLock 
)

Definition at line 622 of file lock.c.

623 {
624  LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
625  LockMethod lockMethodTable;
626  LOCALLOCKTAG localtag;
627  LOCALLOCK *locallock;
628  LOCK *lock;
629  PROCLOCK *proclock;
630  LWLock *partitionLock;
631  bool hasWaiters = false;
632 
633  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
634  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
635  lockMethodTable = LockMethods[lockmethodid];
636  if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
637  elog(ERROR, "unrecognized lock mode: %d", lockmode);
638 
639 #ifdef LOCK_DEBUG
640  if (LOCK_DEBUG_ENABLED(locktag))
641  elog(LOG, "LockHasWaiters: lock [%u,%u] %s",
642  locktag->locktag_field1, locktag->locktag_field2,
643  lockMethodTable->lockModeNames[lockmode]);
644 #endif
645 
646  /*
647  * Find the LOCALLOCK entry for this lock and lockmode
648  */
649  MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
650  localtag.lock = *locktag;
651  localtag.mode = lockmode;
652 
653  locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
654  &localtag,
655  HASH_FIND, NULL);
656 
657  /*
658  * let the caller print its own error message, too. Do not ereport(ERROR).
659  */
660  if (!locallock || locallock->nLocks <= 0)
661  {
662  elog(WARNING, "you don't own a lock of type %s",
663  lockMethodTable->lockModeNames[lockmode]);
664  return false;
665  }
666 
667  /*
668  * Check the shared lock table.
669  */
670  partitionLock = LockHashPartitionLock(locallock->hashcode);
671 
672  LWLockAcquire(partitionLock, LW_SHARED);
673 
674  /*
675  * We don't need to re-find the lock or proclock, since we kept their
676  * addresses in the locallock table, and they couldn't have been removed
677  * while we were holding a lock on them.
678  */
679  lock = locallock->lock;
680  LOCK_PRINT("LockHasWaiters: found", lock, lockmode);
681  proclock = locallock->proclock;
682  PROCLOCK_PRINT("LockHasWaiters: found", proclock);
683 
684  /*
685  * Double-check that we are actually holding a lock of the type we want to
686  * release.
687  */
688  if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
689  {
690  PROCLOCK_PRINT("LockHasWaiters: WRONGTYPE", proclock);
691  LWLockRelease(partitionLock);
692  elog(WARNING, "you don't own a lock of type %s",
693  lockMethodTable->lockModeNames[lockmode]);
694  RemoveLocalLock(locallock);
695  return false;
696  }
697 
698  /*
699  * Do the checking.
700  */
701  if ((lockMethodTable->conflictTab[lockmode] & lock->waitMask) != 0)
702  hasWaiters = true;
703 
704  LWLockRelease(partitionLock);
705 
706  return hasWaiters;
707 }
#define WARNING
Definition: elog.h:36

References LockMethodData::conflictTab, elog(), ERROR, HASH_FIND, hash_search(), LOCALLOCK::hashcode, PROCLOCK::holdMask, lengthof, LOCALLOCKTAG::lock, LOCALLOCK::lock, LOCK_PRINT, LOCKBIT_ON, LockHashPartitionLock, LockMethodLocalHash, LockMethods, LockMethodData::lockModeNames, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_lockmethodid, LOG, LW_SHARED, LWLockAcquire(), LWLockRelease(), MemSet, LOCALLOCKTAG::mode, LOCALLOCK::nLocks, LockMethodData::numLockModes, LOCALLOCK::proclock, PROCLOCK_PRINT, RemoveLocalLock(), LOCK::waitMask, and WARNING.

Referenced by LockHasWaitersRelation().

◆ LockHeldByMe()

bool LockHeldByMe ( const LOCKTAG locktag,
LOCKMODE  lockmode 
)

Definition at line 586 of file lock.c.

587 {
588  LOCALLOCKTAG localtag;
589  LOCALLOCK *locallock;
590 
591  /*
592  * See if there is a LOCALLOCK entry for this lock and lockmode
593  */
594  MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
595  localtag.lock = *locktag;
596  localtag.mode = lockmode;
597 
598  locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
599  &localtag,
600  HASH_FIND, NULL);
601 
602  return (locallock && locallock->nLocks > 0);
603 }

References HASH_FIND, hash_search(), LOCALLOCKTAG::lock, LockMethodLocalHash, MemSet, LOCALLOCKTAG::mode, and LOCALLOCK::nLocks.

Referenced by CheckRelationLockedByMe().

◆ LockReassignCurrentOwner()

void LockReassignCurrentOwner ( LOCALLOCK **  locallocks,
int  nlocks 
)

Definition at line 2520 of file lock.c.

2521 {
2523 
2524  Assert(parent != NULL);
2525 
2526  if (locallocks == NULL)
2527  {
2528  HASH_SEQ_STATUS status;
2529  LOCALLOCK *locallock;
2530 
2532 
2533  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2534  LockReassignOwner(locallock, parent);
2535  }
2536  else
2537  {
2538  int i;
2539 
2540  for (i = nlocks - 1; i >= 0; i--)
2541  LockReassignOwner(locallocks[i], parent);
2542  }
2543 }
static void LockReassignOwner(LOCALLOCK *locallock, ResourceOwner parent)
Definition: lock.c:2550
ResourceOwner ResourceOwnerGetParent(ResourceOwner owner)
Definition: resowner.c:819

References Assert(), CurrentResourceOwner, hash_seq_init(), hash_seq_search(), i, LockMethodLocalHash, LockReassignOwner(), and ResourceOwnerGetParent().

Referenced by ResourceOwnerReleaseInternal().

◆ LockReassignOwner()

static void LockReassignOwner ( LOCALLOCK locallock,
ResourceOwner  parent 
)
static

Definition at line 2550 of file lock.c.

2551 {
2552  LOCALLOCKOWNER *lockOwners;
2553  int i;
2554  int ic = -1;
2555  int ip = -1;
2556 
2557  /*
2558  * Scan to see if there are any locks belonging to current owner or its
2559  * parent
2560  */
2561  lockOwners = locallock->lockOwners;
2562  for (i = locallock->numLockOwners - 1; i >= 0; i--)
2563  {
2564  if (lockOwners[i].owner == CurrentResourceOwner)
2565  ic = i;
2566  else if (lockOwners[i].owner == parent)
2567  ip = i;
2568  }
2569 
2570  if (ic < 0)
2571  return; /* no current locks */
2572 
2573  if (ip < 0)
2574  {
2575  /* Parent has no slot, so just give it the child's slot */
2576  lockOwners[ic].owner = parent;
2577  ResourceOwnerRememberLock(parent, locallock);
2578  }
2579  else
2580  {
2581  /* Merge child's count with parent's */
2582  lockOwners[ip].nLocks += lockOwners[ic].nLocks;
2583  /* compact out unused slot */
2584  locallock->numLockOwners--;
2585  if (ic < locallock->numLockOwners)
2586  lockOwners[ic] = lockOwners[locallock->numLockOwners];
2587  }
2589 }
void ResourceOwnerForgetLock(ResourceOwner owner, LOCALLOCK *locallock)
Definition: resowner.c:1070

References CurrentResourceOwner, i, LOCALLOCK::lockOwners, LOCALLOCKOWNER::nLocks, LOCALLOCK::numLockOwners, LOCALLOCKOWNER::owner, ResourceOwnerForgetLock(), and ResourceOwnerRememberLock().

Referenced by LockReassignCurrentOwner().

◆ LockRefindAndRelease()

static void LockRefindAndRelease ( LockMethod  lockMethodTable,
PGPROC proc,
LOCKTAG locktag,
LOCKMODE  lockmode,
bool  decrement_strong_lock_count 
)
static

Definition at line 3063 of file lock.c.

3066 {
3067  LOCK *lock;
3068  PROCLOCK *proclock;
3069  PROCLOCKTAG proclocktag;
3070  uint32 hashcode;
3071  uint32 proclock_hashcode;
3072  LWLock *partitionLock;
3073  bool wakeupNeeded;
3074 
3075  hashcode = LockTagHashCode(locktag);
3076  partitionLock = LockHashPartitionLock(hashcode);
3077 
3078  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
3079 
3080  /*
3081  * Re-find the lock object (it had better be there).
3082  */
3084  locktag,
3085  hashcode,
3086  HASH_FIND,
3087  NULL);
3088  if (!lock)
3089  elog(PANIC, "failed to re-find shared lock object");
3090 
3091  /*
3092  * Re-find the proclock object (ditto).
3093  */
3094  proclocktag.myLock = lock;
3095  proclocktag.myProc = proc;
3096 
3097  proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
3098 
3100  &proclocktag,
3101  proclock_hashcode,
3102  HASH_FIND,
3103  NULL);
3104  if (!proclock)
3105  elog(PANIC, "failed to re-find shared proclock object");
3106 
3107  /*
3108  * Double-check that we are actually holding a lock of the type we want to
3109  * release.
3110  */
3111  if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
3112  {
3113  PROCLOCK_PRINT("lock_twophase_postcommit: WRONGTYPE", proclock);
3114  LWLockRelease(partitionLock);
3115  elog(WARNING, "you don't own a lock of type %s",
3116  lockMethodTable->lockModeNames[lockmode]);
3117  return;
3118  }
3119 
3120  /*
3121  * Do the releasing. CleanUpLock will waken any now-wakable waiters.
3122  */
3123  wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
3124 
3125  CleanUpLock(lock, proclock,
3126  lockMethodTable, hashcode,
3127  wakeupNeeded);
3128 
3129  LWLockRelease(partitionLock);
3130 
3131  /*
3132  * Decrement strong lock count. This logic is needed only for 2PC.
3133  */
3134  if (decrement_strong_lock_count
3135  && ConflictsWithRelationFastPath(locktag, lockmode))
3136  {
3137  uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
3138 
3140  Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
3141  FastPathStrongRelationLocks->count[fasthashcode]--;
3143  }
3144 }
static bool UnGrantLock(LOCK *lock, LOCKMODE lockmode, PROCLOCK *proclock, LockMethod lockMethodTable)
Definition: lock.c:1549
static void CleanUpLock(LOCK *lock, PROCLOCK *proclock, LockMethod lockMethodTable, uint32 hashcode, bool wakeupNeeded)
Definition: lock.c:1606

References Assert(), CleanUpLock(), ConflictsWithRelationFastPath, FastPathStrongRelationLockData::count, elog(), FastPathStrongLockHashPartition, FastPathStrongRelationLocks, HASH_FIND, hash_search_with_hash_value(), PROCLOCK::holdMask, LOCKBIT_ON, LockHashPartitionLock, LockMethodLockHash, LockMethodProcLockHash, LockMethodData::lockModeNames, LockTagHashCode(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), FastPathStrongRelationLockData::mutex, PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, PANIC, PROCLOCK_PRINT, ProcLockHashCode(), SpinLockAcquire, SpinLockRelease, UnGrantLock(), and WARNING.

Referenced by lock_twophase_postcommit(), LockReleaseAll(), and VirtualXactLockTableCleanup().

◆ LockRelease()

bool LockRelease ( const LOCKTAG locktag,
LOCKMODE  lockmode,
bool  sessionLock 
)

Definition at line 1925 of file lock.c.

1926 {
1927  LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
1928  LockMethod lockMethodTable;
1929  LOCALLOCKTAG localtag;
1930  LOCALLOCK *locallock;
1931  LOCK *lock;
1932  PROCLOCK *proclock;
1933  LWLock *partitionLock;
1934  bool wakeupNeeded;
1935 
1936  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
1937  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
1938  lockMethodTable = LockMethods[lockmethodid];
1939  if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
1940  elog(ERROR, "unrecognized lock mode: %d", lockmode);
1941 
1942 #ifdef LOCK_DEBUG
1943  if (LOCK_DEBUG_ENABLED(locktag))
1944  elog(LOG, "LockRelease: lock [%u,%u] %s",
1945  locktag->locktag_field1, locktag->locktag_field2,
1946  lockMethodTable->lockModeNames[lockmode]);
1947 #endif
1948 
1949  /*
1950  * Find the LOCALLOCK entry for this lock and lockmode
1951  */
1952  MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
1953  localtag.lock = *locktag;
1954  localtag.mode = lockmode;
1955 
1956  locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
1957  &localtag,
1958  HASH_FIND, NULL);
1959 
1960  /*
1961  * let the caller print its own error message, too. Do not ereport(ERROR).
1962  */
1963  if (!locallock || locallock->nLocks <= 0)
1964  {
1965  elog(WARNING, "you don't own a lock of type %s",
1966  lockMethodTable->lockModeNames[lockmode]);
1967  return false;
1968  }
1969 
1970  /*
1971  * Decrease the count for the resource owner.
1972  */
1973  {
1974  LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
1975  ResourceOwner owner;
1976  int i;
1977 
1978  /* Identify owner for lock */
1979  if (sessionLock)
1980  owner = NULL;
1981  else
1982  owner = CurrentResourceOwner;
1983 
1984  for (i = locallock->numLockOwners - 1; i >= 0; i--)
1985  {
1986  if (lockOwners[i].owner == owner)
1987  {
1988  Assert(lockOwners[i].nLocks > 0);
1989  if (--lockOwners[i].nLocks == 0)
1990  {
1991  if (owner != NULL)
1992  ResourceOwnerForgetLock(owner, locallock);
1993  /* compact out unused slot */
1994  locallock->numLockOwners--;
1995  if (i < locallock->numLockOwners)
1996  lockOwners[i] = lockOwners[locallock->numLockOwners];
1997  }
1998  break;
1999  }
2000  }
2001  if (i < 0)
2002  {
2003  /* don't release a lock belonging to another owner */
2004  elog(WARNING, "you don't own a lock of type %s",
2005  lockMethodTable->lockModeNames[lockmode]);
2006  return false;
2007  }
2008  }
2009 
2010  /*
2011  * Decrease the total local count. If we're still holding the lock, we're
2012  * done.
2013  */
2014  locallock->nLocks--;
2015 
2016  if (locallock->nLocks > 0)
2017  return true;
2018 
2019  /*
2020  * At this point we can no longer suppose we are clear of invalidation
2021  * messages related to this lock. Although we'll delete the LOCALLOCK
2022  * object before any intentional return from this routine, it seems worth
2023  * the trouble to explicitly reset lockCleared right now, just in case
2024  * some error prevents us from deleting the LOCALLOCK.
2025  */
2026  locallock->lockCleared = false;
2027 
2028  /* Attempt fast release of any lock eligible for the fast path. */
2029  if (EligibleForRelationFastPath(locktag, lockmode) &&
2031  {
2032  bool released;
2033 
2034  /*
2035  * We might not find the lock here, even if we originally entered it
2036  * here. Another backend may have moved it to the main table.
2037  */
2039  released = FastPathUnGrantRelationLock(locktag->locktag_field2,
2040  lockmode);
2042  if (released)
2043  {
2044  RemoveLocalLock(locallock);
2045  return true;
2046  }
2047  }
2048 
2049  /*
2050  * Otherwise we've got to mess with the shared lock table.
2051  */
2052  partitionLock = LockHashPartitionLock(locallock->hashcode);
2053 
2054  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2055 
2056  /*
2057  * Normally, we don't need to re-find the lock or proclock, since we kept
2058  * their addresses in the locallock table, and they couldn't have been
2059  * removed while we were holding a lock on them. But it's possible that
2060  * the lock was taken fast-path and has since been moved to the main hash
2061  * table by another backend, in which case we will need to look up the
2062  * objects here. We assume the lock field is NULL if so.
2063  */
2064  lock = locallock->lock;
2065  if (!lock)
2066  {
2067  PROCLOCKTAG proclocktag;
2068 
2069  Assert(EligibleForRelationFastPath(locktag, lockmode));
2071  locktag,
2072  locallock->hashcode,
2073  HASH_FIND,
2074  NULL);
2075  if (!lock)
2076  elog(ERROR, "failed to re-find shared lock object");
2077  locallock->lock = lock;
2078 
2079  proclocktag.myLock = lock;
2080  proclocktag.myProc = MyProc;
2082  &proclocktag,
2083  HASH_FIND,
2084  NULL);
2085  if (!locallock->proclock)
2086  elog(ERROR, "failed to re-find shared proclock object");
2087  }
2088  LOCK_PRINT("LockRelease: found", lock, lockmode);
2089  proclock = locallock->proclock;
2090  PROCLOCK_PRINT("LockRelease: found", proclock);
2091 
2092  /*
2093  * Double-check that we are actually holding a lock of the type we want to
2094  * release.
2095  */
2096  if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
2097  {
2098  PROCLOCK_PRINT("LockRelease: WRONGTYPE", proclock);
2099  LWLockRelease(partitionLock);
2100  elog(WARNING, "you don't own a lock of type %s",
2101  lockMethodTable->lockModeNames[lockmode]);
2102  RemoveLocalLock(locallock);
2103  return false;
2104  }
2105 
2106  /*
2107  * Do the releasing. CleanUpLock will waken any now-wakable waiters.
2108  */
2109  wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
2110 
2111  CleanUpLock(lock, proclock,
2112  lockMethodTable, locallock->hashcode,
2113  wakeupNeeded);
2114 
2115  LWLockRelease(partitionLock);
2116 
2117  RemoveLocalLock(locallock);
2118  return true;
2119 }
static bool FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode)
Definition: lock.c:2633

References Assert(), CleanUpLock(), CurrentResourceOwner, EligibleForRelationFastPath, elog(), ERROR, FastPathLocalUseCount, FastPathUnGrantRelationLock(), PGPROC::fpInfoLock, HASH_FIND, hash_search(), hash_search_with_hash_value(), LOCALLOCK::hashcode, PROCLOCK::holdMask, i, lengthof, LOCALLOCKTAG::lock, LOCALLOCK::lock, LOCK_PRINT, LOCKBIT_ON, LOCALLOCK::lockCleared, LockHashPartitionLock, LockMethodLocalHash, LockMethodLockHash, LockMethodProcLockHash, LockMethods, LockMethodData::lockModeNames, LOCALLOCK::lockOwners, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_lockmethodid, LOG, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), MemSet, LOCALLOCKTAG::mode, PROCLOCKTAG::myLock, MyProc, PROCLOCKTAG::myProc, LOCALLOCK::nLocks, LockMethodData::numLockModes, LOCALLOCK::numLockOwners, LOCALLOCK::proclock, PROCLOCK_PRINT, RemoveLocalLock(), ResourceOwnerForgetLock(), UnGrantLock(), and WARNING.

Referenced by ConditionalXactLockTableWait(), pg_advisory_unlock_int4(), pg_advisory_unlock_int8(), pg_advisory_unlock_shared_int4(), pg_advisory_unlock_shared_int8(), ReleaseLockIfHeld(), SpeculativeInsertionLockRelease(), SpeculativeInsertionWait(), StandbyReleaseXidEntryLocks(), UnlockApplyTransactionForSession(), UnlockDatabaseObject(), UnlockPage(), UnlockRelation(), UnlockRelationForExtension(), UnlockRelationId(), UnlockRelationIdForSession(), UnlockRelationOid(), UnlockSharedObject(), UnlockSharedObjectForSession(), UnlockTuple(), VirtualXactLock(), XactLockForVirtualXact(), XactLockTableDelete(), and XactLockTableWait().

◆ LockReleaseAll()

void LockReleaseAll ( LOCKMETHODID  lockmethodid,
bool  allLocks 
)

Definition at line 2130 of file lock.c.

2131 {
2132  HASH_SEQ_STATUS status;
2133  LockMethod lockMethodTable;
2134  int i,
2135  numLockModes;
2136  LOCALLOCK *locallock;
2137  LOCK *lock;
2138  int partition;
2139  bool have_fast_path_lwlock = false;
2140 
2141  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2142  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2143  lockMethodTable = LockMethods[lockmethodid];
2144 
2145 #ifdef LOCK_DEBUG
2146  if (*(lockMethodTable->trace_flag))
2147  elog(LOG, "LockReleaseAll: lockmethod=%d", lockmethodid);
2148 #endif
2149 
2150  /*
2151  * Get rid of our fast-path VXID lock, if appropriate. Note that this is
2152  * the only way that the lock we hold on our own VXID can ever get
2153  * released: it is always and only released when a toplevel transaction
2154  * ends.
2155  */
2156  if (lockmethodid == DEFAULT_LOCKMETHOD)
2158 
2159  numLockModes = lockMethodTable->numLockModes;
2160 
2161  /*
2162  * First we run through the locallock table and get rid of unwanted
2163  * entries, then we scan the process's proclocks and get rid of those. We
2164  * do this separately because we may have multiple locallock entries
2165  * pointing to the same proclock, and we daren't end up with any dangling
2166  * pointers. Fast-path locks are cleaned up during the locallock table
2167  * scan, though.
2168  */
2170 
2171  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2172  {
2173  /*
2174  * If the LOCALLOCK entry is unused, we must've run out of shared
2175  * memory while trying to set up this lock. Just forget the local
2176  * entry.
2177  */
2178  if (locallock->nLocks == 0)
2179  {
2180  RemoveLocalLock(locallock);
2181  continue;
2182  }
2183 
2184  /* Ignore items that are not of the lockmethod to be removed */
2185  if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
2186  continue;
2187 
2188  /*
2189  * If we are asked to release all locks, we can just zap the entry.
2190  * Otherwise, must scan to see if there are session locks. We assume
2191  * there is at most one lockOwners entry for session locks.
2192  */
2193  if (!allLocks)
2194  {
2195  LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
2196 
2197  /* If session lock is above array position 0, move it down to 0 */
2198  for (i = 0; i < locallock->numLockOwners; i++)
2199  {
2200  if (lockOwners[i].owner == NULL)
2201  lockOwners[0] = lockOwners[i];
2202  else
2203  ResourceOwnerForgetLock(lockOwners[i].owner, locallock);
2204  }
2205 
2206  if (locallock->numLockOwners > 0 &&
2207  lockOwners[0].owner == NULL &&
2208  lockOwners[0].nLocks > 0)
2209  {
2210  /* Fix the locallock to show just the session locks */
2211  locallock->nLocks = lockOwners[0].nLocks;
2212  locallock->numLockOwners = 1;
2213  /* We aren't deleting this locallock, so done */
2214  continue;
2215  }
2216  else
2217  locallock->numLockOwners = 0;
2218  }
2219 
2220  /*
2221  * If the lock or proclock pointers are NULL, this lock was taken via
2222  * the relation fast-path (and is not known to have been transferred).
2223  */
2224  if (locallock->proclock == NULL || locallock->lock == NULL)
2225  {
2226  LOCKMODE lockmode = locallock->tag.mode;
2227  Oid relid;
2228 
2229  /* Verify that a fast-path lock is what we've got. */
2230  if (!EligibleForRelationFastPath(&locallock->tag.lock, lockmode))
2231  elog(PANIC, "locallock table corrupted");
2232 
2233  /*
2234  * If we don't currently hold the LWLock that protects our
2235  * fast-path data structures, we must acquire it before attempting
2236  * to release the lock via the fast-path. We will continue to
2237  * hold the LWLock until we're done scanning the locallock table,
2238  * unless we hit a transferred fast-path lock. (XXX is this
2239  * really such a good idea? There could be a lot of entries ...)
2240  */
2241  if (!have_fast_path_lwlock)
2242  {
2244  have_fast_path_lwlock = true;
2245  }
2246 
2247  /* Attempt fast-path release. */
2248  relid = locallock->tag.lock.locktag_field2;
2249  if (FastPathUnGrantRelationLock(relid, lockmode))
2250  {
2251  RemoveLocalLock(locallock);
2252  continue;
2253  }
2254 
2255  /*
2256  * Our lock, originally taken via the fast path, has been
2257  * transferred to the main lock table. That's going to require
2258  * some extra work, so release our fast-path lock before starting.
2259  */
2261  have_fast_path_lwlock = false;
2262 
2263  /*
2264  * Now dump the lock. We haven't got a pointer to the LOCK or
2265  * PROCLOCK in this case, so we have to handle this a bit
2266  * differently than a normal lock release. Unfortunately, this
2267  * requires an extra LWLock acquire-and-release cycle on the
2268  * partitionLock, but hopefully it shouldn't happen often.
2269  */
2270  LockRefindAndRelease(lockMethodTable, MyProc,
2271  &locallock->tag.lock, lockmode, false);
2272  RemoveLocalLock(locallock);
2273  continue;
2274  }
2275 
2276  /* Mark the proclock to show we need to release this lockmode */
2277  if (locallock->nLocks > 0)
2278  locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
2279 
2280  /* And remove the locallock hashtable entry */
2281  RemoveLocalLock(locallock);
2282  }
2283 
2284  /* Done with the fast-path data structures */
2285  if (have_fast_path_lwlock)
2287 
2288  /*
2289  * Now, scan each lock partition separately.
2290  */
2291  for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
2292  {
2293  LWLock *partitionLock;
2294  dlist_head *procLocks = &MyProc->myProcLocks[partition];
2295  dlist_mutable_iter proclock_iter;
2296 
2297  partitionLock = LockHashPartitionLockByIndex(partition);
2298 
2299  /*
2300  * If the proclock list for this partition is empty, we can skip
2301  * acquiring the partition lock. This optimization is trickier than
2302  * it looks, because another backend could be in process of adding
2303  * something to our proclock list due to promoting one of our
2304  * fast-path locks. However, any such lock must be one that we
2305  * decided not to delete above, so it's okay to skip it again now;
2306  * we'd just decide not to delete it again. We must, however, be
2307  * careful to re-fetch the list header once we've acquired the
2308  * partition lock, to be sure we have a valid, up-to-date pointer.
2309  * (There is probably no significant risk if pointer fetch/store is
2310  * atomic, but we don't wish to assume that.)
2311  *
2312  * XXX This argument assumes that the locallock table correctly
2313  * represents all of our fast-path locks. While allLocks mode
2314  * guarantees to clean up all of our normal locks regardless of the
2315  * locallock situation, we lose that guarantee for fast-path locks.
2316  * This is not ideal.
2317  */
2318  if (dlist_is_empty(procLocks))
2319  continue; /* needn't examine this partition */
2320 
2321  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2322 
2323  dlist_foreach_modify(proclock_iter, procLocks)
2324  {
2325  PROCLOCK *proclock = dlist_container(PROCLOCK, procLink, proclock_iter.cur);
2326  bool wakeupNeeded = false;
2327 
2328  Assert(proclock->tag.myProc == MyProc);
2329 
2330  lock = proclock->tag.myLock;
2331 
2332  /* Ignore items that are not of the lockmethod to be removed */
2333  if (LOCK_LOCKMETHOD(*lock) != lockmethodid)
2334  continue;
2335 
2336  /*
2337  * In allLocks mode, force release of all locks even if locallock
2338  * table had problems
2339  */
2340  if (allLocks)
2341  proclock->releaseMask = proclock->holdMask;
2342  else
2343  Assert((proclock->releaseMask & ~proclock->holdMask) == 0);
2344 
2345  /*
2346  * Ignore items that have nothing to be released, unless they have
2347  * holdMask == 0 and are therefore recyclable
2348  */
2349  if (proclock->releaseMask == 0 && proclock->holdMask != 0)
2350  continue;
2351 
2352  PROCLOCK_PRINT("LockReleaseAll", proclock);
2353  LOCK_PRINT("LockReleaseAll", lock, 0);
2354  Assert(lock->nRequested >= 0);
2355  Assert(lock->nGranted >= 0);
2356  Assert(lock->nGranted <= lock->nRequested);
2357  Assert((proclock->holdMask & ~lock->grantMask) == 0);
2358 
2359  /*
2360  * Release the previously-marked lock modes
2361  */
2362  for (i = 1; i <= numLockModes; i++)
2363  {
2364  if (proclock->releaseMask & LOCKBIT_ON(i))
2365  wakeupNeeded |= UnGrantLock(lock, i, proclock,
2366  lockMethodTable);
2367  }
2368  Assert((lock->nRequested >= 0) && (lock->nGranted >= 0));
2369  Assert(lock->nGranted <= lock->nRequested);
2370  LOCK_PRINT("LockReleaseAll: updated", lock, 0);
2371 
2372  proclock->releaseMask = 0;
2373 
2374  /* CleanUpLock will wake up waiters if needed. */
2375  CleanUpLock(lock, proclock,
2376  lockMethodTable,
2377  LockTagHashCode(&lock->tag),
2378  wakeupNeeded);
2379  } /* loop over PROCLOCKs within this partition */
2380 
2381  LWLockRelease(partitionLock);
2382  } /* loop over partitions */
2383 
2384 #ifdef LOCK_DEBUG
2385  if (*(lockMethodTable->trace_flag))
2386  elog(LOG, "LockReleaseAll done");
2387 #endif
2388 }
#define dlist_foreach_modify(iter, lhead)
Definition: ilist.h:640
void VirtualXactLockTableCleanup(void)
Definition: lock.c:4411
#define LOCALLOCK_LOCKMETHOD(llock)
Definition: lock.h:443
const bool * trace_flag
Definition: lock.h:113
dlist_node * cur
Definition: ilist.h:200

References Assert(), CleanUpLock(), dlist_mutable_iter::cur, DEFAULT_LOCKMETHOD, dlist_container, dlist_foreach_modify, dlist_is_empty(), EligibleForRelationFastPath, elog(), ERROR, FastPathUnGrantRelationLock(), PGPROC::fpInfoLock, LOCK::grantMask, hash_seq_init(), hash_seq_search(), PROCLOCK::holdMask, i, lengthof, LOCALLOCK_LOCKMETHOD, LOCALLOCKTAG::lock, LOCALLOCK::lock, LOCK_LOCKMETHOD, LOCK_PRINT, LOCKBIT_ON, LockHashPartitionLockByIndex, LockMethodLocalHash, LockMethods, LOCALLOCK::lockOwners, LockRefindAndRelease(), LOCKTAG::locktag_field2, LockTagHashCode(), LOG, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), LOCALLOCKTAG::mode, PROCLOCKTAG::myLock, MyProc, PROCLOCKTAG::myProc, PGPROC::myProcLocks, LOCK::nGranted, LOCALLOCKOWNER::nLocks, LOCALLOCK::nLocks, LOCK::nRequested, NUM_LOCK_PARTITIONS, LockMethodData::numLockModes, LOCALLOCK::numLockOwners, LOCALLOCKOWNER::owner, PANIC, LOCALLOCK::proclock, PROCLOCK_PRINT, PROCLOCK::releaseMask, RemoveLocalLock(), ResourceOwnerForgetLock(), LOCK::tag, PROCLOCK::tag, LOCALLOCK::tag, LockMethodData::trace_flag, UnGrantLock(), and VirtualXactLockTableCleanup().

Referenced by DiscardAll(), logicalrep_worker_onexit(), ProcReleaseLocks(), and ShutdownPostgres().

◆ LockReleaseCurrentOwner()

void LockReleaseCurrentOwner ( LOCALLOCK **  locallocks,
int  nlocks 
)

Definition at line 2425 of file lock.c.

2426 {
2427  if (locallocks == NULL)
2428  {
2429  HASH_SEQ_STATUS status;
2430  LOCALLOCK *locallock;
2431 
2433 
2434  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2435  ReleaseLockIfHeld(locallock, false);
2436  }
2437  else
2438  {
2439  int i;
2440 
2441  for (i = nlocks - 1; i >= 0; i--)
2442  ReleaseLockIfHeld(locallocks[i], false);
2443  }
2444 }
static void ReleaseLockIfHeld(LOCALLOCK *locallock, bool sessionLock)
Definition: lock.c:2460

References hash_seq_init(), hash_seq_search(), i, LockMethodLocalHash, and ReleaseLockIfHeld().

Referenced by ResourceOwnerReleaseInternal().

◆ LockReleaseSession()

void LockReleaseSession ( LOCKMETHODID  lockmethodid)

Definition at line 2395 of file lock.c.

2396 {
2397  HASH_SEQ_STATUS status;
2398  LOCALLOCK *locallock;
2399 
2400  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2401  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2402 
2404 
2405  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2406  {
2407  /* Ignore items that are not of the specified lock method */
2408  if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
2409  continue;
2410 
2411  ReleaseLockIfHeld(locallock, true);
2412  }
2413 }

References elog(), ERROR, hash_seq_init(), hash_seq_search(), lengthof, LOCALLOCK_LOCKMETHOD, LockMethodLocalHash, LockMethods, and ReleaseLockIfHeld().

Referenced by pg_advisory_unlock_all().

◆ LockShmemSize()

Size LockShmemSize ( void  )

Definition at line 3535 of file lock.c.

3536 {
3537  Size size = 0;
3538  long max_table_size;
3539 
3540  /* lock hash table */
3541  max_table_size = NLOCKENTS();
3542  size = add_size(size, hash_estimate_size(max_table_size, sizeof(LOCK)));
3543 
3544  /* proclock hash table */
3545  max_table_size *= 2;
3546  size = add_size(size, hash_estimate_size(max_table_size, sizeof(PROCLOCK)));
3547 
3548  /*
3549  * Since NLOCKENTS is only an estimate, add 10% safety margin.
3550  */
3551  size = add_size(size, size / 10);
3552 
3553  return size;
3554 }
size_t Size
Definition: c.h:594
Size hash_estimate_size(long num_entries, Size entrysize)
Definition: dynahash.c:781
Size add_size(Size s1, Size s2)
Definition: shmem.c:502

References add_size(), hash_estimate_size(), and NLOCKENTS.

Referenced by CalculateShmemSize().

◆ LockTagHashCode()

uint32 LockTagHashCode ( const LOCKTAG * locktag)

Definition at line 505 of file lock.c.

506 {
507  return get_hash_value(LockMethodLockHash, (const void *) locktag);
508 }
uint32 get_hash_value(HTAB *hashp, const void *keyPtr)
Definition: dynahash.c:909

References get_hash_value(), and LockMethodLockHash.

Referenced by CheckDeadLock(), GetLockConflicts(), lock_twophase_recover(), LockAcquireExtended(), LockRefindAndRelease(), LockReleaseAll(), LockWaiterCount(), proclock_hash(), and VirtualXactLock().

◆ LockWaiterCount()

int LockWaiterCount ( const LOCKTAG * locktag)

Definition at line 4622 of file lock.c.

4623 {
4624  LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
4625  LOCK *lock;
4626  bool found;
4627  uint32 hashcode;
4628  LWLock *partitionLock;
4629  int waiters = 0;
4630 
4631  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4632  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4633 
4634  hashcode = LockTagHashCode(locktag);
4635  partitionLock = LockHashPartitionLock(hashcode);
4636  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4637 
4639  locktag,
4640  hashcode,
4641  HASH_FIND,
4642  &found);
4643  if (found)
4644  {
4645  Assert(lock != NULL);
4646  waiters = lock->nRequested;
4647  }
4648  LWLockRelease(partitionLock);
4649 
4650  return waiters;
4651 }

References Assert(), elog(), ERROR, HASH_FIND, hash_search_with_hash_value(), lengthof, LockHashPartitionLock, LockMethodLockHash, LockMethods, LOCKTAG::locktag_lockmethodid, LockTagHashCode(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), and LOCK::nRequested.

Referenced by RelationExtensionLockWaiterCount().

◆ MarkLockClear()

void MarkLockClear ( LOCALLOCK * locallock)

Definition at line 1770 of file lock.c.

1771 {
1772  Assert(locallock->nLocks > 0);
1773  locallock->lockCleared = true;
1774 }

References Assert(), LOCALLOCK::lockCleared, and LOCALLOCK::nLocks.

Referenced by ConditionalLockRelation(), ConditionalLockRelationOid(), LockRelation(), LockRelationId(), and LockRelationOid().

◆ PostPrepare_Locks()

void PostPrepare_Locks ( TransactionId  xid)

Definition at line 3351 of file lock.c.

3352 {
3353  PGPROC *newproc = TwoPhaseGetDummyProc(xid, false);
3354  HASH_SEQ_STATUS status;
3355  LOCALLOCK *locallock;
3356  LOCK *lock;
3357  PROCLOCK *proclock;
3358  PROCLOCKTAG proclocktag;
3359  int partition;
3360 
3361  /* Can't prepare a lock group follower. */
3362  Assert(MyProc->lockGroupLeader == NULL ||
3364 
3365  /* This is a critical section: any error means big trouble */
3367 
3368  /*
3369  * First we run through the locallock table and get rid of unwanted
3370  * entries, then we scan the process's proclocks and transfer them to the
3371  * target proc.
3372  *
3373  * We do this separately because we may have multiple locallock entries
3374  * pointing to the same proclock, and we daren't end up with any dangling
3375  * pointers.
3376  */
3378 
3379  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3380  {
3381  LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3382  bool haveSessionLock;
3383  bool haveXactLock;
3384  int i;
3385 
3386  if (locallock->proclock == NULL || locallock->lock == NULL)
3387  {
3388  /*
3389  * We must've run out of shared memory while trying to set up this
3390  * lock. Just forget the local entry.
3391  */
3392  Assert(locallock->nLocks == 0);
3393  RemoveLocalLock(locallock);
3394  continue;
3395  }
3396 
3397  /* Ignore VXID locks */
3398  if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3399  continue;
3400 
3401  /* Scan to see whether we hold it at session or transaction level */
3402  haveSessionLock = haveXactLock = false;
3403  for (i = locallock->numLockOwners - 1; i >= 0; i--)
3404  {
3405  if (lockOwners[i].owner == NULL)
3406  haveSessionLock = true;
3407  else
3408  haveXactLock = true;
3409  }
3410 
3411  /* Ignore it if we have only session lock */
3412  if (!haveXactLock)
3413  continue;
3414 
3415  /* This can't happen, because we already checked it */
3416  if (haveSessionLock)
3417  ereport(PANIC,
3418  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3419  errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3420 
3421  /* Mark the proclock to show we need to release this lockmode */
3422  if (locallock->nLocks > 0)
3423  locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
3424 
3425  /* And remove the locallock hashtable entry */
3426  RemoveLocalLock(locallock);
3427  }
3428 
3429  /*
3430  * Now, scan each lock partition separately.
3431  */
3432  for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
3433  {
3434  LWLock *partitionLock;
3435  dlist_head *procLocks = &(MyProc->myProcLocks[partition]);
3436  dlist_mutable_iter proclock_iter;
3437 
3438  partitionLock = LockHashPartitionLockByIndex(partition);
3439 
3440  /*
3441  * If the proclock list for this partition is empty, we can skip
3442  * acquiring the partition lock. This optimization is safer than the
3443  * situation in LockReleaseAll, because we got rid of any fast-path
3444  * locks during AtPrepare_Locks, so there cannot be any case where
3445  * another backend is adding something to our lists now. For safety,
3446  * though, we code this the same way as in LockReleaseAll.
3447  */
3448  if (dlist_is_empty(procLocks))
3449  continue; /* needn't examine this partition */
3450 
3451  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
3452 
3453  dlist_foreach_modify(proclock_iter, procLocks)
3454  {
3455  proclock = dlist_container(PROCLOCK, procLink, proclock_iter.cur);
3456 
3457  Assert(proclock->tag.myProc == MyProc);
3458 
3459  lock = proclock->tag.myLock;
3460 
3461  /* Ignore VXID locks */
3463  continue;
3464 
3465  PROCLOCK_PRINT("PostPrepare_Locks", proclock);
3466  LOCK_PRINT("PostPrepare_Locks", lock, 0);
3467  Assert(lock->nRequested >= 0);
3468  Assert(lock->nGranted >= 0);
3469  Assert(lock->nGranted <= lock->nRequested);
3470  Assert((proclock->holdMask & ~lock->grantMask) == 0);
3471 
3472  /* Ignore it if nothing to release (must be a session lock) */
3473  if (proclock->releaseMask == 0)
3474  continue;
3475 
3476  /* Else we should be releasing all locks */
3477  if (proclock->releaseMask != proclock->holdMask)
3478  elog(PANIC, "we seem to have dropped a bit somewhere");
3479 
3480  /*
3481  * We cannot simply modify proclock->tag.myProc to reassign
3482  * ownership of the lock, because that's part of the hash key and
3483  * the proclock would then be in the wrong hash chain. Instead
3484  * use hash_update_hash_key. (We used to create a new hash entry,
3485  * but that risks out-of-memory failure if other processes are
3486  * busy making proclocks too.) We must unlink the proclock from
3487  * our procLink chain and put it into the new proc's chain, too.
3488  *
3489  * Note: the updated proclock hash key will still belong to the
3490  * same hash partition, cf proclock_hash(). So the partition lock
3491  * we already hold is sufficient for this.
3492  */
3493  dlist_delete(&proclock->procLink);
3494 
3495  /*
3496  * Create the new hash key for the proclock.
3497  */
3498  proclocktag.myLock = lock;
3499  proclocktag.myProc = newproc;
3500 
3501  /*
3502  * Update groupLeader pointer to point to the new proc. (We'd
3503  * better not be a member of somebody else's lock group!)
3504  */
3505  Assert(proclock->groupLeader == proclock->tag.myProc);
3506  proclock->groupLeader = newproc;
3507 
3508  /*
3509  * Update the proclock. We should not find any existing entry for
3510  * the same hash key, since there can be only one entry for any
3511  * given lock with my own proc.
3512  */
3514  proclock,
3515  &proclocktag))
3516  elog(PANIC, "duplicate entry found while reassigning a prepared transaction's locks");
3517 
3518  /* Re-link into the new proc's proclock list */
3519  dlist_push_tail(&newproc->myProcLocks[partition], &proclock->procLink);
3520 
3521  PROCLOCK_PRINT("PostPrepare_Locks: updated", proclock);
3522  } /* loop over PROCLOCKs within this partition */
3523 
3524  LWLockRelease(partitionLock);
3525  } /* loop over partitions */
3526 
3527  END_CRIT_SECTION();
3528 }
bool hash_update_hash_key(HTAB *hashp, void *existingEntry, const void *newKeyPtr)
Definition: dynahash.c:1157
#define START_CRIT_SECTION()
Definition: miscadmin.h:148
#define END_CRIT_SECTION()
Definition: miscadmin.h:150

References Assert(), dlist_mutable_iter::cur, dlist_container, dlist_delete(), dlist_foreach_modify, dlist_is_empty(), dlist_push_tail(), elog(), END_CRIT_SECTION, ereport, errcode(), errmsg(), LOCK::grantMask, PROCLOCK::groupLeader, hash_seq_init(), hash_seq_search(), hash_update_hash_key(), PROCLOCK::holdMask, i, LOCALLOCKTAG::lock, LOCALLOCK::lock, LOCK_PRINT, LOCKBIT_ON, PGPROC::lockGroupLeader, LockHashPartitionLockByIndex, LockMethodLocalHash, LockMethodProcLockHash, LOCALLOCK::lockOwners, LOCKTAG::locktag_type, LOCKTAG_VIRTUALTRANSACTION, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), LOCALLOCKTAG::mode, PROCLOCKTAG::myLock, MyProc, PROCLOCKTAG::myProc, PGPROC::myProcLocks, LOCK::nGranted, LOCALLOCK::nLocks, LOCK::nRequested, NUM_LOCK_PARTITIONS, LOCALLOCK::numLockOwners, PANIC, PROCLOCK::procLink, LOCALLOCK::proclock, PROCLOCK_PRINT, PROCLOCK::releaseMask, RemoveLocalLock(), START_CRIT_SECTION, LOCK::tag, PROCLOCK::tag, LOCALLOCK::tag, and TwoPhaseGetDummyProc().

Referenced by PrepareTransaction().

◆ proclock_hash()

static uint32 proclock_hash ( const void *  key,
Size  keysize 
)
static

Definition at line 522 of file lock.c.

523 {
524  const PROCLOCKTAG *proclocktag = (const PROCLOCKTAG *) key;
525  uint32 lockhash;
526  Datum procptr;
527 
528  Assert(keysize == sizeof(PROCLOCKTAG));
529 
530  /* Look into the associated LOCK object, and compute its hash code */
531  lockhash = LockTagHashCode(&proclocktag->myLock->tag);
532 
533  /*
534  * To make the hash code also depend on the PGPROC, we xor the proc
535  * struct's address into the hash code, left-shifted so that the
536  * partition-number bits don't change. Since this is only a hash, we
537  * don't care if we lose high-order bits of the address; use an
538  * intermediate variable to suppress cast-pointer-to-int warnings.
539  */
540  procptr = PointerGetDatum(proclocktag->myProc);
541  lockhash ^= ((uint32) procptr) << LOG2_NUM_LOCK_PARTITIONS;
542 
543  return lockhash;
544 }
#define LOG2_NUM_LOCK_PARTITIONS
Definition: lwlock.h:98
static Datum PointerGetDatum(const void *X)
Definition: postgres.h:322
uintptr_t Datum
Definition: postgres.h:64

References Assert(), sort-test::key, LockTagHashCode(), LOG2_NUM_LOCK_PARTITIONS, PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, PointerGetDatum(), and LOCK::tag.

Referenced by InitLocks().

◆ ProcLockHashCode()

static uint32 ProcLockHashCode ( const PROCLOCKTAG * proclocktag,
uint32  hashcode 
)
inlinestatic

Definition at line 553 of file lock.c.

554 {
555  uint32 lockhash = hashcode;
556  Datum procptr;
557 
558  /*
559  * This must match proclock_hash()!
560  */
561  procptr = PointerGetDatum(proclocktag->myProc);
562  lockhash ^= ((uint32) procptr) << LOG2_NUM_LOCK_PARTITIONS;
563 
564  return lockhash;
565 }

References LOG2_NUM_LOCK_PARTITIONS, PROCLOCKTAG::myProc, and PointerGetDatum().

Referenced by CleanUpLock(), FastPathGetRelationLockEntry(), lock_twophase_recover(), LockAcquireExtended(), LockRefindAndRelease(), and SetupLockInTable().

◆ ReleaseLockIfHeld()

static void ReleaseLockIfHeld ( LOCALLOCK * locallock,
bool  sessionLock 
)
static

Definition at line 2460 of file lock.c.

2461 {
2462  ResourceOwner owner;
2463  LOCALLOCKOWNER *lockOwners;
2464  int i;
2465 
2466  /* Identify owner for lock (must match LockRelease!) */
2467  if (sessionLock)
2468  owner = NULL;
2469  else
2470  owner = CurrentResourceOwner;
2471 
2472  /* Scan to see if there are any locks belonging to the target owner */
2473  lockOwners = locallock->lockOwners;
2474  for (i = locallock->numLockOwners - 1; i >= 0; i--)
2475  {
2476  if (lockOwners[i].owner == owner)
2477  {
2478  Assert(lockOwners[i].nLocks > 0);
2479  if (lockOwners[i].nLocks < locallock->nLocks)
2480  {
2481  /*
2482  * We will still hold this lock after forgetting this
2483  * ResourceOwner.
2484  */
2485  locallock->nLocks -= lockOwners[i].nLocks;
2486  /* compact out unused slot */
2487  locallock->numLockOwners--;
2488  if (owner != NULL)
2489  ResourceOwnerForgetLock(owner, locallock);
2490  if (i < locallock->numLockOwners)
2491  lockOwners[i] = lockOwners[locallock->numLockOwners];
2492  }
2493  else
2494  {
2495  Assert(lockOwners[i].nLocks == locallock->nLocks);
2496  /* We want to call LockRelease just once */
2497  lockOwners[i].nLocks = 1;
2498  locallock->nLocks = 1;
2499  if (!LockRelease(&locallock->tag.lock,
2500  locallock->tag.mode,
2501  sessionLock))
2502  elog(WARNING, "ReleaseLockIfHeld: failed??");
2503  }
2504  break;
2505  }
2506  }
2507 }
bool LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
Definition: lock.c:1925

References Assert(), CurrentResourceOwner, elog(), i, LOCALLOCKTAG::lock, LOCALLOCK::lockOwners, LockRelease(), LOCALLOCKTAG::mode, LOCALLOCKOWNER::nLocks, LOCALLOCK::nLocks, LOCALLOCK::numLockOwners, ResourceOwnerForgetLock(), LOCALLOCK::tag, and WARNING.

Referenced by LockReleaseCurrentOwner(), and LockReleaseSession().

◆ RemoveFromWaitQueue()

void RemoveFromWaitQueue ( PGPROC * proc,
uint32  hashcode 
)

Definition at line 1869 of file lock.c.

1870 {
1871  LOCK *waitLock = proc->waitLock;
1872  PROCLOCK *proclock = proc->waitProcLock;
1873  LOCKMODE lockmode = proc->waitLockMode;
1874  LOCKMETHODID lockmethodid = LOCK_LOCKMETHOD(*waitLock);
1875 
1876  /* Make sure proc is waiting */
1878  Assert(proc->links.next != NULL);
1879  Assert(waitLock);
1880  Assert(!dclist_is_empty(&waitLock->waitProcs));
1881  Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
1882 
1883  /* Remove proc from lock's wait queue */
1884  dclist_delete_from_thoroughly(&waitLock->waitProcs, &proc->links);
1885 
1886  /* Undo increments of request counts by waiting process */
1887  Assert(waitLock->nRequested > 0);
1888  Assert(waitLock->nRequested > proc->waitLock->nGranted);
1889  waitLock->nRequested--;
1890  Assert(waitLock->requested[lockmode] > 0);
1891  waitLock->requested[lockmode]--;
1892  /* don't forget to clear waitMask bit if appropriate */
1893  if (waitLock->granted[lockmode] == waitLock->requested[lockmode])
1894  waitLock->waitMask &= LOCKBIT_OFF(lockmode);
1895 
1896  /* Clean up the proc's own state, and pass it the ok/fail signal */
1897  proc->waitLock = NULL;
1898  proc->waitProcLock = NULL;
1900 
1901  /*
1902  * Delete the proclock immediately if it represents no already-held locks.
1903  * (This must happen now because if the owner of the lock decides to
1904  * release it, and the requested/granted counts then go to zero,
1905  * LockRelease expects there to be no remaining proclocks.) Then see if
1906  * any other waiters for the lock can be woken up now.
1907  */
1908  CleanUpLock(waitLock, proclock,
1909  LockMethods[lockmethodid], hashcode,
1910  true);
1911 }
static bool dclist_is_empty(const dclist_head *head)
Definition: ilist.h:682
static void dclist_delete_from_thoroughly(dclist_head *head, dlist_node *node)
Definition: ilist.h:776
@ PROC_WAIT_STATUS_WAITING
Definition: proc.h:125
@ PROC_WAIT_STATUS_ERROR
Definition: proc.h:126
PROCLOCK * waitProcLock
Definition: proc.h:224
ProcWaitStatus waitStatus
Definition: proc.h:168

References Assert(), CleanUpLock(), dclist_delete_from_thoroughly(), dclist_is_empty(), LOCK::granted, lengthof, PGPROC::links, LOCK_LOCKMETHOD, LOCKBIT_OFF, LockMethods, dlist_node::next, LOCK::nGranted, LOCK::nRequested, PROC_WAIT_STATUS_ERROR, PROC_WAIT_STATUS_WAITING, LOCK::requested, PGPROC::waitLock, PGPROC::waitLockMode, LOCK::waitMask, PGPROC::waitProcLock, LOCK::waitProcs, and PGPROC::waitStatus.

Referenced by CheckDeadLock(), LockErrorCleanup(), and ProcSleep().

◆ RemoveLocalLock()

static void RemoveLocalLock ( LOCALLOCK * locallock)
static

Definition at line 1344 of file lock.c.

1345 {
1346  int i;
1347 
1348  for (i = locallock->numLockOwners - 1; i >= 0; i--)
1349  {
1350  if (locallock->lockOwners[i].owner != NULL)
1351  ResourceOwnerForgetLock(locallock->lockOwners[i].owner, locallock);
1352  }
1353  locallock->numLockOwners = 0;
1354  if (locallock->lockOwners != NULL)
1355  pfree(locallock->lockOwners);
1356  locallock->lockOwners = NULL;
1357 
1358  if (locallock->holdsStrongLockCount)
1359  {
1360  uint32 fasthashcode;
1361 
1362  fasthashcode = FastPathStrongLockHashPartition(locallock->hashcode);
1363 
1365  Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
1366  FastPathStrongRelationLocks->count[fasthashcode]--;
1367  locallock->holdsStrongLockCount = false;
1369  }
1370 
1372  &(locallock->tag),
1373  HASH_REMOVE, NULL))
1374  elog(WARNING, "locallock table corrupted");
1375 
1376  /*
1377  * Indicate that the lock is released for certain types of locks
1378  */
1379  CheckAndSetLockHeld(locallock, false);
1380 }
void pfree(void *pointer)
Definition: mcxt.c:1456

References Assert(), CheckAndSetLockHeld(), FastPathStrongRelationLockData::count, elog(), FastPathStrongLockHashPartition, FastPathStrongRelationLocks, HASH_REMOVE, hash_search(), LOCALLOCK::hashcode, LOCALLOCK::holdsStrongLockCount, i, LockMethodLocalHash, LOCALLOCK::lockOwners, FastPathStrongRelationLockData::mutex, LOCALLOCK::numLockOwners, LOCALLOCKOWNER::owner, pfree(), ResourceOwnerForgetLock(), SpinLockAcquire, SpinLockRelease, LOCALLOCK::tag, and WARNING.

Referenced by LockAcquireExtended(), LockHasWaiters(), LockRelease(), LockReleaseAll(), and PostPrepare_Locks().

◆ SetupLockInTable()

static PROCLOCK * SetupLockInTable ( LockMethod  lockMethodTable,
PGPROC * proc,
const LOCKTAG * locktag,
uint32  hashcode,
LOCKMODE  lockmode 
)
static

Definition at line 1151 of file lock.c.

1153 {
1154  LOCK *lock;
1155  PROCLOCK *proclock;
1156  PROCLOCKTAG proclocktag;
1157  uint32 proclock_hashcode;
1158  bool found;
1159 
1160  /*
1161  * Find or create a lock with this tag.
1162  */
1164  locktag,
1165  hashcode,
1167  &found);
1168  if (!lock)
1169  return NULL;
1170 
1171  /*
1172  * if it's a new lock object, initialize it
1173  */
1174  if (!found)
1175  {
1176  lock->grantMask = 0;
1177  lock->waitMask = 0;
1178  dlist_init(&lock->procLocks);
1179  dclist_init(&lock->waitProcs);
1180  lock->nRequested = 0;
1181  lock->nGranted = 0;
1182  MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
1183  MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
1184  LOCK_PRINT("LockAcquire: new", lock, lockmode);
1185  }
1186  else
1187  {
1188  LOCK_PRINT("LockAcquire: found", lock, lockmode);
1189  Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
1190  Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
1191  Assert(lock->nGranted <= lock->nRequested);
1192  }
1193 
1194  /*
1195  * Create the hash key for the proclock table.
1196  */
1197  proclocktag.myLock = lock;
1198  proclocktag.myProc = proc;
1199 
1200  proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
1201 
1202  /*
1203  * Find or create a proclock entry with this tag
1204  */
1206  &proclocktag,
1207  proclock_hashcode,
1209  &found);
1210  if (!proclock)
1211  {
1212  /* Oops, not enough shmem for the proclock */
1213  if (lock->nRequested == 0)
1214  {
1215  /*
1216  * There are no other requestors of this lock, so garbage-collect
1217  * the lock object. We *must* do this to avoid a permanent leak
1218  * of shared memory, because there won't be anything to cause
1219  * anyone to release the lock object later.
1220  */
1221  Assert(dlist_is_empty(&(lock->procLocks)));
1223  &(lock->tag),
1224  hashcode,
1225  HASH_REMOVE,
1226  NULL))
1227  elog(PANIC, "lock table corrupted");
1228  }
1229  return NULL;
1230  }
1231 
1232  /*
1233  * If new, initialize the new entry
1234  */
1235  if (!found)
1236  {
1237  uint32 partition = LockHashPartition(hashcode);
1238 
1239  /*
1240  * It might seem unsafe to access proclock->groupLeader without a
1241  * lock, but it's not really. Either we are initializing a proclock
1242  * on our own behalf, in which case our group leader isn't changing
1243  * because the group leader for a process can only ever be changed by
1244  * the process itself; or else we are transferring a fast-path lock to
1245  * the main lock table, in which case that process can't change it's
1246  * lock group leader without first releasing all of its locks (and in
1247  * particular the one we are currently transferring).
1248  */
1249  proclock->groupLeader = proc->lockGroupLeader != NULL ?
1250  proc->lockGroupLeader : proc;
1251  proclock->holdMask = 0;
1252  proclock->releaseMask = 0;
1253  /* Add proclock to appropriate lists */
1254  dlist_push_tail(&lock->procLocks, &proclock->lockLink);
1255  dlist_push_tail(&proc->myProcLocks[partition], &proclock->procLink);
1256  PROCLOCK_PRINT("LockAcquire: new", proclock);
1257  }
1258  else
1259  {
1260  PROCLOCK_PRINT("LockAcquire: found", proclock);
1261  Assert((proclock->holdMask & ~lock->grantMask) == 0);
1262 
1263 #ifdef CHECK_DEADLOCK_RISK
1264 
1265  /*
1266  * Issue warning if we already hold a lower-level lock on this object
1267  * and do not hold a lock of the requested level or higher. This
1268  * indicates a deadlock-prone coding practice (eg, we'd have a
1269  * deadlock if another backend were following the same code path at
1270  * about the same time).
1271  *
1272  * This is not enabled by default, because it may generate log entries
1273  * about user-level coding practices that are in fact safe in context.
1274  * It can be enabled to help find system-level problems.
1275  *
1276  * XXX Doing numeric comparison on the lockmodes is a hack; it'd be
1277  * better to use a table. For now, though, this works.
1278  */
1279  {
1280  int i;
1281 
1282  for (i = lockMethodTable->numLockModes; i > 0; i--)
1283  {
1284  if (proclock->holdMask & LOCKBIT_ON(i))
1285  {
1286  if (i >= (int) lockmode)
1287  break; /* safe: we have a lock >= req level */
1288  elog(LOG, "deadlock risk: raising lock level"
1289  " from %s to %s on object %u/%u/%u",
1290  lockMethodTable->lockModeNames[i],
1291  lockMethodTable->lockModeNames[lockmode],
1292  lock->tag.locktag_field1, lock->tag.locktag_field2,
1293  lock->tag.locktag_field3);
1294  break;
1295  }
1296  }
1297  }
1298 #endif /* CHECK_DEADLOCK_RISK */
1299  }
1300 
1301  /*
1302  * lock->nRequested and lock->requested[] count the total number of
1303  * requests, whether granted or waiting, so increment those immediately.
1304  * The other counts don't increment till we get the lock.
1305  */
1306  lock->nRequested++;
1307  lock->requested[lockmode]++;
1308  Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
1309 
1310  /*
1311  * We shouldn't already hold the desired lock; else locallock table is
1312  * broken.
1313  */
1314  if (proclock->holdMask & LOCKBIT_ON(lockmode))
1315  elog(ERROR, "lock %s on object %u/%u/%u is already held",
1316  lockMethodTable->lockModeNames[lockmode],
1317  lock->tag.locktag_field1, lock->tag.locktag_field2,
1318  lock->tag.locktag_field3);
1319 
1320  return proclock;
1321 }

References Assert(), dclist_init(), dlist_init(), dlist_is_empty(), dlist_push_tail(), elog(), ERROR, LOCK::granted, LOCK::grantMask, PROCLOCK::groupLeader, HASH_ENTER_NULL, HASH_REMOVE, hash_search_with_hash_value(), PROCLOCK::holdMask, i, LOCK_PRINT, LOCKBIT_ON, PGPROC::lockGroupLeader, LockHashPartition, PROCLOCK::lockLink, LockMethodLockHash, LockMethodProcLockHash, LockMethodData::lockModeNames, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_field3, LOG, MAX_LOCKMODES, MemSet, PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, PGPROC::myProcLocks, LOCK::nGranted, LOCK::nRequested, LockMethodData::numLockModes, PANIC, PROCLOCK::procLink, PROCLOCK_PRINT, ProcLockHashCode(), LOCK::procLocks, PROCLOCK::releaseMask, LOCK::requested, LOCK::tag, LOCK::waitMask, and LOCK::waitProcs.

Referenced by FastPathGetRelationLockEntry(), FastPathTransferRelationLocks(), LockAcquireExtended(), and VirtualXactLock().

◆ UnGrantLock()

static bool UnGrantLock ( LOCK * lock,
LOCKMODE  lockmode,
PROCLOCK * proclock,
LockMethod  lockMethodTable 
)
static

Definition at line 1549 of file lock.c.

1551 {
1552  bool wakeupNeeded = false;
1553 
1554  Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
1555  Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
1556  Assert(lock->nGranted <= lock->nRequested);
1557 
1558  /*
1559  * fix the general lock stats
1560  */
1561  lock->nRequested--;
1562  lock->requested[lockmode]--;
1563  lock->nGranted--;
1564  lock->granted[lockmode]--;
1565 
1566  if (lock->granted[lockmode] == 0)
1567  {
1568  /* change the conflict mask. No more of this lock type. */
1569  lock->grantMask &= LOCKBIT_OFF(lockmode);
1570  }
1571 
1572  LOCK_PRINT("UnGrantLock: updated", lock, lockmode);
1573 
1574  /*
1575  * We need only run ProcLockWakeup if the released lock conflicts with at
1576  * least one of the lock types requested by waiter(s). Otherwise whatever
1577  * conflict made them wait must still exist. NOTE: before MVCC, we could
1578  * skip wakeup if lock->granted[lockmode] was still positive. But that's
1579  * not true anymore, because the remaining granted locks might belong to
1580  * some waiter, who could now be awakened because he doesn't conflict with
1581  * his own locks.
1582  */
1583  if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
1584  wakeupNeeded = true;
1585 
1586  /*
1587  * Now fix the per-proclock state.
1588  */
1589  proclock->holdMask &= LOCKBIT_OFF(lockmode);
1590  PROCLOCK_PRINT("UnGrantLock: updated", proclock);
1591 
1592  return wakeupNeeded;
1593 }

References Assert(), LockMethodData::conflictTab, LOCK::granted, LOCK::grantMask, PROCLOCK::holdMask, LOCK_PRINT, LOCKBIT_OFF, LOCK::nGranted, LOCK::nRequested, PROCLOCK_PRINT, LOCK::requested, and LOCK::waitMask.

Referenced by LockRefindAndRelease(), LockRelease(), and LockReleaseAll().

◆ VirtualXactLock()

bool VirtualXactLock ( VirtualTransactionId  vxid,
bool  wait 
)

Definition at line 4511 of file lock.c.

4512 {
4513  LOCKTAG tag;
4514  PGPROC *proc;
4516 
4518 
4520  /* no vxid lock; localTransactionId is a normal, locked XID */
4521  return XactLockForVirtualXact(vxid, vxid.localTransactionId, wait);
4522 
4523  SET_LOCKTAG_VIRTUALTRANSACTION(tag, vxid);
4524 
4525  /*
4526  * If a lock table entry must be made, this is the PGPROC on whose behalf
4527  * it must be done. Note that the transaction might end or the PGPROC
4528  * might be reassigned to a new backend before we get around to examining
4529  * it, but it doesn't matter. If we find upon examination that the
4530  * relevant lxid is no longer running here, that's enough to prove that
4531  * it's no longer running anywhere.
4532  */
4533  proc = BackendIdGetProc(vxid.backendId);
4534  if (proc == NULL)
4535  return XactLockForVirtualXact(vxid, InvalidTransactionId, wait);
4536 
4537  /*
4538  * We must acquire this lock before checking the backendId and lxid
4539  * against the ones we're waiting for. The target backend will only set
4540  * or clear lxid while holding this lock.
4541  */
4543 
4544  if (proc->backendId != vxid.backendId
4545  || proc->fpLocalTransactionId != vxid.localTransactionId)
4546  {
4547  /* VXID ended */
4548  LWLockRelease(&proc->fpInfoLock);
4549  return XactLockForVirtualXact(vxid, InvalidTransactionId, wait);
4550  }
4551 
4552  /*
4553  * If we aren't asked to wait, there's no need to set up a lock table
4554  * entry. The transaction is still in progress, so just return false.
4555  */
4556  if (!wait)
4557  {
4558  LWLockRelease(&proc->fpInfoLock);
4559  return false;
4560  }
4561 
4562  /*
4563  * OK, we're going to need to sleep on the VXID. But first, we must set
4564  * up the primary lock table entry, if needed (ie, convert the proc's
4565  * fast-path lock on its VXID to a regular lock).
4566  */
4567  if (proc->fpVXIDLock)
4568  {
4569  PROCLOCK *proclock;
4570  uint32 hashcode;
4571  LWLock *partitionLock;
4572 
4573  hashcode = LockTagHashCode(&tag);
4574 
4575  partitionLock = LockHashPartitionLock(hashcode);
4576  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4577 
4579  &tag, hashcode, ExclusiveLock);
4580  if (!proclock)
4581  {
4582  LWLockRelease(partitionLock);
4583  LWLockRelease(&proc->fpInfoLock);
4584  ereport(ERROR,
4585  (errcode(ERRCODE_OUT_OF_MEMORY),
4586  errmsg("out of shared memory"),
4587  errhint("You might need to increase %s.", "max_locks_per_transaction")));
4588  }
4589  GrantLock(proclock->tag.myLock, proclock, ExclusiveLock);
4590 
4591  LWLockRelease(partitionLock);
4592 
4593  proc->fpVXIDLock = false;
4594  }
4595 
4596  /*
4597  * If the proc has an XID now, we'll avoid a TwoPhaseGetXidByVirtualXID()
4598  * search. The proc might have assigned this XID but not yet locked it,
4599  * in which case the proc will lock this XID before releasing the VXID.
4600  * The fpInfoLock critical section excludes VirtualXactLockTableCleanup(),
4601  * so we won't save an XID of a different VXID. It doesn't matter whether
4602  * we save this before or after setting up the primary lock table entry.
4603  */
4604  xid = proc->xid;
4605 
4606  /* Done with proc->fpLockBits */
4607  LWLockRelease(&proc->fpInfoLock);
4608 
4609  /* Time to wait. */
4610  (void) LockAcquire(&tag, ShareLock, false, false);
4611 
4612  LockRelease(&tag, ShareLock, false);
4613  return XactLockForVirtualXact(vxid, xid, wait);
4614 }
static bool XactLockForVirtualXact(VirtualTransactionId vxid, TransactionId xid, bool wait)
Definition: lock.c:4460
LockAcquireResult LockAcquire(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock, bool dontWait)
Definition: lock.c:735
#define VirtualTransactionIdIsRecoveredPreparedXact(vxid)
Definition: lock.h:69
#define ShareLock
Definition: lockdefs.h:40
PGPROC * BackendIdGetProc(int backendID)
Definition: sinvaladt.c:385
#define InvalidTransactionId
Definition: transam.h:31

References Assert(), VirtualTransactionId::backendId, PGPROC::backendId, BackendIdGetProc(), DEFAULT_LOCKMETHOD, ereport, errcode(), errhint(), errmsg(), ERROR, ExclusiveLock, PGPROC::fpInfoLock, PGPROC::fpLocalTransactionId, PGPROC::fpVXIDLock, GrantLock(), InvalidTransactionId, VirtualTransactionId::localTransactionId, LockAcquire(), LockHashPartitionLock, LockMethods, LockRelease(), LockTagHashCode(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), PROCLOCKTAG::myLock, SET_LOCKTAG_VIRTUALTRANSACTION, SetupLockInTable(), ShareLock, PROCLOCK::tag, VirtualTransactionIdIsRecoveredPreparedXact, VirtualTransactionIdIsValid, XactLockForVirtualXact(), and PGPROC::xid.

Referenced by ResolveRecoveryConflictWithVirtualXIDs(), WaitForLockersMultiple(), and WaitForOlderSnapshots().

◆ VirtualXactLockTableCleanup()

void VirtualXactLockTableCleanup ( void  )

Definition at line 4411 of file lock.c.

4412 {
4413  bool fastpath;
4414  LocalTransactionId lxid;
4415 
4417 
4418  /*
4419  * Clean up shared memory state.
4420  */
4422 
4423  fastpath = MyProc->fpVXIDLock;
4424  lxid = MyProc->fpLocalTransactionId;
4425  MyProc->fpVXIDLock = false;
4427 
4429 
4430  /*
4431  * If fpVXIDLock has been cleared without touching fpLocalTransactionId,
4432  * that means someone transferred the lock to the main lock table.
4433  */
4434  if (!fastpath && LocalTransactionIdIsValid(lxid))
4435  {
4436  VirtualTransactionId vxid;
4437  LOCKTAG locktag;
4438 
4439  vxid.backendId = MyBackendId;
4440  vxid.localTransactionId = lxid;
4441  SET_LOCKTAG_VIRTUALTRANSACTION(locktag, vxid);
4442 
4444  &locktag, ExclusiveLock, false);
4445  }
4446 }
uint32 LocalTransactionId
Definition: c.h:643
BackendId MyBackendId
Definition: globals.c:85
#define LocalTransactionIdIsValid(lxid)
Definition: lock.h:66

References Assert(), VirtualTransactionId::backendId, PGPROC::backendId, DEFAULT_LOCKMETHOD, ExclusiveLock, PGPROC::fpInfoLock, PGPROC::fpLocalTransactionId, PGPROC::fpVXIDLock, InvalidBackendId, InvalidLocalTransactionId, VirtualTransactionId::localTransactionId, LocalTransactionIdIsValid, LockMethods, LockRefindAndRelease(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), MyBackendId, MyProc, and SET_LOCKTAG_VIRTUALTRANSACTION.

Referenced by LockReleaseAll(), and ShutdownRecoveryTransactionEnvironment().

◆ VirtualXactLockTableInsert()

◆ WaitOnLock()

static void WaitOnLock ( LOCALLOCK locallock,
ResourceOwner  owner 
)
static

Definition at line 1785 of file lock.c.

1786 {
1787  LOCKMETHODID lockmethodid = LOCALLOCK_LOCKMETHOD(*locallock);
1788  LockMethod lockMethodTable = LockMethods[lockmethodid];
1789 
1790  LOCK_PRINT("WaitOnLock: sleeping on lock",
1791  locallock->lock, locallock->tag.mode);
1792 
1793  /* adjust the process title to indicate that it's waiting */
1794  set_ps_display_suffix("waiting");
1795 
1796  awaitedLock = locallock;
1797  awaitedOwner = owner;
1798 
1799  /*
1800  * NOTE: Think not to put any shared-state cleanup after the call to
1801  * ProcSleep, in either the normal or failure path. The lock state must
1802  * be fully set by the lock grantor, or by CheckDeadLock if we give up
1803  * waiting for the lock. This is necessary because of the possibility
1804  * that a cancel/die interrupt will interrupt ProcSleep after someone else
1805  * grants us the lock, but before we've noticed it. Hence, after granting,
1806  * the locktable state must fully reflect the fact that we own the lock;
1807  * we can't do additional work on return.
1808  *
1809  * We can and do use a PG_TRY block to try to clean up after failure, but
1810  * this still has a major limitation: elog(FATAL) can occur while waiting
1811  * (eg, a "die" interrupt), and then control won't come back here. So all
1812  * cleanup of essential state should happen in LockErrorCleanup, not here.
1813  * We can use PG_TRY to clear the "waiting" status flags, since doing that
1814  * is unimportant if the process exits.
1815  */
1816  PG_TRY();
1817  {
1818  if (ProcSleep(locallock, lockMethodTable) != PROC_WAIT_STATUS_OK)
1819  {
1820  /*
1821  * We failed as a result of a deadlock, see CheckDeadLock(). Quit
1822  * now.
1823  */
1824  awaitedLock = NULL;
1825  LOCK_PRINT("WaitOnLock: aborting on lock",
1826  locallock->lock, locallock->tag.mode);
1828 
1829  /*
1830  * Now that we aren't holding the partition lock, we can give an
1831  * error report including details about the detected deadlock.
1832  */
1833  DeadLockReport();
1834  /* not reached */
1835  }
1836  }
1837  PG_CATCH();
1838  {
1839  /* In this path, awaitedLock remains set until LockErrorCleanup */
1840 
1841  /* reset ps display to remove the suffix */
1843 
1844  /* and propagate the error */
1845  PG_RE_THROW();
1846  }
1847  PG_END_TRY();
1848 
1849  awaitedLock = NULL;
1850 
1851  /* reset ps display to remove the suffix */
1853 
1854  LOCK_PRINT("WaitOnLock: wakeup on lock",
1855  locallock->lock, locallock->tag.mode);
1856 }
void DeadLockReport(void)
Definition: deadlock.c:1072
#define PG_RE_THROW()
Definition: elog.h:411
#define PG_TRY(...)
Definition: elog.h:370
#define PG_END_TRY(...)
Definition: elog.h:395
#define PG_CATCH(...)
Definition: elog.h:380
@ PROC_WAIT_STATUS_OK
Definition: proc.h:124
void set_ps_display_remove_suffix(void)
Definition: ps_status.c:396
void set_ps_display_suffix(const char *suffix)
Definition: ps_status.c:344
ProcWaitStatus ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
Definition: proc.c:1004

References awaitedLock, awaitedOwner, DeadLockReport(), LOCALLOCK::hashcode, LOCALLOCK_LOCKMETHOD, LOCALLOCK::lock, LOCK_PRINT, LockHashPartitionLock, LockMethods, LWLockRelease(), LOCALLOCKTAG::mode, PG_CATCH, PG_END_TRY, PG_RE_THROW, PG_TRY, PROC_WAIT_STATUS_OK, ProcSleep(), set_ps_display_remove_suffix(), set_ps_display_suffix(), and LOCALLOCK::tag.

Referenced by LockAcquireExtended().

◆ XactLockForVirtualXact()

static bool XactLockForVirtualXact ( VirtualTransactionId  vxid,
TransactionId  xid,
bool  wait 
)
static

Definition at line 4460 of file lock.c.

4462 {
4463  bool more = false;
4464 
4465  /* There is no point to wait for 2PCs if you have no 2PCs. */
4466  if (max_prepared_xacts == 0)
4467  return true;
4468 
4469  do
4470  {
4471  LockAcquireResult lar;
4472  LOCKTAG tag;
4473 
4474  /* Clear state from previous iterations. */
4475  if (more)
4476  {
4477  xid = InvalidTransactionId;
4478  more = false;
4479  }
4480 
4481  /* If we have no xid, try to find one. */
4482  if (!TransactionIdIsValid(xid))
4483  xid = TwoPhaseGetXidByVirtualXID(vxid, &more);
4484  if (!TransactionIdIsValid(xid))
4485  {
4486  Assert(!more);
4487  return true;
4488  }
4489 
4490  /* Check or wait for XID completion. */
4491  SET_LOCKTAG_TRANSACTION(tag, xid);
4492  lar = LockAcquire(&tag, ShareLock, false, !wait);
4493  if (lar == LOCKACQUIRE_NOT_AVAIL)
4494  return false;
4495  LockRelease(&tag, ShareLock, false);
4496  } while (more);
4497 
4498  return true;
4499 }
#define SET_LOCKTAG_TRANSACTION(locktag, xid)
Definition: lock.h:226
LockAcquireResult
Definition: lock.h:501
TransactionId TwoPhaseGetXidByVirtualXID(VirtualTransactionId vxid, bool *have_more)
Definition: twophase.c:871

References Assert(), InvalidTransactionId, LockAcquire(), LOCKACQUIRE_NOT_AVAIL, LockRelease(), max_prepared_xacts, SET_LOCKTAG_TRANSACTION, ShareLock, TransactionIdIsValid, and TwoPhaseGetXidByVirtualXID().

Referenced by VirtualXactLock().

Variable Documentation

◆ awaitedLock

LOCALLOCK* awaitedLock
static

Definition at line 276 of file lock.c.

Referenced by GrantAwaitedLock(), and WaitOnLock().

◆ awaitedOwner

ResourceOwner awaitedOwner
static

Definition at line 277 of file lock.c.

Referenced by GrantAwaitedLock(), and WaitOnLock().

◆ default_lockmethod

const LockMethodData default_lockmethod
static
Initial value:
= {
}
static bool Dummy_trace
Definition: lock.c:123
static const char *const lock_mode_names[]
Definition: lock.c:109
static const LOCKMASK LockConflicts[]
Definition: lock.c:66
#define MaxLockMode
Definition: lockdefs.h:45

Definition at line 126 of file lock.c.

◆ Dummy_trace

bool Dummy_trace = false
static

Definition at line 123 of file lock.c.

◆ FastPathLocalUseCount

int FastPathLocalUseCount = 0
static

◆ FastPathStrongRelationLocks

◆ lock_mode_names

const char* const lock_mode_names[]
static
Initial value:
=
{
/* indexed by LOCKMODE; slot 0 is a placeholder, real modes start at 1 */
"INVALID",
"AccessShareLock",
"RowShareLock",
"RowExclusiveLock",
"ShareUpdateExclusiveLock",
"ShareLock",
"ShareRowExclusiveLock",
"ExclusiveLock",
"AccessExclusiveLock"
}

Definition at line 109 of file lock.c.

◆ LockConflicts

const LOCKMASK LockConflicts[]
static

Definition at line 66 of file lock.c.

◆ LockMethodLocalHash

◆ LockMethodLockHash

◆ LockMethodProcLockHash

◆ LockMethods

◆ max_locks_per_xact

int max_locks_per_xact

Definition at line 55 of file lock.c.

Referenced by CheckRequiredParameterValues(), InitControlFile(), and XLogReportParameters().

◆ PG_USED_FOR_ASSERTS_ONLY

bool IsRelationExtensionLockHeld PG_USED_FOR_ASSERTS_ONLY = false
static

Definition at line 187 of file lock.c.

◆ StrongLockInProgress

LOCALLOCK* StrongLockInProgress
static

Definition at line 275 of file lock.c.

Referenced by AbortStrongLockAcquire(), BeginStrongLockAcquire(), and FinishStrongLockAcquire().

◆ user_lockmethod

const LockMethodData user_lockmethod
static
Initial value:

Definition at line 137 of file lock.c.