PostgreSQL Source Code  git master
lock.c File Reference
#include "postgres.h"
#include <signal.h>
#include <unistd.h>
#include "access/transam.h"
#include "access/twophase.h"
#include "access/twophase_rmgr.h"
#include "access/xact.h"
#include "access/xlog.h"
#include "access/xlogutils.h"
#include "miscadmin.h"
#include "pg_trace.h"
#include "pgstat.h"
#include "storage/proc.h"
#include "storage/procarray.h"
#include "storage/sinvaladt.h"
#include "storage/spin.h"
#include "storage/standby.h"
#include "utils/memutils.h"
#include "utils/ps_status.h"
#include "utils/resowner_private.h"
Include dependency graph for lock.c:

Go to the source code of this file.

Data Structures

struct  TwoPhaseLockRecord
 
struct  FastPathStrongRelationLockData
 

Macros

#define NLOCKENTS()    mul_size(max_locks_per_xact, add_size(MaxBackends, max_prepared_xacts))
 
#define FAST_PATH_BITS_PER_SLOT   3
 
#define FAST_PATH_LOCKNUMBER_OFFSET   1
 
#define FAST_PATH_MASK   ((1 << FAST_PATH_BITS_PER_SLOT) - 1)
 
#define FAST_PATH_GET_BITS(proc, n)    (((proc)->fpLockBits >> (FAST_PATH_BITS_PER_SLOT * n)) & FAST_PATH_MASK)
 
#define FAST_PATH_BIT_POSITION(n, l)
 
#define FAST_PATH_SET_LOCKMODE(proc, n, l)    (proc)->fpLockBits |= UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)
 
#define FAST_PATH_CLEAR_LOCKMODE(proc, n, l)    (proc)->fpLockBits &= ~(UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l))
 
#define FAST_PATH_CHECK_LOCKMODE(proc, n, l)    ((proc)->fpLockBits & (UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)))
 
#define EligibleForRelationFastPath(locktag, mode)
 
#define ConflictsWithRelationFastPath(locktag, mode)
 
#define FAST_PATH_STRONG_LOCK_HASH_BITS   10
 
#define FAST_PATH_STRONG_LOCK_HASH_PARTITIONS    (1 << FAST_PATH_STRONG_LOCK_HASH_BITS)
 
#define FastPathStrongLockHashPartition(hashcode)    ((hashcode) % FAST_PATH_STRONG_LOCK_HASH_PARTITIONS)
 
#define LOCK_PRINT(where, lock, type)   ((void) 0)
 
#define PROCLOCK_PRINT(where, proclockP)   ((void) 0)
 

Typedefs

typedef struct TwoPhaseLockRecord TwoPhaseLockRecord
 

Functions

static bool FastPathGrantRelationLock (Oid relid, LOCKMODE lockmode)
 
static bool FastPathUnGrantRelationLock (Oid relid, LOCKMODE lockmode)
 
static bool FastPathTransferRelationLocks (LockMethod lockMethodTable, const LOCKTAG *locktag, uint32 hashcode)
 
static PROCLOCK * FastPathGetRelationLockEntry (LOCALLOCK *locallock)
 
static uint32 proclock_hash (const void *key, Size keysize)
 
static void RemoveLocalLock (LOCALLOCK *locallock)
 
static PROCLOCK * SetupLockInTable (LockMethod lockMethodTable, PGPROC *proc, const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode)
 
static void GrantLockLocal (LOCALLOCK *locallock, ResourceOwner owner)
 
static void BeginStrongLockAcquire (LOCALLOCK *locallock, uint32 fasthashcode)
 
static void FinishStrongLockAcquire (void)
 
static void WaitOnLock (LOCALLOCK *locallock, ResourceOwner owner)
 
static void ReleaseLockIfHeld (LOCALLOCK *locallock, bool sessionLock)
 
static void LockReassignOwner (LOCALLOCK *locallock, ResourceOwner parent)
 
static bool UnGrantLock (LOCK *lock, LOCKMODE lockmode, PROCLOCK *proclock, LockMethod lockMethodTable)
 
static void CleanUpLock (LOCK *lock, PROCLOCK *proclock, LockMethod lockMethodTable, uint32 hashcode, bool wakeupNeeded)
 
static void LockRefindAndRelease (LockMethod lockMethodTable, PGPROC *proc, LOCKTAG *locktag, LOCKMODE lockmode, bool decrement_strong_lock_count)
 
static void GetSingleProcBlockerStatusData (PGPROC *blocked_proc, BlockedProcsData *data)
 
void InitLocks (void)
 
LockMethod GetLocksMethodTable (const LOCK *lock)
 
LockMethod GetLockTagsMethodTable (const LOCKTAG *locktag)
 
uint32 LockTagHashCode (const LOCKTAG *locktag)
 
static uint32 ProcLockHashCode (const PROCLOCKTAG *proclocktag, uint32 hashcode)
 
bool DoLockModesConflict (LOCKMODE mode1, LOCKMODE mode2)
 
bool LockHeldByMe (const LOCKTAG *locktag, LOCKMODE lockmode)
 
bool LockHasWaiters (const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
 
LockAcquireResult LockAcquire (const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock, bool dontWait)
 
LockAcquireResult LockAcquireExtended (const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock, bool dontWait, bool reportMemoryError, LOCALLOCK **locallockp)
 
static void CheckAndSetLockHeld (LOCALLOCK *locallock, bool acquired)
 
bool LockCheckConflicts (LockMethod lockMethodTable, LOCKMODE lockmode, LOCK *lock, PROCLOCK *proclock)
 
void GrantLock (LOCK *lock, PROCLOCK *proclock, LOCKMODE lockmode)
 
void AbortStrongLockAcquire (void)
 
void GrantAwaitedLock (void)
 
void MarkLockClear (LOCALLOCK *locallock)
 
void RemoveFromWaitQueue (PGPROC *proc, uint32 hashcode)
 
bool LockRelease (const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
 
void LockReleaseAll (LOCKMETHODID lockmethodid, bool allLocks)
 
void LockReleaseSession (LOCKMETHODID lockmethodid)
 
void LockReleaseCurrentOwner (LOCALLOCK **locallocks, int nlocks)
 
void LockReassignCurrentOwner (LOCALLOCK **locallocks, int nlocks)
 
VirtualTransactionId * GetLockConflicts (const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
 
static void CheckForSessionAndXactLocks (void)
 
void AtPrepare_Locks (void)
 
void PostPrepare_Locks (TransactionId xid)
 
Size LockShmemSize (void)
 
LockData * GetLockStatusData (void)
 
BlockedProcsData * GetBlockerStatusData (int blocked_pid)
 
xl_standby_lock * GetRunningTransactionLocks (int *nlocks)
 
const char * GetLockmodeName (LOCKMETHODID lockmethodid, LOCKMODE mode)
 
void lock_twophase_recover (TransactionId xid, uint16 info, void *recdata, uint32 len)
 
void lock_twophase_standby_recover (TransactionId xid, uint16 info, void *recdata, uint32 len)
 
void lock_twophase_postcommit (TransactionId xid, uint16 info, void *recdata, uint32 len)
 
void lock_twophase_postabort (TransactionId xid, uint16 info, void *recdata, uint32 len)
 
void VirtualXactLockTableInsert (VirtualTransactionId vxid)
 
void VirtualXactLockTableCleanup (void)
 
static bool XactLockForVirtualXact (VirtualTransactionId vxid, TransactionId xid, bool wait)
 
bool VirtualXactLock (VirtualTransactionId vxid, bool wait)
 
int LockWaiterCount (const LOCKTAG *locktag)
 

Variables

int max_locks_per_xact
 
static const LOCKMASK LockConflicts []
 
static const char *const lock_mode_names []
 
static bool Dummy_trace = false
 
static const LockMethodData default_lockmethod
 
static const LockMethodData user_lockmethod
 
static const LockMethod LockMethods []
 
static int FastPathLocalUseCount = 0
 
static bool IsRelationExtensionLockHeld PG_USED_FOR_ASSERTS_ONLY = false
 
static volatile FastPathStrongRelationLockData * FastPathStrongRelationLocks
 
static HTAB * LockMethodLockHash
 
static HTAB * LockMethodProcLockHash
 
static HTAB * LockMethodLocalHash
 
static LOCALLOCK * StrongLockInProgress
 
static LOCALLOCK * awaitedLock
 
static ResourceOwner awaitedOwner
 

Macro Definition Documentation

◆ ConflictsWithRelationFastPath

#define ConflictsWithRelationFastPath (   locktag,
  mode 
)
Value:
((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
(locktag)->locktag_type == LOCKTAG_RELATION && \
(locktag)->locktag_field1 != InvalidOid && \
(mode) > ShareUpdateExclusiveLock)
#define DEFAULT_LOCKMETHOD
Definition: lock.h:125
@ LOCKTAG_RELATION
Definition: lock.h:137
#define ShareUpdateExclusiveLock
Definition: lockdefs.h:39
static PgChecksumMode mode
Definition: pg_checksums.c:65
#define InvalidOid
Definition: postgres_ext.h:36

Definition at line 233 of file lock.c.

◆ EligibleForRelationFastPath

#define EligibleForRelationFastPath (   locktag,
  mode 
)
Value:
((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
(locktag)->locktag_type == LOCKTAG_RELATION && \
(locktag)->locktag_field1 == MyDatabaseId && \
MyDatabaseId != InvalidOid && \
(mode) < ShareUpdateExclusiveLock)
Oid MyDatabaseId
Definition: globals.c:89

Definition at line 227 of file lock.c.

◆ FAST_PATH_BIT_POSITION

#define FAST_PATH_BIT_POSITION (   n,
  l 
)
Value:
(AssertMacro((l) >= FAST_PATH_LOCKNUMBER_OFFSET), \
AssertMacro((l) < FAST_PATH_BITS_PER_SLOT + FAST_PATH_LOCKNUMBER_OFFSET), \
AssertMacro((n) < FP_LOCK_SLOTS_PER_BACKEND), \
((l) - FAST_PATH_LOCKNUMBER_OFFSET + FAST_PATH_BITS_PER_SLOT * (n)))
#define AssertMacro(condition)
Definition: c.h:843
#define FAST_PATH_LOCKNUMBER_OFFSET
Definition: lock.c:203
#define FAST_PATH_BITS_PER_SLOT
Definition: lock.c:202
#define FP_LOCK_SLOTS_PER_BACKEND
Definition: proc.h:79

Definition at line 207 of file lock.c.

◆ FAST_PATH_BITS_PER_SLOT

#define FAST_PATH_BITS_PER_SLOT   3

Definition at line 202 of file lock.c.

◆ FAST_PATH_CHECK_LOCKMODE

#define FAST_PATH_CHECK_LOCKMODE (   proc,
  n,
  l 
)     ((proc)->fpLockBits & (UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)))

Definition at line 216 of file lock.c.

◆ FAST_PATH_CLEAR_LOCKMODE

#define FAST_PATH_CLEAR_LOCKMODE (   proc,
  n,
  l 
)     (proc)->fpLockBits &= ~(UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l))

Definition at line 214 of file lock.c.

◆ FAST_PATH_GET_BITS

#define FAST_PATH_GET_BITS (   proc,
  n 
)     (((proc)->fpLockBits >> (FAST_PATH_BITS_PER_SLOT * n)) & FAST_PATH_MASK)

Definition at line 205 of file lock.c.

◆ FAST_PATH_LOCKNUMBER_OFFSET

#define FAST_PATH_LOCKNUMBER_OFFSET   1

Definition at line 203 of file lock.c.

◆ FAST_PATH_MASK

#define FAST_PATH_MASK   ((1 << FAST_PATH_BITS_PER_SLOT) - 1)

Definition at line 204 of file lock.c.

◆ FAST_PATH_SET_LOCKMODE

#define FAST_PATH_SET_LOCKMODE (   proc,
  n,
  l 
)     (proc)->fpLockBits |= UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)

Definition at line 212 of file lock.c.

◆ FAST_PATH_STRONG_LOCK_HASH_BITS

#define FAST_PATH_STRONG_LOCK_HASH_BITS   10

Definition at line 260 of file lock.c.

◆ FAST_PATH_STRONG_LOCK_HASH_PARTITIONS

#define FAST_PATH_STRONG_LOCK_HASH_PARTITIONS    (1 << FAST_PATH_STRONG_LOCK_HASH_BITS)

Definition at line 261 of file lock.c.

◆ FastPathStrongLockHashPartition

#define FastPathStrongLockHashPartition (   hashcode)     ((hashcode) % FAST_PATH_STRONG_LOCK_HASH_PARTITIONS)

Definition at line 263 of file lock.c.

◆ LOCK_PRINT

#define LOCK_PRINT (   where,
  lock,
  type 
)    ((void) 0)

Definition at line 365 of file lock.c.

◆ NLOCKENTS

Definition at line 57 of file lock.c.

◆ PROCLOCK_PRINT

#define PROCLOCK_PRINT (   where,
  proclockP 
)    ((void) 0)

Definition at line 366 of file lock.c.

Typedef Documentation

◆ TwoPhaseLockRecord

Function Documentation

◆ AbortStrongLockAcquire()

void AbortStrongLockAcquire ( void  )

Definition at line 1752 of file lock.c.

1753 {
1754  uint32 fasthashcode;
1755  LOCALLOCK *locallock = StrongLockInProgress;
1756 
1757  if (locallock == NULL)
1758  return;
1759 
1760  fasthashcode = FastPathStrongLockHashPartition(locallock->hashcode);
1761  Assert(locallock->holdsStrongLockCount == true);
1763  Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
1764  FastPathStrongRelationLocks->count[fasthashcode]--;
1765  locallock->holdsStrongLockCount = false;
1766  StrongLockInProgress = NULL;
1768 }
unsigned int uint32
Definition: c.h:490
Assert(fmt[strlen(fmt) - 1] !='\n')
#define FastPathStrongLockHashPartition(hashcode)
Definition: lock.c:263
static volatile FastPathStrongRelationLockData * FastPathStrongRelationLocks
Definition: lock.c:272
static LOCALLOCK * StrongLockInProgress
Definition: lock.c:287
#define SpinLockRelease(lock)
Definition: spin.h:64
#define SpinLockAcquire(lock)
Definition: spin.h:62
uint32 count[FAST_PATH_STRONG_LOCK_HASH_PARTITIONS]
Definition: lock.c:269
uint32 hashcode
Definition: lock.h:432
bool holdsStrongLockCount
Definition: lock.h:439

References Assert(), FastPathStrongRelationLockData::count, FastPathStrongLockHashPartition, FastPathStrongRelationLocks, LOCALLOCK::hashcode, LOCALLOCK::holdsStrongLockCount, FastPathStrongRelationLockData::mutex, SpinLockAcquire, SpinLockRelease, and StrongLockInProgress.

Referenced by LockAcquireExtended(), and LockErrorCleanup().

◆ AtPrepare_Locks()

void AtPrepare_Locks ( void  )

Definition at line 3279 of file lock.c.

3280 {
3282  LOCALLOCK *locallock;
3283 
3284  /* First, verify there aren't locks of both xact and session level */
3286 
3287  /* Now do the per-locallock cleanup work */
3289 
3290  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3291  {
3292  TwoPhaseLockRecord record;
3293  LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3294  bool haveSessionLock;
3295  bool haveXactLock;
3296  int i;
3297 
3298  /*
3299  * Ignore VXID locks. We don't want those to be held by prepared
3300  * transactions, since they aren't meaningful after a restart.
3301  */
3302  if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3303  continue;
3304 
3305  /* Ignore it if we don't actually hold the lock */
3306  if (locallock->nLocks <= 0)
3307  continue;
3308 
3309  /* Scan to see whether we hold it at session or transaction level */
3310  haveSessionLock = haveXactLock = false;
3311  for (i = locallock->numLockOwners - 1; i >= 0; i--)
3312  {
3313  if (lockOwners[i].owner == NULL)
3314  haveSessionLock = true;
3315  else
3316  haveXactLock = true;
3317  }
3318 
3319  /* Ignore it if we have only session lock */
3320  if (!haveXactLock)
3321  continue;
3322 
3323  /* This can't happen, because we already checked it */
3324  if (haveSessionLock)
3325  ereport(ERROR,
3326  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3327  errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3328 
3329  /*
3330  * If the local lock was taken via the fast-path, we need to move it
3331  * to the primary lock table, or just get a pointer to the existing
3332  * primary lock table entry if by chance it's already been
3333  * transferred.
3334  */
3335  if (locallock->proclock == NULL)
3336  {
3337  locallock->proclock = FastPathGetRelationLockEntry(locallock);
3338  locallock->lock = locallock->proclock->tag.myLock;
3339  }
3340 
3341  /*
3342  * Arrange to not release any strong lock count held by this lock
3343  * entry. We must retain the count until the prepared transaction is
3344  * committed or rolled back.
3345  */
3346  locallock->holdsStrongLockCount = false;
3347 
3348  /*
3349  * Create a 2PC record.
3350  */
3351  memcpy(&(record.locktag), &(locallock->tag.lock), sizeof(LOCKTAG));
3352  record.lockmode = locallock->tag.mode;
3353 
3355  &record, sizeof(TwoPhaseLockRecord));
3356  }
3357 }
void * hash_seq_search(HASH_SEQ_STATUS *status)
Definition: dynahash.c:1431
void hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp)
Definition: dynahash.c:1421
int errcode(int sqlerrcode)
Definition: elog.c:858
int errmsg(const char *fmt,...)
Definition: elog.c:1069
#define ERROR
Definition: elog.h:39
#define ereport(elevel,...)
Definition: elog.h:149
int i
Definition: isn.c:73
static PROCLOCK * FastPathGetRelationLockEntry(LOCALLOCK *locallock)
Definition: lock.c:2775
static HTAB * LockMethodLocalHash
Definition: lock.c:283
static void CheckForSessionAndXactLocks(void)
Definition: lock.c:3191
@ LOCKTAG_VIRTUALTRANSACTION
Definition: lock.h:143
static void status(const char *fmt,...) pg_attribute_printf(1, 2)
Definition: pg_regress.c:224
LOCKTAG lock
Definition: lock.h:410
LOCKMODE mode
Definition: lock.h:411
LOCALLOCKOWNER * lockOwners
Definition: lock.h:438
LOCK * lock
Definition: lock.h:433
int64 nLocks
Definition: lock.h:435
int numLockOwners
Definition: lock.h:436
PROCLOCK * proclock
Definition: lock.h:434
LOCALLOCKTAG tag
Definition: lock.h:429
Definition: lock.h:165
uint8 locktag_type
Definition: lock.h:170
LOCK * myLock
Definition: lock.h:365
PROCLOCKTAG tag
Definition: lock.h:372
LOCKTAG locktag
Definition: lock.c:161
LOCKMODE lockmode
Definition: lock.c:162
void RegisterTwoPhaseRecord(TwoPhaseRmgrId rmid, uint16 info, const void *data, uint32 len)
Definition: twophase.c:1257
#define TWOPHASE_RM_LOCK_ID
Definition: twophase_rmgr.h:25

References CheckForSessionAndXactLocks(), ereport, errcode(), errmsg(), ERROR, FastPathGetRelationLockEntry(), hash_seq_init(), hash_seq_search(), LOCALLOCK::holdsStrongLockCount, i, LOCALLOCKTAG::lock, LOCALLOCK::lock, LockMethodLocalHash, TwoPhaseLockRecord::lockmode, LOCALLOCK::lockOwners, TwoPhaseLockRecord::locktag, LOCKTAG::locktag_type, LOCKTAG_VIRTUALTRANSACTION, LOCALLOCKTAG::mode, PROCLOCKTAG::myLock, LOCALLOCK::nLocks, LOCALLOCK::numLockOwners, LOCALLOCK::proclock, RegisterTwoPhaseRecord(), status(), PROCLOCK::tag, LOCALLOCK::tag, and TWOPHASE_RM_LOCK_ID.

Referenced by PrepareTransaction().

◆ BeginStrongLockAcquire()

static void BeginStrongLockAcquire ( LOCALLOCK locallock,
uint32  fasthashcode 
)
static

Definition at line 1716 of file lock.c.

1717 {
1718  Assert(StrongLockInProgress == NULL);
1719  Assert(locallock->holdsStrongLockCount == false);
1720 
1721  /*
1722  * Adding to a memory location is not atomic, so we take a spinlock to
1723  * ensure we don't collide with someone else trying to bump the count at
1724  * the same time.
1725  *
1726  * XXX: It might be worth considering using an atomic fetch-and-add
1727  * instruction here, on architectures where that is supported.
1728  */
1729 
1731  FastPathStrongRelationLocks->count[fasthashcode]++;
1732  locallock->holdsStrongLockCount = true;
1733  StrongLockInProgress = locallock;
1735 }

References Assert(), FastPathStrongRelationLockData::count, FastPathStrongRelationLocks, LOCALLOCK::holdsStrongLockCount, FastPathStrongRelationLockData::mutex, SpinLockAcquire, SpinLockRelease, and StrongLockInProgress.

Referenced by LockAcquireExtended().

◆ CheckAndSetLockHeld()

static void CheckAndSetLockHeld ( LOCALLOCK locallock,
bool  acquired 
)
inlinestatic

Definition at line 1351 of file lock.c.

1352 {
1353 #ifdef USE_ASSERT_CHECKING
1354  if (LOCALLOCK_LOCKTAG(*locallock) == LOCKTAG_RELATION_EXTEND)
1355  IsRelationExtensionLockHeld = acquired;
1356  else if (LOCALLOCK_LOCKTAG(*locallock) == LOCKTAG_PAGE)
1357  IsPageLockHeld = acquired;
1358 
1359 #endif
1360 }
@ LOCKTAG_RELATION_EXTEND
Definition: lock.h:138
@ LOCKTAG_PAGE
Definition: lock.h:140
#define LOCALLOCK_LOCKTAG(llock)
Definition: lock.h:444

References LOCALLOCK_LOCKTAG, LOCKTAG_PAGE, and LOCKTAG_RELATION_EXTEND.

Referenced by GrantLockLocal(), and RemoveLocalLock().

◆ CheckForSessionAndXactLocks()

static void CheckForSessionAndXactLocks ( void  )
static

Definition at line 3191 of file lock.c.

3192 {
3193  typedef struct
3194  {
3195  LOCKTAG lock; /* identifies the lockable object */
3196  bool sessLock; /* is any lockmode held at session level? */
3197  bool xactLock; /* is any lockmode held at xact level? */
3198  } PerLockTagEntry;
3199 
3200  HASHCTL hash_ctl;
3201  HTAB *lockhtab;
3203  LOCALLOCK *locallock;
3204 
3205  /* Create a local hash table keyed by LOCKTAG only */
3206  hash_ctl.keysize = sizeof(LOCKTAG);
3207  hash_ctl.entrysize = sizeof(PerLockTagEntry);
3208  hash_ctl.hcxt = CurrentMemoryContext;
3209 
3210  lockhtab = hash_create("CheckForSessionAndXactLocks table",
3211  256, /* arbitrary initial size */
3212  &hash_ctl,
3214 
3215  /* Scan local lock table to find entries for each LOCKTAG */
3217 
3218  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3219  {
3220  LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3221  PerLockTagEntry *hentry;
3222  bool found;
3223  int i;
3224 
3225  /*
3226  * Ignore VXID locks. We don't want those to be held by prepared
3227  * transactions, since they aren't meaningful after a restart.
3228  */
3229  if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3230  continue;
3231 
3232  /* Ignore it if we don't actually hold the lock */
3233  if (locallock->nLocks <= 0)
3234  continue;
3235 
3236  /* Otherwise, find or make an entry in lockhtab */
3237  hentry = (PerLockTagEntry *) hash_search(lockhtab,
3238  &locallock->tag.lock,
3239  HASH_ENTER, &found);
3240  if (!found) /* initialize, if newly created */
3241  hentry->sessLock = hentry->xactLock = false;
3242 
3243  /* Scan to see if we hold lock at session or xact level or both */
3244  for (i = locallock->numLockOwners - 1; i >= 0; i--)
3245  {
3246  if (lockOwners[i].owner == NULL)
3247  hentry->sessLock = true;
3248  else
3249  hentry->xactLock = true;
3250  }
3251 
3252  /*
3253  * We can throw error immediately when we see both types of locks; no
3254  * need to wait around to see if there are more violations.
3255  */
3256  if (hentry->sessLock && hentry->xactLock)
3257  ereport(ERROR,
3258  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3259  errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3260  }
3261 
3262  /* Success, so clean up */
3263  hash_destroy(lockhtab);
3264 }
void hash_destroy(HTAB *hashp)
Definition: dynahash.c:863
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:953
HTAB * hash_create(const char *tabname, long nelem, const HASHCTL *info, int flags)
Definition: dynahash.c:350
@ HASH_ENTER
Definition: hsearch.h:114
#define HASH_CONTEXT
Definition: hsearch.h:102
#define HASH_ELEM
Definition: hsearch.h:95
#define HASH_BLOBS
Definition: hsearch.h:97
struct LOCKTAG LOCKTAG
MemoryContext CurrentMemoryContext
Definition: mcxt.c:135
Size keysize
Definition: hsearch.h:75
Size entrysize
Definition: hsearch.h:76
MemoryContext hcxt
Definition: hsearch.h:86
Definition: dynahash.c:220

References CurrentMemoryContext, HASHCTL::entrysize, ereport, errcode(), errmsg(), ERROR, HASH_BLOBS, HASH_CONTEXT, hash_create(), hash_destroy(), HASH_ELEM, HASH_ENTER, hash_search(), hash_seq_init(), hash_seq_search(), HASHCTL::hcxt, i, HASHCTL::keysize, LOCALLOCKTAG::lock, LockMethodLocalHash, LOCALLOCK::lockOwners, LOCKTAG::locktag_type, LOCKTAG_VIRTUALTRANSACTION, LOCALLOCK::nLocks, LOCALLOCK::numLockOwners, status(), and LOCALLOCK::tag.

Referenced by AtPrepare_Locks().

◆ CleanUpLock()

static void CleanUpLock ( LOCK lock,
PROCLOCK proclock,
LockMethod  lockMethodTable,
uint32  hashcode,
bool  wakeupNeeded 
)
static

Definition at line 1630 of file lock.c.

1633 {
1634  /*
1635  * If this was my last hold on this lock, delete my entry in the proclock
1636  * table.
1637  */
1638  if (proclock->holdMask == 0)
1639  {
1640  uint32 proclock_hashcode;
1641 
1642  PROCLOCK_PRINT("CleanUpLock: deleting", proclock);
1643  dlist_delete(&proclock->lockLink);
1644  dlist_delete(&proclock->procLink);
1645  proclock_hashcode = ProcLockHashCode(&proclock->tag, hashcode);
1647  &(proclock->tag),
1648  proclock_hashcode,
1649  HASH_REMOVE,
1650  NULL))
1651  elog(PANIC, "proclock table corrupted");
1652  }
1653 
1654  if (lock->nRequested == 0)
1655  {
1656  /*
1657  * The caller just released the last lock, so garbage-collect the lock
1658  * object.
1659  */
1660  LOCK_PRINT("CleanUpLock: deleting", lock, 0);
1661  Assert(dlist_is_empty(&lock->procLocks));
1663  &(lock->tag),
1664  hashcode,
1665  HASH_REMOVE,
1666  NULL))
1667  elog(PANIC, "lock table corrupted");
1668  }
1669  else if (wakeupNeeded)
1670  {
1671  /* There are waiters on this lock, so wake them up. */
1672  ProcLockWakeup(lockMethodTable, lock);
1673  }
1674 }
void * hash_search_with_hash_value(HTAB *hashp, const void *keyPtr, uint32 hashvalue, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:966
#define PANIC
Definition: elog.h:42
@ HASH_REMOVE
Definition: hsearch.h:115
static void dlist_delete(dlist_node *node)
Definition: ilist.h:405
static bool dlist_is_empty(const dlist_head *head)
Definition: ilist.h:336
#define LOCK_PRINT(where, lock, type)
Definition: lock.c:365
static uint32 ProcLockHashCode(const PROCLOCKTAG *proclocktag, uint32 hashcode)
Definition: lock.c:565
#define PROCLOCK_PRINT(where, proclockP)
Definition: lock.c:366
static HTAB * LockMethodLockHash
Definition: lock.c:281
static HTAB * LockMethodProcLockHash
Definition: lock.c:282
void ProcLockWakeup(LockMethod lockMethodTable, LOCK *lock)
Definition: proc.c:1637
int nRequested
Definition: lock.h:319
LOCKTAG tag
Definition: lock.h:311
dlist_head procLocks
Definition: lock.h:316
LOCKMASK holdMask
Definition: lock.h:376
dlist_node lockLink
Definition: lock.h:378
dlist_node procLink
Definition: lock.h:379

References Assert(), dlist_delete(), dlist_is_empty(), elog(), HASH_REMOVE, hash_search_with_hash_value(), PROCLOCK::holdMask, LOCK_PRINT, PROCLOCK::lockLink, LockMethodLockHash, LockMethodProcLockHash, LOCK::nRequested, PANIC, PROCLOCK::procLink, PROCLOCK_PRINT, ProcLockHashCode(), LOCK::procLocks, ProcLockWakeup(), LOCK::tag, and PROCLOCK::tag.

Referenced by LockRefindAndRelease(), LockRelease(), LockReleaseAll(), and RemoveFromWaitQueue().

◆ DoLockModesConflict()

bool DoLockModesConflict ( LOCKMODE  mode1,
LOCKMODE  mode2 
)

Definition at line 583 of file lock.c.

584 {
585  LockMethod lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];
586 
587  if (lockMethodTable->conflictTab[mode1] & LOCKBIT_ON(mode2))
588  return true;
589 
590  return false;
591 }
static const LockMethod LockMethods[]
Definition: lock.c:151
#define LOCKBIT_ON(lockmode)
Definition: lock.h:84
const LOCKMASK * conflictTab
Definition: lock.h:111

References LockMethodData::conflictTab, DEFAULT_LOCKMETHOD, LOCKBIT_ON, and LockMethods.

Referenced by Do_MultiXactIdWait(), DoesMultiXactIdConflict(), initialize_reloptions(), and test_lockmode_for_conflict().

◆ FastPathGetRelationLockEntry()

static PROCLOCK * FastPathGetRelationLockEntry ( LOCALLOCK locallock)
static

Definition at line 2775 of file lock.c.

2776 {
2777  LockMethod lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];
2778  LOCKTAG *locktag = &locallock->tag.lock;
2779  PROCLOCK *proclock = NULL;
2780  LWLock *partitionLock = LockHashPartitionLock(locallock->hashcode);
2781  Oid relid = locktag->locktag_field2;
2782  uint32 f;
2783 
2785 
2786  for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
2787  {
2788  uint32 lockmode;
2789 
2790  /* Look for an allocated slot matching the given relid. */
2791  if (relid != MyProc->fpRelId[f] || FAST_PATH_GET_BITS(MyProc, f) == 0)
2792  continue;
2793 
2794  /* If we don't have a lock of the given mode, forget it! */
2795  lockmode = locallock->tag.mode;
2796  if (!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
2797  break;
2798 
2799  /* Find or create lock object. */
2800  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2801 
2802  proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
2803  locallock->hashcode, lockmode);
2804  if (!proclock)
2805  {
2806  LWLockRelease(partitionLock);
2808  ereport(ERROR,
2809  (errcode(ERRCODE_OUT_OF_MEMORY),
2810  errmsg("out of shared memory"),
2811  errhint("You might need to increase max_locks_per_transaction.")));
2812  }
2813  GrantLock(proclock->tag.myLock, proclock, lockmode);
2814  FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
2815 
2816  LWLockRelease(partitionLock);
2817 
2818  /* No need to examine remaining slots. */
2819  break;
2820  }
2821 
2823 
2824  /* Lock may have already been transferred by some other backend. */
2825  if (proclock == NULL)
2826  {
2827  LOCK *lock;
2828  PROCLOCKTAG proclocktag;
2829  uint32 proclock_hashcode;
2830 
2831  LWLockAcquire(partitionLock, LW_SHARED);
2832 
2834  locktag,
2835  locallock->hashcode,
2836  HASH_FIND,
2837  NULL);
2838  if (!lock)
2839  elog(ERROR, "failed to re-find shared lock object");
2840 
2841  proclocktag.myLock = lock;
2842  proclocktag.myProc = MyProc;
2843 
2844  proclock_hashcode = ProcLockHashCode(&proclocktag, locallock->hashcode);
2845  proclock = (PROCLOCK *)
2847  &proclocktag,
2848  proclock_hashcode,
2849  HASH_FIND,
2850  NULL);
2851  if (!proclock)
2852  elog(ERROR, "failed to re-find shared proclock object");
2853  LWLockRelease(partitionLock);
2854  }
2855 
2856  return proclock;
2857 }
int errhint(const char *fmt,...)
Definition: elog.c:1316
@ HASH_FIND
Definition: hsearch.h:113
static PROCLOCK * SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc, const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode)
Definition: lock.c:1170
#define FAST_PATH_CHECK_LOCKMODE(proc, n, l)
Definition: lock.c:216
void GrantLock(LOCK *lock, PROCLOCK *proclock, LOCKMODE lockmode)
Definition: lock.c:1550
#define FAST_PATH_CLEAR_LOCKMODE(proc, n, l)
Definition: lock.c:214
#define FAST_PATH_GET_BITS(proc, n)
Definition: lock.c:205
#define LockHashPartitionLock(hashcode)
Definition: lock.h:527
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1195
void LWLockRelease(LWLock *lock)
Definition: lwlock.c:1803
@ LW_SHARED
Definition: lwlock.h:116
@ LW_EXCLUSIVE
Definition: lwlock.h:115
unsigned int Oid
Definition: postgres_ext.h:31
PGPROC * MyProc
Definition: proc.c:66
uint32 locktag_field2
Definition: lock.h:167
Definition: lock.h:309
Definition: lwlock.h:40
LWLock fpInfoLock
Definition: proc.h:284
Oid fpRelId[FP_LOCK_SLOTS_PER_BACKEND]
Definition: proc.h:286
PGPROC * myProc
Definition: lock.h:366
Definition: lock.h:370

References DEFAULT_LOCKMETHOD, elog(), ereport, errcode(), errhint(), errmsg(), ERROR, FAST_PATH_CHECK_LOCKMODE, FAST_PATH_CLEAR_LOCKMODE, FAST_PATH_GET_BITS, FP_LOCK_SLOTS_PER_BACKEND, PGPROC::fpInfoLock, PGPROC::fpRelId, GrantLock(), HASH_FIND, hash_search_with_hash_value(), LOCALLOCK::hashcode, LOCALLOCKTAG::lock, LockHashPartitionLock, LockMethodLockHash, LockMethodProcLockHash, LockMethods, LOCKTAG::locktag_field2, LW_EXCLUSIVE, LW_SHARED, LWLockAcquire(), LWLockRelease(), LOCALLOCKTAG::mode, PROCLOCKTAG::myLock, MyProc, PROCLOCKTAG::myProc, ProcLockHashCode(), SetupLockInTable(), PROCLOCK::tag, and LOCALLOCK::tag.

Referenced by AtPrepare_Locks().

◆ FastPathGrantRelationLock()

static bool FastPathGrantRelationLock ( Oid  relid,
LOCKMODE  lockmode 
)
static

Definition at line 2620 of file lock.c.

2621 {
2622  uint32 f;
2623  uint32 unused_slot = FP_LOCK_SLOTS_PER_BACKEND;
2624 
2625  /* Scan for existing entry for this relid, remembering empty slot. */
2626  for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
2627  {
2628  if (FAST_PATH_GET_BITS(MyProc, f) == 0)
2629  unused_slot = f;
2630  else if (MyProc->fpRelId[f] == relid)
2631  {
2632  Assert(!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode));
2633  FAST_PATH_SET_LOCKMODE(MyProc, f, lockmode);
2634  return true;
2635  }
2636  }
2637 
2638  /* If no existing entry, use any empty slot. */
2639  if (unused_slot < FP_LOCK_SLOTS_PER_BACKEND)
2640  {
2641  MyProc->fpRelId[unused_slot] = relid;
2642  FAST_PATH_SET_LOCKMODE(MyProc, unused_slot, lockmode);
2644  return true;
2645  }
2646 
2647  /* No existing entry, and no empty slot. */
2648  return false;
2649 }
#define FAST_PATH_SET_LOCKMODE(proc, n, l)
Definition: lock.c:212
static int FastPathLocalUseCount
Definition: lock.c:172

References Assert(), FAST_PATH_CHECK_LOCKMODE, FAST_PATH_GET_BITS, FAST_PATH_SET_LOCKMODE, FastPathLocalUseCount, FP_LOCK_SLOTS_PER_BACKEND, PGPROC::fpRelId, and MyProc.

Referenced by LockAcquireExtended().

◆ FastPathTransferRelationLocks()

static bool FastPathTransferRelationLocks ( LockMethod  lockMethodTable,
const LOCKTAG locktag,
uint32  hashcode 
)
static

Definition at line 2687 of file lock.c.

2689 {
2690  LWLock *partitionLock = LockHashPartitionLock(hashcode);
2691  Oid relid = locktag->locktag_field2;
2692  uint32 i;
2693 
2694  /*
2695  * Every PGPROC that can potentially hold a fast-path lock is present in
2696  * ProcGlobal->allProcs. Prepared transactions are not, but any
2697  * outstanding fast-path locks held by prepared transactions are
2698  * transferred to the main lock table.
2699  */
2700  for (i = 0; i < ProcGlobal->allProcCount; i++)
2701  {
2702  PGPROC *proc = &ProcGlobal->allProcs[i];
2703  uint32 f;
2704 
2705  LWLockAcquire(&proc->fpInfoLock, LW_EXCLUSIVE);
2706 
2707  /*
2708  * If the target backend isn't referencing the same database as the
2709  * lock, then we needn't examine the individual relation IDs at all;
2710  * none of them can be relevant.
2711  *
2712  * proc->databaseId is set at backend startup time and never changes
2713  * thereafter, so it might be safe to perform this test before
2714  * acquiring &proc->fpInfoLock. In particular, it's certainly safe to
2715  * assume that if the target backend holds any fast-path locks, it
2716  * must have performed a memory-fencing operation (in particular, an
2717  * LWLock acquisition) since setting proc->databaseId. However, it's
2718  * less clear that our backend is certain to have performed a memory
2719  * fencing operation since the other backend set proc->databaseId. So
2720  * for now, we test it after acquiring the LWLock just to be safe.
2721  */
2722  if (proc->databaseId != locktag->locktag_field1)
2723  {
2724  LWLockRelease(&proc->fpInfoLock);
2725  continue;
2726  }
2727 
2728  for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
2729  {
2730  uint32 lockmode;
2731 
2732  /* Look for an allocated slot matching the given relid. */
2733  if (relid != proc->fpRelId[f] || FAST_PATH_GET_BITS(proc, f) == 0)
2734  continue;
2735 
2736  /* Find or create lock object. */
2737  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2738  for (lockmode = FAST_PATH_LOCKNUMBER_OFFSET;
2739  lockmode < FAST_PATH_LOCKNUMBER_OFFSET + FAST_PATH_BITS_PER_SLOT;
2740  ++lockmode)
2741  {
2742  PROCLOCK *proclock;
2743 
2744  if (!FAST_PATH_CHECK_LOCKMODE(proc, f, lockmode))
2745  continue;
2746  proclock = SetupLockInTable(lockMethodTable, proc, locktag,
2747  hashcode, lockmode);
2748  if (!proclock)
2749  {
2750  LWLockRelease(partitionLock);
2751  LWLockRelease(&proc->fpInfoLock);
2752  return false;
2753  }
2754  GrantLock(proclock->tag.myLock, proclock, lockmode);
2755  FAST_PATH_CLEAR_LOCKMODE(proc, f, lockmode);
2756  }
2757  LWLockRelease(partitionLock);
2758 
2759  /* No need to examine remaining slots. */
2760  break;
2761  }
2762  LWLockRelease(&proc->fpInfoLock);
2763  }
2764  return true;
2765 }
PROC_HDR * ProcGlobal
Definition: proc.c:78
uint32 locktag_field1
Definition: lock.h:166
Definition: proc.h:162
Oid databaseId
Definition: proc.h:198
PGPROC * allProcs
Definition: proc.h:362
uint32 allProcCount
Definition: proc.h:380

References PROC_HDR::allProcCount, PROC_HDR::allProcs, PGPROC::databaseId, FAST_PATH_BITS_PER_SLOT, FAST_PATH_CHECK_LOCKMODE, FAST_PATH_CLEAR_LOCKMODE, FAST_PATH_GET_BITS, FAST_PATH_LOCKNUMBER_OFFSET, FP_LOCK_SLOTS_PER_BACKEND, PGPROC::fpInfoLock, PGPROC::fpRelId, GrantLock(), i, LockHashPartitionLock, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), PROCLOCKTAG::myLock, ProcGlobal, SetupLockInTable(), and PROCLOCK::tag.

Referenced by LockAcquireExtended().

◆ FastPathUnGrantRelationLock()

static bool FastPathUnGrantRelationLock ( Oid  relid,
LOCKMODE  lockmode 
)
static

Definition at line 2657 of file lock.c.

2658 {
2659  uint32 f;
2660  bool result = false;
2661 
2662  FastPathLocalUseCount = 0;
2663  for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
2664  {
2665  if (MyProc->fpRelId[f] == relid
2666  && FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
2667  {
2668  Assert(!result);
2669  FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
2670  result = true;
2671  /* we continue iterating so as to update FastPathLocalUseCount */
2672  }
2673  if (FAST_PATH_GET_BITS(MyProc, f) != 0)
2674  ++FastPathLocalUseCount;
2675  }
2676  return result;
2677 }

References Assert(), FAST_PATH_CHECK_LOCKMODE, FAST_PATH_CLEAR_LOCKMODE, FAST_PATH_GET_BITS, FastPathLocalUseCount, FP_LOCK_SLOTS_PER_BACKEND, PGPROC::fpRelId, and MyProc.

Referenced by LockRelease(), and LockReleaseAll().

◆ FinishStrongLockAcquire()

static void FinishStrongLockAcquire ( void  )
static

Definition at line 1742 of file lock.c.

1743 {
1744  StrongLockInProgress = NULL;
1745 }

References StrongLockInProgress.

Referenced by LockAcquireExtended().

◆ GetBlockerStatusData()

BlockedProcsData* GetBlockerStatusData ( int  blocked_pid)

Definition at line 3788 of file lock.c.

3789 {
3790  BlockedProcsData *data;
3791  PGPROC *proc;
3792  int i;
3793 
3794  data = (BlockedProcsData *) palloc(sizeof(BlockedProcsData));
3795 
3796  /*
3797  * Guess how much space we'll need, and preallocate. Most of the time
3798  * this will avoid needing to do repalloc while holding the LWLocks. (We
3799  * assume, but check with an Assert, that MaxBackends is enough entries
3800  * for the procs[] array; the other two could need enlargement, though.)
3801  */
3802  data->nprocs = data->nlocks = data->npids = 0;
3803  data->maxprocs = data->maxlocks = data->maxpids = MaxBackends;
3804  data->procs = (BlockedProcData *) palloc(sizeof(BlockedProcData) * data->maxprocs);
3805  data->locks = (LockInstanceData *) palloc(sizeof(LockInstanceData) * data->maxlocks);
3806  data->waiter_pids = (int *) palloc(sizeof(int) * data->maxpids);
3807 
3808  /*
3809  * In order to search the ProcArray for blocked_pid and assume that that
3810  * entry won't immediately disappear under us, we must hold ProcArrayLock.
3811  * In addition, to examine the lock grouping fields of any other backend,
3812  * we must hold all the hash partition locks. (Only one of those locks is
3813  * actually relevant for any one lock group, but we can't know which one
3814  * ahead of time.) It's fairly annoying to hold all those locks
3815  * throughout this, but it's no worse than GetLockStatusData(), and it
3816  * does have the advantage that we're guaranteed to return a
3817  * self-consistent instantaneous state.
3818  */
3819  LWLockAcquire(ProcArrayLock, LW_SHARED);
3820 
3821  proc = BackendPidGetProcWithLock(blocked_pid);
3822 
3823  /* Nothing to do if it's gone */
3824  if (proc != NULL)
3825  {
3826  /*
3827  * Acquire lock on the entire shared lock data structure. See notes
3828  * in GetLockStatusData().
3829  */
3830  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
3831  LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);
3832 
3833  if (proc->lockGroupLeader == NULL)
3834  {
3835  /* Easy case, proc is not a lock group member */
3836  GetSingleProcBlockerStatusData(proc, data);
3837  }
3838  else
3839  {
3840  /* Examine all procs in proc's lock group */
3841  dlist_iter iter;
3842 
3843  dlist_foreach(iter, &proc->lockGroupMembers)
3844  {
3845  PGPROC *memberProc;
3846 
3847  memberProc = dlist_container(PGPROC, lockGroupLink, iter.cur);
3848  GetSingleProcBlockerStatusData(memberProc, data);
3849  }
3850  }
3851 
3852  /*
3853  * And release locks. See notes in GetLockStatusData().
3854  */
3855  for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
3856  LWLockRelease(LockHashPartitionLockByIndex(i));
3857 
3858  Assert(data->nprocs <= data->maxprocs);
3859  }
3860 
3861  LWLockRelease(ProcArrayLock);
3862 
3863  return data;
3864 }
int MaxBackends
Definition: globals.c:140
#define dlist_foreach(iter, lhead)
Definition: ilist.h:623
#define dlist_container(type, membername, ptr)
Definition: ilist.h:593
static void GetSingleProcBlockerStatusData(PGPROC *blocked_proc, BlockedProcsData *data)
Definition: lock.c:3868
#define LockHashPartitionLockByIndex(i)
Definition: lock.h:530
#define NUM_LOCK_PARTITIONS
Definition: lwlock.h:98
void * palloc(Size size)
Definition: mcxt.c:1210
const void * data
PGPROC * BackendPidGetProcWithLock(int pid)
Definition: procarray.c:3208
dlist_head lockGroupMembers
Definition: proc.h:296
PGPROC * lockGroupLeader
Definition: proc.h:295
dlist_node * cur
Definition: ilist.h:179

References Assert(), BackendPidGetProcWithLock(), dlist_iter::cur, data, dlist_container, dlist_foreach, GetSingleProcBlockerStatusData(), i, PGPROC::lockGroupLeader, PGPROC::lockGroupMembers, LockHashPartitionLockByIndex, LW_SHARED, LWLockAcquire(), LWLockRelease(), MaxBackends, NUM_LOCK_PARTITIONS, and palloc().

Referenced by pg_blocking_pids().

◆ GetLockConflicts()

VirtualTransactionId* GetLockConflicts ( const LOCKTAG locktag,
LOCKMODE  lockmode,
int *  countp 
)

Definition at line 2879 of file lock.c.

2880 {
2881  static VirtualTransactionId *vxids;
2882  LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
2883  LockMethod lockMethodTable;
2884  LOCK *lock;
2885  LOCKMASK conflictMask;
2886  dlist_iter proclock_iter;
2887  PROCLOCK *proclock;
2888  uint32 hashcode;
2889  LWLock *partitionLock;
2890  int count = 0;
2891  int fast_count = 0;
2892 
2893  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2894  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2895  lockMethodTable = LockMethods[lockmethodid];
2896  if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
2897  elog(ERROR, "unrecognized lock mode: %d", lockmode);
2898 
2899  /*
2900  * Allocate memory to store results, and fill with InvalidVXID. We only
2901  * need enough space for MaxBackends + max_prepared_xacts + a terminator.
2902  * InHotStandby allocate once in TopMemoryContext.
2903  */
2904  if (InHotStandby)
2905  {
2906  if (vxids == NULL)
2907  vxids = (VirtualTransactionId *)
2908  MemoryContextAlloc(TopMemoryContext,
2909  sizeof(VirtualTransactionId) *
2910  (MaxBackends + max_prepared_xacts + 1));
2911  }
2912  else
2913  vxids = (VirtualTransactionId *)
2914  palloc0(sizeof(VirtualTransactionId) *
2915  (MaxBackends + max_prepared_xacts + 1));
2916 
2917  /* Compute hash code and partition lock, and look up conflicting modes. */
2918  hashcode = LockTagHashCode(locktag);
2919  partitionLock = LockHashPartitionLock(hashcode);
2920  conflictMask = lockMethodTable->conflictTab[lockmode];
2921 
2922  /*
2923  * Fast path locks might not have been entered in the primary lock table.
2924  * If the lock we're dealing with could conflict with such a lock, we must
2925  * examine each backend's fast-path array for conflicts.
2926  */
2927  if (ConflictsWithRelationFastPath(locktag, lockmode))
2928  {
2929  int i;
2930  Oid relid = locktag->locktag_field2;
2931  VirtualTransactionId vxid;
2932 
2933  /*
2934  * Iterate over relevant PGPROCs. Anything held by a prepared
2935  * transaction will have been transferred to the primary lock table,
2936  * so we need not worry about those. This is all a bit fuzzy, because
2937  * new locks could be taken after we've visited a particular
2938  * partition, but the callers had better be prepared to deal with that
2939  * anyway, since the locks could equally well be taken between the
2940  * time we return the value and the time the caller does something
2941  * with it.
2942  */
2943  for (i = 0; i < ProcGlobal->allProcCount; i++)
2944  {
2945  PGPROC *proc = &ProcGlobal->allProcs[i];
2946  uint32 f;
2947 
2948  /* A backend never blocks itself */
2949  if (proc == MyProc)
2950  continue;
2951 
2952  LWLockAcquire(&proc->fpInfoLock, LW_SHARED);
2953 
2954  /*
2955  * If the target backend isn't referencing the same database as
2956  * the lock, then we needn't examine the individual relation IDs
2957  * at all; none of them can be relevant.
2958  *
2959  * See FastPathTransferRelationLocks() for discussion of why we do
2960  * this test after acquiring the lock.
2961  */
2962  if (proc->databaseId != locktag->locktag_field1)
2963  {
2964  LWLockRelease(&proc->fpInfoLock);
2965  continue;
2966  }
2967 
2968  for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
2969  {
2970  uint32 lockmask;
2971 
2972  /* Look for an allocated slot matching the given relid. */
2973  if (relid != proc->fpRelId[f])
2974  continue;
2975  lockmask = FAST_PATH_GET_BITS(proc, f);
2976  if (!lockmask)
2977  continue;
2978  lockmask <<= FAST_PATH_LOCKNUMBER_OFFSET;
2979 
2980  /*
2981  * There can only be one entry per relation, so if we found it
2982  * and it doesn't conflict, we can skip the rest of the slots.
2983  */
2984  if ((lockmask & conflictMask) == 0)
2985  break;
2986 
2987  /* Conflict! */
2988  GET_VXID_FROM_PGPROC(vxid, *proc);
2989 
2990  if (VirtualTransactionIdIsValid(vxid))
2991  vxids[count++] = vxid;
2992  /* else, xact already committed or aborted */
2993 
2994  /* No need to examine remaining slots. */
2995  break;
2996  }
2997 
2998  LWLockRelease(&proc->fpInfoLock);
2999  }
3000  }
3001 
3002  /* Remember how many fast-path conflicts we found. */
3003  fast_count = count;
3004 
3005  /*
3006  * Look up the lock object matching the tag.
3007  */
3008  LWLockAcquire(partitionLock, LW_SHARED);
3009 
3010  lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
3011  locktag,
3012  hashcode,
3013  HASH_FIND,
3014  NULL);
3015  if (!lock)
3016  {
3017  /*
3018  * If the lock object doesn't exist, there is nothing holding a lock
3019  * on this lockable object.
3020  */
3021  LWLockRelease(partitionLock);
3022  vxids[count].backendId = InvalidBackendId;
3023  vxids[count].localTransactionId = InvalidLocalTransactionId;
3024  if (countp)
3025  *countp = count;
3026  return vxids;
3027  }
3028 
3029  /*
3030  * Examine each existing holder (or awaiter) of the lock.
3031  */
3032  dlist_foreach(proclock_iter, &lock->procLocks)
3033  {
3034  proclock = dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
3035 
3036  if (conflictMask & proclock->holdMask)
3037  {
3038  PGPROC *proc = proclock->tag.myProc;
3039 
3040  /* A backend never blocks itself */
3041  if (proc != MyProc)
3042  {
3043  VirtualTransactionId vxid;
3044 
3045  GET_VXID_FROM_PGPROC(vxid, *proc);
3046 
3047  if (VirtualTransactionIdIsValid(vxid))
3048  {
3049  int i;
3050 
3051  /* Avoid duplicate entries. */
3052  for (i = 0; i < fast_count; ++i)
3053  if (VirtualTransactionIdEquals(vxids[i], vxid))
3054  break;
3055  if (i >= fast_count)
3056  vxids[count++] = vxid;
3057  }
3058  /* else, xact already committed or aborted */
3059  }
3060  }
3061  }
3062 
3063  LWLockRelease(partitionLock);
3064 
3065  if (count > MaxBackends + max_prepared_xacts) /* should never happen */
3066  elog(PANIC, "too many conflicting locks found");
3067 
3068  vxids[count].backendId = InvalidBackendId;
3069  vxids[count].localTransactionId = InvalidLocalTransactionId;
3070  if (countp)
3071  *countp = count;
3072  return vxids;
3073 }
#define InvalidBackendId
Definition: backendid.h:23
#define lengthof(array)
Definition: c.h:772
#define ConflictsWithRelationFastPath(locktag, mode)
Definition: lock.c:233
uint32 LockTagHashCode(const LOCKTAG *locktag)
Definition: lock.c:517
uint16 LOCKMETHODID
Definition: lock.h:122
#define VirtualTransactionIdIsValid(vxid)
Definition: lock.h:67
#define InvalidLocalTransactionId
Definition: lock.h:65
#define VirtualTransactionIdEquals(vxid1, vxid2)
Definition: lock.h:71
#define GET_VXID_FROM_PGPROC(vxid, proc)
Definition: lock.h:77
int LOCKMASK
Definition: lockdefs.h:25
MemoryContext TopMemoryContext
Definition: mcxt.c:141
void * palloc0(Size size)
Definition: mcxt.c:1241
void * MemoryContextAlloc(MemoryContext context, Size size)
Definition: mcxt.c:1005
uint8 locktag_lockmethodid
Definition: lock.h:171
int numLockModes
Definition: lock.h:110
LocalTransactionId localTransactionId
Definition: lock.h:62
BackendId backendId
Definition: lock.h:61
int max_prepared_xacts
Definition: twophase.c:117
#define InHotStandby
Definition: xlogutils.h:57

References PROC_HDR::allProcCount, PROC_HDR::allProcs, VirtualTransactionId::backendId, ConflictsWithRelationFastPath, LockMethodData::conflictTab, dlist_iter::cur, PGPROC::databaseId, dlist_container, dlist_foreach, elog(), ERROR, FAST_PATH_GET_BITS, FAST_PATH_LOCKNUMBER_OFFSET, FP_LOCK_SLOTS_PER_BACKEND, PGPROC::fpInfoLock, PGPROC::fpRelId, GET_VXID_FROM_PGPROC, HASH_FIND, hash_search_with_hash_value(), PROCLOCK::holdMask, i, InHotStandby, InvalidBackendId, InvalidLocalTransactionId, lengthof, VirtualTransactionId::localTransactionId, LockHashPartitionLock, LockMethodLockHash, LockMethods, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_lockmethodid, LockTagHashCode(), LW_SHARED, LWLockAcquire(), LWLockRelease(), max_prepared_xacts, MaxBackends, MemoryContextAlloc(), MyProc, PROCLOCKTAG::myProc, LockMethodData::numLockModes, palloc0(), PANIC, ProcGlobal, LOCK::procLocks, PROCLOCK::tag, TopMemoryContext, VirtualTransactionIdEquals, and VirtualTransactionIdIsValid.

Referenced by ProcSleep(), ResolveRecoveryConflictWithLock(), and WaitForLockersMultiple().

◆ GetLockmodeName()

const char* GetLockmodeName ( LOCKMETHODID  lockmethodid,
LOCKMODE  mode 
)

Definition at line 4044 of file lock.c.

4045 {
4046  Assert(lockmethodid > 0 && lockmethodid < lengthof(LockMethods));
4047  Assert(mode > 0 && mode <= LockMethods[lockmethodid]->numLockModes);
4048  return LockMethods[lockmethodid]->lockModeNames[mode];
4049 }
const char *const * lockModeNames
Definition: lock.h:112

References Assert(), lengthof, LockMethods, LockMethodData::lockModeNames, and mode.

Referenced by CheckRelationLockedByMe(), DeadLockReport(), pg_lock_status(), and ProcSleep().

◆ GetLocksMethodTable()

LockMethod GetLocksMethodTable ( const LOCK lock)

Definition at line 487 of file lock.c.

488 {
489  LOCKMETHODID lockmethodid = LOCK_LOCKMETHOD(*lock);
490 
491  Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
492  return LockMethods[lockmethodid];
493 }
#define LOCK_LOCKMETHOD(lock)
Definition: lock.h:324

References Assert(), lengthof, LOCK_LOCKMETHOD, and LockMethods.

Referenced by DeadLockCheck(), and FindLockCycleRecurseMember().

◆ GetLockStatusData()

LockData* GetLockStatusData ( void  )

Definition at line 3596 of file lock.c.

3597 {
3598  LockData *data;
3599  PROCLOCK *proclock;
3600  HASH_SEQ_STATUS seqstat;
3601  int els;
3602  int el;
3603  int i;
3604 
3605  data = (LockData *) palloc(sizeof(LockData));
3606 
3607  /* Guess how much space we'll need. */
3608  els = MaxBackends;
3609  el = 0;
3610  data->locks = (LockInstanceData *) palloc(sizeof(LockInstanceData) * els);
3611 
3612  /*
3613  * First, we iterate through the per-backend fast-path arrays, locking
3614  * them one at a time. This might produce an inconsistent picture of the
3615  * system state, but taking all of those LWLocks at the same time seems
3616  * impractical (in particular, note MAX_SIMUL_LWLOCKS). It shouldn't
3617  * matter too much, because none of these locks can be involved in lock
3618  * conflicts anyway - anything that might must be present in the main lock
3619  * table. (For the same reason, we don't sweat about making leaderPid
3620  * completely valid. We cannot safely dereference another backend's
3621  * lockGroupLeader field without holding all lock partition locks, and
3622  * it's not worth that.)
3623  */
3624  for (i = 0; i < ProcGlobal->allProcCount; ++i)
3625  {
3626  PGPROC *proc = &ProcGlobal->allProcs[i];
3627  uint32 f;
3628 
3629  LWLockAcquire(&proc->fpInfoLock, LW_SHARED);
3630 
3631  for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; ++f)
3632  {
3633  LockInstanceData *instance;
3634  uint32 lockbits = FAST_PATH_GET_BITS(proc, f);
3635 
3636  /* Skip unallocated slots. */
3637  if (!lockbits)
3638  continue;
3639 
3640  if (el >= els)
3641  {
3642  els += MaxBackends;
3643  data->locks = (LockInstanceData *)
3644  repalloc(data->locks, sizeof(LockInstanceData) * els);
3645  }
3646 
3647  instance = &data->locks[el];
3648  SET_LOCKTAG_RELATION(instance->locktag, proc->databaseId,
3649  proc->fpRelId[f]);
3650  instance->holdMask = lockbits << FAST_PATH_LOCKNUMBER_OFFSET;
3651  instance->waitLockMode = NoLock;
3652  instance->backend = proc->backendId;
3653  instance->lxid = proc->lxid;
3654  instance->pid = proc->pid;
3655  instance->leaderPid = proc->pid;
3656  instance->fastpath = true;
3657 
3658  /*
3659  * Successfully taking fast path lock means there were no
3660  * conflicting locks.
3661  */
3662  instance->waitStart = 0;
3663 
3664  el++;
3665  }
3666 
3667  if (proc->fpVXIDLock)
3668  {
3669  VirtualTransactionId vxid;
3670  LockInstanceData *instance;
3671 
3672  if (el >= els)
3673  {
3674  els += MaxBackends;
3675  data->locks = (LockInstanceData *)
3676  repalloc(data->locks, sizeof(LockInstanceData) * els);
3677  }
3678 
3679  vxid.backendId = proc->backendId;
3680  vxid.localTransactionId = proc->fpLocalTransactionId;
3681 
3682  instance = &data->locks[el];
3683  SET_LOCKTAG_VIRTUALTRANSACTION(instance->locktag, vxid);
3684  instance->holdMask = LOCKBIT_ON(ExclusiveLock);
3685  instance->waitLockMode = NoLock;
3686  instance->backend = proc->backendId;
3687  instance->lxid = proc->lxid;
3688  instance->pid = proc->pid;
3689  instance->leaderPid = proc->pid;
3690  instance->fastpath = true;
3691  instance->waitStart = 0;
3692 
3693  el++;
3694  }
3695 
3696  LWLockRelease(&proc->fpInfoLock);
3697  }
3698 
3699  /*
3700  * Next, acquire lock on the entire shared lock data structure. We do
3701  * this so that, at least for locks in the primary lock table, the state
3702  * will be self-consistent.
3703  *
3704  * Since this is a read-only operation, we take shared instead of
3705  * exclusive lock. There's not a whole lot of point to this, because all
3706  * the normal operations require exclusive lock, but it doesn't hurt
3707  * anything either. It will at least allow two backends to do
3708  * GetLockStatusData in parallel.
3709  *
3710  * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
3711  */
3712  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
3713  LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);
3714 
3715  /* Now we can safely count the number of proclocks */
3716  data->nelements = el + hash_get_num_entries(LockMethodProcLockHash);
3717  if (data->nelements > els)
3718  {
3719  els = data->nelements;
3720  data->locks = (LockInstanceData *)
3721  repalloc(data->locks, sizeof(LockInstanceData) * els);
3722  }
3723 
3724  /* Now scan the tables to copy the data */
3725  hash_seq_init(&seqstat, LockMethodProcLockHash);
3726 
3727  while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
3728  {
3729  PGPROC *proc = proclock->tag.myProc;
3730  LOCK *lock = proclock->tag.myLock;
3731  LockInstanceData *instance = &data->locks[el];
3732 
3733  memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
3734  instance->holdMask = proclock->holdMask;
3735  if (proc->waitLock == proclock->tag.myLock)
3736  instance->waitLockMode = proc->waitLockMode;
3737  else
3738  instance->waitLockMode = NoLock;
3739  instance->backend = proc->backendId;
3740  instance->lxid = proc->lxid;
3741  instance->pid = proc->pid;
3742  instance->leaderPid = proclock->groupLeader->pid;
3743  instance->fastpath = false;
3744  instance->waitStart = (TimestampTz) pg_atomic_read_u64(&proc->waitStart);
3745 
3746  el++;
3747  }
3748 
3749  /*
3750  * And release locks. We do this in reverse order for two reasons: (1)
3751  * Anyone else who needs more than one of the locks will be trying to lock
3752  * them in increasing order; we don't want to release the other process
3753  * until it can get all the locks it needs. (2) This avoids O(N^2)
3754  * behavior inside LWLockRelease.
3755  */
3756  for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
3757  LWLockRelease(LockHashPartitionLockByIndex(i));
3758 
3759  Assert(el == data->nelements);
3760 
3761  return data;
3762 }
static uint64 pg_atomic_read_u64(volatile pg_atomic_uint64 *ptr)
Definition: atomics.h:424
int64 TimestampTz
Definition: timestamp.h:39
long hash_get_num_entries(HTAB *hashp)
Definition: dynahash.c:1377
#define SET_LOCKTAG_VIRTUALTRANSACTION(locktag, vxid)
Definition: lock.h:235
#define SET_LOCKTAG_RELATION(locktag, dboid, reloid)
Definition: lock.h:181
#define NoLock
Definition: lockdefs.h:34
#define ExclusiveLock
Definition: lockdefs.h:42
void * repalloc(void *pointer, Size size)
Definition: mcxt.c:1456
Definition: lock.h:467
LOCKMASK holdMask
Definition: lock.h:455
LOCKMODE waitLockMode
Definition: lock.h:456
bool fastpath
Definition: lock.h:463
LOCKTAG locktag
Definition: lock.h:454
TimestampTz waitStart
Definition: lock.h:459
int leaderPid
Definition: lock.h:462
BackendId backend
Definition: lock.h:457
LocalTransactionId lxid
Definition: lock.h:458
LocalTransactionId lxid
Definition: proc.h:183
pg_atomic_uint64 waitStart
Definition: proc.h:228
bool fpVXIDLock
Definition: proc.h:287
BackendId backendId
Definition: proc.h:197
int pid
Definition: proc.h:186
LOCK * waitLock
Definition: proc.h:223
LOCKMODE waitLockMode
Definition: proc.h:225
LocalTransactionId fpLocalTransactionId
Definition: proc.h:288
PGPROC * groupLeader
Definition: lock.h:375

References PROC_HDR::allProcCount, PROC_HDR::allProcs, Assert(), LockInstanceData::backend, VirtualTransactionId::backendId, PGPROC::backendId, data, PGPROC::databaseId, ExclusiveLock, FAST_PATH_GET_BITS, FAST_PATH_LOCKNUMBER_OFFSET, LockInstanceData::fastpath, FP_LOCK_SLOTS_PER_BACKEND, PGPROC::fpInfoLock, PGPROC::fpLocalTransactionId, PGPROC::fpRelId, PGPROC::fpVXIDLock, PROCLOCK::groupLeader, hash_get_num_entries(), hash_seq_init(), hash_seq_search(), PROCLOCK::holdMask, LockInstanceData::holdMask, i, LockInstanceData::leaderPid, VirtualTransactionId::localTransactionId, LOCKBIT_ON, LockHashPartitionLockByIndex, LockMethodProcLockHash, LockInstanceData::locktag, LW_SHARED, LWLockAcquire(), LWLockRelease(), LockInstanceData::lxid, PGPROC::lxid, MaxBackends, PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, NoLock, NUM_LOCK_PARTITIONS, palloc(), pg_atomic_read_u64(), LockInstanceData::pid, PGPROC::pid, ProcGlobal, repalloc(), SET_LOCKTAG_RELATION, SET_LOCKTAG_VIRTUALTRANSACTION, LOCK::tag, PROCLOCK::tag, PGPROC::waitLock, LockInstanceData::waitLockMode, PGPROC::waitLockMode, LockInstanceData::waitStart, and PGPROC::waitStart.

Referenced by pg_lock_status().

◆ GetLockTagsMethodTable()

LockMethod GetLockTagsMethodTable ( const LOCKTAG locktag)

Definition at line 499 of file lock.c.

500 {
501  LOCKMETHODID lockmethodid = (LOCKMETHODID) locktag->locktag_lockmethodid;
502 
503  Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
504  return LockMethods[lockmethodid];
505 }

References Assert(), lengthof, LockMethods, and LOCKTAG::locktag_lockmethodid.

Referenced by pg_blocking_pids().

◆ GetRunningTransactionLocks()

xl_standby_lock* GetRunningTransactionLocks ( int *  nlocks)

Definition at line 3962 of file lock.c.

3963 {
3964  xl_standby_lock *accessExclusiveLocks;
3965  PROCLOCK *proclock;
3966  HASH_SEQ_STATUS seqstat;
3967  int i;
3968  int index;
3969  int els;
3970 
3971  /*
3972  * Acquire lock on the entire shared lock data structure.
3973  *
3974  * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
3975  */
3976  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
3977  LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);
3978 
3979  /* Now we can safely count the number of proclocks */
3980  els = hash_get_num_entries(LockMethodProcLockHash);
3981 
3982  /*
3983  * Allocating enough space for all locks in the lock table is overkill,
3984  * but it's more convenient and faster than having to enlarge the array.
3985  */
3986  accessExclusiveLocks = palloc(els * sizeof(xl_standby_lock));
3987 
3988  /* Now scan the tables to copy the data */
3989  hash_seq_init(&seqstat, LockMethodProcLockHash);
3990 
3991  /*
3992  * If lock is a currently granted AccessExclusiveLock then it will have
3993  * just one proclock holder, so locks are never accessed twice in this
3994  * particular case. Don't copy this code for use elsewhere because in the
3995  * general case this will give you duplicate locks when looking at
3996  * non-exclusive lock types.
3997  */
3998  index = 0;
3999  while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
4000  {
4001  /* make sure this definition matches the one used in LockAcquire */
4002  if ((proclock->holdMask & LOCKBIT_ON(AccessExclusiveLock)) &&
4003  proclock->tag.myLock->tag.locktag_type == LOCKTAG_RELATION)
4004  {
4005  PGPROC *proc = proclock->tag.myProc;
4006  LOCK *lock = proclock->tag.myLock;
4007  TransactionId xid = proc->xid;
4008 
4009  /*
4010  * Don't record locks for transactions if we know they have
4011  * already issued their WAL record for commit but not yet released
4012  * lock. It is still possible that we see locks held by already
4013  * complete transactions, if they haven't yet zeroed their xids.
4014  */
4015  if (!TransactionIdIsValid(xid))
4016  continue;
4017 
4018  accessExclusiveLocks[index].xid = xid;
4019  accessExclusiveLocks[index].dbOid = lock->tag.locktag_field1;
4020  accessExclusiveLocks[index].relOid = lock->tag.locktag_field2;
4021 
4022  index++;
4023  }
4024  }
4025 
4026  Assert(index <= els);
4027 
4028  /*
4029  * And release locks. We do this in reverse order for two reasons: (1)
4030  * Anyone else who needs more than one of the locks will be trying to lock
4031  * them in increasing order; we don't want to release the other process
4032  * until it can get all the locks it needs. (2) This avoids O(N^2)
4033  * behavior inside LWLockRelease.
4034  */
4035  for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
4036  LWLockRelease(LockHashPartitionLockByIndex(i));
4037 
4038  *nlocks = index;
4039  return accessExclusiveLocks;
4040 }
uint32 TransactionId
Definition: c.h:636
#define AccessExclusiveLock
Definition: lockdefs.h:43
TransactionId xid
Definition: proc.h:173
Definition: type.h:95
TransactionId xid
Definition: lockdefs.h:51
#define TransactionIdIsValid(xid)
Definition: transam.h:41

References AccessExclusiveLock, Assert(), xl_standby_lock::dbOid, hash_get_num_entries(), hash_seq_init(), hash_seq_search(), PROCLOCK::holdMask, i, LOCKBIT_ON, LockHashPartitionLockByIndex, LockMethodProcLockHash, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG_RELATION, LOCKTAG::locktag_type, LW_SHARED, LWLockAcquire(), LWLockRelease(), PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, NUM_LOCK_PARTITIONS, palloc(), xl_standby_lock::relOid, LOCK::tag, PROCLOCK::tag, TransactionIdIsValid, xl_standby_lock::xid, and PGPROC::xid.

Referenced by LogStandbySnapshot().

◆ GetSingleProcBlockerStatusData()

static void GetSingleProcBlockerStatusData ( PGPROC blocked_proc,
BlockedProcsData data 
)
static

Definition at line 3868 of file lock.c.

3869 {
3870  LOCK *theLock = blocked_proc->waitLock;
3871  BlockedProcData *bproc;
3872  dlist_iter proclock_iter;
3873  dlist_iter proc_iter;
3874  dclist_head *waitQueue;
3875  int queue_size;
3876 
3877  /* Nothing to do if this proc is not blocked */
3878  if (theLock == NULL)
3879  return;
3880 
3881  /* Set up a procs[] element */
3882  bproc = &data->procs[data->nprocs++];
3883  bproc->pid = blocked_proc->pid;
3884  bproc->first_lock = data->nlocks;
3885  bproc->first_waiter = data->npids;
3886 
3887  /*
3888  * We may ignore the proc's fast-path arrays, since nothing in those could
3889  * be related to a contended lock.
3890  */
3891 
3892  /* Collect all PROCLOCKs associated with theLock */
3893  dlist_foreach(proclock_iter, &theLock->procLocks)
3894  {
3895  PROCLOCK *proclock =
3896  dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
3897  PGPROC *proc = proclock->tag.myProc;
3898  LOCK *lock = proclock->tag.myLock;
3899  LockInstanceData *instance;
3900 
3901  if (data->nlocks >= data->maxlocks)
3902  {
3903  data->maxlocks += MaxBackends;
3904  data->locks = (LockInstanceData *)
3905  repalloc(data->locks, sizeof(LockInstanceData) * data->maxlocks);
3906  }
3907 
3908  instance = &data->locks[data->nlocks];
3909  memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
3910  instance->holdMask = proclock->holdMask;
3911  if (proc->waitLock == lock)
3912  instance->waitLockMode = proc->waitLockMode;
3913  else
3914  instance->waitLockMode = NoLock;
3915  instance->backend = proc->backendId;
3916  instance->lxid = proc->lxid;
3917  instance->pid = proc->pid;
3918  instance->leaderPid = proclock->groupLeader->pid;
3919  instance->fastpath = false;
3920  data->nlocks++;
3921  }
3922 
3923  /* Enlarge waiter_pids[] if it's too small to hold all wait queue PIDs */
3924  waitQueue = &(theLock->waitProcs);
3925  queue_size = dclist_count(waitQueue);
3926 
3927  if (queue_size > data->maxpids - data->npids)
3928  {
3929  data->maxpids = Max(data->maxpids + MaxBackends,
3930  data->npids + queue_size);
3931  data->waiter_pids = (int *) repalloc(data->waiter_pids,
3932  sizeof(int) * data->maxpids);
3933  }
3934 
3935  /* Collect PIDs from the lock's wait queue, stopping at blocked_proc */
3936  dclist_foreach(proc_iter, waitQueue)
3937  {
3938  PGPROC *queued_proc = dlist_container(PGPROC, links, proc_iter.cur);
3939  if (queued_proc == blocked_proc)
3940  break;
3941  data->waiter_pids[data->npids++] = queued_proc->pid;
3942  queued_proc = (PGPROC *) queued_proc->links.next;
3943  }
3944 
3945  bproc->num_locks = data->nlocks - bproc->first_lock;
3946  bproc->num_waiters = data->npids - bproc->first_waiter;
3947 }
#define Max(x, y)
Definition: c.h:982
static uint32 dclist_count(const dclist_head *head)
Definition: ilist.h:932
#define dclist_foreach(iter, lhead)
Definition: ilist.h:970
int first_lock
Definition: lock.h:477
int first_waiter
Definition: lock.h:481
int num_waiters
Definition: lock.h:482
int num_locks
Definition: lock.h:478
dclist_head waitProcs
Definition: lock.h:317
dlist_node links
Definition: proc.h:164
dlist_node * next
Definition: ilist.h:140
static struct link * links
Definition: zic.c:299

References LockInstanceData::backend, PGPROC::backendId, dlist_iter::cur, data, dclist_count(), dclist_foreach, dlist_container, dlist_foreach, LockInstanceData::fastpath, BlockedProcData::first_lock, BlockedProcData::first_waiter, PROCLOCK::groupLeader, PROCLOCK::holdMask, LockInstanceData::holdMask, LockInstanceData::leaderPid, PGPROC::links, links, LockInstanceData::locktag, LockInstanceData::lxid, PGPROC::lxid, Max, MaxBackends, PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, dlist_node::next, NoLock, BlockedProcData::num_locks, BlockedProcData::num_waiters, LockInstanceData::pid, BlockedProcData::pid, PGPROC::pid, LOCK::procLocks, repalloc(), LOCK::tag, PROCLOCK::tag, PGPROC::waitLock, LockInstanceData::waitLockMode, PGPROC::waitLockMode, and LOCK::waitProcs.

Referenced by GetBlockerStatusData().

◆ GrantAwaitedLock()

void GrantAwaitedLock ( void  )

Definition at line 1781 of file lock.c.

1782 {
1784 }
static LOCALLOCK * awaitedLock
Definition: lock.c:288
static ResourceOwner awaitedOwner
Definition: lock.c:289
static void GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner)
Definition: lock.c:1684

References awaitedLock, awaitedOwner, and GrantLockLocal().

Referenced by LockErrorCleanup(), and ProcSleep().

◆ GrantLock()

void GrantLock ( LOCK lock,
PROCLOCK proclock,
LOCKMODE  lockmode 
)

Definition at line 1550 of file lock.c.

1551 {
1552  lock->nGranted++;
1553  lock->granted[lockmode]++;
1554  lock->grantMask |= LOCKBIT_ON(lockmode);
1555  if (lock->granted[lockmode] == lock->requested[lockmode])
1556  lock->waitMask &= LOCKBIT_OFF(lockmode);
1557  proclock->holdMask |= LOCKBIT_ON(lockmode);
1558  LOCK_PRINT("GrantLock", lock, lockmode);
1559  Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
1560  Assert(lock->nGranted <= lock->nRequested);
1561 }
#define LOCKBIT_OFF(lockmode)
Definition: lock.h:85
int requested[MAX_LOCKMODES]
Definition: lock.h:318
int granted[MAX_LOCKMODES]
Definition: lock.h:320
LOCKMASK grantMask
Definition: lock.h:314
LOCKMASK waitMask
Definition: lock.h:315
int nGranted
Definition: lock.h:321

References Assert(), LOCK::granted, LOCK::grantMask, PROCLOCK::holdMask, LOCK_PRINT, LOCKBIT_OFF, LOCKBIT_ON, LOCK::nGranted, LOCK::nRequested, LOCK::requested, and LOCK::waitMask.

Referenced by FastPathGetRelationLockEntry(), FastPathTransferRelationLocks(), lock_twophase_recover(), LockAcquireExtended(), ProcLockWakeup(), ProcSleep(), and VirtualXactLock().

◆ GrantLockLocal()

static void GrantLockLocal ( LOCALLOCK locallock,
ResourceOwner  owner 
)
static

Definition at line 1684 of file lock.c.

1685 {
1686  LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
1687  int i;
1688 
1689  Assert(locallock->numLockOwners < locallock->maxLockOwners);
1690  /* Count the total */
1691  locallock->nLocks++;
1692  /* Count the per-owner lock */
1693  for (i = 0; i < locallock->numLockOwners; i++)
1694  {
1695  if (lockOwners[i].owner == owner)
1696  {
1697  lockOwners[i].nLocks++;
1698  return;
1699  }
1700  }
1701  lockOwners[i].owner = owner;
1702  lockOwners[i].nLocks = 1;
1703  locallock->numLockOwners++;
1704  if (owner != NULL)
1705  ResourceOwnerRememberLock(owner, locallock);
1706 
1707  /* Indicate that the lock is acquired for certain types of locks. */
1708  CheckAndSetLockHeld(locallock, true);
1709 }
static void CheckAndSetLockHeld(LOCALLOCK *locallock, bool acquired)
Definition: lock.c:1351
void ResourceOwnerRememberLock(ResourceOwner owner, LOCALLOCK *locallock)
Definition: resowner.c:990
int64 nLocks
Definition: lock.h:423
struct ResourceOwnerData * owner
Definition: lock.h:422
int maxLockOwners
Definition: lock.h:437

References Assert(), CheckAndSetLockHeld(), i, LOCALLOCK::lockOwners, LOCALLOCK::maxLockOwners, LOCALLOCKOWNER::nLocks, LOCALLOCK::nLocks, LOCALLOCK::numLockOwners, LOCALLOCKOWNER::owner, and ResourceOwnerRememberLock().

Referenced by GrantAwaitedLock(), and LockAcquireExtended().

◆ InitLocks()

void InitLocks ( void  )

Definition at line 405 of file lock.c.

406 {
407  HASHCTL info;
408  long init_table_size,
409  max_table_size;
410  bool found;
411 
412  /*
413  * Compute init/max size to request for lock hashtables. Note these
414  * calculations must agree with LockShmemSize!
415  */
416  max_table_size = NLOCKENTS();
417  init_table_size = max_table_size / 2;
418 
419  /*
420  * Allocate hash table for LOCK structs. This stores per-locked-object
421  * information.
422  */
423  info.keysize = sizeof(LOCKTAG);
424  info.entrysize = sizeof(LOCK);
426 
427  LockMethodLockHash = ShmemInitHash("LOCK hash",
428  init_table_size,
429  max_table_size,
430  &info,
432 
433  /* Assume an average of 2 holders per lock */
434  max_table_size *= 2;
435  init_table_size *= 2;
436 
437  /*
438  * Allocate hash table for PROCLOCK structs. This stores
439  * per-lock-per-holder information.
440  */
441  info.keysize = sizeof(PROCLOCKTAG);
442  info.entrysize = sizeof(PROCLOCK);
443  info.hash = proclock_hash;
445 
446  LockMethodProcLockHash = ShmemInitHash("PROCLOCK hash",
447  init_table_size,
448  max_table_size,
449  &info,
451 
452  /*
453  * Allocate fast-path structures.
454  */
456  ShmemInitStruct("Fast Path Strong Relation Lock Data",
457  sizeof(FastPathStrongRelationLockData), &found);
458  if (!found)
460 
461  /*
462  * Allocate non-shared hash table for LOCALLOCK structs. This stores lock
463  * counts and resource owner information.
464  *
465  * The non-shared table could already exist in this process (this occurs
466  * when the postmaster is recreating shared memory after a backend crash).
467  * If so, delete and recreate it. (We could simply leave it, since it
468  * ought to be empty in the postmaster, but for safety let's zap it.)
469  */
472 
473  info.keysize = sizeof(LOCALLOCKTAG);
474  info.entrysize = sizeof(LOCALLOCK);
475 
476  LockMethodLocalHash = hash_create("LOCALLOCK hash",
477  16,
478  &info,
480 }
#define HASH_FUNCTION
Definition: hsearch.h:98
#define HASH_PARTITION
Definition: hsearch.h:92
#define NLOCKENTS()
Definition: lock.c:57
static uint32 proclock_hash(const void *key, Size keysize)
Definition: lock.c:534
struct LOCALLOCK LOCALLOCK
struct LOCK LOCK
struct PROCLOCK PROCLOCK
struct PROCLOCKTAG PROCLOCKTAG
struct LOCALLOCKTAG LOCALLOCKTAG
void * ShmemInitStruct(const char *name, Size size, bool *foundPtr)
Definition: shmem.c:396
HTAB * ShmemInitHash(const char *name, long init_size, long max_size, HASHCTL *infoP, int hash_flags)
Definition: shmem.c:341
#define SpinLockInit(lock)
Definition: spin.h:60
HashValueFunc hash
Definition: hsearch.h:78
long num_partitions
Definition: hsearch.h:68

References HASHCTL::entrysize, FastPathStrongRelationLocks, HASHCTL::hash, HASH_BLOBS, hash_create(), hash_destroy(), HASH_ELEM, HASH_FUNCTION, HASH_PARTITION, HASHCTL::keysize, LockMethodLocalHash, LockMethodLockHash, LockMethodProcLockHash, FastPathStrongRelationLockData::mutex, NLOCKENTS, NUM_LOCK_PARTITIONS, HASHCTL::num_partitions, proclock_hash(), ShmemInitHash(), ShmemInitStruct(), and SpinLockInit.

Referenced by CreateSharedMemoryAndSemaphores().

◆ lock_twophase_postabort()

void lock_twophase_postabort ( TransactionId  xid,
uint16  info,
void *  recdata,
uint32  len 
)

Definition at line 4387 of file lock.c.

4389 {
4390  lock_twophase_postcommit(xid, info, recdata, len);
4391 }
void lock_twophase_postcommit(TransactionId xid, uint16 info, void *recdata, uint32 len)
Definition: lock.c:4361
const void size_t len

References len, and lock_twophase_postcommit().

◆ lock_twophase_postcommit()

void lock_twophase_postcommit ( TransactionId  xid,
uint16  info,
void *  recdata,
uint32  len 
)

Definition at line 4361 of file lock.c.

4363 {
4364  TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4365  PGPROC *proc = TwoPhaseGetDummyProc(xid, true);
4366  LOCKTAG *locktag;
4367  LOCKMETHODID lockmethodid;
4368  LockMethod lockMethodTable;
4369 
4370  Assert(len == sizeof(TwoPhaseLockRecord));
4371  locktag = &rec->locktag;
4372  lockmethodid = locktag->locktag_lockmethodid;
4373 
4374  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4375  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4376  lockMethodTable = LockMethods[lockmethodid];
4377 
4378  LockRefindAndRelease(lockMethodTable, proc, locktag, rec->lockmode, true);
4379 }
static void LockRefindAndRelease(LockMethod lockMethodTable, PGPROC *proc, LOCKTAG *locktag, LOCKMODE lockmode, bool decrement_strong_lock_count)
Definition: lock.c:3087
PGPROC * TwoPhaseGetDummyProc(TransactionId xid, bool lock_held)
Definition: twophase.c:933

References Assert(), elog(), ERROR, len, lengthof, LockMethods, TwoPhaseLockRecord::lockmode, LockRefindAndRelease(), TwoPhaseLockRecord::locktag, LOCKTAG::locktag_lockmethodid, and TwoPhaseGetDummyProc().

Referenced by lock_twophase_postabort().

◆ lock_twophase_recover()

void lock_twophase_recover ( TransactionId  xid,
uint16  info,
void *  recdata,
uint32  len 
)

Definition at line 4148 of file lock.c.

4150 {
4151  TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4152  PGPROC *proc = TwoPhaseGetDummyProc(xid, false);
4153  LOCKTAG *locktag;
4154  LOCKMODE lockmode;
4155  LOCKMETHODID lockmethodid;
4156  LOCK *lock;
4157  PROCLOCK *proclock;
4158  PROCLOCKTAG proclocktag;
4159  bool found;
4160  uint32 hashcode;
4161  uint32 proclock_hashcode;
4162  int partition;
4163  LWLock *partitionLock;
4164  LockMethod lockMethodTable;
4165 
4166  Assert(len == sizeof(TwoPhaseLockRecord));
4167  locktag = &rec->locktag;
4168  lockmode = rec->lockmode;
4169  lockmethodid = locktag->locktag_lockmethodid;
4170 
4171  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4172  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4173  lockMethodTable = LockMethods[lockmethodid];
4174 
4175  hashcode = LockTagHashCode(locktag);
4176  partition = LockHashPartition(hashcode);
4177  partitionLock = LockHashPartitionLock(hashcode);
4178 
4179  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4180 
4181  /*
4182  * Find or create a lock with this tag.
4183  */
4185  locktag,
4186  hashcode,
4188  &found);
4189  if (!lock)
4190  {
4191  LWLockRelease(partitionLock);
4192  ereport(ERROR,
4193  (errcode(ERRCODE_OUT_OF_MEMORY),
4194  errmsg("out of shared memory"),
4195  errhint("You might need to increase max_locks_per_transaction.")));
4196  }
4197 
4198  /*
4199  * if it's a new lock object, initialize it
4200  */
4201  if (!found)
4202  {
4203  lock->grantMask = 0;
4204  lock->waitMask = 0;
4205  dlist_init(&lock->procLocks);
4206  dclist_init(&lock->waitProcs);
4207  lock->nRequested = 0;
4208  lock->nGranted = 0;
4209  MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
4210  MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
4211  LOCK_PRINT("lock_twophase_recover: new", lock, lockmode);
4212  }
4213  else
4214  {
4215  LOCK_PRINT("lock_twophase_recover: found", lock, lockmode);
4216  Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
4217  Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
4218  Assert(lock->nGranted <= lock->nRequested);
4219  }
4220 
4221  /*
4222  * Create the hash key for the proclock table.
4223  */
4224  proclocktag.myLock = lock;
4225  proclocktag.myProc = proc;
4226 
4227  proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
4228 
4229  /*
4230  * Find or create a proclock entry with this tag
4231  */
4233  &proclocktag,
4234  proclock_hashcode,
4236  &found);
4237  if (!proclock)
4238  {
4239  /* Oops, not enough shmem for the proclock */
4240  if (lock->nRequested == 0)
4241  {
4242  /*
4243  * There are no other requestors of this lock, so garbage-collect
4244  * the lock object. We *must* do this to avoid a permanent leak
4245  * of shared memory, because there won't be anything to cause
4246  * anyone to release the lock object later.
4247  */
4248  Assert(dlist_is_empty(&lock->procLocks));
4250  &(lock->tag),
4251  hashcode,
4252  HASH_REMOVE,
4253  NULL))
4254  elog(PANIC, "lock table corrupted");
4255  }
4256  LWLockRelease(partitionLock);
4257  ereport(ERROR,
4258  (errcode(ERRCODE_OUT_OF_MEMORY),
4259  errmsg("out of shared memory"),
4260  errhint("You might need to increase max_locks_per_transaction.")));
4261  }
4262 
4263  /*
4264  * If new, initialize the new entry
4265  */
4266  if (!found)
4267  {
4268  Assert(proc->lockGroupLeader == NULL);
4269  proclock->groupLeader = proc;
4270  proclock->holdMask = 0;
4271  proclock->releaseMask = 0;
4272  /* Add proclock to appropriate lists */
4273  dlist_push_tail(&lock->procLocks, &proclock->lockLink);
4274  dlist_push_tail(&proc->myProcLocks[partition],
4275  &proclock->procLink);
4276  PROCLOCK_PRINT("lock_twophase_recover: new", proclock);
4277  }
4278  else
4279  {
4280  PROCLOCK_PRINT("lock_twophase_recover: found", proclock);
4281  Assert((proclock->holdMask & ~lock->grantMask) == 0);
4282  }
4283 
4284  /*
4285  * lock->nRequested and lock->requested[] count the total number of
4286  * requests, whether granted or waiting, so increment those immediately.
4287  */
4288  lock->nRequested++;
4289  lock->requested[lockmode]++;
4290  Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
4291 
4292  /*
4293  * We shouldn't already hold the desired lock.
4294  */
4295  if (proclock->holdMask & LOCKBIT_ON(lockmode))
4296  elog(ERROR, "lock %s on object %u/%u/%u is already held",
4297  lockMethodTable->lockModeNames[lockmode],
4298  lock->tag.locktag_field1, lock->tag.locktag_field2,
4299  lock->tag.locktag_field3);
4300 
4301  /*
4302  * We ignore any possible conflicts and just grant ourselves the lock. Not
4303  * only because we don't bother, but also to avoid deadlocks when
4304  * switching from standby to normal mode. See function comment.
4305  */
4306  GrantLock(lock, proclock, lockmode);
4307 
4308  /*
4309  * Bump strong lock count, to make sure any fast-path lock requests won't
4310  * be granted without consulting the primary lock table.
4311  */
4312  if (ConflictsWithRelationFastPath(&lock->tag, lockmode))
4313  {
4314  uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
4315 
4317  FastPathStrongRelationLocks->count[fasthashcode]++;
4319  }
4320 
4321  LWLockRelease(partitionLock);
4322 }
#define MemSet(start, val, len)
Definition: c.h:1004
@ HASH_ENTER_NULL
Definition: hsearch.h:116
static void dlist_init(dlist_head *head)
Definition: ilist.h:314
static void dlist_push_tail(dlist_head *head, dlist_node *node)
Definition: ilist.h:364
static void dclist_init(dclist_head *head)
Definition: ilist.h:671
#define MAX_LOCKMODES
Definition: lock.h:82
#define LockHashPartition(hashcode)
Definition: lock.h:525
int LOCKMODE
Definition: lockdefs.h:26
uint32 locktag_field3
Definition: lock.h:168
dlist_head myProcLocks[NUM_LOCK_PARTITIONS]
Definition: proc.h:252
LOCKMASK releaseMask
Definition: lock.h:377

References Assert(), ConflictsWithRelationFastPath, FastPathStrongRelationLockData::count, dclist_init(), dlist_init(), dlist_is_empty(), dlist_push_tail(), elog(), ereport, errcode(), errhint(), errmsg(), ERROR, FastPathStrongLockHashPartition, FastPathStrongRelationLocks, LOCK::granted, GrantLock(), LOCK::grantMask, PROCLOCK::groupLeader, HASH_ENTER_NULL, HASH_REMOVE, hash_search_with_hash_value(), PROCLOCK::holdMask, len, lengthof, LOCK_PRINT, LOCKBIT_ON, PGPROC::lockGroupLeader, LockHashPartition, LockHashPartitionLock, PROCLOCK::lockLink, LockMethodLockHash, LockMethodProcLockHash, LockMethods, TwoPhaseLockRecord::lockmode, LockMethodData::lockModeNames, TwoPhaseLockRecord::locktag, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_field3, LOCKTAG::locktag_lockmethodid, LockTagHashCode(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), MAX_LOCKMODES, MemSet, FastPathStrongRelationLockData::mutex, PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, PGPROC::myProcLocks, LOCK::nGranted, LOCK::nRequested, PANIC, PROCLOCK::procLink, PROCLOCK_PRINT, ProcLockHashCode(), LOCK::procLocks, PROCLOCK::releaseMask, LOCK::requested, SpinLockAcquire, SpinLockRelease, LOCK::tag, TwoPhaseGetDummyProc(), LOCK::waitMask, and LOCK::waitProcs.

◆ lock_twophase_standby_recover()

void lock_twophase_standby_recover ( TransactionId  xid,
uint16  info,
void *  recdata,
uint32  len 
)

Definition at line 4329 of file lock.c.

4331 {
4332  TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4333  LOCKTAG *locktag;
4334  LOCKMODE lockmode;
4335  LOCKMETHODID lockmethodid;
4336 
4337  Assert(len == sizeof(TwoPhaseLockRecord));
4338  locktag = &rec->locktag;
4339  lockmode = rec->lockmode;
4340  lockmethodid = locktag->locktag_lockmethodid;
4341 
4342  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4343  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4344 
4345  if (lockmode == AccessExclusiveLock &&
4346  locktag->locktag_type == LOCKTAG_RELATION)
4347  {
4349  locktag->locktag_field1 /* dboid */ ,
4350  locktag->locktag_field2 /* reloid */ );
4351  }
4352 }
void StandbyAcquireAccessExclusiveLock(TransactionId xid, Oid dbOid, Oid relOid)
Definition: standby.c:972

References AccessExclusiveLock, Assert(), elog(), ERROR, len, lengthof, LockMethods, TwoPhaseLockRecord::lockmode, TwoPhaseLockRecord::locktag, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_lockmethodid, LOCKTAG_RELATION, LOCKTAG::locktag_type, and StandbyAcquireAccessExclusiveLock().

◆ LockAcquire()

LockAcquireResult LockAcquire ( const LOCKTAG locktag,
LOCKMODE  lockmode,
bool  sessionLock,
bool  dontWait 
)

◆ LockAcquireExtended()

LockAcquireResult LockAcquireExtended ( const LOCKTAG locktag,
LOCKMODE  lockmode,
bool  sessionLock,
bool  dontWait,
bool  reportMemoryError,
LOCALLOCK **  locallockp 
)

Definition at line 771 of file lock.c.

777 {
778  LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
779  LockMethod lockMethodTable;
780  LOCALLOCKTAG localtag;
781  LOCALLOCK *locallock;
782  LOCK *lock;
783  PROCLOCK *proclock;
784  bool found;
785  ResourceOwner owner;
786  uint32 hashcode;
787  LWLock *partitionLock;
788  bool found_conflict;
789  bool log_lock = false;
790 
791  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
792  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
793  lockMethodTable = LockMethods[lockmethodid];
794  if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
795  elog(ERROR, "unrecognized lock mode: %d", lockmode);
796 
797  if (RecoveryInProgress() && !InRecovery &&
798  (locktag->locktag_type == LOCKTAG_OBJECT ||
799  locktag->locktag_type == LOCKTAG_RELATION) &&
800  lockmode > RowExclusiveLock)
801  ereport(ERROR,
802  (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
803  errmsg("cannot acquire lock mode %s on database objects while recovery is in progress",
804  lockMethodTable->lockModeNames[lockmode]),
805  errhint("Only RowExclusiveLock or less can be acquired on database objects during recovery.")));
806 
807 #ifdef LOCK_DEBUG
808  if (LOCK_DEBUG_ENABLED(locktag))
809  elog(LOG, "LockAcquire: lock [%u,%u] %s",
810  locktag->locktag_field1, locktag->locktag_field2,
811  lockMethodTable->lockModeNames[lockmode]);
812 #endif
813 
814  /* Identify owner for lock */
815  if (sessionLock)
816  owner = NULL;
817  else
818  owner = CurrentResourceOwner;
819 
820  /*
821  * Find or create a LOCALLOCK entry for this lock and lockmode
822  */
823  MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
824  localtag.lock = *locktag;
825  localtag.mode = lockmode;
826 
827  locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
828  &localtag,
829  HASH_ENTER, &found);
830 
831  /*
832  * if it's a new locallock object, initialize it
833  */
834  if (!found)
835  {
836  locallock->lock = NULL;
837  locallock->proclock = NULL;
838  locallock->hashcode = LockTagHashCode(&(localtag.lock));
839  locallock->nLocks = 0;
840  locallock->holdsStrongLockCount = false;
841  locallock->lockCleared = false;
842  locallock->numLockOwners = 0;
843  locallock->maxLockOwners = 8;
844  locallock->lockOwners = NULL; /* in case next line fails */
845  locallock->lockOwners = (LOCALLOCKOWNER *)
847  locallock->maxLockOwners * sizeof(LOCALLOCKOWNER));
848  }
849  else
850  {
851  /* Make sure there will be room to remember the lock */
852  if (locallock->numLockOwners >= locallock->maxLockOwners)
853  {
854  int newsize = locallock->maxLockOwners * 2;
855 
856  locallock->lockOwners = (LOCALLOCKOWNER *)
857  repalloc(locallock->lockOwners,
858  newsize * sizeof(LOCALLOCKOWNER));
859  locallock->maxLockOwners = newsize;
860  }
861  }
862  hashcode = locallock->hashcode;
863 
864  if (locallockp)
865  *locallockp = locallock;
866 
867  /*
868  * If we already hold the lock, we can just increase the count locally.
869  *
870  * If lockCleared is already set, caller need not worry about absorbing
871  * sinval messages related to the lock's object.
872  */
873  if (locallock->nLocks > 0)
874  {
875  GrantLockLocal(locallock, owner);
876  if (locallock->lockCleared)
878  else
880  }
881 
882  /*
883  * We don't acquire any other heavyweight lock while holding the relation
884  * extension lock. We do allow to acquire the same relation extension
885  * lock more than once but that case won't reach here.
886  */
887  Assert(!IsRelationExtensionLockHeld);
888 
889  /*
890  * We don't acquire any other heavyweight lock while holding the page lock
891  * except for relation extension.
892  */
893  Assert(!IsPageLockHeld ||
894  (locktag->locktag_type == LOCKTAG_RELATION_EXTEND));
895 
896  /*
897  * Prepare to emit a WAL record if acquisition of this lock needs to be
898  * replayed in a standby server.
899  *
900  * Here we prepare to log; after lock is acquired we'll issue log record.
901  * This arrangement simplifies error recovery in case the preparation step
902  * fails.
903  *
904  * Only AccessExclusiveLocks can conflict with lock types that read-only
905  * transactions can acquire in a standby server. Make sure this definition
906  * matches the one in GetRunningTransactionLocks().
907  */
908  if (lockmode >= AccessExclusiveLock &&
909  locktag->locktag_type == LOCKTAG_RELATION &&
910  !RecoveryInProgress() &&
912  {
914  log_lock = true;
915  }
916 
917  /*
918  * Attempt to take lock via fast path, if eligible. But if we remember
919  * having filled up the fast path array, we don't attempt to make any
920  * further use of it until we release some locks. It's possible that some
921  * other backend has transferred some of those locks to the shared hash
922  * table, leaving space free, but it's not worth acquiring the LWLock just
923  * to check. It's also possible that we're acquiring a second or third
924  * lock type on a relation we have already locked using the fast-path, but
925  * for now we don't worry about that case either.
926  */
927  if (EligibleForRelationFastPath(locktag, lockmode) &&
929  {
930  uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
931  bool acquired;
932 
933  /*
934  * LWLockAcquire acts as a memory sequencing point, so it's safe to
935  * assume that any strong locker whose increment to
936  * FastPathStrongRelationLocks->counts becomes visible after we test
937  * it has yet to begin to transfer fast-path locks.
938  */
940  if (FastPathStrongRelationLocks->count[fasthashcode] != 0)
941  acquired = false;
942  else
943  acquired = FastPathGrantRelationLock(locktag->locktag_field2,
944  lockmode);
946  if (acquired)
947  {
948  /*
949  * The locallock might contain stale pointers to some old shared
950  * objects; we MUST reset these to null before considering the
951  * lock to be acquired via fast-path.
952  */
953  locallock->lock = NULL;
954  locallock->proclock = NULL;
955  GrantLockLocal(locallock, owner);
956  return LOCKACQUIRE_OK;
957  }
958  }
959 
960  /*
961  * If this lock could potentially have been taken via the fast-path by
962  * some other backend, we must (temporarily) disable further use of the
963  * fast-path for this lock tag, and migrate any locks already taken via
964  * this method to the main lock table.
965  */
966  if (ConflictsWithRelationFastPath(locktag, lockmode))
967  {
968  uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
969 
970  BeginStrongLockAcquire(locallock, fasthashcode);
971  if (!FastPathTransferRelationLocks(lockMethodTable, locktag,
972  hashcode))
973  {
975  if (locallock->nLocks == 0)
976  RemoveLocalLock(locallock);
977  if (locallockp)
978  *locallockp = NULL;
979  if (reportMemoryError)
980  ereport(ERROR,
981  (errcode(ERRCODE_OUT_OF_MEMORY),
982  errmsg("out of shared memory"),
983  errhint("You might need to increase max_locks_per_transaction.")));
984  else
985  return LOCKACQUIRE_NOT_AVAIL;
986  }
987  }
988 
989  /*
990  * We didn't find the lock in our LOCALLOCK table, and we didn't manage to
991  * take it via the fast-path, either, so we've got to mess with the shared
992  * lock table.
993  */
994  partitionLock = LockHashPartitionLock(hashcode);
995 
996  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
997 
998  /*
999  * Find or create lock and proclock entries with this tag
1000  *
1001  * Note: if the locallock object already existed, it might have a pointer
1002  * to the lock already ... but we should not assume that that pointer is
1003  * valid, since a lock object with zero hold and request counts can go
1004  * away anytime. So we have to use SetupLockInTable() to recompute the
1005  * lock and proclock pointers, even if they're already set.
1006  */
1007  proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
1008  hashcode, lockmode);
1009  if (!proclock)
1010  {
1012  LWLockRelease(partitionLock);
1013  if (locallock->nLocks == 0)
1014  RemoveLocalLock(locallock);
1015  if (locallockp)
1016  *locallockp = NULL;
1017  if (reportMemoryError)
1018  ereport(ERROR,
1019  (errcode(ERRCODE_OUT_OF_MEMORY),
1020  errmsg("out of shared memory"),
1021  errhint("You might need to increase max_locks_per_transaction.")));
1022  else
1023  return LOCKACQUIRE_NOT_AVAIL;
1024  }
1025  locallock->proclock = proclock;
1026  lock = proclock->tag.myLock;
1027  locallock->lock = lock;
1028 
1029  /*
1030  * If lock requested conflicts with locks requested by waiters, must join
1031  * wait queue. Otherwise, check for conflict with already-held locks.
1032  * (That's last because most complex check.)
1033  */
1034  if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
1035  found_conflict = true;
1036  else
1037  found_conflict = LockCheckConflicts(lockMethodTable, lockmode,
1038  lock, proclock);
1039 
1040  if (!found_conflict)
1041  {
1042  /* No conflict with held or previously requested locks */
1043  GrantLock(lock, proclock, lockmode);
1044  GrantLockLocal(locallock, owner);
1045  }
1046  else
1047  {
1048  /*
1049  * We can't acquire the lock immediately. If caller specified no
1050  * blocking, remove useless table entries and return
1051  * LOCKACQUIRE_NOT_AVAIL without waiting.
1052  */
1053  if (dontWait)
1054  {
1056  if (proclock->holdMask == 0)
1057  {
1058  uint32 proclock_hashcode;
1059 
1060  proclock_hashcode = ProcLockHashCode(&proclock->tag, hashcode);
1061  dlist_delete(&proclock->lockLink);
1062  dlist_delete(&proclock->procLink);
1064  &(proclock->tag),
1065  proclock_hashcode,
1066  HASH_REMOVE,
1067  NULL))
1068  elog(PANIC, "proclock table corrupted");
1069  }
1070  else
1071  PROCLOCK_PRINT("LockAcquire: NOWAIT", proclock);
1072  lock->nRequested--;
1073  lock->requested[lockmode]--;
1074  LOCK_PRINT("LockAcquire: conditional lock failed", lock, lockmode);
1075  Assert((lock->nRequested > 0) && (lock->requested[lockmode] >= 0));
1076  Assert(lock->nGranted <= lock->nRequested);
1077  LWLockRelease(partitionLock);
1078  if (locallock->nLocks == 0)
1079  RemoveLocalLock(locallock);
1080  if (locallockp)
1081  *locallockp = NULL;
1082  return LOCKACQUIRE_NOT_AVAIL;
1083  }
1084 
1085  /*
1086  * Set bitmask of locks this process already holds on this object.
1087  */
1088  MyProc->heldLocks = proclock->holdMask;
1089 
1090  /*
1091  * Sleep till someone wakes me up.
1092  */
1093 
1094  TRACE_POSTGRESQL_LOCK_WAIT_START(locktag->locktag_field1,
1095  locktag->locktag_field2,
1096  locktag->locktag_field3,
1097  locktag->locktag_field4,
1098  locktag->locktag_type,
1099  lockmode);
1100 
1101  WaitOnLock(locallock, owner);
1102 
1103  TRACE_POSTGRESQL_LOCK_WAIT_DONE(locktag->locktag_field1,
1104  locktag->locktag_field2,
1105  locktag->locktag_field3,
1106  locktag->locktag_field4,
1107  locktag->locktag_type,
1108  lockmode);
1109 
1110  /*
1111  * NOTE: do not do any material change of state between here and
1112  * return. All required changes in locktable state must have been
1113  * done when the lock was granted to us --- see notes in WaitOnLock.
1114  */
1115 
1116  /*
1117  * Check the proclock entry status, in case something in the ipc
1118  * communication doesn't work correctly.
1119  */
1120  if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
1121  {
1123  PROCLOCK_PRINT("LockAcquire: INCONSISTENT", proclock);
1124  LOCK_PRINT("LockAcquire: INCONSISTENT", lock, lockmode);
1125  /* Should we retry ? */
1126  LWLockRelease(partitionLock);
1127  elog(ERROR, "LockAcquire failed");
1128  }
1129  PROCLOCK_PRINT("LockAcquire: granted", proclock);
1130  LOCK_PRINT("LockAcquire: granted", lock, lockmode);
1131  }
1132 
1133  /*
1134  * Lock state is fully up-to-date now; if we error out after this, no
1135  * special error cleanup is required.
1136  */
1138 
1139  LWLockRelease(partitionLock);
1140 
1141  /*
1142  * Emit a WAL record if acquisition of this lock needs to be replayed in a
1143  * standby server.
1144  */
1145  if (log_lock)
1146  {
1147  /*
1148  * Decode the locktag back to the original values, to avoid sending
1149  * lots of empty bytes with every message. See lock.h to check how a
1150  * locktag is defined for LOCKTAG_RELATION
1151  */
1153  locktag->locktag_field2);
1154  }
1155 
1156  return LOCKACQUIRE_OK;
1157 }
#define LOG
Definition: elog.h:31
static void RemoveLocalLock(LOCALLOCK *locallock)
Definition: lock.c:1366
static bool FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag, uint32 hashcode)
Definition: lock.c:2687
void AbortStrongLockAcquire(void)
Definition: lock.c:1752
static bool FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode)
Definition: lock.c:2620
static void WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner)
Definition: lock.c:1809
#define EligibleForRelationFastPath(locktag, mode)
Definition: lock.c:227
static void BeginStrongLockAcquire(LOCALLOCK *locallock, uint32 fasthashcode)
Definition: lock.c:1716
bool LockCheckConflicts(LockMethod lockMethodTable, LOCKMODE lockmode, LOCK *lock, PROCLOCK *proclock)
Definition: lock.c:1419
static void FinishStrongLockAcquire(void)
Definition: lock.c:1742
@ LOCKTAG_OBJECT
Definition: lock.h:145
@ LOCKACQUIRE_ALREADY_CLEAR
Definition: lock.h:505
@ LOCKACQUIRE_OK
Definition: lock.h:503
@ LOCKACQUIRE_ALREADY_HELD
Definition: lock.h:504
@ LOCKACQUIRE_NOT_AVAIL
Definition: lock.h:502
#define RowExclusiveLock
Definition: lockdefs.h:38
ResourceOwner CurrentResourceOwner
Definition: resowner.c:146
void LogAccessExclusiveLockPrepare(void)
Definition: standby.c:1419
void LogAccessExclusiveLock(Oid dbOid, Oid relOid)
Definition: standby.c:1402
bool lockCleared
Definition: lock.h:440
uint16 locktag_field4
Definition: lock.h:169
LOCKMASK heldLocks
Definition: proc.h:226
bool RecoveryInProgress(void)
Definition: xlog.c:5908
#define XLogStandbyInfoActive()
Definition: xlog.h:118
bool InRecovery
Definition: xlogutils.c:53

References AbortStrongLockAcquire(), AccessExclusiveLock, Assert(), BeginStrongLockAcquire(), ConflictsWithRelationFastPath, LockMethodData::conflictTab, FastPathStrongRelationLockData::count, CurrentResourceOwner, dlist_delete(), EligibleForRelationFastPath, elog(), ereport, errcode(), errhint(), errmsg(), ERROR, FastPathGrantRelationLock(), FastPathLocalUseCount, FastPathStrongLockHashPartition, FastPathStrongRelationLocks, FastPathTransferRelationLocks(), FinishStrongLockAcquire(), FP_LOCK_SLOTS_PER_BACKEND, PGPROC::fpInfoLock, GrantLock(), GrantLockLocal(), HASH_ENTER, HASH_REMOVE, hash_search(), hash_search_with_hash_value(), LOCALLOCK::hashcode, PGPROC::heldLocks, PROCLOCK::holdMask, LOCALLOCK::holdsStrongLockCount, InRecovery, lengthof, LOCALLOCKTAG::lock, LOCALLOCK::lock, LOCK_PRINT, LOCKACQUIRE_ALREADY_CLEAR, LOCKACQUIRE_ALREADY_HELD, LOCKACQUIRE_NOT_AVAIL, LOCKACQUIRE_OK, LOCKBIT_ON, LockCheckConflicts(), LOCALLOCK::lockCleared, LockHashPartitionLock, PROCLOCK::lockLink, LockMethodLocalHash, LockMethodProcLockHash, LockMethods, LockMethodData::lockModeNames, LOCALLOCK::lockOwners, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_field3, LOCKTAG::locktag_field4, LOCKTAG::locktag_lockmethodid, LOCKTAG_OBJECT, LOCKTAG_RELATION, LOCKTAG_RELATION_EXTEND, LOCKTAG::locktag_type, LockTagHashCode(), LOG, LogAccessExclusiveLock(), LogAccessExclusiveLockPrepare(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), LOCALLOCK::maxLockOwners, MemoryContextAlloc(), MemSet, LOCALLOCKTAG::mode, PROCLOCKTAG::myLock, MyProc, LOCK::nGranted, LOCALLOCK::nLocks, LOCK::nRequested, LockMethodData::numLockModes, LOCALLOCK::numLockOwners, PANIC, PROCLOCK::procLink, LOCALLOCK::proclock, PROCLOCK_PRINT, ProcLockHashCode(), RecoveryInProgress(), RemoveLocalLock(), repalloc(), LOCK::requested, RowExclusiveLock, SetupLockInTable(), PROCLOCK::tag, TopMemoryContext, LOCK::waitMask, WaitOnLock(), and XLogStandbyInfoActive.

Referenced by ConditionalLockRelation(), ConditionalLockRelationOid(), LockAcquire(), LockRelation(), LockRelationId(), and LockRelationOid().

◆ LockCheckConflicts()

bool LockCheckConflicts ( LockMethod  lockMethodTable,
LOCKMODE  lockmode,
LOCK lock,
PROCLOCK proclock 
)

Definition at line 1419 of file lock.c.

/*
 * LockCheckConflicts -- does the requested lockmode conflict with locks
 * already granted on this lock object?
 *
 * Returns true on a real conflict, false if the request can be granted.
 * Locks held by ourselves (proclock->holdMask) and by other members of our
 * lock group do not count as conflicts, EXCEPT that relation-extension and
 * page locks conflict even between group members.
 *
 * NOTE(review): this body comes from a documentation dump; the parameter
 * list (lockMethodTable, lockmode, lock, proclock) appears only in the
 * surrounding prose, not in this fragment.
 */
1423 {
1424  int numLockModes = lockMethodTable->numLockModes;
1425  LOCKMASK myLocks;
1426  int conflictMask = lockMethodTable->conflictTab[lockmode];
1427  int conflictsRemaining[MAX_LOCKMODES];
1428  int totalConflictsRemaining = 0;
1429  dlist_iter proclock_iter;
1430  int i;
1431 
1432  /*
1433  * first check for global conflicts: If no locks conflict with my request,
1434  * then I get the lock.
1435  *
1436  * Checking for conflict: lock->grantMask represents the types of
1437  * currently held locks. conflictTab[lockmode] has a bit set for each
1438  * type of lock that conflicts with request. Bitwise compare tells if
1439  * there is a conflict.
1440  */
1441  if (!(conflictMask & lock->grantMask))
1442  {
1443  PROCLOCK_PRINT("LockCheckConflicts: no conflict", proclock);
1444  return false;
1445  }
1446 
1447  /*
1448  * Rats. Something conflicts. But it could still be my own lock, or a
1449  * lock held by another member of my locking group. First, figure out how
1450  * many conflicts remain after subtracting out any locks I hold myself.
1451  */
1452  myLocks = proclock->holdMask;
1453  for (i = 1; i <= numLockModes; i++)
1454  {
1455  if ((conflictMask & LOCKBIT_ON(i)) == 0)
1456  {
1457  conflictsRemaining[i] = 0;
1458  continue;
1459  }
1460  conflictsRemaining[i] = lock->granted[i];
1461  if (myLocks & LOCKBIT_ON(i))
1462  --conflictsRemaining[i];
1463  totalConflictsRemaining += conflictsRemaining[i];
1464  }
1465 
1466  /* If no conflicts remain, we get the lock. */
1467  if (totalConflictsRemaining == 0)
1468  {
1469  PROCLOCK_PRINT("LockCheckConflicts: resolved (simple)", proclock);
1470  return false;
1471  }
1472 
1473  /* If no group locking, it's definitely a conflict. */
1474  if (proclock->groupLeader == MyProc && MyProc->lockGroupLeader == NULL)
1475  {
1476  Assert(proclock->tag.myProc == MyProc);
1477  PROCLOCK_PRINT("LockCheckConflicts: conflicting (simple)",
1478  proclock);
1479  return true;
1480  }
1481 
1482  /*
1483  * The relation extension or page lock conflict even between the group
1484  * members.
1485  */
1486  if (LOCK_LOCKTAG(*lock) == LOCKTAG_RELATION_EXTEND ||
1487  (LOCK_LOCKTAG(*lock) == LOCKTAG_PAGE))
1488  {
1489  PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)",
1490  proclock);
1491  return true;
1492  }
1493 
1494  /*
1495  * Locks held in conflicting modes by members of our own lock group are
1496  * not real conflicts; we can subtract those out and see if we still have
1497  * a conflict. This is O(N) in the number of processes holding or
1498  * awaiting locks on this object. We could improve that by making the
1499  * shared memory state more complex (and larger) but it doesn't seem worth
1500  * it.
1501  */
1502  dlist_foreach(proclock_iter, &lock->procLocks)
1503  {
1504  PROCLOCK *otherproclock =
1505  dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
1506 
1507  if (proclock != otherproclock &&
1508  proclock->groupLeader == otherproclock->groupLeader &&
1509  (otherproclock->holdMask & conflictMask) != 0)
1510  {
1511  int intersectMask = otherproclock->holdMask & conflictMask;
1512 
1513  for (i = 1; i <= numLockModes; i++)
1514  {
1515  if ((intersectMask & LOCKBIT_ON(i)) != 0)
1516  {
1517  /* shared state must agree with the per-mode counts */
1518  if (conflictsRemaining[i] <= 0)
1519  elog(PANIC, "proclocks held do not match lock");
1520  conflictsRemaining[i]--;
1521  totalConflictsRemaining--;
1522  }
1523  }
1524 
1525  if (totalConflictsRemaining == 0)
1526  {
1527  PROCLOCK_PRINT("LockCheckConflicts: resolved (group)",
1528  proclock);
1529  return false;
1530  }
1531  }
1532  }
1533 
1534  /* Nope, it's a real conflict. */
1535  PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)", proclock);
1536  return true;
1537 }
#define LOCK_LOCKTAG(lock)
Definition: lock.h:325

References Assert(), LockMethodData::conflictTab, dlist_iter::cur, dlist_container, dlist_foreach, elog(), LOCK::granted, LOCK::grantMask, PROCLOCK::groupLeader, PROCLOCK::holdMask, i, LOCK_LOCKTAG, LOCKBIT_ON, PGPROC::lockGroupLeader, LOCKTAG_PAGE, LOCKTAG_RELATION_EXTEND, MAX_LOCKMODES, MyProc, PROCLOCKTAG::myProc, LockMethodData::numLockModes, PANIC, PROCLOCK_PRINT, LOCK::procLocks, and PROCLOCK::tag.

Referenced by LockAcquireExtended(), ProcLockWakeup(), and ProcSleep().

◆ LockHasWaiters()

bool LockHasWaiters ( const LOCKTAG locktag,
LOCKMODE  lockmode,
bool  sessionLock 
)

Definition at line 634 of file lock.c.

/*
 * LockHasWaiters -- does the lock we hold (in the given mode) have any
 * waiters whose requested modes conflict with it?
 *
 * Requires that we actually hold the lock; otherwise emits a WARNING and
 * returns false (the caller is expected to report the problem itself).
 */
635 {
636  LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
637  LockMethod lockMethodTable;
638  LOCALLOCKTAG localtag;
639  LOCALLOCK *locallock;
640  LOCK *lock;
641  PROCLOCK *proclock;
642  LWLock *partitionLock;
643  bool hasWaiters = false;
644 
645  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
646  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
647  lockMethodTable = LockMethods[lockmethodid];
648  if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
649  elog(ERROR, "unrecognized lock mode: %d", lockmode);
650 
651 #ifdef LOCK_DEBUG
652  if (LOCK_DEBUG_ENABLED(locktag))
653  elog(LOG, "LockHasWaiters: lock [%u,%u] %s",
654  locktag->locktag_field1, locktag->locktag_field2,
655  lockMethodTable->lockModeNames[lockmode]);
656 #endif
657 
658  /*
659  * Find the LOCALLOCK entry for this lock and lockmode
660  */
661  MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
662  localtag.lock = *locktag;
663  localtag.mode = lockmode;
664 
665  locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
666  &localtag,
667  HASH_FIND, NULL);
668 
669  /*
670  * let the caller print its own error message, too. Do not ereport(ERROR).
671  */
672  if (!locallock || locallock->nLocks <= 0)
673  {
674  elog(WARNING, "you don't own a lock of type %s",
675  lockMethodTable->lockModeNames[lockmode]);
676  return false;
677  }
678 
679  /*
680  * Check the shared lock table.
681  */
682  partitionLock = LockHashPartitionLock(locallock->hashcode);
683 
684  LWLockAcquire(partitionLock, LW_SHARED);
685 
686  /*
687  * We don't need to re-find the lock or proclock, since we kept their
688  * addresses in the locallock table, and they couldn't have been removed
689  * while we were holding a lock on them.
690  */
691  lock = locallock->lock;
692  LOCK_PRINT("LockHasWaiters: found", lock, lockmode);
693  proclock = locallock->proclock;
694  PROCLOCK_PRINT("LockHasWaiters: found", proclock);
695 
696  /*
697  * Double-check that we are actually holding a lock of the type we are
698  * asking about.
699  */
700  if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
701  {
702  PROCLOCK_PRINT("LockHasWaiters: WRONGTYPE", proclock);
703  LWLockRelease(partitionLock);
704  elog(WARNING, "you don't own a lock of type %s",
705  lockMethodTable->lockModeNames[lockmode]);
706  RemoveLocalLock(locallock);
707  return false;
708  }
709 
710  /*
711  * Do the checking: any waiter whose requested mode conflicts with ours?
712  */
713  if ((lockMethodTable->conflictTab[lockmode] & lock->waitMask) != 0)
714  hasWaiters = true;
715 
716  LWLockRelease(partitionLock);
717 
718  return hasWaiters;
719 }
#define WARNING
Definition: elog.h:36

References LockMethodData::conflictTab, elog(), ERROR, HASH_FIND, hash_search(), LOCALLOCK::hashcode, PROCLOCK::holdMask, lengthof, LOCALLOCKTAG::lock, LOCALLOCK::lock, LOCK_PRINT, LOCKBIT_ON, LockHashPartitionLock, LockMethodLocalHash, LockMethods, LockMethodData::lockModeNames, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_lockmethodid, LOG, LW_SHARED, LWLockAcquire(), LWLockRelease(), MemSet, LOCALLOCKTAG::mode, LOCALLOCK::nLocks, LockMethodData::numLockModes, LOCALLOCK::proclock, PROCLOCK_PRINT, RemoveLocalLock(), LOCK::waitMask, and WARNING.

Referenced by LockHasWaitersRelation().

◆ LockHeldByMe()

bool LockHeldByMe ( const LOCKTAG locktag,
LOCKMODE  lockmode 
)

Definition at line 598 of file lock.c.

/*
 * LockHeldByMe -- test whether this backend holds the given lock in the
 * given mode, using only the backend-local LOCALLOCK hash table (no shared
 * memory access, hence cheap).
 */
599 {
600  LOCALLOCKTAG localtag;
601  LOCALLOCK *locallock;
602 
603  /*
604  * See if there is a LOCALLOCK entry for this lock and lockmode
605  */
606  MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
607  localtag.lock = *locktag;
608  localtag.mode = lockmode;
609 
610  locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
611  &localtag,
612  HASH_FIND, NULL);
613 
614  /* held iff an entry exists with a positive local reference count */
615  return (locallock && locallock->nLocks > 0);
616 }

References HASH_FIND, hash_search(), LOCALLOCKTAG::lock, LockMethodLocalHash, MemSet, LOCALLOCKTAG::mode, and LOCALLOCK::nLocks.

Referenced by CheckRelationLockedByMe().

◆ LockReassignCurrentOwner()

void LockReassignCurrentOwner ( LOCALLOCK **  locallocks,
int  nlocks 
)

Definition at line 2544 of file lock.c.

/*
 * LockReassignCurrentOwner -- reassign all locks belonging to
 * CurrentResourceOwner to its parent owner.
 *
 * If locallocks is NULL, every entry in the locallock table is scanned;
 * otherwise only the given array of nlocks LOCALLOCKs is processed.
 *
 * NOTE(review): documentation-dump artifact -- several lines whose leading
 * token was a hyperlink were dropped by extraction: the declaration/
 * initialization of 'parent' (presumably via
 * ResourceOwnerGetParent(CurrentResourceOwner), per the reference list),
 * and the HASH_SEQ_STATUS declaration plus hash_seq_init() call.  Consult
 * lock.c for the authoritative text.
 */
2545 {
2547 
2548  Assert(parent != NULL);
2549 
2550  if (locallocks == NULL)
2551  {
2553  LOCALLOCK *locallock;
2554 
2556 
2557  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2558  LockReassignOwner(locallock, parent);
2559  }
2560  else
2561  {
2562  int i;
2563 
2564  for (i = nlocks - 1; i >= 0; i--)
2565  LockReassignOwner(locallocks[i], parent);
2566  }
2567 }
static void LockReassignOwner(LOCALLOCK *locallock, ResourceOwner parent)
Definition: lock.c:2574
ResourceOwner ResourceOwnerGetParent(ResourceOwner owner)
Definition: resowner.c:797

References Assert(), CurrentResourceOwner, hash_seq_init(), hash_seq_search(), i, LockMethodLocalHash, LockReassignOwner(), ResourceOwnerGetParent(), and status().

Referenced by ResourceOwnerReleaseInternal().

◆ LockReassignOwner()

static void LockReassignOwner ( LOCALLOCK locallock,
ResourceOwner  parent 
)
static

Definition at line 2574 of file lock.c.

/*
 * LockReassignOwner -- subroutine for LockReassignCurrentOwner.
 *
 * Transfers one LOCALLOCK's reference counts from CurrentResourceOwner to
 * the given parent owner: if the parent has no slot yet, the child's slot
 * is simply relabeled; otherwise the counts are merged and the child's
 * slot compacted out.
 *
 * NOTE(review): documentation-dump artifact -- the line before the closing
 * brace (original line 2612) was dropped by extraction; per the reference
 * list it presumably calls
 * ResourceOwnerForgetLock(CurrentResourceOwner, locallock).  Verify
 * against lock.c.
 */
2575 {
2576  LOCALLOCKOWNER *lockOwners;
2577  int i;
2578  int ic = -1;
2579  int ip = -1;
2580 
2581  /*
2582  * Scan to see if there are any locks belonging to current owner or its
2583  * parent
2584  */
2585  lockOwners = locallock->lockOwners;
2586  for (i = locallock->numLockOwners - 1; i >= 0; i--)
2587  {
2588  if (lockOwners[i].owner == CurrentResourceOwner)
2589  ic = i;
2590  else if (lockOwners[i].owner == parent)
2591  ip = i;
2592  }
2593 
2594  if (ic < 0)
2595  return; /* no current locks */
2596 
2597  if (ip < 0)
2598  {
2599  /* Parent has no slot, so just give it the child's slot */
2600  lockOwners[ic].owner = parent;
2601  ResourceOwnerRememberLock(parent, locallock);
2602  }
2603  else
2604  {
2605  /* Merge child's count with parent's */
2606  lockOwners[ip].nLocks += lockOwners[ic].nLocks;
2607  /* compact out unused slot */
2608  locallock->numLockOwners--;
2609  if (ic < locallock->numLockOwners)
2610  lockOwners[ic] = lockOwners[locallock->numLockOwners];
2611  }
2613 }
void ResourceOwnerForgetLock(ResourceOwner owner, LOCALLOCK *locallock)
Definition: resowner.c:1010

References CurrentResourceOwner, i, LOCALLOCK::lockOwners, LOCALLOCKOWNER::nLocks, LOCALLOCK::numLockOwners, LOCALLOCKOWNER::owner, ResourceOwnerForgetLock(), and ResourceOwnerRememberLock().

Referenced by LockReassignCurrentOwner().

◆ LockRefindAndRelease()

static void LockRefindAndRelease ( LockMethod  lockMethodTable,
PGPROC proc,
LOCKTAG locktag,
LOCKMODE  lockmode,
bool  decrement_strong_lock_count 
)
static

Definition at line 3087 of file lock.c.

/*
 * LockRefindAndRelease -- release a lock for which we have no pointers to
 * the LOCK and PROCLOCK objects, by re-looking them up in the shared hash
 * tables.  Used when a lock originally taken fast-path has been promoted
 * to the main table, and for two-phase-commit cleanup.
 *
 * decrement_strong_lock_count is set only in the 2PC path, to undo the
 * strong-lock counter bump made at acquisition time.
 *
 * NOTE(review): documentation-dump artifact -- the assignment targets of
 * the two hash_search_with_hash_value() calls (presumably 'lock = (LOCK *)'
 * and 'proclock = (PROCLOCK *)') and the SpinLockAcquire/SpinLockRelease
 * lines around the strong-lock counter decrement were dropped by
 * extraction.  Verify against lock.c.
 */
3090 {
3091  LOCK *lock;
3092  PROCLOCK *proclock;
3093  PROCLOCKTAG proclocktag;
3094  uint32 hashcode;
3095  uint32 proclock_hashcode;
3096  LWLock *partitionLock;
3097  bool wakeupNeeded;
3098 
3099  hashcode = LockTagHashCode(locktag);
3100  partitionLock = LockHashPartitionLock(hashcode);
3101 
3102  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
3103 
3104  /*
3105  * Re-find the lock object (it had better be there).
3106  */
3108  locktag,
3109  hashcode,
3110  HASH_FIND,
3111  NULL);
3112  if (!lock)
3113  elog(PANIC, "failed to re-find shared lock object");
3114 
3115  /*
3116  * Re-find the proclock object (ditto).
3117  */
3118  proclocktag.myLock = lock;
3119  proclocktag.myProc = proc;
3120 
3121  proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
3122 
3124  &proclocktag,
3125  proclock_hashcode,
3126  HASH_FIND,
3127  NULL);
3128  if (!proclock)
3129  elog(PANIC, "failed to re-find shared proclock object");
3130 
3131  /*
3132  * Double-check that we are actually holding a lock of the type we want to
3133  * release.
3134  */
3135  if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
3136  {
3137  PROCLOCK_PRINT("lock_twophase_postcommit: WRONGTYPE", proclock);
3138  LWLockRelease(partitionLock);
3139  elog(WARNING, "you don't own a lock of type %s",
3140  lockMethodTable->lockModeNames[lockmode]);
3141  return;
3142  }
3143 
3144  /*
3145  * Do the releasing. CleanUpLock will waken any now-wakable waiters.
3146  */
3147  wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
3148 
3149  CleanUpLock(lock, proclock,
3150  lockMethodTable, hashcode,
3151  wakeupNeeded);
3152 
3153  LWLockRelease(partitionLock);
3154 
3155  /*
3156  * Decrement strong lock count. This logic is needed only for 2PC.
3157  */
3158  if (decrement_strong_lock_count
3159  && ConflictsWithRelationFastPath(locktag, lockmode))
3160  {
3161  uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
3162 
3164  Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
3165  FastPathStrongRelationLocks->count[fasthashcode]--;
3167  }
3168 }
static bool UnGrantLock(LOCK *lock, LOCKMODE lockmode, PROCLOCK *proclock, LockMethod lockMethodTable)
Definition: lock.c:1573
static void CleanUpLock(LOCK *lock, PROCLOCK *proclock, LockMethod lockMethodTable, uint32 hashcode, bool wakeupNeeded)
Definition: lock.c:1630

References Assert(), CleanUpLock(), ConflictsWithRelationFastPath, FastPathStrongRelationLockData::count, elog(), FastPathStrongLockHashPartition, FastPathStrongRelationLocks, HASH_FIND, hash_search_with_hash_value(), PROCLOCK::holdMask, LOCKBIT_ON, LockHashPartitionLock, LockMethodLockHash, LockMethodProcLockHash, LockMethodData::lockModeNames, LockTagHashCode(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), FastPathStrongRelationLockData::mutex, PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, PANIC, PROCLOCK_PRINT, ProcLockHashCode(), SpinLockAcquire, SpinLockRelease, UnGrantLock(), and WARNING.

Referenced by lock_twophase_postcommit(), LockReleaseAll(), and VirtualXactLockTableCleanup().

◆ LockRelease()

bool LockRelease ( const LOCKTAG locktag,
LOCKMODE  lockmode,
bool  sessionLock 
)

Definition at line 1949 of file lock.c.

/*
 * LockRelease -- release one instance of a previously-acquired lock.
 *
 * Decrements the per-resource-owner and total local counts; only when the
 * total local count reaches zero is the shared lock table (or the
 * fast-path array) actually updated.  Returns false (with a WARNING, not
 * an ERROR) if we do not hold the lock as claimed, true otherwise.
 *
 * NOTE(review): documentation-dump artifact -- several hyperlink-leading
 * lines were dropped by extraction: the second half of the fast-path
 * eligibility condition (presumably testing FastPathLocalUseCount), the
 * LWLockAcquire/LWLockRelease pair on MyProc->fpInfoLock around the
 * fast-path release, and the assignment targets of the two shared hash
 * table lookups in the re-find path.  Verify against lock.c.
 */
1950 {
1951  LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
1952  LockMethod lockMethodTable;
1953  LOCALLOCKTAG localtag;
1954  LOCALLOCK *locallock;
1955  LOCK *lock;
1956  PROCLOCK *proclock;
1957  LWLock *partitionLock;
1958  bool wakeupNeeded;
1959 
1960  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
1961  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
1962  lockMethodTable = LockMethods[lockmethodid];
1963  if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
1964  elog(ERROR, "unrecognized lock mode: %d", lockmode);
1965 
1966 #ifdef LOCK_DEBUG
1967  if (LOCK_DEBUG_ENABLED(locktag))
1968  elog(LOG, "LockRelease: lock [%u,%u] %s",
1969  locktag->locktag_field1, locktag->locktag_field2,
1970  lockMethodTable->lockModeNames[lockmode]);
1971 #endif
1972 
1973  /*
1974  * Find the LOCALLOCK entry for this lock and lockmode
1975  */
1976  MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
1977  localtag.lock = *locktag;
1978  localtag.mode = lockmode;
1979 
1980  locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
1981  &localtag,
1982  HASH_FIND, NULL);
1983 
1984  /*
1985  * let the caller print its own error message, too. Do not ereport(ERROR).
1986  */
1987  if (!locallock || locallock->nLocks <= 0)
1988  {
1989  elog(WARNING, "you don't own a lock of type %s",
1990  lockMethodTable->lockModeNames[lockmode]);
1991  return false;
1992  }
1993 
1994  /*
1995  * Decrease the count for the resource owner.
1996  */
1997  {
1998  LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
1999  ResourceOwner owner;
2000  int i;
2001 
2002  /* Identify owner for lock */
2003  if (sessionLock)
2004  owner = NULL;
2005  else
2006  owner = CurrentResourceOwner;
2007 
2008  for (i = locallock->numLockOwners - 1; i >= 0; i--)
2009  {
2010  if (lockOwners[i].owner == owner)
2011  {
2012  Assert(lockOwners[i].nLocks > 0);
2013  if (--lockOwners[i].nLocks == 0)
2014  {
2015  if (owner != NULL)
2016  ResourceOwnerForgetLock(owner, locallock);
2017  /* compact out unused slot */
2018  locallock->numLockOwners--;
2019  if (i < locallock->numLockOwners)
2020  lockOwners[i] = lockOwners[locallock->numLockOwners];
2021  }
2022  break;
2023  }
2024  }
2025  if (i < 0)
2026  {
2027  /* don't release a lock belonging to another owner */
2028  elog(WARNING, "you don't own a lock of type %s",
2029  lockMethodTable->lockModeNames[lockmode]);
2030  return false;
2031  }
2032  }
2033 
2034  /*
2035  * Decrease the total local count. If we're still holding the lock, we're
2036  * done.
2037  */
2038  locallock->nLocks--;
2039 
2040  if (locallock->nLocks > 0)
2041  return true;
2042 
2043  /*
2044  * At this point we can no longer suppose we are clear of invalidation
2045  * messages related to this lock. Although we'll delete the LOCALLOCK
2046  * object before any intentional return from this routine, it seems worth
2047  * the trouble to explicitly reset lockCleared right now, just in case
2048  * some error prevents us from deleting the LOCALLOCK.
2049  */
2050  locallock->lockCleared = false;
2051 
2052  /* Attempt fast release of any lock eligible for the fast path. */
2053  if (EligibleForRelationFastPath(locktag, lockmode) &&
2055  {
2056  bool released;
2057 
2058  /*
2059  * We might not find the lock here, even if we originally entered it
2060  * here. Another backend may have moved it to the main table.
2061  */
2063  released = FastPathUnGrantRelationLock(locktag->locktag_field2,
2064  lockmode);
2066  if (released)
2067  {
2068  RemoveLocalLock(locallock);
2069  return true;
2070  }
2071  }
2072 
2073  /*
2074  * Otherwise we've got to mess with the shared lock table.
2075  */
2076  partitionLock = LockHashPartitionLock(locallock->hashcode);
2077 
2078  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2079 
2080  /*
2081  * Normally, we don't need to re-find the lock or proclock, since we kept
2082  * their addresses in the locallock table, and they couldn't have been
2083  * removed while we were holding a lock on them. But it's possible that
2084  * the lock was taken fast-path and has since been moved to the main hash
2085  * table by another backend, in which case we will need to look up the
2086  * objects here. We assume the lock field is NULL if so.
2087  */
2088  lock = locallock->lock;
2089  if (!lock)
2090  {
2091  PROCLOCKTAG proclocktag;
2092 
2093  Assert(EligibleForRelationFastPath(locktag, lockmode));
2095  locktag,
2096  locallock->hashcode,
2097  HASH_FIND,
2098  NULL);
2099  if (!lock)
2100  elog(ERROR, "failed to re-find shared lock object");
2101  locallock->lock = lock;
2102 
2103  proclocktag.myLock = lock;
2104  proclocktag.myProc = MyProc;
2106  &proclocktag,
2107  HASH_FIND,
2108  NULL);
2109  if (!locallock->proclock)
2110  elog(ERROR, "failed to re-find shared proclock object");
2111  }
2112  LOCK_PRINT("LockRelease: found", lock, lockmode);
2113  proclock = locallock->proclock;
2114  PROCLOCK_PRINT("LockRelease: found", proclock);
2115 
2116  /*
2117  * Double-check that we are actually holding a lock of the type we want to
2118  * release.
2119  */
2120  if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
2121  {
2122  PROCLOCK_PRINT("LockRelease: WRONGTYPE", proclock);
2123  LWLockRelease(partitionLock);
2124  elog(WARNING, "you don't own a lock of type %s",
2125  lockMethodTable->lockModeNames[lockmode]);
2126  RemoveLocalLock(locallock);
2127  return false;
2128  }
2129 
2130  /*
2131  * Do the releasing. CleanUpLock will waken any now-wakable waiters.
2132  */
2133  wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
2134 
2135  CleanUpLock(lock, proclock,
2136  lockMethodTable, locallock->hashcode,
2137  wakeupNeeded);
2138 
2139  LWLockRelease(partitionLock);
2140 
2141  RemoveLocalLock(locallock);
2142  return true;
2143 }
static bool FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode)
Definition: lock.c:2657

References Assert(), CleanUpLock(), CurrentResourceOwner, EligibleForRelationFastPath, elog(), ERROR, FastPathLocalUseCount, FastPathUnGrantRelationLock(), PGPROC::fpInfoLock, HASH_FIND, hash_search(), hash_search_with_hash_value(), LOCALLOCK::hashcode, PROCLOCK::holdMask, i, lengthof, LOCALLOCKTAG::lock, LOCALLOCK::lock, LOCK_PRINT, LOCKBIT_ON, LOCALLOCK::lockCleared, LockHashPartitionLock, LockMethodLocalHash, LockMethodLockHash, LockMethodProcLockHash, LockMethods, LockMethodData::lockModeNames, LOCALLOCK::lockOwners, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_lockmethodid, LOG, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), MemSet, LOCALLOCKTAG::mode, PROCLOCKTAG::myLock, MyProc, PROCLOCKTAG::myProc, LOCALLOCK::nLocks, LockMethodData::numLockModes, LOCALLOCK::numLockOwners, LOCALLOCK::proclock, PROCLOCK_PRINT, RemoveLocalLock(), ResourceOwnerForgetLock(), UnGrantLock(), and WARNING.

Referenced by ConditionalXactLockTableWait(), pg_advisory_unlock_int4(), pg_advisory_unlock_int8(), pg_advisory_unlock_shared_int4(), pg_advisory_unlock_shared_int8(), ReleaseLockIfHeld(), SpeculativeInsertionLockRelease(), SpeculativeInsertionWait(), StandbyReleaseXidEntryLocks(), UnlockApplyTransactionForSession(), UnlockDatabaseObject(), UnlockPage(), UnlockRelation(), UnlockRelationForExtension(), UnlockRelationId(), UnlockRelationIdForSession(), UnlockRelationOid(), UnlockSharedObject(), UnlockSharedObjectForSession(), UnlockTuple(), VirtualXactLock(), XactLockForVirtualXact(), XactLockTableDelete(), and XactLockTableWait().

◆ LockReleaseAll()

void LockReleaseAll ( LOCKMETHODID  lockmethodid,
bool  allLocks 
)

Definition at line 2154 of file lock.c.

/*
 * LockReleaseAll -- release all locks of the given lockmethod held by this
 * backend (all locks if allLocks, else only non-session locks).
 *
 * Runs in two passes: first scan the backend-local locallock table to
 * drop unwanted entries (fast-path locks are released here too), then
 * scan each shared lock partition and release the marked modes via
 * UnGrantLock/CleanUpLock.
 *
 * NOTE(review): documentation-dump artifact -- several hyperlink-leading
 * lines were dropped by extraction: the HASH_SEQ_STATUS declaration and
 * hash_seq_init() call, the VirtualXactLockTableCleanup() call guarded by
 * the DEFAULT_LOCKMETHOD test, and the LWLockAcquire/LWLockRelease calls
 * on MyProc->fpInfoLock referenced by have_fast_path_lwlock.  Verify
 * against lock.c.
 */
2155 {
2157  LockMethod lockMethodTable;
2158  int i,
2159  numLockModes;
2160  LOCALLOCK *locallock;
2161  LOCK *lock;
2162  int partition;
2163  bool have_fast_path_lwlock = false;
2164 
2165  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2166  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2167  lockMethodTable = LockMethods[lockmethodid];
2168 
2169 #ifdef LOCK_DEBUG
2170  if (*(lockMethodTable->trace_flag))
2171  elog(LOG, "LockReleaseAll: lockmethod=%d", lockmethodid);
2172 #endif
2173 
2174  /*
2175  * Get rid of our fast-path VXID lock, if appropriate. Note that this is
2176  * the only way that the lock we hold on our own VXID can ever get
2177  * released: it is always and only released when a toplevel transaction
2178  * ends.
2179  */
2180  if (lockmethodid == DEFAULT_LOCKMETHOD)
2182 
2183  numLockModes = lockMethodTable->numLockModes;
2184 
2185  /*
2186  * First we run through the locallock table and get rid of unwanted
2187  * entries, then we scan the process's proclocks and get rid of those. We
2188  * do this separately because we may have multiple locallock entries
2189  * pointing to the same proclock, and we daren't end up with any dangling
2190  * pointers. Fast-path locks are cleaned up during the locallock table
2191  * scan, though.
2192  */
2194 
2195  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2196  {
2197  /*
2198  * If the LOCALLOCK entry is unused, we must've run out of shared
2199  * memory while trying to set up this lock. Just forget the local
2200  * entry.
2201  */
2202  if (locallock->nLocks == 0)
2203  {
2204  RemoveLocalLock(locallock);
2205  continue;
2206  }
2207 
2208  /* Ignore items that are not of the lockmethod to be removed */
2209  if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
2210  continue;
2211 
2212  /*
2213  * If we are asked to release all locks, we can just zap the entry.
2214  * Otherwise, must scan to see if there are session locks. We assume
2215  * there is at most one lockOwners entry for session locks.
2216  */
2217  if (!allLocks)
2218  {
2219  LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
2220 
2221  /* If session lock is above array position 0, move it down to 0 */
2222  for (i = 0; i < locallock->numLockOwners; i++)
2223  {
2224  if (lockOwners[i].owner == NULL)
2225  lockOwners[0] = lockOwners[i];
2226  else
2227  ResourceOwnerForgetLock(lockOwners[i].owner, locallock);
2228  }
2229 
2230  if (locallock->numLockOwners > 0 &&
2231  lockOwners[0].owner == NULL &&
2232  lockOwners[0].nLocks > 0)
2233  {
2234  /* Fix the locallock to show just the session locks */
2235  locallock->nLocks = lockOwners[0].nLocks;
2236  locallock->numLockOwners = 1;
2237  /* We aren't deleting this locallock, so done */
2238  continue;
2239  }
2240  else
2241  locallock->numLockOwners = 0;
2242  }
2243 
2244  /*
2245  * If the lock or proclock pointers are NULL, this lock was taken via
2246  * the relation fast-path (and is not known to have been transferred).
2247  */
2248  if (locallock->proclock == NULL || locallock->lock == NULL)
2249  {
2250  LOCKMODE lockmode = locallock->tag.mode;
2251  Oid relid;
2252 
2253  /* Verify that a fast-path lock is what we've got. */
2254  if (!EligibleForRelationFastPath(&locallock->tag.lock, lockmode))
2255  elog(PANIC, "locallock table corrupted");
2256 
2257  /*
2258  * If we don't currently hold the LWLock that protects our
2259  * fast-path data structures, we must acquire it before attempting
2260  * to release the lock via the fast-path. We will continue to
2261  * hold the LWLock until we're done scanning the locallock table,
2262  * unless we hit a transferred fast-path lock. (XXX is this
2263  * really such a good idea? There could be a lot of entries ...)
2264  */
2265  if (!have_fast_path_lwlock)
2266  {
2268  have_fast_path_lwlock = true;
2269  }
2270 
2271  /* Attempt fast-path release. */
2272  relid = locallock->tag.lock.locktag_field2;
2273  if (FastPathUnGrantRelationLock(relid, lockmode))
2274  {
2275  RemoveLocalLock(locallock);
2276  continue;
2277  }
2278 
2279  /*
2280  * Our lock, originally taken via the fast path, has been
2281  * transferred to the main lock table. That's going to require
2282  * some extra work, so release our fast-path lock before starting.
2283  */
2285  have_fast_path_lwlock = false;
2286 
2287  /*
2288  * Now dump the lock. We haven't got a pointer to the LOCK or
2289  * PROCLOCK in this case, so we have to handle this a bit
2290  * differently than a normal lock release. Unfortunately, this
2291  * requires an extra LWLock acquire-and-release cycle on the
2292  * partitionLock, but hopefully it shouldn't happen often.
2293  */
2294  LockRefindAndRelease(lockMethodTable, MyProc,
2295  &locallock->tag.lock, lockmode, false);
2296  RemoveLocalLock(locallock);
2297  continue;
2298  }
2299 
2300  /* Mark the proclock to show we need to release this lockmode */
2301  if (locallock->nLocks > 0)
2302  locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
2303 
2304  /* And remove the locallock hashtable entry */
2305  RemoveLocalLock(locallock);
2306  }
2307 
2308  /* Done with the fast-path data structures */
2309  if (have_fast_path_lwlock)
2311 
2312  /*
2313  * Now, scan each lock partition separately.
2314  */
2315  for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
2316  {
2317  LWLock *partitionLock;
2318  dlist_head *procLocks = &MyProc->myProcLocks[partition];
2319  dlist_mutable_iter proclock_iter;
2320 
2321  partitionLock = LockHashPartitionLockByIndex(partition);
2322 
2323  /*
2324  * If the proclock list for this partition is empty, we can skip
2325  * acquiring the partition lock. This optimization is trickier than
2326  * it looks, because another backend could be in process of adding
2327  * something to our proclock list due to promoting one of our
2328  * fast-path locks. However, any such lock must be one that we
2329  * decided not to delete above, so it's okay to skip it again now;
2330  * we'd just decide not to delete it again. We must, however, be
2331  * careful to re-fetch the list header once we've acquired the
2332  * partition lock, to be sure we have a valid, up-to-date pointer.
2333  * (There is probably no significant risk if pointer fetch/store is
2334  * atomic, but we don't wish to assume that.)
2335  *
2336  * XXX This argument assumes that the locallock table correctly
2337  * represents all of our fast-path locks. While allLocks mode
2338  * guarantees to clean up all of our normal locks regardless of the
2339  * locallock situation, we lose that guarantee for fast-path locks.
2340  * This is not ideal.
2341  */
2342  if (dlist_is_empty(procLocks))
2343  continue; /* needn't examine this partition */
2344 
2345  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2346 
2347  dlist_foreach_modify(proclock_iter, procLocks)
2348  {
2349  PROCLOCK *proclock = dlist_container(PROCLOCK, procLink, proclock_iter.cur);
2350  bool wakeupNeeded = false;
2351 
2352  Assert(proclock->tag.myProc == MyProc);
2353 
2354  lock = proclock->tag.myLock;
2355 
2356  /* Ignore items that are not of the lockmethod to be removed */
2357  if (LOCK_LOCKMETHOD(*lock) != lockmethodid)
2358  continue;
2359 
2360  /*
2361  * In allLocks mode, force release of all locks even if locallock
2362  * table had problems
2363  */
2364  if (allLocks)
2365  proclock->releaseMask = proclock->holdMask;
2366  else
2367  Assert((proclock->releaseMask & ~proclock->holdMask) == 0);
2368 
2369  /*
2370  * Ignore items that have nothing to be released, unless they have
2371  * holdMask == 0 and are therefore recyclable
2372  */
2373  if (proclock->releaseMask == 0 && proclock->holdMask != 0)
2374  continue;
2375 
2376  PROCLOCK_PRINT("LockReleaseAll", proclock);
2377  LOCK_PRINT("LockReleaseAll", lock, 0);
2378  Assert(lock->nRequested >= 0);
2379  Assert(lock->nGranted >= 0);
2380  Assert(lock->nGranted <= lock->nRequested);
2381  Assert((proclock->holdMask & ~lock->grantMask) == 0);
2382 
2383  /*
2384  * Release the previously-marked lock modes
2385  */
2386  for (i = 1; i <= numLockModes; i++)
2387  {
2388  if (proclock->releaseMask & LOCKBIT_ON(i))
2389  wakeupNeeded |= UnGrantLock(lock, i, proclock,
2390  lockMethodTable);
2391  }
2392  Assert((lock->nRequested >= 0) && (lock->nGranted >= 0));
2393  Assert(lock->nGranted <= lock->nRequested);
2394  LOCK_PRINT("LockReleaseAll: updated", lock, 0);
2395 
2396  proclock->releaseMask = 0;
2397 
2398  /* CleanUpLock will wake up waiters if needed. */
2399  CleanUpLock(lock, proclock,
2400  lockMethodTable,
2401  LockTagHashCode(&lock->tag),
2402  wakeupNeeded);
2403  } /* loop over PROCLOCKs within this partition */
2404 
2405  LWLockRelease(partitionLock);
2406  } /* loop over partitions */
2407 
2408 #ifdef LOCK_DEBUG
2409  if (*(lockMethodTable->trace_flag))
2410  elog(LOG, "LockReleaseAll done");
2411 #endif
2412 }
#define dlist_foreach_modify(iter, lhead)
Definition: ilist.h:640
void VirtualXactLockTableCleanup(void)
Definition: lock.c:4434
#define LOCALLOCK_LOCKMETHOD(llock)
Definition: lock.h:443
const bool * trace_flag
Definition: lock.h:113
dlist_node * cur
Definition: ilist.h:200

References Assert(), CleanUpLock(), dlist_mutable_iter::cur, DEFAULT_LOCKMETHOD, dlist_container, dlist_foreach_modify, dlist_is_empty(), EligibleForRelationFastPath, elog(), ERROR, FastPathUnGrantRelationLock(), PGPROC::fpInfoLock, LOCK::grantMask, hash_seq_init(), hash_seq_search(), PROCLOCK::holdMask, i, lengthof, LOCALLOCK_LOCKMETHOD, LOCALLOCKTAG::lock, LOCALLOCK::lock, LOCK_LOCKMETHOD, LOCK_PRINT, LOCKBIT_ON, LockHashPartitionLockByIndex, LockMethodLocalHash, LockMethods, LOCALLOCK::lockOwners, LockRefindAndRelease(), LOCKTAG::locktag_field2, LockTagHashCode(), LOG, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), LOCALLOCKTAG::mode, PROCLOCKTAG::myLock, MyProc, PROCLOCKTAG::myProc, PGPROC::myProcLocks, LOCK::nGranted, LOCALLOCKOWNER::nLocks, LOCALLOCK::nLocks, LOCK::nRequested, NUM_LOCK_PARTITIONS, LockMethodData::numLockModes, LOCALLOCK::numLockOwners, LOCALLOCKOWNER::owner, PANIC, LOCALLOCK::proclock, PROCLOCK_PRINT, PROCLOCK::releaseMask, RemoveLocalLock(), ResourceOwnerForgetLock(), status(), LOCK::tag, PROCLOCK::tag, LOCALLOCK::tag, LockMethodData::trace_flag, UnGrantLock(), and VirtualXactLockTableCleanup().

Referenced by DiscardAll(), logicalrep_worker_onexit(), ProcReleaseLocks(), and ShutdownPostgres().

◆ LockReleaseCurrentOwner()

void LockReleaseCurrentOwner ( LOCALLOCK **  locallocks,
int  nlocks 
)

Definition at line 2449 of file lock.c.

/*
 * Release all locks belonging to CurrentResourceOwner (ReleaseLockIfHeld is
 * called with sessionLock = false, which targets CurrentResourceOwner).
 *
 * If the caller supplies its own array of locallocks, only those entries are
 * visited; otherwise the entire LockMethodLocalHash table is scanned.
 *
 * NOTE(review): the doc extractor dropped original lines 2453 and 2456-2457
 * (the hash_seq_status declaration and the hash_seq_init() call) from this
 * listing -- see the References list below; confirm against lock.c.
 */
2450 {
2451  if (locallocks == NULL)
2452  {
2454  LOCALLOCK *locallock;
2455 
2457 
2458  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2459  ReleaseLockIfHeld(locallock, false);
2460  }
2461  else
2462  {
2463  int i;
2464 
  /* Walk the caller-provided array back to front. */
2465  for (i = nlocks - 1; i >= 0; i--)
2466  ReleaseLockIfHeld(locallocks[i], false);
2467  }
2468 }
static void ReleaseLockIfHeld(LOCALLOCK *locallock, bool sessionLock)
Definition: lock.c:2484

References hash_seq_init(), hash_seq_search(), i, LockMethodLocalHash, ReleaseLockIfHeld(), and status().

Referenced by ResourceOwnerReleaseInternal().

◆ LockReleaseSession()

void LockReleaseSession ( LOCKMETHODID  lockmethodid)

Definition at line 2419 of file lock.c.

/*
 * Release all session-level locks of the given lock method (ReleaseLockIfHeld
 * is called with sessionLock = true, i.e. owner == NULL).
 *
 * NOTE(review): the doc extractor dropped original lines 2421 and 2427-2428
 * (the hash_seq_status declaration and the hash_seq_init() call) from this
 * listing; confirm against lock.c.
 */
2420 {
2422  LOCALLOCK *locallock;
2423 
  /* Reject lockmethodids outside the LockMethods table's bounds. */
2424  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2425  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2426 
2428 
2429  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2430  {
2431  /* Ignore items that are not of the specified lock method */
2432  if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
2433  continue;
2434 
2435  ReleaseLockIfHeld(locallock, true);
2436  }
2437 }

References elog(), ERROR, hash_seq_init(), hash_seq_search(), lengthof, LOCALLOCK_LOCKMETHOD, LockMethodLocalHash, LockMethods, ReleaseLockIfHeld(), and status().

Referenced by pg_advisory_unlock_all().

◆ LockShmemSize()

Size LockShmemSize ( void  )

Definition at line 3559 of file lock.c.

/*
 * Estimate the amount of shared memory needed by the lock manager's two
 * hash tables.  The proclock table is sized at twice NLOCKENTS() because
 * one lock object may have proclock entries from multiple backends.
 */
3560 {
3561  Size size = 0;
3562  long max_table_size;
3563 
3564  /* lock hash table */
3565  max_table_size = NLOCKENTS();
3566  size = add_size(size, hash_estimate_size(max_table_size, sizeof(LOCK)));
3567 
3568  /* proclock hash table */
3569  max_table_size *= 2;
3570  size = add_size(size, hash_estimate_size(max_table_size, sizeof(PROCLOCK)));
3571 
3572  /*
3573  * Since NLOCKENTS is only an estimate, add 10% safety margin.
3574  */
3575  size = add_size(size, size / 10);
3576 
3577  return size;
3578 }
size_t Size
Definition: c.h:589
Size hash_estimate_size(long num_entries, Size entrysize)
Definition: dynahash.c:781
Size add_size(Size s1, Size s2)
Definition: shmem.c:502

References add_size(), hash_estimate_size(), and NLOCKENTS.

Referenced by CalculateShmemSize().

◆ LockTagHashCode()

uint32 LockTagHashCode ( const LOCKTAG locktag)

Definition at line 517 of file lock.c.

/*
 * Compute the hash code for a LOCKTAG using the shared lock hash table's own
 * hash function, so the result matches that table's bucket assignment and can
 * be reused for partition selection (see FastPathStrongLockHashPartition and
 * proclock_hash, which build on this value).
 */
518 {
519  return get_hash_value(LockMethodLockHash, (const void *) locktag);
520 }
uint32 get_hash_value(HTAB *hashp, const void *keyPtr)
Definition: dynahash.c:909

References get_hash_value(), and LockMethodLockHash.

Referenced by CheckDeadLock(), GetLockConflicts(), lock_twophase_recover(), LockAcquireExtended(), LockRefindAndRelease(), LockReleaseAll(), LockWaiterCount(), proclock_hash(), and VirtualXactLock().

◆ LockWaiterCount()

int LockWaiterCount ( const LOCKTAG locktag)

Definition at line 4645 of file lock.c.

/*
 * Look up the shared LOCK object for the given tag and return its nRequested
 * count, or 0 if no such lock object exists.
 *
 * NOTE(review): per the comment in SetupLockInTable, nRequested counts the
 * total number of requests "whether granted or waiting", so this value
 * includes current holders as well as waiters -- an estimate, not a strict
 * waiter count.  Also, the doc extractor dropped original line 4661 (the
 * hash_search_with_hash_value() call assigning 'lock'); confirm against
 * lock.c.
 */
4646 {
4647  LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
4648  LOCK *lock;
4649  bool found;
4650  uint32 hashcode;
4651  LWLock *partitionLock;
4652  int waiters = 0;
4653 
4654  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4655  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4656 
  /* Only the partition covering this hashcode needs to be locked. */
4657  hashcode = LockTagHashCode(locktag);
4658  partitionLock = LockHashPartitionLock(hashcode);
4659  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4660 
4662  locktag,
4663  hashcode,
4664  HASH_FIND,
4665  &found);
4666  if (found)
4667  {
4668  Assert(lock != NULL);
4669  waiters = lock->nRequested;
4670  }
4671  LWLockRelease(partitionLock);
4672 
4673  return waiters;
4674 }

References Assert(), elog(), ERROR, HASH_FIND, hash_search_with_hash_value(), lengthof, LockHashPartitionLock, LockMethodLockHash, LockMethods, LOCKTAG::locktag_lockmethodid, LockTagHashCode(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), and LOCK::nRequested.

Referenced by RelationExtensionLockWaiterCount().

◆ MarkLockClear()

void MarkLockClear ( LOCALLOCK locallock)

Definition at line 1794 of file lock.c.

/*
 * Flag a held locallock as "cleared".  Callers (LockRelation, LockRelationOid,
 * etc. -- see Referenced-by list) invoke this after the lock is safely
 * acquired; the lock must actually be held (nLocks > 0).
 */
1795 {
1796  Assert(locallock->nLocks > 0);
1797  locallock->lockCleared = true;
1798 }

References Assert(), LOCALLOCK::lockCleared, and LOCALLOCK::nLocks.

Referenced by ConditionalLockRelation(), ConditionalLockRelationOid(), LockRelation(), LockRelationId(), and LockRelationOid().

◆ PostPrepare_Locks()

void PostPrepare_Locks ( TransactionId  xid)

Definition at line 3375 of file lock.c.

/*
 * Reassign this backend's transaction-level locks to the dummy PGPROC that
 * represents the prepared transaction 'xid'.  Runs in two passes: first the
 * locallock table is purged of entries being transferred, then each proclock
 * is re-keyed (via hash_update_hash_key) to point at the dummy proc.
 *
 * NOTE(review): the doc extractor dropped several original lines from this
 * listing, including 3378 (hash_seq_status declaration), 3387 (second half of
 * the lock-group Assert), 3390 (START_CRIT_SECTION()), 3401 (hash_seq_init()),
 * 3486 (the VXID-lock test), and 3537 (the hash_update_hash_key() call);
 * confirm against lock.c.
 */
3376 {
3377  PGPROC *newproc = TwoPhaseGetDummyProc(xid, false);
3379  LOCALLOCK *locallock;
3380  LOCK *lock;
3381  PROCLOCK *proclock;
3382  PROCLOCKTAG proclocktag;
3383  int partition;
3384 
3385  /* Can't prepare a lock group follower. */
3386  Assert(MyProc->lockGroupLeader == NULL ||
3388 
3389  /* This is a critical section: any error means big trouble */
3391 
3392  /*
3393  * First we run through the locallock table and get rid of unwanted
3394  * entries, then we scan the process's proclocks and transfer them to the
3395  * target proc.
3396  *
3397  * We do this separately because we may have multiple locallock entries
3398  * pointing to the same proclock, and we daren't end up with any dangling
3399  * pointers.
3400  */
3402 
3403  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3404  {
3405  LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3406  bool haveSessionLock;
3407  bool haveXactLock;
3408  int i;
3409 
3410  if (locallock->proclock == NULL || locallock->lock == NULL)
3411  {
3412  /*
3413  * We must've run out of shared memory while trying to set up this
3414  * lock. Just forget the local entry.
3415  */
3416  Assert(locallock->nLocks == 0);
3417  RemoveLocalLock(locallock);
3418  continue;
3419  }
3420 
3421  /* Ignore VXID locks */
3422  if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3423  continue;
3424 
3425  /* Scan to see whether we hold it at session or transaction level */
3426  haveSessionLock = haveXactLock = false;
3427  for (i = locallock->numLockOwners - 1; i >= 0; i--)
3428  {
3429  if (lockOwners[i].owner == NULL)
3430  haveSessionLock = true;
3431  else
3432  haveXactLock = true;
3433  }
3434 
3435  /* Ignore it if we have only session lock */
3436  if (!haveXactLock)
3437  continue;
3438 
3439  /* This can't happen, because we already checked it */
3440  if (haveSessionLock)
3441  ereport(PANIC,
3442  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3443  errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3444 
3445  /* Mark the proclock to show we need to release this lockmode */
3446  if (locallock->nLocks > 0)
3447  locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
3448 
3449  /* And remove the locallock hashtable entry */
3450  RemoveLocalLock(locallock);
3451  }
3452 
3453  /*
3454  * Now, scan each lock partition separately.
3455  */
3456  for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
3457  {
3458  LWLock *partitionLock;
3459  dlist_head *procLocks = &(MyProc->myProcLocks[partition]);
3460  dlist_mutable_iter proclock_iter;
3461 
3462  partitionLock = LockHashPartitionLockByIndex(partition);
3463 
3464  /*
3465  * If the proclock list for this partition is empty, we can skip
3466  * acquiring the partition lock. This optimization is safer than the
3467  * situation in LockReleaseAll, because we got rid of any fast-path
3468  * locks during AtPrepare_Locks, so there cannot be any case where
3469  * another backend is adding something to our lists now. For safety,
3470  * though, we code this the same way as in LockReleaseAll.
3471  */
3472  if (dlist_is_empty(procLocks))
3473  continue; /* needn't examine this partition */
3474 
3475  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
3476 
3477  dlist_foreach_modify(proclock_iter, procLocks)
3478  {
3479  proclock = dlist_container(PROCLOCK, procLink, proclock_iter.cur);
3480 
3481  Assert(proclock->tag.myProc == MyProc);
3482 
3483  lock = proclock->tag.myLock;
3484 
3485  /* Ignore VXID locks */
3487  continue;
3488 
3489  PROCLOCK_PRINT("PostPrepare_Locks", proclock);
3490  LOCK_PRINT("PostPrepare_Locks", lock, 0);
3491  Assert(lock->nRequested >= 0);
3492  Assert(lock->nGranted >= 0);
3493  Assert(lock->nGranted <= lock->nRequested);
3494  Assert((proclock->holdMask & ~lock->grantMask) == 0);
3495 
3496  /* Ignore it if nothing to release (must be a session lock) */
3497  if (proclock->releaseMask == 0)
3498  continue;
3499 
3500  /* Else we should be releasing all locks */
3501  if (proclock->releaseMask != proclock->holdMask)
3502  elog(PANIC, "we seem to have dropped a bit somewhere");
3503 
3504  /*
3505  * We cannot simply modify proclock->tag.myProc to reassign
3506  * ownership of the lock, because that's part of the hash key and
3507  * the proclock would then be in the wrong hash chain. Instead
3508  * use hash_update_hash_key. (We used to create a new hash entry,
3509  * but that risks out-of-memory failure if other processes are
3510  * busy making proclocks too.) We must unlink the proclock from
3511  * our procLink chain and put it into the new proc's chain, too.
3512  *
3513  * Note: the updated proclock hash key will still belong to the
3514  * same hash partition, cf proclock_hash(). So the partition lock
3515  * we already hold is sufficient for this.
3516  */
3517  dlist_delete(&proclock->procLink);
3518 
3519  /*
3520  * Create the new hash key for the proclock.
3521  */
3522  proclocktag.myLock = lock;
3523  proclocktag.myProc = newproc;
3524 
3525  /*
3526  * Update groupLeader pointer to point to the new proc. (We'd
3527  * better not be a member of somebody else's lock group!)
3528  */
3529  Assert(proclock->groupLeader == proclock->tag.myProc);
3530  proclock->groupLeader = newproc;
3531 
3532  /*
3533  * Update the proclock. We should not find any existing entry for
3534  * the same hash key, since there can be only one entry for any
3535  * given lock with my own proc.
3536  */
3538  proclock,
3539  &proclocktag))
3540  elog(PANIC, "duplicate entry found while reassigning a prepared transaction's locks");
3541 
3542  /* Re-link into the new proc's proclock list */
3543  dlist_push_tail(&newproc->myProcLocks[partition], &proclock->procLink);
3544 
3545  PROCLOCK_PRINT("PostPrepare_Locks: updated", proclock);
3546  } /* loop over PROCLOCKs within this partition */
3547 
3548  LWLockRelease(partitionLock);
3549  } /* loop over partitions */
3550 
3551  END_CRIT_SECTION();
3552 }
bool hash_update_hash_key(HTAB *hashp, void *existingEntry, const void *newKeyPtr)
Definition: dynahash.c:1157
#define START_CRIT_SECTION()
Definition: miscadmin.h:148
#define END_CRIT_SECTION()
Definition: miscadmin.h:150

References Assert(), dlist_mutable_iter::cur, dlist_container, dlist_delete(), dlist_foreach_modify, dlist_is_empty(), dlist_push_tail(), elog(), END_CRIT_SECTION, ereport, errcode(), errmsg(), LOCK::grantMask, PROCLOCK::groupLeader, hash_seq_init(), hash_seq_search(), hash_update_hash_key(), PROCLOCK::holdMask, i, LOCALLOCKTAG::lock, LOCALLOCK::lock, LOCK_PRINT, LOCKBIT_ON, PGPROC::lockGroupLeader, LockHashPartitionLockByIndex, LockMethodLocalHash, LockMethodProcLockHash, LOCALLOCK::lockOwners, LOCKTAG::locktag_type, LOCKTAG_VIRTUALTRANSACTION, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), LOCALLOCKTAG::mode, PROCLOCKTAG::myLock, MyProc, PROCLOCKTAG::myProc, PGPROC::myProcLocks, LOCK::nGranted, LOCALLOCK::nLocks, LOCK::nRequested, NUM_LOCK_PARTITIONS, LOCALLOCK::numLockOwners, PANIC, PROCLOCK::procLink, LOCALLOCK::proclock, PROCLOCK_PRINT, PROCLOCK::releaseMask, RemoveLocalLock(), START_CRIT_SECTION, status(), LOCK::tag, PROCLOCK::tag, LOCALLOCK::tag, and TwoPhaseGetDummyProc().

Referenced by PrepareTransaction().

◆ proclock_hash()

static uint32 proclock_hash ( const void *  key,
Size  keysize 
)
static

Definition at line 534 of file lock.c.

/*
 * Hash function for PROCLOCKTAG keys: combine the associated LOCK's hash code
 * with the PGPROC pointer, shifted left so the partition-number bits of the
 * lock's hash are preserved.  Must stay in sync with ProcLockHashCode().
 */
535 {
536  const PROCLOCKTAG *proclocktag = (const PROCLOCKTAG *) key;
537  uint32 lockhash;
538  Datum procptr;
539 
540  Assert(keysize == sizeof(PROCLOCKTAG));
541 
542  /* Look into the associated LOCK object, and compute its hash code */
543  lockhash = LockTagHashCode(&proclocktag->myLock->tag);
544 
545  /*
546  * To make the hash code also depend on the PGPROC, we xor the proc
547  * struct's address into the hash code, left-shifted so that the
548  * partition-number bits don't change. Since this is only a hash, we
549  * don't care if we lose high-order bits of the address; use an
550  * intermediate variable to suppress cast-pointer-to-int warnings.
551  */
552  procptr = PointerGetDatum(proclocktag->myProc);
553  lockhash ^= ((uint32) procptr) << LOG2_NUM_LOCK_PARTITIONS;
554 
555  return lockhash;
556 }
#define LOG2_NUM_LOCK_PARTITIONS
Definition: lwlock.h:97
static Datum PointerGetDatum(const void *X)
Definition: postgres.h:322
uintptr_t Datum
Definition: postgres.h:64

References Assert(), sort-test::key, LockTagHashCode(), LOG2_NUM_LOCK_PARTITIONS, PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, PointerGetDatum(), and LOCK::tag.

Referenced by InitLocks().

◆ ProcLockHashCode()

static uint32 ProcLockHashCode ( const PROCLOCKTAG proclocktag,
uint32  hashcode 
)
inlinestatic

Definition at line 565 of file lock.c.

/*
 * Fast-path variant of proclock_hash(): compute the proclock hash when the
 * LOCK's hash code is already known, avoiding a second LockTagHashCode()
 * call.  The mixing step below must match proclock_hash() exactly.
 */
566 {
567  uint32 lockhash = hashcode;
568  Datum procptr;
569 
570  /*
571  * This must match proclock_hash()!
572  */
573  procptr = PointerGetDatum(proclocktag->myProc);
574  lockhash ^= ((uint32) procptr) << LOG2_NUM_LOCK_PARTITIONS;
575 
576  return lockhash;
577 }

References LOG2_NUM_LOCK_PARTITIONS, PROCLOCKTAG::myProc, and PointerGetDatum().

Referenced by CleanUpLock(), FastPathGetRelationLockEntry(), lock_twophase_recover(), LockAcquireExtended(), LockRefindAndRelease(), and SetupLockInTable().

◆ ReleaseLockIfHeld()

static void ReleaseLockIfHeld ( LOCALLOCK locallock,
bool  sessionLock 
)
static

Definition at line 2484 of file lock.c.

/*
 * Release the target owner's hold on one locallock.  With sessionLock true
 * the target owner is the session (owner == NULL); otherwise it is
 * CurrentResourceOwner.  If other owners still hold the lock, only the
 * target owner's counts are forgotten; if this owner holds the only
 * remaining counts, LockRelease() is called once to drop the lock entirely.
 */
2485 {
2486  ResourceOwner owner;
2487  LOCALLOCKOWNER *lockOwners;
2488  int i;
2489 
2490  /* Identify owner for lock (must match LockRelease!) */
2491  if (sessionLock)
2492  owner = NULL;
2493  else
2494  owner = CurrentResourceOwner;
2495 
2496  /* Scan to see if there are any locks belonging to the target owner */
2497  lockOwners = locallock->lockOwners;
2498  for (i = locallock->numLockOwners - 1; i >= 0; i--)
2499  {
2500  if (lockOwners[i].owner == owner)
2501  {
2502  Assert(lockOwners[i].nLocks > 0);
2503  if (lockOwners[i].nLocks < locallock->nLocks)
2504  {
2505  /*
2506  * We will still hold this lock after forgetting this
2507  * ResourceOwner.
2508  */
2509  locallock->nLocks -= lockOwners[i].nLocks;
2510  /* compact out unused slot */
2511  locallock->numLockOwners--;
2512  if (owner != NULL)
2513  ResourceOwnerForgetLock(owner, locallock);
2514  if (i < locallock->numLockOwners)
2515  lockOwners[i] = lockOwners[locallock->numLockOwners];
2516  }
2517  else
2518  {
2519  Assert(lockOwners[i].nLocks == locallock->nLocks);
2520  /* We want to call LockRelease just once */
2521  lockOwners[i].nLocks = 1;
2522  locallock->nLocks = 1;
2523  if (!LockRelease(&locallock->tag.lock,
2524  locallock->tag.mode,
2525  sessionLock))
2526  elog(WARNING, "ReleaseLockIfHeld: failed??");
2527  }
2528  break;
2529  }
2530  }
2531 }
bool LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
Definition: lock.c:1949

References Assert(), CurrentResourceOwner, elog(), i, LOCALLOCKTAG::lock, LOCALLOCK::lockOwners, LockRelease(), LOCALLOCKTAG::mode, LOCALLOCKOWNER::nLocks, LOCALLOCK::nLocks, LOCALLOCK::numLockOwners, ResourceOwnerForgetLock(), LOCALLOCK::tag, and WARNING.

Referenced by LockReleaseCurrentOwner(), and LockReleaseSession().

◆ RemoveFromWaitQueue()

void RemoveFromWaitQueue ( PGPROC proc,
uint32  hashcode 
)

Definition at line 1893 of file lock.c.

/*
 * Remove 'proc' from its lock's wait queue, undo the request-count bumps it
 * made while waiting, and clean up the lock (waking other waiters if
 * appropriate).  The caller must hold the appropriate partition lock;
 * 'hashcode' is the lock tag's hash value.
 *
 * NOTE(review): the doc extractor dropped original lines 1901 and 1923 from
 * this listing -- judging by the References list (PROC_WAIT_STATUS_WAITING,
 * PROC_WAIT_STATUS_ERROR, PGPROC::waitStatus), these are presumably the
 * waitStatus Assert and the assignment signaling ok/fail to the waiter;
 * confirm against lock.c.
 */
1894 {
1895  LOCK *waitLock = proc->waitLock;
1896  PROCLOCK *proclock = proc->waitProcLock;
1897  LOCKMODE lockmode = proc->waitLockMode;
1898  LOCKMETHODID lockmethodid = LOCK_LOCKMETHOD(*waitLock);
1899 
1900  /* Make sure proc is waiting */
1902  Assert(proc->links.next != NULL);
1903  Assert(waitLock);
1904  Assert(!dclist_is_empty(&waitLock->waitProcs));
1905  Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
1906 
1907  /* Remove proc from lock's wait queue */
1908  dclist_delete_from(&waitLock->waitProcs, &proc->links);
1909 
1910  /* Undo increments of request counts by waiting process */
1911  Assert(waitLock->nRequested > 0);
1912  Assert(waitLock->nRequested > proc->waitLock->nGranted);
1913  waitLock->nRequested--;
1914  Assert(waitLock->requested[lockmode] > 0);
1915  waitLock->requested[lockmode]--;
1916  /* don't forget to clear waitMask bit if appropriate */
1917  if (waitLock->granted[lockmode] == waitLock->requested[lockmode])
1918  waitLock->waitMask &= LOCKBIT_OFF(lockmode);
1919 
1920  /* Clean up the proc's own state, and pass it the ok/fail signal */
1921  proc->waitLock = NULL;
1922  proc->waitProcLock = NULL;
1924 
1925  /*
1926  * Delete the proclock immediately if it represents no already-held locks.
1927  * (This must happen now because if the owner of the lock decides to
1928  * release it, and the requested/granted counts then go to zero,
1929  * LockRelease expects there to be no remaining proclocks.) Then see if
1930  * any other waiters for the lock can be woken up now.
1931  */
1932  CleanUpLock(waitLock, proclock,
1933  LockMethods[lockmethodid], hashcode,
1934  true);
1935 }
static bool dclist_is_empty(const dclist_head *head)
Definition: ilist.h:682
static void dclist_delete_from(dclist_head *head, dlist_node *node)
Definition: ilist.h:763
@ PROC_WAIT_STATUS_WAITING
Definition: proc.h:125
@ PROC_WAIT_STATUS_ERROR
Definition: proc.h:126
PROCLOCK * waitProcLock
Definition: proc.h:224
ProcWaitStatus waitStatus
Definition: proc.h:168

References Assert(), CleanUpLock(), dclist_delete_from(), dclist_is_empty(), LOCK::granted, lengthof, PGPROC::links, LOCK_LOCKMETHOD, LOCKBIT_OFF, LockMethods, dlist_node::next, LOCK::nGranted, LOCK::nRequested, PROC_WAIT_STATUS_ERROR, PROC_WAIT_STATUS_WAITING, LOCK::requested, PGPROC::waitLock, PGPROC::waitLockMode, LOCK::waitMask, PGPROC::waitProcLock, LOCK::waitProcs, and PGPROC::waitStatus.

Referenced by CheckDeadLock(), LockErrorCleanup(), and ProcSleep().

◆ RemoveLocalLock()

static void RemoveLocalLock ( LOCALLOCK locallock)
static

Definition at line 1366 of file lock.c.

/*
 * Discard a locallock hashtable entry: forget all resource-owner references,
 * decrement the strong-lock count for its fast-path partition if this lock
 * was counted there, and remove the entry from LockMethodLocalHash.
 *
 * NOTE(review): the doc extractor dropped original lines 1386, 1390, and
 * 1393 from this listing -- per the References list (SpinLockAcquire,
 * SpinLockRelease, FastPathStrongRelationLockData::mutex, hash_search),
 * these are presumably the spinlock acquire/release around the count
 * decrement and the hash_search(HASH_REMOVE) call; confirm against lock.c.
 */
1367 {
1368  int i;
1369 
1370  for (i = locallock->numLockOwners - 1; i >= 0; i--)
1371  {
1372  if (locallock->lockOwners[i].owner != NULL)
1373  ResourceOwnerForgetLock(locallock->lockOwners[i].owner, locallock);
1374  }
1375  locallock->numLockOwners = 0;
1376  if (locallock->lockOwners != NULL)
1377  pfree(locallock->lockOwners);
1378  locallock->lockOwners = NULL;
1379 
1380  if (locallock->holdsStrongLockCount)
1381  {
1382  uint32 fasthashcode;
1383 
1384  fasthashcode = FastPathStrongLockHashPartition(locallock->hashcode);
1385 
1387  Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
1388  FastPathStrongRelationLocks->count[fasthashcode]--;
1389  locallock->holdsStrongLockCount = false;
1391  }
1392 
1394  &(locallock->tag),
1395  HASH_REMOVE, NULL))
1396  elog(WARNING, "locallock table corrupted");
1397 
1398  /*
1399  * Indicate that the lock is released for certain types of locks
1400  */
1401  CheckAndSetLockHeld(locallock, false);
1402 }
void pfree(void *pointer)
Definition: mcxt.c:1436

References Assert(), CheckAndSetLockHeld(), FastPathStrongRelationLockData::count, elog(), FastPathStrongLockHashPartition, FastPathStrongRelationLocks, HASH_REMOVE, hash_search(), LOCALLOCK::hashcode, LOCALLOCK::holdsStrongLockCount, i, LockMethodLocalHash, LOCALLOCK::lockOwners, FastPathStrongRelationLockData::mutex, LOCALLOCK::numLockOwners, LOCALLOCKOWNER::owner, pfree(), ResourceOwnerForgetLock(), SpinLockAcquire, SpinLockRelease, LOCALLOCK::tag, and WARNING.

Referenced by LockAcquireExtended(), LockHasWaiters(), LockRelease(), LockReleaseAll(), and PostPrepare_Locks().

◆ SetupLockInTable()

static PROCLOCK * SetupLockInTable ( LockMethod  lockMethodTable,
PGPROC proc,
const LOCKTAG locktag,
uint32  hashcode,
LOCKMODE  lockmode 
)
static

Definition at line 1170 of file lock.c.

/*
 * Find or create the shared LOCK and PROCLOCK objects for a lock request,
 * incrementing the request counts.  Returns NULL (after garbage-collecting
 * an otherwise-unused LOCK) if shared memory for the proclock is exhausted.
 * The caller must hold the partition lock for 'hashcode'.
 *
 * NOTE(review): the doc extractor dropped original lines 1182, 1185, 1224,
 * 1227, and 1241 from this listing -- the hash_search_with_hash_value()
 * calls (HASH_ENTER_NULL for lock and proclock, HASH_REMOVE for the orphaned
 * lock); confirm against lock.c.
 */
1172 {
1173  LOCK *lock;
1174  PROCLOCK *proclock;
1175  PROCLOCKTAG proclocktag;
1176  uint32 proclock_hashcode;
1177  bool found;
1178 
1179  /*
1180  * Find or create a lock with this tag.
1181  */
1183  locktag,
1184  hashcode,
1186  &found);
1187  if (!lock)
1188  return NULL;
1189 
1190  /*
1191  * if it's a new lock object, initialize it
1192  */
1193  if (!found)
1194  {
1195  lock->grantMask = 0;
1196  lock->waitMask = 0;
1197  dlist_init(&lock->procLocks);
1198  dclist_init(&lock->waitProcs);
1199  lock->nRequested = 0;
1200  lock->nGranted = 0;
1201  MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
1202  MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
1203  LOCK_PRINT("LockAcquire: new", lock, lockmode);
1204  }
1205  else
1206  {
1207  LOCK_PRINT("LockAcquire: found", lock, lockmode);
1208  Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
1209  Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
1210  Assert(lock->nGranted <= lock->nRequested);
1211  }
1212 
1213  /*
1214  * Create the hash key for the proclock table.
1215  */
1216  proclocktag.myLock = lock;
1217  proclocktag.myProc = proc;
1218 
1219  proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
1220 
1221  /*
1222  * Find or create a proclock entry with this tag
1223  */
1225  &proclocktag,
1226  proclock_hashcode,
1228  &found);
1229  if (!proclock)
1230  {
1231  /* Oops, not enough shmem for the proclock */
1232  if (lock->nRequested == 0)
1233  {
1234  /*
1235  * There are no other requestors of this lock, so garbage-collect
1236  * the lock object. We *must* do this to avoid a permanent leak
1237  * of shared memory, because there won't be anything to cause
1238  * anyone to release the lock object later.
1239  */
1240  Assert(dlist_is_empty(&(lock->procLocks)));
1242  &(lock->tag),
1243  hashcode,
1244  HASH_REMOVE,
1245  NULL))
1246  elog(PANIC, "lock table corrupted");
1247  }
1248  return NULL;
1249  }
1250 
1251  /*
1252  * If new, initialize the new entry
1253  */
1254  if (!found)
1255  {
1256  uint32 partition = LockHashPartition(hashcode);
1257 
1258  /*
1259  * It might seem unsafe to access proclock->groupLeader without a
1260  * lock, but it's not really. Either we are initializing a proclock
1261  * on our own behalf, in which case our group leader isn't changing
1262  * because the group leader for a process can only ever be changed by
1263  * the process itself; or else we are transferring a fast-path lock to
1264  * the main lock table, in which case that process can't change its
1265  * lock group leader without first releasing all of its locks (and in
1266  * particular the one we are currently transferring).
1267  */
1268  proclock->groupLeader = proc->lockGroupLeader != NULL ?
1269  proc->lockGroupLeader : proc;
1270  proclock->holdMask = 0;
1271  proclock->releaseMask = 0;
1272  /* Add proclock to appropriate lists */
1273  dlist_push_tail(&lock->procLocks, &proclock->lockLink);
1274  dlist_push_tail(&proc->myProcLocks[partition], &proclock->procLink);
1275  PROCLOCK_PRINT("LockAcquire: new", proclock);
1276  }
1277  else
1278  {
1279  PROCLOCK_PRINT("LockAcquire: found", proclock);
1280  Assert((proclock->holdMask & ~lock->grantMask) == 0);
1281 
1282 #ifdef CHECK_DEADLOCK_RISK
1283 
1284  /*
1285  * Issue warning if we already hold a lower-level lock on this object
1286  * and do not hold a lock of the requested level or higher. This
1287  * indicates a deadlock-prone coding practice (eg, we'd have a
1288  * deadlock if another backend were following the same code path at
1289  * about the same time).
1290  *
1291  * This is not enabled by default, because it may generate log entries
1292  * about user-level coding practices that are in fact safe in context.
1293  * It can be enabled to help find system-level problems.
1294  *
1295  * XXX Doing numeric comparison on the lockmodes is a hack; it'd be
1296  * better to use a table. For now, though, this works.
1297  */
1298  {
1299  int i;
1300 
1301  for (i = lockMethodTable->numLockModes; i > 0; i--)
1302  {
1303  if (proclock->holdMask & LOCKBIT_ON(i))
1304  {
1305  if (i >= (int) lockmode)
1306  break; /* safe: we have a lock >= req level */
1307  elog(LOG, "deadlock risk: raising lock level"
1308  " from %s to %s on object %u/%u/%u",
1309  lockMethodTable->lockModeNames[i],
1310  lockMethodTable->lockModeNames[lockmode],
1311  lock->tag.locktag_field1, lock->tag.locktag_field2,
1312  lock->tag.locktag_field3);
1313  break;
1314  }
1315  }
1316  }
1317 #endif /* CHECK_DEADLOCK_RISK */
1318  }
1319 
1320  /*
1321  * lock->nRequested and lock->requested[] count the total number of
1322  * requests, whether granted or waiting, so increment those immediately.
1323  * The other counts don't increment till we get the lock.
1324  */
1325  lock->nRequested++;
1326  lock->requested[lockmode]++;
1327  Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
1328 
1329  /*
1330  * We shouldn't already hold the desired lock; else locallock table is
1331  * broken.
1332  */
1333  if (proclock->holdMask & LOCKBIT_ON(lockmode))
1334  elog(ERROR, "lock %s on object %u/%u/%u is already held",
1335  lockMethodTable->lockModeNames[lockmode],
1336  lock->tag.locktag_field1, lock->tag.locktag_field2,
1337  lock->tag.locktag_field3);
1338 
1339  return proclock;
1340 }

References Assert(), dclist_init(), dlist_init(), dlist_is_empty(), dlist_push_tail(), elog(), ERROR, LOCK::granted, LOCK::grantMask, PROCLOCK::groupLeader, HASH_ENTER_NULL, HASH_REMOVE, hash_search_with_hash_value(), PROCLOCK::holdMask, i, LOCK_PRINT, LOCKBIT_ON, PGPROC::lockGroupLeader, LockHashPartition, PROCLOCK::lockLink, LockMethodLockHash, LockMethodProcLockHash, LockMethodData::lockModeNames, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_field3, LOG, MAX_LOCKMODES, MemSet, PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, PGPROC::myProcLocks, LOCK::nGranted, LOCK::nRequested, LockMethodData::numLockModes, PANIC, PROCLOCK::procLink, PROCLOCK_PRINT, ProcLockHashCode(), LOCK::procLocks, PROCLOCK::releaseMask, LOCK::requested, LOCK::tag, LOCK::waitMask, and LOCK::waitProcs.

Referenced by FastPathGetRelationLockEntry(), FastPathTransferRelationLocks(), LockAcquireExtended(), and VirtualXactLock().

◆ UnGrantLock()

static bool UnGrantLock ( LOCK lock,
LOCKMODE  lockmode,
PROCLOCK proclock,
LockMethod  lockMethodTable 
)
static

Definition at line 1573 of file lock.c.

/*
 * Revoke one granted lockmode from a proclock: decrement the lock's request
 * and grant counters, clear the grantMask bit when the last grant of this
 * mode goes away, and clear the holdMask bit in the proclock.  Returns true
 * if the released mode conflicts with some waiting request, i.e. the caller
 * should run ProcLockWakeup (via CleanUpLock).
 */
1575 {
1576  bool wakeupNeeded = false;
1577 
1578  Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
1579  Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
1580  Assert(lock->nGranted <= lock->nRequested);
1581 
1582  /*
1583  * fix the general lock stats
1584  */
1585  lock->nRequested--;
1586  lock->requested[lockmode]--;
1587  lock->nGranted--;
1588  lock->granted[lockmode]--;
1589 
1590  if (lock->granted[lockmode] == 0)
1591  {
1592  /* change the conflict mask. No more of this lock type. */
1593  lock->grantMask &= LOCKBIT_OFF(lockmode);
1594  }
1595 
1596  LOCK_PRINT("UnGrantLock: updated", lock, lockmode);
1597 
1598  /*
1599  * We need only run ProcLockWakeup if the released lock conflicts with at
1600  * least one of the lock types requested by waiter(s). Otherwise whatever
1601  * conflict made them wait must still exist. NOTE: before MVCC, we could
1602  * skip wakeup if lock->granted[lockmode] was still positive. But that's
1603  * not true anymore, because the remaining granted locks might belong to
1604  * some waiter, who could now be awakened because he doesn't conflict with
1605  * his own locks.
1606  */
1607  if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
1608  wakeupNeeded = true;
1609 
1610  /*
1611  * Now fix the per-proclock state.
1612  */
1613  proclock->holdMask &= LOCKBIT_OFF(lockmode);
1614  PROCLOCK_PRINT("UnGrantLock: updated", proclock);
1615 
1616  return wakeupNeeded;
1617 }

References Assert(), LockMethodData::conflictTab, LOCK::granted, LOCK::grantMask, PROCLOCK::holdMask, LOCK_PRINT, LOCKBIT_OFF, LOCK::nGranted, LOCK::nRequested, PROCLOCK_PRINT, LOCK::requested, and LOCK::waitMask.

Referenced by LockRefindAndRelease(), LockRelease(), and LockReleaseAll().

◆ VirtualXactLock()

bool VirtualXactLock ( VirtualTransactionId  vxid,
bool  wait 
)

Definition at line 4534 of file lock.c.

/*
 * Wait (or test) for the transaction identified by 'vxid' to finish.
 * Returns via XactLockForVirtualXact() once the VXID is gone or its XID
 * lock has been dealt with; returns false immediately when !wait and the
 * transaction is still running.  If the target backend still holds its VXID
 * lock only as a fast-path lock, it is first promoted to a regular lock
 * table entry so this backend has something to sleep on.
 *
 * NOTE(review): the doc extractor dropped several original lines from this
 * listing, including 4538-4542 (the local TransactionId declaration, the
 * VirtualTransactionIdIsValid Assert, and the recovered-prepared-xact test),
 * 4565 (the LWLockAcquire of proc->fpInfoLock), and 4601 (the
 * SetupLockInTable() call); confirm against lock.c.
 */
4535 {
4536  LOCKTAG tag;
4537  PGPROC *proc;
4539 
4541 
4543  /* no vxid lock; localTransactionId is a normal, locked XID */
4544  return XactLockForVirtualXact(vxid, vxid.localTransactionId, wait);
4545 
4546  SET_LOCKTAG_VIRTUALTRANSACTION(tag, vxid);
4547 
4548  /*
4549  * If a lock table entry must be made, this is the PGPROC on whose behalf
4550  * it must be done. Note that the transaction might end or the PGPROC
4551  * might be reassigned to a new backend before we get around to examining
4552  * it, but it doesn't matter. If we find upon examination that the
4553  * relevant lxid is no longer running here, that's enough to prove that
4554  * it's no longer running anywhere.
4555  */
4556  proc = BackendIdGetProc(vxid.backendId);
4557  if (proc == NULL)
4558  return XactLockForVirtualXact(vxid, InvalidTransactionId, wait);
4559 
4560  /*
4561  * We must acquire this lock before checking the backendId and lxid
4562  * against the ones we're waiting for. The target backend will only set
4563  * or clear lxid while holding this lock.
4564  */
4566 
4567  if (proc->backendId != vxid.backendId
4568  || proc->fpLocalTransactionId != vxid.localTransactionId)
4569  {
4570  /* VXID ended */
4571  LWLockRelease(&proc->fpInfoLock);
4572  return XactLockForVirtualXact(vxid, InvalidTransactionId, wait);
4573  }
4574 
4575  /*
4576  * If we aren't asked to wait, there's no need to set up a lock table
4577  * entry. The transaction is still in progress, so just return false.
4578  */
4579  if (!wait)
4580  {
4581  LWLockRelease(&proc->fpInfoLock);
4582  return false;
4583  }
4584 
4585  /*
4586  * OK, we're going to need to sleep on the VXID. But first, we must set
4587  * up the primary lock table entry, if needed (ie, convert the proc's
4588  * fast-path lock on its VXID to a regular lock).
4589  */
4590  if (proc->fpVXIDLock)
4591  {
4592  PROCLOCK *proclock;
4593  uint32 hashcode;
4594  LWLock *partitionLock;
4595 
4596  hashcode = LockTagHashCode(&tag);
4597 
4598  partitionLock = LockHashPartitionLock(hashcode);
4599  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4600 
4602  &tag, hashcode, ExclusiveLock);
4603  if (!proclock)
4604  {
4605  LWLockRelease(partitionLock);
4606  LWLockRelease(&proc->fpInfoLock);
4607  ereport(ERROR,
4608  (errcode(ERRCODE_OUT_OF_MEMORY),
4609  errmsg("out of shared memory"),
4610  errhint("You might need to increase max_locks_per_transaction.")));
4611  }
4612  GrantLock(proclock->tag.myLock, proclock, ExclusiveLock);
4613 
4614  LWLockRelease(partitionLock);
4615 
4616  proc->fpVXIDLock = false;
4617  }
4618 
4619  /*
4620  * If the proc has an XID now, we'll avoid a TwoPhaseGetXidByVirtualXID()
4621  * search. The proc might have assigned this XID but not yet locked it,
4622  * in which case the proc will lock this XID before releasing the VXID.
4623  * The fpInfoLock critical section excludes VirtualXactLockTableCleanup(),
4624  * so we won't save an XID of a different VXID. It doesn't matter whether
4625  * we save this before or after setting up the primary lock table entry.
4626  */
4627  xid = proc->xid;
4628 
4629  /* Done with proc->fpLockBits */
4630  LWLockRelease(&proc->fpInfoLock);
4631 
4632  /* Time to wait. */
4633  (void) LockAcquire(&tag, ShareLock, false, false);
4634 
4635  LockRelease(&tag, ShareLock, false);
4636  return XactLockForVirtualXact(vxid, xid, wait);
4637 }
static bool XactLockForVirtualXact(VirtualTransactionId vxid, TransactionId xid, bool wait)
Definition: lock.c:4483
LockAcquireResult LockAcquire(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock, bool dontWait)
Definition: lock.c:747
#define VirtualTransactionIdIsRecoveredPreparedXact(vxid)
Definition: lock.h:69
#define ShareLock
Definition: lockdefs.h:40
PGPROC * BackendIdGetProc(int backendID)
Definition: sinvaladt.c:385
#define InvalidTransactionId
Definition: transam.h:31

References Assert(), VirtualTransactionId::backendId, PGPROC::backendId, BackendIdGetProc(), DEFAULT_LOCKMETHOD, ereport, errcode(), errhint(), errmsg(), ERROR, ExclusiveLock, PGPROC::fpInfoLock, PGPROC::fpLocalTransactionId, PGPROC::fpVXIDLock, GrantLock(), InvalidTransactionId, VirtualTransactionId::localTransactionId, LockAcquire(), LockHashPartitionLock, LockMethods, LockRelease(), LockTagHashCode(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), PROCLOCKTAG::myLock, SET_LOCKTAG_VIRTUALTRANSACTION, SetupLockInTable(), ShareLock, PROCLOCK::tag, VirtualTransactionIdIsRecoveredPreparedXact, VirtualTransactionIdIsValid, XactLockForVirtualXact(), and PGPROC::xid.

Referenced by ResolveRecoveryConflictWithVirtualXIDs(), WaitForLockersMultiple(), and WaitForOlderSnapshots().

◆ VirtualXactLockTableCleanup()

void VirtualXactLockTableCleanup ( void  )

Definition at line 4434 of file lock.c.

4435 {
4436  bool fastpath;
4437  LocalTransactionId lxid;
4438 
4439  Assert(MyProc->backendId != InvalidBackendId);
4440 
4441  /*
4442  * Clean up shared memory state.
4443  */
4444  LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
4445 
4446  fastpath = MyProc->fpVXIDLock;
4447  lxid = MyProc->fpLocalTransactionId;
4448  MyProc->fpVXIDLock = false;
4449  MyProc->fpLocalTransactionId = InvalidLocalTransactionId;
4450 
4451  LWLockRelease(&MyProc->fpInfoLock);
4452 
4453  /*
4454  * If fpVXIDLock has been cleared without touching fpLocalTransactionId,
4455  * that means someone transferred the lock to the main lock table.
4456  */
4457  if (!fastpath && LocalTransactionIdIsValid(lxid))
4458  {
4459  VirtualTransactionId vxid;
4460  LOCKTAG locktag;
4461 
4462  vxid.backendId = MyBackendId;
4463  vxid.localTransactionId = lxid;
4464  SET_LOCKTAG_VIRTUALTRANSACTION(locktag, vxid);
4465 
4466  LockRefindAndRelease(LockMethods[DEFAULT_LOCKMETHOD], MyProc,
4467  &locktag, ExclusiveLock, false);
4468  }
4469 }
uint32 LocalTransactionId
Definition: c.h:638
BackendId MyBackendId
Definition: globals.c:85
#define LocalTransactionIdIsValid(lxid)
Definition: lock.h:66

References Assert(), VirtualTransactionId::backendId, PGPROC::backendId, DEFAULT_LOCKMETHOD, ExclusiveLock, PGPROC::fpInfoLock, PGPROC::fpLocalTransactionId, PGPROC::fpVXIDLock, InvalidBackendId, InvalidLocalTransactionId, VirtualTransactionId::localTransactionId, LocalTransactionIdIsValid, LockMethods, LockRefindAndRelease(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), MyBackendId, MyProc, and SET_LOCKTAG_VIRTUALTRANSACTION.

Referenced by LockReleaseAll(), and ShutdownRecoveryTransactionEnvironment().

◆ VirtualXactLockTableInsert()

◆ WaitOnLock()

static void WaitOnLock ( LOCALLOCK locallock,
ResourceOwner  owner 
)
static

Definition at line 1809 of file lock.c.

1810 {
1811  LOCKMETHODID lockmethodid = LOCALLOCK_LOCKMETHOD(*locallock);
1812  LockMethod lockMethodTable = LockMethods[lockmethodid];
1813 
1814  LOCK_PRINT("WaitOnLock: sleeping on lock",
1815  locallock->lock, locallock->tag.mode);
1816 
1817  /* adjust the process title to indicate that it's waiting */
1818  set_ps_display_suffix("waiting");
1819 
1820  awaitedLock = locallock;
1821  awaitedOwner = owner;
1822 
1823  /*
1824  * NOTE: Think not to put any shared-state cleanup after the call to
1825  * ProcSleep, in either the normal or failure path. The lock state must
1826  * be fully set by the lock grantor, or by CheckDeadLock if we give up
1827  * waiting for the lock. This is necessary because of the possibility
1828  * that a cancel/die interrupt will interrupt ProcSleep after someone else
1829  * grants us the lock, but before we've noticed it. Hence, after granting,
1830  * the locktable state must fully reflect the fact that we own the lock;
1831  * we can't do additional work on return.
1832  *
1833  * We can and do use a PG_TRY block to try to clean up after failure, but
1834  * this still has a major limitation: elog(FATAL) can occur while waiting
1835  * (eg, a "die" interrupt), and then control won't come back here. So all
1836  * cleanup of essential state should happen in LockErrorCleanup, not here.
1837  * We can use PG_TRY to clear the "waiting" status flags, since doing that
1838  * is unimportant if the process exits.
1839  */
1840  PG_TRY();
1841  {
1842  if (ProcSleep(locallock, lockMethodTable) != PROC_WAIT_STATUS_OK)
1843  {
1844  /*
1845  * We failed as a result of a deadlock, see CheckDeadLock(). Quit
1846  * now.
1847  */
1848  awaitedLock = NULL;
1849  LOCK_PRINT("WaitOnLock: aborting on lock",
1850  locallock->lock, locallock->tag.mode);
1851  LWLockRelease(LockHashPartitionLock(locallock->hashcode));
1852 
1853  /*
1854  * Now that we aren't holding the partition lock, we can give an
1855  * error report including details about the detected deadlock.
1856  */
1857  DeadLockReport();
1858  /* not reached */
1859  }
1860  }
1861  PG_CATCH();
1862  {
1863  /* In this path, awaitedLock remains set until LockErrorCleanup */
1864 
1865  /* reset ps display to remove the suffix */
1866  set_ps_display_remove_suffix();
1867 
1868  /* and propagate the error */
1869  PG_RE_THROW();
1870  }
1871  PG_END_TRY();
1872 
1873  awaitedLock = NULL;
1874 
1875  /* reset ps display to remove the suffix */
1876  set_ps_display_remove_suffix();
1877 
1878  LOCK_PRINT("WaitOnLock: wakeup on lock",
1879  locallock->lock, locallock->tag.mode);
1880 }
void DeadLockReport(void)
Definition: deadlock.c:1073
#define PG_RE_THROW()
Definition: elog.h:411
#define PG_TRY(...)
Definition: elog.h:370
#define PG_END_TRY(...)
Definition: elog.h:395
#define PG_CATCH(...)
Definition: elog.h:380
@ PROC_WAIT_STATUS_OK
Definition: proc.h:124
void set_ps_display_remove_suffix(void)
Definition: ps_status.c:396
void set_ps_display_suffix(const char *suffix)
Definition: ps_status.c:344
ProcWaitStatus ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
Definition: proc.c:1004

References awaitedLock, awaitedOwner, DeadLockReport(), LOCALLOCK::hashcode, LOCALLOCK_LOCKMETHOD, LOCALLOCK::lock, LOCK_PRINT, LockHashPartitionLock, LockMethods, LWLockRelease(), LOCALLOCKTAG::mode, PG_CATCH, PG_END_TRY, PG_RE_THROW, PG_TRY, PROC_WAIT_STATUS_OK, ProcSleep(), set_ps_display_remove_suffix(), set_ps_display_suffix(), and LOCALLOCK::tag.

Referenced by LockAcquireExtended().

◆ XactLockForVirtualXact()

static bool XactLockForVirtualXact ( VirtualTransactionId  vxid,
TransactionId  xid,
bool  wait 
)
static

Definition at line 4483 of file lock.c.

4485 {
4486  bool more = false;
4487 
4488  /* There is no point to wait for 2PCs if you have no 2PCs. */
4489  if (max_prepared_xacts == 0)
4490  return true;
4491 
4492  do
4493  {
4494  LockAcquireResult lar;
4495  LOCKTAG tag;
4496 
4497  /* Clear state from previous iterations. */
4498  if (more)
4499  {
4500  xid = InvalidTransactionId;
4501  more = false;
4502  }
4503 
4504  /* If we have no xid, try to find one. */
4505  if (!TransactionIdIsValid(xid))
4506  xid = TwoPhaseGetXidByVirtualXID(vxid, &more);
4507  if (!TransactionIdIsValid(xid))
4508  {
4509  Assert(!more);
4510  return true;
4511  }
4512 
4513  /* Check or wait for XID completion. */
4514  SET_LOCKTAG_TRANSACTION(tag, xid);
4515  lar = LockAcquire(&tag, ShareLock, false, !wait);
4516  if (lar == LOCKACQUIRE_NOT_AVAIL)
4517  return false;
4518  LockRelease(&tag, ShareLock, false);
4519  } while (more);
4520 
4521  return true;
4522 }
#define SET_LOCKTAG_TRANSACTION(locktag, xid)
Definition: lock.h:226
LockAcquireResult
Definition: lock.h:501
TransactionId TwoPhaseGetXidByVirtualXID(VirtualTransactionId vxid, bool *have_more)
Definition: twophase.c:870

References Assert(), InvalidTransactionId, LockAcquire(), LOCKACQUIRE_NOT_AVAIL, LockRelease(), max_prepared_xacts, SET_LOCKTAG_TRANSACTION, ShareLock, TransactionIdIsValid, and TwoPhaseGetXidByVirtualXID().

Referenced by VirtualXactLock().

Variable Documentation

◆ awaitedLock

LOCALLOCK* awaitedLock
static

Definition at line 288 of file lock.c.

Referenced by GrantAwaitedLock(), and WaitOnLock().

◆ awaitedOwner

ResourceOwner awaitedOwner
static

Definition at line 289 of file lock.c.

Referenced by GrantAwaitedLock(), and WaitOnLock().

◆ default_lockmethod

const LockMethodData default_lockmethod
static
Initial value:
= {
 MaxLockMode,
 LockConflicts,
 lock_mode_names,
 &Dummy_trace
}
static bool Dummy_trace
Definition: lock.c:123
static const char *const lock_mode_names[]
Definition: lock.c:109
static const LOCKMASK LockConflicts[]
Definition: lock.c:66
#define MaxLockMode
Definition: lockdefs.h:45

Definition at line 126 of file lock.c.

◆ Dummy_trace

bool Dummy_trace = false
static

Definition at line 123 of file lock.c.

◆ FastPathLocalUseCount

int FastPathLocalUseCount = 0
static

◆ FastPathStrongRelationLocks

◆ lock_mode_names

const char* const lock_mode_names[]
static
Initial value:
=
{
"INVALID",
"AccessShareLock",
"RowShareLock",
"RowExclusiveLock",
"ShareUpdateExclusiveLock",
"ShareLock",
"ShareRowExclusiveLock",
"ExclusiveLock",
"AccessExclusiveLock"
}

Definition at line 109 of file lock.c.

◆ LockConflicts

const LOCKMASK LockConflicts[]
static

Definition at line 66 of file lock.c.

◆ LockMethodLocalHash

◆ LockMethodLockHash

◆ LockMethodProcLockHash

◆ LockMethods

◆ max_locks_per_xact

int max_locks_per_xact

Definition at line 55 of file lock.c.

Referenced by CheckRequiredParameterValues(), InitControlFile(), and XLogReportParameters().

◆ PG_USED_FOR_ASSERTS_ONLY

bool IsPageLockHeld PG_USED_FOR_ASSERTS_ONLY = false
static

Definition at line 187 of file lock.c.

◆ StrongLockInProgress

LOCALLOCK* StrongLockInProgress
static

Definition at line 287 of file lock.c.

Referenced by AbortStrongLockAcquire(), BeginStrongLockAcquire(), and FinishStrongLockAcquire().

◆ user_lockmethod