PostgreSQL Source Code  git master
lock.c File Reference
#include "postgres.h"
#include <signal.h>
#include <unistd.h>
#include "access/transam.h"
#include "access/twophase.h"
#include "access/twophase_rmgr.h"
#include "access/xlog.h"
#include "access/xlogutils.h"
#include "miscadmin.h"
#include "pg_trace.h"
#include "storage/proc.h"
#include "storage/procarray.h"
#include "storage/sinvaladt.h"
#include "storage/spin.h"
#include "storage/standby.h"
#include "utils/memutils.h"
#include "utils/ps_status.h"
#include "utils/resowner.h"
Include dependency graph for lock.c:

Go to the source code of this file.

Data Structures

struct  TwoPhaseLockRecord
 
struct  FastPathStrongRelationLockData
 

Macros

#define NLOCKENTS()    mul_size(max_locks_per_xact, add_size(MaxBackends, max_prepared_xacts))
 
#define FAST_PATH_BITS_PER_SLOT   3
 
#define FAST_PATH_LOCKNUMBER_OFFSET   1
 
#define FAST_PATH_MASK   ((1 << FAST_PATH_BITS_PER_SLOT) - 1)
 
#define FAST_PATH_GET_BITS(proc, n)    (((proc)->fpLockBits >> (FAST_PATH_BITS_PER_SLOT * n)) & FAST_PATH_MASK)
 
#define FAST_PATH_BIT_POSITION(n, l)
 
#define FAST_PATH_SET_LOCKMODE(proc, n, l)    (proc)->fpLockBits |= UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)
 
#define FAST_PATH_CLEAR_LOCKMODE(proc, n, l)    (proc)->fpLockBits &= ~(UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l))
 
#define FAST_PATH_CHECK_LOCKMODE(proc, n, l)    ((proc)->fpLockBits & (UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)))
 
#define EligibleForRelationFastPath(locktag, mode)
 
#define ConflictsWithRelationFastPath(locktag, mode)
 
#define FAST_PATH_STRONG_LOCK_HASH_BITS   10
 
#define FAST_PATH_STRONG_LOCK_HASH_PARTITIONS    (1 << FAST_PATH_STRONG_LOCK_HASH_BITS)
 
#define FastPathStrongLockHashPartition(hashcode)    ((hashcode) % FAST_PATH_STRONG_LOCK_HASH_PARTITIONS)
 
#define LOCK_PRINT(where, lock, type)   ((void) 0)
 
#define PROCLOCK_PRINT(where, proclockP)   ((void) 0)
 

Typedefs

typedef struct TwoPhaseLockRecord TwoPhaseLockRecord
 

Functions

static bool FastPathGrantRelationLock (Oid relid, LOCKMODE lockmode)
 
static bool FastPathUnGrantRelationLock (Oid relid, LOCKMODE lockmode)
 
static bool FastPathTransferRelationLocks (LockMethod lockMethodTable, const LOCKTAG *locktag, uint32 hashcode)
 
static PROCLOCK * FastPathGetRelationLockEntry (LOCALLOCK *locallock)
 
static uint32 proclock_hash (const void *key, Size keysize)
 
static void RemoveLocalLock (LOCALLOCK *locallock)
 
static PROCLOCK * SetupLockInTable (LockMethod lockMethodTable, PGPROC *proc, const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode)
 
static void GrantLockLocal (LOCALLOCK *locallock, ResourceOwner owner)
 
static void BeginStrongLockAcquire (LOCALLOCK *locallock, uint32 fasthashcode)
 
static void FinishStrongLockAcquire (void)
 
static void WaitOnLock (LOCALLOCK *locallock, ResourceOwner owner, bool dontWait)
 
static void ReleaseLockIfHeld (LOCALLOCK *locallock, bool sessionLock)
 
static void LockReassignOwner (LOCALLOCK *locallock, ResourceOwner parent)
 
static bool UnGrantLock (LOCK *lock, LOCKMODE lockmode, PROCLOCK *proclock, LockMethod lockMethodTable)
 
static void CleanUpLock (LOCK *lock, PROCLOCK *proclock, LockMethod lockMethodTable, uint32 hashcode, bool wakeupNeeded)
 
static void LockRefindAndRelease (LockMethod lockMethodTable, PGPROC *proc, LOCKTAG *locktag, LOCKMODE lockmode, bool decrement_strong_lock_count)
 
static void GetSingleProcBlockerStatusData (PGPROC *blocked_proc, BlockedProcsData *data)
 
void InitLocks (void)
 
LockMethod GetLocksMethodTable (const LOCK *lock)
 
LockMethod GetLockTagsMethodTable (const LOCKTAG *locktag)
 
uint32 LockTagHashCode (const LOCKTAG *locktag)
 
static uint32 ProcLockHashCode (const PROCLOCKTAG *proclocktag, uint32 hashcode)
 
bool DoLockModesConflict (LOCKMODE mode1, LOCKMODE mode2)
 
bool LockHeldByMe (const LOCKTAG *locktag, LOCKMODE lockmode, bool orstronger)
 
bool LockHasWaiters (const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
 
LockAcquireResult LockAcquire (const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock, bool dontWait)
 
LockAcquireResult LockAcquireExtended (const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock, bool dontWait, bool reportMemoryError, LOCALLOCK **locallockp)
 
static void CheckAndSetLockHeld (LOCALLOCK *locallock, bool acquired)
 
bool LockCheckConflicts (LockMethod lockMethodTable, LOCKMODE lockmode, LOCK *lock, PROCLOCK *proclock)
 
void GrantLock (LOCK *lock, PROCLOCK *proclock, LOCKMODE lockmode)
 
void AbortStrongLockAcquire (void)
 
void GrantAwaitedLock (void)
 
void MarkLockClear (LOCALLOCK *locallock)
 
void RemoveFromWaitQueue (PGPROC *proc, uint32 hashcode)
 
bool LockRelease (const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
 
void LockReleaseAll (LOCKMETHODID lockmethodid, bool allLocks)
 
void LockReleaseSession (LOCKMETHODID lockmethodid)
 
void LockReleaseCurrentOwner (LOCALLOCK **locallocks, int nlocks)
 
void LockReassignCurrentOwner (LOCALLOCK **locallocks, int nlocks)
 
VirtualTransactionId * GetLockConflicts (const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
 
static void CheckForSessionAndXactLocks (void)
 
void AtPrepare_Locks (void)
 
void PostPrepare_Locks (TransactionId xid)
 
Size LockShmemSize (void)
 
LockData * GetLockStatusData (void)
 
BlockedProcsData * GetBlockerStatusData (int blocked_pid)
 
xl_standby_lock * GetRunningTransactionLocks (int *nlocks)
 
const char * GetLockmodeName (LOCKMETHODID lockmethodid, LOCKMODE mode)
 
void lock_twophase_recover (TransactionId xid, uint16 info, void *recdata, uint32 len)
 
void lock_twophase_standby_recover (TransactionId xid, uint16 info, void *recdata, uint32 len)
 
void lock_twophase_postcommit (TransactionId xid, uint16 info, void *recdata, uint32 len)
 
void lock_twophase_postabort (TransactionId xid, uint16 info, void *recdata, uint32 len)
 
void VirtualXactLockTableInsert (VirtualTransactionId vxid)
 
void VirtualXactLockTableCleanup (void)
 
static bool XactLockForVirtualXact (VirtualTransactionId vxid, TransactionId xid, bool wait)
 
bool VirtualXactLock (VirtualTransactionId vxid, bool wait)
 
int LockWaiterCount (const LOCKTAG *locktag)
 

Variables

int max_locks_per_xact
 
static const LOCKMASK LockConflicts []
 
static const char *const lock_mode_names []
 
static bool Dummy_trace = false
 
static const LockMethodData default_lockmethod
 
static const LockMethodData user_lockmethod
 
static const LockMethod LockMethods []
 
static int FastPathLocalUseCount = 0
 
static bool IsRelationExtensionLockHeld PG_USED_FOR_ASSERTS_ONLY = false
 
static volatile FastPathStrongRelationLockData * FastPathStrongRelationLocks
 
static HTAB * LockMethodLockHash
 
static HTAB * LockMethodProcLockHash
 
static HTAB * LockMethodLocalHash
 
static LOCALLOCK * StrongLockInProgress
 
static LOCALLOCK * awaitedLock
 
static ResourceOwner awaitedOwner
 

Macro Definition Documentation

◆ ConflictsWithRelationFastPath

#define ConflictsWithRelationFastPath (   locktag,
  mode 
)
Value:
((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
(locktag)->locktag_type == LOCKTAG_RELATION && \
(locktag)->locktag_field1 != InvalidOid && \
#define DEFAULT_LOCKMETHOD
Definition: lock.h:125
@ LOCKTAG_RELATION
Definition: lock.h:137
#define ShareUpdateExclusiveLock
Definition: lockdefs.h:39
static PgChecksumMode mode
Definition: pg_checksums.c:56
#define InvalidOid
Definition: postgres_ext.h:36

Definition at line 219 of file lock.c.

◆ EligibleForRelationFastPath

#define EligibleForRelationFastPath (   locktag,
  mode 
)
Value:
((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
(locktag)->locktag_type == LOCKTAG_RELATION && \
(locktag)->locktag_field1 == MyDatabaseId && \
Oid MyDatabaseId
Definition: globals.c:92

Definition at line 213 of file lock.c.

◆ FAST_PATH_BIT_POSITION

#define FAST_PATH_BIT_POSITION (   n,
  l 
)
Value:
AssertMacro((n) < FP_LOCK_SLOTS_PER_BACKEND), \
#define AssertMacro(condition)
Definition: c.h:859
#define FAST_PATH_LOCKNUMBER_OFFSET
Definition: lock.c:189
#define FAST_PATH_BITS_PER_SLOT
Definition: lock.c:188
#define FP_LOCK_SLOTS_PER_BACKEND
Definition: proc.h:80

Definition at line 193 of file lock.c.

◆ FAST_PATH_BITS_PER_SLOT

#define FAST_PATH_BITS_PER_SLOT   3

Definition at line 188 of file lock.c.

◆ FAST_PATH_CHECK_LOCKMODE

#define FAST_PATH_CHECK_LOCKMODE (   proc,
  n,
  l 
)     ((proc)->fpLockBits & (UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)))

Definition at line 202 of file lock.c.

◆ FAST_PATH_CLEAR_LOCKMODE

#define FAST_PATH_CLEAR_LOCKMODE (   proc,
  n,
  l 
)     (proc)->fpLockBits &= ~(UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l))

Definition at line 200 of file lock.c.

◆ FAST_PATH_GET_BITS

#define FAST_PATH_GET_BITS (   proc,
  n 
)     (((proc)->fpLockBits >> (FAST_PATH_BITS_PER_SLOT * n)) & FAST_PATH_MASK)

Definition at line 191 of file lock.c.

◆ FAST_PATH_LOCKNUMBER_OFFSET

#define FAST_PATH_LOCKNUMBER_OFFSET   1

Definition at line 189 of file lock.c.

◆ FAST_PATH_MASK

#define FAST_PATH_MASK   ((1 << FAST_PATH_BITS_PER_SLOT) - 1)

Definition at line 190 of file lock.c.

◆ FAST_PATH_SET_LOCKMODE

#define FAST_PATH_SET_LOCKMODE (   proc,
  n,
  l 
)     (proc)->fpLockBits |= UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)

Definition at line 198 of file lock.c.

◆ FAST_PATH_STRONG_LOCK_HASH_BITS

#define FAST_PATH_STRONG_LOCK_HASH_BITS   10

Definition at line 246 of file lock.c.

◆ FAST_PATH_STRONG_LOCK_HASH_PARTITIONS

#define FAST_PATH_STRONG_LOCK_HASH_PARTITIONS    (1 << FAST_PATH_STRONG_LOCK_HASH_BITS)

Definition at line 247 of file lock.c.

◆ FastPathStrongLockHashPartition

#define FastPathStrongLockHashPartition (   hashcode)     ((hashcode) % FAST_PATH_STRONG_LOCK_HASH_PARTITIONS)

Definition at line 249 of file lock.c.

◆ LOCK_PRINT

#define LOCK_PRINT (   where,
  lock,
  type 
)    ((void) 0)

Definition at line 351 of file lock.c.

◆ NLOCKENTS

Definition at line 55 of file lock.c.

◆ PROCLOCK_PRINT

#define PROCLOCK_PRINT (   where,
  proclockP 
)    ((void) 0)

Definition at line 352 of file lock.c.

Typedef Documentation

◆ TwoPhaseLockRecord

Function Documentation

◆ AbortStrongLockAcquire()

void AbortStrongLockAcquire ( void  )

Definition at line 1760 of file lock.c.

1761 {
1762  uint32 fasthashcode;
1763  LOCALLOCK *locallock = StrongLockInProgress;
1764 
1765  if (locallock == NULL)
1766  return;
1767 
1768  fasthashcode = FastPathStrongLockHashPartition(locallock->hashcode);
1769  Assert(locallock->holdsStrongLockCount == true);
1771  Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
1772  FastPathStrongRelationLocks->count[fasthashcode]--;
1773  locallock->holdsStrongLockCount = false;
1774  StrongLockInProgress = NULL;
1776 }
unsigned int uint32
Definition: c.h:506
#define Assert(condition)
Definition: c.h:858
#define FastPathStrongLockHashPartition(hashcode)
Definition: lock.c:249
static volatile FastPathStrongRelationLockData * FastPathStrongRelationLocks
Definition: lock.c:258
static LOCALLOCK * StrongLockInProgress
Definition: lock.c:273
#define SpinLockRelease(lock)
Definition: spin.h:64
#define SpinLockAcquire(lock)
Definition: spin.h:62
uint32 count[FAST_PATH_STRONG_LOCK_HASH_PARTITIONS]
Definition: lock.c:255
uint32 hashcode
Definition: lock.h:432
bool holdsStrongLockCount
Definition: lock.h:439

References Assert, FastPathStrongRelationLockData::count, FastPathStrongLockHashPartition, FastPathStrongRelationLocks, LOCALLOCK::hashcode, LOCALLOCK::holdsStrongLockCount, FastPathStrongRelationLockData::mutex, SpinLockAcquire, SpinLockRelease, and StrongLockInProgress.

Referenced by LockAcquireExtended(), and LockErrorCleanup().

◆ AtPrepare_Locks()

void AtPrepare_Locks ( void  )

Definition at line 3294 of file lock.c.

3295 {
3296  HASH_SEQ_STATUS status;
3297  LOCALLOCK *locallock;
3298 
3299  /* First, verify there aren't locks of both xact and session level */
3301 
3302  /* Now do the per-locallock cleanup work */
3304 
3305  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3306  {
3307  TwoPhaseLockRecord record;
3308  LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3309  bool haveSessionLock;
3310  bool haveXactLock;
3311  int i;
3312 
3313  /*
3314  * Ignore VXID locks. We don't want those to be held by prepared
3315  * transactions, since they aren't meaningful after a restart.
3316  */
3317  if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3318  continue;
3319 
3320  /* Ignore it if we don't actually hold the lock */
3321  if (locallock->nLocks <= 0)
3322  continue;
3323 
3324  /* Scan to see whether we hold it at session or transaction level */
3325  haveSessionLock = haveXactLock = false;
3326  for (i = locallock->numLockOwners - 1; i >= 0; i--)
3327  {
3328  if (lockOwners[i].owner == NULL)
3329  haveSessionLock = true;
3330  else
3331  haveXactLock = true;
3332  }
3333 
3334  /* Ignore it if we have only session lock */
3335  if (!haveXactLock)
3336  continue;
3337 
3338  /* This can't happen, because we already checked it */
3339  if (haveSessionLock)
3340  ereport(ERROR,
3341  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3342  errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3343 
3344  /*
3345  * If the local lock was taken via the fast-path, we need to move it
3346  * to the primary lock table, or just get a pointer to the existing
3347  * primary lock table entry if by chance it's already been
3348  * transferred.
3349  */
3350  if (locallock->proclock == NULL)
3351  {
3352  locallock->proclock = FastPathGetRelationLockEntry(locallock);
3353  locallock->lock = locallock->proclock->tag.myLock;
3354  }
3355 
3356  /*
3357  * Arrange to not release any strong lock count held by this lock
3358  * entry. We must retain the count until the prepared transaction is
3359  * committed or rolled back.
3360  */
3361  locallock->holdsStrongLockCount = false;
3362 
3363  /*
3364  * Create a 2PC record.
3365  */
3366  memcpy(&(record.locktag), &(locallock->tag.lock), sizeof(LOCKTAG));
3367  record.lockmode = locallock->tag.mode;
3368 
3370  &record, sizeof(TwoPhaseLockRecord));
3371  }
3372 }
void * hash_seq_search(HASH_SEQ_STATUS *status)
Definition: dynahash.c:1395
void hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp)
Definition: dynahash.c:1385
int errcode(int sqlerrcode)
Definition: elog.c:853
int errmsg(const char *fmt,...)
Definition: elog.c:1070
#define ERROR
Definition: elog.h:39
#define ereport(elevel,...)
Definition: elog.h:149
int i
Definition: isn.c:73
static PROCLOCK * FastPathGetRelationLockEntry(LOCALLOCK *locallock)
Definition: lock.c:2790
static HTAB * LockMethodLocalHash
Definition: lock.c:269
static void CheckForSessionAndXactLocks(void)
Definition: lock.c:3206
@ LOCKTAG_VIRTUALTRANSACTION
Definition: lock.h:143
LOCKTAG lock
Definition: lock.h:410
LOCKMODE mode
Definition: lock.h:411
LOCALLOCKOWNER * lockOwners
Definition: lock.h:438
LOCK * lock
Definition: lock.h:433
int64 nLocks
Definition: lock.h:435
int numLockOwners
Definition: lock.h:436
PROCLOCK * proclock
Definition: lock.h:434
LOCALLOCKTAG tag
Definition: lock.h:429
Definition: lock.h:165
uint8 locktag_type
Definition: lock.h:170
LOCK * myLock
Definition: lock.h:365
PROCLOCKTAG tag
Definition: lock.h:372
LOCKTAG locktag
Definition: lock.c:159
LOCKMODE lockmode
Definition: lock.c:160
void RegisterTwoPhaseRecord(TwoPhaseRmgrId rmid, uint16 info, const void *data, uint32 len)
Definition: twophase.c:1280
#define TWOPHASE_RM_LOCK_ID
Definition: twophase_rmgr.h:25

References CheckForSessionAndXactLocks(), ereport, errcode(), errmsg(), ERROR, FastPathGetRelationLockEntry(), hash_seq_init(), hash_seq_search(), LOCALLOCK::holdsStrongLockCount, i, LOCALLOCKTAG::lock, LOCALLOCK::lock, LockMethodLocalHash, TwoPhaseLockRecord::lockmode, LOCALLOCK::lockOwners, TwoPhaseLockRecord::locktag, LOCKTAG::locktag_type, LOCKTAG_VIRTUALTRANSACTION, LOCALLOCKTAG::mode, PROCLOCKTAG::myLock, LOCALLOCK::nLocks, LOCALLOCK::numLockOwners, LOCALLOCK::proclock, RegisterTwoPhaseRecord(), PROCLOCK::tag, LOCALLOCK::tag, and TWOPHASE_RM_LOCK_ID.

Referenced by PrepareTransaction().

◆ BeginStrongLockAcquire()

static void BeginStrongLockAcquire ( LOCALLOCK locallock,
uint32  fasthashcode 
)
static

Definition at line 1724 of file lock.c.

1725 {
1726  Assert(StrongLockInProgress == NULL);
1727  Assert(locallock->holdsStrongLockCount == false);
1728 
1729  /*
1730  * Adding to a memory location is not atomic, so we take a spinlock to
1731  * ensure we don't collide with someone else trying to bump the count at
1732  * the same time.
1733  *
1734  * XXX: It might be worth considering using an atomic fetch-and-add
1735  * instruction here, on architectures where that is supported.
1736  */
1737 
1739  FastPathStrongRelationLocks->count[fasthashcode]++;
1740  locallock->holdsStrongLockCount = true;
1741  StrongLockInProgress = locallock;
1743 }

References Assert, FastPathStrongRelationLockData::count, FastPathStrongRelationLocks, LOCALLOCK::holdsStrongLockCount, FastPathStrongRelationLockData::mutex, SpinLockAcquire, SpinLockRelease, and StrongLockInProgress.

Referenced by LockAcquireExtended().

◆ CheckAndSetLockHeld()

static void CheckAndSetLockHeld ( LOCALLOCK locallock,
bool  acquired 
)
inlinestatic

Definition at line 1364 of file lock.c.

1365 {
1366 #ifdef USE_ASSERT_CHECKING
1367  if (LOCALLOCK_LOCKTAG(*locallock) == LOCKTAG_RELATION_EXTEND)
1368  IsRelationExtensionLockHeld = acquired;
1369 #endif
1370 }
@ LOCKTAG_RELATION_EXTEND
Definition: lock.h:138
#define LOCALLOCK_LOCKTAG(llock)
Definition: lock.h:444

References LOCALLOCK_LOCKTAG, and LOCKTAG_RELATION_EXTEND.

Referenced by GrantLockLocal(), and RemoveLocalLock().

◆ CheckForSessionAndXactLocks()

static void CheckForSessionAndXactLocks ( void  )
static

Definition at line 3206 of file lock.c.

3207 {
3208  typedef struct
3209  {
3210  LOCKTAG lock; /* identifies the lockable object */
3211  bool sessLock; /* is any lockmode held at session level? */
3212  bool xactLock; /* is any lockmode held at xact level? */
3213  } PerLockTagEntry;
3214 
3215  HASHCTL hash_ctl;
3216  HTAB *lockhtab;
3217  HASH_SEQ_STATUS status;
3218  LOCALLOCK *locallock;
3219 
3220  /* Create a local hash table keyed by LOCKTAG only */
3221  hash_ctl.keysize = sizeof(LOCKTAG);
3222  hash_ctl.entrysize = sizeof(PerLockTagEntry);
3223  hash_ctl.hcxt = CurrentMemoryContext;
3224 
3225  lockhtab = hash_create("CheckForSessionAndXactLocks table",
3226  256, /* arbitrary initial size */
3227  &hash_ctl,
3229 
3230  /* Scan local lock table to find entries for each LOCKTAG */
3232 
3233  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3234  {
3235  LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3236  PerLockTagEntry *hentry;
3237  bool found;
3238  int i;
3239 
3240  /*
3241  * Ignore VXID locks. We don't want those to be held by prepared
3242  * transactions, since they aren't meaningful after a restart.
3243  */
3244  if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3245  continue;
3246 
3247  /* Ignore it if we don't actually hold the lock */
3248  if (locallock->nLocks <= 0)
3249  continue;
3250 
3251  /* Otherwise, find or make an entry in lockhtab */
3252  hentry = (PerLockTagEntry *) hash_search(lockhtab,
3253  &locallock->tag.lock,
3254  HASH_ENTER, &found);
3255  if (!found) /* initialize, if newly created */
3256  hentry->sessLock = hentry->xactLock = false;
3257 
3258  /* Scan to see if we hold lock at session or xact level or both */
3259  for (i = locallock->numLockOwners - 1; i >= 0; i--)
3260  {
3261  if (lockOwners[i].owner == NULL)
3262  hentry->sessLock = true;
3263  else
3264  hentry->xactLock = true;
3265  }
3266 
3267  /*
3268  * We can throw error immediately when we see both types of locks; no
3269  * need to wait around to see if there are more violations.
3270  */
3271  if (hentry->sessLock && hentry->xactLock)
3272  ereport(ERROR,
3273  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3274  errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3275  }
3276 
3277  /* Success, so clean up */
3278  hash_destroy(lockhtab);
3279 }
void hash_destroy(HTAB *hashp)
Definition: dynahash.c:865
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:955
HTAB * hash_create(const char *tabname, long nelem, const HASHCTL *info, int flags)
Definition: dynahash.c:352
@ HASH_ENTER
Definition: hsearch.h:114
#define HASH_CONTEXT
Definition: hsearch.h:102
#define HASH_ELEM
Definition: hsearch.h:95
#define HASH_BLOBS
Definition: hsearch.h:97
struct LOCKTAG LOCKTAG
MemoryContext CurrentMemoryContext
Definition: mcxt.c:143
Size keysize
Definition: hsearch.h:75
Size entrysize
Definition: hsearch.h:76
MemoryContext hcxt
Definition: hsearch.h:86
Definition: dynahash.c:220

References CurrentMemoryContext, HASHCTL::entrysize, ereport, errcode(), errmsg(), ERROR, HASH_BLOBS, HASH_CONTEXT, hash_create(), hash_destroy(), HASH_ELEM, HASH_ENTER, hash_search(), hash_seq_init(), hash_seq_search(), HASHCTL::hcxt, i, HASHCTL::keysize, LOCALLOCKTAG::lock, LockMethodLocalHash, LOCALLOCK::lockOwners, LOCKTAG::locktag_type, LOCKTAG_VIRTUALTRANSACTION, LOCALLOCK::nLocks, LOCALLOCK::numLockOwners, and LOCALLOCK::tag.

Referenced by AtPrepare_Locks().

◆ CleanUpLock()

static void CleanUpLock ( LOCK lock,
PROCLOCK proclock,
LockMethod  lockMethodTable,
uint32  hashcode,
bool  wakeupNeeded 
)
static

Definition at line 1638 of file lock.c.

1641 {
1642  /*
1643  * If this was my last hold on this lock, delete my entry in the proclock
1644  * table.
1645  */
1646  if (proclock->holdMask == 0)
1647  {
1648  uint32 proclock_hashcode;
1649 
1650  PROCLOCK_PRINT("CleanUpLock: deleting", proclock);
1651  dlist_delete(&proclock->lockLink);
1652  dlist_delete(&proclock->procLink);
1653  proclock_hashcode = ProcLockHashCode(&proclock->tag, hashcode);
1655  &(proclock->tag),
1656  proclock_hashcode,
1657  HASH_REMOVE,
1658  NULL))
1659  elog(PANIC, "proclock table corrupted");
1660  }
1661 
1662  if (lock->nRequested == 0)
1663  {
1664  /*
1665  * The caller just released the last lock, so garbage-collect the lock
1666  * object.
1667  */
1668  LOCK_PRINT("CleanUpLock: deleting", lock, 0);
1669  Assert(dlist_is_empty(&lock->procLocks));
1671  &(lock->tag),
1672  hashcode,
1673  HASH_REMOVE,
1674  NULL))
1675  elog(PANIC, "lock table corrupted");
1676  }
1677  else if (wakeupNeeded)
1678  {
1679  /* There are waiters on this lock, so wake them up. */
1680  ProcLockWakeup(lockMethodTable, lock);
1681  }
1682 }
void * hash_search_with_hash_value(HTAB *hashp, const void *keyPtr, uint32 hashvalue, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:968
#define PANIC
Definition: elog.h:42
#define elog(elevel,...)
Definition: elog.h:224
@ HASH_REMOVE
Definition: hsearch.h:115
static void dlist_delete(dlist_node *node)
Definition: ilist.h:405
static bool dlist_is_empty(const dlist_head *head)
Definition: ilist.h:336
#define LOCK_PRINT(where, lock, type)
Definition: lock.c:351
static uint32 ProcLockHashCode(const PROCLOCKTAG *proclocktag, uint32 hashcode)
Definition: lock.c:552
#define PROCLOCK_PRINT(where, proclockP)
Definition: lock.c:352
static HTAB * LockMethodLockHash
Definition: lock.c:267
static HTAB * LockMethodProcLockHash
Definition: lock.c:268
void ProcLockWakeup(LockMethod lockMethodTable, LOCK *lock)
Definition: proc.c:1706
int nRequested
Definition: lock.h:319
LOCKTAG tag
Definition: lock.h:311
dlist_head procLocks
Definition: lock.h:316
LOCKMASK holdMask
Definition: lock.h:376
dlist_node lockLink
Definition: lock.h:378
dlist_node procLink
Definition: lock.h:379

References Assert, dlist_delete(), dlist_is_empty(), elog, HASH_REMOVE, hash_search_with_hash_value(), PROCLOCK::holdMask, LOCK_PRINT, PROCLOCK::lockLink, LockMethodLockHash, LockMethodProcLockHash, LOCK::nRequested, PANIC, PROCLOCK::procLink, PROCLOCK_PRINT, ProcLockHashCode(), LOCK::procLocks, ProcLockWakeup(), LOCK::tag, and PROCLOCK::tag.

Referenced by LockRefindAndRelease(), LockRelease(), LockReleaseAll(), and RemoveFromWaitQueue().

◆ DoLockModesConflict()

bool DoLockModesConflict ( LOCKMODE  mode1,
LOCKMODE  mode2 
)

Definition at line 570 of file lock.c.

571 {
572  LockMethod lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];
573 
574  if (lockMethodTable->conflictTab[mode1] & LOCKBIT_ON(mode2))
575  return true;
576 
577  return false;
578 }
static const LockMethod LockMethods[]
Definition: lock.c:149
#define LOCKBIT_ON(lockmode)
Definition: lock.h:84
const LOCKMASK * conflictTab
Definition: lock.h:111

References LockMethodData::conflictTab, DEFAULT_LOCKMETHOD, LOCKBIT_ON, and LockMethods.

Referenced by Do_MultiXactIdWait(), DoesMultiXactIdConflict(), initialize_reloptions(), and test_lockmode_for_conflict().

◆ FastPathGetRelationLockEntry()

static PROCLOCK * FastPathGetRelationLockEntry ( LOCALLOCK locallock)
static

Definition at line 2790 of file lock.c.

2791 {
2792  LockMethod lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];
2793  LOCKTAG *locktag = &locallock->tag.lock;
2794  PROCLOCK *proclock = NULL;
2795  LWLock *partitionLock = LockHashPartitionLock(locallock->hashcode);
2796  Oid relid = locktag->locktag_field2;
2797  uint32 f;
2798 
2800 
2801  for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
2802  {
2803  uint32 lockmode;
2804 
2805  /* Look for an allocated slot matching the given relid. */
2806  if (relid != MyProc->fpRelId[f] || FAST_PATH_GET_BITS(MyProc, f) == 0)
2807  continue;
2808 
2809  /* If we don't have a lock of the given mode, forget it! */
2810  lockmode = locallock->tag.mode;
2811  if (!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
2812  break;
2813 
2814  /* Find or create lock object. */
2815  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2816 
2817  proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
2818  locallock->hashcode, lockmode);
2819  if (!proclock)
2820  {
2821  LWLockRelease(partitionLock);
2823  ereport(ERROR,
2824  (errcode(ERRCODE_OUT_OF_MEMORY),
2825  errmsg("out of shared memory"),
2826  errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
2827  }
2828  GrantLock(proclock->tag.myLock, proclock, lockmode);
2829  FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
2830 
2831  LWLockRelease(partitionLock);
2832 
2833  /* No need to examine remaining slots. */
2834  break;
2835  }
2836 
2838 
2839  /* Lock may have already been transferred by some other backend. */
2840  if (proclock == NULL)
2841  {
2842  LOCK *lock;
2843  PROCLOCKTAG proclocktag;
2844  uint32 proclock_hashcode;
2845 
2846  LWLockAcquire(partitionLock, LW_SHARED);
2847 
2849  locktag,
2850  locallock->hashcode,
2851  HASH_FIND,
2852  NULL);
2853  if (!lock)
2854  elog(ERROR, "failed to re-find shared lock object");
2855 
2856  proclocktag.myLock = lock;
2857  proclocktag.myProc = MyProc;
2858 
2859  proclock_hashcode = ProcLockHashCode(&proclocktag, locallock->hashcode);
2860  proclock = (PROCLOCK *)
2862  &proclocktag,
2863  proclock_hashcode,
2864  HASH_FIND,
2865  NULL);
2866  if (!proclock)
2867  elog(ERROR, "failed to re-find shared proclock object");
2868  LWLockRelease(partitionLock);
2869  }
2870 
2871  return proclock;
2872 }
int errhint(const char *fmt,...)
Definition: elog.c:1317
@ HASH_FIND
Definition: hsearch.h:113
static PROCLOCK * SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc, const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode)
Definition: lock.c:1183
#define FAST_PATH_CHECK_LOCKMODE(proc, n, l)
Definition: lock.c:202
void GrantLock(LOCK *lock, PROCLOCK *proclock, LOCKMODE lockmode)
Definition: lock.c:1558
#define FAST_PATH_CLEAR_LOCKMODE(proc, n, l)
Definition: lock.c:200
#define FAST_PATH_GET_BITS(proc, n)
Definition: lock.c:191
#define LockHashPartitionLock(hashcode)
Definition: lock.h:526
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1168
void LWLockRelease(LWLock *lock)
Definition: lwlock.c:1781
@ LW_SHARED
Definition: lwlock.h:115
@ LW_EXCLUSIVE
Definition: lwlock.h:114
unsigned int Oid
Definition: postgres_ext.h:31
PGPROC * MyProc
Definition: proc.c:66
uint32 locktag_field2
Definition: lock.h:167
Definition: lock.h:309
Definition: lwlock.h:42
LWLock fpInfoLock
Definition: proc.h:288
Oid fpRelId[FP_LOCK_SLOTS_PER_BACKEND]
Definition: proc.h:290
PGPROC * myProc
Definition: lock.h:366
Definition: lock.h:370

References DEFAULT_LOCKMETHOD, elog, ereport, errcode(), errhint(), errmsg(), ERROR, FAST_PATH_CHECK_LOCKMODE, FAST_PATH_CLEAR_LOCKMODE, FAST_PATH_GET_BITS, FP_LOCK_SLOTS_PER_BACKEND, PGPROC::fpInfoLock, PGPROC::fpRelId, GrantLock(), HASH_FIND, hash_search_with_hash_value(), LOCALLOCK::hashcode, LOCALLOCKTAG::lock, LockHashPartitionLock, LockMethodLockHash, LockMethodProcLockHash, LockMethods, LOCKTAG::locktag_field2, LW_EXCLUSIVE, LW_SHARED, LWLockAcquire(), LWLockRelease(), LOCALLOCKTAG::mode, PROCLOCKTAG::myLock, MyProc, PROCLOCKTAG::myProc, ProcLockHashCode(), SetupLockInTable(), PROCLOCK::tag, and LOCALLOCK::tag.

Referenced by AtPrepare_Locks().

◆ FastPathGrantRelationLock()

static bool FastPathGrantRelationLock ( Oid  relid,
LOCKMODE  lockmode 
)
static

Definition at line 2635 of file lock.c.

2636 {
2637  uint32 f;
2638  uint32 unused_slot = FP_LOCK_SLOTS_PER_BACKEND;
2639 
2640  /* Scan for existing entry for this relid, remembering empty slot. */
2641  for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
2642  {
2643  if (FAST_PATH_GET_BITS(MyProc, f) == 0)
2644  unused_slot = f;
2645  else if (MyProc->fpRelId[f] == relid)
2646  {
2647  Assert(!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode));
2648  FAST_PATH_SET_LOCKMODE(MyProc, f, lockmode);
2649  return true;
2650  }
2651  }
2652 
2653  /* If no existing entry, use any empty slot. */
2654  if (unused_slot < FP_LOCK_SLOTS_PER_BACKEND)
2655  {
2656  MyProc->fpRelId[unused_slot] = relid;
2657  FAST_PATH_SET_LOCKMODE(MyProc, unused_slot, lockmode);
2659  return true;
2660  }
2661 
2662  /* No existing entry, and no empty slot. */
2663  return false;
2664 }
#define FAST_PATH_SET_LOCKMODE(proc, n, l)
Definition: lock.c:198
static int FastPathLocalUseCount
Definition: lock.c:170

References Assert, FAST_PATH_CHECK_LOCKMODE, FAST_PATH_GET_BITS, FAST_PATH_SET_LOCKMODE, FastPathLocalUseCount, FP_LOCK_SLOTS_PER_BACKEND, PGPROC::fpRelId, and MyProc.

Referenced by LockAcquireExtended().

◆ FastPathTransferRelationLocks()

static bool FastPathTransferRelationLocks ( LockMethod  lockMethodTable,
const LOCKTAG locktag,
uint32  hashcode 
)
static

Definition at line 2702 of file lock.c.

2704 {
2705  LWLock *partitionLock = LockHashPartitionLock(hashcode);
2706  Oid relid = locktag->locktag_field2;
2707  uint32 i;
2708 
2709  /*
2710  * Every PGPROC that can potentially hold a fast-path lock is present in
2711  * ProcGlobal->allProcs. Prepared transactions are not, but any
2712  * outstanding fast-path locks held by prepared transactions are
2713  * transferred to the main lock table.
2714  */
2715  for (i = 0; i < ProcGlobal->allProcCount; i++)
2716  {
2717  PGPROC *proc = &ProcGlobal->allProcs[i];
2718  uint32 f;
2719 
2720  LWLockAcquire(&proc->fpInfoLock, LW_EXCLUSIVE);
2721 
2722  /*
2723  * If the target backend isn't referencing the same database as the
2724  * lock, then we needn't examine the individual relation IDs at all;
2725  * none of them can be relevant.
2726  *
2727  * proc->databaseId is set at backend startup time and never changes
2728  * thereafter, so it might be safe to perform this test before
2729  * acquiring &proc->fpInfoLock. In particular, it's certainly safe to
2730  * assume that if the target backend holds any fast-path locks, it
2731  * must have performed a memory-fencing operation (in particular, an
2732  * LWLock acquisition) since setting proc->databaseId. However, it's
2733  * less clear that our backend is certain to have performed a memory
2734  * fencing operation since the other backend set proc->databaseId. So
2735  * for now, we test it after acquiring the LWLock just to be safe.
2736  */
2737  if (proc->databaseId != locktag->locktag_field1)
2738  {
2739  LWLockRelease(&proc->fpInfoLock);
2740  continue;
2741  }
2742 
2743  for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
2744  {
2745  uint32 lockmode;
2746 
2747  /* Look for an allocated slot matching the given relid. */
2748  if (relid != proc->fpRelId[f] || FAST_PATH_GET_BITS(proc, f) == 0)
2749  continue;
2750 
2751  /* Find or create lock object. */
2752  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2753  for (lockmode = FAST_PATH_LOCKNUMBER_OFFSET;
2754  lockmode < FAST_PATH_LOCKNUMBER_OFFSET + FAST_PATH_BITS_PER_SLOT;
2755  ++lockmode)
2756  {
2757  PROCLOCK *proclock;
2758 
2759  if (!FAST_PATH_CHECK_LOCKMODE(proc, f, lockmode))
2760  continue;
2761  proclock = SetupLockInTable(lockMethodTable, proc, locktag,
2762  hashcode, lockmode);
2763  if (!proclock)
2764  {
2765  LWLockRelease(partitionLock);
2766  LWLockRelease(&proc->fpInfoLock);
2767  return false;
2768  }
2769  GrantLock(proclock->tag.myLock, proclock, lockmode);
2770  FAST_PATH_CLEAR_LOCKMODE(proc, f, lockmode);
2771  }
2772  LWLockRelease(partitionLock);
2773 
2774  /* No need to examine remaining slots. */
2775  break;
2776  }
2777  LWLockRelease(&proc->fpInfoLock);
2778  }
2779  return true;
2780 }
PROC_HDR * ProcGlobal
Definition: proc.c:78
uint32 locktag_field1
Definition: lock.h:166
Definition: proc.h:157
Oid databaseId
Definition: proc.h:202
PGPROC * allProcs
Definition: proc.h:379
uint32 allProcCount
Definition: proc.h:397

References PROC_HDR::allProcCount, PROC_HDR::allProcs, PGPROC::databaseId, FAST_PATH_BITS_PER_SLOT, FAST_PATH_CHECK_LOCKMODE, FAST_PATH_CLEAR_LOCKMODE, FAST_PATH_GET_BITS, FAST_PATH_LOCKNUMBER_OFFSET, FP_LOCK_SLOTS_PER_BACKEND, PGPROC::fpInfoLock, PGPROC::fpRelId, GrantLock(), i, LockHashPartitionLock, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), PROCLOCKTAG::myLock, ProcGlobal, SetupLockInTable(), and PROCLOCK::tag.

Referenced by LockAcquireExtended().

◆ FastPathUnGrantRelationLock()

static bool FastPathUnGrantRelationLock ( Oid  relid,
LOCKMODE  lockmode 
)
static

Definition at line 2672 of file lock.c.

2673 {
2674  uint32 f;
2675  bool result = false;
2676 
2677  FastPathLocalUseCount = 0;
2678  for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
2679  {
2680  if (MyProc->fpRelId[f] == relid
2681  && FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
2682  {
2683  Assert(!result);
2684  FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
2685  result = true;
2686  /* we continue iterating so as to update FastPathLocalUseCount */
2687  }
2688  if (FAST_PATH_GET_BITS(MyProc, f) != 0)
2689  ++FastPathLocalUseCount;
2690  }
2691  return result;
2692 }

References Assert, FAST_PATH_CHECK_LOCKMODE, FAST_PATH_CLEAR_LOCKMODE, FAST_PATH_GET_BITS, FastPathLocalUseCount, FP_LOCK_SLOTS_PER_BACKEND, PGPROC::fpRelId, and MyProc.

Referenced by LockRelease(), and LockReleaseAll().

◆ FinishStrongLockAcquire()

static void FinishStrongLockAcquire ( void  )
static

Definition at line 1750 of file lock.c.

1751 {
1752  StrongLockInProgress = NULL;
1753 }

References StrongLockInProgress.

Referenced by LockAcquireExtended().

◆ GetBlockerStatusData()

BlockedProcsData* GetBlockerStatusData ( int  blocked_pid)

Definition at line 3803 of file lock.c.

3804 {
3805  BlockedProcsData *data;
3806  PGPROC *proc;
3807  int i;
3808 
3809  data = (BlockedProcsData *) palloc(sizeof(BlockedProcsData));
3810 
3811  /*
3812  * Guess how much space we'll need, and preallocate. Most of the time
3813  * this will avoid needing to do repalloc while holding the LWLocks. (We
3814  * assume, but check with an Assert, that MaxBackends is enough entries
3815  * for the procs[] array; the other two could need enlargement, though.)
3816  */
3817  data->nprocs = data->nlocks = data->npids = 0;
3818  data->maxprocs = data->maxlocks = data->maxpids = MaxBackends;
3819  data->procs = (BlockedProcData *) palloc(sizeof(BlockedProcData) * data->maxprocs);
3820  data->locks = (LockInstanceData *) palloc(sizeof(LockInstanceData) * data->maxlocks);
3821  data->waiter_pids = (int *) palloc(sizeof(int) * data->maxpids);
3822 
3823  /*
3824  * In order to search the ProcArray for blocked_pid and assume that that
3825  * entry won't immediately disappear under us, we must hold ProcArrayLock.
3826  * In addition, to examine the lock grouping fields of any other backend,
3827  * we must hold all the hash partition locks. (Only one of those locks is
3828  * actually relevant for any one lock group, but we can't know which one
3829  * ahead of time.) It's fairly annoying to hold all those locks
3830  * throughout this, but it's no worse than GetLockStatusData(), and it
3831  * does have the advantage that we're guaranteed to return a
3832  * self-consistent instantaneous state.
3833  */
3834  LWLockAcquire(ProcArrayLock, LW_SHARED);
3835 
3836  proc = BackendPidGetProcWithLock(blocked_pid);
3837 
3838  /* Nothing to do if it's gone */
3839  if (proc != NULL)
3840  {
3841  /*
3842  * Acquire lock on the entire shared lock data structure. See notes
3843  * in GetLockStatusData().
3844  */
3845  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
3846  LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);
3847 
3848  if (proc->lockGroupLeader == NULL)
3849  {
3850  /* Easy case, proc is not a lock group member */
3851  GetSingleProcBlockerStatusData(proc, data);
3852  }
3853  else
3854  {
3855  /* Examine all procs in proc's lock group */
3856  dlist_iter iter;
3857 
3858  dlist_foreach(iter, &proc->lockGroupLeader->lockGroupMembers)
3859  {
3860  PGPROC *memberProc;
3861 
3862  memberProc = dlist_container(PGPROC, lockGroupLink, iter.cur);
3863  GetSingleProcBlockerStatusData(memberProc, data);
3864  }
3865  }
3866 
3867  /*
3868  * And release locks. See notes in GetLockStatusData().
3869  */
3870  for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
3871  LWLockRelease(LockHashPartitionLockByIndex(i));
3872 
3873  Assert(data->nprocs <= data->maxprocs);
3874  }
3875 
3876  LWLockRelease(ProcArrayLock);
3877 
3878  return data;
3879 }
int MaxBackends
Definition: globals.c:144
#define dlist_foreach(iter, lhead)
Definition: ilist.h:623
#define dlist_container(type, membername, ptr)
Definition: ilist.h:593
static void GetSingleProcBlockerStatusData(PGPROC *blocked_proc, BlockedProcsData *data)
Definition: lock.c:3883
#define LockHashPartitionLockByIndex(i)
Definition: lock.h:529
#define NUM_LOCK_PARTITIONS
Definition: lwlock.h:97
void * palloc(Size size)
Definition: mcxt.c:1317
const void * data
PGPROC * BackendPidGetProcWithLock(int pid)
Definition: procarray.c:3223
dlist_head lockGroupMembers
Definition: proc.h:300
PGPROC * lockGroupLeader
Definition: proc.h:299
dlist_node * cur
Definition: ilist.h:179

References Assert, BackendPidGetProcWithLock(), dlist_iter::cur, data, dlist_container, dlist_foreach, GetSingleProcBlockerStatusData(), i, PGPROC::lockGroupLeader, PGPROC::lockGroupMembers, LockHashPartitionLockByIndex, LW_SHARED, LWLockAcquire(), LWLockRelease(), MaxBackends, NUM_LOCK_PARTITIONS, and palloc().

Referenced by pg_blocking_pids().

◆ GetLockConflicts()

VirtualTransactionId* GetLockConflicts ( const LOCKTAG locktag,
LOCKMODE  lockmode,
int *  countp 
)

Definition at line 2894 of file lock.c.

2895 {
2896  static VirtualTransactionId *vxids;
2897  LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
2898  LockMethod lockMethodTable;
2899  LOCK *lock;
2900  LOCKMASK conflictMask;
2901  dlist_iter proclock_iter;
2902  PROCLOCK *proclock;
2903  uint32 hashcode;
2904  LWLock *partitionLock;
2905  int count = 0;
2906  int fast_count = 0;
2907 
2908  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2909  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2910  lockMethodTable = LockMethods[lockmethodid];
2911  if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
2912  elog(ERROR, "unrecognized lock mode: %d", lockmode);
2913 
2914  /*
2915  * Allocate memory to store results, and fill with InvalidVXID. We only
2916  * need enough space for MaxBackends + max_prepared_xacts + a terminator.
2917  * InHotStandby allocate once in TopMemoryContext.
2918  */
2919  if (InHotStandby)
2920  {
2921  if (vxids == NULL)
2922  vxids = (VirtualTransactionId *)
2923  MemoryContextAlloc(TopMemoryContext,
2924  sizeof(VirtualTransactionId) *
2925  (MaxBackends + max_prepared_xacts + 1));
2926  }
2927  else
2928  vxids = (VirtualTransactionId *)
2929  palloc0(sizeof(VirtualTransactionId) *
2930  (MaxBackends + max_prepared_xacts + 1));
2931 
2932  /* Compute hash code and partition lock, and look up conflicting modes. */
2933  hashcode = LockTagHashCode(locktag);
2934  partitionLock = LockHashPartitionLock(hashcode);
2935  conflictMask = lockMethodTable->conflictTab[lockmode];
2936 
2937  /*
2938  * Fast path locks might not have been entered in the primary lock table.
2939  * If the lock we're dealing with could conflict with such a lock, we must
2940  * examine each backend's fast-path array for conflicts.
2941  */
2942  if (ConflictsWithRelationFastPath(locktag, lockmode))
2943  {
2944  int i;
2945  Oid relid = locktag->locktag_field2;
2946  VirtualTransactionId vxid;
2947 
2948  /*
2949  * Iterate over relevant PGPROCs. Anything held by a prepared
2950  * transaction will have been transferred to the primary lock table,
2951  * so we need not worry about those. This is all a bit fuzzy, because
2952  * new locks could be taken after we've visited a particular
2953  * partition, but the callers had better be prepared to deal with that
2954  * anyway, since the locks could equally well be taken between the
2955  * time we return the value and the time the caller does something
2956  * with it.
2957  */
2958  for (i = 0; i < ProcGlobal->allProcCount; i++)
2959  {
2960  PGPROC *proc = &ProcGlobal->allProcs[i];
2961  uint32 f;
2962 
2963  /* A backend never blocks itself */
2964  if (proc == MyProc)
2965  continue;
2966 
2967  LWLockAcquire(&proc->fpInfoLock, LW_SHARED);
2968 
2969  /*
2970  * If the target backend isn't referencing the same database as
2971  * the lock, then we needn't examine the individual relation IDs
2972  * at all; none of them can be relevant.
2973  *
2974  * See FastPathTransferRelationLocks() for discussion of why we do
2975  * this test after acquiring the lock.
2976  */
2977  if (proc->databaseId != locktag->locktag_field1)
2978  {
2979  LWLockRelease(&proc->fpInfoLock);
2980  continue;
2981  }
2982 
2983  for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
2984  {
2985  uint32 lockmask;
2986 
2987  /* Look for an allocated slot matching the given relid. */
2988  if (relid != proc->fpRelId[f])
2989  continue;
2990  lockmask = FAST_PATH_GET_BITS(proc, f);
2991  if (!lockmask)
2992  continue;
2993  lockmask <<= FAST_PATH_LOCKNUMBER_OFFSET;
2994 
2995  /*
2996  * There can only be one entry per relation, so if we found it
2997  * and it doesn't conflict, we can skip the rest of the slots.
2998  */
2999  if ((lockmask & conflictMask) == 0)
3000  break;
3001 
3002  /* Conflict! */
3003  GET_VXID_FROM_PGPROC(vxid, *proc);
3004 
3005  if (VirtualTransactionIdIsValid(vxid))
3006  vxids[count++] = vxid;
3007  /* else, xact already committed or aborted */
3008 
3009  /* No need to examine remaining slots. */
3010  break;
3011  }
3012 
3013  LWLockRelease(&proc->fpInfoLock);
3014  }
3015  }
3016 
3017  /* Remember how many fast-path conflicts we found. */
3018  fast_count = count;
3019 
3020  /*
3021  * Look up the lock object matching the tag.
3022  */
3023  LWLockAcquire(partitionLock, LW_SHARED);
3024 
3025  lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
3026  locktag,
3027  hashcode,
3028  HASH_FIND,
3029  NULL);
3030  if (!lock)
3031  {
3032  /*
3033  * If the lock object doesn't exist, there is nothing holding a lock
3034  * on this lockable object.
3035  */
3036  LWLockRelease(partitionLock);
3037  vxids[count].procNumber = INVALID_PROC_NUMBER;
3038  vxids[count].localTransactionId = InvalidLocalTransactionId;
3039  if (countp)
3040  *countp = count;
3041  return vxids;
3042  }
3043 
3044  /*
3045  * Examine each existing holder (or awaiter) of the lock.
3046  */
3047  dlist_foreach(proclock_iter, &lock->procLocks)
3048  {
3049  proclock = dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
3050 
3051  if (conflictMask & proclock->holdMask)
3052  {
3053  PGPROC *proc = proclock->tag.myProc;
3054 
3055  /* A backend never blocks itself */
3056  if (proc != MyProc)
3057  {
3058  VirtualTransactionId vxid;
3059 
3060  GET_VXID_FROM_PGPROC(vxid, *proc);
3061 
3062  if (VirtualTransactionIdIsValid(vxid))
3063  {
3064  int i;
3065 
3066  /* Avoid duplicate entries. */
3067  for (i = 0; i < fast_count; ++i)
3068  if (VirtualTransactionIdEquals(vxids[i], vxid))
3069  break;
3070  if (i >= fast_count)
3071  vxids[count++] = vxid;
3072  }
3073  /* else, xact already committed or aborted */
3074  }
3075  }
3076  }
3077 
3078  LWLockRelease(partitionLock);
3079 
3080  if (count > MaxBackends + max_prepared_xacts) /* should never happen */
3081  elog(PANIC, "too many conflicting locks found");
3082 
3083  vxids[count].procNumber = INVALID_PROC_NUMBER;
3084  vxids[count].localTransactionId = InvalidLocalTransactionId;
3085  if (countp)
3086  *countp = count;
3087  return vxids;
3088 }
#define lengthof(array)
Definition: c.h:788
#define ConflictsWithRelationFastPath(locktag, mode)
Definition: lock.c:219
uint32 LockTagHashCode(const LOCKTAG *locktag)
Definition: lock.c:504
uint16 LOCKMETHODID
Definition: lock.h:122
#define VirtualTransactionIdIsValid(vxid)
Definition: lock.h:67
#define GET_VXID_FROM_PGPROC(vxid_dst, proc)
Definition: lock.h:77
#define InvalidLocalTransactionId
Definition: lock.h:65
#define VirtualTransactionIdEquals(vxid1, vxid2)
Definition: lock.h:71
int LOCKMASK
Definition: lockdefs.h:25
MemoryContext TopMemoryContext
Definition: mcxt.c:149
void * palloc0(Size size)
Definition: mcxt.c:1347
void * MemoryContextAlloc(MemoryContext context, Size size)
Definition: mcxt.c:1181
#define INVALID_PROC_NUMBER
Definition: procnumber.h:26
uint8 locktag_lockmethodid
Definition: lock.h:171
int numLockModes
Definition: lock.h:110
LocalTransactionId localTransactionId
Definition: lock.h:62
ProcNumber procNumber
Definition: lock.h:61
int max_prepared_xacts
Definition: twophase.c:115
#define InHotStandby
Definition: xlogutils.h:60

References PROC_HDR::allProcCount, PROC_HDR::allProcs, ConflictsWithRelationFastPath, LockMethodData::conflictTab, dlist_iter::cur, PGPROC::databaseId, dlist_container, dlist_foreach, elog, ERROR, FAST_PATH_GET_BITS, FAST_PATH_LOCKNUMBER_OFFSET, FP_LOCK_SLOTS_PER_BACKEND, PGPROC::fpInfoLock, PGPROC::fpRelId, GET_VXID_FROM_PGPROC, HASH_FIND, hash_search_with_hash_value(), PROCLOCK::holdMask, i, InHotStandby, INVALID_PROC_NUMBER, InvalidLocalTransactionId, lengthof, VirtualTransactionId::localTransactionId, LockHashPartitionLock, LockMethodLockHash, LockMethods, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_lockmethodid, LockTagHashCode(), LW_SHARED, LWLockAcquire(), LWLockRelease(), max_prepared_xacts, MaxBackends, MemoryContextAlloc(), MyProc, PROCLOCKTAG::myProc, LockMethodData::numLockModes, palloc0(), PANIC, ProcGlobal, LOCK::procLocks, VirtualTransactionId::procNumber, PROCLOCK::tag, TopMemoryContext, VirtualTransactionIdEquals, and VirtualTransactionIdIsValid.

Referenced by ProcSleep(), ResolveRecoveryConflictWithLock(), and WaitForLockersMultiple().

◆ GetLockmodeName()

const char* GetLockmodeName ( LOCKMETHODID  lockmethodid,
LOCKMODE  mode 
)

Definition at line 4060 of file lock.c.

4061 {
4062  Assert(lockmethodid > 0 && lockmethodid < lengthof(LockMethods));
4063  Assert(mode > 0 && mode <= LockMethods[lockmethodid]->numLockModes);
4064  return LockMethods[lockmethodid]->lockModeNames[mode];
4065 }
const char *const * lockModeNames
Definition: lock.h:112

References Assert, lengthof, LockMethods, LockMethodData::lockModeNames, and mode.

Referenced by DeadLockReport(), pg_lock_status(), and ProcSleep().

◆ GetLocksMethodTable()

LockMethod GetLocksMethodTable ( const LOCK lock)

Definition at line 474 of file lock.c.

475 {
476  LOCKMETHODID lockmethodid = LOCK_LOCKMETHOD(*lock);
477 
478  Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
479  return LockMethods[lockmethodid];
480 }
#define LOCK_LOCKMETHOD(lock)
Definition: lock.h:324

References Assert, lengthof, LOCK_LOCKMETHOD, and LockMethods.

Referenced by DeadLockCheck(), and FindLockCycleRecurseMember().

◆ GetLockStatusData()

LockData* GetLockStatusData ( void  )

Definition at line 3611 of file lock.c.

3612 {
3613  LockData *data;
3614  PROCLOCK *proclock;
3615  HASH_SEQ_STATUS seqstat;
3616  int els;
3617  int el;
3618  int i;
3619 
3620  data = (LockData *) palloc(sizeof(LockData));
3621 
3622  /* Guess how much space we'll need. */
3623  els = MaxBackends;
3624  el = 0;
3625  data->locks = (LockInstanceData *) palloc(sizeof(LockInstanceData) * els);
3626 
3627  /*
3628  * First, we iterate through the per-backend fast-path arrays, locking
3629  * them one at a time. This might produce an inconsistent picture of the
3630  * system state, but taking all of those LWLocks at the same time seems
3631  * impractical (in particular, note MAX_SIMUL_LWLOCKS). It shouldn't
3632  * matter too much, because none of these locks can be involved in lock
3633  * conflicts anyway - anything that might must be present in the main lock
3634  * table. (For the same reason, we don't sweat about making leaderPid
3635  * completely valid. We cannot safely dereference another backend's
3636  * lockGroupLeader field without holding all lock partition locks, and
3637  * it's not worth that.)
3638  */
3639  for (i = 0; i < ProcGlobal->allProcCount; ++i)
3640  {
3641  PGPROC *proc = &ProcGlobal->allProcs[i];
3642  uint32 f;
3643 
3644  LWLockAcquire(&proc->fpInfoLock, LW_SHARED);
3645 
3646  for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; ++f)
3647  {
3648  LockInstanceData *instance;
3649  uint32 lockbits = FAST_PATH_GET_BITS(proc, f);
3650 
3651  /* Skip unallocated slots. */
3652  if (!lockbits)
3653  continue;
3654 
3655  if (el >= els)
3656  {
3657  els += MaxBackends;
3658  data->locks = (LockInstanceData *)
3659  repalloc(data->locks, sizeof(LockInstanceData) * els);
3660  }
3661 
3662  instance = &data->locks[el];
3663  SET_LOCKTAG_RELATION(instance->locktag, proc->databaseId,
3664  proc->fpRelId[f]);
3665  instance->holdMask = lockbits << FAST_PATH_LOCKNUMBER_OFFSET;
3666  instance->waitLockMode = NoLock;
3667  instance->vxid.procNumber = proc->vxid.procNumber;
3668  instance->vxid.localTransactionId = proc->vxid.lxid;
3669  instance->pid = proc->pid;
3670  instance->leaderPid = proc->pid;
3671  instance->fastpath = true;
3672 
3673  /*
3674  * Successfully taking fast path lock means there were no
3675  * conflicting locks.
3676  */
3677  instance->waitStart = 0;
3678 
3679  el++;
3680  }
3681 
3682  if (proc->fpVXIDLock)
3683  {
3684  VirtualTransactionId vxid;
3685  LockInstanceData *instance;
3686 
3687  if (el >= els)
3688  {
3689  els += MaxBackends;
3690  data->locks = (LockInstanceData *)
3691  repalloc(data->locks, sizeof(LockInstanceData) * els);
3692  }
3693 
3694  vxid.procNumber = proc->vxid.procNumber;
3695  vxid.localTransactionId = proc->fpLocalTransactionId;
3696 
3697  instance = &data->locks[el];
3698  SET_LOCKTAG_VIRTUALTRANSACTION(instance->locktag, vxid);
3699  instance->holdMask = LOCKBIT_ON(ExclusiveLock);
3700  instance->waitLockMode = NoLock;
3701  instance->vxid.procNumber = proc->vxid.procNumber;
3702  instance->vxid.localTransactionId = proc->vxid.lxid;
3703  instance->pid = proc->pid;
3704  instance->leaderPid = proc->pid;
3705  instance->fastpath = true;
3706  instance->waitStart = 0;
3707 
3708  el++;
3709  }
3710 
3711  LWLockRelease(&proc->fpInfoLock);
3712  }
3713 
3714  /*
3715  * Next, acquire lock on the entire shared lock data structure. We do
3716  * this so that, at least for locks in the primary lock table, the state
3717  * will be self-consistent.
3718  *
3719  * Since this is a read-only operation, we take shared instead of
3720  * exclusive lock. There's not a whole lot of point to this, because all
3721  * the normal operations require exclusive lock, but it doesn't hurt
3722  * anything either. It will at least allow two backends to do
3723  * GetLockStatusData in parallel.
3724  *
3725  * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
3726  */
3727  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
3728  LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);
3729 
3730  /* Now we can safely count the number of proclocks */
3731  data->nelements = el + hash_get_num_entries(LockMethodProcLockHash);
3732  if (data->nelements > els)
3733  {
3734  els = data->nelements;
3735  data->locks = (LockInstanceData *)
3736  repalloc(data->locks, sizeof(LockInstanceData) * els);
3737  }
3738 
3739  /* Now scan the tables to copy the data */
3740  hash_seq_init(&seqstat, LockMethodProcLockHash);
3741 
3742  while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
3743  {
3744  PGPROC *proc = proclock->tag.myProc;
3745  LOCK *lock = proclock->tag.myLock;
3746  LockInstanceData *instance = &data->locks[el];
3747 
3748  memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
3749  instance->holdMask = proclock->holdMask;
3750  if (proc->waitLock == proclock->tag.myLock)
3751  instance->waitLockMode = proc->waitLockMode;
3752  else
3753  instance->waitLockMode = NoLock;
3754  instance->vxid.procNumber = proc->vxid.procNumber;
3755  instance->vxid.localTransactionId = proc->vxid.lxid;
3756  instance->pid = proc->pid;
3757  instance->leaderPid = proclock->groupLeader->pid;
3758  instance->fastpath = false;
3759  instance->waitStart = (TimestampTz) pg_atomic_read_u64(&proc->waitStart);
3760 
3761  el++;
3762  }
3763 
3764  /*
3765  * And release locks. We do this in reverse order for two reasons: (1)
3766  * Anyone else who needs more than one of the locks will be trying to lock
3767  * them in increasing order; we don't want to release the other process
3768  * until it can get all the locks it needs. (2) This avoids O(N^2)
3769  * behavior inside LWLockRelease.
3770  */
3771  for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
3772  LWLockRelease(LockHashPartitionLockByIndex(i));
3773 
3774  Assert(el == data->nelements);
3775 
3776  return data;
3777 }
static uint64 pg_atomic_read_u64(volatile pg_atomic_uint64 *ptr)
Definition: atomics.h:460
int64 TimestampTz
Definition: timestamp.h:39
long hash_get_num_entries(HTAB *hashp)
Definition: dynahash.c:1341
#define SET_LOCKTAG_VIRTUALTRANSACTION(locktag, vxid)
Definition: lock.h:235
#define SET_LOCKTAG_RELATION(locktag, dboid, reloid)
Definition: lock.h:181
#define NoLock
Definition: lockdefs.h:34
#define ExclusiveLock
Definition: lockdefs.h:42
void * repalloc(void *pointer, Size size)
Definition: mcxt.c:1541
Definition: lock.h:466
LOCKMASK holdMask
Definition: lock.h:455
LOCKMODE waitLockMode
Definition: lock.h:456
bool fastpath
Definition: lock.h:462
LOCKTAG locktag
Definition: lock.h:454
TimestampTz waitStart
Definition: lock.h:458
int leaderPid
Definition: lock.h:461
VirtualTransactionId vxid
Definition: lock.h:457
LocalTransactionId lxid
Definition: proc.h:195
pg_atomic_uint64 waitStart
Definition: proc.h:232
bool fpVXIDLock
Definition: proc.h:291
ProcNumber procNumber
Definition: proc.h:190
int pid
Definition: proc.h:177
LOCK * waitLock
Definition: proc.h:227
LOCKMODE waitLockMode
Definition: proc.h:229
struct PGPROC::@117 vxid
LocalTransactionId fpLocalTransactionId
Definition: proc.h:292
PGPROC * groupLeader
Definition: lock.h:375

References PROC_HDR::allProcCount, PROC_HDR::allProcs, Assert, data, PGPROC::databaseId, ExclusiveLock, FAST_PATH_GET_BITS, FAST_PATH_LOCKNUMBER_OFFSET, LockInstanceData::fastpath, FP_LOCK_SLOTS_PER_BACKEND, PGPROC::fpInfoLock, PGPROC::fpLocalTransactionId, PGPROC::fpRelId, PGPROC::fpVXIDLock, PROCLOCK::groupLeader, hash_get_num_entries(), hash_seq_init(), hash_seq_search(), PROCLOCK::holdMask, LockInstanceData::holdMask, i, LockInstanceData::leaderPid, VirtualTransactionId::localTransactionId, LOCKBIT_ON, LockHashPartitionLockByIndex, LockMethodProcLockHash, LockInstanceData::locktag, LW_SHARED, LWLockAcquire(), LWLockRelease(), PGPROC::lxid, MaxBackends, PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, NoLock, NUM_LOCK_PARTITIONS, palloc(), pg_atomic_read_u64(), LockInstanceData::pid, PGPROC::pid, ProcGlobal, VirtualTransactionId::procNumber, PGPROC::procNumber, repalloc(), SET_LOCKTAG_RELATION, SET_LOCKTAG_VIRTUALTRANSACTION, LOCK::tag, PROCLOCK::tag, LockInstanceData::vxid, PGPROC::vxid, PGPROC::waitLock, LockInstanceData::waitLockMode, PGPROC::waitLockMode, LockInstanceData::waitStart, and PGPROC::waitStart.

Referenced by pg_lock_status().

◆ GetLockTagsMethodTable()

LockMethod GetLockTagsMethodTable ( const LOCKTAG locktag)

Definition at line 486 of file lock.c.

487 {
488  LOCKMETHODID lockmethodid = (LOCKMETHODID) locktag->locktag_lockmethodid;
489 
490  Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
491  return LockMethods[lockmethodid];
492 }

References Assert, lengthof, LockMethods, and LOCKTAG::locktag_lockmethodid.

Referenced by pg_blocking_pids().

◆ GetRunningTransactionLocks()

xl_standby_lock* GetRunningTransactionLocks ( int *  nlocks)

Definition at line 3978 of file lock.c.

3979 {
3980  xl_standby_lock *accessExclusiveLocks;
3981  PROCLOCK *proclock;
3982  HASH_SEQ_STATUS seqstat;
3983  int i;
3984  int index;
3985  int els;
3986 
3987  /*
3988  * Acquire lock on the entire shared lock data structure.
3989  *
3990  * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
3991  */
3992  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
3993  LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);
3994 
3995  /* Now we can safely count the number of proclocks */
3996  els = hash_get_num_entries(LockMethodProcLockHash);
3997 
3998  /*
3999  * Allocating enough space for all locks in the lock table is overkill,
4000  * but it's more convenient and faster than having to enlarge the array.
4001  */
4002  accessExclusiveLocks = palloc(els * sizeof(xl_standby_lock));
4003 
4004  /* Now scan the tables to copy the data */
4005  hash_seq_init(&seqstat, LockMethodProcLockHash);
4006 
4007  /*
4008  * If lock is a currently granted AccessExclusiveLock then it will have
4009  * just one proclock holder, so locks are never accessed twice in this
4010  * particular case. Don't copy this code for use elsewhere because in the
4011  * general case this will give you duplicate locks when looking at
4012  * non-exclusive lock types.
4013  */
4014  index = 0;
4015  while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
4016  {
4017  /* make sure this definition matches the one used in LockAcquire */
4018  if ((proclock->holdMask & LOCKBIT_ON(AccessExclusiveLock)) &&
4019  proclock->tag.myLock->tag.locktag_type == LOCKTAG_RELATION)
4020  {
4021  PGPROC *proc = proclock->tag.myProc;
4022  LOCK *lock = proclock->tag.myLock;
4023  TransactionId xid = proc->xid;
4024 
4025  /*
4026  * Don't record locks for transactions if we know they have
4027  * already issued their WAL record for commit but not yet released
4028  * lock. It is still possible that we see locks held by already
4029  * complete transactions, if they haven't yet zeroed their xids.
4030  */
4031  if (!TransactionIdIsValid(xid))
4032  continue;
4033 
4034  accessExclusiveLocks[index].xid = xid;
4035  accessExclusiveLocks[index].dbOid = lock->tag.locktag_field1;
4036  accessExclusiveLocks[index].relOid = lock->tag.locktag_field2;
4037 
4038  index++;
4039  }
4040  }
4041 
4042  Assert(index <= els);
4043 
4044  /*
4045  * And release locks. We do this in reverse order for two reasons: (1)
4046  * Anyone else who needs more than one of the locks will be trying to lock
4047  * them in increasing order; we don't want to release the other process
4048  * until it can get all the locks it needs. (2) This avoids O(N^2)
4049  * behavior inside LWLockRelease.
4050  */
4051  for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
4052  LWLockRelease(LockHashPartitionLockByIndex(i));
4053 
4054  *nlocks = index;
4055  return accessExclusiveLocks;
4056 }
uint32 TransactionId
Definition: c.h:652
#define AccessExclusiveLock
Definition: lockdefs.h:43
TransactionId xid
Definition: proc.h:167
Definition: type.h:95
TransactionId xid
Definition: lockdefs.h:51
#define TransactionIdIsValid(xid)
Definition: transam.h:41

References AccessExclusiveLock, Assert, xl_standby_lock::dbOid, hash_get_num_entries(), hash_seq_init(), hash_seq_search(), PROCLOCK::holdMask, i, LOCKBIT_ON, LockHashPartitionLockByIndex, LockMethodProcLockHash, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG_RELATION, LOCKTAG::locktag_type, LW_SHARED, LWLockAcquire(), LWLockRelease(), PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, NUM_LOCK_PARTITIONS, palloc(), xl_standby_lock::relOid, LOCK::tag, PROCLOCK::tag, TransactionIdIsValid, xl_standby_lock::xid, and PGPROC::xid.

Referenced by LogStandbySnapshot().

◆ GetSingleProcBlockerStatusData()

static void GetSingleProcBlockerStatusData ( PGPROC blocked_proc,
BlockedProcsData data 
)
static

Definition at line 3883 of file lock.c.

3884 {
3885  LOCK *theLock = blocked_proc->waitLock;
3886  BlockedProcData *bproc;
3887  dlist_iter proclock_iter;
3888  dlist_iter proc_iter;
3889  dclist_head *waitQueue;
3890  int queue_size;
3891 
3892  /* Nothing to do if this proc is not blocked */
3893  if (theLock == NULL)
3894  return;
3895 
3896  /* Set up a procs[] element */
3897  bproc = &data->procs[data->nprocs++];
3898  bproc->pid = blocked_proc->pid;
3899  bproc->first_lock = data->nlocks;
3900  bproc->first_waiter = data->npids;
3901 
3902  /*
3903  * We may ignore the proc's fast-path arrays, since nothing in those could
3904  * be related to a contended lock.
3905  */
3906 
3907  /* Collect all PROCLOCKs associated with theLock */
3908  dlist_foreach(proclock_iter, &theLock->procLocks)
3909  {
3910  PROCLOCK *proclock =
3911  dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
3912  PGPROC *proc = proclock->tag.myProc;
3913  LOCK *lock = proclock->tag.myLock;
3914  LockInstanceData *instance;
3915 
3916  if (data->nlocks >= data->maxlocks)
3917  {
3918  data->maxlocks += MaxBackends;
3919  data->locks = (LockInstanceData *)
3920  repalloc(data->locks, sizeof(LockInstanceData) * data->maxlocks);
3921  }
3922 
3923  instance = &data->locks[data->nlocks];
3924  memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
3925  instance->holdMask = proclock->holdMask;
3926  if (proc->waitLock == lock)
3927  instance->waitLockMode = proc->waitLockMode;
3928  else
3929  instance->waitLockMode = NoLock;
3930  instance->vxid.procNumber = proc->vxid.procNumber;
3931  instance->vxid.localTransactionId = proc->vxid.lxid;
3932  instance->pid = proc->pid;
3933  instance->leaderPid = proclock->groupLeader->pid;
3934  instance->fastpath = false;
3935  data->nlocks++;
3936  }
3937 
3938  /* Enlarge waiter_pids[] if it's too small to hold all wait queue PIDs */
3939  waitQueue = &(theLock->waitProcs);
3940  queue_size = dclist_count(waitQueue);
3941 
3942  if (queue_size > data->maxpids - data->npids)
3943  {
3944  data->maxpids = Max(data->maxpids + MaxBackends,
3945  data->npids + queue_size);
3946  data->waiter_pids = (int *) repalloc(data->waiter_pids,
3947  sizeof(int) * data->maxpids);
3948  }
3949 
3950  /* Collect PIDs from the lock's wait queue, stopping at blocked_proc */
3951  dclist_foreach(proc_iter, waitQueue)
3952  {
3953  PGPROC *queued_proc = dlist_container(PGPROC, links, proc_iter.cur);
3954 
3955  if (queued_proc == blocked_proc)
3956  break;
3957  data->waiter_pids[data->npids++] = queued_proc->pid;
3958  queued_proc = (PGPROC *) queued_proc->links.next;
3959  }
3960 
3961  bproc->num_locks = data->nlocks - bproc->first_lock;
3962  bproc->num_waiters = data->npids - bproc->first_waiter;
3963 }
#define Max(x, y)
Definition: c.h:998
static uint32 dclist_count(const dclist_head *head)
Definition: ilist.h:932
#define dclist_foreach(iter, lhead)
Definition: ilist.h:970
int first_lock
Definition: lock.h:476
int first_waiter
Definition: lock.h:480
int num_waiters
Definition: lock.h:481
int num_locks
Definition: lock.h:477
dclist_head waitProcs
Definition: lock.h:317
dlist_node links
Definition: proc.h:158
dlist_node * next
Definition: ilist.h:140
static struct link * links
Definition: zic.c:299

References dlist_iter::cur, data, dclist_count(), dclist_foreach, dlist_container, dlist_foreach, LockInstanceData::fastpath, BlockedProcData::first_lock, BlockedProcData::first_waiter, PROCLOCK::groupLeader, PROCLOCK::holdMask, LockInstanceData::holdMask, LockInstanceData::leaderPid, PGPROC::links, links, VirtualTransactionId::localTransactionId, LockInstanceData::locktag, PGPROC::lxid, Max, MaxBackends, PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, dlist_node::next, NoLock, BlockedProcData::num_locks, BlockedProcData::num_waiters, LockInstanceData::pid, BlockedProcData::pid, PGPROC::pid, LOCK::procLocks, VirtualTransactionId::procNumber, PGPROC::procNumber, repalloc(), LOCK::tag, PROCLOCK::tag, LockInstanceData::vxid, PGPROC::vxid, PGPROC::waitLock, LockInstanceData::waitLockMode, PGPROC::waitLockMode, and LOCK::waitProcs.

Referenced by GetBlockerStatusData().

◆ GrantAwaitedLock()

void GrantAwaitedLock ( void  )

Definition at line 1789 of file lock.c.

/*
 * GrantAwaitedLock -- grant the lock we were last waiting on, using the
 * LOCALLOCK and ResourceOwner saved in awaitedLock / awaitedOwner.
 *
 * NOTE(review): line 1791 was lost when hyperlinked identifiers were
 * stripped from this listing; restored from the source file.
 */
1790 {
1791  GrantLockLocal(awaitedLock, awaitedOwner);
1792 }
static LOCALLOCK * awaitedLock
Definition: lock.c:274
static ResourceOwner awaitedOwner
Definition: lock.c:275
static void GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner)
Definition: lock.c:1692

References awaitedLock, awaitedOwner, and GrantLockLocal().

Referenced by LockErrorCleanup(), and ProcSleep().

◆ GrantLock()

void GrantLock ( LOCK lock,
PROCLOCK proclock,
LOCKMODE  lockmode 
)

Definition at line 1558 of file lock.c.

/*
 * GrantLock -- update the shared LOCK and this holder's PROCLOCK to record
 * that `lockmode` has been granted.  Callers shown elsewhere in this file
 * invoke it while holding the lock partition LWLock in exclusive mode.
 */
1559 {
1560  lock->nGranted++;
1561  lock->granted[lockmode]++;
1562  lock->grantMask |= LOCKBIT_ON(lockmode);
      /* once every requester of this mode holds it, nobody waits on the mode */
1563  if (lock->granted[lockmode] == lock->requested[lockmode])
1564  lock->waitMask &= LOCKBIT_OFF(lockmode);
      /* record the grant in the per-holder mask as well */
1565  proclock->holdMask |= LOCKBIT_ON(lockmode);
1566  LOCK_PRINT("GrantLock", lock, lockmode);
      /* sanity: counts stay positive and grants never exceed requests */
1567  Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
1568  Assert(lock->nGranted <= lock->nRequested);
1569 }
#define LOCKBIT_OFF(lockmode)
Definition: lock.h:85
int requested[MAX_LOCKMODES]
Definition: lock.h:318
int granted[MAX_LOCKMODES]
Definition: lock.h:320
LOCKMASK grantMask
Definition: lock.h:314
LOCKMASK waitMask
Definition: lock.h:315
int nGranted
Definition: lock.h:321

References Assert, LOCK::granted, LOCK::grantMask, PROCLOCK::holdMask, LOCK_PRINT, LOCKBIT_OFF, LOCKBIT_ON, LOCK::nGranted, LOCK::nRequested, LOCK::requested, and LOCK::waitMask.

Referenced by FastPathGetRelationLockEntry(), FastPathTransferRelationLocks(), lock_twophase_recover(), LockAcquireExtended(), ProcLockWakeup(), ProcSleep(), and VirtualXactLock().

◆ GrantLockLocal()

static void GrantLockLocal ( LOCALLOCK locallock,
ResourceOwner  owner 
)
static

Definition at line 1692 of file lock.c.

/*
 * GrantLockLocal -- bump the backend-local reference counts in `locallock`
 * for the given resource `owner` (NULL means a session-level hold).
 * Caller must have ensured there is room in the lockOwners array; the
 * Assert below enforces that precondition.
 */
1693 {
1694  LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
1695  int i;
1696 
1697  Assert(locallock->numLockOwners < locallock->maxLockOwners);
1698  /* Count the total */
1699  locallock->nLocks++;
1700  /* Count the per-owner lock */
1701  for (i = 0; i < locallock->numLockOwners; i++)
1702  {
1703  if (lockOwners[i].owner == owner)
1704  {
1705  lockOwners[i].nLocks++;
1706  return;
1707  }
1708  }
      /* owner not seen before: append a new entry (i == numLockOwners here) */
1709  lockOwners[i].owner = owner;
1710  lockOwners[i].nLocks = 1;
1711  locallock->numLockOwners++;
      /* session locks (owner == NULL) are not tracked by any ResourceOwner */
1712  if (owner != NULL)
1713  ResourceOwnerRememberLock(owner, locallock);
1714 
1715  /* Indicate that the lock is acquired for certain types of locks. */
1716  CheckAndSetLockHeld(locallock, true);
1717 }
static void CheckAndSetLockHeld(LOCALLOCK *locallock, bool acquired)
Definition: lock.c:1364
void ResourceOwnerRememberLock(ResourceOwner owner, LOCALLOCK *locallock)
Definition: resowner.c:1045
int64 nLocks
Definition: lock.h:423
struct ResourceOwnerData * owner
Definition: lock.h:422
int maxLockOwners
Definition: lock.h:437

References Assert, CheckAndSetLockHeld(), i, LOCALLOCK::lockOwners, LOCALLOCK::maxLockOwners, LOCALLOCKOWNER::nLocks, LOCALLOCK::nLocks, LOCALLOCK::numLockOwners, LOCALLOCKOWNER::owner, and ResourceOwnerRememberLock().

Referenced by GrantAwaitedLock(), and LockAcquireExtended().

◆ InitLocks()

void InitLocks ( void  )

Definition at line 392 of file lock.c.

/*
 * InitLocks -- create/attach the shared lock tables (LOCK and PROCLOCK
 * hashes, fast-path strong-lock counters) and the backend-local LOCALLOCK
 * hash.
 *
 * NOTE(review): lines 412, 418, 431, 437, 442, 446, 457, 458 and 466 were
 * lost when hyperlinked identifiers were stripped from this listing;
 * restored from the source file.
 */
393 {
394  HASHCTL info;
395  long init_table_size,
396  max_table_size;
397  bool found;
398 
399  /*
400  * Compute init/max size to request for lock hashtables. Note these
401  * calculations must agree with LockShmemSize!
402  */
403  max_table_size = NLOCKENTS();
404  init_table_size = max_table_size / 2;
405 
406  /*
407  * Allocate hash table for LOCK structs. This stores per-locked-object
408  * information.
409  */
410  info.keysize = sizeof(LOCKTAG);
411  info.entrysize = sizeof(LOCK);
412  info.num_partitions = NUM_LOCK_PARTITIONS;
413 
414  LockMethodLockHash = ShmemInitHash("LOCK hash",
415  init_table_size,
416  max_table_size,
417  &info,
418  HASH_ELEM | HASH_BLOBS | HASH_PARTITION);
419 
420  /* Assume an average of 2 holders per lock */
421  max_table_size *= 2;
422  init_table_size *= 2;
423 
424  /*
425  * Allocate hash table for PROCLOCK structs. This stores
426  * per-lock-per-holder information.
427  */
428  info.keysize = sizeof(PROCLOCKTAG);
429  info.entrysize = sizeof(PROCLOCK);
430  info.hash = proclock_hash;
431  info.num_partitions = NUM_LOCK_PARTITIONS;
432 
433  LockMethodProcLockHash = ShmemInitHash("PROCLOCK hash",
434  init_table_size,
435  max_table_size,
436  &info,
437  HASH_ELEM | HASH_FUNCTION | HASH_PARTITION);
438 
439  /*
440  * Allocate fast-path structures.
441  */
442  FastPathStrongRelationLocks = (FastPathStrongRelationLockData *)
443  ShmemInitStruct("Fast Path Strong Relation Lock Data",
444  sizeof(FastPathStrongRelationLockData), &found);
445  if (!found)
446  SpinLockInit(&FastPathStrongRelationLocks->mutex);
447 
448  /*
449  * Allocate non-shared hash table for LOCALLOCK structs. This stores lock
450  * counts and resource owner information.
451  *
452  * The non-shared table could already exist in this process (this occurs
453  * when the postmaster is recreating shared memory after a backend crash).
454  * If so, delete and recreate it. (We could simply leave it, since it
455  * ought to be empty in the postmaster, but for safety let's zap it.)
456  */
457  if (LockMethodLocalHash)
458  hash_destroy(LockMethodLocalHash);
459 
460  info.keysize = sizeof(LOCALLOCKTAG);
461  info.entrysize = sizeof(LOCALLOCK);
462 
463  LockMethodLocalHash = hash_create("LOCALLOCK hash",
464  16,
465  &info,
466  HASH_ELEM | HASH_BLOBS);
467 }
#define HASH_FUNCTION
Definition: hsearch.h:98
#define HASH_PARTITION
Definition: hsearch.h:92
#define NLOCKENTS()
Definition: lock.c:55
static uint32 proclock_hash(const void *key, Size keysize)
Definition: lock.c:521
struct LOCALLOCK LOCALLOCK
struct LOCK LOCK
struct PROCLOCK PROCLOCK
struct PROCLOCKTAG PROCLOCKTAG
struct LOCALLOCKTAG LOCALLOCKTAG
void * ShmemInitStruct(const char *name, Size size, bool *foundPtr)
Definition: shmem.c:387
HTAB * ShmemInitHash(const char *name, long init_size, long max_size, HASHCTL *infoP, int hash_flags)
Definition: shmem.c:332
#define SpinLockInit(lock)
Definition: spin.h:60
HashValueFunc hash
Definition: hsearch.h:78
long num_partitions
Definition: hsearch.h:68

References HASHCTL::entrysize, FastPathStrongRelationLocks, HASHCTL::hash, HASH_BLOBS, hash_create(), hash_destroy(), HASH_ELEM, HASH_FUNCTION, HASH_PARTITION, HASHCTL::keysize, LockMethodLocalHash, LockMethodLockHash, LockMethodProcLockHash, FastPathStrongRelationLockData::mutex, NLOCKENTS, NUM_LOCK_PARTITIONS, HASHCTL::num_partitions, proclock_hash(), ShmemInitHash(), ShmemInitStruct(), and SpinLockInit.

Referenced by CreateOrAttachShmemStructs().

◆ lock_twophase_postabort()

void lock_twophase_postabort ( TransactionId  xid,
uint16  info,
void *  recdata,
uint32  len 
)

Definition at line 4403 of file lock.c.

/*
 * lock_twophase_postabort -- 2PC abort callback.  Releasing a prepared
 * transaction's locks is identical for commit and abort, so just reuse
 * the commit path.
 */
4405 {
4406  lock_twophase_postcommit(xid, info, recdata, len);
4407 }
void lock_twophase_postcommit(TransactionId xid, uint16 info, void *recdata, uint32 len)
Definition: lock.c:4377
const void size_t len

References len, and lock_twophase_postcommit().

◆ lock_twophase_postcommit()

void lock_twophase_postcommit ( TransactionId  xid,
uint16  info,
void *  recdata,
uint32  len 
)

Definition at line 4377 of file lock.c.

/*
 * lock_twophase_postcommit -- 2PC commit callback: release one lock that
 * was held by the prepared transaction `xid`.  `recdata` is a
 * TwoPhaseLockRecord naming the locktag and mode; the hold is attributed
 * to the transaction's dummy PGPROC.
 */
4379 {
4380  TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4381  PGPROC *proc = TwoPhaseGetDummyProc(xid, true);
4382  LOCKTAG *locktag;
4383  LOCKMETHODID lockmethodid;
4384  LockMethod lockMethodTable;
4385 
4386  Assert(len == sizeof(TwoPhaseLockRecord));
4387  locktag = &rec->locktag;
4388  lockmethodid = locktag->locktag_lockmethodid;
4389 
      /* validate the lock method id recorded in the 2PC state file */
4390  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4391  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4392  lockMethodTable = LockMethods[lockmethodid];
4393 
      /* `true` = also decrement the fast-path strong lock count if needed */
4394  LockRefindAndRelease(lockMethodTable, proc, locktag, rec->lockmode, true);
4395 }
static void LockRefindAndRelease(LockMethod lockMethodTable, PGPROC *proc, LOCKTAG *locktag, LOCKMODE lockmode, bool decrement_strong_lock_count)
Definition: lock.c:3102
PGPROC * TwoPhaseGetDummyProc(TransactionId xid, bool lock_held)
Definition: twophase.c:918

References Assert, elog, ERROR, len, lengthof, LockMethods, TwoPhaseLockRecord::lockmode, LockRefindAndRelease(), TwoPhaseLockRecord::locktag, LOCKTAG::locktag_lockmethodid, and TwoPhaseGetDummyProc().

Referenced by lock_twophase_postabort().

◆ lock_twophase_recover()

void lock_twophase_recover ( TransactionId  xid,
uint16  info,
void *  recdata,
uint32  len 
)

Definition at line 4164 of file lock.c.

/*
 * lock_twophase_recover -- 2PC recovery callback: re-acquire one lock on
 * behalf of a prepared transaction, attributing it to the transaction's
 * dummy PGPROC.  Conflicts are deliberately ignored (see comment at the
 * grant below).
 *
 * NOTE(review): lines 4200, 4203, 4248, 4251, 4265, 4332 and 4334 were
 * lost when hyperlinked identifiers were stripped from this listing;
 * restored from the source file.
 */
4166 {
4167  TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4168  PGPROC *proc = TwoPhaseGetDummyProc(xid, false);
4169  LOCKTAG *locktag;
4170  LOCKMODE lockmode;
4171  LOCKMETHODID lockmethodid;
4172  LOCK *lock;
4173  PROCLOCK *proclock;
4174  PROCLOCKTAG proclocktag;
4175  bool found;
4176  uint32 hashcode;
4177  uint32 proclock_hashcode;
4178  int partition;
4179  LWLock *partitionLock;
4180  LockMethod lockMethodTable;
4181 
4182  Assert(len == sizeof(TwoPhaseLockRecord));
4183  locktag = &rec->locktag;
4184  lockmode = rec->lockmode;
4185  lockmethodid = locktag->locktag_lockmethodid;
4186 
4187  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4188  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4189  lockMethodTable = LockMethods[lockmethodid];
4190 
4191  hashcode = LockTagHashCode(locktag);
4192  partition = LockHashPartition(hashcode);
4193  partitionLock = LockHashPartitionLock(hashcode);
4194 
4195  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4196 
4197  /*
4198  * Find or create a lock with this tag.
4199  */
4200  lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
4201  locktag,
4202  hashcode,
4203  HASH_ENTER_NULL,
4204  &found);
4205  if (!lock)
4206  {
4207  LWLockRelease(partitionLock);
4208  ereport(ERROR,
4209  (errcode(ERRCODE_OUT_OF_MEMORY),
4210  errmsg("out of shared memory"),
4211  errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
4212  }
4213 
4214  /*
4215  * if it's a new lock object, initialize it
4216  */
4217  if (!found)
4218  {
4219  lock->grantMask = 0;
4220  lock->waitMask = 0;
4221  dlist_init(&lock->procLocks);
4222  dclist_init(&lock->waitProcs);
4223  lock->nRequested = 0;
4224  lock->nGranted = 0;
4225  MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
4226  MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
4227  LOCK_PRINT("lock_twophase_recover: new", lock, lockmode);
4228  }
4229  else
4230  {
4231  LOCK_PRINT("lock_twophase_recover: found", lock, lockmode);
4232  Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
4233  Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
4234  Assert(lock->nGranted <= lock->nRequested);
4235  }
4236 
4237  /*
4238  * Create the hash key for the proclock table.
4239  */
4240  proclocktag.myLock = lock;
4241  proclocktag.myProc = proc;
4242 
4243  proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
4244 
4245  /*
4246  * Find or create a proclock entry with this tag
4247  */
4248  proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
4249  &proclocktag,
4250  proclock_hashcode,
4251  HASH_ENTER_NULL,
4252  &found);
4253  if (!proclock)
4254  {
4255  /* Oops, not enough shmem for the proclock */
4256  if (lock->nRequested == 0)
4257  {
4258  /*
4259  * There are no other requestors of this lock, so garbage-collect
4260  * the lock object. We *must* do this to avoid a permanent leak
4261  * of shared memory, because there won't be anything to cause
4262  * anyone to release the lock object later.
4263  */
4264  Assert(dlist_is_empty(&lock->procLocks));
4265  if (!hash_search_with_hash_value(LockMethodLockHash,
4266  &(lock->tag),
4267  hashcode,
4268  HASH_REMOVE,
4269  NULL))
4270  elog(PANIC, "lock table corrupted");
4271  }
4272  LWLockRelease(partitionLock);
4273  ereport(ERROR,
4274  (errcode(ERRCODE_OUT_OF_MEMORY),
4275  errmsg("out of shared memory"),
4276  errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
4277  }
4278 
4279  /*
4280  * If new, initialize the new entry
4281  */
4282  if (!found)
4283  {
4284  Assert(proc->lockGroupLeader == NULL);
4285  proclock->groupLeader = proc;
4286  proclock->holdMask = 0;
4287  proclock->releaseMask = 0;
4288  /* Add proclock to appropriate lists */
4289  dlist_push_tail(&lock->procLocks, &proclock->lockLink);
4290  dlist_push_tail(&proc->myProcLocks[partition],
4291  &proclock->procLink);
4292  PROCLOCK_PRINT("lock_twophase_recover: new", proclock);
4293  }
4294  else
4295  {
4296  PROCLOCK_PRINT("lock_twophase_recover: found", proclock);
4297  Assert((proclock->holdMask & ~lock->grantMask) == 0);
4298  }
4299 
4300  /*
4301  * lock->nRequested and lock->requested[] count the total number of
4302  * requests, whether granted or waiting, so increment those immediately.
4303  */
4304  lock->nRequested++;
4305  lock->requested[lockmode]++;
4306  Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
4307 
4308  /*
4309  * We shouldn't already hold the desired lock.
4310  */
4311  if (proclock->holdMask & LOCKBIT_ON(lockmode))
4312  elog(ERROR, "lock %s on object %u/%u/%u is already held",
4313  lockMethodTable->lockModeNames[lockmode],
4314  lock->tag.locktag_field1, lock->tag.locktag_field2,
4315  lock->tag.locktag_field3);
4316 
4317  /*
4318  * We ignore any possible conflicts and just grant ourselves the lock. Not
4319  * only because we don't bother, but also to avoid deadlocks when
4320  * switching from standby to normal mode. See function comment.
4321  */
4322  GrantLock(lock, proclock, lockmode);
4323 
4324  /*
4325  * Bump strong lock count, to make sure any fast-path lock requests won't
4326  * be granted without consulting the primary lock table.
4327  */
4328  if (ConflictsWithRelationFastPath(&lock->tag, lockmode))
4329  {
4330  uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
4331 
4332  SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
4333  FastPathStrongRelationLocks->count[fasthashcode]++;
4334  SpinLockRelease(&FastPathStrongRelationLocks->mutex);
4335  }
4336 
4337  LWLockRelease(partitionLock);
4338 }
#define MemSet(start, val, len)
Definition: c.h:1020
@ HASH_ENTER_NULL
Definition: hsearch.h:116
static void dlist_init(dlist_head *head)
Definition: ilist.h:314
static void dlist_push_tail(dlist_head *head, dlist_node *node)
Definition: ilist.h:364
static void dclist_init(dclist_head *head)
Definition: ilist.h:671
#define MAX_LOCKMODES
Definition: lock.h:82
#define LockHashPartition(hashcode)
Definition: lock.h:524
int LOCKMODE
Definition: lockdefs.h:26
uint32 locktag_field3
Definition: lock.h:168
dlist_head myProcLocks[NUM_LOCK_PARTITIONS]
Definition: proc.h:256
LOCKMASK releaseMask
Definition: lock.h:377

References Assert, ConflictsWithRelationFastPath, FastPathStrongRelationLockData::count, dclist_init(), dlist_init(), dlist_is_empty(), dlist_push_tail(), elog, ereport, errcode(), errhint(), errmsg(), ERROR, FastPathStrongLockHashPartition, FastPathStrongRelationLocks, LOCK::granted, GrantLock(), LOCK::grantMask, PROCLOCK::groupLeader, HASH_ENTER_NULL, HASH_REMOVE, hash_search_with_hash_value(), PROCLOCK::holdMask, len, lengthof, LOCK_PRINT, LOCKBIT_ON, PGPROC::lockGroupLeader, LockHashPartition, LockHashPartitionLock, PROCLOCK::lockLink, LockMethodLockHash, LockMethodProcLockHash, LockMethods, TwoPhaseLockRecord::lockmode, LockMethodData::lockModeNames, TwoPhaseLockRecord::locktag, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_field3, LOCKTAG::locktag_lockmethodid, LockTagHashCode(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), MAX_LOCKMODES, MemSet, FastPathStrongRelationLockData::mutex, PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, PGPROC::myProcLocks, LOCK::nGranted, LOCK::nRequested, PANIC, PROCLOCK::procLink, PROCLOCK_PRINT, ProcLockHashCode(), LOCK::procLocks, PROCLOCK::releaseMask, LOCK::requested, SpinLockAcquire, SpinLockRelease, LOCK::tag, TwoPhaseGetDummyProc(), LOCK::waitMask, and LOCK::waitProcs.

◆ lock_twophase_standby_recover()

void lock_twophase_standby_recover ( TransactionId  xid,
uint16  info,
void *  recdata,
uint32  len 
)

Definition at line 4345 of file lock.c.

/*
 * lock_twophase_standby_recover -- 2PC standby-recovery callback: for a
 * prepared transaction's AccessExclusiveLock on a relation, take the
 * equivalent standby lock.  Other lock types need no standby action.
 *
 * NOTE(review): line 4364 was lost when hyperlinked identifiers were
 * stripped from this listing; restored from the source file.
 */
4347 {
4348  TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4349  LOCKTAG *locktag;
4350  LOCKMODE lockmode;
4351  LOCKMETHODID lockmethodid;
4352 
4353  Assert(len == sizeof(TwoPhaseLockRecord));
4354  locktag = &rec->locktag;
4355  lockmode = rec->lockmode;
4356  lockmethodid = locktag->locktag_lockmethodid;
4357 
4358  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4359  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4360 
4361  if (lockmode == AccessExclusiveLock &&
4362  locktag->locktag_type == LOCKTAG_RELATION)
4363  {
4364  StandbyAcquireAccessExclusiveLock(xid,
4365  locktag->locktag_field1 /* dboid */ ,
4366  locktag->locktag_field2 /* reloid */ );
4367  }
4368 }
void StandbyAcquireAccessExclusiveLock(TransactionId xid, Oid dbOid, Oid relOid)
Definition: standby.c:985

References AccessExclusiveLock, Assert, elog, ERROR, len, lengthof, LockMethods, TwoPhaseLockRecord::lockmode, TwoPhaseLockRecord::locktag, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_lockmethodid, LOCKTAG_RELATION, LOCKTAG::locktag_type, and StandbyAcquireAccessExclusiveLock().

◆ LockAcquire()

LockAcquireResult LockAcquire ( const LOCKTAG locktag,
LOCKMODE  lockmode,
bool  sessionLock,
bool  dontWait 
)

◆ LockAcquireExtended()

LockAcquireResult LockAcquireExtended ( const LOCKTAG locktag,
LOCKMODE  lockmode,
bool  sessionLock,
bool  dontWait,
bool  reportMemoryError,
LOCALLOCK **  locallockp 
)

Definition at line 780 of file lock.c.

/*
 * LockAcquireExtended -- acquire `lockmode` on `locktag`, trying the
 * backend-local table, then the per-backend fast path, then the shared
 * lock table (possibly sleeping unless dontWait).  Returns a
 * LockAcquireResult; on failure paths may return LOCKACQUIRE_NOT_AVAIL or
 * report an out-of-shared-memory error depending on reportMemoryError.
 *
 * NOTE(review): lines 855, 886, 888, 913, 915, 930, 941, 947, 976, 1013,
 * 1090, 1107, 1150 and 1165 were lost when hyperlinked identifiers were
 * stripped from this listing; restored from the source file.
 */
786 {
787  LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
788  LockMethod lockMethodTable;
789  LOCALLOCKTAG localtag;
790  LOCALLOCK *locallock;
791  LOCK *lock;
792  PROCLOCK *proclock;
793  bool found;
794  ResourceOwner owner;
795  uint32 hashcode;
796  LWLock *partitionLock;
797  bool found_conflict;
798  bool log_lock = false;
799 
800  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
801  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
802  lockMethodTable = LockMethods[lockmethodid];
803  if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
804  elog(ERROR, "unrecognized lock mode: %d", lockmode);
805 
806  if (RecoveryInProgress() && !InRecovery &&
807  (locktag->locktag_type == LOCKTAG_OBJECT ||
808  locktag->locktag_type == LOCKTAG_RELATION) &&
809  lockmode > RowExclusiveLock)
810  ereport(ERROR,
811  (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
812  errmsg("cannot acquire lock mode %s on database objects while recovery is in progress",
813  lockMethodTable->lockModeNames[lockmode]),
814  errhint("Only RowExclusiveLock or less can be acquired on database objects during recovery.")));
815 
816 #ifdef LOCK_DEBUG
817  if (LOCK_DEBUG_ENABLED(locktag))
818  elog(LOG, "LockAcquire: lock [%u,%u] %s",
819  locktag->locktag_field1, locktag->locktag_field2,
820  lockMethodTable->lockModeNames[lockmode]);
821 #endif
822 
823  /* Identify owner for lock */
824  if (sessionLock)
825  owner = NULL;
826  else
827  owner = CurrentResourceOwner;
828 
829  /*
830  * Find or create a LOCALLOCK entry for this lock and lockmode
831  */
832  MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
833  localtag.lock = *locktag;
834  localtag.mode = lockmode;
835 
836  locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
837  &localtag,
838  HASH_ENTER, &found);
839 
840  /*
841  * if it's a new locallock object, initialize it
842  */
843  if (!found)
844  {
845  locallock->lock = NULL;
846  locallock->proclock = NULL;
847  locallock->hashcode = LockTagHashCode(&(localtag.lock));
848  locallock->nLocks = 0;
849  locallock->holdsStrongLockCount = false;
850  locallock->lockCleared = false;
851  locallock->numLockOwners = 0;
852  locallock->maxLockOwners = 8;
853  locallock->lockOwners = NULL; /* in case next line fails */
854  locallock->lockOwners = (LOCALLOCKOWNER *)
855  MemoryContextAlloc(TopMemoryContext,
856  locallock->maxLockOwners * sizeof(LOCALLOCKOWNER));
857  }
858  else
859  {
860  /* Make sure there will be room to remember the lock */
861  if (locallock->numLockOwners >= locallock->maxLockOwners)
862  {
863  int newsize = locallock->maxLockOwners * 2;
864 
865  locallock->lockOwners = (LOCALLOCKOWNER *)
866  repalloc(locallock->lockOwners,
867  newsize * sizeof(LOCALLOCKOWNER));
868  locallock->maxLockOwners = newsize;
869  }
870  }
871  hashcode = locallock->hashcode;
872 
873  if (locallockp)
874  *locallockp = locallock;
875 
876  /*
877  * If we already hold the lock, we can just increase the count locally.
878  *
879  * If lockCleared is already set, caller need not worry about absorbing
880  * sinval messages related to the lock's object.
881  */
882  if (locallock->nLocks > 0)
883  {
884  GrantLockLocal(locallock, owner);
885  if (locallock->lockCleared)
886  return LOCKACQUIRE_ALREADY_CLEAR;
887  else
888  return LOCKACQUIRE_ALREADY_HELD;
889  }
890 
891  /*
892  * We don't acquire any other heavyweight lock while holding the relation
893  * extension lock. We do allow to acquire the same relation extension
894  * lock more than once but that case won't reach here.
895  */
896  Assert(!IsRelationExtensionLockHeld);
897 
898  /*
899  * Prepare to emit a WAL record if acquisition of this lock needs to be
900  * replayed in a standby server.
901  *
902  * Here we prepare to log; after lock is acquired we'll issue log record.
903  * This arrangement simplifies error recovery in case the preparation step
904  * fails.
905  *
906  * Only AccessExclusiveLocks can conflict with lock types that read-only
907  * transactions can acquire in a standby server. Make sure this definition
908  * matches the one in GetRunningTransactionLocks().
909  */
910  if (lockmode >= AccessExclusiveLock &&
911  locktag->locktag_type == LOCKTAG_RELATION &&
912  !RecoveryInProgress() &&
913  XLogStandbyInfoActive())
914  {
915  LogAccessExclusiveLockPrepare();
916  log_lock = true;
917  }
918 
919  /*
920  * Attempt to take lock via fast path, if eligible. But if we remember
921  * having filled up the fast path array, we don't attempt to make any
922  * further use of it until we release some locks. It's possible that some
923  * other backend has transferred some of those locks to the shared hash
924  * table, leaving space free, but it's not worth acquiring the LWLock just
925  * to check. It's also possible that we're acquiring a second or third
926  * lock type on a relation we have already locked using the fast-path, but
927  * for now we don't worry about that case either.
928  */
929  if (EligibleForRelationFastPath(locktag, lockmode) &&
930  FastPathLocalUseCount < FP_LOCK_SLOTS_PER_BACKEND)
931  {
932  uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
933  bool acquired;
934 
935  /*
936  * LWLockAcquire acts as a memory sequencing point, so it's safe to
937  * assume that any strong locker whose increment to
938  * FastPathStrongRelationLocks->counts becomes visible after we test
939  * it has yet to begin to transfer fast-path locks.
940  */
941  LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
942  if (FastPathStrongRelationLocks->count[fasthashcode] != 0)
943  acquired = false;
944  else
945  acquired = FastPathGrantRelationLock(locktag->locktag_field2,
946  lockmode);
947  LWLockRelease(&MyProc->fpInfoLock);
948  if (acquired)
949  {
950  /*
951  * The locallock might contain stale pointers to some old shared
952  * objects; we MUST reset these to null before considering the
953  * lock to be acquired via fast-path.
954  */
955  locallock->lock = NULL;
956  locallock->proclock = NULL;
957  GrantLockLocal(locallock, owner);
958  return LOCKACQUIRE_OK;
959  }
960  }
961 
962  /*
963  * If this lock could potentially have been taken via the fast-path by
964  * some other backend, we must (temporarily) disable further use of the
965  * fast-path for this lock tag, and migrate any locks already taken via
966  * this method to the main lock table.
967  */
968  if (ConflictsWithRelationFastPath(locktag, lockmode))
969  {
970  uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
971 
972  BeginStrongLockAcquire(locallock, fasthashcode);
973  if (!FastPathTransferRelationLocks(lockMethodTable, locktag,
974  hashcode))
975  {
976  AbortStrongLockAcquire();
977  if (locallock->nLocks == 0)
978  RemoveLocalLock(locallock);
979  if (locallockp)
980  *locallockp = NULL;
981  if (reportMemoryError)
982  ereport(ERROR,
983  (errcode(ERRCODE_OUT_OF_MEMORY),
984  errmsg("out of shared memory"),
985  errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
986  else
987  return LOCKACQUIRE_NOT_AVAIL;
988  }
989  }
990 
991  /*
992  * We didn't find the lock in our LOCALLOCK table, and we didn't manage to
993  * take it via the fast-path, either, so we've got to mess with the shared
994  * lock table.
995  */
996  partitionLock = LockHashPartitionLock(hashcode);
997 
998  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
999 
1000  /*
1001  * Find or create lock and proclock entries with this tag
1002  *
1003  * Note: if the locallock object already existed, it might have a pointer
1004  * to the lock already ... but we should not assume that that pointer is
1005  * valid, since a lock object with zero hold and request counts can go
1006  * away anytime. So we have to use SetupLockInTable() to recompute the
1007  * lock and proclock pointers, even if they're already set.
1008  */
1009  proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
1010  hashcode, lockmode);
1011  if (!proclock)
1012  {
1013  AbortStrongLockAcquire();
1014  LWLockRelease(partitionLock);
1015  if (locallock->nLocks == 0)
1016  RemoveLocalLock(locallock);
1017  if (locallockp)
1018  *locallockp = NULL;
1019  if (reportMemoryError)
1020  ereport(ERROR,
1021  (errcode(ERRCODE_OUT_OF_MEMORY),
1022  errmsg("out of shared memory"),
1023  errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
1024  else
1025  return LOCKACQUIRE_NOT_AVAIL;
1026  }
1027  locallock->proclock = proclock;
1028  lock = proclock->tag.myLock;
1029  locallock->lock = lock;
1030 
1031  /*
1032  * If lock requested conflicts with locks requested by waiters, must join
1033  * wait queue. Otherwise, check for conflict with already-held locks.
1034  * (That's last because most complex check.)
1035  */
1036  if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
1037  found_conflict = true;
1038  else
1039  found_conflict = LockCheckConflicts(lockMethodTable, lockmode,
1040  lock, proclock);
1041 
1042  if (!found_conflict)
1043  {
1044  /* No conflict with held or previously requested locks */
1045  GrantLock(lock, proclock, lockmode);
1046  GrantLockLocal(locallock, owner);
1047  }
1048  else
1049  {
1050  /*
1051  * Set bitmask of locks this process already holds on this object.
1052  */
1053  MyProc->heldLocks = proclock->holdMask;
1054 
1055  /*
1056  * Sleep till someone wakes me up. We do this even in the dontWait
1057  * case, because while trying to go to sleep, we may discover that we
1058  * can acquire the lock immediately after all.
1059  */
1060 
1061  TRACE_POSTGRESQL_LOCK_WAIT_START(locktag->locktag_field1,
1062  locktag->locktag_field2,
1063  locktag->locktag_field3,
1064  locktag->locktag_field4,
1065  locktag->locktag_type,
1066  lockmode);
1067 
1068  WaitOnLock(locallock, owner, dontWait);
1069 
1070  TRACE_POSTGRESQL_LOCK_WAIT_DONE(locktag->locktag_field1,
1071  locktag->locktag_field2,
1072  locktag->locktag_field3,
1073  locktag->locktag_field4,
1074  locktag->locktag_type,
1075  lockmode);
1076 
1077  /*
1078  * NOTE: do not do any material change of state between here and
1079  * return. All required changes in locktable state must have been
1080  * done when the lock was granted to us --- see notes in WaitOnLock.
1081  */
1082 
1083  /*
1084  * Check the proclock entry status. If dontWait = true, this is an
1085  * expected case; otherwise, it will only happen if something in the
1086  * ipc communication doesn't work correctly.
1087  */
1088  if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
1089  {
1090  AbortStrongLockAcquire();
1091 
1092  if (dontWait)
1093  {
1094  /*
1095  * We can't acquire the lock immediately. If caller specified
1096  * no blocking, remove useless table entries and return
1097  * LOCKACQUIRE_NOT_AVAIL without waiting.
1098  */
1099  if (proclock->holdMask == 0)
1100  {
1101  uint32 proclock_hashcode;
1102 
1103  proclock_hashcode = ProcLockHashCode(&proclock->tag,
1104  hashcode);
1105  dlist_delete(&proclock->lockLink);
1106  dlist_delete(&proclock->procLink);
1107  if (!hash_search_with_hash_value(LockMethodProcLockHash,
1108  &(proclock->tag),
1109  proclock_hashcode,
1110  HASH_REMOVE,
1111  NULL))
1112  elog(PANIC, "proclock table corrupted");
1113  }
1114  else
1115  PROCLOCK_PRINT("LockAcquire: NOWAIT", proclock);
1116  lock->nRequested--;
1117  lock->requested[lockmode]--;
1118  LOCK_PRINT("LockAcquire: conditional lock failed",
1119  lock, lockmode);
1120  Assert((lock->nRequested > 0) &&
1121  (lock->requested[lockmode] >= 0));
1122  Assert(lock->nGranted <= lock->nRequested);
1123  LWLockRelease(partitionLock);
1124  if (locallock->nLocks == 0)
1125  RemoveLocalLock(locallock);
1126  if (locallockp)
1127  *locallockp = NULL;
1128  return LOCKACQUIRE_NOT_AVAIL;
1129  }
1130  else
1131  {
1132  /*
1133  * We should have gotten the lock, but somehow that didn't
1134  * happen. If we get here, it's a bug.
1135  */
1136  PROCLOCK_PRINT("LockAcquire: INCONSISTENT", proclock);
1137  LOCK_PRINT("LockAcquire: INCONSISTENT", lock, lockmode);
1138  LWLockRelease(partitionLock);
1139  elog(ERROR, "LockAcquire failed");
1140  }
1141  }
1142  PROCLOCK_PRINT("LockAcquire: granted", proclock);
1143  LOCK_PRINT("LockAcquire: granted", lock, lockmode);
1144  }
1145 
1146  /*
1147  * Lock state is fully up-to-date now; if we error out after this, no
1148  * special error cleanup is required.
1149  */
1150  FinishStrongLockAcquire();
1151 
1152  LWLockRelease(partitionLock);
1153 
1154  /*
1155  * Emit a WAL record if acquisition of this lock needs to be replayed in a
1156  * standby server.
1157  */
1158  if (log_lock)
1159  {
1160  /*
1161  * Decode the locktag back to the original values, to avoid sending
1162  * lots of empty bytes with every message. See lock.h to check how a
1163  * locktag is defined for LOCKTAG_RELATION
1164  */
1165  LogAccessExclusiveLock(locktag->locktag_field1,
1166  locktag->locktag_field2);
1167  }
1168 
1169  return LOCKACQUIRE_OK;
1170 }
#define LOG
Definition: elog.h:31
static void RemoveLocalLock(LOCALLOCK *locallock)
Definition: lock.c:1376
static bool FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag, uint32 hashcode)
Definition: lock.c:2702
static void WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner, bool dontWait)
Definition: lock.c:1818
void AbortStrongLockAcquire(void)
Definition: lock.c:1760
static bool FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode)
Definition: lock.c:2635
#define EligibleForRelationFastPath(locktag, mode)
Definition: lock.c:213
static void BeginStrongLockAcquire(LOCALLOCK *locallock, uint32 fasthashcode)
Definition: lock.c:1724
bool LockCheckConflicts(LockMethod lockMethodTable, LOCKMODE lockmode, LOCK *lock, PROCLOCK *proclock)
Definition: lock.c:1429
static void FinishStrongLockAcquire(void)
Definition: lock.c:1750
@ LOCKTAG_OBJECT
Definition: lock.h:145
@ LOCKACQUIRE_ALREADY_CLEAR
Definition: lock.h:504
@ LOCKACQUIRE_OK
Definition: lock.h:502
@ LOCKACQUIRE_ALREADY_HELD
Definition: lock.h:503
@ LOCKACQUIRE_NOT_AVAIL
Definition: lock.h:501
#define RowExclusiveLock
Definition: lockdefs.h:38
ResourceOwner CurrentResourceOwner
Definition: resowner.c:165
void LogAccessExclusiveLockPrepare(void)
Definition: standby.c:1440
void LogAccessExclusiveLock(Oid dbOid, Oid relOid)
Definition: standby.c:1423
bool lockCleared
Definition: lock.h:440
uint16 locktag_field4
Definition: lock.h:169
LOCKMASK heldLocks
Definition: proc.h:230
bool RecoveryInProgress(void)
Definition: xlog.c:6304
#define XLogStandbyInfoActive()
Definition: xlog.h:123
bool InRecovery
Definition: xlogutils.c:50

References AbortStrongLockAcquire(), AccessExclusiveLock, Assert, BeginStrongLockAcquire(), ConflictsWithRelationFastPath, LockMethodData::conflictTab, FastPathStrongRelationLockData::count, CurrentResourceOwner, dlist_delete(), EligibleForRelationFastPath, elog, ereport, errcode(), errhint(), errmsg(), ERROR, FastPathGrantRelationLock(), FastPathLocalUseCount, FastPathStrongLockHashPartition, FastPathStrongRelationLocks, FastPathTransferRelationLocks(), FinishStrongLockAcquire(), FP_LOCK_SLOTS_PER_BACKEND, PGPROC::fpInfoLock, GrantLock(), GrantLockLocal(), HASH_ENTER, HASH_REMOVE, hash_search(), hash_search_with_hash_value(), LOCALLOCK::hashcode, PGPROC::heldLocks, PROCLOCK::holdMask, LOCALLOCK::holdsStrongLockCount, InRecovery, lengthof, LOCALLOCKTAG::lock, LOCALLOCK::lock, LOCK_PRINT, LOCKACQUIRE_ALREADY_CLEAR, LOCKACQUIRE_ALREADY_HELD, LOCKACQUIRE_NOT_AVAIL, LOCKACQUIRE_OK, LOCKBIT_ON, LockCheckConflicts(), LOCALLOCK::lockCleared, LockHashPartitionLock, PROCLOCK::lockLink, LockMethodLocalHash, LockMethodProcLockHash, LockMethods, LockMethodData::lockModeNames, LOCALLOCK::lockOwners, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_field3, LOCKTAG::locktag_field4, LOCKTAG::locktag_lockmethodid, LOCKTAG_OBJECT, LOCKTAG_RELATION, LOCKTAG::locktag_type, LockTagHashCode(), LOG, LogAccessExclusiveLock(), LogAccessExclusiveLockPrepare(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), LOCALLOCK::maxLockOwners, MemoryContextAlloc(), MemSet, LOCALLOCKTAG::mode, PROCLOCKTAG::myLock, MyProc, LOCK::nGranted, LOCALLOCK::nLocks, LOCK::nRequested, LockMethodData::numLockModes, LOCALLOCK::numLockOwners, PANIC, PROCLOCK::procLink, LOCALLOCK::proclock, PROCLOCK_PRINT, ProcLockHashCode(), RecoveryInProgress(), RemoveLocalLock(), repalloc(), LOCK::requested, RowExclusiveLock, SetupLockInTable(), PROCLOCK::tag, TopMemoryContext, LOCK::waitMask, WaitOnLock(), and XLogStandbyInfoActive.

Referenced by ConditionalLockDatabaseObject(), ConditionalLockRelation(), ConditionalLockRelationOid(), ConditionalLockSharedObject(), LockAcquire(), LockRelation(), LockRelationId(), and LockRelationOid().

◆ LockCheckConflicts()

bool LockCheckConflicts ( LockMethod  lockMethodTable,
LOCKMODE  lockmode,
LOCK lock,
PROCLOCK proclock 
)

Definition at line 1429 of file lock.c.

/*
 * LockCheckConflicts -- test whether requested lock 'lockmode' on 'lock'
 * conflicts with locks already granted to other processes.
 *
 * Returns true if there is a real conflict (caller must wait), false if the
 * request can be granted.  Locks held by this backend itself, and locks held
 * by other members of the same lock group, are subtracted out before deciding
 * — except for relation-extension locks, which conflict even within a group.
 */
1433 {
1434  int numLockModes = lockMethodTable->numLockModes;
1435  LOCKMASK myLocks;
1436  int conflictMask = lockMethodTable->conflictTab[lockmode];
1437  int conflictsRemaining[MAX_LOCKMODES];
1438  int totalConflictsRemaining = 0;
1439  dlist_iter proclock_iter;
1440  int i;
1441 
1442  /*
1443  * first check for global conflicts: If no locks conflict with my request,
1444  * then I get the lock.
1445  *
1446  * Checking for conflict: lock->grantMask represents the types of
1447  * currently held locks. conflictTable[lockmode] has a bit set for each
1448  * type of lock that conflicts with request. Bitwise compare tells if
1449  * there is a conflict.
1450  */
1451  if (!(conflictMask & lock->grantMask))
1452  {
1453  PROCLOCK_PRINT("LockCheckConflicts: no conflict", proclock);
1454  return false;
1455  }
1456 
1457  /*
1458  * Rats. Something conflicts. But it could still be my own lock, or a
1459  * lock held by another member of my locking group. First, figure out how
1460  * many conflicts remain after subtracting out any locks I hold myself.
1461  */
1462  myLocks = proclock->holdMask;
1463  for (i = 1; i <= numLockModes; i++)
1464  {
1465  if ((conflictMask & LOCKBIT_ON(i)) == 0)
1466  {
1467  conflictsRemaining[i] = 0;
1468  continue;
1469  }
/* granted[i] counts grantees of mode i; at most one of them can be us */
1470  conflictsRemaining[i] = lock->granted[i];
1471  if (myLocks & LOCKBIT_ON(i))
1472  --conflictsRemaining[i];
1473  totalConflictsRemaining += conflictsRemaining[i];
1474  }
1475 
1476  /* If no conflicts remain, we get the lock. */
1477  if (totalConflictsRemaining == 0)
1478  {
1479  PROCLOCK_PRINT("LockCheckConflicts: resolved (simple)", proclock);
1480  return false;
1481  }
1482 
1483  /* If no group locking, it's definitely a conflict. */
1484  if (proclock->groupLeader == MyProc && MyProc->lockGroupLeader == NULL)
1485  {
1486  Assert(proclock->tag.myProc == MyProc);
1487  PROCLOCK_PRINT("LockCheckConflicts: conflicting (simple)",
1488  proclock);
1489  return true;
1490  }
1491 
1492  /*
1493  * The relation extension lock conflicts even between members of the same
1493  * lock group, so don't try to subtract group-held locks for it.
1494  */
1495  if (LOCK_LOCKTAG(*lock) == LOCKTAG_RELATION_EXTEND)
1496  {
1497  PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)",
1498  proclock);
1499  return true;
1500  }
1501 
1502  /*
1503  * Locks held in conflicting modes by members of our own lock group are
1504  * not real conflicts; we can subtract those out and see if we still have
1505  * a conflict. This is O(N) in the number of processes holding or
1506  * awaiting locks on this object. We could improve that by making the
1507  * shared memory state more complex (and larger) but it doesn't seem worth
1508  * it.
1509  */
1510  dlist_foreach(proclock_iter, &lock->procLocks)
1511  {
1512  PROCLOCK *otherproclock =
1513  dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
1514 
1515  if (proclock != otherproclock &&
1516  proclock->groupLeader == otherproclock->groupLeader &&
1517  (otherproclock->holdMask & conflictMask) != 0)
1518  {
1519  int intersectMask = otherproclock->holdMask & conflictMask;
1520 
1521  for (i = 1; i <= numLockModes; i++)
1522  {
1523  if ((intersectMask & LOCKBIT_ON(i)) != 0)
1524  {
/* sanity check: we can't subtract more holders than were counted */
1525  if (conflictsRemaining[i] <= 0)
1526  elog(PANIC, "proclocks held do not match lock");
1527  conflictsRemaining[i]--;
1528  totalConflictsRemaining--;
1529  }
1530  }
1531 
1532  if (totalConflictsRemaining == 0)
1533  {
1534  PROCLOCK_PRINT("LockCheckConflicts: resolved (group)",
1535  proclock);
1536  return false;
1537  }
1538  }
1539  }
1540 
1541  /* Nope, it's a real conflict. */
1542  PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)", proclock);
1543  return true;
1544 }
#define LOCK_LOCKTAG(lock)
Definition: lock.h:325

References Assert, LockMethodData::conflictTab, dlist_iter::cur, dlist_container, dlist_foreach, elog, LOCK::granted, LOCK::grantMask, PROCLOCK::groupLeader, PROCLOCK::holdMask, i, LOCK_LOCKTAG, LOCKBIT_ON, PGPROC::lockGroupLeader, LOCKTAG_RELATION_EXTEND, MAX_LOCKMODES, MyProc, PROCLOCKTAG::myProc, LockMethodData::numLockModes, PANIC, PROCLOCK_PRINT, LOCK::procLocks, and PROCLOCK::tag.

Referenced by LockAcquireExtended(), ProcLockWakeup(), and ProcSleep().

◆ LockHasWaiters()

bool LockHasWaiters ( const LOCKTAG locktag,
LOCKMODE  lockmode,
bool  sessionLock 
)

Definition at line 643 of file lock.c.

/*
 * LockHasWaiters -- report whether any other backend is waiting on a lock
 * that this backend currently holds in mode 'lockmode'.
 *
 * Returns true if the lock's waitMask conflicts with the given mode; false
 * otherwise, or (with a WARNING, not an ERROR) if we don't actually hold the
 * lock.  Only reads shared state, so the partition lock is taken LW_SHARED.
 */
644 {
645  LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
646  LockMethod lockMethodTable;
647  LOCALLOCKTAG localtag;
648  LOCALLOCK *locallock;
649  LOCK *lock;
650  PROCLOCK *proclock;
651  LWLock *partitionLock;
652  bool hasWaiters = false;
653 
654  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
655  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
656  lockMethodTable = LockMethods[lockmethodid];
657  if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
658  elog(ERROR, "unrecognized lock mode: %d", lockmode);
659 
660 #ifdef LOCK_DEBUG
661  if (LOCK_DEBUG_ENABLED(locktag))
662  elog(LOG, "LockHasWaiters: lock [%u,%u] %s",
663  locktag->locktag_field1, locktag->locktag_field2,
664  lockMethodTable->lockModeNames[lockmode]);
665 #endif
666 
667  /*
668  * Find the LOCALLOCK entry for this lock and lockmode
669  */
670  MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
671  localtag.lock = *locktag;
672  localtag.mode = lockmode;
673 
674  locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
675  &localtag,
676  HASH_FIND, NULL);
677 
678  /*
679  * let the caller print its own error message, too. Do not ereport(ERROR).
680  */
681  if (!locallock || locallock->nLocks <= 0)
682  {
683  elog(WARNING, "you don't own a lock of type %s",
684  lockMethodTable->lockModeNames[lockmode]);
685  return false;
686  }
687 
688  /*
689  * Check the shared lock table.
690  */
691  partitionLock = LockHashPartitionLock(locallock->hashcode);
692 
693  LWLockAcquire(partitionLock, LW_SHARED);
694 
695  /*
696  * We don't need to re-find the lock or proclock, since we kept their
697  * addresses in the locallock table, and they couldn't have been removed
698  * while we were holding a lock on them.
699  */
700  lock = locallock->lock;
701  LOCK_PRINT("LockHasWaiters: found", lock, lockmode);
702  proclock = locallock->proclock;
703  PROCLOCK_PRINT("LockHasWaiters: found", proclock);
704 
705  /*
706  * Double-check that we are actually holding a lock of the type we want to
707  * release.
708  */
709  if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
710  {
711  PROCLOCK_PRINT("LockHasWaiters: WRONGTYPE", proclock);
712  LWLockRelease(partitionLock);
713  elog(WARNING, "you don't own a lock of type %s",
714  lockMethodTable->lockModeNames[lockmode]);
/* stale LOCALLOCK entry: drop it so later calls don't trip over it */
715  RemoveLocalLock(locallock);
716  return false;
717  }
718 
719  /*
720  * Do the checking.
721  */
/* someone is waiting iff a waiter's requested mode conflicts with ours */
722  if ((lockMethodTable->conflictTab[lockmode] & lock->waitMask) != 0)
723  hasWaiters = true;
724 
725  LWLockRelease(partitionLock);
726 
727  return hasWaiters;
728 }
#define WARNING
Definition: elog.h:36

References LockMethodData::conflictTab, elog, ERROR, HASH_FIND, hash_search(), LOCALLOCK::hashcode, PROCLOCK::holdMask, lengthof, LOCALLOCKTAG::lock, LOCALLOCK::lock, LOCK_PRINT, LOCKBIT_ON, LockHashPartitionLock, LockMethodLocalHash, LockMethods, LockMethodData::lockModeNames, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_lockmethodid, LOG, LW_SHARED, LWLockAcquire(), LWLockRelease(), MemSet, LOCALLOCKTAG::mode, LOCALLOCK::nLocks, LockMethodData::numLockModes, LOCALLOCK::proclock, PROCLOCK_PRINT, RemoveLocalLock(), LOCK::waitMask, and WARNING.

Referenced by LockHasWaitersRelation().

◆ LockHeldByMe()

bool LockHeldByMe ( const LOCKTAG locktag,
LOCKMODE  lockmode,
bool  orstronger 
)

Definition at line 590 of file lock.c.

/*
 * LockHeldByMe -- report whether this backend holds the given lock in the
 * given mode (or, if 'orstronger' is true, in that mode or any stronger one).
 *
 * This consults only the backend-local LockMethodLocalHash table; it never
 * touches the shared lock table, so it is cheap and cannot block.
 */
592 {
593  LOCALLOCKTAG localtag;
594  LOCALLOCK *locallock;
595 
596  /*
597  * See if there is a LOCALLOCK entry for this lock and lockmode
598  */
599  MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
600  localtag.lock = *locktag;
601  localtag.mode = lockmode;
602 
603  locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
604  &localtag,
605  HASH_FIND, NULL);
606 
607  if (locallock && locallock->nLocks > 0)
608  return true;
609 
610  if (orstronger)
611  {
612  LOCKMODE slockmode;
613 
/* recurse once per stronger mode; recursion depth is bounded by MaxLockMode */
614  for (slockmode = lockmode + 1;
615  slockmode <= MaxLockMode;
616  slockmode++)
617  {
618  if (LockHeldByMe(locktag, slockmode, false))
619  return true;
620  }
621  }
622 
623  return false;
624 }
bool LockHeldByMe(const LOCKTAG *locktag, LOCKMODE lockmode, bool orstronger)
Definition: lock.c:590
#define MaxLockMode
Definition: lockdefs.h:45

References HASH_FIND, hash_search(), LOCALLOCKTAG::lock, LockMethodLocalHash, MaxLockMode, MemSet, LOCALLOCKTAG::mode, and LOCALLOCK::nLocks.

Referenced by CheckRelationLockedByMe(), and CheckRelationOidLockedByMe().

◆ LockReassignCurrentOwner()

void LockReassignCurrentOwner ( LOCALLOCK **  locallocks,
int  nlocks 
)

Definition at line 2559 of file lock.c.

/*
 * LockReassignCurrentOwner -- reassign all locks belonging to
 * CurrentResourceOwner to its parent owner.
 *
 * If 'locallocks' is NULL, the whole locallock hash table is scanned;
 * otherwise only the 'nlocks' entries in the given array are processed.
 *
 * NOTE(review): the extraction that produced this listing dropped two code
 * lines here: the declaration/initialization of 'parent' (per the reference
 * list, via ResourceOwnerGetParent(CurrentResourceOwner)) and the
 * hash_seq_init() call before the scan loop — confirm against upstream lock.c.
 */
2560 {
2562 
2563  Assert(parent != NULL);
2564 
2565  if (locallocks == NULL)
2566  {
2567  HASH_SEQ_STATUS status;
2568  LOCALLOCK *locallock;
2569 
2571 
2572  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2573  LockReassignOwner(locallock, parent);
2574  }
2575  else
2576  {
2577  int i;
2578 
/* iterate in reverse so forgetting entries doesn't disturb the array */
2579  for (i = nlocks - 1; i >= 0; i--)
2580  LockReassignOwner(locallocks[i], parent);
2581  }
2582 }
static void LockReassignOwner(LOCALLOCK *locallock, ResourceOwner parent)
Definition: lock.c:2589
ResourceOwner ResourceOwnerGetParent(ResourceOwner owner)
Definition: resowner.c:888

References Assert, CurrentResourceOwner, hash_seq_init(), hash_seq_search(), i, LockMethodLocalHash, LockReassignOwner(), and ResourceOwnerGetParent().

Referenced by ResourceOwnerReleaseInternal().

◆ LockReassignOwner()

static void LockReassignOwner ( LOCALLOCK locallock,
ResourceOwner  parent 
)
static

Definition at line 2589 of file lock.c.

/*
 * LockReassignOwner -- subroutine for LockReassignCurrentOwner.
 *
 * Transfers the CurrentResourceOwner's reference count for 'locallock' to
 * 'parent': either by relabeling the child's slot, or by merging the counts
 * and compacting out the now-unused slot when the parent already has one.
 *
 * NOTE(review): the extraction dropped a code line just before the closing
 * brace — per the reference list this is the ResourceOwnerForgetLock() call
 * that detaches the lock from the current owner; confirm against upstream.
 */
2590 {
2591  LOCALLOCKOWNER *lockOwners;
2592  int i;
2593  int ic = -1;
2594  int ip = -1;
2595 
2596  /*
2597  * Scan to see if there are any locks belonging to current owner or its
2598  * parent
2599  */
2600  lockOwners = locallock->lockOwners;
2601  for (i = locallock->numLockOwners - 1; i >= 0; i--)
2602  {
2603  if (lockOwners[i].owner == CurrentResourceOwner)
2604  ic = i;
2605  else if (lockOwners[i].owner == parent)
2606  ip = i;
2607  }
2608 
2609  if (ic < 0)
2610  return; /* no current locks */
2611 
2612  if (ip < 0)
2613  {
2614  /* Parent has no slot, so just give it the child's slot */
2615  lockOwners[ic].owner = parent;
2616  ResourceOwnerRememberLock(parent, locallock);
2617  }
2618  else
2619  {
2620  /* Merge child's count with parent's */
2621  lockOwners[ip].nLocks += lockOwners[ic].nLocks;
2622  /* compact out unused slot */
2623  locallock->numLockOwners--;
2624  if (ic < locallock->numLockOwners)
2625  lockOwners[ic] = lockOwners[locallock->numLockOwners];
2626  }
2628 }
void ResourceOwnerForgetLock(ResourceOwner owner, LOCALLOCK *locallock)
Definition: resowner.c:1065

References CurrentResourceOwner, i, LOCALLOCK::lockOwners, LOCALLOCKOWNER::nLocks, LOCALLOCK::numLockOwners, LOCALLOCKOWNER::owner, ResourceOwnerForgetLock(), and ResourceOwnerRememberLock().

Referenced by LockReassignCurrentOwner().

◆ LockRefindAndRelease()

static void LockRefindAndRelease ( LockMethod  lockMethodTable,
PGPROC proc,
LOCKTAG locktag,
LOCKMODE  lockmode,
bool  decrement_strong_lock_count 
)
static

Definition at line 3102 of file lock.c.

/*
 * LockRefindAndRelease -- release a lock for which we have no LOCK/PROCLOCK
 * pointers cached, by re-looking them up in the shared hash tables.
 *
 * Used when releasing a lock on behalf of a prepared transaction, during
 * LockReleaseAll for transferred fast-path locks, and for VXID lock cleanup.
 * 'decrement_strong_lock_count' is true only in the 2PC post-commit path.
 *
 * NOTE(review): the extraction dropped the assignment lines in front of the
 * two hash_search_with_hash_value() argument lists (the 'lock = ...' and
 * 'proclock = ...' lookups) and the SpinLockAcquire/SpinLockRelease pair
 * around the strong-lock count decrement — confirm against upstream lock.c.
 */
3105 {
3106  LOCK *lock;
3107  PROCLOCK *proclock;
3108  PROCLOCKTAG proclocktag;
3109  uint32 hashcode;
3110  uint32 proclock_hashcode;
3111  LWLock *partitionLock;
3112  bool wakeupNeeded;
3113 
3114  hashcode = LockTagHashCode(locktag);
3115  partitionLock = LockHashPartitionLock(hashcode);
3116 
3117  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
3118 
3119  /*
3120  * Re-find the lock object (it had better be there).
3121  */
3123  locktag,
3124  hashcode,
3125  HASH_FIND,
3126  NULL);
3127  if (!lock)
3128  elog(PANIC, "failed to re-find shared lock object");
3129 
3130  /*
3131  * Re-find the proclock object (ditto).
3132  */
3133  proclocktag.myLock = lock;
3134  proclocktag.myProc = proc;
3135 
3136  proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
3137 
3139  &proclocktag,
3140  proclock_hashcode,
3141  HASH_FIND,
3142  NULL);
3143  if (!proclock)
3144  elog(PANIC, "failed to re-find shared proclock object");
3145 
3146  /*
3147  * Double-check that we are actually holding a lock of the type we want to
3148  * release.
3149  */
3150  if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
3151  {
3152  PROCLOCK_PRINT("lock_twophase_postcommit: WRONGTYPE", proclock);
3153  LWLockRelease(partitionLock);
3154  elog(WARNING, "you don't own a lock of type %s",
3155  lockMethodTable->lockModeNames[lockmode]);
3156  return;
3157  }
3158 
3159  /*
3160  * Do the releasing. CleanUpLock will waken any now-wakable waiters.
3161  */
3162  wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
3163 
3164  CleanUpLock(lock, proclock,
3165  lockMethodTable, hashcode,
3166  wakeupNeeded);
3167 
3168  LWLockRelease(partitionLock);
3169 
3170  /*
3171  * Decrement strong lock count. This logic is needed only for 2PC.
3172  */
3173  if (decrement_strong_lock_count
3174  && ConflictsWithRelationFastPath(locktag, lockmode))
3175  {
3176  uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
3177 
3179  Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
3180  FastPathStrongRelationLocks->count[fasthashcode]--;
3182  }
3183 }
static bool UnGrantLock(LOCK *lock, LOCKMODE lockmode, PROCLOCK *proclock, LockMethod lockMethodTable)
Definition: lock.c:1581
static void CleanUpLock(LOCK *lock, PROCLOCK *proclock, LockMethod lockMethodTable, uint32 hashcode, bool wakeupNeeded)
Definition: lock.c:1638

References Assert, CleanUpLock(), ConflictsWithRelationFastPath, FastPathStrongRelationLockData::count, elog, FastPathStrongLockHashPartition, FastPathStrongRelationLocks, HASH_FIND, hash_search_with_hash_value(), PROCLOCK::holdMask, LOCKBIT_ON, LockHashPartitionLock, LockMethodLockHash, LockMethodProcLockHash, LockMethodData::lockModeNames, LockTagHashCode(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), FastPathStrongRelationLockData::mutex, PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, PANIC, PROCLOCK_PRINT, ProcLockHashCode(), SpinLockAcquire, SpinLockRelease, UnGrantLock(), and WARNING.

Referenced by lock_twophase_postcommit(), LockReleaseAll(), and VirtualXactLockTableCleanup().

◆ LockRelease()

bool LockRelease ( const LOCKTAG locktag,
LOCKMODE  lockmode,
bool  sessionLock 
)

Definition at line 1964 of file lock.c.

/*
 * LockRelease -- release one previously-acquired lock.
 *
 * 'sessionLock' true means the lock is owned by the session (owner == NULL)
 * rather than by CurrentResourceOwner.  Returns true on success; returns
 * false (after a WARNING, never an ERROR) if we don't hold such a lock.
 *
 * Tries the relation fast-path first; otherwise updates the shared lock
 * table, re-finding the LOCK/PROCLOCK if a fast-path lock was transferred
 * there by another backend.
 *
 * NOTE(review): the extraction dropped several hyperlinked code lines here:
 * the 'FastPathLocalUseCount > 0' half of the fast-path condition, the
 * LWLockAcquire/LWLockRelease pair on MyProc->fpInfoLock around the
 * fast-path ungrant, and the 'lock = ...' / 'locallock->proclock = ...'
 * assignments in front of the re-find lookups — confirm against upstream.
 */
1965 {
1966  LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
1967  LockMethod lockMethodTable;
1968  LOCALLOCKTAG localtag;
1969  LOCALLOCK *locallock;
1970  LOCK *lock;
1971  PROCLOCK *proclock;
1972  LWLock *partitionLock;
1973  bool wakeupNeeded;
1974 
1975  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
1976  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
1977  lockMethodTable = LockMethods[lockmethodid];
1978  if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
1979  elog(ERROR, "unrecognized lock mode: %d", lockmode);
1980 
1981 #ifdef LOCK_DEBUG
1982  if (LOCK_DEBUG_ENABLED(locktag))
1983  elog(LOG, "LockRelease: lock [%u,%u] %s",
1984  locktag->locktag_field1, locktag->locktag_field2,
1985  lockMethodTable->lockModeNames[lockmode]);
1986 #endif
1987 
1988  /*
1989  * Find the LOCALLOCK entry for this lock and lockmode
1990  */
1991  MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
1992  localtag.lock = *locktag;
1993  localtag.mode = lockmode;
1994 
1995  locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
1996  &localtag,
1997  HASH_FIND, NULL);
1998 
1999  /*
2000  * let the caller print its own error message, too. Do not ereport(ERROR).
2001  */
2002  if (!locallock || locallock->nLocks <= 0)
2003  {
2004  elog(WARNING, "you don't own a lock of type %s",
2005  lockMethodTable->lockModeNames[lockmode]);
2006  return false;
2007  }
2008 
2009  /*
2010  * Decrease the count for the resource owner.
2011  */
2012  {
2013  LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
2014  ResourceOwner owner;
2015  int i;
2016 
2017  /* Identify owner for lock */
2018  if (sessionLock)
2019  owner = NULL;
2020  else
2021  owner = CurrentResourceOwner;
2022 
2023  for (i = locallock->numLockOwners - 1; i >= 0; i--)
2024  {
2025  if (lockOwners[i].owner == owner)
2026  {
2027  Assert(lockOwners[i].nLocks > 0);
2028  if (--lockOwners[i].nLocks == 0)
2029  {
2030  if (owner != NULL)
2031  ResourceOwnerForgetLock(owner, locallock);
2032  /* compact out unused slot */
2033  locallock->numLockOwners--;
2034  if (i < locallock->numLockOwners)
2035  lockOwners[i] = lockOwners[locallock->numLockOwners];
2036  }
2037  break;
2038  }
2039  }
2040  if (i < 0)
2041  {
2042  /* don't release a lock belonging to another owner */
2043  elog(WARNING, "you don't own a lock of type %s",
2044  lockMethodTable->lockModeNames[lockmode]);
2045  return false;
2046  }
2047  }
2048 
2049  /*
2050  * Decrease the total local count. If we're still holding the lock, we're
2051  * done.
2052  */
2053  locallock->nLocks--;
2054 
2055  if (locallock->nLocks > 0)
2056  return true;
2057 
2058  /*
2059  * At this point we can no longer suppose we are clear of invalidation
2060  * messages related to this lock. Although we'll delete the LOCALLOCK
2061  * object before any intentional return from this routine, it seems worth
2062  * the trouble to explicitly reset lockCleared right now, just in case
2063  * some error prevents us from deleting the LOCALLOCK.
2064  */
2065  locallock->lockCleared = false;
2066 
2067  /* Attempt fast release of any lock eligible for the fast path. */
2068  if (EligibleForRelationFastPath(locktag, lockmode) &&
2070  {
2071  bool released;
2072 
2073  /*
2074  * We might not find the lock here, even if we originally entered it
2075  * here. Another backend may have moved it to the main table.
2076  */
2078  released = FastPathUnGrantRelationLock(locktag->locktag_field2,
2079  lockmode);
2081  if (released)
2082  {
2083  RemoveLocalLock(locallock);
2084  return true;
2085  }
2086  }
2087 
2088  /*
2089  * Otherwise we've got to mess with the shared lock table.
2090  */
2091  partitionLock = LockHashPartitionLock(locallock->hashcode);
2092 
2093  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2094 
2095  /*
2096  * Normally, we don't need to re-find the lock or proclock, since we kept
2097  * their addresses in the locallock table, and they couldn't have been
2098  * removed while we were holding a lock on them. But it's possible that
2099  * the lock was taken fast-path and has since been moved to the main hash
2100  * table by another backend, in which case we will need to look up the
2101  * objects here. We assume the lock field is NULL if so.
2102  */
2103  lock = locallock->lock;
2104  if (!lock)
2105  {
2106  PROCLOCKTAG proclocktag;
2107 
2108  Assert(EligibleForRelationFastPath(locktag, lockmode));
2110  locktag,
2111  locallock->hashcode,
2112  HASH_FIND,
2113  NULL);
2114  if (!lock)
2115  elog(ERROR, "failed to re-find shared lock object");
2116  locallock->lock = lock;
2117 
2118  proclocktag.myLock = lock;
2119  proclocktag.myProc = MyProc;
2121  &proclocktag,
2122  HASH_FIND,
2123  NULL);
2124  if (!locallock->proclock)
2125  elog(ERROR, "failed to re-find shared proclock object");
2126  }
2127  LOCK_PRINT("LockRelease: found", lock, lockmode);
2128  proclock = locallock->proclock;
2129  PROCLOCK_PRINT("LockRelease: found", proclock);
2130 
2131  /*
2132  * Double-check that we are actually holding a lock of the type we want to
2133  * release.
2134  */
2135  if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
2136  {
2137  PROCLOCK_PRINT("LockRelease: WRONGTYPE", proclock);
2138  LWLockRelease(partitionLock);
2139  elog(WARNING, "you don't own a lock of type %s",
2140  lockMethodTable->lockModeNames[lockmode]);
2141  RemoveLocalLock(locallock);
2142  return false;
2143  }
2144 
2145  /*
2146  * Do the releasing. CleanUpLock will waken any now-wakable waiters.
2147  */
2148  wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
2149 
2150  CleanUpLock(lock, proclock,
2151  lockMethodTable, locallock->hashcode,
2152  wakeupNeeded);
2153 
2154  LWLockRelease(partitionLock);
2155 
2156  RemoveLocalLock(locallock);
2157  return true;
2158 }
static bool FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode)
Definition: lock.c:2672

References Assert, CleanUpLock(), CurrentResourceOwner, EligibleForRelationFastPath, elog, ERROR, FastPathLocalUseCount, FastPathUnGrantRelationLock(), PGPROC::fpInfoLock, HASH_FIND, hash_search(), hash_search_with_hash_value(), LOCALLOCK::hashcode, PROCLOCK::holdMask, i, lengthof, LOCALLOCKTAG::lock, LOCALLOCK::lock, LOCK_PRINT, LOCKBIT_ON, LOCALLOCK::lockCleared, LockHashPartitionLock, LockMethodLocalHash, LockMethodLockHash, LockMethodProcLockHash, LockMethods, LockMethodData::lockModeNames, LOCALLOCK::lockOwners, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_lockmethodid, LOG, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), MemSet, LOCALLOCKTAG::mode, PROCLOCKTAG::myLock, MyProc, PROCLOCKTAG::myProc, LOCALLOCK::nLocks, LockMethodData::numLockModes, LOCALLOCK::numLockOwners, LOCALLOCK::proclock, PROCLOCK_PRINT, RemoveLocalLock(), ResourceOwnerForgetLock(), UnGrantLock(), and WARNING.

Referenced by ConditionalXactLockTableWait(), pg_advisory_unlock_int4(), pg_advisory_unlock_int8(), pg_advisory_unlock_shared_int4(), pg_advisory_unlock_shared_int8(), ReleaseLockIfHeld(), SpeculativeInsertionLockRelease(), SpeculativeInsertionWait(), StandbyReleaseXidEntryLocks(), UnlockApplyTransactionForSession(), UnlockDatabaseObject(), UnlockPage(), UnlockRelation(), UnlockRelationForExtension(), UnlockRelationId(), UnlockRelationIdForSession(), UnlockRelationOid(), UnlockSharedObject(), UnlockSharedObjectForSession(), UnlockTuple(), VirtualXactLock(), XactLockForVirtualXact(), XactLockTableDelete(), and XactLockTableWait().

◆ LockReleaseAll()

void LockReleaseAll ( LOCKMETHODID  lockmethodid,
bool  allLocks 
)

Definition at line 2169 of file lock.c.

/*
 * LockReleaseAll -- release all locks of the given lock method held by this
 * backend; with 'allLocks' false, session locks (owner == NULL) are kept.
 *
 * Two passes: first the backend-local locallock table is purged (fast-path
 * locks are released here, or refound-and-released if transferred); then each
 * shared lock partition is scanned and previously-marked modes are ungranted.
 *
 * NOTE(review): the extraction dropped several hyperlinked call lines: the
 * VirtualXactLockTableCleanup() call under the DEFAULT_LOCKMETHOD check, the
 * hash_seq_init() before the locallock scan, and the LWLockAcquire/
 * LWLockRelease calls on MyProc->fpInfoLock in the fast-path section —
 * confirm against upstream lock.c.
 */
2170 {
2171  HASH_SEQ_STATUS status;
2172  LockMethod lockMethodTable;
2173  int i,
2174  numLockModes;
2175  LOCALLOCK *locallock;
2176  LOCK *lock;
2177  int partition;
2178  bool have_fast_path_lwlock = false;
2179 
2180  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2181  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2182  lockMethodTable = LockMethods[lockmethodid];
2183 
2184 #ifdef LOCK_DEBUG
2185  if (*(lockMethodTable->trace_flag))
2186  elog(LOG, "LockReleaseAll: lockmethod=%d", lockmethodid);
2187 #endif
2188 
2189  /*
2190  * Get rid of our fast-path VXID lock, if appropriate. Note that this is
2191  * the only way that the lock we hold on our own VXID can ever get
2192  * released: it is always and only released when a toplevel transaction
2193  * ends.
2194  */
2195  if (lockmethodid == DEFAULT_LOCKMETHOD)
2197 
2198  numLockModes = lockMethodTable->numLockModes;
2199 
2200  /*
2201  * First we run through the locallock table and get rid of unwanted
2202  * entries, then we scan the process's proclocks and get rid of those. We
2203  * do this separately because we may have multiple locallock entries
2204  * pointing to the same proclock, and we daren't end up with any dangling
2205  * pointers. Fast-path locks are cleaned up during the locallock table
2206  * scan, though.
2207  */
2209 
2210  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2211  {
2212  /*
2213  * If the LOCALLOCK entry is unused, we must've run out of shared
2214  * memory while trying to set up this lock. Just forget the local
2215  * entry.
2216  */
2217  if (locallock->nLocks == 0)
2218  {
2219  RemoveLocalLock(locallock);
2220  continue;
2221  }
2222 
2223  /* Ignore items that are not of the lockmethod to be removed */
2224  if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
2225  continue;
2226 
2227  /*
2228  * If we are asked to release all locks, we can just zap the entry.
2229  * Otherwise, must scan to see if there are session locks. We assume
2230  * there is at most one lockOwners entry for session locks.
2231  */
2232  if (!allLocks)
2233  {
2234  LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
2235 
2236  /* If session lock is above array position 0, move it down to 0 */
2237  for (i = 0; i < locallock->numLockOwners; i++)
2238  {
2239  if (lockOwners[i].owner == NULL)
2240  lockOwners[0] = lockOwners[i];
2241  else
2242  ResourceOwnerForgetLock(lockOwners[i].owner, locallock);
2243  }
2244 
2245  if (locallock->numLockOwners > 0 &&
2246  lockOwners[0].owner == NULL &&
2247  lockOwners[0].nLocks > 0)
2248  {
2249  /* Fix the locallock to show just the session locks */
2250  locallock->nLocks = lockOwners[0].nLocks;
2251  locallock->numLockOwners = 1;
2252  /* We aren't deleting this locallock, so done */
2253  continue;
2254  }
2255  else
2256  locallock->numLockOwners = 0;
2257  }
2258 
2259  /*
2260  * If the lock or proclock pointers are NULL, this lock was taken via
2261  * the relation fast-path (and is not known to have been transferred).
2262  */
2263  if (locallock->proclock == NULL || locallock->lock == NULL)
2264  {
2265  LOCKMODE lockmode = locallock->tag.mode;
2266  Oid relid;
2267 
2268  /* Verify that a fast-path lock is what we've got. */
2269  if (!EligibleForRelationFastPath(&locallock->tag.lock, lockmode))
2270  elog(PANIC, "locallock table corrupted");
2271 
2272  /*
2273  * If we don't currently hold the LWLock that protects our
2274  * fast-path data structures, we must acquire it before attempting
2275  * to release the lock via the fast-path. We will continue to
2276  * hold the LWLock until we're done scanning the locallock table,
2277  * unless we hit a transferred fast-path lock. (XXX is this
2278  * really such a good idea? There could be a lot of entries ...)
2279  */
2280  if (!have_fast_path_lwlock)
2281  {
2283  have_fast_path_lwlock = true;
2284  }
2285 
2286  /* Attempt fast-path release. */
2287  relid = locallock->tag.lock.locktag_field2;
2288  if (FastPathUnGrantRelationLock(relid, lockmode))
2289  {
2290  RemoveLocalLock(locallock);
2291  continue;
2292  }
2293 
2294  /*
2295  * Our lock, originally taken via the fast path, has been
2296  * transferred to the main lock table. That's going to require
2297  * some extra work, so release our fast-path lock before starting.
2298  */
2300  have_fast_path_lwlock = false;
2301 
2302  /*
2303  * Now dump the lock. We haven't got a pointer to the LOCK or
2304  * PROCLOCK in this case, so we have to handle this a bit
2305  * differently than a normal lock release. Unfortunately, this
2306  * requires an extra LWLock acquire-and-release cycle on the
2307  * partitionLock, but hopefully it shouldn't happen often.
2308  */
2309  LockRefindAndRelease(lockMethodTable, MyProc,
2310  &locallock->tag.lock, lockmode, false);
2311  RemoveLocalLock(locallock);
2312  continue;
2313  }
2314 
2315  /* Mark the proclock to show we need to release this lockmode */
2316  if (locallock->nLocks > 0)
2317  locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
2318 
2319  /* And remove the locallock hashtable entry */
2320  RemoveLocalLock(locallock);
2321  }
2322 
2323  /* Done with the fast-path data structures */
2324  if (have_fast_path_lwlock)
2326 
2327  /*
2328  * Now, scan each lock partition separately.
2329  */
2330  for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
2331  {
2332  LWLock *partitionLock;
2333  dlist_head *procLocks = &MyProc->myProcLocks[partition];
2334  dlist_mutable_iter proclock_iter;
2335 
2336  partitionLock = LockHashPartitionLockByIndex(partition);
2337 
2338  /*
2339  * If the proclock list for this partition is empty, we can skip
2340  * acquiring the partition lock. This optimization is trickier than
2341  * it looks, because another backend could be in process of adding
2342  * something to our proclock list due to promoting one of our
2343  * fast-path locks. However, any such lock must be one that we
2344  * decided not to delete above, so it's okay to skip it again now;
2345  * we'd just decide not to delete it again. We must, however, be
2346  * careful to re-fetch the list header once we've acquired the
2347  * partition lock, to be sure we have a valid, up-to-date pointer.
2348  * (There is probably no significant risk if pointer fetch/store is
2349  * atomic, but we don't wish to assume that.)
2350  *
2351  * XXX This argument assumes that the locallock table correctly
2352  * represents all of our fast-path locks. While allLocks mode
2353  * guarantees to clean up all of our normal locks regardless of the
2354  * locallock situation, we lose that guarantee for fast-path locks.
2355  * This is not ideal.
2356  */
2357  if (dlist_is_empty(procLocks))
2358  continue; /* needn't examine this partition */
2359 
2360  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2361 
2362  dlist_foreach_modify(proclock_iter, procLocks)
2363  {
2364  PROCLOCK *proclock = dlist_container(PROCLOCK, procLink, proclock_iter.cur);
2365  bool wakeupNeeded = false;
2366 
2367  Assert(proclock->tag.myProc == MyProc);
2368 
2369  lock = proclock->tag.myLock;
2370 
2371  /* Ignore items that are not of the lockmethod to be removed */
2372  if (LOCK_LOCKMETHOD(*lock) != lockmethodid)
2373  continue;
2374 
2375  /*
2376  * In allLocks mode, force release of all locks even if locallock
2377  * table had problems
2378  */
2379  if (allLocks)
2380  proclock->releaseMask = proclock->holdMask;
2381  else
2382  Assert((proclock->releaseMask & ~proclock->holdMask) == 0);
2383 
2384  /*
2385  * Ignore items that have nothing to be released, unless they have
2386  * holdMask == 0 and are therefore recyclable
2387  */
2388  if (proclock->releaseMask == 0 && proclock->holdMask != 0)
2389  continue;
2390 
2391  PROCLOCK_PRINT("LockReleaseAll", proclock);
2392  LOCK_PRINT("LockReleaseAll", lock, 0);
2393  Assert(lock->nRequested >= 0);
2394  Assert(lock->nGranted >= 0);
2395  Assert(lock->nGranted <= lock->nRequested);
2396  Assert((proclock->holdMask & ~lock->grantMask) == 0);
2397 
2398  /*
2399  * Release the previously-marked lock modes
2400  */
2401  for (i = 1; i <= numLockModes; i++)
2402  {
2403  if (proclock->releaseMask & LOCKBIT_ON(i))
2404  wakeupNeeded |= UnGrantLock(lock, i, proclock,
2405  lockMethodTable);
2406  }
2407  Assert((lock->nRequested >= 0) && (lock->nGranted >= 0));
2408  Assert(lock->nGranted <= lock->nRequested);
2409  LOCK_PRINT("LockReleaseAll: updated", lock, 0);
2410 
2411  proclock->releaseMask = 0;
2412 
2413  /* CleanUpLock will wake up waiters if needed. */
2414  CleanUpLock(lock, proclock,
2415  lockMethodTable,
2416  LockTagHashCode(&lock->tag),
2417  wakeupNeeded);
2418  } /* loop over PROCLOCKs within this partition */
2419 
2420  LWLockRelease(partitionLock);
2421  } /* loop over partitions */
2422 
2423 #ifdef LOCK_DEBUG
2424  if (*(lockMethodTable->trace_flag))
2425  elog(LOG, "LockReleaseAll done");
2426 #endif
2427 }
#define dlist_foreach_modify(iter, lhead)
Definition: ilist.h:640
void VirtualXactLockTableCleanup(void)
Definition: lock.c:4450
#define LOCALLOCK_LOCKMETHOD(llock)
Definition: lock.h:443
const bool * trace_flag
Definition: lock.h:113
dlist_node * cur
Definition: ilist.h:200

References Assert, CleanUpLock(), dlist_mutable_iter::cur, DEFAULT_LOCKMETHOD, dlist_container, dlist_foreach_modify, dlist_is_empty(), EligibleForRelationFastPath, elog, ERROR, FastPathUnGrantRelationLock(), PGPROC::fpInfoLock, LOCK::grantMask, hash_seq_init(), hash_seq_search(), PROCLOCK::holdMask, i, lengthof, LOCALLOCK_LOCKMETHOD, LOCALLOCKTAG::lock, LOCALLOCK::lock, LOCK_LOCKMETHOD, LOCK_PRINT, LOCKBIT_ON, LockHashPartitionLockByIndex, LockMethodLocalHash, LockMethods, LOCALLOCK::lockOwners, LockRefindAndRelease(), LOCKTAG::locktag_field2, LockTagHashCode(), LOG, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), LOCALLOCKTAG::mode, PROCLOCKTAG::myLock, MyProc, PROCLOCKTAG::myProc, PGPROC::myProcLocks, LOCK::nGranted, LOCALLOCKOWNER::nLocks, LOCALLOCK::nLocks, LOCK::nRequested, NUM_LOCK_PARTITIONS, LockMethodData::numLockModes, LOCALLOCK::numLockOwners, LOCALLOCKOWNER::owner, PANIC, LOCALLOCK::proclock, PROCLOCK_PRINT, PROCLOCK::releaseMask, RemoveLocalLock(), ResourceOwnerForgetLock(), LOCK::tag, PROCLOCK::tag, LOCALLOCK::tag, LockMethodData::trace_flag, UnGrantLock(), and VirtualXactLockTableCleanup().

Referenced by DiscardAll(), logicalrep_worker_onexit(), ProcReleaseLocks(), and ShutdownPostgres().

◆ LockReleaseCurrentOwner()

void LockReleaseCurrentOwner ( LOCALLOCK **  locallocks,
int  nlocks 
)

Definition at line 2464 of file lock.c.

/*
 * LockReleaseCurrentOwner
 *		Release all locks belonging to CurrentResourceOwner.
 *
 * If the caller already knows the locks, it passes them as an array of
 * LOCALLOCK pointers (locallocks, nlocks); otherwise it passes
 * locallocks == NULL and the entire local lock table is scanned.
 * (ReleaseLockIfHeld with sessionLock == false targets CurrentResourceOwner.)
 */
2465 {
2466  if (locallocks == NULL)
2467  {
2468  HASH_SEQ_STATUS status;
2469  LOCALLOCK *locallock;
2470 
      /*
       * NOTE(review): the generated listing dropped source line 2471 here;
       * per the References list it is presumably
       * hash_seq_init(&status, LockMethodLocalHash) -- confirm against lock.c.
       */
2472 
2473  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2474  ReleaseLockIfHeld(locallock, false);
2475  }
2476  else
2477  {
2478  int i;
2479 
      /* Walk the caller-supplied array back to front. */
2480  for (i = nlocks - 1; i >= 0; i--)
2481  ReleaseLockIfHeld(locallocks[i], false);
2482  }
2483 }
static void ReleaseLockIfHeld(LOCALLOCK *locallock, bool sessionLock)
Definition: lock.c:2499

References hash_seq_init(), hash_seq_search(), i, LockMethodLocalHash, and ReleaseLockIfHeld().

Referenced by ResourceOwnerReleaseInternal().

◆ LockReleaseSession()

void LockReleaseSession ( LOCKMETHODID  lockmethodid)

Definition at line 2434 of file lock.c.

/*
 * LockReleaseSession
 *		Release all session-level locks of the given lock method.
 *
 * Scans the local lock table and calls ReleaseLockIfHeld with
 * sessionLock == true for every entry of the requested lockmethodid.
 * Raises ERROR on an out-of-range lock method id.
 */
2435 {
2436  HASH_SEQ_STATUS status;
2437  LOCALLOCK *locallock;
2438 
2439  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2440  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2441 
      /*
       * NOTE(review): the generated listing dropped source line 2442 here;
       * per the References list it is presumably
       * hash_seq_init(&status, LockMethodLocalHash) -- confirm against lock.c.
       */
2443 
2444  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2445  {
2446  /* Ignore items that are not of the specified lock method */
2447  if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
2448  continue;
2449 
2450  ReleaseLockIfHeld(locallock, true);
2451  }
2452 }

References elog, ERROR, hash_seq_init(), hash_seq_search(), lengthof, LOCALLOCK_LOCKMETHOD, LockMethodLocalHash, LockMethods, and ReleaseLockIfHeld().

Referenced by pg_advisory_unlock_all().

◆ LockShmemSize()

Size LockShmemSize ( void  )

Definition at line 3574 of file lock.c.

3575 {
3576  Size size = 0;
3577  long max_table_size;
3578 
3579  /* lock hash table */
3580  max_table_size = NLOCKENTS();
3581  size = add_size(size, hash_estimate_size(max_table_size, sizeof(LOCK)));
3582 
3583  /* proclock hash table */
3584  max_table_size *= 2;
3585  size = add_size(size, hash_estimate_size(max_table_size, sizeof(PROCLOCK)));
3586 
3587  /*
3588  * Since NLOCKENTS is only an estimate, add 10% safety margin.
3589  */
3590  size = add_size(size, size / 10);
3591 
3592  return size;
3593 }
size_t Size
Definition: c.h:605
Size hash_estimate_size(long num_entries, Size entrysize)
Definition: dynahash.c:783
Size add_size(Size s1, Size s2)
Definition: shmem.c:493
static pg_noinline void Size size
Definition: slab.c:607

References add_size(), hash_estimate_size(), NLOCKENTS, and size.

Referenced by CalculateShmemSize().

◆ LockTagHashCode()

uint32 LockTagHashCode ( const LOCKTAG locktag)

Definition at line 504 of file lock.c.

505 {
506  return get_hash_value(LockMethodLockHash, (const void *) locktag);
507 }
uint32 get_hash_value(HTAB *hashp, const void *keyPtr)
Definition: dynahash.c:911

References get_hash_value(), and LockMethodLockHash.

Referenced by CheckDeadLock(), GetLockConflicts(), lock_twophase_recover(), LockAcquireExtended(), LockRefindAndRelease(), LockReleaseAll(), LockWaiterCount(), proclock_hash(), and VirtualXactLock().

◆ LockWaiterCount()

int LockWaiterCount ( const LOCKTAG locktag)

Definition at line 4661 of file lock.c.

/*
 * LockWaiterCount
 *		Return the number of requesters recorded for the given locktag,
 *		or 0 if no LOCK object exists for it.
 *
 * NOTE(review): the value returned is lock->nRequested, which (per the
 * assertions elsewhere in this file, nGranted <= nRequested) counts granted
 * requests as well as blocked waiters -- confirm intended semantics against
 * the caller, RelationExtensionLockWaiterCount().
 */
4662 {
4663  LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
4664  LOCK *lock;
4665  bool found;
4666  uint32 hashcode;
4667  LWLock *partitionLock;
4668  int waiters = 0;
4669 
4670  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4671  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4672 
4673  hashcode = LockTagHashCode(locktag);
4674  partitionLock = LockHashPartitionLock(hashcode);
4675  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4676 
      /*
       * NOTE(review): the generated listing dropped source line 4677 here;
       * per the References list it is presumably
       * lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
       * -- confirm against lock.c.
       */
4678  locktag,
4679  hashcode,
4680  HASH_FIND,
4681  &found);
4682  if (found)
4683  {
4684  Assert(lock != NULL);
4685  waiters = lock->nRequested;
4686  }
4687  LWLockRelease(partitionLock);
4688 
4689  return waiters;
4690 }

References Assert, elog, ERROR, HASH_FIND, hash_search_with_hash_value(), lengthof, LockHashPartitionLock, LockMethodLockHash, LockMethods, LOCKTAG::locktag_lockmethodid, LockTagHashCode(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), and LOCK::nRequested.

Referenced by RelationExtensionLockWaiterCount().

◆ MarkLockClear()

void MarkLockClear ( LOCALLOCK locallock)

◆ PostPrepare_Locks()

void PostPrepare_Locks ( TransactionId  xid)

Definition at line 3390 of file lock.c.

/*
 * PostPrepare_Locks
 *		Transfer ownership of this backend's transaction-level locks to the
 *		dummy PGPROC that represents the prepared transaction xid.
 *
 * Runs inside a critical section: first the local lock table is purged of
 * entries being transferred (multiple locallocks can point at one proclock,
 * so this pass happens before any proclock is touched), then every proclock
 * of MyProc is re-keyed via hash_update_hash_key() to belong to the dummy
 * proc obtained from TwoPhaseGetDummyProc().
 */
3391 {
3392  PGPROC *newproc = TwoPhaseGetDummyProc(xid, false);
3393  HASH_SEQ_STATUS status;
3394  LOCALLOCK *locallock;
3395  LOCK *lock;
3396  PROCLOCK *proclock;
3397  PROCLOCKTAG proclocktag;
3398  int partition;
3399 
3400  /* Can't prepare a lock group follower. */
3401  Assert(MyProc->lockGroupLeader == NULL ||
      /*
       * NOTE(review): the generated listing dropped source line 3402 here;
       * presumably "MyProc->lockGroupLeader == MyProc);" completing the
       * assertion -- confirm against lock.c.
       */
3403 
3404  /* This is a critical section: any error means big trouble */
      /*
       * NOTE(review): line 3405 dropped; per the References list it is
       * presumably START_CRIT_SECTION(); -- confirm against lock.c.
       */
3406 
3407  /*
3408  * First we run through the locallock table and get rid of unwanted
3409  * entries, then we scan the process's proclocks and transfer them to the
3410  * target proc.
3411  *
3412  * We do this separately because we may have multiple locallock entries
3413  * pointing to the same proclock, and we daren't end up with any dangling
3414  * pointers.
3415  */
      /*
       * NOTE(review): line 3416 dropped; per the References list it is
       * presumably hash_seq_init(&status, LockMethodLocalHash); -- confirm.
       */
3417 
3418  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3419  {
3420  LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3421  bool haveSessionLock;
3422  bool haveXactLock;
3423  int i;
3424 
3425  if (locallock->proclock == NULL || locallock->lock == NULL)
3426  {
3427  /*
3428  * We must've run out of shared memory while trying to set up this
3429  * lock. Just forget the local entry.
3430  */
3431  Assert(locallock->nLocks == 0);
3432  RemoveLocalLock(locallock);
3433  continue;
3434  }
3435 
3436  /* Ignore VXID locks */
3437  if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3438  continue;
3439 
3440  /* Scan to see whether we hold it at session or transaction level */
3441  haveSessionLock = haveXactLock = false;
3442  for (i = locallock->numLockOwners - 1; i >= 0; i--)
3443  {
3444  if (lockOwners[i].owner == NULL)
3445  haveSessionLock = true;
3446  else
3447  haveXactLock = true;
3448  }
3449 
3450  /* Ignore it if we have only session lock */
3451  if (!haveXactLock)
3452  continue;
3453 
3454  /* This can't happen, because we already checked it */
3455  if (haveSessionLock)
3456  ereport(PANIC,
3457  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3458  errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3459 
3460  /* Mark the proclock to show we need to release this lockmode */
3461  if (locallock->nLocks > 0)
3462  locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
3463 
3464  /* And remove the locallock hashtable entry */
3465  RemoveLocalLock(locallock);
3466  }
3467 
3468  /*
3469  * Now, scan each lock partition separately.
3470  */
3471  for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
3472  {
3473  LWLock *partitionLock;
3474  dlist_head *procLocks = &(MyProc->myProcLocks[partition]);
3475  dlist_mutable_iter proclock_iter;
3476 
3477  partitionLock = LockHashPartitionLockByIndex(partition);
3478 
3479  /*
3480  * If the proclock list for this partition is empty, we can skip
3481  * acquiring the partition lock. This optimization is safer than the
3482  * situation in LockReleaseAll, because we got rid of any fast-path
3483  * locks during AtPrepare_Locks, so there cannot be any case where
3484  * another backend is adding something to our lists now. For safety,
3485  * though, we code this the same way as in LockReleaseAll.
3486  */
3487  if (dlist_is_empty(procLocks))
3488  continue; /* needn't examine this partition */
3489 
3490  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
3491 
3492  dlist_foreach_modify(proclock_iter, procLocks)
3493  {
3494  proclock = dlist_container(PROCLOCK, procLink, proclock_iter.cur);
3495 
3496  Assert(proclock->tag.myProc == MyProc);
3497 
3498  lock = proclock->tag.myLock;
3499 
3500  /* Ignore VXID locks */
      /*
       * NOTE(review): line 3501 dropped; per the References list it is
       * presumably
       * if (lock->tag.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
       * -- confirm against lock.c.
       */
3502  continue;
3503 
3504  PROCLOCK_PRINT("PostPrepare_Locks", proclock);
3505  LOCK_PRINT("PostPrepare_Locks", lock, 0);
3506  Assert(lock->nRequested >= 0);
3507  Assert(lock->nGranted >= 0);
3508  Assert(lock->nGranted <= lock->nRequested);
3509  Assert((proclock->holdMask & ~lock->grantMask) == 0);
3510 
3511  /* Ignore it if nothing to release (must be a session lock) */
3512  if (proclock->releaseMask == 0)
3513  continue;
3514 
3515  /* Else we should be releasing all locks */
3516  if (proclock->releaseMask != proclock->holdMask)
3517  elog(PANIC, "we seem to have dropped a bit somewhere");
3518 
3519  /*
3520  * We cannot simply modify proclock->tag.myProc to reassign
3521  * ownership of the lock, because that's part of the hash key and
3522  * the proclock would then be in the wrong hash chain. Instead
3523  * use hash_update_hash_key. (We used to create a new hash entry,
3524  * but that risks out-of-memory failure if other processes are
3525  * busy making proclocks too.) We must unlink the proclock from
3526  * our procLink chain and put it into the new proc's chain, too.
3527  *
3528  * Note: the updated proclock hash key will still belong to the
3529  * same hash partition, cf proclock_hash(). So the partition lock
3530  * we already hold is sufficient for this.
3531  */
3532  dlist_delete(&proclock->procLink);
3533 
3534  /*
3535  * Create the new hash key for the proclock.
3536  */
3537  proclocktag.myLock = lock;
3538  proclocktag.myProc = newproc;
3539 
3540  /*
3541  * Update groupLeader pointer to point to the new proc. (We'd
3542  * better not be a member of somebody else's lock group!)
3543  */
3544  Assert(proclock->groupLeader == proclock->tag.myProc);
3545  proclock->groupLeader = newproc;
3546 
3547  /*
3548  * Update the proclock. We should not find any existing entry for
3549  * the same hash key, since there can be only one entry for any
3550  * given lock with my own proc.
3551  */
      /*
       * NOTE(review): line 3552 dropped; per the References list it is
       * presumably if (!hash_update_hash_key(LockMethodProcLockHash,
       * -- confirm against lock.c.
       */
3553  proclock,
3554  &proclocktag))
3555  elog(PANIC, "duplicate entry found while reassigning a prepared transaction's locks");
3556 
3557  /* Re-link into the new proc's proclock list */
3558  dlist_push_tail(&newproc->myProcLocks[partition], &proclock->procLink);
3559 
3560  PROCLOCK_PRINT("PostPrepare_Locks: updated", proclock);
3561  } /* loop over PROCLOCKs within this partition */
3562 
3563  LWLockRelease(partitionLock);
3564  } /* loop over partitions */
3565 
3566  END_CRIT_SECTION();
3567 }
bool hash_update_hash_key(HTAB *hashp, void *existingEntry, const void *newKeyPtr)
Definition: dynahash.c:1145
#define START_CRIT_SECTION()
Definition: miscadmin.h:149
#define END_CRIT_SECTION()
Definition: miscadmin.h:151

References Assert, dlist_mutable_iter::cur, dlist_container, dlist_delete(), dlist_foreach_modify, dlist_is_empty(), dlist_push_tail(), elog, END_CRIT_SECTION, ereport, errcode(), errmsg(), LOCK::grantMask, PROCLOCK::groupLeader, hash_seq_init(), hash_seq_search(), hash_update_hash_key(), PROCLOCK::holdMask, i, LOCALLOCKTAG::lock, LOCALLOCK::lock, LOCK_PRINT, LOCKBIT_ON, PGPROC::lockGroupLeader, LockHashPartitionLockByIndex, LockMethodLocalHash, LockMethodProcLockHash, LOCALLOCK::lockOwners, LOCKTAG::locktag_type, LOCKTAG_VIRTUALTRANSACTION, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), LOCALLOCKTAG::mode, PROCLOCKTAG::myLock, MyProc, PROCLOCKTAG::myProc, PGPROC::myProcLocks, LOCK::nGranted, LOCALLOCK::nLocks, LOCK::nRequested, NUM_LOCK_PARTITIONS, LOCALLOCK::numLockOwners, PANIC, PROCLOCK::procLink, LOCALLOCK::proclock, PROCLOCK_PRINT, PROCLOCK::releaseMask, RemoveLocalLock(), START_CRIT_SECTION, LOCK::tag, PROCLOCK::tag, LOCALLOCK::tag, and TwoPhaseGetDummyProc().

Referenced by PrepareTransaction().

◆ proclock_hash()

static uint32 proclock_hash ( const void *  key,
Size  keysize 
)
static

Definition at line 521 of file lock.c.

522 {
523  const PROCLOCKTAG *proclocktag = (const PROCLOCKTAG *) key;
524  uint32 lockhash;
525  Datum procptr;
526 
527  Assert(keysize == sizeof(PROCLOCKTAG));
528 
529  /* Look into the associated LOCK object, and compute its hash code */
530  lockhash = LockTagHashCode(&proclocktag->myLock->tag);
531 
532  /*
533  * To make the hash code also depend on the PGPROC, we xor the proc
534  * struct's address into the hash code, left-shifted so that the
535  * partition-number bits don't change. Since this is only a hash, we
536  * don't care if we lose high-order bits of the address; use an
537  * intermediate variable to suppress cast-pointer-to-int warnings.
538  */
539  procptr = PointerGetDatum(proclocktag->myProc);
540  lockhash ^= ((uint32) procptr) << LOG2_NUM_LOCK_PARTITIONS;
541 
542  return lockhash;
543 }
#define LOG2_NUM_LOCK_PARTITIONS
Definition: lwlock.h:96
static Datum PointerGetDatum(const void *X)
Definition: postgres.h:322
uintptr_t Datum
Definition: postgres.h:64

References Assert, sort-test::key, LockTagHashCode(), LOG2_NUM_LOCK_PARTITIONS, PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, PointerGetDatum(), and LOCK::tag.

Referenced by InitLocks().

◆ ProcLockHashCode()

static uint32 ProcLockHashCode ( const PROCLOCKTAG proclocktag,
uint32  hashcode 
)
inlinestatic

Definition at line 552 of file lock.c.

553 {
554  uint32 lockhash = hashcode;
555  Datum procptr;
556 
557  /*
558  * This must match proclock_hash()!
559  */
560  procptr = PointerGetDatum(proclocktag->myProc);
561  lockhash ^= ((uint32) procptr) << LOG2_NUM_LOCK_PARTITIONS;
562 
563  return lockhash;
564 }

References LOG2_NUM_LOCK_PARTITIONS, PROCLOCKTAG::myProc, and PointerGetDatum().

Referenced by CleanUpLock(), FastPathGetRelationLockEntry(), lock_twophase_recover(), LockAcquireExtended(), LockRefindAndRelease(), and SetupLockInTable().

◆ ReleaseLockIfHeld()

static void ReleaseLockIfHeld ( LOCALLOCK locallock,
bool  sessionLock 
)
static

Definition at line 2499 of file lock.c.

2500 {
2501  ResourceOwner owner;
2502  LOCALLOCKOWNER *lockOwners;
2503  int i;
2504 
2505  /* Identify owner for lock (must match LockRelease!) */
2506  if (sessionLock)
2507  owner = NULL;
2508  else
2509  owner = CurrentResourceOwner;
2510 
2511  /* Scan to see if there are any locks belonging to the target owner */
2512  lockOwners = locallock->lockOwners;
2513  for (i = locallock->numLockOwners - 1; i >= 0; i--)
2514  {
2515  if (lockOwners[i].owner == owner)
2516  {
2517  Assert(lockOwners[i].nLocks > 0);
2518  if (lockOwners[i].nLocks < locallock->nLocks)
2519  {
2520  /*
2521  * We will still hold this lock after forgetting this
2522  * ResourceOwner.
2523  */
2524  locallock->nLocks -= lockOwners[i].nLocks;
2525  /* compact out unused slot */
2526  locallock->numLockOwners--;
2527  if (owner != NULL)
2528  ResourceOwnerForgetLock(owner, locallock);
2529  if (i < locallock->numLockOwners)
2530  lockOwners[i] = lockOwners[locallock->numLockOwners];
2531  }
2532  else
2533  {
2534  Assert(lockOwners[i].nLocks == locallock->nLocks);
2535  /* We want to call LockRelease just once */
2536  lockOwners[i].nLocks = 1;
2537  locallock->nLocks = 1;
2538  if (!LockRelease(&locallock->tag.lock,
2539  locallock->tag.mode,
2540  sessionLock))
2541  elog(WARNING, "ReleaseLockIfHeld: failed??");
2542  }
2543  break;
2544  }
2545  }
2546 }
bool LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
Definition: lock.c:1964

References Assert, CurrentResourceOwner, elog, i, LOCALLOCKTAG::lock, LOCALLOCK::lockOwners, LockRelease(), LOCALLOCKTAG::mode, LOCALLOCKOWNER::nLocks, LOCALLOCK::nLocks, LOCALLOCK::numLockOwners, ResourceOwnerForgetLock(), LOCALLOCK::tag, and WARNING.

Referenced by LockReleaseCurrentOwner(), and LockReleaseSession().

◆ RemoveFromWaitQueue()

void RemoveFromWaitQueue ( PGPROC proc,
uint32  hashcode 
)

Definition at line 1908 of file lock.c.

/*
 * RemoveFromWaitQueue
 *		Remove proc from its lock's wait queue and undo the request-count
 *		bookkeeping made when it started waiting; then let CleanUpLock()
 *		recycle the proclock and wake other waiters as appropriate.
 *
 * Caller must hold the partition lock matching hashcode (the lock's hash).
 */
1909 {
1910  LOCK *waitLock = proc->waitLock;
1911  PROCLOCK *proclock = proc->waitProcLock;
1912  LOCKMODE lockmode = proc->waitLockMode;
1913  LOCKMETHODID lockmethodid = LOCK_LOCKMETHOD(*waitLock);
1914 
1915  /* Make sure proc is waiting */
      /*
       * NOTE(review): line 1916 dropped by the rendering; per the References
       * list it is presumably
       * Assert(proc->waitStatus == PROC_WAIT_STATUS_WAITING); -- confirm.
       */
1917  Assert(proc->links.next != NULL);
1918  Assert(waitLock);
1919  Assert(!dclist_is_empty(&waitLock->waitProcs));
1920  Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
1921 
1922  /* Remove proc from lock's wait queue */
1923  dclist_delete_from_thoroughly(&waitLock->waitProcs, &proc->links);
1924 
1925  /* Undo increments of request counts by waiting process */
1926  Assert(waitLock->nRequested > 0);
1927  Assert(waitLock->nRequested > proc->waitLock->nGranted);
1928  waitLock->nRequested--;
1929  Assert(waitLock->requested[lockmode] > 0);
1930  waitLock->requested[lockmode]--;
1931  /* don't forget to clear waitMask bit if appropriate */
1932  if (waitLock->granted[lockmode] == waitLock->requested[lockmode])
1933  waitLock->waitMask &= LOCKBIT_OFF(lockmode);
1934 
1935  /* Clean up the proc's own state, and pass it the ok/fail signal */
1936  proc->waitLock = NULL;
1937  proc->waitProcLock = NULL;
      /*
       * NOTE(review): line 1938 dropped; per the References list it is
       * presumably proc->waitStatus = PROC_WAIT_STATUS_ERROR; -- confirm.
       */
1939 
1940  /*
1941  * Delete the proclock immediately if it represents no already-held locks.
1942  * (This must happen now because if the owner of the lock decides to
1943  * release it, and the requested/granted counts then go to zero,
1944  * LockRelease expects there to be no remaining proclocks.) Then see if
1945  * any other waiters for the lock can be woken up now.
1946  */
1947  CleanUpLock(waitLock, proclock,
1948  LockMethods[lockmethodid], hashcode,
1949  true);
1950 }
static bool dclist_is_empty(const dclist_head *head)
Definition: ilist.h:682
static void dclist_delete_from_thoroughly(dclist_head *head, dlist_node *node)
Definition: ilist.h:776
@ PROC_WAIT_STATUS_WAITING
Definition: proc.h:120
@ PROC_WAIT_STATUS_ERROR
Definition: proc.h:121
PROCLOCK * waitProcLock
Definition: proc.h:228
ProcWaitStatus waitStatus
Definition: proc.h:162

References Assert, CleanUpLock(), dclist_delete_from_thoroughly(), dclist_is_empty(), LOCK::granted, lengthof, PGPROC::links, LOCK_LOCKMETHOD, LOCKBIT_OFF, LockMethods, dlist_node::next, LOCK::nGranted, LOCK::nRequested, PROC_WAIT_STATUS_ERROR, PROC_WAIT_STATUS_WAITING, LOCK::requested, PGPROC::waitLock, PGPROC::waitLockMode, LOCK::waitMask, PGPROC::waitProcLock, LOCK::waitProcs, and PGPROC::waitStatus.

Referenced by CheckDeadLock(), LockErrorCleanup(), and ProcSleep().

◆ RemoveLocalLock()

static void RemoveLocalLock ( LOCALLOCK locallock)
static

Definition at line 1376 of file lock.c.

/*
 * RemoveLocalLock
 *		Delete a locallock entry: forget it from every owning ResourceOwner,
 *		decrement the fast-path strong-lock count if this entry bumped it,
 *		and remove it from the local lock hash table.
 */
1377 {
1378  int i;
1379 
1380  for (i = locallock->numLockOwners - 1; i >= 0; i--)
1381  {
1382  if (locallock->lockOwners[i].owner != NULL)
1383  ResourceOwnerForgetLock(locallock->lockOwners[i].owner, locallock);
1384  }
1385  locallock->numLockOwners = 0;
1386  if (locallock->lockOwners != NULL)
1387  pfree(locallock->lockOwners)
1388  locallock->lockOwners = NULL;
1389 
1390  if (locallock->holdsStrongLockCount)
1391  {
1392  uint32 fasthashcode;
1393 
1394  fasthashcode = FastPathStrongLockHashPartition(locallock->hashcode);
1395 
      /*
       * NOTE(review): line 1396 dropped by the rendering; per the References
       * list it is presumably
       * SpinLockAcquire(&FastPathStrongRelationLocks->mutex); -- confirm.
       */
1397  Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
1398  FastPathStrongRelationLocks->count[fasthashcode]--;
1399  locallock->holdsStrongLockCount = false;
      /*
       * NOTE(review): line 1400 dropped; presumably
       * SpinLockRelease(&FastPathStrongRelationLocks->mutex); -- confirm.
       */
1401  }
1402 
      /*
       * NOTE(review): line 1403 dropped; per the References list it is
       * presumably if (!hash_search(LockMethodLocalHash, -- confirm.
       */
1404  &(locallock->tag),
1405  HASH_REMOVE, NULL))
1406  elog(WARNING, "locallock table corrupted");
1407 
1408  /*
1409  * Indicate that the lock is released for certain types of locks
1410  */
1411  CheckAndSetLockHeld(locallock, false);
1412 }
void pfree(void *pointer)
Definition: mcxt.c:1521

References Assert, CheckAndSetLockHeld(), FastPathStrongRelationLockData::count, elog, FastPathStrongLockHashPartition, FastPathStrongRelationLocks, HASH_REMOVE, hash_search(), LOCALLOCK::hashcode, LOCALLOCK::holdsStrongLockCount, i, LockMethodLocalHash, LOCALLOCK::lockOwners, FastPathStrongRelationLockData::mutex, LOCALLOCK::numLockOwners, LOCALLOCKOWNER::owner, pfree(), ResourceOwnerForgetLock(), SpinLockAcquire, SpinLockRelease, LOCALLOCK::tag, and WARNING.

Referenced by LockAcquireExtended(), LockHasWaiters(), LockRelease(), LockReleaseAll(), and PostPrepare_Locks().

◆ SetupLockInTable()

static PROCLOCK * SetupLockInTable ( LockMethod  lockMethodTable,
PGPROC proc,
const LOCKTAG locktag,
uint32  hashcode,
LOCKMODE  lockmode 
)
static

Definition at line 1183 of file lock.c.

/*
 * SetupLockInTable
 *		Find or create the shared LOCK and PROCLOCK objects for a lock
 *		request by proc on locktag, and bump the lock's request counts.
 *
 * Returns the PROCLOCK, or NULL if out of shared memory (in which case a
 * LOCK object created by us with no other requestors is garbage-collected
 * before returning).  Caller must hold the partition lock for hashcode.
 */
1185 {
1186  LOCK *lock;
1187  PROCLOCK *proclock;
1188  PROCLOCKTAG proclocktag;
1189  uint32 proclock_hashcode;
1190  bool found;
1191 
1192  /*
1193  * Find or create a lock with this tag.
1194  */
      /*
       * NOTE(review): line 1195 dropped by the rendering; per the References
       * list it is presumably
       * lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
       * -- confirm against lock.c.
       */
1196  locktag,
1197  hashcode,
      /*
       * NOTE(review): line 1198 dropped; presumably HASH_ENTER_NULL,
       * (referenced below) -- confirm.
       */
1199  &found);
1200  if (!lock)
1201  return NULL;
1202 
1203  /*
1204  * if it's a new lock object, initialize it
1205  */
1206  if (!found)
1207  {
1208  lock->grantMask = 0;
1209  lock->waitMask = 0;
1210  dlist_init(&lock->procLocks);
1211  dclist_init(&lock->waitProcs);
1212  lock->nRequested = 0;
1213  lock->nGranted = 0;
1214  MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
1215  MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
1216  LOCK_PRINT("LockAcquire: new", lock, lockmode);
1217  }
1218  else
1219  {
1220  LOCK_PRINT("LockAcquire: found", lock, lockmode);
1221  Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
1222  Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
1223  Assert(lock->nGranted <= lock->nRequested);
1224  }
1225 
1226  /*
1227  * Create the hash key for the proclock table.
1228  */
1229  proclocktag.myLock = lock;
1230  proclocktag.myProc = proc;
1231 
1232  proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
1233 
1234  /*
1235  * Find or create a proclock entry with this tag
1236  */
      /*
       * NOTE(review): line 1237 dropped; presumably
       * proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
       * -- confirm against lock.c.
       */
1238  &proclocktag,
1239  proclock_hashcode,
      /*
       * NOTE(review): line 1240 dropped; presumably HASH_ENTER_NULL,
       * -- confirm.
       */
1241  &found);
1242  if (!proclock)
1243  {
1244  /* Oops, not enough shmem for the proclock */
1245  if (lock->nRequested == 0)
1246  {
1247  /*
1248  * There are no other requestors of this lock, so garbage-collect
1249  * the lock object. We *must* do this to avoid a permanent leak
1250  * of shared memory, because there won't be anything to cause
1251  * anyone to release the lock object later.
1252  */
1253  Assert(dlist_is_empty(&(lock->procLocks)));
      /*
       * NOTE(review): line 1254 dropped; presumably
       * if (!hash_search_with_hash_value(LockMethodLockHash, -- confirm.
       */
1255  &(lock->tag),
1256  hashcode,
1257  HASH_REMOVE,
1258  NULL))
1259  elog(PANIC, "lock table corrupted");
1260  }
1261  return NULL;
1262  }
1263 
1264  /*
1265  * If new, initialize the new entry
1266  */
1267  if (!found)
1268  {
1269  uint32 partition = LockHashPartition(hashcode);
1270 
1271  /*
1272  * It might seem unsafe to access proclock->groupLeader without a
1273  * lock, but it's not really. Either we are initializing a proclock
1274  * on our own behalf, in which case our group leader isn't changing
1275  * because the group leader for a process can only ever be changed by
1276  * the process itself; or else we are transferring a fast-path lock to
1277  * the main lock table, in which case that process can't change its
1278  * lock group leader without first releasing all of its locks (and in
1279  * particular the one we are currently transferring).
1280  */
1281  proclock->groupLeader = proc->lockGroupLeader != NULL ?
1282  proc->lockGroupLeader : proc;
1283  proclock->holdMask = 0;
1284  proclock->releaseMask = 0;
1285  /* Add proclock to appropriate lists */
1286  dlist_push_tail(&lock->procLocks, &proclock->lockLink);
1287  dlist_push_tail(&proc->myProcLocks[partition], &proclock->procLink);
1288  PROCLOCK_PRINT("LockAcquire: new", proclock);
1289  }
1290  else
1291  {
1292  PROCLOCK_PRINT("LockAcquire: found", proclock);
1293  Assert((proclock->holdMask & ~lock->grantMask) == 0);
1294 
1295 #ifdef CHECK_DEADLOCK_RISK
1296 
1297  /*
1298  * Issue warning if we already hold a lower-level lock on this object
1299  * and do not hold a lock of the requested level or higher. This
1300  * indicates a deadlock-prone coding practice (eg, we'd have a
1301  * deadlock if another backend were following the same code path at
1302  * about the same time).
1303  *
1304  * This is not enabled by default, because it may generate log entries
1305  * about user-level coding practices that are in fact safe in context.
1306  * It can be enabled to help find system-level problems.
1307  *
1308  * XXX Doing numeric comparison on the lockmodes is a hack; it'd be
1309  * better to use a table. For now, though, this works.
1310  */
1311  {
1312  int i;
1313 
1314  for (i = lockMethodTable->numLockModes; i > 0; i--)
1315  {
1316  if (proclock->holdMask & LOCKBIT_ON(i))
1317  {
1318  if (i >= (int) lockmode)
1319  break; /* safe: we have a lock >= req level */
1320  elog(LOG, "deadlock risk: raising lock level"
1321  " from %s to %s on object %u/%u/%u",
1322  lockMethodTable->lockModeNames[i],
1323  lockMethodTable->lockModeNames[lockmode],
1324  lock->tag.locktag_field1, lock->tag.locktag_field2,
1325  lock->tag.locktag_field3);
1326  break;
1327  }
1328  }
1329  }
1330 #endif /* CHECK_DEADLOCK_RISK */
1331  }
1332 
1333  /*
1334  * lock->nRequested and lock->requested[] count the total number of
1335  * requests, whether granted or waiting, so increment those immediately.
1336  * The other counts don't increment till we get the lock.
1337  */
1338  lock->nRequested++;
1339  lock->requested[lockmode]++;
1340  Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
1341 
1342  /*
1343  * We shouldn't already hold the desired lock; else locallock table is
1344  * broken.
1345  */
1346  if (proclock->holdMask & LOCKBIT_ON(lockmode))
1347  elog(ERROR, "lock %s on object %u/%u/%u is already held",
1348  lockMethodTable->lockModeNames[lockmode],
1349  lock->tag.locktag_field1, lock->tag.locktag_field2,
1350  lock->tag.locktag_field3);
1351 
1352  return proclock;
1353 }

References Assert, dclist_init(), dlist_init(), dlist_is_empty(), dlist_push_tail(), elog, ERROR, LOCK::granted, LOCK::grantMask, PROCLOCK::groupLeader, HASH_ENTER_NULL, HASH_REMOVE, hash_search_with_hash_value(), PROCLOCK::holdMask, i, LOCK_PRINT, LOCKBIT_ON, PGPROC::lockGroupLeader, LockHashPartition, PROCLOCK::lockLink, LockMethodLockHash, LockMethodProcLockHash, LockMethodData::lockModeNames, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_field3, LOG, MAX_LOCKMODES, MemSet, PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, PGPROC::myProcLocks, LOCK::nGranted, LOCK::nRequested, LockMethodData::numLockModes, PANIC, PROCLOCK::procLink, PROCLOCK_PRINT, ProcLockHashCode(), LOCK::procLocks, PROCLOCK::releaseMask, LOCK::requested, LOCK::tag, LOCK::waitMask, and LOCK::waitProcs.

Referenced by FastPathGetRelationLockEntry(), FastPathTransferRelationLocks(), LockAcquireExtended(), and VirtualXactLock().

◆ UnGrantLock()

static bool UnGrantLock ( LOCK lock,
LOCKMODE  lockmode,
PROCLOCK proclock,
LockMethod  lockMethodTable 
)
static

Definition at line 1581 of file lock.c.

1583 {
1584  bool wakeupNeeded = false;
1585 
1586  Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
1587  Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
1588  Assert(lock->nGranted <= lock->nRequested);
1589 
1590  /*
1591  * fix the general lock stats
1592  */
1593  lock->nRequested--;
1594  lock->requested[lockmode]--;
1595  lock->nGranted--;
1596  lock->granted[lockmode]--;
1597 
1598  if (lock->granted[lockmode] == 0)
1599  {
1600  /* change the conflict mask. No more of this lock type. */
1601  lock->grantMask &= LOCKBIT_OFF(lockmode);
1602  }
1603 
1604  LOCK_PRINT("UnGrantLock: updated", lock, lockmode);
1605 
1606  /*
1607  * We need only run ProcLockWakeup if the released lock conflicts with at
1608  * least one of the lock types requested by waiter(s). Otherwise whatever
1609  * conflict made them wait must still exist. NOTE: before MVCC, we could
1610  * skip wakeup if lock->granted[lockmode] was still positive. But that's
1611  * not true anymore, because the remaining granted locks might belong to
1612  * some waiter, who could now be awakened because he doesn't conflict with
1613  * his own locks.
1614  */
1615  if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
1616  wakeupNeeded = true;
1617 
1618  /*
1619  * Now fix the per-proclock state.
1620  */
1621  proclock->holdMask &= LOCKBIT_OFF(lockmode);
1622  PROCLOCK_PRINT("UnGrantLock: updated", proclock);
1623 
1624  return wakeupNeeded;
1625 }

References Assert, LockMethodData::conflictTab, LOCK::granted, LOCK::grantMask, PROCLOCK::holdMask, LOCK_PRINT, LOCKBIT_OFF, LOCK::nGranted, LOCK::nRequested, PROCLOCK_PRINT, LOCK::requested, and LOCK::waitMask.

Referenced by LockRefindAndRelease(), LockRelease(), and LockReleaseAll().

◆ VirtualXactLock()

bool VirtualXactLock ( VirtualTransactionId  vxid,
bool  wait 
)

Definition at line 4550 of file lock.c.

/*
 * VirtualXactLock
 *		Check whether the given virtual transaction is still running and,
 *		if wait is true, sleep until it ends (converting the target's
 *		fast-path VXID lock into a regular lock table entry first).
 *
 * Returns the result of XactLockForVirtualXact() once the VXID is gone,
 * or false immediately when wait is false and the VXID is still running.
 */
4551 {
4552  LOCKTAG tag;
4553  PGPROC *proc;
      /*
       * NOTE(review): line 4554 dropped by the rendering; presumably
       * TransactionId xid = InvalidTransactionId; (xid is assigned below
       * and InvalidTransactionId is in the References list) -- confirm.
       */
4555 
      /*
       * NOTE(review): line 4556 dropped; presumably
       * Assert(VirtualTransactionIdIsValid(vxid)); -- confirm.
       */
4557 
      /*
       * NOTE(review): line 4558 dropped; presumably
       * if (VirtualTransactionIdIsRecoveredPreparedXact(vxid))
       * guarding the early return below -- confirm.
       */
4559  /* no vxid lock; localTransactionId is a normal, locked XID */
4560  return XactLockForVirtualXact(vxid, vxid.localTransactionId, wait);
4561 
4562  SET_LOCKTAG_VIRTUALTRANSACTION(tag, vxid);
4563 
4564  /*
4565  * If a lock table entry must be made, this is the PGPROC on whose behalf
4566  * it must be done. Note that the transaction might end or the PGPROC
4567  * might be reassigned to a new backend before we get around to examining
4568  * it, but it doesn't matter. If we find upon examination that the
4569  * relevant lxid is no longer running here, that's enough to prove that
4570  * it's no longer running anywhere.
4571  */
4572  proc = ProcNumberGetProc(vxid.procNumber);
4573  if (proc == NULL)
4574  return XactLockForVirtualXact(vxid, InvalidTransactionId, wait);
4575 
4576  /*
4577  * We must acquire this lock before checking the procNumber and lxid
4578  * against the ones we're waiting for. The target backend will only set
4579  * or clear lxid while holding this lock.
4580  */
      /*
       * NOTE(review): line 4581 dropped; presumably
       * LWLockAcquire(&proc->fpInfoLock, LW_EXCLUSIVE); (matching the
       * LWLockRelease(&proc->fpInfoLock) calls below) -- confirm.
       */
4582 
4583  if (proc->vxid.procNumber != vxid.procNumber
4584  || proc->fpLocalTransactionId != vxid.localTransactionId)
4585  {
4586  /* VXID ended */
4587  LWLockRelease(&proc->fpInfoLock);
4588  return XactLockForVirtualXact(vxid, InvalidTransactionId, wait);
4589  }
4590 
4591  /*
4592  * If we aren't asked to wait, there's no need to set up a lock table
4593  * entry. The transaction is still in progress, so just return false.
4594  */
4595  if (!wait)
4596  {
4597  LWLockRelease(&proc->fpInfoLock);
4598  return false;
4599  }
4600 
4601  /*
4602  * OK, we're going to need to sleep on the VXID. But first, we must set
4603  * up the primary lock table entry, if needed (ie, convert the proc's
4604  * fast-path lock on its VXID to a regular lock).
4605  */
4606  if (proc->fpVXIDLock)
4607  {
4608  PROCLOCK *proclock;
4609  uint32 hashcode;
4610  LWLock *partitionLock;
4611 
4612  hashcode = LockTagHashCode(&tag);
4613 
4614  partitionLock = LockHashPartitionLock(hashcode);
4615  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4616 
      /*
       * NOTE(review): line 4617 dropped; presumably
       * proclock = SetupLockInTable(LockMethods[DEFAULT_LOCKMETHOD], proc,
       * -- confirm against lock.c.
       */
4618  &tag, hashcode, ExclusiveLock);
4619  if (!proclock)
4620  {
4621  LWLockRelease(partitionLock);
4622  LWLockRelease(&proc->fpInfoLock);
4623  ereport(ERROR,
4624  (errcode(ERRCODE_OUT_OF_MEMORY),
4625  errmsg("out of shared memory"),
4626  errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
4627  }
4628  GrantLock(proclock->tag.myLock, proclock, ExclusiveLock);
4629 
4630  LWLockRelease(partitionLock);
4631 
4632  proc->fpVXIDLock = false;
4633  }
4634 
4635  /*
4636  * If the proc has an XID now, we'll avoid a TwoPhaseGetXidByVirtualXID()
4637  * search. The proc might have assigned this XID but not yet locked it,
4638  * in which case the proc will lock this XID before releasing the VXID.
4639  * The fpInfoLock critical section excludes VirtualXactLockTableCleanup(),
4640  * so we won't save an XID of a different VXID. It doesn't matter whether
4641  * we save this before or after setting up the primary lock table entry.
4642  */
4643  xid = proc->xid;
4644 
4645  /* Done with proc->fpLockBits */
4646  LWLockRelease(&proc->fpInfoLock);
4647 
4648  /* Time to wait. */
4649  (void) LockAcquire(&tag, ShareLock, false, false);
4650 
4651  LockRelease(&tag, ShareLock, false);
4652  return XactLockForVirtualXact(vxid, xid, wait);
4653 }
static bool XactLockForVirtualXact(VirtualTransactionId vxid, TransactionId xid, bool wait)
Definition: lock.c:4499
LockAcquireResult LockAcquire(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock, bool dontWait)
Definition: lock.c:756
#define VirtualTransactionIdIsRecoveredPreparedXact(vxid)
Definition: lock.h:69
#define ShareLock
Definition: lockdefs.h:40
PGPROC * ProcNumberGetProc(ProcNumber procNumber)
Definition: procarray.c:3142
#define InvalidTransactionId
Definition: transam.h:31

References Assert, DEFAULT_LOCKMETHOD, ereport, errcode(), errhint(), errmsg(), ERROR, ExclusiveLock, PGPROC::fpInfoLock, PGPROC::fpLocalTransactionId, PGPROC::fpVXIDLock, GrantLock(), InvalidTransactionId, VirtualTransactionId::localTransactionId, LockAcquire(), LockHashPartitionLock, LockMethods, LockRelease(), LockTagHashCode(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), PROCLOCKTAG::myLock, VirtualTransactionId::procNumber, PGPROC::procNumber, ProcNumberGetProc(), SET_LOCKTAG_VIRTUALTRANSACTION, SetupLockInTable(), ShareLock, PROCLOCK::tag, VirtualTransactionIdIsRecoveredPreparedXact, VirtualTransactionIdIsValid, PGPROC::vxid, XactLockForVirtualXact(), and PGPROC::xid.

Referenced by ResolveRecoveryConflictWithVirtualXIDs(), WaitForLockersMultiple(), and WaitForOlderSnapshots().

◆ VirtualXactLockTableCleanup()

void VirtualXactLockTableCleanup ( void  )

Definition at line 4450 of file lock.c.

4451 {
4452  bool fastpath;
4453  LocalTransactionId lxid;
4454 
4456 
4457  /*
4458  * Clean up shared memory state.
4459  */
4461 
4462  fastpath = MyProc->fpVXIDLock;
4463  lxid = MyProc->fpLocalTransactionId;
4464  MyProc->fpVXIDLock = false;
4466 
4468 
4469  /*
4470  * If fpVXIDLock has been cleared without touching fpLocalTransactionId,
4471  * that means someone transferred the lock to the main lock table.
4472  */
4473  if (!fastpath && LocalTransactionIdIsValid(lxid))
4474  {
4475  VirtualTransactionId vxid;
4476  LOCKTAG locktag;
4477 
4478  vxid.procNumber = MyProcNumber;
4479  vxid.localTransactionId = lxid;
4480  SET_LOCKTAG_VIRTUALTRANSACTION(locktag, vxid);
4481 
4483  &locktag, ExclusiveLock, false);
4484  }
4485 }
uint32 LocalTransactionId
Definition: c.h:654
ProcNumber MyProcNumber
Definition: globals.c:88
#define LocalTransactionIdIsValid(lxid)
Definition: lock.h:66

References Assert, DEFAULT_LOCKMETHOD, ExclusiveLock, PGPROC::fpInfoLock, PGPROC::fpLocalTransactionId, PGPROC::fpVXIDLock, INVALID_PROC_NUMBER, InvalidLocalTransactionId, VirtualTransactionId::localTransactionId, LocalTransactionIdIsValid, LockMethods, LockRefindAndRelease(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), MyProc, MyProcNumber, VirtualTransactionId::procNumber, PGPROC::procNumber, SET_LOCKTAG_VIRTUALTRANSACTION, and PGPROC::vxid.

Referenced by LockReleaseAll(), and ShutdownRecoveryTransactionEnvironment().

◆ VirtualXactLockTableInsert()

◆ WaitOnLock()

/*
 * WaitOnLock -- wait to acquire a lock
 *
 * The caller must have set up the lock table entry and be holding the lock
 * partition lock, which ProcSleep releases.  On successful wakeup the lock
 * has been granted to us; on deadlock failure (when dontWait is false) this
 * reports the deadlock via ereport and does not return.
 */
static void
WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner, bool dontWait)
{
	LOCKMETHODID lockmethodid = LOCALLOCK_LOCKMETHOD(*locallock);
	LockMethod	lockMethodTable = LockMethods[lockmethodid];

	LOCK_PRINT("WaitOnLock: sleeping on lock",
			   locallock->lock, locallock->tag.mode);

	/* adjust the process title to indicate that it's waiting */
	set_ps_display_suffix("waiting");

	awaitedLock = locallock;
	awaitedOwner = owner;

	/*
	 * NOTE: Think not to put any shared-state cleanup after the call to
	 * ProcSleep, in either the normal or failure path.  The lock state must
	 * be fully set by the lock grantor, or by CheckDeadLock if we give up
	 * waiting for the lock.  This is necessary because of the possibility
	 * that a cancel/die interrupt will interrupt ProcSleep after someone else
	 * grants us the lock, but before we've noticed it.  Hence, after
	 * granting, the locktable state must fully reflect the fact that we own
	 * the lock; we can't do additional work on return.
	 *
	 * We can and do use a PG_TRY block to try to clean up after failure, but
	 * this still has a major limitation: elog(FATAL) can occur while waiting
	 * (eg, a "die" interrupt), and then control won't come back here.  So all
	 * cleanup of essential state should happen in LockErrorCleanup, not here.
	 * We can use PG_TRY to clear the "waiting" status flags, since doing that
	 * is unimportant if the process exits.
	 */
	PG_TRY();
	{
		/*
		 * If dontWait = true, we handle success and failure in the same way
		 * here.  The caller will be able to sort out what has happened.
		 */
		if (ProcSleep(locallock, lockMethodTable, dontWait) != PROC_WAIT_STATUS_OK
			&& !dontWait)
		{
			/*
			 * We failed as a result of a deadlock, see CheckDeadLock().  Quit
			 * now.
			 */
			awaitedLock = NULL;
			LOCK_PRINT("WaitOnLock: aborting on lock",
					   locallock->lock, locallock->tag.mode);
			LWLockRelease(LockHashPartitionLock(locallock->hashcode));

			/*
			 * Now that we aren't holding the partition lock, we can give an
			 * error report including details about the detected deadlock.
			 */
			DeadLockReport();
			/* not reached */
		}
	}
	PG_CATCH();
	{
		/* In this path, awaitedLock remains set until LockErrorCleanup */

		/* reset ps display to remove the suffix */
		set_ps_display_remove_suffix();

		/* and propagate the error */
		PG_RE_THROW();
	}
	PG_END_TRY();

	awaitedLock = NULL;

	/* reset ps display to remove the suffix */
	set_ps_display_remove_suffix();

	LOCK_PRINT("WaitOnLock: wakeup on lock",
			   locallock->lock, locallock->tag.mode);
}
void DeadLockReport(void)
Definition: deadlock.c:1072
#define PG_RE_THROW()
Definition: elog.h:411
#define PG_TRY(...)
Definition: elog.h:370
#define PG_END_TRY(...)
Definition: elog.h:395
#define PG_CATCH(...)
Definition: elog.h:380
@ PROC_WAIT_STATUS_OK
Definition: proc.h:119
void set_ps_display_remove_suffix(void)
Definition: ps_status.c:421
void set_ps_display_suffix(const char *suffix)
Definition: ps_status.c:369
ProcWaitStatus ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable, bool dontWait)
Definition: proc.c:1066

References awaitedLock, awaitedOwner, DeadLockReport(), LOCALLOCK::hashcode, LOCALLOCK_LOCKMETHOD, LOCALLOCK::lock, LOCK_PRINT, LockHashPartitionLock, LockMethods, LWLockRelease(), LOCALLOCKTAG::mode, PG_CATCH, PG_END_TRY, PG_RE_THROW, PG_TRY, PROC_WAIT_STATUS_OK, ProcSleep(), set_ps_display_remove_suffix(), set_ps_display_suffix(), and LOCALLOCK::tag.

Referenced by LockAcquireExtended().

◆ XactLockForVirtualXact()

/*
 * XactLockForVirtualXact
 *
 *		Wait (or check without waiting, if wait is false) for the transaction
 *		identified by "vxid" to complete, by taking a ShareLock on its XID.
 *		If "xid" is invalid, look it up via TwoPhaseGetXidByVirtualXID(); a
 *		recovered prepared transaction's VXID can map to more than one XID,
 *		in which case we loop until all of them have completed.
 *
 * Returns true once the transaction(s) are gone, or false if wait is false
 * and some lock could not be acquired immediately.
 */
static bool
XactLockForVirtualXact(VirtualTransactionId vxid,
					   TransactionId xid, bool wait)
{
	bool		more = false;

	/* There is no point to wait for 2PCs if you have no 2PCs. */
	if (max_prepared_xacts == 0)
		return true;

	do
	{
		LockAcquireResult lar;
		LOCKTAG		tag;

		/* Clear state from previous iterations. */
		if (more)
		{
			xid = InvalidTransactionId;
			more = false;
		}

		/* If we have no xid, try to find one. */
		if (!TransactionIdIsValid(xid))
			xid = TwoPhaseGetXidByVirtualXID(vxid, &more);
		if (!TransactionIdIsValid(xid))
		{
			Assert(!more);
			return true;
		}

		/* Check or wait for XID completion. */
		SET_LOCKTAG_TRANSACTION(tag, xid);
		lar = LockAcquire(&tag, ShareLock, false, !wait);
		if (lar == LOCKACQUIRE_NOT_AVAIL)
			return false;
		LockRelease(&tag, ShareLock, false);
	} while (more);

	return true;
}
#define SET_LOCKTAG_TRANSACTION(locktag, xid)
Definition: lock.h:226
LockAcquireResult
Definition: lock.h:500
TransactionId TwoPhaseGetXidByVirtualXID(VirtualTransactionId vxid, bool *have_more)
Definition: twophase.c:852

References Assert, InvalidTransactionId, LockAcquire(), LOCKACQUIRE_NOT_AVAIL, LockRelease(), max_prepared_xacts, SET_LOCKTAG_TRANSACTION, ShareLock, TransactionIdIsValid, and TwoPhaseGetXidByVirtualXID().

Referenced by VirtualXactLock().

Variable Documentation

◆ awaitedLock

LOCALLOCK* awaitedLock
static

Definition at line 274 of file lock.c.

Referenced by GrantAwaitedLock(), and WaitOnLock().

◆ awaitedOwner

ResourceOwner awaitedOwner
static

Definition at line 275 of file lock.c.

Referenced by GrantAwaitedLock(), and WaitOnLock().

◆ default_lockmethod

const LockMethodData default_lockmethod
static
Initial value:
= {
}
static bool Dummy_trace
Definition: lock.c:121
static const char *const lock_mode_names[]
Definition: lock.c:107
static const LOCKMASK LockConflicts[]
Definition: lock.c:64

Definition at line 124 of file lock.c.

◆ Dummy_trace

bool Dummy_trace = false
static

Definition at line 121 of file lock.c.

◆ FastPathLocalUseCount

int FastPathLocalUseCount = 0
static

◆ FastPathStrongRelationLocks

◆ lock_mode_names

/*
 * Human-readable names for the lock modes, indexed by LOCKMODE number.
 * Slot 0 is a placeholder: valid lock mode numbers start at 1
 * (AccessShareLock) and run through 8 (AccessExclusiveLock).
 */
static const char *const lock_mode_names[] =
{
	"INVALID",
	"AccessShareLock",
	"RowShareLock",
	"RowExclusiveLock",
	"ShareUpdateExclusiveLock",
	"ShareLock",
	"ShareRowExclusiveLock",
	"ExclusiveLock",
	"AccessExclusiveLock"
};

Definition at line 107 of file lock.c.

◆ LockConflicts

const LOCKMASK LockConflicts[]
static

Definition at line 64 of file lock.c.

◆ LockMethodLocalHash

◆ LockMethodLockHash

◆ LockMethodProcLockHash

◆ LockMethods

◆ max_locks_per_xact

int max_locks_per_xact

Definition at line 53 of file lock.c.

Referenced by CheckRequiredParameterValues(), InitControlFile(), and XLogReportParameters().

◆ PG_USED_FOR_ASSERTS_ONLY

bool IsRelationExtensionLockHeld PG_USED_FOR_ASSERTS_ONLY = false
static

Definition at line 185 of file lock.c.

◆ StrongLockInProgress

LOCALLOCK* StrongLockInProgress
static

Definition at line 273 of file lock.c.

Referenced by AbortStrongLockAcquire(), BeginStrongLockAcquire(), and FinishStrongLockAcquire().

◆ user_lockmethod

const LockMethodData user_lockmethod
static
Initial value:

Definition at line 135 of file lock.c.