PostgreSQL Source Code  git master
lock.c File Reference
#include "postgres.h"
#include <signal.h>
#include <unistd.h>
#include "access/transam.h"
#include "access/twophase.h"
#include "access/twophase_rmgr.h"
#include "access/xact.h"
#include "access/xlog.h"
#include "miscadmin.h"
#include "pg_trace.h"
#include "pgstat.h"
#include "storage/proc.h"
#include "storage/procarray.h"
#include "storage/sinvaladt.h"
#include "storage/spin.h"
#include "storage/standby.h"
#include "utils/memutils.h"
#include "utils/ps_status.h"
#include "utils/resowner_private.h"
Include dependency graph for lock.c:

Go to the source code of this file.

Data Structures

struct  TwoPhaseLockRecord
 
struct  FastPathStrongRelationLockData
 

Macros

#define NLOCKENTS()   mul_size(max_locks_per_xact, add_size(MaxBackends, max_prepared_xacts))
 
#define FAST_PATH_BITS_PER_SLOT   3
 
#define FAST_PATH_LOCKNUMBER_OFFSET   1
 
#define FAST_PATH_MASK   ((1 << FAST_PATH_BITS_PER_SLOT) - 1)
 
#define FAST_PATH_GET_BITS(proc, n)   (((proc)->fpLockBits >> (FAST_PATH_BITS_PER_SLOT * n)) & FAST_PATH_MASK)
 
#define FAST_PATH_BIT_POSITION(n, l)
 
#define FAST_PATH_SET_LOCKMODE(proc, n, l)   (proc)->fpLockBits |= UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)
 
#define FAST_PATH_CLEAR_LOCKMODE(proc, n, l)   (proc)->fpLockBits &= ~(UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l))
 
#define FAST_PATH_CHECK_LOCKMODE(proc, n, l)   ((proc)->fpLockBits & (UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)))
 
#define EligibleForRelationFastPath(locktag, mode)
 
#define ConflictsWithRelationFastPath(locktag, mode)
 
#define FAST_PATH_STRONG_LOCK_HASH_BITS   10
 
#define FAST_PATH_STRONG_LOCK_HASH_PARTITIONS   (1 << FAST_PATH_STRONG_LOCK_HASH_BITS)
 
#define FastPathStrongLockHashPartition(hashcode)   ((hashcode) % FAST_PATH_STRONG_LOCK_HASH_PARTITIONS)
 
#define LOCK_PRINT(where, lock, type)   ((void) 0)
 
#define PROCLOCK_PRINT(where, proclockP)   ((void) 0)
 

Typedefs

typedef struct TwoPhaseLockRecord TwoPhaseLockRecord
 

Functions

static bool FastPathGrantRelationLock (Oid relid, LOCKMODE lockmode)
 
static bool FastPathUnGrantRelationLock (Oid relid, LOCKMODE lockmode)
 
static bool FastPathTransferRelationLocks (LockMethod lockMethodTable, const LOCKTAG *locktag, uint32 hashcode)
 
static PROCLOCK * FastPathGetRelationLockEntry (LOCALLOCK *locallock)
 
static uint32 proclock_hash (const void *key, Size keysize)
 
static void RemoveLocalLock (LOCALLOCK *locallock)
 
static PROCLOCK * SetupLockInTable (LockMethod lockMethodTable, PGPROC *proc, const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode)
 
static void GrantLockLocal (LOCALLOCK *locallock, ResourceOwner owner)
 
static void BeginStrongLockAcquire (LOCALLOCK *locallock, uint32 fasthashcode)
 
static void FinishStrongLockAcquire (void)
 
static void WaitOnLock (LOCALLOCK *locallock, ResourceOwner owner)
 
static void ReleaseLockIfHeld (LOCALLOCK *locallock, bool sessionLock)
 
static void LockReassignOwner (LOCALLOCK *locallock, ResourceOwner parent)
 
static bool UnGrantLock (LOCK *lock, LOCKMODE lockmode, PROCLOCK *proclock, LockMethod lockMethodTable)
 
static void CleanUpLock (LOCK *lock, PROCLOCK *proclock, LockMethod lockMethodTable, uint32 hashcode, bool wakeupNeeded)
 
static void LockRefindAndRelease (LockMethod lockMethodTable, PGPROC *proc, LOCKTAG *locktag, LOCKMODE lockmode, bool decrement_strong_lock_count)
 
static void GetSingleProcBlockerStatusData (PGPROC *blocked_proc, BlockedProcsData *data)
 
void InitLocks (void)
 
LockMethod GetLocksMethodTable (const LOCK *lock)
 
LockMethod GetLockTagsMethodTable (const LOCKTAG *locktag)
 
uint32 LockTagHashCode (const LOCKTAG *locktag)
 
static uint32 ProcLockHashCode (const PROCLOCKTAG *proclocktag, uint32 hashcode)
 
bool DoLockModesConflict (LOCKMODE mode1, LOCKMODE mode2)
 
bool LockHeldByMe (const LOCKTAG *locktag, LOCKMODE lockmode)
 
bool LockHasWaiters (const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
 
LockAcquireResult LockAcquire (const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock, bool dontWait)
 
LockAcquireResult LockAcquireExtended (const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock, bool dontWait, bool reportMemoryError, LOCALLOCK **locallockp)
 
bool LockCheckConflicts (LockMethod lockMethodTable, LOCKMODE lockmode, LOCK *lock, PROCLOCK *proclock)
 
void GrantLock (LOCK *lock, PROCLOCK *proclock, LOCKMODE lockmode)
 
void AbortStrongLockAcquire (void)
 
void GrantAwaitedLock (void)
 
void MarkLockClear (LOCALLOCK *locallock)
 
void RemoveFromWaitQueue (PGPROC *proc, uint32 hashcode)
 
bool LockRelease (const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
 
void LockReleaseAll (LOCKMETHODID lockmethodid, bool allLocks)
 
void LockReleaseSession (LOCKMETHODID lockmethodid)
 
void LockReleaseCurrentOwner (LOCALLOCK **locallocks, int nlocks)
 
void LockReassignCurrentOwner (LOCALLOCK **locallocks, int nlocks)
 
VirtualTransactionId * GetLockConflicts (const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
 
void AtPrepare_Locks (void)
 
void PostPrepare_Locks (TransactionId xid)
 
Size LockShmemSize (void)
 
LockData * GetLockStatusData (void)
 
BlockedProcsData * GetBlockerStatusData (int blocked_pid)
 
xl_standby_lock * GetRunningTransactionLocks (int *nlocks)
 
const char * GetLockmodeName (LOCKMETHODID lockmethodid, LOCKMODE mode)
 
void lock_twophase_recover (TransactionId xid, uint16 info, void *recdata, uint32 len)
 
void lock_twophase_standby_recover (TransactionId xid, uint16 info, void *recdata, uint32 len)
 
void lock_twophase_postcommit (TransactionId xid, uint16 info, void *recdata, uint32 len)
 
void lock_twophase_postabort (TransactionId xid, uint16 info, void *recdata, uint32 len)
 
void VirtualXactLockTableInsert (VirtualTransactionId vxid)
 
void VirtualXactLockTableCleanup (void)
 
bool VirtualXactLock (VirtualTransactionId vxid, bool wait)
 
int LockWaiterCount (const LOCKTAG *locktag)
 

Variables

int max_locks_per_xact
 
static const LOCKMASK LockConflicts []
 
static const char *const lock_mode_names []
 
static bool Dummy_trace = false
 
static const LockMethodData default_lockmethod
 
static const LockMethodData user_lockmethod
 
static const LockMethod LockMethods []
 
static int FastPathLocalUseCount = 0
 
static volatile FastPathStrongRelationLockData * FastPathStrongRelationLocks
 
static HTAB * LockMethodLockHash
 
static HTAB * LockMethodProcLockHash
 
static HTAB * LockMethodLocalHash
 
static LOCALLOCK * StrongLockInProgress
 
static LOCALLOCK * awaitedLock
 
static ResourceOwner awaitedOwner
 

Macro Definition Documentation

◆ ConflictsWithRelationFastPath

#define ConflictsWithRelationFastPath (   locktag,
  mode 
)
Value:
((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
(locktag)->locktag_type == LOCKTAG_RELATION && \
(locktag)->locktag_field1 != InvalidOid && \
(mode) > ShareUpdateExclusiveLock)

Definition at line 205 of file lock.c.

Referenced by GetLockConflicts(), lock_twophase_recover(), LockAcquireExtended(), and LockRefindAndRelease().

◆ EligibleForRelationFastPath

#define EligibleForRelationFastPath (   locktag,
  mode 
)
Value:
((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
(locktag)->locktag_type == LOCKTAG_RELATION && \
(locktag)->locktag_field1 == MyDatabaseId && \
MyDatabaseId != InvalidOid && \
(mode) < ShareUpdateExclusiveLock)

Definition at line 199 of file lock.c.

Referenced by LockAcquireExtended(), LockRelease(), and LockReleaseAll().

◆ FAST_PATH_BIT_POSITION

#define FAST_PATH_BIT_POSITION (   n,
  l 
)
Value:
(AssertMacro((l) >= FAST_PATH_LOCKNUMBER_OFFSET), \
 AssertMacro((l) <= FAST_PATH_BITS_PER_SLOT), \
 AssertMacro((n) < FP_LOCK_SLOTS_PER_BACKEND), \
 ((l) - FAST_PATH_LOCKNUMBER_OFFSET + FAST_PATH_BITS_PER_SLOT * (n)))

Definition at line 179 of file lock.c.
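
As an illustration of the bit layout these macros implement, here is a hedged, standalone sketch (not part of lock.c; the slot and mode values are assumptions for illustration). With FAST_PATH_BITS_PER_SLOT = 3 and FAST_PATH_LOCKNUMBER_OFFSET = 1, lock mode l held in fast-path slot n occupies bit (l - 1) + 3 * n of PGPROC->fpLockBits:

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_SLOT      3        /* mirrors FAST_PATH_BITS_PER_SLOT */
#define LOCKNUMBER_OFFSET  1        /* mirrors FAST_PATH_LOCKNUMBER_OFFSET */

int
main(void)
{
    uint64_t fpLockBits = 0;
    int      slot = 5;
    int      mode = 3;              /* RowExclusiveLock, the strongest fast-path-eligible mode */

    /* equivalent of FAST_PATH_SET_LOCKMODE: sets bit (3 - 1) + 3 * 5 = 17 */
    fpLockBits |= UINT64_C(1) << ((mode - LOCKNUMBER_OFFSET) + BITS_PER_SLOT * slot);

    /* equivalent of FAST_PATH_GET_BITS: extract the three bits belonging to slot 5 */
    printf("slot %d bits = 0x%llx\n", slot,
           (unsigned long long) ((fpLockBits >> (BITS_PER_SLOT * slot)) & 0x7));
    return 0;
}

This prints "slot 5 bits = 0x4", i.e. the third lock mode's bit set within slot 5's three-bit group.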

◆ FAST_PATH_BITS_PER_SLOT

#define FAST_PATH_BITS_PER_SLOT   3

Definition at line 174 of file lock.c.

Referenced by FastPathTransferRelationLocks().

◆ FAST_PATH_CHECK_LOCKMODE

#define FAST_PATH_CHECK_LOCKMODE (   proc,
  n,
  l 
)    ((proc)->fpLockBits & (UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)))

◆ FAST_PATH_CLEAR_LOCKMODE

#define FAST_PATH_CLEAR_LOCKMODE (   proc,
  n,
  l 
)    (proc)->fpLockBits &= ~(UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l))

◆ FAST_PATH_GET_BITS

#define FAST_PATH_GET_BITS (   proc,
  n 
)    (((proc)->fpLockBits >> (FAST_PATH_BITS_PER_SLOT * n)) & FAST_PATH_MASK)

◆ FAST_PATH_LOCKNUMBER_OFFSET

#define FAST_PATH_LOCKNUMBER_OFFSET   1

Definition at line 175 of file lock.c.

Referenced by FastPathTransferRelationLocks(), GetLockConflicts(), and GetLockStatusData().

◆ FAST_PATH_MASK

#define FAST_PATH_MASK   ((1 << FAST_PATH_BITS_PER_SLOT) - 1)

Definition at line 176 of file lock.c.

◆ FAST_PATH_SET_LOCKMODE

#define FAST_PATH_SET_LOCKMODE (   proc,
  n,
  l 
)    (proc)->fpLockBits |= UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)

Definition at line 184 of file lock.c.

Referenced by FastPathGrantRelationLock().

◆ FAST_PATH_STRONG_LOCK_HASH_BITS

#define FAST_PATH_STRONG_LOCK_HASH_BITS   10

Definition at line 232 of file lock.c.

◆ FAST_PATH_STRONG_LOCK_HASH_PARTITIONS

#define FAST_PATH_STRONG_LOCK_HASH_PARTITIONS   (1 << FAST_PATH_STRONG_LOCK_HASH_BITS)

Definition at line 233 of file lock.c.

◆ FastPathStrongLockHashPartition

#define FastPathStrongLockHashPartition (   hashcode)    ((hashcode) % FAST_PATH_STRONG_LOCK_HASH_PARTITIONS)
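
For example (a hedged sketch; the hashcode value is an assumption for illustration), since FAST_PATH_STRONG_LOCK_HASH_PARTITIONS is 1 << 10 = 1024, the partition index is simply the lock tag hash reduced modulo 1024, i.e. its low ten bits:

uint32 hashcode = 0xDEADBEEF;          /* illustrative value; normally obtained from LockTagHashCode() */
uint32 fasthashcode = hashcode % 1024; /* FastPathStrongLockHashPartition(hashcode) == 751 */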

◆ LOCK_PRINT

#define LOCK_PRINT (   where,
  lock,
  type 
)    ((void) 0)

Definition at line 337 of file lock.c.

◆ NLOCKENTS

#define NLOCKENTS()   mul_size(max_locks_per_xact, add_size(MaxBackends, max_prepared_xacts))

Definition at line 56 of file lock.c.

Referenced by InitLocks(), and LockShmemSize().
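
NLOCKENTS() gives the assumed capacity of the shared lock tables: max_locks_per_xact entries for every backend and prepared transaction. A hedged sketch of the kind of sizing computation LockShmemSize() performs inside lock.c follows; the factor of 2 applied to the PROCLOCK hash is an illustrative assumption, not necessarily the exact ratio the real code uses:

static Size
lock_tables_size_sketch(void)
{
    long    max_table_size = NLOCKENTS();
    Size    size = 0;

    /* main lock hash table, then the (larger) proclock hash table */
    size = add_size(size, hash_estimate_size(max_table_size, sizeof(LOCK)));
    size = add_size(size, hash_estimate_size(mul_size(max_table_size, 2), sizeof(PROCLOCK)));
    return size;
}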

◆ PROCLOCK_PRINT

#define PROCLOCK_PRINT (   where,
  proclockP 
)    ((void) 0)

Definition at line 338 of file lock.c.

Typedef Documentation

◆ TwoPhaseLockRecord

typedef struct TwoPhaseLockRecord TwoPhaseLockRecord

Function Documentation

◆ AbortStrongLockAcquire()

void AbortStrongLockAcquire ( void  )

Definition at line 1664 of file lock.c.

References Assert, FastPathStrongRelationLockData::count, FastPathStrongLockHashPartition, LOCALLOCK::hashcode, LOCALLOCK::holdsStrongLockCount, FastPathStrongRelationLockData::mutex, SpinLockAcquire, SpinLockRelease, and StrongLockInProgress.

Referenced by LockAcquireExtended(), and LockErrorCleanup().

1665 {
1666  uint32 fasthashcode;
1667  LOCALLOCK *locallock = StrongLockInProgress;
1668 
1669  if (locallock == NULL)
1670  return;
1671 
1672  fasthashcode = FastPathStrongLockHashPartition(locallock->hashcode);
1673  Assert(locallock->holdsStrongLockCount == true);
1674  SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
1675  Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
1676  FastPathStrongRelationLocks->count[fasthashcode]--;
1677  locallock->holdsStrongLockCount = false;
1678  StrongLockInProgress = NULL;
1679  SpinLockRelease(&FastPathStrongRelationLocks->mutex);
1680 }

◆ AtPrepare_Locks()

void AtPrepare_Locks ( void  )

Definition at line 3137 of file lock.c.

References ereport, errcode(), errmsg(), ERROR, FastPathGetRelationLockEntry(), hash_seq_init(), hash_seq_search(), LOCALLOCK::holdsStrongLockCount, i, LOCALLOCKTAG::lock, LOCALLOCK::lock, TwoPhaseLockRecord::lockmode, LOCALLOCK::lockOwners, TwoPhaseLockRecord::locktag, LOCKTAG::locktag_type, LOCKTAG_VIRTUALTRANSACTION, LOCALLOCKTAG::mode, PROCLOCKTAG::myLock, LOCALLOCK::nLocks, LOCALLOCK::numLockOwners, LOCALLOCK::proclock, RegisterTwoPhaseRecord(), status(), PROCLOCK::tag, LOCALLOCK::tag, and TWOPHASE_RM_LOCK_ID.

Referenced by PrepareTransaction().

3138 {
3140  LOCALLOCK *locallock;
3141 
3142  /*
3143  * For the most part, we don't need to touch shared memory for this ---
3144  * all the necessary state information is in the locallock table.
3145  * Fast-path locks are an exception, however: we move any such locks to
3146  * the main table before allowing PREPARE TRANSACTION to succeed.
3147  */
3149 
3150  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3151  {
3152  TwoPhaseLockRecord record;
3153  LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3154  bool haveSessionLock;
3155  bool haveXactLock;
3156  int i;
3157 
3158  /*
3159  * Ignore VXID locks. We don't want those to be held by prepared
3160  * transactions, since they aren't meaningful after a restart.
3161  */
3162  if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3163  continue;
3164 
3165  /* Ignore it if we don't actually hold the lock */
3166  if (locallock->nLocks <= 0)
3167  continue;
3168 
3169  /* Scan to see whether we hold it at session or transaction level */
3170  haveSessionLock = haveXactLock = false;
3171  for (i = locallock->numLockOwners - 1; i >= 0; i--)
3172  {
3173  if (lockOwners[i].owner == NULL)
3174  haveSessionLock = true;
3175  else
3176  haveXactLock = true;
3177  }
3178 
3179  /* Ignore it if we have only session lock */
3180  if (!haveXactLock)
3181  continue;
3182 
3183  /*
3184  * If we have both session- and transaction-level locks, fail. This
3185  * should never happen with regular locks, since we only take those at
3186  * session level in some special operations like VACUUM. It's
3187  * possible to hit this with advisory locks, though.
3188  *
3189  * It would be nice if we could keep the session hold and give away
3190  * the transactional hold to the prepared xact. However, that would
3191  * require two PROCLOCK objects, and we cannot be sure that another
3192  * PROCLOCK will be available when it comes time for PostPrepare_Locks
3193  * to do the deed. So for now, we error out while we can still do so
3194  * safely.
3195  */
3196  if (haveSessionLock)
3197  ereport(ERROR,
3198  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3199  errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3200 
3201  /*
3202  * If the local lock was taken via the fast-path, we need to move it
3203  * to the primary lock table, or just get a pointer to the existing
3204  * primary lock table entry if by chance it's already been
3205  * transferred.
3206  */
3207  if (locallock->proclock == NULL)
3208  {
3209  locallock->proclock = FastPathGetRelationLockEntry(locallock);
3210  locallock->lock = locallock->proclock->tag.myLock;
3211  }
3212 
3213  /*
3214  * Arrange to not release any strong lock count held by this lock
3215  * entry. We must retain the count until the prepared transaction is
3216  * committed or rolled back.
3217  */
3218  locallock->holdsStrongLockCount = false;
3219 
3220  /*
3221  * Create a 2PC record.
3222  */
3223  memcpy(&(record.locktag), &(locallock->tag.lock), sizeof(LOCKTAG));
3224  record.lockmode = locallock->tag.mode;
3225 
3227  &record, sizeof(TwoPhaseLockRecord));
3228  }
3229 }

◆ BeginStrongLockAcquire()

static void BeginStrongLockAcquire ( LOCALLOCK locallock,
uint32  fasthashcode 
)
static

Definition at line 1628 of file lock.c.

References Assert, FastPathStrongRelationLockData::count, LOCALLOCK::holdsStrongLockCount, FastPathStrongRelationLockData::mutex, SpinLockAcquire, and SpinLockRelease.

Referenced by LockAcquireExtended().

1629 {
1630  Assert(StrongLockInProgress == NULL);
1631  Assert(locallock->holdsStrongLockCount == false);
1632 
1633  /*
1634  * Adding to a memory location is not atomic, so we take a spinlock to
1635  * ensure we don't collide with someone else trying to bump the count at
1636  * the same time.
1637  *
1638  * XXX: It might be worth considering using an atomic fetch-and-add
1639  * instruction here, on architectures where that is supported.
1640  */
1641 
1642  SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
1643  FastPathStrongRelationLocks->count[fasthashcode]++;
1644  locallock->holdsStrongLockCount = true;
1645  StrongLockInProgress = locallock;
1646  SpinLockRelease(&FastPathStrongRelationLocks->mutex);
1647 }

◆ CleanUpLock()

static void CleanUpLock ( LOCK lock,
PROCLOCK proclock,
LockMethod  lockMethodTable,
uint32  hashcode,
bool  wakeupNeeded 
)
static

Definition at line 1545 of file lock.c.

References Assert, elog, HASH_REMOVE, hash_search_with_hash_value(), PROCLOCK::holdMask, LOCK_PRINT, PROCLOCK::lockLink, LOCK::nRequested, PANIC, PROCLOCK::procLink, PROCLOCK_PRINT, ProcLockHashCode(), LOCK::procLocks, ProcLockWakeup(), SHMQueueDelete(), SHMQueueEmpty(), LOCK::tag, and PROCLOCK::tag.

Referenced by LockRefindAndRelease(), LockRelease(), LockReleaseAll(), and RemoveFromWaitQueue().

1548 {
1549  /*
1550  * If this was my last hold on this lock, delete my entry in the proclock
1551  * table.
1552  */
1553  if (proclock->holdMask == 0)
1554  {
1555  uint32 proclock_hashcode;
1556 
1557  PROCLOCK_PRINT("CleanUpLock: deleting", proclock);
1558  SHMQueueDelete(&proclock->lockLink);
1559  SHMQueueDelete(&proclock->procLink);
1560  proclock_hashcode = ProcLockHashCode(&proclock->tag, hashcode);
1561  if (!hash_search_with_hash_value(LockMethodProcLockHash,
1562  (void *) &(proclock->tag),
1563  proclock_hashcode,
1564  HASH_REMOVE,
1565  NULL))
1566  elog(PANIC, "proclock table corrupted");
1567  }
1568 
1569  if (lock->nRequested == 0)
1570  {
1571  /*
1572  * The caller just released the last lock, so garbage-collect the lock
1573  * object.
1574  */
1575  LOCK_PRINT("CleanUpLock: deleting", lock, 0);
1576  Assert(SHMQueueEmpty(&(lock->procLocks)));
1577  if (!hash_search_with_hash_value(LockMethodLockHash,
1578  (void *) &(lock->tag),
1579  hashcode,
1580  HASH_REMOVE,
1581  NULL))
1582  elog(PANIC, "lock table corrupted");
1583  }
1584  else if (wakeupNeeded)
1585  {
1586  /* There are waiters on this lock, so wake them up. */
1587  ProcLockWakeup(lockMethodTable, lock);
1588  }
1589 }

◆ DoLockModesConflict()

bool DoLockModesConflict ( LOCKMODE  mode1,
LOCKMODE  mode2 
)

Definition at line 556 of file lock.c.

References LockMethodData::conflictTab, DEFAULT_LOCKMETHOD, and LOCKBIT_ON.

Referenced by Do_MultiXactIdWait(), DoesMultiXactIdConflict(), initialize_reloptions(), and test_lockmode_for_conflict().

557 {
558  LockMethod lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];
559 
560  if (lockMethodTable->conflictTab[mode1] & LOCKBIT_ON(mode2))
561  return true;
562 
563  return false;
564 }
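
A hedged usage sketch (backend code outside lock.c): the function consults the default lock method's conflict table, so for instance a requested ShareLock conflicts with a held RowExclusiveLock, while two AccessShareLocks coexist.

#include "postgres.h"
#include "storage/lock.h"

static void
conflict_check_example(void)
{
    Assert(DoLockModesConflict(ShareLock, RowExclusiveLock));        /* conflicting */
    Assert(!DoLockModesConflict(AccessShareLock, AccessShareLock));  /* compatible */
}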

◆ FastPathGetRelationLockEntry()

static PROCLOCK * FastPathGetRelationLockEntry ( LOCALLOCK locallock)
static

Definition at line 2717 of file lock.c.

References PGPROC::backendLock, DEFAULT_LOCKMETHOD, elog, ereport, errcode(), errhint(), errmsg(), ERROR, FAST_PATH_CHECK_LOCKMODE, FAST_PATH_CLEAR_LOCKMODE, FAST_PATH_GET_BITS, FP_LOCK_SLOTS_PER_BACKEND, PGPROC::fpRelId, GrantLock(), HASH_FIND, hash_search_with_hash_value(), LOCALLOCK::hashcode, LOCALLOCKTAG::lock, LockHashPartitionLock, TwoPhaseLockRecord::lockmode, TwoPhaseLockRecord::locktag, LOCKTAG::locktag_field2, LW_EXCLUSIVE, LW_SHARED, LWLockAcquire(), LWLockRelease(), LOCALLOCKTAG::mode, PROCLOCKTAG::myLock, MyProc, PROCLOCKTAG::myProc, ProcLockHashCode(), SetupLockInTable(), PROCLOCK::tag, and LOCALLOCK::tag.

Referenced by AtPrepare_Locks().

2718 {
2719  LockMethod lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];
2720  LOCKTAG *locktag = &locallock->tag.lock;
2721  PROCLOCK *proclock = NULL;
2722  LWLock *partitionLock = LockHashPartitionLock(locallock->hashcode);
2723  Oid relid = locktag->locktag_field2;
2724  uint32 f;
2725 
2727 
2728  for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
2729  {
2730  uint32 lockmode;
2731 
2732  /* Look for an allocated slot matching the given relid. */
2733  if (relid != MyProc->fpRelId[f] || FAST_PATH_GET_BITS(MyProc, f) == 0)
2734  continue;
2735 
2736  /* If we don't have a lock of the given mode, forget it! */
2737  lockmode = locallock->tag.mode;
2738  if (!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
2739  break;
2740 
2741  /* Find or create lock object. */
2742  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2743 
2744  proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
2745  locallock->hashcode, lockmode);
2746  if (!proclock)
2747  {
2748  LWLockRelease(partitionLock);
2750  ereport(ERROR,
2751  (errcode(ERRCODE_OUT_OF_MEMORY),
2752  errmsg("out of shared memory"),
2753  errhint("You might need to increase max_locks_per_transaction.")));
2754  }
2755  GrantLock(proclock->tag.myLock, proclock, lockmode);
2756  FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
2757 
2758  LWLockRelease(partitionLock);
2759 
2760  /* No need to examine remaining slots. */
2761  break;
2762  }
2763 
2765 
2766  /* Lock may have already been transferred by some other backend. */
2767  if (proclock == NULL)
2768  {
2769  LOCK *lock;
2770  PROCLOCKTAG proclocktag;
2771  uint32 proclock_hashcode;
2772 
2773  LWLockAcquire(partitionLock, LW_SHARED);
2774 
2776  (void *) locktag,
2777  locallock->hashcode,
2778  HASH_FIND,
2779  NULL);
2780  if (!lock)
2781  elog(ERROR, "failed to re-find shared lock object");
2782 
2783  proclocktag.myLock = lock;
2784  proclocktag.myProc = MyProc;
2785 
2786  proclock_hashcode = ProcLockHashCode(&proclocktag, locallock->hashcode);
2787  proclock = (PROCLOCK *)
2789  (void *) &proclocktag,
2790  proclock_hashcode,
2791  HASH_FIND,
2792  NULL);
2793  if (!proclock)
2794  elog(ERROR, "failed to re-find shared proclock object");
2795  LWLockRelease(partitionLock);
2796  }
2797 
2798  return proclock;
2799 }

◆ FastPathGrantRelationLock()

static bool FastPathGrantRelationLock ( Oid  relid,
LOCKMODE  lockmode 
)
static

Definition at line 2562 of file lock.c.

References Assert, FAST_PATH_CHECK_LOCKMODE, FAST_PATH_GET_BITS, FAST_PATH_SET_LOCKMODE, FastPathLocalUseCount, FP_LOCK_SLOTS_PER_BACKEND, PGPROC::fpRelId, and MyProc.

Referenced by LockAcquireExtended().

2563 {
2564  uint32 f;
2565  uint32 unused_slot = FP_LOCK_SLOTS_PER_BACKEND;
2566 
2567  /* Scan for existing entry for this relid, remembering empty slot. */
2568  for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
2569  {
2570  if (FAST_PATH_GET_BITS(MyProc, f) == 0)
2571  unused_slot = f;
2572  else if (MyProc->fpRelId[f] == relid)
2573  {
2574  Assert(!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode));
2575  FAST_PATH_SET_LOCKMODE(MyProc, f, lockmode);
2576  return true;
2577  }
2578  }
2579 
2580  /* If no existing entry, use any empty slot. */
2581  if (unused_slot < FP_LOCK_SLOTS_PER_BACKEND)
2582  {
2583  MyProc->fpRelId[unused_slot] = relid;
2584  FAST_PATH_SET_LOCKMODE(MyProc, unused_slot, lockmode);
2585  ++FastPathLocalUseCount;
2586  return true;
2587  }
2588 
2589  /* No existing entry, and no empty slot. */
2590  return false;
2591 }

◆ FastPathTransferRelationLocks()

static bool FastPathTransferRelationLocks ( LockMethod  lockMethodTable,
const LOCKTAG locktag,
uint32  hashcode 
)
static

Definition at line 2629 of file lock.c.

References PROC_HDR::allProcCount, PROC_HDR::allProcs, PGPROC::backendLock, PGPROC::databaseId, FAST_PATH_BITS_PER_SLOT, FAST_PATH_CHECK_LOCKMODE, FAST_PATH_CLEAR_LOCKMODE, FAST_PATH_GET_BITS, FAST_PATH_LOCKNUMBER_OFFSET, FP_LOCK_SLOTS_PER_BACKEND, PGPROC::fpRelId, GrantLock(), i, LockHashPartitionLock, TwoPhaseLockRecord::lockmode, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), PROCLOCKTAG::myLock, ProcGlobal, SetupLockInTable(), and PROCLOCK::tag.

Referenced by LockAcquireExtended().

2631 {
2632  LWLock *partitionLock = LockHashPartitionLock(hashcode);
2633  Oid relid = locktag->locktag_field2;
2634  uint32 i;
2635 
2636  /*
2637  * Every PGPROC that can potentially hold a fast-path lock is present in
2638  * ProcGlobal->allProcs. Prepared transactions are not, but any
2639  * outstanding fast-path locks held by prepared transactions are
2640  * transferred to the main lock table.
2641  */
2642  for (i = 0; i < ProcGlobal->allProcCount; i++)
2643  {
2644  PGPROC *proc = &ProcGlobal->allProcs[i];
2645  uint32 f;
2646 
2648 
2649  /*
2650  * If the target backend isn't referencing the same database as the
2651  * lock, then we needn't examine the individual relation IDs at all;
2652  * none of them can be relevant.
2653  *
2654  * proc->databaseId is set at backend startup time and never changes
2655  * thereafter, so it might be safe to perform this test before
2656  * acquiring &proc->backendLock. In particular, it's certainly safe
2657  * to assume that if the target backend holds any fast-path locks, it
2658  * must have performed a memory-fencing operation (in particular, an
2659  * LWLock acquisition) since setting proc->databaseId. However, it's
2660  * less clear that our backend is certain to have performed a memory
2661  * fencing operation since the other backend set proc->databaseId. So
2662  * for now, we test it after acquiring the LWLock just to be safe.
2663  */
2664  if (proc->databaseId != locktag->locktag_field1)
2665  {
2666  LWLockRelease(&proc->backendLock);
2667  continue;
2668  }
2669 
2670  for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
2671  {
2672  uint32 lockmode;
2673 
2674  /* Look for an allocated slot matching the given relid. */
2675  if (relid != proc->fpRelId[f] || FAST_PATH_GET_BITS(proc, f) == 0)
2676  continue;
2677 
2678  /* Find or create lock object. */
2679  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2680  for (lockmode = FAST_PATH_LOCKNUMBER_OFFSET;
2682  ++lockmode)
2683  {
2684  PROCLOCK *proclock;
2685 
2686  if (!FAST_PATH_CHECK_LOCKMODE(proc, f, lockmode))
2687  continue;
2688  proclock = SetupLockInTable(lockMethodTable, proc, locktag,
2689  hashcode, lockmode);
2690  if (!proclock)
2691  {
2692  LWLockRelease(partitionLock);
2693  LWLockRelease(&proc->backendLock);
2694  return false;
2695  }
2696  GrantLock(proclock->tag.myLock, proclock, lockmode);
2697  FAST_PATH_CLEAR_LOCKMODE(proc, f, lockmode);
2698  }
2699  LWLockRelease(partitionLock);
2700 
2701  /* No need to examine remaining slots. */
2702  break;
2703  }
2704  LWLockRelease(&proc->backendLock);
2705  }
2706  return true;
2707 }

◆ FastPathUnGrantRelationLock()

static bool FastPathUnGrantRelationLock ( Oid  relid,
LOCKMODE  lockmode 
)
static

Definition at line 2599 of file lock.c.

References Assert, FAST_PATH_CHECK_LOCKMODE, FAST_PATH_CLEAR_LOCKMODE, FAST_PATH_GET_BITS, FastPathLocalUseCount, FP_LOCK_SLOTS_PER_BACKEND, PGPROC::fpRelId, and MyProc.

Referenced by LockRelease(), and LockReleaseAll().

2600 {
2601  uint32 f;
2602  bool result = false;
2603 
2604  FastPathLocalUseCount = 0;
2605  for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
2606  {
2607  if (MyProc->fpRelId[f] == relid
2608  && FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
2609  {
2610  Assert(!result);
2611  FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
2612  result = true;
2613  /* we continue iterating so as to update FastPathLocalUseCount */
2614  }
2615  if (FAST_PATH_GET_BITS(MyProc, f) != 0)
2616  ++FastPathLocalUseCount;
2617  }
2618  return result;
2619 }

◆ FinishStrongLockAcquire()

static void FinishStrongLockAcquire ( void  )
static

Definition at line 1654 of file lock.c.

Referenced by LockAcquireExtended().

1655 {
1656  StrongLockInProgress = NULL;
1657 }

◆ GetBlockerStatusData()

BlockedProcsData* GetBlockerStatusData ( int  blocked_pid)

Definition at line 3660 of file lock.c.

References Assert, BackendPidGetProcWithLock(), dlist_iter::cur, dlist_container, dlist_foreach, GetSingleProcBlockerStatusData(), i, PGPROC::lockGroupLeader, PGPROC::lockGroupMembers, LockHashPartitionLockByIndex, BlockedProcsData::locks, LW_SHARED, LWLockAcquire(), LWLockRelease(), MaxBackends, BlockedProcsData::maxlocks, BlockedProcsData::maxpids, BlockedProcsData::maxprocs, BlockedProcsData::nlocks, BlockedProcsData::npids, BlockedProcsData::nprocs, NUM_LOCK_PARTITIONS, palloc(), BlockedProcsData::procs, and BlockedProcsData::waiter_pids.

Referenced by pg_blocking_pids().

3661 {
3662  BlockedProcsData *data;
3663  PGPROC *proc;
3664  int i;
3665 
3666  data = (BlockedProcsData *) palloc(sizeof(BlockedProcsData));
3667 
3668  /*
3669  * Guess how much space we'll need, and preallocate. Most of the time
3670  * this will avoid needing to do repalloc while holding the LWLocks. (We
3671  * assume, but check with an Assert, that MaxBackends is enough entries
3672  * for the procs[] array; the other two could need enlargement, though.)
3673  */
3674  data->nprocs = data->nlocks = data->npids = 0;
3675  data->maxprocs = data->maxlocks = data->maxpids = MaxBackends;
3676  data->procs = (BlockedProcData *) palloc(sizeof(BlockedProcData) * data->maxprocs);
3677  data->locks = (LockInstanceData *) palloc(sizeof(LockInstanceData) * data->maxlocks);
3678  data->waiter_pids = (int *) palloc(sizeof(int) * data->maxpids);
3679 
3680  /*
3681  * In order to search the ProcArray for blocked_pid and assume that that
3682  * entry won't immediately disappear under us, we must hold ProcArrayLock.
3683  * In addition, to examine the lock grouping fields of any other backend,
3684  * we must hold all the hash partition locks. (Only one of those locks is
3685  * actually relevant for any one lock group, but we can't know which one
3686  * ahead of time.) It's fairly annoying to hold all those locks
3687  * throughout this, but it's no worse than GetLockStatusData(), and it
3688  * does have the advantage that we're guaranteed to return a
3689  * self-consistent instantaneous state.
3690  */
3691  LWLockAcquire(ProcArrayLock, LW_SHARED);
3692 
3693  proc = BackendPidGetProcWithLock(blocked_pid);
3694 
3695  /* Nothing to do if it's gone */
3696  if (proc != NULL)
3697  {
3698  /*
3699  * Acquire lock on the entire shared lock data structure. See notes
3700  * in GetLockStatusData().
3701  */
3702  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
3704 
3705  if (proc->lockGroupLeader == NULL)
3706  {
3707  /* Easy case, proc is not a lock group member */
3708  GetSingleProcBlockerStatusData(proc, data);
3709  }
3710  else
3711  {
3712  /* Examine all procs in proc's lock group */
3713  dlist_iter iter;
3714 
3716  {
3717  PGPROC *memberProc;
3718 
3719  memberProc = dlist_container(PGPROC, lockGroupLink, iter.cur);
3720  GetSingleProcBlockerStatusData(memberProc, data);
3721  }
3722  }
3723 
3724  /*
3725  * And release locks. See notes in GetLockStatusData().
3726  */
3727  for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
3729 
3730  Assert(data->nprocs <= data->maxprocs);
3731  }
3732 
3733  LWLockRelease(ProcArrayLock);
3734 
3735  return data;
3736 }

◆ GetLockConflicts()

VirtualTransactionId* GetLockConflicts ( const LOCKTAG locktag,
LOCKMODE  lockmode,
int *  countp 
)

Definition at line 2819 of file lock.c.

References PROC_HDR::allProcCount, PROC_HDR::allProcs, VirtualTransactionId::backendId, PGPROC::backendLock, ConflictsWithRelationFastPath, LockMethodData::conflictTab, PGPROC::databaseId, elog, ERROR, FAST_PATH_GET_BITS, FAST_PATH_LOCKNUMBER_OFFSET, FP_LOCK_SLOTS_PER_BACKEND, PGPROC::fpRelId, GET_VXID_FROM_PGPROC, HASH_FIND, hash_search_with_hash_value(), PROCLOCK::holdMask, i, InHotStandby, InvalidBackendId, InvalidLocalTransactionId, lengthof, VirtualTransactionId::localTransactionId, LockHashPartitionLock, PROCLOCK::lockLink, TwoPhaseLockRecord::lockmode, TwoPhaseLockRecord::locktag, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_lockmethodid, LockTagHashCode(), LW_SHARED, LWLockAcquire(), LWLockRelease(), MaxBackends, MemoryContextAlloc(), MyProc, PROCLOCKTAG::myProc, LockMethodData::numLockModes, offsetof, palloc0(), PANIC, ProcGlobal, LOCK::procLocks, SHMQueueNext(), PROCLOCK::tag, TopMemoryContext, VirtualTransactionIdEquals, and VirtualTransactionIdIsValid.

Referenced by ResolveRecoveryConflictWithLock(), and WaitForLockersMultiple().

2820 {
2821  static VirtualTransactionId *vxids;
2822  LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
2823  LockMethod lockMethodTable;
2824  LOCK *lock;
2825  LOCKMASK conflictMask;
2826  SHM_QUEUE *procLocks;
2827  PROCLOCK *proclock;
2828  uint32 hashcode;
2829  LWLock *partitionLock;
2830  int count = 0;
2831  int fast_count = 0;
2832 
2833  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2834  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2835  lockMethodTable = LockMethods[lockmethodid];
2836  if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
2837  elog(ERROR, "unrecognized lock mode: %d", lockmode);
2838 
2839  /*
2840  * Allocate memory to store results, and fill with InvalidVXID. We only
2841  * need enough space for MaxBackends + a terminator, since prepared xacts
2842  * don't count. InHotStandby allocate once in TopMemoryContext.
2843  */
2844  if (InHotStandby)
2845  {
2846  if (vxids == NULL)
2847  vxids = (VirtualTransactionId *)
2849  sizeof(VirtualTransactionId) * (MaxBackends + 1));
2850  }
2851  else
2852  vxids = (VirtualTransactionId *)
2853  palloc0(sizeof(VirtualTransactionId) * (MaxBackends + 1));
2854 
2855  /* Compute hash code and partition lock, and look up conflicting modes. */
2856  hashcode = LockTagHashCode(locktag);
2857  partitionLock = LockHashPartitionLock(hashcode);
2858  conflictMask = lockMethodTable->conflictTab[lockmode];
2859 
2860  /*
2861  * Fast path locks might not have been entered in the primary lock table.
2862  * If the lock we're dealing with could conflict with such a lock, we must
2863  * examine each backend's fast-path array for conflicts.
2864  */
2865  if (ConflictsWithRelationFastPath(locktag, lockmode))
2866  {
2867  int i;
2868  Oid relid = locktag->locktag_field2;
2869  VirtualTransactionId vxid;
2870 
2871  /*
2872  * Iterate over relevant PGPROCs. Anything held by a prepared
2873  * transaction will have been transferred to the primary lock table,
2874  * so we need not worry about those. This is all a bit fuzzy, because
2875  * new locks could be taken after we've visited a particular
2876  * partition, but the callers had better be prepared to deal with that
2877  * anyway, since the locks could equally well be taken between the
2878  * time we return the value and the time the caller does something
2879  * with it.
2880  */
2881  for (i = 0; i < ProcGlobal->allProcCount; i++)
2882  {
2883  PGPROC *proc = &ProcGlobal->allProcs[i];
2884  uint32 f;
2885 
2886  /* A backend never blocks itself */
2887  if (proc == MyProc)
2888  continue;
2889 
2891 
2892  /*
2893  * If the target backend isn't referencing the same database as
2894  * the lock, then we needn't examine the individual relation IDs
2895  * at all; none of them can be relevant.
2896  *
2897  * See FastPathTransferRelationLocks() for discussion of why we do
2898  * this test after acquiring the lock.
2899  */
2900  if (proc->databaseId != locktag->locktag_field1)
2901  {
2902  LWLockRelease(&proc->backendLock);
2903  continue;
2904  }
2905 
2906  for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
2907  {
2908  uint32 lockmask;
2909 
2910  /* Look for an allocated slot matching the given relid. */
2911  if (relid != proc->fpRelId[f])
2912  continue;
2913  lockmask = FAST_PATH_GET_BITS(proc, f);
2914  if (!lockmask)
2915  continue;
2916  lockmask <<= FAST_PATH_LOCKNUMBER_OFFSET;
2917 
2918  /*
2919  * There can only be one entry per relation, so if we found it
2920  * and it doesn't conflict, we can skip the rest of the slots.
2921  */
2922  if ((lockmask & conflictMask) == 0)
2923  break;
2924 
2925  /* Conflict! */
2926  GET_VXID_FROM_PGPROC(vxid, *proc);
2927 
2928  /*
2929  * If we see an invalid VXID, then either the xact has already
2930  * committed (or aborted), or it's a prepared xact. In either
2931  * case we may ignore it.
2932  */
2933  if (VirtualTransactionIdIsValid(vxid))
2934  vxids[count++] = vxid;
2935 
2936  /* No need to examine remaining slots. */
2937  break;
2938  }
2939 
2940  LWLockRelease(&proc->backendLock);
2941  }
2942  }
2943 
2944  /* Remember how many fast-path conflicts we found. */
2945  fast_count = count;
2946 
2947  /*
2948  * Look up the lock object matching the tag.
2949  */
2950  LWLockAcquire(partitionLock, LW_SHARED);
2951 
2953  (const void *) locktag,
2954  hashcode,
2955  HASH_FIND,
2956  NULL);
2957  if (!lock)
2958  {
2959  /*
2960  * If the lock object doesn't exist, there is nothing holding a lock
2961  * on this lockable object.
2962  */
2963  LWLockRelease(partitionLock);
2964  vxids[count].backendId = InvalidBackendId;
2966  if (countp)
2967  *countp = count;
2968  return vxids;
2969  }
2970 
2971  /*
2972  * Examine each existing holder (or awaiter) of the lock.
2973  */
2974 
2975  procLocks = &(lock->procLocks);
2976 
2977  proclock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks,
2978  offsetof(PROCLOCK, lockLink));
2979 
2980  while (proclock)
2981  {
2982  if (conflictMask & proclock->holdMask)
2983  {
2984  PGPROC *proc = proclock->tag.myProc;
2985 
2986  /* A backend never blocks itself */
2987  if (proc != MyProc)
2988  {
2989  VirtualTransactionId vxid;
2990 
2991  GET_VXID_FROM_PGPROC(vxid, *proc);
2992 
2993  /*
2994  * If we see an invalid VXID, then either the xact has already
2995  * committed (or aborted), or it's a prepared xact. In either
2996  * case we may ignore it.
2997  */
2998  if (VirtualTransactionIdIsValid(vxid))
2999  {
3000  int i;
3001 
3002  /* Avoid duplicate entries. */
3003  for (i = 0; i < fast_count; ++i)
3004  if (VirtualTransactionIdEquals(vxids[i], vxid))
3005  break;
3006  if (i >= fast_count)
3007  vxids[count++] = vxid;
3008  }
3009  }
3010  }
3011 
3012  proclock = (PROCLOCK *) SHMQueueNext(procLocks, &proclock->lockLink,
3013  offsetof(PROCLOCK, lockLink));
3014  }
3015 
3016  LWLockRelease(partitionLock);
3017 
3018  if (count > MaxBackends) /* should never happen */
3019  elog(PANIC, "too many conflicting locks found");
3020 
3021  vxids[count].backendId = InvalidBackendId;
3023  if (countp)
3024  *countp = count;
3025  return vxids;
3026 }
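
A hedged caller-side sketch (not part of lock.c; database_oid and relation_oid are illustrative parameters): collect the virtual transactions that currently conflict with taking AccessExclusiveLock on a relation, in the style of WaitForLockersMultiple().

#include "postgres.h"
#include "storage/lock.h"

static void
show_conflicting_vxids(Oid database_oid, Oid relation_oid)
{
    LOCKTAG     tag;
    VirtualTransactionId *vxids;
    int         nconflicts;
    int         i;

    SET_LOCKTAG_RELATION(tag, database_oid, relation_oid);
    vxids = GetLockConflicts(&tag, AccessExclusiveLock, &nconflicts);

    /* The array is additionally terminated by an entry with InvalidBackendId. */
    for (i = 0; i < nconflicts; i++)
        elog(LOG, "conflicting vxid %d/%u",
             vxids[i].backendId, vxids[i].localTransactionId);
}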

◆ GetLockmodeName()

const char* GetLockmodeName ( LOCKMETHODID  lockmethodid,
LOCKMODE  mode 
)

Definition at line 3923 of file lock.c.

References Assert, elog, hash_seq_init(), hash_seq_search(), i, lengthof, LOCK_PRINT, LockMethodData::lockModeNames, LOG, mode, PROCLOCKTAG::myLock, MyProc, PROCLOCKTAG::myProc, PGPROC::myProcLocks, NUM_LOCK_PARTITIONS, offsetof, PROCLOCK::procLink, PROCLOCK_PRINT, SHMQueueNext(), status(), PROCLOCK::tag, and PGPROC::waitLock.

Referenced by CheckRelationLockedByMe(), DeadLockReport(), pg_lock_status(), and ProcSleep().

3924 {
3925  Assert(lockmethodid > 0 && lockmethodid < lengthof(LockMethods));
3926  Assert(mode > 0 && mode <= LockMethods[lockmethodid]->numLockModes);
3927  return LockMethods[lockmethodid]->lockModeNames[mode];
3928 }
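
A hedged usage sketch: translate a numeric lock mode of the default lock method into its display name, as DeadLockReport() and pg_lock_status() do.

#include "postgres.h"
#include "storage/lock.h"

static void
lockmode_name_example(void)
{
    const char *name = GetLockmodeName(DEFAULT_LOCKMETHOD, AccessShareLock);

    elog(DEBUG1, "mode %d is %s", AccessShareLock, name);   /* "AccessShareLock" */
}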

◆ GetLocksMethodTable()

LockMethod GetLocksMethodTable ( const LOCK lock)

Definition at line 460 of file lock.c.

References Assert, lengthof, and LOCK_LOCKMETHOD.

Referenced by DeadLockCheck(), and FindLockCycleRecurseMember().

461 {
462  LOCKMETHODID lockmethodid = LOCK_LOCKMETHOD(*lock);
463 
464  Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
465  return LockMethods[lockmethodid];
466 }

◆ GetLockStatusData()

LockData* GetLockStatusData ( void  )

Definition at line 3476 of file lock.c.

References PROC_HDR::allProcCount, PROC_HDR::allProcs, Assert, LockInstanceData::backend, VirtualTransactionId::backendId, PGPROC::backendId, PGPROC::backendLock, PGPROC::databaseId, ExclusiveLock, FAST_PATH_GET_BITS, FAST_PATH_LOCKNUMBER_OFFSET, LockInstanceData::fastpath, FP_LOCK_SLOTS_PER_BACKEND, PGPROC::fpLocalTransactionId, PGPROC::fpRelId, PGPROC::fpVXIDLock, PROCLOCK::groupLeader, hash_get_num_entries(), hash_seq_init(), hash_seq_search(), PROCLOCK::holdMask, LockInstanceData::holdMask, i, LockInstanceData::leaderPid, VirtualTransactionId::localTransactionId, LOCKBIT_ON, LockHashPartitionLockByIndex, LockData::locks, LockInstanceData::locktag, LW_SHARED, LWLockAcquire(), LWLockRelease(), PGPROC::lxid, LockInstanceData::lxid, MaxBackends, PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, LockData::nelements, NoLock, NUM_LOCK_PARTITIONS, palloc(), PGPROC::pid, LockInstanceData::pid, ProcGlobal, repalloc(), SET_LOCKTAG_RELATION, SET_LOCKTAG_VIRTUALTRANSACTION, LOCK::tag, PROCLOCK::tag, PGPROC::waitLock, PGPROC::waitLockMode, and LockInstanceData::waitLockMode.

Referenced by pg_lock_status().

3477 {
3478  LockData *data;
3479  PROCLOCK *proclock;
3480  HASH_SEQ_STATUS seqstat;
3481  int els;
3482  int el;
3483  int i;
3484 
3485  data = (LockData *) palloc(sizeof(LockData));
3486 
3487  /* Guess how much space we'll need. */
3488  els = MaxBackends;
3489  el = 0;
3490  data->locks = (LockInstanceData *) palloc(sizeof(LockInstanceData) * els);
3491 
3492  /*
3493  * First, we iterate through the per-backend fast-path arrays, locking
3494  * them one at a time. This might produce an inconsistent picture of the
3495  * system state, but taking all of those LWLocks at the same time seems
3496  * impractical (in particular, note MAX_SIMUL_LWLOCKS). It shouldn't
3497  * matter too much, because none of these locks can be involved in lock
3498  * conflicts anyway - anything that might must be present in the main lock
3499  * table. (For the same reason, we don't sweat about making leaderPid
3500  * completely valid. We cannot safely dereference another backend's
3501  * lockGroupLeader field without holding all lock partition locks, and
3502  * it's not worth that.)
3503  */
3504  for (i = 0; i < ProcGlobal->allProcCount; ++i)
3505  {
3506  PGPROC *proc = &ProcGlobal->allProcs[i];
3507  uint32 f;
3508 
3510 
3511  for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; ++f)
3512  {
3513  LockInstanceData *instance;
3514  uint32 lockbits = FAST_PATH_GET_BITS(proc, f);
3515 
3516  /* Skip unallocated slots. */
3517  if (!lockbits)
3518  continue;
3519 
3520  if (el >= els)
3521  {
3522  els += MaxBackends;
3523  data->locks = (LockInstanceData *)
3524  repalloc(data->locks, sizeof(LockInstanceData) * els);
3525  }
3526 
3527  instance = &data->locks[el];
3528  SET_LOCKTAG_RELATION(instance->locktag, proc->databaseId,
3529  proc->fpRelId[f]);
3530  instance->holdMask = lockbits << FAST_PATH_LOCKNUMBER_OFFSET;
3531  instance->waitLockMode = NoLock;
3532  instance->backend = proc->backendId;
3533  instance->lxid = proc->lxid;
3534  instance->pid = proc->pid;
3535  instance->leaderPid = proc->pid;
3536  instance->fastpath = true;
3537 
3538  el++;
3539  }
3540 
3541  if (proc->fpVXIDLock)
3542  {
3543  VirtualTransactionId vxid;
3544  LockInstanceData *instance;
3545 
3546  if (el >= els)
3547  {
3548  els += MaxBackends;
3549  data->locks = (LockInstanceData *)
3550  repalloc(data->locks, sizeof(LockInstanceData) * els);
3551  }
3552 
3553  vxid.backendId = proc->backendId;
3555 
3556  instance = &data->locks[el];
3557  SET_LOCKTAG_VIRTUALTRANSACTION(instance->locktag, vxid);
3558  instance->holdMask = LOCKBIT_ON(ExclusiveLock);
3559  instance->waitLockMode = NoLock;
3560  instance->backend = proc->backendId;
3561  instance->lxid = proc->lxid;
3562  instance->pid = proc->pid;
3563  instance->leaderPid = proc->pid;
3564  instance->fastpath = true;
3565 
3566  el++;
3567  }
3568 
3569  LWLockRelease(&proc->backendLock);
3570  }
3571 
3572  /*
3573  * Next, acquire lock on the entire shared lock data structure. We do
3574  * this so that, at least for locks in the primary lock table, the state
3575  * will be self-consistent.
3576  *
3577  * Since this is a read-only operation, we take shared instead of
3578  * exclusive lock. There's not a whole lot of point to this, because all
3579  * the normal operations require exclusive lock, but it doesn't hurt
3580  * anything either. It will at least allow two backends to do
3581  * GetLockStatusData in parallel.
3582  *
3583  * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
3584  */
3585  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
3587 
3588  /* Now we can safely count the number of proclocks */
3590  if (data->nelements > els)
3591  {
3592  els = data->nelements;
3593  data->locks = (LockInstanceData *)
3594  repalloc(data->locks, sizeof(LockInstanceData) * els);
3595  }
3596 
3597  /* Now scan the tables to copy the data */
3599 
3600  while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
3601  {
3602  PGPROC *proc = proclock->tag.myProc;
3603  LOCK *lock = proclock->tag.myLock;
3604  LockInstanceData *instance = &data->locks[el];
3605 
3606  memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
3607  instance->holdMask = proclock->holdMask;
3608  if (proc->waitLock == proclock->tag.myLock)
3609  instance->waitLockMode = proc->waitLockMode;
3610  else
3611  instance->waitLockMode = NoLock;
3612  instance->backend = proc->backendId;
3613  instance->lxid = proc->lxid;
3614  instance->pid = proc->pid;
3615  instance->leaderPid = proclock->groupLeader->pid;
3616  instance->fastpath = false;
3617 
3618  el++;
3619  }
3620 
3621  /*
3622  * And release locks. We do this in reverse order for two reasons: (1)
3623  * Anyone else who needs more than one of the locks will be trying to lock
3624  * them in increasing order; we don't want to release the other process
3625  * until it can get all the locks it needs. (2) This avoids O(N^2)
3626  * behavior inside LWLockRelease.
3627  */
3628  for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
3629  LWLockRelease(LockHashPartitionLockByIndex(i));
3630 
3631  Assert(el == data->nelements);
3632 
3633  return data;
3634 }
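The LockData snapshot assembled above is consumed by pg_lock_status() for the pg_locks view. As a hedged sketch of the consumer side, assuming only the LockData/LockInstanceData fields referenced in this listing and a backend function context:

/*
 * Hedged sketch, not part of lock.c: walk the snapshot returned by
 * GetLockStatusData().  What gets reported per entry is left as comments.
 */
LockData   *lockData = GetLockStatusData();
int         i;

for (i = 0; i < lockData->nelements; i++)
{
    LockInstanceData *instance = &lockData->locks[i];

    if (instance->fastpath)
    {
        /* taken via a per-backend fast-path slot; locktag and holdMask are
         * still meaningful, but there is no shared PROCLOCK behind it */
    }
    /* report instance->locktag, instance->holdMask, instance->pid, ... */
}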

◆ GetLockTagsMethodTable()

LockMethod GetLockTagsMethodTable ( const LOCKTAG *  locktag)

Definition at line 472 of file lock.c.

References Assert, lengthof, and LOCKTAG::locktag_lockmethodid.

Referenced by pg_blocking_pids().

473 {
474  LOCKMETHODID lockmethodid = (LOCKMETHODID) locktag->locktag_lockmethodid;
475 
476  Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
477  return LockMethods[lockmethodid];
478 }

◆ GetRunningTransactionLocks()

xl_standby_lock* GetRunningTransactionLocks ( int *  nlocks)

Definition at line 3840 of file lock.c.

References AccessExclusiveLock, PROC_HDR::allPgXact, Assert, xl_standby_lock::dbOid, hash_get_num_entries(), hash_seq_init(), hash_seq_search(), PROCLOCK::holdMask, i, LOCKBIT_ON, LockHashPartitionLockByIndex, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG_RELATION, LOCKTAG::locktag_type, LW_SHARED, LWLockAcquire(), LWLockRelease(), PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, NUM_LOCK_PARTITIONS, palloc(), PGPROC::pgprocno, ProcGlobal, xl_standby_lock::relOid, LOCK::tag, PROCLOCK::tag, TransactionIdIsValid, xl_standby_lock::xid, and PGXACT::xid.

Referenced by LogStandbySnapshot().

3841 {
3842  xl_standby_lock *accessExclusiveLocks;
3843  PROCLOCK *proclock;
3844  HASH_SEQ_STATUS seqstat;
3845  int i;
3846  int index;
3847  int els;
3848 
3849  /*
3850  * Acquire lock on the entire shared lock data structure.
3851  *
3852  * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
3853  */
3854  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
3855  LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);
3856 
3857  /* Now we can safely count the number of proclocks */
3858  els = hash_get_num_entries(LockMethodProcLockHash);
3859 
3860  /*
3861  * Allocating enough space for all locks in the lock table is overkill,
3862  * but it's more convenient and faster than having to enlarge the array.
3863  */
3864  accessExclusiveLocks = palloc(els * sizeof(xl_standby_lock));
3865 
3866  /* Now scan the tables to copy the data */
3867  hash_seq_init(&seqstat, LockMethodProcLockHash);
3868 
3869  /*
3870  * If lock is a currently granted AccessExclusiveLock then it will have
3871  * just one proclock holder, so locks are never accessed twice in this
3872  * particular case. Don't copy this code for use elsewhere because in the
3873  * general case this will give you duplicate locks when looking at
3874  * non-exclusive lock types.
3875  */
3876  index = 0;
3877  while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
3878  {
3879  /* make sure this definition matches the one used in LockAcquire */
3880  if ((proclock->holdMask & LOCKBIT_ON(AccessExclusiveLock)) &&
3881  proclock->tag.myLock->tag.locktag_type == LOCKTAG_RELATION)
3882  {
3883  PGPROC *proc = proclock->tag.myProc;
3884  PGXACT *pgxact = &ProcGlobal->allPgXact[proc->pgprocno];
3885  LOCK *lock = proclock->tag.myLock;
3886  TransactionId xid = pgxact->xid;
3887 
3888  /*
3889  * Don't record locks for transactions if we know they have
3890  * already issued their WAL record for commit but not yet released
3891  * lock. It is still possible that we see locks held by already
3892  * complete transactions, if they haven't yet zeroed their xids.
3893  */
3894  if (!TransactionIdIsValid(xid))
3895  continue;
3896 
3897  accessExclusiveLocks[index].xid = xid;
3898  accessExclusiveLocks[index].dbOid = lock->tag.locktag_field1;
3899  accessExclusiveLocks[index].relOid = lock->tag.locktag_field2;
3900 
3901  index++;
3902  }
3903  }
3904 
3905  Assert(index <= els);
3906 
3907  /*
3908  * And release locks. We do this in reverse order for two reasons: (1)
3909  * Anyone else who needs more than one of the locks will be trying to lock
3910  * them in increasing order; we don't want to release the other process
3911  * until it can get all the locks it needs. (2) This avoids O(N^2)
3912  * behavior inside LWLockRelease.
3913  */
3914  for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
3915  LWLockRelease(LockHashPartitionLockByIndex(i));
3916 
3917  *nlocks = index;
3918  return accessExclusiveLocks;
3919 }

◆ GetSingleProcBlockerStatusData()

static void GetSingleProcBlockerStatusData ( PGPROC *  blocked_proc,
BlockedProcsData *  data 
)
static

Definition at line 3740 of file lock.c.

References LockInstanceData::backend, PGPROC::backendId, LockInstanceData::fastpath, BlockedProcData::first_lock, BlockedProcData::first_waiter, PROCLOCK::groupLeader, PROCLOCK::holdMask, LockInstanceData::holdMask, i, LockInstanceData::leaderPid, PROC_QUEUE::links, PGPROC::links, PROCLOCK::lockLink, BlockedProcsData::locks, LockInstanceData::locktag, PGPROC::lxid, LockInstanceData::lxid, Max, MaxBackends, BlockedProcsData::maxlocks, BlockedProcsData::maxpids, PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, SHM_QUEUE::next, BlockedProcsData::nlocks, NoLock, BlockedProcsData::npids, BlockedProcsData::nprocs, BlockedProcData::num_locks, BlockedProcData::num_waiters, offsetof, PGPROC::pid, LockInstanceData::pid, BlockedProcData::pid, LOCK::procLocks, BlockedProcsData::procs, repalloc(), SHMQueueNext(), PROC_QUEUE::size, LOCK::tag, PROCLOCK::tag, BlockedProcsData::waiter_pids, PGPROC::waitLock, PGPROC::waitLockMode, LockInstanceData::waitLockMode, and LOCK::waitProcs.

Referenced by GetBlockerStatusData().

3741 {
3742  LOCK *theLock = blocked_proc->waitLock;
3743  BlockedProcData *bproc;
3744  SHM_QUEUE *procLocks;
3745  PROCLOCK *proclock;
3746  PROC_QUEUE *waitQueue;
3747  PGPROC *proc;
3748  int queue_size;
3749  int i;
3750 
3751  /* Nothing to do if this proc is not blocked */
3752  if (theLock == NULL)
3753  return;
3754 
3755  /* Set up a procs[] element */
3756  bproc = &data->procs[data->nprocs++];
3757  bproc->pid = blocked_proc->pid;
3758  bproc->first_lock = data->nlocks;
3759  bproc->first_waiter = data->npids;
3760 
3761  /*
3762  * We may ignore the proc's fast-path arrays, since nothing in those could
3763  * be related to a contended lock.
3764  */
3765 
3766  /* Collect all PROCLOCKs associated with theLock */
3767  procLocks = &(theLock->procLocks);
3768  proclock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks,
3769  offsetof(PROCLOCK, lockLink));
3770  while (proclock)
3771  {
3772  PGPROC *proc = proclock->tag.myProc;
3773  LOCK *lock = proclock->tag.myLock;
3774  LockInstanceData *instance;
3775 
3776  if (data->nlocks >= data->maxlocks)
3777  {
3778  data->maxlocks += MaxBackends;
3779  data->locks = (LockInstanceData *)
3780  repalloc(data->locks, sizeof(LockInstanceData) * data->maxlocks);
3781  }
3782 
3783  instance = &data->locks[data->nlocks];
3784  memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
3785  instance->holdMask = proclock->holdMask;
3786  if (proc->waitLock == lock)
3787  instance->waitLockMode = proc->waitLockMode;
3788  else
3789  instance->waitLockMode = NoLock;
3790  instance->backend = proc->backendId;
3791  instance->lxid = proc->lxid;
3792  instance->pid = proc->pid;
3793  instance->leaderPid = proclock->groupLeader->pid;
3794  instance->fastpath = false;
3795  data->nlocks++;
3796 
3797  proclock = (PROCLOCK *) SHMQueueNext(procLocks, &proclock->lockLink,
3798  offsetof(PROCLOCK, lockLink));
3799  }
3800 
3801  /* Enlarge waiter_pids[] if it's too small to hold all wait queue PIDs */
3802  waitQueue = &(theLock->waitProcs);
3803  queue_size = waitQueue->size;
3804 
3805  if (queue_size > data->maxpids - data->npids)
3806  {
3807  data->maxpids = Max(data->maxpids + MaxBackends,
3808  data->npids + queue_size);
3809  data->waiter_pids = (int *) repalloc(data->waiter_pids,
3810  sizeof(int) * data->maxpids);
3811  }
3812 
3813  /* Collect PIDs from the lock's wait queue, stopping at blocked_proc */
3814  proc = (PGPROC *) waitQueue->links.next;
3815  for (i = 0; i < queue_size; i++)
3816  {
3817  if (proc == blocked_proc)
3818  break;
3819  data->waiter_pids[data->npids++] = proc->pid;
3820  proc = (PGPROC *) proc->links.next;
3821  }
3822 
3823  bproc->num_locks = data->nlocks - bproc->first_lock;
3824  bproc->num_waiters = data->npids - bproc->first_waiter;
3825 }
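The arrays filled in here are consumed through GetBlockerStatusData(); a hedged sketch of how a caller such as pg_blocking_pids() walks the per-process slices (blocked_pid is a hypothetical input):

/*
 * Hedged sketch, not part of lock.c: each BlockedProcData entry points into
 * the shared locks[] and waiter_pids[] arrays via first_lock/num_locks and
 * first_waiter/num_waiters.
 */
BlockedProcsData *data = GetBlockerStatusData(blocked_pid);
int         i,
            j;

for (i = 0; i < data->nprocs; i++)
{
    BlockedProcData *bproc = &data->procs[i];

    for (j = 0; j < bproc->num_locks; j++)
    {
        LockInstanceData *blocker = &data->locks[bproc->first_lock + j];

        /* blocker->holdMask and blocker->waitLockMode describe this PROCLOCK */
    }

    for (j = 0; j < bproc->num_waiters; j++)
    {
        int         waiter_pid = data->waiter_pids[bproc->first_waiter + j];

        /* waiter_pid is queued ahead of bproc->pid on the same lock */
    }
}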

◆ GrantAwaitedLock()

void GrantAwaitedLock ( void  )

Definition at line 1693 of file lock.c.

References GrantLockLocal().

Referenced by LockErrorCleanup(), and ProcSleep().

1694 {
1695  GrantLockLocal(awaitedLock, awaitedOwner);
1696 }

◆ GrantLock()

void GrantLock ( LOCK *  lock,
PROCLOCK *  proclock,
LOCKMODE  lockmode 
)

Definition at line 1465 of file lock.c.

References Assert, LOCK::granted, LOCK::grantMask, PROCLOCK::holdMask, LOCK_PRINT, LOCKBIT_OFF, LOCKBIT_ON, TwoPhaseLockRecord::lockmode, LOCK::nGranted, LOCK::nRequested, LOCK::requested, and LOCK::waitMask.

Referenced by FastPathGetRelationLockEntry(), FastPathTransferRelationLocks(), lock_twophase_recover(), LockAcquireExtended(), ProcLockWakeup(), ProcSleep(), and VirtualXactLock().

1466 {
1467  lock->nGranted++;
1468  lock->granted[lockmode]++;
1469  lock->grantMask |= LOCKBIT_ON(lockmode);
1470  if (lock->granted[lockmode] == lock->requested[lockmode])
1471  lock->waitMask &= LOCKBIT_OFF(lockmode);
1472  proclock->holdMask |= LOCKBIT_ON(lockmode);
1473  LOCK_PRINT("GrantLock", lock, lockmode);
1474  Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
1475  Assert(lock->nGranted <= lock->nRequested);
1476 }

◆ GrantLockLocal()

static void GrantLockLocal ( LOCALLOCK *  locallock,
ResourceOwner  owner 
)
static

Definition at line 1599 of file lock.c.

References Assert, i, LOCALLOCK::lockOwners, LOCALLOCK::maxLockOwners, LOCALLOCKOWNER::nLocks, LOCALLOCK::nLocks, LOCALLOCK::numLockOwners, LOCALLOCKOWNER::owner, and ResourceOwnerRememberLock().

Referenced by GrantAwaitedLock(), and LockAcquireExtended().

1600 {
1601  LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
1602  int i;
1603 
1604  Assert(locallock->numLockOwners < locallock->maxLockOwners);
1605  /* Count the total */
1606  locallock->nLocks++;
1607  /* Count the per-owner lock */
1608  for (i = 0; i < locallock->numLockOwners; i++)
1609  {
1610  if (lockOwners[i].owner == owner)
1611  {
1612  lockOwners[i].nLocks++;
1613  return;
1614  }
1615  }
1616  lockOwners[i].owner = owner;
1617  lockOwners[i].nLocks = 1;
1618  locallock->numLockOwners++;
1619  if (owner != NULL)
1620  ResourceOwnerRememberLock(owner, locallock);
1621 }

◆ InitLocks()

void InitLocks ( void  )

Definition at line 377 of file lock.c.

References HASHCTL::entrysize, HASHCTL::hash, HASH_BLOBS, hash_create(), hash_destroy(), HASH_ELEM, HASH_FUNCTION, HASH_PARTITION, HASHCTL::keysize, MemSet, FastPathStrongRelationLockData::mutex, NLOCKENTS, NUM_LOCK_PARTITIONS, HASHCTL::num_partitions, proclock_hash(), ShmemInitHash(), ShmemInitStruct(), and SpinLockInit.

Referenced by CreateSharedMemoryAndSemaphores().

378 {
379  HASHCTL info;
380  long init_table_size,
381  max_table_size;
382  bool found;
383 
384  /*
385  * Compute init/max size to request for lock hashtables. Note these
386  * calculations must agree with LockShmemSize!
387  */
388  max_table_size = NLOCKENTS();
389  init_table_size = max_table_size / 2;
390 
391  /*
392  * Allocate hash table for LOCK structs. This stores per-locked-object
393  * information.
394  */
395  MemSet(&info, 0, sizeof(info));
396  info.keysize = sizeof(LOCKTAG);
397  info.entrysize = sizeof(LOCK);
398  info.num_partitions = NUM_LOCK_PARTITIONS;
399 
400  LockMethodLockHash = ShmemInitHash("LOCK hash",
401  init_table_size,
402  max_table_size,
403  &info,
404  HASH_ELEM | HASH_BLOBS | HASH_PARTITION);
405 
406  /* Assume an average of 2 holders per lock */
407  max_table_size *= 2;
408  init_table_size *= 2;
409 
410  /*
411  * Allocate hash table for PROCLOCK structs. This stores
412  * per-lock-per-holder information.
413  */
414  info.keysize = sizeof(PROCLOCKTAG);
415  info.entrysize = sizeof(PROCLOCK);
416  info.hash = proclock_hash;
417  info.num_partitions = NUM_LOCK_PARTITIONS;
418 
419  LockMethodProcLockHash = ShmemInitHash("PROCLOCK hash",
420  init_table_size,
421  max_table_size,
422  &info,
423  HASH_ELEM | HASH_FUNCTION | HASH_PARTITION);
424 
425  /*
426  * Allocate fast-path structures.
427  */
428  FastPathStrongRelationLocks = (FastPathStrongRelationLockData *)
429  ShmemInitStruct("Fast Path Strong Relation Lock Data",
430  sizeof(FastPathStrongRelationLockData), &found);
431  if (!found)
432  SpinLockInit(&FastPathStrongRelationLocks->mutex);
433 
434  /*
435  * Allocate non-shared hash table for LOCALLOCK structs. This stores lock
436  * counts and resource owner information.
437  *
438  * The non-shared table could already exist in this process (this occurs
439  * when the postmaster is recreating shared memory after a backend crash).
440  * If so, delete and recreate it. (We could simply leave it, since it
441  * ought to be empty in the postmaster, but for safety let's zap it.)
442  */
443  if (LockMethodLocalHash)
444  hash_destroy(LockMethodLocalHash);
445 
446  info.keysize = sizeof(LOCALLOCKTAG);
447  info.entrysize = sizeof(LOCALLOCK);
448 
449  LockMethodLocalHash = hash_create("LOCALLOCK hash",
450  16,
451  &info,
452  HASH_ELEM | HASH_BLOBS);
453 }
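To make the table sizing concrete: NLOCKENTS() works out to max_locks_per_xact * (MaxBackends + max_prepared_xacts), and the PROCLOCK table is sized at twice the LOCK table. A purely illustrative calculation, where the inputs are assumed example values rather than this build's settings:

/*
 * Illustrative sizing only; all inputs are assumed example values.
 */
long        locks_per_xact = 64;    /* max_locks_per_transaction */
long        backends = 100;         /* MaxBackends */
long        prepared = 0;           /* max_prepared_transactions */

long        max_table_size = locks_per_xact * (backends + prepared);
                                    /* NLOCKENTS(): 6400 LOCK entries */
long        init_table_size = max_table_size / 2;   /* 3200 initial entries */
long        proclock_entries = max_table_size * 2;  /* 12800 PROCLOCK entries,
                                                      * assuming ~2 holders per
                                                      * lock on average */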

◆ lock_twophase_postabort()

void lock_twophase_postabort ( TransactionId  xid,
uint16  info,
void *  recdata,
uint32  len 
)

Definition at line 4275 of file lock.c.

References lock_twophase_postcommit().

4277 {
4278  lock_twophase_postcommit(xid, info, recdata, len);
4279 }

◆ lock_twophase_postcommit()

void lock_twophase_postcommit ( TransactionId  xid,
uint16  info,
void *  recdata,
uint32  len 
)

Definition at line 4249 of file lock.c.

References Assert, elog, ERROR, lengthof, TwoPhaseLockRecord::lockmode, LockRefindAndRelease(), TwoPhaseLockRecord::locktag, LOCKTAG::locktag_lockmethodid, and TwoPhaseGetDummyProc().

Referenced by lock_twophase_postabort().

4251 {
4252  TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4253  PGPROC *proc = TwoPhaseGetDummyProc(xid, true);
4254  LOCKTAG *locktag;
4255  LOCKMETHODID lockmethodid;
4256  LockMethod lockMethodTable;
4257 
4258  Assert(len == sizeof(TwoPhaseLockRecord));
4259  locktag = &rec->locktag;
4260  lockmethodid = locktag->locktag_lockmethodid;
4261 
4262  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4263  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4264  lockMethodTable = LockMethods[lockmethodid];
4265 
4266  LockRefindAndRelease(lockMethodTable, proc, locktag, rec->lockmode, true);
4267 }

◆ lock_twophase_recover()

void lock_twophase_recover ( TransactionId  xid,
uint16  info,
void *  recdata,
uint32  len 
)

Definition at line 4036 of file lock.c.

References Assert, ConflictsWithRelationFastPath, FastPathStrongRelationLockData::count, elog, ereport, errcode(), errhint(), errmsg(), ERROR, FastPathStrongLockHashPartition, LOCK::granted, GrantLock(), LOCK::grantMask, PROCLOCK::groupLeader, HASH_ENTER_NULL, HASH_REMOVE, hash_search_with_hash_value(), PROCLOCK::holdMask, lengthof, LOCK_PRINT, LOCKBIT_ON, PGPROC::lockGroupLeader, LockHashPartition, LockHashPartitionLock, PROCLOCK::lockLink, TwoPhaseLockRecord::lockmode, LockMethodData::lockModeNames, TwoPhaseLockRecord::locktag, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_field3, LOCKTAG::locktag_lockmethodid, LockTagHashCode(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), MAX_LOCKMODES, MemSet, FastPathStrongRelationLockData::mutex, PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, PGPROC::myProcLocks, LOCK::nGranted, LOCK::nRequested, PANIC, PROCLOCK::procLink, PROCLOCK_PRINT, ProcLockHashCode(), LOCK::procLocks, ProcQueueInit(), PROCLOCK::releaseMask, LOCK::requested, SHMQueueEmpty(), SHMQueueInit(), SHMQueueInsertBefore(), SpinLockAcquire, SpinLockRelease, LOCK::tag, TwoPhaseGetDummyProc(), LOCK::waitMask, and LOCK::waitProcs.

4038 {
4039  TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4040  PGPROC *proc = TwoPhaseGetDummyProc(xid, false);
4041  LOCKTAG *locktag;
4042  LOCKMODE lockmode;
4043  LOCKMETHODID lockmethodid;
4044  LOCK *lock;
4045  PROCLOCK *proclock;
4046  PROCLOCKTAG proclocktag;
4047  bool found;
4048  uint32 hashcode;
4049  uint32 proclock_hashcode;
4050  int partition;
4051  LWLock *partitionLock;
4052  LockMethod lockMethodTable;
4053 
4054  Assert(len == sizeof(TwoPhaseLockRecord));
4055  locktag = &rec->locktag;
4056  lockmode = rec->lockmode;
4057  lockmethodid = locktag->locktag_lockmethodid;
4058 
4059  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4060  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4061  lockMethodTable = LockMethods[lockmethodid];
4062 
4063  hashcode = LockTagHashCode(locktag);
4064  partition = LockHashPartition(hashcode);
4065  partitionLock = LockHashPartitionLock(hashcode);
4066 
4067  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4068 
4069  /*
4070  * Find or create a lock with this tag.
4071  */
4072  lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
4073  (void *) locktag,
4074  hashcode,
4075  HASH_ENTER_NULL,
4076  &found);
4077  if (!lock)
4078  {
4079  LWLockRelease(partitionLock);
4080  ereport(ERROR,
4081  (errcode(ERRCODE_OUT_OF_MEMORY),
4082  errmsg("out of shared memory"),
4083  errhint("You might need to increase max_locks_per_transaction.")));
4084  }
4085 
4086  /*
4087  * if it's a new lock object, initialize it
4088  */
4089  if (!found)
4090  {
4091  lock->grantMask = 0;
4092  lock->waitMask = 0;
4093  SHMQueueInit(&(lock->procLocks));
4094  ProcQueueInit(&(lock->waitProcs));
4095  lock->nRequested = 0;
4096  lock->nGranted = 0;
4097  MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
4098  MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
4099  LOCK_PRINT("lock_twophase_recover: new", lock, lockmode);
4100  }
4101  else
4102  {
4103  LOCK_PRINT("lock_twophase_recover: found", lock, lockmode);
4104  Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
4105  Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
4106  Assert(lock->nGranted <= lock->nRequested);
4107  }
4108 
4109  /*
4110  * Create the hash key for the proclock table.
4111  */
4112  proclocktag.myLock = lock;
4113  proclocktag.myProc = proc;
4114 
4115  proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
4116 
4117  /*
4118  * Find or create a proclock entry with this tag
4119  */
4120  proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
4121  (void *) &proclocktag,
4122  proclock_hashcode,
4123  HASH_ENTER_NULL,
4124  &found);
4125  if (!proclock)
4126  {
4127  /* Oops, not enough shmem for the proclock */
4128  if (lock->nRequested == 0)
4129  {
4130  /*
4131  * There are no other requestors of this lock, so garbage-collect
4132  * the lock object. We *must* do this to avoid a permanent leak
4133  * of shared memory, because there won't be anything to cause
4134  * anyone to release the lock object later.
4135  */
4136  Assert(SHMQueueEmpty(&(lock->procLocks)));
4137  if (!hash_search_with_hash_value(LockMethodLockHash,
4138  (void *) &(lock->tag),
4139  hashcode,
4140  HASH_REMOVE,
4141  NULL))
4142  elog(PANIC, "lock table corrupted");
4143  }
4144  LWLockRelease(partitionLock);
4145  ereport(ERROR,
4146  (errcode(ERRCODE_OUT_OF_MEMORY),
4147  errmsg("out of shared memory"),
4148  errhint("You might need to increase max_locks_per_transaction.")));
4149  }
4150 
4151  /*
4152  * If new, initialize the new entry
4153  */
4154  if (!found)
4155  {
4156  Assert(proc->lockGroupLeader == NULL);
4157  proclock->groupLeader = proc;
4158  proclock->holdMask = 0;
4159  proclock->releaseMask = 0;
4160  /* Add proclock to appropriate lists */
4161  SHMQueueInsertBefore(&lock->procLocks, &proclock->lockLink);
4162  SHMQueueInsertBefore(&(proc->myProcLocks[partition]),
4163  &proclock->procLink);
4164  PROCLOCK_PRINT("lock_twophase_recover: new", proclock);
4165  }
4166  else
4167  {
4168  PROCLOCK_PRINT("lock_twophase_recover: found", proclock);
4169  Assert((proclock->holdMask & ~lock->grantMask) == 0);
4170  }
4171 
4172  /*
4173  * lock->nRequested and lock->requested[] count the total number of
4174  * requests, whether granted or waiting, so increment those immediately.
4175  */
4176  lock->nRequested++;
4177  lock->requested[lockmode]++;
4178  Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
4179 
4180  /*
4181  * We shouldn't already hold the desired lock.
4182  */
4183  if (proclock->holdMask & LOCKBIT_ON(lockmode))
4184  elog(ERROR, "lock %s on object %u/%u/%u is already held",
4185  lockMethodTable->lockModeNames[lockmode],
4186  lock->tag.locktag_field1, lock->tag.locktag_field2,
4187  lock->tag.locktag_field3);
4188 
4189  /*
4190  * We ignore any possible conflicts and just grant ourselves the lock. Not
4191  * only because we don't bother, but also to avoid deadlocks when
4192  * switching from standby to normal mode. See function comment.
4193  */
4194  GrantLock(lock, proclock, lockmode);
4195 
4196  /*
4197  * Bump strong lock count, to make sure any fast-path lock requests won't
4198  * be granted without consulting the primary lock table.
4199  */
4200  if (ConflictsWithRelationFastPath(&lock->tag, lockmode))
4201  {
4202  uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
4203 
4204  SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
4205  FastPathStrongRelationLocks->count[fasthashcode]++;
4206  SpinLockRelease(&FastPathStrongRelationLocks->mutex);
4207  }
4208 
4209  LWLockRelease(partitionLock);
4210 }

◆ lock_twophase_standby_recover()

void lock_twophase_standby_recover ( TransactionId  xid,
uint16  info,
void *  recdata,
uint32  len 
)

Definition at line 4217 of file lock.c.

References AccessExclusiveLock, Assert, elog, ERROR, lengthof, TwoPhaseLockRecord::lockmode, TwoPhaseLockRecord::locktag, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_lockmethodid, LOCKTAG_RELATION, LOCKTAG::locktag_type, and StandbyAcquireAccessExclusiveLock().

4219 {
4220  TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4221  LOCKTAG *locktag;
4222  LOCKMODE lockmode;
4223  LOCKMETHODID lockmethodid;
4224 
4225  Assert(len == sizeof(TwoPhaseLockRecord));
4226  locktag = &rec->locktag;
4227  lockmode = rec->lockmode;
4228  lockmethodid = locktag->locktag_lockmethodid;
4229 
4230  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4231  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4232 
4233  if (lockmode == AccessExclusiveLock &&
4234  locktag->locktag_type == LOCKTAG_RELATION)
4235  {
4236  StandbyAcquireAccessExclusiveLock(xid,
4237  locktag->locktag_field1 /* dboid */ ,
4238  locktag->locktag_field2 /* reloid */ );
4239  }
4240 }

◆ LockAcquire()

◆ LockAcquireExtended()

LockAcquireResult LockAcquireExtended ( const LOCKTAG *  locktag,
LOCKMODE  lockmode,
bool  sessionLock,
bool  dontWait,
bool  reportMemoryError,
LOCALLOCK **  locallockp 
)

Definition at line 732 of file lock.c.

References AbortStrongLockAcquire(), AccessExclusiveLock, Assert, PGPROC::backendLock, BeginStrongLockAcquire(), ConflictsWithRelationFastPath, LockMethodData::conflictTab, FastPathStrongRelationLockData::count, CurrentResourceOwner, EligibleForRelationFastPath, elog, ereport, errcode(), errhint(), errmsg(), ERROR, FastPathGrantRelationLock(), FastPathLocalUseCount, FastPathStrongLockHashPartition, FastPathTransferRelationLocks(), FinishStrongLockAcquire(), FP_LOCK_SLOTS_PER_BACKEND, GrantLock(), GrantLockLocal(), HASH_ENTER, HASH_REMOVE, hash_search(), hash_search_with_hash_value(), LOCALLOCK::hashcode, PGPROC::heldLocks, PROCLOCK::holdMask, LOCALLOCK::holdsStrongLockCount, InRecovery, lengthof, LOCALLOCKTAG::lock, LOCALLOCK::lock, LOCK_PRINT, LOCKACQUIRE_ALREADY_CLEAR, LOCKACQUIRE_ALREADY_HELD, LOCKACQUIRE_NOT_AVAIL, LOCKACQUIRE_OK, LOCKBIT_ON, LockCheckConflicts(), LOCALLOCK::lockCleared, LockHashPartitionLock, PROCLOCK::lockLink, TwoPhaseLockRecord::lockmode, LockMethodData::lockModeNames, LOCALLOCK::lockOwners, TwoPhaseLockRecord::locktag, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_field3, LOCKTAG::locktag_field4, LOCKTAG::locktag_lockmethodid, LOCKTAG_OBJECT, LOCKTAG_RELATION, LOCKTAG::locktag_type, LockTagHashCode(), LOG, LogAccessExclusiveLock(), LogAccessExclusiveLockPrepare(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), LOCALLOCK::maxLockOwners, MemoryContextAlloc(), MemSet, LOCALLOCKTAG::mode, PROCLOCKTAG::myLock, MyProc, LOCK::nGranted, LOCALLOCK::nLocks, LOCK::nRequested, LockMethodData::numLockModes, LOCALLOCK::numLockOwners, PANIC, PROCLOCK::procLink, LOCALLOCK::proclock, PROCLOCK_PRINT, ProcLockHashCode(), RecoveryInProgress(), RemoveLocalLock(), repalloc(), LOCK::requested, RowExclusiveLock, SetupLockInTable(), SHMQueueDelete(), PROCLOCK::tag, TopMemoryContext, LOCK::waitMask, WaitOnLock(), and XLogStandbyInfoActive.

Referenced by ConditionalLockRelation(), ConditionalLockRelationOid(), LockAcquire(), LockRelation(), and LockRelationOid().

738 {
739  LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
740  LockMethod lockMethodTable;
741  LOCALLOCKTAG localtag;
742  LOCALLOCK *locallock;
743  LOCK *lock;
744  PROCLOCK *proclock;
745  bool found;
746  ResourceOwner owner;
747  uint32 hashcode;
748  LWLock *partitionLock;
749  bool found_conflict;
750  bool log_lock = false;
751 
752  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
753  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
754  lockMethodTable = LockMethods[lockmethodid];
755  if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
756  elog(ERROR, "unrecognized lock mode: %d", lockmode);
757 
758  if (RecoveryInProgress() && !InRecovery &&
759  (locktag->locktag_type == LOCKTAG_OBJECT ||
760  locktag->locktag_type == LOCKTAG_RELATION) &&
761  lockmode > RowExclusiveLock)
762  ereport(ERROR,
763  (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
764  errmsg("cannot acquire lock mode %s on database objects while recovery is in progress",
765  lockMethodTable->lockModeNames[lockmode]),
766  errhint("Only RowExclusiveLock or less can be acquired on database objects during recovery.")));
767 
768 #ifdef LOCK_DEBUG
769  if (LOCK_DEBUG_ENABLED(locktag))
770  elog(LOG, "LockAcquire: lock [%u,%u] %s",
771  locktag->locktag_field1, locktag->locktag_field2,
772  lockMethodTable->lockModeNames[lockmode]);
773 #endif
774 
775  /* Identify owner for lock */
776  if (sessionLock)
777  owner = NULL;
778  else
779  owner = CurrentResourceOwner;
780 
781  /*
782  * Find or create a LOCALLOCK entry for this lock and lockmode
783  */
784  MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
785  localtag.lock = *locktag;
786  localtag.mode = lockmode;
787 
788  locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
789  (void *) &localtag,
790  HASH_ENTER, &found);
791 
792  /*
793  * if it's a new locallock object, initialize it
794  */
795  if (!found)
796  {
797  locallock->lock = NULL;
798  locallock->proclock = NULL;
799  locallock->hashcode = LockTagHashCode(&(localtag.lock));
800  locallock->nLocks = 0;
801  locallock->holdsStrongLockCount = false;
802  locallock->lockCleared = false;
803  locallock->numLockOwners = 0;
804  locallock->maxLockOwners = 8;
805  locallock->lockOwners = NULL; /* in case next line fails */
806  locallock->lockOwners = (LOCALLOCKOWNER *)
807  MemoryContextAlloc(TopMemoryContext,
808  locallock->maxLockOwners * sizeof(LOCALLOCKOWNER));
809  }
810  else
811  {
812  /* Make sure there will be room to remember the lock */
813  if (locallock->numLockOwners >= locallock->maxLockOwners)
814  {
815  int newsize = locallock->maxLockOwners * 2;
816 
817  locallock->lockOwners = (LOCALLOCKOWNER *)
818  repalloc(locallock->lockOwners,
819  newsize * sizeof(LOCALLOCKOWNER));
820  locallock->maxLockOwners = newsize;
821  }
822  }
823  hashcode = locallock->hashcode;
824 
825  if (locallockp)
826  *locallockp = locallock;
827 
828  /*
829  * If we already hold the lock, we can just increase the count locally.
830  *
831  * If lockCleared is already set, caller need not worry about absorbing
832  * sinval messages related to the lock's object.
833  */
834  if (locallock->nLocks > 0)
835  {
836  GrantLockLocal(locallock, owner);
837  if (locallock->lockCleared)
838  return LOCKACQUIRE_ALREADY_CLEAR;
839  else
840  return LOCKACQUIRE_ALREADY_HELD;
841  }
842 
843  /*
844  * Prepare to emit a WAL record if acquisition of this lock needs to be
845  * replayed in a standby server.
846  *
847  * Here we prepare to log; after lock is acquired we'll issue log record.
848  * This arrangement simplifies error recovery in case the preparation step
849  * fails.
850  *
851  * Only AccessExclusiveLocks can conflict with lock types that read-only
852  * transactions can acquire in a standby server. Make sure this definition
853  * matches the one in GetRunningTransactionLocks().
854  */
855  if (lockmode >= AccessExclusiveLock &&
856  locktag->locktag_type == LOCKTAG_RELATION &&
857  !RecoveryInProgress() &&
858  XLogStandbyInfoActive())
859  {
860  LogAccessExclusiveLockPrepare();
861  log_lock = true;
862  }
863 
864  /*
865  * Attempt to take lock via fast path, if eligible. But if we remember
866  * having filled up the fast path array, we don't attempt to make any
867  * further use of it until we release some locks. It's possible that some
868  * other backend has transferred some of those locks to the shared hash
869  * table, leaving space free, but it's not worth acquiring the LWLock just
870  * to check. It's also possible that we're acquiring a second or third
871  * lock type on a relation we have already locked using the fast-path, but
872  * for now we don't worry about that case either.
873  */
874  if (EligibleForRelationFastPath(locktag, lockmode) &&
875  FastPathLocalUseCount < FP_LOCK_SLOTS_PER_BACKEND)
876  {
877  uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
878  bool acquired;
879 
880  /*
881  * LWLockAcquire acts as a memory sequencing point, so it's safe to
882  * assume that any strong locker whose increment to
883  * FastPathStrongRelationLocks->counts becomes visible after we test
884  * it has yet to begin to transfer fast-path locks.
885  */
886  LWLockAcquire(&MyProc->backendLock, LW_EXCLUSIVE);
887  if (FastPathStrongRelationLocks->count[fasthashcode] != 0)
888  acquired = false;
889  else
890  acquired = FastPathGrantRelationLock(locktag->locktag_field2,
891  lockmode);
892  LWLockRelease(&MyProc->backendLock);
893  if (acquired)
894  {
895  /*
896  * The locallock might contain stale pointers to some old shared
897  * objects; we MUST reset these to null before considering the
898  * lock to be acquired via fast-path.
899  */
900  locallock->lock = NULL;
901  locallock->proclock = NULL;
902  GrantLockLocal(locallock, owner);
903  return LOCKACQUIRE_OK;
904  }
905  }
906 
907  /*
908  * If this lock could potentially have been taken via the fast-path by
909  * some other backend, we must (temporarily) disable further use of the
910  * fast-path for this lock tag, and migrate any locks already taken via
911  * this method to the main lock table.
912  */
913  if (ConflictsWithRelationFastPath(locktag, lockmode))
914  {
915  uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
916 
917  BeginStrongLockAcquire(locallock, fasthashcode);
918  if (!FastPathTransferRelationLocks(lockMethodTable, locktag,
919  hashcode))
920  {
921  AbortStrongLockAcquire();
922  if (locallock->nLocks == 0)
923  RemoveLocalLock(locallock);
924  if (locallockp)
925  *locallockp = NULL;
926  if (reportMemoryError)
927  ereport(ERROR,
928  (errcode(ERRCODE_OUT_OF_MEMORY),
929  errmsg("out of shared memory"),
930  errhint("You might need to increase max_locks_per_transaction.")));
931  else
932  return LOCKACQUIRE_NOT_AVAIL;
933  }
934  }
935 
936  /*
937  * We didn't find the lock in our LOCALLOCK table, and we didn't manage to
938  * take it via the fast-path, either, so we've got to mess with the shared
939  * lock table.
940  */
941  partitionLock = LockHashPartitionLock(hashcode);
942 
943  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
944 
945  /*
946  * Find or create lock and proclock entries with this tag
947  *
948  * Note: if the locallock object already existed, it might have a pointer
949  * to the lock already ... but we should not assume that that pointer is
950  * valid, since a lock object with zero hold and request counts can go
951  * away anytime. So we have to use SetupLockInTable() to recompute the
952  * lock and proclock pointers, even if they're already set.
953  */
954  proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
955  hashcode, lockmode);
956  if (!proclock)
957  {
958  AbortStrongLockAcquire();
959  LWLockRelease(partitionLock);
960  if (locallock->nLocks == 0)
961  RemoveLocalLock(locallock);
962  if (locallockp)
963  *locallockp = NULL;
964  if (reportMemoryError)
965  ereport(ERROR,
966  (errcode(ERRCODE_OUT_OF_MEMORY),
967  errmsg("out of shared memory"),
968  errhint("You might need to increase max_locks_per_transaction.")));
969  else
970  return LOCKACQUIRE_NOT_AVAIL;
971  }
972  locallock->proclock = proclock;
973  lock = proclock->tag.myLock;
974  locallock->lock = lock;
975 
976  /*
977  * If lock requested conflicts with locks requested by waiters, must join
978  * wait queue. Otherwise, check for conflict with already-held locks.
979  * (That's last because most complex check.)
980  */
981  if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
982  found_conflict = true;
983  else
984  found_conflict = LockCheckConflicts(lockMethodTable, lockmode,
985  lock, proclock);
986 
987  if (!found_conflict)
988  {
989  /* No conflict with held or previously requested locks */
990  GrantLock(lock, proclock, lockmode);
991  GrantLockLocal(locallock, owner);
992  }
993  else
994  {
995  /*
996  * We can't acquire the lock immediately. If caller specified no
997  * blocking, remove useless table entries and return
998  * LOCKACQUIRE_NOT_AVAIL without waiting.
999  */
1000  if (dontWait)
1001  {
1002  AbortStrongLockAcquire();
1003  if (proclock->holdMask == 0)
1004  {
1005  uint32 proclock_hashcode;
1006 
1007  proclock_hashcode = ProcLockHashCode(&proclock->tag, hashcode);
1008  SHMQueueDelete(&proclock->lockLink);
1009  SHMQueueDelete(&proclock->procLink);
1010  if (!hash_search_with_hash_value(LockMethodProcLockHash,
1011  (void *) &(proclock->tag),
1012  proclock_hashcode,
1013  HASH_REMOVE,
1014  NULL))
1015  elog(PANIC, "proclock table corrupted");
1016  }
1017  else
1018  PROCLOCK_PRINT("LockAcquire: NOWAIT", proclock);
1019  lock->nRequested--;
1020  lock->requested[lockmode]--;
1021  LOCK_PRINT("LockAcquire: conditional lock failed", lock, lockmode);
1022  Assert((lock->nRequested > 0) && (lock->requested[lockmode] >= 0));
1023  Assert(lock->nGranted <= lock->nRequested);
1024  LWLockRelease(partitionLock);
1025  if (locallock->nLocks == 0)
1026  RemoveLocalLock(locallock);
1027  if (locallockp)
1028  *locallockp = NULL;
1029  return LOCKACQUIRE_NOT_AVAIL;
1030  }
1031 
1032  /*
1033  * Set bitmask of locks this process already holds on this object.
1034  */
1035  MyProc->heldLocks = proclock->holdMask;
1036 
1037  /*
1038  * Sleep till someone wakes me up.
1039  */
1040 
1041  TRACE_POSTGRESQL_LOCK_WAIT_START(locktag->locktag_field1,
1042  locktag->locktag_field2,
1043  locktag->locktag_field3,
1044  locktag->locktag_field4,
1045  locktag->locktag_type,
1046  lockmode);
1047 
1048  WaitOnLock(locallock, owner);
1049 
1050  TRACE_POSTGRESQL_LOCK_WAIT_DONE(locktag->locktag_field1,
1051  locktag->locktag_field2,
1052  locktag->locktag_field3,
1053  locktag->locktag_field4,
1054  locktag->locktag_type,
1055  lockmode);
1056 
1057  /*
1058  * NOTE: do not do any material change of state between here and
1059  * return. All required changes in locktable state must have been
1060  * done when the lock was granted to us --- see notes in WaitOnLock.
1061  */
1062 
1063  /*
1064  * Check the proclock entry status, in case something in the ipc
1065  * communication doesn't work correctly.
1066  */
1067  if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
1068  {
1069  AbortStrongLockAcquire();
1070  PROCLOCK_PRINT("LockAcquire: INCONSISTENT", proclock);
1071  LOCK_PRINT("LockAcquire: INCONSISTENT", lock, lockmode);
1072  /* Should we retry ? */
1073  LWLockRelease(partitionLock);
1074  elog(ERROR, "LockAcquire failed");
1075  }
1076  PROCLOCK_PRINT("LockAcquire: granted", proclock);
1077  LOCK_PRINT("LockAcquire: granted", lock, lockmode);
1078  }
1079 
1080  /*
1081  * Lock state is fully up-to-date now; if we error out after this, no
1082  * special error cleanup is required.
1083  */
1084  FinishStrongLockAcquire();
1085 
1086  LWLockRelease(partitionLock);
1087 
1088  /*
1089  * Emit a WAL record if acquisition of this lock needs to be replayed in a
1090  * standby server.
1091  */
1092  if (log_lock)
1093  {
1094  /*
1095  * Decode the locktag back to the original values, to avoid sending
1096  * lots of empty bytes with every message. See lock.h to check how a
1097  * locktag is defined for LOCKTAG_RELATION
1098  */
1099  LogAccessExclusiveLock(locktag->locktag_field1,
1100  locktag->locktag_field2);
1101  }
1102 
1103  return LOCKACQUIRE_OK;
1104 }
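As a usage illustration, a hedged sketch in the spirit of the documented caller ConditionalLockRelationOid(): build a relation locktag and attempt a non-blocking acquisition through the dontWait path shown above. relid is a hypothetical relation OID, and shared catalogs (which would not use MyDatabaseId) are ignored for brevity:

/*
 * Hedged sketch, not lock.c code.  sessionLock = false ties the lock to the
 * current resource owner; dontWait = true makes a conflict return
 * LOCKACQUIRE_NOT_AVAIL instead of sleeping in WaitOnLock().
 */
LOCKTAG     tag;
LockAcquireResult res;

SET_LOCKTAG_RELATION(tag, MyDatabaseId, relid);

res = LockAcquireExtended(&tag, ShareLock,
                          false,    /* sessionLock */
                          true,     /* dontWait */
                          true,     /* reportMemoryError */
                          NULL);    /* locallockp */
if (res == LOCKACQUIRE_NOT_AVAIL)
{
    /* a conflicting lock is held; back off or retry later */
}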

◆ LockCheckConflicts()

bool LockCheckConflicts ( LockMethod  lockMethodTable,
LOCKMODE  lockmode,
LOCK *  lock,
PROCLOCK *  proclock 
)

Definition at line 1342 of file lock.c.

References Assert, LockMethodData::conflictTab, elog, LOCK::granted, LOCK::grantMask, PROCLOCK::groupLeader, PROCLOCK::holdMask, i, LOCKBIT_ON, PGPROC::lockGroupLeader, PROCLOCK::lockLink, TwoPhaseLockRecord::lockmode, MAX_LOCKMODES, MyProc, PROCLOCKTAG::myProc, LockMethodData::numLockModes, offsetof, PANIC, PROCLOCK_PRINT, LOCK::procLocks, SHMQueueNext(), and PROCLOCK::tag.

Referenced by LockAcquireExtended(), ProcLockWakeup(), and ProcSleep().

1346 {
1347  int numLockModes = lockMethodTable->numLockModes;
1348  LOCKMASK myLocks;
1349  int conflictMask = lockMethodTable->conflictTab[lockmode];
1350  int conflictsRemaining[MAX_LOCKMODES];
1351  int totalConflictsRemaining = 0;
1352  int i;
1353  SHM_QUEUE *procLocks;
1354  PROCLOCK *otherproclock;
1355 
1356  /*
1357  * first check for global conflicts: If no locks conflict with my request,
1358  * then I get the lock.
1359  *
1360  * Checking for conflict: lock->grantMask represents the types of
1361  * currently held locks. conflictTable[lockmode] has a bit set for each
1362  * type of lock that conflicts with request. Bitwise compare tells if
1363  * there is a conflict.
1364  */
1365  if (!(conflictMask & lock->grantMask))
1366  {
1367  PROCLOCK_PRINT("LockCheckConflicts: no conflict", proclock);
1368  return false;
1369  }
1370 
1371  /*
1372  * Rats. Something conflicts. But it could still be my own lock, or a
1373  * lock held by another member of my locking group. First, figure out how
1374  * many conflicts remain after subtracting out any locks I hold myself.
1375  */
1376  myLocks = proclock->holdMask;
1377  for (i = 1; i <= numLockModes; i++)
1378  {
1379  if ((conflictMask & LOCKBIT_ON(i)) == 0)
1380  {
1381  conflictsRemaining[i] = 0;
1382  continue;
1383  }
1384  conflictsRemaining[i] = lock->granted[i];
1385  if (myLocks & LOCKBIT_ON(i))
1386  --conflictsRemaining[i];
1387  totalConflictsRemaining += conflictsRemaining[i];
1388  }
1389 
1390  /* If no conflicts remain, we get the lock. */
1391  if (totalConflictsRemaining == 0)
1392  {
1393  PROCLOCK_PRINT("LockCheckConflicts: resolved (simple)", proclock);
1394  return false;
1395  }
1396 
1397  /* If no group locking, it's definitely a conflict. */
1398  if (proclock->groupLeader == MyProc && MyProc->lockGroupLeader == NULL)
1399  {
1400  Assert(proclock->tag.myProc == MyProc);
1401  PROCLOCK_PRINT("LockCheckConflicts: conflicting (simple)",
1402  proclock);
1403  return true;
1404  }
1405 
1406  /*
1407  * Locks held in conflicting modes by members of our own lock group are
1408  * not real conflicts; we can subtract those out and see if we still have
1409  * a conflict. This is O(N) in the number of processes holding or
1410  * awaiting locks on this object. We could improve that by making the
1411  * shared memory state more complex (and larger) but it doesn't seem worth
1412  * it.
1413  */
1414  procLocks = &(lock->procLocks);
1415  otherproclock = (PROCLOCK *)
1416  SHMQueueNext(procLocks, procLocks, offsetof(PROCLOCK, lockLink));
1417  while (otherproclock != NULL)
1418  {
1419  if (proclock != otherproclock &&
1420  proclock->groupLeader == otherproclock->groupLeader &&
1421  (otherproclock->holdMask & conflictMask) != 0)
1422  {
1423  int intersectMask = otherproclock->holdMask & conflictMask;
1424 
1425  for (i = 1; i <= numLockModes; i++)
1426  {
1427  if ((intersectMask & LOCKBIT_ON(i)) != 0)
1428  {
1429  if (conflictsRemaining[i] <= 0)
1430  elog(PANIC, "proclocks held do not match lock");
1431  conflictsRemaining[i]--;
1432  totalConflictsRemaining--;
1433  }
1434  }
1435 
1436  if (totalConflictsRemaining == 0)
1437  {
1438  PROCLOCK_PRINT("LockCheckConflicts: resolved (group)",
1439  proclock);
1440  return false;
1441  }
1442  }
1443  otherproclock = (PROCLOCK *)
1444  SHMQueueNext(procLocks, &otherproclock->lockLink,
1445  offsetof(PROCLOCK, lockLink));
1446  }
1447 
1448  /* Nope, it's a real conflict. */
1449  PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)", proclock);
1450  return true;
1451 }
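
The per-mode arithmetic above is easier to follow with concrete numbers. The program below is an illustrative stand-alone sketch, not part of lock.c: the masks are hard-coded rather than taken from a conflict table, and it models the common case where the only granted conflicting lock is our own, so no conflicts remain and the request can be granted.

#include <stdio.h>

#define MAX_LOCKMODES 10
#define LOCKBIT_ON(lockmode) (1 << (lockmode))

int
main(void)
{
	int			conflictMask = LOCKBIT_ON(7);	/* request conflicts with mode 7 */
	int			grantMask = LOCKBIT_ON(7);		/* mode 7 is currently granted */
	int			myLocks = LOCKBIT_ON(7);		/* ...and we are the one holding it */
	int			granted[MAX_LOCKMODES] = {0};
	int			conflictsRemaining[MAX_LOCKMODES] = {0};
	int			totalConflictsRemaining = 0;

	granted[7] = 1;

	/* global test: some granted mode conflicts, so look closer */
	if (!(conflictMask & grantMask))
	{
		printf("no conflict\n");
		return 0;
	}

	/* subtract out our own holds, mode by mode */
	for (int i = 1; i < MAX_LOCKMODES; i++)
	{
		if ((conflictMask & LOCKBIT_ON(i)) == 0)
			continue;
		conflictsRemaining[i] = granted[i];
		if (myLocks & LOCKBIT_ON(i))
			--conflictsRemaining[i];
		totalConflictsRemaining += conflictsRemaining[i];
	}

	/* prints 0: the only conflicting lock was ours, so we would be granted */
	printf("conflicts remaining: %d\n", totalConflictsRemaining);
	return 0;
}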

◆ LockHasWaiters()

bool LockHasWaiters ( const LOCKTAG *  locktag,
LOCKMODE  lockmode,
bool  sessionLock 
)

Definition at line 595 of file lock.c.

References LockMethodData::conflictTab, elog, ERROR, HASH_FIND, hash_search(), LOCALLOCK::hashcode, PROCLOCK::holdMask, lengthof, LOCALLOCKTAG::lock, LOCALLOCK::lock, LOCK_PRINT, LOCKBIT_ON, LockHashPartitionLock, TwoPhaseLockRecord::lockmode, LockMethodData::lockModeNames, TwoPhaseLockRecord::locktag, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_lockmethodid, LOG, LW_SHARED, LWLockAcquire(), LWLockRelease(), MemSet, LOCALLOCKTAG::mode, LOCALLOCK::nLocks, LockMethodData::numLockModes, LOCALLOCK::proclock, PROCLOCK_PRINT, RemoveLocalLock(), LOCK::waitMask, and WARNING.

Referenced by LockHasWaitersRelation().

596 {
597  LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
598  LockMethod lockMethodTable;
599  LOCALLOCKTAG localtag;
600  LOCALLOCK *locallock;
601  LOCK *lock;
602  PROCLOCK *proclock;
603  LWLock *partitionLock;
604  bool hasWaiters = false;
605 
606  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
607  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
608  lockMethodTable = LockMethods[lockmethodid];
609  if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
610  elog(ERROR, "unrecognized lock mode: %d", lockmode);
611 
612 #ifdef LOCK_DEBUG
613  if (LOCK_DEBUG_ENABLED(locktag))
614  elog(LOG, "LockHasWaiters: lock [%u,%u] %s",
615  locktag->locktag_field1, locktag->locktag_field2,
616  lockMethodTable->lockModeNames[lockmode]);
617 #endif
618 
619  /*
620  * Find the LOCALLOCK entry for this lock and lockmode
621  */
622  MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
623  localtag.lock = *locktag;
624  localtag.mode = lockmode;
625 
626  locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
627  (void *) &localtag,
628  HASH_FIND, NULL);
629 
630  /*
631  * let the caller print its own error message, too. Do not ereport(ERROR).
632  */
633  if (!locallock || locallock->nLocks <= 0)
634  {
635  elog(WARNING, "you don't own a lock of type %s",
636  lockMethodTable->lockModeNames[lockmode]);
637  return false;
638  }
639 
640  /*
641  * Check the shared lock table.
642  */
643  partitionLock = LockHashPartitionLock(locallock->hashcode);
644 
645  LWLockAcquire(partitionLock, LW_SHARED);
646 
647  /*
648  * We don't need to re-find the lock or proclock, since we kept their
649  * addresses in the locallock table, and they couldn't have been removed
650  * while we were holding a lock on them.
651  */
652  lock = locallock->lock;
653  LOCK_PRINT("LockHasWaiters: found", lock, lockmode);
654  proclock = locallock->proclock;
655  PROCLOCK_PRINT("LockHasWaiters: found", proclock);
656 
657  /*
658  * Double-check that we are actually holding a lock of the type we want to
659  * release.
660  */
661  if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
662  {
663  PROCLOCK_PRINT("LockHasWaiters: WRONGTYPE", proclock);
664  LWLockRelease(partitionLock);
665  elog(WARNING, "you don't own a lock of type %s",
666  lockMethodTable->lockModeNames[lockmode]);
667  RemoveLocalLock(locallock);
668  return false;
669  }
670 
671  /*
672  * Do the checking.
673  */
674  if ((lockMethodTable->conflictTab[lockmode] & lock->waitMask) != 0)
675  hasWaiters = true;
676 
677  LWLockRelease(partitionLock);
678 
679  return hasWaiters;
680 }
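
A hedged usage sketch follows: this is roughly what the sole caller noted above, LockHasWaitersRelation(), does for relation locks. SET_LOCKTAG_RELATION() and MyDatabaseId are assumed from lock.h and miscadmin.h; they are not documented in this excerpt.

#include "postgres.h"

#include "miscadmin.h"
#include "storage/lock.h"

/* Ask whether another backend is queued behind a relation lock we hold. */
static bool
rel_lock_has_waiters(Oid relid, LOCKMODE lockmode)
{
	LOCKTAG		tag;

	SET_LOCKTAG_RELATION(tag, MyDatabaseId, relid);

	/* sessionLock = false: the lock we hold is transaction-scoped */
	return LockHasWaiters(&tag, lockmode, false);
}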

◆ LockHeldByMe()

bool LockHeldByMe ( const LOCKTAG *  locktag,
LOCKMODE  lockmode 
)

Definition at line 571 of file lock.c.

References HASH_FIND, hash_search(), LOCALLOCKTAG::lock, TwoPhaseLockRecord::lockmode, TwoPhaseLockRecord::locktag, MemSet, LOCALLOCKTAG::mode, and LOCALLOCK::nLocks.

Referenced by CheckRelationLockedByMe().

572 {
573  LOCALLOCKTAG localtag;
574  LOCALLOCK *locallock;
575 
576  /*
577  * See if there is a LOCALLOCK entry for this lock and lockmode
578  */
579  MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
580  localtag.lock = *locktag;
581  localtag.mode = lockmode;
582 
583  locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
584  (void *) &localtag,
585  HASH_FIND, NULL);
586 
587  return (locallock && locallock->nLocks > 0);
588 }
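
As a hedged sketch of typical use (the real caller is CheckRelationLockedByMe() in lmgr.c), the helper below asserts that the current backend already holds a particular relation lock. Note that LockHeldByMe() tests for that exact mode only; holding a stronger mode does not count. SET_LOCKTAG_RELATION() and MyDatabaseId are assumptions not documented in this excerpt.

#include "postgres.h"

#include "miscadmin.h"
#include "storage/lock.h"

/* Sanity-check that this backend already holds relid in the given mode. */
static void
assert_rel_locked(Oid relid, LOCKMODE lockmode)
{
	LOCKTAG		tag;

	SET_LOCKTAG_RELATION(tag, MyDatabaseId, relid);
	Assert(LockHeldByMe(&tag, lockmode));
}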

◆ LockReassignCurrentOwner()

void LockReassignCurrentOwner ( LOCALLOCK **  locallocks,
int  nlocks 
)

Definition at line 2486 of file lock.c.

References Assert, CurrentResourceOwner, hash_seq_init(), hash_seq_search(), i, LockReassignOwner(), ResourceOwnerGetParent(), and status().

Referenced by ResourceOwnerReleaseInternal().

2487 {
2488  ResourceOwner parent = ResourceOwnerGetParent(CurrentResourceOwner);
2489 
2490  Assert(parent != NULL);
2491 
2492  if (locallocks == NULL)
2493  {
2494  HASH_SEQ_STATUS status;
2495  LOCALLOCK *locallock;
2496 
2497  hash_seq_init(&status, LockMethodLocalHash);
2498 
2499  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2500  LockReassignOwner(locallock, parent);
2501  }
2502  else
2503  {
2504  int i;
2505 
2506  for (i = nlocks - 1; i >= 0; i--)
2507  LockReassignOwner(locallocks[i], parent);
2508  }
2509 }

◆ LockReassignOwner()

static void LockReassignOwner ( LOCALLOCK *  locallock,
ResourceOwner  parent 
)
static

Definition at line 2516 of file lock.c.

References CurrentResourceOwner, i, LOCALLOCK::lockOwners, LOCALLOCKOWNER::nLocks, LOCALLOCK::numLockOwners, LOCALLOCKOWNER::owner, ResourceOwnerForgetLock(), and ResourceOwnerRememberLock().

Referenced by LockReassignCurrentOwner().

2517 {
2518  LOCALLOCKOWNER *lockOwners;
2519  int i;
2520  int ic = -1;
2521  int ip = -1;
2522 
2523  /*
2524  * Scan to see if there are any locks belonging to current owner or its
2525  * parent
2526  */
2527  lockOwners = locallock->lockOwners;
2528  for (i = locallock->numLockOwners - 1; i >= 0; i--)
2529  {
2530  if (lockOwners[i].owner == CurrentResourceOwner)
2531  ic = i;
2532  else if (lockOwners[i].owner == parent)
2533  ip = i;
2534  }
2535 
2536  if (ic < 0)
2537  return; /* no current locks */
2538 
2539  if (ip < 0)
2540  {
2541  /* Parent has no slot, so just give it the child's slot */
2542  lockOwners[ic].owner = parent;
2543  ResourceOwnerRememberLock(parent, locallock);
2544  }
2545  else
2546  {
2547  /* Merge child's count with parent's */
2548  lockOwners[ip].nLocks += lockOwners[ic].nLocks;
2549  /* compact out unused slot */
2550  locallock->numLockOwners--;
2551  if (ic < locallock->numLockOwners)
2552  lockOwners[ic] = lockOwners[locallock->numLockOwners];
2553  }
2554  ResourceOwnerForgetLock(CurrentResourceOwner, locallock);
2555 }

◆ LockRefindAndRelease()

static void LockRefindAndRelease ( LockMethod  lockMethodTable,
PGPROC *  proc,
LOCKTAG *  locktag,
LOCKMODE  lockmode,
bool  decrement_strong_lock_count 
)
static

Definition at line 3040 of file lock.c.

References Assert, CleanUpLock(), ConflictsWithRelationFastPath, FastPathStrongRelationLockData::count, elog, FastPathStrongLockHashPartition, HASH_FIND, hash_search_with_hash_value(), PROCLOCK::holdMask, LOCKBIT_ON, LockHashPartitionLock, LockMethodData::lockModeNames, TwoPhaseLockRecord::locktag, LockTagHashCode(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), FastPathStrongRelationLockData::mutex, PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, PANIC, PROCLOCK_PRINT, ProcLockHashCode(), SpinLockAcquire, SpinLockRelease, UnGrantLock(), and WARNING.

Referenced by lock_twophase_postcommit(), LockReleaseAll(), and VirtualXactLockTableCleanup().

3043 {
3044  LOCK *lock;
3045  PROCLOCK *proclock;
3046  PROCLOCKTAG proclocktag;
3047  uint32 hashcode;
3048  uint32 proclock_hashcode;
3049  LWLock *partitionLock;
3050  bool wakeupNeeded;
3051 
3052  hashcode = LockTagHashCode(locktag);
3053  partitionLock = LockHashPartitionLock(hashcode);
3054 
3055  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
3056 
3057  /*
3058  * Re-find the lock object (it had better be there).
3059  */
3060  lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
3061  (void *) locktag,
3062  hashcode,
3063  HASH_FIND,
3064  NULL);
3065  if (!lock)
3066  elog(PANIC, "failed to re-find shared lock object");
3067 
3068  /*
3069  * Re-find the proclock object (ditto).
3070  */
3071  proclocktag.myLock = lock;
3072  proclocktag.myProc = proc;
3073 
3074  proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
3075 
3076  proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
3077  (void *) &proclocktag,
3078  proclock_hashcode,
3079  HASH_FIND,
3080  NULL);
3081  if (!proclock)
3082  elog(PANIC, "failed to re-find shared proclock object");
3083 
3084  /*
3085  * Double-check that we are actually holding a lock of the type we want to
3086  * release.
3087  */
3088  if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
3089  {
3090  PROCLOCK_PRINT("lock_twophase_postcommit: WRONGTYPE", proclock);
3091  LWLockRelease(partitionLock);
3092  elog(WARNING, "you don't own a lock of type %s",
3093  lockMethodTable->lockModeNames[lockmode]);
3094  return;
3095  }
3096 
3097  /*
3098  * Do the releasing. CleanUpLock will waken any now-wakable waiters.
3099  */
3100  wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
3101 
3102  CleanUpLock(lock, proclock,
3103  lockMethodTable, hashcode,
3104  wakeupNeeded);
3105 
3106  LWLockRelease(partitionLock);
3107 
3108  /*
3109  * Decrement strong lock count. This logic is needed only for 2PC.
3110  */
3111  if (decrement_strong_lock_count
3112  && ConflictsWithRelationFastPath(locktag, lockmode))
3113  {
3114  uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
3115 
3116  SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
3117  Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
3118  FastPathStrongRelationLocks->count[fasthashcode]--;
3119  SpinLockRelease(&FastPathStrongRelationLocks->mutex);
3120  }
3121 }

◆ LockRelease()

bool LockRelease ( const LOCKTAG *  locktag,
LOCKMODE  lockmode,
bool  sessionLock 
)

Definition at line 1882 of file lock.c.

References Assert, PGPROC::backendLock, CleanUpLock(), CurrentResourceOwner, EligibleForRelationFastPath, elog, ERROR, FastPathLocalUseCount, FastPathUnGrantRelationLock(), HASH_FIND, hash_search(), hash_search_with_hash_value(), LOCALLOCK::hashcode, PROCLOCK::holdMask, i, lengthof, LOCALLOCKTAG::lock, LOCALLOCK::lock, LOCK_PRINT, LOCKBIT_ON, LOCALLOCK::lockCleared, LockHashPartitionLock, TwoPhaseLockRecord::lockmode, LockMethodData::lockModeNames, LOCALLOCK::lockOwners, TwoPhaseLockRecord::locktag, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_lockmethodid, LOG, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), MemSet, LOCALLOCKTAG::mode, PROCLOCKTAG::myLock, MyProc, PROCLOCKTAG::myProc, LOCALLOCK::nLocks, LockMethodData::numLockModes, LOCALLOCK::numLockOwners, LOCALLOCK::proclock, PROCLOCK_PRINT, RemoveLocalLock(), ResourceOwnerForgetLock(), UnGrantLock(), and WARNING.

Referenced by ConditionalXactLockTableWait(), pg_advisory_unlock_int4(), pg_advisory_unlock_int8(), pg_advisory_unlock_shared_int4(), pg_advisory_unlock_shared_int8(), ReleaseLockIfHeld(), SpeculativeInsertionLockRelease(), SpeculativeInsertionWait(), StandbyReleaseLockList(), UnlockDatabaseObject(), UnlockPage(), UnlockRelation(), UnlockRelationForExtension(), UnlockRelationId(), UnlockRelationIdForSession(), UnlockRelationOid(), UnlockSharedObject(), UnlockSharedObjectForSession(), UnlockTuple(), VirtualXactLock(), XactLockTableDelete(), and XactLockTableWait().

1883 {
1884  LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
1885  LockMethod lockMethodTable;
1886  LOCALLOCKTAG localtag;
1887  LOCALLOCK *locallock;
1888  LOCK *lock;
1889  PROCLOCK *proclock;
1890  LWLock *partitionLock;
1891  bool wakeupNeeded;
1892 
1893  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
1894  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
1895  lockMethodTable = LockMethods[lockmethodid];
1896  if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
1897  elog(ERROR, "unrecognized lock mode: %d", lockmode);
1898 
1899 #ifdef LOCK_DEBUG
1900  if (LOCK_DEBUG_ENABLED(locktag))
1901  elog(LOG, "LockRelease: lock [%u,%u] %s",
1902  locktag->locktag_field1, locktag->locktag_field2,
1903  lockMethodTable->lockModeNames[lockmode]);
1904 #endif
1905 
1906  /*
1907  * Find the LOCALLOCK entry for this lock and lockmode
1908  */
1909  MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
1910  localtag.lock = *locktag;
1911  localtag.mode = lockmode;
1912 
1913  locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
1914  (void *) &localtag,
1915  HASH_FIND, NULL);
1916 
1917  /*
1918  * let the caller print its own error message, too. Do not ereport(ERROR).
1919  */
1920  if (!locallock || locallock->nLocks <= 0)
1921  {
1922  elog(WARNING, "you don't own a lock of type %s",
1923  lockMethodTable->lockModeNames[lockmode]);
1924  return false;
1925  }
1926 
1927  /*
1928  * Decrease the count for the resource owner.
1929  */
1930  {
1931  LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
1932  ResourceOwner owner;
1933  int i;
1934 
1935  /* Identify owner for lock */
1936  if (sessionLock)
1937  owner = NULL;
1938  else
1939  owner = CurrentResourceOwner;
1940 
1941  for (i = locallock->numLockOwners - 1; i >= 0; i--)
1942  {
1943  if (lockOwners[i].owner == owner)
1944  {
1945  Assert(lockOwners[i].nLocks > 0);
1946  if (--lockOwners[i].nLocks == 0)
1947  {
1948  if (owner != NULL)
1949  ResourceOwnerForgetLock(owner, locallock);
1950  /* compact out unused slot */
1951  locallock->numLockOwners--;
1952  if (i < locallock->numLockOwners)
1953  lockOwners[i] = lockOwners[locallock->numLockOwners];
1954  }
1955  break;
1956  }
1957  }
1958  if (i < 0)
1959  {
1960  /* don't release a lock belonging to another owner */
1961  elog(WARNING, "you don't own a lock of type %s",
1962  lockMethodTable->lockModeNames[lockmode]);
1963  return false;
1964  }
1965  }
1966 
1967  /*
1968  * Decrease the total local count. If we're still holding the lock, we're
1969  * done.
1970  */
1971  locallock->nLocks--;
1972 
1973  if (locallock->nLocks > 0)
1974  return true;
1975 
1976  /*
1977  * At this point we can no longer suppose we are clear of invalidation
1978  * messages related to this lock. Although we'll delete the LOCALLOCK
1979  * object before any intentional return from this routine, it seems worth
1980  * the trouble to explicitly reset lockCleared right now, just in case
1981  * some error prevents us from deleting the LOCALLOCK.
1982  */
1983  locallock->lockCleared = false;
1984 
1985  /* Attempt fast release of any lock eligible for the fast path. */
1986  if (EligibleForRelationFastPath(locktag, lockmode) &&
1987  FastPathLocalUseCount > 0)
1988  {
1989  bool released;
1990 
1991  /*
1992  * We might not find the lock here, even if we originally entered it
1993  * here. Another backend may have moved it to the main table.
1994  */
1995  LWLockAcquire(&MyProc->backendLock, LW_EXCLUSIVE);
1996  released = FastPathUnGrantRelationLock(locktag->locktag_field2,
1997  lockmode);
1998  LWLockRelease(&MyProc->backendLock);
1999  if (released)
2000  {
2001  RemoveLocalLock(locallock);
2002  return true;
2003  }
2004  }
2005 
2006  /*
2007  * Otherwise we've got to mess with the shared lock table.
2008  */
2009  partitionLock = LockHashPartitionLock(locallock->hashcode);
2010 
2011  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2012 
2013  /*
2014  * Normally, we don't need to re-find the lock or proclock, since we kept
2015  * their addresses in the locallock table, and they couldn't have been
2016  * removed while we were holding a lock on them. But it's possible that
2017  * the lock was taken fast-path and has since been moved to the main hash
2018  * table by another backend, in which case we will need to look up the
2019  * objects here. We assume the lock field is NULL if so.
2020  */
2021  lock = locallock->lock;
2022  if (!lock)
2023  {
2024  PROCLOCKTAG proclocktag;
2025 
2026  Assert(EligibleForRelationFastPath(locktag, lockmode));
2027  lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
2028  (const void *) locktag,
2029  locallock->hashcode,
2030  HASH_FIND,
2031  NULL);
2032  if (!lock)
2033  elog(ERROR, "failed to re-find shared lock object");
2034  locallock->lock = lock;
2035 
2036  proclocktag.myLock = lock;
2037  proclocktag.myProc = MyProc;
2038  locallock->proclock = (PROCLOCK *) hash_search(LockMethodProcLockHash,
2039  (void *) &proclocktag,
2040  HASH_FIND,
2041  NULL);
2042  if (!locallock->proclock)
2043  elog(ERROR, "failed to re-find shared proclock object");
2044  }
2045  LOCK_PRINT("LockRelease: found", lock, lockmode);
2046  proclock = locallock->proclock;
2047  PROCLOCK_PRINT("LockRelease: found", proclock);
2048 
2049  /*
2050  * Double-check that we are actually holding a lock of the type we want to
2051  * release.
2052  */
2053  if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
2054  {
2055  PROCLOCK_PRINT("LockRelease: WRONGTYPE", proclock);
2056  LWLockRelease(partitionLock);
2057  elog(WARNING, "you don't own a lock of type %s",
2058  lockMethodTable->lockModeNames[lockmode]);
2059  RemoveLocalLock(locallock);
2060  return false;
2061  }
2062 
2063  /*
2064  * Do the releasing. CleanUpLock will waken any now-wakable waiters.
2065  */
2066  wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
2067 
2068  CleanUpLock(lock, proclock,
2069  lockMethodTable, locallock->hashcode,
2070  wakeupNeeded);
2071 
2072  LWLockRelease(partitionLock);
2073 
2074  RemoveLocalLock(locallock);
2075  return true;
2076 }
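
A hedged usage sketch: pairing an acquisition with LockRelease() at the LOCKTAG level, which is what the lmgr.c convenience wrappers listed above (UnlockRelationOid() and friends) do for you. LockAcquire() is defined elsewhere in lock.c; it and SET_LOCKTAG_RELATION() are assumptions not documented in this excerpt.

#include "postgres.h"

#include "miscadmin.h"
#include "storage/lock.h"

static void
lock_and_unlock_rel(Oid relid)
{
	LOCKTAG		tag;

	SET_LOCKTAG_RELATION(tag, MyDatabaseId, relid);

	/* sessionLock = false, dontWait = false: block until granted */
	(void) LockAcquire(&tag, RowExclusiveLock, false, false);

	/* ... work on the relation ... */

	/*
	 * Release with the same tag, mode, and sessionLock flag.  If we did not
	 * actually hold the lock, LockRelease() emits a WARNING and returns
	 * false instead of throwing an error.
	 */
	(void) LockRelease(&tag, RowExclusiveLock, false);
}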

◆ LockReleaseAll()

void LockReleaseAll ( LOCKMETHODID  lockmethodid,
bool  allLocks 
)

Definition at line 2087 of file lock.c.

References Assert, PGPROC::backendLock, CleanUpLock(), DEFAULT_LOCKMETHOD, EligibleForRelationFastPath, elog, ERROR, FastPathUnGrantRelationLock(), LOCK::grantMask, hash_seq_init(), hash_seq_search(), PROCLOCK::holdMask, i, lengthof, LOCALLOCK_LOCKMETHOD, LOCALLOCKTAG::lock, LOCALLOCK::lock, LOCK_LOCKMETHOD, LOCK_PRINT, LOCKBIT_ON, LockHashPartitionLockByIndex, TwoPhaseLockRecord::lockmode, LOCALLOCK::lockOwners, LockRefindAndRelease(), LOCKTAG::locktag_field2, LockTagHashCode(), LOG, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), LOCALLOCKTAG::mode, PROCLOCKTAG::myLock, MyProc, PROCLOCKTAG::myProc, PGPROC::myProcLocks, LOCK::nGranted, LOCALLOCKOWNER::nLocks, LOCALLOCK::nLocks, LOCK::nRequested, NUM_LOCK_PARTITIONS, LockMethodData::numLockModes, LOCALLOCK::numLockOwners, offsetof, LOCALLOCKOWNER::owner, PANIC, PROCLOCK::procLink, LOCALLOCK::proclock, PROCLOCK_PRINT, PROCLOCK::releaseMask, RemoveLocalLock(), ResourceOwnerForgetLock(), SHMQueueNext(), status(), LOCK::tag, PROCLOCK::tag, LOCALLOCK::tag, LockMethodData::trace_flag, UnGrantLock(), and VirtualXactLockTableCleanup().

Referenced by DiscardAll(), ProcReleaseLocks(), and ShutdownPostgres().

2088 {
2089  HASH_SEQ_STATUS status;
2090  LockMethod lockMethodTable;
2091  int i,
2092  numLockModes;
2093  LOCALLOCK *locallock;
2094  LOCK *lock;
2095  PROCLOCK *proclock;
2096  int partition;
2097  bool have_fast_path_lwlock = false;
2098 
2099  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2100  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2101  lockMethodTable = LockMethods[lockmethodid];
2102 
2103 #ifdef LOCK_DEBUG
2104  if (*(lockMethodTable->trace_flag))
2105  elog(LOG, "LockReleaseAll: lockmethod=%d", lockmethodid);
2106 #endif
2107 
2108  /*
2109  * Get rid of our fast-path VXID lock, if appropriate. Note that this is
2110  * the only way that the lock we hold on our own VXID can ever get
2111  * released: it is always and only released when a toplevel transaction
2112  * ends.
2113  */
2114  if (lockmethodid == DEFAULT_LOCKMETHOD)
2115  VirtualXactLockTableCleanup();
2116 
2117  numLockModes = lockMethodTable->numLockModes;
2118 
2119  /*
2120  * First we run through the locallock table and get rid of unwanted
2121  * entries, then we scan the process's proclocks and get rid of those. We
2122  * do this separately because we may have multiple locallock entries
2123  * pointing to the same proclock, and we daren't end up with any dangling
2124  * pointers. Fast-path locks are cleaned up during the locallock table
2125  * scan, though.
2126  */
2127  hash_seq_init(&status, LockMethodLocalHash);
2128 
2129  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2130  {
2131  /*
2132  * If the LOCALLOCK entry is unused, we must've run out of shared
2133  * memory while trying to set up this lock. Just forget the local
2134  * entry.
2135  */
2136  if (locallock->nLocks == 0)
2137  {
2138  RemoveLocalLock(locallock);
2139  continue;
2140  }
2141 
2142  /* Ignore items that are not of the lockmethod to be removed */
2143  if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
2144  continue;
2145 
2146  /*
2147  * If we are asked to release all locks, we can just zap the entry.
2148  * Otherwise, must scan to see if there are session locks. We assume
2149  * there is at most one lockOwners entry for session locks.
2150  */
2151  if (!allLocks)
2152  {
2153  LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
2154 
2155  /* If session lock is above array position 0, move it down to 0 */
2156  for (i = 0; i < locallock->numLockOwners; i++)
2157  {
2158  if (lockOwners[i].owner == NULL)
2159  lockOwners[0] = lockOwners[i];
2160  else
2161  ResourceOwnerForgetLock(lockOwners[i].owner, locallock);
2162  }
2163 
2164  if (locallock->numLockOwners > 0 &&
2165  lockOwners[0].owner == NULL &&
2166  lockOwners[0].nLocks > 0)
2167  {
2168  /* Fix the locallock to show just the session locks */
2169  locallock->nLocks = lockOwners[0].nLocks;
2170  locallock->numLockOwners = 1;
2171  /* We aren't deleting this locallock, so done */
2172  continue;
2173  }
2174  else
2175  locallock->numLockOwners = 0;
2176  }
2177 
2178  /*
2179  * If the lock or proclock pointers are NULL, this lock was taken via
2180  * the relation fast-path (and is not known to have been transferred).
2181  */
2182  if (locallock->proclock == NULL || locallock->lock == NULL)
2183  {
2184  LOCKMODE lockmode = locallock->tag.mode;
2185  Oid relid;
2186 
2187  /* Verify that a fast-path lock is what we've got. */
2188  if (!EligibleForRelationFastPath(&locallock->tag.lock, lockmode))
2189  elog(PANIC, "locallock table corrupted");
2190 
2191  /*
2192  * If we don't currently hold the LWLock that protects our
2193  * fast-path data structures, we must acquire it before attempting
2194  * to release the lock via the fast-path. We will continue to
2195  * hold the LWLock until we're done scanning the locallock table,
2196  * unless we hit a transferred fast-path lock. (XXX is this
2197  * really such a good idea? There could be a lot of entries ...)
2198  */
2199  if (!have_fast_path_lwlock)
2200  {
2201  LWLockAcquire(&MyProc->backendLock, LW_EXCLUSIVE);
2202  have_fast_path_lwlock = true;
2203  }
2204 
2205  /* Attempt fast-path release. */
2206  relid = locallock->tag.lock.locktag_field2;
2207  if (FastPathUnGrantRelationLock(relid, lockmode))
2208  {
2209  RemoveLocalLock(locallock);
2210  continue;
2211  }
2212 
2213  /*
2214  * Our lock, originally taken via the fast path, has been
2215  * transferred to the main lock table. That's going to require
2216  * some extra work, so release our fast-path lock before starting.
2217  */
2218  LWLockRelease(&MyProc->backendLock);
2219  have_fast_path_lwlock = false;
2220 
2221  /*
2222  * Now dump the lock. We haven't got a pointer to the LOCK or
2223  * PROCLOCK in this case, so we have to handle this a bit
2224  * differently than a normal lock release. Unfortunately, this
2225  * requires an extra LWLock acquire-and-release cycle on the
2226  * partitionLock, but hopefully it shouldn't happen often.
2227  */
2228  LockRefindAndRelease(lockMethodTable, MyProc,
2229  &locallock->tag.lock, lockmode, false);
2230  RemoveLocalLock(locallock);
2231  continue;
2232  }
2233 
2234  /* Mark the proclock to show we need to release this lockmode */
2235  if (locallock->nLocks > 0)
2236  locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
2237 
2238  /* And remove the locallock hashtable entry */
2239  RemoveLocalLock(locallock);
2240  }
2241 
2242  /* Done with the fast-path data structures */
2243  if (have_fast_path_lwlock)
2244  LWLockRelease(&MyProc->backendLock);
2245 
2246  /*
2247  * Now, scan each lock partition separately.
2248  */
2249  for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
2250  {
2251  LWLock *partitionLock;
2252  SHM_QUEUE *procLocks = &(MyProc->myProcLocks[partition]);
2253  PROCLOCK *nextplock;
2254 
2255  partitionLock = LockHashPartitionLockByIndex(partition);
2256 
2257  /*
2258  * If the proclock list for this partition is empty, we can skip
2259  * acquiring the partition lock. This optimization is trickier than
2260  * it looks, because another backend could be in process of adding
2261  * something to our proclock list due to promoting one of our
2262  * fast-path locks. However, any such lock must be one that we
2263  * decided not to delete above, so it's okay to skip it again now;
2264  * we'd just decide not to delete it again. We must, however, be
2265  * careful to re-fetch the list header once we've acquired the
2266  * partition lock, to be sure we have a valid, up-to-date pointer.
2267  * (There is probably no significant risk if pointer fetch/store is
2268  * atomic, but we don't wish to assume that.)
2269  *
2270  * XXX This argument assumes that the locallock table correctly
2271  * represents all of our fast-path locks. While allLocks mode
2272  * guarantees to clean up all of our normal locks regardless of the
2273  * locallock situation, we lose that guarantee for fast-path locks.
2274  * This is not ideal.
2275  */
2276  if (SHMQueueNext(procLocks, procLocks,
2277  offsetof(PROCLOCK, procLink)) == NULL)
2278  continue; /* needn't examine this partition */
2279 
2280  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2281 
2282  for (proclock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks,
2283  offsetof(PROCLOCK, procLink));
2284  proclock;
2285  proclock = nextplock)
2286  {
2287  bool wakeupNeeded = false;
2288 
2289  /* Get link first, since we may unlink/delete this proclock */
2290  nextplock = (PROCLOCK *)
2291  SHMQueueNext(procLocks, &proclock->procLink,
2292  offsetof(PROCLOCK, procLink));
2293 
2294  Assert(proclock->tag.myProc == MyProc);
2295 
2296  lock = proclock->tag.myLock;
2297 
2298  /* Ignore items that are not of the lockmethod to be removed */
2299  if (LOCK_LOCKMETHOD(*lock) != lockmethodid)
2300  continue;
2301 
2302  /*
2303  * In allLocks mode, force release of all locks even if locallock
2304  * table had problems
2305  */
2306  if (allLocks)
2307  proclock->releaseMask = proclock->holdMask;
2308  else
2309  Assert((proclock->releaseMask & ~proclock->holdMask) == 0);
2310 
2311  /*
2312  * Ignore items that have nothing to be released, unless they have
2313  * holdMask == 0 and are therefore recyclable
2314  */
2315  if (proclock->releaseMask == 0 && proclock->holdMask != 0)
2316  continue;
2317 
2318  PROCLOCK_PRINT("LockReleaseAll", proclock);
2319  LOCK_PRINT("LockReleaseAll", lock, 0);
2320  Assert(lock->nRequested >= 0);
2321  Assert(lock->nGranted >= 0);
2322  Assert(lock->nGranted <= lock->nRequested);
2323  Assert((proclock->holdMask & ~lock->grantMask) == 0);
2324 
2325  /*
2326  * Release the previously-marked lock modes
2327  */
2328  for (i = 1; i <= numLockModes; i++)
2329  {
2330  if (proclock->releaseMask & LOCKBIT_ON(i))
2331  wakeupNeeded |= UnGrantLock(lock, i, proclock,
2332  lockMethodTable);
2333  }
2334  Assert((lock->nRequested >= 0) && (lock->nGranted >= 0));
2335  Assert(lock->nGranted <= lock->nRequested);
2336  LOCK_PRINT("LockReleaseAll: updated", lock, 0);
2337 
2338  proclock->releaseMask = 0;
2339 
2340  /* CleanUpLock will wake up waiters if needed. */
2341  CleanUpLock(lock, proclock,
2342  lockMethodTable,
2343  LockTagHashCode(&lock->tag),
2344  wakeupNeeded);
2345  } /* loop over PROCLOCKs within this partition */
2346 
2347  LWLockRelease(partitionLock);
2348  } /* loop over partitions */
2349 
2350 #ifdef LOCK_DEBUG
2351  if (*(lockMethodTable->trace_flag))
2352  elog(LOG, "LockReleaseAll done");
2353 #endif
2354 }
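
A hedged sketch of how this is typically driven at end of transaction (the real caller is ProcReleaseLocks() in proc.c; the wrapper name below is hypothetical):

#include "postgres.h"

#include "storage/lock.h"

static void
release_transaction_locks(bool allLocks)
{
	/*
	 * allLocks = false releases only transaction-scoped locks and keeps
	 * session-level locks; allLocks = true releases everything held under
	 * the default lock method, session locks included.
	 */
	LockReleaseAll(DEFAULT_LOCKMETHOD, allLocks);
}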

◆ LockReleaseCurrentOwner()

void LockReleaseCurrentOwner ( LOCALLOCK **  locallocks,
int  nlocks 
)

Definition at line 2391 of file lock.c.

References hash_seq_init(), hash_seq_search(), i, ReleaseLockIfHeld(), and status().

Referenced by ResourceOwnerReleaseInternal().

2392 {
2393  if (locallocks == NULL)
2394  {
2395  HASH_SEQ_STATUS status;
2396  LOCALLOCK *locallock;
2397 
2398  hash_seq_init(&status, LockMethodLocalHash);
2399 
2400  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2401  ReleaseLockIfHeld(locallock, false);
2402  }
2403  else
2404  {
2405  int i;
2406 
2407  for (i = nlocks - 1; i >= 0; i--)
2408  ReleaseLockIfHeld(locallocks[i], false);
2409  }
2410 }

◆ LockReleaseSession()

void LockReleaseSession ( LOCKMETHODID  lockmethodid)

Definition at line 2361 of file lock.c.

References elog, ERROR, hash_seq_init(), hash_seq_search(), lengthof, LOCALLOCK_LOCKMETHOD, ReleaseLockIfHeld(), and status().

Referenced by pg_advisory_unlock_all().

2362 {
2363  HASH_SEQ_STATUS status;
2364  LOCALLOCK *locallock;
2365 
2366  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2367  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2368 
2369  hash_seq_init(&status, LockMethodLocalHash);
2370 
2371  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2372  {
2373  /* Ignore items that are not of the specified lock method */
2374  if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
2375  continue;
2376 
2377  ReleaseLockIfHeld(locallock, true);
2378  }
2379 }
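
A hedged usage sketch: the caller noted above, pg_advisory_unlock_all(), releases every session-level advisory lock this way. USER_LOCKMETHOD (the user/advisory lock method id from lock.h) is an assumption not shown in this excerpt.

#include "postgres.h"

#include "storage/lock.h"

static void
drop_all_advisory_locks(void)
{
	/* advisory locks live under the user lock method */
	LockReleaseSession(USER_LOCKMETHOD);
}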

◆ LockShmemSize()

Size LockShmemSize ( void  )

Definition at line 3439 of file lock.c.

References add_size(), hash_estimate_size(), and NLOCKENTS.

Referenced by CreateSharedMemoryAndSemaphores().

3440 {
3441  Size size = 0;
3442  long max_table_size;
3443 
3444  /* lock hash table */
3445  max_table_size = NLOCKENTS();
3446  size = add_size(size, hash_estimate_size(max_table_size, sizeof(LOCK)));
3447 
3448  /* proclock hash table */
3449  max_table_size *= 2;
3450  size = add_size(size, hash_estimate_size(max_table_size, sizeof(PROCLOCK)));
3451 
3452  /*
3453  * Since NLOCKENTS is only an estimate, add 10% safety margin.
3454  */
3455  size = add_size(size, size / 10);
3456 
3457  return size;
3458 }
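
A hedged sketch of how the estimate is consumed: shared-memory sizing code in the style of CreateSharedMemoryAndSemaphores() folds LockShmemSize() into a running total, using the same overflow-checked add_size() seen above. The wrapper below is hypothetical.

#include "postgres.h"

#include "storage/lock.h"
#include "storage/shmem.h"

static Size
total_shmem_sketch(Size other_subsystems)
{
	Size		size = other_subsystems;

	/* heavyweight lock tables are just one contribution to the total */
	size = add_size(size, LockShmemSize());

	return size;
}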

◆ LockTagHashCode()

uint32 LockTagHashCode ( const LOCKTAG *  locktag)

Definition at line 490 of file lock.c.

References get_hash_value().

Referenced by CheckDeadLock(), GetLockConflicts(), lock_twophase_recover(), LockAcquireExtended(), LockRefindAndRelease(), LockReleaseAll(), LockWaiterCount(), proclock_hash(), and VirtualXactLock().

491 {
492  return get_hash_value(LockMethodLockHash, (const void *) locktag);
493 }
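
A hedged sketch of the usual pattern around this hash: the same hash code is used both for the dynahash lookup and for choosing the partition LWLock, as LockRefindAndRelease() above illustrates. LockHashPartitionLock() comes from lock.h and is assumed here.

#include "postgres.h"

#include "storage/lock.h"
#include "storage/lwlock.h"

static LWLock *
partition_lock_for(const LOCKTAG *locktag)
{
	uint32		hashcode = LockTagHashCode(locktag);

	/* the hash code selects one of the lock table's partition LWLocks */
	return LockHashPartitionLock(hashcode);
}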

◆ LockWaiterCount()

int LockWaiterCount ( const LOCKTAG *  locktag)

Definition at line 4465 of file lock.c.

References Assert, elog, ERROR, HASH_FIND, hash_search_with_hash_value(), lengthof, LockHashPartitionLock, TwoPhaseLockRecord::locktag, LOCKTAG::locktag_lockmethodid, LockTagHashCode(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), and LOCK::nRequested.

Referenced by RelationExtensionLockWaiterCount().

4466 {
4467  LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
4468  LOCK *lock;
4469  bool found;
4470  uint32 hashcode;
4471  LWLock *partitionLock;
4472  int waiters = 0;
4473 
4474  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4475  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4476 
4477  hashcode = LockTagHashCode(locktag);
4478  partitionLock = LockHashPartitionLock(hashcode);
4479  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4480 
4481  lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
4482  (const void *) locktag,
4483  hashcode,
4484  HASH_FIND,
4485  &found);
4486  if (found)
4487  {
4488  Assert(lock != NULL);
4489  waiters = lock->nRequested;
4490  }
4491  LWLockRelease(partitionLock);
4492 
4493  return waiters;
4494 }
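
A hedged usage sketch, modeled on the caller noted above (RelationExtensionLockWaiterCount()): count the backends interested in a relation-extension lock. SET_LOCKTAG_RELATION_EXTEND() and MyDatabaseId are assumptions from lock.h and miscadmin.h. Note that the value returned is the lock's nRequested count, which includes current holders as well as sleeping waiters.

#include "postgres.h"

#include "miscadmin.h"
#include "storage/lock.h"

static int
extension_lock_waiters(Oid relid)
{
	LOCKTAG		tag;

	SET_LOCKTAG_RELATION_EXTEND(tag, MyDatabaseId, relid);

	return LockWaiterCount(&tag);
}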

◆ MarkLockClear()

void MarkLockClear ( LOCALLOCK *  locallock)

Definition at line 1706 of file lock.c.

References Assert, LOCALLOCK::lockCleared, and LOCALLOCK::nLocks.

Referenced by ConditionalLockRelation(), ConditionalLockRelationOid(), LockRelation(), and LockRelationOid().

1707 {
1708  Assert(locallock->nLocks > 0);
1709  locallock->lockCleared = true;
1710 }
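
A hedged sketch of the pattern used by the callers listed above (LockRelationOid() in lmgr.c is the canonical one): once a heavyweight lock is obtained and invalidation messages have been processed, the LOCALLOCK is marked clear so a repeat acquisition can skip that work. The LockAcquireExtended() argument list below is schematic and not documented in this excerpt; SET_LOCKTAG_RELATION() and AcceptInvalidationMessages() are likewise assumptions.

#include "postgres.h"

#include "miscadmin.h"
#include "storage/lock.h"
#include "utils/inval.h"

static void
lock_rel_and_mark_clear(Oid relid, LOCKMODE lockmode)
{
	LOCKTAG		tag;
	LOCALLOCK  *locallock;
	LockAcquireResult res;

	SET_LOCKTAG_RELATION(tag, MyDatabaseId, relid);

	/* schematic call; see LockAcquireExtended() for the authoritative API */
	res = LockAcquireExtended(&tag, lockmode, false, false, true, &locallock);

	if (res != LOCKACQUIRE_ALREADY_CLEAR)
	{
		AcceptInvalidationMessages();
		MarkLockClear(locallock);
	}
}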

◆ PostPrepare_Locks()

void PostPrepare_Locks ( TransactionId  xid)

Definition at line 3247 of file lock.c.

References Assert, elog, END_CRIT_SECTION, ereport, errcode(), errmsg(), LOCK::grantMask, PROCLOCK::groupLeader, hash_seq_init(), hash_seq_search(), hash_update_hash_key(), PROCLOCK::holdMask, i, LOCALLOCKTAG::lock, LOCALLOCK::lock, LOCK_PRINT, LOCKBIT_ON, PGPROC::lockGroupLeader, LockHashPartitionLockByIndex, LOCALLOCK::lockOwners, LOCKTAG::locktag_type, LOCKTAG_VIRTUALTRANSACTION, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), LOCALLOCKTAG::mode, PROCLOCKTAG::myLock, MyProc, PROCLOCKTAG::myProc, PGPROC::myProcLocks, LOCK::nGranted, LOCALLOCK::nLocks, LOCK::nRequested, NUM_LOCK_PARTITIONS, LOCALLOCK::numLockOwners, offsetof, PANIC, PROCLOCK::procLink, LOCALLOCK::proclock, PROCLOCK_PRINT, PROCLOCK::releaseMask, RemoveLocalLock(), SHMQueueDelete(), SHMQueueInsertBefore(), SHMQueueNext(), START_CRIT_SECTION, status(), LOCK::tag, PROCLOCK::tag, LOCALLOCK::tag, and TwoPhaseGetDummyProc().

Referenced by PrepareTransaction().

3248 {
3249  PGPROC *newproc = TwoPhaseGetDummyProc(xid, false);
3250  HASH_SEQ_STATUS status;
3251  LOCALLOCK *locallock;
3252  LOCK *lock;
3253  PROCLOCK *proclock;
3254  PROCLOCKTAG proclocktag;
3255  int partition;
3256 
3257  /* Can't prepare a lock group follower. */
3258  Assert(MyProc->lockGroupLeader == NULL ||
3259  MyProc->lockGroupLeader == MyProc);
3260 
3261  /* This is a critical section: any error means big trouble */
3262  START_CRIT_SECTION();
3263 
3264  /*
3265  * First we run through the locallock table and get rid of unwanted
3266  * entries, then we scan the process's proclocks and transfer them to the
3267  * target proc.
3268  *
3269  * We do this separately because we may have multiple locallock entries
3270  * pointing to the same proclock, and we daren't end up with any dangling
3271  * pointers.
3272  */
3273  hash_seq_init(&status, LockMethodLocalHash);
3274 
3275  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3276  {
3277  LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3278  bool haveSessionLock;
3279  bool haveXactLock;
3280  int i;
3281 
3282  if (locallock->proclock == NULL || locallock->lock == NULL)
3283  {
3284  /*
3285  * We must've run out of shared memory while trying to set up this
3286  * lock. Just forget the local entry.
3287  */
3288  Assert(locallock->nLocks == 0);
3289  RemoveLocalLock(locallock);
3290  continue;
3291  }
3292 
3293  /* Ignore VXID locks */
3294  if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3295  continue;
3296 
3297  /* Scan to see whether we hold it at session or transaction level */
3298  haveSessionLock = haveXactLock = false;
3299  for (i = locallock->numLockOwners - 1; i >= 0; i--)
3300  {
3301  if (lockOwners[i].owner == NULL)
3302  haveSessionLock = true;
3303  else
3304  haveXactLock = true;
3305  }
3306 
3307  /* Ignore it if we have only session lock */
3308  if (!haveXactLock)
3309  continue;
3310 
3311  /* This can't happen, because we already checked it */
3312  if (haveSessionLock)
3313  ereport(PANIC,
3314  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3315  errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3316 
3317  /* Mark the proclock to show we need to release this lockmode */
3318  if (locallock->nLocks > 0)
3319  locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
3320 
3321  /* And remove the locallock hashtable entry */
3322  RemoveLocalLock(locallock);
3323  }
3324 
3325  /*
3326  * Now, scan each lock partition separately.
3327  */
3328  for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
3329  {
3330  LWLock *partitionLock;
3331  SHM_QUEUE *procLocks = &(MyProc->myProcLocks[partition]);
3332  PROCLOCK *nextplock;
3333 
3334  partitionLock = LockHashPartitionLockByIndex(partition);
3335 
3336  /*
3337  * If the proclock list for this partition is empty, we can skip
3338  * acquiring the partition lock. This optimization is safer than the
3339  * situation in LockReleaseAll, because we got rid of any fast-path
3340  * locks during AtPrepare_Locks, so there cannot be any case where
3341  * another backend is adding something to our lists now. For safety,
3342  * though, we code this the same way as in LockReleaseAll.
3343  */
3344  if (SHMQueueNext(procLocks, procLocks,
3345  offsetof(PROCLOCK, procLink)) == NULL)
3346  continue; /* needn't examine this partition */
3347 
3348  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
3349 
3350  for (proclock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks,
3351  offsetof(PROCLOCK, procLink));
3352  proclock;
3353  proclock = nextplock)
3354  {
3355  /* Get link first, since we may unlink/relink this proclock */
3356  nextplock = (PROCLOCK *)
3357  SHMQueueNext(procLocks, &proclock->procLink,
3358  offsetof(PROCLOCK, procLink));
3359 
3360  Assert(proclock->tag.myProc == MyProc);
3361 
3362  lock = proclock->tag.myLock;
3363 
3364  /* Ignore VXID locks */