PostgreSQL Source Code (git master)
lock.h File Reference
#include "storage/backendid.h"
#include "storage/lockdefs.h"
#include "storage/lwlock.h"
#include "storage/shmem.h"
#include "utils/timestamp.h"
Include dependency graph for lock.h:
This graph shows which files directly or indirectly include this file:

Go to the source code of this file.

Data Structures

struct  PROC_QUEUE
 
struct  VirtualTransactionId
 
struct  LockMethodData
 
struct  LOCKTAG
 
struct  LOCK
 
struct  PROCLOCKTAG
 
struct  PROCLOCK
 
struct  LOCALLOCKTAG
 
struct  LOCALLOCKOWNER
 
struct  LOCALLOCK
 
struct  LockInstanceData
 
struct  LockData
 
struct  BlockedProcData
 
struct  BlockedProcsData
 

Macros

#define InvalidLocalTransactionId   0
 
#define LocalTransactionIdIsValid(lxid)   ((lxid) != InvalidLocalTransactionId)
 
#define VirtualTransactionIdIsValid(vxid)    (LocalTransactionIdIsValid((vxid).localTransactionId))
 
#define VirtualTransactionIdIsRecoveredPreparedXact(vxid)    ((vxid).backendId == InvalidBackendId)
 
#define VirtualTransactionIdEquals(vxid1, vxid2)
 
#define SetInvalidVirtualTransactionId(vxid)
 
#define GET_VXID_FROM_PGPROC(vxid, proc)
 
#define MAX_LOCKMODES   10
 
#define LOCKBIT_ON(lockmode)   (1 << (lockmode))
 
#define LOCKBIT_OFF(lockmode)   (~(1 << (lockmode)))
 
#define DEFAULT_LOCKMETHOD   1
 
#define USER_LOCKMETHOD   2
 
#define LOCKTAG_LAST_TYPE   LOCKTAG_ADVISORY
 
#define SET_LOCKTAG_RELATION(locktag, dboid, reloid)
 
#define SET_LOCKTAG_RELATION_EXTEND(locktag, dboid, reloid)
 
#define SET_LOCKTAG_DATABASE_FROZEN_IDS(locktag, dboid)
 
#define SET_LOCKTAG_PAGE(locktag, dboid, reloid, blocknum)
 
#define SET_LOCKTAG_TUPLE(locktag, dboid, reloid, blocknum, offnum)
 
#define SET_LOCKTAG_TRANSACTION(locktag, xid)
 
#define SET_LOCKTAG_VIRTUALTRANSACTION(locktag, vxid)
 
#define SET_LOCKTAG_SPECULATIVE_INSERTION(locktag, xid, token)
 
#define SET_LOCKTAG_OBJECT(locktag, dboid, classoid, objoid, objsubid)
 
#define SET_LOCKTAG_ADVISORY(locktag, id1, id2, id3, id4)
 
#define LOCK_LOCKMETHOD(lock)   ((LOCKMETHODID) (lock).tag.locktag_lockmethodid)
 
#define LOCK_LOCKTAG(lock)   ((LockTagType) (lock).tag.locktag_type)
 
#define PROCLOCK_LOCKMETHOD(proclock)    LOCK_LOCKMETHOD(*((proclock).tag.myLock))
 
#define LOCALLOCK_LOCKMETHOD(llock)   ((llock).tag.lock.locktag_lockmethodid)
 
#define LOCALLOCK_LOCKTAG(llock)   ((LockTagType) (llock).tag.lock.locktag_type)
 
#define LockHashPartition(hashcode)    ((hashcode) % NUM_LOCK_PARTITIONS)
 
#define LockHashPartitionLock(hashcode)
 
#define LockHashPartitionLockByIndex(i)    (&MainLWLockArray[LOCK_MANAGER_LWLOCK_OFFSET + (i)].lock)
 
#define LockHashPartitionLockByProc(leader_pgproc)    LockHashPartitionLock((leader_pgproc)->pgprocno)
 

Typedefs

typedef struct PGPROC PGPROC
 
typedef struct PROC_QUEUE PROC_QUEUE
 
typedef struct LockMethodData LockMethodData
 
typedef const LockMethodData * LockMethod
 
typedef uint16 LOCKMETHODID
 
typedef enum LockTagType LockTagType
 
typedef struct LOCKTAG LOCKTAG
 
typedef struct LOCK LOCK
 
typedef struct PROCLOCKTAG PROCLOCKTAG
 
typedef struct PROCLOCK PROCLOCK
 
typedef struct LOCALLOCKTAG LOCALLOCKTAG
 
typedef struct LOCALLOCKOWNER LOCALLOCKOWNER
 
typedef struct LOCALLOCK LOCALLOCK
 
typedef struct LockInstanceData LockInstanceData
 
typedef struct LockData LockData
 
typedef struct BlockedProcData BlockedProcData
 
typedef struct BlockedProcsData BlockedProcsData
 

Enumerations

enum  LockTagType {
  LOCKTAG_RELATION , LOCKTAG_RELATION_EXTEND , LOCKTAG_DATABASE_FROZEN_IDS , LOCKTAG_PAGE ,
  LOCKTAG_TUPLE , LOCKTAG_TRANSACTION , LOCKTAG_VIRTUALTRANSACTION , LOCKTAG_SPECULATIVE_TOKEN ,
  LOCKTAG_OBJECT , LOCKTAG_USERLOCK , LOCKTAG_ADVISORY
}
 
enum  LockAcquireResult { LOCKACQUIRE_NOT_AVAIL , LOCKACQUIRE_OK , LOCKACQUIRE_ALREADY_HELD , LOCKACQUIRE_ALREADY_CLEAR }
 
enum  DeadLockState {
  DS_NOT_YET_CHECKED , DS_NO_DEADLOCK , DS_SOFT_DEADLOCK , DS_HARD_DEADLOCK ,
  DS_BLOCKED_BY_AUTOVACUUM
}
 

Functions

void InitLocks (void)
 
LockMethod GetLocksMethodTable (const LOCK *lock)
 
LockMethod GetLockTagsMethodTable (const LOCKTAG *locktag)
 
uint32 LockTagHashCode (const LOCKTAG *locktag)
 
bool DoLockModesConflict (LOCKMODE mode1, LOCKMODE mode2)
 
LockAcquireResult LockAcquire (const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock, bool dontWait)
 
LockAcquireResult LockAcquireExtended (const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock, bool dontWait, bool reportMemoryError, LOCALLOCK **locallockp)
 
void AbortStrongLockAcquire (void)
 
void MarkLockClear (LOCALLOCK *locallock)
 
bool LockRelease (const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
 
void LockReleaseAll (LOCKMETHODID lockmethodid, bool allLocks)
 
void LockReleaseSession (LOCKMETHODID lockmethodid)
 
void LockReleaseCurrentOwner (LOCALLOCK **locallocks, int nlocks)
 
void LockReassignCurrentOwner (LOCALLOCK **locallocks, int nlocks)
 
bool LockHeldByMe (const LOCKTAG *locktag, LOCKMODE lockmode)
 
bool LockHasWaiters (const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
 
VirtualTransactionId * GetLockConflicts (const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
 
void AtPrepare_Locks (void)
 
void PostPrepare_Locks (TransactionId xid)
 
bool LockCheckConflicts (LockMethod lockMethodTable, LOCKMODE lockmode, LOCK *lock, PROCLOCK *proclock)
 
void GrantLock (LOCK *lock, PROCLOCK *proclock, LOCKMODE lockmode)
 
void GrantAwaitedLock (void)
 
void RemoveFromWaitQueue (PGPROC *proc, uint32 hashcode)
 
Size LockShmemSize (void)
 
LockData * GetLockStatusData (void)
 
BlockedProcsData * GetBlockerStatusData (int blocked_pid)
 
xl_standby_lock * GetRunningTransactionLocks (int *nlocks)
 
const char * GetLockmodeName (LOCKMETHODID lockmethodid, LOCKMODE mode)
 
void lock_twophase_recover (TransactionId xid, uint16 info, void *recdata, uint32 len)
 
void lock_twophase_postcommit (TransactionId xid, uint16 info, void *recdata, uint32 len)
 
void lock_twophase_postabort (TransactionId xid, uint16 info, void *recdata, uint32 len)
 
void lock_twophase_standby_recover (TransactionId xid, uint16 info, void *recdata, uint32 len)
 
DeadLockState DeadLockCheck (PGPROC *proc)
 
PGPROC * GetBlockingAutoVacuumPgproc (void)
 
void DeadLockReport (void) pg_attribute_noreturn()
 
void RememberSimpleDeadLock (PGPROC *proc1, LOCKMODE lockmode, LOCK *lock, PGPROC *proc2)
 
void InitDeadLockChecking (void)
 
int LockWaiterCount (const LOCKTAG *locktag)
 
void VirtualXactLockTableInsert (VirtualTransactionId vxid)
 
void VirtualXactLockTableCleanup (void)
 
bool VirtualXactLock (VirtualTransactionId vxid, bool wait)
 

Variables

int max_locks_per_xact
 
const char *const LockTagTypeNames []
 

Macro Definition Documentation

◆ DEFAULT_LOCKMETHOD

#define DEFAULT_LOCKMETHOD   1

Definition at line 130 of file lock.h.

◆ GET_VXID_FROM_PGPROC

#define GET_VXID_FROM_PGPROC(vxid, proc)
Value:
((vxid).backendId = (proc).backendId, \
(vxid).localTransactionId = (proc).lxid)

Definition at line 82 of file lock.h.
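
For illustration, a minimal backend-code sketch (the helper name is hypothetical, not part of lock.h) that uses this macro together with VirtualTransactionIdIsValid to report the current backend's virtual transaction id:

#include "postgres.h"
#include "storage/lock.h"
#include "storage/proc.h"       /* for MyProc */

/* Hypothetical helper: log this backend's current virtual transaction id. */
static void
report_own_vxid(void)
{
    VirtualTransactionId vxid;

    /* Copy backendId and lxid out of our own PGPROC entry. */
    GET_VXID_FROM_PGPROC(vxid, *MyProc);

    if (VirtualTransactionIdIsValid(vxid))
        elog(LOG, "current vxid is %d/%u",
             vxid.backendId, vxid.localTransactionId);
}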

◆ InvalidLocalTransactionId

#define InvalidLocalTransactionId   0

Definition at line 70 of file lock.h.

◆ LOCALLOCK_LOCKMETHOD

#define LOCALLOCK_LOCKMETHOD (   llock)    ((llock).tag.lock.locktag_lockmethodid)

Definition at line 435 of file lock.h.

◆ LOCALLOCK_LOCKTAG

#define LOCALLOCK_LOCKTAG (   llock)    ((LockTagType) (llock).tag.lock.locktag_type)

Definition at line 436 of file lock.h.

◆ LocalTransactionIdIsValid

#define LocalTransactionIdIsValid (   lxid)    ((lxid) != InvalidLocalTransactionId)

Definition at line 71 of file lock.h.

◆ LOCK_LOCKMETHOD

#define LOCK_LOCKMETHOD (   lock)    ((LOCKMETHODID) (lock).tag.locktag_lockmethodid)

Definition at line 316 of file lock.h.

◆ LOCK_LOCKTAG

#define LOCK_LOCKTAG (   lock)    ((LockTagType) (lock).tag.locktag_type)

Definition at line 317 of file lock.h.

◆ LOCKBIT_OFF

#define LOCKBIT_OFF (   lockmode)    (~(1 << (lockmode)))

Definition at line 90 of file lock.h.

◆ LOCKBIT_ON

#define LOCKBIT_ON (   lockmode)    (1 << (lockmode))

Definition at line 89 of file lock.h.

◆ LockHashPartition

#define LockHashPartition (   hashcode)     ((hashcode) % NUM_LOCK_PARTITIONS)

Definition at line 517 of file lock.h.

◆ LockHashPartitionLock

#define LockHashPartitionLock (   hashcode)
Value:
(&MainLWLockArray[LOCK_MANAGER_LWLOCK_OFFSET + \
 LockHashPartition(hashcode)].lock)

Definition at line 519 of file lock.h.
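
A short sketch of the usual pairing of LockTagHashCode with this macro, mirroring the pattern used inside lock.c (the wrapper function itself is hypothetical):

#include "postgres.h"
#include "storage/lock.h"
#include "storage/lwlock.h"

/* Hypothetical helper: inspect the shared lock table partition for a tag. */
static void
with_partition_lock(const LOCKTAG *locktag)
{
    uint32  hashcode = LockTagHashCode(locktag);
    LWLock *partitionLock = LockHashPartitionLock(hashcode);

    LWLockAcquire(partitionLock, LW_SHARED);
    /* ... examine the LOCK/PROCLOCK entries that hash to this partition ... */
    LWLockRelease(partitionLock);
}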

◆ LockHashPartitionLockByIndex

#define LockHashPartitionLockByIndex (   i)     (&MainLWLockArray[LOCK_MANAGER_LWLOCK_OFFSET + (i)].lock)

Definition at line 522 of file lock.h.

◆ LockHashPartitionLockByProc

#define LockHashPartitionLockByProc (   leader_pgproc)     LockHashPartitionLock((leader_pgproc)->pgprocno)

Definition at line 534 of file lock.h.

◆ LOCKTAG_LAST_TYPE

#define LOCKTAG_LAST_TYPE   LOCKTAG_ADVISORY

Definition at line 155 of file lock.h.

◆ MAX_LOCKMODES

#define MAX_LOCKMODES   10

Definition at line 87 of file lock.h.

◆ PROCLOCK_LOCKMETHOD

#define PROCLOCK_LOCKMETHOD (   proclock)     LOCK_LOCKMETHOD(*((proclock).tag.myLock))

Definition at line 374 of file lock.h.

◆ SET_LOCKTAG_ADVISORY

#define SET_LOCKTAG_ADVISORY(locktag, id1, id2, id3, id4)
Value:
((locktag).locktag_field1 = (id1), \
(locktag).locktag_field2 = (id2), \
(locktag).locktag_field3 = (id3), \
(locktag).locktag_field4 = (id4), \
(locktag).locktag_type = LOCKTAG_ADVISORY, \
(locktag).locktag_lockmethodid = USER_LOCKMETHOD)

Definition at line 273 of file lock.h.
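
As a usage sketch (not taken verbatim from the backend sources), a hypothetical helper that takes a session-level, non-blocking two-key advisory lock; the value 2 in the last field is assumed to mark the two-key int32 advisory-lock flavor:

#include "postgres.h"
#include "miscadmin.h"          /* for MyDatabaseId */
#include "storage/lock.h"

/* Hypothetical helper: try to take a session-level advisory lock. */
static bool
try_advisory_lock(uint32 key1, uint32 key2)
{
    LOCKTAG     tag;

    /* Assumption: field4 = 2 distinguishes the two-key int32 flavor. */
    SET_LOCKTAG_ADVISORY(tag, MyDatabaseId, key1, key2, 2);

    return LockAcquire(&tag, ExclusiveLock,
                       true,    /* sessionLock */
                       true)    /* dontWait */
        != LOCKACQUIRE_NOT_AVAIL;
}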

◆ SET_LOCKTAG_DATABASE_FROZEN_IDS

#define SET_LOCKTAG_DATABASE_FROZEN_IDS(locktag, dboid)
Value:
((locktag).locktag_field1 = (dboid), \
(locktag).locktag_field2 = 0, \
(locktag).locktag_field3 = 0, \
(locktag).locktag_field4 = 0, \
(locktag).locktag_type = LOCKTAG_DATABASE_FROZEN_IDS, \
(locktag).locktag_lockmethodid = DEFAULT_LOCKMETHOD)

Definition at line 202 of file lock.h.

◆ SET_LOCKTAG_OBJECT

#define SET_LOCKTAG_OBJECT(locktag, dboid, classoid, objoid, objsubid)
Value:
((locktag).locktag_field1 = (dboid), \
(locktag).locktag_field2 = (classoid), \
(locktag).locktag_field3 = (objoid), \
(locktag).locktag_field4 = (objsubid), \
(locktag).locktag_type = LOCKTAG_OBJECT, \
(locktag).locktag_lockmethodid = DEFAULT_LOCKMETHOD)

Definition at line 265 of file lock.h.

◆ SET_LOCKTAG_PAGE

#define SET_LOCKTAG_PAGE(locktag, dboid, reloid, blocknum)
Value:
((locktag).locktag_field1 = (dboid), \
(locktag).locktag_field2 = (reloid), \
(locktag).locktag_field3 = (blocknum), \
(locktag).locktag_field4 = 0, \
(locktag).locktag_type = LOCKTAG_PAGE, \
(locktag).locktag_lockmethodid = DEFAULT_LOCKMETHOD)

Definition at line 211 of file lock.h.

◆ SET_LOCKTAG_RELATION

#define SET_LOCKTAG_RELATION(locktag, dboid, reloid)
Value:
((locktag).locktag_field1 = (dboid), \
(locktag).locktag_field2 = (reloid), \
(locktag).locktag_field3 = 0, \
(locktag).locktag_field4 = 0, \
(locktag).locktag_type = LOCKTAG_RELATION, \
(locktag).locktag_lockmethodid = DEFAULT_LOCKMETHOD)

Definition at line 184 of file lock.h.
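
A minimal sketch, assuming the current database, showing how a relation lock tag is built and then checked against this backend's own locks (the helper name is hypothetical):

#include "postgres.h"
#include "miscadmin.h"          /* for MyDatabaseId */
#include "storage/lock.h"

/* Hypothetical helper: does this backend already hold AccessShareLock? */
static bool
holds_access_share(Oid reloid)
{
    LOCKTAG     tag;

    SET_LOCKTAG_RELATION(tag, MyDatabaseId, reloid);

    return LockHeldByMe(&tag, AccessShareLock);
}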

◆ SET_LOCKTAG_RELATION_EXTEND

#define SET_LOCKTAG_RELATION_EXTEND(locktag, dboid, reloid)
Value:
((locktag).locktag_field1 = (dboid), \
(locktag).locktag_field2 = (reloid), \
(locktag).locktag_field3 = 0, \
(locktag).locktag_field4 = 0, \
(locktag).locktag_type = LOCKTAG_RELATION_EXTEND, \
(locktag).locktag_lockmethodid = DEFAULT_LOCKMETHOD)

Definition at line 193 of file lock.h.

◆ SET_LOCKTAG_SPECULATIVE_INSERTION

#define SET_LOCKTAG_SPECULATIVE_INSERTION(locktag, xid, token)
Value:
((locktag).locktag_field1 = (xid), \
(locktag).locktag_field2 = (token), \
(locktag).locktag_field3 = 0, \
(locktag).locktag_field4 = 0, \
(locktag).locktag_type = LOCKTAG_SPECULATIVE_TOKEN, \
(locktag).locktag_lockmethodid = DEFAULT_LOCKMETHOD)

Definition at line 250 of file lock.h.

◆ SET_LOCKTAG_TRANSACTION

#define SET_LOCKTAG_TRANSACTION(locktag, xid)
Value:
((locktag).locktag_field1 = (xid), \
(locktag).locktag_field2 = 0, \
(locktag).locktag_field3 = 0, \
(locktag).locktag_field4 = 0, \
(locktag).locktag_type = LOCKTAG_TRANSACTION, \
(locktag).locktag_lockmethodid = DEFAULT_LOCKMETHOD)

Definition at line 229 of file lock.h.
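
A sketch of the familiar wait-for-transaction idiom built on this tag (the pattern used by XactLockTableWait; the wrapper shown here is hypothetical):

#include "postgres.h"
#include "storage/lock.h"

/* Hypothetical helper: block until the given transaction commits or aborts. */
static void
wait_for_xact(TransactionId xid)
{
    LOCKTAG     tag;

    SET_LOCKTAG_TRANSACTION(tag, xid);

    /* Every transaction holds ExclusiveLock on its own xid, so this waits... */
    (void) LockAcquire(&tag, ShareLock, false, false);

    /* ...and once granted, the lock itself is no longer needed. */
    LockRelease(&tag, ShareLock, false);
}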

◆ SET_LOCKTAG_TUPLE

#define SET_LOCKTAG_TUPLE(locktag, dboid, reloid, blocknum, offnum)
Value:
((locktag).locktag_field1 = (dboid), \
(locktag).locktag_field2 = (reloid), \
(locktag).locktag_field3 = (blocknum), \
(locktag).locktag_field4 = (offnum), \
(locktag).locktag_type = LOCKTAG_TUPLE, \
(locktag).locktag_lockmethodid = DEFAULT_LOCKMETHOD)

Definition at line 220 of file lock.h.

◆ SET_LOCKTAG_VIRTUALTRANSACTION

#define SET_LOCKTAG_VIRTUALTRANSACTION(locktag, vxid)
Value:
((locktag).locktag_field1 = (vxid).backendId, \
(locktag).locktag_field2 = (vxid).localTransactionId, \
(locktag).locktag_field3 = 0, \
(locktag).locktag_field4 = 0, \
(locktag).locktag_type = LOCKTAG_VIRTUALTRANSACTION, \
(locktag).locktag_lockmethodid = DEFAULT_LOCKMETHOD)

Definition at line 238 of file lock.h.

◆ SetInvalidVirtualTransactionId

#define SetInvalidVirtualTransactionId (   vxid)
Value:
((vxid).backendId = InvalidBackendId, \
(vxid).localTransactionId = InvalidLocalTransactionId)

Definition at line 79 of file lock.h.

◆ USER_LOCKMETHOD

#define USER_LOCKMETHOD   2

Definition at line 131 of file lock.h.

◆ VirtualTransactionIdEquals

#define VirtualTransactionIdEquals(vxid1, vxid2)
Value:
((vxid1).backendId == (vxid2).backendId && \
(vxid1).localTransactionId == (vxid2).localTransactionId)

Definition at line 76 of file lock.h.

◆ VirtualTransactionIdIsRecoveredPreparedXact

#define VirtualTransactionIdIsRecoveredPreparedXact (   vxid)     ((vxid).backendId == InvalidBackendId)

Definition at line 74 of file lock.h.

◆ VirtualTransactionIdIsValid

#define VirtualTransactionIdIsValid (   vxid)     (LocalTransactionIdIsValid((vxid).localTransactionId))

Definition at line 72 of file lock.h.

Typedef Documentation

◆ BlockedProcData

◆ BlockedProcsData

◆ LOCALLOCK

typedef struct LOCALLOCK LOCALLOCK

◆ LOCALLOCKOWNER

◆ LOCALLOCKTAG

typedef struct LOCALLOCKTAG LOCALLOCKTAG

◆ LOCK

typedef struct LOCK LOCK

◆ LockData

typedef struct LockData LockData

◆ LockInstanceData

◆ LockMethod

typedef const LockMethodData* LockMethod

Definition at line 121 of file lock.h.

◆ LockMethodData

◆ LOCKMETHODID

Definition at line 127 of file lock.h.

◆ LOCKTAG

typedef struct LOCKTAG LOCKTAG

◆ LockTagType

typedef enum LockTagType LockTagType

◆ PGPROC

typedef struct PGPROC PGPROC

Definition at line 1 of file lock.h.

◆ PROC_QUEUE

typedef struct PROC_QUEUE PROC_QUEUE

◆ PROCLOCK

typedef struct PROCLOCK PROCLOCK

◆ PROCLOCKTAG

typedef struct PROCLOCKTAG PROCLOCKTAG

Enumeration Type Documentation

◆ DeadLockState

Enumerator
DS_NOT_YET_CHECKED 
DS_NO_DEADLOCK 
DS_SOFT_DEADLOCK 
DS_HARD_DEADLOCK 
DS_BLOCKED_BY_AUTOVACUUM 

Definition at line 501 of file lock.h.

502 {
503  DS_NOT_YET_CHECKED, /* no deadlock check has run yet */
504  DS_NO_DEADLOCK, /* no deadlock detected */
505  DS_SOFT_DEADLOCK, /* deadlock avoided by queue rearrangement */
506  DS_HARD_DEADLOCK, /* deadlock, no way out but ERROR */
507  DS_BLOCKED_BY_AUTOVACUUM /* no deadlock; queue blocked by autovacuum
508  * worker */
509 } DeadLockState;

◆ LockAcquireResult

Enumerator
LOCKACQUIRE_NOT_AVAIL 
LOCKACQUIRE_OK 
LOCKACQUIRE_ALREADY_HELD 
LOCKACQUIRE_ALREADY_CLEAR 

Definition at line 492 of file lock.h.

493 {
494  LOCKACQUIRE_NOT_AVAIL, /* lock not available, and dontWait=true */
495  LOCKACQUIRE_OK, /* lock successfully acquired */
496  LOCKACQUIRE_ALREADY_HELD, /* incremented count for lock already held */
 497  LOCKACQUIRE_ALREADY_CLEAR /* incremented count for lock already clear */
 498 } LockAcquireResult;
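
A brief sketch of how callers typically interpret these result codes when passing dontWait = true to LockAcquire (the wrapper is hypothetical):

#include "postgres.h"
#include "storage/lock.h"

/* Hypothetical helper: conditional acquire, returning false if the lock is busy. */
static bool
try_lock(const LOCKTAG *tag, LOCKMODE mode)
{
    LockAcquireResult res;

    res = LockAcquire(tag, mode,
                      false,    /* sessionLock */
                      true);    /* dontWait */

    if (res == LOCKACQUIRE_NOT_AVAIL)
        return false;           /* a conflicting lock is held elsewhere */

    /* LOCKACQUIRE_OK, _ALREADY_HELD and _ALREADY_CLEAR all mean we hold it. */
    return true;
}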

◆ LockTagType

Enumerator
LOCKTAG_RELATION 
LOCKTAG_RELATION_EXTEND 
LOCKTAG_DATABASE_FROZEN_IDS 
LOCKTAG_PAGE 
LOCKTAG_TUPLE 
LOCKTAG_TRANSACTION 
LOCKTAG_VIRTUALTRANSACTION 
LOCKTAG_SPECULATIVE_TOKEN 
LOCKTAG_OBJECT 
LOCKTAG_USERLOCK 
LOCKTAG_ADVISORY 

Definition at line 140 of file lock.h.

141 {
142  LOCKTAG_RELATION, /* whole relation */
143  LOCKTAG_RELATION_EXTEND, /* the right to extend a relation */
144  LOCKTAG_DATABASE_FROZEN_IDS, /* pg_database.datfrozenxid */
145  LOCKTAG_PAGE, /* one page of a relation */
146  LOCKTAG_TUPLE, /* one physical tuple */
147  LOCKTAG_TRANSACTION, /* transaction (for waiting for xact done) */
148  LOCKTAG_VIRTUALTRANSACTION, /* virtual transaction (ditto) */
149  LOCKTAG_SPECULATIVE_TOKEN, /* speculative insertion Xid and token */
150  LOCKTAG_OBJECT, /* non-relation database object */
151  LOCKTAG_USERLOCK, /* reserved for old contrib/userlock code */
152  LOCKTAG_ADVISORY /* advisory user locks */
153 } LockTagType;
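
The enum is kept in step with LOCKTAG_LAST_TYPE and the LockTagTypeNames array declared in this header, so the tag types can be enumerated; a small hypothetical sketch:

#include "postgres.h"
#include "storage/lock.h"

/* Hypothetical helper: log the name of every known lock tag type. */
static void
print_locktag_types(void)
{
    int         i;

    for (i = 0; i <= (int) LOCKTAG_LAST_TYPE; i++)
        elog(DEBUG1, "lock tag type %d: %s", i, LockTagTypeNames[i]);
}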

Function Documentation

◆ AbortStrongLockAcquire()

void AbortStrongLockAcquire ( void  )

Definition at line 1757 of file lock.c.

1758 {
1759  uint32 fasthashcode;
1760  LOCALLOCK *locallock = StrongLockInProgress;
1761 
1762  if (locallock == NULL)
1763  return;
1764 
1765  fasthashcode = FastPathStrongLockHashPartition(locallock->hashcode);
1766  Assert(locallock->holdsStrongLockCount == true);
1768  Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
1769  FastPathStrongRelationLocks->count[fasthashcode]--;
1770  locallock->holdsStrongLockCount = false;
1771  StrongLockInProgress = NULL;
1773 }

References Assert(), FastPathStrongRelationLockData::count, FastPathStrongLockHashPartition, FastPathStrongRelationLocks, LOCALLOCK::hashcode, LOCALLOCK::holdsStrongLockCount, FastPathStrongRelationLockData::mutex, SpinLockAcquire, SpinLockRelease, and StrongLockInProgress.

Referenced by LockAcquireExtended(), and LockErrorCleanup().

◆ AtPrepare_Locks()

void AtPrepare_Locks ( void  )

Definition at line 3321 of file lock.c.

3322 {
3324  LOCALLOCK *locallock;
3325 
3326  /* First, verify there aren't locks of both xact and session level */
3328 
3329  /* Now do the per-locallock cleanup work */
3331 
3332  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3333  {
3334  TwoPhaseLockRecord record;
3335  LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3336  bool haveSessionLock;
3337  bool haveXactLock;
3338  int i;
3339 
3340  /*
3341  * Ignore VXID locks. We don't want those to be held by prepared
3342  * transactions, since they aren't meaningful after a restart.
3343  */
3344  if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3345  continue;
3346 
3347  /* Ignore it if we don't actually hold the lock */
3348  if (locallock->nLocks <= 0)
3349  continue;
3350 
3351  /* Scan to see whether we hold it at session or transaction level */
3352  haveSessionLock = haveXactLock = false;
3353  for (i = locallock->numLockOwners - 1; i >= 0; i--)
3354  {
3355  if (lockOwners[i].owner == NULL)
3356  haveSessionLock = true;
3357  else
3358  haveXactLock = true;
3359  }
3360 
3361  /* Ignore it if we have only session lock */
3362  if (!haveXactLock)
3363  continue;
3364 
3365  /* This can't happen, because we already checked it */
3366  if (haveSessionLock)
3367  ereport(ERROR,
3368  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3369  errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3370 
3371  /*
3372  * If the local lock was taken via the fast-path, we need to move it
3373  * to the primary lock table, or just get a pointer to the existing
3374  * primary lock table entry if by chance it's already been
3375  * transferred.
3376  */
3377  if (locallock->proclock == NULL)
3378  {
3379  locallock->proclock = FastPathGetRelationLockEntry(locallock);
3380  locallock->lock = locallock->proclock->tag.myLock;
3381  }
3382 
3383  /*
3384  * Arrange to not release any strong lock count held by this lock
3385  * entry. We must retain the count until the prepared transaction is
3386  * committed or rolled back.
3387  */
3388  locallock->holdsStrongLockCount = false;
3389 
3390  /*
3391  * Create a 2PC record.
3392  */
3393  memcpy(&(record.locktag), &(locallock->tag.lock), sizeof(LOCKTAG));
3394  record.lockmode = locallock->tag.mode;
3395 
3397  &record, sizeof(TwoPhaseLockRecord));
3398  }
3399 }

References CheckForSessionAndXactLocks(), ereport, errcode(), errmsg(), ERROR, FastPathGetRelationLockEntry(), hash_seq_init(), hash_seq_search(), LOCALLOCK::holdsStrongLockCount, i, LOCALLOCKTAG::lock, LOCALLOCK::lock, LockMethodLocalHash, TwoPhaseLockRecord::lockmode, LOCALLOCK::lockOwners, TwoPhaseLockRecord::locktag, LOCKTAG::locktag_type, LOCKTAG_VIRTUALTRANSACTION, LOCALLOCKTAG::mode, PROCLOCKTAG::myLock, LOCALLOCK::nLocks, LOCALLOCK::numLockOwners, LOCALLOCK::proclock, RegisterTwoPhaseRecord(), status(), PROCLOCK::tag, LOCALLOCK::tag, and TWOPHASE_RM_LOCK_ID.

Referenced by PrepareTransaction().

◆ DeadLockCheck()

DeadLockState DeadLockCheck ( PGPROC * proc)

Definition at line 217 of file deadlock.c.

218 {
219  int i,
220  j;
221 
222  /* Initialize to "no constraints" */
223  nCurConstraints = 0;
225  nWaitOrders = 0;
226 
227  /* Initialize to not blocked by an autovacuum worker */
229 
230  /* Search for deadlocks and possible fixes */
231  if (DeadLockCheckRecurse(proc))
232  {
233  /*
234  * Call FindLockCycle one more time, to record the correct
235  * deadlockDetails[] for the basic state with no rearrangements.
236  */
237  int nSoftEdges;
238 
239  TRACE_POSTGRESQL_DEADLOCK_FOUND();
240 
241  nWaitOrders = 0;
242  if (!FindLockCycle(proc, possibleConstraints, &nSoftEdges))
243  elog(FATAL, "deadlock seems to have disappeared");
244 
245  return DS_HARD_DEADLOCK; /* cannot find a non-deadlocked state */
246  }
247 
248  /* Apply any needed rearrangements of wait queues */
249  for (i = 0; i < nWaitOrders; i++)
250  {
251  LOCK *lock = waitOrders[i].lock;
252  PGPROC **procs = waitOrders[i].procs;
253  int nProcs = waitOrders[i].nProcs;
254  PROC_QUEUE *waitQueue = &(lock->waitProcs);
255 
256  Assert(nProcs == waitQueue->size);
257 
258 #ifdef DEBUG_DEADLOCK
259  PrintLockQueue(lock, "DeadLockCheck:");
260 #endif
261 
262  /* Reset the queue and re-add procs in the desired order */
263  ProcQueueInit(waitQueue);
264  for (j = 0; j < nProcs; j++)
265  {
266  SHMQueueInsertBefore(&(waitQueue->links), &(procs[j]->links));
267  waitQueue->size++;
268  }
269 
270 #ifdef DEBUG_DEADLOCK
271  PrintLockQueue(lock, "rearranged to:");
272 #endif
273 
274  /* See if any waiters for the lock can be woken up now */
275  ProcLockWakeup(GetLocksMethodTable(lock), lock);
276  }
277 
278  /* Return code tells caller if we had to escape a deadlock or not */
279  if (nWaitOrders > 0)
280  return DS_SOFT_DEADLOCK;
281  else if (blocking_autovacuum_proc != NULL)
283  else
284  return DS_NO_DEADLOCK;
285 }

References Assert(), blocking_autovacuum_proc, DeadLockCheckRecurse(), DS_BLOCKED_BY_AUTOVACUUM, DS_HARD_DEADLOCK, DS_NO_DEADLOCK, DS_SOFT_DEADLOCK, elog, FATAL, FindLockCycle(), GetLocksMethodTable(), i, j, PROC_QUEUE::links, PGPROC::links, WAIT_ORDER::lock, nCurConstraints, nPossibleConstraints, WAIT_ORDER::nProcs, nWaitOrders, possibleConstraints, ProcLockWakeup(), ProcQueueInit(), WAIT_ORDER::procs, SHMQueueInsertBefore(), PROC_QUEUE::size, waitOrders, and LOCK::waitProcs.

Referenced by CheckDeadLock().

◆ DeadLockReport()

void DeadLockReport ( void  )

Definition at line 1090 of file deadlock.c.

1091 {
1092  StringInfoData clientbuf; /* errdetail for client */
1093  StringInfoData logbuf; /* errdetail for server log */
1094  StringInfoData locktagbuf;
1095  int i;
1096 
1097  initStringInfo(&clientbuf);
1098  initStringInfo(&logbuf);
1099  initStringInfo(&locktagbuf);
1100 
1101  /* Generate the "waits for" lines sent to the client */
1102  for (i = 0; i < nDeadlockDetails; i++)
1103  {
1104  DEADLOCK_INFO *info = &deadlockDetails[i];
1105  int nextpid;
1106 
1107  /* The last proc waits for the first one... */
1108  if (i < nDeadlockDetails - 1)
1109  nextpid = info[1].pid;
1110  else
1111  nextpid = deadlockDetails[0].pid;
1112 
1113  /* reset locktagbuf to hold next object description */
1114  resetStringInfo(&locktagbuf);
1115 
1116  DescribeLockTag(&locktagbuf, &info->locktag);
1117 
1118  if (i > 0)
1119  appendStringInfoChar(&clientbuf, '\n');
1120 
1121  appendStringInfo(&clientbuf,
1122  _("Process %d waits for %s on %s; blocked by process %d."),
1123  info->pid,
1125  info->lockmode),
1126  locktagbuf.data,
1127  nextpid);
1128  }
1129 
1130  /* Duplicate all the above for the server ... */
1131  appendBinaryStringInfo(&logbuf, clientbuf.data, clientbuf.len);
1132 
1133  /* ... and add info about query strings */
1134  for (i = 0; i < nDeadlockDetails; i++)
1135  {
1136  DEADLOCK_INFO *info = &deadlockDetails[i];
1137 
1138  appendStringInfoChar(&logbuf, '\n');
1139 
1140  appendStringInfo(&logbuf,
1141  _("Process %d: %s"),
1142  info->pid,
1144  }
1145 
1147 
1148  ereport(ERROR,
1149  (errcode(ERRCODE_T_R_DEADLOCK_DETECTED),
1150  errmsg("deadlock detected"),
1151  errdetail_internal("%s", clientbuf.data),
1152  errdetail_log("%s", logbuf.data),
1153  errhint("See server log for query details.")));
1154 }

References _, appendBinaryStringInfo(), appendStringInfo(), appendStringInfoChar(), StringInfoData::data, deadlockDetails, DescribeLockTag(), ereport, errcode(), errdetail_internal(), errdetail_log(), errhint(), errmsg(), ERROR, GetLockmodeName(), i, initStringInfo(), StringInfoData::len, DEADLOCK_INFO::lockmode, DEADLOCK_INFO::locktag, LOCKTAG::locktag_lockmethodid, nDeadlockDetails, pgstat_get_backend_current_activity(), pgstat_report_deadlock(), DEADLOCK_INFO::pid, and resetStringInfo().

Referenced by WaitOnLock().

◆ DoLockModesConflict()

bool DoLockModesConflict ( LOCKMODE mode1, LOCKMODE mode2 )

Definition at line 583 of file lock.c.

584 {
585  LockMethod lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];
586 
587  if (lockMethodTable->conflictTab[mode1] & LOCKBIT_ON(mode2))
588  return true;
589 
590  return false;
591 }

References LockMethodData::conflictTab, DEFAULT_LOCKMETHOD, LOCKBIT_ON, and LockMethods.

Referenced by Do_MultiXactIdWait(), DoesMultiXactIdConflict(), initialize_reloptions(), and test_lockmode_for_conflict().
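
A tiny assertion-style sketch of what the default conflict table yields for two well-known mode pairs (the checking function is hypothetical):

#include "postgres.h"
#include "storage/lock.h"

/* Hypothetical check of two entries in the default conflict table. */
static void
conflict_examples(void)
{
    /* AccessExclusiveLock conflicts with every other mode... */
    Assert(DoLockModesConflict(AccessExclusiveLock, AccessShareLock));

    /* ...while plain readers and row-level writers do not block each other. */
    Assert(!DoLockModesConflict(AccessShareLock, RowExclusiveLock));
}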

◆ GetBlockerStatusData()

BlockedProcsData* GetBlockerStatusData ( int  blocked_pid)

Definition at line 3838 of file lock.c.

3839 {
3841  PGPROC *proc;
3842  int i;
3843 
3845 
3846  /*
3847  * Guess how much space we'll need, and preallocate. Most of the time
3848  * this will avoid needing to do repalloc while holding the LWLocks. (We
3849  * assume, but check with an Assert, that MaxBackends is enough entries
3850  * for the procs[] array; the other two could need enlargement, though.)
3851  */
3852  data->nprocs = data->nlocks = data->npids = 0;
3853  data->maxprocs = data->maxlocks = data->maxpids = MaxBackends;
3854  data->procs = (BlockedProcData *) palloc(sizeof(BlockedProcData) * data->maxprocs);
3855  data->locks = (LockInstanceData *) palloc(sizeof(LockInstanceData) * data->maxlocks);
3856  data->waiter_pids = (int *) palloc(sizeof(int) * data->maxpids);
3857 
3858  /*
3859  * In order to search the ProcArray for blocked_pid and assume that that
3860  * entry won't immediately disappear under us, we must hold ProcArrayLock.
3861  * In addition, to examine the lock grouping fields of any other backend,
3862  * we must hold all the hash partition locks. (Only one of those locks is
3863  * actually relevant for any one lock group, but we can't know which one
3864  * ahead of time.) It's fairly annoying to hold all those locks
3865  * throughout this, but it's no worse than GetLockStatusData(), and it
3866  * does have the advantage that we're guaranteed to return a
3867  * self-consistent instantaneous state.
3868  */
3869  LWLockAcquire(ProcArrayLock, LW_SHARED);
3870 
3871  proc = BackendPidGetProcWithLock(blocked_pid);
3872 
3873  /* Nothing to do if it's gone */
3874  if (proc != NULL)
3875  {
3876  /*
3877  * Acquire lock on the entire shared lock data structure. See notes
3878  * in GetLockStatusData().
3879  */
3880  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
3882 
3883  if (proc->lockGroupLeader == NULL)
3884  {
3885  /* Easy case, proc is not a lock group member */
3887  }
3888  else
3889  {
3890  /* Examine all procs in proc's lock group */
3891  dlist_iter iter;
3892 
3894  {
3895  PGPROC *memberProc;
3896 
3897  memberProc = dlist_container(PGPROC, lockGroupLink, iter.cur);
3898  GetSingleProcBlockerStatusData(memberProc, data);
3899  }
3900  }
3901 
3902  /*
3903  * And release locks. See notes in GetLockStatusData().
3904  */
3905  for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
3907 
3908  Assert(data->nprocs <= data->maxprocs);
3909  }
3910 
3911  LWLockRelease(ProcArrayLock);
3912 
3913  return data;
3914 }

References Assert(), BackendPidGetProcWithLock(), dlist_iter::cur, data, dlist_container, dlist_foreach, GetSingleProcBlockerStatusData(), i, PGPROC::lockGroupLeader, PGPROC::lockGroupMembers, LockHashPartitionLockByIndex, LW_SHARED, LWLockAcquire(), LWLockRelease(), MaxBackends, NUM_LOCK_PARTITIONS, and palloc().

Referenced by pg_blocking_pids().

◆ GetBlockingAutoVacuumPgproc()

PGPROC* GetBlockingAutoVacuumPgproc ( void  )

Definition at line 293 of file deadlock.c.

294 {
295  PGPROC *ptr;
296 
299 
300  return ptr;
301 }

References blocking_autovacuum_proc.

Referenced by ProcSleep().

◆ GetLockConflicts()

VirtualTransactionId * GetLockConflicts ( const LOCKTAG * locktag, LOCKMODE lockmode, int * countp )

Definition at line 2914 of file lock.c.

2915 {
2916  static VirtualTransactionId *vxids;
2917  LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
2918  LockMethod lockMethodTable;
2919  LOCK *lock;
2920  LOCKMASK conflictMask;
2921  SHM_QUEUE *procLocks;
2922  PROCLOCK *proclock;
2923  uint32 hashcode;
2924  LWLock *partitionLock;
2925  int count = 0;
2926  int fast_count = 0;
2927 
2928  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2929  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2930  lockMethodTable = LockMethods[lockmethodid];
2931  if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
2932  elog(ERROR, "unrecognized lock mode: %d", lockmode);
2933 
2934  /*
2935  * Allocate memory to store results, and fill with InvalidVXID. We only
2936  * need enough space for MaxBackends + max_prepared_xacts + a terminator.
2937  * InHotStandby allocate once in TopMemoryContext.
2938  */
2939  if (InHotStandby)
2940  {
2941  if (vxids == NULL)
2942  vxids = (VirtualTransactionId *)
2944  sizeof(VirtualTransactionId) *
2946  }
2947  else
2948  vxids = (VirtualTransactionId *)
2949  palloc0(sizeof(VirtualTransactionId) *
2951 
2952  /* Compute hash code and partition lock, and look up conflicting modes. */
2953  hashcode = LockTagHashCode(locktag);
2954  partitionLock = LockHashPartitionLock(hashcode);
2955  conflictMask = lockMethodTable->conflictTab[lockmode];
2956 
2957  /*
2958  * Fast path locks might not have been entered in the primary lock table.
2959  * If the lock we're dealing with could conflict with such a lock, we must
2960  * examine each backend's fast-path array for conflicts.
2961  */
2962  if (ConflictsWithRelationFastPath(locktag, lockmode))
2963  {
2964  int i;
2965  Oid relid = locktag->locktag_field2;
2966  VirtualTransactionId vxid;
2967 
2968  /*
2969  * Iterate over relevant PGPROCs. Anything held by a prepared
2970  * transaction will have been transferred to the primary lock table,
2971  * so we need not worry about those. This is all a bit fuzzy, because
2972  * new locks could be taken after we've visited a particular
2973  * partition, but the callers had better be prepared to deal with that
2974  * anyway, since the locks could equally well be taken between the
2975  * time we return the value and the time the caller does something
2976  * with it.
2977  */
2978  for (i = 0; i < ProcGlobal->allProcCount; i++)
2979  {
2980  PGPROC *proc = &ProcGlobal->allProcs[i];
2981  uint32 f;
2982 
2983  /* A backend never blocks itself */
2984  if (proc == MyProc)
2985  continue;
2986 
2988 
2989  /*
2990  * If the target backend isn't referencing the same database as
2991  * the lock, then we needn't examine the individual relation IDs
2992  * at all; none of them can be relevant.
2993  *
2994  * See FastPathTransferRelationLocks() for discussion of why we do
2995  * this test after acquiring the lock.
2996  */
2997  if (proc->databaseId != locktag->locktag_field1)
2998  {
2999  LWLockRelease(&proc->fpInfoLock);
3000  continue;
3001  }
3002 
3003  for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
3004  {
3005  uint32 lockmask;
3006 
3007  /* Look for an allocated slot matching the given relid. */
3008  if (relid != proc->fpRelId[f])
3009  continue;
3010  lockmask = FAST_PATH_GET_BITS(proc, f);
3011  if (!lockmask)
3012  continue;
3013  lockmask <<= FAST_PATH_LOCKNUMBER_OFFSET;
3014 
3015  /*
3016  * There can only be one entry per relation, so if we found it
3017  * and it doesn't conflict, we can skip the rest of the slots.
3018  */
3019  if ((lockmask & conflictMask) == 0)
3020  break;
3021 
3022  /* Conflict! */
3023  GET_VXID_FROM_PGPROC(vxid, *proc);
3024 
3025  if (VirtualTransactionIdIsValid(vxid))
3026  vxids[count++] = vxid;
3027  /* else, xact already committed or aborted */
3028 
3029  /* No need to examine remaining slots. */
3030  break;
3031  }
3032 
3033  LWLockRelease(&proc->fpInfoLock);
3034  }
3035  }
3036 
3037  /* Remember how many fast-path conflicts we found. */
3038  fast_count = count;
3039 
3040  /*
3041  * Look up the lock object matching the tag.
3042  */
3043  LWLockAcquire(partitionLock, LW_SHARED);
3044 
3046  (const void *) locktag,
3047  hashcode,
3048  HASH_FIND,
3049  NULL);
3050  if (!lock)
3051  {
3052  /*
3053  * If the lock object doesn't exist, there is nothing holding a lock
3054  * on this lockable object.
3055  */
3056  LWLockRelease(partitionLock);
3057  vxids[count].backendId = InvalidBackendId;
3059  if (countp)
3060  *countp = count;
3061  return vxids;
3062  }
3063 
3064  /*
3065  * Examine each existing holder (or awaiter) of the lock.
3066  */
3067 
3068  procLocks = &(lock->procLocks);
3069 
3070  proclock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks,
3071  offsetof(PROCLOCK, lockLink));
3072 
3073  while (proclock)
3074  {
3075  if (conflictMask & proclock->holdMask)
3076  {
3077  PGPROC *proc = proclock->tag.myProc;
3078 
3079  /* A backend never blocks itself */
3080  if (proc != MyProc)
3081  {
3082  VirtualTransactionId vxid;
3083 
3084  GET_VXID_FROM_PGPROC(vxid, *proc);
3085 
3086  if (VirtualTransactionIdIsValid(vxid))
3087  {
3088  int i;
3089 
3090  /* Avoid duplicate entries. */
3091  for (i = 0; i < fast_count; ++i)
3092  if (VirtualTransactionIdEquals(vxids[i], vxid))
3093  break;
3094  if (i >= fast_count)
3095  vxids[count++] = vxid;
3096  }
3097  /* else, xact already committed or aborted */
3098  }
3099  }
3100 
3101  proclock = (PROCLOCK *) SHMQueueNext(procLocks, &proclock->lockLink,
3102  offsetof(PROCLOCK, lockLink));
3103  }
3104 
3105  LWLockRelease(partitionLock);
3106 
3107  if (count > MaxBackends + max_prepared_xacts) /* should never happen */
3108  elog(PANIC, "too many conflicting locks found");
3109 
3110  vxids[count].backendId = InvalidBackendId;
3112  if (countp)
3113  *countp = count;
3114  return vxids;
3115 }

References PROC_HDR::allProcCount, PROC_HDR::allProcs, VirtualTransactionId::backendId, ConflictsWithRelationFastPath, LockMethodData::conflictTab, PGPROC::databaseId, elog, ERROR, FAST_PATH_GET_BITS, FAST_PATH_LOCKNUMBER_OFFSET, FP_LOCK_SLOTS_PER_BACKEND, PGPROC::fpInfoLock, PGPROC::fpRelId, GET_VXID_FROM_PGPROC, HASH_FIND, hash_search_with_hash_value(), PROCLOCK::holdMask, i, InHotStandby, InvalidBackendId, InvalidLocalTransactionId, lengthof, VirtualTransactionId::localTransactionId, LockHashPartitionLock, PROCLOCK::lockLink, LockMethodLockHash, LockMethods, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_lockmethodid, LockTagHashCode(), LW_SHARED, LWLockAcquire(), LWLockRelease(), max_prepared_xacts, MaxBackends, MemoryContextAlloc(), MyProc, PROCLOCKTAG::myProc, LockMethodData::numLockModes, offsetof, palloc0(), PANIC, ProcGlobal, LOCK::procLocks, SHMQueueNext(), PROCLOCK::tag, TopMemoryContext, VirtualTransactionIdEquals, and VirtualTransactionIdIsValid.

Referenced by ProcSleep(), ResolveRecoveryConflictWithLock(), and WaitForLockersMultiple().
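
A sketch of how callers such as WaitForLockersMultiple() consume the result: the returned array is terminated by an invalid vxid, and countp may be NULL (the reporting helper here is hypothetical):

#include "postgres.h"
#include "miscadmin.h"          /* for MyDatabaseId */
#include "storage/lock.h"

/* Hypothetical helper: log vxids holding locks that would conflict with
 * AccessExclusiveLock on the given relation. */
static void
report_conflicting_vxids(Oid reloid)
{
    LOCKTAG     tag;
    VirtualTransactionId *vxids;
    int         i;

    SET_LOCKTAG_RELATION(tag, MyDatabaseId, reloid);
    vxids = GetLockConflicts(&tag, AccessExclusiveLock, NULL);

    for (i = 0; VirtualTransactionIdIsValid(vxids[i]); i++)
        elog(LOG, "conflicting vxid %d/%u",
             vxids[i].backendId, vxids[i].localTransactionId);
}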

◆ GetLockmodeName()

const char * GetLockmodeName ( LOCKMETHODID lockmethodid, LOCKMODE mode )

Definition at line 4100 of file lock.c.

4101 {
4102  Assert(lockmethodid > 0 && lockmethodid < lengthof(LockMethods));
4103  Assert(mode > 0 && mode <= LockMethods[lockmethodid]->numLockModes);
4104  return LockMethods[lockmethodid]->lockModeNames[mode];
4105 }

References Assert(), lengthof, LockMethods, LockMethodData::lockModeNames, and mode.

Referenced by CheckRelationLockedByMe(), DeadLockReport(), pg_lock_status(), and ProcSleep().
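
A one-line usage sketch, translating a numeric mode from the default lock method into its display name (e.g. "AccessShareLock"); the wrapper is hypothetical:

#include "postgres.h"
#include "storage/lock.h"

/* Hypothetical wrapper around GetLockmodeName for the default lock method. */
static const char *
default_mode_name(LOCKMODE mode)
{
    return GetLockmodeName(DEFAULT_LOCKMETHOD, mode);
}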

◆ GetLocksMethodTable()

LockMethod GetLocksMethodTable ( const LOCK * lock )

Definition at line 487 of file lock.c.

488 {
489  LOCKMETHODID lockmethodid = LOCK_LOCKMETHOD(*lock);
490 
491  Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
492  return LockMethods[lockmethodid];
493 }

References Assert(), lengthof, LOCK_LOCKMETHOD, and LockMethods.

Referenced by DeadLockCheck(), and FindLockCycleRecurseMember().

◆ GetLockStatusData()

LockData* GetLockStatusData ( void  )

Definition at line 3646 of file lock.c.

3647 {
3648  LockData *data;
3649  PROCLOCK *proclock;
3650  HASH_SEQ_STATUS seqstat;
3651  int els;
3652  int el;
3653  int i;
3654 
3655  data = (LockData *) palloc(sizeof(LockData));
3656 
3657  /* Guess how much space we'll need. */
3658  els = MaxBackends;
3659  el = 0;
3660  data->locks = (LockInstanceData *) palloc(sizeof(LockInstanceData) * els);
3661 
3662  /*
3663  * First, we iterate through the per-backend fast-path arrays, locking
3664  * them one at a time. This might produce an inconsistent picture of the
3665  * system state, but taking all of those LWLocks at the same time seems
3666  * impractical (in particular, note MAX_SIMUL_LWLOCKS). It shouldn't
3667  * matter too much, because none of these locks can be involved in lock
3668  * conflicts anyway - anything that might must be present in the main lock
3669  * table. (For the same reason, we don't sweat about making leaderPid
3670  * completely valid. We cannot safely dereference another backend's
3671  * lockGroupLeader field without holding all lock partition locks, and
3672  * it's not worth that.)
3673  */
3674  for (i = 0; i < ProcGlobal->allProcCount; ++i)
3675  {
3676  PGPROC *proc = &ProcGlobal->allProcs[i];
3677  uint32 f;
3678 
3680 
3681  for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; ++f)
3682  {
3683  LockInstanceData *instance;
3684  uint32 lockbits = FAST_PATH_GET_BITS(proc, f);
3685 
3686  /* Skip unallocated slots. */
3687  if (!lockbits)
3688  continue;
3689 
3690  if (el >= els)
3691  {
3692  els += MaxBackends;
3693  data->locks = (LockInstanceData *)
3694  repalloc(data->locks, sizeof(LockInstanceData) * els);
3695  }
3696 
3697  instance = &data->locks[el];
3698  SET_LOCKTAG_RELATION(instance->locktag, proc->databaseId,
3699  proc->fpRelId[f]);
3700  instance->holdMask = lockbits << FAST_PATH_LOCKNUMBER_OFFSET;
3701  instance->waitLockMode = NoLock;
3702  instance->backend = proc->backendId;
3703  instance->lxid = proc->lxid;
3704  instance->pid = proc->pid;
3705  instance->leaderPid = proc->pid;
3706  instance->fastpath = true;
3707 
3708  /*
3709  * Successfully taking fast path lock means there were no
3710  * conflicting locks.
3711  */
3712  instance->waitStart = 0;
3713 
3714  el++;
3715  }
3716 
3717  if (proc->fpVXIDLock)
3718  {
3719  VirtualTransactionId vxid;
3720  LockInstanceData *instance;
3721 
3722  if (el >= els)
3723  {
3724  els += MaxBackends;
3725  data->locks = (LockInstanceData *)
3726  repalloc(data->locks, sizeof(LockInstanceData) * els);
3727  }
3728 
3729  vxid.backendId = proc->backendId;
3731 
3732  instance = &data->locks[el];
3733  SET_LOCKTAG_VIRTUALTRANSACTION(instance->locktag, vxid);
3734  instance->holdMask = LOCKBIT_ON(ExclusiveLock);
3735  instance->waitLockMode = NoLock;
3736  instance->backend = proc->backendId;
3737  instance->lxid = proc->lxid;
3738  instance->pid = proc->pid;
3739  instance->leaderPid = proc->pid;
3740  instance->fastpath = true;
3741  instance->waitStart = 0;
3742 
3743  el++;
3744  }
3745 
3746  LWLockRelease(&proc->fpInfoLock);
3747  }
3748 
3749  /*
3750  * Next, acquire lock on the entire shared lock data structure. We do
3751  * this so that, at least for locks in the primary lock table, the state
3752  * will be self-consistent.
3753  *
3754  * Since this is a read-only operation, we take shared instead of
3755  * exclusive lock. There's not a whole lot of point to this, because all
3756  * the normal operations require exclusive lock, but it doesn't hurt
3757  * anything either. It will at least allow two backends to do
3758  * GetLockStatusData in parallel.
3759  *
3760  * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
3761  */
3762  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
3764 
3765  /* Now we can safely count the number of proclocks */
3767  if (data->nelements > els)
3768  {
3769  els = data->nelements;
3770  data->locks = (LockInstanceData *)
3771  repalloc(data->locks, sizeof(LockInstanceData) * els);
3772  }
3773 
3774  /* Now scan the tables to copy the data */
3776 
3777  while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
3778  {
3779  PGPROC *proc = proclock->tag.myProc;
3780  LOCK *lock = proclock->tag.myLock;
3781  LockInstanceData *instance = &data->locks[el];
3782 
3783  memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
3784  instance->holdMask = proclock->holdMask;
3785  if (proc->waitLock == proclock->tag.myLock)
3786  instance->waitLockMode = proc->waitLockMode;
3787  else
3788  instance->waitLockMode = NoLock;
3789  instance->backend = proc->backendId;
3790  instance->lxid = proc->lxid;
3791  instance->pid = proc->pid;
3792  instance->leaderPid = proclock->groupLeader->pid;
3793  instance->fastpath = false;
3794  instance->waitStart = (TimestampTz) pg_atomic_read_u64(&proc->waitStart);
3795 
3796  el++;
3797  }
3798 
3799  /*
3800  * And release locks. We do this in reverse order for two reasons: (1)
3801  * Anyone else who needs more than one of the locks will be trying to lock
3802  * them in increasing order; we don't want to release the other process
3803  * until it can get all the locks it needs. (2) This avoids O(N^2)
3804  * behavior inside LWLockRelease.
3805  */
3806  for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
3808 
3809  Assert(el == data->nelements);
3810 
3811  return data;
3812 }

References PROC_HDR::allProcCount, PROC_HDR::allProcs, Assert(), LockInstanceData::backend, VirtualTransactionId::backendId, PGPROC::backendId, data, PGPROC::databaseId, ExclusiveLock, FAST_PATH_GET_BITS, FAST_PATH_LOCKNUMBER_OFFSET, LockInstanceData::fastpath, FP_LOCK_SLOTS_PER_BACKEND, PGPROC::fpInfoLock, PGPROC::fpLocalTransactionId, PGPROC::fpRelId, PGPROC::fpVXIDLock, PROCLOCK::groupLeader, hash_get_num_entries(), hash_seq_init(), hash_seq_search(), PROCLOCK::holdMask, LockInstanceData::holdMask, i, LockInstanceData::leaderPid, VirtualTransactionId::localTransactionId, LOCKBIT_ON, LockHashPartitionLockByIndex, LockMethodProcLockHash, LockInstanceData::locktag, LW_SHARED, LWLockAcquire(), LWLockRelease(), LockInstanceData::lxid, PGPROC::lxid, MaxBackends, PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, NoLock, NUM_LOCK_PARTITIONS, palloc(), pg_atomic_read_u64(), LockInstanceData::pid, PGPROC::pid, ProcGlobal, repalloc(), SET_LOCKTAG_RELATION, SET_LOCKTAG_VIRTUALTRANSACTION, LOCK::tag, PROCLOCK::tag, PGPROC::waitLock, LockInstanceData::waitLockMode, PGPROC::waitLockMode, LockInstanceData::waitStart, and PGPROC::waitStart.

Referenced by pg_lock_status().

◆ GetLockTagsMethodTable()

LockMethod GetLockTagsMethodTable ( const LOCKTAG * locktag )

Definition at line 499 of file lock.c.

500 {
501  LOCKMETHODID lockmethodid = (LOCKMETHODID) locktag->locktag_lockmethodid;
502 
503  Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
504  return LockMethods[lockmethodid];
505 }

References Assert(), lengthof, LockMethods, and LOCKTAG::locktag_lockmethodid.

Referenced by pg_blocking_pids().

◆ GetRunningTransactionLocks()

xl_standby_lock* GetRunningTransactionLocks ( int *  nlocks)

Definition at line 4018 of file lock.c.

4019 {
4020  xl_standby_lock *accessExclusiveLocks;
4021  PROCLOCK *proclock;
4022  HASH_SEQ_STATUS seqstat;
4023  int i;
4024  int index;
4025  int els;
4026 
4027  /*
4028  * Acquire lock on the entire shared lock data structure.
4029  *
4030  * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
4031  */
4032  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
4034 
4035  /* Now we can safely count the number of proclocks */
4037 
4038  /*
4039  * Allocating enough space for all locks in the lock table is overkill,
4040  * but it's more convenient and faster than having to enlarge the array.
4041  */
4042  accessExclusiveLocks = palloc(els * sizeof(xl_standby_lock));
4043 
4044  /* Now scan the tables to copy the data */
4046 
4047  /*
4048  * If lock is a currently granted AccessExclusiveLock then it will have
4049  * just one proclock holder, so locks are never accessed twice in this
4050  * particular case. Don't copy this code for use elsewhere because in the
4051  * general case this will give you duplicate locks when looking at
4052  * non-exclusive lock types.
4053  */
4054  index = 0;
4055  while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
4056  {
4057  /* make sure this definition matches the one used in LockAcquire */
4058  if ((proclock->holdMask & LOCKBIT_ON(AccessExclusiveLock)) &&
4059  proclock->tag.myLock->tag.locktag_type == LOCKTAG_RELATION)
4060  {
4061  PGPROC *proc = proclock->tag.myProc;
4062  LOCK *lock = proclock->tag.myLock;
4063  TransactionId xid = proc->xid;
4064 
4065  /*
4066  * Don't record locks for transactions if we know they have
4067  * already issued their WAL record for commit but not yet released
4068  * lock. It is still possible that we see locks held by already
4069  * complete transactions, if they haven't yet zeroed their xids.
4070  */
4071  if (!TransactionIdIsValid(xid))
4072  continue;
4073 
4074  accessExclusiveLocks[index].xid = xid;
4075  accessExclusiveLocks[index].dbOid = lock->tag.locktag_field1;
4076  accessExclusiveLocks[index].relOid = lock->tag.locktag_field2;
4077 
4078  index++;
4079  }
4080  }
4081 
4082  Assert(index <= els);
4083 
4084  /*
4085  * And release locks. We do this in reverse order for two reasons: (1)
4086  * Anyone else who needs more than one of the locks will be trying to lock
4087  * them in increasing order; we don't want to release the other process
4088  * until it can get all the locks it needs. (2) This avoids O(N^2)
4089  * behavior inside LWLockRelease.
4090  */
4091  for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
4092  LWLockRelease(LockHashPartitionLockByIndex(i));
4093 
4094  *nlocks = index;
4095  return accessExclusiveLocks;
4096 }
uint32 TransactionId
Definition: c.h:587
#define AccessExclusiveLock
Definition: lockdefs.h:43
TransactionId xid
Definition: proc.h:136
Definition: type.h:90
TransactionId xid
Definition: lockdefs.h:51
#define TransactionIdIsValid(xid)
Definition: transam.h:41

References AccessExclusiveLock, Assert(), xl_standby_lock::dbOid, hash_get_num_entries(), hash_seq_init(), hash_seq_search(), PROCLOCK::holdMask, i, LOCKBIT_ON, LockHashPartitionLockByIndex, LockMethodProcLockHash, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG_RELATION, LOCKTAG::locktag_type, LW_SHARED, LWLockAcquire(), LWLockRelease(), PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, NUM_LOCK_PARTITIONS, palloc(), xl_standby_lock::relOid, LOCK::tag, PROCLOCK::tag, TransactionIdIsValid, xl_standby_lock::xid, and PGPROC::xid.

Referenced by LogStandbySnapshot().
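
A hedged sketch of how a caller might consume the result; the array is palloc'd, so the caller frees it, and the DEBUG logging here is purely illustrative.

    int              nlocks;
    xl_standby_lock *locks = GetRunningTransactionLocks(&nlocks);

    for (int i = 0; i < nlocks; i++)
        elog(DEBUG2, "xid %u holds AccessExclusiveLock on relation %u/%u",
             locks[i].xid, locks[i].dbOid, locks[i].relOid);

    pfree(locks);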

◆ GrantAwaitedLock()

void GrantAwaitedLock ( void  )

Definition at line 1786 of file lock.c.

1787 {
1788  GrantLockLocal(awaitedLock, awaitedOwner);
1789 }
static LOCALLOCK * awaitedLock
Definition: lock.c:288
static ResourceOwner awaitedOwner
Definition: lock.c:289
static void GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner)
Definition: lock.c:1689

References awaitedLock, awaitedOwner, and GrantLockLocal().

Referenced by LockErrorCleanup(), and ProcSleep().

◆ GrantLock()

void GrantLock ( LOCK *  lock,
PROCLOCK *  proclock,
LOCKMODE  lockmode 
)

Definition at line 1555 of file lock.c.

1556 {
1557  lock->nGranted++;
1558  lock->granted[lockmode]++;
1559  lock->grantMask |= LOCKBIT_ON(lockmode);
1560  if (lock->granted[lockmode] == lock->requested[lockmode])
1561  lock->waitMask &= LOCKBIT_OFF(lockmode);
1562  proclock->holdMask |= LOCKBIT_ON(lockmode);
1563  LOCK_PRINT("GrantLock", lock, lockmode);
1564  Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
1565  Assert(lock->nGranted <= lock->nRequested);
1566 }
#define LOCK_PRINT(where, lock, type)
Definition: lock.c:365
#define LOCKBIT_OFF(lockmode)
Definition: lock.h:90
int nRequested
Definition: lock.h:311
int requested[MAX_LOCKMODES]
Definition: lock.h:310
int granted[MAX_LOCKMODES]
Definition: lock.h:312
LOCKMASK grantMask
Definition: lock.h:306
LOCKMASK waitMask
Definition: lock.h:307
int nGranted
Definition: lock.h:313

References Assert(), LOCK::granted, LOCK::grantMask, PROCLOCK::holdMask, LOCK_PRINT, LOCKBIT_OFF, LOCKBIT_ON, LOCK::nGranted, LOCK::nRequested, LOCK::requested, and LOCK::waitMask.

Referenced by FastPathGetRelationLockEntry(), FastPathTransferRelationLocks(), lock_twophase_recover(), LockAcquireExtended(), ProcLockWakeup(), ProcSleep(), and VirtualXactLock().

◆ InitDeadLockChecking()

void InitDeadLockChecking ( void  )

Definition at line 143 of file deadlock.c.

144 {
145  MemoryContext oldcxt;
146 
147  /* Make sure allocations are permanent */
148  oldcxt = MemoryContextSwitchTo(TopMemoryContext);
149 
150  /*
151  * FindLockCycle needs at most MaxBackends entries in visitedProcs[] and
152  * deadlockDetails[].
153  */
154  visitedProcs = (PGPROC **) palloc(MaxBackends * sizeof(PGPROC *));
155  deadlockDetails = (DEADLOCK_INFO *) palloc(MaxBackends * sizeof(DEADLOCK_INFO));
156 
157  /*
158  * TopoSort needs to consider at most MaxBackends wait-queue entries, and
159  * it needn't run concurrently with FindLockCycle.
160  */
161  topoProcs = visitedProcs; /* re-use this space */
162  beforeConstraints = (int *) palloc(MaxBackends * sizeof(int));
163  afterConstraints = (int *) palloc(MaxBackends * sizeof(int));
164 
165  /*
166  * We need to consider rearranging at most MaxBackends/2 wait queues
167  * (since it takes at least two waiters in a queue to create a soft edge),
168  * and the expanded form of the wait queues can't involve more than
169  * MaxBackends total waiters.
170  */
171  waitOrders = (WAIT_ORDER *)
172  palloc((MaxBackends / 2) * sizeof(WAIT_ORDER));
173  waitOrderProcs = (PGPROC **) palloc(MaxBackends * sizeof(PGPROC *));
174 
175  /*
176  * Allow at most MaxBackends distinct constraints in a configuration. (Is
177  * this enough? In practice it seems it should be, but I don't quite see
178  * how to prove it. If we run out, we might fail to find a workable wait
179  * queue rearrangement even though one exists.) NOTE that this number
180  * limits the maximum recursion depth of DeadLockCheckRecurse. Making it
181  * really big might potentially allow a stack-overflow problem.
182  */
183  maxCurConstraints = MaxBackends;
184  curConstraints = (EDGE *) palloc(maxCurConstraints * sizeof(EDGE));
185 
186  /*
187  * Allow up to 3*MaxBackends constraints to be saved without having to
188  * re-run TestConfiguration. (This is probably more than enough, but we
189  * can survive if we run low on space by doing excess runs of
190  * TestConfiguration to re-compute constraint lists each time needed.) The
191  * last MaxBackends entries in possibleConstraints[] are reserved as
192  * output workspace for FindLockCycle.
193  */
194  maxPossibleConstraints = MaxBackends * 4;
195  possibleConstraints =
196  (EDGE *) palloc(maxPossibleConstraints * sizeof(EDGE));
197 
198  MemoryContextSwitchTo(oldcxt);
199 }
static int maxPossibleConstraints
Definition: deadlock.c:123
static PGPROC ** waitOrderProcs
Definition: deadlock.c:113
static PGPROC ** visitedProcs
Definition: deadlock.c:102
static int * beforeConstraints
Definition: deadlock.c:107
static int * afterConstraints
Definition: deadlock.c:108
static int maxCurConstraints
Definition: deadlock.c:118
static EDGE * curConstraints
Definition: deadlock.c:116
static PGPROC ** topoProcs
Definition: deadlock.c:106
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
Definition: palloc.h:109
Definition: deadlock.c:47

References afterConstraints, beforeConstraints, curConstraints, deadlockDetails, MaxBackends, maxCurConstraints, maxPossibleConstraints, MemoryContextSwitchTo(), palloc(), possibleConstraints, TopMemoryContext, topoProcs, visitedProcs, waitOrderProcs, and waitOrders.

Referenced by InitProcess().

◆ InitLocks()

void InitLocks ( void  )

Definition at line 405 of file lock.c.

406 {
407  HASHCTL info;
408  long init_table_size,
409  max_table_size;
410  bool found;
411 
412  /*
413  * Compute init/max size to request for lock hashtables. Note these
414  * calculations must agree with LockShmemSize!
415  */
416  max_table_size = NLOCKENTS();
417  init_table_size = max_table_size / 2;
418 
419  /*
420  * Allocate hash table for LOCK structs. This stores per-locked-object
421  * information.
422  */
423  info.keysize = sizeof(LOCKTAG);
424  info.entrysize = sizeof(LOCK);
425  info.num_partitions = NUM_LOCK_PARTITIONS;
426 
427  LockMethodLockHash = ShmemInitHash("LOCK hash",
428  init_table_size,
429  max_table_size,
430  &info,
431  HASH_ELEM | HASH_BLOBS | HASH_PARTITION);
432 
433  /* Assume an average of 2 holders per lock */
434  max_table_size *= 2;
435  init_table_size *= 2;
436 
437  /*
438  * Allocate hash table for PROCLOCK structs. This stores
439  * per-lock-per-holder information.
440  */
441  info.keysize = sizeof(PROCLOCKTAG);
442  info.entrysize = sizeof(PROCLOCK);
443  info.hash = proclock_hash;
444  info.num_partitions = NUM_LOCK_PARTITIONS;
445 
446  LockMethodProcLockHash = ShmemInitHash("PROCLOCK hash",
447  init_table_size,
448  max_table_size,
449  &info,
450  HASH_ELEM | HASH_FUNCTION | HASH_PARTITION);
451 
452  /*
453  * Allocate fast-path structures.
454  */
455  FastPathStrongRelationLocks = (FastPathStrongRelationLockData *)
456  ShmemInitStruct("Fast Path Strong Relation Lock Data",
457  sizeof(FastPathStrongRelationLockData), &found);
458  if (!found)
459  SpinLockInit(&FastPathStrongRelationLocks->mutex);
460 
461  /*
462  * Allocate non-shared hash table for LOCALLOCK structs. This stores lock
463  * counts and resource owner information.
464  *
465  * The non-shared table could already exist in this process (this occurs
466  * when the postmaster is recreating shared memory after a backend crash).
467  * If so, delete and recreate it. (We could simply leave it, since it
468  * ought to be empty in the postmaster, but for safety let's zap it.)
469  */
470  if (LockMethodLocalHash)
471  hash_destroy(LockMethodLocalHash);
472 
473  info.keysize = sizeof(LOCALLOCKTAG);
474  info.entrysize = sizeof(LOCALLOCK);
475 
476  LockMethodLocalHash = hash_create("LOCALLOCK hash",
477  16,
478  &info,
479  HASH_ELEM | HASH_BLOBS);
480 }
void hash_destroy(HTAB *hashp)
Definition: dynahash.c:862
HTAB * hash_create(const char *tabname, long nelem, const HASHCTL *info, int flags)
Definition: dynahash.c:349
#define HASH_ELEM
Definition: hsearch.h:95
#define HASH_FUNCTION
Definition: hsearch.h:98
#define HASH_BLOBS
Definition: hsearch.h:97
#define HASH_PARTITION
Definition: hsearch.h:92
#define NLOCKENTS()
Definition: lock.c:57
static uint32 proclock_hash(const void *key, Size keysize)
Definition: lock.c:534
struct LOCALLOCK LOCALLOCK
struct LOCK LOCK
struct PROCLOCK PROCLOCK
struct LOCKTAG LOCKTAG
struct PROCLOCKTAG PROCLOCKTAG
struct LOCALLOCKTAG LOCALLOCKTAG
void * ShmemInitStruct(const char *name, Size size, bool *foundPtr)
Definition: shmem.c:396
HTAB * ShmemInitHash(const char *name, long init_size, long max_size, HASHCTL *infoP, int hash_flags)
Definition: shmem.c:341
#define SpinLockInit(lock)
Definition: spin.h:60
Size keysize
Definition: hsearch.h:75
HashValueFunc hash
Definition: hsearch.h:78
Size entrysize
Definition: hsearch.h:76
long num_partitions
Definition: hsearch.h:68

References HASHCTL::entrysize, FastPathStrongRelationLocks, HASHCTL::hash, HASH_BLOBS, hash_create(), hash_destroy(), HASH_ELEM, HASH_FUNCTION, HASH_PARTITION, HASHCTL::keysize, LockMethodLocalHash, LockMethodLockHash, LockMethodProcLockHash, FastPathStrongRelationLockData::mutex, NLOCKENTS, NUM_LOCK_PARTITIONS, HASHCTL::num_partitions, proclock_hash(), ShmemInitHash(), ShmemInitStruct(), and SpinLockInit.

Referenced by CreateSharedMemoryAndSemaphores().

◆ lock_twophase_postabort()

void lock_twophase_postabort ( TransactionId  xid,
uint16  info,
void *  recdata,
uint32  len 
)

Definition at line 4452 of file lock.c.

4454 {
4455  lock_twophase_postcommit(xid, info, recdata, len);
4456 }
void lock_twophase_postcommit(TransactionId xid, uint16 info, void *recdata, uint32 len)
Definition: lock.c:4426
const void size_t len

References len, and lock_twophase_postcommit().

◆ lock_twophase_postcommit()

void lock_twophase_postcommit ( TransactionId  xid,
uint16  info,
void *  recdata,
uint32  len 
)

Definition at line 4426 of file lock.c.

4428 {
4429  TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4430  PGPROC *proc = TwoPhaseGetDummyProc(xid, true);
4431  LOCKTAG *locktag;
4432  LOCKMETHODID lockmethodid;
4433  LockMethod lockMethodTable;
4434 
4435  Assert(len == sizeof(TwoPhaseLockRecord));
4436  locktag = &rec->locktag;
4437  lockmethodid = locktag->locktag_lockmethodid;
4438 
4439  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4440  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4441  lockMethodTable = LockMethods[lockmethodid];
4442 
4443  LockRefindAndRelease(lockMethodTable, proc, locktag, rec->lockmode, true);
4444 }
static void LockRefindAndRelease(LockMethod lockMethodTable, PGPROC *proc, LOCKTAG *locktag, LOCKMODE lockmode, bool decrement_strong_lock_count)
Definition: lock.c:3129
PGPROC * TwoPhaseGetDummyProc(TransactionId xid, bool lock_held)
Definition: twophase.c:931

References Assert(), elog, ERROR, len, lengthof, LockMethods, TwoPhaseLockRecord::lockmode, LockRefindAndRelease(), TwoPhaseLockRecord::locktag, LOCKTAG::locktag_lockmethodid, and TwoPhaseGetDummyProc().

Referenced by lock_twophase_postabort().

◆ lock_twophase_recover()

void lock_twophase_recover ( TransactionId  xid,
uint16  info,
void *  recdata,
uint32  len 
)

Definition at line 4213 of file lock.c.

4215 {
4216  TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4217  PGPROC *proc = TwoPhaseGetDummyProc(xid, false);
4218  LOCKTAG *locktag;
4219  LOCKMODE lockmode;
4220  LOCKMETHODID lockmethodid;
4221  LOCK *lock;
4222  PROCLOCK *proclock;
4223  PROCLOCKTAG proclocktag;
4224  bool found;
4225  uint32 hashcode;
4226  uint32 proclock_hashcode;
4227  int partition;
4228  LWLock *partitionLock;
4229  LockMethod lockMethodTable;
4230 
4231  Assert(len == sizeof(TwoPhaseLockRecord));
4232  locktag = &rec->locktag;
4233  lockmode = rec->lockmode;
4234  lockmethodid = locktag->locktag_lockmethodid;
4235 
4236  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4237  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4238  lockMethodTable = LockMethods[lockmethodid];
4239 
4240  hashcode = LockTagHashCode(locktag);
4241  partition = LockHashPartition(hashcode);
4242  partitionLock = LockHashPartitionLock(hashcode);
4243 
4244  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4245 
4246  /*
4247  * Find or create a lock with this tag.
4248  */
4249  lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
4250  (void *) locktag,
4251  hashcode,
4252  HASH_ENTER_NULL,
4253  &found);
4254  if (!lock)
4255  {
4256  LWLockRelease(partitionLock);
4257  ereport(ERROR,
4258  (errcode(ERRCODE_OUT_OF_MEMORY),
4259  errmsg("out of shared memory"),
4260  errhint("You might need to increase max_locks_per_transaction.")));
4261  }
4262 
4263  /*
4264  * if it's a new lock object, initialize it
4265  */
4266  if (!found)
4267  {
4268  lock->grantMask = 0;
4269  lock->waitMask = 0;
4270  SHMQueueInit(&(lock->procLocks));
4271  ProcQueueInit(&(lock->waitProcs));
4272  lock->nRequested = 0;
4273  lock->nGranted = 0;
4274  MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
4275  MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
4276  LOCK_PRINT("lock_twophase_recover: new", lock, lockmode);
4277  }
4278  else
4279  {
4280  LOCK_PRINT("lock_twophase_recover: found", lock, lockmode);
4281  Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
4282  Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
4283  Assert(lock->nGranted <= lock->nRequested);
4284  }
4285 
4286  /*
4287  * Create the hash key for the proclock table.
4288  */
4289  proclocktag.myLock = lock;
4290  proclocktag.myProc = proc;
4291 
4292  proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
4293 
4294  /*
4295  * Find or create a proclock entry with this tag
4296  */
4297  proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
4298  (void *) &proclocktag,
4299  proclock_hashcode,
4300  HASH_ENTER_NULL,
4301  &found);
4302  if (!proclock)
4303  {
4304  /* Oops, not enough shmem for the proclock */
4305  if (lock->nRequested == 0)
4306  {
4307  /*
4308  * There are no other requestors of this lock, so garbage-collect
4309  * the lock object. We *must* do this to avoid a permanent leak
4310  * of shared memory, because there won't be anything to cause
4311  * anyone to release the lock object later.
4312  */
4313  Assert(SHMQueueEmpty(&(lock->procLocks)));
4314  if (!hash_search_with_hash_value(LockMethodLockHash,
4315  (void *) &(lock->tag),
4316  hashcode,
4317  HASH_REMOVE,
4318  NULL))
4319  elog(PANIC, "lock table corrupted");
4320  }
4321  LWLockRelease(partitionLock);
4322  ereport(ERROR,
4323  (errcode(ERRCODE_OUT_OF_MEMORY),
4324  errmsg("out of shared memory"),
4325  errhint("You might need to increase max_locks_per_transaction.")));
4326  }
4327 
4328  /*
4329  * If new, initialize the new entry
4330  */
4331  if (!found)
4332  {
4333  Assert(proc->lockGroupLeader == NULL);
4334  proclock->groupLeader = proc;
4335  proclock->holdMask = 0;
4336  proclock->releaseMask = 0;
4337  /* Add proclock to appropriate lists */
4338  SHMQueueInsertBefore(&lock->procLocks, &proclock->lockLink);
4339  SHMQueueInsertBefore(&(proc->myProcLocks[partition]),
4340  &proclock->procLink);
4341  PROCLOCK_PRINT("lock_twophase_recover: new", proclock);
4342  }
4343  else
4344  {
4345  PROCLOCK_PRINT("lock_twophase_recover: found", proclock);
4346  Assert((proclock->holdMask & ~lock->grantMask) == 0);
4347  }
4348 
4349  /*
4350  * lock->nRequested and lock->requested[] count the total number of
4351  * requests, whether granted or waiting, so increment those immediately.
4352  */
4353  lock->nRequested++;
4354  lock->requested[lockmode]++;
4355  Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
4356 
4357  /*
4358  * We shouldn't already hold the desired lock.
4359  */
4360  if (proclock->holdMask & LOCKBIT_ON(lockmode))
4361  elog(ERROR, "lock %s on object %u/%u/%u is already held",
4362  lockMethodTable->lockModeNames[lockmode],
4363  lock->tag.locktag_field1, lock->tag.locktag_field2,
4364  lock->tag.locktag_field3);
4365 
4366  /*
4367  * We ignore any possible conflicts and just grant ourselves the lock. Not
4368  * only because we don't bother, but also to avoid deadlocks when
4369  * switching from standby to normal mode. See function comment.
4370  */
4371  GrantLock(lock, proclock, lockmode);
4372 
4373  /*
4374  * Bump strong lock count, to make sure any fast-path lock requests won't
4375  * be granted without consulting the primary lock table.
4376  */
4377  if (ConflictsWithRelationFastPath(&lock->tag, lockmode))
4378  {
4379  uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
4380 
4381  SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
4382  FastPathStrongRelationLocks->count[fasthashcode]++;
4383  SpinLockRelease(&FastPathStrongRelationLocks->mutex);
4384  }
4385 
4386  LWLockRelease(partitionLock);
4387 }
#define MemSet(start, val, len)
Definition: c.h:1008
@ HASH_REMOVE
Definition: hsearch.h:115
@ HASH_ENTER_NULL
Definition: hsearch.h:116
static uint32 ProcLockHashCode(const PROCLOCKTAG *proclocktag, uint32 hashcode)
Definition: lock.c:565
void GrantLock(LOCK *lock, PROCLOCK *proclock, LOCKMODE lockmode)
Definition: lock.c:1555
#define PROCLOCK_PRINT(where, proclockP)
Definition: lock.c:366
#define MAX_LOCKMODES
Definition: lock.h:87
#define LockHashPartition(hashcode)
Definition: lock.h:517
int LOCKMODE
Definition: lockdefs.h:26
@ LW_EXCLUSIVE
Definition: lwlock.h:104
void SHMQueueInit(SHM_QUEUE *queue)
Definition: shmqueue.c:36
bool SHMQueueEmpty(const SHM_QUEUE *queue)
Definition: shmqueue.c:180
uint32 locktag_field3
Definition: lock.h:171
SHM_QUEUE myProcLocks[NUM_LOCK_PARTITIONS]
Definition: proc.h:211
LOCKMASK releaseMask
Definition: lock.h:369
SHM_QUEUE procLink
Definition: lock.h:371

References Assert(), ConflictsWithRelationFastPath, FastPathStrongRelationLockData::count, elog, ereport, errcode(), errhint(), errmsg(), ERROR, FastPathStrongLockHashPartition, FastPathStrongRelationLocks, LOCK::granted, GrantLock(), LOCK::grantMask, PROCLOCK::groupLeader, HASH_ENTER_NULL, HASH_REMOVE, hash_search_with_hash_value(), PROCLOCK::holdMask, len, lengthof, LOCK_PRINT, LOCKBIT_ON, PGPROC::lockGroupLeader, LockHashPartition, LockHashPartitionLock, PROCLOCK::lockLink, LockMethodLockHash, LockMethodProcLockHash, LockMethods, TwoPhaseLockRecord::lockmode, LockMethodData::lockModeNames, TwoPhaseLockRecord::locktag, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_field3, LOCKTAG::locktag_lockmethodid, LockTagHashCode(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), MAX_LOCKMODES, MemSet, FastPathStrongRelationLockData::mutex, PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, PGPROC::myProcLocks, LOCK::nGranted, LOCK::nRequested, PANIC, PROCLOCK::procLink, PROCLOCK_PRINT, ProcLockHashCode(), LOCK::procLocks, ProcQueueInit(), PROCLOCK::releaseMask, LOCK::requested, SHMQueueEmpty(), SHMQueueInit(), SHMQueueInsertBefore(), SpinLockAcquire, SpinLockRelease, LOCK::tag, TwoPhaseGetDummyProc(), LOCK::waitMask, and LOCK::waitProcs.

◆ lock_twophase_standby_recover()

void lock_twophase_standby_recover ( TransactionId  xid,
uint16  info,
void *  recdata,
uint32  len 
)

Definition at line 4394 of file lock.c.

4396 {
4397  TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4398  LOCKTAG *locktag;
4399  LOCKMODE lockmode;
4400  LOCKMETHODID lockmethodid;
4401 
4402  Assert(len == sizeof(TwoPhaseLockRecord));
4403  locktag = &rec->locktag;
4404  lockmode = rec->lockmode;
4405  lockmethodid = locktag->locktag_lockmethodid;
4406 
4407  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4408  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4409 
4410  if (lockmode == AccessExclusiveLock &&
4411  locktag->locktag_type == LOCKTAG_RELATION)
4412  {
4413  StandbyAcquireAccessExclusiveLock(xid,
4414  locktag->locktag_field1 /* dboid */ ,
4415  locktag->locktag_field2 /* reloid */ );
4416  }
4417 }
void StandbyAcquireAccessExclusiveLock(TransactionId xid, Oid dbOid, Oid relOid)
Definition: standby.c:948

References AccessExclusiveLock, Assert(), elog, ERROR, len, lengthof, LockMethods, TwoPhaseLockRecord::lockmode, TwoPhaseLockRecord::locktag, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_lockmethodid, LOCKTAG_RELATION, LOCKTAG::locktag_type, and StandbyAcquireAccessExclusiveLock().

◆ LockAcquire()

LockAcquireResult LockAcquire ( const LOCKTAG *  locktag,
LOCKMODE  lockmode,
bool  sessionLock,
bool  dontWait 
)

◆ LockAcquireExtended()

LockAcquireResult LockAcquireExtended ( const LOCKTAG *  locktag,
LOCKMODE  lockmode,
bool  sessionLock,
bool  dontWait,
bool  reportMemoryError,
LOCALLOCK **  locallockp 
)

Definition at line 771 of file lock.c.

777 {
778  LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
779  LockMethod lockMethodTable;
780  LOCALLOCKTAG localtag;
781  LOCALLOCK *locallock;
782  LOCK *lock;
783  PROCLOCK *proclock;
784  bool found;
785  ResourceOwner owner;
786  uint32 hashcode;
787  LWLock *partitionLock;
788  bool found_conflict;
789  bool log_lock = false;
790 
791  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
792  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
793  lockMethodTable = LockMethods[lockmethodid];
794  if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
795  elog(ERROR, "unrecognized lock mode: %d", lockmode);
796 
797  if (RecoveryInProgress() && !InRecovery &&
798  (locktag->locktag_type == LOCKTAG_OBJECT ||
799  locktag->locktag_type == LOCKTAG_RELATION) &&
800  lockmode > RowExclusiveLock)
801  ereport(ERROR,
802  (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
803  errmsg("cannot acquire lock mode %s on database objects while recovery is in progress",
804  lockMethodTable->lockModeNames[lockmode]),
805  errhint("Only RowExclusiveLock or less can be acquired on database objects during recovery.")));
806 
807 #ifdef LOCK_DEBUG
808  if (LOCK_DEBUG_ENABLED(locktag))
809  elog(LOG, "LockAcquire: lock [%u,%u] %s",
810  locktag->locktag_field1, locktag->locktag_field2,
811  lockMethodTable->lockModeNames[lockmode]);
812 #endif
813 
814  /* Identify owner for lock */
815  if (sessionLock)
816  owner = NULL;
817  else
818  owner = CurrentResourceOwner;
819 
820  /*
821  * Find or create a LOCALLOCK entry for this lock and lockmode
822  */
823  MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
824  localtag.lock = *locktag;
825  localtag.mode = lockmode;
826 
827  locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
828  (void *) &localtag,
829  HASH_ENTER, &found);
830 
831  /*
832  * if it's a new locallock object, initialize it
833  */
834  if (!found)
835  {
836  locallock->lock = NULL;
837  locallock->proclock = NULL;
838  locallock->hashcode = LockTagHashCode(&(localtag.lock));
839  locallock->nLocks = 0;
840  locallock->holdsStrongLockCount = false;
841  locallock->lockCleared = false;
842  locallock->numLockOwners = 0;
843  locallock->maxLockOwners = 8;
844  locallock->lockOwners = NULL; /* in case next line fails */
845  locallock->lockOwners = (LOCALLOCKOWNER *)
846  MemoryContextAlloc(TopMemoryContext,
847  locallock->maxLockOwners * sizeof(LOCALLOCKOWNER));
848  }
849  else
850  {
851  /* Make sure there will be room to remember the lock */
852  if (locallock->numLockOwners >= locallock->maxLockOwners)
853  {
854  int newsize = locallock->maxLockOwners * 2;
855 
856  locallock->lockOwners = (LOCALLOCKOWNER *)
857  repalloc(locallock->lockOwners,
858  newsize * sizeof(LOCALLOCKOWNER));
859  locallock->maxLockOwners = newsize;
860  }
861  }
862  hashcode = locallock->hashcode;
863 
864  if (locallockp)
865  *locallockp = locallock;
866 
867  /*
868  * If we already hold the lock, we can just increase the count locally.
869  *
870  * If lockCleared is already set, caller need not worry about absorbing
871  * sinval messages related to the lock's object.
872  */
873  if (locallock->nLocks > 0)
874  {
875  GrantLockLocal(locallock, owner);
876  if (locallock->lockCleared)
877  return LOCKACQUIRE_ALREADY_CLEAR;
878  else
879  return LOCKACQUIRE_ALREADY_HELD;
880  }
881 
882  /*
883  * We don't acquire any other heavyweight lock while holding the relation
884  * extension lock. We do allow to acquire the same relation extension
885  * lock more than once but that case won't reach here.
886  */
887  Assert(!IsRelationExtensionLockHeld);
888 
889  /*
890  * We don't acquire any other heavyweight lock while holding the page lock
891  * except for relation extension.
892  */
893  Assert(!IsPageLockHeld ||
894  (locktag->locktag_type == LOCKTAG_RELATION_EXTEND));
895 
896  /*
897  * Prepare to emit a WAL record if acquisition of this lock needs to be
898  * replayed in a standby server.
899  *
900  * Here we prepare to log; after lock is acquired we'll issue log record.
901  * This arrangement simplifies error recovery in case the preparation step
902  * fails.
903  *
904  * Only AccessExclusiveLocks can conflict with lock types that read-only
905  * transactions can acquire in a standby server. Make sure this definition
906  * matches the one in GetRunningTransactionLocks().
907  */
908  if (lockmode >= AccessExclusiveLock &&
909  locktag->locktag_type == LOCKTAG_RELATION &&
910  !RecoveryInProgress() &&
911  XLogStandbyInfoActive())
912  {
913  LogAccessExclusiveLockPrepare();
914  log_lock = true;
915  }
916 
917  /*
918  * Attempt to take lock via fast path, if eligible. But if we remember
919  * having filled up the fast path array, we don't attempt to make any
920  * further use of it until we release some locks. It's possible that some
921  * other backend has transferred some of those locks to the shared hash
922  * table, leaving space free, but it's not worth acquiring the LWLock just
923  * to check. It's also possible that we're acquiring a second or third
924  * lock type on a relation we have already locked using the fast-path, but
925  * for now we don't worry about that case either.
926  */
927  if (EligibleForRelationFastPath(locktag, lockmode) &&
928  FastPathLocalUseCount < FP_LOCK_SLOTS_PER_BACKEND)
929  {
930  uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
931  bool acquired;
932 
933  /*
934  * LWLockAcquire acts as a memory sequencing point, so it's safe to
935  * assume that any strong locker whose increment to
936  * FastPathStrongRelationLocks->counts becomes visible after we test
937  * it has yet to begin to transfer fast-path locks.
938  */
939  LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
940  if (FastPathStrongRelationLocks->count[fasthashcode] != 0)
941  acquired = false;
942  else
943  acquired = FastPathGrantRelationLock(locktag->locktag_field2,
944  lockmode);
945  LWLockRelease(&MyProc->fpInfoLock);
946  if (acquired)
947  {
948  /*
949  * The locallock might contain stale pointers to some old shared
950  * objects; we MUST reset these to null before considering the
951  * lock to be acquired via fast-path.
952  */
953  locallock->lock = NULL;
954  locallock->proclock = NULL;
955  GrantLockLocal(locallock, owner);
956  return LOCKACQUIRE_OK;
957  }
958  }
959 
960  /*
961  * If this lock could potentially have been taken via the fast-path by
962  * some other backend, we must (temporarily) disable further use of the
963  * fast-path for this lock tag, and migrate any locks already taken via
964  * this method to the main lock table.
965  */
966  if (ConflictsWithRelationFastPath(locktag, lockmode))
967  {
968  uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
969 
970  BeginStrongLockAcquire(locallock, fasthashcode);
971  if (!FastPathTransferRelationLocks(lockMethodTable, locktag,
972  hashcode))
973  {
974  AbortStrongLockAcquire();
975  if (locallock->nLocks == 0)
976  RemoveLocalLock(locallock);
977  if (locallockp)
978  *locallockp = NULL;
979  if (reportMemoryError)
980  ereport(ERROR,
981  (errcode(ERRCODE_OUT_OF_MEMORY),
982  errmsg("out of shared memory"),
983  errhint("You might need to increase max_locks_per_transaction.")));
984  else
985  return LOCKACQUIRE_NOT_AVAIL;
986  }
987  }
988 
989  /*
990  * We didn't find the lock in our LOCALLOCK table, and we didn't manage to
991  * take it via the fast-path, either, so we've got to mess with the shared
992  * lock table.
993  */
994  partitionLock = LockHashPartitionLock(hashcode);
995 
996  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
997 
998  /*
999  * Find or create lock and proclock entries with this tag
1000  *
1001  * Note: if the locallock object already existed, it might have a pointer
1002  * to the lock already ... but we should not assume that that pointer is
1003  * valid, since a lock object with zero hold and request counts can go
1004  * away anytime. So we have to use SetupLockInTable() to recompute the
1005  * lock and proclock pointers, even if they're already set.
1006  */
1007  proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
1008  hashcode, lockmode);
1009  if (!proclock)
1010  {
1011  AbortStrongLockAcquire();
1012  LWLockRelease(partitionLock);
1013  if (locallock->nLocks == 0)
1014  RemoveLocalLock(locallock);
1015  if (locallockp)
1016  *locallockp = NULL;
1017  if (reportMemoryError)
1018  ereport(ERROR,
1019  (errcode(ERRCODE_OUT_OF_MEMORY),
1020  errmsg("out of shared memory"),
1021  errhint("You might need to increase max_locks_per_transaction.")));
1022  else
1023  return LOCKACQUIRE_NOT_AVAIL;
1024  }
1025  locallock->proclock = proclock;
1026  lock = proclock->tag.myLock;
1027  locallock->lock = lock;
1028 
1029  /*
1030  * If lock requested conflicts with locks requested by waiters, must join
1031  * wait queue. Otherwise, check for conflict with already-held locks.
1032  * (That's last because most complex check.)
1033  */
1034  if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
1035  found_conflict = true;
1036  else
1037  found_conflict = LockCheckConflicts(lockMethodTable, lockmode,
1038  lock, proclock);
1039 
1040  if (!found_conflict)
1041  {
1042  /* No conflict with held or previously requested locks */
1043  GrantLock(lock, proclock, lockmode);
1044  GrantLockLocal(locallock, owner);
1045  }
1046  else
1047  {
1048  /*
1049  * We can't acquire the lock immediately. If caller specified no
1050  * blocking, remove useless table entries and return
1051  * LOCKACQUIRE_NOT_AVAIL without waiting.
1052  */
1053  if (dontWait)
1054  {
1055  AbortStrongLockAcquire();
1056  if (proclock->holdMask == 0)
1057  {
1058  uint32 proclock_hashcode;
1059 
1060  proclock_hashcode = ProcLockHashCode(&proclock->tag, hashcode);
1061  SHMQueueDelete(&proclock->lockLink);
1062  SHMQueueDelete(&proclock->procLink);
1063  if (!hash_search_with_hash_value(LockMethodProcLockHash,
1064  (void *) &(proclock->tag),
1065  proclock_hashcode,
1066  HASH_REMOVE,
1067  NULL))
1068  elog(PANIC, "proclock table corrupted");
1069  }
1070  else
1071  PROCLOCK_PRINT("LockAcquire: NOWAIT", proclock);
1072  lock->nRequested--;
1073  lock->requested[lockmode]--;
1074  LOCK_PRINT("LockAcquire: conditional lock failed", lock, lockmode);
1075  Assert((lock->nRequested > 0) && (lock->requested[lockmode] >= 0));
1076  Assert(lock->nGranted <= lock->nRequested);
1077  LWLockRelease(partitionLock);
1078  if (locallock->nLocks == 0)
1079  RemoveLocalLock(locallock);
1080  if (locallockp)
1081  *locallockp = NULL;
1082  return LOCKACQUIRE_NOT_AVAIL;
1083  }
1084 
1085  /*
1086  * Set bitmask of locks this process already holds on this object.
1087  */
1088  MyProc->heldLocks = proclock->holdMask;
1089 
1090  /*
1091  * Sleep till someone wakes me up.
1092  */
1093 
1094  TRACE_POSTGRESQL_LOCK_WAIT_START(locktag->locktag_field1,
1095  locktag->locktag_field2,
1096  locktag->locktag_field3,
1097  locktag->locktag_field4,
1098  locktag->locktag_type,
1099  lockmode);
1100 
1101  WaitOnLock(locallock, owner);
1102 
1103  TRACE_POSTGRESQL_LOCK_WAIT_DONE(locktag->locktag_field1,
1104  locktag->locktag_field2,
1105  locktag->locktag_field3,
1106  locktag->locktag_field4,
1107  locktag->locktag_type,
1108  lockmode);
1109 
1110  /*
1111  * NOTE: do not do any material change of state between here and
1112  * return. All required changes in locktable state must have been
1113  * done when the lock was granted to us --- see notes in WaitOnLock.
1114  */
1115 
1116  /*
1117  * Check the proclock entry status, in case something in the ipc
1118  * communication doesn't work correctly.
1119  */
1120  if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
1121  {
1122  AbortStrongLockAcquire();
1123  PROCLOCK_PRINT("LockAcquire: INCONSISTENT", proclock);
1124  LOCK_PRINT("LockAcquire: INCONSISTENT", lock, lockmode);
1125  /* Should we retry ? */
1126  LWLockRelease(partitionLock);
1127  elog(ERROR, "LockAcquire failed");
1128  }
1129  PROCLOCK_PRINT("LockAcquire: granted", proclock);
1130  LOCK_PRINT("LockAcquire: granted", lock, lockmode);
1131  }
1132 
1133  /*
1134  * Lock state is fully up-to-date now; if we error out after this, no
1135  * special error cleanup is required.
1136  */
1137  FinishStrongLockAcquire();
1138 
1139  LWLockRelease(partitionLock);
1140 
1141  /*
1142  * Emit a WAL record if acquisition of this lock needs to be replayed in a
1143  * standby server.
1144  */
1145  if (log_lock)
1146  {
1147  /*
1148  * Decode the locktag back to the original values, to avoid sending
1149  * lots of empty bytes with every message. See lock.h to check how a
1150  * locktag is defined for LOCKTAG_RELATION
1151  */
1152  LogAccessExclusiveLock(locktag->locktag_field1,
1153  locktag->locktag_field2);
1154  }
1155 
1156  return LOCKACQUIRE_OK;
1157 }
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:954
#define LOG
Definition: elog.h:25
@ HASH_ENTER
Definition: hsearch.h:114
static void RemoveLocalLock(LOCALLOCK *locallock)
Definition: lock.c:1367
static PROCLOCK * SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc, const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode)
Definition: lock.c:1170
static bool FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag, uint32 hashcode)
Definition: lock.c:2722
void AbortStrongLockAcquire(void)
Definition: lock.c:1757
static bool FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode)
Definition: lock.c:2655
static void WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner)
Definition: lock.c:1814
static int FastPathLocalUseCount
Definition: lock.c:172
#define EligibleForRelationFastPath(locktag, mode)
Definition: lock.c:227
static void BeginStrongLockAcquire(LOCALLOCK *locallock, uint32 fasthashcode)
Definition: lock.c:1721
bool LockCheckConflicts(LockMethod lockMethodTable, LOCKMODE lockmode, LOCK *lock, PROCLOCK *proclock)
Definition: lock.c:1420
static void FinishStrongLockAcquire(void)
Definition: lock.c:1747
#define RowExclusiveLock
Definition: lockdefs.h:38
ResourceOwner CurrentResourceOwner
Definition: resowner.c:146
void SHMQueueDelete(SHM_QUEUE *queue)
Definition: shmqueue.c:68
void LogAccessExclusiveLockPrepare(void)
Definition: standby.c:1377
void LogAccessExclusiveLock(Oid dbOid, Oid relOid)
Definition: standby.c:1360
int maxLockOwners
Definition: lock.h:429
bool lockCleared
Definition: lock.h:432
uint16 locktag_field4
Definition: lock.h:172
LOCKMASK heldLocks
Definition: proc.h:185
bool RecoveryInProgress(void)
Definition: xlog.c:8404
#define XLogStandbyInfoActive()
Definition: xlog.h:178
bool InRecovery
Definition: xlogutils.c:52

References AbortStrongLockAcquire(), AccessExclusiveLock, Assert(), BeginStrongLockAcquire(), ConflictsWithRelationFastPath, LockMethodData::conflictTab, FastPathStrongRelationLockData::count, CurrentResourceOwner, EligibleForRelationFastPath, elog, ereport, errcode(), errhint(), errmsg(), ERROR, FastPathGrantRelationLock(), FastPathLocalUseCount, FastPathStrongLockHashPartition, FastPathStrongRelationLocks, FastPathTransferRelationLocks(), FinishStrongLockAcquire(), FP_LOCK_SLOTS_PER_BACKEND, PGPROC::fpInfoLock, GrantLock(), GrantLockLocal(), HASH_ENTER, HASH_REMOVE, hash_search(), hash_search_with_hash_value(), LOCALLOCK::hashcode, PGPROC::heldLocks, PROCLOCK::holdMask, LOCALLOCK::holdsStrongLockCount, InRecovery, lengthof, LOCALLOCKTAG::lock, LOCALLOCK::lock, LOCK_PRINT, LOCKACQUIRE_ALREADY_CLEAR, LOCKACQUIRE_ALREADY_HELD, LOCKACQUIRE_NOT_AVAIL, LOCKACQUIRE_OK, LOCKBIT_ON, LockCheckConflicts(), LOCALLOCK::lockCleared, LockHashPartitionLock, PROCLOCK::lockLink, LockMethodLocalHash, LockMethodProcLockHash, LockMethods, LockMethodData::lockModeNames, LOCALLOCK::lockOwners, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_field3, LOCKTAG::locktag_field4, LOCKTAG::locktag_lockmethodid, LOCKTAG_OBJECT, LOCKTAG_RELATION, LOCKTAG_RELATION_EXTEND, LOCKTAG::locktag_type, LockTagHashCode(), LOG, LogAccessExclusiveLock(), LogAccessExclusiveLockPrepare(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), LOCALLOCK::maxLockOwners, MemoryContextAlloc(), MemSet, LOCALLOCKTAG::mode, PROCLOCKTAG::myLock, MyProc, LOCK::nGranted, LOCALLOCK::nLocks, LOCK::nRequested, LockMethodData::numLockModes, LOCALLOCK::numLockOwners, PANIC, PROCLOCK::procLink, LOCALLOCK::proclock, PROCLOCK_PRINT, ProcLockHashCode(), RecoveryInProgress(), RemoveLocalLock(), repalloc(), LOCK::requested, RowExclusiveLock, SetupLockInTable(), SHMQueueDelete(), PROCLOCK::tag, TopMemoryContext, LOCK::waitMask, WaitOnLock(), and XLogStandbyInfoActive.

Referenced by ConditionalLockRelation(), ConditionalLockRelationOid(), LockAcquire(), LockRelation(), and LockRelationOid().
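
A hedged usage sketch, assuming dboid and reloid are placeholders for a real database/relation OID pair: acquire AccessExclusiveLock conditionally, so the call reports failure instead of sleeping.

    LOCKTAG     tag;
    LOCALLOCK  *locallock;

    SET_LOCKTAG_RELATION(tag, dboid, reloid);

    if (LockAcquireExtended(&tag, AccessExclusiveLock,
                            false,      /* sessionLock: tie the lock to the current resource owner */
                            true,       /* dontWait: return instead of joining the wait queue */
                            true,       /* reportMemoryError */
                            &locallock) == LOCKACQUIRE_NOT_AVAIL)
        ereport(NOTICE,
                (errmsg("relation is locked by another backend")));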

◆ LockCheckConflicts()

bool LockCheckConflicts ( LockMethod  lockMethodTable,
LOCKMODE  lockmode,
LOCK *  lock,
PROCLOCK *  proclock 
)

Definition at line 1420 of file lock.c.

1424 {
1425  int numLockModes = lockMethodTable->numLockModes;
1426  LOCKMASK myLocks;
1427  int conflictMask = lockMethodTable->conflictTab[lockmode];
1428  int conflictsRemaining[MAX_LOCKMODES];
1429  int totalConflictsRemaining = 0;
1430  int i;
1431  SHM_QUEUE *procLocks;
1432  PROCLOCK *otherproclock;
1433 
1434  /*
1435  * first check for global conflicts: If no locks conflict with my request,
1436  * then I get the lock.
1437  *
1438  * Checking for conflict: lock->grantMask represents the types of
1439  * currently held locks. conflictTable[lockmode] has a bit set for each
1440  * type of lock that conflicts with request. Bitwise compare tells if
1441  * there is a conflict.
1442  */
1443  if (!(conflictMask & lock->grantMask))
1444  {
1445  PROCLOCK_PRINT("LockCheckConflicts: no conflict", proclock);
1446  return false;
1447  }
1448 
1449  /*
1450  * Rats. Something conflicts. But it could still be my own lock, or a
1451  * lock held by another member of my locking group. First, figure out how
1452  * many conflicts remain after subtracting out any locks I hold myself.
1453  */
1454  myLocks = proclock->holdMask;
1455  for (i = 1; i <= numLockModes; i++)
1456  {
1457  if ((conflictMask & LOCKBIT_ON(i)) == 0)
1458  {
1459  conflictsRemaining[i] = 0;
1460  continue;
1461  }
1462  conflictsRemaining[i] = lock->granted[i];
1463  if (myLocks & LOCKBIT_ON(i))
1464  --conflictsRemaining[i];
1465  totalConflictsRemaining += conflictsRemaining[i];
1466  }
1467 
1468  /* If no conflicts remain, we get the lock. */
1469  if (totalConflictsRemaining == 0)
1470  {
1471  PROCLOCK_PRINT("LockCheckConflicts: resolved (simple)", proclock);
1472  return false;
1473  }
1474 
1475  /* If no group locking, it's definitely a conflict. */
1476  if (proclock->groupLeader == MyProc && MyProc->lockGroupLeader == NULL)
1477  {
1478  Assert(proclock->tag.myProc == MyProc);
1479  PROCLOCK_PRINT("LockCheckConflicts: conflicting (simple)",
1480  proclock);
1481  return true;
1482  }
1483 
1484  /*
1485  * The relation extension or page lock conflict even between the group
1486  * members.
1487  */
1488  if (LOCK_LOCKTAG(*lock) == LOCKTAG_RELATION_EXTEND ||
1489  (LOCK_LOCKTAG(*lock) == LOCKTAG_PAGE))
1490  {
1491  PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)",
1492  proclock);
1493  return true;
1494  }
1495 
1496  /*
1497  * Locks held in conflicting modes by members of our own lock group are
1498  * not real conflicts; we can subtract those out and see if we still have
1499  * a conflict. This is O(N) in the number of processes holding or
1500  * awaiting locks on this object. We could improve that by making the
1501  * shared memory state more complex (and larger) but it doesn't seem worth
1502  * it.
1503  */
1504  procLocks = &(lock->procLocks);
1505  otherproclock = (PROCLOCK *)
1506  SHMQueueNext(procLocks, procLocks, offsetof(PROCLOCK, lockLink));
1507  while (otherproclock != NULL)
1508  {
1509  if (proclock != otherproclock &&
1510  proclock->groupLeader == otherproclock->groupLeader &&
1511  (otherproclock->holdMask & conflictMask) != 0)
1512  {
1513  int intersectMask = otherproclock->holdMask & conflictMask;
1514 
1515  for (i = 1; i <= numLockModes; i++)
1516  {
1517  if ((intersectMask & LOCKBIT_ON(i)) != 0)
1518  {
1519  if (conflictsRemaining[i] <= 0)
1520  elog(PANIC, "proclocks held do not match lock");
1521  conflictsRemaining[i]--;
1522  totalConflictsRemaining--;
1523  }
1524  }
1525 
1526  if (totalConflictsRemaining == 0)
1527  {
1528  PROCLOCK_PRINT("LockCheckConflicts: resolved (group)",
1529  proclock);
1530  return false;
1531  }
1532  }
1533  otherproclock = (PROCLOCK *)
1534  SHMQueueNext(procLocks, &otherproclock->lockLink,
1535  offsetof(PROCLOCK, lockLink));
1536  }
1537 
1538  /* Nope, it's a real conflict. */
1539  PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)", proclock);
1540  return true;
1541 }
#define LOCK_LOCKTAG(lock)
Definition: lock.h:317

References Assert(), LockMethodData::conflictTab, elog, LOCK::granted, LOCK::grantMask, PROCLOCK::groupLeader, PROCLOCK::holdMask, i, LOCK_LOCKTAG, LOCKBIT_ON, PGPROC::lockGroupLeader, PROCLOCK::lockLink, LOCKTAG_PAGE, LOCKTAG_RELATION_EXTEND, MAX_LOCKMODES, MyProc, PROCLOCKTAG::myProc, LockMethodData::numLockModes, offsetof, PANIC, PROCLOCK_PRINT, LOCK::procLocks, SHMQueueNext(), and PROCLOCK::tag.

Referenced by LockAcquireExtended(), ProcLockWakeup(), and ProcSleep().

◆ LockHasWaiters()

bool LockHasWaiters ( const LOCKTAG *  locktag,
LOCKMODE  lockmode,
bool  sessionLock 
)

Definition at line 634 of file lock.c.

635 {
636  LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
637  LockMethod lockMethodTable;
638  LOCALLOCKTAG localtag;
639  LOCALLOCK *locallock;
640  LOCK *lock;
641  PROCLOCK *proclock;
642  LWLock *partitionLock;
643  bool hasWaiters = false;
644 
645  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
646  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
647  lockMethodTable = LockMethods[lockmethodid];
648  if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
649  elog(ERROR, "unrecognized lock mode: %d", lockmode);
650 
651 #ifdef LOCK_DEBUG
652  if (LOCK_DEBUG_ENABLED(locktag))
653  elog(LOG, "LockHasWaiters: lock [%u,%u] %s",
654  locktag->locktag_field1, locktag->locktag_field2,
655  lockMethodTable->lockModeNames[lockmode]);
656 #endif
657 
658  /*
659  * Find the LOCALLOCK entry for this lock and lockmode
660  */
661  MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
662  localtag.lock = *locktag;
663  localtag.mode = lockmode;
664 
665  locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
666  (void *) &localtag,
667  HASH_FIND, NULL);
668 
669  /*
670  * let the caller print its own error message, too. Do not ereport(ERROR).
671  */
672  if (!locallock || locallock->nLocks <= 0)
673  {
674  elog(WARNING, "you don't own a lock of type %s",
675  lockMethodTable->lockModeNames[lockmode]);
676  return false;
677  }
678 
679  /*
680  * Check the shared lock table.
681  */
682  partitionLock = LockHashPartitionLock(locallock->hashcode);
683 
684  LWLockAcquire(partitionLock, LW_SHARED);
685 
686  /*
687  * We don't need to re-find the lock or proclock, since we kept their
688  * addresses in the locallock table, and they couldn't have been removed
689  * while we were holding a lock on them.
690  */
691  lock = locallock->lock;
692  LOCK_PRINT("LockHasWaiters: found", lock, lockmode);
693  proclock = locallock->proclock;
694  PROCLOCK_PRINT("LockHasWaiters: found", proclock);
695 
696  /*
697  * Double-check that we are actually holding a lock of the type we want to
698  * release.
699  */
700  if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
701  {
702  PROCLOCK_PRINT("LockHasWaiters: WRONGTYPE", proclock);
703  LWLockRelease(partitionLock);
704  elog(WARNING, "you don't own a lock of type %s",
705  lockMethodTable->lockModeNames[lockmode]);
706  RemoveLocalLock(locallock);
707  return false;
708  }
709 
710  /*
711  * Do the checking.
712  */
713  if ((lockMethodTable->conflictTab[lockmode] & lock->waitMask) != 0)
714  hasWaiters = true;
715 
716  LWLockRelease(partitionLock);
717 
718  return hasWaiters;
719 }
#define WARNING
Definition: elog.h:30

References LockMethodData::conflictTab, elog, ERROR, HASH_FIND, hash_search(), LOCALLOCK::hashcode, PROCLOCK::holdMask, lengthof, LOCALLOCKTAG::lock, LOCALLOCK::lock, LOCK_PRINT, LOCKBIT_ON, LockHashPartitionLock, LockMethodLocalHash, LockMethods, LockMethodData::lockModeNames, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_lockmethodid, LOG, LW_SHARED, LWLockAcquire(), LWLockRelease(), MemSet, LOCALLOCKTAG::mode, LOCALLOCK::nLocks, LockMethodData::numLockModes, LOCALLOCK::proclock, PROCLOCK_PRINT, RemoveLocalLock(), LOCK::waitMask, and WARNING.

Referenced by LockHasWaitersRelation().
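
A hedged sketch, assuming the relation identified by MyDatabaseId/reloid was locked earlier in this backend with the same mode and sessionLock flag; otherwise LockHasWaiters() only emits a WARNING and returns false.

    LOCKTAG     tag;

    SET_LOCKTAG_RELATION(tag, MyDatabaseId, reloid);    /* reloid is a placeholder */
    if (LockHasWaiters(&tag, ShareUpdateExclusiveLock, false))
        elog(LOG, "other backends are queued behind our lock");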

◆ LockHeldByMe()

bool LockHeldByMe ( const LOCKTAG *  locktag,
LOCKMODE  lockmode 
)

Definition at line 598 of file lock.c.

599 {
600  LOCALLOCKTAG localtag;
601  LOCALLOCK *locallock;
602 
603  /*
604  * See if there is a LOCALLOCK entry for this lock and lockmode
605  */
606  MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
607  localtag.lock = *locktag;
608  localtag.mode = lockmode;
609 
610  locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
611  (void *) &localtag,
612  HASH_FIND, NULL);
613 
614  return (locallock && locallock->nLocks > 0);
615 }

References HASH_FIND, hash_search(), LOCALLOCKTAG::lock, LockMethodLocalHash, MemSet, LOCALLOCKTAG::mode, and LOCALLOCK::nLocks.

Referenced by CheckRelationLockedByMe().
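
A hedged sketch; note that the lookup is per lock mode, so holding a stronger mode on the same object does not make this return true. reloid is a placeholder.

    LOCKTAG     tag;

    SET_LOCKTAG_RELATION(tag, MyDatabaseId, reloid);
    if (!LockHeldByMe(&tag, AccessShareLock))
        elog(ERROR, "expected to hold AccessShareLock on relation %u", reloid);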

◆ LockReassignCurrentOwner()

void LockReassignCurrentOwner ( LOCALLOCK **  locallocks,
int  nlocks 
)

Definition at line 2579 of file lock.c.

2580 {
2581  ResourceOwner parent = ResourceOwnerGetParent(CurrentResourceOwner);
2582 
2583  Assert(parent != NULL);
2584 
2585  if (locallocks == NULL)
2586  {
2587  HASH_SEQ_STATUS status;
2588  LOCALLOCK *locallock;
2589 
2590  hash_seq_init(&status, LockMethodLocalHash);
2591 
2592  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2593  LockReassignOwner(locallock, parent);
2594  }
2595  else
2596  {
2597  int i;
2598 
2599  for (i = nlocks - 1; i >= 0; i--)
2600  LockReassignOwner(locallocks[i], parent);
2601  }
2602 }
static void LockReassignOwner(LOCALLOCK *locallock, ResourceOwner parent)
Definition: lock.c:2609
ResourceOwner ResourceOwnerGetParent(ResourceOwner owner)
Definition: resowner.c:792

References Assert(), CurrentResourceOwner, hash_seq_init(), hash_seq_search(), i, LockMethodLocalHash, LockReassignOwner(), ResourceOwnerGetParent(), and status().

Referenced by ResourceOwnerReleaseInternal().

◆ LockRelease()

bool LockRelease ( const LOCKTAG *  locktag,
LOCKMODE  lockmode,
bool  sessionLock 
)

Definition at line 1975 of file lock.c.

1976 {
1977  LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
1978  LockMethod lockMethodTable;
1979  LOCALLOCKTAG localtag;
1980  LOCALLOCK *locallock;
1981  LOCK *lock;
1982  PROCLOCK *proclock;
1983  LWLock *partitionLock;
1984  bool wakeupNeeded;
1985 
1986  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
1987  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
1988  lockMethodTable = LockMethods[lockmethodid];
1989  if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
1990  elog(ERROR, "unrecognized lock mode: %d", lockmode);
1991 
1992 #ifdef LOCK_DEBUG
1993  if (LOCK_DEBUG_ENABLED(locktag))
1994  elog(LOG, "LockRelease: lock [%u,%u] %s",
1995  locktag->locktag_field1, locktag->locktag_field2,
1996  lockMethodTable->lockModeNames[lockmode]);
1997 #endif
1998 
1999  /*
2000  * Find the LOCALLOCK entry for this lock and lockmode
2001  */
2002  MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
2003  localtag.lock = *locktag;
2004  localtag.mode = lockmode;
2005 
2006  locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
2007  (void *) &localtag,
2008  HASH_FIND, NULL);
2009 
2010  /*
2011  * let the caller print its own error message, too. Do not ereport(ERROR).
2012  */
2013  if (!locallock || locallock->nLocks <= 0)
2014  {
2015  elog(WARNING, "you don't own a lock of type %s",
2016  lockMethodTable->lockModeNames[lockmode]);
2017  return false;
2018  }
2019 
2020  /*
2021  * Decrease the count for the resource owner.
2022  */
2023  {
2024  LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
2025  ResourceOwner owner;
2026  int i;
2027 
2028  /* Identify owner for lock */
2029  if (sessionLock)
2030  owner = NULL;
2031  else
2032  owner = CurrentResourceOwner;
2033 
2034  for (i = locallock->numLockOwners - 1; i >= 0; i--)
2035  {
2036  if (lockOwners[i].owner == owner)
2037  {
2038  Assert(lockOwners[i].nLocks > 0);
2039  if (--lockOwners[i].nLocks == 0)
2040  {
2041  if (owner != NULL)
2042  ResourceOwnerForgetLock(owner, locallock);
2043  /* compact out unused slot */
2044  locallock->numLockOwners--;
2045  if (i < locallock->numLockOwners)
2046  lockOwners[i] = lockOwners[locallock->numLockOwners];
2047  }
2048  break;
2049  }
2050  }
2051  if (i < 0)
2052  {
2053  /* don't release a lock belonging to another owner */
2054  elog(WARNING, "you don't own a lock of type %s",
2055  lockMethodTable->lockModeNames[lockmode]);
2056  return false;
2057  }
2058  }
2059 
2060  /*
2061  * Decrease the total local count. If we're still holding the lock, we're
2062  * done.
2063  */
2064  locallock->nLocks--;
2065 
2066  if (locallock->nLocks > 0)
2067  return true;
2068 
2069  /*
2070  * At this point we can no longer suppose we are clear of invalidation
2071  * messages related to this lock. Although we'll delete the LOCALLOCK
2072  * object before any intentional return from this routine, it seems worth
2073  * the trouble to explicitly reset lockCleared right now, just in case
2074  * some error prevents us from deleting the LOCALLOCK.
2075  */
2076  locallock->lockCleared = false;
2077 
2078  /* Attempt fast release of any lock eligible for the fast path. */
2079  if (EligibleForRelationFastPath(locktag, lockmode) &&
2080  FastPathLocalUseCount > 0)
2081  {
2082  bool released;
2083 
2084  /*
2085  * We might not find the lock here, even if we originally entered it
2086  * here. Another backend may have moved it to the main table.
2087  */
2088  LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
2089  released = FastPathUnGrantRelationLock(locktag->locktag_field2,
2090  lockmode);
2091  LWLockRelease(&MyProc->fpInfoLock);
2092  if (released)
2093  {
2094  RemoveLocalLock(locallock);
2095  return true;
2096  }
2097  }
2098 
2099  /*
2100  * Otherwise we've got to mess with the shared lock table.
2101  */
2102  partitionLock = LockHashPartitionLock(locallock->hashcode);
2103 
2104  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2105 
2106  /*
2107  * Normally, we don't need to re-find the lock or proclock, since we kept
2108  * their addresses in the locallock table, and they couldn't have been
2109  * removed while we were holding a lock on them. But it's possible that
2110  * the lock was taken fast-path and has since been moved to the main hash
2111  * table by another backend, in which case we will need to look up the
2112  * objects here. We assume the lock field is NULL if so.
2113  */
2114  lock = locallock->lock;
2115  if (!lock)
2116  {
2117  PROCLOCKTAG proclocktag;
2118 
2119  Assert(EligibleForRelationFastPath(locktag, lockmode));
2120  lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
2121  (const void *) locktag,
2122  locallock->hashcode,
2123  HASH_FIND,
2124  NULL);
2125  if (!lock)
2126  elog(ERROR, "failed to re-find shared lock object");
2127  locallock->lock = lock;
2128 
2129  proclocktag.myLock = lock;
2130  proclocktag.myProc = MyProc;
2131  locallock->proclock = (PROCLOCK *) hash_search(LockMethodProcLockHash,
2132  (void *) &proclocktag,
2133  HASH_FIND,
2134  NULL);
2135  if (!locallock->proclock)
2136  elog(ERROR, "failed to re-find shared proclock object");
2137  }
2138  LOCK_PRINT("LockRelease: found", lock, lockmode);
2139  proclock = locallock->proclock;
2140  PROCLOCK_PRINT("LockRelease: found", proclock);
2141 
2142  /*
2143  * Double-check that we are actually holding a lock of the type we want to
2144  * release.
2145  */
2146  if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
2147  {
2148  PROCLOCK_PRINT("LockRelease: WRONGTYPE", proclock);
2149  LWLockRelease(partitionLock);
2150  elog(WARNING, "you don't own a lock of type %s",
2151  lockMethodTable->lockModeNames[lockmode]);
2152  RemoveLocalLock(locallock);
2153  return false;
2154  }
2155 
2156  /*
2157  * Do the releasing. CleanUpLock will waken any now-wakable waiters.
2158  */
2159  wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
2160 
2161  CleanUpLock(lock, proclock,
2162  lockMethodTable, locallock->hashcode,
2163  wakeupNeeded);
2164 
2165  LWLockRelease(partitionLock);
2166 
2167  RemoveLocalLock(locallock);
2168  return true;
2169 }
static bool UnGrantLock(LOCK *lock, LOCKMODE lockmode, PROCLOCK *proclock, LockMethod lockMethodTable)
Definition: lock.c:1578
static void CleanUpLock(LOCK *lock, PROCLOCK *proclock, LockMethod lockMethodTable, uint32 hashcode, bool wakeupNeeded)
Definition: lock.c:1635
static bool FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode)
Definition: lock.c:2692
void ResourceOwnerForgetLock(ResourceOwner owner, LOCALLOCK *locallock)
Definition: resowner.c:1006

References Assert(), CleanUpLock(), CurrentResourceOwner, EligibleForRelationFastPath, elog, ERROR, FastPathLocalUseCount, FastPathUnGrantRelationLock(), PGPROC::fpInfoLock, HASH_FIND, hash_search(), hash_search_with_hash_value(), LOCALLOCK::hashcode, PROCLOCK::holdMask, i, lengthof, LOCALLOCKTAG::lock, LOCALLOCK::lock, LOCK_PRINT, LOCKBIT_ON, LOCALLOCK::lockCleared, LockHashPartitionLock, LockMethodLocalHash, LockMethodLockHash, LockMethodProcLockHash, LockMethods, LockMethodData::lockModeNames, LOCALLOCK::lockOwners, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_lockmethodid, LOG, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), MemSet, LOCALLOCKTAG::mode, PROCLOCKTAG::myLock, MyProc, PROCLOCKTAG::myProc, LOCALLOCK::nLocks, LockMethodData::numLockModes, LOCALLOCK::numLockOwners, LOCALLOCK::proclock, PROCLOCK_PRINT, RemoveLocalLock(), ResourceOwnerForgetLock(), UnGrantLock(), and WARNING.

Referenced by ConditionalXactLockTableWait(), pg_advisory_unlock_int4(), pg_advisory_unlock_int8(), pg_advisory_unlock_shared_int4(), pg_advisory_unlock_shared_int8(), ReleaseLockIfHeld(), SpeculativeInsertionLockRelease(), SpeculativeInsertionWait(), StandbyReleaseLockList(), UnlockDatabaseObject(), UnlockPage(), UnlockRelation(), UnlockRelationForExtension(), UnlockRelationId(), UnlockRelationIdForSession(), UnlockRelationOid(), UnlockSharedObject(), UnlockSharedObjectForSession(), UnlockTuple(), VirtualXactLock(), XactLockForVirtualXact(), XactLockTableDelete(), and XactLockTableWait().
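
A hedged sketch of the usual pairing: release with exactly the locktag, lockmode, and sessionLock flag used at acquisition, since each mode is tracked separately. reloid is a placeholder.

    LOCKTAG     tag;

    SET_LOCKTAG_RELATION(tag, MyDatabaseId, reloid);
    (void) LockAcquire(&tag, RowExclusiveLock, false, false);

    /* ... work performed while holding the lock ... */

    if (!LockRelease(&tag, RowExclusiveLock, false))
        elog(WARNING, "RowExclusiveLock on relation %u was not held", reloid);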

◆ LockReleaseAll()

void LockReleaseAll ( LOCKMETHODID  lockmethodid,
bool  allLocks 
)

Definition at line 2180 of file lock.c.

2181 {
2182  HASH_SEQ_STATUS status;
2183  LockMethod lockMethodTable;
2184  int i,
2185  numLockModes;
2186  LOCALLOCK *locallock;
2187  LOCK *lock;
2188  PROCLOCK *proclock;
2189  int partition;
2190  bool have_fast_path_lwlock = false;
2191 
2192  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2193  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2194  lockMethodTable = LockMethods[lockmethodid];
2195 
2196 #ifdef LOCK_DEBUG
2197  if (*(lockMethodTable->trace_flag))
2198  elog(LOG, "LockReleaseAll: lockmethod=%d", lockmethodid);
2199 #endif
2200 
2201  /*
2202  * Get rid of our fast-path VXID lock, if appropriate. Note that this is
2203  * the only way that the lock we hold on our own VXID can ever get
2204  * released: it is always and only released when a toplevel transaction
2205  * ends.
2206  */
2207  if (lockmethodid == DEFAULT_LOCKMETHOD)
2208  VirtualXactLockTableCleanup();
2209 
2210  numLockModes = lockMethodTable->numLockModes;
2211 
2212  /*
2213  * First we run through the locallock table and get rid of unwanted
2214  * entries, then we scan the process's proclocks and get rid of those. We
2215  * do this separately because we may have multiple locallock entries
2216  * pointing to the same proclock, and we daren't end up with any dangling
2217  * pointers. Fast-path locks are cleaned up during the locallock table
2218  * scan, though.
2219  */
2220  hash_seq_init(&status, LockMethodLocalHash);
2221 
2222  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2223  {
2224  /*
2225  * If the LOCALLOCK entry is unused, we must've run out of shared
2226  * memory while trying to set up this lock. Just forget the local
2227  * entry.
2228  */
2229  if (locallock->nLocks == 0)
2230  {
2231  RemoveLocalLock(locallock);
2232  continue;
2233  }
2234 
2235  /* Ignore items that are not of the lockmethod to be removed */
2236  if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
2237  continue;
2238 
2239  /*
2240  * If we are asked to release all locks, we can just zap the entry.
2241  * Otherwise, must scan to see if there are session locks. We assume
2242  * there is at most one lockOwners entry for session locks.
2243  */
2244  if (!allLocks)
2245  {
2246  LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
2247 
2248  /* If session lock is above array position 0, move it down to 0 */
2249  for (i = 0; i < locallock->numLockOwners; i++)
2250  {
2251  if (lockOwners[i].owner == NULL)
2252  lockOwners[0] = lockOwners[i];
2253  else
2254  ResourceOwnerForgetLock(lockOwners[i].owner, locallock);
2255  }
2256 
2257  if (locallock->numLockOwners > 0 &&
2258  lockOwners[0].owner == NULL &&
2259  lockOwners[0].nLocks > 0)
2260  {
2261  /* Fix the locallock to show just the session locks */
2262  locallock->nLocks = lockOwners[0].nLocks;
2263  locallock->numLockOwners = 1;
2264  /* We aren't deleting this locallock, so done */
2265  continue;
2266  }
2267  else
2268  locallock->numLockOwners = 0;
2269  }
2270 
2271  /*
2272  * If the lock or proclock pointers are NULL, this lock was taken via
2273  * the relation fast-path (and is not known to have been transferred).
2274  */
2275  if (locallock->proclock == NULL || locallock->lock == NULL)
2276  {
2277  LOCKMODE lockmode = locallock->tag.mode;
2278  Oid relid;
2279 
2280  /* Verify that a fast-path lock is what we've got. */
2281  if (!EligibleForRelationFastPath(&locallock->tag.lock, lockmode))
2282  elog(PANIC, "locallock table corrupted");
2283 
2284  /*
2285  * If we don't currently hold the LWLock that protects our
2286  * fast-path data structures, we must acquire it before attempting
2287  * to release the lock via the fast-path. We will continue to
2288  * hold the LWLock until we're done scanning the locallock table,
2289  * unless we hit a transferred fast-path lock. (XXX is this
2290  * really such a good idea? There could be a lot of entries ...)
2291  */
2292  if (!have_fast_path_lwlock)
2293  {
2294  LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
2295  have_fast_path_lwlock = true;
2296  }
2297 
2298  /* Attempt fast-path release. */
2299  relid = locallock->tag.lock.locktag_field2;
2300  if (FastPathUnGrantRelationLock(relid, lockmode))
2301  {
2302  RemoveLocalLock(locallock);
2303  continue;
2304  }
2305 
2306  /*
2307  * Our lock, originally taken via the fast path, has been
2308  * transferred to the main lock table. That's going to require
2309  * some extra work, so release our fast-path lock before starting.
2310  */
2311  LWLockRelease(&MyProc->fpInfoLock);
2312  have_fast_path_lwlock = false;
2313 
2314  /*
2315  * Now dump the lock. We haven't got a pointer to the LOCK or
2316  * PROCLOCK in this case, so we have to handle this a bit
2317  * differently than a normal lock release. Unfortunately, this
2318  * requires an extra LWLock acquire-and-release cycle on the
2319  * partitionLock, but hopefully it shouldn't happen often.
2320  */
2321  LockRefindAndRelease(lockMethodTable, MyProc,
2322  &locallock->tag.lock, lockmode, false);
2323  RemoveLocalLock(locallock);
2324  continue;
2325  }
2326 
2327  /* Mark the proclock to show we need to release this lockmode */
2328  if (locallock->nLocks > 0)
2329  locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
2330 
2331  /* And remove the locallock hashtable entry */
2332  RemoveLocalLock(locallock);
2333  }
2334 
2335  /* Done with the fast-path data structures */
2336  if (have_fast_path_lwlock)
2337  LWLockRelease(&MyProc->fpInfoLock);
2338 
2339  /*
2340  * Now, scan each lock partition separately.
2341  */
2342  for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
2343  {
2344  LWLock *partitionLock;
2345  SHM_QUEUE *procLocks = &(MyProc->myProcLocks[partition]);
2346  PROCLOCK *nextplock;
2347 
2348  partitionLock = LockHashPartitionLockByIndex(partition);
2349 
2350  /*
2351  * If the proclock list for this partition is empty, we can skip
2352  * acquiring the partition lock. This optimization is trickier than
2353  * it looks, because another backend could be in process of adding
2354  * something to our proclock list due to promoting one of our
2355  * fast-path locks. However, any such lock must be one that we
2356  * decided not to delete above, so it's okay to skip it again now;
2357  * we'd just decide not to delete it again. We must, however, be
2358  * careful to re-fetch the list header once we've acquired the
2359  * partition lock, to be sure we have a valid, up-to-date pointer.
2360  * (There is probably no significant risk if pointer fetch/store is
2361  * atomic, but we don't wish to assume that.)
2362  *
2363  * XXX This argument assumes that the locallock table correctly
2364  * represents all of our fast-path locks. While allLocks mode
2365  * guarantees to clean up all of our normal locks regardless of the
2366  * locallock situation, we lose that guarantee for fast-path locks.
2367  * This is not ideal.
2368  */
2369  if (SHMQueueNext(procLocks, procLocks,
2370  offsetof(PROCLOCK, procLink)) == NULL)
2371  continue; /* needn't examine this partition */
2372 
2373  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2374 
2375  for (proclock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks,
2376  offsetof(PROCLOCK, procLink));
2377  proclock;
2378  proclock = nextplock)
2379  {
2380  bool wakeupNeeded = false;
2381 
2382  /* Get link first, since we may unlink/delete this proclock */
2383  nextplock = (PROCLOCK *)
2384  SHMQueueNext(procLocks, &proclock->procLink,
2385  offsetof(PROCLOCK, procLink));
2386 
2387  Assert(proclock->tag.myProc == MyProc);
2388 
2389  lock = proclock->tag.myLock;
2390 
2391  /* Ignore items that are not of the lockmethod to be removed */
2392  if (LOCK_LOCKMETHOD(*lock) != lockmethodid)
2393  continue;
2394 
2395  /*
2396  * In allLocks mode, force release of all locks even if locallock
2397  * table had problems
2398  */
2399  if (allLocks)
2400  proclock->releaseMask = proclock->holdMask;
2401  else
2402  Assert((proclock->releaseMask & ~proclock->holdMask) == 0);
2403 
2404  /*
2405  * Ignore items that have nothing to be released, unless they have
2406  * holdMask == 0 and are therefore recyclable
2407  */
2408  if (proclock->releaseMask == 0 && proclock->holdMask != 0)
2409  continue;
2410 
2411  PROCLOCK_PRINT("LockReleaseAll", proclock);
2412  LOCK_PRINT("LockReleaseAll", lock, 0);
2413  Assert(lock->nRequested >= 0);
2414  Assert(lock->nGranted >= 0);
2415  Assert(lock->nGranted <= lock->nRequested);
2416  Assert((proclock->holdMask & ~lock->grantMask) == 0);
2417 
2418  /*
2419  * Release the previously-marked lock modes
2420  */
2421  for (i = 1; i <= numLockModes; i++)
2422  {
2423  if (proclock->releaseMask & LOCKBIT_ON(i))
2424  wakeupNeeded |= UnGrantLock(lock, i, proclock,
2425  lockMethodTable);
2426  }
2427  Assert((lock->nRequested >= 0) && (lock->nGranted >= 0));
2428  Assert(lock->nGranted <= lock->nRequested);
2429  LOCK_PRINT("LockReleaseAll: updated", lock, 0);
2430 
2431  proclock->releaseMask = 0;
2432 
2433  /* CleanUpLock will wake up waiters if needed. */
2434  CleanUpLock(lock, proclock,
2435  lockMethodTable,
2436  LockTagHashCode(&lock->tag),
2437  wakeupNeeded);
2438  } /* loop over PROCLOCKs within this partition */
2439 
2440  LWLockRelease(partitionLock);
2441  } /* loop over partitions */
2442 
2443 #ifdef LOCK_DEBUG
2444  if (*(lockMethodTable->trace_flag))
2445  elog(LOG, "LockReleaseAll done");
2446 #endif
2447 }
void VirtualXactLockTableCleanup(void)
Definition: lock.c:4499
#define LOCALLOCK_LOCKMETHOD(llock)
Definition: lock.h:435
int64 nLocks
Definition: lock.h:415
const bool * trace_flag
Definition: lock.h:118

References Assert(), CleanUpLock(), DEFAULT_LOCKMETHOD, EligibleForRelationFastPath, elog, ERROR, FastPathUnGrantRelationLock(), PGPROC::fpInfoLock, LOCK::grantMask, hash_seq_init(), hash_seq_search(), PROCLOCK::holdMask, i, lengthof, LOCALLOCK_LOCKMETHOD, LOCALLOCKTAG::lock, LOCALLOCK::lock, LOCK_LOCKMETHOD, LOCK_PRINT, LOCKBIT_ON, LockHashPartitionLockByIndex, LockMethodLocalHash, LockMethods, LOCALLOCK::lockOwners, LockRefindAndRelease(), LOCKTAG::locktag_field2, LockTagHashCode(), LOG, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), LOCALLOCKTAG::mode, PROCLOCKTAG::myLock, MyProc, PROCLOCKTAG::myProc, PGPROC::myProcLocks, LOCK::nGranted, LOCALLOCKOWNER::nLocks, LOCALLOCK::nLocks, LOCK::nRequested, NUM_LOCK_PARTITIONS, LockMethodData::numLockModes, LOCALLOCK::numLockOwners, offsetof, LOCALLOCKOWNER::owner, PANIC, PROCLOCK::procLink, LOCALLOCK::proclock, PROCLOCK_PRINT, PROCLOCK::releaseMask, RemoveLocalLock(), ResourceOwnerForgetLock(), SHMQueueNext(), status(), LOCK::tag, PROCLOCK::tag, LOCALLOCK::tag, LockMethodData::trace_flag, UnGrantLock(), and VirtualXactLockTableCleanup().

Referenced by DiscardAll(), ProcReleaseLocks(), and ShutdownPostgres().
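
The call pattern at end of transaction, as in ProcReleaseLocks(), is roughly the following. This is a hedged sketch (the wrapper name is made up), assuming a normal backend context where MyProc is valid:

#include "storage/lock.h"

static void
release_locks_at_xact_end(bool isCommit)
{
    /* Regular locks; on abort also drop session-level ones (allLocks = true). */
    LockReleaseAll(DEFAULT_LOCKMETHOD, !isCommit);

    /* Transaction-level advisory locks are always released at this point. */
    LockReleaseAll(USER_LOCKMETHOD, false);
}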

◆ LockReleaseCurrentOwner()

void LockReleaseCurrentOwner ( LOCALLOCK **  locallocks,
int  nlocks 
)

Definition at line 2484 of file lock.c.

2485 {
2486  if (locallocks == NULL)
2487  {
2488  HASH_SEQ_STATUS status;
2489  LOCALLOCK *locallock;
2490 
2491  hash_seq_init(&status, LockMethodLocalHash);
2492 
2493  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2494  ReleaseLockIfHeld(locallock, false);
2495  }
2496  else
2497  {
2498  int i;
2499 
2500  for (i = nlocks - 1; i >= 0; i--)
2501  ReleaseLockIfHeld(locallocks[i], false);
2502  }
2503 }
static void ReleaseLockIfHeld(LOCALLOCK *locallock, bool sessionLock)
Definition: lock.c:2519

References hash_seq_init(), hash_seq_search(), i, LockMethodLocalHash, ReleaseLockIfHeld(), and status().

Referenced by ResourceOwnerReleaseInternal().
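
In rough terms this is the resource-owner cleanup path: if the owner remembered its LOCALLOCK pointers, only those entries are visited; otherwise the backend's whole locallock hash table is scanned. A hedged fragment, with remembered_locks and nremembered as assumed caller-side variables:

/* Release only the locks remembered for CurrentResourceOwner ... */
LockReleaseCurrentOwner(remembered_locks, nremembered);

/* ... or scan the backend's entire locallock table if nothing was remembered. */
LockReleaseCurrentOwner(NULL, 0);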

◆ LockReleaseSession()

void LockReleaseSession ( LOCKMETHODID  lockmethodid)

Definition at line 2454 of file lock.c.

2455 {
2456  HASH_SEQ_STATUS status;
2457  LOCALLOCK *locallock;
2458 
2459  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2460  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2461 
2462  hash_seq_init(&status, LockMethodLocalHash);
2463 
2464  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2465  {
2466  /* Ignore items that are not of the specified lock method */
2467  if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
2468  continue;
2469 
2470  ReleaseLockIfHeld(locallock, true);
2471  }
2472 }

References elog, ERROR, hash_seq_init(), hash_seq_search(), lengthof, LOCALLOCK_LOCKMETHOD, LockMethodLocalHash, LockMethods, ReleaseLockIfHeld(), and status().

Referenced by pg_advisory_unlock_all().
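
pg_advisory_unlock_all() simply passes the user lock method, dropping every session-level advisory lock the backend still holds; a one-line sketch:

LockReleaseSession(USER_LOCKMETHOD);    /* release all session-level advisory locks */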

◆ LockShmemSize()

Size LockShmemSize ( void  )

Definition at line 3609 of file lock.c.

3610 {
3611  Size size = 0;
3612  long max_table_size;
3613 
3614  /* lock hash table */
3615  max_table_size = NLOCKENTS();
3616  size = add_size(size, hash_estimate_size(max_table_size, sizeof(LOCK)));
3617 
3618  /* proclock hash table */
3619  max_table_size *= 2;
3620  size = add_size(size, hash_estimate_size(max_table_size, sizeof(PROCLOCK)));
3621 
3622  /*
3623  * Since NLOCKENTS is only an estimate, add 10% safety margin.
3624  */
3625  size = add_size(size, size / 10);
3626 
3627  return size;
3628 }
size_t Size
Definition: c.h:540
Size hash_estimate_size(long num_entries, Size entrysize)
Definition: dynahash.c:780
Size add_size(Size s1, Size s2)
Definition: shmem.c:502

References add_size(), hash_estimate_size(), and NLOCKENTS.

Referenced by CalculateShmemSize().
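
The estimate is folded into the total shared-memory request in the same way as the other subsystems' estimates; a hedged fragment of that accumulation pattern, using add_size() to guard against overflow:

Size size = 0;

size = add_size(size, LockShmemSize());
/* ... the other subsystems' estimates are added the same way ... */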

◆ LockTagHashCode()

uint32 LockTagHashCode ( const LOCKTAG locktag)

Definition at line 517 of file lock.c.

518 {
519  return get_hash_value(LockMethodLockHash, (const void *) locktag);
520 }
uint32 get_hash_value(HTAB *hashp, const void *keyPtr)
Definition: dynahash.c:908

References get_hash_value(), and LockMethodLockHash.

Referenced by CheckDeadLock(), GetLockConflicts(), lock_twophase_recover(), LockAcquireExtended(), LockRefindAndRelease(), LockReleaseAll(), LockWaiterCount(), proclock_hash(), and VirtualXactLock().
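
Callers usually compute the hash once and reuse it both for the shared hash-table lookup and for picking the partition LWLock; a hedged fragment, where some_relid stands in for a relation OID the caller already has:

LOCKTAG  tag;
uint32   hashcode;
LWLock  *partitionLock;

SET_LOCKTAG_RELATION(tag, MyDatabaseId, some_relid);    /* some_relid: assumed */
hashcode = LockTagHashCode(&tag);
partitionLock = LockHashPartitionLock(hashcode);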

◆ LockWaiterCount()

int LockWaiterCount ( const LOCKTAG locktag)

Definition at line 4710 of file lock.c.

4711 {
4712  LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
4713  LOCK *lock;
4714  bool found;
4715  uint32 hashcode;
4716  LWLock *partitionLock;
4717  int waiters = 0;
4718 
4719  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4720  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4721 
4722  hashcode = LockTagHashCode(locktag);
4723  partitionLock = LockHashPartitionLock(hashcode);
4724  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4725 
4726  lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
4727  (const void *) locktag,
4728  hashcode,
4729  HASH_FIND,
4730  &found);
4731  if (found)
4732  {
4733  Assert(lock != NULL);
4734  waiters = lock->nRequested;
4735  }
4736  LWLockRelease(partitionLock);
4737 
4738  return waiters;
4739 }

References Assert(), elog, ERROR, HASH_FIND, hash_search_with_hash_value(), lengthof, LockHashPartitionLock, LockMethodLockHash, LockMethods, LOCKTAG::locktag_lockmethodid, LockTagHashCode(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), and LOCK::nRequested.

Referenced by RelationExtensionLockWaiterCount().
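
RelationExtensionLockWaiterCount() is essentially a thin wrapper that builds the relation-extension lock tag and calls this function; a hedged sketch of that pattern (the helper name is invented):

#include "storage/lock.h"

static int
extension_lock_waiters(Oid dboid, Oid reloid)
{
    LOCKTAG  tag;

    SET_LOCKTAG_RELATION_EXTEND(tag, dboid, reloid);
    return LockWaiterCount(&tag);
}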

◆ MarkLockClear()

void MarkLockClear ( LOCALLOCK locallock)

Definition at line 1799 of file lock.c.

1800 {
1801  Assert(locallock->nLocks > 0);
1802  locallock->lockCleared = true;
1803 }

References Assert(), LOCALLOCK::lockCleared, and LOCALLOCK::nLocks.

Referenced by ConditionalLockRelation(), ConditionalLockRelationOid(), LockRelation(), and LockRelationOid().
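
The callers listed above follow the pattern of acquiring the lock through LockAcquireExtended(), processing any pending invalidation messages, and then recording that fact so a repeat acquisition can skip the work; a hedged fragment (tag and lockmode are assumed to be set up by the caller, cf. LockRelationOid() in lmgr.c):

LOCALLOCK          *locallock;
LockAcquireResult   res;

res = LockAcquireExtended(&tag, lockmode, false, false, true, &locallock);
if (res != LOCKACQUIRE_ALREADY_CLEAR)
{
    AcceptInvalidationMessages();   /* flush stale relcache entries first */
    MarkLockClear(locallock);       /* remember that this has been done */
}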

◆ PostPrepare_Locks()

void PostPrepare_Locks ( TransactionId  xid)

Definition at line 3417 of file lock.c.

3418 {
3419  PGPROC *newproc = TwoPhaseGetDummyProc(xid, false);
3420  HASH_SEQ_STATUS status;
3421  LOCALLOCK *locallock;
3422  LOCK *lock;
3423  PROCLOCK *proclock;
3424  PROCLOCKTAG proclocktag;
3425  int partition;
3426 
3427  /* Can't prepare a lock group follower. */
3428  Assert(MyProc->lockGroupLeader == NULL ||
3429  MyProc->lockGroupLeader == MyProc);
3430 
3431  /* This is a critical section: any error means big trouble */
3432  START_CRIT_SECTION();
3433 
3434  /*
3435  * First we run through the locallock table and get rid of unwanted
3436  * entries, then we scan the process's proclocks and transfer them to the
3437  * target proc.
3438  *
3439  * We do this separately because we may have multiple locallock entries
3440  * pointing to the same proclock, and we daren't end up with any dangling
3441  * pointers.
3442  */
3443  hash_seq_init(&status, LockMethodLocalHash);
3444 
3445  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3446  {
3447  LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3448  bool haveSessionLock;
3449  bool haveXactLock;
3450  int i;
3451 
3452  if (locallock->proclock == NULL || locallock->lock == NULL)
3453  {
3454  /*
3455  * We must've run out of shared memory while trying to set up this
3456  * lock. Just forget the local entry.
3457  */
3458  Assert(locallock->nLocks == 0);
3459  RemoveLocalLock(locallock);
3460  continue;
3461  }
3462 
3463  /* Ignore VXID locks */
3464  if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3465  continue;
3466 
3467  /* Scan to see whether we hold it at session or transaction level */
3468  haveSessionLock = haveXactLock = false;
3469  for (i = locallock->numLockOwners - 1; i >= 0; i--)
3470  {
3471  if (lockOwners[i].owner == NULL)
3472  haveSessionLock = true;
3473  else
3474  haveXactLock = true;
3475  }
3476 
3477  /* Ignore it if we have only session lock */
3478  if (!haveXactLock)
3479  continue;
3480 
3481  /* This can't happen, because we already checked it */
3482  if (haveSessionLock)
3483  ereport(PANIC,
3484  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3485  errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3486 
3487  /* Mark the proclock to show we need to release this lockmode */
3488  if (locallock->nLocks > 0)
3489  locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
3490 
3491  /* And remove the locallock hashtable entry */
3492  RemoveLocalLock(locallock);
3493  }
3494 
3495  /*
3496  * Now, scan each lock partition separately.
3497  */
3498  for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
3499  {
3500  LWLock *partitionLock;
3501  SHM_QUEUE *procLocks = &(MyProc->myProcLocks[partition]);
3502  PROCLOCK *nextplock;
3503 
3504  partitionLock = LockHashPartitionLockByIndex(partition);
3505 
3506  /*
3507  * If the proclock list for this partition is empty, we can skip
3508  * acquiring the partition lock. This optimization is safer than the
3509  * situation in LockReleaseAll, because we got rid of any fast-path
3510  * locks during AtPrepare_Locks, so there cannot be any case where
3511  * another backend is adding something to our lists now. For safety,
3512  * though, we code this the same way as in LockReleaseAll.
3513  */
3514  if (SHMQueueNext(procLocks, procLocks,
3515  offsetof(PROCLOCK, procLink)) == NULL)
3516  continue; /* needn't examine this partition */
3517 
3518  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
3519 
3520  for (proclock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks,
3521  offsetof(PROCLOCK, procLink));
3522  proclock;
3523  proclock = nextplock)
3524  {
3525  /* Get link first, since we may unlink/relink this proclock */
3526  nextplock = (PROCLOCK *)
3527  SHMQueueNext(procLocks, &proclock->procLink,
3528  offsetof(PROCLOCK, procLink));
3529 
3530  Assert(proclock->tag.myProc == MyProc);
3531 
3532  lock = proclock->tag.myLock;
3533 
3534  /* Ignore VXID locks */
3535  if (lock->tag.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3536  continue;
3537 
3538  PROCLOCK_PRINT("PostPrepare_Locks", proclock);
3539  LOCK_PRINT("PostPrepare_Locks", lock, 0);
3540  Assert(lock->nRequested >= 0);
3541  Assert(lock->nGranted >= 0);
3542  Assert(lock->nGranted <= lock->nRequested);
3543  Assert((proclock->holdMask & ~lock->grantMask) == 0);
3544 
3545  /* Ignore it if nothing to release (must be a session lock) */
3546  if (proclock->releaseMask == 0)
3547  continue;
3548 
3549  /* Else we should be releasing all locks */
3550  if (proclock->releaseMask != proclock->holdMask)
3551  elog(PANIC, "we seem to have dropped a bit somewhere");
3552 
3553  /*
3554  * We cannot simply modify proclock->tag.myProc to reassign
3555  * ownership of the lock, because that's part of the hash key and
3556  * the proclock would then be in the wrong hash chain. Instead
3557  * use hash_update_hash_key. (We used to create a new hash entry,
3558  * but that risks out-of-memory failure if other processes are
3559  * busy making proclocks too.) We must unlink the proclock from
3560  * our procLink chain and put it into the new proc's chain, too.
3561  *
3562  * Note: the updated proclock hash key will still belong to the
3563  * same hash partition, cf proclock_hash(). So the partition lock
3564  * we already hold is sufficient for this.
3565  */
3566  SHMQueueDelete(&proclock->procLink);
3567 
3568  /*
3569  * Create the new hash key for the proclock.
3570  */
3571  proclocktag.myLock = lock;
3572  proclocktag.myProc = newproc;
3573 
3574  /*
3575  * Update groupLeader pointer to point to the new proc. (We'd
3576  * better not be a member of somebody else's lock group!)
3577  */
3578  Assert(proclock->groupLeader == proclock->tag.myProc);
3579  proclock->groupLeader = newproc;
3580 
3581  /*
3582  * Update the proclock. We should not find any existing entry for
3583  * the same hash key, since there can be only one entry for any
3584  * given lock with my own proc.
3585  */
3586  if (!hash_update_hash_key(LockMethodProcLockHash,
3587  (void *) proclock,
3588  (void *) &proclocktag))
3589  elog(PANIC, "duplicate entry found while reassigning a prepared transaction's locks");
3590 
3591  /* Re-link into the new proc's proclock list */
3592  SHMQueueInsertBefore(&(newproc->myProcLocks[partition]),
3593  &proclock->procLink);
3594 
3595  PROCLOCK_PRINT("PostPrepare_Locks: updated", proclock);
3596  } /* loop over PROCLOCKs within this partition */
3597 
3598  LWLockRelease(partitionLock);
3599  } /* loop over partitions */
3600 
3601  END_CRIT_SECTION();
3602 }
bool hash_update_hash_key(HTAB *hashp, void *existingEntry, const void *newKeyPtr)
Definition: dynahash.c:1162
#define START_CRIT_SECTION()
Definition: miscadmin.h:147
#define END_CRIT_SECTION()
Definition: miscadmin.h:149

References Assert(), elog, END_CRIT_SECTION, ereport, errcode(), errmsg(), LOCK::grantMask, PROCLOCK::groupLeader, hash_seq_init(), hash_seq_search(), hash_update_hash_key(), PROCLOCK::holdMask, i, LOCALLOCKTAG::lock, LOCALLOCK::lock, LOCK_PRINT, LOCKBIT_ON, PGPROC::lockGroupLeader, LockHashPartitionLockByIndex, LockMethodLocalHash, LockMethodProcLockHash, LOCALLOCK::lockOwners, LOCKTAG::locktag_type, LOCKTAG_VIRTUALTRANSACTION, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), LOCALLOCKTAG::mode, PROCLOCKTAG::myLock, MyProc, PROCLOCKTAG::myProc, PGPROC::myProcLocks, LOCK::nGranted, LOCALLOCK::nLocks, LOCK::nRequested, NUM_LOCK_PARTITIONS, LOCALLOCK::numLockOwners, offsetof, PANIC, PROCLOCK::procLink, LOCALLOCK::proclock, PROCLOCK_PRINT, PROCLOCK::releaseMask, RemoveLocalLock(), SHMQueueDelete(), SHMQueueInsertBefore(), SHMQueueNext(), START_CRIT_SECTION, status(), LOCK::tag, PROCLOCK::tag, LOCALLOCK::tag, and TwoPhaseGetDummyProc().

Referenced by PrepareTransaction().
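
In PrepareTransaction() this runs only after the two-phase state has been written out: AtPrepare_Locks() records the held locks while they still belong to this backend, and PostPrepare_Locks() then reassigns them to the dummy PGPROC representing the prepared transaction. A hedged sketch of that ordering:

TransactionId xid = GetCurrentTransactionId();

AtPrepare_Locks();          /* emit two-phase records for the locks we hold */
/* ... write and sync the rest of the two-phase state ... */
PostPrepare_Locks(xid);     /* hand the locks over to the dummy PGPROC */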

◆ RememberSimpleDeadLock()

void RememberSimpleDeadLock ( PGPROC proc1,
LOCKMODE  lockmode,
LOCK lock,
PGPROC proc2 
)

Definition at line 1162 of file deadlock.c.

1166 {
1167  DEADLOCK_INFO *info = &deadlockDetails[0];
1168 
1169  info->locktag = lock->tag;
1170  info->lockmode = lockmode;
1171  info->pid = proc1->pid;
1172  info++;
1173  info->locktag = proc2->waitLock->tag;
1174  info->lockmode = proc2->waitLockMode;
1175  info->pid = proc2->pid;
1176  nDeadlockDetails = 2;
1177 }

References deadlockDetails, DEADLOCK_INFO::lockmode, DEADLOCK_INFO::locktag, nDeadlockDetails, DEADLOCK_INFO::pid, PGPROC::pid, LOCK::tag, PGPROC::waitLock, and PGPROC::waitLockMode.

Referenced by ProcSleep().

◆ RemoveFromWaitQueue()

void RemoveFromWaitQueue ( PGPROC proc,
uint32  hashcode 
)

Definition at line 1918 of file lock.c.

1919 {
1920  LOCK *waitLock = proc->waitLock;
1921  PROCLOCK *proclock = proc->waitProcLock;
1922  LOCKMODE lockmode = proc->waitLockMode;
1923  LOCKMETHODID lockmethodid = LOCK_LOCKMETHOD(*waitLock);
1924 
1925  /* Make sure proc is waiting */
1926  Assert(proc->waitStatus == PROC_WAIT_STATUS_WAITING);
1927  Assert(proc->links.next != NULL);
1928  Assert(waitLock);
1929  Assert(waitLock->waitProcs.size > 0);
1930  Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
1931 
1932  /* Remove proc from lock's wait queue */
1933  SHMQueueDelete(&(proc->links));
1934  waitLock->waitProcs.size--;
1935 
1936  /* Undo increments of request counts by waiting process */
1937  Assert(waitLock->nRequested > 0);
1938  Assert(waitLock->nRequested > proc->waitLock->nGranted);
1939  waitLock->nRequested--;
1940  Assert(waitLock->requested[lockmode] > 0);
1941  waitLock->requested[lockmode]--;
1942  /* don't forget to clear waitMask bit if appropriate */
1943  if (waitLock->granted[lockmode] == waitLock->requested[lockmode])
1944  waitLock->waitMask &= LOCKBIT_OFF(lockmode);
1945 
1946  /* Clean up the proc's own state, and pass it the ok/fail signal */
1947  proc->waitLock = NULL;
1948  proc->waitProcLock = NULL;
1949  proc->waitStatus = PROC_WAIT_STATUS_ERROR;
1950 
1951  /*
1952  * Delete the proclock immediately if it represents no already-held locks.
1953  * (This must happen now because if the owner of the lock decides to
1954  * release it, and the requested/granted counts then go to zero,
1955  * LockRelease expects there to be no remaining proclocks.) Then see if
1956  * any other waiters for the lock can be woken up now.
1957  */
1958  CleanUpLock(waitLock, proclock,
1959  LockMethods[lockmethodid], hashcode,
1960  true);
1961 }
@ PROC_WAIT_STATUS_WAITING
Definition: proc.h:88
@ PROC_WAIT_STATUS_ERROR
Definition: proc.h:89
PROCLOCK * waitProcLock
Definition: proc.h:183
SHM_QUEUE links
Definition: proc.h:127
ProcWaitStatus waitStatus
Definition: proc.h:131
struct SHM_QUEUE * next
Definition: shmem.h:31

References Assert(), CleanUpLock(), LOCK::granted, lengthof, PGPROC::links, LOCK_LOCKMETHOD, LOCKBIT_OFF, LockMethods, SHM_QUEUE::next, LOCK::nGranted, LOCK::nRequested, PROC_WAIT_STATUS_ERROR, PROC_WAIT_STATUS_WAITING, LOCK::requested, SHMQueueDelete(), PROC_QUEUE::size, PGPROC::waitLock, PGPROC::waitLockMode, LOCK::waitMask, PGPROC::waitProcLock, LOCK::waitProcs, and PGPROC::waitStatus.

Referenced by CheckDeadLock(), LockErrorCleanup(), and ProcSleep().
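
The caller must already hold the partition lock covering the waited-for lock's hash code, which it normally saved when it queued up; a hedged fragment in the style of LockErrorCleanup(), with awaited_hashcode as an assumed saved value:

LWLock *partitionLock = LockHashPartitionLock(awaited_hashcode);

LWLockAcquire(partitionLock, LW_EXCLUSIVE);
if (MyProc->links.next != NULL)     /* still on a wait queue? */
    RemoveFromWaitQueue(MyProc, awaited_hashcode);
LWLockRelease(partitionLock);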

◆ VirtualXactLock()

bool VirtualXactLock ( VirtualTransactionId  vxid,
bool  wait 
)

Definition at line 4599 of file lock.c.

4600 {
4601  LOCKTAG tag;
4602  PGPROC *proc;
4603  TransactionId xid = InvalidTransactionId;
4604 
4605  Assert(VirtualTransactionIdIsValid(vxid));
4606 
4607  if (VirtualTransactionIdIsRecoveredPreparedXact(vxid))
4608  /* no vxid lock; localTransactionId is a normal, locked XID */
4609  return XactLockForVirtualXact(vxid, vxid.localTransactionId, wait);
4610 
4611  SET_LOCKTAG_VIRTUALTRANSACTION(tag, vxid);
4612 
4613  /*
4614  * If a lock table entry must be made, this is the PGPROC on whose behalf
4615  * it must be done. Note that the transaction might end or the PGPROC
4616  * might be reassigned to a new backend before we get around to examining
4617  * it, but it doesn't matter. If we find upon examination that the
4618  * relevant lxid is no longer running here, that's enough to prove that
4619  * it's no longer running anywhere.
4620  */
4621  proc = BackendIdGetProc(vxid.backendId);
4622  if (proc == NULL)
4623  return XactLockForVirtualXact(vxid, InvalidTransactionId, wait);
4624 
4625  /*
4626  * We must acquire this lock before checking the backendId and lxid
4627  * against the ones we're waiting for. The target backend will only set
4628  * or clear lxid while holding this lock.
4629  */
4630  LWLockAcquire(&proc->fpInfoLock, LW_EXCLUSIVE);
4631 
4632  if (proc->backendId != vxid.backendId
4633  || proc->fpLocalTransactionId != vxid.localTransactionId)
4634  {
4635  /* VXID ended */
4636  LWLockRelease(&proc->fpInfoLock);
4637  return XactLockForVirtualXact(vxid, InvalidTransactionId, wait);
4638  }
4639 
4640  /*
4641  * If we aren't asked to wait, there's no need to set up a lock table
4642  * entry. The transaction is still in progress, so just return false.
4643  */
4644  if (!wait)
4645  {
4646  LWLockRelease(&proc->fpInfoLock);
4647  return false;
4648  }
4649 
4650  /*
4651  * OK, we're going to need to sleep on the VXID. But first, we must set
4652  * up the primary lock table entry, if needed (ie, convert the proc's
4653  * fast-path lock on its VXID to a regular lock).
4654  */
4655  if (proc->fpVXIDLock)
4656  {
4657  PROCLOCK *proclock;
4658  uint32 hashcode;
4659  LWLock *partitionLock;
4660 
4661  hashcode = LockTagHashCode(&tag);
4662 
4663  partitionLock = LockHashPartitionLock(hashcode);
4664  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4665 
4666  proclock = SetupLockInTable(LockMethods[DEFAULT_LOCKMETHOD], MyProc,
4667  &tag, hashcode, ExclusiveLock);
4668  if (!proclock)
4669  {
4670  LWLockRelease(partitionLock);
4671  LWLockRelease(&proc->fpInfoLock);
4672  ereport(ERROR,
4673  (errcode(ERRCODE_OUT_OF_MEMORY),
4674  errmsg("out of shared memory"),
4675  errhint("You might need to increase max_locks_per_transaction.")));
4676  }
4677  GrantLock(proclock->tag.myLock, proclock, ExclusiveLock);
4678 
4679  LWLockRelease(partitionLock);
4680 
4681  proc->fpVXIDLock = false;
4682  }
4683 
4684  /*
4685  * If the proc has an XID now, we'll avoid a TwoPhaseGetXidByVirtualXID()
4686  * search. The proc might have assigned this XID but not yet locked it,
4687  * in which case the proc will lock this XID before releasing the VXID.
4688  * The fpInfoLock critical section excludes VirtualXactLockTableCleanup(),
4689  * so we won't save an XID of a different VXID. It doesn't matter whether
4690  * we save this before or after setting up the primary lock table entry.
4691  */
4692  xid = proc->xid;
4693 
4694  /* Done with proc->fpLockBits */
4695  LWLockRelease(&proc->fpInfoLock);
4696 
4697  /* Time to wait. */
4698  (void) LockAcquire(&tag, ShareLock, false, false);
4699 
4700  LockRelease(&tag, ShareLock, false);
4701  return XactLockForVirtualXact(vxid, xid, wait);
4702 }
static bool XactLockForVirtualXact(VirtualTransactionId vxid, TransactionId xid, bool wait)
Definition: lock.c:4548
LockAcquireResult LockAcquire(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock, bool dontWait)
Definition: lock.c:747
bool LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
Definition: lock.c:1975
#define VirtualTransactionIdIsRecoveredPreparedXact(vxid)
Definition: lock.h:74
#define ShareLock
Definition: lockdefs.h:40
PGPROC * BackendIdGetProc(int backendID)
Definition: sinvaladt.c:376
#define InvalidTransactionId
Definition: transam.h:31

References Assert(), VirtualTransactionId::backendId, PGPROC::backendId, BackendIdGetProc(), DEFAULT_LOCKMETHOD, ereport, errcode(), errhint(), errmsg(), ERROR, ExclusiveLock, PGPROC::fpInfoLock, PGPROC::fpLocalTransactionId, PGPROC::fpVXIDLock, GrantLock(), InvalidTransactionId, VirtualTransactionId::localTransactionId, LockAcquire(), LockHashPartitionLock, LockMethods, LockRelease(), LockTagHashCode(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), PROCLOCKTAG::myLock, SET_LOCKTAG_VIRTUALTRANSACTION, SetupLockInTable(), ShareLock, PROCLOCK::tag, VirtualTransactionIdIsRecoveredPreparedXact, VirtualTransactionIdIsValid, XactLockForVirtualXact(), and PGPROC::xid.

Referenced by ResolveRecoveryConflictWithVirtualXIDs(), WaitForLockersMultiple(), and WaitForOlderSnapshots().
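
Callers such as WaitForLockersMultiple() first collect the conflicting virtual transaction IDs and then wait on each one; a hedged fragment, assuming tag has already been filled in with one of the SET_LOCKTAG_* macros:

VirtualTransactionId *vxids;
int      i;

vxids = GetLockConflicts(&tag, AccessExclusiveLock, NULL);  /* palloc'd array, ends with an invalid VXID */
for (i = 0; VirtualTransactionIdIsValid(vxids[i]); i++)
    (void) VirtualXactLock(vxids[i], true);                 /* true = block until that VXID finishes */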

◆ VirtualXactLockTableCleanup()

void VirtualXactLockTableCleanup ( void  )

Definition at line 4499 of file lock.c.

4500 {
4501  bool fastpath;
4502  LocalTransactionId lxid;
4503 
4504  Assert(MyProc->backendId != InvalidBackendId);
4505 
4506  /*
4507  * Clean up shared memory state.
4508  */
4509  LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
4510 
4511  fastpath = MyProc->fpVXIDLock;
4512  lxid = MyProc->fpLocalTransactionId;
4513  MyProc->fpVXIDLock = false;
4514  MyProc->fpLocalTransactionId = InvalidLocalTransactionId;
4515 
4516  LWLockRelease(&MyProc->fpInfoLock);
4517 
4518  /*
4519  * If fpVXIDLock has been cleared without touching fpLocalTransactionId,
4520  * that means someone transferred the lock to the main lock table.
4521  */
4522  if (!fastpath && LocalTransactionIdIsValid(lxid))
4523  {
4524  VirtualTransactionId vxid;
4525  LOCKTAG locktag;
4526 
4527  vxid.backendId = MyBackendId;
4528  vxid.localTransactionId = lxid;
4529  SET_LOCKTAG_VIRTUALTRANSACTION(locktag, vxid);
4530 
4531  LockRefindAndRelease(LockMethods[DEFAULT_LOCKMETHOD], MyProc,
4532  &locktag, ExclusiveLock, false);
4533  }
4534 }
uint32 LocalTransactionId
Definition: c.h:589
BackendId MyBackendId
Definition: globals.c:84
#define LocalTransactionIdIsValid(lxid)
Definition: lock.h:71

References Assert(), VirtualTransactionId::backendId, PGPROC::backendId, DEFAULT_LOCKMETHOD, ExclusiveLock, PGPROC::fpInfoLock, PGPROC::fpLocalTransactionId, PGPROC::fpVXIDLock, InvalidBackendId, InvalidLocalTransactionId, VirtualTransactionId::localTransactionId, LocalTransactionIdIsValid, LockMethods, LockRefindAndRelease(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), MyBackendId, MyProc, and SET_LOCKTAG_VIRTUALTRANSACTION.

Referenced by LockReleaseAll(), and ShutdownRecoveryTransactionEnvironment().

◆ VirtualXactLockTableInsert()

Variable Documentation

◆ LockTagTypeNames

const char* const LockTagTypeNames[]
extern

Definition at line 29 of file lockfuncs.c.

Referenced by GetLockNameFromTagType(), and pg_lock_status().

◆ max_locks_per_xact

int max_locks_per_xact
extern

Definition at line 55 of file lock.c.

Referenced by CheckRequiredParameterValues(), InitControlFile(), and XLogReportParameters().