PostgreSQL Source Code  git master
lock.h File Reference
#include "lib/ilist.h"
#include "storage/backendid.h"
#include "storage/lockdefs.h"
#include "storage/lwlock.h"
#include "storage/shmem.h"
#include "utils/timestamp.h"
Include dependency graph for lock.h:
This graph shows which files directly or indirectly include this file:

Go to the source code of this file.

Data Structures

struct  VirtualTransactionId
 
struct  LockMethodData
 
struct  LOCKTAG
 
struct  LOCK
 
struct  PROCLOCKTAG
 
struct  PROCLOCK
 
struct  LOCALLOCKTAG
 
struct  LOCALLOCKOWNER
 
struct  LOCALLOCK
 
struct  LockInstanceData
 
struct  LockData
 
struct  BlockedProcData
 
struct  BlockedProcsData
 

Macros

#define InvalidLocalTransactionId   0
 
#define LocalTransactionIdIsValid(lxid)   ((lxid) != InvalidLocalTransactionId)
 
#define VirtualTransactionIdIsValid(vxid)    (LocalTransactionIdIsValid((vxid).localTransactionId))
 
#define VirtualTransactionIdIsRecoveredPreparedXact(vxid)    ((vxid).backendId == InvalidBackendId)
 
#define VirtualTransactionIdEquals(vxid1, vxid2)
 
#define SetInvalidVirtualTransactionId(vxid)
 
#define GET_VXID_FROM_PGPROC(vxid, proc)
 
#define MAX_LOCKMODES   10
 
#define LOCKBIT_ON(lockmode)   (1 << (lockmode))
 
#define LOCKBIT_OFF(lockmode)   (~(1 << (lockmode)))
 
#define DEFAULT_LOCKMETHOD   1
 
#define USER_LOCKMETHOD   2
 
#define LOCKTAG_LAST_TYPE   LOCKTAG_APPLY_TRANSACTION
 
#define SET_LOCKTAG_RELATION(locktag, dboid, reloid)
 
#define SET_LOCKTAG_RELATION_EXTEND(locktag, dboid, reloid)
 
#define SET_LOCKTAG_DATABASE_FROZEN_IDS(locktag, dboid)
 
#define SET_LOCKTAG_PAGE(locktag, dboid, reloid, blocknum)
 
#define SET_LOCKTAG_TUPLE(locktag, dboid, reloid, blocknum, offnum)
 
#define SET_LOCKTAG_TRANSACTION(locktag, xid)
 
#define SET_LOCKTAG_VIRTUALTRANSACTION(locktag, vxid)
 
#define SET_LOCKTAG_SPECULATIVE_INSERTION(locktag, xid, token)
 
#define SET_LOCKTAG_OBJECT(locktag, dboid, classoid, objoid, objsubid)
 
#define SET_LOCKTAG_ADVISORY(locktag, id1, id2, id3, id4)
 
#define SET_LOCKTAG_APPLY_TRANSACTION(locktag, dboid, suboid, xid, objid)
 
#define LOCK_LOCKMETHOD(lock)   ((LOCKMETHODID) (lock).tag.locktag_lockmethodid)
 
#define LOCK_LOCKTAG(lock)   ((LockTagType) (lock).tag.locktag_type)
 
#define PROCLOCK_LOCKMETHOD(proclock)    LOCK_LOCKMETHOD(*((proclock).tag.myLock))
 
#define LOCALLOCK_LOCKMETHOD(llock)   ((llock).tag.lock.locktag_lockmethodid)
 
#define LOCALLOCK_LOCKTAG(llock)   ((LockTagType) (llock).tag.lock.locktag_type)
 
#define LockHashPartition(hashcode)    ((hashcode) % NUM_LOCK_PARTITIONS)
 
#define LockHashPartitionLock(hashcode)
 
#define LockHashPartitionLockByIndex(i)    (&MainLWLockArray[LOCK_MANAGER_LWLOCK_OFFSET + (i)].lock)
 
#define LockHashPartitionLockByProc(leader_pgproc)    LockHashPartitionLock((leader_pgproc)->pgprocno)
 

Typedefs

typedef struct PGPROC PGPROC
 
typedef struct LockMethodData LockMethodData
 
typedef const LockMethodData * LockMethod
 
typedef uint16 LOCKMETHODID
 
typedef enum LockTagType LockTagType
 
typedef struct LOCKTAG LOCKTAG
 
typedef struct LOCK LOCK
 
typedef struct PROCLOCKTAG PROCLOCKTAG
 
typedef struct PROCLOCK PROCLOCK
 
typedef struct LOCALLOCKTAG LOCALLOCKTAG
 
typedef struct LOCALLOCKOWNER LOCALLOCKOWNER
 
typedef struct LOCALLOCK LOCALLOCK
 
typedef struct LockInstanceData LockInstanceData
 
typedef struct LockData LockData
 
typedef struct BlockedProcData BlockedProcData
 
typedef struct BlockedProcsData BlockedProcsData
 

Enumerations

enum  LockTagType {
  LOCKTAG_RELATION , LOCKTAG_RELATION_EXTEND , LOCKTAG_DATABASE_FROZEN_IDS , LOCKTAG_PAGE ,
  LOCKTAG_TUPLE , LOCKTAG_TRANSACTION , LOCKTAG_VIRTUALTRANSACTION , LOCKTAG_SPECULATIVE_TOKEN ,
  LOCKTAG_OBJECT , LOCKTAG_USERLOCK , LOCKTAG_ADVISORY , LOCKTAG_APPLY_TRANSACTION
}
 
enum  LockAcquireResult { LOCKACQUIRE_NOT_AVAIL , LOCKACQUIRE_OK , LOCKACQUIRE_ALREADY_HELD , LOCKACQUIRE_ALREADY_CLEAR }
 
enum  DeadLockState {
  DS_NOT_YET_CHECKED , DS_NO_DEADLOCK , DS_SOFT_DEADLOCK , DS_HARD_DEADLOCK ,
  DS_BLOCKED_BY_AUTOVACUUM
}
 

Functions

void InitLocks (void)
 
LockMethod GetLocksMethodTable (const LOCK *lock)
 
LockMethod GetLockTagsMethodTable (const LOCKTAG *locktag)
 
uint32 LockTagHashCode (const LOCKTAG *locktag)
 
bool DoLockModesConflict (LOCKMODE mode1, LOCKMODE mode2)
 
LockAcquireResult LockAcquire (const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock, bool dontWait)
 
LockAcquireResult LockAcquireExtended (const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock, bool dontWait, bool reportMemoryError, LOCALLOCK **locallockp)
 
void AbortStrongLockAcquire (void)
 
void MarkLockClear (LOCALLOCK *locallock)
 
bool LockRelease (const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
 
void LockReleaseAll (LOCKMETHODID lockmethodid, bool allLocks)
 
void LockReleaseSession (LOCKMETHODID lockmethodid)
 
void LockReleaseCurrentOwner (LOCALLOCK **locallocks, int nlocks)
 
void LockReassignCurrentOwner (LOCALLOCK **locallocks, int nlocks)
 
bool LockHeldByMe (const LOCKTAG *locktag, LOCKMODE lockmode)
 
bool LockHasWaiters (const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
 
VirtualTransactionId * GetLockConflicts (const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
 
void AtPrepare_Locks (void)
 
void PostPrepare_Locks (TransactionId xid)
 
bool LockCheckConflicts (LockMethod lockMethodTable, LOCKMODE lockmode, LOCK *lock, PROCLOCK *proclock)
 
void GrantLock (LOCK *lock, PROCLOCK *proclock, LOCKMODE lockmode)
 
void GrantAwaitedLock (void)
 
void RemoveFromWaitQueue (PGPROC *proc, uint32 hashcode)
 
Size LockShmemSize (void)
 
LockData * GetLockStatusData (void)
 
BlockedProcsData * GetBlockerStatusData (int blocked_pid)
 
xl_standby_lock * GetRunningTransactionLocks (int *nlocks)
 
const char * GetLockmodeName (LOCKMETHODID lockmethodid, LOCKMODE mode)
 
void lock_twophase_recover (TransactionId xid, uint16 info, void *recdata, uint32 len)
 
void lock_twophase_postcommit (TransactionId xid, uint16 info, void *recdata, uint32 len)
 
void lock_twophase_postabort (TransactionId xid, uint16 info, void *recdata, uint32 len)
 
void lock_twophase_standby_recover (TransactionId xid, uint16 info, void *recdata, uint32 len)
 
DeadLockState DeadLockCheck (PGPROC *proc)
 
PGPROC * GetBlockingAutoVacuumPgproc (void)
 
void DeadLockReport (void) pg_attribute_noreturn()
 
void RememberSimpleDeadLock (PGPROC *proc1, LOCKMODE lockmode, LOCK *lock, PGPROC *proc2)
 
void InitDeadLockChecking (void)
 
int LockWaiterCount (const LOCKTAG *locktag)
 
void VirtualXactLockTableInsert (VirtualTransactionId vxid)
 
void VirtualXactLockTableCleanup (void)
 
bool VirtualXactLock (VirtualTransactionId vxid, bool wait)
 

Variables

PGDLLIMPORT int max_locks_per_xact
 
PGDLLIMPORT const char *const LockTagTypeNames []
 

Macro Definition Documentation

◆ DEFAULT_LOCKMETHOD

#define DEFAULT_LOCKMETHOD   1

Definition at line 125 of file lock.h.

◆ GET_VXID_FROM_PGPROC

#define GET_VXID_FROM_PGPROC (   vxid,
  proc 
)
Value:
((vxid).backendId = (proc).backendId, \
(vxid).localTransactionId = (proc).lxid)

Definition at line 77 of file lock.h.
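
For illustration only (not part of lock.h): a backend can derive its own virtual transaction ID from its PGPROC entry like this, assuming normal backend context where MyProc and elog() are available.

    VirtualTransactionId vxid;

    /* Copy backendId and lxid out of this backend's PGPROC. */
    GET_VXID_FROM_PGPROC(vxid, *MyProc);

    if (VirtualTransactionIdIsValid(vxid))
        elog(LOG, "my vxid is %d/%u", vxid.backendId, vxid.localTransactionId);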

◆ InvalidLocalTransactionId

#define InvalidLocalTransactionId   0

Definition at line 65 of file lock.h.

◆ LOCALLOCK_LOCKMETHOD

#define LOCALLOCK_LOCKMETHOD (   llock)    ((llock).tag.lock.locktag_lockmethodid)

Definition at line 443 of file lock.h.

◆ LOCALLOCK_LOCKTAG

#define LOCALLOCK_LOCKTAG (   llock)    ((LockTagType) (llock).tag.lock.locktag_type)

Definition at line 444 of file lock.h.

◆ LocalTransactionIdIsValid

#define LocalTransactionIdIsValid (   lxid)    ((lxid) != InvalidLocalTransactionId)

Definition at line 66 of file lock.h.

◆ LOCK_LOCKMETHOD

#define LOCK_LOCKMETHOD (   lock)    ((LOCKMETHODID) (lock).tag.locktag_lockmethodid)

Definition at line 324 of file lock.h.

◆ LOCK_LOCKTAG

#define LOCK_LOCKTAG (   lock)    ((LockTagType) (lock).tag.locktag_type)

Definition at line 325 of file lock.h.

◆ LOCKBIT_OFF

#define LOCKBIT_OFF (   lockmode)    (~(1 << (lockmode)))

Definition at line 85 of file lock.h.

◆ LOCKBIT_ON

#define LOCKBIT_ON (   lockmode)    (1 << (lockmode))

Definition at line 84 of file lock.h.
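
A small sketch (illustrative values, not from the source) of how the lock-mode bit macros compose and edit a LOCKMASK:

    LOCKMASK held = LOCKBIT_ON(AccessShareLock) | LOCKBIT_ON(RowExclusiveLock);

    held &= LOCKBIT_OFF(AccessShareLock);   /* clear one mode from the mask */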

◆ LockHashPartition

#define LockHashPartition (   hashcode)     ((hashcode) % NUM_LOCK_PARTITIONS)

Definition at line 525 of file lock.h.

◆ LockHashPartitionLock

#define LockHashPartitionLock (   hashcode)
Value:
(&MainLWLockArray[LOCK_MANAGER_LWLOCK_OFFSET + \
LockHashPartition(hashcode)].lock)
LWLockPadded * MainLWLockArray
Definition: lwlock.c:212
#define LOCK_MANAGER_LWLOCK_OFFSET
Definition: lwlock.h:106
LWLock lock
Definition: lwlock.h:68

Definition at line 527 of file lock.h.
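
A rough sketch of how the partition macros fit together, assuming a LOCKTAG variable named locktag that has already been filled in (the variable and the surrounding code are hypothetical):

    uint32  hashcode = LockTagHashCode(&locktag);
    LWLock *partitionLock = LockHashPartitionLock(hashcode);

    LWLockAcquire(partitionLock, LW_SHARED);
    /* ... examine the shared lock table entry for this tag ... */
    LWLockRelease(partitionLock);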

◆ LockHashPartitionLockByIndex

#define LockHashPartitionLockByIndex (   i)     (&MainLWLockArray[LOCK_MANAGER_LWLOCK_OFFSET + (i)].lock)

Definition at line 530 of file lock.h.

◆ LockHashPartitionLockByProc

#define LockHashPartitionLockByProc (   leader_pgproc)     LockHashPartitionLock((leader_pgproc)->pgprocno)

Definition at line 542 of file lock.h.

◆ LOCKTAG_LAST_TYPE

#define LOCKTAG_LAST_TYPE   LOCKTAG_APPLY_TRANSACTION

Definition at line 152 of file lock.h.

◆ MAX_LOCKMODES

#define MAX_LOCKMODES   10

Definition at line 82 of file lock.h.

◆ PROCLOCK_LOCKMETHOD

#define PROCLOCK_LOCKMETHOD (   proclock)     LOCK_LOCKMETHOD(*((proclock).tag.myLock))

Definition at line 382 of file lock.h.

◆ SET_LOCKTAG_ADVISORY

#define SET_LOCKTAG_ADVISORY (   locktag,
  id1,
  id2,
  id3,
  id4 
)
Value:
((locktag).locktag_field1 = (id1), \
(locktag).locktag_field2 = (id2), \
(locktag).locktag_field3 = (id3), \
(locktag).locktag_field4 = (id4), \
(locktag).locktag_type = LOCKTAG_ADVISORY, \
(locktag).locktag_lockmethodid = USER_LOCKMETHOD)
@ LOCKTAG_ADVISORY
Definition: lock.h:147
#define USER_LOCKMETHOD
Definition: lock.h:126

Definition at line 270 of file lock.h.
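
A usage sketch, not taken from the source: building a two-int4-key advisory lock tag (field4 = 2 matches what the built-in advisory-lock SQL functions use) and taking it as a session-level lock. MyDatabaseId and the key values are assumptions for the example.

    LOCKTAG tag;

    /* Hypothetical advisory lock on keys (12345, 67890) in the current database. */
    SET_LOCKTAG_ADVISORY(tag, MyDatabaseId, 12345, 67890, 2);

    (void) LockAcquire(&tag, ExclusiveLock, true, false);   /* sessionLock, wait */
    /* ... */
    LockRelease(&tag, ExclusiveLock, true);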

◆ SET_LOCKTAG_APPLY_TRANSACTION

#define SET_LOCKTAG_APPLY_TRANSACTION (   locktag,
  dboid,
  suboid,
  xid,
  objid 
)
Value:
((locktag).locktag_field1 = (dboid), \
(locktag).locktag_field2 = (suboid), \
(locktag).locktag_field3 = (xid), \
(locktag).locktag_field4 = (objid), \
(locktag).locktag_type = LOCKTAG_APPLY_TRANSACTION, \
(locktag).locktag_lockmethodid = DEFAULT_LOCKMETHOD)
#define DEFAULT_LOCKMETHOD
Definition: lock.h:125
@ LOCKTAG_APPLY_TRANSACTION
Definition: lock.h:148

Definition at line 282 of file lock.h.

◆ SET_LOCKTAG_DATABASE_FROZEN_IDS

#define SET_LOCKTAG_DATABASE_FROZEN_IDS (   locktag,
  dboid 
)
Value:
((locktag).locktag_field1 = (dboid), \
(locktag).locktag_field2 = 0, \
(locktag).locktag_field3 = 0, \
(locktag).locktag_field4 = 0, \
(locktag).locktag_type = LOCKTAG_DATABASE_FROZEN_IDS, \
(locktag).locktag_lockmethodid = DEFAULT_LOCKMETHOD)
@ LOCKTAG_DATABASE_FROZEN_IDS
Definition: lock.h:139

Definition at line 199 of file lock.h.

◆ SET_LOCKTAG_OBJECT

#define SET_LOCKTAG_OBJECT (   locktag,
  dboid,
  classoid,
  objoid,
  objsubid 
)
Value:
((locktag).locktag_field1 = (dboid), \
(locktag).locktag_field2 = (classoid), \
(locktag).locktag_field3 = (objoid), \
(locktag).locktag_field4 = (objsubid), \
(locktag).locktag_type = LOCKTAG_OBJECT, \
(locktag).locktag_lockmethodid = DEFAULT_LOCKMETHOD)
@ LOCKTAG_OBJECT
Definition: lock.h:145

Definition at line 262 of file lock.h.

◆ SET_LOCKTAG_PAGE

#define SET_LOCKTAG_PAGE (   locktag,
  dboid,
  reloid,
  blocknum 
)
Value:
((locktag).locktag_field1 = (dboid), \
(locktag).locktag_field2 = (reloid), \
(locktag).locktag_field3 = (blocknum), \
(locktag).locktag_field4 = 0, \
(locktag).locktag_type = LOCKTAG_PAGE, \
(locktag).locktag_lockmethodid = DEFAULT_LOCKMETHOD)
@ LOCKTAG_PAGE
Definition: lock.h:140

Definition at line 208 of file lock.h.

◆ SET_LOCKTAG_RELATION

#define SET_LOCKTAG_RELATION (   locktag,
  dboid,
  reloid 
)
Value:
((locktag).locktag_field1 = (dboid), \
(locktag).locktag_field2 = (reloid), \
(locktag).locktag_field3 = 0, \
(locktag).locktag_field4 = 0, \
(locktag).locktag_type = LOCKTAG_RELATION, \
(locktag).locktag_lockmethodid = DEFAULT_LOCKMETHOD)
@ LOCKTAG_RELATION
Definition: lock.h:137

Definition at line 181 of file lock.h.
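
As a sketch only (real callers normally go through the lmgr.c wrappers such as LockRelationOid()), a relation lock tag could be built and acquired directly like this; reloid is a hypothetical relation OID:

    LOCKTAG tag;

    SET_LOCKTAG_RELATION(tag, MyDatabaseId, reloid);
    (void) LockAcquire(&tag, AccessShareLock, false, false);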

◆ SET_LOCKTAG_RELATION_EXTEND

#define SET_LOCKTAG_RELATION_EXTEND (   locktag,
  dboid,
  reloid 
)
Value:
((locktag).locktag_field1 = (dboid), \
(locktag).locktag_field2 = (reloid), \
(locktag).locktag_field3 = 0, \
(locktag).locktag_field4 = 0, \
(locktag).locktag_type = LOCKTAG_RELATION_EXTEND, \
(locktag).locktag_lockmethodid = DEFAULT_LOCKMETHOD)
@ LOCKTAG_RELATION_EXTEND
Definition: lock.h:138

Definition at line 190 of file lock.h.

◆ SET_LOCKTAG_SPECULATIVE_INSERTION

#define SET_LOCKTAG_SPECULATIVE_INSERTION (   locktag,
  xid,
  token 
)
Value:
((locktag).locktag_field1 = (xid), \
(locktag).locktag_field2 = (token), \
(locktag).locktag_field3 = 0, \
(locktag).locktag_field4 = 0, \
(locktag).locktag_type = LOCKTAG_SPECULATIVE_TOKEN, \
(locktag).locktag_lockmethodid = DEFAULT_LOCKMETHOD)
@ LOCKTAG_SPECULATIVE_TOKEN
Definition: lock.h:144

Definition at line 247 of file lock.h.

◆ SET_LOCKTAG_TRANSACTION

#define SET_LOCKTAG_TRANSACTION (   locktag,
  xid 
)
Value:
((locktag).locktag_field1 = (xid), \
(locktag).locktag_field2 = 0, \
(locktag).locktag_field3 = 0, \
(locktag).locktag_field4 = 0, \
(locktag).locktag_type = LOCKTAG_TRANSACTION, \
(locktag).locktag_lockmethodid = DEFAULT_LOCKMETHOD)
@ LOCKTAG_TRANSACTION
Definition: lock.h:142

Definition at line 226 of file lock.h.

◆ SET_LOCKTAG_TUPLE

#define SET_LOCKTAG_TUPLE (   locktag,
  dboid,
  reloid,
  blocknum,
  offnum 
)
Value:
((locktag).locktag_field1 = (dboid), \
(locktag).locktag_field2 = (reloid), \
(locktag).locktag_field3 = (blocknum), \
(locktag).locktag_field4 = (offnum), \
(locktag).locktag_type = LOCKTAG_TUPLE, \
(locktag).locktag_lockmethodid = DEFAULT_LOCKMETHOD)
@ LOCKTAG_TUPLE
Definition: lock.h:141

Definition at line 217 of file lock.h.

◆ SET_LOCKTAG_VIRTUALTRANSACTION

#define SET_LOCKTAG_VIRTUALTRANSACTION (   locktag,
  vxid 
)
Value:
((locktag).locktag_field1 = (vxid).backendId, \
(locktag).locktag_field2 = (vxid).localTransactionId, \
(locktag).locktag_field3 = 0, \
(locktag).locktag_field4 = 0, \
(locktag).locktag_type = LOCKTAG_VIRTUALTRANSACTION, \
(locktag).locktag_lockmethodid = DEFAULT_LOCKMETHOD)
@ LOCKTAG_VIRTUALTRANSACTION
Definition: lock.h:143

Definition at line 235 of file lock.h.

◆ SetInvalidVirtualTransactionId

#define SetInvalidVirtualTransactionId (   vxid)
Value:
((vxid).backendId = InvalidBackendId, \
(vxid).localTransactionId = InvalidLocalTransactionId)
#define InvalidBackendId
Definition: backendid.h:23
#define InvalidLocalTransactionId
Definition: lock.h:65

Definition at line 74 of file lock.h.

◆ USER_LOCKMETHOD

#define USER_LOCKMETHOD   2

Definition at line 126 of file lock.h.

◆ VirtualTransactionIdEquals

#define VirtualTransactionIdEquals (   vxid1,
  vxid2 
)
Value:
((vxid1).backendId == (vxid2).backendId && \
(vxid1).localTransactionId == (vxid2).localTransactionId)

Definition at line 71 of file lock.h.

◆ VirtualTransactionIdIsRecoveredPreparedXact

#define VirtualTransactionIdIsRecoveredPreparedXact (   vxid)     ((vxid).backendId == InvalidBackendId)

Definition at line 69 of file lock.h.

◆ VirtualTransactionIdIsValid

#define VirtualTransactionIdIsValid (   vxid)     (LocalTransactionIdIsValid((vxid).localTransactionId))

Definition at line 67 of file lock.h.

Typedef Documentation

◆ BlockedProcData

typedef struct BlockedProcData BlockedProcData

◆ BlockedProcsData

typedef struct BlockedProcsData BlockedProcsData

◆ LOCALLOCK

typedef struct LOCALLOCK LOCALLOCK

◆ LOCALLOCKOWNER

typedef struct LOCALLOCKOWNER LOCALLOCKOWNER

◆ LOCALLOCKTAG

typedef struct LOCALLOCKTAG LOCALLOCKTAG

◆ LOCK

typedef struct LOCK LOCK

◆ LockData

typedef struct LockData LockData

◆ LockInstanceData

typedef struct LockInstanceData LockInstanceData

◆ LockMethod

typedef const LockMethodData* LockMethod

Definition at line 116 of file lock.h.

◆ LockMethodData

typedef struct LockMethodData LockMethodData

◆ LOCKMETHODID

typedef uint16 LOCKMETHODID

Definition at line 122 of file lock.h.

◆ LOCKTAG

typedef struct LOCKTAG LOCKTAG

◆ LockTagType

typedef enum LockTagType LockTagType

◆ PGPROC

typedef struct PGPROC PGPROC

Definition at line 1 of file lock.h.

◆ PROCLOCK

typedef struct PROCLOCK PROCLOCK

◆ PROCLOCKTAG

typedef struct PROCLOCKTAG PROCLOCKTAG

Enumeration Type Documentation

◆ DeadLockState

Enumerator
DS_NOT_YET_CHECKED 
DS_NO_DEADLOCK 
DS_SOFT_DEADLOCK 
DS_HARD_DEADLOCK 
DS_BLOCKED_BY_AUTOVACUUM 

Definition at line 509 of file lock.h.

510 {
511  DS_NOT_YET_CHECKED, /* no deadlock check has run yet */
512  DS_NO_DEADLOCK, /* no deadlock detected */
513  DS_SOFT_DEADLOCK, /* deadlock avoided by queue rearrangement */
514  DS_HARD_DEADLOCK, /* deadlock, no way out but ERROR */
515  DS_BLOCKED_BY_AUTOVACUUM /* no deadlock; queue blocked by autovacuum
516  * worker */
517 } DeadLockState;
DeadLockState
Definition: lock.h:510
@ DS_HARD_DEADLOCK
Definition: lock.h:514
@ DS_BLOCKED_BY_AUTOVACUUM
Definition: lock.h:515
@ DS_NO_DEADLOCK
Definition: lock.h:512
@ DS_NOT_YET_CHECKED
Definition: lock.h:511
@ DS_SOFT_DEADLOCK
Definition: lock.h:513

◆ LockAcquireResult

Enumerator
LOCKACQUIRE_NOT_AVAIL 
LOCKACQUIRE_OK 
LOCKACQUIRE_ALREADY_HELD 
LOCKACQUIRE_ALREADY_CLEAR 

Definition at line 500 of file lock.h.

501 {
502  LOCKACQUIRE_NOT_AVAIL, /* lock not available, and dontWait=true */
503  LOCKACQUIRE_OK, /* lock successfully acquired */
504  LOCKACQUIRE_ALREADY_HELD, /* incremented count for lock already held */
505  LOCKACQUIRE_ALREADY_CLEAR /* incremented count for lock already clear */
506 } LockAcquireResult;
LockAcquireResult
Definition: lock.h:501
@ LOCKACQUIRE_ALREADY_CLEAR
Definition: lock.h:505
@ LOCKACQUIRE_OK
Definition: lock.h:503
@ LOCKACQUIRE_ALREADY_HELD
Definition: lock.h:504
@ LOCKACQUIRE_NOT_AVAIL
Definition: lock.h:502
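
A minimal sketch (hypothetical reloid, backend context assumed) of conditional acquisition with dontWait = true and handling of each possible result:

    LOCKTAG tag;

    SET_LOCKTAG_RELATION(tag, MyDatabaseId, reloid);

    switch (LockAcquire(&tag, ShareLock, false, true))  /* dontWait = true */
    {
        case LOCKACQUIRE_NOT_AVAIL:
            /* a conflicting lock is held; retry later or give up */
            break;
        case LOCKACQUIRE_OK:
        case LOCKACQUIRE_ALREADY_HELD:
        case LOCKACQUIRE_ALREADY_CLEAR:
            /* the lock is now held (possibly recursively) */
            break;
    }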

◆ LockTagType

Enumerator
LOCKTAG_RELATION 
LOCKTAG_RELATION_EXTEND 
LOCKTAG_DATABASE_FROZEN_IDS 
LOCKTAG_PAGE 
LOCKTAG_TUPLE 
LOCKTAG_TRANSACTION 
LOCKTAG_VIRTUALTRANSACTION 
LOCKTAG_SPECULATIVE_TOKEN 
LOCKTAG_OBJECT 
LOCKTAG_USERLOCK 
LOCKTAG_ADVISORY 
LOCKTAG_APPLY_TRANSACTION 

Definition at line 135 of file lock.h.

136 {
137  LOCKTAG_RELATION, /* whole relation */
138  LOCKTAG_RELATION_EXTEND, /* the right to extend a relation */
139  LOCKTAG_DATABASE_FROZEN_IDS, /* pg_database.datfrozenxid */
140  LOCKTAG_PAGE, /* one page of a relation */
141  LOCKTAG_TUPLE, /* one physical tuple */
142  LOCKTAG_TRANSACTION, /* transaction (for waiting for xact done) */
143  LOCKTAG_VIRTUALTRANSACTION, /* virtual transaction (ditto) */
144  LOCKTAG_SPECULATIVE_TOKEN, /* speculative insertion Xid and token */
145  LOCKTAG_OBJECT, /* non-relation database object */
146  LOCKTAG_USERLOCK, /* reserved for old contrib/userlock code */
147  LOCKTAG_ADVISORY, /* advisory user locks */
148  LOCKTAG_APPLY_TRANSACTION /* transaction being applied on a logical
149  * replication subscriber */
150 } LockTagType;
LockTagType
Definition: lock.h:136
@ LOCKTAG_USERLOCK
Definition: lock.h:146

Function Documentation

◆ AbortStrongLockAcquire()

void AbortStrongLockAcquire ( void  )

Definition at line 1752 of file lock.c.

1753 {
1754  uint32 fasthashcode;
1755  LOCALLOCK *locallock = StrongLockInProgress;
1756 
1757  if (locallock == NULL)
1758  return;
1759 
1760  fasthashcode = FastPathStrongLockHashPartition(locallock->hashcode);
1761  Assert(locallock->holdsStrongLockCount == true);
1762  SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
1763  Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
1764  FastPathStrongRelationLocks->count[fasthashcode]--;
1765  locallock->holdsStrongLockCount = false;
1766  StrongLockInProgress = NULL;
1767  SpinLockRelease(&FastPathStrongRelationLocks->mutex);
1768 }
unsigned int uint32
Definition: c.h:490
Assert(fmt[strlen(fmt) - 1] !='\n')
#define FastPathStrongLockHashPartition(hashcode)
Definition: lock.c:263
static volatile FastPathStrongRelationLockData * FastPathStrongRelationLocks
Definition: lock.c:272
static LOCALLOCK * StrongLockInProgress
Definition: lock.c:287
#define SpinLockRelease(lock)
Definition: spin.h:64
#define SpinLockAcquire(lock)
Definition: spin.h:62
uint32 count[FAST_PATH_STRONG_LOCK_HASH_PARTITIONS]
Definition: lock.c:269
uint32 hashcode
Definition: lock.h:432
bool holdsStrongLockCount
Definition: lock.h:439

References Assert(), FastPathStrongRelationLockData::count, FastPathStrongLockHashPartition, FastPathStrongRelationLocks, LOCALLOCK::hashcode, LOCALLOCK::holdsStrongLockCount, FastPathStrongRelationLockData::mutex, SpinLockAcquire, SpinLockRelease, and StrongLockInProgress.

Referenced by LockAcquireExtended(), and LockErrorCleanup().

◆ AtPrepare_Locks()

void AtPrepare_Locks ( void  )

Definition at line 3299 of file lock.c.

3300 {
3301  HASH_SEQ_STATUS status;
3302  LOCALLOCK *locallock;
3303 
3304  /* First, verify there aren't locks of both xact and session level */
3305  CheckForSessionAndXactLocks();
3306 
3307  /* Now do the per-locallock cleanup work */
3308  hash_seq_init(&status, LockMethodLocalHash);
3309 
3310  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3311  {
3312  TwoPhaseLockRecord record;
3313  LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3314  bool haveSessionLock;
3315  bool haveXactLock;
3316  int i;
3317 
3318  /*
3319  * Ignore VXID locks. We don't want those to be held by prepared
3320  * transactions, since they aren't meaningful after a restart.
3321  */
3322  if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3323  continue;
3324 
3325  /* Ignore it if we don't actually hold the lock */
3326  if (locallock->nLocks <= 0)
3327  continue;
3328 
3329  /* Scan to see whether we hold it at session or transaction level */
3330  haveSessionLock = haveXactLock = false;
3331  for (i = locallock->numLockOwners - 1; i >= 0; i--)
3332  {
3333  if (lockOwners[i].owner == NULL)
3334  haveSessionLock = true;
3335  else
3336  haveXactLock = true;
3337  }
3338 
3339  /* Ignore it if we have only session lock */
3340  if (!haveXactLock)
3341  continue;
3342 
3343  /* This can't happen, because we already checked it */
3344  if (haveSessionLock)
3345  ereport(ERROR,
3346  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3347  errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3348 
3349  /*
3350  * If the local lock was taken via the fast-path, we need to move it
3351  * to the primary lock table, or just get a pointer to the existing
3352  * primary lock table entry if by chance it's already been
3353  * transferred.
3354  */
3355  if (locallock->proclock == NULL)
3356  {
3357  locallock->proclock = FastPathGetRelationLockEntry(locallock);
3358  locallock->lock = locallock->proclock->tag.myLock;
3359  }
3360 
3361  /*
3362  * Arrange to not release any strong lock count held by this lock
3363  * entry. We must retain the count until the prepared transaction is
3364  * committed or rolled back.
3365  */
3366  locallock->holdsStrongLockCount = false;
3367 
3368  /*
3369  * Create a 2PC record.
3370  */
3371  memcpy(&(record.locktag), &(locallock->tag.lock), sizeof(LOCKTAG));
3372  record.lockmode = locallock->tag.mode;
3373 
3373 
3374  RegisterTwoPhaseRecord(TWOPHASE_RM_LOCK_ID, 0,
3375  &record, sizeof(TwoPhaseLockRecord));
3376  }
3377 }
void * hash_seq_search(HASH_SEQ_STATUS *status)
Definition: dynahash.c:1431
void hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp)
Definition: dynahash.c:1421
int errcode(int sqlerrcode)
Definition: elog.c:858
int errmsg(const char *fmt,...)
Definition: elog.c:1069
#define ERROR
Definition: elog.h:39
#define ereport(elevel,...)
Definition: elog.h:149
int i
Definition: isn.c:73
static PROCLOCK * FastPathGetRelationLockEntry(LOCALLOCK *locallock)
Definition: lock.c:2795
static HTAB * LockMethodLocalHash
Definition: lock.c:283
static void CheckForSessionAndXactLocks(void)
Definition: lock.c:3211
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:224
LOCKTAG lock
Definition: lock.h:410
LOCKMODE mode
Definition: lock.h:411
LOCALLOCKOWNER * lockOwners
Definition: lock.h:438
LOCK * lock
Definition: lock.h:433
int64 nLocks
Definition: lock.h:435
int numLockOwners
Definition: lock.h:436
PROCLOCK * proclock
Definition: lock.h:434
LOCALLOCKTAG tag
Definition: lock.h:429
Definition: lock.h:165
uint8 locktag_type
Definition: lock.h:170
LOCK * myLock
Definition: lock.h:365
PROCLOCKTAG tag
Definition: lock.h:372
LOCKTAG locktag
Definition: lock.c:161
LOCKMODE lockmode
Definition: lock.c:162
void RegisterTwoPhaseRecord(TwoPhaseRmgrId rmid, uint16 info, const void *data, uint32 len)
Definition: twophase.c:1257
#define TWOPHASE_RM_LOCK_ID
Definition: twophase_rmgr.h:25

References CheckForSessionAndXactLocks(), ereport, errcode(), errmsg(), ERROR, FastPathGetRelationLockEntry(), hash_seq_init(), hash_seq_search(), LOCALLOCK::holdsStrongLockCount, i, LOCALLOCKTAG::lock, LOCALLOCK::lock, LockMethodLocalHash, TwoPhaseLockRecord::lockmode, LOCALLOCK::lockOwners, TwoPhaseLockRecord::locktag, LOCKTAG::locktag_type, LOCKTAG_VIRTUALTRANSACTION, LOCALLOCKTAG::mode, PROCLOCKTAG::myLock, LOCALLOCK::nLocks, LOCALLOCK::numLockOwners, LOCALLOCK::proclock, RegisterTwoPhaseRecord(), status(), PROCLOCK::tag, LOCALLOCK::tag, and TWOPHASE_RM_LOCK_ID.

Referenced by PrepareTransaction().

◆ DeadLockCheck()

DeadLockState DeadLockCheck ( PGPROC *  proc)

Definition at line 217 of file deadlock.c.

218 {
219  /* Initialize to "no constraints" */
220  nCurConstraints = 0;
221  nPossibleConstraints = 0;
222  nWaitOrders = 0;
223 
224  /* Initialize to not blocked by an autovacuum worker */
225  blocking_autovacuum_proc = NULL;
226 
227  /* Search for deadlocks and possible fixes */
228  if (DeadLockCheckRecurse(proc))
229  {
230  /*
231  * Call FindLockCycle one more time, to record the correct
232  * deadlockDetails[] for the basic state with no rearrangements.
233  */
234  int nSoftEdges;
235 
236  TRACE_POSTGRESQL_DEADLOCK_FOUND();
237 
238  nWaitOrders = 0;
239  if (!FindLockCycle(proc, possibleConstraints, &nSoftEdges))
240  elog(FATAL, "deadlock seems to have disappeared");
241 
242  return DS_HARD_DEADLOCK; /* cannot find a non-deadlocked state */
243  }
244 
245  /* Apply any needed rearrangements of wait queues */
246  for (int i = 0; i < nWaitOrders; i++)
247  {
248  LOCK *lock = waitOrders[i].lock;
249  PGPROC **procs = waitOrders[i].procs;
250  int nProcs = waitOrders[i].nProcs;
251  dclist_head *waitQueue = &lock->waitProcs;
252 
253  Assert(nProcs == dclist_count(waitQueue));
254 
255 #ifdef DEBUG_DEADLOCK
256  PrintLockQueue(lock, "DeadLockCheck:");
257 #endif
258 
259  /* Reset the queue and re-add procs in the desired order */
260  dclist_init(waitQueue);
261  for (int j = 0; j < nProcs; j++)
262  dclist_push_tail(waitQueue, &procs[j]->links);
263 
264 #ifdef DEBUG_DEADLOCK
265  PrintLockQueue(lock, "rearranged to:");
266 #endif
267 
268  /* See if any waiters for the lock can be woken up now */
269  ProcLockWakeup(GetLocksMethodTable(lock), lock);
270  }
271 
272  /* Return code tells caller if we had to escape a deadlock or not */
273  if (nWaitOrders > 0)
274  return DS_SOFT_DEADLOCK;
275  else if (blocking_autovacuum_proc != NULL)
276  return DS_BLOCKED_BY_AUTOVACUUM;
277  else
278  return DS_NO_DEADLOCK;
279 }
static WAIT_ORDER * waitOrders
Definition: deadlock.c:111
static bool FindLockCycle(PGPROC *checkProc, EDGE *softEdges, int *nSoftEdges)
Definition: deadlock.c:443
static bool DeadLockCheckRecurse(PGPROC *proc)
Definition: deadlock.c:309
static EDGE * possibleConstraints
Definition: deadlock.c:121
static int nWaitOrders
Definition: deadlock.c:112
static int nCurConstraints
Definition: deadlock.c:117
static PGPROC * blocking_autovacuum_proc
Definition: deadlock.c:128
static int nPossibleConstraints
Definition: deadlock.c:122
#define FATAL
Definition: elog.h:41
static void dclist_push_tail(dclist_head *head, dlist_node *node)
Definition: ilist.h:709
static uint32 dclist_count(const dclist_head *head)
Definition: ilist.h:932
static void dclist_init(dclist_head *head)
Definition: ilist.h:671
int j
Definition: isn.c:74
LockMethod GetLocksMethodTable(const LOCK *lock)
Definition: lock.c:487
void ProcLockWakeup(LockMethod lockMethodTable, LOCK *lock)
Definition: proc.c:1637
Definition: lock.h:309
dclist_head waitProcs
Definition: lock.h:317
Definition: proc.h:162
PGPROC ** procs
Definition: deadlock.c:59
LOCK * lock
Definition: deadlock.c:58
int nProcs
Definition: deadlock.c:60
static struct link * links
Definition: zic.c:299

References Assert(), blocking_autovacuum_proc, dclist_count(), dclist_init(), dclist_push_tail(), DeadLockCheckRecurse(), DS_BLOCKED_BY_AUTOVACUUM, DS_HARD_DEADLOCK, DS_NO_DEADLOCK, DS_SOFT_DEADLOCK, elog(), FATAL, FindLockCycle(), GetLocksMethodTable(), i, j, links, WAIT_ORDER::lock, nCurConstraints, nPossibleConstraints, WAIT_ORDER::nProcs, nWaitOrders, possibleConstraints, ProcLockWakeup(), WAIT_ORDER::procs, waitOrders, and LOCK::waitProcs.

Referenced by CheckDeadLock().

◆ DeadLockReport()

void DeadLockReport ( void  )

Definition at line 1073 of file deadlock.c.

1074 {
1075  StringInfoData clientbuf; /* errdetail for client */
1076  StringInfoData logbuf; /* errdetail for server log */
1077  StringInfoData locktagbuf;
1078  int i;
1079 
1080  initStringInfo(&clientbuf);
1081  initStringInfo(&logbuf);
1082  initStringInfo(&locktagbuf);
1083 
1084  /* Generate the "waits for" lines sent to the client */
1085  for (i = 0; i < nDeadlockDetails; i++)
1086  {
1087  DEADLOCK_INFO *info = &deadlockDetails[i];
1088  int nextpid;
1089 
1090  /* The last proc waits for the first one... */
1091  if (i < nDeadlockDetails - 1)
1092  nextpid = info[1].pid;
1093  else
1094  nextpid = deadlockDetails[0].pid;
1095 
1096  /* reset locktagbuf to hold next object description */
1097  resetStringInfo(&locktagbuf);
1098 
1099  DescribeLockTag(&locktagbuf, &info->locktag);
1100 
1101  if (i > 0)
1102  appendStringInfoChar(&clientbuf, '\n');
1103 
1104  appendStringInfo(&clientbuf,
1105  _("Process %d waits for %s on %s; blocked by process %d."),
1106  info->pid,
1107  GetLockmodeName(info->locktag.locktag_lockmethodid,
1108  info->lockmode),
1109  locktagbuf.data,
1110  nextpid);
1111  }
1112 
1113  /* Duplicate all the above for the server ... */
1114  appendBinaryStringInfo(&logbuf, clientbuf.data, clientbuf.len);
1115 
1116  /* ... and add info about query strings */
1117  for (i = 0; i < nDeadlockDetails; i++)
1118  {
1119  DEADLOCK_INFO *info = &deadlockDetails[i];
1120 
1121  appendStringInfoChar(&logbuf, '\n');
1122 
1123  appendStringInfo(&logbuf,
1124  _("Process %d: %s"),
1125  info->pid,
1126  pgstat_get_backend_current_activity(info->pid, false));
1127  }
1128 
1129  pgstat_report_deadlock();
1130 
1131  ereport(ERROR,
1132  (errcode(ERRCODE_T_R_DEADLOCK_DETECTED),
1133  errmsg("deadlock detected"),
1134  errdetail_internal("%s", clientbuf.data),
1135  errdetail_log("%s", logbuf.data),
1136  errhint("See server log for query details.")));
1137 }
const char * pgstat_get_backend_current_activity(int pid, bool checkUser)
static int nDeadlockDetails
Definition: deadlock.c:125
static DEADLOCK_INFO * deadlockDetails
Definition: deadlock.c:124
int errdetail_internal(const char *fmt,...)
Definition: elog.c:1229
int errhint(const char *fmt,...)
Definition: elog.c:1316
int errdetail_log(const char *fmt,...)
Definition: elog.c:1250
#define _(x)
Definition: elog.c:91
void DescribeLockTag(StringInfo buf, const LOCKTAG *tag)
Definition: lmgr.c:1168
const char * GetLockmodeName(LOCKMETHODID lockmethodid, LOCKMODE mode)
Definition: lock.c:4064
#define ERRCODE_T_R_DEADLOCK_DETECTED
Definition: pgbench.c:77
void pgstat_report_deadlock(void)
void resetStringInfo(StringInfo str)
Definition: stringinfo.c:75
void appendStringInfo(StringInfo str, const char *fmt,...)
Definition: stringinfo.c:91
void appendBinaryStringInfo(StringInfo str, const void *data, int datalen)
Definition: stringinfo.c:227
void appendStringInfoChar(StringInfo str, char ch)
Definition: stringinfo.c:188
void initStringInfo(StringInfo str)
Definition: stringinfo.c:59
LOCKTAG locktag
Definition: deadlock.c:73
LOCKMODE lockmode
Definition: deadlock.c:74
uint8 locktag_lockmethodid
Definition: lock.h:171

References _, appendBinaryStringInfo(), appendStringInfo(), appendStringInfoChar(), StringInfoData::data, deadlockDetails, DescribeLockTag(), ereport, errcode(), ERRCODE_T_R_DEADLOCK_DETECTED, errdetail_internal(), errdetail_log(), errhint(), errmsg(), ERROR, GetLockmodeName(), i, initStringInfo(), StringInfoData::len, DEADLOCK_INFO::lockmode, DEADLOCK_INFO::locktag, LOCKTAG::locktag_lockmethodid, nDeadlockDetails, pgstat_get_backend_current_activity(), pgstat_report_deadlock(), DEADLOCK_INFO::pid, and resetStringInfo().

Referenced by WaitOnLock().

◆ DoLockModesConflict()

bool DoLockModesConflict ( LOCKMODE  mode1,
LOCKMODE  mode2 
)

Definition at line 583 of file lock.c.

584 {
585  LockMethod lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];
586 
587  if (lockMethodTable->conflictTab[mode1] & LOCKBIT_ON(mode2))
588  return true;
589 
590  return false;
591 }
static const LockMethod LockMethods[]
Definition: lock.c:151
#define LOCKBIT_ON(lockmode)
Definition: lock.h:84
const LOCKMASK * conflictTab
Definition: lock.h:111

References LockMethodData::conflictTab, DEFAULT_LOCKMETHOD, LOCKBIT_ON, and LockMethods.

Referenced by Do_MultiXactIdWait(), DoesMultiXactIdConflict(), initialize_reloptions(), and test_lockmode_for_conflict().
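
For example (illustrative only), checking whether two ordinary table lock modes conflict:

    /* ShareLock conflicts with RowExclusiveLock, so this branch is taken. */
    if (DoLockModesConflict(RowExclusiveLock, ShareLock))
        elog(LOG, "the requested modes conflict");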

◆ GetBlockerStatusData()

BlockedProcsData* GetBlockerStatusData ( int  blocked_pid)

Definition at line 3808 of file lock.c.

3809 {
3810  BlockedProcsData *data;
3811  PGPROC *proc;
3812  int i;
3813 
3814  data = (BlockedProcsData *) palloc(sizeof(BlockedProcsData));
3815 
3816  /*
3817  * Guess how much space we'll need, and preallocate. Most of the time
3818  * this will avoid needing to do repalloc while holding the LWLocks. (We
3819  * assume, but check with an Assert, that MaxBackends is enough entries
3820  * for the procs[] array; the other two could need enlargement, though.)
3821  */
3822  data->nprocs = data->nlocks = data->npids = 0;
3823  data->maxprocs = data->maxlocks = data->maxpids = MaxBackends;
3824  data->procs = (BlockedProcData *) palloc(sizeof(BlockedProcData) * data->maxprocs);
3825  data->locks = (LockInstanceData *) palloc(sizeof(LockInstanceData) * data->maxlocks);
3826  data->waiter_pids = (int *) palloc(sizeof(int) * data->maxpids);
3827 
3828  /*
3829  * In order to search the ProcArray for blocked_pid and assume that that
3830  * entry won't immediately disappear under us, we must hold ProcArrayLock.
3831  * In addition, to examine the lock grouping fields of any other backend,
3832  * we must hold all the hash partition locks. (Only one of those locks is
3833  * actually relevant for any one lock group, but we can't know which one
3834  * ahead of time.) It's fairly annoying to hold all those locks
3835  * throughout this, but it's no worse than GetLockStatusData(), and it
3836  * does have the advantage that we're guaranteed to return a
3837  * self-consistent instantaneous state.
3838  */
3839  LWLockAcquire(ProcArrayLock, LW_SHARED);
3840 
3841  proc = BackendPidGetProcWithLock(blocked_pid);
3842 
3843  /* Nothing to do if it's gone */
3844  if (proc != NULL)
3845  {
3846  /*
3847  * Acquire lock on the entire shared lock data structure. See notes
3848  * in GetLockStatusData().
3849  */
3850  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
3851  LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);
3852 
3853  if (proc->lockGroupLeader == NULL)
3854  {
3855  /* Easy case, proc is not a lock group member */
3856  GetSingleProcBlockerStatusData(proc, data);
3857  }
3858  else
3859  {
3860  /* Examine all procs in proc's lock group */
3861  dlist_iter iter;
3862 
3863  dlist_foreach(iter, &proc->lockGroupMembers)
3864  {
3865  PGPROC *memberProc;
3866 
3867  memberProc = dlist_container(PGPROC, lockGroupLink, iter.cur);
3868  GetSingleProcBlockerStatusData(memberProc, data);
3869  }
3870  }
3871 
3872  /*
3873  * And release locks. See notes in GetLockStatusData().
3874  */
3875  for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
3876  LWLockRelease(LockHashPartitionLockByIndex(i));
3877 
3878  Assert(data->nprocs <= data->maxprocs);
3879  }
3880 
3881  LWLockRelease(ProcArrayLock);
3882 
3883  return data;
3884 }
int MaxBackends
Definition: globals.c:140
#define dlist_foreach(iter, lhead)
Definition: ilist.h:623
#define dlist_container(type, membername, ptr)
Definition: ilist.h:593
static void GetSingleProcBlockerStatusData(PGPROC *blocked_proc, BlockedProcsData *data)
Definition: lock.c:3888
#define LockHashPartitionLockByIndex(i)
Definition: lock.h:530
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1195
void LWLockRelease(LWLock *lock)
Definition: lwlock.c:1803
#define NUM_LOCK_PARTITIONS
Definition: lwlock.h:98
@ LW_SHARED
Definition: lwlock.h:116
void * palloc(Size size)
Definition: mcxt.c:1210
const void * data
PGPROC * BackendPidGetProcWithLock(int pid)
Definition: procarray.c:3185
dlist_head lockGroupMembers
Definition: proc.h:296
PGPROC * lockGroupLeader
Definition: proc.h:295
dlist_node * cur
Definition: ilist.h:179

References Assert(), BackendPidGetProcWithLock(), dlist_iter::cur, data, dlist_container, dlist_foreach, GetSingleProcBlockerStatusData(), i, PGPROC::lockGroupLeader, PGPROC::lockGroupMembers, LockHashPartitionLockByIndex, LW_SHARED, LWLockAcquire(), LWLockRelease(), MaxBackends, NUM_LOCK_PARTITIONS, and palloc().

Referenced by pg_blocking_pids().

◆ GetBlockingAutoVacuumPgproc()

PGPROC* GetBlockingAutoVacuumPgproc ( void  )

Definition at line 287 of file deadlock.c.

288 {
289  PGPROC *ptr;
290 
291  ptr = blocking_autovacuum_proc;
292  blocking_autovacuum_proc = NULL;
293 
294  return ptr;
295 }

References blocking_autovacuum_proc.

Referenced by ProcSleep().

◆ GetLockConflicts()

VirtualTransactionId* GetLockConflicts ( const LOCKTAG *  locktag,
LOCKMODE  lockmode,
int *  countp 
)

Definition at line 2899 of file lock.c.

2900 {
2901  static VirtualTransactionId *vxids;
2902  LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
2903  LockMethod lockMethodTable;
2904  LOCK *lock;
2905  LOCKMASK conflictMask;
2906  dlist_iter proclock_iter;
2907  PROCLOCK *proclock;
2908  uint32 hashcode;
2909  LWLock *partitionLock;
2910  int count = 0;
2911  int fast_count = 0;
2912 
2913  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2914  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2915  lockMethodTable = LockMethods[lockmethodid];
2916  if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
2917  elog(ERROR, "unrecognized lock mode: %d", lockmode);
2918 
2919  /*
2920  * Allocate memory to store results, and fill with InvalidVXID. We only
2921  * need enough space for MaxBackends + max_prepared_xacts + a terminator.
2922  * InHotStandby allocate once in TopMemoryContext.
2923  */
2924  if (InHotStandby)
2925  {
2926  if (vxids == NULL)
2927  vxids = (VirtualTransactionId *)
2928  MemoryContextAlloc(TopMemoryContext,
2929  sizeof(VirtualTransactionId) *
2930  (MaxBackends + max_prepared_xacts + 1));
2931  }
2932  else
2933  vxids = (VirtualTransactionId *)
2934  palloc0(sizeof(VirtualTransactionId) *
2935  (MaxBackends + max_prepared_xacts + 1));
2936 
2937  /* Compute hash code and partition lock, and look up conflicting modes. */
2938  hashcode = LockTagHashCode(locktag);
2939  partitionLock = LockHashPartitionLock(hashcode);
2940  conflictMask = lockMethodTable->conflictTab[lockmode];
2941 
2942  /*
2943  * Fast path locks might not have been entered in the primary lock table.
2944  * If the lock we're dealing with could conflict with such a lock, we must
2945  * examine each backend's fast-path array for conflicts.
2946  */
2947  if (ConflictsWithRelationFastPath(locktag, lockmode))
2948  {
2949  int i;
2950  Oid relid = locktag->locktag_field2;
2951  VirtualTransactionId vxid;
2952 
2953  /*
2954  * Iterate over relevant PGPROCs. Anything held by a prepared
2955  * transaction will have been transferred to the primary lock table,
2956  * so we need not worry about those. This is all a bit fuzzy, because
2957  * new locks could be taken after we've visited a particular
2958  * partition, but the callers had better be prepared to deal with that
2959  * anyway, since the locks could equally well be taken between the
2960  * time we return the value and the time the caller does something
2961  * with it.
2962  */
2963  for (i = 0; i < ProcGlobal->allProcCount; i++)
2964  {
2965  PGPROC *proc = &ProcGlobal->allProcs[i];
2966  uint32 f;
2967 
2968  /* A backend never blocks itself */
2969  if (proc == MyProc)
2970  continue;
2971 
2972  LWLockAcquire(&proc->fpInfoLock, LW_SHARED);
2973 
2974  /*
2975  * If the target backend isn't referencing the same database as
2976  * the lock, then we needn't examine the individual relation IDs
2977  * at all; none of them can be relevant.
2978  *
2979  * See FastPathTransferRelationLocks() for discussion of why we do
2980  * this test after acquiring the lock.
2981  */
2982  if (proc->databaseId != locktag->locktag_field1)
2983  {
2984  LWLockRelease(&proc->fpInfoLock);
2985  continue;
2986  }
2987 
2988  for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
2989  {
2990  uint32 lockmask;
2991 
2992  /* Look for an allocated slot matching the given relid. */
2993  if (relid != proc->fpRelId[f])
2994  continue;
2995  lockmask = FAST_PATH_GET_BITS(proc, f);
2996  if (!lockmask)
2997  continue;
2998  lockmask <<= FAST_PATH_LOCKNUMBER_OFFSET;
2999 
3000  /*
3001  * There can only be one entry per relation, so if we found it
3002  * and it doesn't conflict, we can skip the rest of the slots.
3003  */
3004  if ((lockmask & conflictMask) == 0)
3005  break;
3006 
3007  /* Conflict! */
3008  GET_VXID_FROM_PGPROC(vxid, *proc);
3009 
3010  if (VirtualTransactionIdIsValid(vxid))
3011  vxids[count++] = vxid;
3012  /* else, xact already committed or aborted */
3013 
3014  /* No need to examine remaining slots. */
3015  break;
3016  }
3017 
3018  LWLockRelease(&proc->fpInfoLock);
3019  }
3020  }
3021 
3022  /* Remember how many fast-path conflicts we found. */
3023  fast_count = count;
3024 
3025  /*
3026  * Look up the lock object matching the tag.
3027  */
3028  LWLockAcquire(partitionLock, LW_SHARED);
3029 
3030  lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
3031  (const void *) locktag,
3032  hashcode,
3033  HASH_FIND,
3034  NULL);
3035  if (!lock)
3036  {
3037  /*
3038  * If the lock object doesn't exist, there is nothing holding a lock
3039  * on this lockable object.
3040  */
3041  LWLockRelease(partitionLock);
3042  vxids[count].backendId = InvalidBackendId;
3043  vxids[count].localTransactionId = InvalidLocalTransactionId;
3044  if (countp)
3045  *countp = count;
3046  return vxids;
3047  }
3048 
3049  /*
3050  * Examine each existing holder (or awaiter) of the lock.
3051  */
3052  dlist_foreach(proclock_iter, &lock->procLocks)
3053  {
3054  proclock = dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
3055 
3056  if (conflictMask & proclock->holdMask)
3057  {
3058  PGPROC *proc = proclock->tag.myProc;
3059 
3060  /* A backend never blocks itself */
3061  if (proc != MyProc)
3062  {
3063  VirtualTransactionId vxid;
3064 
3065  GET_VXID_FROM_PGPROC(vxid, *proc);
3066 
3067  if (VirtualTransactionIdIsValid(vxid))
3068  {
3069  int i;
3070 
3071  /* Avoid duplicate entries. */
3072  for (i = 0; i < fast_count; ++i)
3073  if (VirtualTransactionIdEquals(vxids[i], vxid))
3074  break;
3075  if (i >= fast_count)
3076  vxids[count++] = vxid;
3077  }
3078  /* else, xact already committed or aborted */
3079  }
3080  }
3081  }
3082 
3083  LWLockRelease(partitionLock);
3084 
3085  if (count > MaxBackends + max_prepared_xacts) /* should never happen */
3086  elog(PANIC, "too many conflicting locks found");
3087 
3088  vxids[count].backendId = InvalidBackendId;
3089  vxids[count].localTransactionId = InvalidLocalTransactionId;
3090  if (countp)
3091  *countp = count;
3092  return vxids;
3093 }
#define lengthof(array)
Definition: c.h:772
void * hash_search_with_hash_value(HTAB *hashp, const void *keyPtr, uint32 hashvalue, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:966
#define PANIC
Definition: elog.h:42
@ HASH_FIND
Definition: hsearch.h:113
#define FAST_PATH_LOCKNUMBER_OFFSET
Definition: lock.c:203
#define ConflictsWithRelationFastPath(locktag, mode)
Definition: lock.c:233
static HTAB * LockMethodLockHash
Definition: lock.c:281
#define FAST_PATH_GET_BITS(proc, n)
Definition: lock.c:205
uint32 LockTagHashCode(const LOCKTAG *locktag)
Definition: lock.c:517
uint16 LOCKMETHODID
Definition: lock.h:122
#define VirtualTransactionIdIsValid(vxid)
Definition: lock.h:67
#define LockHashPartitionLock(hashcode)
Definition: lock.h:527
#define VirtualTransactionIdEquals(vxid1, vxid2)
Definition: lock.h:71
#define GET_VXID_FROM_PGPROC(vxid, proc)
Definition: lock.h:77
int LOCKMASK
Definition: lockdefs.h:25
MemoryContext TopMemoryContext
Definition: mcxt.c:141
void * palloc0(Size size)
Definition: mcxt.c:1241
void * MemoryContextAlloc(MemoryContext context, Size size)
Definition: mcxt.c:1005
unsigned int Oid
Definition: postgres_ext.h:31
#define FP_LOCK_SLOTS_PER_BACKEND
Definition: proc.h:79
PGPROC * MyProc
Definition: proc.c:66
PROC_HDR * ProcGlobal
Definition: proc.c:78
uint32 locktag_field1
Definition: lock.h:166
uint32 locktag_field2
Definition: lock.h:167
dlist_head procLocks
Definition: lock.h:316
Definition: lwlock.h:40
int numLockModes
Definition: lock.h:110
LWLock fpInfoLock
Definition: proc.h:284
Oid fpRelId[FP_LOCK_SLOTS_PER_BACKEND]
Definition: proc.h:286
Oid databaseId
Definition: proc.h:198
PGPROC * myProc
Definition: lock.h:366
Definition: lock.h:370
LOCKMASK holdMask
Definition: lock.h:376
PGPROC * allProcs
Definition: proc.h:362
uint32 allProcCount
Definition: proc.h:380
LocalTransactionId localTransactionId
Definition: lock.h:62
BackendId backendId
Definition: lock.h:61
int max_prepared_xacts
Definition: twophase.c:117
#define InHotStandby
Definition: xlogutils.h:57

References PROC_HDR::allProcCount, PROC_HDR::allProcs, VirtualTransactionId::backendId, ConflictsWithRelationFastPath, LockMethodData::conflictTab, dlist_iter::cur, PGPROC::databaseId, dlist_container, dlist_foreach, elog(), ERROR, FAST_PATH_GET_BITS, FAST_PATH_LOCKNUMBER_OFFSET, FP_LOCK_SLOTS_PER_BACKEND, PGPROC::fpInfoLock, PGPROC::fpRelId, GET_VXID_FROM_PGPROC, HASH_FIND, hash_search_with_hash_value(), PROCLOCK::holdMask, i, InHotStandby, InvalidBackendId, InvalidLocalTransactionId, lengthof, VirtualTransactionId::localTransactionId, LockHashPartitionLock, LockMethodLockHash, LockMethods, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_lockmethodid, LockTagHashCode(), LW_SHARED, LWLockAcquire(), LWLockRelease(), max_prepared_xacts, MaxBackends, MemoryContextAlloc(), MyProc, PROCLOCKTAG::myProc, LockMethodData::numLockModes, palloc0(), PANIC, ProcGlobal, LOCK::procLocks, PROCLOCK::tag, TopMemoryContext, VirtualTransactionIdEquals, and VirtualTransactionIdIsValid.

Referenced by ProcSleep(), ResolveRecoveryConflictWithLock(), and WaitForLockersMultiple().

◆ GetLockmodeName()

const char* GetLockmodeName ( LOCKMETHODID  lockmethodid,
LOCKMODE  mode 
)

Definition at line 4064 of file lock.c.

4065 {
4066  Assert(lockmethodid > 0 && lockmethodid < lengthof(LockMethods));
4067  Assert(mode > 0 && mode <= LockMethods[lockmethodid]->numLockModes);
4068  return LockMethods[lockmethodid]->lockModeNames[mode];
4069 }
static PgChecksumMode mode
Definition: pg_checksums.c:65
const char *const * lockModeNames
Definition: lock.h:112

References Assert(), lengthof, LockMethods, LockMethodData::lockModeNames, and mode.

Referenced by CheckRelationLockedByMe(), DeadLockReport(), pg_lock_status(), and ProcSleep().
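
Illustrative call, not from the source:

    /* Yields the human-readable name of a mode in the default lock method. */
    const char *modename = GetLockmodeName(DEFAULT_LOCKMETHOD, AccessShareLock);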

◆ GetLocksMethodTable()

LockMethod GetLocksMethodTable ( const LOCK *  lock)

Definition at line 487 of file lock.c.

488 {
489  LOCKMETHODID lockmethodid = LOCK_LOCKMETHOD(*lock);
490 
491  Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
492  return LockMethods[lockmethodid];
493 }
#define LOCK_LOCKMETHOD(lock)
Definition: lock.h:324

References Assert(), lengthof, LOCK_LOCKMETHOD, and LockMethods.

Referenced by DeadLockCheck(), and FindLockCycleRecurseMember().

◆ GetLockStatusData()

LockData* GetLockStatusData ( void  )

Definition at line 3616 of file lock.c.

3617 {
3618  LockData *data;
3619  PROCLOCK *proclock;
3620  HASH_SEQ_STATUS seqstat;
3621  int els;
3622  int el;
3623  int i;
3624 
3625  data = (LockData *) palloc(sizeof(LockData));
3626 
3627  /* Guess how much space we'll need. */
3628  els = MaxBackends;
3629  el = 0;
3630  data->locks = (LockInstanceData *) palloc(sizeof(LockInstanceData) * els);
3631 
3632  /*
3633  * First, we iterate through the per-backend fast-path arrays, locking
3634  * them one at a time. This might produce an inconsistent picture of the
3635  * system state, but taking all of those LWLocks at the same time seems
3636  * impractical (in particular, note MAX_SIMUL_LWLOCKS). It shouldn't
3637  * matter too much, because none of these locks can be involved in lock
3638  * conflicts anyway - anything that might must be present in the main lock
3639  * table. (For the same reason, we don't sweat about making leaderPid
3640  * completely valid. We cannot safely dereference another backend's
3641  * lockGroupLeader field without holding all lock partition locks, and
3642  * it's not worth that.)
3643  */
3644  for (i = 0; i < ProcGlobal->allProcCount; ++i)
3645  {
3646  PGPROC *proc = &ProcGlobal->allProcs[i];
3647  uint32 f;
3648 
3649  LWLockAcquire(&proc->fpInfoLock, LW_SHARED);
3650 
3651  for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; ++f)
3652  {
3653  LockInstanceData *instance;
3654  uint32 lockbits = FAST_PATH_GET_BITS(proc, f);
3655 
3656  /* Skip unallocated slots. */
3657  if (!lockbits)
3658  continue;
3659 
3660  if (el >= els)
3661  {
3662  els += MaxBackends;
3663  data->locks = (LockInstanceData *)
3664  repalloc(data->locks, sizeof(LockInstanceData) * els);
3665  }
3666 
3667  instance = &data->locks[el];
3668  SET_LOCKTAG_RELATION(instance->locktag, proc->databaseId,
3669  proc->fpRelId[f]);
3670  instance->holdMask = lockbits << FAST_PATH_LOCKNUMBER_OFFSET;
3671  instance->waitLockMode = NoLock;
3672  instance->backend = proc->backendId;
3673  instance->lxid = proc->lxid;
3674  instance->pid = proc->pid;
3675  instance->leaderPid = proc->pid;
3676  instance->fastpath = true;
3677 
3678  /*
3679  * Successfully taking fast path lock means there were no
3680  * conflicting locks.
3681  */
3682  instance->waitStart = 0;
3683 
3684  el++;
3685  }
3686 
3687  if (proc->fpVXIDLock)
3688  {
3689  VirtualTransactionId vxid;
3690  LockInstanceData *instance;
3691 
3692  if (el >= els)
3693  {
3694  els += MaxBackends;
3695  data->locks = (LockInstanceData *)
3696  repalloc(data->locks, sizeof(LockInstanceData) * els);
3697  }
3698 
3699  vxid.backendId = proc->backendId;
3700  vxid.localTransactionId = proc->fpLocalTransactionId;
3701 
3702  instance = &data->locks[el];
3703  SET_LOCKTAG_VIRTUALTRANSACTION(instance->locktag, vxid);
3704  instance->holdMask = LOCKBIT_ON(ExclusiveLock);
3705  instance->waitLockMode = NoLock;
3706  instance->backend = proc->backendId;
3707  instance->lxid = proc->lxid;
3708  instance->pid = proc->pid;
3709  instance->leaderPid = proc->pid;
3710  instance->fastpath = true;
3711  instance->waitStart = 0;
3712 
3713  el++;
3714  }
3715 
3716  LWLockRelease(&proc->fpInfoLock);
3717  }
3718 
3719  /*
3720  * Next, acquire lock on the entire shared lock data structure. We do
3721  * this so that, at least for locks in the primary lock table, the state
3722  * will be self-consistent.
3723  *
3724  * Since this is a read-only operation, we take shared instead of
3725  * exclusive lock. There's not a whole lot of point to this, because all
3726  * the normal operations require exclusive lock, but it doesn't hurt
3727  * anything either. It will at least allow two backends to do
3728  * GetLockStatusData in parallel.
3729  *
3730  * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
3731  */
3732  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
3733  LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);
3734 
3735  /* Now we can safely count the number of proclocks */
3736  data->nelements = el + hash_get_num_entries(LockMethodProcLockHash);
3737  if (data->nelements > els)
3738  {
3739  els = data->nelements;
3740  data->locks = (LockInstanceData *)
3741  repalloc(data->locks, sizeof(LockInstanceData) * els);
3742  }
3743 
3744  /* Now scan the tables to copy the data */
3745  hash_seq_init(&seqstat, LockMethodProcLockHash);
3746 
3747  while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
3748  {
3749  PGPROC *proc = proclock->tag.myProc;
3750  LOCK *lock = proclock->tag.myLock;
3751  LockInstanceData *instance = &data->locks[el];
3752 
3753  memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
3754  instance->holdMask = proclock->holdMask;
3755  if (proc->waitLock == proclock->tag.myLock)
3756  instance->waitLockMode = proc->waitLockMode;
3757  else
3758  instance->waitLockMode = NoLock;
3759  instance->backend = proc->backendId;
3760  instance->lxid = proc->lxid;
3761  instance->pid = proc->pid;
3762  instance->leaderPid = proclock->groupLeader->pid;
3763  instance->fastpath = false;
3764  instance->waitStart = (TimestampTz) pg_atomic_read_u64(&proc->waitStart);
3765 
3766  el++;
3767  }
3768 
3769  /*
3770  * And release locks. We do this in reverse order for two reasons: (1)
3771  * Anyone else who needs more than one of the locks will be trying to lock
3772  * them in increasing order; we don't want to release the other process
3773  * until it can get all the locks it needs. (2) This avoids O(N^2)
3774  * behavior inside LWLockRelease.
3775  */
3776  for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
3777  LWLockRelease(LockHashPartitionLockByIndex(i));
3778 
3779  Assert(el == data->nelements);
3780 
3781  return data;
3782 }
static uint64 pg_atomic_read_u64(volatile pg_atomic_uint64 *ptr)
Definition: atomics.h:424
int64 TimestampTz
Definition: timestamp.h:39
long hash_get_num_entries(HTAB *hashp)
Definition: dynahash.c:1377
static HTAB * LockMethodProcLockHash
Definition: lock.c:282
#define SET_LOCKTAG_VIRTUALTRANSACTION(locktag, vxid)
Definition: lock.h:235
#define SET_LOCKTAG_RELATION(locktag, dboid, reloid)
Definition: lock.h:181
#define NoLock
Definition: lockdefs.h:34
#define ExclusiveLock
Definition: lockdefs.h:42
void * repalloc(void *pointer, Size size)
Definition: mcxt.c:1456
LOCKTAG tag
Definition: lock.h:311
Definition: lock.h:467
LOCKMASK holdMask
Definition: lock.h:455
LOCKMODE waitLockMode
Definition: lock.h:456
bool fastpath
Definition: lock.h:463
LOCKTAG locktag
Definition: lock.h:454
TimestampTz waitStart
Definition: lock.h:459
int leaderPid
Definition: lock.h:462
BackendId backend
Definition: lock.h:457
LocalTransactionId lxid
Definition: lock.h:458
LocalTransactionId lxid
Definition: proc.h:183
pg_atomic_uint64 waitStart
Definition: proc.h:228
bool fpVXIDLock
Definition: proc.h:287
BackendId backendId
Definition: proc.h:197
int pid
Definition: proc.h:186
LOCK * waitLock
Definition: proc.h:223
LOCKMODE waitLockMode
Definition: proc.h:225
LocalTransactionId fpLocalTransactionId
Definition: proc.h:288
PGPROC * groupLeader
Definition: lock.h:375

References PROC_HDR::allProcCount, PROC_HDR::allProcs, Assert(), LockInstanceData::backend, VirtualTransactionId::backendId, PGPROC::backendId, data, PGPROC::databaseId, ExclusiveLock, FAST_PATH_GET_BITS, FAST_PATH_LOCKNUMBER_OFFSET, LockInstanceData::fastpath, FP_LOCK_SLOTS_PER_BACKEND, PGPROC::fpInfoLock, PGPROC::fpLocalTransactionId, PGPROC::fpRelId, PGPROC::fpVXIDLock, PROCLOCK::groupLeader, hash_get_num_entries(), hash_seq_init(), hash_seq_search(), PROCLOCK::holdMask, LockInstanceData::holdMask, i, LockInstanceData::leaderPid, VirtualTransactionId::localTransactionId, LOCKBIT_ON, LockHashPartitionLockByIndex, LockMethodProcLockHash, LockInstanceData::locktag, LW_SHARED, LWLockAcquire(), LWLockRelease(), LockInstanceData::lxid, PGPROC::lxid, MaxBackends, PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, NoLock, NUM_LOCK_PARTITIONS, palloc(), pg_atomic_read_u64(), LockInstanceData::pid, PGPROC::pid, ProcGlobal, repalloc(), SET_LOCKTAG_RELATION, SET_LOCKTAG_VIRTUALTRANSACTION, LOCK::tag, PROCLOCK::tag, PGPROC::waitLock, LockInstanceData::waitLockMode, PGPROC::waitLockMode, LockInstanceData::waitStart, and PGPROC::waitStart.

Referenced by pg_lock_status().

◆ GetLockTagsMethodTable()

LockMethod GetLockTagsMethodTable ( const LOCKTAG *  locktag)

Definition at line 499 of file lock.c.

500 {
501  LOCKMETHODID lockmethodid = (LOCKMETHODID) locktag->locktag_lockmethodid;
502 
503  Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
504  return LockMethods[lockmethodid];
505 }

References Assert(), lengthof, LockMethods, and LOCKTAG::locktag_lockmethodid.

Referenced by pg_blocking_pids().

◆ GetRunningTransactionLocks()

xl_standby_lock* GetRunningTransactionLocks ( int *  nlocks)

Definition at line 3982 of file lock.c.

3983 {
3984  xl_standby_lock *accessExclusiveLocks;
3985  PROCLOCK *proclock;
3986  HASH_SEQ_STATUS seqstat;
3987  int i;
3988  int index;
3989  int els;
3990 
3991  /*
3992  * Acquire lock on the entire shared lock data structure.
3993  *
3994  * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
3995  */
3996  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
3997  LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);
3998 
3999  /* Now we can safely count the number of proclocks */
4000  els = hash_get_num_entries(LockMethodProcLockHash);
4001 
4002  /*
4003  * Allocating enough space for all locks in the lock table is overkill,
4004  * but it's more convenient and faster than having to enlarge the array.
4005  */
4006  accessExclusiveLocks = palloc(els * sizeof(xl_standby_lock));
4007 
4008  /* Now scan the tables to copy the data */
4009  hash_seq_init(&seqstat, LockMethodProcLockHash);
4010 
4011  /*
4012  * If lock is a currently granted AccessExclusiveLock then it will have
4013  * just one proclock holder, so locks are never accessed twice in this
4014  * particular case. Don't copy this code for use elsewhere because in the
4015  * general case this will give you duplicate locks when looking at
4016  * non-exclusive lock types.
4017  */
4018  index = 0;
4019  while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
4020  {
4021  /* make sure this definition matches the one used in LockAcquire */
4022  if ((proclock->holdMask & LOCKBIT_ON(AccessExclusiveLock)) &&
4023  proclock->tag.myLock->tag.locktag_type == LOCKTAG_RELATION)
4024  {
4025  PGPROC *proc = proclock->tag.myProc;
4026  LOCK *lock = proclock->tag.myLock;
4027  TransactionId xid = proc->xid;
4028 
4029  /*
4030  * Don't record locks for transactions if we know they have
4031  * already issued their WAL record for commit but not yet released
4032  * lock. It is still possible that we see locks held by already
4033  * complete transactions, if they haven't yet zeroed their xids.
4034  */
4035  if (!TransactionIdIsValid(xid))
4036  continue;
4037 
4038  accessExclusiveLocks[index].xid = xid;
4039  accessExclusiveLocks[index].dbOid = lock->tag.locktag_field1;
4040  accessExclusiveLocks[index].relOid = lock->tag.locktag_field2;
4041 
4042  index++;
4043  }
4044  }
4045 
4046  Assert(index <= els);
4047 
4048  /*
4049  * And release locks. We do this in reverse order for two reasons: (1)
4050  * Anyone else who needs more than one of the locks will be trying to lock
4051  * them in increasing order; we don't want to release the other process
4052  * until it can get all the locks it needs. (2) This avoids O(N^2)
4053  * behavior inside LWLockRelease.
4054  */
4055  for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
4056  LWLockRelease(LockHashPartitionLockByIndex(i));
4057 
4058  *nlocks = index;
4059  return accessExclusiveLocks;
4060 }
uint32 TransactionId
Definition: c.h:636
#define AccessExclusiveLock
Definition: lockdefs.h:43
TransactionId xid
Definition: proc.h:173
TransactionId xid
Definition: lockdefs.h:51
#define TransactionIdIsValid(xid)
Definition: transam.h:41

References AccessExclusiveLock, Assert(), xl_standby_lock::dbOid, hash_get_num_entries(), hash_seq_init(), hash_seq_search(), PROCLOCK::holdMask, i, LOCKBIT_ON, LockHashPartitionLockByIndex, LockMethodProcLockHash, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG_RELATION, LOCKTAG::locktag_type, LW_SHARED, LWLockAcquire(), LWLockRelease(), PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, NUM_LOCK_PARTITIONS, palloc(), xl_standby_lock::relOid, LOCK::tag, PROCLOCK::tag, TransactionIdIsValid, xl_standby_lock::xid, and PGPROC::xid.

Referenced by LogStandbySnapshot().
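
A hedged sketch of how the returned array might be consumed; the real consumer is LogStandbySnapshot() in standby.c, which writes the entries into a WAL record rather than logging them.

 int              nlocks;
 xl_standby_lock *locks = GetRunningTransactionLocks(&nlocks);
 int              i;

 for (i = 0; i < nlocks; i++)
     elog(DEBUG1, "xid %u holds AccessExclusiveLock on relation %u/%u",
          locks[i].xid, locks[i].dbOid, locks[i].relOid);

 pfree(locks);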

◆ GrantAwaitedLock()

void GrantAwaitedLock ( void  )

Definition at line 1781 of file lock.c.

1782 {
1783  GrantLockLocal(awaitedLock, awaitedOwner);
1784 }
static LOCALLOCK * awaitedLock
Definition: lock.c:288
static ResourceOwner awaitedOwner
Definition: lock.c:289
static void GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner)
Definition: lock.c:1684

References awaitedLock, awaitedOwner, and GrantLockLocal().

Referenced by LockErrorCleanup(), and ProcSleep().

◆ GrantLock()

void GrantLock ( LOCK * lock,
PROCLOCK * proclock,
LOCKMODE  lockmode 
)

Definition at line 1550 of file lock.c.

1551 {
1552  lock->nGranted++;
1553  lock->granted[lockmode]++;
1554  lock->grantMask |= LOCKBIT_ON(lockmode);
1555  if (lock->granted[lockmode] == lock->requested[lockmode])
1556  lock->waitMask &= LOCKBIT_OFF(lockmode);
1557  proclock->holdMask |= LOCKBIT_ON(lockmode);
1558  LOCK_PRINT("GrantLock", lock, lockmode);
1559  Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
1560  Assert(lock->nGranted <= lock->nRequested);
1561 }
#define LOCK_PRINT(where, lock, type)
Definition: lock.c:365
#define LOCKBIT_OFF(lockmode)
Definition: lock.h:85
int nRequested
Definition: lock.h:319
int requested[MAX_LOCKMODES]
Definition: lock.h:318
int granted[MAX_LOCKMODES]
Definition: lock.h:320
LOCKMASK grantMask
Definition: lock.h:314
LOCKMASK waitMask
Definition: lock.h:315
int nGranted
Definition: lock.h:321

References Assert(), LOCK::granted, LOCK::grantMask, PROCLOCK::holdMask, LOCK_PRINT, LOCKBIT_OFF, LOCKBIT_ON, LOCK::nGranted, LOCK::nRequested, LOCK::requested, and LOCK::waitMask.

Referenced by FastPathGetRelationLockEntry(), FastPathTransferRelationLocks(), lock_twophase_recover(), LockAcquireExtended(), ProcLockWakeup(), ProcSleep(), and VirtualXactLock().
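
A worked illustration of the mask bookkeeping (mode numbers assumed from lockdefs.h, where AccessShareLock is 1 and AccessExclusiveLock is 8; the values are not shown on this page):

 /* LOCKBIT_ON(mode) is 1 << mode, so:                             */
 /*   LOCKBIT_ON(AccessShareLock)     == 0x0002                    */
 /*   LOCKBIT_ON(AccessExclusiveLock) == 0x0100                    */
 /* After GrantLock(lock, proclock, AccessShareLock) followed by   */
 /* GrantLock(lock, proclock, AccessExclusiveLock) for the same    */
 /* proclock, lock->grantMask and proclock->holdMask both contain  */
 /* 0x0102, and lock->nGranted has been incremented twice.         */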

◆ InitDeadLockChecking()

void InitDeadLockChecking ( void  )

Definition at line 143 of file deadlock.c.

144 {
145  MemoryContext oldcxt;
146 
147  /* Make sure allocations are permanent */
148  oldcxt = MemoryContextSwitchTo(TopMemoryContext);
149 
150  /*
151  * FindLockCycle needs at most MaxBackends entries in visitedProcs[] and
152  * deadlockDetails[].
153  */
154  visitedProcs = (PGPROC **) palloc(MaxBackends * sizeof(PGPROC *));
155  deadlockDetails = (DEADLOCK_INFO *) palloc(MaxBackends * sizeof(DEADLOCK_INFO));
156 
157  /*
158  * TopoSort needs to consider at most MaxBackends wait-queue entries, and
159  * it needn't run concurrently with FindLockCycle.
160  */
161  topoProcs = visitedProcs; /* re-use this space */
162  beforeConstraints = (int *) palloc(MaxBackends * sizeof(int));
163  afterConstraints = (int *) palloc(MaxBackends * sizeof(int));
164 
165  /*
166  * We need to consider rearranging at most MaxBackends/2 wait queues
167  * (since it takes at least two waiters in a queue to create a soft edge),
168  * and the expanded form of the wait queues can't involve more than
169  * MaxBackends total waiters.
170  */
171  waitOrders = (WAIT_ORDER *)
172  palloc((MaxBackends / 2) * sizeof(WAIT_ORDER));
173  waitOrderProcs = (PGPROC **) palloc(MaxBackends * sizeof(PGPROC *));
174 
175  /*
176  * Allow at most MaxBackends distinct constraints in a configuration. (Is
177  * this enough? In practice it seems it should be, but I don't quite see
178  * how to prove it. If we run out, we might fail to find a workable wait
179  * queue rearrangement even though one exists.) NOTE that this number
180  * limits the maximum recursion depth of DeadLockCheckRecurse. Making it
181  * really big might potentially allow a stack-overflow problem.
182  */
183  maxCurConstraints = MaxBackends;
184  curConstraints = (EDGE *) palloc(maxCurConstraints * sizeof(EDGE));
185 
186  /*
187  * Allow up to 3*MaxBackends constraints to be saved without having to
188  * re-run TestConfiguration. (This is probably more than enough, but we
189  * can survive if we run low on space by doing excess runs of
190  * TestConfiguration to re-compute constraint lists each time needed.) The
191  * last MaxBackends entries in possibleConstraints[] are reserved as
192  * output workspace for FindLockCycle.
193  */
196  (EDGE *) palloc(maxPossibleConstraints * sizeof(EDGE));
197 
198  MemoryContextSwitchTo(oldcxt);
199 }
static int maxPossibleConstraints
Definition: deadlock.c:123
static PGPROC ** waitOrderProcs
Definition: deadlock.c:113
static PGPROC ** visitedProcs
Definition: deadlock.c:102
static int * beforeConstraints
Definition: deadlock.c:107
static int * afterConstraints
Definition: deadlock.c:108
static int maxCurConstraints
Definition: deadlock.c:118
static EDGE * curConstraints
Definition: deadlock.c:116
static PGPROC ** topoProcs
Definition: deadlock.c:106
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
Definition: palloc.h:138

References afterConstraints, beforeConstraints, curConstraints, deadlockDetails, MaxBackends, maxCurConstraints, maxPossibleConstraints, MemoryContextSwitchTo(), palloc(), possibleConstraints, TopMemoryContext, topoProcs, visitedProcs, waitOrderProcs, and waitOrders.

Referenced by InitProcess().

◆ InitLocks()

void InitLocks ( void  )

Definition at line 405 of file lock.c.

406 {
407  HASHCTL info;
408  long init_table_size,
409  max_table_size;
410  bool found;
411 
412  /*
413  * Compute init/max size to request for lock hashtables. Note these
414  * calculations must agree with LockShmemSize!
415  */
416  max_table_size = NLOCKENTS();
417  init_table_size = max_table_size / 2;
418 
419  /*
420  * Allocate hash table for LOCK structs. This stores per-locked-object
421  * information.
422  */
423  info.keysize = sizeof(LOCKTAG);
424  info.entrysize = sizeof(LOCK);
425  info.num_partitions = NUM_LOCK_PARTITIONS;
426 
427  LockMethodLockHash = ShmemInitHash("LOCK hash",
428  init_table_size,
429  max_table_size,
430  &info,
431  HASH_ELEM | HASH_BLOBS | HASH_PARTITION);
432 
433  /* Assume an average of 2 holders per lock */
434  max_table_size *= 2;
435  init_table_size *= 2;
436 
437  /*
438  * Allocate hash table for PROCLOCK structs. This stores
439  * per-lock-per-holder information.
440  */
441  info.keysize = sizeof(PROCLOCKTAG);
442  info.entrysize = sizeof(PROCLOCK);
443  info.hash = proclock_hash;
444  info.num_partitions = NUM_LOCK_PARTITIONS;
445 
446  LockMethodProcLockHash = ShmemInitHash("PROCLOCK hash",
447  init_table_size,
448  max_table_size,
449  &info,
450  HASH_ELEM | HASH_FUNCTION | HASH_PARTITION);
451 
452  /*
453  * Allocate fast-path structures.
454  */
455  FastPathStrongRelationLocks = (FastPathStrongRelationLockData *)
456  ShmemInitStruct("Fast Path Strong Relation Lock Data",
457  sizeof(FastPathStrongRelationLockData), &found);
458  if (!found)
459  SpinLockInit(&FastPathStrongRelationLocks->mutex);
460 
461  /*
462  * Allocate non-shared hash table for LOCALLOCK structs. This stores lock
463  * counts and resource owner information.
464  *
465  * The non-shared table could already exist in this process (this occurs
466  * when the postmaster is recreating shared memory after a backend crash).
467  * If so, delete and recreate it. (We could simply leave it, since it
468  * ought to be empty in the postmaster, but for safety let's zap it.)
469  */
470  if (LockMethodLocalHash)
471  hash_destroy(LockMethodLocalHash);
472 
473  info.keysize = sizeof(LOCALLOCKTAG);
474  info.entrysize = sizeof(LOCALLOCK);
475 
476  LockMethodLocalHash = hash_create("LOCALLOCK hash",
477  16,
478  &info,
479  HASH_ELEM | HASH_BLOBS);
480 }
void hash_destroy(HTAB *hashp)
Definition: dynahash.c:863
HTAB * hash_create(const char *tabname, long nelem, const HASHCTL *info, int flags)
Definition: dynahash.c:350
#define HASH_ELEM
Definition: hsearch.h:95
#define HASH_FUNCTION
Definition: hsearch.h:98
#define HASH_BLOBS
Definition: hsearch.h:97
#define HASH_PARTITION
Definition: hsearch.h:92
#define NLOCKENTS()
Definition: lock.c:57
static uint32 proclock_hash(const void *key, Size keysize)
Definition: lock.c:534
struct LOCALLOCK LOCALLOCK
struct LOCK LOCK
struct PROCLOCK PROCLOCK
struct LOCKTAG LOCKTAG
struct PROCLOCKTAG PROCLOCKTAG
struct LOCALLOCKTAG LOCALLOCKTAG
void * ShmemInitStruct(const char *name, Size size, bool *foundPtr)
Definition: shmem.c:396
HTAB * ShmemInitHash(const char *name, long init_size, long max_size, HASHCTL *infoP, int hash_flags)
Definition: shmem.c:341
#define SpinLockInit(lock)
Definition: spin.h:60
Size keysize
Definition: hsearch.h:75
HashValueFunc hash
Definition: hsearch.h:78
Size entrysize
Definition: hsearch.h:76
long num_partitions
Definition: hsearch.h:68

References HASHCTL::entrysize, FastPathStrongRelationLocks, HASHCTL::hash, HASH_BLOBS, hash_create(), hash_destroy(), HASH_ELEM, HASH_FUNCTION, HASH_PARTITION, HASHCTL::keysize, LockMethodLocalHash, LockMethodLockHash, LockMethodProcLockHash, FastPathStrongRelationLockData::mutex, NLOCKENTS, NUM_LOCK_PARTITIONS, HASHCTL::num_partitions, proclock_hash(), ShmemInitHash(), ShmemInitStruct(), and SpinLockInit.

Referenced by CreateSharedMemoryAndSemaphores().
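
For orientation, a hedged sketch of the sizing rule referenced above (see the NLOCKENTS() definition in lock.c; the expression there uses overflow-safe arithmetic rather than plain multiplication):

 /* Approximate expansion of NLOCKENTS(), assuming the usual GUC variables: */
 long    max_table_size  = max_locks_per_transaction * (MaxBackends + max_prepared_xacts);
 long    init_table_size = max_table_size / 2;       /* hash tables start half-sized */

This is why raising max_locks_per_transaction is the standard remedy for "out of shared memory" errors reported by the lock manager.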

◆ lock_twophase_postabort()

void lock_twophase_postabort ( TransactionId  xid,
uint16  info,
void *  recdata,
uint32  len 
)

Definition at line 4407 of file lock.c.

4409 {
4410  lock_twophase_postcommit(xid, info, recdata, len);
4411 }
void lock_twophase_postcommit(TransactionId xid, uint16 info, void *recdata, uint32 len)
Definition: lock.c:4381
const void size_t len

References len, and lock_twophase_postcommit().

◆ lock_twophase_postcommit()

void lock_twophase_postcommit ( TransactionId  xid,
uint16  info,
void *  recdata,
uint32  len 
)

Definition at line 4381 of file lock.c.

4383 {
4384  TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4385  PGPROC *proc = TwoPhaseGetDummyProc(xid, true);
4386  LOCKTAG *locktag;
4387  LOCKMETHODID lockmethodid;
4388  LockMethod lockMethodTable;
4389 
4390  Assert(len == sizeof(TwoPhaseLockRecord));
4391  locktag = &rec->locktag;
4392  lockmethodid = locktag->locktag_lockmethodid;
4393 
4394  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4395  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4396  lockMethodTable = LockMethods[lockmethodid];
4397 
4398  LockRefindAndRelease(lockMethodTable, proc, locktag, rec->lockmode, true);
4399 }
static void LockRefindAndRelease(LockMethod lockMethodTable, PGPROC *proc, LOCKTAG *locktag, LOCKMODE lockmode, bool decrement_strong_lock_count)
Definition: lock.c:3107
PGPROC * TwoPhaseGetDummyProc(TransactionId xid, bool lock_held)
Definition: twophase.c:933

References Assert(), elog(), ERROR, len, lengthof, LockMethods, TwoPhaseLockRecord::lockmode, LockRefindAndRelease(), TwoPhaseLockRecord::locktag, LOCKTAG::locktag_lockmethodid, and TwoPhaseGetDummyProc().

Referenced by lock_twophase_postabort().

◆ lock_twophase_recover()

void lock_twophase_recover ( TransactionId  xid,
uint16  info,
void *  recdata,
uint32  len 
)

Definition at line 4168 of file lock.c.

4170 {
4171  TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4172  PGPROC *proc = TwoPhaseGetDummyProc(xid, false);
4173  LOCKTAG *locktag;
4174  LOCKMODE lockmode;
4175  LOCKMETHODID lockmethodid;
4176  LOCK *lock;
4177  PROCLOCK *proclock;
4178  PROCLOCKTAG proclocktag;
4179  bool found;
4180  uint32 hashcode;
4181  uint32 proclock_hashcode;
4182  int partition;
4183  LWLock *partitionLock;
4184  LockMethod lockMethodTable;
4185 
4186  Assert(len == sizeof(TwoPhaseLockRecord));
4187  locktag = &rec->locktag;
4188  lockmode = rec->lockmode;
4189  lockmethodid = locktag->locktag_lockmethodid;
4190 
4191  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4192  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4193  lockMethodTable = LockMethods[lockmethodid];
4194 
4195  hashcode = LockTagHashCode(locktag);
4196  partition = LockHashPartition(hashcode);
4197  partitionLock = LockHashPartitionLock(hashcode);
4198 
4199  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4200 
4201  /*
4202  * Find or create a lock with this tag.
4203  */
4204  lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
4205  (void *) locktag,
4206  hashcode,
4207  HASH_ENTER_NULL,
4208  &found);
4209  if (!lock)
4210  {
4211  LWLockRelease(partitionLock);
4212  ereport(ERROR,
4213  (errcode(ERRCODE_OUT_OF_MEMORY),
4214  errmsg("out of shared memory"),
4215  errhint("You might need to increase max_locks_per_transaction.")));
4216  }
4217 
4218  /*
4219  * if it's a new lock object, initialize it
4220  */
4221  if (!found)
4222  {
4223  lock->grantMask = 0;
4224  lock->waitMask = 0;
4225  dlist_init(&lock->procLocks);
4226  dclist_init(&lock->waitProcs);
4227  lock->nRequested = 0;
4228  lock->nGranted = 0;
4229  MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
4230  MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
4231  LOCK_PRINT("lock_twophase_recover: new", lock, lockmode);
4232  }
4233  else
4234  {
4235  LOCK_PRINT("lock_twophase_recover: found", lock, lockmode);
4236  Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
4237  Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
4238  Assert(lock->nGranted <= lock->nRequested);
4239  }
4240 
4241  /*
4242  * Create the hash key for the proclock table.
4243  */
4244  proclocktag.myLock = lock;
4245  proclocktag.myProc = proc;
4246 
4247  proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
4248 
4249  /*
4250  * Find or create a proclock entry with this tag
4251  */
4252  proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
4253  (void *) &proclocktag,
4254  proclock_hashcode,
4255  HASH_ENTER_NULL,
4256  &found);
4257  if (!proclock)
4258  {
4259  /* Oops, not enough shmem for the proclock */
4260  if (lock->nRequested == 0)
4261  {
4262  /*
4263  * There are no other requestors of this lock, so garbage-collect
4264  * the lock object. We *must* do this to avoid a permanent leak
4265  * of shared memory, because there won't be anything to cause
4266  * anyone to release the lock object later.
4267  */
4268  Assert(dlist_is_empty(&lock->procLocks));
4269  if (!hash_search_with_hash_value(LockMethodLockHash,
4270  (void *) &(lock->tag),
4271  hashcode,
4272  HASH_REMOVE,
4273  NULL))
4274  elog(PANIC, "lock table corrupted");
4275  }
4276  LWLockRelease(partitionLock);
4277  ereport(ERROR,
4278  (errcode(ERRCODE_OUT_OF_MEMORY),
4279  errmsg("out of shared memory"),
4280  errhint("You might need to increase max_locks_per_transaction.")));
4281  }
4282 
4283  /*
4284  * If new, initialize the new entry
4285  */
4286  if (!found)
4287  {
4288  Assert(proc->lockGroupLeader == NULL);
4289  proclock->groupLeader = proc;
4290  proclock->holdMask = 0;
4291  proclock->releaseMask = 0;
4292  /* Add proclock to appropriate lists */
4293  dlist_push_tail(&lock->procLocks, &proclock->lockLink);
4294  dlist_push_tail(&proc->myProcLocks[partition],
4295  &proclock->procLink);
4296  PROCLOCK_PRINT("lock_twophase_recover: new", proclock);
4297  }
4298  else
4299  {
4300  PROCLOCK_PRINT("lock_twophase_recover: found", proclock);
4301  Assert((proclock->holdMask & ~lock->grantMask) == 0);
4302  }
4303 
4304  /*
4305  * lock->nRequested and lock->requested[] count the total number of
4306  * requests, whether granted or waiting, so increment those immediately.
4307  */
4308  lock->nRequested++;
4309  lock->requested[lockmode]++;
4310  Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
4311 
4312  /*
4313  * We shouldn't already hold the desired lock.
4314  */
4315  if (proclock->holdMask & LOCKBIT_ON(lockmode))
4316  elog(ERROR, "lock %s on object %u/%u/%u is already held",
4317  lockMethodTable->lockModeNames[lockmode],
4318  lock->tag.locktag_field1, lock->tag.locktag_field2,
4319  lock->tag.locktag_field3);
4320 
4321  /*
4322  * We ignore any possible conflicts and just grant ourselves the lock. Not
4323  * only because we don't bother, but also to avoid deadlocks when
4324  * switching from standby to normal mode. See function comment.
4325  */
4326  GrantLock(lock, proclock, lockmode);
4327 
4328  /*
4329  * Bump strong lock count, to make sure any fast-path lock requests won't
4330  * be granted without consulting the primary lock table.
4331  */
4332  if (ConflictsWithRelationFastPath(&lock->tag, lockmode))
4333  {
4334  uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
4335 
4336  SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
4337  FastPathStrongRelationLocks->count[fasthashcode]++;
4338  SpinLockRelease(&FastPathStrongRelationLocks->mutex);
4339  }
4340 
4341  LWLockRelease(partitionLock);
4342 }
#define MemSet(start, val, len)
Definition: c.h:1004
@ HASH_REMOVE
Definition: hsearch.h:115
@ HASH_ENTER_NULL
Definition: hsearch.h:116
static void dlist_init(dlist_head *head)
Definition: ilist.h:314
static bool dlist_is_empty(const dlist_head *head)
Definition: ilist.h:336
static void dlist_push_tail(dlist_head *head, dlist_node *node)
Definition: ilist.h:364
static uint32 ProcLockHashCode(const PROCLOCKTAG *proclocktag, uint32 hashcode)
Definition: lock.c:565
void GrantLock(LOCK *lock, PROCLOCK *proclock, LOCKMODE lockmode)
Definition: lock.c:1550
#define PROCLOCK_PRINT(where, proclockP)
Definition: lock.c:366
#define MAX_LOCKMODES
Definition: lock.h:82
#define LockHashPartition(hashcode)
Definition: lock.h:525
int LOCKMODE
Definition: lockdefs.h:26
@ LW_EXCLUSIVE
Definition: lwlock.h:115
uint32 locktag_field3
Definition: lock.h:168
dlist_head myProcLocks[NUM_LOCK_PARTITIONS]
Definition: proc.h:252
dlist_node lockLink
Definition: lock.h:378
LOCKMASK releaseMask
Definition: lock.h:377
dlist_node procLink
Definition: lock.h:379

References Assert(), ConflictsWithRelationFastPath, FastPathStrongRelationLockData::count, dclist_init(), dlist_init(), dlist_is_empty(), dlist_push_tail(), elog(), ereport, errcode(), errhint(), errmsg(), ERROR, FastPathStrongLockHashPartition, FastPathStrongRelationLocks, LOCK::granted, GrantLock(), LOCK::grantMask, PROCLOCK::groupLeader, HASH_ENTER_NULL, HASH_REMOVE, hash_search_with_hash_value(), PROCLOCK::holdMask, len, lengthof, LOCK_PRINT, LOCKBIT_ON, PGPROC::lockGroupLeader, LockHashPartition, LockHashPartitionLock, PROCLOCK::lockLink, LockMethodLockHash, LockMethodProcLockHash, LockMethods, TwoPhaseLockRecord::lockmode, LockMethodData::lockModeNames, TwoPhaseLockRecord::locktag, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_field3, LOCKTAG::locktag_lockmethodid, LockTagHashCode(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), MAX_LOCKMODES, MemSet, FastPathStrongRelationLockData::mutex, PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, PGPROC::myProcLocks, LOCK::nGranted, LOCK::nRequested, PANIC, PROCLOCK::procLink, PROCLOCK_PRINT, ProcLockHashCode(), LOCK::procLocks, PROCLOCK::releaseMask, LOCK::requested, SpinLockAcquire, SpinLockRelease, LOCK::tag, TwoPhaseGetDummyProc(), LOCK::waitMask, and LOCK::waitProcs.

◆ lock_twophase_standby_recover()

void lock_twophase_standby_recover ( TransactionId  xid,
uint16  info,
void *  recdata,
uint32  len 
)

Definition at line 4349 of file lock.c.

4351 {
4352  TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4353  LOCKTAG *locktag;
4354  LOCKMODE lockmode;
4355  LOCKMETHODID lockmethodid;
4356 
4357  Assert(len == sizeof(TwoPhaseLockRecord));
4358  locktag = &rec->locktag;
4359  lockmode = rec->lockmode;
4360  lockmethodid = locktag->locktag_lockmethodid;
4361 
4362  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4363  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4364 
4365  if (lockmode == AccessExclusiveLock &&
4366  locktag->locktag_type == LOCKTAG_RELATION)
4367  {
4368  StandbyAcquireAccessExclusiveLock(xid,
4369  locktag->locktag_field1 /* dboid */ ,
4370  locktag->locktag_field2 /* reloid */ );
4371  }
4372 }
void StandbyAcquireAccessExclusiveLock(TransactionId xid, Oid dbOid, Oid relOid)
Definition: standby.c:981

References AccessExclusiveLock, Assert(), elog(), ERROR, len, lengthof, LockMethods, TwoPhaseLockRecord::lockmode, TwoPhaseLockRecord::locktag, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_lockmethodid, LOCKTAG_RELATION, LOCKTAG::locktag_type, and StandbyAcquireAccessExclusiveLock().

◆ LockAcquire()

LockAcquireResult LockAcquire ( const LOCKTAG * locktag,
LOCKMODE  lockmode,
bool  sessionLock,
bool  dontWait 
)
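
An illustrative, hypothetical caller; in practice backends normally go through the lmgr.c wrappers such as LockRelationOid() and UnlockRelationOid(). "relid" is assumed to be a valid relation OID in the current database.

 static void
 lock_and_unlock_relation(Oid relid)
 {
     LOCKTAG     tag;

     SET_LOCKTAG_RELATION(tag, MyDatabaseId, relid);

     /* fourth argument false: sleep until the lock is granted */
     (void) LockAcquire(&tag, AccessShareLock, false, false);

     /* ... work with the relation ... */

     LockRelease(&tag, AccessShareLock, false);
 }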

◆ LockAcquireExtended()

LockAcquireResult LockAcquireExtended ( const LOCKTAG * locktag,
LOCKMODE  lockmode,
bool  sessionLock,
bool  dontWait,
bool  reportMemoryError,
LOCALLOCK **  locallockp 
)

Definition at line 771 of file lock.c.

777 {
778  LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
779  LockMethod lockMethodTable;
780  LOCALLOCKTAG localtag;
781  LOCALLOCK *locallock;
782  LOCK *lock;
783  PROCLOCK *proclock;
784  bool found;
785  ResourceOwner owner;
786  uint32 hashcode;
787  LWLock *partitionLock;
788  bool found_conflict;
789  bool log_lock = false;
790 
791  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
792  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
793  lockMethodTable = LockMethods[lockmethodid];
794  if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
795  elog(ERROR, "unrecognized lock mode: %d", lockmode);
796 
797  if (RecoveryInProgress() && !InRecovery &&
798  (locktag->locktag_type == LOCKTAG_OBJECT ||
799  locktag->locktag_type == LOCKTAG_RELATION) &&
800  lockmode > RowExclusiveLock)
801  ereport(ERROR,
802  (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
803  errmsg("cannot acquire lock mode %s on database objects while recovery is in progress",
804  lockMethodTable->lockModeNames[lockmode]),
805  errhint("Only RowExclusiveLock or less can be acquired on database objects during recovery.")));
806 
807 #ifdef LOCK_DEBUG
808  if (LOCK_DEBUG_ENABLED(locktag))
809  elog(LOG, "LockAcquire: lock [%u,%u] %s",
810  locktag->locktag_field1, locktag->locktag_field2,
811  lockMethodTable->lockModeNames[lockmode]);
812 #endif
813 
814  /* Identify owner for lock */
815  if (sessionLock)
816  owner = NULL;
817  else
818  owner = CurrentResourceOwner;
819 
820  /*
821  * Find or create a LOCALLOCK entry for this lock and lockmode
822  */
823  MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
824  localtag.lock = *locktag;
825  localtag.mode = lockmode;
826 
827  locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
828  (void *) &localtag,
829  HASH_ENTER, &found);
830 
831  /*
832  * if it's a new locallock object, initialize it
833  */
834  if (!found)
835  {
836  locallock->lock = NULL;
837  locallock->proclock = NULL;
838  locallock->hashcode = LockTagHashCode(&(localtag.lock));
839  locallock->nLocks = 0;
840  locallock->holdsStrongLockCount = false;
841  locallock->lockCleared = false;
842  locallock->numLockOwners = 0;
843  locallock->maxLockOwners = 8;
844  locallock->lockOwners = NULL; /* in case next line fails */
845  locallock->lockOwners = (LOCALLOCKOWNER *)
846  MemoryContextAlloc(TopMemoryContext,
847  locallock->maxLockOwners * sizeof(LOCALLOCKOWNER));
848  }
849  else
850  {
851  /* Make sure there will be room to remember the lock */
852  if (locallock->numLockOwners >= locallock->maxLockOwners)
853  {
854  int newsize = locallock->maxLockOwners * 2;
855 
856  locallock->lockOwners = (LOCALLOCKOWNER *)
857  repalloc(locallock->lockOwners,
858  newsize * sizeof(LOCALLOCKOWNER));
859  locallock->maxLockOwners = newsize;
860  }
861  }
862  hashcode = locallock->hashcode;
863 
864  if (locallockp)
865  *locallockp = locallock;
866 
867  /*
868  * If we already hold the lock, we can just increase the count locally.
869  *
870  * If lockCleared is already set, caller need not worry about absorbing
871  * sinval messages related to the lock's object.
872  */
873  if (locallock->nLocks > 0)
874  {
875  GrantLockLocal(locallock, owner);
876  if (locallock->lockCleared)
877  return LOCKACQUIRE_ALREADY_CLEAR;
878  else
879  return LOCKACQUIRE_ALREADY_HELD;
880  }
881 
882  /*
883  * We don't acquire any other heavyweight lock while holding the relation
884  * extension lock. We do allow to acquire the same relation extension
885  * lock more than once but that case won't reach here.
886  */
887  Assert(!IsRelationExtensionLockHeld);
888 
889  /*
890  * We don't acquire any other heavyweight lock while holding the page lock
891  * except for relation extension.
892  */
893  Assert(!IsPageLockHeld ||
894  (locktag->locktag_type == LOCKTAG_RELATION_EXTEND));
895 
896  /*
897  * Prepare to emit a WAL record if acquisition of this lock needs to be
898  * replayed in a standby server.
899  *
900  * Here we prepare to log; after lock is acquired we'll issue log record.
901  * This arrangement simplifies error recovery in case the preparation step
902  * fails.
903  *
904  * Only AccessExclusiveLocks can conflict with lock types that read-only
905  * transactions can acquire in a standby server. Make sure this definition
906  * matches the one in GetRunningTransactionLocks().
907  */
908  if (lockmode >= AccessExclusiveLock &&
909  locktag->locktag_type == LOCKTAG_RELATION &&
910  !RecoveryInProgress() &&
911  XLogStandbyInfoActive())
912  {
913  LogAccessExclusiveLockPrepare();
914  log_lock = true;
915  }
916 
917  /*
918  * Attempt to take lock via fast path, if eligible. But if we remember
919  * having filled up the fast path array, we don't attempt to make any
920  * further use of it until we release some locks. It's possible that some
921  * other backend has transferred some of those locks to the shared hash
922  * table, leaving space free, but it's not worth acquiring the LWLock just
923  * to check. It's also possible that we're acquiring a second or third
924  * lock type on a relation we have already locked using the fast-path, but
925  * for now we don't worry about that case either.
926  */
927  if (EligibleForRelationFastPath(locktag, lockmode) &&
928  FastPathLocalUseCount < FP_LOCK_SLOTS_PER_BACKEND)
929  {
930  uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
931  bool acquired;
932 
933  /*
934  * LWLockAcquire acts as a memory sequencing point, so it's safe to
935  * assume that any strong locker whose increment to
936  * FastPathStrongRelationLocks->counts becomes visible after we test
937  * it has yet to begin to transfer fast-path locks.
938  */
939  LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
940  if (FastPathStrongRelationLocks->count[fasthashcode] != 0)
941  acquired = false;
942  else
943  acquired = FastPathGrantRelationLock(locktag->locktag_field2,
944  lockmode);
945  LWLockRelease(&MyProc->fpInfoLock);
946  if (acquired)
947  {
948  /*
949  * The locallock might contain stale pointers to some old shared
950  * objects; we MUST reset these to null before considering the
951  * lock to be acquired via fast-path.
952  */
953  locallock->lock = NULL;
954  locallock->proclock = NULL;
955  GrantLockLocal(locallock, owner);
956  return LOCKACQUIRE_OK;
957  }
958  }
959 
960  /*
961  * If this lock could potentially have been taken via the fast-path by
962  * some other backend, we must (temporarily) disable further use of the
963  * fast-path for this lock tag, and migrate any locks already taken via
964  * this method to the main lock table.
965  */
966  if (ConflictsWithRelationFastPath(locktag, lockmode))
967  {
968  uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
969 
970  BeginStrongLockAcquire(locallock, fasthashcode);
971  if (!FastPathTransferRelationLocks(lockMethodTable, locktag,
972  hashcode))
973  {
974  AbortStrongLockAcquire();
975  if (locallock->nLocks == 0)
976  RemoveLocalLock(locallock);
977  if (locallockp)
978  *locallockp = NULL;
979  if (reportMemoryError)
980  ereport(ERROR,
981  (errcode(ERRCODE_OUT_OF_MEMORY),
982  errmsg("out of shared memory"),
983  errhint("You might need to increase max_locks_per_transaction.")));
984  else
985  return LOCKACQUIRE_NOT_AVAIL;
986  }
987  }
988 
989  /*
990  * We didn't find the lock in our LOCALLOCK table, and we didn't manage to
991  * take it via the fast-path, either, so we've got to mess with the shared
992  * lock table.
993  */
994  partitionLock = LockHashPartitionLock(hashcode);
995 
996  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
997 
998  /*
999  * Find or create lock and proclock entries with this tag
1000  *
1001  * Note: if the locallock object already existed, it might have a pointer
1002  * to the lock already ... but we should not assume that that pointer is
1003  * valid, since a lock object with zero hold and request counts can go
1004  * away anytime. So we have to use SetupLockInTable() to recompute the
1005  * lock and proclock pointers, even if they're already set.
1006  */
1007  proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
1008  hashcode, lockmode);
1009  if (!proclock)
1010  {
1011  AbortStrongLockAcquire();
1012  LWLockRelease(partitionLock);
1013  if (locallock->nLocks == 0)
1014  RemoveLocalLock(locallock);
1015  if (locallockp)
1016  *locallockp = NULL;
1017  if (reportMemoryError)
1018  ereport(ERROR,
1019  (errcode(ERRCODE_OUT_OF_MEMORY),
1020  errmsg("out of shared memory"),
1021  errhint("You might need to increase max_locks_per_transaction.")));
1022  else
1023  return LOCKACQUIRE_NOT_AVAIL;
1024  }
1025  locallock->proclock = proclock;
1026  lock = proclock->tag.myLock;
1027  locallock->lock = lock;
1028 
1029  /*
1030  * If lock requested conflicts with locks requested by waiters, must join
1031  * wait queue. Otherwise, check for conflict with already-held locks.
1032  * (That's last because most complex check.)
1033  */
1034  if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
1035  found_conflict = true;
1036  else
1037  found_conflict = LockCheckConflicts(lockMethodTable, lockmode,
1038  lock, proclock);
1039 
1040  if (!found_conflict)
1041  {
1042  /* No conflict with held or previously requested locks */
1043  GrantLock(lock, proclock, lockmode);
1044  GrantLockLocal(locallock, owner);
1045  }
1046  else
1047  {
1048  /*
1049  * We can't acquire the lock immediately. If caller specified no
1050  * blocking, remove useless table entries and return
1051  * LOCKACQUIRE_NOT_AVAIL without waiting.
1052  */
1053  if (dontWait)
1054  {
1055  AbortStrongLockAcquire();
1056  if (proclock->holdMask == 0)
1057  {
1058  uint32 proclock_hashcode;
1059 
1060  proclock_hashcode = ProcLockHashCode(&proclock->tag, hashcode);
1061  dlist_delete(&proclock->lockLink);
1062  dlist_delete(&proclock->procLink);
1063  if (!hash_search_with_hash_value(LockMethodProcLockHash,
1064  (void *) &(proclock->tag),
1065  proclock_hashcode,
1066  HASH_REMOVE,
1067  NULL))
1068  elog(PANIC, "proclock table corrupted");
1069  }
1070  else
1071  PROCLOCK_PRINT("LockAcquire: NOWAIT", proclock);
1072  lock->nRequested--;
1073  lock->requested[lockmode]--;
1074  LOCK_PRINT("LockAcquire: conditional lock failed", lock, lockmode);
1075  Assert((lock->nRequested > 0) && (lock->requested[lockmode] >= 0));
1076  Assert(lock->nGranted <= lock->nRequested);
1077  LWLockRelease(partitionLock);
1078  if (locallock->nLocks == 0)
1079  RemoveLocalLock(locallock);
1080  if (locallockp)
1081  *locallockp = NULL;
1082  return LOCKACQUIRE_NOT_AVAIL;
1083  }
1084 
1085  /*
1086  * Set bitmask of locks this process already holds on this object.
1087  */
1088  MyProc->heldLocks = proclock->holdMask;
1089 
1090  /*
1091  * Sleep till someone wakes me up.
1092  */
1093 
1094  TRACE_POSTGRESQL_LOCK_WAIT_START(locktag->locktag_field1,
1095  locktag->locktag_field2,
1096  locktag->locktag_field3,
1097  locktag->locktag_field4,
1098  locktag->locktag_type,
1099  lockmode);
1100 
1101  WaitOnLock(locallock, owner);
1102 
1103  TRACE_POSTGRESQL_LOCK_WAIT_DONE(locktag->locktag_field1,
1104  locktag->locktag_field2,
1105  locktag->locktag_field3,
1106  locktag->locktag_field4,
1107  locktag->locktag_type,
1108  lockmode);
1109 
1110  /*
1111  * NOTE: do not do any material change of state between here and
1112  * return. All required changes in locktable state must have been
1113  * done when the lock was granted to us --- see notes in WaitOnLock.
1114  */
1115 
1116  /*
1117  * Check the proclock entry status, in case something in the ipc
1118  * communication doesn't work correctly.
1119  */
1120  if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
1121  {
1122  AbortStrongLockAcquire();
1123  PROCLOCK_PRINT("LockAcquire: INCONSISTENT", proclock);
1124  LOCK_PRINT("LockAcquire: INCONSISTENT", lock, lockmode);
1125  /* Should we retry ? */
1126  LWLockRelease(partitionLock);
1127  elog(ERROR, "LockAcquire failed");
1128  }
1129  PROCLOCK_PRINT("LockAcquire: granted", proclock);
1130  LOCK_PRINT("LockAcquire: granted", lock, lockmode);
1131  }
1132 
1133  /*
1134  * Lock state is fully up-to-date now; if we error out after this, no
1135  * special error cleanup is required.
1136  */
1137  FinishStrongLockAcquire();
1138 
1139  LWLockRelease(partitionLock);
1140 
1141  /*
1142  * Emit a WAL record if acquisition of this lock needs to be replayed in a
1143  * standby server.
1144  */
1145  if (log_lock)
1146  {
1147  /*
1148  * Decode the locktag back to the original values, to avoid sending
1149  * lots of empty bytes with every message. See lock.h to check how a
1150  * locktag is defined for LOCKTAG_RELATION
1151  */
1152  LogAccessExclusiveLock(locktag->locktag_field1,
1153  locktag->locktag_field2);
1154  }
1155 
1156  return LOCKACQUIRE_OK;
1157 }
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:953
#define LOG
Definition: elog.h:31
@ HASH_ENTER
Definition: hsearch.h:114
static void dlist_delete(dlist_node *node)
Definition: ilist.h:405
static void RemoveLocalLock(LOCALLOCK *locallock)
Definition: lock.c:1366
static PROCLOCK * SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc, const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode)
Definition: lock.c:1170
static bool FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag, uint32 hashcode)
Definition: lock.c:2707
void AbortStrongLockAcquire(void)
Definition: lock.c:1752
static bool FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode)
Definition: lock.c:2640
static void WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner)
Definition: lock.c:1809
static int FastPathLocalUseCount
Definition: lock.c:172
#define EligibleForRelationFastPath(locktag, mode)
Definition: lock.c:227
static void BeginStrongLockAcquire(LOCALLOCK *locallock, uint32 fasthashcode)
Definition: lock.c:1716
bool LockCheckConflicts(LockMethod lockMethodTable, LOCKMODE lockmode, LOCK *lock, PROCLOCK *proclock)
Definition: lock.c:1419
static void FinishStrongLockAcquire(void)
Definition: lock.c:1742
#define RowExclusiveLock
Definition: lockdefs.h:38
ResourceOwner CurrentResourceOwner
Definition: resowner.c:146
void LogAccessExclusiveLockPrepare(void)
Definition: standby.c:1428
void LogAccessExclusiveLock(Oid dbOid, Oid relOid)
Definition: standby.c:1411
int maxLockOwners
Definition: lock.h:437
bool lockCleared
Definition: lock.h:440
uint16 locktag_field4
Definition: lock.h:169
LOCKMASK heldLocks
Definition: proc.h:226
bool RecoveryInProgress(void)
Definition: xlog.c:5908
#define XLogStandbyInfoActive()
Definition: xlog.h:118
bool InRecovery
Definition: xlogutils.c:53

References AbortStrongLockAcquire(), AccessExclusiveLock, Assert(), BeginStrongLockAcquire(), ConflictsWithRelationFastPath, LockMethodData::conflictTab, FastPathStrongRelationLockData::count, CurrentResourceOwner, dlist_delete(), EligibleForRelationFastPath, elog(), ereport, errcode(), errhint(), errmsg(), ERROR, FastPathGrantRelationLock(), FastPathLocalUseCount, FastPathStrongLockHashPartition, FastPathStrongRelationLocks, FastPathTransferRelationLocks(), FinishStrongLockAcquire(), FP_LOCK_SLOTS_PER_BACKEND, PGPROC::fpInfoLock, GrantLock(), GrantLockLocal(), HASH_ENTER, HASH_REMOVE, hash_search(), hash_search_with_hash_value(), LOCALLOCK::hashcode, PGPROC::heldLocks, PROCLOCK::holdMask, LOCALLOCK::holdsStrongLockCount, InRecovery, lengthof, LOCALLOCKTAG::lock, LOCALLOCK::lock, LOCK_PRINT, LOCKACQUIRE_ALREADY_CLEAR, LOCKACQUIRE_ALREADY_HELD, LOCKACQUIRE_NOT_AVAIL, LOCKACQUIRE_OK, LOCKBIT_ON, LockCheckConflicts(), LOCALLOCK::lockCleared, LockHashPartitionLock, PROCLOCK::lockLink, LockMethodLocalHash, LockMethodProcLockHash, LockMethods, LockMethodData::lockModeNames, LOCALLOCK::lockOwners, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_field3, LOCKTAG::locktag_field4, LOCKTAG::locktag_lockmethodid, LOCKTAG_OBJECT, LOCKTAG_RELATION, LOCKTAG_RELATION_EXTEND, LOCKTAG::locktag_type, LockTagHashCode(), LOG, LogAccessExclusiveLock(), LogAccessExclusiveLockPrepare(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), LOCALLOCK::maxLockOwners, MemoryContextAlloc(), MemSet, LOCALLOCKTAG::mode, PROCLOCKTAG::myLock, MyProc, LOCK::nGranted, LOCALLOCK::nLocks, LOCK::nRequested, LockMethodData::numLockModes, LOCALLOCK::numLockOwners, PANIC, PROCLOCK::procLink, LOCALLOCK::proclock, PROCLOCK_PRINT, ProcLockHashCode(), RecoveryInProgress(), RemoveLocalLock(), repalloc(), LOCK::requested, RowExclusiveLock, SetupLockInTable(), PROCLOCK::tag, TopMemoryContext, LOCK::waitMask, WaitOnLock(), and XLogStandbyInfoActive.

Referenced by ConditionalLockRelation(), ConditionalLockRelationOid(), LockAcquire(), LockRelation(), LockRelationId(), and LockRelationOid().
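
A hedged sketch of conditional acquisition in the style of ConditionalLockRelationOid(): with dontWait = true the call returns LOCKACQUIRE_NOT_AVAIL instead of sleeping, and locallockp (when non-NULL) receives the LOCALLOCK entry on success. "relid" is a hypothetical relation OID.

 LOCKTAG             tag;
 LOCALLOCK          *locallock;
 LockAcquireResult   res;

 SET_LOCKTAG_RELATION(tag, MyDatabaseId, relid);
 res = LockAcquireExtended(&tag, ExclusiveLock, false /* sessionLock */,
                           true /* dontWait */, true /* reportMemoryError */,
                           &locallock);
 if (res == LOCKACQUIRE_NOT_AVAIL)
     elog(NOTICE, "relation %u is locked by someone else, skipping", relid);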

◆ LockCheckConflicts()

bool LockCheckConflicts ( LockMethod  lockMethodTable,
LOCKMODE  lockmode,
LOCK * lock,
PROCLOCK * proclock 
)

Definition at line 1419 of file lock.c.

1423 {
1424  int numLockModes = lockMethodTable->numLockModes;
1425  LOCKMASK myLocks;
1426  int conflictMask = lockMethodTable->conflictTab[lockmode];
1427  int conflictsRemaining[MAX_LOCKMODES];
1428  int totalConflictsRemaining = 0;
1429  dlist_iter proclock_iter;
1430  int i;
1431 
1432  /*
1433  * first check for global conflicts: If no locks conflict with my request,
1434  * then I get the lock.
1435  *
1436  * Checking for conflict: lock->grantMask represents the types of
1437  * currently held locks. conflictTable[lockmode] has a bit set for each
1438  * type of lock that conflicts with request. Bitwise compare tells if
1439  * there is a conflict.
1440  */
1441  if (!(conflictMask & lock->grantMask))
1442  {
1443  PROCLOCK_PRINT("LockCheckConflicts: no conflict", proclock);
1444  return false;
1445  }
1446 
1447  /*
1448  * Rats. Something conflicts. But it could still be my own lock, or a
1449  * lock held by another member of my locking group. First, figure out how
1450  * many conflicts remain after subtracting out any locks I hold myself.
1451  */
1452  myLocks = proclock->holdMask;
1453  for (i = 1; i <= numLockModes; i++)
1454  {
1455  if ((conflictMask & LOCKBIT_ON(i)) == 0)
1456  {
1457  conflictsRemaining[i] = 0;
1458  continue;
1459  }
1460  conflictsRemaining[i] = lock->granted[i];
1461  if (myLocks & LOCKBIT_ON(i))
1462  --conflictsRemaining[i];
1463  totalConflictsRemaining += conflictsRemaining[i];
1464  }
1465 
1466  /* If no conflicts remain, we get the lock. */
1467  if (totalConflictsRemaining == 0)
1468  {
1469  PROCLOCK_PRINT("LockCheckConflicts: resolved (simple)", proclock);
1470  return false;
1471  }
1472 
1473  /* If no group locking, it's definitely a conflict. */
1474  if (proclock->groupLeader == MyProc && MyProc->lockGroupLeader == NULL)
1475  {
1476  Assert(proclock->tag.myProc == MyProc);
1477  PROCLOCK_PRINT("LockCheckConflicts: conflicting (simple)",
1478  proclock);
1479  return true;
1480  }
1481 
1482  /*
1483  * The relation extension or page lock conflict even between the group
1484  * members.
1485  */
1486  if (LOCK_LOCKTAG(*lock) == LOCKTAG_RELATION_EXTEND ||
1487  (LOCK_LOCKTAG(*lock) == LOCKTAG_PAGE))
1488  {
1489  PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)",
1490  proclock);
1491  return true;
1492  }
1493 
1494  /*
1495  * Locks held in conflicting modes by members of our own lock group are
1496  * not real conflicts; we can subtract those out and see if we still have
1497  * a conflict. This is O(N) in the number of processes holding or
1498  * awaiting locks on this object. We could improve that by making the
1499  * shared memory state more complex (and larger) but it doesn't seem worth
1500  * it.
1501  */
1502  dlist_foreach(proclock_iter, &lock->procLocks)
1503  {
1504  PROCLOCK *otherproclock =
1505  dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
1506 
1507  if (proclock != otherproclock &&
1508  proclock->groupLeader == otherproclock->groupLeader &&
1509  (otherproclock->holdMask & conflictMask) != 0)
1510  {
1511  int intersectMask = otherproclock->holdMask & conflictMask;
1512 
1513  for (i = 1; i <= numLockModes; i++)
1514  {
1515  if ((intersectMask & LOCKBIT_ON(i)) != 0)
1516  {
1517  if (conflictsRemaining[i] <= 0)
1518  elog(PANIC, "proclocks held do not match lock");
1519  conflictsRemaining[i]--;
1520  totalConflictsRemaining--;
1521  }
1522  }
1523 
1524  if (totalConflictsRemaining == 0)
1525  {
1526  PROCLOCK_PRINT("LockCheckConflicts: resolved (group)",
1527  proclock);
1528  return false;
1529  }
1530  }
1531  }
1532 
1533  /* Nope, it's a real conflict. */
1534  PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)", proclock);
1535  return true;
1536 }
#define LOCK_LOCKTAG(lock)
Definition: lock.h:325

References Assert(), LockMethodData::conflictTab, dlist_iter::cur, dlist_container, dlist_foreach, elog(), LOCK::granted, LOCK::grantMask, PROCLOCK::groupLeader, PROCLOCK::holdMask, i, LOCK_LOCKTAG, LOCKBIT_ON, PGPROC::lockGroupLeader, LOCKTAG_PAGE, LOCKTAG_RELATION_EXTEND, MAX_LOCKMODES, MyProc, PROCLOCKTAG::myProc, LockMethodData::numLockModes, PANIC, PROCLOCK_PRINT, LOCK::procLocks, and PROCLOCK::tag.

Referenced by LockAcquireExtended(), ProcLockWakeup(), and ProcSleep().
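
A worked illustration of the initial "global" test (conflict-table contents assumed; the authoritative table is the LockConflicts[] array in lock.c):

 /* conflictTab[AccessExclusiveLock] has a bit set for every lock mode,  */
 /* since AccessExclusiveLock conflicts with all of them.  If the only   */
 /* granted mode is AccessShareLock, then                                */
 /*     lock->grantMask == LOCKBIT_ON(AccessShareLock)                   */
 /* and conflictMask & lock->grantMask is nonzero, so the cheap test     */
 /* fails and the per-PROCLOCK accounting below must decide whether the  */
 /* conflicting holder is ourselves or a member of our lock group.       */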

◆ LockHasWaiters()

bool LockHasWaiters ( const LOCKTAG * locktag,
LOCKMODE  lockmode,
bool  sessionLock 
)

Definition at line 634 of file lock.c.

635 {
636  LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
637  LockMethod lockMethodTable;
638  LOCALLOCKTAG localtag;
639  LOCALLOCK *locallock;
640  LOCK *lock;
641  PROCLOCK *proclock;
642  LWLock *partitionLock;
643  bool hasWaiters = false;
644 
645  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
646  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
647  lockMethodTable = LockMethods[lockmethodid];
648  if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
649  elog(ERROR, "unrecognized lock mode: %d", lockmode);
650 
651 #ifdef LOCK_DEBUG
652  if (LOCK_DEBUG_ENABLED(locktag))
653  elog(LOG, "LockHasWaiters: lock [%u,%u] %s",
654  locktag->locktag_field1, locktag->locktag_field2,
655  lockMethodTable->lockModeNames[lockmode]);
656 #endif
657 
658  /*
659  * Find the LOCALLOCK entry for this lock and lockmode
660  */
661  MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
662  localtag.lock = *locktag;
663  localtag.mode = lockmode;
664 
665  locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
666  (void *) &localtag,
667  HASH_FIND, NULL);
668 
669  /*
670  * let the caller print its own error message, too. Do not ereport(ERROR).
671  */
672  if (!locallock || locallock->nLocks <= 0)
673  {
674  elog(WARNING, "you don't own a lock of type %s",
675  lockMethodTable->lockModeNames[lockmode]);
676  return false;
677  }
678 
679  /*
680  * Check the shared lock table.
681  */
682  partitionLock = LockHashPartitionLock(locallock->hashcode);
683 
684  LWLockAcquire(partitionLock, LW_SHARED);
685 
686  /*
687  * We don't need to re-find the lock or proclock, since we kept their
688  * addresses in the locallock table, and they couldn't have been removed
689  * while we were holding a lock on them.
690  */
691  lock = locallock->lock;
692  LOCK_PRINT("LockHasWaiters: found", lock, lockmode);
693  proclock = locallock->proclock;
694  PROCLOCK_PRINT("LockHasWaiters: found", proclock);
695 
696  /*
697  * Double-check that we are actually holding a lock of the type we want to
698  * release.
699  */
700  if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
701  {
702  PROCLOCK_PRINT("LockHasWaiters: WRONGTYPE", proclock);
703  LWLockRelease(partitionLock);
704  elog(WARNING, "you don't own a lock of type %s",
705  lockMethodTable->lockModeNames[lockmode]);
706  RemoveLocalLock(locallock);
707  return false;
708  }
709 
710  /*
711  * Do the checking.
712  */
713  if ((lockMethodTable->conflictTab[lockmode] & lock->waitMask) != 0)
714  hasWaiters = true;
715 
716  LWLockRelease(partitionLock);
717 
718  return hasWaiters;
719 }
#define WARNING
Definition: elog.h:36

References LockMethodData::conflictTab, elog(), ERROR, HASH_FIND, hash_search(), LOCALLOCK::hashcode, PROCLOCK::holdMask, lengthof, LOCALLOCKTAG::lock, LOCALLOCK::lock, LOCK_PRINT, LOCKBIT_ON, LockHashPartitionLock, LockMethodLocalHash, LockMethods, LockMethodData::lockModeNames, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_lockmethodid, LOG, LW_SHARED, LWLockAcquire(), LWLockRelease(), MemSet, LOCALLOCKTAG::mode, LOCALLOCK::nLocks, LockMethodData::numLockModes, LOCALLOCK::proclock, PROCLOCK_PRINT, RemoveLocalLock(), LOCK::waitMask, and WARNING.

Referenced by LockHasWaitersRelation().
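
A hypothetical sketch; the real convenience wrapper is LockHasWaitersRelation() in lmgr.c. The caller must already hold the lock in the named mode, otherwise the function only emits a WARNING and returns false.

 LOCKTAG     tag;

 SET_LOCKTAG_RELATION(tag, MyDatabaseId, relid);
 if (LockHasWaiters(&tag, AccessExclusiveLock, false))
     elog(LOG, "another backend is queued behind our AccessExclusiveLock");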

◆ LockHeldByMe()

bool LockHeldByMe ( const LOCKTAG * locktag,
LOCKMODE  lockmode 
)

Definition at line 598 of file lock.c.

599 {
600  LOCALLOCKTAG localtag;
601  LOCALLOCK *locallock;
602 
603  /*
604  * See if there is a LOCALLOCK entry for this lock and lockmode
605  */
606  MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
607  localtag.lock = *locktag;
608  localtag.mode = lockmode;
609 
610  locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
611  (void *) &localtag,
612  HASH_FIND, NULL);
613 
614  return (locallock && locallock->nLocks > 0);
615 }

References HASH_FIND, hash_search(), LOCALLOCKTAG::lock, LockMethodLocalHash, MemSet, LOCALLOCKTAG::mode, and LOCALLOCK::nLocks.

Referenced by CheckRelationLockedByMe().
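
A hypothetical sketch; CheckRelationLockedByMe() in lmgr.c is the usual wrapper. Note that the lookup is by exact (locktag, lockmode) pair in the local lock table, so holding a stronger mode does not make this particular call return true.

 LOCKTAG     tag;

 SET_LOCKTAG_RELATION(tag, MyDatabaseId, relid);
 if (!LockHeldByMe(&tag, AccessShareLock))
     elog(WARNING, "expected to hold AccessShareLock on relation %u", relid);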

◆ LockReassignCurrentOwner()

void LockReassignCurrentOwner ( LOCALLOCK **  locallocks,
int  nlocks 
)

Definition at line 2564 of file lock.c.

2565 {
2566  ResourceOwner parent = ResourceOwnerGetParent(CurrentResourceOwner);
2567 
2568  Assert(parent != NULL);
2569 
2570  if (locallocks == NULL)
2571  {
2572  HASH_SEQ_STATUS status;
2573  LOCALLOCK *locallock;
2574 
2575  hash_seq_init(&status, LockMethodLocalHash);
2576 
2577  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2578  LockReassignOwner(locallock, parent);
2579  }
2580  else
2581  {
2582  int i;
2583 
2584  for (i = nlocks - 1; i >= 0; i--)
2585  LockReassignOwner(locallocks[i], parent);
2586  }
2587 }
static void LockReassignOwner(LOCALLOCK *locallock, ResourceOwner parent)
Definition: lock.c:2594
ResourceOwner ResourceOwnerGetParent(ResourceOwner owner)
Definition: resowner.c:797

References Assert(), CurrentResourceOwner, hash_seq_init(), hash_seq_search(), i, LockMethodLocalHash, LockReassignOwner(), ResourceOwnerGetParent(), and status().

Referenced by ResourceOwnerReleaseInternal().

◆ LockRelease()

bool LockRelease ( const LOCKTAG * locktag,
LOCKMODE  lockmode,
bool  sessionLock 
)

Definition at line 1969 of file lock.c.

1970 {
1971  LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
1972  LockMethod lockMethodTable;
1973  LOCALLOCKTAG localtag;
1974  LOCALLOCK *locallock;
1975  LOCK *lock;
1976  PROCLOCK *proclock;
1977  LWLock *partitionLock;
1978  bool wakeupNeeded;
1979 
1980  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
1981  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
1982  lockMethodTable = LockMethods[lockmethodid];
1983  if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
1984  elog(ERROR, "unrecognized lock mode: %d", lockmode);
1985 
1986 #ifdef LOCK_DEBUG
1987  if (LOCK_DEBUG_ENABLED(locktag))
1988  elog(LOG, "LockRelease: lock [%u,%u] %s",
1989  locktag->locktag_field1, locktag->locktag_field2,
1990  lockMethodTable->lockModeNames[lockmode]);
1991 #endif
1992 
1993  /*
1994  * Find the LOCALLOCK entry for this lock and lockmode
1995  */
1996  MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
1997  localtag.lock = *locktag;
1998  localtag.mode = lockmode;
1999 
2000  locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
2001  (void *) &localtag,
2002  HASH_FIND, NULL);
2003 
2004  /*
2005  * let the caller print its own error message, too. Do not ereport(ERROR).
2006  */
2007  if (!locallock || locallock->nLocks <= 0)
2008  {
2009  elog(WARNING, "you don't own a lock of type %s",
2010  lockMethodTable->lockModeNames[lockmode]);
2011  return false;
2012  }
2013 
2014  /*
2015  * Decrease the count for the resource owner.
2016  */
2017  {
2018  LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
2019  ResourceOwner owner;
2020  int i;
2021 
2022  /* Identify owner for lock */
2023  if (sessionLock)
2024  owner = NULL;
2025  else
2026  owner = CurrentResourceOwner;
2027 
2028  for (i = locallock->numLockOwners - 1; i >= 0; i--)
2029  {
2030  if (lockOwners[i].owner == owner)
2031  {
2032  Assert(lockOwners[i].nLocks > 0);
2033  if (--lockOwners[i].nLocks == 0)
2034  {
2035  if (owner != NULL)
2036  ResourceOwnerForgetLock(owner, locallock);
2037  /* compact out unused slot */
2038  locallock->numLockOwners--;
2039  if (i < locallock->numLockOwners)
2040  lockOwners[i] = lockOwners[locallock->numLockOwners];
2041  }
2042  break;
2043  }
2044  }
2045  if (i < 0)
2046  {
2047  /* don't release a lock belonging to another owner */
2048  elog(WARNING, "you don't own a lock of type %s",
2049  lockMethodTable->lockModeNames[lockmode]);
2050  return false;
2051  }
2052  }
2053 
2054  /*
2055  * Decrease the total local count. If we're still holding the lock, we're
2056  * done.
2057  */
2058  locallock->nLocks--;
2059 
2060  if (locallock->nLocks > 0)
2061  return true;
2062 
2063  /*
2064  * At this point we can no longer suppose we are clear of invalidation
2065  * messages related to this lock. Although we'll delete the LOCALLOCK
2066  * object before any intentional return from this routine, it seems worth
2067  * the trouble to explicitly reset lockCleared right now, just in case
2068  * some error prevents us from deleting the LOCALLOCK.
2069  */
2070  locallock->lockCleared = false;
2071 
2072  /* Attempt fast release of any lock eligible for the fast path. */
2073  if (EligibleForRelationFastPath(locktag, lockmode) &&
2074  FastPathLocalUseCount > 0)
2075  {
2076  bool released;
2077 
2078  /*
2079  * We might not find the lock here, even if we originally entered it
2080  * here. Another backend may have moved it to the main table.
2081  */
2082  LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
2083  released = FastPathUnGrantRelationLock(locktag->locktag_field2,
2084  lockmode);
2085  LWLockRelease(&MyProc->fpInfoLock);
2086  if (released)
2087  {
2088  RemoveLocalLock(locallock);
2089  return true;
2090  }
2091  }
2092 
2093  /*
2094  * Otherwise we've got to mess with the shared lock table.
2095  */
2096  partitionLock = LockHashPartitionLock(locallock->hashcode);
2097 
2098  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2099 
2100  /*
2101  * Normally, we don't need to re-find the lock or proclock, since we kept
2102  * their addresses in the locallock table, and they couldn't have been
2103  * removed while we were holding a lock on them. But it's possible that
2104  * the lock was taken fast-path and has since been moved to the main hash
2105  * table by another backend, in which case we will need to look up the
2106  * objects here. We assume the lock field is NULL if so.
2107  */
2108  lock = locallock->lock;
2109  if (!lock)
2110  {
2111  PROCLOCKTAG proclocktag;
2112 
2113  Assert(EligibleForRelationFastPath(locktag, lockmode));
2114  lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
2115  (const void *) locktag,
2116  locallock->hashcode,
2117  HASH_FIND,
2118  NULL);
2119  if (!lock)
2120  elog(ERROR, "failed to re-find shared lock object");
2121  locallock->lock = lock;
2122 
2123  proclocktag.myLock = lock;
2124  proclocktag.myProc = MyProc;
2125  locallock->proclock = (PROCLOCK *) hash_search(LockMethodProcLockHash,
2126  (void *) &proclocktag,
2127  HASH_FIND,
2128  NULL);
2129  if (!locallock->proclock)
2130  elog(ERROR, "failed to re-find shared proclock object");
2131  }
2132  LOCK_PRINT("LockRelease: found", lock, lockmode);
2133  proclock = locallock->proclock;
2134  PROCLOCK_PRINT("LockRelease: found", proclock);
2135 
2136  /*
2137  * Double-check that we are actually holding a lock of the type we want to
2138  * release.
2139  */
2140  if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
2141  {
2142  PROCLOCK_PRINT("LockRelease: WRONGTYPE", proclock);
2143  LWLockRelease(partitionLock);
2144  elog(WARNING, "you don't own a lock of type %s",
2145  lockMethodTable->lockModeNames[lockmode]);
2146  RemoveLocalLock(locallock);
2147  return false;
2148  }
2149 
2150  /*
2151  * Do the releasing. CleanUpLock will waken any now-wakable waiters.
2152  */
2153  wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
2154 
2155  CleanUpLock(lock, proclock,
2156  lockMethodTable, locallock->hashcode,
2157  wakeupNeeded);
2158 
2159  LWLockRelease(partitionLock);
2160 
2161  RemoveLocalLock(locallock);
2162  return true;
2163 }
static bool UnGrantLock(LOCK *lock, LOCKMODE lockmode, PROCLOCK *proclock, LockMethod lockMethodTable)
Definition: lock.c:1573
static void CleanUpLock(LOCK *lock, PROCLOCK *proclock, LockMethod lockMethodTable, uint32 hashcode, bool wakeupNeeded)
Definition: lock.c:1630
static bool FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode)
Definition: lock.c:2677
void ResourceOwnerForgetLock(ResourceOwner owner, LOCALLOCK *locallock)
Definition: resowner.c:1010

References Assert(), CleanUpLock(), CurrentResourceOwner, EligibleForRelationFastPath, elog(), ERROR, FastPathLocalUseCount, FastPathUnGrantRelationLock(), PGPROC::fpInfoLock, HASH_FIND, hash_search(), hash_search_with_hash_value(), LOCALLOCK::hashcode, PROCLOCK::holdMask, i, lengthof, LOCALLOCKTAG::lock, LOCALLOCK::lock, LOCK_PRINT, LOCKBIT_ON, LOCALLOCK::lockCleared, LockHashPartitionLock, LockMethodLocalHash, LockMethodLockHash, LockMethodProcLockHash, LockMethods, LockMethodData::lockModeNames, LOCALLOCK::lockOwners, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_lockmethodid, LOG, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), MemSet, LOCALLOCKTAG::mode, PROCLOCKTAG::myLock, MyProc, PROCLOCKTAG::myProc, LOCALLOCK::nLocks, LockMethodData::numLockModes, LOCALLOCK::numLockOwners, LOCALLOCK::proclock, PROCLOCK_PRINT, RemoveLocalLock(), ResourceOwnerForgetLock(), UnGrantLock(), and WARNING.

Referenced by ConditionalXactLockTableWait(), pg_advisory_unlock_int4(), pg_advisory_unlock_int8(), pg_advisory_unlock_shared_int4(), pg_advisory_unlock_shared_int8(), ReleaseLockIfHeld(), SpeculativeInsertionLockRelease(), SpeculativeInsertionWait(), StandbyReleaseXidEntryLocks(), UnlockApplyTransactionForSession(), UnlockDatabaseObject(), UnlockPage(), UnlockRelation(), UnlockRelationForExtension(), UnlockRelationId(), UnlockRelationIdForSession(), UnlockRelationOid(), UnlockSharedObject(), UnlockSharedObjectForSession(), UnlockTuple(), VirtualXactLock(), XactLockForVirtualXact(), XactLockTableDelete(), and XactLockTableWait().
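
A hedged sketch of the session-lock variant: with sessionLock = true the lock is not owned by the current ResourceOwner and is not released at transaction end, only on explicit release or backend exit. This is the pattern used by LockRelationIdForSession()/UnlockRelationIdForSession(). "tag" is assumed to have been filled in with SET_LOCKTAG_RELATION.

 (void) LockAcquire(&tag, RowExclusiveLock, true /* sessionLock */, false);

 /* ... possibly spanning several transactions ... */

 LockRelease(&tag, RowExclusiveLock, true /* sessionLock */);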

◆ LockReleaseAll()

void LockReleaseAll ( LOCKMETHODID  lockmethodid,
bool  allLocks 
)

Definition at line 2174 of file lock.c.

2175 {
2176  HASH_SEQ_STATUS status;
2177  LockMethod lockMethodTable;
2178  int i,
2179  numLockModes;
2180  LOCALLOCK *locallock;
2181  LOCK *lock;
2182  int partition;
2183  bool have_fast_path_lwlock = false;
2184 
2185  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2186  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2187  lockMethodTable = LockMethods[lockmethodid];
2188 
2189 #ifdef LOCK_DEBUG
2190  if (*(lockMethodTable->trace_flag))
2191  elog(LOG, "LockReleaseAll: lockmethod=%d", lockmethodid);
2192 #endif
2193 
2194  /*
2195  * Get rid of our fast-path VXID lock, if appropriate. Note that this is
2196  * the only way that the lock we hold on our own VXID can ever get
2197  * released: it is always and only released when a toplevel transaction
2198  * ends.
2199  */
2200  if (lockmethodid == DEFAULT_LOCKMETHOD)
2201  VirtualXactLockTableCleanup();
2202 
2203  numLockModes = lockMethodTable->numLockModes;
2204 
2205  /*
2206  * First we run through the locallock table and get rid of unwanted
2207  * entries, then we scan the process's proclocks and get rid of those. We
2208  * do this separately because we may have multiple locallock entries
2209  * pointing to the same proclock, and we daren't end up with any dangling
2210  * pointers. Fast-path locks are cleaned up during the locallock table
2211  * scan, though.
2212  */
2213  hash_seq_init(&status, LockMethodLocalHash);
2214 
2215  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2216  {
2217  /*
2218  * If the LOCALLOCK entry is unused, we must've run out of shared
2219  * memory while trying to set up this lock. Just forget the local
2220  * entry.
2221  */
2222  if (locallock->nLocks == 0)
2223  {
2224  RemoveLocalLock(locallock);
2225  continue;
2226  }
2227 
2228  /* Ignore items that are not of the lockmethod to be removed */
2229  if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
2230  continue;
2231 
2232  /*
2233  * If we are asked to release all locks, we can just zap the entry.
2234  * Otherwise, must scan to see if there are session locks. We assume
2235  * there is at most one lockOwners entry for session locks.
2236  */
2237  if (!allLocks)
2238  {
2239  LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
2240 
2241  /* If session lock is above array position 0, move it down to 0 */
2242  for (i = 0; i < locallock->numLockOwners; i++)
2243  {
2244  if (lockOwners[i].owner == NULL)
2245  lockOwners[0] = lockOwners[i];
2246  else
2247  ResourceOwnerForgetLock(lockOwners[i].owner, locallock);
2248  }
2249 
2250  if (locallock->numLockOwners > 0 &&
2251  lockOwners[0].owner == NULL &&
2252  lockOwners[0].nLocks > 0)
2253  {
2254  /* Fix the locallock to show just the session locks */
2255  locallock->nLocks = lockOwners[0].nLocks;
2256  locallock->numLockOwners = 1;
2257  /* We aren't deleting this locallock, so done */
2258  continue;
2259  }
2260  else
2261  locallock->numLockOwners = 0;
2262  }
2263 
2264  /*
2265  * If the lock or proclock pointers are NULL, this lock was taken via
2266  * the relation fast-path (and is not known to have been transferred).
2267  */
2268  if (locallock->proclock == NULL || locallock->lock == NULL)
2269  {
2270  LOCKMODE lockmode = locallock->tag.mode;
2271  Oid relid;
2272 
2273  /* Verify that a fast-path lock is what we've got. */
2274  if (!EligibleForRelationFastPath(&locallock->tag.lock, lockmode))
2275  elog(PANIC, "locallock table corrupted");
2276 
2277  /*
2278  * If we don't currently hold the LWLock that protects our
2279  * fast-path data structures, we must acquire it before attempting
2280  * to release the lock via the fast-path. We will continue to
2281  * hold the LWLock until we're done scanning the locallock table,
2282  * unless we hit a transferred fast-path lock. (XXX is this
2283  * really such a good idea? There could be a lot of entries ...)
2284  */
2285  if (!have_fast_path_lwlock)
2286  {
2287  LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
2288  have_fast_path_lwlock = true;
2289  }
2290 
2291  /* Attempt fast-path release. */
2292  relid = locallock->tag.lock.locktag_field2;
2293  if (FastPathUnGrantRelationLock(relid, lockmode))
2294  {
2295  RemoveLocalLock(locallock);
2296  continue;
2297  }
2298 
2299  /*
2300  * Our lock, originally taken via the fast path, has been
2301  * transferred to the main lock table. That's going to require
2302  * some extra work, so release our fast-path lock before starting.
2303  */
2304  LWLockRelease(&MyProc->fpInfoLock);
2305  have_fast_path_lwlock = false;
2306 
2307  /*
2308  * Now dump the lock. We haven't got a pointer to the LOCK or
2309  * PROCLOCK in this case, so we have to handle this a bit
2310  * differently than a normal lock release. Unfortunately, this
2311  * requires an extra LWLock acquire-and-release cycle on the
2312  * partitionLock, but hopefully it shouldn't happen often.
2313  */
2314  LockRefindAndRelease(lockMethodTable, MyProc,
2315  &locallock->tag.lock, lockmode, false);
2316  RemoveLocalLock(locallock);
2317  continue;
2318  }
2319 
2320  /* Mark the proclock to show we need to release this lockmode */
2321  if (locallock->nLocks > 0)
2322  locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
2323 
2324  /* And remove the locallock hashtable entry */
2325  RemoveLocalLock(locallock);
2326  }
2327 
2328  /* Done with the fast-path data structures */
2329  if (have_fast_path_lwlock)
2330  LWLockRelease(&MyProc->fpInfoLock);
2331
2332  /*
2333  * Now, scan each lock partition separately.
2334  */
2335  for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
2336  {
2337  LWLock *partitionLock;
2338  dlist_head *procLocks = &MyProc->myProcLocks[partition];
2339  dlist_mutable_iter proclock_iter;
2340 
2341  partitionLock = LockHashPartitionLockByIndex(partition);
2342 
2343  /*
2344  * If the proclock list for this partition is empty, we can skip
2345  * acquiring the partition lock. This optimization is trickier than
2346  * it looks, because another backend could be in process of adding
2347  * something to our proclock list due to promoting one of our
2348  * fast-path locks. However, any such lock must be one that we
2349  * decided not to delete above, so it's okay to skip it again now;
2350  * we'd just decide not to delete it again. We must, however, be
2351  * careful to re-fetch the list header once we've acquired the
2352  * partition lock, to be sure we have a valid, up-to-date pointer.
2353  * (There is probably no significant risk if pointer fetch/store is
2354  * atomic, but we don't wish to assume that.)
2355  *
2356  * XXX This argument assumes that the locallock table correctly
2357  * represents all of our fast-path locks. While allLocks mode
2358  * guarantees to clean up all of our normal locks regardless of the
2359  * locallock situation, we lose that guarantee for fast-path locks.
2360  * This is not ideal.
2361  */
2362  if (dlist_is_empty(procLocks))
2363  continue; /* needn't examine this partition */
2364 
2365  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2366 
2367  dlist_foreach_modify(proclock_iter, procLocks)
2368  {
2369  PROCLOCK *proclock = dlist_container(PROCLOCK, procLink, proclock_iter.cur);
2370  bool wakeupNeeded = false;
2371 
2372  Assert(proclock->tag.myProc == MyProc);
2373 
2374  lock = proclock->tag.myLock;
2375 
2376  /* Ignore items that are not of the lockmethod to be removed */
2377  if (LOCK_LOCKMETHOD(*lock) != lockmethodid)
2378  continue;
2379 
2380  /*
2381  * In allLocks mode, force release of all locks even if locallock
2382  * table had problems
2383  */
2384  if (allLocks)
2385  proclock->releaseMask = proclock->holdMask;
2386  else
2387  Assert((proclock->releaseMask & ~proclock->holdMask) == 0);
2388 
2389  /*
2390  * Ignore items that have nothing to be released, unless they have
2391  * holdMask == 0 and are therefore recyclable
2392  */
2393  if (proclock->releaseMask == 0 && proclock->holdMask != 0)
2394  continue;
2395 
2396  PROCLOCK_PRINT("LockReleaseAll", proclock);
2397  LOCK_PRINT("LockReleaseAll", lock, 0);
2398  Assert(lock->nRequested >= 0);
2399  Assert(lock->nGranted >= 0);
2400  Assert(lock->nGranted <= lock->nRequested);
2401  Assert((proclock->holdMask & ~lock->grantMask) == 0);
2402 
2403  /*
2404  * Release the previously-marked lock modes
2405  */
2406  for (i = 1; i <= numLockModes; i++)
2407  {
2408  if (proclock->releaseMask & LOCKBIT_ON(i))
2409  wakeupNeeded |= UnGrantLock(lock, i, proclock,
2410  lockMethodTable);
2411  }
2412  Assert((lock->nRequested >= 0) && (lock->nGranted >= 0));
2413  Assert(lock->nGranted <= lock->nRequested);
2414  LOCK_PRINT("LockReleaseAll: updated", lock, 0);
2415 
2416  proclock->releaseMask = 0;
2417 
2418  /* CleanUpLock will wake up waiters if needed. */
2419  CleanUpLock(lock, proclock,
2420  lockMethodTable,
2421  LockTagHashCode(&lock->tag),
2422  wakeupNeeded);
2423  } /* loop over PROCLOCKs within this partition */
2424 
2425  LWLockRelease(partitionLock);
2426  } /* loop over partitions */
2427 
2428 #ifdef LOCK_DEBUG
2429  if (*(lockMethodTable->trace_flag))
2430  elog(LOG, "LockReleaseAll done");
2431 #endif
2432 }
#define dlist_foreach_modify(iter, lhead)
Definition: ilist.h:640
void VirtualXactLockTableCleanup(void)
Definition: lock.c:4454
#define LOCALLOCK_LOCKMETHOD(llock)
Definition: lock.h:443
int64 nLocks
Definition: lock.h:423
const bool * trace_flag
Definition: lock.h:113
dlist_node * cur
Definition: ilist.h:200

References Assert(), CleanUpLock(), dlist_mutable_iter::cur, DEFAULT_LOCKMETHOD, dlist_container, dlist_foreach_modify, dlist_is_empty(), EligibleForRelationFastPath, elog(), ERROR, FastPathUnGrantRelationLock(), PGPROC::fpInfoLock, LOCK::grantMask, hash_seq_init(), hash_seq_search(), PROCLOCK::holdMask, i, lengthof, LOCALLOCK_LOCKMETHOD, LOCALLOCKTAG::lock, LOCALLOCK::lock, LOCK_LOCKMETHOD, LOCK_PRINT, LOCKBIT_ON, LockHashPartitionLockByIndex, LockMethodLocalHash, LockMethods, LOCALLOCK::lockOwners, LockRefindAndRelease(), LOCKTAG::locktag_field2, LockTagHashCode(), LOG, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), LOCALLOCKTAG::mode, PROCLOCKTAG::myLock, MyProc, PROCLOCKTAG::myProc, PGPROC::myProcLocks, LOCK::nGranted, LOCALLOCKOWNER::nLocks, LOCALLOCK::nLocks, LOCK::nRequested, NUM_LOCK_PARTITIONS, LockMethodData::numLockModes, LOCALLOCK::numLockOwners, LOCALLOCKOWNER::owner, PANIC, LOCALLOCK::proclock, PROCLOCK_PRINT, PROCLOCK::releaseMask, RemoveLocalLock(), ResourceOwnerForgetLock(), status(), LOCK::tag, PROCLOCK::tag, LOCALLOCK::tag, LockMethodData::trace_flag, UnGrantLock(), and VirtualXactLockTableCleanup().

Referenced by DiscardAll(), logicalrep_worker_onexit(), ProcReleaseLocks(), and ShutdownPostgres().
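
A hedged sketch of the end-of-transaction call pattern (compare ProcReleaseLocks() in storage/lmgr/proc.c, the primary caller): regular locks drop their transaction-level holds, and on abort their session-level holds as well, while advisory locks only ever lose transaction-scope holds here. Sketch for illustration; see proc.c for the authoritative version.

#include "postgres.h"
#include "storage/lock.h"

static void
release_locks_at_xact_end(bool isCommit)
{
	/* allLocks = true on abort releases session-level holds as well */
	LockReleaseAll(DEFAULT_LOCKMETHOD, !isCommit);

	/* advisory locks: session-level holds are kept in either case */
	LockReleaseAll(USER_LOCKMETHOD, false);
}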

◆ LockReleaseCurrentOwner()

void LockReleaseCurrentOwner ( LOCALLOCK **  locallocks,
int  nlocks 
)

Definition at line 2469 of file lock.c.

2470 {
2471  if (locallocks == NULL)
2472  {
2473  HASH_SEQ_STATUS status;
2474  LOCALLOCK *locallock;
2475 
2476  hash_seq_init(&status, LockMethodLocalHash);
2477
2478  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2479  ReleaseLockIfHeld(locallock, false);
2480  }
2481  else
2482  {
2483  int i;
2484 
2485  for (i = nlocks - 1; i >= 0; i--)
2486  ReleaseLockIfHeld(locallocks[i], false);
2487  }
2488 }
static void ReleaseLockIfHeld(LOCALLOCK *locallock, bool sessionLock)
Definition: lock.c:2504

References hash_seq_init(), hash_seq_search(), i, LockMethodLocalHash, ReleaseLockIfHeld(), and status().

Referenced by ResourceOwnerReleaseInternal().

◆ LockReleaseSession()

void LockReleaseSession ( LOCKMETHODID  lockmethodid)

Definition at line 2439 of file lock.c.

2440 {
2441  HASH_SEQ_STATUS status;
2442  LOCALLOCK *locallock;
2443 
2444  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2445  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2446 
2447  hash_seq_init(&status, LockMethodLocalHash);
2448
2449  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2450  {
2451  /* Ignore items that are not of the specified lock method */
2452  if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
2453  continue;
2454 
2455  ReleaseLockIfHeld(locallock, true);
2456  }
2457 }

References elog(), ERROR, hash_seq_init(), hash_seq_search(), lengthof, LOCALLOCK_LOCKMETHOD, LockMethodLocalHash, LockMethods, ReleaseLockIfHeld(), and status().

Referenced by pg_advisory_unlock_all().
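
For orientation, the SQL-callable pg_advisory_unlock_all() (lockfuncs.c) amounts to a single call of this function for the advisory lock method. A hedged sketch of that wrapper:

#include "postgres.h"
#include "fmgr.h"
#include "storage/lock.h"

Datum
advisory_unlock_all_sketch(PG_FUNCTION_ARGS)
{
	/* drop every session-level advisory lock this backend holds */
	LockReleaseSession(USER_LOCKMETHOD);

	PG_RETURN_VOID();
}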

◆ LockShmemSize()

Size LockShmemSize ( void  )

Definition at line 3579 of file lock.c.

3580 {
3581  Size size = 0;
3582  long max_table_size;
3583 
3584  /* lock hash table */
3585  max_table_size = NLOCKENTS();
3586  size = add_size(size, hash_estimate_size(max_table_size, sizeof(LOCK)));
3587 
3588  /* proclock hash table */
3589  max_table_size *= 2;
3590  size = add_size(size, hash_estimate_size(max_table_size, sizeof(PROCLOCK)));
3591 
3592  /*
3593  * Since NLOCKENTS is only an estimate, add 10% safety margin.
3594  */
3595  size = add_size(size, size / 10);
3596 
3597  return size;
3598 }
size_t Size
Definition: c.h:589
Size hash_estimate_size(long num_entries, Size entrysize)
Definition: dynahash.c:781
Size add_size(Size s1, Size s2)
Definition: shmem.c:502

References add_size(), hash_estimate_size(), and NLOCKENTS.

Referenced by CalculateShmemSize().
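
A hedged sketch of how this estimate is consumed (compare CalculateShmemSize() in storage/ipc/ipci.c): each subsystem's contribution is folded in with add_size(), which reports an error on overflow rather than silently wrapping.

#include "postgres.h"
#include "storage/lock.h"
#include "storage/shmem.h"

static Size
total_shmem_estimate_sketch(void)
{
	Size		size = 0;

	size = add_size(size, LockShmemSize());	/* LOCK + PROCLOCK hash tables */
	/* ... estimates from other subsystems are added the same way ... */

	return size;
}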

◆ LockTagHashCode()

uint32 LockTagHashCode ( const LOCKTAG locktag)

Definition at line 517 of file lock.c.

518 {
519  return get_hash_value(LockMethodLockHash, (const void *) locktag);
520 }
uint32 get_hash_value(HTAB *hashp, const void *keyPtr)
Definition: dynahash.c:909

References get_hash_value(), and LockMethodLockHash.

Referenced by CheckDeadLock(), GetLockConflicts(), lock_twophase_recover(), LockAcquireExtended(), LockRefindAndRelease(), LockReleaseAll(), LockWaiterCount(), proclock_hash(), and VirtualXactLock().
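
The callers above all follow the same pattern: compute the hash once, use it to pick the partition LWLock, then reuse it for the hash lookup. A hedged sketch of that pattern as it appears inside lock.c (LockMethodLockHash is file-private, so this only illustrates code living in that file; it assumes lock.c's usual includes):

static bool
lock_entry_exists_sketch(const LOCKTAG *locktag)
{
	uint32		hashcode = LockTagHashCode(locktag);
	LWLock	   *partitionLock = LockHashPartitionLock(hashcode);
	bool		found;

	/* a read-only probe can take the partition lock in shared mode */
	LWLockAcquire(partitionLock, LW_SHARED);
	(void) hash_search_with_hash_value(LockMethodLockHash,
									   (const void *) locktag,
									   hashcode, HASH_FIND, &found);
	LWLockRelease(partitionLock);

	return found;
}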

◆ LockWaiterCount()

int LockWaiterCount ( const LOCKTAG locktag)

Definition at line 4665 of file lock.c.

4666 {
4667  LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
4668  LOCK *lock;
4669  bool found;
4670  uint32 hashcode;
4671  LWLock *partitionLock;
4672  int waiters = 0;
4673 
4674  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4675  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4676 
4677  hashcode = LockTagHashCode(locktag);
4678  partitionLock = LockHashPartitionLock(hashcode);
4679  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4680 
4681  lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
4682  (const void *) locktag,
4683  hashcode,
4684  HASH_FIND,
4685  &found);
4686  if (found)
4687  {
4688  Assert(lock != NULL);
4689  waiters = lock->nRequested;
4690  }
4691  LWLockRelease(partitionLock);
4692 
4693  return waiters;
4694 }

References Assert(), elog(), ERROR, HASH_FIND, hash_search_with_hash_value(), lengthof, LockHashPartitionLock, LockMethodLockHash, LockMethods, LOCKTAG::locktag_lockmethodid, LockTagHashCode(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), and LOCK::nRequested.

Referenced by RelationExtensionLockWaiterCount().
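
A hedged sketch of the one caller, RelationExtensionLockWaiterCount() in storage/lmgr/lmgr.c: build the relation-extension lock tag and ask how many backends currently request it (granted or waiting; the count is taken from nRequested).

#include "postgres.h"
#include "storage/lmgr.h"
#include "storage/lock.h"
#include "utils/rel.h"

static int
extension_lock_waiters_sketch(Relation relation)
{
	LOCKTAG		tag;

	SET_LOCKTAG_RELATION_EXTEND(tag,
								relation->rd_lockInfo.lockRelId.dbId,
								relation->rd_lockInfo.lockRelId.relId);

	return LockWaiterCount(&tag);
}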

◆ MarkLockClear()

void MarkLockClear ( LOCALLOCK locallock)

Definition at line 1794 of file lock.c.

1795 {
1796  Assert(locallock->nLocks > 0);
1797  locallock->lockCleared = true;
1798 }

References Assert(), LOCALLOCK::lockCleared, and LOCALLOCK::nLocks.

Referenced by ConditionalLockRelation(), ConditionalLockRelationOid(), LockRelation(), LockRelationId(), and LockRelationOid().
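
A condensed, hedged paraphrase of the pattern in LockRelationOid() (storage/lmgr/lmgr.c): acquire the lock, absorb pending invalidation messages unless a previous acquisition already did, then record that fact with MarkLockClear() so a later re-acquisition can skip the step. LockAcquireExtended() and LOCKACQUIRE_ALREADY_CLEAR are assumed from lock.h/lockdefs.h; they are not documented on this page, and the real function also special-cases shared relations.

#include "postgres.h"
#include "miscadmin.h"
#include "storage/lock.h"
#include "utils/inval.h"

static void
lock_relation_oid_sketch(Oid relid, LOCKMODE lockmode)
{
	LOCKTAG		tag;
	LOCALLOCK  *locallock;
	LockAcquireResult res;

	SET_LOCKTAG_RELATION(tag, MyDatabaseId, relid);

	res = LockAcquireExtended(&tag, lockmode, false, false, true, &locallock);

	/*
	 * Unless an earlier acquisition already absorbed invalidation messages
	 * for this lock, do so now, then remember that we did.
	 */
	if (res != LOCKACQUIRE_ALREADY_CLEAR)
	{
		AcceptInvalidationMessages();
		MarkLockClear(locallock);
	}
}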

◆ PostPrepare_Locks()

void PostPrepare_Locks ( TransactionId  xid)

Definition at line 3395 of file lock.c.

3396 {
3397  PGPROC *newproc = TwoPhaseGetDummyProc(xid, false);
3398  HASH_SEQ_STATUS status;
3399  LOCALLOCK *locallock;
3400  LOCK *lock;
3401  PROCLOCK *proclock;
3402  PROCLOCKTAG proclocktag;
3403  int partition;
3404 
3405  /* Can't prepare a lock group follower. */
3406  Assert(MyProc->lockGroupLeader == NULL ||
3407  MyProc->lockGroupLeader == MyProc);
3408
3409  /* This is a critical section: any error means big trouble */
3410  START_CRIT_SECTION();
3411
3412  /*
3413  * First we run through the locallock table and get rid of unwanted
3414  * entries, then we scan the process's proclocks and transfer them to the
3415  * target proc.
3416  *
3417  * We do this separately because we may have multiple locallock entries
3418  * pointing to the same proclock, and we daren't end up with any dangling
3419  * pointers.
3420  */
3421  hash_seq_init(&status, LockMethodLocalHash);
3422
3423  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3424  {
3425  LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3426  bool haveSessionLock;
3427  bool haveXactLock;
3428  int i;
3429 
3430  if (locallock->proclock == NULL || locallock->lock == NULL)
3431  {
3432  /*
3433  * We must've run out of shared memory while trying to set up this
3434  * lock. Just forget the local entry.
3435  */
3436  Assert(locallock->nLocks == 0);
3437  RemoveLocalLock(locallock);
3438  continue;
3439  }
3440 
3441  /* Ignore VXID locks */
3442  if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3443  continue;
3444 
3445  /* Scan to see whether we hold it at session or transaction level */
3446  haveSessionLock = haveXactLock = false;
3447  for (i = locallock->numLockOwners - 1; i >= 0; i--)
3448  {
3449  if (lockOwners[i].owner == NULL)
3450  haveSessionLock = true;
3451  else
3452  haveXactLock = true;
3453  }
3454 
3455  /* Ignore it if we have only session lock */
3456  if (!haveXactLock)
3457  continue;
3458 
3459  /* This can't happen, because we already checked it */
3460  if (haveSessionLock)
3461  ereport(PANIC,
3462  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3463  errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3464 
3465  /* Mark the proclock to show we need to release this lockmode */
3466  if (locallock->nLocks > 0)
3467  locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
3468 
3469  /* And remove the locallock hashtable entry */
3470  RemoveLocalLock(locallock);
3471  }
3472 
3473  /*
3474  * Now, scan each lock partition separately.
3475  */
3476  for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
3477  {
3478  LWLock *partitionLock;
3479  dlist_head *procLocks = &(MyProc->myProcLocks[partition]);
3480  dlist_mutable_iter proclock_iter;
3481 
3482  partitionLock = LockHashPartitionLockByIndex(partition);
3483 
3484  /*
3485  * If the proclock list for this partition is empty, we can skip
3486  * acquiring the partition lock. This optimization is safer than the
3487  * situation in LockReleaseAll, because we got rid of any fast-path
3488  * locks during AtPrepare_Locks, so there cannot be any case where
3489  * another backend is adding something to our lists now. For safety,
3490  * though, we code this the same way as in LockReleaseAll.
3491  */
3492  if (dlist_is_empty(procLocks))
3493  continue; /* needn't examine this partition */
3494 
3495  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
3496 
3497  dlist_foreach_modify(proclock_iter, procLocks)
3498  {
3499  proclock = dlist_container(PROCLOCK, procLink, proclock_iter.cur);
3500 
3501  Assert(proclock->tag.myProc == MyProc);
3502 
3503  lock = proclock->tag.myLock;
3504 
3505  /* Ignore VXID locks */
3506  if (lock->tag.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3507  continue;
3508 
3509  PROCLOCK_PRINT("PostPrepare_Locks", proclock);
3510  LOCK_PRINT("PostPrepare_Locks", lock, 0);
3511  Assert(lock->nRequested >= 0);
3512  Assert(lock->nGranted >= 0);
3513  Assert(lock->nGranted <= lock->nRequested);
3514  Assert((proclock->holdMask & ~lock->grantMask) == 0);
3515 
3516  /* Ignore it if nothing to release (must be a session lock) */
3517  if (proclock->releaseMask == 0)
3518  continue;
3519 
3520  /* Else we should be releasing all locks */
3521  if (proclock->releaseMask != proclock->holdMask)
3522  elog(PANIC, "we seem to have dropped a bit somewhere");
3523 
3524  /*
3525  * We cannot simply modify proclock->tag.myProc to reassign
3526  * ownership of the lock, because that's part of the hash key and
3527  * the proclock would then be in the wrong hash chain. Instead
3528  * use hash_update_hash_key. (We used to create a new hash entry,
3529  * but that risks out-of-memory failure if other processes are
3530  * busy making proclocks too.) We must unlink the proclock from
3531  * our procLink chain and put it into the new proc's chain, too.
3532  *
3533  * Note: the updated proclock hash key will still belong to the
3534  * same hash partition, cf proclock_hash(). So the partition lock
3535  * we already hold is sufficient for this.
3536  */
3537  dlist_delete(&proclock->procLink);
3538 
3539  /*
3540  * Create the new hash key for the proclock.
3541  */
3542  proclocktag.myLock = lock;
3543  proclocktag.myProc = newproc;
3544 
3545  /*
3546  * Update groupLeader pointer to point to the new proc. (We'd
3547  * better not be a member of somebody else's lock group!)
3548  */
3549  Assert(proclock->groupLeader == proclock->tag.myProc);
3550  proclock->groupLeader = newproc;
3551 
3552  /*
3553  * Update the proclock. We should not find any existing entry for
3554  * the same hash key, since there can be only one entry for any
3555  * given lock with my own proc.
3556  */
3557  if (!hash_update_hash_key(LockMethodProcLockHash,
3558  (void *) proclock,
3559  (void *) &proclocktag))
3560  elog(PANIC, "duplicate entry found while reassigning a prepared transaction's locks");
3561 
3562  /* Re-link into the new proc's proclock list */
3563  dlist_push_tail(&newproc->myProcLocks[partition], &proclock->procLink);
3564 
3565  PROCLOCK_PRINT("PostPrepare_Locks: updated", proclock);
3566  } /* loop over PROCLOCKs within this partition */
3567 
3568  LWLockRelease(partitionLock);
3569  } /* loop over partitions */
3570 
3571  END_CRIT_SECTION();
3572 }
bool hash_update_hash_key(HTAB *hashp, void *existingEntry, const void *newKeyPtr)
Definition: dynahash.c:1157
#define START_CRIT_SECTION()
Definition: miscadmin.h:148
#define END_CRIT_SECTION()
Definition: miscadmin.h:150

References Assert(), dlist_mutable_iter::cur, dlist_container, dlist_delete(), dlist_foreach_modify, dlist_is_empty(), dlist_push_tail(), elog(), END_CRIT_SECTION, ereport, errcode(), errmsg(), LOCK::grantMask, PROCLOCK::groupLeader, hash_seq_init(), hash_seq_search(), hash_update_hash_key(), PROCLOCK::holdMask, i, LOCALLOCKTAG::lock, LOCALLOCK::lock, LOCK_PRINT, LOCKBIT_ON, PGPROC::lockGroupLeader, LockHashPartitionLockByIndex, LockMethodLocalHash, LockMethodProcLockHash, LOCALLOCK::lockOwners, LOCKTAG::locktag_type, LOCKTAG_VIRTUALTRANSACTION, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), LOCALLOCKTAG::mode, PROCLOCKTAG::myLock, MyProc, PROCLOCKTAG::myProc, PGPROC::myProcLocks, LOCK::nGranted, LOCALLOCK::nLocks, LOCK::nRequested, NUM_LOCK_PARTITIONS, LOCALLOCK::numLockOwners, PANIC, PROCLOCK::procLink, LOCALLOCK::proclock, PROCLOCK_PRINT, PROCLOCK::releaseMask, RemoveLocalLock(), START_CRIT_SECTION, status(), LOCK::tag, PROCLOCK::tag, LOCALLOCK::tag, and TwoPhaseGetDummyProc().

Referenced by PrepareTransaction().
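
A hedged outline of the PREPARE TRANSACTION sequence as seen from the lock manager (compare PrepareTransaction() in access/transam/xact.c): AtPrepare_Locks() records the held locks in the two-phase state data while this backend still owns them, and PostPrepare_Locks() afterwards transfers the in-memory PROCLOCKs to the prepared transaction's dummy PGPROC.

#include "postgres.h"
#include "storage/lock.h"

static void
prepare_locks_sequence_sketch(TransactionId xid)
{
	AtPrepare_Locks();			/* write lock records into the 2PC state */

	/* ... the rest of the PREPARE work happens in between ... */

	PostPrepare_Locks(xid);		/* hand ownership to the dummy PGPROC */
}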

◆ RememberSimpleDeadLock()

void RememberSimpleDeadLock ( PGPROC proc1,
LOCKMODE  lockmode,
LOCK lock,
PGPROC proc2 
)

Definition at line 1145 of file deadlock.c.

1149 {
1150  DEADLOCK_INFO *info = &deadlockDetails[0];
1151 
1152  info->locktag = lock->tag;
1153  info->lockmode = lockmode;
1154  info->pid = proc1->pid;
1155  info++;
1156  info->locktag = proc2->waitLock->tag;
1157  info->lockmode = proc2->waitLockMode;
1158  info->pid = proc2->pid;
1159  nDeadlockDetails = 2;
1160 }

References deadlockDetails, DEADLOCK_INFO::lockmode, DEADLOCK_INFO::locktag, nDeadlockDetails, DEADLOCK_INFO::pid, PGPROC::pid, LOCK::tag, PGPROC::waitLock, and PGPROC::waitLockMode.

Referenced by ProcSleep().

◆ RemoveFromWaitQueue()

void RemoveFromWaitQueue ( PGPROC proc,
uint32  hashcode 
)

Definition at line 1913 of file lock.c.

1914 {
1915  LOCK *waitLock = proc->waitLock;
1916  PROCLOCK *proclock = proc->waitProcLock;
1917  LOCKMODE lockmode = proc->waitLockMode;
1918  LOCKMETHODID lockmethodid = LOCK_LOCKMETHOD(*waitLock);
1919 
1920  /* Make sure proc is waiting */
1921  Assert(proc->waitStatus == PROC_WAIT_STATUS_WAITING);
1922  Assert(proc->links.next != NULL);
1923  Assert(waitLock);
1924  Assert(!dclist_is_empty(&waitLock->waitProcs));
1925  Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
1926 
1927  /* Remove proc from lock's wait queue */
1928  dclist_delete_from(&waitLock->waitProcs, &proc->links);
1929 
1930  /* Undo increments of request counts by waiting process */
1931  Assert(waitLock->nRequested > 0);
1932  Assert(waitLock->nRequested > proc->waitLock->nGranted);
1933  waitLock->nRequested--;
1934  Assert(waitLock->requested[lockmode] > 0);
1935  waitLock->requested[lockmode]--;
1936  /* don't forget to clear waitMask bit if appropriate */
1937  if (waitLock->granted[lockmode] == waitLock->requested[lockmode])
1938  waitLock->waitMask &= LOCKBIT_OFF(lockmode);
1939 
1940  /* Clean up the proc's own state, and pass it the ok/fail signal */
1941  proc->waitLock = NULL;
1942  proc->waitProcLock = NULL;
1943  proc->waitStatus = PROC_WAIT_STATUS_ERROR;
1944
1945  /*
1946  * Delete the proclock immediately if it represents no already-held locks.
1947  * (This must happen now because if the owner of the lock decides to
1948  * release it, and the requested/granted counts then go to zero,
1949  * LockRelease expects there to be no remaining proclocks.) Then see if
1950  * any other waiters for the lock can be woken up now.
1951  */
1952  CleanUpLock(waitLock, proclock,
1953  LockMethods[lockmethodid], hashcode,
1954  true);
1955 }
static bool dclist_is_empty(const dclist_head *head)
Definition: ilist.h:682
static void dclist_delete_from(dclist_head *head, dlist_node *node)
Definition: ilist.h:763
@ PROC_WAIT_STATUS_WAITING
Definition: proc.h:125
@ PROC_WAIT_STATUS_ERROR
Definition: proc.h:126
PROCLOCK * waitProcLock
Definition: proc.h:224
ProcWaitStatus waitStatus
Definition: proc.h:168
dlist_node links
Definition: proc.h:164
dlist_node * next
Definition: ilist.h:140

References Assert(), CleanUpLock(), dclist_delete_from(), dclist_is_empty(), LOCK::granted, lengthof, PGPROC::links, LOCK_LOCKMETHOD, LOCKBIT_OFF, LockMethods, dlist_node::next, LOCK::nGranted, LOCK::nRequested, PROC_WAIT_STATUS_ERROR, PROC_WAIT_STATUS_WAITING, LOCK::requested, PGPROC::waitLock, PGPROC::waitLockMode, LOCK::waitMask, PGPROC::waitProcLock, LOCK::waitProcs, and PGPROC::waitStatus.

Referenced by CheckDeadLock(), LockErrorCleanup(), and ProcSleep().
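
A hedged sketch of the caller-side protocol, loosely modeled on LockErrorCleanup() in proc.c: the caller must hold the partition LWLock for the awaited lock's hash code and must re-check, under that lock, that the process is still queued (it may have been granted the lock or removed in the meantime; the real code uses a dlist-detachment check rather than the waitLock test shown here). "awaited" is a hypothetical pointer to the LOCALLOCK we were sleeping on.

#include "postgres.h"
#include "storage/lock.h"
#include "storage/lwlock.h"
#include "storage/proc.h"

static void
abort_lock_wait_sketch(LOCALLOCK *awaited)
{
	uint32		hashcode = awaited->hashcode;
	LWLock	   *partitionLock = LockHashPartitionLock(hashcode);

	LWLockAcquire(partitionLock, LW_EXCLUSIVE);

	/* still on the wait queue? then take ourselves off it */
	if (MyProc->waitLock != NULL)
		RemoveFromWaitQueue(MyProc, hashcode);

	LWLockRelease(partitionLock);
}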

◆ VirtualXactLock()

bool VirtualXactLock ( VirtualTransactionId  vxid,
bool  wait 
)

Definition at line 4554 of file lock.c.

4555 {
4556  LOCKTAG tag;
4557  PGPROC *proc;
4558  TransactionId xid = InvalidTransactionId;
4559
4560  Assert(VirtualTransactionIdIsValid(vxid));
4561
4562  if (VirtualTransactionIdIsRecoveredPreparedXact(vxid))
4563  /* no vxid lock; localTransactionId is a normal, locked XID */
4564  return XactLockForVirtualXact(vxid, vxid.localTransactionId, wait);
4565 
4566  SET_LOCKTAG_VIRTUALTRANSACTION(tag, vxid);
4567 
4568  /*
4569  * If a lock table entry must be made, this is the PGPROC on whose behalf
4570  * it must be done. Note that the transaction might end or the PGPROC
4571  * might be reassigned to a new backend before we get around to examining
4572  * it, but it doesn't matter. If we find upon examination that the
4573  * relevant lxid is no longer running here, that's enough to prove that
4574  * it's no longer running anywhere.
4575  */
4576  proc = BackendIdGetProc(vxid.backendId);
4577  if (proc == NULL)
4578  return XactLockForVirtualXact(vxid, InvalidTransactionId, wait);
4579 
4580  /*
4581  * We must acquire this lock before checking the backendId and lxid
4582  * against the ones we're waiting for. The target backend will only set
4583  * or clear lxid while holding this lock.
4584  */
4585  LWLockAcquire(&proc->fpInfoLock, LW_EXCLUSIVE);
4586
4587  if (proc->backendId != vxid.backendId
4588  || proc->fpLocalTransactionId != vxid.localTransactionId)
4589  {
4590  /* VXID ended */
4591  LWLockRelease(&proc->fpInfoLock);
4592  return XactLockForVirtualXact(vxid, InvalidTransactionId, wait);
4593  }
4594 
4595  /*
4596  * If we aren't asked to wait, there's no need to set up a lock table
4597  * entry. The transaction is still in progress, so just return false.
4598  */
4599  if (!wait)
4600  {
4601  LWLockRelease(&proc->fpInfoLock);
4602  return false;
4603  }
4604 
4605  /*
4606  * OK, we're going to need to sleep on the VXID. But first, we must set
4607  * up the primary lock table entry, if needed (ie, convert the proc's
4608  * fast-path lock on its VXID to a regular lock).
4609  */
4610  if (proc->fpVXIDLock)
4611  {
4612  PROCLOCK *proclock;
4613  uint32 hashcode;
4614  LWLock *partitionLock;
4615 
4616  hashcode = LockTagHashCode(&tag);
4617 
4618  partitionLock = LockHashPartitionLock(hashcode);
4619  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4620 
4621  proclock = SetupLockInTable(LockMethods[DEFAULT_LOCKMETHOD], MyProc,
4622  &tag, hashcode, ExclusiveLock);
4623  if (!proclock)
4624  {
4625  LWLockRelease(partitionLock);
4626  LWLockRelease(&proc->fpInfoLock);
4627  ereport(ERROR,
4628  (errcode(ERRCODE_OUT_OF_MEMORY),
4629  errmsg("out of shared memory"),
4630  errhint("You might need to increase max_locks_per_transaction.")));
4631  }
4632  GrantLock(proclock->tag.myLock, proclock, ExclusiveLock);
4633 
4634  LWLockRelease(partitionLock);
4635 
4636  proc->fpVXIDLock = false;
4637  }
4638 
4639  /*
4640  * If the proc has an XID now, we'll avoid a TwoPhaseGetXidByVirtualXID()
4641  * search. The proc might have assigned this XID but not yet locked it,
4642  * in which case the proc will lock this XID before releasing the VXID.
4643  * The fpInfoLock critical section excludes VirtualXactLockTableCleanup(),
4644  * so we won't save an XID of a different VXID. It doesn't matter whether
4645  * we save this before or after setting up the primary lock table entry.
4646  */
4647  xid = proc->xid;
4648 
4649  /* Done with proc->fpLockBits */
4650  LWLockRelease(&proc->fpInfoLock);
4651 
4652  /* Time to wait. */
4653  (void) LockAcquire(&tag, ShareLock, false, false);
4654 
4655  LockRelease(&tag, ShareLock, false);
4656  return XactLockForVirtualXact(vxid, xid, wait);
4657 }
static bool XactLockForVirtualXact(VirtualTransactionId vxid, TransactionId xid, bool wait)
Definition: lock.c:4503
LockAcquireResult LockAcquire(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock, bool dontWait)
Definition: lock.c:747
bool LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
Definition: lock.c:1969
#define VirtualTransactionIdIsRecoveredPreparedXact(vxid)
Definition: lock.h:69
#define ShareLock
Definition: lockdefs.h:40
PGPROC * BackendIdGetProc(int backendID)
Definition: sinvaladt.c:385
#define InvalidTransactionId
Definition: transam.h:31

References Assert(), VirtualTransactionId::backendId, PGPROC::backendId, BackendIdGetProc(), DEFAULT_LOCKMETHOD, ereport, errcode(), errhint(), errmsg(), ERROR, ExclusiveLock, PGPROC::fpInfoLock, PGPROC::fpLocalTransactionId, PGPROC::fpVXIDLock, GrantLock(), InvalidTransactionId, VirtualTransactionId::localTransactionId, LockAcquire(), LockHashPartitionLock, LockMethods, LockRelease(), LockTagHashCode(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), PROCLOCKTAG::myLock, SET_LOCKTAG_VIRTUALTRANSACTION, SetupLockInTable(), ShareLock, PROCLOCK::tag, VirtualTransactionIdIsRecoveredPreparedXact, VirtualTransactionIdIsValid, XactLockForVirtualXact(), and PGPROC::xid.

Referenced by ResolveRecoveryConflictWithVirtualXIDs(), WaitForLockersMultiple(), and WaitForOlderSnapshots().
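
A hedged sketch of the usual caller pattern (compare WaitForLockersMultiple() and ResolveRecoveryConflictWithVirtualXIDs()): GetLockConflicts() hands back an array of VirtualTransactionIds terminated by an invalid entry, and the caller blocks on each one in turn until it has ended.

#include "postgres.h"
#include "storage/lock.h"

static void
wait_for_vxids_sketch(VirtualTransactionId *vxids)
{
	VirtualTransactionId *vxid;

	for (vxid = vxids; VirtualTransactionIdIsValid(*vxid); vxid++)
		(void) VirtualXactLock(*vxid, true);	/* wait = true: block until gone */
}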

◆ VirtualXactLockTableCleanup()

void VirtualXactLockTableCleanup ( void  )

Definition at line 4454 of file lock.c.

4455 {
4456  bool fastpath;
4457  LocalTransactionId lxid;
4458 
4459  Assert(MyProc->backendId != InvalidBackendId);
4460
4461  /*
4462  * Clean up shared memory state.
4463  */
4464  LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
4465
4466  fastpath = MyProc->fpVXIDLock;
4467  lxid = MyProc->fpLocalTransactionId;
4468  MyProc->fpVXIDLock = false;
4469  MyProc->fpLocalTransactionId = InvalidLocalTransactionId;
4470
4471  LWLockRelease(&MyProc->fpInfoLock);
4472
4473  /*
4474  * If fpVXIDLock has been cleared without touching fpLocalTransactionId,
4475  * that means someone transferred the lock to the main lock table.
4476  */
4477  if (!fastpath && LocalTransactionIdIsValid(lxid))
4478  {
4479  VirtualTransactionId vxid;
4480  LOCKTAG locktag;
4481 
4482  vxid.backendId = MyBackendId;
4483  vxid.localTransactionId = lxid;
4484  SET_LOCKTAG_VIRTUALTRANSACTION(locktag, vxid);
4485 
4486  LockRefindAndRelease(LockMethods[DEFAULT_LOCKMETHOD], MyProc,
4487  &locktag, ExclusiveLock, false);
4488  }
4489 }
uint32 LocalTransactionId
Definition: c.h:638
BackendId MyBackendId
Definition: globals.c:85
#define LocalTransactionIdIsValid(lxid)
Definition: lock.h:66

References Assert(), VirtualTransactionId::backendId, PGPROC::backendId, DEFAULT_LOCKMETHOD, ExclusiveLock, PGPROC::fpInfoLock, PGPROC::fpLocalTransactionId, PGPROC::fpVXIDLock, InvalidBackendId, InvalidLocalTransactionId, VirtualTransactionId::localTransactionId, LocalTransactionIdIsValid, LockMethods, LockRefindAndRelease(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), MyBackendId, MyProc, and SET_LOCKTAG_VIRTUALTRANSACTION.

Referenced by LockReleaseAll(), and ShutdownRecoveryTransactionEnvironment().

◆ VirtualXactLockTableInsert()

Variable Documentation

◆ LockTagTypeNames

PGDLLIMPORT const char* const LockTagTypeNames[]
extern

Definition at line 29 of file lockfuncs.c.

Referenced by GetLockNameFromTagType(), and pg_lock_status().

◆ max_locks_per_xact

PGDLLIMPORT int max_locks_per_xact
extern

Definition at line 55 of file lock.c.

Referenced by CheckRequiredParameterValues(), InitControlFile(), and XLogReportParameters().