PostgreSQL Source Code git master
lock.c File Reference
#include "postgres.h"
#include <signal.h>
#include <unistd.h>
#include "access/transam.h"
#include "access/twophase.h"
#include "access/twophase_rmgr.h"
#include "access/xlog.h"
#include "access/xlogutils.h"
#include "miscadmin.h"
#include "pg_trace.h"
#include "storage/proc.h"
#include "storage/procarray.h"
#include "storage/spin.h"
#include "storage/standby.h"
#include "utils/memutils.h"
#include "utils/ps_status.h"
#include "utils/resowner.h"
Include dependency graph for lock.c:

Go to the source code of this file.

Data Structures

struct  TwoPhaseLockRecord
 
struct  FastPathStrongRelationLockData
 

Macros

#define NLOCKENTS()    mul_size(max_locks_per_xact, add_size(MaxBackends, max_prepared_xacts))
 
#define FAST_PATH_REL_GROUP(rel)    (((uint64) (rel) * 49157) % FastPathLockGroupsPerBackend)
 
#define FAST_PATH_SLOT(group, index)
 
#define FAST_PATH_GROUP(index)
 
#define FAST_PATH_INDEX(index)
 
#define FAST_PATH_BITS_PER_SLOT   3
 
#define FAST_PATH_LOCKNUMBER_OFFSET   1
 
#define FAST_PATH_MASK   ((1 << FAST_PATH_BITS_PER_SLOT) - 1)
 
#define FAST_PATH_BITS(proc, n)   (proc)->fpLockBits[FAST_PATH_GROUP(n)]
 
#define FAST_PATH_GET_BITS(proc, n)    ((FAST_PATH_BITS(proc, n) >> (FAST_PATH_BITS_PER_SLOT * FAST_PATH_INDEX(n))) & FAST_PATH_MASK)
 
#define FAST_PATH_BIT_POSITION(n, l)
 
#define FAST_PATH_SET_LOCKMODE(proc, n, l)    FAST_PATH_BITS(proc, n) |= UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)
 
#define FAST_PATH_CLEAR_LOCKMODE(proc, n, l)    FAST_PATH_BITS(proc, n) &= ~(UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l))
 
#define FAST_PATH_CHECK_LOCKMODE(proc, n, l)    (FAST_PATH_BITS(proc, n) & (UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)))
 
#define EligibleForRelationFastPath(locktag, mode)
 
#define ConflictsWithRelationFastPath(locktag, mode)
 
#define FAST_PATH_STRONG_LOCK_HASH_BITS   10
 
#define FAST_PATH_STRONG_LOCK_HASH_PARTITIONS    (1 << FAST_PATH_STRONG_LOCK_HASH_BITS)
 
#define FastPathStrongLockHashPartition(hashcode)    ((hashcode) % FAST_PATH_STRONG_LOCK_HASH_PARTITIONS)
 
#define LOCK_PRINT(where, lock, type)   ((void) 0)
 
#define PROCLOCK_PRINT(where, proclockP)   ((void) 0)
 

Typedefs

typedef struct TwoPhaseLockRecord TwoPhaseLockRecord
 

Functions

static bool FastPathGrantRelationLock (Oid relid, LOCKMODE lockmode)
 
static bool FastPathUnGrantRelationLock (Oid relid, LOCKMODE lockmode)
 
static bool FastPathTransferRelationLocks (LockMethod lockMethodTable, const LOCKTAG *locktag, uint32 hashcode)
 
static PROCLOCK * FastPathGetRelationLockEntry (LOCALLOCK *locallock)
 
static uint32 proclock_hash (const void *key, Size keysize)
 
static void RemoveLocalLock (LOCALLOCK *locallock)
 
static PROCLOCK * SetupLockInTable (LockMethod lockMethodTable, PGPROC *proc, const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode)
 
static void GrantLockLocal (LOCALLOCK *locallock, ResourceOwner owner)
 
static void BeginStrongLockAcquire (LOCALLOCK *locallock, uint32 fasthashcode)
 
static void FinishStrongLockAcquire (void)
 
static ProcWaitStatus WaitOnLock (LOCALLOCK *locallock, ResourceOwner owner)
 
static void ReleaseLockIfHeld (LOCALLOCK *locallock, bool sessionLock)
 
static void LockReassignOwner (LOCALLOCK *locallock, ResourceOwner parent)
 
static bool UnGrantLock (LOCK *lock, LOCKMODE lockmode, PROCLOCK *proclock, LockMethod lockMethodTable)
 
static void CleanUpLock (LOCK *lock, PROCLOCK *proclock, LockMethod lockMethodTable, uint32 hashcode, bool wakeupNeeded)
 
static void LockRefindAndRelease (LockMethod lockMethodTable, PGPROC *proc, LOCKTAG *locktag, LOCKMODE lockmode, bool decrement_strong_lock_count)
 
static void GetSingleProcBlockerStatusData (PGPROC *blocked_proc, BlockedProcsData *data)
 
void LockManagerShmemInit (void)
 
void InitLockManagerAccess (void)
 
LockMethod GetLocksMethodTable (const LOCK *lock)
 
LockMethod GetLockTagsMethodTable (const LOCKTAG *locktag)
 
uint32 LockTagHashCode (const LOCKTAG *locktag)
 
static uint32 ProcLockHashCode (const PROCLOCKTAG *proclocktag, uint32 hashcode)
 
bool DoLockModesConflict (LOCKMODE mode1, LOCKMODE mode2)
 
bool LockHeldByMe (const LOCKTAG *locktag, LOCKMODE lockmode, bool orstronger)
 
bool LockHasWaiters (const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
 
LockAcquireResult LockAcquire (const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock, bool dontWait)
 
LockAcquireResult LockAcquireExtended (const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock, bool dontWait, bool reportMemoryError, LOCALLOCK **locallockp)
 
static void CheckAndSetLockHeld (LOCALLOCK *locallock, bool acquired)
 
bool LockCheckConflicts (LockMethod lockMethodTable, LOCKMODE lockmode, LOCK *lock, PROCLOCK *proclock)
 
void GrantLock (LOCK *lock, PROCLOCK *proclock, LOCKMODE lockmode)
 
void AbortStrongLockAcquire (void)
 
void GrantAwaitedLock (void)
 
LOCALLOCK * GetAwaitedLock (void)
 
void MarkLockClear (LOCALLOCK *locallock)
 
void RemoveFromWaitQueue (PGPROC *proc, uint32 hashcode)
 
bool LockRelease (const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
 
void LockReleaseAll (LOCKMETHODID lockmethodid, bool allLocks)
 
void LockReleaseSession (LOCKMETHODID lockmethodid)
 
void LockReleaseCurrentOwner (LOCALLOCK **locallocks, int nlocks)
 
void LockReassignCurrentOwner (LOCALLOCK **locallocks, int nlocks)
 
VirtualTransactionId * GetLockConflicts (const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
 
static void CheckForSessionAndXactLocks (void)
 
void AtPrepare_Locks (void)
 
void PostPrepare_Locks (TransactionId xid)
 
Size LockManagerShmemSize (void)
 
LockData * GetLockStatusData (void)
 
BlockedProcsData * GetBlockerStatusData (int blocked_pid)
 
xl_standby_lock * GetRunningTransactionLocks (int *nlocks)
 
const char * GetLockmodeName (LOCKMETHODID lockmethodid, LOCKMODE mode)
 
void lock_twophase_recover (TransactionId xid, uint16 info, void *recdata, uint32 len)
 
void lock_twophase_standby_recover (TransactionId xid, uint16 info, void *recdata, uint32 len)
 
void lock_twophase_postcommit (TransactionId xid, uint16 info, void *recdata, uint32 len)
 
void lock_twophase_postabort (TransactionId xid, uint16 info, void *recdata, uint32 len)
 
void VirtualXactLockTableInsert (VirtualTransactionId vxid)
 
void VirtualXactLockTableCleanup (void)
 
static bool XactLockForVirtualXact (VirtualTransactionId vxid, TransactionId xid, bool wait)
 
bool VirtualXactLock (VirtualTransactionId vxid, bool wait)
 
int LockWaiterCount (const LOCKTAG *locktag)
 

Variables

int max_locks_per_xact
 
static const LOCKMASK LockConflicts []
 
static const char *const lock_mode_names []
 
static bool Dummy_trace = false
 
static const LockMethodData default_lockmethod
 
static const LockMethodData user_lockmethod
 
static const LockMethod LockMethods []
 
static int FastPathLocalUseCounts [FP_LOCK_GROUPS_PER_BACKEND_MAX]
 
static bool IsRelationExtensionLockHeld PG_USED_FOR_ASSERTS_ONLY = false
 
int FastPathLockGroupsPerBackend = 0
 
static volatile FastPathStrongRelationLockData * FastPathStrongRelationLocks
 
static HTAB * LockMethodLockHash
 
static HTAB * LockMethodProcLockHash
 
static HTAB * LockMethodLocalHash
 
static LOCALLOCK * StrongLockInProgress
 
static LOCALLOCK * awaitedLock
 
static ResourceOwner awaitedOwner
 

Macro Definition Documentation

◆ ConflictsWithRelationFastPath

#define ConflictsWithRelationFastPath (   locktag,
  mode 
)
Value:
((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
(locktag)->locktag_type == LOCKTAG_RELATION && \
(locktag)->locktag_field1 != InvalidOid && \
#define DEFAULT_LOCKMETHOD
Definition: lock.h:125
@ LOCKTAG_RELATION
Definition: lock.h:137
#define ShareUpdateExclusiveLock
Definition: lockdefs.h:39
static PgChecksumMode mode
Definition: pg_checksums.c:55
#define InvalidOid
Definition: postgres_ext.h:36

Definition at line 268 of file lock.c.

◆ EligibleForRelationFastPath

#define EligibleForRelationFastPath (   locktag,
  mode 
)
Value:
((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
(locktag)->locktag_type == LOCKTAG_RELATION && \
(locktag)->locktag_field1 == MyDatabaseId && \
Oid MyDatabaseId
Definition: globals.c:93

Definition at line 262 of file lock.c.

◆ FAST_PATH_BIT_POSITION

#define FAST_PATH_BIT_POSITION (   n,
  l 
)
Value:
AssertMacro((n) < FP_LOCK_SLOTS_PER_BACKEND), \
#define AssertMacro(condition)
Definition: c.h:813
#define FAST_PATH_LOCKNUMBER_OFFSET
Definition: lock.c:237
#define FAST_PATH_INDEX(index)
Definition: lock.c:231
#define FAST_PATH_BITS_PER_SLOT
Definition: lock.c:236
#define FP_LOCK_SLOTS_PER_BACKEND
Definition: proc.h:85

Definition at line 242 of file lock.c.

◆ FAST_PATH_BITS

#define FAST_PATH_BITS (   proc,
  n 
)    (proc)->fpLockBits[FAST_PATH_GROUP(n)]

Definition at line 239 of file lock.c.

◆ FAST_PATH_BITS_PER_SLOT

#define FAST_PATH_BITS_PER_SLOT   3

Definition at line 236 of file lock.c.

◆ FAST_PATH_CHECK_LOCKMODE

#define FAST_PATH_CHECK_LOCKMODE (   proc,
  n,
  l 
)     (FAST_PATH_BITS(proc, n) & (UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)))

Definition at line 251 of file lock.c.

◆ FAST_PATH_CLEAR_LOCKMODE

#define FAST_PATH_CLEAR_LOCKMODE (   proc,
  n,
  l 
)     FAST_PATH_BITS(proc, n) &= ~(UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l))

Definition at line 249 of file lock.c.

◆ FAST_PATH_GET_BITS

#define FAST_PATH_GET_BITS (   proc,
  n 
)     ((FAST_PATH_BITS(proc, n) >> (FAST_PATH_BITS_PER_SLOT * FAST_PATH_INDEX(n))) & FAST_PATH_MASK)

Definition at line 240 of file lock.c.

◆ FAST_PATH_GROUP

#define FAST_PATH_GROUP (   index)
Value:
uint32_t uint32
Definition: c.h:485
#define FP_LOCK_SLOTS_PER_GROUP
Definition: proc.h:84
Definition: type.h:96

Definition at line 228 of file lock.c.

◆ FAST_PATH_INDEX

#define FAST_PATH_INDEX (   index)
Value:

Definition at line 231 of file lock.c.

◆ FAST_PATH_LOCKNUMBER_OFFSET

#define FAST_PATH_LOCKNUMBER_OFFSET   1

Definition at line 237 of file lock.c.

◆ FAST_PATH_MASK

#define FAST_PATH_MASK   ((1 << FAST_PATH_BITS_PER_SLOT) - 1)

Definition at line 238 of file lock.c.

◆ FAST_PATH_REL_GROUP

#define FAST_PATH_REL_GROUP (   rel)     (((uint64) (rel) * 49157) % FastPathLockGroupsPerBackend)

Definition at line 212 of file lock.c.

◆ FAST_PATH_SET_LOCKMODE

#define FAST_PATH_SET_LOCKMODE (   proc,
  n,
  l 
)     FAST_PATH_BITS(proc, n) |= UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)

Definition at line 247 of file lock.c.

◆ FAST_PATH_SLOT

#define FAST_PATH_SLOT (   group,
  index 
)
Value:
AssertMacro((uint32) (index) < FP_LOCK_SLOTS_PER_GROUP), \
((group) * FP_LOCK_SLOTS_PER_GROUP + (index)))
int FastPathLockGroupsPerBackend
Definition: lock.c:200

Definition at line 219 of file lock.c.

◆ FAST_PATH_STRONG_LOCK_HASH_BITS

#define FAST_PATH_STRONG_LOCK_HASH_BITS   10

Definition at line 295 of file lock.c.

◆ FAST_PATH_STRONG_LOCK_HASH_PARTITIONS

#define FAST_PATH_STRONG_LOCK_HASH_PARTITIONS    (1 << FAST_PATH_STRONG_LOCK_HASH_BITS)

Definition at line 296 of file lock.c.

◆ FastPathStrongLockHashPartition

#define FastPathStrongLockHashPartition (   hashcode)     ((hashcode) % FAST_PATH_STRONG_LOCK_HASH_PARTITIONS)

Definition at line 298 of file lock.c.

◆ LOCK_PRINT

#define LOCK_PRINT (   where,
  lock,
  type 
)    ((void) 0)

Definition at line 400 of file lock.c.

◆ NLOCKENTS

Definition at line 54 of file lock.c.

◆ PROCLOCK_PRINT

#define PROCLOCK_PRINT (   where,
  proclockP 
)    ((void) 0)

Definition at line 401 of file lock.c.

Typedef Documentation

◆ TwoPhaseLockRecord

Function Documentation

◆ AbortStrongLockAcquire()

void AbortStrongLockAcquire ( void  )

Definition at line 1809 of file lock.c.

1810{
1811 uint32 fasthashcode;
1812 LOCALLOCK *locallock = StrongLockInProgress;
1813
1814 if (locallock == NULL)
1815 return;
1816
1817 fasthashcode = FastPathStrongLockHashPartition(locallock->hashcode);
1818 Assert(locallock->holdsStrongLockCount == true);
1820 Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
1821 FastPathStrongRelationLocks->count[fasthashcode]--;
1822 locallock->holdsStrongLockCount = false;
1823 StrongLockInProgress = NULL;
1825}
#define Assert(condition)
Definition: c.h:812
#define FastPathStrongLockHashPartition(hashcode)
Definition: lock.c:298
static volatile FastPathStrongRelationLockData * FastPathStrongRelationLocks
Definition: lock.c:307
static LOCALLOCK * StrongLockInProgress
Definition: lock.c:322
#define SpinLockRelease(lock)
Definition: spin.h:61
#define SpinLockAcquire(lock)
Definition: spin.h:59
uint32 count[FAST_PATH_STRONG_LOCK_HASH_PARTITIONS]
Definition: lock.c:304
uint32 hashcode
Definition: lock.h:432
bool holdsStrongLockCount
Definition: lock.h:439

References Assert, FastPathStrongRelationLockData::count, FastPathStrongLockHashPartition, FastPathStrongRelationLocks, LOCALLOCK::hashcode, LOCALLOCK::holdsStrongLockCount, FastPathStrongRelationLockData::mutex, SpinLockAcquire, SpinLockRelease, and StrongLockInProgress.

Referenced by LockAcquireExtended(), and LockErrorCleanup().

◆ AtPrepare_Locks()

void AtPrepare_Locks ( void  )

Definition at line 3383 of file lock.c.

3384{
3385 HASH_SEQ_STATUS status;
3386 LOCALLOCK *locallock;
3387
3388 /* First, verify there aren't locks of both xact and session level */
3390
3391 /* Now do the per-locallock cleanup work */
3393
3394 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3395 {
3396 TwoPhaseLockRecord record;
3397 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3398 bool haveSessionLock;
3399 bool haveXactLock;
3400 int i;
3401
3402 /*
3403 * Ignore VXID locks. We don't want those to be held by prepared
3404 * transactions, since they aren't meaningful after a restart.
3405 */
3407 continue;
3408
3409 /* Ignore it if we don't actually hold the lock */
3410 if (locallock->nLocks <= 0)
3411 continue;
3412
3413 /* Scan to see whether we hold it at session or transaction level */
3414 haveSessionLock = haveXactLock = false;
3415 for (i = locallock->numLockOwners - 1; i >= 0; i--)
3416 {
3417 if (lockOwners[i].owner == NULL)
3418 haveSessionLock = true;
3419 else
3420 haveXactLock = true;
3421 }
3422
3423 /* Ignore it if we have only session lock */
3424 if (!haveXactLock)
3425 continue;
3426
3427 /* This can't happen, because we already checked it */
3428 if (haveSessionLock)
3429 ereport(ERROR,
3430 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3431 errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3432
3433 /*
3434 * If the local lock was taken via the fast-path, we need to move it
3435 * to the primary lock table, or just get a pointer to the existing
3436 * primary lock table entry if by chance it's already been
3437 * transferred.
3438 */
3439 if (locallock->proclock == NULL)
3440 {
3441 locallock->proclock = FastPathGetRelationLockEntry(locallock);
3442 locallock->lock = locallock->proclock->tag.myLock;
3443 }
3444
3445 /*
3446 * Arrange to not release any strong lock count held by this lock
3447 * entry. We must retain the count until the prepared transaction is
3448 * committed or rolled back.
3449 */
3450 locallock->holdsStrongLockCount = false;
3451
3452 /*
3453 * Create a 2PC record.
3454 */
3455 memcpy(&(record.locktag), &(locallock->tag.lock), sizeof(LOCKTAG));
3456 record.lockmode = locallock->tag.mode;
3457
3459 &record, sizeof(TwoPhaseLockRecord));
3460 }
3461}
void * hash_seq_search(HASH_SEQ_STATUS *status)
Definition: dynahash.c:1420
void hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp)
Definition: dynahash.c:1385
int errcode(int sqlerrcode)
Definition: elog.c:853
int errmsg(const char *fmt,...)
Definition: elog.c:1070
#define ERROR
Definition: elog.h:39
#define ereport(elevel,...)
Definition: elog.h:149
int i
Definition: isn.c:72
static PROCLOCK * FastPathGetRelationLockEntry(LOCALLOCK *locallock)
Definition: lock.c:2865
static HTAB * LockMethodLocalHash
Definition: lock.c:318
static void CheckForSessionAndXactLocks(void)
Definition: lock.c:3295
@ LOCKTAG_VIRTUALTRANSACTION
Definition: lock.h:143
LOCKTAG lock
Definition: lock.h:410
LOCKMODE mode
Definition: lock.h:411
LOCALLOCKOWNER * lockOwners
Definition: lock.h:438
LOCK * lock
Definition: lock.h:433
int64 nLocks
Definition: lock.h:435
int numLockOwners
Definition: lock.h:436
PROCLOCK * proclock
Definition: lock.h:434
LOCALLOCKTAG tag
Definition: lock.h:429
Definition: lock.h:165
uint8 locktag_type
Definition: lock.h:170
LOCK * myLock
Definition: lock.h:365
PROCLOCKTAG tag
Definition: lock.h:372
LOCKTAG locktag
Definition: lock.c:158
LOCKMODE lockmode
Definition: lock.c:159
void RegisterTwoPhaseRecord(TwoPhaseRmgrId rmid, uint16 info, const void *data, uint32 len)
Definition: twophase.c:1265
#define TWOPHASE_RM_LOCK_ID
Definition: twophase_rmgr.h:25

References CheckForSessionAndXactLocks(), ereport, errcode(), errmsg(), ERROR, FastPathGetRelationLockEntry(), hash_seq_init(), hash_seq_search(), LOCALLOCK::holdsStrongLockCount, i, LOCALLOCKTAG::lock, LOCALLOCK::lock, LockMethodLocalHash, TwoPhaseLockRecord::lockmode, LOCALLOCK::lockOwners, TwoPhaseLockRecord::locktag, LOCKTAG::locktag_type, LOCKTAG_VIRTUALTRANSACTION, LOCALLOCKTAG::mode, PROCLOCKTAG::myLock, LOCALLOCK::nLocks, LOCALLOCK::numLockOwners, LOCALLOCK::proclock, RegisterTwoPhaseRecord(), PROCLOCK::tag, LOCALLOCK::tag, and TWOPHASE_RM_LOCK_ID.

Referenced by PrepareTransaction().

◆ BeginStrongLockAcquire()

static void BeginStrongLockAcquire ( LOCALLOCK * locallock,
uint32  fasthashcode 
)
static

Definition at line 1773 of file lock.c.

1774{
1776 Assert(locallock->holdsStrongLockCount == false);
1777
1778 /*
1779 * Adding to a memory location is not atomic, so we take a spinlock to
1780 * ensure we don't collide with someone else trying to bump the count at
1781 * the same time.
1782 *
1783 * XXX: It might be worth considering using an atomic fetch-and-add
1784 * instruction here, on architectures where that is supported.
1785 */
1786
1788 FastPathStrongRelationLocks->count[fasthashcode]++;
1789 locallock->holdsStrongLockCount = true;
1790 StrongLockInProgress = locallock;
1792}

References Assert, FastPathStrongRelationLockData::count, FastPathStrongRelationLocks, LOCALLOCK::holdsStrongLockCount, FastPathStrongRelationLockData::mutex, SpinLockAcquire, SpinLockRelease, and StrongLockInProgress.

Referenced by LockAcquireExtended().

◆ CheckAndSetLockHeld()

static void CheckAndSetLockHeld ( LOCALLOCK * locallock,
bool  acquired 
)
inlinestatic

Definition at line 1413 of file lock.c.

1414{
1415#ifdef USE_ASSERT_CHECKING
1416 if (LOCALLOCK_LOCKTAG(*locallock) == LOCKTAG_RELATION_EXTEND)
1417 IsRelationExtensionLockHeld = acquired;
1418#endif
1419}
@ LOCKTAG_RELATION_EXTEND
Definition: lock.h:138
#define LOCALLOCK_LOCKTAG(llock)
Definition: lock.h:444

References LOCALLOCK_LOCKTAG, and LOCKTAG_RELATION_EXTEND.

Referenced by GrantLockLocal(), and RemoveLocalLock().

◆ CheckForSessionAndXactLocks()

static void CheckForSessionAndXactLocks ( void  )
static

Definition at line 3295 of file lock.c.

3296{
3297 typedef struct
3298 {
3299 LOCKTAG lock; /* identifies the lockable object */
3300 bool sessLock; /* is any lockmode held at session level? */
3301 bool xactLock; /* is any lockmode held at xact level? */
3302 } PerLockTagEntry;
3303
3304 HASHCTL hash_ctl;
3305 HTAB *lockhtab;
3306 HASH_SEQ_STATUS status;
3307 LOCALLOCK *locallock;
3308
3309 /* Create a local hash table keyed by LOCKTAG only */
3310 hash_ctl.keysize = sizeof(LOCKTAG);
3311 hash_ctl.entrysize = sizeof(PerLockTagEntry);
3312 hash_ctl.hcxt = CurrentMemoryContext;
3313
3314 lockhtab = hash_create("CheckForSessionAndXactLocks table",
3315 256, /* arbitrary initial size */
3316 &hash_ctl,
3318
3319 /* Scan local lock table to find entries for each LOCKTAG */
3321
3322 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3323 {
3324 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3325 PerLockTagEntry *hentry;
3326 bool found;
3327 int i;
3328
3329 /*
3330 * Ignore VXID locks. We don't want those to be held by prepared
3331 * transactions, since they aren't meaningful after a restart.
3332 */
3334 continue;
3335
3336 /* Ignore it if we don't actually hold the lock */
3337 if (locallock->nLocks <= 0)
3338 continue;
3339
3340 /* Otherwise, find or make an entry in lockhtab */
3341 hentry = (PerLockTagEntry *) hash_search(lockhtab,
3342 &locallock->tag.lock,
3343 HASH_ENTER, &found);
3344 if (!found) /* initialize, if newly created */
3345 hentry->sessLock = hentry->xactLock = false;
3346
3347 /* Scan to see if we hold lock at session or xact level or both */
3348 for (i = locallock->numLockOwners - 1; i >= 0; i--)
3349 {
3350 if (lockOwners[i].owner == NULL)
3351 hentry->sessLock = true;
3352 else
3353 hentry->xactLock = true;
3354 }
3355
3356 /*
3357 * We can throw error immediately when we see both types of locks; no
3358 * need to wait around to see if there are more violations.
3359 */
3360 if (hentry->sessLock && hentry->xactLock)
3361 ereport(ERROR,
3362 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3363 errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3364 }
3365
3366 /* Success, so clean up */
3367 hash_destroy(lockhtab);
3368}
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:955
void hash_destroy(HTAB *hashp)
Definition: dynahash.c:865
HTAB * hash_create(const char *tabname, long nelem, const HASHCTL *info, int flags)
Definition: dynahash.c:352
@ HASH_ENTER
Definition: hsearch.h:114
#define HASH_CONTEXT
Definition: hsearch.h:102
#define HASH_ELEM
Definition: hsearch.h:95
#define HASH_BLOBS
Definition: hsearch.h:97
struct LOCKTAG LOCKTAG
MemoryContext CurrentMemoryContext
Definition: mcxt.c:143
Size keysize
Definition: hsearch.h:75
Size entrysize
Definition: hsearch.h:76
MemoryContext hcxt
Definition: hsearch.h:86
Definition: dynahash.c:220

References CurrentMemoryContext, HASHCTL::entrysize, ereport, errcode(), errmsg(), ERROR, HASH_BLOBS, HASH_CONTEXT, hash_create(), hash_destroy(), HASH_ELEM, HASH_ENTER, hash_search(), hash_seq_init(), hash_seq_search(), HASHCTL::hcxt, i, HASHCTL::keysize, LOCALLOCKTAG::lock, LockMethodLocalHash, LOCALLOCK::lockOwners, LOCKTAG::locktag_type, LOCKTAG_VIRTUALTRANSACTION, LOCALLOCK::nLocks, LOCALLOCK::numLockOwners, and LOCALLOCK::tag.

Referenced by AtPrepare_Locks().

◆ CleanUpLock()

static void CleanUpLock ( LOCK * lock,
PROCLOCK * proclock,
LockMethod  lockMethodTable,
uint32  hashcode,
bool  wakeupNeeded 
)
static

Definition at line 1687 of file lock.c.

1690{
1691 /*
1692 * If this was my last hold on this lock, delete my entry in the proclock
1693 * table.
1694 */
1695 if (proclock->holdMask == 0)
1696 {
1697 uint32 proclock_hashcode;
1698
1699 PROCLOCK_PRINT("CleanUpLock: deleting", proclock);
1700 dlist_delete(&proclock->lockLink);
1701 dlist_delete(&proclock->procLink);
1702 proclock_hashcode = ProcLockHashCode(&proclock->tag, hashcode);
1704 &(proclock->tag),
1705 proclock_hashcode,
1707 NULL))
1708 elog(PANIC, "proclock table corrupted");
1709 }
1710
1711 if (lock->nRequested == 0)
1712 {
1713 /*
1714 * The caller just released the last lock, so garbage-collect the lock
1715 * object.
1716 */
1717 LOCK_PRINT("CleanUpLock: deleting", lock, 0);
1720 &(lock->tag),
1721 hashcode,
1723 NULL))
1724 elog(PANIC, "lock table corrupted");
1725 }
1726 else if (wakeupNeeded)
1727 {
1728 /* There are waiters on this lock, so wake them up. */
1729 ProcLockWakeup(lockMethodTable, lock);
1730 }
1731}
void * hash_search_with_hash_value(HTAB *hashp, const void *keyPtr, uint32 hashvalue, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:968
#define PANIC
Definition: elog.h:42
#define elog(elevel,...)
Definition: elog.h:225
@ HASH_REMOVE
Definition: hsearch.h:115
static void dlist_delete(dlist_node *node)
Definition: ilist.h:405
static bool dlist_is_empty(const dlist_head *head)
Definition: ilist.h:336
#define LOCK_PRINT(where, lock, type)
Definition: lock.c:400
static uint32 ProcLockHashCode(const PROCLOCKTAG *proclocktag, uint32 hashcode)
Definition: lock.c:599
#define PROCLOCK_PRINT(where, proclockP)
Definition: lock.c:401
static HTAB * LockMethodLockHash
Definition: lock.c:316
static HTAB * LockMethodProcLockHash
Definition: lock.c:317
void ProcLockWakeup(LockMethod lockMethodTable, LOCK *lock)
Definition: proc.c:1736
int nRequested
Definition: lock.h:319
LOCKTAG tag
Definition: lock.h:311
dlist_head procLocks
Definition: lock.h:316
LOCKMASK holdMask
Definition: lock.h:376
dlist_node lockLink
Definition: lock.h:378
dlist_node procLink
Definition: lock.h:379

References Assert, dlist_delete(), dlist_is_empty(), elog, HASH_REMOVE, hash_search_with_hash_value(), PROCLOCK::holdMask, LOCK_PRINT, PROCLOCK::lockLink, LockMethodLockHash, LockMethodProcLockHash, LOCK::nRequested, PANIC, PROCLOCK::procLink, PROCLOCK_PRINT, ProcLockHashCode(), LOCK::procLocks, ProcLockWakeup(), LOCK::tag, and PROCLOCK::tag.

Referenced by LockRefindAndRelease(), LockRelease(), LockReleaseAll(), and RemoveFromWaitQueue().

◆ DoLockModesConflict()

bool DoLockModesConflict ( LOCKMODE  mode1,
LOCKMODE  mode2 
)

Definition at line 617 of file lock.c.

618{
619 LockMethod lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];
620
621 if (lockMethodTable->conflictTab[mode1] & LOCKBIT_ON(mode2))
622 return true;
623
624 return false;
625}
static const LockMethod LockMethods[]
Definition: lock.c:148
#define LOCKBIT_ON(lockmode)
Definition: lock.h:84
const LOCKMASK * conflictTab
Definition: lock.h:111

References LockMethodData::conflictTab, DEFAULT_LOCKMETHOD, LOCKBIT_ON, and LockMethods.

Referenced by Do_MultiXactIdWait(), DoesMultiXactIdConflict(), initialize_reloptions(), and test_lockmode_for_conflict().

◆ FastPathGetRelationLockEntry()

static PROCLOCK * FastPathGetRelationLockEntry ( LOCALLOCK * locallock)
static

Definition at line 2865 of file lock.c.

2866{
2867 LockMethod lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];
2868 LOCKTAG *locktag = &locallock->tag.lock;
2869 PROCLOCK *proclock = NULL;
2870 LWLock *partitionLock = LockHashPartitionLock(locallock->hashcode);
2871 Oid relid = locktag->locktag_field2;
2872 uint32 i,
2873 group;
2874
2875 /* fast-path group the lock belongs to */
2876 group = FAST_PATH_REL_GROUP(relid);
2877
2879
2880 for (i = 0; i < FP_LOCK_SLOTS_PER_GROUP; i++)
2881 {
2882 uint32 lockmode;
2883
2884 /* index into the whole per-backend array */
2885 uint32 f = FAST_PATH_SLOT(group, i);
2886
2887 /* Look for an allocated slot matching the given relid. */
2888 if (relid != MyProc->fpRelId[f] || FAST_PATH_GET_BITS(MyProc, f) == 0)
2889 continue;
2890
2891 /* If we don't have a lock of the given mode, forget it! */
2892 lockmode = locallock->tag.mode;
2893 if (!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
2894 break;
2895
2896 /* Find or create lock object. */
2897 LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2898
2899 proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
2900 locallock->hashcode, lockmode);
2901 if (!proclock)
2902 {
2903 LWLockRelease(partitionLock);
2905 ereport(ERROR,
2906 (errcode(ERRCODE_OUT_OF_MEMORY),
2907 errmsg("out of shared memory"),
2908 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
2909 }
2910 GrantLock(proclock->tag.myLock, proclock, lockmode);
2911 FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
2912
2913 LWLockRelease(partitionLock);
2914
2915 /* No need to examine remaining slots. */
2916 break;
2917 }
2918
2920
2921 /* Lock may have already been transferred by some other backend. */
2922 if (proclock == NULL)
2923 {
2924 LOCK *lock;
2925 PROCLOCKTAG proclocktag;
2926 uint32 proclock_hashcode;
2927
2928 LWLockAcquire(partitionLock, LW_SHARED);
2929
2931 locktag,
2932 locallock->hashcode,
2933 HASH_FIND,
2934 NULL);
2935 if (!lock)
2936 elog(ERROR, "failed to re-find shared lock object");
2937
2938 proclocktag.myLock = lock;
2939 proclocktag.myProc = MyProc;
2940
2941 proclock_hashcode = ProcLockHashCode(&proclocktag, locallock->hashcode);
2942 proclock = (PROCLOCK *)
2944 &proclocktag,
2945 proclock_hashcode,
2946 HASH_FIND,
2947 NULL);
2948 if (!proclock)
2949 elog(ERROR, "failed to re-find shared proclock object");
2950 LWLockRelease(partitionLock);
2951 }
2952
2953 return proclock;
2954}
int errhint(const char *fmt,...)
Definition: elog.c:1317
@ HASH_FIND
Definition: hsearch.h:113
static PROCLOCK * SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc, const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode)
Definition: lock.c:1232
#define FAST_PATH_CHECK_LOCKMODE(proc, n, l)
Definition: lock.c:251
#define FAST_PATH_REL_GROUP(rel)
Definition: lock.c:212
void GrantLock(LOCK *lock, PROCLOCK *proclock, LOCKMODE lockmode)
Definition: lock.c:1607
#define FAST_PATH_SLOT(group, index)
Definition: lock.c:219
#define FAST_PATH_CLEAR_LOCKMODE(proc, n, l)
Definition: lock.c:249
#define FAST_PATH_GET_BITS(proc, n)
Definition: lock.c:240
#define LockHashPartitionLock(hashcode)
Definition: lock.h:526
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1168
void LWLockRelease(LWLock *lock)
Definition: lwlock.c:1781
@ LW_SHARED
Definition: lwlock.h:115
@ LW_EXCLUSIVE
Definition: lwlock.h:114
unsigned int Oid
Definition: postgres_ext.h:31
PGPROC * MyProc
Definition: proc.c:66
uint32 locktag_field2
Definition: lock.h:167
Definition: lock.h:309
Definition: lwlock.h:42
LWLock fpInfoLock
Definition: proc.h:293
Oid * fpRelId
Definition: proc.h:295
PGPROC * myProc
Definition: lock.h:366
Definition: lock.h:370

References DEFAULT_LOCKMETHOD, elog, ereport, errcode(), errhint(), errmsg(), ERROR, FAST_PATH_CHECK_LOCKMODE, FAST_PATH_CLEAR_LOCKMODE, FAST_PATH_GET_BITS, FAST_PATH_REL_GROUP, FAST_PATH_SLOT, FP_LOCK_SLOTS_PER_GROUP, PGPROC::fpInfoLock, PGPROC::fpRelId, GrantLock(), HASH_FIND, hash_search_with_hash_value(), LOCALLOCK::hashcode, i, LOCALLOCKTAG::lock, LockHashPartitionLock, LockMethodLockHash, LockMethodProcLockHash, LockMethods, LOCKTAG::locktag_field2, LW_EXCLUSIVE, LW_SHARED, LWLockAcquire(), LWLockRelease(), LOCALLOCKTAG::mode, PROCLOCKTAG::myLock, MyProc, PROCLOCKTAG::myProc, ProcLockHashCode(), SetupLockInTable(), PROCLOCK::tag, and LOCALLOCK::tag.

Referenced by AtPrepare_Locks().

◆ FastPathGrantRelationLock()

static bool FastPathGrantRelationLock ( Oid  relid,
LOCKMODE  lockmode 
)
static

Definition at line 2691 of file lock.c.

2692{
2693 uint32 i;
2694 uint32 unused_slot = FP_LOCK_SLOTS_PER_BACKEND;
2695
2696 /* fast-path group the lock belongs to */
2697 uint32 group = FAST_PATH_REL_GROUP(relid);
2698
2699 /* Scan for existing entry for this relid, remembering empty slot. */
2700 for (i = 0; i < FP_LOCK_SLOTS_PER_GROUP; i++)
2701 {
2702 /* index into the whole per-backend array */
2703 uint32 f = FAST_PATH_SLOT(group, i);
2704
2705 if (FAST_PATH_GET_BITS(MyProc, f) == 0)
2706 unused_slot = f;
2707 else if (MyProc->fpRelId[f] == relid)
2708 {
2709 Assert(!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode));
2710 FAST_PATH_SET_LOCKMODE(MyProc, f, lockmode);
2711 return true;
2712 }
2713 }
2714
2715 /* If no existing entry, use any empty slot. */
2716 if (unused_slot < FP_LOCK_SLOTS_PER_BACKEND)
2717 {
2718 MyProc->fpRelId[unused_slot] = relid;
2719 FAST_PATH_SET_LOCKMODE(MyProc, unused_slot, lockmode);
2720 ++FastPathLocalUseCounts[group];
2721 return true;
2722 }
2723
2724 /* No existing entry, and no empty slot. */
2725 return false;
2726}
#define FAST_PATH_SET_LOCKMODE(proc, n, l)
Definition: lock.c:247
static int FastPathLocalUseCounts[FP_LOCK_GROUPS_PER_BACKEND_MAX]
Definition: lock.c:174

References Assert, FAST_PATH_CHECK_LOCKMODE, FAST_PATH_GET_BITS, FAST_PATH_REL_GROUP, FAST_PATH_SET_LOCKMODE, FAST_PATH_SLOT, FastPathLocalUseCounts, FP_LOCK_SLOTS_PER_BACKEND, FP_LOCK_SLOTS_PER_GROUP, PGPROC::fpRelId, i, and MyProc.

Referenced by LockAcquireExtended().

◆ FastPathTransferRelationLocks()

static bool FastPathTransferRelationLocks ( LockMethod  lockMethodTable,
const LOCKTAG locktag,
uint32  hashcode 
)
static

Definition at line 2770 of file lock.c.

2772{
2773 LWLock *partitionLock = LockHashPartitionLock(hashcode);
2774 Oid relid = locktag->locktag_field2;
2775 uint32 i;
2776
2777 /*
2778 * Every PGPROC that can potentially hold a fast-path lock is present in
2779 * ProcGlobal->allProcs. Prepared transactions are not, but any
2780 * outstanding fast-path locks held by prepared transactions are
2781 * transferred to the main lock table.
2782 */
2783 for (i = 0; i < ProcGlobal->allProcCount; i++)
2784 {
2785 PGPROC *proc = &ProcGlobal->allProcs[i];
2786 uint32 j,
2787 group;
2788
2789 LWLockAcquire(&proc->fpInfoLock, LW_SHARED);
2790
2791 /*
2792 * If the target backend isn't referencing the same database as the
2793 * lock, then we needn't examine the individual relation IDs at all;
2794 * none of them can be relevant.
2795 *
2796 * proc->databaseId is set at backend startup time and never changes
2797 * thereafter, so it might be safe to perform this test before
2798 * acquiring &proc->fpInfoLock. In particular, it's certainly safe to
2799 * assume that if the target backend holds any fast-path locks, it
2800 * must have performed a memory-fencing operation (in particular, an
2801 * LWLock acquisition) since setting proc->databaseId. However, it's
2802 * less clear that our backend is certain to have performed a memory
2803 * fencing operation since the other backend set proc->databaseId. So
2804 * for now, we test it after acquiring the LWLock just to be safe.
2805 */
2806 if (proc->databaseId != locktag->locktag_field1)
2807 {
2808 LWLockRelease(&proc->fpInfoLock);
2809 continue;
2810 }
2811
2812 /* fast-path group the lock belongs to */
2813 group = FAST_PATH_REL_GROUP(relid);
2814
2815 for (j = 0; j < FP_LOCK_SLOTS_PER_GROUP; j++)
2816 {
2817 uint32 lockmode;
2818
2819 /* index into the whole per-backend array */
2820 uint32 f = FAST_PATH_SLOT(group, j);
2821
2822 /* Look for an allocated slot matching the given relid. */
2823 if (relid != proc->fpRelId[f] || FAST_PATH_GET_BITS(proc, f) == 0)
2824 continue;
2825
2826 /* Find or create lock object. */
2827 LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2828 for (lockmode = FAST_PATH_LOCKNUMBER_OFFSET;
2829 lockmode < FAST_PATH_LOCKNUMBER_OFFSET + FAST_PATH_BITS_PER_SLOT;
2830 ++lockmode)
2831 {
2832 PROCLOCK *proclock;
2833
2834 if (!FAST_PATH_CHECK_LOCKMODE(proc, f, lockmode))
2835 continue;
2836 proclock = SetupLockInTable(lockMethodTable, proc, locktag,
2837 hashcode, lockmode);
2838 if (!proclock)
2839 {
2840 LWLockRelease(partitionLock);
2841 LWLockRelease(&proc->fpInfoLock);
2842 return false;
2843 }
2844 GrantLock(proclock->tag.myLock, proclock, lockmode);
2845 FAST_PATH_CLEAR_LOCKMODE(proc, f, lockmode);
2846 }
2847 LWLockRelease(partitionLock);
2848
2849 /* No need to examine remaining slots. */
2850 break;
2851 }
2852 LWLockRelease(&proc->fpInfoLock);
2853 }
2854 return true;
2855}
int j
Definition: isn.c:73
PROC_HDR * ProcGlobal
Definition: proc.c:78
uint32 locktag_field1
Definition: lock.h:166
Definition: proc.h:162
Oid databaseId
Definition: proc.h:207
PGPROC * allProcs
Definition: proc.h:371
uint32 allProcCount
Definition: proc.h:389

References PROC_HDR::allProcCount, PROC_HDR::allProcs, PGPROC::databaseId, FAST_PATH_BITS_PER_SLOT, FAST_PATH_CHECK_LOCKMODE, FAST_PATH_CLEAR_LOCKMODE, FAST_PATH_GET_BITS, FAST_PATH_LOCKNUMBER_OFFSET, FAST_PATH_REL_GROUP, FAST_PATH_SLOT, FP_LOCK_SLOTS_PER_GROUP, PGPROC::fpInfoLock, PGPROC::fpRelId, GrantLock(), i, j, LockHashPartitionLock, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), PROCLOCKTAG::myLock, ProcGlobal, SetupLockInTable(), and PROCLOCK::tag.

Referenced by LockAcquireExtended().

◆ FastPathUnGrantRelationLock()

static bool FastPathUnGrantRelationLock ( Oid  relid,
LOCKMODE  lockmode 
)
static

Definition at line 2734 of file lock.c.

2735{
2736 uint32 i;
2737 bool result = false;
2738
2739 /* fast-path group the lock belongs to */
2740 uint32 group = FAST_PATH_REL_GROUP(relid);
2741
2742 FastPathLocalUseCounts[group] = 0;
2743 for (i = 0; i < FP_LOCK_SLOTS_PER_GROUP; i++)
2744 {
2745 /* index into the whole per-backend array */
2746 uint32 f = FAST_PATH_SLOT(group, i);
2747
2748 if (MyProc->fpRelId[f] == relid
2749 && FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
2750 {
2751 Assert(!result);
2752 FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
2753 result = true;
2754 /* we continue iterating so as to update FastPathLocalUseCount */
2755 }
2756 if (FAST_PATH_GET_BITS(MyProc, f) != 0)
2757 ++FastPathLocalUseCounts[group];
2758 }
2759 return result;
2760}

References Assert, FAST_PATH_CHECK_LOCKMODE, FAST_PATH_CLEAR_LOCKMODE, FAST_PATH_GET_BITS, FAST_PATH_REL_GROUP, FAST_PATH_SLOT, FastPathLocalUseCounts, FP_LOCK_SLOTS_PER_GROUP, PGPROC::fpRelId, i, and MyProc.

Referenced by LockRelease(), and LockReleaseAll().

◆ FinishStrongLockAcquire()

static void FinishStrongLockAcquire ( void  )
static

Definition at line 1799 of file lock.c.

1800{
1801 StrongLockInProgress = NULL;
1802}

References StrongLockInProgress.

Referenced by LockAcquireExtended().

◆ GetAwaitedLock()

LOCALLOCK * GetAwaitedLock ( void  )

Definition at line 1847 of file lock.c.

1848{
1849 return awaitedLock;
1850}
static LOCALLOCK * awaitedLock
Definition: lock.c:323

References awaitedLock.

Referenced by LockErrorCleanup(), ProcessRecoveryConflictInterrupt(), and ProcSleep().

◆ GetBlockerStatusData()

BlockedProcsData * GetBlockerStatusData ( int  blocked_pid)

Definition at line 3903 of file lock.c.

3904{
3905 BlockedProcsData *data;
3906 PGPROC *proc;
3907 int i;
3908
3909 data = (BlockedProcsData *) palloc(sizeof(BlockedProcsData));
3910
3911 /*
3912 * Guess how much space we'll need, and preallocate. Most of the time
3913 * this will avoid needing to do repalloc while holding the LWLocks. (We
3914 * assume, but check with an Assert, that MaxBackends is enough entries
3915 * for the procs[] array; the other two could need enlargement, though.)
3916 */
3917 data->nprocs = data->nlocks = data->npids = 0;
3918 data->maxprocs = data->maxlocks = data->maxpids = MaxBackends;
3919 data->procs = (BlockedProcData *) palloc(sizeof(BlockedProcData) * data->maxprocs);
3920 data->locks = (LockInstanceData *) palloc(sizeof(LockInstanceData) * data->maxlocks);
3921 data->waiter_pids = (int *) palloc(sizeof(int) * data->maxpids);
3922
3923 /*
3924 * In order to search the ProcArray for blocked_pid and assume that that
3925 * entry won't immediately disappear under us, we must hold ProcArrayLock.
3926 * In addition, to examine the lock grouping fields of any other backend,
3927 * we must hold all the hash partition locks. (Only one of those locks is
3928 * actually relevant for any one lock group, but we can't know which one
3929 * ahead of time.) It's fairly annoying to hold all those locks
3930 * throughout this, but it's no worse than GetLockStatusData(), and it
3931 * does have the advantage that we're guaranteed to return a
3932 * self-consistent instantaneous state.
3933 */
3934 LWLockAcquire(ProcArrayLock, LW_SHARED);
3935
3936 proc = BackendPidGetProcWithLock(blocked_pid);
3937
3938 /* Nothing to do if it's gone */
3939 if (proc != NULL)
3940 {
3941 /*
3942 * Acquire lock on the entire shared lock data structure. See notes
3943 * in GetLockStatusData().
3944 */
3945 for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
3946 LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);
3947
3948 if (proc->lockGroupLeader == NULL)
3949 {
3950 /* Easy case, proc is not a lock group member */
3951 GetSingleProcBlockerStatusData(proc, data);
3952 }
3953 else
3954 {
3955 /* Examine all procs in proc's lock group */
3956 dlist_iter iter;
3957
3958 dlist_foreach(iter, &proc->lockGroupMembers)
3959 {
3960 PGPROC *memberProc;
3961
3962 memberProc = dlist_container(PGPROC, lockGroupLink, iter.cur);
3963 GetSingleProcBlockerStatusData(memberProc, data);
3964 }
3965 }
3966
3967 /*
3968 * And release locks. See notes in GetLockStatusData().
3969 */
3970 for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
3971 LWLockRelease(LockHashPartitionLockByIndex(i));
3972
3973 Assert(data->nprocs <= data->maxprocs);
3974 }
3975
3976 LWLockRelease(ProcArrayLock);
3977
3978 return data;
3979}
int MaxBackends
Definition: globals.c:145
#define dlist_foreach(iter, lhead)
Definition: ilist.h:623
#define dlist_container(type, membername, ptr)
Definition: ilist.h:593
static void GetSingleProcBlockerStatusData(PGPROC *blocked_proc, BlockedProcsData *data)
Definition: lock.c:3983
#define LockHashPartitionLockByIndex(i)
Definition: lock.h:529
#define NUM_LOCK_PARTITIONS
Definition: lwlock.h:97
void * palloc(Size size)
Definition: mcxt.c:1317
const void * data
PGPROC * BackendPidGetProcWithLock(int pid)
Definition: procarray.c:3219
dlist_head lockGroupMembers
Definition: proc.h:305
PGPROC * lockGroupLeader
Definition: proc.h:304
dlist_node * cur
Definition: ilist.h:179

References Assert, BackendPidGetProcWithLock(), dlist_iter::cur, data, dlist_container, dlist_foreach, GetSingleProcBlockerStatusData(), i, PGPROC::lockGroupLeader, PGPROC::lockGroupMembers, LockHashPartitionLockByIndex, LW_SHARED, LWLockAcquire(), LWLockRelease(), MaxBackends, NUM_LOCK_PARTITIONS, and palloc().

Referenced by pg_blocking_pids().

◆ GetLockConflicts()

VirtualTransactionId * GetLockConflicts ( const LOCKTAG locktag,
LOCKMODE  lockmode,
int *  countp 
)

Definition at line 2976 of file lock.c.

2977{
2978 static VirtualTransactionId *vxids;
2979 LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
2980 LockMethod lockMethodTable;
2981 LOCK *lock;
2982 LOCKMASK conflictMask;
2983 dlist_iter proclock_iter;
2984 PROCLOCK *proclock;
2985 uint32 hashcode;
2986 LWLock *partitionLock;
2987 int count = 0;
2988 int fast_count = 0;
2989
2990 if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2991 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2992 lockMethodTable = LockMethods[lockmethodid];
2993 if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
2994 elog(ERROR, "unrecognized lock mode: %d", lockmode);
2995
2996 /*
2997 * Allocate memory to store results, and fill with InvalidVXID. We only
2998 * need enough space for MaxBackends + max_prepared_xacts + a terminator.
2999 * InHotStandby allocate once in TopMemoryContext.
3000 */
3001 if (InHotStandby)
3002 {
3003 if (vxids == NULL)
3004 vxids = (VirtualTransactionId *)
3005 MemoryContextAlloc(TopMemoryContext,
3006 sizeof(VirtualTransactionId) *
3007 (MaxBackends + max_prepared_xacts + 1));
3008 }
3009 else
3010 vxids = (VirtualTransactionId *)
3011 palloc0(sizeof(VirtualTransactionId) *
3012 (MaxBackends + max_prepared_xacts + 1));
3013
3014 /* Compute hash code and partition lock, and look up conflicting modes. */
3015 hashcode = LockTagHashCode(locktag);
3016 partitionLock = LockHashPartitionLock(hashcode);
3017 conflictMask = lockMethodTable->conflictTab[lockmode];
3018
3019 /*
3020 * Fast path locks might not have been entered in the primary lock table.
3021 * If the lock we're dealing with could conflict with such a lock, we must
3022 * examine each backend's fast-path array for conflicts.
3023 */
3024 if (ConflictsWithRelationFastPath(locktag, lockmode))
3025 {
3026 int i;
3027 Oid relid = locktag->locktag_field2;
3028 VirtualTransactionId vxid;
3029
3030 /*
3031 * Iterate over relevant PGPROCs. Anything held by a prepared
3032 * transaction will have been transferred to the primary lock table,
3033 * so we need not worry about those. This is all a bit fuzzy, because
3034 * new locks could be taken after we've visited a particular
3035 * partition, but the callers had better be prepared to deal with that
3036 * anyway, since the locks could equally well be taken between the
3037 * time we return the value and the time the caller does something
3038 * with it.
3039 */
3040 for (i = 0; i < ProcGlobal->allProcCount; i++)
3041 {
3042 PGPROC *proc = &ProcGlobal->allProcs[i];
3043 uint32 j,
3044 group;
3045
3046 /* A backend never blocks itself */
3047 if (proc == MyProc)
3048 continue;
3049
3050 LWLockAcquire(&proc->fpInfoLock, LW_SHARED);
3051
3052 /*
3053 * If the target backend isn't referencing the same database as
3054 * the lock, then we needn't examine the individual relation IDs
3055 * at all; none of them can be relevant.
3056 *
3057 * See FastPathTransferRelationLocks() for discussion of why we do
3058 * this test after acquiring the lock.
3059 */
3060 if (proc->databaseId != locktag->locktag_field1)
3061 {
3062 LWLockRelease(&proc->fpInfoLock);
3063 continue;
3064 }
3065
3066 /* fast-path group the lock belongs to */
3067 group = FAST_PATH_REL_GROUP(relid);
3068
3069 for (j = 0; j < FP_LOCK_SLOTS_PER_GROUP; j++)
3070 {
3071 uint32 lockmask;
3072
3073 /* index into the whole per-backend array */
3074 uint32 f = FAST_PATH_SLOT(group, j);
3075
3076 /* Look for an allocated slot matching the given relid. */
3077 if (relid != proc->fpRelId[f])
3078 continue;
3079 lockmask = FAST_PATH_GET_BITS(proc, f);
3080 if (!lockmask)
3081 continue;
3082 lockmask <<= FAST_PATH_LOCKNUMBER_OFFSET;
3083
3084 /*
3085 * There can only be one entry per relation, so if we found it
3086 * and it doesn't conflict, we can skip the rest of the slots.
3087 */
3088 if ((lockmask & conflictMask) == 0)
3089 break;
3090
3091 /* Conflict! */
3092 GET_VXID_FROM_PGPROC(vxid, *proc);
3093
3094 if (VirtualTransactionIdIsValid(vxid))
3095 vxids[count++] = vxid;
3096 /* else, xact already committed or aborted */
3097
3098 /* No need to examine remaining slots. */
3099 break;
3100 }
3101
3102 LWLockRelease(&proc->fpInfoLock);
3103 }
3104 }
3105
3106 /* Remember how many fast-path conflicts we found. */
3107 fast_count = count;
3108
3109 /*
3110 * Look up the lock object matching the tag.
3111 */
3112 LWLockAcquire(partitionLock, LW_SHARED);
3113
3114 lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
3115 locktag,
3116 hashcode,
3117 HASH_FIND,
3118 NULL);
3119 if (!lock)
3120 {
3121 /*
3122 * If the lock object doesn't exist, there is nothing holding a lock
3123 * on this lockable object.
3124 */
3125 LWLockRelease(partitionLock);
3126 vxids[count].procNumber = INVALID_PROC_NUMBER;
3127 vxids[count].localTransactionId = InvalidLocalTransactionId;
3128 if (countp)
3129 *countp = count;
3130 return vxids;
3131 }
3132
3133 /*
3134 * Examine each existing holder (or awaiter) of the lock.
3135 */
3136 dlist_foreach(proclock_iter, &lock->procLocks)
3137 {
3138 proclock = dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
3139
3140 if (conflictMask & proclock->holdMask)
3141 {
3142 PGPROC *proc = proclock->tag.myProc;
3143
3144 /* A backend never blocks itself */
3145 if (proc != MyProc)
3146 {
3147 VirtualTransactionId vxid;
3148
3149 GET_VXID_FROM_PGPROC(vxid, *proc);
3150
3151 if (VirtualTransactionIdIsValid(vxid))
3152 {
3153 int i;
3154
3155 /* Avoid duplicate entries. */
3156 for (i = 0; i < fast_count; ++i)
3157 if (VirtualTransactionIdEquals(vxids[i], vxid))
3158 break;
3159 if (i >= fast_count)
3160 vxids[count++] = vxid;
3161 }
3162 /* else, xact already committed or aborted */
3163 }
3164 }
3165 }
3166
3167 LWLockRelease(partitionLock);
3168
3169 if (count > MaxBackends + max_prepared_xacts) /* should never happen */
3170 elog(PANIC, "too many conflicting locks found");
3171
3172 vxids[count].procNumber = INVALID_PROC_NUMBER;
3173 vxids[count].localTransactionId = InvalidLocalTransactionId;
3174 if (countp)
3175 *countp = count;
3176 return vxids;
3177}
#define lengthof(array)
Definition: c.h:742
#define ConflictsWithRelationFastPath(locktag, mode)
Definition: lock.c:268
uint32 LockTagHashCode(const LOCKTAG *locktag)
Definition: lock.c:551
uint16 LOCKMETHODID
Definition: lock.h:122
#define VirtualTransactionIdIsValid(vxid)
Definition: lock.h:67
#define GET_VXID_FROM_PGPROC(vxid_dst, proc)
Definition: lock.h:77
#define InvalidLocalTransactionId
Definition: lock.h:65
#define VirtualTransactionIdEquals(vxid1, vxid2)
Definition: lock.h:71
int LOCKMASK
Definition: lockdefs.h:25
void * MemoryContextAlloc(MemoryContext context, Size size)
Definition: mcxt.c:1181
void * palloc0(Size size)
Definition: mcxt.c:1347
MemoryContext TopMemoryContext
Definition: mcxt.c:149
#define INVALID_PROC_NUMBER
Definition: procnumber.h:26
uint8 locktag_lockmethodid
Definition: lock.h:171
int numLockModes
Definition: lock.h:110
LocalTransactionId localTransactionId
Definition: lock.h:62
ProcNumber procNumber
Definition: lock.h:61
int max_prepared_xacts
Definition: twophase.c:115
#define InHotStandby
Definition: xlogutils.h:60

References PROC_HDR::allProcCount, PROC_HDR::allProcs, ConflictsWithRelationFastPath, LockMethodData::conflictTab, dlist_iter::cur, PGPROC::databaseId, dlist_container, dlist_foreach, elog, ERROR, FAST_PATH_GET_BITS, FAST_PATH_LOCKNUMBER_OFFSET, FAST_PATH_REL_GROUP, FAST_PATH_SLOT, FP_LOCK_SLOTS_PER_GROUP, PGPROC::fpInfoLock, PGPROC::fpRelId, GET_VXID_FROM_PGPROC, HASH_FIND, hash_search_with_hash_value(), PROCLOCK::holdMask, i, InHotStandby, INVALID_PROC_NUMBER, InvalidLocalTransactionId, j, lengthof, VirtualTransactionId::localTransactionId, LockHashPartitionLock, LockMethodLockHash, LockMethods, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_lockmethodid, LockTagHashCode(), LW_SHARED, LWLockAcquire(), LWLockRelease(), max_prepared_xacts, MaxBackends, MemoryContextAlloc(), MyProc, PROCLOCKTAG::myProc, LockMethodData::numLockModes, palloc0(), PANIC, ProcGlobal, LOCK::procLocks, VirtualTransactionId::procNumber, PROCLOCK::tag, TopMemoryContext, VirtualTransactionIdEquals, and VirtualTransactionIdIsValid.

Referenced by ProcSleep(), ResolveRecoveryConflictWithLock(), and WaitForLockersMultiple().

◆ GetLockmodeName()

const char * GetLockmodeName ( LOCKMETHODID  lockmethodid,
LOCKMODE  mode 
)

Definition at line 4160 of file lock.c.

4161{
4162 Assert(lockmethodid > 0 && lockmethodid < lengthof(LockMethods));
4163 Assert(mode > 0 && mode <= LockMethods[lockmethodid]->numLockModes);
4164 return LockMethods[lockmethodid]->lockModeNames[mode];
4165}
const char *const * lockModeNames
Definition: lock.h:112

References Assert, lengthof, LockMethods, LockMethodData::lockModeNames, and mode.

Referenced by DeadLockReport(), pg_lock_status(), and ProcSleep().

◆ GetLocksMethodTable()

LockMethod GetLocksMethodTable ( const LOCK lock)

Definition at line 521 of file lock.c.

522{
523 LOCKMETHODID lockmethodid = LOCK_LOCKMETHOD(*lock);
524
525 Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
526 return LockMethods[lockmethodid];
527}
#define LOCK_LOCKMETHOD(lock)
Definition: lock.h:324

References Assert, lengthof, LOCK_LOCKMETHOD, and LockMethods.

Referenced by DeadLockCheck(), and FindLockCycleRecurseMember().

◆ GetLockStatusData()

LockData * GetLockStatusData ( void  )

Definition at line 3700 of file lock.c.

3701{
3702 LockData *data;
3703 PROCLOCK *proclock;
3704 HASH_SEQ_STATUS seqstat;
3705 int els;
3706 int el;
3707 int i;
3708
3709 data = (LockData *) palloc(sizeof(LockData));
3710
3711 /* Guess how much space we'll need. */
3712 els = MaxBackends;
3713 el = 0;
3714 data->locks = (LockInstanceData *) palloc(sizeof(LockInstanceData) * els);
3715
3716 /*
3717 * First, we iterate through the per-backend fast-path arrays, locking
3718 * them one at a time. This might produce an inconsistent picture of the
3719 * system state, but taking all of those LWLocks at the same time seems
3720 * impractical (in particular, note MAX_SIMUL_LWLOCKS). It shouldn't
3721 * matter too much, because none of these locks can be involved in lock
3722 * conflicts anyway - anything that might must be present in the main lock
3723 * table. (For the same reason, we don't sweat about making leaderPid
3724 * completely valid. We cannot safely dereference another backend's
3725 * lockGroupLeader field without holding all lock partition locks, and
3726 * it's not worth that.)
3727 */
3728 for (i = 0; i < ProcGlobal->allProcCount; ++i)
3729 {
3730 PGPROC *proc = &ProcGlobal->allProcs[i];
3731
3732 /* Skip backends with pid=0, as they don't hold fast-path locks */
3733 if (proc->pid == 0)
3734 continue;
3735
3736 LWLockAcquire(&proc->fpInfoLock, LW_SHARED);
3737
3738 for (uint32 g = 0; g < FastPathLockGroupsPerBackend; g++)
3739 {
3740 /* Skip groups without registered fast-path locks */
3741 if (proc->fpLockBits[g] == 0)
3742 continue;
3743
3744 for (int j = 0; j < FP_LOCK_SLOTS_PER_GROUP; j++)
3745 {
3746 LockInstanceData *instance;
3747 uint32 f = FAST_PATH_SLOT(g, j);
3748 uint32 lockbits = FAST_PATH_GET_BITS(proc, f);
3749
3750 /* Skip unallocated slots */
3751 if (!lockbits)
3752 continue;
3753
3754 if (el >= els)
3755 {
3756 els += MaxBackends;
3757 data->locks = (LockInstanceData *)
3758 repalloc(data->locks, sizeof(LockInstanceData) * els);
3759 }
3760
3761 instance = &data->locks[el];
3762 SET_LOCKTAG_RELATION(instance->locktag, proc->databaseId,
3763 proc->fpRelId[f]);
3764 instance->holdMask = lockbits << FAST_PATH_LOCKNUMBER_OFFSET;
3765 instance->waitLockMode = NoLock;
3766 instance->vxid.procNumber = proc->vxid.procNumber;
3767 instance->vxid.localTransactionId = proc->vxid.lxid;
3768 instance->pid = proc->pid;
3769 instance->leaderPid = proc->pid;
3770 instance->fastpath = true;
3771
3772 /*
3773 * Successfully taking fast path lock means there were no
3774 * conflicting locks.
3775 */
3776 instance->waitStart = 0;
3777
3778 el++;
3779 }
3780 }
3781
3782 if (proc->fpVXIDLock)
3783 {
3784 VirtualTransactionId vxid;
3785 LockInstanceData *instance;
3786
3787 if (el >= els)
3788 {
3789 els += MaxBackends;
3790 data->locks = (LockInstanceData *)
3791 repalloc(data->locks, sizeof(LockInstanceData) * els);
3792 }
3793
3794 vxid.procNumber = proc->vxid.procNumber;
3795 vxid.localTransactionId = proc->fpLocalTransactionId;
3796
3797 instance = &data->locks[el];
3798 SET_LOCKTAG_VIRTUALTRANSACTION(instance->locktag, vxid);
3799 instance->holdMask = LOCKBIT_ON(ExclusiveLock);
3800 instance->waitLockMode = NoLock;
3801 instance->vxid.procNumber = proc->vxid.procNumber;
3802 instance->vxid.localTransactionId = proc->vxid.lxid;
3803 instance->pid = proc->pid;
3804 instance->leaderPid = proc->pid;
3805 instance->fastpath = true;
3806 instance->waitStart = 0;
3807
3808 el++;
3809 }
3810
3811 LWLockRelease(&proc->fpInfoLock);
3812 }
3813
3814 /*
3815 * Next, acquire lock on the entire shared lock data structure. We do
3816 * this so that, at least for locks in the primary lock table, the state
3817 * will be self-consistent.
3818 *
3819 * Since this is a read-only operation, we take shared instead of
3820 * exclusive lock. There's not a whole lot of point to this, because all
3821 * the normal operations require exclusive lock, but it doesn't hurt
3822 * anything either. It will at least allow two backends to do
3823 * GetLockStatusData in parallel.
3824 *
3825 * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
3826 */
3827 for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
3828 LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);
3829
3830 /* Now we can safely count the number of proclocks */
3831 data->nelements = el + hash_get_num_entries(LockMethodProcLockHash);
3832 if (data->nelements > els)
3833 {
3834 els = data->nelements;
3835 data->locks = (LockInstanceData *)
3836 repalloc(data->locks, sizeof(LockInstanceData) * els);
3837 }
3838
3839 /* Now scan the tables to copy the data */
3840 hash_seq_init(&seqstat, LockMethodProcLockHash);
3841
3842 while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
3843 {
3844 PGPROC *proc = proclock->tag.myProc;
3845 LOCK *lock = proclock->tag.myLock;
3846 LockInstanceData *instance = &data->locks[el];
3847
3848 memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
3849 instance->holdMask = proclock->holdMask;
3850 if (proc->waitLock == proclock->tag.myLock)
3851 instance->waitLockMode = proc->waitLockMode;
3852 else
3853 instance->waitLockMode = NoLock;
3854 instance->vxid.procNumber = proc->vxid.procNumber;
3855 instance->vxid.localTransactionId = proc->vxid.lxid;
3856 instance->pid = proc->pid;
3857 instance->leaderPid = proclock->groupLeader->pid;
3858 instance->fastpath = false;
3859 instance->waitStart = (TimestampTz) pg_atomic_read_u64(&proc->waitStart);
3860
3861 el++;
3862 }
3863
3864 /*
3865 * And release locks. We do this in reverse order for two reasons: (1)
3866 * Anyone else who needs more than one of the locks will be trying to lock
3867 * them in increasing order; we don't want to release the other process
3868 * until it can get all the locks it needs. (2) This avoids O(N^2)
3869 * behavior inside LWLockRelease.
3870 */
3871 for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
3872 LWLockRelease(LockHashPartitionLockByIndex(i));
3873
3874 Assert(el == data->nelements);
3875
3876 return data;
3877}
static uint64 pg_atomic_read_u64(volatile pg_atomic_uint64 *ptr)
Definition: atomics.h:467
int64 TimestampTz
Definition: timestamp.h:39
long hash_get_num_entries(HTAB *hashp)
Definition: dynahash.c:1341
#define SET_LOCKTAG_VIRTUALTRANSACTION(locktag, vxid)
Definition: lock.h:235
#define SET_LOCKTAG_RELATION(locktag, dboid, reloid)
Definition: lock.h:181
#define NoLock
Definition: lockdefs.h:34
#define ExclusiveLock
Definition: lockdefs.h:42
void * repalloc(void *pointer, Size size)
Definition: mcxt.c:1541
Definition: lock.h:466
LOCKMASK holdMask
Definition: lock.h:455
LOCKMODE waitLockMode
Definition: lock.h:456
bool fastpath
Definition: lock.h:462
LOCKTAG locktag
Definition: lock.h:454
TimestampTz waitStart
Definition: lock.h:458
int leaderPid
Definition: lock.h:461
VirtualTransactionId vxid
Definition: lock.h:457
LocalTransactionId lxid
Definition: proc.h:200
struct PGPROC::@123 vxid
uint64 * fpLockBits
Definition: proc.h:294
pg_atomic_uint64 waitStart
Definition: proc.h:237
bool fpVXIDLock
Definition: proc.h:296
ProcNumber procNumber
Definition: proc.h:195
int pid
Definition: proc.h:182
LOCK * waitLock
Definition: proc.h:232
LOCKMODE waitLockMode
Definition: proc.h:234
LocalTransactionId fpLocalTransactionId
Definition: proc.h:297
PGPROC * groupLeader
Definition: lock.h:375

References PROC_HDR::allProcCount, PROC_HDR::allProcs, Assert, data, PGPROC::databaseId, ExclusiveLock, FAST_PATH_GET_BITS, FAST_PATH_LOCKNUMBER_OFFSET, FAST_PATH_SLOT, LockInstanceData::fastpath, FastPathLockGroupsPerBackend, FP_LOCK_SLOTS_PER_GROUP, PGPROC::fpInfoLock, PGPROC::fpLocalTransactionId, PGPROC::fpLockBits, PGPROC::fpRelId, PGPROC::fpVXIDLock, PROCLOCK::groupLeader, hash_get_num_entries(), hash_seq_init(), hash_seq_search(), PROCLOCK::holdMask, LockInstanceData::holdMask, i, j, LockInstanceData::leaderPid, VirtualTransactionId::localTransactionId, LOCKBIT_ON, LockHashPartitionLockByIndex, LockMethodProcLockHash, LockInstanceData::locktag, LW_SHARED, LWLockAcquire(), LWLockRelease(), PGPROC::lxid, MaxBackends, PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, NoLock, NUM_LOCK_PARTITIONS, palloc(), pg_atomic_read_u64(), LockInstanceData::pid, PGPROC::pid, ProcGlobal, VirtualTransactionId::procNumber, PGPROC::procNumber, repalloc(), SET_LOCKTAG_RELATION, SET_LOCKTAG_VIRTUALTRANSACTION, LOCK::tag, PROCLOCK::tag, LockInstanceData::vxid, PGPROC::vxid, PGPROC::waitLock, LockInstanceData::waitLockMode, PGPROC::waitLockMode, LockInstanceData::waitStart, and PGPROC::waitStart.

Referenced by pg_lock_status().

◆ GetLockTagsMethodTable()

LockMethod GetLockTagsMethodTable ( const LOCKTAG locktag)

Definition at line 533 of file lock.c.

534{
535 LOCKMETHODID lockmethodid = (LOCKMETHODID) locktag->locktag_lockmethodid;
536
537 Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
538 return LockMethods[lockmethodid];
539}

References Assert, lengthof, LockMethods, and LOCKTAG::locktag_lockmethodid.

Referenced by pg_blocking_pids().

◆ GetRunningTransactionLocks()

xl_standby_lock * GetRunningTransactionLocks ( int *  nlocks)

Definition at line 4078 of file lock.c.

4079{
4080 xl_standby_lock *accessExclusiveLocks;
4081 PROCLOCK *proclock;
4082 HASH_SEQ_STATUS seqstat;
4083 int i;
4084 int index;
4085 int els;
4086
4087 /*
4088 * Acquire lock on the entire shared lock data structure.
4089 *
4090 * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
4091 */
4092 for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
4093 LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);
4094
4095 /* Now we can safely count the number of proclocks */
4096 els = hash_get_num_entries(LockMethodProcLockHash);
4097
4098 /*
4099 * Allocating enough space for all locks in the lock table is overkill,
4100 * but it's more convenient and faster than having to enlarge the array.
4101 */
4102 accessExclusiveLocks = palloc(els * sizeof(xl_standby_lock));
4103
4104 /* Now scan the tables to copy the data */
4105 hash_seq_init(&seqstat, LockMethodProcLockHash);
4106
4107 /*
4108 * If lock is a currently granted AccessExclusiveLock then it will have
4109 * just one proclock holder, so locks are never accessed twice in this
4110 * particular case. Don't copy this code for use elsewhere because in the
4111 * general case this will give you duplicate locks when looking at
4112 * non-exclusive lock types.
4113 */
4114 index = 0;
4115 while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
4116 {
4117 /* make sure this definition matches the one used in LockAcquire */
4118 if ((proclock->holdMask & LOCKBIT_ON(AccessExclusiveLock)) &&
4120 {
4121 PGPROC *proc = proclock->tag.myProc;
4122 LOCK *lock = proclock->tag.myLock;
4123 TransactionId xid = proc->xid;
4124
4125 /*
4126 * Don't record locks for transactions if we know they have
4127 * already issued their WAL record for commit but not yet released
4128 * lock. It is still possible that we see locks held by already
4129 * complete transactions, if they haven't yet zeroed their xids.
4130 */
4131 if (!TransactionIdIsValid(xid))
4132 continue;
4133
4134 accessExclusiveLocks[index].xid = xid;
4135 accessExclusiveLocks[index].dbOid = lock->tag.locktag_field1;
4136 accessExclusiveLocks[index].relOid = lock->tag.locktag_field2;
4137
4138 index++;
4139 }
4140 }
4141
4142 Assert(index <= els);
4143
4144 /*
4145 * And release locks. We do this in reverse order for two reasons: (1)
4146 * Anyone else who needs more than one of the locks will be trying to lock
4147 * them in increasing order; we don't want to release the other process
4148 * until it can get all the locks it needs. (2) This avoids O(N^2)
4149 * behavior inside LWLockRelease.
4150 */
4151 for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
4152 LWLockRelease(LockHashPartitionLockByIndex(i));
4153
4154 *nlocks = index;
4155 return accessExclusiveLocks;
4156}
uint32 TransactionId
Definition: c.h:606
#define AccessExclusiveLock
Definition: lockdefs.h:43
TransactionId xid
Definition: proc.h:172
TransactionId xid
Definition: lockdefs.h:53
#define TransactionIdIsValid(xid)
Definition: transam.h:41

References AccessExclusiveLock, Assert, xl_standby_lock::dbOid, hash_get_num_entries(), hash_seq_init(), hash_seq_search(), PROCLOCK::holdMask, i, LOCKBIT_ON, LockHashPartitionLockByIndex, LockMethodProcLockHash, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG_RELATION, LOCKTAG::locktag_type, LW_SHARED, LWLockAcquire(), LWLockRelease(), PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, NUM_LOCK_PARTITIONS, palloc(), xl_standby_lock::relOid, LOCK::tag, PROCLOCK::tag, TransactionIdIsValid, xl_standby_lock::xid, and PGPROC::xid.

Referenced by LogStandbySnapshot().

◆ GetSingleProcBlockerStatusData()

static void GetSingleProcBlockerStatusData ( PGPROC blocked_proc,
BlockedProcsData data 
)
static

Definition at line 3983 of file lock.c.

3984{
3985 LOCK *theLock = blocked_proc->waitLock;
3986 BlockedProcData *bproc;
3987 dlist_iter proclock_iter;
3988 dlist_iter proc_iter;
3989 dclist_head *waitQueue;
3990 int queue_size;
3991
3992 /* Nothing to do if this proc is not blocked */
3993 if (theLock == NULL)
3994 return;
3995
3996 /* Set up a procs[] element */
3997 bproc = &data->procs[data->nprocs++];
3998 bproc->pid = blocked_proc->pid;
3999 bproc->first_lock = data->nlocks;
4000 bproc->first_waiter = data->npids;
4001
4002 /*
4003 * We may ignore the proc's fast-path arrays, since nothing in those could
4004 * be related to a contended lock.
4005 */
4006
4007 /* Collect all PROCLOCKs associated with theLock */
4008 dlist_foreach(proclock_iter, &theLock->procLocks)
4009 {
4010 PROCLOCK *proclock =
4011 dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
4012 PGPROC *proc = proclock->tag.myProc;
4013 LOCK *lock = proclock->tag.myLock;
4014 LockInstanceData *instance;
4015
4016 if (data->nlocks >= data->maxlocks)
4017 {
4018 data->maxlocks += MaxBackends;
4019 data->locks = (LockInstanceData *)
4020 repalloc(data->locks, sizeof(LockInstanceData) * data->maxlocks);
4021 }
4022
4023 instance = &data->locks[data->nlocks];
4024 memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
4025 instance->holdMask = proclock->holdMask;
4026 if (proc->waitLock == lock)
4027 instance->waitLockMode = proc->waitLockMode;
4028 else
4029 instance->waitLockMode = NoLock;
4030 instance->vxid.procNumber = proc->vxid.procNumber;
4031 instance->vxid.localTransactionId = proc->vxid.lxid;
4032 instance->pid = proc->pid;
4033 instance->leaderPid = proclock->groupLeader->pid;
4034 instance->fastpath = false;
4035 data->nlocks++;
4036 }
4037
4038 /* Enlarge waiter_pids[] if it's too small to hold all wait queue PIDs */
4039 waitQueue = &(theLock->waitProcs);
4040 queue_size = dclist_count(waitQueue);
4041
4042 if (queue_size > data->maxpids - data->npids)
4043 {
4044 data->maxpids = Max(data->maxpids + MaxBackends,
4045 data->npids + queue_size);
4046 data->waiter_pids = (int *) repalloc(data->waiter_pids,
4047 sizeof(int) * data->maxpids);
4048 }
4049
4050 /* Collect PIDs from the lock's wait queue, stopping at blocked_proc */
4051 dclist_foreach(proc_iter, waitQueue)
4052 {
4053 PGPROC *queued_proc = dlist_container(PGPROC, links, proc_iter.cur);
4054
4055 if (queued_proc == blocked_proc)
4056 break;
4057 data->waiter_pids[data->npids++] = queued_proc->pid;
4058 queued_proc = (PGPROC *) queued_proc->links.next;
4059 }
4060
4061 bproc->num_locks = data->nlocks - bproc->first_lock;
4062 bproc->num_waiters = data->npids - bproc->first_waiter;
4063}
#define Max(x, y)
Definition: c.h:952
static uint32 dclist_count(const dclist_head *head)
Definition: ilist.h:932
#define dclist_foreach(iter, lhead)
Definition: ilist.h:970
int first_lock
Definition: lock.h:476
int first_waiter
Definition: lock.h:480
int num_waiters
Definition: lock.h:481
int num_locks
Definition: lock.h:477
dclist_head waitProcs
Definition: lock.h:317
dlist_node links
Definition: proc.h:163
dlist_node * next
Definition: ilist.h:140
static struct link * links
Definition: zic.c:299

References dlist_iter::cur, data, dclist_count(), dclist_foreach, dlist_container, dlist_foreach, LockInstanceData::fastpath, BlockedProcData::first_lock, BlockedProcData::first_waiter, PROCLOCK::groupLeader, PROCLOCK::holdMask, LockInstanceData::holdMask, LockInstanceData::leaderPid, PGPROC::links, links, VirtualTransactionId::localTransactionId, LockInstanceData::locktag, PGPROC::lxid, Max, MaxBackends, PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, dlist_node::next, NoLock, BlockedProcData::num_locks, BlockedProcData::num_waiters, LockInstanceData::pid, BlockedProcData::pid, PGPROC::pid, LOCK::procLocks, VirtualTransactionId::procNumber, PGPROC::procNumber, repalloc(), LOCK::tag, PROCLOCK::tag, LockInstanceData::vxid, PGPROC::vxid, PGPROC::waitLock, LockInstanceData::waitLockMode, PGPROC::waitLockMode, and LOCK::waitProcs.

Referenced by GetBlockerStatusData().

◆ GrantAwaitedLock()

void GrantAwaitedLock ( void  )

Definition at line 1838 of file lock.c.

1839{
1841}
static ResourceOwner awaitedOwner
Definition: lock.c:324
static void GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner)
Definition: lock.c:1741

References awaitedLock, awaitedOwner, and GrantLockLocal().

Referenced by LockErrorCleanup().

◆ GrantLock()

void GrantLock ( LOCK lock,
PROCLOCK proclock,
LOCKMODE  lockmode 
)

Definition at line 1607 of file lock.c.

1608{
 /* Record that this proclock now holds 'lockmode' on 'lock'. Caller must
  * hold the lock's partition LWLock exclusively. */
1609 lock->nGranted++;
1610 lock->granted[lockmode]++;
1611 lock->grantMask |= LOCKBIT_ON(lockmode);
 /* Once every request for this mode has been granted, no one can still be
  * waiting for it, so clear the mode's bit in waitMask. */
1612 if (lock->granted[lockmode] == lock->requested[lockmode])
1613 lock->waitMask &= LOCKBIT_OFF(lockmode);
 /* Mark the mode as held in this backend's per-lock hold mask. */
1614 proclock->holdMask |= LOCKBIT_ON(lockmode);
1615 LOCK_PRINT("GrantLock", lock, lockmode);
 /* Sanity checks: counts were just bumped, and grants never exceed requests. */
1616 Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
1617 Assert(lock->nGranted <= lock->nRequested);
1618}
#define LOCKBIT_OFF(lockmode)
Definition: lock.h:85
int requested[MAX_LOCKMODES]
Definition: lock.h:318
int granted[MAX_LOCKMODES]
Definition: lock.h:320
LOCKMASK grantMask
Definition: lock.h:314
LOCKMASK waitMask
Definition: lock.h:315
int nGranted
Definition: lock.h:321

References Assert, LOCK::granted, LOCK::grantMask, PROCLOCK::holdMask, LOCK_PRINT, LOCKBIT_OFF, LOCKBIT_ON, LOCK::nGranted, LOCK::nRequested, LOCK::requested, and LOCK::waitMask.

Referenced by FastPathGetRelationLockEntry(), FastPathTransferRelationLocks(), JoinWaitQueue(), lock_twophase_recover(), LockAcquireExtended(), ProcLockWakeup(), and VirtualXactLock().

◆ GrantLockLocal()

static void GrantLockLocal ( LOCALLOCK locallock,
ResourceOwner  owner 
)
static

Definition at line 1741 of file lock.c.

1742{
 /* Bump the local (backend-private) reference counts for an acquired lock,
  * attributing one reference to 'owner' (NULL means a session-level lock). */
1743 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
1744 int i;
1745
 /* Caller guarantees there is room for one more owner entry if needed. */
1746 Assert(locallock->numLockOwners < locallock->maxLockOwners);
1747 /* Count the total */
1748 locallock->nLocks++;
1749 /* Count the per-owner lock */
1750 for (i = 0; i < locallock->numLockOwners; i++)
1751 {
1752 if (lockOwners[i].owner == owner)
1753 {
1754 lockOwners[i].nLocks++;
1755 return;
1756 }
1757 }
 /* No existing entry for this owner; append a new one (i is now
  * numLockOwners, i.e. the first unused slot). */
1758 lockOwners[i].owner = owner;
1759 lockOwners[i].nLocks = 1;
1760 locallock->numLockOwners++;
 /* Session locks (owner == NULL) are not tracked by resource owners. */
1761 if (owner != NULL)
1762 ResourceOwnerRememberLock(owner, locallock);
1763
1764 /* Indicate that the lock is acquired for certain types of locks. */
1765 CheckAndSetLockHeld(locallock, true);
1766}
static void CheckAndSetLockHeld(LOCALLOCK *locallock, bool acquired)
Definition: lock.c:1413
void ResourceOwnerRememberLock(ResourceOwner owner, LOCALLOCK *locallock)
Definition: resowner.c:1045
int64 nLocks
Definition: lock.h:423
struct ResourceOwnerData * owner
Definition: lock.h:422
int maxLockOwners
Definition: lock.h:437

References Assert, CheckAndSetLockHeld(), i, LOCALLOCK::lockOwners, LOCALLOCK::maxLockOwners, LOCALLOCKOWNER::nLocks, LOCALLOCK::nLocks, LOCALLOCK::numLockOwners, LOCALLOCKOWNER::owner, and ResourceOwnerRememberLock().

Referenced by GrantAwaitedLock(), and LockAcquireExtended().

◆ InitLockManagerAccess()

void InitLockManagerAccess ( void  )

Definition at line 499 of file lock.c.

500{
501 /*
502 * Allocate non-shared hash table for LOCALLOCK structs. This stores lock
503 * counts and resource owner information.
504 */
505 HASHCTL info;
506
507 info.keysize = sizeof(LOCALLOCKTAG);
508 info.entrysize = sizeof(LOCALLOCK);
509
510 LockMethodLocalHash = hash_create("LOCALLOCK hash",
511 16,
512 &info,
514}
struct LOCALLOCK LOCALLOCK
struct LOCALLOCKTAG LOCALLOCKTAG

References HASHCTL::entrysize, HASH_BLOBS, hash_create(), HASH_ELEM, HASHCTL::keysize, and LockMethodLocalHash.

Referenced by BaseInit().

◆ lock_twophase_postabort()

void lock_twophase_postabort ( TransactionId  xid,
uint16  info,
void *  recdata,
uint32  len 
)

Definition at line 4503 of file lock.c.

4505{
 /* 2PC abort callback: releasing the prepared transaction's locks is the
  * same operation whether it committed or aborted, so just delegate. */
4506 lock_twophase_postcommit(xid, info, recdata, len);
4507}
void lock_twophase_postcommit(TransactionId xid, uint16 info, void *recdata, uint32 len)
Definition: lock.c:4477
const void size_t len

References len, and lock_twophase_postcommit().

◆ lock_twophase_postcommit()

void lock_twophase_postcommit ( TransactionId  xid,
uint16  info,
void *  recdata,
uint32  len 
)

Definition at line 4477 of file lock.c.

4479{
 /* 2PC commit callback: release one lock that was held by the prepared
  * transaction identified by 'xid'. 'recdata' is a TwoPhaseLockRecord
  * written at PREPARE time. */
4480 TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
 /* The prepared xact's locks are owned by its dummy PGPROC, not by us;
  * 'true' means the twophase state lock is already held. */
4481 PGPROC *proc = TwoPhaseGetDummyProc(xid, true);
4482 LOCKTAG *locktag;
4483 LOCKMETHODID lockmethodid;
4484 LockMethod lockMethodTable;
4485
4486 Assert(len == sizeof(TwoPhaseLockRecord));
4487 locktag = &rec->locktag;
4488 lockmethodid = locktag->locktag_lockmethodid;
4489
 /* Validate the lock method id embedded in the on-disk record. */
4490 if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4491 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4492 lockMethodTable = LockMethods[lockmethodid];
4493
 /* Re-find the shared lock entry and release it; final 'true' asks for the
  * fast-path strong-lock counter to be decremented if applicable. */
4494 LockRefindAndRelease(lockMethodTable, proc, locktag, rec->lockmode, true);
4495}
static void LockRefindAndRelease(LockMethod lockMethodTable, PGPROC *proc, LOCKTAG *locktag, LOCKMODE lockmode, bool decrement_strong_lock_count)
Definition: lock.c:3191
PGPROC * TwoPhaseGetDummyProc(TransactionId xid, bool lock_held)
Definition: twophase.c:918

References Assert, elog, ERROR, len, lengthof, LockMethods, TwoPhaseLockRecord::lockmode, LockRefindAndRelease(), TwoPhaseLockRecord::locktag, LOCKTAG::locktag_lockmethodid, and TwoPhaseGetDummyProc().

Referenced by lock_twophase_postabort().

◆ lock_twophase_recover()

void lock_twophase_recover ( TransactionId  xid,
uint16  info,
void *  recdata,
uint32  len 
)

Definition at line 4264 of file lock.c.

4266{
4267 TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4268 PGPROC *proc = TwoPhaseGetDummyProc(xid, false);
4269 LOCKTAG *locktag;
4270 LOCKMODE lockmode;
4271 LOCKMETHODID lockmethodid;
4272 LOCK *lock;
4273 PROCLOCK *proclock;
4274 PROCLOCKTAG proclocktag;
4275 bool found;
4276 uint32 hashcode;
4277 uint32 proclock_hashcode;
4278 int partition;
4279 LWLock *partitionLock;
4280 LockMethod lockMethodTable;
4281
4282 Assert(len == sizeof(TwoPhaseLockRecord));
4283 locktag = &rec->locktag;
4284 lockmode = rec->lockmode;
4285 lockmethodid = locktag->locktag_lockmethodid;
4286
4287 if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4288 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4289 lockMethodTable = LockMethods[lockmethodid];
4290
4291 hashcode = LockTagHashCode(locktag);
4292 partition = LockHashPartition(hashcode);
4293 partitionLock = LockHashPartitionLock(hashcode);
4294
4295 LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4296
4297 /*
4298 * Find or create a lock with this tag.
4299 */
4301 locktag,
4302 hashcode,
4304 &found);
4305 if (!lock)
4306 {
4307 LWLockRelease(partitionLock);
4308 ereport(ERROR,
4309 (errcode(ERRCODE_OUT_OF_MEMORY),
4310 errmsg("out of shared memory"),
4311 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
4312 }
4313
4314 /*
4315 * if it's a new lock object, initialize it
4316 */
4317 if (!found)
4318 {
4319 lock->grantMask = 0;
4320 lock->waitMask = 0;
4321 dlist_init(&lock->procLocks);
4322 dclist_init(&lock->waitProcs);
4323 lock->nRequested = 0;
4324 lock->nGranted = 0;
4325 MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
4326 MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
4327 LOCK_PRINT("lock_twophase_recover: new", lock, lockmode);
4328 }
4329 else
4330 {
4331 LOCK_PRINT("lock_twophase_recover: found", lock, lockmode);
4332 Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
4333 Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
4334 Assert(lock->nGranted <= lock->nRequested);
4335 }
4336
4337 /*
4338 * Create the hash key for the proclock table.
4339 */
4340 proclocktag.myLock = lock;
4341 proclocktag.myProc = proc;
4342
4343 proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
4344
4345 /*
4346 * Find or create a proclock entry with this tag
4347 */
4349 &proclocktag,
4350 proclock_hashcode,
4352 &found);
4353 if (!proclock)
4354 {
4355 /* Oops, not enough shmem for the proclock */
4356 if (lock->nRequested == 0)
4357 {
4358 /*
4359 * There are no other requestors of this lock, so garbage-collect
4360 * the lock object. We *must* do this to avoid a permanent leak
4361 * of shared memory, because there won't be anything to cause
4362 * anyone to release the lock object later.
4363 */
4366 &(lock->tag),
4367 hashcode,
4369 NULL))
4370 elog(PANIC, "lock table corrupted");
4371 }
4372 LWLockRelease(partitionLock);
4373 ereport(ERROR,
4374 (errcode(ERRCODE_OUT_OF_MEMORY),
4375 errmsg("out of shared memory"),
4376 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
4377 }
4378
4379 /*
4380 * If new, initialize the new entry
4381 */
4382 if (!found)
4383 {
4384 Assert(proc->lockGroupLeader == NULL);
4385 proclock->groupLeader = proc;
4386 proclock->holdMask = 0;
4387 proclock->releaseMask = 0;
4388 /* Add proclock to appropriate lists */
4389 dlist_push_tail(&lock->procLocks, &proclock->lockLink);
4390 dlist_push_tail(&proc->myProcLocks[partition],
4391 &proclock->procLink);
4392 PROCLOCK_PRINT("lock_twophase_recover: new", proclock);
4393 }
4394 else
4395 {
4396 PROCLOCK_PRINT("lock_twophase_recover: found", proclock);
4397 Assert((proclock->holdMask & ~lock->grantMask) == 0);
4398 }
4399
4400 /*
4401 * lock->nRequested and lock->requested[] count the total number of
4402 * requests, whether granted or waiting, so increment those immediately.
4403 */
4404 lock->nRequested++;
4405 lock->requested[lockmode]++;
4406 Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
4407
4408 /*
4409 * We shouldn't already hold the desired lock.
4410 */
4411 if (proclock->holdMask & LOCKBIT_ON(lockmode))
4412 elog(ERROR, "lock %s on object %u/%u/%u is already held",
4413 lockMethodTable->lockModeNames[lockmode],
4414 lock->tag.locktag_field1, lock->tag.locktag_field2,
4415 lock->tag.locktag_field3);
4416
4417 /*
4418 * We ignore any possible conflicts and just grant ourselves the lock. Not
4419 * only because we don't bother, but also to avoid deadlocks when
4420 * switching from standby to normal mode. See function comment.
4421 */
4422 GrantLock(lock, proclock, lockmode);
4423
4424 /*
4425 * Bump strong lock count, to make sure any fast-path lock requests won't
4426 * be granted without consulting the primary lock table.
4427 */
4428 if (ConflictsWithRelationFastPath(&lock->tag, lockmode))
4429 {
4430 uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
4431
4433 FastPathStrongRelationLocks->count[fasthashcode]++;
4435 }
4436
4437 LWLockRelease(partitionLock);
4438}
#define MemSet(start, val, len)
Definition: c.h:974
@ HASH_ENTER_NULL
Definition: hsearch.h:116
static void dlist_init(dlist_head *head)
Definition: ilist.h:314
static void dlist_push_tail(dlist_head *head, dlist_node *node)
Definition: ilist.h:364
static void dclist_init(dclist_head *head)
Definition: ilist.h:671
#define MAX_LOCKMODES
Definition: lock.h:82
#define LockHashPartition(hashcode)
Definition: lock.h:524
int LOCKMODE
Definition: lockdefs.h:26
uint32 locktag_field3
Definition: lock.h:168
dlist_head myProcLocks[NUM_LOCK_PARTITIONS]
Definition: proc.h:261
LOCKMASK releaseMask
Definition: lock.h:377

References Assert, ConflictsWithRelationFastPath, FastPathStrongRelationLockData::count, dclist_init(), dlist_init(), dlist_is_empty(), dlist_push_tail(), elog, ereport, errcode(), errhint(), errmsg(), ERROR, FastPathStrongLockHashPartition, FastPathStrongRelationLocks, LOCK::granted, GrantLock(), LOCK::grantMask, PROCLOCK::groupLeader, HASH_ENTER_NULL, HASH_REMOVE, hash_search_with_hash_value(), PROCLOCK::holdMask, len, lengthof, LOCK_PRINT, LOCKBIT_ON, PGPROC::lockGroupLeader, LockHashPartition, LockHashPartitionLock, PROCLOCK::lockLink, LockMethodLockHash, LockMethodProcLockHash, LockMethods, TwoPhaseLockRecord::lockmode, LockMethodData::lockModeNames, TwoPhaseLockRecord::locktag, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_field3, LOCKTAG::locktag_lockmethodid, LockTagHashCode(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), MAX_LOCKMODES, MemSet, FastPathStrongRelationLockData::mutex, PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, PGPROC::myProcLocks, LOCK::nGranted, LOCK::nRequested, PANIC, PROCLOCK::procLink, PROCLOCK_PRINT, ProcLockHashCode(), LOCK::procLocks, PROCLOCK::releaseMask, LOCK::requested, SpinLockAcquire, SpinLockRelease, LOCK::tag, TwoPhaseGetDummyProc(), LOCK::waitMask, and LOCK::waitProcs.

◆ lock_twophase_standby_recover()

void lock_twophase_standby_recover ( TransactionId  xid,
uint16  info,
void *  recdata,
uint32  len 
)

Definition at line 4445 of file lock.c.

4447{
4448 TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4449 LOCKTAG *locktag;
4450 LOCKMODE lockmode;
4451 LOCKMETHODID lockmethodid;
4452
4453 Assert(len == sizeof(TwoPhaseLockRecord));
4454 locktag = &rec->locktag;
4455 lockmode = rec->lockmode;
4456 lockmethodid = locktag->locktag_lockmethodid;
4457
4458 if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4459 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4460
4461 if (lockmode == AccessExclusiveLock &&
4462 locktag->locktag_type == LOCKTAG_RELATION)
4463 {
4465 locktag->locktag_field1 /* dboid */ ,
4466 locktag->locktag_field2 /* reloid */ );
4467 }
4468}
void StandbyAcquireAccessExclusiveLock(TransactionId xid, Oid dbOid, Oid relOid)
Definition: standby.c:985

References AccessExclusiveLock, Assert, elog, ERROR, len, lengthof, LockMethods, TwoPhaseLockRecord::lockmode, TwoPhaseLockRecord::locktag, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_lockmethodid, LOCKTAG_RELATION, LOCKTAG::locktag_type, and StandbyAcquireAccessExclusiveLock().

◆ LockAcquire()

LockAcquireResult LockAcquire ( const LOCKTAG locktag,
LOCKMODE  lockmode,
bool  sessionLock,
bool  dontWait 
)

◆ LockAcquireExtended()

LockAcquireResult LockAcquireExtended ( const LOCKTAG locktag,
LOCKMODE  lockmode,
bool  sessionLock,
bool  dontWait,
bool  reportMemoryError,
LOCALLOCK **  locallockp 
)

Definition at line 827 of file lock.c.

833{
834 LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
835 LockMethod lockMethodTable;
836 LOCALLOCKTAG localtag;
837 LOCALLOCK *locallock;
838 LOCK *lock;
839 PROCLOCK *proclock;
840 bool found;
841 ResourceOwner owner;
842 uint32 hashcode;
843 LWLock *partitionLock;
844 bool found_conflict;
845 ProcWaitStatus waitResult;
846 bool log_lock = false;
847
848 if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
849 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
850 lockMethodTable = LockMethods[lockmethodid];
851 if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
852 elog(ERROR, "unrecognized lock mode: %d", lockmode);
853
854 if (RecoveryInProgress() && !InRecovery &&
855 (locktag->locktag_type == LOCKTAG_OBJECT ||
856 locktag->locktag_type == LOCKTAG_RELATION) &&
857 lockmode > RowExclusiveLock)
859 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
860 errmsg("cannot acquire lock mode %s on database objects while recovery is in progress",
861 lockMethodTable->lockModeNames[lockmode]),
862 errhint("Only RowExclusiveLock or less can be acquired on database objects during recovery.")));
863
864#ifdef LOCK_DEBUG
865 if (LOCK_DEBUG_ENABLED(locktag))
866 elog(LOG, "LockAcquire: lock [%u,%u] %s",
867 locktag->locktag_field1, locktag->locktag_field2,
868 lockMethodTable->lockModeNames[lockmode]);
869#endif
870
871 /* Identify owner for lock */
872 if (sessionLock)
873 owner = NULL;
874 else
875 owner = CurrentResourceOwner;
876
877 /*
878 * Find or create a LOCALLOCK entry for this lock and lockmode
879 */
880 MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
881 localtag.lock = *locktag;
882 localtag.mode = lockmode;
883
885 &localtag,
886 HASH_ENTER, &found);
887
888 /*
889 * if it's a new locallock object, initialize it
890 */
891 if (!found)
892 {
893 locallock->lock = NULL;
894 locallock->proclock = NULL;
895 locallock->hashcode = LockTagHashCode(&(localtag.lock));
896 locallock->nLocks = 0;
897 locallock->holdsStrongLockCount = false;
898 locallock->lockCleared = false;
899 locallock->numLockOwners = 0;
900 locallock->maxLockOwners = 8;
901 locallock->lockOwners = NULL; /* in case next line fails */
902 locallock->lockOwners = (LOCALLOCKOWNER *)
904 locallock->maxLockOwners * sizeof(LOCALLOCKOWNER));
905 }
906 else
907 {
908 /* Make sure there will be room to remember the lock */
909 if (locallock->numLockOwners >= locallock->maxLockOwners)
910 {
911 int newsize = locallock->maxLockOwners * 2;
912
913 locallock->lockOwners = (LOCALLOCKOWNER *)
914 repalloc(locallock->lockOwners,
915 newsize * sizeof(LOCALLOCKOWNER));
916 locallock->maxLockOwners = newsize;
917 }
918 }
919 hashcode = locallock->hashcode;
920
921 if (locallockp)
922 *locallockp = locallock;
923
924 /*
925 * If we already hold the lock, we can just increase the count locally.
926 *
927 * If lockCleared is already set, caller need not worry about absorbing
928 * sinval messages related to the lock's object.
929 */
930 if (locallock->nLocks > 0)
931 {
932 GrantLockLocal(locallock, owner);
933 if (locallock->lockCleared)
935 else
937 }
938
939 /*
940 * We don't acquire any other heavyweight lock while holding the relation
941 * extension lock. We do allow to acquire the same relation extension
942 * lock more than once but that case won't reach here.
943 */
944 Assert(!IsRelationExtensionLockHeld);
945
946 /*
947 * Prepare to emit a WAL record if acquisition of this lock needs to be
948 * replayed in a standby server.
949 *
950 * Here we prepare to log; after lock is acquired we'll issue log record.
951 * This arrangement simplifies error recovery in case the preparation step
952 * fails.
953 *
954 * Only AccessExclusiveLocks can conflict with lock types that read-only
955 * transactions can acquire in a standby server. Make sure this definition
956 * matches the one in GetRunningTransactionLocks().
957 */
958 if (lockmode >= AccessExclusiveLock &&
959 locktag->locktag_type == LOCKTAG_RELATION &&
962 {
964 log_lock = true;
965 }
966
967 /*
968 * Attempt to take lock via fast path, if eligible. But if we remember
969 * having filled up the fast path array, we don't attempt to make any
970 * further use of it until we release some locks. It's possible that some
971 * other backend has transferred some of those locks to the shared hash
972 * table, leaving space free, but it's not worth acquiring the LWLock just
973 * to check. It's also possible that we're acquiring a second or third
974 * lock type on a relation we have already locked using the fast-path, but
975 * for now we don't worry about that case either.
976 */
977 if (EligibleForRelationFastPath(locktag, lockmode) &&
979 {
980 uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
981 bool acquired;
982
983 /*
984 * LWLockAcquire acts as a memory sequencing point, so it's safe to
985 * assume that any strong locker whose increment to
986 * FastPathStrongRelationLocks->counts becomes visible after we test
987 * it has yet to begin to transfer fast-path locks.
988 */
990 if (FastPathStrongRelationLocks->count[fasthashcode] != 0)
991 acquired = false;
992 else
993 acquired = FastPathGrantRelationLock(locktag->locktag_field2,
994 lockmode);
996 if (acquired)
997 {
998 /*
999 * The locallock might contain stale pointers to some old shared
1000 * objects; we MUST reset these to null before considering the
1001 * lock to be acquired via fast-path.
1002 */
1003 locallock->lock = NULL;
1004 locallock->proclock = NULL;
1005 GrantLockLocal(locallock, owner);
1006 return LOCKACQUIRE_OK;
1007 }
1008 }
1009
1010 /*
1011 * If this lock could potentially have been taken via the fast-path by
1012 * some other backend, we must (temporarily) disable further use of the
1013 * fast-path for this lock tag, and migrate any locks already taken via
1014 * this method to the main lock table.
1015 */
1016 if (ConflictsWithRelationFastPath(locktag, lockmode))
1017 {
1018 uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
1019
1020 BeginStrongLockAcquire(locallock, fasthashcode);
1021 if (!FastPathTransferRelationLocks(lockMethodTable, locktag,
1022 hashcode))
1023 {
1025 if (locallock->nLocks == 0)
1026 RemoveLocalLock(locallock);
1027 if (locallockp)
1028 *locallockp = NULL;
1029 if (reportMemoryError)
1030 ereport(ERROR,
1031 (errcode(ERRCODE_OUT_OF_MEMORY),
1032 errmsg("out of shared memory"),
1033 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
1034 else
1035 return LOCKACQUIRE_NOT_AVAIL;
1036 }
1037 }
1038
1039 /*
1040 * We didn't find the lock in our LOCALLOCK table, and we didn't manage to
1041 * take it via the fast-path, either, so we've got to mess with the shared
1042 * lock table.
1043 */
1044 partitionLock = LockHashPartitionLock(hashcode);
1045
1046 LWLockAcquire(partitionLock, LW_EXCLUSIVE);
1047
1048 /*
1049 * Find or create lock and proclock entries with this tag
1050 *
1051 * Note: if the locallock object already existed, it might have a pointer
1052 * to the lock already ... but we should not assume that that pointer is
1053 * valid, since a lock object with zero hold and request counts can go
1054 * away anytime. So we have to use SetupLockInTable() to recompute the
1055 * lock and proclock pointers, even if they're already set.
1056 */
1057 proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
1058 hashcode, lockmode);
1059 if (!proclock)
1060 {
1062 LWLockRelease(partitionLock);
1063 if (locallock->nLocks == 0)
1064 RemoveLocalLock(locallock);
1065 if (locallockp)
1066 *locallockp = NULL;
1067 if (reportMemoryError)
1068 ereport(ERROR,
1069 (errcode(ERRCODE_OUT_OF_MEMORY),
1070 errmsg("out of shared memory"),
1071 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
1072 else
1073 return LOCKACQUIRE_NOT_AVAIL;
1074 }
1075 locallock->proclock = proclock;
1076 lock = proclock->tag.myLock;
1077 locallock->lock = lock;
1078
1079 /*
1080 * If lock requested conflicts with locks requested by waiters, must join
1081 * wait queue. Otherwise, check for conflict with already-held locks.
1082 * (That's last because most complex check.)
1083 */
1084 if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
1085 found_conflict = true;
1086 else
1087 found_conflict = LockCheckConflicts(lockMethodTable, lockmode,
1088 lock, proclock);
1089
1090 if (!found_conflict)
1091 {
1092 /* No conflict with held or previously requested locks */
1093 GrantLock(lock, proclock, lockmode);
1094 waitResult = PROC_WAIT_STATUS_OK;
1095 }
1096 else
1097 {
1098 /*
1099 * Join the lock's wait queue. We call this even in the dontWait
1100 * case, because JoinWaitQueue() may discover that we can acquire the
1101 * lock immediately after all.
1102 */
1103 waitResult = JoinWaitQueue(locallock, lockMethodTable, dontWait);
1104 }
1105
1106 if (waitResult == PROC_WAIT_STATUS_ERROR)
1107 {
1108 /*
1109 * We're not getting the lock because a deadlock was detected already
1110 * while trying to join the wait queue, or because we would have to
1111 * wait but the caller requested no blocking.
1112 *
1113 * Undo the changes to shared entries before releasing the partition
1114 * lock.
1115 */
1117
1118 if (proclock->holdMask == 0)
1119 {
1120 uint32 proclock_hashcode;
1121
1122 proclock_hashcode = ProcLockHashCode(&proclock->tag,
1123 hashcode);
1124 dlist_delete(&proclock->lockLink);
1125 dlist_delete(&proclock->procLink);
1127 &(proclock->tag),
1128 proclock_hashcode,
1130 NULL))
1131 elog(PANIC, "proclock table corrupted");
1132 }
1133 else
1134 PROCLOCK_PRINT("LockAcquire: did not join wait queue", proclock);
1135 lock->nRequested--;
1136 lock->requested[lockmode]--;
1137 LOCK_PRINT("LockAcquire: did not join wait queue",
1138 lock, lockmode);
1139 Assert((lock->nRequested > 0) &&
1140 (lock->requested[lockmode] >= 0));
1141 Assert(lock->nGranted <= lock->nRequested);
1142 LWLockRelease(partitionLock);
1143 if (locallock->nLocks == 0)
1144 RemoveLocalLock(locallock);
1145
1146 if (dontWait)
1147 {
1148 if (locallockp)
1149 *locallockp = NULL;
1150 return LOCKACQUIRE_NOT_AVAIL;
1151 }
1152 else
1153 {
1155 /* DeadLockReport() will not return */
1156 }
1157 }
1158
1159 /*
1160 * We are now in the lock queue, or the lock was already granted. If
1161 * queued, go to sleep.
1162 */
1163 if (waitResult == PROC_WAIT_STATUS_WAITING)
1164 {
1165 Assert(!dontWait);
1166 PROCLOCK_PRINT("LockAcquire: sleeping on lock", proclock);
1167 LOCK_PRINT("LockAcquire: sleeping on lock", lock, lockmode);
1168 LWLockRelease(partitionLock);
1169
1170 waitResult = WaitOnLock(locallock, owner);
1171
1172 /*
1173 * NOTE: do not do any material change of state between here and
1174 * return. All required changes in locktable state must have been
1175 * done when the lock was granted to us --- see notes in WaitOnLock.
1176 */
1177
1178 if (waitResult == PROC_WAIT_STATUS_ERROR)
1179 {
1180 /*
1181 * We failed as a result of a deadlock, see CheckDeadLock(). Quit
1182 * now.
1183 */
1184 Assert(!dontWait);
1186 /* DeadLockReport() will not return */
1187 }
1188 }
1189 else
1190 LWLockRelease(partitionLock);
1191 Assert(waitResult == PROC_WAIT_STATUS_OK);
1192
1193 /* The lock was granted to us. Update the local lock entry accordingly */
1194 Assert((proclock->holdMask & LOCKBIT_ON(lockmode)) != 0);
1195 GrantLockLocal(locallock, owner);
1196
1197 /*
1198 * Lock state is fully up-to-date now; if we error out after this, no
1199 * special error cleanup is required.
1200 */
1202
1203 /*
1204 * Emit a WAL record if acquisition of this lock needs to be replayed in a
1205 * standby server.
1206 */
1207 if (log_lock)
1208 {
1209 /*
1210 * Decode the locktag back to the original values, to avoid sending
1211 * lots of empty bytes with every message. See lock.h to check how a
1212 * locktag is defined for LOCKTAG_RELATION
1213 */
1215 locktag->locktag_field2);
1216 }
1217
1218 return LOCKACQUIRE_OK;
1219}
void DeadLockReport(void)
Definition: deadlock.c:1072
#define LOG
Definition: elog.h:31
static void RemoveLocalLock(LOCALLOCK *locallock)
Definition: lock.c:1425
static bool FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag, uint32 hashcode)
Definition: lock.c:2770
void AbortStrongLockAcquire(void)
Definition: lock.c:1809
static bool FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode)
Definition: lock.c:2691
static ProcWaitStatus WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner)
Definition: lock.c:1872
#define EligibleForRelationFastPath(locktag, mode)
Definition: lock.c:262
static void BeginStrongLockAcquire(LOCALLOCK *locallock, uint32 fasthashcode)
Definition: lock.c:1773
bool LockCheckConflicts(LockMethod lockMethodTable, LOCKMODE lockmode, LOCK *lock, PROCLOCK *proclock)
Definition: lock.c:1478
static void FinishStrongLockAcquire(void)
Definition: lock.c:1799
@ LOCKTAG_OBJECT
Definition: lock.h:145
@ LOCKACQUIRE_ALREADY_CLEAR
Definition: lock.h:504
@ LOCKACQUIRE_OK
Definition: lock.h:502
@ LOCKACQUIRE_ALREADY_HELD
Definition: lock.h:503
@ LOCKACQUIRE_NOT_AVAIL
Definition: lock.h:501
#define RowExclusiveLock
Definition: lockdefs.h:38
ProcWaitStatus
Definition: proc.h:123
@ PROC_WAIT_STATUS_OK
Definition: proc.h:124
@ PROC_WAIT_STATUS_WAITING
Definition: proc.h:125
@ PROC_WAIT_STATUS_ERROR
Definition: proc.h:126
ResourceOwner CurrentResourceOwner
Definition: resowner.c:165
ProcWaitStatus JoinWaitQueue(LOCALLOCK *locallock, LockMethod lockMethodTable, bool dontWait)
Definition: proc.c:1089
void LogAccessExclusiveLockPrepare(void)
Definition: standby.c:1436
void LogAccessExclusiveLock(Oid dbOid, Oid relOid)
Definition: standby.c:1419
bool lockCleared
Definition: lock.h:440
bool RecoveryInProgress(void)
Definition: xlog.c:6334
#define XLogStandbyInfoActive()
Definition: xlog.h:123
bool InRecovery
Definition: xlogutils.c:50

References AbortStrongLockAcquire(), AccessExclusiveLock, Assert, BeginStrongLockAcquire(), ConflictsWithRelationFastPath, LockMethodData::conflictTab, FastPathStrongRelationLockData::count, CurrentResourceOwner, DeadLockReport(), dlist_delete(), EligibleForRelationFastPath, elog, ereport, errcode(), errhint(), errmsg(), ERROR, FAST_PATH_REL_GROUP, FastPathGrantRelationLock(), FastPathLocalUseCounts, FastPathStrongLockHashPartition, FastPathStrongRelationLocks, FastPathTransferRelationLocks(), FinishStrongLockAcquire(), FP_LOCK_SLOTS_PER_GROUP, PGPROC::fpInfoLock, GrantLock(), GrantLockLocal(), HASH_ENTER, HASH_REMOVE, hash_search(), hash_search_with_hash_value(), LOCALLOCK::hashcode, PROCLOCK::holdMask, LOCALLOCK::holdsStrongLockCount, InRecovery, JoinWaitQueue(), lengthof, LOCALLOCKTAG::lock, LOCALLOCK::lock, LOCK_PRINT, LOCKACQUIRE_ALREADY_CLEAR, LOCKACQUIRE_ALREADY_HELD, LOCKACQUIRE_NOT_AVAIL, LOCKACQUIRE_OK, LOCKBIT_ON, LockCheckConflicts(), LOCALLOCK::lockCleared, LockHashPartitionLock, PROCLOCK::lockLink, LockMethodLocalHash, LockMethodProcLockHash, LockMethods, LockMethodData::lockModeNames, LOCALLOCK::lockOwners, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_lockmethodid, LOCKTAG_OBJECT, LOCKTAG_RELATION, LOCKTAG::locktag_type, LockTagHashCode(), LOG, LogAccessExclusiveLock(), LogAccessExclusiveLockPrepare(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), LOCALLOCK::maxLockOwners, MemoryContextAlloc(), MemSet, LOCALLOCKTAG::mode, PROCLOCKTAG::myLock, MyProc, LOCK::nGranted, LOCALLOCK::nLocks, LOCK::nRequested, LockMethodData::numLockModes, LOCALLOCK::numLockOwners, PANIC, PROC_WAIT_STATUS_ERROR, PROC_WAIT_STATUS_OK, PROC_WAIT_STATUS_WAITING, PROCLOCK::procLink, LOCALLOCK::proclock, PROCLOCK_PRINT, ProcLockHashCode(), RecoveryInProgress(), RemoveLocalLock(), repalloc(), LOCK::requested, RowExclusiveLock, SetupLockInTable(), PROCLOCK::tag, TopMemoryContext, LOCK::waitMask, WaitOnLock(), and XLogStandbyInfoActive.

Referenced by ConditionalLockDatabaseObject(), ConditionalLockRelation(), ConditionalLockRelationOid(), ConditionalLockSharedObject(), LockAcquire(), LockRelation(), LockRelationId(), and LockRelationOid().

◆ LockCheckConflicts()

bool LockCheckConflicts ( LockMethod  lockMethodTable,
LOCKMODE  lockmode,
LOCK lock,
PROCLOCK proclock 
)

Definition at line 1478 of file lock.c.

/*
 * LockCheckConflicts -- determine whether the requested lockmode on `lock`
 * conflicts with locks already granted, ignoring locks held by ourselves
 * and (except for relation-extension locks) by members of our lock group.
 * Returns false if the lock can be granted, true on a real conflict.
 *
 * NOTE(review): this listing is Doxygen-extracted; source lines consisting
 * only of hyperlinked identifiers were dropped (e.g. line 1544, the
 * relation-extension-lock `if` condition) -- consult the real lock.c.
 */
1482{
1483 int numLockModes = lockMethodTable->numLockModes;
1484 LOCKMASK myLocks;
1485 int conflictMask = lockMethodTable->conflictTab[lockmode];
1486 int conflictsRemaining[MAX_LOCKMODES];
1487 int totalConflictsRemaining = 0;
1488 dlist_iter proclock_iter;
1489 int i;
1490
1491 /*
1492 * first check for global conflicts: If no locks conflict with my request,
1493 * then I get the lock.
1494 *
1495 * Checking for conflict: lock->grantMask represents the types of
1496 * currently held locks. conflictTable[lockmode] has a bit set for each
1497 * type of lock that conflicts with request. Bitwise compare tells if
1498 * there is a conflict.
1499 */
1500 if (!(conflictMask & lock->grantMask))
1501 {
1502 PROCLOCK_PRINT("LockCheckConflicts: no conflict", proclock);
1503 return false;
1504 }
1505
1506 /*
1507 * Rats. Something conflicts. But it could still be my own lock, or a
1508 * lock held by another member of my locking group. First, figure out how
1509 * many conflicts remain after subtracting out any locks I hold myself.
1510 */
1511 myLocks = proclock->holdMask;
1512 for (i = 1; i <= numLockModes; i++)
1513 {
1514 if ((conflictMask & LOCKBIT_ON(i)) == 0)
1515 {
1516 conflictsRemaining[i] = 0;
1517 continue;
1518 }
1519 conflictsRemaining[i] = lock->granted[i];
1520 if (myLocks & LOCKBIT_ON(i))
1521 --conflictsRemaining[i];
1522 totalConflictsRemaining += conflictsRemaining[i];
1523 }
1524
1525 /* If no conflicts remain, we get the lock. */
1526 if (totalConflictsRemaining == 0)
1527 {
1528 PROCLOCK_PRINT("LockCheckConflicts: resolved (simple)", proclock);
1529 return false;
1530 }
1531
1532 /* If no group locking, it's definitely a conflict. */
1533 if (proclock->groupLeader == MyProc && MyProc->lockGroupLeader == NULL)
1534 {
1535 Assert(proclock->tag.myProc == MyProc);
1536 PROCLOCK_PRINT("LockCheckConflicts: conflicting (simple)",
1537 proclock);
1538 return true;
1539 }
1540
1541 /*
1542 * The relation extension lock conflicts even between the group members.
1543 */
/* NOTE(review): the `if (...)` condition for this branch (line 1544, which
 * tested the locktag via LOCK_LOCKTAG) was lost in extraction. */
1545 {
1546 PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)",
1547 proclock);
1548 return true;
1549 }
1550
1551 /*
1552 * Locks held in conflicting modes by members of our own lock group are
1553 * not real conflicts; we can subtract those out and see if we still have
1554 * a conflict. This is O(N) in the number of processes holding or
1555 * awaiting locks on this object. We could improve that by making the
1556 * shared memory state more complex (and larger) but it doesn't seem worth
1557 * it.
1558 */
1559 dlist_foreach(proclock_iter, &lock->procLocks)
1560 {
1561 PROCLOCK *otherproclock =
1562 dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
1563
1564 if (proclock != otherproclock &&
1565 proclock->groupLeader == otherproclock->groupLeader &&
1566 (otherproclock->holdMask & conflictMask) != 0)
1567 {
1568 int intersectMask = otherproclock->holdMask & conflictMask;
1569
1570 for (i = 1; i <= numLockModes; i++)
1571 {
1572 if ((intersectMask & LOCKBIT_ON(i)) != 0)
1573 {
1574 if (conflictsRemaining[i] <= 0)
1575 elog(PANIC, "proclocks held do not match lock");
1576 conflictsRemaining[i]--;
1577 totalConflictsRemaining--;
1578 }
1579 }
1580
1581 if (totalConflictsRemaining == 0)
1582 {
1583 PROCLOCK_PRINT("LockCheckConflicts: resolved (group)",
1584 proclock);
1585 return false;
1586 }
1587 }
1588 }
1589
1590 /* Nope, it's a real conflict. */
1591 PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)", proclock);
1592 return true;
1593}
#define LOCK_LOCKTAG(lock)
Definition: lock.h:325

References Assert, LockMethodData::conflictTab, dlist_iter::cur, dlist_container, dlist_foreach, elog, LOCK::granted, LOCK::grantMask, PROCLOCK::groupLeader, PROCLOCK::holdMask, i, LOCK_LOCKTAG, LOCKBIT_ON, PGPROC::lockGroupLeader, LOCKTAG_RELATION_EXTEND, MAX_LOCKMODES, MyProc, PROCLOCKTAG::myProc, LockMethodData::numLockModes, PANIC, PROCLOCK_PRINT, LOCK::procLocks, and PROCLOCK::tag.

Referenced by JoinWaitQueue(), LockAcquireExtended(), and ProcLockWakeup().

◆ LockHasWaiters()

bool LockHasWaiters ( const LOCKTAG locktag,
LOCKMODE  lockmode,
bool  sessionLock 
)

Definition at line 690 of file lock.c.

/*
 * LockHasWaiters -- report whether any other backend is waiting on a lock
 * we currently hold in the given mode. Emits a WARNING and returns false
 * if we do not actually hold the lock; otherwise checks the shared lock
 * table's waitMask against the conflict table under LW_SHARED.
 *
 * NOTE(review): Doxygen extraction dropped line 721 (the
 * `locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,` call head).
 */
691{
692 LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
693 LockMethod lockMethodTable;
694 LOCALLOCKTAG localtag;
695 LOCALLOCK *locallock;
696 LOCK *lock;
697 PROCLOCK *proclock;
698 LWLock *partitionLock;
699 bool hasWaiters = false;
700
701 if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
702 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
703 lockMethodTable = LockMethods[lockmethodid];
704 if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
705 elog(ERROR, "unrecognized lock mode: %d", lockmode);
706
707#ifdef LOCK_DEBUG
708 if (LOCK_DEBUG_ENABLED(locktag))
709 elog(LOG, "LockHasWaiters: lock [%u,%u] %s",
710 locktag->locktag_field1, locktag->locktag_field2,
711 lockMethodTable->lockModeNames[lockmode]);
712#endif
713
714 /*
715 * Find the LOCALLOCK entry for this lock and lockmode
716 */
717 MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
718 localtag.lock = *locktag;
719 localtag.mode = lockmode;
720
722 &localtag,
723 HASH_FIND, NULL);
724
725 /*
726 * let the caller print its own error message, too. Do not ereport(ERROR).
727 */
728 if (!locallock || locallock->nLocks <= 0)
729 {
730 elog(WARNING, "you don't own a lock of type %s",
731 lockMethodTable->lockModeNames[lockmode]);
732 return false;
733 }
734
735 /*
736 * Check the shared lock table.
737 */
738 partitionLock = LockHashPartitionLock(locallock->hashcode);
739
740 LWLockAcquire(partitionLock, LW_SHARED);
741
742 /*
743 * We don't need to re-find the lock or proclock, since we kept their
744 * addresses in the locallock table, and they couldn't have been removed
745 * while we were holding a lock on them.
746 */
747 lock = locallock->lock;
748 LOCK_PRINT("LockHasWaiters: found", lock, lockmode);
749 proclock = locallock->proclock;
750 PROCLOCK_PRINT("LockHasWaiters: found", proclock);
751
752 /*
753 * Double-check that we are actually holding a lock of the type we want to
754 * release.
755 */
756 if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
757 {
758 PROCLOCK_PRINT("LockHasWaiters: WRONGTYPE", proclock);
759 LWLockRelease(partitionLock);
760 elog(WARNING, "you don't own a lock of type %s",
761 lockMethodTable->lockModeNames[lockmode]);
762 RemoveLocalLock(locallock);
763 return false;
764 }
765
766 /*
767 * Do the checking.
768 */
769 if ((lockMethodTable->conflictTab[lockmode] & lock->waitMask) != 0)
770 hasWaiters = true;
771
772 LWLockRelease(partitionLock);
773
774 return hasWaiters;
775}
#define WARNING
Definition: elog.h:36

References LockMethodData::conflictTab, elog, ERROR, HASH_FIND, hash_search(), LOCALLOCK::hashcode, PROCLOCK::holdMask, lengthof, LOCALLOCKTAG::lock, LOCALLOCK::lock, LOCK_PRINT, LOCKBIT_ON, LockHashPartitionLock, LockMethodLocalHash, LockMethods, LockMethodData::lockModeNames, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_lockmethodid, LOG, LW_SHARED, LWLockAcquire(), LWLockRelease(), MemSet, LOCALLOCKTAG::mode, LOCALLOCK::nLocks, LockMethodData::numLockModes, LOCALLOCK::proclock, PROCLOCK_PRINT, RemoveLocalLock(), LOCK::waitMask, and WARNING.

Referenced by LockHasWaitersRelation().

◆ LockHeldByMe()

bool LockHeldByMe ( const LOCKTAG locktag,
LOCKMODE  lockmode,
bool  orstronger 
)

Definition at line 637 of file lock.c.

/*
 * LockHeldByMe -- true if this backend's locallock table shows we hold the
 * given lock in exactly `lockmode`, or, when `orstronger` is set, in any
 * numerically higher (stronger) mode up to MaxLockMode (checked by simple
 * recursion with orstronger = false).
 *
 * NOTE(review): Doxygen extraction dropped line 650 (the
 * `locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,` call head).
 */
639{
640 LOCALLOCKTAG localtag;
641 LOCALLOCK *locallock;
642
643 /*
644 * See if there is a LOCALLOCK entry for this lock and lockmode
645 */
646 MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
647 localtag.lock = *locktag;
648 localtag.mode = lockmode;
649
651 &localtag,
652 HASH_FIND, NULL);
653
654 if (locallock && locallock->nLocks > 0)
655 return true;
656
657 if (orstronger)
658 {
659 LOCKMODE slockmode;
660
661 for (slockmode = lockmode + 1;
662 slockmode <= MaxLockMode;
663 slockmode++)
664 {
665 if (LockHeldByMe(locktag, slockmode, false))
666 return true;
667 }
668 }
669
670 return false;
671}
bool LockHeldByMe(const LOCKTAG *locktag, LOCKMODE lockmode, bool orstronger)
Definition: lock.c:637
#define MaxLockMode
Definition: lockdefs.h:45

References HASH_FIND, hash_search(), LOCALLOCKTAG::lock, LockHeldByMe(), LockMethodLocalHash, MaxLockMode, MemSet, LOCALLOCKTAG::mode, and LOCALLOCK::nLocks.

Referenced by CheckRelationLockedByMe(), CheckRelationOidLockedByMe(), and LockHeldByMe().

◆ LockManagerShmemInit()

void LockManagerShmemInit ( void  )

Definition at line 438 of file lock.c.

/*
 * LockManagerShmemInit -- create or attach the shared-memory lock manager
 * structures: the partitioned LOCK and PROCLOCK hash tables (sized from
 * NLOCKENTS(), with PROCLOCK assumed at 2 holders per lock) and the
 * fast-path strong-relation-lock counters.
 *
 * NOTE(review): Doxygen extraction dropped several hyperlink-only lines
 * here (e.g. the `info.num_partitions = NUM_LOCK_PARTITIONS;` assignments,
 * the ShmemInitHash flag arguments, the FastPathStrongRelationLocks
 * assignment, and the SpinLockInit call) -- consult the real lock.c.
 */
439{
440 HASHCTL info;
441 long init_table_size,
442 max_table_size;
443 bool found;
444
445 /*
446 * Compute init/max size to request for lock hashtables. Note these
447 * calculations must agree with LockManagerShmemSize!
448 */
449 max_table_size = NLOCKENTS();
450 init_table_size = max_table_size / 2;
451
452 /*
453 * Allocate hash table for LOCK structs. This stores per-locked-object
454 * information.
455 */
456 info.keysize = sizeof(LOCKTAG);
457 info.entrysize = sizeof(LOCK);
459
460 LockMethodLockHash = ShmemInitHash("LOCK hash",
461 init_table_size,
462 max_table_size,
463 &info,
465
466 /* Assume an average of 2 holders per lock */
467 max_table_size *= 2;
468 init_table_size *= 2;
469
470 /*
471 * Allocate hash table for PROCLOCK structs. This stores
472 * per-lock-per-holder information.
473 */
474 info.keysize = sizeof(PROCLOCKTAG);
475 info.entrysize = sizeof(PROCLOCK);
476 info.hash = proclock_hash;
478
479 LockMethodProcLockHash = ShmemInitHash("PROCLOCK hash",
480 init_table_size,
481 max_table_size,
482 &info,
484
485 /*
486 * Allocate fast-path structures.
487 */
489 ShmemInitStruct("Fast Path Strong Relation Lock Data",
490 sizeof(FastPathStrongRelationLockData), &found);
491 if (!found)
493}
#define HASH_FUNCTION
Definition: hsearch.h:98
#define HASH_PARTITION
Definition: hsearch.h:92
#define NLOCKENTS()
Definition: lock.c:54
static uint32 proclock_hash(const void *key, Size keysize)
Definition: lock.c:568
struct LOCK LOCK
struct PROCLOCK PROCLOCK
struct PROCLOCKTAG PROCLOCKTAG
HTAB * ShmemInitHash(const char *name, long init_size, long max_size, HASHCTL *infoP, int hash_flags)
Definition: shmem.c:327
void * ShmemInitStruct(const char *name, Size size, bool *foundPtr)
Definition: shmem.c:382
#define SpinLockInit(lock)
Definition: spin.h:57
HashValueFunc hash
Definition: hsearch.h:78
long num_partitions
Definition: hsearch.h:68

References HASHCTL::entrysize, FastPathStrongRelationLocks, HASHCTL::hash, HASH_BLOBS, HASH_ELEM, HASH_FUNCTION, HASH_PARTITION, HASHCTL::keysize, LockMethodLockHash, LockMethodProcLockHash, FastPathStrongRelationLockData::mutex, NLOCKENTS, NUM_LOCK_PARTITIONS, HASHCTL::num_partitions, proclock_hash(), ShmemInitHash(), ShmemInitStruct(), and SpinLockInit.

Referenced by CreateOrAttachShmemStructs().

◆ LockManagerShmemSize()

Size LockManagerShmemSize ( void  )

Definition at line 3663 of file lock.c.

/*
 * LockManagerShmemSize -- estimate the shared memory needed by the lock
 * manager: the LOCK hash (NLOCKENTS() entries), the PROCLOCK hash (twice
 * that, matching LockManagerShmemInit's 2-holders-per-lock assumption),
 * plus a 10% safety margin since NLOCKENTS is only an estimate.
 */
3664{
3665 Size size = 0;
3666 long max_table_size;
3667
3668 /* lock hash table */
3669 max_table_size = NLOCKENTS();
3670 size = add_size(size, hash_estimate_size(max_table_size, sizeof(LOCK)));
3671
3672 /* proclock hash table */
3673 max_table_size *= 2;
3674 size = add_size(size, hash_estimate_size(max_table_size, sizeof(PROCLOCK)));
3675
3676 /*
3677 * Since NLOCKENTS is only an estimate, add 10% safety margin.
3678 */
3679 size = add_size(size, size / 10);
3680
3681 return size;
3682}
size_t Size
Definition: c.h:559
Size hash_estimate_size(long num_entries, Size entrysize)
Definition: dynahash.c:783
Size add_size(Size s1, Size s2)
Definition: shmem.c:488
static pg_noinline void Size size
Definition: slab.c:607

References add_size(), hash_estimate_size(), NLOCKENTS, and size.

Referenced by CalculateShmemSize().

◆ LockReassignCurrentOwner()

void LockReassignCurrentOwner ( LOCALLOCK **  locallocks,
int  nlocks 
)

Definition at line 2615 of file lock.c.

/*
 * LockReassignCurrentOwner -- transfer all locks belonging to
 * CurrentResourceOwner to its parent owner. With a NULL `locallocks`
 * array, scans the whole locallock hash table; otherwise reassigns just
 * the listed entries (in reverse order).
 *
 * NOTE(review): Doxygen extraction dropped line 2617 (the
 * `ResourceOwner parent = ResourceOwnerGetParent(CurrentResourceOwner);`
 * declaration) and line 2626 (the `hash_seq_init(...)` call).
 */
2616{
2618
2619 Assert(parent != NULL);
2620
2621 if (locallocks == NULL)
2622 {
2623 HASH_SEQ_STATUS status;
2624 LOCALLOCK *locallock;
2625
2627
2628 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2629 LockReassignOwner(locallock, parent);
2630 }
2631 else
2632 {
2633 int i;
2634
2635 for (i = nlocks - 1; i >= 0; i--)
2636 LockReassignOwner(locallocks[i], parent);
2637 }
2638}
static void LockReassignOwner(LOCALLOCK *locallock, ResourceOwner parent)
Definition: lock.c:2645
ResourceOwner ResourceOwnerGetParent(ResourceOwner owner)
Definition: resowner.c:888

References Assert, CurrentResourceOwner, hash_seq_init(), hash_seq_search(), i, LockMethodLocalHash, LockReassignOwner(), and ResourceOwnerGetParent().

Referenced by ResourceOwnerReleaseInternal().

◆ LockReassignOwner()

static void LockReassignOwner ( LOCALLOCK locallock,
ResourceOwner  parent 
)
static

Definition at line 2645 of file lock.c.

/*
 * LockReassignOwner -- subroutine of LockReassignCurrentOwner: move one
 * locallock's reference count from CurrentResourceOwner to `parent`. If
 * the parent has no existing slot the child's slot is simply relabeled;
 * otherwise the counts are merged and the child's slot compacted out.
 *
 * NOTE(review): Doxygen extraction dropped line 2683 (per the trailing
 * reference list, a `ResourceOwnerForgetLock(CurrentResourceOwner, ...)`
 * call after the if/else) -- confirm against the real lock.c.
 */
2646{
2647 LOCALLOCKOWNER *lockOwners;
2648 int i;
2649 int ic = -1;
2650 int ip = -1;
2651
2652 /*
2653 * Scan to see if there are any locks belonging to current owner or its
2654 * parent
2655 */
2656 lockOwners = locallock->lockOwners;
2657 for (i = locallock->numLockOwners - 1; i >= 0; i--)
2658 {
2659 if (lockOwners[i].owner == CurrentResourceOwner)
2660 ic = i;
2661 else if (lockOwners[i].owner == parent)
2662 ip = i;
2663 }
2664
2665 if (ic < 0)
2666 return; /* no current locks */
2667
2668 if (ip < 0)
2669 {
2670 /* Parent has no slot, so just give it the child's slot */
2671 lockOwners[ic].owner = parent;
2672 ResourceOwnerRememberLock(parent, locallock);
2673 }
2674 else
2675 {
2676 /* Merge child's count with parent's */
2677 lockOwners[ip].nLocks += lockOwners[ic].nLocks;
2678 /* compact out unused slot */
2679 locallock->numLockOwners--;
2680 if (ic < locallock->numLockOwners)
2681 lockOwners[ic] = lockOwners[locallock->numLockOwners];
2682 }
2684}
void ResourceOwnerForgetLock(ResourceOwner owner, LOCALLOCK *locallock)
Definition: resowner.c:1065

References CurrentResourceOwner, i, LOCALLOCK::lockOwners, LOCALLOCKOWNER::nLocks, LOCALLOCK::numLockOwners, LOCALLOCKOWNER::owner, ResourceOwnerForgetLock(), and ResourceOwnerRememberLock().

Referenced by LockReassignCurrentOwner().

◆ LockRefindAndRelease()

static void LockRefindAndRelease ( LockMethod  lockMethodTable,
PGPROC proc,
LOCKTAG locktag,
LOCKMODE  lockmode,
bool  decrement_strong_lock_count 
)
static

Definition at line 3191 of file lock.c.

/*
 * LockRefindAndRelease -- release a lock for which we have no direct LOCK
 * or PROCLOCK pointer: re-look both up in the shared hash tables by tag
 * (PANICing if absent), UnGrantLock + CleanUpLock it, and, for 2PC only,
 * decrement the fast-path strong-lock counter when requested.
 *
 * NOTE(review): Doxygen extraction dropped the heads of both
 * hash_search_with_hash_value() calls (lines 3211 and 3227) and the
 * SpinLockAcquire/SpinLockRelease lines (3267/3270) around the counter
 * decrement -- consult the real lock.c.
 */
3194{
3195 LOCK *lock;
3196 PROCLOCK *proclock;
3197 PROCLOCKTAG proclocktag;
3198 uint32 hashcode;
3199 uint32 proclock_hashcode;
3200 LWLock *partitionLock;
3201 bool wakeupNeeded;
3202
3203 hashcode = LockTagHashCode(locktag);
3204 partitionLock = LockHashPartitionLock(hashcode);
3205
3206 LWLockAcquire(partitionLock, LW_EXCLUSIVE);
3207
3208 /*
3209 * Re-find the lock object (it had better be there).
3210 */
3212 locktag,
3213 hashcode,
3214 HASH_FIND,
3215 NULL);
3216 if (!lock)
3217 elog(PANIC, "failed to re-find shared lock object");
3218
3219 /*
3220 * Re-find the proclock object (ditto).
3221 */
3222 proclocktag.myLock = lock;
3223 proclocktag.myProc = proc;
3224
3225 proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
3226
3228 &proclocktag,
3229 proclock_hashcode,
3230 HASH_FIND,
3231 NULL);
3232 if (!proclock)
3233 elog(PANIC, "failed to re-find shared proclock object");
3234
3235 /*
3236 * Double-check that we are actually holding a lock of the type we want to
3237 * release.
3238 */
3239 if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
3240 {
3241 PROCLOCK_PRINT("lock_twophase_postcommit: WRONGTYPE", proclock);
3242 LWLockRelease(partitionLock);
3243 elog(WARNING, "you don't own a lock of type %s",
3244 lockMethodTable->lockModeNames[lockmode]);
3245 return;
3246 }
3247
3248 /*
3249 * Do the releasing. CleanUpLock will waken any now-wakable waiters.
3250 */
3251 wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
3252
3253 CleanUpLock(lock, proclock,
3254 lockMethodTable, hashcode,
3255 wakeupNeeded);
3256
3257 LWLockRelease(partitionLock);
3258
3259 /*
3260 * Decrement strong lock count. This logic is needed only for 2PC.
3261 */
3262 if (decrement_strong_lock_count
3263 && ConflictsWithRelationFastPath(locktag, lockmode))
3264 {
3265 uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
3266
3268 Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
3269 FastPathStrongRelationLocks->count[fasthashcode]--;
3271 }
3272}
static bool UnGrantLock(LOCK *lock, LOCKMODE lockmode, PROCLOCK *proclock, LockMethod lockMethodTable)
Definition: lock.c:1630
static void CleanUpLock(LOCK *lock, PROCLOCK *proclock, LockMethod lockMethodTable, uint32 hashcode, bool wakeupNeeded)
Definition: lock.c:1687

References Assert, CleanUpLock(), ConflictsWithRelationFastPath, FastPathStrongRelationLockData::count, elog, FastPathStrongLockHashPartition, FastPathStrongRelationLocks, HASH_FIND, hash_search_with_hash_value(), PROCLOCK::holdMask, LOCKBIT_ON, LockHashPartitionLock, LockMethodLockHash, LockMethodProcLockHash, LockMethodData::lockModeNames, LockTagHashCode(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), FastPathStrongRelationLockData::mutex, PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, PANIC, PROCLOCK_PRINT, ProcLockHashCode(), SpinLockAcquire, SpinLockRelease, UnGrantLock(), and WARNING.

Referenced by lock_twophase_postcommit(), LockReleaseAll(), and VirtualXactLockTableCleanup().

◆ LockRelease()

bool LockRelease ( const LOCKTAG locktag,
LOCKMODE  lockmode,
bool  sessionLock 
)

Definition at line 2011 of file lock.c.

/*
 * LockRelease -- release one previously-acquired lock: decrement the
 * resource-owner count and the total local count, and only when the local
 * count hits zero actually release it, first via the fast path when
 * eligible, otherwise via UnGrantLock/CleanUpLock on the shared tables
 * (re-finding LOCK/PROCLOCK if the lock was promoted out of the fast
 * path). Returns false (with a WARNING) if we don't hold such a lock.
 *
 * NOTE(review): Doxygen extraction dropped hyperlink-only lines here,
 * including the hash_search() call head (2042), the fast-path group-count
 * condition (2116), the fpInfoLock LWLockAcquire/Release pair (2124/2127),
 * and the hash_search_with_hash_value() call heads (2156/2167) -- consult
 * the real lock.c.
 */
2012{
2013 LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
2014 LockMethod lockMethodTable;
2015 LOCALLOCKTAG localtag;
2016 LOCALLOCK *locallock;
2017 LOCK *lock;
2018 PROCLOCK *proclock;
2019 LWLock *partitionLock;
2020 bool wakeupNeeded;
2021
2022 if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2023 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2024 lockMethodTable = LockMethods[lockmethodid];
2025 if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
2026 elog(ERROR, "unrecognized lock mode: %d", lockmode);
2027
2028#ifdef LOCK_DEBUG
2029 if (LOCK_DEBUG_ENABLED(locktag))
2030 elog(LOG, "LockRelease: lock [%u,%u] %s",
2031 locktag->locktag_field1, locktag->locktag_field2,
2032 lockMethodTable->lockModeNames[lockmode]);
2033#endif
2034
2035 /*
2036 * Find the LOCALLOCK entry for this lock and lockmode
2037 */
2038 MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
2039 localtag.lock = *locktag;
2040 localtag.mode = lockmode;
2041
2043 &localtag,
2044 HASH_FIND, NULL);
2045
2046 /*
2047 * let the caller print its own error message, too. Do not ereport(ERROR).
2048 */
2049 if (!locallock || locallock->nLocks <= 0)
2050 {
2051 elog(WARNING, "you don't own a lock of type %s",
2052 lockMethodTable->lockModeNames[lockmode]);
2053 return false;
2054 }
2055
2056 /*
2057 * Decrease the count for the resource owner.
2058 */
2059 {
2060 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
2061 ResourceOwner owner;
2062 int i;
2063
2064 /* Identify owner for lock */
2065 if (sessionLock)
2066 owner = NULL;
2067 else
2068 owner = CurrentResourceOwner;
2069
2070 for (i = locallock->numLockOwners - 1; i >= 0; i--)
2071 {
2072 if (lockOwners[i].owner == owner)
2073 {
2074 Assert(lockOwners[i].nLocks > 0);
2075 if (--lockOwners[i].nLocks == 0)
2076 {
2077 if (owner != NULL)
2078 ResourceOwnerForgetLock(owner, locallock);
2079 /* compact out unused slot */
2080 locallock->numLockOwners--;
2081 if (i < locallock->numLockOwners)
2082 lockOwners[i] = lockOwners[locallock->numLockOwners];
2083 }
2084 break;
2085 }
2086 }
2087 if (i < 0)
2088 {
2089 /* don't release a lock belonging to another owner */
2090 elog(WARNING, "you don't own a lock of type %s",
2091 lockMethodTable->lockModeNames[lockmode]);
2092 return false;
2093 }
2094 }
2095
2096 /*
2097 * Decrease the total local count. If we're still holding the lock, we're
2098 * done.
2099 */
2100 locallock->nLocks--;
2101
2102 if (locallock->nLocks > 0)
2103 return true;
2104
2105 /*
2106 * At this point we can no longer suppose we are clear of invalidation
2107 * messages related to this lock. Although we'll delete the LOCALLOCK
2108 * object before any intentional return from this routine, it seems worth
2109 * the trouble to explicitly reset lockCleared right now, just in case
2110 * some error prevents us from deleting the LOCALLOCK.
2111 */
2112 locallock->lockCleared = false;
2113
2114 /* Attempt fast release of any lock eligible for the fast path. */
2115 if (EligibleForRelationFastPath(locktag, lockmode) &&
2117 {
2118 bool released;
2119
2120 /*
2121 * We might not find the lock here, even if we originally entered it
2122 * here. Another backend may have moved it to the main table.
2123 */
2125 released = FastPathUnGrantRelationLock(locktag->locktag_field2,
2126 lockmode);
2128 if (released)
2129 {
2130 RemoveLocalLock(locallock);
2131 return true;
2132 }
2133 }
2134
2135 /*
2136 * Otherwise we've got to mess with the shared lock table.
2137 */
2138 partitionLock = LockHashPartitionLock(locallock->hashcode);
2139
2140 LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2141
2142 /*
2143 * Normally, we don't need to re-find the lock or proclock, since we kept
2144 * their addresses in the locallock table, and they couldn't have been
2145 * removed while we were holding a lock on them. But it's possible that
2146 * the lock was taken fast-path and has since been moved to the main hash
2147 * table by another backend, in which case we will need to look up the
2148 * objects here. We assume the lock field is NULL if so.
2149 */
2150 lock = locallock->lock;
2151 if (!lock)
2152 {
2153 PROCLOCKTAG proclocktag;
2154
2155 Assert(EligibleForRelationFastPath(locktag, lockmode));
2157 locktag,
2158 locallock->hashcode,
2159 HASH_FIND,
2160 NULL);
2161 if (!lock)
2162 elog(ERROR, "failed to re-find shared lock object");
2163 locallock->lock = lock;
2164
2165 proclocktag.myLock = lock;
2166 proclocktag.myProc = MyProc;
2168 &proclocktag,
2169 HASH_FIND,
2170 NULL);
2171 if (!locallock->proclock)
2172 elog(ERROR, "failed to re-find shared proclock object");
2173 }
2174 LOCK_PRINT("LockRelease: found", lock, lockmode);
2175 proclock = locallock->proclock;
2176 PROCLOCK_PRINT("LockRelease: found", proclock);
2177
2178 /*
2179 * Double-check that we are actually holding a lock of the type we want to
2180 * release.
2181 */
2182 if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
2183 {
2184 PROCLOCK_PRINT("LockRelease: WRONGTYPE", proclock);
2185 LWLockRelease(partitionLock);
2186 elog(WARNING, "you don't own a lock of type %s",
2187 lockMethodTable->lockModeNames[lockmode]);
2188 RemoveLocalLock(locallock);
2189 return false;
2190 }
2191
2192 /*
2193 * Do the releasing. CleanUpLock will waken any now-wakable waiters.
2194 */
2195 wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
2196
2197 CleanUpLock(lock, proclock,
2198 lockMethodTable, locallock->hashcode,
2199 wakeupNeeded);
2200
2201 LWLockRelease(partitionLock);
2202
2203 RemoveLocalLock(locallock);
2204 return true;
2205}
static bool FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode)
Definition: lock.c:2734

References Assert, CleanUpLock(), CurrentResourceOwner, EligibleForRelationFastPath, elog, ERROR, FAST_PATH_REL_GROUP, FastPathLocalUseCounts, FastPathUnGrantRelationLock(), PGPROC::fpInfoLock, HASH_FIND, hash_search(), hash_search_with_hash_value(), LOCALLOCK::hashcode, PROCLOCK::holdMask, i, lengthof, LOCALLOCKTAG::lock, LOCALLOCK::lock, LOCK_PRINT, LOCKBIT_ON, LOCALLOCK::lockCleared, LockHashPartitionLock, LockMethodLocalHash, LockMethodLockHash, LockMethodProcLockHash, LockMethods, LockMethodData::lockModeNames, LOCALLOCK::lockOwners, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_lockmethodid, LOG, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), MemSet, LOCALLOCKTAG::mode, PROCLOCKTAG::myLock, MyProc, PROCLOCKTAG::myProc, LOCALLOCK::nLocks, LockMethodData::numLockModes, LOCALLOCK::numLockOwners, LOCALLOCK::proclock, PROCLOCK_PRINT, RemoveLocalLock(), ResourceOwnerForgetLock(), UnGrantLock(), and WARNING.

Referenced by ConditionalXactLockTableWait(), pg_advisory_unlock_int4(), pg_advisory_unlock_int8(), pg_advisory_unlock_shared_int4(), pg_advisory_unlock_shared_int8(), ReleaseLockIfHeld(), SearchSysCacheLocked1(), SpeculativeInsertionLockRelease(), SpeculativeInsertionWait(), StandbyReleaseXidEntryLocks(), UnlockApplyTransactionForSession(), UnlockDatabaseObject(), UnlockPage(), UnlockRelation(), UnlockRelationForExtension(), UnlockRelationId(), UnlockRelationIdForSession(), UnlockRelationOid(), UnlockSharedObject(), UnlockSharedObjectForSession(), UnlockTuple(), VirtualXactLock(), XactLockForVirtualXact(), XactLockTableDelete(), and XactLockTableWait().

◆ LockReleaseAll()

void LockReleaseAll ( LOCKMETHODID  lockmethodid,
bool  allLocks 
)

Definition at line 2216 of file lock.c.

/*
 * LockReleaseAll -- release all locks of the given lock method held by this
 * backend (all of them if `allLocks`, otherwise everything except session
 * locks). First pass scans the locallock table, releasing fast-path locks
 * and marking each proclock's releaseMask; second pass walks each lock
 * partition's proclock list and releases the marked modes via
 * UnGrantLock/CleanUpLock.
 *
 * NOTE(review): Doxygen extraction dropped hyperlink-only lines here,
 * including the VirtualXactLockTableCleanup() call (2243), the
 * hash_seq_init() call (2255), and the fpInfoLock
 * LWLockAcquire/LWLockRelease lines (2338/2355/2381) -- consult the real
 * lock.c.
 */
2217{
2218 HASH_SEQ_STATUS status;
2219 LockMethod lockMethodTable;
2220 int i,
2221 numLockModes;
2222 LOCALLOCK *locallock;
2223 LOCK *lock;
2224 int partition;
2225 bool have_fast_path_lwlock = false;
2226
2227 if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2228 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2229 lockMethodTable = LockMethods[lockmethodid];
2230
2231#ifdef LOCK_DEBUG
2232 if (*(lockMethodTable->trace_flag))
2233 elog(LOG, "LockReleaseAll: lockmethod=%d", lockmethodid);
2234#endif
2235
2236 /*
2237 * Get rid of our fast-path VXID lock, if appropriate. Note that this is
2238 * the only way that the lock we hold on our own VXID can ever get
2239 * released: it is always and only released when a toplevel transaction
2240 * ends.
2241 */
2242 if (lockmethodid == DEFAULT_LOCKMETHOD)
2244
2245 numLockModes = lockMethodTable->numLockModes;
2246
2247 /*
2248 * First we run through the locallock table and get rid of unwanted
2249 * entries, then we scan the process's proclocks and get rid of those. We
2250 * do this separately because we may have multiple locallock entries
2251 * pointing to the same proclock, and we daren't end up with any dangling
2252 * pointers. Fast-path locks are cleaned up during the locallock table
2253 * scan, though.
2254 */
2256
2257 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2258 {
2259 /*
2260 * If the LOCALLOCK entry is unused, something must've gone wrong
2261 * while trying to acquire this lock. Just forget the local entry.
2262 */
2263 if (locallock->nLocks == 0)
2264 {
2265 RemoveLocalLock(locallock);
2266 continue;
2267 }
2268
2269 /* Ignore items that are not of the lockmethod to be removed */
2270 if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
2271 continue;
2272
2273 /*
2274 * If we are asked to release all locks, we can just zap the entry.
2275 * Otherwise, must scan to see if there are session locks. We assume
2276 * there is at most one lockOwners entry for session locks.
2277 */
2278 if (!allLocks)
2279 {
2280 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
2281
2282 /* If session lock is above array position 0, move it down to 0 */
2283 for (i = 0; i < locallock->numLockOwners; i++)
2284 {
2285 if (lockOwners[i].owner == NULL)
2286 lockOwners[0] = lockOwners[i];
2287 else
2288 ResourceOwnerForgetLock(lockOwners[i].owner, locallock);
2289 }
2290
2291 if (locallock->numLockOwners > 0 &&
2292 lockOwners[0].owner == NULL &&
2293 lockOwners[0].nLocks > 0)
2294 {
2295 /* Fix the locallock to show just the session locks */
2296 locallock->nLocks = lockOwners[0].nLocks;
2297 locallock->numLockOwners = 1;
2298 /* We aren't deleting this locallock, so done */
2299 continue;
2300 }
2301 else
2302 locallock->numLockOwners = 0;
2303 }
2304
2305#ifdef USE_ASSERT_CHECKING
2306
2307 /*
2308 * Tuple locks are currently held only for short durations within a
2309 * transaction. Check that we didn't forget to release one.
2310 */
2311 if (LOCALLOCK_LOCKTAG(*locallock) == LOCKTAG_TUPLE && !allLocks)
2312 elog(WARNING, "tuple lock held at commit");
2313#endif
2314
2315 /*
2316 * If the lock or proclock pointers are NULL, this lock was taken via
2317 * the relation fast-path (and is not known to have been transferred).
2318 */
2319 if (locallock->proclock == NULL || locallock->lock == NULL)
2320 {
2321 LOCKMODE lockmode = locallock->tag.mode;
2322 Oid relid;
2323
2324 /* Verify that a fast-path lock is what we've got. */
2325 if (!EligibleForRelationFastPath(&locallock->tag.lock, lockmode))
2326 elog(PANIC, "locallock table corrupted");
2327
2328 /*
2329 * If we don't currently hold the LWLock that protects our
2330 * fast-path data structures, we must acquire it before attempting
2331 * to release the lock via the fast-path. We will continue to
2332 * hold the LWLock until we're done scanning the locallock table,
2333 * unless we hit a transferred fast-path lock. (XXX is this
2334 * really such a good idea? There could be a lot of entries ...)
2335 */
2336 if (!have_fast_path_lwlock)
2337 {
2339 have_fast_path_lwlock = true;
2340 }
2341
2342 /* Attempt fast-path release. */
2343 relid = locallock->tag.lock.locktag_field2;
2344 if (FastPathUnGrantRelationLock(relid, lockmode))
2345 {
2346 RemoveLocalLock(locallock);
2347 continue;
2348 }
2349
2350 /*
2351 * Our lock, originally taken via the fast path, has been
2352 * transferred to the main lock table. That's going to require
2353 * some extra work, so release our fast-path lock before starting.
2354 */
2356 have_fast_path_lwlock = false;
2357
2358 /*
2359 * Now dump the lock. We haven't got a pointer to the LOCK or
2360 * PROCLOCK in this case, so we have to handle this a bit
2361 * differently than a normal lock release. Unfortunately, this
2362 * requires an extra LWLock acquire-and-release cycle on the
2363 * partitionLock, but hopefully it shouldn't happen often.
2364 */
2365 LockRefindAndRelease(lockMethodTable, MyProc,
2366 &locallock->tag.lock, lockmode, false);
2367 RemoveLocalLock(locallock);
2368 continue;
2369 }
2370
2371 /* Mark the proclock to show we need to release this lockmode */
2372 if (locallock->nLocks > 0)
2373 locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
2374
2375 /* And remove the locallock hashtable entry */
2376 RemoveLocalLock(locallock);
2377 }
2378
2379 /* Done with the fast-path data structures */
2380 if (have_fast_path_lwlock)
2382
2383 /*
2384 * Now, scan each lock partition separately.
2385 */
2386 for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
2387 {
2388 LWLock *partitionLock;
2389 dlist_head *procLocks = &MyProc->myProcLocks[partition];
2390 dlist_mutable_iter proclock_iter;
2391
2392 partitionLock = LockHashPartitionLockByIndex(partition);
2393
2394 /*
2395 * If the proclock list for this partition is empty, we can skip
2396 * acquiring the partition lock. This optimization is trickier than
2397 * it looks, because another backend could be in process of adding
2398 * something to our proclock list due to promoting one of our
2399 * fast-path locks. However, any such lock must be one that we
2400 * decided not to delete above, so it's okay to skip it again now;
2401 * we'd just decide not to delete it again. We must, however, be
2402 * careful to re-fetch the list header once we've acquired the
2403 * partition lock, to be sure we have a valid, up-to-date pointer.
2404 * (There is probably no significant risk if pointer fetch/store is
2405 * atomic, but we don't wish to assume that.)
2406 *
2407 * XXX This argument assumes that the locallock table correctly
2408 * represents all of our fast-path locks. While allLocks mode
2409 * guarantees to clean up all of our normal locks regardless of the
2410 * locallock situation, we lose that guarantee for fast-path locks.
2411 * This is not ideal.
2412 */
2413 if (dlist_is_empty(procLocks))
2414 continue; /* needn't examine this partition */
2415
2416 LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2417
2418 dlist_foreach_modify(proclock_iter, procLocks)
2419 {
2420 PROCLOCK *proclock = dlist_container(PROCLOCK, procLink, proclock_iter.cur);
2421 bool wakeupNeeded = false;
2422
2423 Assert(proclock->tag.myProc == MyProc);
2424
2425 lock = proclock->tag.myLock;
2426
2427 /* Ignore items that are not of the lockmethod to be removed */
2428 if (LOCK_LOCKMETHOD(*lock) != lockmethodid)
2429 continue;
2430
2431 /*
2432 * In allLocks mode, force release of all locks even if locallock
2433 * table had problems
2434 */
2435 if (allLocks)
2436 proclock->releaseMask = proclock->holdMask;
2437 else
2438 Assert((proclock->releaseMask & ~proclock->holdMask) == 0);
2439
2440 /*
2441 * Ignore items that have nothing to be released, unless they have
2442 * holdMask == 0 and are therefore recyclable
2443 */
2444 if (proclock->releaseMask == 0 && proclock->holdMask != 0)
2445 continue;
2446
2447 PROCLOCK_PRINT("LockReleaseAll", proclock);
2448 LOCK_PRINT("LockReleaseAll", lock, 0);
2449 Assert(lock->nRequested >= 0);
2450 Assert(lock->nGranted >= 0);
2451 Assert(lock->nGranted <= lock->nRequested);
2452 Assert((proclock->holdMask & ~lock->grantMask) == 0);
2453
2454 /*
2455 * Release the previously-marked lock modes
2456 */
2457 for (i = 1; i <= numLockModes; i++)
2458 {
2459 if (proclock->releaseMask & LOCKBIT_ON(i))
2460 wakeupNeeded |= UnGrantLock(lock, i, proclock,
2461 lockMethodTable);
2462 }
2463 Assert((lock->nRequested >= 0) && (lock->nGranted >= 0));
2464 Assert(lock->nGranted <= lock->nRequested);
2465 LOCK_PRINT("LockReleaseAll: updated", lock, 0);
2466
2467 proclock->releaseMask = 0;
2468
2469 /* CleanUpLock will wake up waiters if needed. */
2470 CleanUpLock(lock, proclock,
2471 lockMethodTable,
2472 LockTagHashCode(&lock->tag),
2473 wakeupNeeded);
2474 } /* loop over PROCLOCKs within this partition */
2475
2476 LWLockRelease(partitionLock);
2477 } /* loop over partitions */
2478
2479#ifdef LOCK_DEBUG
2480 if (*(lockMethodTable->trace_flag))
2481 elog(LOG, "LockReleaseAll done");
2482#endif
2483}
#define dlist_foreach_modify(iter, lhead)
Definition: ilist.h:640
void VirtualXactLockTableCleanup(void)
Definition: lock.c:4550
@ LOCKTAG_TUPLE
Definition: lock.h:141
#define LOCALLOCK_LOCKMETHOD(llock)
Definition: lock.h:443
const bool * trace_flag
Definition: lock.h:113
dlist_node * cur
Definition: ilist.h:200

References Assert, CleanUpLock(), dlist_mutable_iter::cur, DEFAULT_LOCKMETHOD, dlist_container, dlist_foreach_modify, dlist_is_empty(), EligibleForRelationFastPath, elog, ERROR, FastPathUnGrantRelationLock(), PGPROC::fpInfoLock, LOCK::grantMask, hash_seq_init(), hash_seq_search(), PROCLOCK::holdMask, i, lengthof, LOCALLOCK_LOCKMETHOD, LOCALLOCK_LOCKTAG, LOCALLOCKTAG::lock, LOCALLOCK::lock, LOCK_LOCKMETHOD, LOCK_PRINT, LOCKBIT_ON, LockHashPartitionLockByIndex, LockMethodLocalHash, LockMethods, LOCALLOCK::lockOwners, LockRefindAndRelease(), LOCKTAG::locktag_field2, LOCKTAG_TUPLE, LockTagHashCode(), LOG, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), LOCALLOCKTAG::mode, PROCLOCKTAG::myLock, MyProc, PROCLOCKTAG::myProc, PGPROC::myProcLocks, LOCK::nGranted, LOCALLOCKOWNER::nLocks, LOCALLOCK::nLocks, LOCK::nRequested, NUM_LOCK_PARTITIONS, LockMethodData::numLockModes, LOCALLOCK::numLockOwners, LOCALLOCKOWNER::owner, PANIC, LOCALLOCK::proclock, PROCLOCK_PRINT, PROCLOCK::releaseMask, RemoveLocalLock(), ResourceOwnerForgetLock(), LOCK::tag, PROCLOCK::tag, LOCALLOCK::tag, LockMethodData::trace_flag, UnGrantLock(), VirtualXactLockTableCleanup(), and WARNING.

Referenced by DiscardAll(), logicalrep_worker_onexit(), ProcReleaseLocks(), and ShutdownPostgres().

◆ LockReleaseCurrentOwner()

void LockReleaseCurrentOwner ( LOCALLOCK **  locallocks,
int  nlocks 
)

Definition at line 2520 of file lock.c.

2521{
2522 if (locallocks == NULL)
2523 {
2524 HASH_SEQ_STATUS status;
2525 LOCALLOCK *locallock;
2526
2527 hash_seq_init(&status, LockMethodLocalHash);
2528
2529 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2530 ReleaseLockIfHeld(locallock, false);
2531 }
2532 else
2533 {
2534 int i;
2535
2536 for (i = nlocks - 1; i >= 0; i--)
2537 ReleaseLockIfHeld(locallocks[i], false);
2538 }
2539}
static void ReleaseLockIfHeld(LOCALLOCK *locallock, bool sessionLock)
Definition: lock.c:2555

References hash_seq_init(), hash_seq_search(), i, LockMethodLocalHash, and ReleaseLockIfHeld().

Referenced by ResourceOwnerReleaseInternal().

◆ LockReleaseSession()

void LockReleaseSession ( LOCKMETHODID  lockmethodid)

Definition at line 2490 of file lock.c.

2491{
2492 HASH_SEQ_STATUS status;
2493 LOCALLOCK *locallock;
2494
2495 if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2496 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2497
2498 hash_seq_init(&status, LockMethodLocalHash);
2499
2500 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2501 {
2502 /* Ignore items that are not of the specified lock method */
2503 if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
2504 continue;
2505
2506 ReleaseLockIfHeld(locallock, true);
2507 }
2508}

References elog, ERROR, hash_seq_init(), hash_seq_search(), lengthof, LOCALLOCK_LOCKMETHOD, LockMethodLocalHash, LockMethods, and ReleaseLockIfHeld().

Referenced by pg_advisory_unlock_all().

◆ LockTagHashCode()

uint32 LockTagHashCode ( const LOCKTAG locktag)

Definition at line 551 of file lock.c.

552{
553 return get_hash_value(LockMethodLockHash, locktag);
554}
uint32 get_hash_value(HTAB *hashp, const void *keyPtr)
Definition: dynahash.c:911

References get_hash_value(), and LockMethodLockHash.

Referenced by CheckDeadLock(), GetLockConflicts(), lock_twophase_recover(), LockAcquireExtended(), LockRefindAndRelease(), LockReleaseAll(), LockWaiterCount(), proclock_hash(), and VirtualXactLock().

◆ LockWaiterCount()

int LockWaiterCount ( const LOCKTAG locktag)

Definition at line 4761 of file lock.c.

4762{
4763 LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
4764 LOCK *lock;
4765 bool found;
4766 uint32 hashcode;
4767 LWLock *partitionLock;
4768 int waiters = 0;
4769
4770 if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4771 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4772
4773 hashcode = LockTagHashCode(locktag);
4774 partitionLock = LockHashPartitionLock(hashcode);
4775 LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4776
4777 lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
4778 locktag,
4779 hashcode,
4780 HASH_FIND,
4781 &found);
4782 if (found)
4783 {
4784 Assert(lock != NULL);
4785 waiters = lock->nRequested;
4786 }
4787 LWLockRelease(partitionLock);
4788
4789 return waiters;
4790}

References Assert, elog, ERROR, HASH_FIND, hash_search_with_hash_value(), lengthof, LockHashPartitionLock, LockMethodLockHash, LockMethods, LOCKTAG::locktag_lockmethodid, LockTagHashCode(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), and LOCK::nRequested.

Referenced by RelationExtensionLockWaiterCount().

◆ MarkLockClear()

void MarkLockClear ( LOCALLOCK locallock)

◆ PostPrepare_Locks()

void PostPrepare_Locks ( TransactionId  xid)

Definition at line 3479 of file lock.c.

3480{
3481 PGPROC *newproc = TwoPhaseGetDummyProc(xid, false);
3482 HASH_SEQ_STATUS status;
3483 LOCALLOCK *locallock;
3484 LOCK *lock;
3485 PROCLOCK *proclock;
3486 PROCLOCKTAG proclocktag;
3487 int partition;
3488
3489 /* Can't prepare a lock group follower. */
3490 Assert(MyProc->lockGroupLeader == NULL ||
3491 MyProc->lockGroupLeader == MyProc);
3492
3493 /* This is a critical section: any error means big trouble */
3494 START_CRIT_SECTION();
3495
3496 /*
3497 * First we run through the locallock table and get rid of unwanted
3498 * entries, then we scan the process's proclocks and transfer them to the
3499 * target proc.
3500 *
3501 * We do this separately because we may have multiple locallock entries
3502 * pointing to the same proclock, and we daren't end up with any dangling
3503 * pointers.
3504 */
3505 hash_seq_init(&status, LockMethodLocalHash);
3506
3507 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3508 {
3509 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3510 bool haveSessionLock;
3511 bool haveXactLock;
3512 int i;
3513
3514 if (locallock->proclock == NULL || locallock->lock == NULL)
3515 {
3516 /*
3517 * We must've run out of shared memory while trying to set up this
3518 * lock. Just forget the local entry.
3519 */
3520 Assert(locallock->nLocks == 0);
3521 RemoveLocalLock(locallock);
3522 continue;
3523 }
3524
3525 /* Ignore VXID locks */
3526 if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3527 continue;
3528
3529 /* Scan to see whether we hold it at session or transaction level */
3530 haveSessionLock = haveXactLock = false;
3531 for (i = locallock->numLockOwners - 1; i >= 0; i--)
3532 {
3533 if (lockOwners[i].owner == NULL)
3534 haveSessionLock = true;
3535 else
3536 haveXactLock = true;
3537 }
3538
3539 /* Ignore it if we have only session lock */
3540 if (!haveXactLock)
3541 continue;
3542
3543 /* This can't happen, because we already checked it */
3544 if (haveSessionLock)
3545 ereport(PANIC,
3546 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3547 errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3548
3549 /* Mark the proclock to show we need to release this lockmode */
3550 if (locallock->nLocks > 0)
3551 locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
3552
3553 /* And remove the locallock hashtable entry */
3554 RemoveLocalLock(locallock);
3555 }
3556
3557 /*
3558 * Now, scan each lock partition separately.
3559 */
3560 for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
3561 {
3562 LWLock *partitionLock;
3563 dlist_head *procLocks = &(MyProc->myProcLocks[partition]);
3564 dlist_mutable_iter proclock_iter;
3565
3566 partitionLock = LockHashPartitionLockByIndex(partition);
3567
3568 /*
3569 * If the proclock list for this partition is empty, we can skip
3570 * acquiring the partition lock. This optimization is safer than the
3571 * situation in LockReleaseAll, because we got rid of any fast-path
3572 * locks during AtPrepare_Locks, so there cannot be any case where
3573 * another backend is adding something to our lists now. For safety,
3574 * though, we code this the same way as in LockReleaseAll.
3575 */
3576 if (dlist_is_empty(procLocks))
3577 continue; /* needn't examine this partition */
3578
3579 LWLockAcquire(partitionLock, LW_EXCLUSIVE);
3580
3581 dlist_foreach_modify(proclock_iter, procLocks)
3582 {
3583 proclock = dlist_container(PROCLOCK, procLink, proclock_iter.cur);
3584
3585 Assert(proclock->tag.myProc == MyProc);
3586
3587 lock = proclock->tag.myLock;
3588
3589 /* Ignore VXID locks */
3590 if (lock->tag.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3591 continue;
3592
3593 PROCLOCK_PRINT("PostPrepare_Locks", proclock);
3594 LOCK_PRINT("PostPrepare_Locks", lock, 0);
3595 Assert(lock->nRequested >= 0);
3596 Assert(lock->nGranted >= 0);
3597 Assert(lock->nGranted <= lock->nRequested);
3598 Assert((proclock->holdMask & ~lock->grantMask) == 0);
3599
3600 /* Ignore it if nothing to release (must be a session lock) */
3601 if (proclock->releaseMask == 0)
3602 continue;
3603
3604 /* Else we should be releasing all locks */
3605 if (proclock->releaseMask != proclock->holdMask)
3606 elog(PANIC, "we seem to have dropped a bit somewhere");
3607
3608 /*
3609 * We cannot simply modify proclock->tag.myProc to reassign
3610 * ownership of the lock, because that's part of the hash key and
3611 * the proclock would then be in the wrong hash chain. Instead
3612 * use hash_update_hash_key. (We used to create a new hash entry,
3613 * but that risks out-of-memory failure if other processes are
3614 * busy making proclocks too.) We must unlink the proclock from
3615 * our procLink chain and put it into the new proc's chain, too.
3616 *
3617 * Note: the updated proclock hash key will still belong to the
3618 * same hash partition, cf proclock_hash(). So the partition lock
3619 * we already hold is sufficient for this.
3620 */
3621 dlist_delete(&proclock->procLink);
3622
3623 /*
3624 * Create the new hash key for the proclock.
3625 */
3626 proclocktag.myLock = lock;
3627 proclocktag.myProc = newproc;
3628
3629 /*
3630 * Update groupLeader pointer to point to the new proc. (We'd
3631 * better not be a member of somebody else's lock group!)
3632 */
3633 Assert(proclock->groupLeader == proclock->tag.myProc);
3634 proclock->groupLeader = newproc;
3635
3636 /*
3637 * Update the proclock. We should not find any existing entry for
3638 * the same hash key, since there can be only one entry for any
3639 * given lock with my own proc.
3640 */
3641 if (!hash_update_hash_key(LockMethodProcLockHash,
3642 proclock,
3643 &proclocktag))
3644 elog(PANIC, "duplicate entry found while reassigning a prepared transaction's locks");
3645
3646 /* Re-link into the new proc's proclock list */
3647 dlist_push_tail(&newproc->myProcLocks[partition], &proclock->procLink);
3648
3649 PROCLOCK_PRINT("PostPrepare_Locks: updated", proclock);
3650 } /* loop over PROCLOCKs within this partition */
3651
3652 LWLockRelease(partitionLock);
3653 } /* loop over partitions */
3654
3655 END_CRIT_SECTION();
3656}
bool hash_update_hash_key(HTAB *hashp, void *existingEntry, const void *newKeyPtr)
Definition: dynahash.c:1145
#define START_CRIT_SECTION()
Definition: miscadmin.h:149
#define END_CRIT_SECTION()
Definition: miscadmin.h:151

References Assert, dlist_mutable_iter::cur, dlist_container, dlist_delete(), dlist_foreach_modify, dlist_is_empty(), dlist_push_tail(), elog, END_CRIT_SECTION, ereport, errcode(), errmsg(), LOCK::grantMask, PROCLOCK::groupLeader, hash_seq_init(), hash_seq_search(), hash_update_hash_key(), PROCLOCK::holdMask, i, LOCALLOCKTAG::lock, LOCALLOCK::lock, LOCK_PRINT, LOCKBIT_ON, PGPROC::lockGroupLeader, LockHashPartitionLockByIndex, LockMethodLocalHash, LockMethodProcLockHash, LOCALLOCK::lockOwners, LOCKTAG::locktag_type, LOCKTAG_VIRTUALTRANSACTION, LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), LOCALLOCKTAG::mode, PROCLOCKTAG::myLock, MyProc, PROCLOCKTAG::myProc, PGPROC::myProcLocks, LOCK::nGranted, LOCALLOCK::nLocks, LOCK::nRequested, NUM_LOCK_PARTITIONS, LOCALLOCK::numLockOwners, PANIC, PROCLOCK::procLink, LOCALLOCK::proclock, PROCLOCK_PRINT, PROCLOCK::releaseMask, RemoveLocalLock(), START_CRIT_SECTION, LOCK::tag, PROCLOCK::tag, LOCALLOCK::tag, and TwoPhaseGetDummyProc().

Referenced by PrepareTransaction().

◆ proclock_hash()

static uint32 proclock_hash ( const void *  key,
Size  keysize 
)
static

Definition at line 568 of file lock.c.

569{
570 const PROCLOCKTAG *proclocktag = (const PROCLOCKTAG *) key;
571 uint32 lockhash;
572 Datum procptr;
573
574 Assert(keysize == sizeof(PROCLOCKTAG));
575
576 /* Look into the associated LOCK object, and compute its hash code */
577 lockhash = LockTagHashCode(&proclocktag->myLock->tag);
578
579 /*
580 * To make the hash code also depend on the PGPROC, we xor the proc
581 * struct's address into the hash code, left-shifted so that the
582 * partition-number bits don't change. Since this is only a hash, we
583 * don't care if we lose high-order bits of the address; use an
584 * intermediate variable to suppress cast-pointer-to-int warnings.
585 */
586 procptr = PointerGetDatum(proclocktag->myProc);
587 lockhash ^= ((uint32) procptr) << LOG2_NUM_LOCK_PARTITIONS;
588
589 return lockhash;
590}
#define LOG2_NUM_LOCK_PARTITIONS
Definition: lwlock.h:96
static Datum PointerGetDatum(const void *X)
Definition: postgres.h:322
uintptr_t Datum
Definition: postgres.h:64

References Assert, sort-test::key, LockTagHashCode(), LOG2_NUM_LOCK_PARTITIONS, PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, PointerGetDatum(), and LOCK::tag.

Referenced by LockManagerShmemInit().

◆ ProcLockHashCode()

static uint32 ProcLockHashCode ( const PROCLOCKTAG proclocktag,
uint32  hashcode 
)
inlinestatic

Definition at line 599 of file lock.c.

600{
601 uint32 lockhash = hashcode;
602 Datum procptr;
603
604 /*
605 * This must match proclock_hash()!
606 */
607 procptr = PointerGetDatum(proclocktag->myProc);
608 lockhash ^= ((uint32) procptr) << LOG2_NUM_LOCK_PARTITIONS;
609
610 return lockhash;
611}

References LOG2_NUM_LOCK_PARTITIONS, PROCLOCKTAG::myProc, and PointerGetDatum().

Referenced by CleanUpLock(), FastPathGetRelationLockEntry(), lock_twophase_recover(), LockAcquireExtended(), LockRefindAndRelease(), and SetupLockInTable().

◆ ReleaseLockIfHeld()

static void ReleaseLockIfHeld ( LOCALLOCK locallock,
bool  sessionLock 
)
static

Definition at line 2555 of file lock.c.

2556{
2557 ResourceOwner owner;
2558 LOCALLOCKOWNER *lockOwners;
2559 int i;
2560
2561 /* Identify owner for lock (must match LockRelease!) */
2562 if (sessionLock)
2563 owner = NULL;
2564 else
2565 owner = CurrentResourceOwner;
2566
2567 /* Scan to see if there are any locks belonging to the target owner */
2568 lockOwners = locallock->lockOwners;
2569 for (i = locallock->numLockOwners - 1; i >= 0; i--)
2570 {
2571 if (lockOwners[i].owner == owner)
2572 {
2573 Assert(lockOwners[i].nLocks > 0);
2574 if (lockOwners[i].nLocks < locallock->nLocks)
2575 {
2576 /*
2577 * We will still hold this lock after forgetting this
2578 * ResourceOwner.
2579 */
2580 locallock->nLocks -= lockOwners[i].nLocks;
2581 /* compact out unused slot */
2582 locallock->numLockOwners--;
2583 if (owner != NULL)
2584 ResourceOwnerForgetLock(owner, locallock);
2585 if (i < locallock->numLockOwners)
2586 lockOwners[i] = lockOwners[locallock->numLockOwners];
2587 }
2588 else
2589 {
2590 Assert(lockOwners[i].nLocks == locallock->nLocks);
2591 /* We want to call LockRelease just once */
2592 lockOwners[i].nLocks = 1;
2593 locallock->nLocks = 1;
2594 if (!LockRelease(&locallock->tag.lock,
2595 locallock->tag.mode,
2596 sessionLock))
2597 elog(WARNING, "ReleaseLockIfHeld: failed??");
2598 }
2599 break;
2600 }
2601 }
2602}
bool LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
Definition: lock.c:2011

References Assert, CurrentResourceOwner, elog, i, LOCALLOCKTAG::lock, LOCALLOCK::lockOwners, LockRelease(), LOCALLOCKTAG::mode, LOCALLOCKOWNER::nLocks, LOCALLOCK::nLocks, LOCALLOCK::numLockOwners, ResourceOwnerForgetLock(), LOCALLOCK::tag, and WARNING.

Referenced by LockReleaseCurrentOwner(), and LockReleaseSession().

◆ RemoveFromWaitQueue()

void RemoveFromWaitQueue ( PGPROC proc,
uint32  hashcode 
)

Definition at line 1955 of file lock.c.

1956{
1957 LOCK *waitLock = proc->waitLock;
1958 PROCLOCK *proclock = proc->waitProcLock;
1959 LOCKMODE lockmode = proc->waitLockMode;
1960 LOCKMETHODID lockmethodid = LOCK_LOCKMETHOD(*waitLock);
1961
1962 /* Make sure proc is waiting */
1963 Assert(proc->waitStatus == PROC_WAIT_STATUS_WAITING);
1964 Assert(proc->links.next != NULL);
1965 Assert(waitLock);
1966 Assert(!dclist_is_empty(&waitLock->waitProcs));
1967 Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
1968
1969 /* Remove proc from lock's wait queue */
1970 dclist_delete_from_thoroughly(&waitLock->waitProcs, &proc->links);
1971
1972 /* Undo increments of request counts by waiting process */
1973 Assert(waitLock->nRequested > 0);
1974 Assert(waitLock->nRequested > proc->waitLock->nGranted);
1975 waitLock->nRequested--;
1976 Assert(waitLock->requested[lockmode] > 0);
1977 waitLock->requested[lockmode]--;
1978 /* don't forget to clear waitMask bit if appropriate */
1979 if (waitLock->granted[lockmode] == waitLock->requested[lockmode])
1980 waitLock->waitMask &= LOCKBIT_OFF(lockmode);
1981
1982 /* Clean up the proc's own state, and pass it the ok/fail signal */
1983 proc->waitLock = NULL;
1984 proc->waitProcLock = NULL;
1985 proc->waitStatus = PROC_WAIT_STATUS_ERROR;
1986
1987 /*
1988 * Delete the proclock immediately if it represents no already-held locks.
1989 * (This must happen now because if the owner of the lock decides to
1990 * release it, and the requested/granted counts then go to zero,
1991 * LockRelease expects there to be no remaining proclocks.) Then see if
1992 * any other waiters for the lock can be woken up now.
1993 */
1994 CleanUpLock(waitLock, proclock,
1995 LockMethods[lockmethodid], hashcode,
1996 true);
1997}
static bool dclist_is_empty(const dclist_head *head)
Definition: ilist.h:682
static void dclist_delete_from_thoroughly(dclist_head *head, dlist_node *node)
Definition: ilist.h:776
PROCLOCK * waitProcLock
Definition: proc.h:233
ProcWaitStatus waitStatus
Definition: proc.h:167

References Assert, CleanUpLock(), dclist_delete_from_thoroughly(), dclist_is_empty(), LOCK::granted, lengthof, PGPROC::links, LOCK_LOCKMETHOD, LOCKBIT_OFF, LockMethods, dlist_node::next, LOCK::nGranted, LOCK::nRequested, PROC_WAIT_STATUS_ERROR, PROC_WAIT_STATUS_WAITING, LOCK::requested, PGPROC::waitLock, PGPROC::waitLockMode, LOCK::waitMask, PGPROC::waitProcLock, LOCK::waitProcs, and PGPROC::waitStatus.

Referenced by CheckDeadLock(), and LockErrorCleanup().

◆ RemoveLocalLock()

static void RemoveLocalLock ( LOCALLOCK locallock)
static

Definition at line 1425 of file lock.c.

1426{
1427 int i;
1428
1429 for (i = locallock->numLockOwners - 1; i >= 0; i--)
1430 {
1431 if (locallock->lockOwners[i].owner != NULL)
1432 ResourceOwnerForgetLock(locallock->lockOwners[i].owner, locallock);
1433 }
1434 locallock->numLockOwners = 0;
1435 if (locallock->lockOwners != NULL)
1436 pfree(locallock->lockOwners);
1437 locallock->lockOwners = NULL;
1438
1439 if (locallock->holdsStrongLockCount)
1440 {
1441 uint32 fasthashcode;
1442
1443 fasthashcode = FastPathStrongLockHashPartition(locallock->hashcode);
1444
1445 SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
1446 Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
1447 FastPathStrongRelationLocks->count[fasthashcode]--;
1448 locallock->holdsStrongLockCount = false;
1449 SpinLockRelease(&FastPathStrongRelationLocks->mutex);
1450 }
1451
1452 if (!hash_search(LockMethodLocalHash,
1453 &(locallock->tag),
1454 HASH_REMOVE, NULL))
1455 elog(WARNING, "locallock table corrupted");
1456
1457 /*
1458 * Indicate that the lock is released for certain types of locks
1459 */
1460 CheckAndSetLockHeld(locallock, false);
1461}
void pfree(void *pointer)
Definition: mcxt.c:1521

References Assert, CheckAndSetLockHeld(), FastPathStrongRelationLockData::count, elog, FastPathStrongLockHashPartition, FastPathStrongRelationLocks, HASH_REMOVE, hash_search(), LOCALLOCK::hashcode, LOCALLOCK::holdsStrongLockCount, i, LockMethodLocalHash, LOCALLOCK::lockOwners, FastPathStrongRelationLockData::mutex, LOCALLOCK::numLockOwners, LOCALLOCKOWNER::owner, pfree(), ResourceOwnerForgetLock(), SpinLockAcquire, SpinLockRelease, LOCALLOCK::tag, and WARNING.

Referenced by LockAcquireExtended(), LockHasWaiters(), LockRelease(), LockReleaseAll(), and PostPrepare_Locks().

◆ SetupLockInTable()

static PROCLOCK * SetupLockInTable ( LockMethod  lockMethodTable,
PGPROC proc,
const LOCKTAG locktag,
uint32  hashcode,
LOCKMODE  lockmode 
)
static

Definition at line 1232 of file lock.c.

1234{
1235 LOCK *lock;
1236 PROCLOCK *proclock;
1237 PROCLOCKTAG proclocktag;
1238 uint32 proclock_hashcode;
1239 bool found;
1240
1241 /*
1242 * Find or create a lock with this tag.
1243 */
1244 lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
1245 locktag,
1246 hashcode,
1247 HASH_ENTER_NULL,
1248 &found);
1249 if (!lock)
1250 return NULL;
1251
1252 /*
1253 * if it's a new lock object, initialize it
1254 */
1255 if (!found)
1256 {
1257 lock->grantMask = 0;
1258 lock->waitMask = 0;
1259 dlist_init(&lock->procLocks);
1260 dclist_init(&lock->waitProcs);
1261 lock->nRequested = 0;
1262 lock->nGranted = 0;
1263 MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
1264 MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
1265 LOCK_PRINT("LockAcquire: new", lock, lockmode);
1266 }
1267 else
1268 {
1269 LOCK_PRINT("LockAcquire: found", lock, lockmode);
1270 Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
1271 Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
1272 Assert(lock->nGranted <= lock->nRequested);
1273 }
1274
1275 /*
1276 * Create the hash key for the proclock table.
1277 */
1278 proclocktag.myLock = lock;
1279 proclocktag.myProc = proc;
1280
1281 proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
1282
1283 /*
1284 * Find or create a proclock entry with this tag
1285 */
1286 proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
1287 &proclocktag,
1288 proclock_hashcode,
1289 HASH_ENTER_NULL,
1290 &found);
1291 if (!proclock)
1292 {
1293 /* Oops, not enough shmem for the proclock */
1294 if (lock->nRequested == 0)
1295 {
1296 /*
1297 * There are no other requestors of this lock, so garbage-collect
1298 * the lock object. We *must* do this to avoid a permanent leak
1299 * of shared memory, because there won't be anything to cause
1300 * anyone to release the lock object later.
1301 */
1302 Assert(dlist_is_empty(&(lock->procLocks)));
1303 if (!hash_search_with_hash_value(LockMethodLockHash,
1304 &(lock->tag),
1305 hashcode,
1306 HASH_REMOVE,
1307 NULL))
1308 elog(PANIC, "lock table corrupted");
1309 }
1310 return NULL;
1311 }
1312
1313 /*
1314 * If new, initialize the new entry
1315 */
1316 if (!found)
1317 {
1318 uint32 partition = LockHashPartition(hashcode);
1319
1320 /*
1321 * It might seem unsafe to access proclock->groupLeader without a
1322 * lock, but it's not really. Either we are initializing a proclock
1323 * on our own behalf, in which case our group leader isn't changing
1324 * because the group leader for a process can only ever be changed by
1325 * the process itself; or else we are transferring a fast-path lock to
1326 the main lock table, in which case that process can't change its
1327 * lock group leader without first releasing all of its locks (and in
1328 * particular the one we are currently transferring).
1329 */
1330 proclock->groupLeader = proc->lockGroupLeader != NULL ?
1331 proc->lockGroupLeader : proc;
1332 proclock->holdMask = 0;
1333 proclock->releaseMask = 0;
1334 /* Add proclock to appropriate lists */
1335 dlist_push_tail(&lock->procLocks, &proclock->lockLink);
1336 dlist_push_tail(&proc->myProcLocks[partition], &proclock->procLink);
1337 PROCLOCK_PRINT("LockAcquire: new", proclock);
1338 }
1339 else
1340 {
1341 PROCLOCK_PRINT("LockAcquire: found", proclock);
1342 Assert((proclock->holdMask & ~lock->grantMask) == 0);
1343
1344#ifdef CHECK_DEADLOCK_RISK
1345
1346 /*
1347 * Issue warning if we already hold a lower-level lock on this object
1348 * and do not hold a lock of the requested level or higher. This
1349 * indicates a deadlock-prone coding practice (eg, we'd have a
1350 * deadlock if another backend were following the same code path at
1351 * about the same time).
1352 *
1353 * This is not enabled by default, because it may generate log entries
1354 * about user-level coding practices that are in fact safe in context.
1355 * It can be enabled to help find system-level problems.
1356 *
1357 * XXX Doing numeric comparison on the lockmodes is a hack; it'd be
1358 * better to use a table. For now, though, this works.
1359 */
1360 {
1361 int i;
1362
1363 for (i = lockMethodTable->numLockModes; i > 0; i--)
1364 {
1365 if (proclock->holdMask & LOCKBIT_ON(i))
1366 {
1367 if (i >= (int) lockmode)
1368 break; /* safe: we have a lock >= req level */
1369 elog(LOG, "deadlock risk: raising lock level"
1370 " from %s to %s on object %u/%u/%u",
1371 lockMethodTable->lockModeNames[i],
1372 lockMethodTable->lockModeNames[lockmode],
1373 lock->tag.locktag_field1, lock->tag.locktag_field2,
1374 lock->tag.locktag_field3);
1375 break;
1376 }
1377 }
1378 }
1379#endif /* CHECK_DEADLOCK_RISK */
1380 }
1381
1382 /*
1383 * lock->nRequested and lock->requested[] count the total number of
1384 * requests, whether granted or waiting, so increment those immediately.
1385 * The other counts don't increment till we get the lock.
1386 */
1387 lock->nRequested++;
1388 lock->requested[lockmode]++;
1389 Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
1390
1391 /*
1392 * We shouldn't already hold the desired lock; else locallock table is
1393 * broken.
1394 */
1395 if (proclock->holdMask & LOCKBIT_ON(lockmode))
1396 elog(ERROR, "lock %s on object %u/%u/%u is already held",
1397 lockMethodTable->lockModeNames[lockmode],
1398 lock->tag.locktag_field1, lock->tag.locktag_field2,
1399 lock->tag.locktag_field3);
1400
1401 return proclock;
1402}

References Assert, dclist_init(), dlist_init(), dlist_is_empty(), dlist_push_tail(), elog, ERROR, LOCK::granted, LOCK::grantMask, PROCLOCK::groupLeader, HASH_ENTER_NULL, HASH_REMOVE, hash_search_with_hash_value(), PROCLOCK::holdMask, i, LOCK_PRINT, LOCKBIT_ON, PGPROC::lockGroupLeader, LockHashPartition, PROCLOCK::lockLink, LockMethodLockHash, LockMethodProcLockHash, LockMethodData::lockModeNames, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_field3, LOG, MAX_LOCKMODES, MemSet, PROCLOCKTAG::myLock, PROCLOCKTAG::myProc, PGPROC::myProcLocks, LOCK::nGranted, LOCK::nRequested, LockMethodData::numLockModes, PANIC, PROCLOCK::procLink, PROCLOCK_PRINT, ProcLockHashCode(), LOCK::procLocks, PROCLOCK::releaseMask, LOCK::requested, LOCK::tag, LOCK::waitMask, and LOCK::waitProcs.

Referenced by FastPathGetRelationLockEntry(), FastPathTransferRelationLocks(), LockAcquireExtended(), and VirtualXactLock().

◆ UnGrantLock()

static bool UnGrantLock ( LOCK lock,
LOCKMODE  lockmode,
PROCLOCK proclock,
LockMethod  lockMethodTable 
)
static

Definition at line 1630 of file lock.c.

1632{
1633 bool wakeupNeeded = false;
1634
1635 Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
1636 Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
1637 Assert(lock->nGranted <= lock->nRequested);
1638
1639 /*
1640 * fix the general lock stats
1641 */
1642 lock->nRequested--;
1643 lock->requested[lockmode]--;
1644 lock->nGranted--;
1645 lock->granted[lockmode]--;
1646
1647 if (lock->granted[lockmode] == 0)
1648 {
1649 /* change the conflict mask. No more of this lock type. */
1650 lock->grantMask &= LOCKBIT_OFF(lockmode);
1651 }
1652
1653 LOCK_PRINT("UnGrantLock: updated", lock, lockmode);
1654
1655 /*
1656 * We need only run ProcLockWakeup if the released lock conflicts with at
1657 * least one of the lock types requested by waiter(s). Otherwise whatever
1658 * conflict made them wait must still exist. NOTE: before MVCC, we could
1659 * skip wakeup if lock->granted[lockmode] was still positive. But that's
1660 * not true anymore, because the remaining granted locks might belong to
1661 * some waiter, who could now be awakened because he doesn't conflict with
1662 * his own locks.
1663 */
1664 if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
1665 wakeupNeeded = true;
1666
1667 /*
1668 * Now fix the per-proclock state.
1669 */
1670 proclock->holdMask &= LOCKBIT_OFF(lockmode);
1671 PROCLOCK_PRINT("UnGrantLock: updated", proclock);
1672
1673 return wakeupNeeded;
1674}

References Assert, LockMethodData::conflictTab, LOCK::granted, LOCK::grantMask, PROCLOCK::holdMask, LOCK_PRINT, LOCKBIT_OFF, LOCK::nGranted, LOCK::nRequested, PROCLOCK_PRINT, LOCK::requested, and LOCK::waitMask.

Referenced by LockRefindAndRelease(), LockRelease(), and LockReleaseAll().

◆ VirtualXactLock()

bool VirtualXactLock ( VirtualTransactionId  vxid,
bool  wait 
)

Definition at line 4650 of file lock.c.

4651{
4652 LOCKTAG tag;
4653 PGPROC *proc;
4654 TransactionId xid = InvalidTransactionId;
4655
4656 Assert(VirtualTransactionIdIsValid(vxid));
4657
4658 if (VirtualTransactionIdIsRecoveredPreparedXact(vxid))
4659 /* no vxid lock; localTransactionId is a normal, locked XID */
4660 return XactLockForVirtualXact(vxid, vxid.localTransactionId, wait);
4661
4662 SET_LOCKTAG_VIRTUALTRANSACTION(tag, vxid);
4663
4664 /*
4665 * If a lock table entry must be made, this is the PGPROC on whose behalf
4666 * it must be done. Note that the transaction might end or the PGPROC
4667 * might be reassigned to a new backend before we get around to examining
4668 * it, but it doesn't matter. If we find upon examination that the
4669 * relevant lxid is no longer running here, that's enough to prove that
4670 * it's no longer running anywhere.
4671 */
4672 proc = ProcNumberGetProc(vxid.procNumber);
4673 if (proc == NULL)
4674 return XactLockForVirtualXact(vxid, InvalidTransactionId, wait);
4675
4676 /*
4677 * We must acquire this lock before checking the procNumber and lxid
4678 * against the ones we're waiting for. The target backend will only set
4679 * or clear lxid while holding this lock.
4680 */
4681 LWLockAcquire(&proc->fpInfoLock, LW_EXCLUSIVE);
4682
4683 if (proc->vxid.procNumber != vxid.procNumber
4684 || proc->vxid.lxid != vxid.localTransactionId)
4685 {
4686 /* VXID ended */
4687 LWLockRelease(&proc->fpInfoLock);
4688 return XactLockForVirtualXact(vxid, InvalidTransactionId, wait);
4689 }
4690
4691 /*
4692 * If we aren't asked to wait, there's no need to set up a lock table
4693 * entry. The transaction is still in progress, so just return false.
4694 */
4695 if (!wait)
4696 {
4697 LWLockRelease(&proc->fpInfoLock);
4698 return false;
4699 }
4700
4701 /*
4702 * OK, we're going to need to sleep on the VXID. But first, we must set
4703 * up the primary lock table entry, if needed (ie, convert the proc's
4704 * fast-path lock on its VXID to a regular lock).
4705 */
4706 if (proc->fpVXIDLock)
4707 {
4708 PROCLOCK *proclock;
4709 uint32 hashcode;
4710 LWLock *partitionLock;
4711
4712 hashcode = LockTagHashCode(&tag);
4713
4714 partitionLock = LockHashPartitionLock(hashcode);
4715 LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4716
4717 proclock = SetupLockInTable(LockMethods[DEFAULT_LOCKMETHOD], proc,
4718 &tag, hashcode, ExclusiveLock);
4719 if (!proclock)
4720 {
4721 LWLockRelease(partitionLock);
4722 LWLockRelease(&proc->fpInfoLock);
4723 ereport(ERROR,
4724 (errcode(ERRCODE_OUT_OF_MEMORY),
4725 errmsg("out of shared memory"),
4726 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
4727 }
4728 GrantLock(proclock->tag.myLock, proclock, ExclusiveLock);
4729
4730 LWLockRelease(partitionLock);
4731
4732 proc->fpVXIDLock = false;
4733 }
4734
4735 /*
4736 * If the proc has an XID now, we'll avoid a TwoPhaseGetXidByVirtualXID()
4737 * search. The proc might have assigned this XID but not yet locked it,
4738 * in which case the proc will lock this XID before releasing the VXID.
4739 * The fpInfoLock critical section excludes VirtualXactLockTableCleanup(),
4740 * so we won't save an XID of a different VXID. It doesn't matter whether
4741 * we save this before or after setting up the primary lock table entry.
4742 */
4743 xid = proc->xid;
4744
4745 /* Done with proc->fpLockBits */
4746 LWLockRelease(&proc->fpInfoLock);
4747
4748 /* Time to wait. */
4749 (void) LockAcquire(&tag, ShareLock, false, false);
4750
4751 LockRelease(&tag, ShareLock, false);
4752 return XactLockForVirtualXact(vxid, xid, wait);
4753}
static bool XactLockForVirtualXact(VirtualTransactionId vxid, TransactionId xid, bool wait)
Definition: lock.c:4599
LockAcquireResult LockAcquire(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock, bool dontWait)
Definition: lock.c:803
#define VirtualTransactionIdIsRecoveredPreparedXact(vxid)
Definition: lock.h:69
#define ShareLock
Definition: lockdefs.h:40
PGPROC * ProcNumberGetProc(ProcNumber procNumber)
Definition: procarray.c:3138
#define InvalidTransactionId
Definition: transam.h:31

References Assert, DEFAULT_LOCKMETHOD, ereport, errcode(), errhint(), errmsg(), ERROR, ExclusiveLock, PGPROC::fpInfoLock, PGPROC::fpLocalTransactionId, PGPROC::fpVXIDLock, GrantLock(), InvalidTransactionId, VirtualTransactionId::localTransactionId, LockAcquire(), LockHashPartitionLock, LockMethods, LockRelease(), LockTagHashCode(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), PROCLOCKTAG::myLock, VirtualTransactionId::procNumber, PGPROC::procNumber, ProcNumberGetProc(), SET_LOCKTAG_VIRTUALTRANSACTION, SetupLockInTable(), ShareLock, PROCLOCK::tag, VirtualTransactionIdIsRecoveredPreparedXact, VirtualTransactionIdIsValid, PGPROC::vxid, XactLockForVirtualXact(), and PGPROC::xid.

Referenced by ResolveRecoveryConflictWithVirtualXIDs(), WaitForLockersMultiple(), and WaitForOlderSnapshots().

◆ VirtualXactLockTableCleanup()

void VirtualXactLockTableCleanup ( void  )

Definition at line 4550 of file lock.c.

4551{
4552 bool fastpath;
4553 LocalTransactionId lxid;
4554
4556
4557 /*
4558 * Clean up shared memory state.
4559 */
4561
4562 fastpath = MyProc->fpVXIDLock;
4564 MyProc->fpVXIDLock = false;
4566
4568
4569 /*
4570 * If fpVXIDLock has been cleared without touching fpLocalTransactionId,
4571 * that means someone transferred the lock to the main lock table.
4572 */
4573 if (!fastpath && LocalTransactionIdIsValid(lxid))
4574 {
4576 LOCKTAG locktag;
4577
4578 vxid.procNumber = MyProcNumber;
4579 vxid.localTransactionId = lxid;
4580 SET_LOCKTAG_VIRTUALTRANSACTION(locktag, vxid);
4581
4583 &locktag, ExclusiveLock, false);
4584 }
4585}
uint32 LocalTransactionId
Definition: c.h:608
ProcNumber MyProcNumber
Definition: globals.c:89
#define LocalTransactionIdIsValid(lxid)
Definition: lock.h:66

References Assert, DEFAULT_LOCKMETHOD, ExclusiveLock, PGPROC::fpInfoLock, PGPROC::fpLocalTransactionId, PGPROC::fpVXIDLock, INVALID_PROC_NUMBER, InvalidLocalTransactionId, VirtualTransactionId::localTransactionId, LocalTransactionIdIsValid, LockMethods, LockRefindAndRelease(), LW_EXCLUSIVE, LWLockAcquire(), LWLockRelease(), MyProc, MyProcNumber, VirtualTransactionId::procNumber, PGPROC::procNumber, SET_LOCKTAG_VIRTUALTRANSACTION, and PGPROC::vxid.

Referenced by LockReleaseAll(), and ShutdownRecoveryTransactionEnvironment().

◆ VirtualXactLockTableInsert()

◆ WaitOnLock()

static ProcWaitStatus WaitOnLock ( LOCALLOCK locallock,
ResourceOwner  owner 
)
static

Definition at line 1872 of file lock.c.

1873{
1874 ProcWaitStatus result;
1875
1876 TRACE_POSTGRESQL_LOCK_WAIT_START(locallock->tag.lock.locktag_field1,
1877 locallock->tag.lock.locktag_field2,
1878 locallock->tag.lock.locktag_field3,
1879 locallock->tag.lock.locktag_field4,
1880 locallock->tag.lock.locktag_type,
1881 locallock->tag.mode);
1882
1883 /* adjust the process title to indicate that it's waiting */
1884 set_ps_display_suffix("waiting");
1885
1886 /*
1887 * Record the fact that we are waiting for a lock, so that
1888 * LockErrorCleanup will clean up if cancel/die happens.
1889 */
1890 awaitedLock = locallock;
1891 awaitedOwner = owner;
1892
1893 /*
1894 * NOTE: Think not to put any shared-state cleanup after the call to
1895 * ProcSleep, in either the normal or failure path. The lock state must
1896 * be fully set by the lock grantor, or by CheckDeadLock if we give up
1897 * waiting for the lock. This is necessary because of the possibility
1898 * that a cancel/die interrupt will interrupt ProcSleep after someone else
1899 * grants us the lock, but before we've noticed it. Hence, after granting,
1900 * the locktable state must fully reflect the fact that we own the lock;
1901 * we can't do additional work on return.
1902 *
1903 * We can and do use a PG_TRY block to try to clean up after failure, but
1904 * this still has a major limitation: elog(FATAL) can occur while waiting
1905 * (eg, a "die" interrupt), and then control won't come back here. So all
1906 * cleanup of essential state should happen in LockErrorCleanup, not here.
1907 * We can use PG_TRY to clear the "waiting" status flags, since doing that
1908 * is unimportant if the process exits.
1909 */
1910 PG_TRY();
1911 {
1912 result = ProcSleep(locallock);
1913 }
1914 PG_CATCH();
1915 {
1916 /* In this path, awaitedLock remains set until LockErrorCleanup */
1917
1918 /* reset ps display to remove the suffix */
1920
1921 /* and propagate the error */
1922 PG_RE_THROW();
1923 }
1924 PG_END_TRY();
1925
1926 /*
1927 * We no longer want LockErrorCleanup to do anything.
1928 */
1929 awaitedLock = NULL;
1930
1931 /* reset ps display to remove the suffix */
1933
1934 TRACE_POSTGRESQL_LOCK_WAIT_DONE(locallock->tag.lock.locktag_field1,
1935 locallock->tag.lock.locktag_field2,
1936 locallock->tag.lock.locktag_field3,
1937 locallock->tag.lock.locktag_field4,
1938 locallock->tag.lock.locktag_type,
1939 locallock->tag.mode);
1940
1941 return result;
1942}
#define PG_RE_THROW()
Definition: elog.h:412
#define PG_TRY(...)
Definition: elog.h:371
#define PG_END_TRY(...)
Definition: elog.h:396
#define PG_CATCH(...)
Definition: elog.h:381
void set_ps_display_remove_suffix(void)
Definition: ps_status.c:423
void set_ps_display_suffix(const char *suffix)
Definition: ps_status.c:371
ProcWaitStatus ProcSleep(LOCALLOCK *locallock)
Definition: proc.c:1258
uint16 locktag_field4
Definition: lock.h:169

References awaitedLock, awaitedOwner, LOCALLOCKTAG::lock, LOCKTAG::locktag_field1, LOCKTAG::locktag_field2, LOCKTAG::locktag_field3, LOCKTAG::locktag_field4, LOCKTAG::locktag_type, LOCALLOCKTAG::mode, PG_CATCH, PG_END_TRY, PG_RE_THROW, PG_TRY, ProcSleep(), set_ps_display_remove_suffix(), set_ps_display_suffix(), and LOCALLOCK::tag.

Referenced by LockAcquireExtended().

◆ XactLockForVirtualXact()

static bool XactLockForVirtualXact ( VirtualTransactionId  vxid,
TransactionId  xid,
bool  wait 
)
static

Definition at line 4599 of file lock.c.

4601{
4602 bool more = false;
4603
4604 /* There is no point to wait for 2PCs if you have no 2PCs. */
4605 if (max_prepared_xacts == 0)
4606 return true;
4607
4608 do
4609 {
4611 LOCKTAG tag;
4612
4613 /* Clear state from previous iterations. */
4614 if (more)
4615 {
4617 more = false;
4618 }
4619
4620 /* If we have no xid, try to find one. */
4621 if (!TransactionIdIsValid(xid))
4622 xid = TwoPhaseGetXidByVirtualXID(vxid, &more);
4623 if (!TransactionIdIsValid(xid))
4624 {
4625 Assert(!more);
4626 return true;
4627 }
4628
4629 /* Check or wait for XID completion. */
4630 SET_LOCKTAG_TRANSACTION(tag, xid);
4631 lar = LockAcquire(&tag, ShareLock, false, !wait);
4632 if (lar == LOCKACQUIRE_NOT_AVAIL)
4633 return false;
4634 LockRelease(&tag, ShareLock, false);
4635 } while (more);
4636
4637 return true;
4638}
#define SET_LOCKTAG_TRANSACTION(locktag, xid)
Definition: lock.h:226
LockAcquireResult
Definition: lock.h:500
TransactionId TwoPhaseGetXidByVirtualXID(VirtualTransactionId vxid, bool *have_more)
Definition: twophase.c:852

References Assert, InvalidTransactionId, LockAcquire(), LOCKACQUIRE_NOT_AVAIL, LockRelease(), max_prepared_xacts, SET_LOCKTAG_TRANSACTION, ShareLock, TransactionIdIsValid, and TwoPhaseGetXidByVirtualXID().

Referenced by VirtualXactLock().

Variable Documentation

◆ awaitedLock

LOCALLOCK* awaitedLock
static

Definition at line 323 of file lock.c.

Referenced by GetAwaitedLock(), GrantAwaitedLock(), and WaitOnLock().

◆ awaitedOwner

ResourceOwner awaitedOwner
static

Definition at line 324 of file lock.c.

Referenced by GrantAwaitedLock(), and WaitOnLock().

◆ default_lockmethod

const LockMethodData default_lockmethod
static
Initial value:
= {
	MaxLockMode,
	LockConflicts,
	lock_mode_names,
	&Dummy_trace
}
static bool Dummy_trace
Definition: lock.c:120
static const char *const lock_mode_names[]
Definition: lock.c:106
static const LOCKMASK LockConflicts[]
Definition: lock.c:63

Definition at line 123 of file lock.c.

◆ Dummy_trace

bool Dummy_trace = false
static

Definition at line 120 of file lock.c.

◆ FastPathLocalUseCounts

int FastPathLocalUseCounts[FP_LOCK_GROUPS_PER_BACKEND_MAX]
static

◆ FastPathLockGroupsPerBackend

int FastPathLockGroupsPerBackend = 0

◆ FastPathStrongRelationLocks

◆ lock_mode_names

const char* const lock_mode_names[]
static
Initial value:
=
{
"INVALID",
"AccessShareLock",
"RowShareLock",
"RowExclusiveLock",
"ShareUpdateExclusiveLock",
"ShareLock",
"ShareRowExclusiveLock",
"ExclusiveLock",
"AccessExclusiveLock"
}

Definition at line 106 of file lock.c.

◆ LockConflicts

const LOCKMASK LockConflicts[]
static

Definition at line 63 of file lock.c.

◆ LockMethodLocalHash

◆ LockMethodLockHash

◆ LockMethodProcLockHash

◆ LockMethods

◆ max_locks_per_xact

int max_locks_per_xact

◆ PG_USED_FOR_ASSERTS_ONLY

bool IsRelationExtensionLockHeld PG_USED_FOR_ASSERTS_ONLY = false
static

Definition at line 189 of file lock.c.

◆ StrongLockInProgress

LOCALLOCK* StrongLockInProgress
static

Definition at line 322 of file lock.c.

Referenced by AbortStrongLockAcquire(), BeginStrongLockAcquire(), and FinishStrongLockAcquire().

◆ user_lockmethod

const LockMethodData user_lockmethod
static
Initial value:

Definition at line 134 of file lock.c.