lock.c (PostgreSQL git master)
1/*-------------------------------------------------------------------------
2 *
3 * lock.c
4 * POSTGRES primary lock mechanism
5 *
6 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
7 * Portions Copyright (c) 1994, Regents of the University of California
8 *
9 *
10 * IDENTIFICATION
11 * src/backend/storage/lmgr/lock.c
12 *
13 * NOTES
14 * A lock table is a shared memory hash table. When
15 * a process tries to acquire a lock of a type that conflicts
16 * with existing locks, it is put to sleep using the routines
17 * in storage/lmgr/proc.c.
18 *
19 * For the most part, this code should be invoked via lmgr.c
20 * or another lock-management module, not directly.
21 *
22 * Interface:
23 *
24 * LockManagerShmemInit(), GetLocksMethodTable(), GetLockTagsMethodTable(),
25 * LockAcquire(), LockRelease(), LockReleaseAll(),
26 * LockCheckConflicts(), GrantLock()
27 *
28 *-------------------------------------------------------------------------
29 */
30#include "postgres.h"
31
32#include <signal.h>
33#include <unistd.h>
34
35#include "access/transam.h"
36#include "access/twophase.h"
38#include "access/xlog.h"
39#include "access/xlogutils.h"
40#include "miscadmin.h"
41#include "pg_trace.h"
42#include "storage/lmgr.h"
43#include "storage/proc.h"
44#include "storage/procarray.h"
45#include "storage/spin.h"
46#include "storage/standby.h"
47#include "utils/memutils.h"
48#include "utils/ps_status.h"
49#include "utils/resowner.h"
50
51
52/* GUC variables */
53int max_locks_per_xact; /* used to set the lock table size */
54bool log_lock_failures = false;
55
56#define NLOCKENTS() \
57 mul_size(max_locks_per_xact, add_size(MaxBackends, max_prepared_xacts))
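/*
 * Illustrative note (not part of the original source): NLOCKENTS() is just
 * max_locks_per_xact * (MaxBackends + max_prepared_xacts).  For example,
 * with max_locks_per_transaction = 64, 100 backends, and
 * max_prepared_transactions = 0, the shared lock table is sized for
 * 64 * (100 + 0) = 6400 lock entries.
 */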
58
59
60/*
61 * Data structures defining the semantics of the standard lock methods.
62 *
63 * The conflict table defines the semantics of the various lock modes.
64 */
static const LOCKMASK LockConflicts[] = {
	0,

	/* AccessShareLock */
	LOCKBIT_ON(AccessExclusiveLock),

	/* RowShareLock */
	LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),

	/* RowExclusiveLock */
	LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
	LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),

	/* ShareUpdateExclusiveLock */
	LOCKBIT_ON(ShareUpdateExclusiveLock) |
	LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
	LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),

	/* ShareLock */
	LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
	LOCKBIT_ON(ShareRowExclusiveLock) |
	LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),

	/* ShareRowExclusiveLock */
	LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
	LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
	LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),

	/* ExclusiveLock */
	LOCKBIT_ON(RowShareLock) |
	LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
	LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
	LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),

	/* AccessExclusiveLock */
	LOCKBIT_ON(AccessShareLock) | LOCKBIT_ON(RowShareLock) |
	LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
	LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
	LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock)
};
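/*
 * Illustrative note (not part of the original source): each entry above is
 * the set of modes the commented mode conflicts with.  For example,
 * LockConflicts[AccessShareLock] == LOCKBIT_ON(AccessExclusiveLock), so a
 * plain SELECT's AccessShareLock blocks, and is blocked by, only
 * AccessExclusiveLock, while AccessExclusiveLock conflicts with every mode
 * including itself.
 */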
106
107/* Names of lock modes, for debug printouts */
108static const char *const lock_mode_names[] =
109{
110 "INVALID",
111 "AccessShareLock",
112 "RowShareLock",
113 "RowExclusiveLock",
114 "ShareUpdateExclusiveLock",
115 "ShareLock",
116 "ShareRowExclusiveLock",
117 "ExclusiveLock",
118 "AccessExclusiveLock"
119};
120
121#ifndef LOCK_DEBUG
122static bool Dummy_trace = false;
123#endif
124
static const LockMethodData default_lockmethod = {
	MaxLockMode,
	LockConflicts,
	lock_mode_names,
#ifdef LOCK_DEBUG
	&Trace_locks
#else
	&Dummy_trace
#endif
};

static const LockMethodData user_lockmethod = {
	MaxLockMode,
	LockConflicts,
	lock_mode_names,
#ifdef LOCK_DEBUG
	&Trace_userlocks
#else
	&Dummy_trace
#endif
};
146
147/*
148 * map from lock method id to the lock table data structures
149 */
static const LockMethod LockMethods[] = {
	NULL,
	&default_lockmethod,
	&user_lockmethod
};
155
156
157/* Record that's written to 2PC state file when a lock is persisted */
typedef struct TwoPhaseLockRecord
{
	LOCKTAG		locktag;
	LOCKMODE	lockmode;
} TwoPhaseLockRecord;
164
165/*
166 * Count of the number of fast path lock slots we believe to be used. This
167 * might be higher than the real number if another backend has transferred
168 * our locks to the primary lock table, but it can never be lower than the
169 * real value, since only we can acquire locks on our own behalf.
170 *
171 * XXX Allocate a static array of the maximum size. We could use a pointer
172 * and then allocate just the right size to save a couple kB, but then we
173 * would have to initialize that, while for the static array that happens
174 * automatically. Doesn't seem worth the extra complexity.
175 */
static int	FastPathLocalUseCounts[FP_LOCK_GROUPS_PER_BACKEND_MAX];

178/*
179 * Flag to indicate if the relation extension lock is held by this backend.
180 * This flag is used to ensure that while holding the relation extension lock
181 * we don't try to acquire a heavyweight lock on any other object. This
182 * restriction implies that the relation extension lock won't ever participate
183 * in the deadlock cycle because we can never wait for any other heavyweight
184 * lock after acquiring this lock.
185 *
186 * Such a restriction is okay for relation extension locks as unlike other
187 * heavyweight locks these are not held till the transaction end. These are
188 * taken for a short duration to extend a particular relation and then
189 * released.
190 */
191static bool IsRelationExtensionLockHeld PG_USED_FOR_ASSERTS_ONLY = false;
192
193/*
194 * Number of fast-path locks per backend - size of the arrays in PGPROC.
195 * This is set only once during start, before initializing shared memory,
196 * and remains constant after that.
197 *
198 * We set the limit based on max_locks_per_transaction GUC, because that's
199 * the best information about expected number of locks per backend we have.
200 * See InitializeFastPathLocks() for details.
201 */
int			FastPathLockGroupsPerBackend = FP_LOCK_GROUPS_PER_BACKEND_MIN;

204/*
205 * Macros to calculate the fast-path group and index for a relation.
206 *
207 * The formula is a simple hash function, designed to spread the OIDs a bit,
208 * so that even contiguous values end up in different groups. In most cases
209 * there will be gaps anyway, but the multiplication should help a bit.
210 *
211 * The selected constant (49157) is a prime not too close to 2^k, and it's
212 * small enough to not cause overflows (in 64-bit).
213 *
214 * We can assume that FastPathLockGroupsPerBackend is a power-of-two per
215 * InitializeFastPathLocks().
216 */
217#define FAST_PATH_REL_GROUP(rel) \
218 (((uint64) (rel) * 49157) & (FastPathLockGroupsPerBackend - 1))
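/*
 * Illustrative note (not part of the original source): assuming
 * FastPathLockGroupsPerBackend == 4, relation OID 16385 maps to group
 * ((uint64) 16385 * 49157) & 3 == 805437445 & 3 == 1.  With the minimum of
 * one group the mask is zero, so every relation falls into group 0.
 */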
219
220/*
221 * Given the group/slot indexes, calculate the slot index in the whole array
222 * of fast-path lock slots.
223 */
224#define FAST_PATH_SLOT(group, index) \
225 (AssertMacro((uint32) (group) < FastPathLockGroupsPerBackend), \
226 AssertMacro((uint32) (index) < FP_LOCK_SLOTS_PER_GROUP), \
227 ((group) * FP_LOCK_SLOTS_PER_GROUP + (index)))
228
229/*
230 * Given a slot index (into the whole per-backend array), calculated using
231 * the FAST_PATH_SLOT macro, split it into group and index (in the group).
232 */
233#define FAST_PATH_GROUP(index) \
234 (AssertMacro((uint32) (index) < FastPathLockSlotsPerBackend()), \
235 ((index) / FP_LOCK_SLOTS_PER_GROUP))
236#define FAST_PATH_INDEX(index) \
237 (AssertMacro((uint32) (index) < FastPathLockSlotsPerBackend()), \
238 ((index) % FP_LOCK_SLOTS_PER_GROUP))
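/*
 * Illustrative note (not part of the original source): the macros above are
 * inverses of each other.  Assuming FP_LOCK_SLOTS_PER_GROUP == 16,
 * FAST_PATH_SLOT(2, 5) == 2 * 16 + 5 == 37, and in the other direction
 * FAST_PATH_GROUP(37) == 2 and FAST_PATH_INDEX(37) == 5.
 */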
239
240/* Macros for manipulating proc->fpLockBits */
241#define FAST_PATH_BITS_PER_SLOT 3
242#define FAST_PATH_LOCKNUMBER_OFFSET 1
243#define FAST_PATH_MASK ((1 << FAST_PATH_BITS_PER_SLOT) - 1)
244#define FAST_PATH_BITS(proc, n) (proc)->fpLockBits[FAST_PATH_GROUP(n)]
245#define FAST_PATH_GET_BITS(proc, n) \
246 ((FAST_PATH_BITS(proc, n) >> (FAST_PATH_BITS_PER_SLOT * FAST_PATH_INDEX(n))) & FAST_PATH_MASK)
247#define FAST_PATH_BIT_POSITION(n, l) \
248 (AssertMacro((l) >= FAST_PATH_LOCKNUMBER_OFFSET), \
249 AssertMacro((l) < FAST_PATH_BITS_PER_SLOT+FAST_PATH_LOCKNUMBER_OFFSET), \
250 AssertMacro((n) < FastPathLockSlotsPerBackend()), \
251 ((l) - FAST_PATH_LOCKNUMBER_OFFSET + FAST_PATH_BITS_PER_SLOT * (FAST_PATH_INDEX(n))))
252#define FAST_PATH_SET_LOCKMODE(proc, n, l) \
253 FAST_PATH_BITS(proc, n) |= UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)
254#define FAST_PATH_CLEAR_LOCKMODE(proc, n, l) \
255 FAST_PATH_BITS(proc, n) &= ~(UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l))
256#define FAST_PATH_CHECK_LOCKMODE(proc, n, l) \
257 (FAST_PATH_BITS(proc, n) & (UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)))
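/*
 * Illustrative note (not part of the original source): each slot occupies
 * FAST_PATH_BITS_PER_SLOT == 3 bits of its group's fpLockBits word, one bit
 * per fast-path-eligible mode (AccessShareLock, RowShareLock,
 * RowExclusiveLock).  For a slot with FAST_PATH_INDEX(n) == 2, holding
 * RowShareLock (mode 2) corresponds to bit
 * (2 - FAST_PATH_LOCKNUMBER_OFFSET) + 3 * 2 == 7, i.e.
 * FAST_PATH_SET_LOCKMODE(proc, n, RowShareLock) ORs in UINT64CONST(1) << 7.
 */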
258
259/*
260 * The fast-path lock mechanism is concerned only with relation locks on
261 * unshared relations by backends bound to a database. The fast-path
262 * mechanism exists mostly to accelerate acquisition and release of locks
263 * that rarely conflict. Because ShareUpdateExclusiveLock is
264 * self-conflicting, it can't use the fast-path mechanism; but it also does
265 * not conflict with any of the locks that do, so we can ignore it completely.
266 */
267#define EligibleForRelationFastPath(locktag, mode) \
268 ((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
269 (locktag)->locktag_type == LOCKTAG_RELATION && \
270 (locktag)->locktag_field1 == MyDatabaseId && \
271 MyDatabaseId != InvalidOid && \
272 (mode) < ShareUpdateExclusiveLock)
273#define ConflictsWithRelationFastPath(locktag, mode) \
274 ((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
275 (locktag)->locktag_type == LOCKTAG_RELATION && \
276 (locktag)->locktag_field1 != InvalidOid && \
277 (mode) > ShareUpdateExclusiveLock)
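/*
 * Illustrative note (not part of the original source): under these
 * definitions, AccessShareLock, RowShareLock, and RowExclusiveLock on an
 * ordinary table in the current database are fast-path eligible, while
 * ShareLock and stronger modes (for instance the AccessExclusiveLock taken
 * by ALTER TABLE) are the "strong" modes that conflict with the fast path
 * and force a transfer into the main lock table.  ShareUpdateExclusiveLock
 * belongs to neither set, as explained above.
 */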
278
279static bool FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode);
280static bool FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode);
281static bool FastPathTransferRelationLocks(LockMethod lockMethodTable,
282 const LOCKTAG *locktag, uint32 hashcode);
static void FastPathGetRelationLockEntry(LOCALLOCK *locallock);

285/*
286 * To make the fast-path lock mechanism work, we must have some way of
287 * preventing the use of the fast-path when a conflicting lock might be present.
 * We partition the locktag space into FAST_PATH_STRONG_LOCK_HASH_PARTITIONS,
289 * and maintain an integer count of the number of "strong" lockers
290 * in each partition. When any "strong" lockers are present (which is
291 * hopefully not very often), the fast-path mechanism can't be used, and we
292 * must fall back to the slower method of pushing matching locks directly
293 * into the main lock tables.
294 *
295 * The deadlock detector does not know anything about the fast path mechanism,
296 * so any locks that might be involved in a deadlock must be transferred from
297 * the fast-path queues to the main lock table.
298 */
299
300#define FAST_PATH_STRONG_LOCK_HASH_BITS 10
301#define FAST_PATH_STRONG_LOCK_HASH_PARTITIONS \
302 (1 << FAST_PATH_STRONG_LOCK_HASH_BITS)
303#define FastPathStrongLockHashPartition(hashcode) \
304 ((hashcode) % FAST_PATH_STRONG_LOCK_HASH_PARTITIONS)
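/*
 * Illustrative note (not part of the original source): the strong-lock
 * counters form a fixed array of 1 << 10 == 1024 partitions, so a locktag
 * whose LockTagHashCode() is 0xDEADBEEF is counted in partition
 * 0xDEADBEEF % 1024 == 751.
 */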
305
typedef struct
{
	slock_t		mutex;
	uint32		count[FAST_PATH_STRONG_LOCK_HASH_PARTITIONS];
} FastPathStrongRelationLockData;

static volatile FastPathStrongRelationLockData *FastPathStrongRelationLocks;

314
315/*
316 * Pointers to hash tables containing lock state
317 *
318 * The LockMethodLockHash and LockMethodProcLockHash hash tables are in
319 * shared memory; LockMethodLocalHash is local to each backend.
320 */
static HTAB *LockMethodLockHash;
static HTAB *LockMethodProcLockHash;
static HTAB *LockMethodLocalHash;

325
326/* private state for error cleanup */
static LOCALLOCK *StrongLockInProgress;
static LOCALLOCK *awaitedLock;
static ResourceOwner awaitedOwner;

331
332#ifdef LOCK_DEBUG
333
334/*------
335 * The following configuration options are available for lock debugging:
336 *
337 * TRACE_LOCKS -- give a bunch of output what's going on in this file
338 * TRACE_USERLOCKS -- same but for user locks
339 * TRACE_LOCK_OIDMIN-- do not trace locks for tables below this oid
340 * (use to avoid output on system tables)
341 * TRACE_LOCK_TABLE -- trace locks on this table (oid) unconditionally
342 * DEBUG_DEADLOCKS -- currently dumps locks at untimely occasions ;)
343 *
344 * Furthermore, but in storage/lmgr/lwlock.c:
345 * TRACE_LWLOCKS -- trace lightweight locks (pretty useless)
346 *
347 * Define LOCK_DEBUG at compile time to get all these enabled.
348 * --------
349 */
350
351int Trace_lock_oidmin = FirstNormalObjectId;
352bool Trace_locks = false;
353bool Trace_userlocks = false;
354int Trace_lock_table = 0;
355bool Debug_deadlocks = false;
356
357
358inline static bool
359LOCK_DEBUG_ENABLED(const LOCKTAG *tag)
360{
	return
		(*(LockMethods[tag->locktag_lockmethodid]->trace_flag) &&
		 ((Oid) tag->locktag_field2 >= (Oid) Trace_lock_oidmin))
364 || (Trace_lock_table &&
365 (tag->locktag_field2 == Trace_lock_table));
366}
367
368
369inline static void
370LOCK_PRINT(const char *where, const LOCK *lock, LOCKMODE type)
371{
372 if (LOCK_DEBUG_ENABLED(&lock->tag))
373 elog(LOG,
374 "%s: lock(%p) id(%u,%u,%u,%u,%u,%u) grantMask(%x) "
375 "req(%d,%d,%d,%d,%d,%d,%d)=%d "
376 "grant(%d,%d,%d,%d,%d,%d,%d)=%d wait(%d) type(%s)",
			 where, lock,
			 lock->tag.locktag_field1, lock->tag.locktag_field2,
			 lock->tag.locktag_field3, lock->tag.locktag_field4,
			 lock->tag.locktag_type, lock->tag.locktag_lockmethodid,
			 lock->grantMask,
382 lock->requested[1], lock->requested[2], lock->requested[3],
383 lock->requested[4], lock->requested[5], lock->requested[6],
384 lock->requested[7], lock->nRequested,
385 lock->granted[1], lock->granted[2], lock->granted[3],
386 lock->granted[4], lock->granted[5], lock->granted[6],
387 lock->granted[7], lock->nGranted,
388 dclist_count(&lock->waitProcs),
389 LockMethods[LOCK_LOCKMETHOD(*lock)]->lockModeNames[type]);
390}
391
392
393inline static void
394PROCLOCK_PRINT(const char *where, const PROCLOCK *proclockP)
395{
396 if (LOCK_DEBUG_ENABLED(&proclockP->tag.myLock->tag))
397 elog(LOG,
398 "%s: proclock(%p) lock(%p) method(%u) proc(%p) hold(%x)",
399 where, proclockP, proclockP->tag.myLock,
400 PROCLOCK_LOCKMETHOD(*(proclockP)),
401 proclockP->tag.myProc, (int) proclockP->holdMask);
402}
403#else /* not LOCK_DEBUG */
404
405#define LOCK_PRINT(where, lock, type) ((void) 0)
406#define PROCLOCK_PRINT(where, proclockP) ((void) 0)
407#endif /* not LOCK_DEBUG */
408
409
410static uint32 proclock_hash(const void *key, Size keysize);
411static void RemoveLocalLock(LOCALLOCK *locallock);
412static PROCLOCK *SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc,
413 const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode);
414static void GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner);
415static void BeginStrongLockAcquire(LOCALLOCK *locallock, uint32 fasthashcode);
416static void FinishStrongLockAcquire(void);
417static ProcWaitStatus WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner);
418static void waitonlock_error_callback(void *arg);
419static void ReleaseLockIfHeld(LOCALLOCK *locallock, bool sessionLock);
420static void LockReassignOwner(LOCALLOCK *locallock, ResourceOwner parent);
421static bool UnGrantLock(LOCK *lock, LOCKMODE lockmode,
422 PROCLOCK *proclock, LockMethod lockMethodTable);
423static void CleanUpLock(LOCK *lock, PROCLOCK *proclock,
424 LockMethod lockMethodTable, uint32 hashcode,
425 bool wakeupNeeded);
426static void LockRefindAndRelease(LockMethod lockMethodTable, PGPROC *proc,
427 LOCKTAG *locktag, LOCKMODE lockmode,
428 bool decrement_strong_lock_count);
static void GetSingleProcBlockerStatusData(PGPROC *blocked_proc,
										   BlockedProcsData *data);
431
432
433/*
434 * Initialize the lock manager's shmem data structures.
435 *
436 * This is called from CreateSharedMemoryAndSemaphores(), which see for more
437 * comments. In the normal postmaster case, the shared hash tables are
438 * created here, and backends inherit pointers to them via fork(). In the
439 * EXEC_BACKEND case, each backend re-executes this code to obtain pointers to
440 * the already existing shared hash tables. In either case, each backend must
441 * also call InitLockManagerAccess() to create the locallock hash table.
442 */
void
LockManagerShmemInit(void)
{
446 HASHCTL info;
447 int64 init_table_size,
448 max_table_size;
449 bool found;
450
451 /*
452 * Compute init/max size to request for lock hashtables. Note these
453 * calculations must agree with LockManagerShmemSize!
454 */
455 max_table_size = NLOCKENTS();
456 init_table_size = max_table_size / 2;
457
458 /*
459 * Allocate hash table for LOCK structs. This stores per-locked-object
460 * information.
461 */
462 info.keysize = sizeof(LOCKTAG);
463 info.entrysize = sizeof(LOCK);
	info.num_partitions = NUM_LOCK_PARTITIONS;

466 LockMethodLockHash = ShmemInitHash("LOCK hash",
467 init_table_size,
468 max_table_size,
									   &info,
									   HASH_ELEM | HASH_BLOBS | HASH_PARTITION);

472 /* Assume an average of 2 holders per lock */
473 max_table_size *= 2;
474 init_table_size *= 2;
475
476 /*
477 * Allocate hash table for PROCLOCK structs. This stores
478 * per-lock-per-holder information.
479 */
480 info.keysize = sizeof(PROCLOCKTAG);
481 info.entrysize = sizeof(PROCLOCK);
482 info.hash = proclock_hash;
	info.num_partitions = NUM_LOCK_PARTITIONS;

485 LockMethodProcLockHash = ShmemInitHash("PROCLOCK hash",
486 init_table_size,
487 max_table_size,
											   &info,
											   HASH_ELEM | HASH_FUNCTION | HASH_PARTITION);

491 /*
492 * Allocate fast-path structures.
493 */
	FastPathStrongRelationLocks =
		ShmemInitStruct("Fast Path Strong Relation Lock Data",
						sizeof(FastPathStrongRelationLockData), &found);
	if (!found)
		SpinLockInit(&FastPathStrongRelationLocks->mutex);
}
500
501/*
502 * Initialize the lock manager's backend-private data structures.
503 */
void
InitLockManagerAccess(void)
{
507 /*
508 * Allocate non-shared hash table for LOCALLOCK structs. This stores lock
509 * counts and resource owner information.
510 */
511 HASHCTL info;
512
513 info.keysize = sizeof(LOCALLOCKTAG);
514 info.entrysize = sizeof(LOCALLOCK);
515
516 LockMethodLocalHash = hash_create("LOCALLOCK hash",
517 16,
									  &info,
									  HASH_ELEM | HASH_BLOBS);
}
521
522
523/*
524 * Fetch the lock method table associated with a given lock
525 */
LockMethod
GetLocksMethodTable(const LOCK *lock)
{
529 LOCKMETHODID lockmethodid = LOCK_LOCKMETHOD(*lock);
530
531 Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
532 return LockMethods[lockmethodid];
533}
534
535/*
536 * Fetch the lock method table associated with a given locktag
537 */
LockMethod
GetLockTagsMethodTable(const LOCKTAG *locktag)
{
541 LOCKMETHODID lockmethodid = (LOCKMETHODID) locktag->locktag_lockmethodid;
542
543 Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
544 return LockMethods[lockmethodid];
545}
546
547
548/*
549 * Compute the hash code associated with a LOCKTAG.
550 *
551 * To avoid unnecessary recomputations of the hash code, we try to do this
552 * just once per function, and then pass it around as needed. Aside from
553 * passing the hashcode to hash_search_with_hash_value(), we can extract
554 * the lock partition number from the hashcode.
555 */
uint32
LockTagHashCode(const LOCKTAG *locktag)
{
559 return get_hash_value(LockMethodLockHash, locktag);
560}
561
562/*
563 * Compute the hash code associated with a PROCLOCKTAG.
564 *
565 * Because we want to use just one set of partition locks for both the
566 * LOCK and PROCLOCK hash tables, we have to make sure that PROCLOCKs
567 * fall into the same partition number as their associated LOCKs.
568 * dynahash.c expects the partition number to be the low-order bits of
569 * the hash code, and therefore a PROCLOCKTAG's hash code must have the
570 * same low-order bits as the associated LOCKTAG's hash code. We achieve
571 * this with this specialized hash function.
572 */
573static uint32
574proclock_hash(const void *key, Size keysize)
575{
576 const PROCLOCKTAG *proclocktag = (const PROCLOCKTAG *) key;
577 uint32 lockhash;
578 Datum procptr;
579
580 Assert(keysize == sizeof(PROCLOCKTAG));
581
582 /* Look into the associated LOCK object, and compute its hash code */
583 lockhash = LockTagHashCode(&proclocktag->myLock->tag);
584
585 /*
586 * To make the hash code also depend on the PGPROC, we xor the proc
587 * struct's address into the hash code, left-shifted so that the
588 * partition-number bits don't change. Since this is only a hash, we
589 * don't care if we lose high-order bits of the address; use an
590 * intermediate variable to suppress cast-pointer-to-int warnings.
591 */
592 procptr = PointerGetDatum(proclocktag->myProc);
593 lockhash ^= DatumGetUInt32(procptr) << LOG2_NUM_LOCK_PARTITIONS;
594
595 return lockhash;
596}
597
598/*
599 * Compute the hash code associated with a PROCLOCKTAG, given the hashcode
600 * for its underlying LOCK.
601 *
602 * We use this just to avoid redundant calls of LockTagHashCode().
603 */
604static inline uint32
605ProcLockHashCode(const PROCLOCKTAG *proclocktag, uint32 hashcode)
606{
607 uint32 lockhash = hashcode;
608 Datum procptr;
609
610 /*
611 * This must match proclock_hash()!
612 */
613 procptr = PointerGetDatum(proclocktag->myProc);
614 lockhash ^= DatumGetUInt32(procptr) << LOG2_NUM_LOCK_PARTITIONS;
615
616 return lockhash;
617}
618
619/*
620 * Given two lock modes, return whether they would conflict.
621 */
bool
DoLockModesConflict(LOCKMODE mode1, LOCKMODE mode2)
{
625 LockMethod lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];
626
627 if (lockMethodTable->conflictTab[mode1] & LOCKBIT_ON(mode2))
628 return true;
629
630 return false;
631}
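/*
 * Illustrative sketch (not part of the original source) of consulting the
 * conflict table directly via DoLockModesConflict().
 */
#if 0
	/* RowExclusiveLock (e.g. INSERT) vs. ShareLock (e.g. CREATE INDEX) */
	Assert(DoLockModesConflict(RowExclusiveLock, ShareLock));
	/* two RowExclusiveLock holders never block each other */
	Assert(!DoLockModesConflict(RowExclusiveLock, RowExclusiveLock));
#endif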
632
633/*
634 * LockHeldByMe -- test whether lock 'locktag' is held by the current
635 * transaction
636 *
637 * Returns true if current transaction holds a lock on 'tag' of mode
638 * 'lockmode'. If 'orstronger' is true, a stronger lockmode is also OK.
639 * ("Stronger" is defined as "numerically higher", which is a bit
640 * semantically dubious but is OK for the purposes we use this for.)
641 */
642bool
643LockHeldByMe(const LOCKTAG *locktag,
644 LOCKMODE lockmode, bool orstronger)
645{
646 LOCALLOCKTAG localtag;
647 LOCALLOCK *locallock;
648
649 /*
650 * See if there is a LOCALLOCK entry for this lock and lockmode
651 */
652 MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
653 localtag.lock = *locktag;
654 localtag.mode = lockmode;
655
	locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
										  &localtag,
										  HASH_FIND, NULL);
659
660 if (locallock && locallock->nLocks > 0)
661 return true;
662
663 if (orstronger)
664 {
665 LOCKMODE slockmode;
666
667 for (slockmode = lockmode + 1;
668 slockmode <= MaxLockMode;
669 slockmode++)
670 {
671 if (LockHeldByMe(locktag, slockmode, false))
672 return true;
673 }
674 }
675
676 return false;
677}
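/*
 * Illustrative sketch (not part of the original source): asserting that the
 * current transaction already holds at least RowExclusiveLock on a relation.
 * "rel_oid" is a hypothetical variable.
 */
#if 0
	LOCKTAG		tag;

	SET_LOCKTAG_RELATION(tag, MyDatabaseId, rel_oid);
	Assert(LockHeldByMe(&tag, RowExclusiveLock, true));
#endif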
678
679#ifdef USE_ASSERT_CHECKING
680/*
681 * GetLockMethodLocalHash -- return the hash of local locks, for modules that
682 * evaluate assertions based on all locks held.
683 */
684HTAB *
685GetLockMethodLocalHash(void)
686{
687 return LockMethodLocalHash;
688}
689#endif
690
691/*
692 * LockHasWaiters -- look up 'locktag' and check if releasing this
693 * lock would wake up other processes waiting for it.
694 */
695bool
696LockHasWaiters(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
697{
698 LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
699 LockMethod lockMethodTable;
700 LOCALLOCKTAG localtag;
701 LOCALLOCK *locallock;
702 LOCK *lock;
703 PROCLOCK *proclock;
704 LWLock *partitionLock;
705 bool hasWaiters = false;
706
707 if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
708 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
709 lockMethodTable = LockMethods[lockmethodid];
710 if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
711 elog(ERROR, "unrecognized lock mode: %d", lockmode);
712
713#ifdef LOCK_DEBUG
714 if (LOCK_DEBUG_ENABLED(locktag))
715 elog(LOG, "LockHasWaiters: lock [%u,%u] %s",
716 locktag->locktag_field1, locktag->locktag_field2,
717 lockMethodTable->lockModeNames[lockmode]);
718#endif
719
720 /*
721 * Find the LOCALLOCK entry for this lock and lockmode
722 */
723 MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
724 localtag.lock = *locktag;
725 localtag.mode = lockmode;
726
	locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
										  &localtag,
										  HASH_FIND, NULL);
730
731 /*
732 * let the caller print its own error message, too. Do not ereport(ERROR).
733 */
734 if (!locallock || locallock->nLocks <= 0)
735 {
736 elog(WARNING, "you don't own a lock of type %s",
737 lockMethodTable->lockModeNames[lockmode]);
738 return false;
739 }
740
741 /*
742 * Check the shared lock table.
743 */
744 partitionLock = LockHashPartitionLock(locallock->hashcode);
745
746 LWLockAcquire(partitionLock, LW_SHARED);
747
748 /*
749 * We don't need to re-find the lock or proclock, since we kept their
750 * addresses in the locallock table, and they couldn't have been removed
751 * while we were holding a lock on them.
752 */
753 lock = locallock->lock;
754 LOCK_PRINT("LockHasWaiters: found", lock, lockmode);
755 proclock = locallock->proclock;
756 PROCLOCK_PRINT("LockHasWaiters: found", proclock);
757
758 /*
759 * Double-check that we are actually holding a lock of the type we want to
760 * release.
761 */
762 if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
763 {
764 PROCLOCK_PRINT("LockHasWaiters: WRONGTYPE", proclock);
765 LWLockRelease(partitionLock);
766 elog(WARNING, "you don't own a lock of type %s",
767 lockMethodTable->lockModeNames[lockmode]);
768 RemoveLocalLock(locallock);
769 return false;
770 }
771
772 /*
773 * Do the checking.
774 */
775 if ((lockMethodTable->conflictTab[lockmode] & lock->waitMask) != 0)
776 hasWaiters = true;
777
778 LWLockRelease(partitionLock);
779
780 return hasWaiters;
781}
782
783/*
784 * LockAcquire -- Check for lock conflicts, sleep if conflict found,
785 * set lock if/when no conflicts.
786 *
787 * Inputs:
788 * locktag: unique identifier for the lockable object
789 * lockmode: lock mode to acquire
790 * sessionLock: if true, acquire lock for session not current transaction
791 * dontWait: if true, don't wait to acquire lock
792 *
793 * Returns one of:
794 * LOCKACQUIRE_NOT_AVAIL lock not available, and dontWait=true
795 * LOCKACQUIRE_OK lock successfully acquired
796 * LOCKACQUIRE_ALREADY_HELD incremented count for lock already held
797 * LOCKACQUIRE_ALREADY_CLEAR incremented count for lock already clear
798 *
799 * In the normal case where dontWait=false and the caller doesn't need to
800 * distinguish a freshly acquired lock from one already taken earlier in
801 * this same transaction, there is no need to examine the return value.
802 *
803 * Side Effects: The lock is acquired and recorded in lock tables.
804 *
805 * NOTE: if we wait for the lock, there is no way to abort the wait
806 * short of aborting the transaction.
807 */
LockAcquireResult
LockAcquire(const LOCKTAG *locktag,
810 LOCKMODE lockmode,
811 bool sessionLock,
812 bool dontWait)
813{
814 return LockAcquireExtended(locktag, lockmode, sessionLock, dontWait,
815 true, NULL, false);
816}
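/*
 * Illustrative sketch (not part of the original source): callers normally go
 * through lmgr.c (for example LockRelationOid()), which builds the LOCKTAG
 * and calls LockAcquire() roughly as below.  "rel_oid" is a hypothetical
 * variable.
 */
#if 0
	LOCKTAG		tag;

	SET_LOCKTAG_RELATION(tag, MyDatabaseId, rel_oid);
	(void) LockAcquire(&tag, AccessShareLock, false, false);
	/* ... use the relation; the lock is released at transaction end ... */
#endif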
817
818/*
819 * LockAcquireExtended - allows us to specify additional options
820 *
821 * reportMemoryError specifies whether a lock request that fills the lock
822 * table should generate an ERROR or not. Passing "false" allows the caller
823 * to attempt to recover from lock-table-full situations, perhaps by forcibly
824 * canceling other lock holders and then retrying. Note, however, that the
825 * return code for that is LOCKACQUIRE_NOT_AVAIL, so that it's unsafe to use
826 * in combination with dontWait = true, as the cause of failure couldn't be
827 * distinguished.
828 *
829 * If locallockp isn't NULL, *locallockp receives a pointer to the LOCALLOCK
830 * table entry if a lock is successfully acquired, or NULL if not.
831 *
832 * logLockFailure indicates whether to log details when a lock acquisition
833 * fails with dontWait = true.
834 */
LockAcquireResult
LockAcquireExtended(const LOCKTAG *locktag,
					LOCKMODE lockmode,
838 bool sessionLock,
839 bool dontWait,
840 bool reportMemoryError,
841 LOCALLOCK **locallockp,
842 bool logLockFailure)
843{
844 LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
845 LockMethod lockMethodTable;
846 LOCALLOCKTAG localtag;
847 LOCALLOCK *locallock;
848 LOCK *lock;
849 PROCLOCK *proclock;
850 bool found;
851 ResourceOwner owner;
852 uint32 hashcode;
853 LWLock *partitionLock;
854 bool found_conflict;
855 ProcWaitStatus waitResult;
856 bool log_lock = false;
857
858 if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
859 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
860 lockMethodTable = LockMethods[lockmethodid];
861 if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
862 elog(ERROR, "unrecognized lock mode: %d", lockmode);
863
864 if (RecoveryInProgress() && !InRecovery &&
865 (locktag->locktag_type == LOCKTAG_OBJECT ||
866 locktag->locktag_type == LOCKTAG_RELATION) &&
867 lockmode > RowExclusiveLock)
		ereport(ERROR,
				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
870 errmsg("cannot acquire lock mode %s on database objects while recovery is in progress",
871 lockMethodTable->lockModeNames[lockmode]),
872 errhint("Only RowExclusiveLock or less can be acquired on database objects during recovery.")));
873
874#ifdef LOCK_DEBUG
875 if (LOCK_DEBUG_ENABLED(locktag))
876 elog(LOG, "LockAcquire: lock [%u,%u] %s",
877 locktag->locktag_field1, locktag->locktag_field2,
878 lockMethodTable->lockModeNames[lockmode]);
879#endif
880
881 /* Identify owner for lock */
882 if (sessionLock)
883 owner = NULL;
884 else
885 owner = CurrentResourceOwner;
886
887 /*
888 * Find or create a LOCALLOCK entry for this lock and lockmode
889 */
890 MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
891 localtag.lock = *locktag;
892 localtag.mode = lockmode;
893
	locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
										  &localtag,
										  HASH_ENTER, &found);
897
898 /*
899 * if it's a new locallock object, initialize it
900 */
901 if (!found)
902 {
903 locallock->lock = NULL;
904 locallock->proclock = NULL;
905 locallock->hashcode = LockTagHashCode(&(localtag.lock));
906 locallock->nLocks = 0;
907 locallock->holdsStrongLockCount = false;
908 locallock->lockCleared = false;
909 locallock->numLockOwners = 0;
910 locallock->maxLockOwners = 8;
911 locallock->lockOwners = NULL; /* in case next line fails */
912 locallock->lockOwners = (LOCALLOCKOWNER *)
			MemoryContextAlloc(TopMemoryContext,
							   locallock->maxLockOwners * sizeof(LOCALLOCKOWNER));
915 }
916 else
917 {
918 /* Make sure there will be room to remember the lock */
919 if (locallock->numLockOwners >= locallock->maxLockOwners)
920 {
921 int newsize = locallock->maxLockOwners * 2;
922
923 locallock->lockOwners = (LOCALLOCKOWNER *)
924 repalloc(locallock->lockOwners,
925 newsize * sizeof(LOCALLOCKOWNER));
926 locallock->maxLockOwners = newsize;
927 }
928 }
929 hashcode = locallock->hashcode;
930
931 if (locallockp)
932 *locallockp = locallock;
933
934 /*
935 * If we already hold the lock, we can just increase the count locally.
936 *
937 * If lockCleared is already set, caller need not worry about absorbing
938 * sinval messages related to the lock's object.
939 */
940 if (locallock->nLocks > 0)
941 {
942 GrantLockLocal(locallock, owner);
		if (locallock->lockCleared)
			return LOCKACQUIRE_ALREADY_CLEAR;
		else
			return LOCKACQUIRE_ALREADY_HELD;
	}
948
949 /*
950 * We don't acquire any other heavyweight lock while holding the relation
	 * extension lock.  We do allow acquiring the same relation extension
	 * lock more than once, but that case won't reach here.
953 */
954 Assert(!IsRelationExtensionLockHeld);
955
956 /*
957 * Prepare to emit a WAL record if acquisition of this lock needs to be
958 * replayed in a standby server.
959 *
	 * Here we prepare to log; after the lock is acquired we'll issue the log record.
961 * This arrangement simplifies error recovery in case the preparation step
962 * fails.
963 *
964 * Only AccessExclusiveLocks can conflict with lock types that read-only
965 * transactions can acquire in a standby server. Make sure this definition
966 * matches the one in GetRunningTransactionLocks().
967 */
968 if (lockmode >= AccessExclusiveLock &&
		locktag->locktag_type == LOCKTAG_RELATION &&
		!RecoveryInProgress() &&
		XLogStandbyInfoActive())
	{
		LogAccessExclusiveLockPrepare();
		log_lock = true;
975 }
976
977 /*
978 * Attempt to take lock via fast path, if eligible. But if we remember
979 * having filled up the fast path array, we don't attempt to make any
980 * further use of it until we release some locks. It's possible that some
981 * other backend has transferred some of those locks to the shared hash
982 * table, leaving space free, but it's not worth acquiring the LWLock just
983 * to check. It's also possible that we're acquiring a second or third
984 * lock type on a relation we have already locked using the fast-path, but
985 * for now we don't worry about that case either.
986 */
987 if (EligibleForRelationFastPath(locktag, lockmode) &&
		FastPathLocalUseCounts[FAST_PATH_REL_GROUP(locktag->locktag_field2)] < FP_LOCK_SLOTS_PER_GROUP)
	{
990 uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
991 bool acquired;
992
993 /*
994 * LWLockAcquire acts as a memory sequencing point, so it's safe to
995 * assume that any strong locker whose increment to
996 * FastPathStrongRelationLocks->counts becomes visible after we test
997 * it has yet to begin to transfer fast-path locks.
998 */
		LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
		if (FastPathStrongRelationLocks->count[fasthashcode] != 0)
1001 acquired = false;
1002 else
1003 acquired = FastPathGrantRelationLock(locktag->locktag_field2,
1004 lockmode);
		LWLockRelease(&MyProc->fpInfoLock);
		if (acquired)
1007 {
1008 /*
1009 * The locallock might contain stale pointers to some old shared
1010 * objects; we MUST reset these to null before considering the
1011 * lock to be acquired via fast-path.
1012 */
1013 locallock->lock = NULL;
1014 locallock->proclock = NULL;
1015 GrantLockLocal(locallock, owner);
1016 return LOCKACQUIRE_OK;
1017 }
1018 }
1019
1020 /*
1021 * If this lock could potentially have been taken via the fast-path by
1022 * some other backend, we must (temporarily) disable further use of the
1023 * fast-path for this lock tag, and migrate any locks already taken via
1024 * this method to the main lock table.
1025 */
1026 if (ConflictsWithRelationFastPath(locktag, lockmode))
1027 {
1028 uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
1029
1030 BeginStrongLockAcquire(locallock, fasthashcode);
1031 if (!FastPathTransferRelationLocks(lockMethodTable, locktag,
1032 hashcode))
1033 {
			AbortStrongLockAcquire();
			if (locallock->nLocks == 0)
1036 RemoveLocalLock(locallock);
1037 if (locallockp)
1038 *locallockp = NULL;
1039 if (reportMemoryError)
1040 ereport(ERROR,
1041 (errcode(ERRCODE_OUT_OF_MEMORY),
1042 errmsg("out of shared memory"),
1043 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
1044 else
1045 return LOCKACQUIRE_NOT_AVAIL;
1046 }
1047 }
1048
1049 /*
1050 * We didn't find the lock in our LOCALLOCK table, and we didn't manage to
1051 * take it via the fast-path, either, so we've got to mess with the shared
1052 * lock table.
1053 */
1054 partitionLock = LockHashPartitionLock(hashcode);
1055
1056 LWLockAcquire(partitionLock, LW_EXCLUSIVE);
1057
1058 /*
1059 * Find or create lock and proclock entries with this tag
1060 *
1061 * Note: if the locallock object already existed, it might have a pointer
1062 * to the lock already ... but we should not assume that that pointer is
1063 * valid, since a lock object with zero hold and request counts can go
1064 * away anytime. So we have to use SetupLockInTable() to recompute the
1065 * lock and proclock pointers, even if they're already set.
1066 */
1067 proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
1068 hashcode, lockmode);
1069 if (!proclock)
1070 {
		AbortStrongLockAcquire();
		LWLockRelease(partitionLock);
1073 if (locallock->nLocks == 0)
1074 RemoveLocalLock(locallock);
1075 if (locallockp)
1076 *locallockp = NULL;
1077 if (reportMemoryError)
1078 ereport(ERROR,
1079 (errcode(ERRCODE_OUT_OF_MEMORY),
1080 errmsg("out of shared memory"),
1081 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
1082 else
1083 return LOCKACQUIRE_NOT_AVAIL;
1084 }
1085 locallock->proclock = proclock;
1086 lock = proclock->tag.myLock;
1087 locallock->lock = lock;
1088
1089 /*
1090 * If lock requested conflicts with locks requested by waiters, must join
1091 * wait queue. Otherwise, check for conflict with already-held locks.
1092 * (That's last because most complex check.)
1093 */
1094 if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
1095 found_conflict = true;
1096 else
1097 found_conflict = LockCheckConflicts(lockMethodTable, lockmode,
1098 lock, proclock);
1099
1100 if (!found_conflict)
1101 {
1102 /* No conflict with held or previously requested locks */
1103 GrantLock(lock, proclock, lockmode);
1104 waitResult = PROC_WAIT_STATUS_OK;
1105 }
1106 else
1107 {
1108 /*
1109 * Join the lock's wait queue. We call this even in the dontWait
1110 * case, because JoinWaitQueue() may discover that we can acquire the
1111 * lock immediately after all.
1112 */
1113 waitResult = JoinWaitQueue(locallock, lockMethodTable, dontWait);
1114 }
1115
1116 if (waitResult == PROC_WAIT_STATUS_ERROR)
1117 {
1118 /*
1119 * We're not getting the lock because a deadlock was detected already
1120 * while trying to join the wait queue, or because we would have to
1121 * wait but the caller requested no blocking.
1122 *
1123 * Undo the changes to shared entries before releasing the partition
1124 * lock.
1125 */
		AbortStrongLockAcquire();

1128 if (proclock->holdMask == 0)
1129 {
1130 uint32 proclock_hashcode;
1131
1132 proclock_hashcode = ProcLockHashCode(&proclock->tag,
1133 hashcode);
1134 dlist_delete(&proclock->lockLink);
1135 dlist_delete(&proclock->procLink);
			if (!hash_search_with_hash_value(LockMethodProcLockHash,
											 &(proclock->tag),
											 proclock_hashcode,
											 HASH_REMOVE,
											 NULL))
1141 elog(PANIC, "proclock table corrupted");
1142 }
1143 else
1144 PROCLOCK_PRINT("LockAcquire: did not join wait queue", proclock);
1145 lock->nRequested--;
1146 lock->requested[lockmode]--;
1147 LOCK_PRINT("LockAcquire: did not join wait queue",
1148 lock, lockmode);
1149 Assert((lock->nRequested > 0) &&
1150 (lock->requested[lockmode] >= 0));
1151 Assert(lock->nGranted <= lock->nRequested);
1152 LWLockRelease(partitionLock);
1153 if (locallock->nLocks == 0)
1154 RemoveLocalLock(locallock);
1155
1156 if (dontWait)
1157 {
1158 /*
1159 * Log lock holders and waiters as a detail log message if
1160 * logLockFailure = true and lock acquisition fails with dontWait
1161 * = true
1162 */
1163 if (logLockFailure)
1164 {
				StringInfoData buf,
							lock_waiters_sbuf,
1167 lock_holders_sbuf;
1168 const char *modename;
1169 int lockHoldersNum = 0;
1170
				initStringInfo(&buf);
				initStringInfo(&lock_waiters_sbuf);
1173 initStringInfo(&lock_holders_sbuf);
1174
1175 DescribeLockTag(&buf, &locallock->tag.lock);
1176 modename = GetLockmodeName(locallock->tag.lock.locktag_lockmethodid,
1177 lockmode);
1178
1179 /* Gather a list of all lock holders and waiters */
1180 LWLockAcquire(partitionLock, LW_SHARED);
1181 GetLockHoldersAndWaiters(locallock, &lock_holders_sbuf,
1182 &lock_waiters_sbuf, &lockHoldersNum);
1183 LWLockRelease(partitionLock);
1184
1185 ereport(LOG,
1186 (errmsg("process %d could not obtain %s on %s",
1187 MyProcPid, modename, buf.data),
						 errdetail_log_plural(
											  "Process holding the lock: %s, Wait queue: %s.",
1190 "Processes holding the lock: %s, Wait queue: %s.",
1191 lockHoldersNum,
1192 lock_holders_sbuf.data,
1193 lock_waiters_sbuf.data)));
1194
1195 pfree(buf.data);
1196 pfree(lock_holders_sbuf.data);
1197 pfree(lock_waiters_sbuf.data);
1198 }
1199 if (locallockp)
1200 *locallockp = NULL;
1201 return LOCKACQUIRE_NOT_AVAIL;
1202 }
1203 else
1204 {
			DeadLockReport();
			/* DeadLockReport() will not return */
1207 }
1208 }
1209
1210 /*
1211 * We are now in the lock queue, or the lock was already granted. If
1212 * queued, go to sleep.
1213 */
1214 if (waitResult == PROC_WAIT_STATUS_WAITING)
1215 {
1216 Assert(!dontWait);
1217 PROCLOCK_PRINT("LockAcquire: sleeping on lock", proclock);
1218 LOCK_PRINT("LockAcquire: sleeping on lock", lock, lockmode);
1219 LWLockRelease(partitionLock);
1220
1221 waitResult = WaitOnLock(locallock, owner);
1222
1223 /*
1224 * NOTE: do not do any material change of state between here and
1225 * return. All required changes in locktable state must have been
1226 * done when the lock was granted to us --- see notes in WaitOnLock.
1227 */
1228
1229 if (waitResult == PROC_WAIT_STATUS_ERROR)
1230 {
1231 /*
1232 * We failed as a result of a deadlock, see CheckDeadLock(). Quit
1233 * now.
1234 */
1235 Assert(!dontWait);
			DeadLockReport();
			/* DeadLockReport() will not return */
1238 }
1239 }
1240 else
1241 LWLockRelease(partitionLock);
1242 Assert(waitResult == PROC_WAIT_STATUS_OK);
1243
1244 /* The lock was granted to us. Update the local lock entry accordingly */
1245 Assert((proclock->holdMask & LOCKBIT_ON(lockmode)) != 0);
1246 GrantLockLocal(locallock, owner);
1247
1248 /*
1249 * Lock state is fully up-to-date now; if we error out after this, no
1250 * special error cleanup is required.
1251 */
	FinishStrongLockAcquire();

1254 /*
1255 * Emit a WAL record if acquisition of this lock needs to be replayed in a
1256 * standby server.
1257 */
1258 if (log_lock)
1259 {
1260 /*
1261 * Decode the locktag back to the original values, to avoid sending
1262 * lots of empty bytes with every message. See lock.h to check how a
1263 * locktag is defined for LOCKTAG_RELATION
1264 */
1266 locktag->locktag_field2);
1267 }
1268
1269 return LOCKACQUIRE_OK;
1270}
1271
1272/*
1273 * Find or create LOCK and PROCLOCK objects as needed for a new lock
1274 * request.
1275 *
1276 * Returns the PROCLOCK object, or NULL if we failed to create the objects
1277 * for lack of shared memory.
1278 *
1279 * The appropriate partition lock must be held at entry, and will be
1280 * held at exit.
1281 */
1282static PROCLOCK *
1283SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc,
1284 const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode)
1285{
1286 LOCK *lock;
1287 PROCLOCK *proclock;
1288 PROCLOCKTAG proclocktag;
1289 uint32 proclock_hashcode;
1290 bool found;
1291
1292 /*
1293 * Find or create a lock with this tag.
1294 */
	lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
												locktag,
												hashcode,
												HASH_ENTER_NULL,
												&found);
1300 if (!lock)
1301 return NULL;
1302
1303 /*
1304 * if it's a new lock object, initialize it
1305 */
1306 if (!found)
1307 {
1308 lock->grantMask = 0;
1309 lock->waitMask = 0;
1310 dlist_init(&lock->procLocks);
1311 dclist_init(&lock->waitProcs);
1312 lock->nRequested = 0;
1313 lock->nGranted = 0;
1314 MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
1315 MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
1316 LOCK_PRINT("LockAcquire: new", lock, lockmode);
1317 }
1318 else
1319 {
1320 LOCK_PRINT("LockAcquire: found", lock, lockmode);
1321 Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
1322 Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
1323 Assert(lock->nGranted <= lock->nRequested);
1324 }
1325
1326 /*
1327 * Create the hash key for the proclock table.
1328 */
1329 proclocktag.myLock = lock;
1330 proclocktag.myProc = proc;
1331
1332 proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
1333
1334 /*
1335 * Find or create a proclock entry with this tag
1336 */
	proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
														&proclocktag,
														proclock_hashcode,
														HASH_ENTER_NULL,
														&found);
1342 if (!proclock)
1343 {
1344 /* Oops, not enough shmem for the proclock */
1345 if (lock->nRequested == 0)
1346 {
1347 /*
1348 * There are no other requestors of this lock, so garbage-collect
1349 * the lock object. We *must* do this to avoid a permanent leak
1350 * of shared memory, because there won't be anything to cause
1351 * anyone to release the lock object later.
1352 */
1353 Assert(dlist_is_empty(&(lock->procLocks)));
			if (!hash_search_with_hash_value(LockMethodLockHash,
											 &(lock->tag),
											 hashcode,
											 HASH_REMOVE,
											 NULL))
1359 elog(PANIC, "lock table corrupted");
1360 }
1361 return NULL;
1362 }
1363
1364 /*
1365 * If new, initialize the new entry
1366 */
1367 if (!found)
1368 {
1369 uint32 partition = LockHashPartition(hashcode);
1370
1371 /*
1372 * It might seem unsafe to access proclock->groupLeader without a
1373 * lock, but it's not really. Either we are initializing a proclock
1374 * on our own behalf, in which case our group leader isn't changing
1375 * because the group leader for a process can only ever be changed by
1376 * the process itself; or else we are transferring a fast-path lock to
1377 * the main lock table, in which case that process can't change its
1378 * lock group leader without first releasing all of its locks (and in
1379 * particular the one we are currently transferring).
1380 */
1381 proclock->groupLeader = proc->lockGroupLeader != NULL ?
1382 proc->lockGroupLeader : proc;
1383 proclock->holdMask = 0;
1384 proclock->releaseMask = 0;
1385 /* Add proclock to appropriate lists */
1386 dlist_push_tail(&lock->procLocks, &proclock->lockLink);
1387 dlist_push_tail(&proc->myProcLocks[partition], &proclock->procLink);
1388 PROCLOCK_PRINT("LockAcquire: new", proclock);
1389 }
1390 else
1391 {
1392 PROCLOCK_PRINT("LockAcquire: found", proclock);
1393 Assert((proclock->holdMask & ~lock->grantMask) == 0);
1394
1395#ifdef CHECK_DEADLOCK_RISK
1396
1397 /*
1398 * Issue warning if we already hold a lower-level lock on this object
1399 * and do not hold a lock of the requested level or higher. This
1400 * indicates a deadlock-prone coding practice (eg, we'd have a
1401 * deadlock if another backend were following the same code path at
1402 * about the same time).
1403 *
1404 * This is not enabled by default, because it may generate log entries
1405 * about user-level coding practices that are in fact safe in context.
1406 * It can be enabled to help find system-level problems.
1407 *
1408 * XXX Doing numeric comparison on the lockmodes is a hack; it'd be
1409 * better to use a table. For now, though, this works.
1410 */
1411 {
1412 int i;
1413
1414 for (i = lockMethodTable->numLockModes; i > 0; i--)
1415 {
1416 if (proclock->holdMask & LOCKBIT_ON(i))
1417 {
1418 if (i >= (int) lockmode)
1419 break; /* safe: we have a lock >= req level */
1420 elog(LOG, "deadlock risk: raising lock level"
1421 " from %s to %s on object %u/%u/%u",
1422 lockMethodTable->lockModeNames[i],
1423 lockMethodTable->lockModeNames[lockmode],
1424 lock->tag.locktag_field1, lock->tag.locktag_field2,
1425 lock->tag.locktag_field3);
1426 break;
1427 }
1428 }
1429 }
1430#endif /* CHECK_DEADLOCK_RISK */
1431 }
1432
1433 /*
1434 * lock->nRequested and lock->requested[] count the total number of
1435 * requests, whether granted or waiting, so increment those immediately.
1436 * The other counts don't increment till we get the lock.
1437 */
1438 lock->nRequested++;
1439 lock->requested[lockmode]++;
1440 Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
1441
1442 /*
1443 * We shouldn't already hold the desired lock; else locallock table is
1444 * broken.
1445 */
1446 if (proclock->holdMask & LOCKBIT_ON(lockmode))
1447 elog(ERROR, "lock %s on object %u/%u/%u is already held",
1448 lockMethodTable->lockModeNames[lockmode],
1449 lock->tag.locktag_field1, lock->tag.locktag_field2,
1450 lock->tag.locktag_field3);
1451
1452 return proclock;
1453}
1454
1455/*
1456 * Check and set/reset the flag that we hold the relation extension lock.
1457 *
 * It is the caller's responsibility to call this function after
 * acquiring or releasing the relation extension lock.
1460 *
1461 * Pass acquired as true if lock is acquired, false otherwise.
1462 */
1463static inline void
1464CheckAndSetLockHeld(LOCALLOCK *locallock, bool acquired)
1465{
1466#ifdef USE_ASSERT_CHECKING
1467 if (LOCALLOCK_LOCKTAG(*locallock) == LOCKTAG_RELATION_EXTEND)
1468 IsRelationExtensionLockHeld = acquired;
1469#endif
1470}
1471
1472/*
1473 * Subroutine to free a locallock entry
1474 */
static void
RemoveLocalLock(LOCALLOCK *locallock)
{
1478 int i;
1479
1480 for (i = locallock->numLockOwners - 1; i >= 0; i--)
1481 {
1482 if (locallock->lockOwners[i].owner != NULL)
1483 ResourceOwnerForgetLock(locallock->lockOwners[i].owner, locallock);
1484 }
1485 locallock->numLockOwners = 0;
1486 if (locallock->lockOwners != NULL)
1487 pfree(locallock->lockOwners);
1488 locallock->lockOwners = NULL;
1489
1490 if (locallock->holdsStrongLockCount)
1491 {
1492 uint32 fasthashcode;
1493
1494 fasthashcode = FastPathStrongLockHashPartition(locallock->hashcode);
1495
		SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
		Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
1498 FastPathStrongRelationLocks->count[fasthashcode]--;
1499 locallock->holdsStrongLockCount = false;
		SpinLockRelease(&FastPathStrongRelationLocks->mutex);
	}
1502
	if (!hash_search(LockMethodLocalHash,
					 &(locallock->tag),
					 HASH_REMOVE, NULL))
1506 elog(WARNING, "locallock table corrupted");
1507
1508 /*
1509 * Indicate that the lock is released for certain types of locks
1510 */
1511 CheckAndSetLockHeld(locallock, false);
1512}
1513
1514/*
1515 * LockCheckConflicts -- test whether requested lock conflicts
1516 * with those already granted
1517 *
1518 * Returns true if conflict, false if no conflict.
1519 *
1520 * NOTES:
1521 * Here's what makes this complicated: one process's locks don't
1522 * conflict with one another, no matter what purpose they are held for
1523 * (eg, session and transaction locks do not conflict). Nor do the locks
1524 * of one process in a lock group conflict with those of another process in
1525 * the same group. So, we must subtract off these locks when determining
1526 * whether the requested new lock conflicts with those already held.
1527 */
1528bool
LockCheckConflicts(LockMethod lockMethodTable,
				   LOCKMODE lockmode,
1531 LOCK *lock,
1532 PROCLOCK *proclock)
1533{
1534 int numLockModes = lockMethodTable->numLockModes;
1535 LOCKMASK myLocks;
1536 int conflictMask = lockMethodTable->conflictTab[lockmode];
1537 int conflictsRemaining[MAX_LOCKMODES];
1538 int totalConflictsRemaining = 0;
1539 dlist_iter proclock_iter;
1540 int i;
1541
1542 /*
1543 * first check for global conflicts: If no locks conflict with my request,
1544 * then I get the lock.
1545 *
1546 * Checking for conflict: lock->grantMask represents the types of
1547 * currently held locks. conflictTable[lockmode] has a bit set for each
1548 * type of lock that conflicts with request. Bitwise compare tells if
1549 * there is a conflict.
1550 */
1551 if (!(conflictMask & lock->grantMask))
1552 {
1553 PROCLOCK_PRINT("LockCheckConflicts: no conflict", proclock);
1554 return false;
1555 }
1556
1557 /*
1558 * Rats. Something conflicts. But it could still be my own lock, or a
1559 * lock held by another member of my locking group. First, figure out how
1560 * many conflicts remain after subtracting out any locks I hold myself.
1561 */
1562 myLocks = proclock->holdMask;
1563 for (i = 1; i <= numLockModes; i++)
1564 {
1565 if ((conflictMask & LOCKBIT_ON(i)) == 0)
1566 {
1567 conflictsRemaining[i] = 0;
1568 continue;
1569 }
1570 conflictsRemaining[i] = lock->granted[i];
1571 if (myLocks & LOCKBIT_ON(i))
1572 --conflictsRemaining[i];
1573 totalConflictsRemaining += conflictsRemaining[i];
1574 }
1575
1576 /* If no conflicts remain, we get the lock. */
1577 if (totalConflictsRemaining == 0)
1578 {
1579 PROCLOCK_PRINT("LockCheckConflicts: resolved (simple)", proclock);
1580 return false;
1581 }
1582
1583 /* If no group locking, it's definitely a conflict. */
1584 if (proclock->groupLeader == MyProc && MyProc->lockGroupLeader == NULL)
1585 {
1586 Assert(proclock->tag.myProc == MyProc);
1587 PROCLOCK_PRINT("LockCheckConflicts: conflicting (simple)",
1588 proclock);
1589 return true;
1590 }
1591
1592 /*
	 * Relation extension locks conflict even between members of the same
	 * lock group.
	 */
	if (LOCK_LOCKTAG(*lock) == LOCKTAG_RELATION_EXTEND)
	{
1597 PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)",
1598 proclock);
1599 return true;
1600 }
1601
1602 /*
1603 * Locks held in conflicting modes by members of our own lock group are
1604 * not real conflicts; we can subtract those out and see if we still have
1605 * a conflict. This is O(N) in the number of processes holding or
1606 * awaiting locks on this object. We could improve that by making the
1607 * shared memory state more complex (and larger) but it doesn't seem worth
1608 * it.
1609 */
1610 dlist_foreach(proclock_iter, &lock->procLocks)
1611 {
1612 PROCLOCK *otherproclock =
1613 dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
1614
1615 if (proclock != otherproclock &&
1616 proclock->groupLeader == otherproclock->groupLeader &&
1617 (otherproclock->holdMask & conflictMask) != 0)
1618 {
1619 int intersectMask = otherproclock->holdMask & conflictMask;
1620
1621 for (i = 1; i <= numLockModes; i++)
1622 {
1623 if ((intersectMask & LOCKBIT_ON(i)) != 0)
1624 {
1625 if (conflictsRemaining[i] <= 0)
1626 elog(PANIC, "proclocks held do not match lock");
1627 conflictsRemaining[i]--;
1628 totalConflictsRemaining--;
1629 }
1630 }
1631
1632 if (totalConflictsRemaining == 0)
1633 {
1634 PROCLOCK_PRINT("LockCheckConflicts: resolved (group)",
1635 proclock);
1636 return false;
1637 }
1638 }
1639 }
1640
1641 /* Nope, it's a real conflict. */
1642 PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)", proclock);
1643 return true;
1644}
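/*
 * Illustrative note (not part of the original source): suppose two members
 * of one lock group each hold RowExclusiveLock and a third member requests
 * ShareLock.  grantMask includes RowExclusiveLock, which conflicts with
 * ShareLock, so the quick checks above fail; but the group loop subtracts
 * both group-held RowExclusiveLocks, totalConflictsRemaining drops to zero,
 * and the request is granted without waiting.
 */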
1645
1646/*
1647 * GrantLock -- update the lock and proclock data structures to show
1648 * the lock request has been granted.
1649 *
1650 * NOTE: if proc was blocked, it also needs to be removed from the wait list
1651 * and have its waitLock/waitProcLock fields cleared. That's not done here.
1652 *
1653 * NOTE: the lock grant also has to be recorded in the associated LOCALLOCK
1654 * table entry; but since we may be awaking some other process, we can't do
1655 * that here; it's done by GrantLockLocal, instead.
1656 */
1657void
1658GrantLock(LOCK *lock, PROCLOCK *proclock, LOCKMODE lockmode)
1659{
1660 lock->nGranted++;
1661 lock->granted[lockmode]++;
1662 lock->grantMask |= LOCKBIT_ON(lockmode);
1663 if (lock->granted[lockmode] == lock->requested[lockmode])
1664 lock->waitMask &= LOCKBIT_OFF(lockmode);
1665 proclock->holdMask |= LOCKBIT_ON(lockmode);
1666 LOCK_PRINT("GrantLock", lock, lockmode);
1667 Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
1668 Assert(lock->nGranted <= lock->nRequested);
1669}
1670
1671/*
1672 * UnGrantLock -- opposite of GrantLock.
1673 *
1674 * Updates the lock and proclock data structures to show that the lock
1675 * is no longer held nor requested by the current holder.
1676 *
1677 * Returns true if there were any waiters waiting on the lock that
1678 * should now be woken up with ProcLockWakeup.
1679 */
1680static bool
1681UnGrantLock(LOCK *lock, LOCKMODE lockmode,
1682 PROCLOCK *proclock, LockMethod lockMethodTable)
1683{
1684 bool wakeupNeeded = false;
1685
1686 Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
1687 Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
1688 Assert(lock->nGranted <= lock->nRequested);
1689
1690 /*
1691 * fix the general lock stats
1692 */
1693 lock->nRequested--;
1694 lock->requested[lockmode]--;
1695 lock->nGranted--;
1696 lock->granted[lockmode]--;
1697
1698 if (lock->granted[lockmode] == 0)
1699 {
1700 /* change the conflict mask. No more of this lock type. */
1701 lock->grantMask &= LOCKBIT_OFF(lockmode);
1702 }
1703
1704 LOCK_PRINT("UnGrantLock: updated", lock, lockmode);
1705
1706 /*
1707 * We need only run ProcLockWakeup if the released lock conflicts with at
1708 * least one of the lock types requested by waiter(s). Otherwise whatever
1709 * conflict made them wait must still exist. NOTE: before MVCC, we could
1710 * skip wakeup if lock->granted[lockmode] was still positive. But that's
1711 * not true anymore, because the remaining granted locks might belong to
1712 * some waiter, who could now be awakened because he doesn't conflict with
1713 * his own locks.
1714 */
1715 if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
1716 wakeupNeeded = true;
1717
1718 /*
1719 * Now fix the per-proclock state.
1720 */
1721 proclock->holdMask &= LOCKBIT_OFF(lockmode);
1722 PROCLOCK_PRINT("UnGrantLock: updated", proclock);
1723
1724 return wakeupNeeded;
1725}
1726
1727/*
1728 * CleanUpLock -- clean up after releasing a lock. We garbage-collect the
1729 * proclock and lock objects if possible, and call ProcLockWakeup if there
1730 * are remaining requests and the caller says it's OK. (Normally, this
1731 * should be called after UnGrantLock, and wakeupNeeded is the result from
1732 * UnGrantLock.)
1733 *
1734 * The appropriate partition lock must be held at entry, and will be
1735 * held at exit.
1736 */
1737static void
1738CleanUpLock(LOCK *lock, PROCLOCK *proclock,
1739 LockMethod lockMethodTable, uint32 hashcode,
1740 bool wakeupNeeded)
1741{
1742 /*
1743 * If this was my last hold on this lock, delete my entry in the proclock
1744 * table.
1745 */
1746 if (proclock->holdMask == 0)
1747 {
1748 uint32 proclock_hashcode;
1749
1750 PROCLOCK_PRINT("CleanUpLock: deleting", proclock);
1751 dlist_delete(&proclock->lockLink);
1752 dlist_delete(&proclock->procLink);
1753 proclock_hashcode = ProcLockHashCode(&proclock->tag, hashcode);
		if (!hash_search_with_hash_value(LockMethodProcLockHash,
										 &(proclock->tag),
										 proclock_hashcode,
										 HASH_REMOVE,
										 NULL))
1759 elog(PANIC, "proclock table corrupted");
1760 }
1761
1762 if (lock->nRequested == 0)
1763 {
1764 /*
1765 * The caller just released the last lock, so garbage-collect the lock
1766 * object.
1767 */
1768 LOCK_PRINT("CleanUpLock: deleting", lock, 0);
		Assert(dlist_is_empty(&lock->procLocks));
		if (!hash_search_with_hash_value(LockMethodLockHash,
										 &(lock->tag),
										 hashcode,
										 HASH_REMOVE,
										 NULL))
1775 elog(PANIC, "lock table corrupted");
1776 }
1777 else if (wakeupNeeded)
1778 {
1779 /* There are waiters on this lock, so wake them up. */
1780 ProcLockWakeup(lockMethodTable, lock);
1781 }
1782}
1783
1784/*
1785 * GrantLockLocal -- update the locallock data structures to show
1786 * the lock request has been granted.
1787 *
1788 * We expect that LockAcquire made sure there is room to add a new
1789 * ResourceOwner entry.
1790 */
static void
GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner)
{
1794 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
1795 int i;
1796
1797 Assert(locallock->numLockOwners < locallock->maxLockOwners);
1798 /* Count the total */
1799 locallock->nLocks++;
1800 /* Count the per-owner lock */
1801 for (i = 0; i < locallock->numLockOwners; i++)
1802 {
1803 if (lockOwners[i].owner == owner)
1804 {
1805 lockOwners[i].nLocks++;
1806 return;
1807 }
1808 }
1809 lockOwners[i].owner = owner;
1810 lockOwners[i].nLocks = 1;
1811 locallock->numLockOwners++;
1812 if (owner != NULL)
1813 ResourceOwnerRememberLock(owner, locallock);
1814
1815 /* Indicate that the lock is acquired for certain types of locks. */
1816 CheckAndSetLockHeld(locallock, true);
1817}
1818
1819/*
1820 * BeginStrongLockAcquire - inhibit use of fastpath for a given LOCALLOCK,
1821 * and arrange for error cleanup if it fails
1822 */
1823static void
1824BeginStrongLockAcquire(LOCALLOCK *locallock, uint32 fasthashcode)
1825{
	Assert(StrongLockInProgress == NULL);
	Assert(locallock->holdsStrongLockCount == false);
1828
1829 /*
1830 * Adding to a memory location is not atomic, so we take a spinlock to
1831 * ensure we don't collide with someone else trying to bump the count at
1832 * the same time.
1833 *
1834 * XXX: It might be worth considering using an atomic fetch-and-add
1835 * instruction here, on architectures where that is supported.
1836 */
1837
	SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
	FastPathStrongRelationLocks->count[fasthashcode]++;
1840 locallock->holdsStrongLockCount = true;
1841 StrongLockInProgress = locallock;
	SpinLockRelease(&FastPathStrongRelationLocks->mutex);
}
1844
1845/*
1846 * FinishStrongLockAcquire - cancel pending cleanup for a strong lock
1847 * acquisition once it's no longer needed
1848 */
static void
FinishStrongLockAcquire(void)
{
1852 StrongLockInProgress = NULL;
1853}
1854
1855/*
1856 * AbortStrongLockAcquire - undo strong lock state changes performed by
1857 * BeginStrongLockAcquire.
1858 */
void
AbortStrongLockAcquire(void)
{
1862 uint32 fasthashcode;
1863 LOCALLOCK *locallock = StrongLockInProgress;
1864
1865 if (locallock == NULL)
1866 return;
1867
1868 fasthashcode = FastPathStrongLockHashPartition(locallock->hashcode);
1869 Assert(locallock->holdsStrongLockCount == true);
1870 SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
1871 Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
1872 FastPathStrongRelationLocks->count[fasthashcode]--;
1873 locallock->holdsStrongLockCount = false;
1874 StrongLockInProgress = NULL;
1875 SpinLockRelease(&FastPathStrongRelationLocks->mutex);
1876}
1877
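These three routines are used as a unit when acquiring a "strong" (fast-path-conflicting) lock mode. Below is a minimal sketch of the intended pairing; it is illustrative only, and in the real acquisition path the error-side call to AbortStrongLockAcquire is reached via LockErrorCleanup rather than a local handler:

    BeginStrongLockAcquire(locallock, fasthashcode); /* count++, remember pending cleanup */
    /* ... set up the lock in the shared hash table; this may error out ... */
    FinishStrongLockAcquire();                       /* success: keep the count, forget the cleanup */

    /* if anything errors out before FinishStrongLockAcquire, cleanup must run: */
    AbortStrongLockAcquire();                        /* count--, only if still pending */
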
1878/*
1879 * GrantAwaitedLock -- call GrantLockLocal for the lock we are doing
1880 * WaitOnLock on.
1881 *
1882 * proc.c needs this for the case where we are booted off the lock by
1883 * timeout, but discover that someone granted us the lock anyway.
1884 *
1885 * We could just export GrantLockLocal, but that would require including
1886 * resowner.h in lock.h, which creates circularity.
1887 */
1888void
1889GrantAwaitedLock(void)
1890{
1891 GrantLockLocal(awaitedLock, awaitedOwner);
1892}
1893
1894/*
1895 * GetAwaitedLock -- Return the lock we're currently doing WaitOnLock on.
1896 */
1897LOCALLOCK *
1898GetAwaitedLock(void)
1899{
1900 return awaitedLock;
1901}
1902
1903/*
1904 * ResetAwaitedLock -- Forget that we are waiting on a lock.
1905 */
1906void
1907ResetAwaitedLock(void)
1908{
1909 awaitedLock = NULL;
1910}
1911
1912/*
1913 * MarkLockClear -- mark an acquired lock as "clear"
1914 *
1915 * This means that we know we have absorbed all sinval messages that other
1916 * sessions generated before we acquired this lock, and so we can confidently
1917 * assume we know about any catalog changes protected by this lock.
1918 */
1919void
1920MarkLockClear(LOCALLOCK *locallock)
1921{
1922 Assert(locallock->nLocks > 0);
1923 locallock->lockCleared = true;
1924}
1925
1926/*
1927 * WaitOnLock -- wait to acquire a lock
1928 *
1929 * This is a wrapper around ProcSleep, with extra tracing and bookkeeping.
1930 */
1931static ProcWaitStatus
1932WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner)
1933{
1934 ProcWaitStatus result;
1935 ErrorContextCallback waiterrcontext;
1936
1937 TRACE_POSTGRESQL_LOCK_WAIT_START(locallock->tag.lock.locktag_field1,
1938 locallock->tag.lock.locktag_field2,
1939 locallock->tag.lock.locktag_field3,
1940 locallock->tag.lock.locktag_field4,
1941 locallock->tag.lock.locktag_type,
1942 locallock->tag.mode);
1943
1944 /* Setup error traceback support for ereport() */
1945 waiterrcontext.callback = waitonlock_error_callback;
1946 waiterrcontext.arg = locallock;
1947 waiterrcontext.previous = error_context_stack;
1948 error_context_stack = &waiterrcontext;
1949
1950 /* adjust the process title to indicate that it's waiting */
1951 set_ps_display_suffix("waiting");
1952
1953 /*
1954 * Record the fact that we are waiting for a lock, so that
1955 * LockErrorCleanup will clean up if cancel/die happens.
1956 */
1957 awaitedLock = locallock;
1958 awaitedOwner = owner;
1959
1960 /*
1961 * NOTE: Think not to put any shared-state cleanup after the call to
1962 * ProcSleep, in either the normal or failure path. The lock state must
1963 * be fully set by the lock grantor, or by CheckDeadLock if we give up
1964 * waiting for the lock. This is necessary because of the possibility
1965 * that a cancel/die interrupt will interrupt ProcSleep after someone else
1966 * grants us the lock, but before we've noticed it. Hence, after granting,
1967 * the locktable state must fully reflect the fact that we own the lock;
1968 * we can't do additional work on return.
1969 *
1970 * We can and do use a PG_TRY block to try to clean up after failure, but
1971 * this still has a major limitation: elog(FATAL) can occur while waiting
1972 * (eg, a "die" interrupt), and then control won't come back here. So all
1973 * cleanup of essential state should happen in LockErrorCleanup, not here.
1974 * We can use PG_TRY to clear the "waiting" status flags, since doing that
1975 * is unimportant if the process exits.
1976 */
1977 PG_TRY();
1978 {
1979 result = ProcSleep(locallock);
1980 }
1981 PG_CATCH();
1982 {
1983 /* In this path, awaitedLock remains set until LockErrorCleanup */
1984
1985 /* reset ps display to remove the suffix */
1986 set_ps_display_remove_suffix();
1987
1988 /* and propagate the error */
1989 PG_RE_THROW();
1990 }
1991 PG_END_TRY();
1992
1993 /*
1994 * We no longer want LockErrorCleanup to do anything.
1995 */
1996 awaitedLock = NULL;
1997
1998 /* reset ps display to remove the suffix */
1999 set_ps_display_remove_suffix();
2000
2001 error_context_stack = waiterrcontext.previous;
2002
2003 TRACE_POSTGRESQL_LOCK_WAIT_DONE(locallock->tag.lock.locktag_field1,
2004 locallock->tag.lock.locktag_field2,
2005 locallock->tag.lock.locktag_field3,
2006 locallock->tag.lock.locktag_field4,
2007 locallock->tag.lock.locktag_type,
2008 locallock->tag.mode);
2009
2010 return result;
2011}
2012
2013/*
2014 * error context callback for failures in WaitOnLock
2015 *
2016 * We report which lock was being waited on, in the same style used in
2017 * deadlock reports. This helps with lock timeout errors in particular.
2018 */
2019static void
2020waitonlock_error_callback(void *arg)
2021{
2022 LOCALLOCK *locallock = (LOCALLOCK *) arg;
2023 const LOCKTAG *tag = &locallock->tag.lock;
2024 LOCKMODE mode = locallock->tag.mode;
2025 StringInfoData locktagbuf;
2026
2027 initStringInfo(&locktagbuf);
2028 DescribeLockTag(&locktagbuf, tag);
2029
2030 errcontext("waiting for %s on %s",
2031 GetLockmodeName(tag->locktag_lockmethodid, mode),
2032 locktagbuf.data);
2033}
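With this callback on the error context stack, a lock wait that fails (for example, one cancelled by lock_timeout) reports the awaited lock in its CONTEXT line. Using hypothetical OIDs, the output looks roughly like:

    ERROR:  canceling statement due to lock timeout
    CONTEXT:  waiting for AccessExclusiveLock on relation 16384 of database 16385

The description after "on" comes from DescribeLockTag, so its wording varies with the locktag type.
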
2034
2035/*
2036 * Remove a proc from the wait-queue it is on (caller must know it is on one).
2037 * This is only used when the proc has failed to get the lock, so we set its
2038 * waitStatus to PROC_WAIT_STATUS_ERROR.
2039 *
2040 * Appropriate partition lock must be held by caller. Also, caller is
2041 * responsible for signaling the proc if needed.
2042 *
2043 * NB: this does not clean up any locallock object that may exist for the lock.
2044 */
2045void
2046RemoveFromWaitQueue(PGPROC *proc, uint32 hashcode)
2047{
2048 LOCK *waitLock = proc->waitLock;
2049 PROCLOCK *proclock = proc->waitProcLock;
2050 LOCKMODE lockmode = proc->waitLockMode;
2051 LOCKMETHODID lockmethodid = LOCK_LOCKMETHOD(*waitLock);
2052
2053 /* Make sure proc is waiting */
2054 Assert(proc->waitStatus == PROC_WAIT_STATUS_WAITING);
2055 Assert(proc->links.next != NULL);
2056 Assert(waitLock);
2057 Assert(!dclist_is_empty(&waitLock->waitProcs));
2058 Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
2059
2060 /* Remove proc from lock's wait queue */
2061 dclist_delete_from_thoroughly(&waitLock->waitProcs, &proc->links);
2062
2063 /* Undo increments of request counts by waiting process */
2064 Assert(waitLock->nRequested > 0);
2065 Assert(waitLock->nRequested > proc->waitLock->nGranted);
2066 waitLock->nRequested--;
2067 Assert(waitLock->requested[lockmode] > 0);
2068 waitLock->requested[lockmode]--;
2069 /* don't forget to clear waitMask bit if appropriate */
2070 if (waitLock->granted[lockmode] == waitLock->requested[lockmode])
2071 waitLock->waitMask &= LOCKBIT_OFF(lockmode);
2072
2073 /* Clean up the proc's own state, and pass it the ok/fail signal */
2074 proc->waitLock = NULL;
2075 proc->waitProcLock = NULL;
2076 proc->waitStatus = PROC_WAIT_STATUS_ERROR;
2077
2078 /*
2079 * Delete the proclock immediately if it represents no already-held locks.
2080 * (This must happen now because if the owner of the lock decides to
2081 * release it, and the requested/granted counts then go to zero,
2082 * LockRelease expects there to be no remaining proclocks.) Then see if
2083 * any other waiters for the lock can be woken up now.
2084 */
2085 CleanUpLock(waitLock, proclock,
2086 LockMethods[lockmethodid], hashcode,
2087 true);
2088}
2089
2090/*
2091 * LockRelease -- look up 'locktag' and release one 'lockmode' lock on it.
2092 * Release a session lock if 'sessionLock' is true, else release a
2093 * regular transaction lock.
2094 *
2095 * Side Effects: find any waiting processes that are now wakable,
2096 * grant them their requested locks and awaken them.
2097 * (We have to grant the lock here to avoid a race between
2098 * the waking process and any new process to
2099 * come along and request the lock.)
2100 */
2101bool
2102LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
2103{
2104 LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
2105 LockMethod lockMethodTable;
2106 LOCALLOCKTAG localtag;
2107 LOCALLOCK *locallock;
2108 LOCK *lock;
2109 PROCLOCK *proclock;
2110 LWLock *partitionLock;
2111 bool wakeupNeeded;
2112
2113 if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2114 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2115 lockMethodTable = LockMethods[lockmethodid];
2116 if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
2117 elog(ERROR, "unrecognized lock mode: %d", lockmode);
2118
2119#ifdef LOCK_DEBUG
2120 if (LOCK_DEBUG_ENABLED(locktag))
2121 elog(LOG, "LockRelease: lock [%u,%u] %s",
2122 locktag->locktag_field1, locktag->locktag_field2,
2123 lockMethodTable->lockModeNames[lockmode]);
2124#endif
2125
2126 /*
2127 * Find the LOCALLOCK entry for this lock and lockmode
2128 */
2129 MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
2130 localtag.lock = *locktag;
2131 localtag.mode = lockmode;
2132
2133 locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
2134 &localtag,
2135 HASH_FIND, NULL);
2136
2137 /*
2138 * let the caller print its own error message, too. Do not ereport(ERROR).
2139 */
2140 if (!locallock || locallock->nLocks <= 0)
2141 {
2142 elog(WARNING, "you don't own a lock of type %s",
2143 lockMethodTable->lockModeNames[lockmode]);
2144 return false;
2145 }
2146
2147 /*
2148 * Decrease the count for the resource owner.
2149 */
2150 {
2151 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
2152 ResourceOwner owner;
2153 int i;
2154
2155 /* Identify owner for lock */
2156 if (sessionLock)
2157 owner = NULL;
2158 else
2159 owner = CurrentResourceOwner;
2160
2161 for (i = locallock->numLockOwners - 1; i >= 0; i--)
2162 {
2163 if (lockOwners[i].owner == owner)
2164 {
2165 Assert(lockOwners[i].nLocks > 0);
2166 if (--lockOwners[i].nLocks == 0)
2167 {
2168 if (owner != NULL)
2169 ResourceOwnerForgetLock(owner, locallock);
2170 /* compact out unused slot */
2171 locallock->numLockOwners--;
2172 if (i < locallock->numLockOwners)
2173 lockOwners[i] = lockOwners[locallock->numLockOwners];
2174 }
2175 break;
2176 }
2177 }
2178 if (i < 0)
2179 {
2180 /* don't release a lock belonging to another owner */
2181 elog(WARNING, "you don't own a lock of type %s",
2182 lockMethodTable->lockModeNames[lockmode]);
2183 return false;
2184 }
2185 }
2186
2187 /*
2188 * Decrease the total local count. If we're still holding the lock, we're
2189 * done.
2190 */
2191 locallock->nLocks--;
2192
2193 if (locallock->nLocks > 0)
2194 return true;
2195
2196 /*
2197 * At this point we can no longer suppose we are clear of invalidation
2198 * messages related to this lock. Although we'll delete the LOCALLOCK
2199 * object before any intentional return from this routine, it seems worth
2200 * the trouble to explicitly reset lockCleared right now, just in case
2201 * some error prevents us from deleting the LOCALLOCK.
2202 */
2203 locallock->lockCleared = false;
2204
2205 /* Attempt fast release of any lock eligible for the fast path. */
2206 if (EligibleForRelationFastPath(locktag, lockmode) &&
2207 FastPathLocalUseCounts[FAST_PATH_REL_GROUP(locktag->locktag_field2)] > 0)
2208 {
2209 bool released;
2210
2211 /*
2212 * We might not find the lock here, even if we originally entered it
2213 * here. Another backend may have moved it to the main table.
2214 */
2215 LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
2216 released = FastPathUnGrantRelationLock(locktag->locktag_field2,
2217 lockmode);
2218 LWLockRelease(&MyProc->fpInfoLock);
2219 if (released)
2220 {
2221 RemoveLocalLock(locallock);
2222 return true;
2223 }
2224 }
2225
2226 /*
2227 * Otherwise we've got to mess with the shared lock table.
2228 */
2229 partitionLock = LockHashPartitionLock(locallock->hashcode);
2230
2231 LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2232
2233 /*
2234 * Normally, we don't need to re-find the lock or proclock, since we kept
2235 * their addresses in the locallock table, and they couldn't have been
2236 * removed while we were holding a lock on them. But it's possible that
2237 * the lock was taken fast-path and has since been moved to the main hash
2238 * table by another backend, in which case we will need to look up the
2239 * objects here. We assume the lock field is NULL if so.
2240 */
2241 lock = locallock->lock;
2242 if (!lock)
2243 {
2244 PROCLOCKTAG proclocktag;
2245
2246 Assert(EligibleForRelationFastPath(locktag, lockmode));
2247 lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
2248 locktag,
2249 locallock->hashcode,
2250 HASH_FIND,
2251 NULL);
2252 if (!lock)
2253 elog(ERROR, "failed to re-find shared lock object");
2254 locallock->lock = lock;
2255
2256 proclocktag.myLock = lock;
2257 proclocktag.myProc = MyProc;
2258 locallock->proclock = (PROCLOCK *) hash_search(LockMethodProcLockHash,
2259 &proclocktag,
2260 HASH_FIND,
2261 NULL);
2262 if (!locallock->proclock)
2263 elog(ERROR, "failed to re-find shared proclock object");
2264 }
2265 LOCK_PRINT("LockRelease: found", lock, lockmode);
2266 proclock = locallock->proclock;
2267 PROCLOCK_PRINT("LockRelease: found", proclock);
2268
2269 /*
2270 * Double-check that we are actually holding a lock of the type we want to
2271 * release.
2272 */
2273 if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
2274 {
2275 PROCLOCK_PRINT("LockRelease: WRONGTYPE", proclock);
2276 LWLockRelease(partitionLock);
2277 elog(WARNING, "you don't own a lock of type %s",
2278 lockMethodTable->lockModeNames[lockmode]);
2279 RemoveLocalLock(locallock);
2280 return false;
2281 }
2282
2283 /*
2284 * Do the releasing. CleanUpLock will waken any now-wakable waiters.
2285 */
2286 wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
2287
2288 CleanUpLock(lock, proclock,
2289 lockMethodTable, locallock->hashcode,
2290 wakeupNeeded);
2291
2292 LWLockRelease(partitionLock);
2293
2294 RemoveLocalLock(locallock);
2295 return true;
2296}
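For orientation, a hedged caller-side sketch of the acquire/release pairing follows. Real callers normally go through the lmgr.c wrappers (LockRelationOid and friends); the relation OID variable below is hypothetical:

    LOCKTAG     tag;

    SET_LOCKTAG_RELATION(tag, MyDatabaseId, some_relid);      /* some_relid: hypothetical */
    (void) LockAcquire(&tag, AccessShareLock, false, false);  /* transaction-level, wait if needed */

    /* ... work with the relation ... */

    if (!LockRelease(&tag, AccessShareLock, false))
        elog(WARNING, "lock on relation was not held");
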
2297
2298/*
2299 * LockReleaseAll -- Release all locks of the specified lock method that
2300 * are held by the current process.
2301 *
2302 * Well, not necessarily *all* locks. The available behaviors are:
2303 * allLocks == true: release all locks including session locks.
2304 * allLocks == false: release all non-session locks.
2305 */
2306void
2307LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
2308{
2309 HASH_SEQ_STATUS status;
2310 LockMethod lockMethodTable;
2311 int i,
2312 numLockModes;
2313 LOCALLOCK *locallock;
2314 LOCK *lock;
2315 int partition;
2316 bool have_fast_path_lwlock = false;
2317
2318 if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2319 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2320 lockMethodTable = LockMethods[lockmethodid];
2321
2322#ifdef LOCK_DEBUG
2323 if (*(lockMethodTable->trace_flag))
2324 elog(LOG, "LockReleaseAll: lockmethod=%d", lockmethodid);
2325#endif
2326
2327 /*
2328 * Get rid of our fast-path VXID lock, if appropriate. Note that this is
2329 * the only way that the lock we hold on our own VXID can ever get
2330 * released: it is always and only released when a toplevel transaction
2331 * ends.
2332 */
2333 if (lockmethodid == DEFAULT_LOCKMETHOD)
2334 VirtualXactLockTableCleanup();
2335
2336 numLockModes = lockMethodTable->numLockModes;
2337
2338 /*
2339 * First we run through the locallock table and get rid of unwanted
2340 * entries, then we scan the process's proclocks and get rid of those. We
2341 * do this separately because we may have multiple locallock entries
2342 * pointing to the same proclock, and we daren't end up with any dangling
2343 * pointers. Fast-path locks are cleaned up during the locallock table
2344 * scan, though.
2345 */
2346 hash_seq_init(&status, LockMethodLocalHash);
2347
2348 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2349 {
2350 /*
2351 * If the LOCALLOCK entry is unused, something must've gone wrong
2352 * while trying to acquire this lock. Just forget the local entry.
2353 */
2354 if (locallock->nLocks == 0)
2355 {
2356 RemoveLocalLock(locallock);
2357 continue;
2358 }
2359
2360 /* Ignore items that are not of the lockmethod to be removed */
2361 if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
2362 continue;
2363
2364 /*
2365 * If we are asked to release all locks, we can just zap the entry.
2366 * Otherwise, must scan to see if there are session locks. We assume
2367 * there is at most one lockOwners entry for session locks.
2368 */
2369 if (!allLocks)
2370 {
2371 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
2372
2373 /* If session lock is above array position 0, move it down to 0 */
2374 for (i = 0; i < locallock->numLockOwners; i++)
2375 {
2376 if (lockOwners[i].owner == NULL)
2377 lockOwners[0] = lockOwners[i];
2378 else
2379 ResourceOwnerForgetLock(lockOwners[i].owner, locallock);
2380 }
2381
2382 if (locallock->numLockOwners > 0 &&
2383 lockOwners[0].owner == NULL &&
2384 lockOwners[0].nLocks > 0)
2385 {
2386 /* Fix the locallock to show just the session locks */
2387 locallock->nLocks = lockOwners[0].nLocks;
2388 locallock->numLockOwners = 1;
2389 /* We aren't deleting this locallock, so done */
2390 continue;
2391 }
2392 else
2393 locallock->numLockOwners = 0;
2394 }
2395
2396#ifdef USE_ASSERT_CHECKING
2397
2398 /*
2399 * Tuple locks are currently held only for short durations within a
2400 * transaction. Check that we didn't forget to release one.
2401 */
2402 if (LOCALLOCK_LOCKTAG(*locallock) == LOCKTAG_TUPLE && !allLocks)
2403 elog(WARNING, "tuple lock held at commit");
2404#endif
2405
2406 /*
2407 * If the lock or proclock pointers are NULL, this lock was taken via
2408 * the relation fast-path (and is not known to have been transferred).
2409 */
2410 if (locallock->proclock == NULL || locallock->lock == NULL)
2411 {
2412 LOCKMODE lockmode = locallock->tag.mode;
2413 Oid relid;
2414
2415 /* Verify that a fast-path lock is what we've got. */
2416 if (!EligibleForRelationFastPath(&locallock->tag.lock, lockmode))
2417 elog(PANIC, "locallock table corrupted");
2418
2419 /*
2420 * If we don't currently hold the LWLock that protects our
2421 * fast-path data structures, we must acquire it before attempting
2422 * to release the lock via the fast-path. We will continue to
2423 * hold the LWLock until we're done scanning the locallock table,
2424 * unless we hit a transferred fast-path lock. (XXX is this
2425 * really such a good idea? There could be a lot of entries ...)
2426 */
2427 if (!have_fast_path_lwlock)
2428 {
2429 LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
2430 have_fast_path_lwlock = true;
2431 }
2432
2433 /* Attempt fast-path release. */
2434 relid = locallock->tag.lock.locktag_field2;
2435 if (FastPathUnGrantRelationLock(relid, lockmode))
2436 {
2437 RemoveLocalLock(locallock);
2438 continue;
2439 }
2440
2441 /*
2442 * Our lock, originally taken via the fast path, has been
2443 * transferred to the main lock table. That's going to require
2444 * some extra work, so release our fast-path lock before starting.
2445 */
2446 LWLockRelease(&MyProc->fpInfoLock);
2447 have_fast_path_lwlock = false;
2448
2449 /*
2450 * Now dump the lock. We haven't got a pointer to the LOCK or
2451 * PROCLOCK in this case, so we have to handle this a bit
2452 * differently than a normal lock release. Unfortunately, this
2453 * requires an extra LWLock acquire-and-release cycle on the
2454 * partitionLock, but hopefully it shouldn't happen often.
2455 */
2456 LockRefindAndRelease(lockMethodTable, MyProc,
2457 &locallock->tag.lock, lockmode, false);
2458 RemoveLocalLock(locallock);
2459 continue;
2460 }
2461
2462 /* Mark the proclock to show we need to release this lockmode */
2463 if (locallock->nLocks > 0)
2464 locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
2465
2466 /* And remove the locallock hashtable entry */
2467 RemoveLocalLock(locallock);
2468 }
2469
2470 /* Done with the fast-path data structures */
2471 if (have_fast_path_lwlock)
2472 LWLockRelease(&MyProc->fpInfoLock);
2473
2474 /*
2475 * Now, scan each lock partition separately.
2476 */
2477 for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
2478 {
2479 LWLock *partitionLock;
2480 dlist_head *procLocks = &MyProc->myProcLocks[partition];
2481 dlist_mutable_iter proclock_iter;
2482
2483 partitionLock = LockHashPartitionLockByIndex(partition);
2484
2485 /*
2486 * If the proclock list for this partition is empty, we can skip
2487 * acquiring the partition lock. This optimization is trickier than
2488 * it looks, because another backend could be in process of adding
2489 * something to our proclock list due to promoting one of our
2490 * fast-path locks. However, any such lock must be one that we
2491 * decided not to delete above, so it's okay to skip it again now;
2492 * we'd just decide not to delete it again. We must, however, be
2493 * careful to re-fetch the list header once we've acquired the
2494 * partition lock, to be sure we have a valid, up-to-date pointer.
2495 * (There is probably no significant risk if pointer fetch/store is
2496 * atomic, but we don't wish to assume that.)
2497 *
2498 * XXX This argument assumes that the locallock table correctly
2499 * represents all of our fast-path locks. While allLocks mode
2500 * guarantees to clean up all of our normal locks regardless of the
2501 * locallock situation, we lose that guarantee for fast-path locks.
2502 * This is not ideal.
2503 */
2504 if (dlist_is_empty(procLocks))
2505 continue; /* needn't examine this partition */
2506
2507 LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2508
2509 dlist_foreach_modify(proclock_iter, procLocks)
2510 {
2511 PROCLOCK *proclock = dlist_container(PROCLOCK, procLink, proclock_iter.cur);
2512 bool wakeupNeeded = false;
2513
2514 Assert(proclock->tag.myProc == MyProc);
2515
2516 lock = proclock->tag.myLock;
2517
2518 /* Ignore items that are not of the lockmethod to be removed */
2519 if (LOCK_LOCKMETHOD(*lock) != lockmethodid)
2520 continue;
2521
2522 /*
2523 * In allLocks mode, force release of all locks even if locallock
2524 * table had problems
2525 */
2526 if (allLocks)
2527 proclock->releaseMask = proclock->holdMask;
2528 else
2529 Assert((proclock->releaseMask & ~proclock->holdMask) == 0);
2530
2531 /*
2532 * Ignore items that have nothing to be released, unless they have
2533 * holdMask == 0 and are therefore recyclable
2534 */
2535 if (proclock->releaseMask == 0 && proclock->holdMask != 0)
2536 continue;
2537
2538 PROCLOCK_PRINT("LockReleaseAll", proclock);
2539 LOCK_PRINT("LockReleaseAll", lock, 0);
2540 Assert(lock->nRequested >= 0);
2541 Assert(lock->nGranted >= 0);
2542 Assert(lock->nGranted <= lock->nRequested);
2543 Assert((proclock->holdMask & ~lock->grantMask) == 0);
2544
2545 /*
2546 * Release the previously-marked lock modes
2547 */
2548 for (i = 1; i <= numLockModes; i++)
2549 {
2550 if (proclock->releaseMask & LOCKBIT_ON(i))
2551 wakeupNeeded |= UnGrantLock(lock, i, proclock,
2552 lockMethodTable);
2553 }
2554 Assert((lock->nRequested >= 0) && (lock->nGranted >= 0));
2555 Assert(lock->nGranted <= lock->nRequested);
2556 LOCK_PRINT("LockReleaseAll: updated", lock, 0);
2557
2558 proclock->releaseMask = 0;
2559
2560 /* CleanUpLock will wake up waiters if needed. */
2561 CleanUpLock(lock, proclock,
2562 lockMethodTable,
2563 LockTagHashCode(&lock->tag),
2564 wakeupNeeded);
2565 } /* loop over PROCLOCKs within this partition */
2566
2567 LWLockRelease(partitionLock);
2568 } /* loop over partitions */
2569
2570#ifdef LOCK_DEBUG
2571 if (*(lockMethodTable->trace_flag))
2572 elog(LOG, "LockReleaseAll done");
2573#endif
2574}
2575
2576/*
2577 * LockReleaseSession -- Release all session locks of the specified lock method
2578 * that are held by the current process.
2579 */
2580void
2581LockReleaseSession(LOCKMETHODID lockmethodid)
2582{
2583 HASH_SEQ_STATUS status;
2584 LOCALLOCK *locallock;
2585
2586 if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2587 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2588
2589 hash_seq_init(&status, LockMethodLocalHash);
2590
2591 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2592 {
2593 /* Ignore items that are not of the specified lock method */
2594 if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
2595 continue;
2596
2597 ReleaseLockIfHeld(locallock, true);
2598 }
2599}
2600
2601/*
2602 * LockReleaseCurrentOwner
2603 * Release all locks belonging to CurrentResourceOwner
2604 *
2605 * If the caller knows what those locks are, it can pass them as an array.
2606 * That speeds up the call significantly, when a lot of locks are held.
2607 * Otherwise, pass NULL for locallocks, and we'll traverse through our hash
2608 * table to find them.
2609 */
2610void
2611LockReleaseCurrentOwner(LOCALLOCK **locallocks, int nlocks)
2612{
2613 if (locallocks == NULL)
2614 {
2615 HASH_SEQ_STATUS status;
2616 LOCALLOCK *locallock;
2617
2618 hash_seq_init(&status, LockMethodLocalHash);
2619
2620 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2621 ReleaseLockIfHeld(locallock, false);
2622 }
2623 else
2624 {
2625 int i;
2626
2627 for (i = nlocks - 1; i >= 0; i--)
2628 ReleaseLockIfHeld(locallocks[i], false);
2629 }
2630}
2631
2632/*
2633 * ReleaseLockIfHeld
2634 * Release any session-level locks on this lockable object if sessionLock
2635 * is true; else, release any locks held by CurrentResourceOwner.
2636 *
2637 * It is tempting to pass this a ResourceOwner pointer (or NULL for session
2638 * locks), but without refactoring LockRelease() we cannot support releasing
2639 * locks belonging to resource owners other than CurrentResourceOwner.
2640 * If we were to refactor, it'd be a good idea to fix it so we don't have to
2641 * do a hashtable lookup of the locallock, too. However, currently this
2642 * function isn't used heavily enough to justify refactoring for its
2643 * convenience.
2644 */
2645static void
2646ReleaseLockIfHeld(LOCALLOCK *locallock, bool sessionLock)
2647{
2648 ResourceOwner owner;
2649 LOCALLOCKOWNER *lockOwners;
2650 int i;
2651
2652 /* Identify owner for lock (must match LockRelease!) */
2653 if (sessionLock)
2654 owner = NULL;
2655 else
2656 owner = CurrentResourceOwner;
2657
2658 /* Scan to see if there are any locks belonging to the target owner */
2659 lockOwners = locallock->lockOwners;
2660 for (i = locallock->numLockOwners - 1; i >= 0; i--)
2661 {
2662 if (lockOwners[i].owner == owner)
2663 {
2664 Assert(lockOwners[i].nLocks > 0);
2665 if (lockOwners[i].nLocks < locallock->nLocks)
2666 {
2667 /*
2668 * We will still hold this lock after forgetting this
2669 * ResourceOwner.
2670 */
2671 locallock->nLocks -= lockOwners[i].nLocks;
2672 /* compact out unused slot */
2673 locallock->numLockOwners--;
2674 if (owner != NULL)
2675 ResourceOwnerForgetLock(owner, locallock);
2676 if (i < locallock->numLockOwners)
2677 lockOwners[i] = lockOwners[locallock->numLockOwners];
2678 }
2679 else
2680 {
2681 Assert(lockOwners[i].nLocks == locallock->nLocks);
2682 /* We want to call LockRelease just once */
2683 lockOwners[i].nLocks = 1;
2684 locallock->nLocks = 1;
2685 if (!LockRelease(&locallock->tag.lock,
2686 locallock->tag.mode,
2687 sessionLock))
2688 elog(WARNING, "ReleaseLockIfHeld: failed??");
2689 }
2690 break;
2691 }
2692 }
2693}
2694
2695/*
2696 * LockReassignCurrentOwner
2697 * Reassign all locks belonging to CurrentResourceOwner to belong
2698 * to its parent resource owner.
2699 *
2700 * If the caller knows what those locks are, it can pass them as an array.
2701 * That speeds up the call significantly, when a lot of locks are held
2702 * (e.g pg_dump with a large schema). Otherwise, pass NULL for locallocks,
2703 * and we'll traverse through our hash table to find them.
2704 */
2705void
2706LockReassignCurrentOwner(LOCALLOCK **locallocks, int nlocks)
2707{
2708 ResourceOwner parent = ResourceOwnerGetParent(CurrentResourceOwner);
2709
2710 Assert(parent != NULL);
2711
2712 if (locallocks == NULL)
2713 {
2714 HASH_SEQ_STATUS status;
2715 LOCALLOCK *locallock;
2716
2717 hash_seq_init(&status, LockMethodLocalHash);
2718
2719 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2720 LockReassignOwner(locallock, parent);
2721 }
2722 else
2723 {
2724 int i;
2725
2726 for (i = nlocks - 1; i >= 0; i--)
2727 LockReassignOwner(locallocks[i], parent);
2728 }
2729}
2730
2731/*
2732 * Subroutine of LockReassignCurrentOwner. Reassigns a given lock belonging to
2733 * CurrentResourceOwner to its parent.
2734 */
2735static void
2736LockReassignOwner(LOCALLOCK *locallock, ResourceOwner parent)
2737{
2738 LOCALLOCKOWNER *lockOwners;
2739 int i;
2740 int ic = -1;
2741 int ip = -1;
2742
2743 /*
2744 * Scan to see if there are any locks belonging to current owner or its
2745 * parent
2746 */
2747 lockOwners = locallock->lockOwners;
2748 for (i = locallock->numLockOwners - 1; i >= 0; i--)
2749 {
2750 if (lockOwners[i].owner == CurrentResourceOwner)
2751 ic = i;
2752 else if (lockOwners[i].owner == parent)
2753 ip = i;
2754 }
2755
2756 if (ic < 0)
2757 return; /* no current locks */
2758
2759 if (ip < 0)
2760 {
2761 /* Parent has no slot, so just give it the child's slot */
2762 lockOwners[ic].owner = parent;
2763 ResourceOwnerRememberLock(parent, locallock);
2764 }
2765 else
2766 {
2767 /* Merge child's count with parent's */
2768 lockOwners[ip].nLocks += lockOwners[ic].nLocks;
2769 /* compact out unused slot */
2770 locallock->numLockOwners--;
2771 if (ic < locallock->numLockOwners)
2772 lockOwners[ic] = lockOwners[locallock->numLockOwners];
2773 }
2774 ResourceOwnerForgetLock(CurrentResourceOwner, locallock);
2775}
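The "compact out unused slot" step here follows the same idiom used in LockRelease and ReleaseLockIfHeld: the lockOwners array is unordered, so the last entry is simply moved into the vacated position. In isolation the pattern is just:

    /* remove element i from an unordered array of n entries */
    n--;
    if (i < n)
        array[i] = array[n];
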
2776
2777/*
2778 * FastPathGrantRelationLock
2779 * Grant lock using per-backend fast-path array, if there is space.
2780 */
2781static bool
2782FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode)
2783{
2784 uint32 i;
2785 uint32 unused_slot = FastPathLockSlotsPerBackend();
2786
2787 /* fast-path group the lock belongs to */
2788 uint32 group = FAST_PATH_REL_GROUP(relid);
2789
2790 /* Scan for existing entry for this relid, remembering empty slot. */
2791 for (i = 0; i < FP_LOCK_SLOTS_PER_GROUP; i++)
2792 {
2793 /* index into the whole per-backend array */
2794 uint32 f = FAST_PATH_SLOT(group, i);
2795
2796 if (FAST_PATH_GET_BITS(MyProc, f) == 0)
2797 unused_slot = f;
2798 else if (MyProc->fpRelId[f] == relid)
2799 {
2800 Assert(!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode));
2801 FAST_PATH_SET_LOCKMODE(MyProc, f, lockmode);
2802 return true;
2803 }
2804 }
2805
2806 /* If no existing entry, use any empty slot. */
2807 if (unused_slot < FastPathLockSlotsPerBackend())
2808 {
2809 MyProc->fpRelId[unused_slot] = relid;
2810 FAST_PATH_SET_LOCKMODE(MyProc, unused_slot, lockmode);
2811 ++FastPathLocalUseCounts[group];
2812 return true;
2813 }
2814
2815 /* No existing entry, and no empty slot. */
2816 return false;
2817}
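A self-contained toy model of the scan above may help. It assumes the group is chosen by a simple modulo on the relation OID and that slots are laid out group after group; the real mapping is done by the FAST_PATH_REL_GROUP and FAST_PATH_SLOT macros, and the names and types below are invented for the illustration:

    #include <stdbool.h>

    #define TOY_GROUPS          4
    #define TOY_SLOTS_PER_GROUP 16

    typedef struct { unsigned relid; unsigned lockbits; } ToySlot;
    static ToySlot toy_slots[TOY_GROUPS * TOY_SLOTS_PER_GROUP];

    static bool
    toy_grant(unsigned relid, unsigned lockbit)
    {
        unsigned group = relid % TOY_GROUPS;                /* cf. FAST_PATH_REL_GROUP */
        int      unused = -1;

        for (unsigned i = 0; i < TOY_SLOTS_PER_GROUP; i++)
        {
            unsigned f = group * TOY_SLOTS_PER_GROUP + i;   /* cf. FAST_PATH_SLOT */

            if (toy_slots[f].lockbits == 0)
                unused = (int) f;                           /* remember a reusable slot */
            else if (toy_slots[f].relid == relid)
            {
                toy_slots[f].lockbits |= lockbit;           /* existing entry: add the mode bit */
                return true;
            }
        }
        if (unused >= 0)
        {
            toy_slots[unused].relid = relid;
            toy_slots[unused].lockbits = lockbit;
            return true;
        }
        return false;       /* group full: caller falls back to the main lock table */
    }
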
2818
2819/*
2820 * FastPathUnGrantRelationLock
2821 * Release fast-path lock, if present. Update backend-private local
2822 * use count, while we're at it.
2823 */
2824static bool
2825FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode)
2826{
2827 uint32 i;
2828 bool result = false;
2829
2830 /* fast-path group the lock belongs to */
2831 uint32 group = FAST_PATH_REL_GROUP(relid);
2832
2833 FastPathLocalUseCounts[group] = 0;
2834 for (i = 0; i < FP_LOCK_SLOTS_PER_GROUP; i++)
2835 {
2836 /* index into the whole per-backend array */
2837 uint32 f = FAST_PATH_SLOT(group, i);
2838
2839 if (MyProc->fpRelId[f] == relid
2840 && FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
2841 {
2842 Assert(!result);
2843 FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
2844 result = true;
2845 /* we continue iterating so as to update FastPathLocalUseCount */
2846 }
2847 if (FAST_PATH_GET_BITS(MyProc, f) != 0)
2848 ++FastPathLocalUseCounts[group];
2849 }
2850 return result;
2851}
2852
2853/*
2854 * FastPathTransferRelationLocks
2855 * Transfer locks matching the given lock tag from per-backend fast-path
2856 * arrays to the shared hash table.
2857 *
2858 * Returns true if successful, false if ran out of shared memory.
2859 */
2860static bool
2861FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag,
2862 uint32 hashcode)
2863{
2864 LWLock *partitionLock = LockHashPartitionLock(hashcode);
2865 Oid relid = locktag->locktag_field2;
2866 uint32 i;
2867
2868 /* fast-path group the lock belongs to */
2869 uint32 group = FAST_PATH_REL_GROUP(relid);
2870
2871 /*
2872 * Every PGPROC that can potentially hold a fast-path lock is present in
2873 * ProcGlobal->allProcs. Prepared transactions are not, but any
2874 * outstanding fast-path locks held by prepared transactions are
2875 * transferred to the main lock table.
2876 */
2877 for (i = 0; i < ProcGlobal->allProcCount; i++)
2878 {
2879 PGPROC *proc = &ProcGlobal->allProcs[i];
2880 uint32 j;
2881
2882 LWLockAcquire(&proc->fpInfoLock, LW_EXCLUSIVE);
2883
2884 /*
2885 * If the target backend isn't referencing the same database as the
2886 * lock, then we needn't examine the individual relation IDs at all;
2887 * none of them can be relevant.
2888 *
2889 * proc->databaseId is set at backend startup time and never changes
2890 * thereafter, so it might be safe to perform this test before
2891 * acquiring &proc->fpInfoLock. In particular, it's certainly safe to
2892 * assume that if the target backend holds any fast-path locks, it
2893 * must have performed a memory-fencing operation (in particular, an
2894 * LWLock acquisition) since setting proc->databaseId. However, it's
2895 * less clear that our backend is certain to have performed a memory
2896 * fencing operation since the other backend set proc->databaseId. So
2897 * for now, we test it after acquiring the LWLock just to be safe.
2898 *
2899 * Also skip groups without any registered fast-path locks.
2900 */
2901 if (proc->databaseId != locktag->locktag_field1 ||
2902 proc->fpLockBits[group] == 0)
2903 {
2904 LWLockRelease(&proc->fpInfoLock);
2905 continue;
2906 }
2907
2908 for (j = 0; j < FP_LOCK_SLOTS_PER_GROUP; j++)
2909 {
2910 uint32 lockmode;
2911
2912 /* index into the whole per-backend array */
2913 uint32 f = FAST_PATH_SLOT(group, j);
2914
2915 /* Look for an allocated slot matching the given relid. */
2916 if (relid != proc->fpRelId[f] || FAST_PATH_GET_BITS(proc, f) == 0)
2917 continue;
2918
2919 /* Find or create lock object. */
2920 LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2921 for (lockmode = FAST_PATH_LOCKNUMBER_OFFSET;
2922 lockmode < FAST_PATH_LOCKNUMBER_OFFSET + FAST_PATH_BITS_PER_SLOT;
2923 ++lockmode)
2924 {
2925 PROCLOCK *proclock;
2926
2927 if (!FAST_PATH_CHECK_LOCKMODE(proc, f, lockmode))
2928 continue;
2929 proclock = SetupLockInTable(lockMethodTable, proc, locktag,
2930 hashcode, lockmode);
2931 if (!proclock)
2932 {
2933 LWLockRelease(partitionLock);
2934 LWLockRelease(&proc->fpInfoLock);
2935 return false;
2936 }
2937 GrantLock(proclock->tag.myLock, proclock, lockmode);
2938 FAST_PATH_CLEAR_LOCKMODE(proc, f, lockmode);
2939 }
2940 LWLockRelease(partitionLock);
2941
2942 /* No need to examine remaining slots. */
2943 break;
2944 }
2945 LWLockRelease(&proc->fpInfoLock);
2946 }
2947 return true;
2948}
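This transfer is what keeps the fast path safe: before any backend can be granted a mode that conflicts with fast-path-eligible locks (a "strong" lock), the acquisition path moves every matching fast-path entry into the shared table so that normal conflict checking can see it. A hedged sketch of that calling pattern, illustrative rather than the actual LockAcquire code:

    if (ConflictsWithRelationFastPath(locktag, lockmode))
    {
        uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);

        BeginStrongLockAcquire(locallock, fasthashcode);   /* block new fast-path grants */
        if (!FastPathTransferRelationLocks(lockMethodTable, locktag, hashcode))
        {
            AbortStrongLockAcquire();
            /* report "out of shared memory" to the caller */
        }
    }
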
2949
2950/*
2951 * FastPathGetRelationLockEntry
2952 * Return the PROCLOCK for a lock originally taken via the fast-path,
2953 * transferring it to the primary lock table if necessary.
2954 *
2955 * Note: caller takes care of updating the locallock object.
2956 */
2957static PROCLOCK *
2958FastPathGetRelationLockEntry(LOCALLOCK *locallock)
2959{
2960 LockMethod lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];
2961 LOCKTAG *locktag = &locallock->tag.lock;
2962 PROCLOCK *proclock = NULL;
2963 LWLock *partitionLock = LockHashPartitionLock(locallock->hashcode);
2964 Oid relid = locktag->locktag_field2;
2965 uint32 i,
2966 group;
2967
2968 /* fast-path group the lock belongs to */
2969 group = FAST_PATH_REL_GROUP(relid);
2970
2971 LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
2972
2973 for (i = 0; i < FP_LOCK_SLOTS_PER_GROUP; i++)
2974 {
2975 uint32 lockmode;
2976
2977 /* index into the whole per-backend array */
2978 uint32 f = FAST_PATH_SLOT(group, i);
2979
2980 /* Look for an allocated slot matching the given relid. */
2981 if (relid != MyProc->fpRelId[f] || FAST_PATH_GET_BITS(MyProc, f) == 0)
2982 continue;
2983
2984 /* If we don't have a lock of the given mode, forget it! */
2985 lockmode = locallock->tag.mode;
2986 if (!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
2987 break;
2988
2989 /* Find or create lock object. */
2990 LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2991
2992 proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
2993 locallock->hashcode, lockmode);
2994 if (!proclock)
2995 {
2996 LWLockRelease(partitionLock);
2997 LWLockRelease(&MyProc->fpInfoLock);
2998 ereport(ERROR,
2999 (errcode(ERRCODE_OUT_OF_MEMORY),
3000 errmsg("out of shared memory"),
3001 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
3002 }
3003 GrantLock(proclock->tag.myLock, proclock, lockmode);
3004 FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
3005
3006 LWLockRelease(partitionLock);
3007
3008 /* No need to examine remaining slots. */
3009 break;
3010 }
3011
3012 LWLockRelease(&MyProc->fpInfoLock);
3013
3014 /* Lock may have already been transferred by some other backend. */
3015 if (proclock == NULL)
3016 {
3017 LOCK *lock;
3018 PROCLOCKTAG proclocktag;
3019 uint32 proclock_hashcode;
3020
3021 LWLockAcquire(partitionLock, LW_SHARED);
3022
3023 lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
3024 locktag,
3025 locallock->hashcode,
3026 HASH_FIND,
3027 NULL);
3028 if (!lock)
3029 elog(ERROR, "failed to re-find shared lock object");
3030
3031 proclocktag.myLock = lock;
3032 proclocktag.myProc = MyProc;
3033
3034 proclock_hashcode = ProcLockHashCode(&proclocktag, locallock->hashcode);
3035 proclock = (PROCLOCK *)
3036 hash_search_with_hash_value(LockMethodProcLockHash,
3037 &proclocktag,
3038 proclock_hashcode,
3039 HASH_FIND,
3040 NULL);
3041 if (!proclock)
3042 elog(ERROR, "failed to re-find shared proclock object");
3043 LWLockRelease(partitionLock);
3044 }
3045
3046 return proclock;
3047}
3048
3049/*
3050 * GetLockConflicts
3051 * Get an array of VirtualTransactionIds of xacts currently holding locks
3052 * that would conflict with the specified lock/lockmode.
3053 * xacts merely awaiting such a lock are NOT reported.
3054 *
3055 * The result array is palloc'd and is terminated with an invalid VXID.
3056 * *countp, if not null, is updated to the number of items set.
3057 *
3058 * Of course, the result could be out of date by the time it's returned, so
3059 * use of this function has to be thought about carefully. Similarly, a
3060 * PGPROC with no "lxid" will be considered non-conflicting regardless of any
3061 * lock it holds. Existing callers don't care about a locker after that
3062 * locker's pg_xact updates complete. CommitTransaction() clears "lxid" after
3063 * pg_xact updates and before releasing locks.
3064 *
3065 * Note we never include the current xact's vxid in the result array,
3066 * since an xact never blocks itself.
3067 */
3068VirtualTransactionId *
3069GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
3070{
3071 static VirtualTransactionId *vxids;
3072 LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
3073 LockMethod lockMethodTable;
3074 LOCK *lock;
3075 LOCKMASK conflictMask;
3076 dlist_iter proclock_iter;
3077 PROCLOCK *proclock;
3078 uint32 hashcode;
3079 LWLock *partitionLock;
3080 int count = 0;
3081 int fast_count = 0;
3082
3083 if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
3084 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
3085 lockMethodTable = LockMethods[lockmethodid];
3086 if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
3087 elog(ERROR, "unrecognized lock mode: %d", lockmode);
3088
3089 /*
3090 * Allocate memory to store results, and fill with InvalidVXID. We only
3091 * need enough space for MaxBackends + max_prepared_xacts + a terminator.
3092 * InHotStandby allocate once in TopMemoryContext.
3093 */
3094 if (InHotStandby)
3095 {
3096 if (vxids == NULL)
3097 vxids = (VirtualTransactionId *)
3098 MemoryContextAlloc(TopMemoryContext,
3099 sizeof(VirtualTransactionId) *
3100 (MaxBackends + max_prepared_xacts + 1));
3101 }
3102 else
3104
3105 /* Compute hash code and partition lock, and look up conflicting modes. */
3106 hashcode = LockTagHashCode(locktag);
3107 partitionLock = LockHashPartitionLock(hashcode);
3108 conflictMask = lockMethodTable->conflictTab[lockmode];
3109
3110 /*
3111 * Fast path locks might not have been entered in the primary lock table.
3112 * If the lock we're dealing with could conflict with such a lock, we must
3113 * examine each backend's fast-path array for conflicts.
3114 */
3115 if (ConflictsWithRelationFastPath(locktag, lockmode))
3116 {
3117 int i;
3118 Oid relid = locktag->locktag_field2;
3119 VirtualTransactionId vxid;
3120
3121 /* fast-path group the lock belongs to */
3122 uint32 group = FAST_PATH_REL_GROUP(relid);
3123
3124 /*
3125 * Iterate over relevant PGPROCs. Anything held by a prepared
3126 * transaction will have been transferred to the primary lock table,
3127 * so we need not worry about those. This is all a bit fuzzy, because
3128 * new locks could be taken after we've visited a particular
3129 * partition, but the callers had better be prepared to deal with that
3130 * anyway, since the locks could equally well be taken between the
3131 * time we return the value and the time the caller does something
3132 * with it.
3133 */
3134 for (i = 0; i < ProcGlobal->allProcCount; i++)
3135 {
3136 PGPROC *proc = &ProcGlobal->allProcs[i];
3137 uint32 j;
3138
3139 /* A backend never blocks itself */
3140 if (proc == MyProc)
3141 continue;
3142
3143 LWLockAcquire(&proc->fpInfoLock, LW_SHARED);
3144
3145 /*
3146 * If the target backend isn't referencing the same database as
3147 * the lock, then we needn't examine the individual relation IDs
3148 * at all; none of them can be relevant.
3149 *
3150 * See FastPathTransferRelationLocks() for discussion of why we do
3151 * this test after acquiring the lock.
3152 *
3153 * Also skip groups without any registered fast-path locks.
3154 */
3155 if (proc->databaseId != locktag->locktag_field1 ||
3156 proc->fpLockBits[group] == 0)
3157 {
3158 LWLockRelease(&proc->fpInfoLock);
3159 continue;
3160 }
3161
3162 for (j = 0; j < FP_LOCK_SLOTS_PER_GROUP; j++)
3163 {
3164 uint32 lockmask;
3165
3166 /* index into the whole per-backend array */
3167 uint32 f = FAST_PATH_SLOT(group, j);
3168
3169 /* Look for an allocated slot matching the given relid. */
3170 if (relid != proc->fpRelId[f])
3171 continue;
3172 lockmask = FAST_PATH_GET_BITS(proc, f);
3173 if (!lockmask)
3174 continue;
3175 lockmask <<= FAST_PATH_LOCKNUMBER_OFFSET;
3176
3177 /*
3178 * There can only be one entry per relation, so if we found it
3179 * and it doesn't conflict, we can skip the rest of the slots.
3180 */
3181 if ((lockmask & conflictMask) == 0)
3182 break;
3183
3184 /* Conflict! */
3185 GET_VXID_FROM_PGPROC(vxid, *proc);
3186
3187 if (VirtualTransactionIdIsValid(vxid))
3188 vxids[count++] = vxid;
3189 /* else, xact already committed or aborted */
3190
3191 /* No need to examine remaining slots. */
3192 break;
3193 }
3194
3195 LWLockRelease(&proc->fpInfoLock);
3196 }
3197 }
3198
3199 /* Remember how many fast-path conflicts we found. */
3200 fast_count = count;
3201
3202 /*
3203 * Look up the lock object matching the tag.
3204 */
3205 LWLockAcquire(partitionLock, LW_SHARED);
3206
3207 lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
3208 locktag,
3209 hashcode,
3210 HASH_FIND,
3211 NULL);
3212 if (!lock)
3213 {
3214 /*
3215 * If the lock object doesn't exist, there is nothing holding a lock
3216 * on this lockable object.
3217 */
3218 LWLockRelease(partitionLock);
3219 vxids[count].procNumber = INVALID_PROC_NUMBER;
3220 vxids[count].localTransactionId = InvalidLocalTransactionId;
3221 if (countp)
3222 *countp = count;
3223 return vxids;
3224 }
3225
3226 /*
3227 * Examine each existing holder (or awaiter) of the lock.
3228 */
3229 dlist_foreach(proclock_iter, &lock->procLocks)
3230 {
3231 proclock = dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
3232
3233 if (conflictMask & proclock->holdMask)
3234 {
3235 PGPROC *proc = proclock->tag.myProc;
3236
3237 /* A backend never blocks itself */
3238 if (proc != MyProc)
3239 {
3240 VirtualTransactionId vxid;
3241
3242 GET_VXID_FROM_PGPROC(vxid, *proc);
3243
3244 if (VirtualTransactionIdIsValid(vxid))
3245 {
3246 int i;
3247
3248 /* Avoid duplicate entries. */
3249 for (i = 0; i < fast_count; ++i)
3250 if (VirtualTransactionIdEquals(vxids[i], vxid))
3251 break;
3252 if (i >= fast_count)
3253 vxids[count++] = vxid;
3254 }
3255 /* else, xact already committed or aborted */
3256 }
3257 }
3258 }
3259
3260 LWLockRelease(partitionLock);
3261
3262 if (count > MaxBackends + max_prepared_xacts) /* should never happen */
3263 elog(PANIC, "too many conflicting locks found");
3264
3265 vxids[count].procNumber = INVALID_PROC_NUMBER;
3266 vxids[count].localTransactionId = InvalidLocalTransactionId;
3267 if (countp)
3268 *countp = count;
3269 return vxids;
3270}
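A typical consumer walks the returned array up to the invalid-VXID terminator and waits each conflicting transaction out. A hedged sketch, assuming tag has already been filled in (WaitForLockers in lmgr.c is the real caller of this pattern):

    VirtualTransactionId *vxids = GetLockConflicts(&tag, AccessExclusiveLock, NULL);

    for (int i = 0; VirtualTransactionIdIsValid(vxids[i]); i++)
        (void) VirtualXactLock(vxids[i], true);   /* sleep until that xact is done */
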
3271
3272/*
3273 * Find a lock in the shared lock table and release it. It is the caller's
3274 * responsibility to verify that this is a sane thing to do. (For example, it
3275 * would be bad to release a lock here if there might still be a LOCALLOCK
3276 * object with pointers to it.)
3277 *
3278 * We currently use this in two situations: first, to release locks held by
3279 * prepared transactions on commit (see lock_twophase_postcommit); and second,
3280 * to release locks taken via the fast-path, transferred to the main hash
3281 * table, and then released (see LockReleaseAll).
3282 */
3283static void
3284LockRefindAndRelease(LockMethod lockMethodTable, PGPROC *proc,
3285 LOCKTAG *locktag, LOCKMODE lockmode,
3286 bool decrement_strong_lock_count)
3287{
3288 LOCK *lock;
3289 PROCLOCK *proclock;
3290 PROCLOCKTAG proclocktag;
3291 uint32 hashcode;
3292 uint32 proclock_hashcode;
3293 LWLock *partitionLock;
3294 bool wakeupNeeded;
3295
3296 hashcode = LockTagHashCode(locktag);
3297 partitionLock = LockHashPartitionLock(hashcode);
3298
3299 LWLockAcquire(partitionLock, LW_EXCLUSIVE);
3300
3301 /*
3302 * Re-find the lock object (it had better be there).
3303 */
3304 lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
3305 locktag,
3306 hashcode,
3307 HASH_FIND,
3308 NULL);
3309 if (!lock)
3310 elog(PANIC, "failed to re-find shared lock object");
3311
3312 /*
3313 * Re-find the proclock object (ditto).
3314 */
3315 proclocktag.myLock = lock;
3316 proclocktag.myProc = proc;
3317
3318 proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
3319
3320 proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
3321 &proclocktag,
3322 proclock_hashcode,
3323 HASH_FIND,
3324 NULL);
3325 if (!proclock)
3326 elog(PANIC, "failed to re-find shared proclock object");
3327
3328 /*
3329 * Double-check that we are actually holding a lock of the type we want to
3330 * release.
3331 */
3332 if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
3333 {
3334 PROCLOCK_PRINT("lock_twophase_postcommit: WRONGTYPE", proclock);
3335 LWLockRelease(partitionLock);
3336 elog(WARNING, "you don't own a lock of type %s",
3337 lockMethodTable->lockModeNames[lockmode]);
3338 return;
3339 }
3340
3341 /*
3342 * Do the releasing. CleanUpLock will waken any now-wakable waiters.
3343 */
3344 wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
3345
3346 CleanUpLock(lock, proclock,
3347 lockMethodTable, hashcode,
3348 wakeupNeeded);
3349
3350 LWLockRelease(partitionLock);
3351
3352 /*
3353 * Decrement strong lock count. This logic is needed only for 2PC.
3354 */
3355 if (decrement_strong_lock_count
3356 && ConflictsWithRelationFastPath(locktag, lockmode))
3357 {
3358 uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
3359
3360 SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
3361 Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
3362 FastPathStrongRelationLocks->count[fasthashcode]--;
3363 SpinLockRelease(&FastPathStrongRelationLocks->mutex);
3364 }
3365}
3366
3367/*
3368 * CheckForSessionAndXactLocks
3369 * Check to see if transaction holds both session-level and xact-level
3370 * locks on the same object; if so, throw an error.
3371 *
3372 * If we have both session- and transaction-level locks on the same object,
3373 * PREPARE TRANSACTION must fail. This should never happen with regular
3374 * locks, since we only take those at session level in some special operations
3375 * like VACUUM. It's possible to hit this with advisory locks, though.
3376 *
3377 * It would be nice if we could keep the session hold and give away the
3378 * transactional hold to the prepared xact. However, that would require two
3379 * PROCLOCK objects, and we cannot be sure that another PROCLOCK will be
3380 * available when it comes time for PostPrepare_Locks to do the deed.
3381 * So for now, we error out while we can still do so safely.
3382 *
3383 * Since the LOCALLOCK table stores a separate entry for each lockmode,
3384 * we can't implement this check by examining LOCALLOCK entries in isolation.
3385 * We must build a transient hashtable that is indexed by locktag only.
3386 */
3387static void
3388CheckForSessionAndXactLocks(void)
3389{
3390 typedef struct
3391 {
3392 LOCKTAG lock; /* identifies the lockable object */
3393 bool sessLock; /* is any lockmode held at session level? */
3394 bool xactLock; /* is any lockmode held at xact level? */
3395 } PerLockTagEntry;
3396
3397 HASHCTL hash_ctl;
3398 HTAB *lockhtab;
3399 HASH_SEQ_STATUS status;
3400 LOCALLOCK *locallock;
3401
3402 /* Create a local hash table keyed by LOCKTAG only */
3403 hash_ctl.keysize = sizeof(LOCKTAG);
3404 hash_ctl.entrysize = sizeof(PerLockTagEntry);
3405 hash_ctl.hcxt = CurrentMemoryContext;
3406
3407 lockhtab = hash_create("CheckForSessionAndXactLocks table",
3408 256, /* arbitrary initial size */
3409 &hash_ctl,
3410 HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
3411
3412 /* Scan local lock table to find entries for each LOCKTAG */
3413 hash_seq_init(&status, LockMethodLocalHash);
3414
3415 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3416 {
3417 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3418 PerLockTagEntry *hentry;
3419 bool found;
3420 int i;
3421
3422 /*
3423 * Ignore VXID locks. We don't want those to be held by prepared
3424 * transactions, since they aren't meaningful after a restart.
3425 */
3426 if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3427 continue;
3428
3429 /* Ignore it if we don't actually hold the lock */
3430 if (locallock->nLocks <= 0)
3431 continue;
3432
3433 /* Otherwise, find or make an entry in lockhtab */
3434 hentry = (PerLockTagEntry *) hash_search(lockhtab,
3435 &locallock->tag.lock,
3436 HASH_ENTER, &found);
3437 if (!found) /* initialize, if newly created */
3438 hentry->sessLock = hentry->xactLock = false;
3439
3440 /* Scan to see if we hold lock at session or xact level or both */
3441 for (i = locallock->numLockOwners - 1; i >= 0; i--)
3442 {
3443 if (lockOwners[i].owner == NULL)
3444 hentry->sessLock = true;
3445 else
3446 hentry->xactLock = true;
3447 }
3448
3449 /*
3450 * We can throw error immediately when we see both types of locks; no
3451 * need to wait around to see if there are more violations.
3452 */
3453 if (hentry->sessLock && hentry->xactLock)
3454 ereport(ERROR,
3455 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3456 errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3457 }
3458
3459 /* Success, so clean up */
3460 hash_destroy(lockhtab);
3461}
3462
3463/*
3464 * AtPrepare_Locks
3465 * Do the preparatory work for a PREPARE: make 2PC state file records
3466 * for all locks currently held.
3467 *
3468 * Session-level locks are ignored, as are VXID locks.
3469 *
3470 * For the most part, we don't need to touch shared memory for this ---
3471 * all the necessary state information is in the locallock table.
3472 * Fast-path locks are an exception, however: we move any such locks to
3473 * the main table before allowing PREPARE TRANSACTION to succeed.
3474 */
3475void
3476AtPrepare_Locks(void)
3477{
3478 HASH_SEQ_STATUS status;
3479 LOCALLOCK *locallock;
3480
3481 /* First, verify there aren't locks of both xact and session level */
3482 CheckForSessionAndXactLocks();
3483
3484 /* Now do the per-locallock cleanup work */
3485 hash_seq_init(&status, LockMethodLocalHash);
3486
3487 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3488 {
3489 TwoPhaseLockRecord record;
3490 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3491 bool haveSessionLock;
3492 bool haveXactLock;
3493 int i;
3494
3495 /*
3496 * Ignore VXID locks. We don't want those to be held by prepared
3497 * transactions, since they aren't meaningful after a restart.
3498 */
3499 if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3500 continue;
3501
3502 /* Ignore it if we don't actually hold the lock */
3503 if (locallock->nLocks <= 0)
3504 continue;
3505
3506 /* Scan to see whether we hold it at session or transaction level */
3507 haveSessionLock = haveXactLock = false;
3508 for (i = locallock->numLockOwners - 1; i >= 0; i--)
3509 {
3510 if (lockOwners[i].owner == NULL)
3511 haveSessionLock = true;
3512 else
3513 haveXactLock = true;
3514 }
3515
3516 /* Ignore it if we have only session lock */
3517 if (!haveXactLock)
3518 continue;
3519
3520 /* This can't happen, because we already checked it */
3521 if (haveSessionLock)
3522 ereport(ERROR,
3523 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3524 errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3525
3526 /*
3527 * If the local lock was taken via the fast-path, we need to move it
3528 * to the primary lock table, or just get a pointer to the existing
3529 * primary lock table entry if by chance it's already been
3530 * transferred.
3531 */
3532 if (locallock->proclock == NULL)
3533 {
3534 locallock->proclock = FastPathGetRelationLockEntry(locallock);
3535 locallock->lock = locallock->proclock->tag.myLock;
3536 }
3537
3538 /*
3539 * Arrange to not release any strong lock count held by this lock
3540 * entry. We must retain the count until the prepared transaction is
3541 * committed or rolled back.
3542 */
3543 locallock->holdsStrongLockCount = false;
3544
3545 /*
3546 * Create a 2PC record.
3547 */
3548 memcpy(&(record.locktag), &(locallock->tag.lock), sizeof(LOCKTAG));
3549 record.lockmode = locallock->tag.mode;
3550
3551 RegisterTwoPhaseRecord(TWOPHASE_RM_LOCK_ID, 0,
3552 &record, sizeof(TwoPhaseLockRecord));
3553 }
3554}
3555
3556/*
3557 * PostPrepare_Locks
3558 * Clean up after successful PREPARE
3559 *
3560 * Here, we want to transfer ownership of our locks to a dummy PGPROC
3561 * that's now associated with the prepared transaction, and we want to
3562 * clean out the corresponding entries in the LOCALLOCK table.
3563 *
3564 * Note: by removing the LOCALLOCK entries, we are leaving dangling
3565 * pointers in the transaction's resource owner. This is OK at the
3566 * moment since resowner.c doesn't try to free locks retail at a toplevel
3567 * transaction commit or abort. We could alternatively zero out nLocks
3568 * and leave the LOCALLOCK entries to be garbage-collected by LockReleaseAll,
3569 * but that probably costs more cycles.
3570 */
3571void
3572PostPrepare_Locks(FullTransactionId fxid)
3573{
3574 PGPROC *newproc = TwoPhaseGetDummyProc(fxid, false);
3575 HASH_SEQ_STATUS status;
3576 LOCALLOCK *locallock;
3577 LOCK *lock;
3578 PROCLOCK *proclock;
3579 PROCLOCKTAG proclocktag;
3580 int partition;
3581
3582 /* Can't prepare a lock group follower. */
3583 Assert(MyProc->lockGroupLeader == NULL ||
3584 MyProc->lockGroupLeader == MyProc);
3585
3586 /* This is a critical section: any error means big trouble */
3587 START_CRIT_SECTION();
3588
3589 /*
3590 * First we run through the locallock table and get rid of unwanted
3591 * entries, then we scan the process's proclocks and transfer them to the
3592 * target proc.
3593 *
3594 * We do this separately because we may have multiple locallock entries
3595 * pointing to the same proclock, and we daren't end up with any dangling
3596 * pointers.
3597 */
3598 hash_seq_init(&status, LockMethodLocalHash);
3599
3600 while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3601 {
3602 LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3603 bool haveSessionLock;
3604 bool haveXactLock;
3605 int i;
3606
3607 if (locallock->proclock == NULL || locallock->lock == NULL)
3608 {
3609 /*
3610 * We must've run out of shared memory while trying to set up this
3611 * lock. Just forget the local entry.
3612 */
3613 Assert(locallock->nLocks == 0);
3614 RemoveLocalLock(locallock);
3615 continue;
3616 }
3617
3618 /* Ignore VXID locks */
3619 if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3620 continue;
3621
3622 /* Scan to see whether we hold it at session or transaction level */
3623 haveSessionLock = haveXactLock = false;
3624 for (i = locallock->numLockOwners - 1; i >= 0; i--)
3625 {
3626 if (lockOwners[i].owner == NULL)
3627 haveSessionLock = true;
3628 else
3629 haveXactLock = true;
3630 }
3631
3632 /* Ignore it if we have only session lock */
3633 if (!haveXactLock)
3634 continue;
3635
3636 /* This can't happen, because we already checked it */
3637 if (haveSessionLock)
3638 ereport(PANIC,
3639 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3640 errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3641
3642 /* Mark the proclock to show we need to release this lockmode */
3643 if (locallock->nLocks > 0)
3644 locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
3645
3646 /* And remove the locallock hashtable entry */
3647 RemoveLocalLock(locallock);
3648 }
3649
3650 /*
3651 * Now, scan each lock partition separately.
3652 */
3653 for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
3654 {
3655 LWLock *partitionLock;
3656 dlist_head *procLocks = &(MyProc->myProcLocks[partition]);
3657 dlist_mutable_iter proclock_iter;
3658
3659 partitionLock = LockHashPartitionLockByIndex(partition);
3660
3661 /*
3662 * If the proclock list for this partition is empty, we can skip
3663 * acquiring the partition lock. This optimization is safer than the
3664 * situation in LockReleaseAll, because we got rid of any fast-path
3665 * locks during AtPrepare_Locks, so there cannot be any case where
3666 * another backend is adding something to our lists now. For safety,
3667 * though, we code this the same way as in LockReleaseAll.
3668 */
3669 if (dlist_is_empty(procLocks))
3670 continue; /* needn't examine this partition */
3671
3672 LWLockAcquire(partitionLock, LW_EXCLUSIVE);
3673
3674 dlist_foreach_modify(proclock_iter, procLocks)
3675 {
3676 proclock = dlist_container(PROCLOCK, procLink, proclock_iter.cur);
3677
3678 Assert(proclock->tag.myProc == MyProc);
3679
3680 lock = proclock->tag.myLock;
3681
3682 /* Ignore VXID locks */
3683 if (lock->tag.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3684 continue;
3685
3686 PROCLOCK_PRINT("PostPrepare_Locks", proclock);
3687 LOCK_PRINT("PostPrepare_Locks", lock, 0);
3688 Assert(lock->nRequested >= 0);
3689 Assert(lock->nGranted >= 0);
3690 Assert(lock->nGranted <= lock->nRequested);
3691 Assert((proclock->holdMask & ~lock->grantMask) == 0);
3692
3693 /* Ignore it if nothing to release (must be a session lock) */
3694 if (proclock->releaseMask == 0)
3695 continue;
3696
3697 /* Else we should be releasing all locks */
3698 if (proclock->releaseMask != proclock->holdMask)
3699 elog(PANIC, "we seem to have dropped a bit somewhere");
3700
3701 /*
3702 * We cannot simply modify proclock->tag.myProc to reassign
3703 * ownership of the lock, because that's part of the hash key and
3704 * the proclock would then be in the wrong hash chain. Instead
3705 * use hash_update_hash_key. (We used to create a new hash entry,
3706 * but that risks out-of-memory failure if other processes are
3707 * busy making proclocks too.) We must unlink the proclock from
3708 * our procLink chain and put it into the new proc's chain, too.
3709 *
3710 * Note: the updated proclock hash key will still belong to the
3711 * same hash partition, cf proclock_hash(). So the partition lock
3712 * we already hold is sufficient for this.
3713 */
3714 dlist_delete(&proclock->procLink);
3715
3716 /*
3717 * Create the new hash key for the proclock.
3718 */
3719 proclocktag.myLock = lock;
3720 proclocktag.myProc = newproc;
3721
3722 /*
3723 * Update groupLeader pointer to point to the new proc. (We'd
3724 * better not be a member of somebody else's lock group!)
3725 */
3726 Assert(proclock->groupLeader == proclock->tag.myProc);
3727 proclock->groupLeader = newproc;
3728
3729 /*
3730 * Update the proclock. We should not find any existing entry for
3731 * the same hash key, since there can be only one entry for any
3732 * given lock with my own proc.
3733 */
3734 if (!hash_update_hash_key(LockMethodProcLockHash,
3735 proclock,
3736 &proclocktag))
3737 elog(PANIC, "duplicate entry found while reassigning a prepared transaction's locks");
3738
3739 /* Re-link into the new proc's proclock list */
3740 dlist_push_tail(&newproc->myProcLocks[partition], &proclock->procLink);
3741
3742 PROCLOCK_PRINT("PostPrepare_Locks: updated", proclock);
3743 } /* loop over PROCLOCKs within this partition */
3744
3745 LWLockRelease(partitionLock);
3746 } /* loop over partitions */
3747
3748 END_CRIT_SECTION();
3749 }
3750
3751
3752/*
3753 * Estimate shared-memory space used for lock tables
3754 */
3755 Size
3756 LockManagerShmemSize(void)
3757{
3758 Size size = 0;
3759 long max_table_size;
3760
3761 /* lock hash table */
3762 max_table_size = NLOCKENTS();
3763 size = add_size(size, hash_estimate_size(max_table_size, sizeof(LOCK)));
3764
3765 /* proclock hash table */
3766 max_table_size *= 2;
3767 size = add_size(size, hash_estimate_size(max_table_size, sizeof(PROCLOCK)));
3768
3769 /*
3770 * Since NLOCKENTS is only an estimate, add 10% safety margin.
3771 */
3772 size = add_size(size, size / 10);
3773
3774 return size;
3775}
3776
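/*
 * Worked example of the estimate above (illustrative only; the exact figure
 * depends on hash_estimate_size() internals and on configuration): with the
 * default max_locks_per_xact = 64 and max_prepared_xacts = 0, and assuming
 * MaxBackends works out to roughly 100 backends, NLOCKENTS() budgets about
 * 64 * 100 = 6400 LOCK hash entries and 2 * 6400 = 12800 PROCLOCK hash
 * entries; the returned size is the sum of the two hash_estimate_size()
 * results plus a 10% safety margin.
 */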
3777/*
3778 * GetLockStatusData - Return a summary of the lock manager's internal
3779 * status, for use in a user-level reporting function.
3780 *
3781 * The return data consists of an array of LockInstanceData objects,
3782 * which are a lightly abstracted version of the PROCLOCK data structures,
3783 * i.e. there is one entry for each unique lock and interested PGPROC.
3784 * It is the caller's responsibility to match up related items (such as
3785 * references to the same lockable object or PGPROC) if wanted.
3786 *
3787 * The design goal is to hold the LWLocks for as short a time as possible;
3788 * thus, this function simply makes a copy of the necessary data and releases
3789 * the locks, allowing the caller to contemplate and format the data for as
3790 * long as it pleases.
3791 */
3792 LockData *
3793 GetLockStatusData(void)
3794{
3795 LockData *data;
3796 PROCLOCK *proclock;
3797 HASH_SEQ_STATUS seqstat;
3798 int els;
3799 int el;
3800 int i;
3801
3802 data = palloc_object(LockData);
3803
3804 /* Guess how much space we'll need. */
3805 els = MaxBackends;
3806 el = 0;
3807 data->locks = palloc_array(LockInstanceData, els);
3808
3809 /*
3810 * First, we iterate through the per-backend fast-path arrays, locking
3811 * them one at a time. This might produce an inconsistent picture of the
3812 * system state, but taking all of those LWLocks at the same time seems
3813 * impractical (in particular, note MAX_SIMUL_LWLOCKS). It shouldn't
3814 * matter too much, because none of these locks can be involved in lock
3815 * conflicts anyway - anything that might must be present in the main lock
3816 * table. (For the same reason, we don't sweat about making leaderPid
3817 * completely valid. We cannot safely dereference another backend's
3818 * lockGroupLeader field without holding all lock partition locks, and
3819 * it's not worth that.)
3820 */
3821 for (i = 0; i < ProcGlobal->allProcCount; ++i)
3822 {
3823 PGPROC *proc = &ProcGlobal->allProcs[i];
3824
3825 /* Skip backends with pid=0, as they don't hold fast-path locks */
3826 if (proc->pid == 0)
3827 continue;
3828
3829 LWLockAcquire(&proc->fpInfoLock, LW_SHARED);
3830
3831 for (uint32 g = 0; g < FastPathLockGroupsPerBackend; g++)
3832 {
3833 /* Skip groups without registered fast-path locks */
3834 if (proc->fpLockBits[g] == 0)
3835 continue;
3836
3837 for (int j = 0; j < FP_LOCK_SLOTS_PER_GROUP; j++)
3838 {
3839 LockInstanceData *instance;
3840 uint32 f = FAST_PATH_SLOT(g, j);
3841 uint32 lockbits = FAST_PATH_GET_BITS(proc, f);
3842
3843 /* Skip unallocated slots */
3844 if (!lockbits)
3845 continue;
3846
3847 if (el >= els)
3848 {
3849 els += MaxBackends;
3850 data->locks = (LockInstanceData *)
3851 repalloc(data->locks, sizeof(LockInstanceData) * els);
3852 }
3853
3854 instance = &data->locks[el];
3855 SET_LOCKTAG_RELATION(instance->locktag, proc->databaseId,
3856 proc->fpRelId[f]);
3857 instance->holdMask = lockbits << FAST_PATH_LOCKNUMBER_OFFSET;
3858 instance->waitLockMode = NoLock;
3859 instance->vxid.procNumber = proc->vxid.procNumber;
3860 instance->vxid.localTransactionId = proc->vxid.lxid;
3861 instance->pid = proc->pid;
3862 instance->leaderPid = proc->pid;
3863 instance->fastpath = true;
3864
3865 /*
3866 * Successfully taking fast path lock means there were no
3867 * conflicting locks.
3868 */
3869 instance->waitStart = 0;
3870
3871 el++;
3872 }
3873 }
3874
3875 if (proc->fpVXIDLock)
3876 {
3878 LockInstanceData *instance;
3879
3880 if (el >= els)
3881 {
3882 els += MaxBackends;
3883 data->locks = (LockInstanceData *)
3884 repalloc(data->locks, sizeof(LockInstanceData) * els);
3885 }
3886
3887 vxid.procNumber = proc->vxid.procNumber;
3888 vxid.localTransactionId = proc->fpLocalTransactionId;
3889
3890 instance = &data->locks[el];
3891 SET_LOCKTAG_VIRTUALTRANSACTION(instance->locktag, vxid);
3892 instance->holdMask = LOCKBIT_ON(ExclusiveLock);
3893 instance->waitLockMode = NoLock;
3894 instance->vxid.procNumber = proc->vxid.procNumber;
3895 instance->vxid.localTransactionId = proc->vxid.lxid;
3896 instance->pid = proc->pid;
3897 instance->leaderPid = proc->pid;
3898 instance->fastpath = true;
3899 instance->waitStart = 0;
3900
3901 el++;
3902 }
3903
3904 LWLockRelease(&proc->fpInfoLock);
3905 }
3906
3907 /*
3908 * Next, acquire lock on the entire shared lock data structure. We do
3909 * this so that, at least for locks in the primary lock table, the state
3910 * will be self-consistent.
3911 *
3912 * Since this is a read-only operation, we take shared instead of
3913 * exclusive lock. There's not a whole lot of point to this, because all
3914 * the normal operations require exclusive lock, but it doesn't hurt
3915 * anything either. It will at least allow two backends to do
3916 * GetLockStatusData in parallel.
3917 *
3918 * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
3919 */
3920 for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
3921 LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);
3922
3923 /* Now we can safely count the number of proclocks */
3924 data->nelements = el + hash_get_num_entries(LockMethodProcLockHash);
3925 if (data->nelements > els)
3926 {
3927 els = data->nelements;
3928 data->locks = (LockInstanceData *)
3929 repalloc(data->locks, sizeof(LockInstanceData) * els);
3930 }
3931
3932 /* Now scan the tables to copy the data */
3933 hash_seq_init(&seqstat, LockMethodProcLockHash);
3934
3935 while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
3936 {
3937 PGPROC *proc = proclock->tag.myProc;
3938 LOCK *lock = proclock->tag.myLock;
3939 LockInstanceData *instance = &data->locks[el];
3940
3941 memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
3942 instance->holdMask = proclock->holdMask;
3943 if (proc->waitLock == proclock->tag.myLock)
3944 instance->waitLockMode = proc->waitLockMode;
3945 else
3946 instance->waitLockMode = NoLock;
3947 instance->vxid.procNumber = proc->vxid.procNumber;
3948 instance->vxid.localTransactionId = proc->vxid.lxid;
3949 instance->pid = proc->pid;
3950 instance->leaderPid = proclock->groupLeader->pid;
3951 instance->fastpath = false;
3952 instance->waitStart = (TimestampTz) pg_atomic_read_u64(&proc->waitStart);
3953
3954 el++;
3955 }
3956
3957 /*
3958 * And release locks. We do this in reverse order for two reasons: (1)
3959 * Anyone else who needs more than one of the locks will be trying to lock
3960 * them in increasing order; we don't want to release the other process
3961 * until it can get all the locks it needs. (2) This avoids O(N^2)
3962 * behavior inside LWLockRelease.
3963 */
3964 for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
3965 LWLockRelease(LockHashPartitionLockByIndex(i));
3966
3967 Assert(el == data->nelements);
3968
3969 return data;
3970}
3971
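/*
 * Illustrative sketch (not part of lock.c): a consumer of GetLockStatusData()
 * can decode each holdMask bit by bit, emitting one row per held lock mode.
 * "report_lock" here is a hypothetical callback, not a real function.
 *
 *	LockData   *lockData = GetLockStatusData();
 *
 *	for (int n = 0; n < lockData->nelements; n++)
 *	{
 *		LockInstanceData *inst = &lockData->locks[n];
 *
 *		for (LOCKMODE m = 1; m <= MaxLockMode; m++)
 *		{
 *			if (inst->holdMask & LOCKBIT_ON(m))
 *				report_lock(inst->pid, &inst->locktag,
 *							GetLockmodeName(inst->locktag.locktag_lockmethodid, m));
 *		}
 *	}
 */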
3972/*
3973 * GetBlockerStatusData - Return a summary of the lock manager's state
3974 * concerning locks that are blocking the specified PID or any member of
3975 * the PID's lock group, for use in a user-level reporting function.
3976 *
3977 * For each PID within the lock group that is awaiting some heavyweight lock,
3978 * the return data includes an array of LockInstanceData objects, which are
3979 * the same data structure used by GetLockStatusData; but unlike that function,
3980 * this one reports only the PROCLOCKs associated with the lock that that PID
3981 * is blocked on. (Hence, all the locktags should be the same for any one
3982 * blocked PID.) In addition, we return an array of the PIDs of those backends
3983 * that are ahead of the blocked PID in the lock's wait queue. These can be
3984 * compared with the PIDs in the LockInstanceData objects to determine which
3985 * waiters are ahead of or behind the blocked PID in the queue.
3986 *
3987 * If blocked_pid isn't a valid backend PID or nothing in its lock group is
3988 * waiting on any heavyweight lock, return empty arrays.
3989 *
3990 * The design goal is to hold the LWLocks for as short a time as possible;
3991 * thus, this function simply makes a copy of the necessary data and releases
3992 * the locks, allowing the caller to contemplate and format the data for as
3993 * long as it pleases.
3994 */
3995 BlockedProcsData *
3996 GetBlockerStatusData(int blocked_pid)
3997{
3998 BlockedProcsData *data;
3999 PGPROC *proc;
4000 int i;
4001
4002 data = palloc_object(BlockedProcsData);
4003
4004 /*
4005 * Guess how much space we'll need, and preallocate. Most of the time
4006 * this will avoid needing to do repalloc while holding the LWLocks. (We
4007 * assume, but check with an Assert, that MaxBackends is enough entries
4008 * for the procs[] array; the other two could need enlargement, though.)
4009 */
4010 data->nprocs = data->nlocks = data->npids = 0;
4011 data->maxprocs = data->maxlocks = data->maxpids = MaxBackends;
4012 data->procs = palloc_array(BlockedProcData, data->maxprocs);
4013 data->locks = palloc_array(LockInstanceData, data->maxlocks);
4014 data->waiter_pids = palloc_array(int, data->maxpids);
4015
4016 /*
4017 * In order to search the ProcArray for blocked_pid and assume that that
4018 * entry won't immediately disappear under us, we must hold ProcArrayLock.
4019 * In addition, to examine the lock grouping fields of any other backend,
4020 * we must hold all the hash partition locks. (Only one of those locks is
4021 * actually relevant for any one lock group, but we can't know which one
4022 * ahead of time.) It's fairly annoying to hold all those locks
4023 * throughout this, but it's no worse than GetLockStatusData(), and it
4024 * does have the advantage that we're guaranteed to return a
4025 * self-consistent instantaneous state.
4026 */
4027 LWLockAcquire(ProcArrayLock, LW_SHARED);
4028
4029 proc = BackendPidGetProcWithLock(blocked_pid);
4030
4031 /* Nothing to do if it's gone */
4032 if (proc != NULL)
4033 {
4034 /*
4035 * Acquire lock on the entire shared lock data structure. See notes
4036 * in GetLockStatusData().
4037 */
4038 for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
4039 LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);
4040
4041 if (proc->lockGroupLeader == NULL)
4042 {
4043 /* Easy case, proc is not a lock group member */
4044 GetSingleProcBlockerStatusData(proc, data);
4045 }
4046 else
4047 {
4048 /* Examine all procs in proc's lock group */
4049 dlist_iter iter;
4050
4051 dlist_foreach(iter, &proc->lockGroupLeader->lockGroupMembers)
4052 {
4053 PGPROC *memberProc;
4054
4055 memberProc = dlist_container(PGPROC, lockGroupLink, iter.cur);
4056 GetSingleProcBlockerStatusData(memberProc, data);
4057 }
4058 }
4059
4060 /*
4061 * And release locks. See notes in GetLockStatusData().
4062 */
4063 for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
4064 LWLockRelease(LockHashPartitionLockByIndex(i));
4065
4066 Assert(data->nprocs <= data->maxprocs);
4067 }
4068
4069 LWLockRelease(ProcArrayLock);
4070
4071 return data;
4072}
4073
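/*
 * Illustrative sketch (not part of lock.c) of how the flattened result is
 * laid out: each entry of data->procs[] owns a contiguous slice of
 * data->locks[] and of data->waiter_pids[]. "blocked_pid" stands for whatever
 * PID the caller is interested in.
 *
 *	BlockedProcsData *data = GetBlockerStatusData(blocked_pid);
 *
 *	for (int p = 0; p < data->nprocs; p++)
 *	{
 *		BlockedProcData *bproc = &data->procs[p];
 *		LockInstanceData *locks = &data->locks[bproc->first_lock];
 *		int *ahead_pids = &data->waiter_pids[bproc->first_waiter];
 *
 *		(bproc->num_locks entries of locks[] describe the lock bproc->pid is
 *		blocked on; bproc->num_waiters entries of ahead_pids[] are queued
 *		ahead of it.)
 *	}
 */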
4074/* Accumulate data about one possibly-blocked proc for GetBlockerStatusData */
4075 static void
4076 GetSingleProcBlockerStatusData(PGPROC *blocked_proc, BlockedProcsData *data)
4077{
4078 LOCK *theLock = blocked_proc->waitLock;
4079 BlockedProcData *bproc;
4080 dlist_iter proclock_iter;
4081 dlist_iter proc_iter;
4082 dclist_head *waitQueue;
4083 int queue_size;
4084
4085 /* Nothing to do if this proc is not blocked */
4086 if (theLock == NULL)
4087 return;
4088
4089 /* Set up a procs[] element */
4090 bproc = &data->procs[data->nprocs++];
4091 bproc->pid = blocked_proc->pid;
4092 bproc->first_lock = data->nlocks;
4093 bproc->first_waiter = data->npids;
4094
4095 /*
4096 * We may ignore the proc's fast-path arrays, since nothing in those could
4097 * be related to a contended lock.
4098 */
4099
4100 /* Collect all PROCLOCKs associated with theLock */
4101 dlist_foreach(proclock_iter, &theLock->procLocks)
4102 {
4103 PROCLOCK *proclock =
4104 dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
4105 PGPROC *proc = proclock->tag.myProc;
4106 LOCK *lock = proclock->tag.myLock;
4107 LockInstanceData *instance;
4108
4109 if (data->nlocks >= data->maxlocks)
4110 {
4111 data->maxlocks += MaxBackends;
4112 data->locks = (LockInstanceData *)
4113 repalloc(data->locks, sizeof(LockInstanceData) * data->maxlocks);
4114 }
4115
4116 instance = &data->locks[data->nlocks];
4117 memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
4118 instance->holdMask = proclock->holdMask;
4119 if (proc->waitLock == lock)
4120 instance->waitLockMode = proc->waitLockMode;
4121 else
4122 instance->waitLockMode = NoLock;
4123 instance->vxid.procNumber = proc->vxid.procNumber;
4124 instance->vxid.localTransactionId = proc->vxid.lxid;
4125 instance->pid = proc->pid;
4126 instance->leaderPid = proclock->groupLeader->pid;
4127 instance->fastpath = false;
4128 data->nlocks++;
4129 }
4130
4131 /* Enlarge waiter_pids[] if it's too small to hold all wait queue PIDs */
4132 waitQueue = &(theLock->waitProcs);
4133 queue_size = dclist_count(waitQueue);
4134
4135 if (queue_size > data->maxpids - data->npids)
4136 {
4137 data->maxpids = Max(data->maxpids + MaxBackends,
4138 data->npids + queue_size);
4139 data->waiter_pids = (int *) repalloc(data->waiter_pids,
4140 sizeof(int) * data->maxpids);
4141 }
4142
4143 /* Collect PIDs from the lock's wait queue, stopping at blocked_proc */
4144 dclist_foreach(proc_iter, waitQueue)
4145 {
4146 PGPROC *queued_proc = dlist_container(PGPROC, links, proc_iter.cur);
4147
4148 if (queued_proc == blocked_proc)
4149 break;
4150 data->waiter_pids[data->npids++] = queued_proc->pid;
4151 queued_proc = (PGPROC *) queued_proc->links.next;
4152 }
4153
4154 bproc->num_locks = data->nlocks - bproc->first_lock;
4155 bproc->num_waiters = data->npids - bproc->first_waiter;
4156}
4157
4158/*
4159 * Returns a list of currently held AccessExclusiveLocks, for use by
4160 * LogStandbySnapshot(). The result is a palloc'd array,
4161 * with the number of elements returned into *nlocks.
4162 *
4163 * XXX This currently takes a lock on all partitions of the lock table,
4164 * but it's possible to do better. By reference counting locks and storing
4165 * the value in the ProcArray entry for each backend we could tell if any
4166 * locks need recording without having to acquire the partition locks and
4167 * scan the lock table. Whether that's worth the additional overhead
4168 * is pretty dubious though.
4169 */
4170 xl_standby_lock *
4171 GetRunningTransactionLocks(int *nlocks)
4172 {
4173 xl_standby_lock *accessExclusiveLocks;
4174 PROCLOCK *proclock;
4175 HASH_SEQ_STATUS seqstat;
4176 int i;
4177 int index;
4178 int els;
4179
4180 /*
4181 * Acquire lock on the entire shared lock data structure.
4182 *
4183 * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
4184 */
4185 for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
4186 LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);
4187
4188 /* Now we can safely count the number of proclocks */
4189 els = hash_get_num_entries(LockMethodProcLockHash);
4190
4191 /*
4192 * Allocating enough space for all locks in the lock table is overkill,
4193 * but it's more convenient and faster than having to enlarge the array.
4194 */
4195 accessExclusiveLocks = palloc(els * sizeof(xl_standby_lock));
4196
4197 /* Now scan the tables to copy the data */
4198 hash_seq_init(&seqstat, LockMethodProcLockHash);
4199
4200 /*
4201 * If lock is a currently granted AccessExclusiveLock then it will have
4202 * just one proclock holder, so locks are never accessed twice in this
4203 * particular case. Don't copy this code for use elsewhere because in the
4204 * general case this will give you duplicate locks when looking at
4205 * non-exclusive lock types.
4206 */
4207 index = 0;
4208 while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
4209 {
4210 /* make sure this definition matches the one used in LockAcquire */
4211 if ((proclock->holdMask & LOCKBIT_ON(AccessExclusiveLock)) &&
4212 proclock->tag.myLock->tag.locktag_type == LOCKTAG_RELATION)
4213 {
4214 PGPROC *proc = proclock->tag.myProc;
4215 LOCK *lock = proclock->tag.myLock;
4216 TransactionId xid = proc->xid;
4217
4218 /*
4219 * Don't record locks for transactions if we know they have
4220 * already issued their WAL record for commit but not yet released
4221 * lock. It is still possible that we see locks held by already
4222 * complete transactions, if they haven't yet zeroed their xids.
4223 */
4224 if (!TransactionIdIsValid(xid))
4225 continue;
4226
4227 accessExclusiveLocks[index].xid = xid;
4228 accessExclusiveLocks[index].dbOid = lock->tag.locktag_field1;
4229 accessExclusiveLocks[index].relOid = lock->tag.locktag_field2;
4230
4231 index++;
4232 }
4233 }
4234
4235 Assert(index <= els);
4236
4237 /*
4238 * And release locks. We do this in reverse order for two reasons: (1)
4239 * Anyone else who needs more than one of the locks will be trying to lock
4240 * them in increasing order; we don't want to release the other process
4241 * until it can get all the locks it needs. (2) This avoids O(N^2)
4242 * behavior inside LWLockRelease.
4243 */
4244 for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
4245 LWLockRelease(LockHashPartitionLockByIndex(i));
4246
4247 *nlocks = index;
4248 return accessExclusiveLocks;
4249}
4250
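/*
 * Illustrative sketch (not part of lock.c): the caller named in the comment
 * above, LogStandbySnapshot(), consumes the result as a plain array.
 *
 *	int			nlocks;
 *	xl_standby_lock *locks = GetRunningTransactionLocks(&nlocks);
 *
 *	for (int n = 0; n < nlocks; n++)
 *		... emit a WAL record describing locks[n].xid, locks[n].dbOid and
 *		    locks[n].relOid ...
 */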
4251/* Provide the textual name of any lock mode */
4252 const char *
4253 GetLockmodeName(LOCKMETHODID lockmethodid, LOCKMODE mode)
4254{
4255 Assert(lockmethodid > 0 && lockmethodid < lengthof(LockMethods));
4256 Assert(mode > 0 && mode <= LockMethods[lockmethodid]->numLockModes);
4257 return LockMethods[lockmethodid]->lockModeNames[mode];
4258}
4259
4260#ifdef LOCK_DEBUG
4261/*
4262 * Dump all locks in the given proc's myProcLocks lists.
4263 *
4264 * Caller is responsible for having acquired appropriate LWLocks.
4265 */
4266void
4267DumpLocks(PGPROC *proc)
4268{
4269 int i;
4270
4271 if (proc == NULL)
4272 return;
4273
4274 if (proc->waitLock)
4275 LOCK_PRINT("DumpLocks: waiting on", proc->waitLock, 0);
4276
4277 for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
4278 {
4279 dlist_head *procLocks = &proc->myProcLocks[i];
4280 dlist_iter iter;
4281
4282 dlist_foreach(iter, procLocks)
4283 {
4284 PROCLOCK *proclock = dlist_container(PROCLOCK, procLink, iter.cur);
4285 LOCK *lock = proclock->tag.myLock;
4286
4287 Assert(proclock->tag.myProc == proc);
4288 PROCLOCK_PRINT("DumpLocks", proclock);
4289 LOCK_PRINT("DumpLocks", lock, 0);
4290 }
4291 }
4292}
4293
4294/*
4295 * Dump all lmgr locks.
4296 *
4297 * Caller is responsible for having acquired appropriate LWLocks.
4298 */
4299void
4300DumpAllLocks(void)
4301{
4302 PGPROC *proc;
4303 PROCLOCK *proclock;
4304 LOCK *lock;
4305 HASH_SEQ_STATUS status;
4306
4307 proc = MyProc;
4308
4309 if (proc && proc->waitLock)
4310 LOCK_PRINT("DumpAllLocks: waiting on", proc->waitLock, 0);
4311
4312 hash_seq_init(&status, LockMethodProcLockHash);
4313
4314 while ((proclock = (PROCLOCK *) hash_seq_search(&status)) != NULL)
4315 {
4316 PROCLOCK_PRINT("DumpAllLocks", proclock);
4317
4318 lock = proclock->tag.myLock;
4319 if (lock)
4320 LOCK_PRINT("DumpAllLocks", lock, 0);
4321 else
4322 elog(LOG, "DumpAllLocks: proclock->tag.myLock = NULL");
4323 }
4324}
4325#endif /* LOCK_DEBUG */
4326
4327/*
4328 * LOCK 2PC resource manager's routines
4329 */
4330
4331/*
4332 * Re-acquire a lock belonging to a transaction that was prepared.
4333 *
4334 * Because this function is run at db startup, re-acquiring the locks should
4335 * never conflict with running transactions because there are none. We
4336 * assume that the lock state represented by the stored 2PC files is legal.
4337 *
4338 * When switching from Hot Standby mode to normal operation, the locks will
4339 * be already held by the startup process. The locks are acquired for the new
4340 * procs without checking for conflicts, so we don't get a conflict between the
4341 * startup process and the dummy procs, even though we will momentarily have
4342 * a situation where two procs are holding the same AccessExclusiveLock,
4343 * which isn't normally possible because of the conflict. If we're in standby
4344 * mode, but a recovery snapshot hasn't been established yet, it's possible
4345 * that some but not all of the locks are already held by the startup process.
4346 *
4347 * This approach is simple, but also a bit dangerous, because if there isn't
4348 * enough shared memory to acquire the locks, an error will be thrown, which
4349 * is promoted to FATAL and recovery will abort, bringing down postmaster.
4350 * A safer approach would be to transfer the locks like we do in
4351 * AtPrepare_Locks, but then again, in hot standby mode it's possible for
4352 * read-only backends to use up all the shared lock memory anyway, so that
4353 * replaying the WAL record that needs to acquire a lock will throw an error
4354 * and PANIC anyway.
4355 */
4356 void
4357 lock_twophase_recover(FullTransactionId fxid, uint16 info,
4358 void *recdata, uint32 len)
4359{
4360 TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4361 PGPROC *proc = TwoPhaseGetDummyProc(fxid, false);
4362 LOCKTAG *locktag;
4363 LOCKMODE lockmode;
4364 LOCKMETHODID lockmethodid;
4365 LOCK *lock;
4366 PROCLOCK *proclock;
4367 PROCLOCKTAG proclocktag;
4368 bool found;
4369 uint32 hashcode;
4370 uint32 proclock_hashcode;
4371 int partition;
4372 LWLock *partitionLock;
4373 LockMethod lockMethodTable;
4374
4375 Assert(len == sizeof(TwoPhaseLockRecord));
4376 locktag = &rec->locktag;
4377 lockmode = rec->lockmode;
4378 lockmethodid = locktag->locktag_lockmethodid;
4379
4380 if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4381 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4382 lockMethodTable = LockMethods[lockmethodid];
4383
4384 hashcode = LockTagHashCode(locktag);
4385 partition = LockHashPartition(hashcode);
4386 partitionLock = LockHashPartitionLock(hashcode);
4387
4388 LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4389
4390 /*
4391 * Find or create a lock with this tag.
4392 */
4393 lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
4394 locktag,
4395 hashcode,
4396 HASH_ENTER_NULL,
4397 &found);
4398 if (!lock)
4399 {
4400 LWLockRelease(partitionLock);
4401 ereport(ERROR,
4402 (errcode(ERRCODE_OUT_OF_MEMORY),
4403 errmsg("out of shared memory"),
4404 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
4405 }
4406
4407 /*
4408 * if it's a new lock object, initialize it
4409 */
4410 if (!found)
4411 {
4412 lock->grantMask = 0;
4413 lock->waitMask = 0;
4414 dlist_init(&lock->procLocks);
4415 dclist_init(&lock->waitProcs);
4416 lock->nRequested = 0;
4417 lock->nGranted = 0;
4418 MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
4419 MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
4420 LOCK_PRINT("lock_twophase_recover: new", lock, lockmode);
4421 }
4422 else
4423 {
4424 LOCK_PRINT("lock_twophase_recover: found", lock, lockmode);
4425 Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
4426 Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
4427 Assert(lock->nGranted <= lock->nRequested);
4428 }
4429
4430 /*
4431 * Create the hash key for the proclock table.
4432 */
4433 proclocktag.myLock = lock;
4434 proclocktag.myProc = proc;
4435
4436 proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
4437
4438 /*
4439 * Find or create a proclock entry with this tag
4440 */
4441 proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
4442 &proclocktag,
4443 proclock_hashcode,
4444 HASH_ENTER_NULL,
4445 &found);
4446 if (!proclock)
4447 {
4448 /* Oops, not enough shmem for the proclock */
4449 if (lock->nRequested == 0)
4450 {
4451 /*
4452 * There are no other requestors of this lock, so garbage-collect
4453 * the lock object. We *must* do this to avoid a permanent leak
4454 * of shared memory, because there won't be anything to cause
4455 * anyone to release the lock object later.
4456 */
4457 Assert(dlist_is_empty(&lock->procLocks));
4458 if (!hash_search_with_hash_value(LockMethodLockHash,
4459 &(lock->tag),
4460 hashcode,
4461 HASH_REMOVE,
4462 NULL))
4463 elog(PANIC, "lock table corrupted");
4464 }
4465 LWLockRelease(partitionLock);
4466 ereport(ERROR,
4467 (errcode(ERRCODE_OUT_OF_MEMORY),
4468 errmsg("out of shared memory"),
4469 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
4470 }
4471
4472 /*
4473 * If new, initialize the new entry
4474 */
4475 if (!found)
4476 {
4477 Assert(proc->lockGroupLeader == NULL);
4478 proclock->groupLeader = proc;
4479 proclock->holdMask = 0;
4480 proclock->releaseMask = 0;
4481 /* Add proclock to appropriate lists */
4482 dlist_push_tail(&lock->procLocks, &proclock->lockLink);
4483 dlist_push_tail(&proc->myProcLocks[partition],
4484 &proclock->procLink);
4485 PROCLOCK_PRINT("lock_twophase_recover: new", proclock);
4486 }
4487 else
4488 {
4489 PROCLOCK_PRINT("lock_twophase_recover: found", proclock);
4490 Assert((proclock->holdMask & ~lock->grantMask) == 0);
4491 }
4492
4493 /*
4494 * lock->nRequested and lock->requested[] count the total number of
4495 * requests, whether granted or waiting, so increment those immediately.
4496 */
4497 lock->nRequested++;
4498 lock->requested[lockmode]++;
4499 Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
4500
4501 /*
4502 * We shouldn't already hold the desired lock.
4503 */
4504 if (proclock->holdMask & LOCKBIT_ON(lockmode))
4505 elog(ERROR, "lock %s on object %u/%u/%u is already held",
4506 lockMethodTable->lockModeNames[lockmode],
4507 lock->tag.locktag_field1, lock->tag.locktag_field2,
4508 lock->tag.locktag_field3);
4509
4510 /*
4511 * We ignore any possible conflicts and just grant ourselves the lock. Not
4512 * only because we don't bother, but also to avoid deadlocks when
4513 * switching from standby to normal mode. See function comment.
4514 */
4515 GrantLock(lock, proclock, lockmode);
4516
4517 /*
4518 * Bump strong lock count, to make sure any fast-path lock requests won't
4519 * be granted without consulting the primary lock table.
4520 */
4521 if (ConflictsWithRelationFastPath(&lock->tag, lockmode))
4522 {
4523 uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
4524
4525 SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
4526 FastPathStrongRelationLocks->count[fasthashcode]++;
4527 SpinLockRelease(&FastPathStrongRelationLocks->mutex);
4528 }
4529
4530 LWLockRelease(partitionLock);
4531}
4532
4533/*
4534 * Re-acquire a lock belonging to a transaction that was prepared, when
4535 * starting up into hot standby mode.
4536 */
4537 void
4538 lock_twophase_standby_recover(FullTransactionId fxid, uint16 info,
4539 void *recdata, uint32 len)
4540{
4541 TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4542 LOCKTAG *locktag;
4543 LOCKMODE lockmode;
4544 LOCKMETHODID lockmethodid;
4545
4546 Assert(len == sizeof(TwoPhaseLockRecord));
4547 locktag = &rec->locktag;
4548 lockmode = rec->lockmode;
4549 lockmethodid = locktag->locktag_lockmethodid;
4550
4551 if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4552 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4553
4554 if (lockmode == AccessExclusiveLock &&
4555 locktag->locktag_type == LOCKTAG_RELATION)
4556 {
4557 StandbyAcquireAccessExclusiveLock(XidFromFullTransactionId(fxid),
4558 locktag->locktag_field1 /* dboid */ ,
4559 locktag->locktag_field2 /* reloid */ );
4560 }
4561}
4562
4563
4564/*
4565 * 2PC processing routine for COMMIT PREPARED case.
4566 *
4567 * Find and release the lock indicated by the 2PC record.
4568 */
4569 void
4570 lock_twophase_postcommit(FullTransactionId fxid, uint16 info,
4571 void *recdata, uint32 len)
4572{
4573 TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4574 PGPROC *proc = TwoPhaseGetDummyProc(fxid, true);
4575 LOCKTAG *locktag;
4576 LOCKMETHODID lockmethodid;
4577 LockMethod lockMethodTable;
4578
4579 Assert(len == sizeof(TwoPhaseLockRecord));
4580 locktag = &rec->locktag;
4581 lockmethodid = locktag->locktag_lockmethodid;
4582
4583 if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4584 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4585 lockMethodTable = LockMethods[lockmethodid];
4586
4587 LockRefindAndRelease(lockMethodTable, proc, locktag, rec->lockmode, true);
4588}
4589
4590/*
4591 * 2PC processing routine for ROLLBACK PREPARED case.
4592 *
4593 * This is actually just the same as the COMMIT case.
4594 */
4595 void
4596 lock_twophase_postabort(FullTransactionId fxid, uint16 info,
4597 void *recdata, uint32 len)
4598{
4599 lock_twophase_postcommit(fxid, info, recdata, len);
4600}
4601
4602/*
4603 * VirtualXactLockTableInsert
4604 *
4605 * Take vxid lock via the fast-path. There can't be any pre-existing
4606 * lockers, as we haven't advertised this vxid via the ProcArray yet.
4607 *
4608 * Since MyProc->fpLocalTransactionId will normally contain the same data
4609 * as MyProc->vxid.lxid, you might wonder if we really need both. The
4610 * difference is that MyProc->vxid.lxid is set and cleared unlocked, and
4611 * examined by procarray.c, while fpLocalTransactionId is protected by
4612 * fpInfoLock and is used only by the locking subsystem. Doing it this
4613 * way makes it easier to verify that there are no funny race conditions.
4614 *
4615 * We don't bother recording this lock in the local lock table, since it's
4616 * only ever released at the end of a transaction. Instead,
4617 * LockReleaseAll() calls VirtualXactLockTableCleanup().
4618 */
4619 void
4620 VirtualXactLockTableInsert(VirtualTransactionId vxid)
4621 {
4622 Assert(VirtualTransactionIdIsValid(vxid));
4623
4624 LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
4625
4626 Assert(MyProc->vxid.procNumber == vxid.procNumber);
4627 Assert(MyProc->fpLocalTransactionId == InvalidLocalTransactionId);
4628 Assert(MyProc->fpVXIDLock == false);
4629
4630 MyProc->fpVXIDLock = true;
4631 MyProc->fpLocalTransactionId = vxid.localTransactionId;
4632
4633 LWLockRelease(&MyProc->fpInfoLock);
4634 }
4635
4636/*
4637 * VirtualXactLockTableCleanup
4638 *
4639 * Check whether a VXID lock has been materialized; if so, release it,
4640 * unblocking waiters.
4641 */
4642 void
4643 VirtualXactLockTableCleanup(void)
4644 {
4645 bool fastpath;
4646 LocalTransactionId lxid;
4647
4648 Assert(MyProc->vxid.procNumber != INVALID_PROC_NUMBER);
4649
4650 /*
4651 * Clean up shared memory state.
4652 */
4653 LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
4654
4655 fastpath = MyProc->fpVXIDLock;
4656 lxid = MyProc->fpLocalTransactionId;
4657 MyProc->fpVXIDLock = false;
4658 MyProc->fpLocalTransactionId = InvalidLocalTransactionId;
4659
4660 LWLockRelease(&MyProc->fpInfoLock);
4661
4662 /*
4663 * If fpVXIDLock has been cleared without touching fpLocalTransactionId,
4664 * that means someone transferred the lock to the main lock table.
4665 */
4666 if (!fastpath && LocalTransactionIdIsValid(lxid))
4667 {
4668 VirtualTransactionId vxid;
4669 LOCKTAG locktag;
4670
4671 vxid.procNumber = MyProcNumber;
4672 vxid.localTransactionId = lxid;
4673 SET_LOCKTAG_VIRTUALTRANSACTION(locktag, vxid);
4674
4675 LockRefindAndRelease(LockMethods[DEFAULT_LOCKMETHOD], MyProc,
4676 &locktag, ExclusiveLock, false);
4677 }
4678}
4679
4680/*
4681 * XactLockForVirtualXact
4682 *
4683 * If TransactionIdIsValid(xid), this is essentially XactLockTableWait(xid,
4684 * NULL, NULL, XLTW_None) or ConditionalXactLockTableWait(xid). Unlike those
4685 * functions, it assumes "xid" is never a subtransaction and that "xid" is
4686 * prepared, committed, or aborted.
4687 *
4688 * If !TransactionIdIsValid(xid), this locks every prepared XID having been
4689 * known as "vxid" before its PREPARE TRANSACTION.
4690 */
4691 static bool
4692 XactLockForVirtualXact(VirtualTransactionId vxid,
4693 TransactionId xid, bool wait)
4694{
4695 bool more = false;
4696
4697 /* There is no point to wait for 2PCs if you have no 2PCs. */
4698 if (max_prepared_xacts == 0)
4699 return true;
4700
4701 do
4702 {
4703 LockAcquireResult lar;
4704 LOCKTAG tag;
4705
4706 /* Clear state from previous iterations. */
4707 if (more)
4708 {
4709 xid = InvalidTransactionId;
4710 more = false;
4711 }
4712
4713 /* If we have no xid, try to find one. */
4714 if (!TransactionIdIsValid(xid))
4715 xid = TwoPhaseGetXidByVirtualXID(vxid, &more);
4716 if (!TransactionIdIsValid(xid))
4717 {
4718 Assert(!more);
4719 return true;
4720 }
4721
4722 /* Check or wait for XID completion. */
4723 SET_LOCKTAG_TRANSACTION(tag, xid);
4724 lar = LockAcquire(&tag, ShareLock, false, !wait);
4725 if (lar == LOCKACQUIRE_NOT_AVAIL)
4726 return false;
4727 LockRelease(&tag, ShareLock, false);
4728 } while (more);
4729
4730 return true;
4731}
4732
4733/*
4734 * VirtualXactLock
4735 *
4736 * If wait = true, wait as long as the given VXID or any XID acquired by the
4737 * same transaction is still running. Then, return true.
4738 *
4739 * If wait = false, just check whether that VXID or one of those XIDs is still
4740 * running, and return true or false.
4741 */
4742 bool
4743 VirtualXactLock(VirtualTransactionId vxid, bool wait)
4744{
4745 LOCKTAG tag;
4746 PGPROC *proc;
4747 TransactionId xid = InvalidTransactionId;
4748
4749 Assert(VirtualTransactionIdIsValid(vxid));
4750
4751 if (VirtualTransactionIdIsRecoveredPreparedXact(vxid))
4752 /* no vxid lock; localTransactionId is a normal, locked XID */
4753 return XactLockForVirtualXact(vxid, vxid.localTransactionId, wait);
4754
4755 SET_LOCKTAG_VIRTUALTRANSACTION(tag, vxid);
4756
4757 /*
4758 * If a lock table entry must be made, this is the PGPROC on whose behalf
4759 * it must be done. Note that the transaction might end or the PGPROC
4760 * might be reassigned to a new backend before we get around to examining
4761 * it, but it doesn't matter. If we find upon examination that the
4762 * relevant lxid is no longer running here, that's enough to prove that
4763 * it's no longer running anywhere.
4764 */
4765 proc = ProcNumberGetProc(vxid.procNumber);
4766 if (proc == NULL)
4767 return XactLockForVirtualXact(vxid, InvalidTransactionId, wait);
4768
4769 /*
4770 * We must acquire this lock before checking the procNumber and lxid
4771 * against the ones we're waiting for. The target backend will only set
4772 * or clear lxid while holding this lock.
4773 */
4774 LWLockAcquire(&proc->fpInfoLock, LW_EXCLUSIVE);
4775
4776 if (proc->vxid.procNumber != vxid.procNumber
4777 || proc->fpLocalTransactionId != vxid.localTransactionId)
4778 {
4779 /* VXID ended */
4780 LWLockRelease(&proc->fpInfoLock);
4781 return XactLockForVirtualXact(vxid, InvalidTransactionId, wait);
4782 }
4783
4784 /*
4785 * If we aren't asked to wait, there's no need to set up a lock table
4786 * entry. The transaction is still in progress, so just return false.
4787 */
4788 if (!wait)
4789 {
4790 LWLockRelease(&proc->fpInfoLock);
4791 return false;
4792 }
4793
4794 /*
4795 * OK, we're going to need to sleep on the VXID. But first, we must set
4796 * up the primary lock table entry, if needed (ie, convert the proc's
4797 * fast-path lock on its VXID to a regular lock).
4798 */
4799 if (proc->fpVXIDLock)
4800 {
4801 PROCLOCK *proclock;
4802 uint32 hashcode;
4803 LWLock *partitionLock;
4804
4805 hashcode = LockTagHashCode(&tag);
4806
4807 partitionLock = LockHashPartitionLock(hashcode);
4808 LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4809
4810 proclock = SetupLockInTable(LockMethods[DEFAULT_LOCKMETHOD], proc,
4811 &tag, hashcode, ExclusiveLock);
4812 if (!proclock)
4813 {
4814 LWLockRelease(partitionLock);
4815 LWLockRelease(&proc->fpInfoLock);
4816 ereport(ERROR,
4817 (errcode(ERRCODE_OUT_OF_MEMORY),
4818 errmsg("out of shared memory"),
4819 errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
4820 }
4821 GrantLock(proclock->tag.myLock, proclock, ExclusiveLock);
4822
4823 LWLockRelease(partitionLock);
4824
4825 proc->fpVXIDLock = false;
4826 }
4827
4828 /*
4829 * If the proc has an XID now, we'll avoid a TwoPhaseGetXidByVirtualXID()
4830 * search. The proc might have assigned this XID but not yet locked it,
4831 * in which case the proc will lock this XID before releasing the VXID.
4832 * The fpInfoLock critical section excludes VirtualXactLockTableCleanup(),
4833 * so we won't save an XID of a different VXID. It doesn't matter whether
4834 * we save this before or after setting up the primary lock table entry.
4835 */
4836 xid = proc->xid;
4837
4838 /* Done with proc->fpLockBits */
4839 LWLockRelease(&proc->fpInfoLock);
4840
4841 /* Time to wait. */
4842 (void) LockAcquire(&tag, ShareLock, false, false);
4843
4844 LockRelease(&tag, ShareLock, false);
4845 return XactLockForVirtualXact(vxid, xid, wait);
4846}
4847
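/*
 * Illustrative sketch (not part of lock.c): VirtualXactLock() is typically
 * paired with GetLockConflicts(), waiting out every transaction that holds a
 * conflicting lock (compare WaitForLockers() in lmgr.c). "tag" is assumed to
 * have been filled in by the caller, e.g. with SET_LOCKTAG_RELATION().
 *
 *	VirtualTransactionId *vxids = GetLockConflicts(&tag, AccessExclusiveLock, NULL);
 *
 *	while (VirtualTransactionIdIsValid(*vxids))
 *	{
 *		(void) VirtualXactLock(*vxids, true);
 *		vxids++;
 *	}
 */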
4848/*
4849 * LockWaiterCount
4850 *
4851 * Find the number of lock requesters on this locktag
4852 */
4853 int
4854 LockWaiterCount(const LOCKTAG *locktag)
4855{
4856 LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
4857 LOCK *lock;
4858 bool found;
4859 uint32 hashcode;
4860 LWLock *partitionLock;
4861 int waiters = 0;
4862
4863 if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4864 elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4865
4866 hashcode = LockTagHashCode(locktag);
4867 partitionLock = LockHashPartitionLock(hashcode);
4868 LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4869
4870 lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
4871 locktag,
4872 hashcode,
4873 HASH_FIND,
4874 &found);
4875 if (found)
4876 {
4877 Assert(lock != NULL);
4878 waiters = lock->nRequested;
4879 }
4880 LWLockRelease(partitionLock);
4881
4882 return waiters;
4883}
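/*
 * Illustrative sketch (not part of lock.c): a backend that holds a lock can
 * poll how many backends currently request it, whether granted or still
 * waiting, since the function returns the lock's nRequested counter. "dboid"
 * and "reloid" stand for whatever relation the caller cares about.
 *
 *	LOCKTAG		tag;
 *
 *	SET_LOCKTAG_RELATION(tag, dboid, reloid);
 *	if (LockWaiterCount(&tag) > 1)
 *		... someone besides us has requested this relation's lock ...
 */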