lock.c
1 /*-------------------------------------------------------------------------
2  *
3  * lock.c
4  * POSTGRES primary lock mechanism
5  *
6  * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  *
10  * IDENTIFICATION
11  * src/backend/storage/lmgr/lock.c
12  *
13  * NOTES
14  * A lock table is a shared memory hash table. When
15  * a process tries to acquire a lock of a type that conflicts
16  * with existing locks, it is put to sleep using the routines
17  * in storage/lmgr/proc.c.
18  *
19  * For the most part, this code should be invoked via lmgr.c
20  * or another lock-management module, not directly.
21  *
22  * Interface:
23  *
24  * InitLocks(), GetLocksMethodTable(), GetLockTagsMethodTable(),
25  * LockAcquire(), LockRelease(), LockReleaseAll(),
26  * LockCheckConflicts(), GrantLock()
27  *
28  *-------------------------------------------------------------------------
29  */
30 #include "postgres.h"
31 
32 #include <signal.h>
33 #include <unistd.h>
34 
35 #include "access/transam.h"
36 #include "access/twophase.h"
37 #include "access/twophase_rmgr.h"
38 #include "access/xlog.h"
39 #include "access/xlogutils.h"
40 #include "miscadmin.h"
41 #include "pg_trace.h"
42 #include "storage/proc.h"
43 #include "storage/procarray.h"
44 #include "storage/sinvaladt.h"
45 #include "storage/spin.h"
46 #include "storage/standby.h"
47 #include "utils/memutils.h"
48 #include "utils/ps_status.h"
49 #include "utils/resowner.h"
50 
51 
52 /* This configuration variable is used to set the lock table size */
53 int max_locks_per_xact; /* set by guc.c */
54 
55 #define NLOCKENTS() \
56  mul_size(max_locks_per_xact, add_size(MaxBackends, max_prepared_xacts))
57 
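/*
 * Illustrative arithmetic (not part of lock.c): with the default
 * max_locks_per_transaction = 64 and, for example, MaxBackends = 100 and
 * max_prepared_xacts = 0, NLOCKENTS() works out to 64 * (100 + 0) = 6400
 * shared lock-table entries.
 */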
58 
59 /*
60  * Data structures defining the semantics of the standard lock methods.
61  *
62  * The conflict table defines the semantics of the various lock modes.
63  */
64 static const LOCKMASK LockConflicts[] = {
65  0,
66 
67  /* AccessShareLock */
68  LOCKBIT_ON(AccessExclusiveLock),
69 
70  /* RowShareLock */
71  LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
72 
73  /* RowExclusiveLock */
74  LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
75  LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
76 
77  /* ShareUpdateExclusiveLock */
78  LOCKBIT_ON(ShareUpdateExclusiveLock) |
79  LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
80  LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
81 
82  /* ShareLock */
83  LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
84  LOCKBIT_ON(ShareRowExclusiveLock) |
85  LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
86 
87  /* ShareRowExclusiveLock */
88  LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
89  LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
90  LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
91 
92  /* ExclusiveLock */
93  LOCKBIT_ON(RowShareLock) |
94  LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
95  LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
96  LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
97 
98  /* AccessExclusiveLock */
99  LOCKBIT_ON(AccessShareLock) | LOCKBIT_ON(RowShareLock) |
100  LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
101  LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
102  LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock)
103 
104 };
105 
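/*
 * Example of reading the conflict table above (illustrative, not part of
 * lock.c): the RowExclusiveLock entry sets the bits for ShareLock,
 * ShareRowExclusiveLock, ExclusiveLock and AccessExclusiveLock, so an
 * ordinary UPDATE (RowExclusiveLock) blocks against CREATE INDEX
 * (ShareLock) or most ALTER TABLE forms (AccessExclusiveLock), but not
 * against other UPDATEs or plain SELECTs (AccessShareLock).
 */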
106 /* Names of lock modes, for debug printouts */
107 static const char *const lock_mode_names[] =
108 {
109  "INVALID",
110  "AccessShareLock",
111  "RowShareLock",
112  "RowExclusiveLock",
113  "ShareUpdateExclusiveLock",
114  "ShareLock",
115  "ShareRowExclusiveLock",
116  "ExclusiveLock",
117  "AccessExclusiveLock"
118 };
119 
120 #ifndef LOCK_DEBUG
121 static bool Dummy_trace = false;
122 #endif
123 
124 static const LockMethodData default_lockmethod = {
125  MaxLockMode,
126  LockConflicts,
127  lock_mode_names,
128 #ifdef LOCK_DEBUG
129  &Trace_locks
130 #else
131  &Dummy_trace
132 #endif
133 };
134 
135 static const LockMethodData user_lockmethod = {
136  MaxLockMode,
137  LockConflicts,
138  lock_mode_names,
139 #ifdef LOCK_DEBUG
140  &Trace_userlocks
141 #else
142  &Dummy_trace
143 #endif
144 };
145 
146 /*
147  * map from lock method id to the lock table data structures
148  */
149 static const LockMethod LockMethods[] = {
150  NULL,
151  &default_lockmethod,
152  &user_lockmethod
153 };
154 
155 
156 /* Record that's written to 2PC state file when a lock is persisted */
157 typedef struct TwoPhaseLockRecord
158 {
159  LOCKTAG locktag;
160  LOCKMODE lockmode;
161 } TwoPhaseLockRecord;
162 
163 
164 /*
165  * Count of the number of fast path lock slots we believe to be used. This
166  * might be higher than the real number if another backend has transferred
167  * our locks to the primary lock table, but it can never be lower than the
168  * real value, since only we can acquire locks on our own behalf.
169  *
170  * XXX Allocate a static array of the maximum size. We could use a pointer
171  * and then allocate just the right size to save a couple kB, but then we
172  * would have to initialize that, while for the static array that happens
173  * automatically. Doesn't seem worth the extra complexity.
174  */
175 static int FastPathLocalUseCounts[FP_LOCK_GROUPS_PER_BACKEND_MAX];
176 
177 /*
178  * Flag to indicate if the relation extension lock is held by this backend.
179  * This flag is used to ensure that while holding the relation extension lock
180  * we don't try to acquire a heavyweight lock on any other object. This
181  * restriction implies that the relation extension lock won't ever participate
182  * in the deadlock cycle because we can never wait for any other heavyweight
183  * lock after acquiring this lock.
184  *
185  * Such a restriction is okay for relation extension locks as unlike other
186  * heavyweight locks these are not held till the transaction end. These are
187  * taken for a short duration to extend a particular relation and then
188  * released.
189  */
190 static bool IsRelationExtensionLockHeld PG_USED_FOR_ASSERTS_ONLY = false;
191 
192 /*
193  * Number of fast-path locks per backend - size of the arrays in PGPROC.
194  * This is set only once during start, before initializing shared memory,
195  * and remains constant after that.
196  *
197  * We set the limit based on max_locks_per_transaction GUC, because that's
198  * the best information about expected number of locks per backend we have.
199  * See InitializeFastPathLocks() for details.
200  */
201 int FastPathLockGroupsPerBackend = 0;
202 
203 /*
204  * Macros to calculate the fast-path group and index for a relation.
205  *
206  * The formula is a simple hash function, designed to spread the OIDs a bit,
207  * so that even contiguous values end up in different groups. In most cases
208  * there will be gaps anyway, but the multiplication should help a bit.
209  *
210  * The selected constant (49157) is a prime not too close to 2^k, and it's
211  * small enough to not cause overflows (in 64-bit).
212  */
213 #define FAST_PATH_REL_GROUP(rel) \
214  (((uint64) (rel) * 49157) % FastPathLockGroupsPerBackend)
215 
216 /*
217  * Given the group/slot indexes, calculate the slot index in the whole array
218  * of fast-path lock slots.
219  */
220 #define FAST_PATH_SLOT(group, index) \
221  (AssertMacro((uint32) (group) < FastPathLockGroupsPerBackend), \
222  AssertMacro((uint32) (index) < FP_LOCK_SLOTS_PER_GROUP), \
223  ((group) * FP_LOCK_SLOTS_PER_GROUP + (index)))
224 
225 /*
226  * Given a slot index (into the whole per-backend array), calculated using
227  * the FAST_PATH_SLOT macro, split it into group and index (in the group).
228  */
229 #define FAST_PATH_GROUP(index) \
230  (AssertMacro((uint32) (index) < FP_LOCK_SLOTS_PER_BACKEND), \
231  ((index) / FP_LOCK_SLOTS_PER_GROUP))
232 #define FAST_PATH_INDEX(index) \
233  (AssertMacro((uint32) (index) < FP_LOCK_SLOTS_PER_BACKEND), \
234  ((index) % FP_LOCK_SLOTS_PER_GROUP))
235 
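/*
 * Worked example (illustrative, not part of lock.c): assuming
 * FastPathLockGroupsPerBackend = 4 and FP_LOCK_SLOTS_PER_GROUP = 16,
 * relation OID 16385 maps to group (16385 * 49157) % 4 = 1, and if it is
 * stored in that group's slot 2, FAST_PATH_SLOT(1, 2) = 1 * 16 + 2 = 18;
 * FAST_PATH_GROUP(18) and FAST_PATH_INDEX(18) recover 1 and 2 again.
 */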
236 /* Macros for manipulating proc->fpLockBits */
237 #define FAST_PATH_BITS_PER_SLOT 3
238 #define FAST_PATH_LOCKNUMBER_OFFSET 1
239 #define FAST_PATH_MASK ((1 << FAST_PATH_BITS_PER_SLOT) - 1)
240 #define FAST_PATH_BITS(proc, n) (proc)->fpLockBits[FAST_PATH_GROUP(n)]
241 #define FAST_PATH_GET_BITS(proc, n) \
242  ((FAST_PATH_BITS(proc, n) >> (FAST_PATH_BITS_PER_SLOT * FAST_PATH_INDEX(n))) & FAST_PATH_MASK)
243 #define FAST_PATH_BIT_POSITION(n, l) \
244  (AssertMacro((l) >= FAST_PATH_LOCKNUMBER_OFFSET), \
245  AssertMacro((l) < FAST_PATH_BITS_PER_SLOT+FAST_PATH_LOCKNUMBER_OFFSET), \
246  AssertMacro((n) < FP_LOCK_SLOTS_PER_BACKEND), \
247  ((l) - FAST_PATH_LOCKNUMBER_OFFSET + FAST_PATH_BITS_PER_SLOT * (FAST_PATH_INDEX(n))))
248 #define FAST_PATH_SET_LOCKMODE(proc, n, l) \
249  FAST_PATH_BITS(proc, n) |= UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)
250 #define FAST_PATH_CLEAR_LOCKMODE(proc, n, l) \
251  FAST_PATH_BITS(proc, n) &= ~(UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l))
252 #define FAST_PATH_CHECK_LOCKMODE(proc, n, l) \
253  (FAST_PATH_BITS(proc, n) & (UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)))
254 
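/*
 * Illustrative example (not part of lock.c): each slot gets
 * FAST_PATH_BITS_PER_SLOT = 3 bits in its group's fpLockBits word, one per
 * fast-path-eligible mode (AccessShareLock = 1, RowShareLock = 2,
 * RowExclusiveLock = 3).  FAST_PATH_SET_LOCKMODE(proc, n, AccessShareLock)
 * therefore sets bit 3 * FAST_PATH_INDEX(n) + 0, and FAST_PATH_GET_BITS()
 * returns a 3-bit mask describing which of those modes slot n holds.
 */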
255 /*
256  * The fast-path lock mechanism is concerned only with relation locks on
257  * unshared relations by backends bound to a database. The fast-path
258  * mechanism exists mostly to accelerate acquisition and release of locks
259  * that rarely conflict. Because ShareUpdateExclusiveLock is
260  * self-conflicting, it can't use the fast-path mechanism; but it also does
261  * not conflict with any of the locks that do, so we can ignore it completely.
262  */
263 #define EligibleForRelationFastPath(locktag, mode) \
264  ((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
265  (locktag)->locktag_type == LOCKTAG_RELATION && \
266  (locktag)->locktag_field1 == MyDatabaseId && \
267  MyDatabaseId != InvalidOid && \
268  (mode) < ShareUpdateExclusiveLock)
269 #define ConflictsWithRelationFastPath(locktag, mode) \
270  ((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
271  (locktag)->locktag_type == LOCKTAG_RELATION && \
272  (locktag)->locktag_field1 != InvalidOid && \
273  (mode) > ShareUpdateExclusiveLock)
274 
275 static bool FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode);
276 static bool FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode);
277 static bool FastPathTransferRelationLocks(LockMethod lockMethodTable,
278  const LOCKTAG *locktag, uint32 hashcode);
279 static PROCLOCK *FastPathGetRelationLockEntry(LOCALLOCK *locallock);
280 
281 /*
282  * To make the fast-path lock mechanism work, we must have some way of
283  * preventing the use of the fast-path when a conflicting lock might be present.
284  * We partition the locktag space into FAST_PATH_STRONG_LOCK_HASH_PARTITIONS,
285  * and maintain an integer count of the number of "strong" lockers
286  * in each partition. When any "strong" lockers are present (which is
287  * hopefully not very often), the fast-path mechanism can't be used, and we
288  * must fall back to the slower method of pushing matching locks directly
289  * into the main lock tables.
290  *
291  * The deadlock detector does not know anything about the fast path mechanism,
292  * so any locks that might be involved in a deadlock must be transferred from
293  * the fast-path queues to the main lock table.
294  */
295 
296 #define FAST_PATH_STRONG_LOCK_HASH_BITS 10
297 #define FAST_PATH_STRONG_LOCK_HASH_PARTITIONS \
298  (1 << FAST_PATH_STRONG_LOCK_HASH_BITS)
299 #define FastPathStrongLockHashPartition(hashcode) \
300  ((hashcode) % FAST_PATH_STRONG_LOCK_HASH_PARTITIONS)
301 
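/*
 * Illustrative example (not part of lock.c): a CREATE INDEX acquiring
 * ShareLock on some relation hashes that relation's locktag and bumps
 * FastPathStrongRelationLocks->count[] for partition
 * FastPathStrongLockHashPartition(hashcode), a value in [0, 1024).  Until
 * that count drops back to zero, weak lockers of any relation hashing to
 * the same partition must go through the main lock table instead of the
 * fast path.
 */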
302 typedef struct
303 {
304  slock_t mutex;
305  uint32 count[FAST_PATH_STRONG_LOCK_HASH_PARTITIONS];
306 } FastPathStrongRelationLockData;
307 
308 static volatile FastPathStrongRelationLockData *FastPathStrongRelationLocks;
309 
310 
311 /*
312  * Pointers to hash tables containing lock state
313  *
314  * The LockMethodLockHash and LockMethodProcLockHash hash tables are in
315  * shared memory; LockMethodLocalHash is local to each backend.
316  */
317 static HTAB *LockMethodLockHash;
318 static HTAB *LockMethodProcLockHash;
319 static HTAB *LockMethodLocalHash;
320 
321 
322 /* private state for error cleanup */
323 static LOCALLOCK *StrongLockInProgress;
324 static LOCALLOCK *awaitedLock;
325 static ResourceOwner awaitedOwner;
326 
327 
328 #ifdef LOCK_DEBUG
329 
330 /*------
331  * The following configuration options are available for lock debugging:
332  *
333  * TRACE_LOCKS -- give a bunch of output what's going on in this file
334  * TRACE_USERLOCKS -- same but for user locks
335  * TRACE_LOCK_OIDMIN-- do not trace locks for tables below this oid
336  * (use to avoid output on system tables)
337  * TRACE_LOCK_TABLE -- trace locks on this table (oid) unconditionally
338  * DEBUG_DEADLOCKS -- currently dumps locks at untimely occasions ;)
339  *
340  * Furthermore, in storage/lmgr/lwlock.c:
341  * TRACE_LWLOCKS -- trace lightweight locks (pretty useless)
342  *
343  * Define LOCK_DEBUG at compile time to get all these enabled.
344  * --------
345  */
346 
347 int Trace_lock_oidmin = FirstNormalObjectId;
348 bool Trace_locks = false;
349 bool Trace_userlocks = false;
350 int Trace_lock_table = 0;
351 bool Debug_deadlocks = false;
352 
353 
354 inline static bool
355 LOCK_DEBUG_ENABLED(const LOCKTAG *tag)
356 {
357  return
358  (*(LockMethods[tag->locktag_lockmethodid]->trace_flag) &&
359  ((Oid) tag->locktag_field2 >= (Oid) Trace_lock_oidmin))
360  || (Trace_lock_table &&
361  (tag->locktag_field2 == Trace_lock_table));
362 }
363 
364 
365 inline static void
366 LOCK_PRINT(const char *where, const LOCK *lock, LOCKMODE type)
367 {
368  if (LOCK_DEBUG_ENABLED(&lock->tag))
369  elog(LOG,
370  "%s: lock(%p) id(%u,%u,%u,%u,%u,%u) grantMask(%x) "
371  "req(%d,%d,%d,%d,%d,%d,%d)=%d "
372  "grant(%d,%d,%d,%d,%d,%d,%d)=%d wait(%d) type(%s)",
373  where, lock,
374  lock->tag.locktag_field1, lock->tag.locktag_field2,
375  lock->tag.locktag_field3, lock->tag.locktag_field4,
376  lock->tag.locktag_type, lock->tag.locktag_lockmethodid,
377  lock->grantMask,
378  lock->requested[1], lock->requested[2], lock->requested[3],
379  lock->requested[4], lock->requested[5], lock->requested[6],
380  lock->requested[7], lock->nRequested,
381  lock->granted[1], lock->granted[2], lock->granted[3],
382  lock->granted[4], lock->granted[5], lock->granted[6],
383  lock->granted[7], lock->nGranted,
384  dclist_count(&lock->waitProcs),
385  LockMethods[LOCK_LOCKMETHOD(*lock)]->lockModeNames[type]);
386 }
387 
388 
389 inline static void
390 PROCLOCK_PRINT(const char *where, const PROCLOCK *proclockP)
391 {
392  if (LOCK_DEBUG_ENABLED(&proclockP->tag.myLock->tag))
393  elog(LOG,
394  "%s: proclock(%p) lock(%p) method(%u) proc(%p) hold(%x)",
395  where, proclockP, proclockP->tag.myLock,
396  PROCLOCK_LOCKMETHOD(*(proclockP)),
397  proclockP->tag.myProc, (int) proclockP->holdMask);
398 }
399 #else /* not LOCK_DEBUG */
400 
401 #define LOCK_PRINT(where, lock, type) ((void) 0)
402 #define PROCLOCK_PRINT(where, proclockP) ((void) 0)
403 #endif /* not LOCK_DEBUG */
404 
405 
406 static uint32 proclock_hash(const void *key, Size keysize);
407 static void RemoveLocalLock(LOCALLOCK *locallock);
408 static PROCLOCK *SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc,
409  const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode);
410 static void GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner);
411 static void BeginStrongLockAcquire(LOCALLOCK *locallock, uint32 fasthashcode);
412 static void FinishStrongLockAcquire(void);
413 static void WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner,
414  bool dontWait);
415 static void ReleaseLockIfHeld(LOCALLOCK *locallock, bool sessionLock);
416 static void LockReassignOwner(LOCALLOCK *locallock, ResourceOwner parent);
417 static bool UnGrantLock(LOCK *lock, LOCKMODE lockmode,
418  PROCLOCK *proclock, LockMethod lockMethodTable);
419 static void CleanUpLock(LOCK *lock, PROCLOCK *proclock,
420  LockMethod lockMethodTable, uint32 hashcode,
421  bool wakeupNeeded);
422 static void LockRefindAndRelease(LockMethod lockMethodTable, PGPROC *proc,
423  LOCKTAG *locktag, LOCKMODE lockmode,
424  bool decrement_strong_lock_count);
425 static void GetSingleProcBlockerStatusData(PGPROC *blocked_proc,
426  BlockedProcsData *data);
427 
428 
429 /*
430  * Initialize the lock manager's shmem data structures.
431  *
432  * This is called from CreateSharedMemoryAndSemaphores(), which see for more
433  * comments. In the normal postmaster case, the shared hash tables are
434  * created here, and backends inherit pointers to them via fork(). In the
435  * EXEC_BACKEND case, each backend re-executes this code to obtain pointers to
436  * the already existing shared hash tables. In either case, each backend must
437  * also call InitLockManagerAccess() to create the locallock hash table.
438  */
439 void
440 LockManagerShmemInit(void)
441 {
442  HASHCTL info;
443  long init_table_size,
444  max_table_size;
445  bool found;
446 
447  /*
448  * Compute init/max size to request for lock hashtables. Note these
449  * calculations must agree with LockManagerShmemSize!
450  */
451  max_table_size = NLOCKENTS();
452  init_table_size = max_table_size / 2;
453 
454  /*
455  * Allocate hash table for LOCK structs. This stores per-locked-object
456  * information.
457  */
458  info.keysize = sizeof(LOCKTAG);
459  info.entrysize = sizeof(LOCK);
460  info.num_partitions = NUM_LOCK_PARTITIONS;
461 
462  LockMethodLockHash = ShmemInitHash("LOCK hash",
463  init_table_size,
464  max_table_size,
465  &info,
466  HASH_ELEM | HASH_BLOBS | HASH_PARTITION);
467 
468  /* Assume an average of 2 holders per lock */
469  max_table_size *= 2;
470  init_table_size *= 2;
471 
472  /*
473  * Allocate hash table for PROCLOCK structs. This stores
474  * per-lock-per-holder information.
475  */
476  info.keysize = sizeof(PROCLOCKTAG);
477  info.entrysize = sizeof(PROCLOCK);
478  info.hash = proclock_hash;
479  info.num_partitions = NUM_LOCK_PARTITIONS;
480 
481  LockMethodProcLockHash = ShmemInitHash("PROCLOCK hash",
482  init_table_size,
483  max_table_size,
484  &info,
485  HASH_ELEM | HASH_FUNCTION | HASH_PARTITION);
486 
487  /*
488  * Allocate fast-path structures.
489  */
490  FastPathStrongRelationLocks =
491  ShmemInitStruct("Fast Path Strong Relation Lock Data",
492  sizeof(FastPathStrongRelationLockData), &found);
493  if (!found)
494  SpinLockInit(&FastPathStrongRelationLocks->mutex);
495 }
496 
497 /*
498  * Initialize the lock manager's backend-private data structures.
499  */
500 void
501 InitLockManagerAccess(void)
502 {
503  /*
504  * Allocate non-shared hash table for LOCALLOCK structs. This stores lock
505  * counts and resource owner information.
506  */
507  HASHCTL info;
508 
509  info.keysize = sizeof(LOCALLOCKTAG);
510  info.entrysize = sizeof(LOCALLOCK);
511 
512  LockMethodLocalHash = hash_create("LOCALLOCK hash",
513  16,
514  &info,
515  HASH_ELEM | HASH_BLOBS);
516 }
517 
518 
519 /*
520  * Fetch the lock method table associated with a given lock
521  */
522 LockMethod
523 GetLocksMethodTable(const LOCK *lock)
524 {
525  LOCKMETHODID lockmethodid = LOCK_LOCKMETHOD(*lock);
526 
527  Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
528  return LockMethods[lockmethodid];
529 }
530 
531 /*
532  * Fetch the lock method table associated with a given locktag
533  */
534 LockMethod
535 GetLockTagsMethodTable(const LOCKTAG *locktag)
536 {
537  LOCKMETHODID lockmethodid = (LOCKMETHODID) locktag->locktag_lockmethodid;
538 
539  Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
540  return LockMethods[lockmethodid];
541 }
542 
543 
544 /*
545  * Compute the hash code associated with a LOCKTAG.
546  *
547  * To avoid unnecessary recomputations of the hash code, we try to do this
548  * just once per function, and then pass it around as needed. Aside from
549  * passing the hashcode to hash_search_with_hash_value(), we can extract
550  * the lock partition number from the hashcode.
551  */
552 uint32
553 LockTagHashCode(const LOCKTAG *locktag)
554 {
555  return get_hash_value(LockMethodLockHash, (const void *) locktag);
556 }
557 
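/*
 * Usage sketch (illustrative, not part of lock.c): callers typically
 * compute the hashcode once and reuse it, e.g.
 *
 *     hashcode = LockTagHashCode(locktag);
 *     partitionLock = LockHashPartitionLock(hashcode);
 *
 * where LockHashPartition()/LockHashPartitionLock() (see lock.h) select a
 * partition from the low-order bits of the hashcode.
 */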
558 /*
559  * Compute the hash code associated with a PROCLOCKTAG.
560  *
561  * Because we want to use just one set of partition locks for both the
562  * LOCK and PROCLOCK hash tables, we have to make sure that PROCLOCKs
563  * fall into the same partition number as their associated LOCKs.
564  * dynahash.c expects the partition number to be the low-order bits of
565  * the hash code, and therefore a PROCLOCKTAG's hash code must have the
566  * same low-order bits as the associated LOCKTAG's hash code. We achieve
567  * this with this specialized hash function.
568  */
569 static uint32
570 proclock_hash(const void *key, Size keysize)
571 {
572  const PROCLOCKTAG *proclocktag = (const PROCLOCKTAG *) key;
573  uint32 lockhash;
574  Datum procptr;
575 
576  Assert(keysize == sizeof(PROCLOCKTAG));
577 
578  /* Look into the associated LOCK object, and compute its hash code */
579  lockhash = LockTagHashCode(&proclocktag->myLock->tag);
580 
581  /*
582  * To make the hash code also depend on the PGPROC, we xor the proc
583  * struct's address into the hash code, left-shifted so that the
584  * partition-number bits don't change. Since this is only a hash, we
585  * don't care if we lose high-order bits of the address; use an
586  * intermediate variable to suppress cast-pointer-to-int warnings.
587  */
588  procptr = PointerGetDatum(proclocktag->myProc);
589  lockhash ^= ((uint32) procptr) << LOG2_NUM_LOCK_PARTITIONS;
590 
591  return lockhash;
592 }
593 
594 /*
595  * Compute the hash code associated with a PROCLOCKTAG, given the hashcode
596  * for its underlying LOCK.
597  *
598  * We use this just to avoid redundant calls of LockTagHashCode().
599  */
600 static inline uint32
601 ProcLockHashCode(const PROCLOCKTAG *proclocktag, uint32 hashcode)
602 {
603  uint32 lockhash = hashcode;
604  Datum procptr;
605 
606  /*
607  * This must match proclock_hash()!
608  */
609  procptr = PointerGetDatum(proclocktag->myProc);
610  lockhash ^= ((uint32) procptr) << LOG2_NUM_LOCK_PARTITIONS;
611 
612  return lockhash;
613 }
614 
615 /*
616  * Given two lock modes, return whether they would conflict.
617  */
618 bool
619 DoLockModesConflict(LOCKMODE mode1, LOCKMODE mode2)
620 {
621  LockMethod lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];
622 
623  if (lockMethodTable->conflictTab[mode1] & LOCKBIT_ON(mode2))
624  return true;
625 
626  return false;
627 }
628 
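/*
 * Usage sketch (illustrative, not part of lock.c):
 *
 *     DoLockModesConflict(RowExclusiveLock, ShareLock);          -> true
 *     DoLockModesConflict(RowExclusiveLock, RowExclusiveLock);   -> false
 *
 * per the LockConflicts[] table for the default lock method.
 */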
629 /*
630  * LockHeldByMe -- test whether lock 'locktag' is held by the current
631  * transaction
632  *
633  * Returns true if current transaction holds a lock on 'tag' of mode
634  * 'lockmode'. If 'orstronger' is true, a stronger lockmode is also OK.
635  * ("Stronger" is defined as "numerically higher", which is a bit
636  * semantically dubious but is OK for the purposes we use this for.)
637  */
638 bool
639 LockHeldByMe(const LOCKTAG *locktag,
640  LOCKMODE lockmode, bool orstronger)
641 {
642  LOCALLOCKTAG localtag;
643  LOCALLOCK *locallock;
644 
645  /*
646  * See if there is a LOCALLOCK entry for this lock and lockmode
647  */
648  MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
649  localtag.lock = *locktag;
650  localtag.mode = lockmode;
651 
652  locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
653  &localtag,
654  HASH_FIND, NULL);
655 
656  if (locallock && locallock->nLocks > 0)
657  return true;
658 
659  if (orstronger)
660  {
661  LOCKMODE slockmode;
662 
663  for (slockmode = lockmode + 1;
664  slockmode <= MaxLockMode;
665  slockmode++)
666  {
667  if (LockHeldByMe(locktag, slockmode, false))
668  return true;
669  }
670  }
671 
672  return false;
673 }
674 
675 #ifdef USE_ASSERT_CHECKING
676 /*
677  * GetLockMethodLocalHash -- return the hash of local locks, for modules that
678  * evaluate assertions based on all locks held.
679  */
680 HTAB *
681 GetLockMethodLocalHash(void)
682 {
683  return LockMethodLocalHash;
684 }
685 #endif
686 
687 /*
688  * LockHasWaiters -- look up 'locktag' and check if releasing this
689  * lock would wake up other processes waiting for it.
690  */
691 bool
692 LockHasWaiters(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
693 {
694  LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
695  LockMethod lockMethodTable;
696  LOCALLOCKTAG localtag;
697  LOCALLOCK *locallock;
698  LOCK *lock;
699  PROCLOCK *proclock;
700  LWLock *partitionLock;
701  bool hasWaiters = false;
702 
703  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
704  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
705  lockMethodTable = LockMethods[lockmethodid];
706  if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
707  elog(ERROR, "unrecognized lock mode: %d", lockmode);
708 
709 #ifdef LOCK_DEBUG
710  if (LOCK_DEBUG_ENABLED(locktag))
711  elog(LOG, "LockHasWaiters: lock [%u,%u] %s",
712  locktag->locktag_field1, locktag->locktag_field2,
713  lockMethodTable->lockModeNames[lockmode]);
714 #endif
715 
716  /*
717  * Find the LOCALLOCK entry for this lock and lockmode
718  */
719  MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
720  localtag.lock = *locktag;
721  localtag.mode = lockmode;
722 
723  locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
724  &localtag,
725  HASH_FIND, NULL);
726 
727  /*
728  * let the caller print its own error message, too. Do not ereport(ERROR).
729  */
730  if (!locallock || locallock->nLocks <= 0)
731  {
732  elog(WARNING, "you don't own a lock of type %s",
733  lockMethodTable->lockModeNames[lockmode]);
734  return false;
735  }
736 
737  /*
738  * Check the shared lock table.
739  */
740  partitionLock = LockHashPartitionLock(locallock->hashcode);
741 
742  LWLockAcquire(partitionLock, LW_SHARED);
743 
744  /*
745  * We don't need to re-find the lock or proclock, since we kept their
746  * addresses in the locallock table, and they couldn't have been removed
747  * while we were holding a lock on them.
748  */
749  lock = locallock->lock;
750  LOCK_PRINT("LockHasWaiters: found", lock, lockmode);
751  proclock = locallock->proclock;
752  PROCLOCK_PRINT("LockHasWaiters: found", proclock);
753 
754  /*
755  * Double-check that we are actually holding a lock of the type we want to
756  * release.
757  */
758  if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
759  {
760  PROCLOCK_PRINT("LockHasWaiters: WRONGTYPE", proclock);
761  LWLockRelease(partitionLock);
762  elog(WARNING, "you don't own a lock of type %s",
763  lockMethodTable->lockModeNames[lockmode]);
764  RemoveLocalLock(locallock);
765  return false;
766  }
767 
768  /*
769  * Do the checking.
770  */
771  if ((lockMethodTable->conflictTab[lockmode] & lock->waitMask) != 0)
772  hasWaiters = true;
773 
774  LWLockRelease(partitionLock);
775 
776  return hasWaiters;
777 }
778 
779 /*
780  * LockAcquire -- Check for lock conflicts, sleep if conflict found,
781  * set lock if/when no conflicts.
782  *
783  * Inputs:
784  * locktag: unique identifier for the lockable object
785  * lockmode: lock mode to acquire
786  * sessionLock: if true, acquire lock for session not current transaction
787  * dontWait: if true, don't wait to acquire lock
788  *
789  * Returns one of:
790  * LOCKACQUIRE_NOT_AVAIL lock not available, and dontWait=true
791  * LOCKACQUIRE_OK lock successfully acquired
792  * LOCKACQUIRE_ALREADY_HELD incremented count for lock already held
793  * LOCKACQUIRE_ALREADY_CLEAR incremented count for lock already clear
794  *
795  * In the normal case where dontWait=false and the caller doesn't need to
796  * distinguish a freshly acquired lock from one already taken earlier in
797  * this same transaction, there is no need to examine the return value.
798  *
799  * Side Effects: The lock is acquired and recorded in lock tables.
800  *
801  * NOTE: if we wait for the lock, there is no way to abort the wait
802  * short of aborting the transaction.
803  */
805 LockAcquire(const LOCKTAG *locktag,
806  LOCKMODE lockmode,
807  bool sessionLock,
808  bool dontWait)
809 {
810  return LockAcquireExtended(locktag, lockmode, sessionLock, dontWait,
811  true, NULL);
812 }
813 
814 /*
815  * LockAcquireExtended - allows us to specify additional options
816  *
817  * reportMemoryError specifies whether a lock request that fills the lock
818  * table should generate an ERROR or not. Passing "false" allows the caller
819  * to attempt to recover from lock-table-full situations, perhaps by forcibly
820  * canceling other lock holders and then retrying. Note, however, that the
821  * return code for that is LOCKACQUIRE_NOT_AVAIL, so that it's unsafe to use
822  * in combination with dontWait = true, as the cause of failure couldn't be
823  * distinguished.
824  *
825  * If locallockp isn't NULL, *locallockp receives a pointer to the LOCALLOCK
826  * table entry if a lock is successfully acquired, or NULL if not.
827  */
828 LockAcquireResult
829 LockAcquireExtended(const LOCKTAG *locktag,
830  LOCKMODE lockmode,
831  bool sessionLock,
832  bool dontWait,
833  bool reportMemoryError,
834  LOCALLOCK **locallockp)
835 {
836  LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
837  LockMethod lockMethodTable;
838  LOCALLOCKTAG localtag;
839  LOCALLOCK *locallock;
840  LOCK *lock;
841  PROCLOCK *proclock;
842  bool found;
843  ResourceOwner owner;
844  uint32 hashcode;
845  LWLock *partitionLock;
846  bool found_conflict;
847  bool log_lock = false;
848 
849  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
850  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
851  lockMethodTable = LockMethods[lockmethodid];
852  if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
853  elog(ERROR, "unrecognized lock mode: %d", lockmode);
854 
855  if (RecoveryInProgress() && !InRecovery &&
856  (locktag->locktag_type == LOCKTAG_OBJECT ||
857  locktag->locktag_type == LOCKTAG_RELATION) &&
858  lockmode > RowExclusiveLock)
859  ereport(ERROR,
860  (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
861  errmsg("cannot acquire lock mode %s on database objects while recovery is in progress",
862  lockMethodTable->lockModeNames[lockmode]),
863  errhint("Only RowExclusiveLock or less can be acquired on database objects during recovery.")));
864 
865 #ifdef LOCK_DEBUG
866  if (LOCK_DEBUG_ENABLED(locktag))
867  elog(LOG, "LockAcquire: lock [%u,%u] %s",
868  locktag->locktag_field1, locktag->locktag_field2,
869  lockMethodTable->lockModeNames[lockmode]);
870 #endif
871 
872  /* Identify owner for lock */
873  if (sessionLock)
874  owner = NULL;
875  else
876  owner = CurrentResourceOwner;
877 
878  /*
879  * Find or create a LOCALLOCK entry for this lock and lockmode
880  */
881  MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
882  localtag.lock = *locktag;
883  localtag.mode = lockmode;
884 
885  locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
886  &localtag,
887  HASH_ENTER, &found);
888 
889  /*
890  * if it's a new locallock object, initialize it
891  */
892  if (!found)
893  {
894  locallock->lock = NULL;
895  locallock->proclock = NULL;
896  locallock->hashcode = LockTagHashCode(&(localtag.lock));
897  locallock->nLocks = 0;
898  locallock->holdsStrongLockCount = false;
899  locallock->lockCleared = false;
900  locallock->numLockOwners = 0;
901  locallock->maxLockOwners = 8;
902  locallock->lockOwners = NULL; /* in case next line fails */
903  locallock->lockOwners = (LOCALLOCKOWNER *)
904  MemoryContextAlloc(TopMemoryContext,
905  locallock->maxLockOwners * sizeof(LOCALLOCKOWNER));
906  }
907  else
908  {
909  /* Make sure there will be room to remember the lock */
910  if (locallock->numLockOwners >= locallock->maxLockOwners)
911  {
912  int newsize = locallock->maxLockOwners * 2;
913 
914  locallock->lockOwners = (LOCALLOCKOWNER *)
915  repalloc(locallock->lockOwners,
916  newsize * sizeof(LOCALLOCKOWNER));
917  locallock->maxLockOwners = newsize;
918  }
919  }
920  hashcode = locallock->hashcode;
921 
922  if (locallockp)
923  *locallockp = locallock;
924 
925  /*
926  * If we already hold the lock, we can just increase the count locally.
927  *
928  * If lockCleared is already set, caller need not worry about absorbing
929  * sinval messages related to the lock's object.
930  */
931  if (locallock->nLocks > 0)
932  {
933  GrantLockLocal(locallock, owner);
934  if (locallock->lockCleared)
935  return LOCKACQUIRE_ALREADY_CLEAR;
936  else
937  return LOCKACQUIRE_ALREADY_HELD;
938  }
939 
940  /*
941  * We don't acquire any other heavyweight lock while holding the relation
942  * extension lock. We do allow acquiring the same relation extension
943  * lock more than once, but that case won't reach here.
944  */
945  Assert(!IsRelationExtensionLockHeld);
946 
947  /*
948  * Prepare to emit a WAL record if acquisition of this lock needs to be
949  * replayed in a standby server.
950  *
951  * Here we prepare to log; after lock is acquired we'll issue log record.
952  * This arrangement simplifies error recovery in case the preparation step
953  * fails.
954  *
955  * Only AccessExclusiveLocks can conflict with lock types that read-only
956  * transactions can acquire in a standby server. Make sure this definition
957  * matches the one in GetRunningTransactionLocks().
958  */
959  if (lockmode >= AccessExclusiveLock &&
960  locktag->locktag_type == LOCKTAG_RELATION &&
961  !RecoveryInProgress() &&
962  XLogStandbyInfoActive())
963  {
964  LogAccessExclusiveLockPrepare();
965  log_lock = true;
966  }
967 
968  /*
969  * Attempt to take lock via fast path, if eligible. But if we remember
970  * having filled up the fast path array, we don't attempt to make any
971  * further use of it until we release some locks. It's possible that some
972  * other backend has transferred some of those locks to the shared hash
973  * table, leaving space free, but it's not worth acquiring the LWLock just
974  * to check. It's also possible that we're acquiring a second or third
975  * lock type on a relation we have already locked using the fast-path, but
976  * for now we don't worry about that case either.
977  */
978  if (EligibleForRelationFastPath(locktag, lockmode) &&
979  FastPathLocalUseCounts[FAST_PATH_REL_GROUP(locktag->locktag_field2)] < FP_LOCK_SLOTS_PER_GROUP)
980  {
981  uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
982  bool acquired;
983 
984  /*
985  * LWLockAcquire acts as a memory sequencing point, so it's safe to
986  * assume that any strong locker whose increment to
987  * FastPathStrongRelationLocks->counts becomes visible after we test
988  * it has yet to begin to transfer fast-path locks.
989  */
990  LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
991  if (FastPathStrongRelationLocks->count[fasthashcode] != 0)
992  acquired = false;
993  else
994  acquired = FastPathGrantRelationLock(locktag->locktag_field2,
995  lockmode);
996  LWLockRelease(&MyProc->fpInfoLock);
997  if (acquired)
998  {
999  /*
1000  * The locallock might contain stale pointers to some old shared
1001  * objects; we MUST reset these to null before considering the
1002  * lock to be acquired via fast-path.
1003  */
1004  locallock->lock = NULL;
1005  locallock->proclock = NULL;
1006  GrantLockLocal(locallock, owner);
1007  return LOCKACQUIRE_OK;
1008  }
1009  }
1010 
1011  /*
1012  * If this lock could potentially have been taken via the fast-path by
1013  * some other backend, we must (temporarily) disable further use of the
1014  * fast-path for this lock tag, and migrate any locks already taken via
1015  * this method to the main lock table.
1016  */
1017  if (ConflictsWithRelationFastPath(locktag, lockmode))
1018  {
1019  uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
1020 
1021  BeginStrongLockAcquire(locallock, fasthashcode);
1022  if (!FastPathTransferRelationLocks(lockMethodTable, locktag,
1023  hashcode))
1024  {
1025  AbortStrongLockAcquire();
1026  if (locallock->nLocks == 0)
1027  RemoveLocalLock(locallock);
1028  if (locallockp)
1029  *locallockp = NULL;
1030  if (reportMemoryError)
1031  ereport(ERROR,
1032  (errcode(ERRCODE_OUT_OF_MEMORY),
1033  errmsg("out of shared memory"),
1034  errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
1035  else
1036  return LOCKACQUIRE_NOT_AVAIL;
1037  }
1038  }
1039 
1040  /*
1041  * We didn't find the lock in our LOCALLOCK table, and we didn't manage to
1042  * take it via the fast-path, either, so we've got to mess with the shared
1043  * lock table.
1044  */
1045  partitionLock = LockHashPartitionLock(hashcode);
1046 
1047  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
1048 
1049  /*
1050  * Find or create lock and proclock entries with this tag
1051  *
1052  * Note: if the locallock object already existed, it might have a pointer
1053  * to the lock already ... but we should not assume that that pointer is
1054  * valid, since a lock object with zero hold and request counts can go
1055  * away anytime. So we have to use SetupLockInTable() to recompute the
1056  * lock and proclock pointers, even if they're already set.
1057  */
1058  proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
1059  hashcode, lockmode);
1060  if (!proclock)
1061  {
1062  AbortStrongLockAcquire();
1063  LWLockRelease(partitionLock);
1064  if (locallock->nLocks == 0)
1065  RemoveLocalLock(locallock);
1066  if (locallockp)
1067  *locallockp = NULL;
1068  if (reportMemoryError)
1069  ereport(ERROR,
1070  (errcode(ERRCODE_OUT_OF_MEMORY),
1071  errmsg("out of shared memory"),
1072  errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
1073  else
1074  return LOCKACQUIRE_NOT_AVAIL;
1075  }
1076  locallock->proclock = proclock;
1077  lock = proclock->tag.myLock;
1078  locallock->lock = lock;
1079 
1080  /*
1081  * If lock requested conflicts with locks requested by waiters, must join
1082  * wait queue. Otherwise, check for conflict with already-held locks.
1083  * (That's last because most complex check.)
1084  */
1085  if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
1086  found_conflict = true;
1087  else
1088  found_conflict = LockCheckConflicts(lockMethodTable, lockmode,
1089  lock, proclock);
1090 
1091  if (!found_conflict)
1092  {
1093  /* No conflict with held or previously requested locks */
1094  GrantLock(lock, proclock, lockmode);
1095  GrantLockLocal(locallock, owner);
1096  }
1097  else
1098  {
1099  /*
1100  * Set bitmask of locks this process already holds on this object.
1101  */
1102  MyProc->heldLocks = proclock->holdMask;
1103 
1104  /*
1105  * Sleep till someone wakes me up. We do this even in the dontWait
1106  * case, because while trying to go to sleep, we may discover that we
1107  * can acquire the lock immediately after all.
1108  */
1109 
1110  TRACE_POSTGRESQL_LOCK_WAIT_START(locktag->locktag_field1,
1111  locktag->locktag_field2,
1112  locktag->locktag_field3,
1113  locktag->locktag_field4,
1114  locktag->locktag_type,
1115  lockmode);
1116 
1117  WaitOnLock(locallock, owner, dontWait);
1118 
1119  TRACE_POSTGRESQL_LOCK_WAIT_DONE(locktag->locktag_field1,
1120  locktag->locktag_field2,
1121  locktag->locktag_field3,
1122  locktag->locktag_field4,
1123  locktag->locktag_type,
1124  lockmode);
1125 
1126  /*
1127  * NOTE: do not do any material change of state between here and
1128  * return. All required changes in locktable state must have been
1129  * done when the lock was granted to us --- see notes in WaitOnLock.
1130  */
1131 
1132  /*
1133  * Check the proclock entry status. If dontWait = true, this is an
1134  * expected case; otherwise, it will only happen if something in the
1135  * ipc communication doesn't work correctly.
1136  */
1137  if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
1138  {
1139  AbortStrongLockAcquire();
1140 
1141  if (dontWait)
1142  {
1143  /*
1144  * We can't acquire the lock immediately. If caller specified
1145  * no blocking, remove useless table entries and return
1146  * LOCKACQUIRE_NOT_AVAIL without waiting.
1147  */
1148  if (proclock->holdMask == 0)
1149  {
1150  uint32 proclock_hashcode;
1151 
1152  proclock_hashcode = ProcLockHashCode(&proclock->tag,
1153  hashcode);
1154  dlist_delete(&proclock->lockLink);
1155  dlist_delete(&proclock->procLink);
1156  if (!hash_search_with_hash_value(LockMethodProcLockHash,
1157  &(proclock->tag),
1158  proclock_hashcode,
1159  HASH_REMOVE,
1160  NULL))
1161  elog(PANIC, "proclock table corrupted");
1162  }
1163  else
1164  PROCLOCK_PRINT("LockAcquire: NOWAIT", proclock);
1165  lock->nRequested--;
1166  lock->requested[lockmode]--;
1167  LOCK_PRINT("LockAcquire: conditional lock failed",
1168  lock, lockmode);
1169  Assert((lock->nRequested > 0) &&
1170  (lock->requested[lockmode] >= 0));
1171  Assert(lock->nGranted <= lock->nRequested);
1172  LWLockRelease(partitionLock);
1173  if (locallock->nLocks == 0)
1174  RemoveLocalLock(locallock);
1175  if (locallockp)
1176  *locallockp = NULL;
1177  return LOCKACQUIRE_NOT_AVAIL;
1178  }
1179  else
1180  {
1181  /*
1182  * We should have gotten the lock, but somehow that didn't
1183  * happen. If we get here, it's a bug.
1184  */
1185  PROCLOCK_PRINT("LockAcquire: INCONSISTENT", proclock);
1186  LOCK_PRINT("LockAcquire: INCONSISTENT", lock, lockmode);
1187  LWLockRelease(partitionLock);
1188  elog(ERROR, "LockAcquire failed");
1189  }
1190  }
1191  PROCLOCK_PRINT("LockAcquire: granted", proclock);
1192  LOCK_PRINT("LockAcquire: granted", lock, lockmode);
1193  }
1194 
1195  /*
1196  * Lock state is fully up-to-date now; if we error out after this, no
1197  * special error cleanup is required.
1198  */
1199  FinishStrongLockAcquire();
1200 
1201  LWLockRelease(partitionLock);
1202 
1203  /*
1204  * Emit a WAL record if acquisition of this lock needs to be replayed in a
1205  * standby server.
1206  */
1207  if (log_lock)
1208  {
1209  /*
1210  * Decode the locktag back to the original values, to avoid sending
1211  * lots of empty bytes with every message. See lock.h to check how a
1212  * locktag is defined for LOCKTAG_RELATION
1213  */
1214  LogAccessExclusiveLock(locktag->locktag_field1,
1215  locktag->locktag_field2);
1216  }
1217 
1218  return LOCKACQUIRE_OK;
1219 }
1220 
1221 /*
1222  * Find or create LOCK and PROCLOCK objects as needed for a new lock
1223  * request.
1224  *
1225  * Returns the PROCLOCK object, or NULL if we failed to create the objects
1226  * for lack of shared memory.
1227  *
1228  * The appropriate partition lock must be held at entry, and will be
1229  * held at exit.
1230  */
1231 static PROCLOCK *
1232 SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc,
1233  const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode)
1234 {
1235  LOCK *lock;
1236  PROCLOCK *proclock;
1237  PROCLOCKTAG proclocktag;
1238  uint32 proclock_hashcode;
1239  bool found;
1240 
1241  /*
1242  * Find or create a lock with this tag.
1243  */
1244  lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
1245  locktag,
1246  hashcode,
1247  HASH_ENTER_NULL,
1248  &found);
1249  if (!lock)
1250  return NULL;
1251 
1252  /*
1253  * if it's a new lock object, initialize it
1254  */
1255  if (!found)
1256  {
1257  lock->grantMask = 0;
1258  lock->waitMask = 0;
1259  dlist_init(&lock->procLocks);
1260  dclist_init(&lock->waitProcs);
1261  lock->nRequested = 0;
1262  lock->nGranted = 0;
1263  MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
1264  MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
1265  LOCK_PRINT("LockAcquire: new", lock, lockmode);
1266  }
1267  else
1268  {
1269  LOCK_PRINT("LockAcquire: found", lock, lockmode);
1270  Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
1271  Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
1272  Assert(lock->nGranted <= lock->nRequested);
1273  }
1274 
1275  /*
1276  * Create the hash key for the proclock table.
1277  */
1278  proclocktag.myLock = lock;
1279  proclocktag.myProc = proc;
1280 
1281  proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
1282 
1283  /*
1284  * Find or create a proclock entry with this tag
1285  */
1286  proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
1287  &proclocktag,
1288  proclock_hashcode,
1289  HASH_ENTER_NULL,
1290  &found);
1291  if (!proclock)
1292  {
1293  /* Oops, not enough shmem for the proclock */
1294  if (lock->nRequested == 0)
1295  {
1296  /*
1297  * There are no other requestors of this lock, so garbage-collect
1298  * the lock object. We *must* do this to avoid a permanent leak
1299  * of shared memory, because there won't be anything to cause
1300  * anyone to release the lock object later.
1301  */
1302  Assert(dlist_is_empty(&(lock->procLocks)));
1303  if (!hash_search_with_hash_value(LockMethodLockHash,
1304  &(lock->tag),
1305  hashcode,
1306  HASH_REMOVE,
1307  NULL))
1308  elog(PANIC, "lock table corrupted");
1309  }
1310  return NULL;
1311  }
1312 
1313  /*
1314  * If new, initialize the new entry
1315  */
1316  if (!found)
1317  {
1318  uint32 partition = LockHashPartition(hashcode);
1319 
1320  /*
1321  * It might seem unsafe to access proclock->groupLeader without a
1322  * lock, but it's not really. Either we are initializing a proclock
1323  * on our own behalf, in which case our group leader isn't changing
1324  * because the group leader for a process can only ever be changed by
1325  * the process itself; or else we are transferring a fast-path lock to
1326  * the main lock table, in which case that process can't change its
1327  * lock group leader without first releasing all of its locks (and in
1328  * particular the one we are currently transferring).
1329  */
1330  proclock->groupLeader = proc->lockGroupLeader != NULL ?
1331  proc->lockGroupLeader : proc;
1332  proclock->holdMask = 0;
1333  proclock->releaseMask = 0;
1334  /* Add proclock to appropriate lists */
1335  dlist_push_tail(&lock->procLocks, &proclock->lockLink);
1336  dlist_push_tail(&proc->myProcLocks[partition], &proclock->procLink);
1337  PROCLOCK_PRINT("LockAcquire: new", proclock);
1338  }
1339  else
1340  {
1341  PROCLOCK_PRINT("LockAcquire: found", proclock);
1342  Assert((proclock->holdMask & ~lock->grantMask) == 0);
1343 
1344 #ifdef CHECK_DEADLOCK_RISK
1345 
1346  /*
1347  * Issue warning if we already hold a lower-level lock on this object
1348  * and do not hold a lock of the requested level or higher. This
1349  * indicates a deadlock-prone coding practice (eg, we'd have a
1350  * deadlock if another backend were following the same code path at
1351  * about the same time).
1352  *
1353  * This is not enabled by default, because it may generate log entries
1354  * about user-level coding practices that are in fact safe in context.
1355  * It can be enabled to help find system-level problems.
1356  *
1357  * XXX Doing numeric comparison on the lockmodes is a hack; it'd be
1358  * better to use a table. For now, though, this works.
1359  */
1360  {
1361  int i;
1362 
1363  for (i = lockMethodTable->numLockModes; i > 0; i--)
1364  {
1365  if (proclock->holdMask & LOCKBIT_ON(i))
1366  {
1367  if (i >= (int) lockmode)
1368  break; /* safe: we have a lock >= req level */
1369  elog(LOG, "deadlock risk: raising lock level"
1370  " from %s to %s on object %u/%u/%u",
1371  lockMethodTable->lockModeNames[i],
1372  lockMethodTable->lockModeNames[lockmode],
1373  lock->tag.locktag_field1, lock->tag.locktag_field2,
1374  lock->tag.locktag_field3);
1375  break;
1376  }
1377  }
1378  }
1379 #endif /* CHECK_DEADLOCK_RISK */
1380  }
1381 
1382  /*
1383  * lock->nRequested and lock->requested[] count the total number of
1384  * requests, whether granted or waiting, so increment those immediately.
1385  * The other counts don't increment till we get the lock.
1386  */
1387  lock->nRequested++;
1388  lock->requested[lockmode]++;
1389  Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
1390 
1391  /*
1392  * We shouldn't already hold the desired lock; else locallock table is
1393  * broken.
1394  */
1395  if (proclock->holdMask & LOCKBIT_ON(lockmode))
1396  elog(ERROR, "lock %s on object %u/%u/%u is already held",
1397  lockMethodTable->lockModeNames[lockmode],
1398  lock->tag.locktag_field1, lock->tag.locktag_field2,
1399  lock->tag.locktag_field3);
1400 
1401  return proclock;
1402 }
1403 
1404 /*
1405  * Check and set/reset the flag that we hold the relation extension lock.
1406  *
1407  * It is the caller's responsibility to call this function after
1408  * acquiring/releasing the relation extension lock.
1409  *
1410  * Pass acquired as true if lock is acquired, false otherwise.
1411  */
1412 static inline void
1413 CheckAndSetLockHeld(LOCALLOCK *locallock, bool acquired)
1414 {
1415 #ifdef USE_ASSERT_CHECKING
1416  if (LOCALLOCK_LOCKTAG(*locallock) == LOCKTAG_RELATION_EXTEND)
1417  IsRelationExtensionLockHeld = acquired;
1418 #endif
1419 }
1420 
1421 /*
1422  * Subroutine to free a locallock entry
1423  */
1424 static void
1425 RemoveLocalLock(LOCALLOCK *locallock)
1426 {
1427  int i;
1428 
1429  for (i = locallock->numLockOwners - 1; i >= 0; i--)
1430  {
1431  if (locallock->lockOwners[i].owner != NULL)
1432  ResourceOwnerForgetLock(locallock->lockOwners[i].owner, locallock);
1433  }
1434  locallock->numLockOwners = 0;
1435  if (locallock->lockOwners != NULL)
1436  pfree(locallock->lockOwners);
1437  locallock->lockOwners = NULL;
1438 
1439  if (locallock->holdsStrongLockCount)
1440  {
1441  uint32 fasthashcode;
1442 
1443  fasthashcode = FastPathStrongLockHashPartition(locallock->hashcode);
1444 
1444 
1445  SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
1446  Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
1447  FastPathStrongRelationLocks->count[fasthashcode]--;
1448  locallock->holdsStrongLockCount = false;
1449  SpinLockRelease(&FastPathStrongRelationLocks->mutex);
1450  }
1451 
1452  if (!hash_search(LockMethodLocalHash,
1453  &(locallock->tag),
1454  HASH_REMOVE, NULL))
1455  elog(WARNING, "locallock table corrupted");
1456 
1457  /*
1458  * Indicate that the lock is released for certain types of locks
1459  */
1460  CheckAndSetLockHeld(locallock, false);
1461 }
1462 
1463 /*
1464  * LockCheckConflicts -- test whether requested lock conflicts
1465  * with those already granted
1466  *
1467  * Returns true if conflict, false if no conflict.
1468  *
1469  * NOTES:
1470  * Here's what makes this complicated: one process's locks don't
1471  * conflict with one another, no matter what purpose they are held for
1472  * (eg, session and transaction locks do not conflict). Nor do the locks
1473  * of one process in a lock group conflict with those of another process in
1474  * the same group. So, we must subtract off these locks when determining
1475  * whether the requested new lock conflicts with those already held.
1476  */
1477 bool
1478 LockCheckConflicts(LockMethod lockMethodTable,
1479  LOCKMODE lockmode,
1480  LOCK *lock,
1481  PROCLOCK *proclock)
1482 {
1483  int numLockModes = lockMethodTable->numLockModes;
1484  LOCKMASK myLocks;
1485  int conflictMask = lockMethodTable->conflictTab[lockmode];
1486  int conflictsRemaining[MAX_LOCKMODES];
1487  int totalConflictsRemaining = 0;
1488  dlist_iter proclock_iter;
1489  int i;
1490 
1491  /*
1492  * first check for global conflicts: If no locks conflict with my request,
1493  * then I get the lock.
1494  *
1495  * Checking for conflict: lock->grantMask represents the types of
1496  * currently held locks. conflictTable[lockmode] has a bit set for each
1497  * type of lock that conflicts with request. Bitwise compare tells if
1498  * there is a conflict.
1499  */
1500  if (!(conflictMask & lock->grantMask))
1501  {
1502  PROCLOCK_PRINT("LockCheckConflicts: no conflict", proclock);
1503  return false;
1504  }
1505 
1506  /*
1507  * Rats. Something conflicts. But it could still be my own lock, or a
1508  * lock held by another member of my locking group. First, figure out how
1509  * many conflicts remain after subtracting out any locks I hold myself.
1510  */
1511  myLocks = proclock->holdMask;
1512  for (i = 1; i <= numLockModes; i++)
1513  {
1514  if ((conflictMask & LOCKBIT_ON(i)) == 0)
1515  {
1516  conflictsRemaining[i] = 0;
1517  continue;
1518  }
1519  conflictsRemaining[i] = lock->granted[i];
1520  if (myLocks & LOCKBIT_ON(i))
1521  --conflictsRemaining[i];
1522  totalConflictsRemaining += conflictsRemaining[i];
1523  }
1524 
1525  /* If no conflicts remain, we get the lock. */
1526  if (totalConflictsRemaining == 0)
1527  {
1528  PROCLOCK_PRINT("LockCheckConflicts: resolved (simple)", proclock);
1529  return false;
1530  }
1531 
1532  /* If no group locking, it's definitely a conflict. */
1533  if (proclock->groupLeader == MyProc && MyProc->lockGroupLeader == NULL)
1534  {
1535  Assert(proclock->tag.myProc == MyProc);
1536  PROCLOCK_PRINT("LockCheckConflicts: conflicting (simple)",
1537  proclock);
1538  return true;
1539  }
1540 
1541  /*
1542  * The relation extension lock conflicts even between members of the same group.
1543  */
1544  if (LOCK_LOCKTAG(*lock) == LOCKTAG_RELATION_EXTEND)
1545  {
1546  PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)",
1547  proclock);
1548  return true;
1549  }
1550 
1551  /*
1552  * Locks held in conflicting modes by members of our own lock group are
1553  * not real conflicts; we can subtract those out and see if we still have
1554  * a conflict. This is O(N) in the number of processes holding or
1555  * awaiting locks on this object. We could improve that by making the
1556  * shared memory state more complex (and larger) but it doesn't seem worth
1557  * it.
1558  */
1559  dlist_foreach(proclock_iter, &lock->procLocks)
1560  {
1561  PROCLOCK *otherproclock =
1562  dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
1563 
1564  if (proclock != otherproclock &&
1565  proclock->groupLeader == otherproclock->groupLeader &&
1566  (otherproclock->holdMask & conflictMask) != 0)
1567  {
1568  int intersectMask = otherproclock->holdMask & conflictMask;
1569 
1570  for (i = 1; i <= numLockModes; i++)
1571  {
1572  if ((intersectMask & LOCKBIT_ON(i)) != 0)
1573  {
1574  if (conflictsRemaining[i] <= 0)
1575  elog(PANIC, "proclocks held do not match lock");
1576  conflictsRemaining[i]--;
1577  totalConflictsRemaining--;
1578  }
1579  }
1580 
1581  if (totalConflictsRemaining == 0)
1582  {
1583  PROCLOCK_PRINT("LockCheckConflicts: resolved (group)",
1584  proclock);
1585  return false;
1586  }
1587  }
1588  }
1589 
1590  /* Nope, it's a real conflict. */
1591  PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)", proclock);
1592  return true;
1593 }
1594 
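/*
 * Worked example (illustrative, not part of lock.c): suppose we request
 * ShareLock while lock->granted[] records a single RowExclusiveLock held
 * by another member of our own lock group.  The grantMask test above sees
 * a conflict, but the walk over lock->procLocks subtracts that member's
 * modes from conflictsRemaining[], totalConflictsRemaining reaches zero,
 * and the request is reported as non-conflicting.
 */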
1595 /*
1596  * GrantLock -- update the lock and proclock data structures to show
1597  * the lock request has been granted.
1598  *
1599  * NOTE: if proc was blocked, it also needs to be removed from the wait list
1600  * and have its waitLock/waitProcLock fields cleared. That's not done here.
1601  *
1602  * NOTE: the lock grant also has to be recorded in the associated LOCALLOCK
1603  * table entry; but since we may be awaking some other process, we can't do
1604  * that here; it's done by GrantLockLocal, instead.
1605  */
1606 void
1607 GrantLock(LOCK *lock, PROCLOCK *proclock, LOCKMODE lockmode)
1608 {
1609  lock->nGranted++;
1610  lock->granted[lockmode]++;
1611  lock->grantMask |= LOCKBIT_ON(lockmode);
1612  if (lock->granted[lockmode] == lock->requested[lockmode])
1613  lock->waitMask &= LOCKBIT_OFF(lockmode);
1614  proclock->holdMask |= LOCKBIT_ON(lockmode);
1615  LOCK_PRINT("GrantLock", lock, lockmode);
1616  Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
1617  Assert(lock->nGranted <= lock->nRequested);
1618 }
1619 
1620 /*
1621  * UnGrantLock -- opposite of GrantLock.
1622  *
1623  * Updates the lock and proclock data structures to show that the lock
1624  * is no longer held nor requested by the current holder.
1625  *
1626  * Returns true if there were any waiters waiting on the lock that
1627  * should now be woken up with ProcLockWakeup.
1628  */
1629 static bool
1630 UnGrantLock(LOCK *lock, LOCKMODE lockmode,
1631  PROCLOCK *proclock, LockMethod lockMethodTable)
1632 {
1633  bool wakeupNeeded = false;
1634 
1635  Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
1636  Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
1637  Assert(lock->nGranted <= lock->nRequested);
1638 
1639  /*
1640  * fix the general lock stats
1641  */
1642  lock->nRequested--;
1643  lock->requested[lockmode]--;
1644  lock->nGranted--;
1645  lock->granted[lockmode]--;
1646 
1647  if (lock->granted[lockmode] == 0)
1648  {
1649  /* change the conflict mask. No more of this lock type. */
1650  lock->grantMask &= LOCKBIT_OFF(lockmode);
1651  }
1652 
1653  LOCK_PRINT("UnGrantLock: updated", lock, lockmode);
1654 
1655  /*
1656  * We need only run ProcLockWakeup if the released lock conflicts with at
1657  * least one of the lock types requested by waiter(s). Otherwise whatever
1658  * conflict made them wait must still exist. NOTE: before MVCC, we could
1659  * skip wakeup if lock->granted[lockmode] was still positive. But that's
1660  * not true anymore, because the remaining granted locks might belong to
1661  * some waiter, who could now be awakened because he doesn't conflict with
1662  * his own locks.
1663  */
1664  if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
1665  wakeupNeeded = true;
1666 
1667  /*
1668  * Now fix the per-proclock state.
1669  */
1670  proclock->holdMask &= LOCKBIT_OFF(lockmode);
1671  PROCLOCK_PRINT("UnGrantLock: updated", proclock);
1672 
1673  return wakeupNeeded;
1674 }
1675 
1676 /*
1677  * CleanUpLock -- clean up after releasing a lock. We garbage-collect the
1678  * proclock and lock objects if possible, and call ProcLockWakeup if there
1679  * are remaining requests and the caller says it's OK. (Normally, this
1680  * should be called after UnGrantLock, and wakeupNeeded is the result from
1681  * UnGrantLock.)
1682  *
1683  * The appropriate partition lock must be held at entry, and will be
1684  * held at exit.
1685  */
1686 static void
1687 CleanUpLock(LOCK *lock, PROCLOCK *proclock,
1688  LockMethod lockMethodTable, uint32 hashcode,
1689  bool wakeupNeeded)
1690 {
1691  /*
1692  * If this was my last hold on this lock, delete my entry in the proclock
1693  * table.
1694  */
1695  if (proclock->holdMask == 0)
1696  {
1697  uint32 proclock_hashcode;
1698 
1699  PROCLOCK_PRINT("CleanUpLock: deleting", proclock);
1700  dlist_delete(&proclock->lockLink);
1701  dlist_delete(&proclock->procLink);
1702  proclock_hashcode = ProcLockHashCode(&proclock->tag, hashcode);
1703  if (!hash_search_with_hash_value(LockMethodProcLockHash,
1704  &(proclock->tag),
1705  proclock_hashcode,
1706  HASH_REMOVE,
1707  NULL))
1708  elog(PANIC, "proclock table corrupted");
1709  }
1710 
1711  if (lock->nRequested == 0)
1712  {
1713  /*
1714  * The caller just released the last lock, so garbage-collect the lock
1715  * object.
1716  */
1717  LOCK_PRINT("CleanUpLock: deleting", lock, 0);
1718  Assert(dlist_is_empty(&lock->procLocks));
1719  if (!hash_search_with_hash_value(LockMethodLockHash,
1720  &(lock->tag),
1721  hashcode,
1722  HASH_REMOVE,
1723  NULL))
1724  elog(PANIC, "lock table corrupted");
1725  }
1726  else if (wakeupNeeded)
1727  {
1728  /* There are waiters on this lock, so wake them up. */
1729  ProcLockWakeup(lockMethodTable, lock);
1730  }
1731 }
1732 
1733 /*
1734  * GrantLockLocal -- update the locallock data structures to show
1735  * the lock request has been granted.
1736  *
1737  * We expect that LockAcquire made sure there is room to add a new
1738  * ResourceOwner entry.
1739  */
1740 static void
1741 GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner)
1742 {
1743  LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
1744  int i;
1745 
1746  Assert(locallock->numLockOwners < locallock->maxLockOwners);
1747  /* Count the total */
1748  locallock->nLocks++;
1749  /* Count the per-owner lock */
1750  for (i = 0; i < locallock->numLockOwners; i++)
1751  {
1752  if (lockOwners[i].owner == owner)
1753  {
1754  lockOwners[i].nLocks++;
1755  return;
1756  }
1757  }
1758  lockOwners[i].owner = owner;
1759  lockOwners[i].nLocks = 1;
1760  locallock->numLockOwners++;
1761  if (owner != NULL)
1762  ResourceOwnerRememberLock(owner, locallock);
1763 
1764  /* Indicate that the lock is acquired for certain types of locks. */
1765  CheckAndSetLockHeld(locallock, true);
1766 }
1767 
1768 /*
1769  * BeginStrongLockAcquire - inhibit use of fastpath for a given LOCALLOCK,
1770  * and arrange for error cleanup if it fails
1771  */
1772 static void
1773 BeginStrongLockAcquire(LOCALLOCK *locallock, uint32 fasthashcode)
1774 {
1775  Assert(StrongLockInProgress == NULL);
1776  Assert(locallock->holdsStrongLockCount == false);
1777 
1778  /*
1779  * Adding to a memory location is not atomic, so we take a spinlock to
1780  * ensure we don't collide with someone else trying to bump the count at
1781  * the same time.
1782  *
1783  * XXX: It might be worth considering using an atomic fetch-and-add
1784  * instruction here, on architectures where that is supported.
1785  */
1786 
1787  SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
1788  FastPathStrongRelationLocks->count[fasthashcode]++;
1789  locallock->holdsStrongLockCount = true;
1790  StrongLockInProgress = locallock;
1791  SpinLockRelease(&FastPathStrongRelationLocks->mutex);
1792 }
1793 
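/*
 * Sketch of the XXX idea above (an assumption about a possible change, not
 * what the current code does): if the count[] field were declared as
 * pg_atomic_uint32, the spinlock could be replaced with an atomic
 * fetch-and-add from port/atomics.h:
 *
 *     pg_atomic_fetch_add_u32(&FastPathStrongRelationLocks->count[fasthashcode], 1);
 *
 * with a matching pg_atomic_fetch_sub_u32() in AbortStrongLockAcquire().
 */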
1794 /*
1795  * FinishStrongLockAcquire - cancel pending cleanup for a strong lock
1796  * acquisition once it's no longer needed
1797  */
1798 static void
1799 FinishStrongLockAcquire(void)
1800 {
1801  StrongLockInProgress = NULL;
1802 }
1803 
1804 /*
1805  * AbortStrongLockAcquire - undo strong lock state changes performed by
1806  * BeginStrongLockAcquire.
1807  */
1808 void
1809 AbortStrongLockAcquire(void)
1810 {
1811  uint32 fasthashcode;
1812  LOCALLOCK *locallock = StrongLockInProgress;
1813 
1814  if (locallock == NULL)
1815  return;
1816 
1817  fasthashcode = FastPathStrongLockHashPartition(locallock->hashcode);
1818  Assert(locallock->holdsStrongLockCount == true);
1819  SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
1820  Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
1821  FastPathStrongRelationLocks->count[fasthashcode]--;
1822  locallock->holdsStrongLockCount = false;
1823  StrongLockInProgress = NULL;
1824  SpinLockRelease(&FastPathStrongRelationLocks->mutex);
1825 }
1826 
1827 /*
1828  * GrantAwaitedLock -- call GrantLockLocal for the lock we are doing
1829  * WaitOnLock on.
1830  *
1831  * proc.c needs this for the case where we are booted off the lock by
1832  * timeout, but discover that someone granted us the lock anyway.
1833  *
1834  * We could just export GrantLockLocal, but that would require including
1835  * resowner.h in lock.h, which creates circularity.
1836  */
1837 void
1838 GrantAwaitedLock(void)
1839 {
1840  GrantLockLocal(awaitedLock, awaitedOwner);
1841 }
1842 
1843 /*
1844  * MarkLockClear -- mark an acquired lock as "clear"
1845  *
1846  * This means that we know we have absorbed all sinval messages that other
1847  * sessions generated before we acquired this lock, and so we can confidently
1848  * assume we know about any catalog changes protected by this lock.
1849  */
1850 void
1851 MarkLockClear(LOCALLOCK *locallock)
1852 {
1853  Assert(locallock->nLocks > 0);
1854  locallock->lockCleared = true;
1855 }
1856 
1857 /*
1858  * WaitOnLock -- wait to acquire a lock
1859  *
1860  * Caller must have set MyProc->heldLocks to reflect locks already held
1861  * on the lockable object by this process.
1862  *
1863  * The appropriate partition lock must be held at entry, and will still be
1864  * held at exit.
1865  */
1866 static void
1867 WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner, bool dontWait)
1868 {
1869  LOCKMETHODID lockmethodid = LOCALLOCK_LOCKMETHOD(*locallock);
1870  LockMethod lockMethodTable = LockMethods[lockmethodid];
1871 
1872  LOCK_PRINT("WaitOnLock: sleeping on lock",
1873  locallock->lock, locallock->tag.mode);
1874 
1875  /* adjust the process title to indicate that it's waiting */
1876  set_ps_display_suffix("waiting");
1877 
1878  awaitedLock = locallock;
1879  awaitedOwner = owner;
1880 
1881  /*
1882  * NOTE: Think not to put any shared-state cleanup after the call to
1883  * ProcSleep, in either the normal or failure path. The lock state must
1884  * be fully set by the lock grantor, or by CheckDeadLock if we give up
1885  * waiting for the lock. This is necessary because of the possibility
1886  * that a cancel/die interrupt will interrupt ProcSleep after someone else
1887  * grants us the lock, but before we've noticed it. Hence, after granting,
1888  * the locktable state must fully reflect the fact that we own the lock;
1889  * we can't do additional work on return.
1890  *
1891  * We can and do use a PG_TRY block to try to clean up after failure, but
1892  * this still has a major limitation: elog(FATAL) can occur while waiting
1893  * (eg, a "die" interrupt), and then control won't come back here. So all
1894  * cleanup of essential state should happen in LockErrorCleanup, not here.
1895  * We can use PG_TRY to clear the "waiting" status flags, since doing that
1896  * is unimportant if the process exits.
1897  */
1898  PG_TRY();
1899  {
1900  /*
1901  * If dontWait = true, we handle success and failure in the same way
1902  * here. The caller will be able to sort out what has happened.
1903  */
1904  if (ProcSleep(locallock, lockMethodTable, dontWait) != PROC_WAIT_STATUS_OK
1905  && !dontWait)
1906  {
1907 
1908  /*
1909  * We failed as a result of a deadlock, see CheckDeadLock(). Quit
1910  * now.
1911  */
1912  awaitedLock = NULL;
1913  LOCK_PRINT("WaitOnLock: aborting on lock",
1914  locallock->lock, locallock->tag.mode);
1915  LWLockRelease(LockHashPartitionLock(locallock->hashcode));
1916 
1917  /*
1918  * Now that we aren't holding the partition lock, we can give an
1919  * error report including details about the detected deadlock.
1920  */
1921  DeadLockReport();
1922  /* not reached */
1923  }
1924  }
1925  PG_CATCH();
1926  {
1927  /* In this path, awaitedLock remains set until LockErrorCleanup */
1928 
1929  /* reset ps display to remove the suffix */
1930  set_ps_display_remove_suffix();
1931 
1932  /* and propagate the error */
1933  PG_RE_THROW();
1934  }
1935  PG_END_TRY();
1936 
1937  awaitedLock = NULL;
1938 
1939  /* reset ps display to remove the suffix */
1940  set_ps_display_remove_suffix();
1941 
1942  LOCK_PRINT("WaitOnLock: wakeup on lock",
1943  locallock->lock, locallock->tag.mode);
1944 }
1945 
1946 /*
1947  * Remove a proc from the wait-queue it is on (caller must know it is on one).
1948  * This is only used when the proc has failed to get the lock, so we set its
1949  * waitStatus to PROC_WAIT_STATUS_ERROR.
1950  *
1951  * Appropriate partition lock must be held by caller. Also, caller is
1952  * responsible for signaling the proc if needed.
1953  *
1954  * NB: this does not clean up any locallock object that may exist for the lock.
1955  */
1956 void
1957 RemoveFromWaitQueue(PGPROC *proc, uint32 hashcode)
1958 {
1959  LOCK *waitLock = proc->waitLock;
1960  PROCLOCK *proclock = proc->waitProcLock;
1961  LOCKMODE lockmode = proc->waitLockMode;
1962  LOCKMETHODID lockmethodid = LOCK_LOCKMETHOD(*waitLock);
1963 
1964  /* Make sure proc is waiting */
1965  Assert(proc->waitStatus == PROC_WAIT_STATUS_WAITING);
1966  Assert(proc->links.next != NULL);
1967  Assert(waitLock);
1968  Assert(!dclist_is_empty(&waitLock->waitProcs));
1969  Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
1970 
1971  /* Remove proc from lock's wait queue */
1972  dclist_delete_from_thoroughly(&waitLock->waitProcs, &proc->links);
1973 
1974  /* Undo increments of request counts by waiting process */
1975  Assert(waitLock->nRequested > 0);
1976  Assert(waitLock->nRequested > proc->waitLock->nGranted);
1977  waitLock->nRequested--;
1978  Assert(waitLock->requested[lockmode] > 0);
1979  waitLock->requested[lockmode]--;
1980  /* don't forget to clear waitMask bit if appropriate */
1981  if (waitLock->granted[lockmode] == waitLock->requested[lockmode])
1982  waitLock->waitMask &= LOCKBIT_OFF(lockmode);
1983 
1984  /* Clean up the proc's own state, and pass it the ok/fail signal */
1985  proc->waitLock = NULL;
1986  proc->waitProcLock = NULL;
1987  proc->waitStatus = PROC_WAIT_STATUS_ERROR;
1988 
1989  /*
1990  * Delete the proclock immediately if it represents no already-held locks.
1991  * (This must happen now because if the owner of the lock decides to
1992  * release it, and the requested/granted counts then go to zero,
1993  * LockRelease expects there to be no remaining proclocks.) Then see if
1994  * any other waiters for the lock can be woken up now.
1995  */
1996  CleanUpLock(waitLock, proclock,
1997  LockMethods[lockmethodid], hashcode,
1998  true);
1999 }
2000 
2001 /*
2002  * LockRelease -- look up 'locktag' and release one 'lockmode' lock on it.
2003  * Release a session lock if 'sessionLock' is true, else release a
2004  * regular transaction lock.
2005  *
2006  * Side Effects: find any waiting processes that are now wakable,
2007  * grant them their requested locks and awaken them.
2008  * (We have to grant the lock here to avoid a race between
2009  * the waking process and any new process to
2010  * come along and request the lock.)
2011  */
2012 bool
2013 LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
2014 {
2015  LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
2016  LockMethod lockMethodTable;
2017  LOCALLOCKTAG localtag;
2018  LOCALLOCK *locallock;
2019  LOCK *lock;
2020  PROCLOCK *proclock;
2021  LWLock *partitionLock;
2022  bool wakeupNeeded;
2023 
2024  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2025  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2026  lockMethodTable = LockMethods[lockmethodid];
2027  if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
2028  elog(ERROR, "unrecognized lock mode: %d", lockmode);
2029 
2030 #ifdef LOCK_DEBUG
2031  if (LOCK_DEBUG_ENABLED(locktag))
2032  elog(LOG, "LockRelease: lock [%u,%u] %s",
2033  locktag->locktag_field1, locktag->locktag_field2,
2034  lockMethodTable->lockModeNames[lockmode]);
2035 #endif
2036 
2037  /*
2038  * Find the LOCALLOCK entry for this lock and lockmode
2039  */
2040  MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
2041  localtag.lock = *locktag;
2042  localtag.mode = lockmode;
2043 
2044  locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
2045  &localtag,
2046  HASH_FIND, NULL);
2047 
2048  /*
2049  * let the caller print its own error message, too. Do not ereport(ERROR).
2050  */
2051  if (!locallock || locallock->nLocks <= 0)
2052  {
2053  elog(WARNING, "you don't own a lock of type %s",
2054  lockMethodTable->lockModeNames[lockmode]);
2055  return false;
2056  }
2057 
2058  /*
2059  * Decrease the count for the resource owner.
2060  */
2061  {
2062  LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
2063  ResourceOwner owner;
2064  int i;
2065 
2066  /* Identify owner for lock */
2067  if (sessionLock)
2068  owner = NULL;
2069  else
2070  owner = CurrentResourceOwner;
2071 
2072  for (i = locallock->numLockOwners - 1; i >= 0; i--)
2073  {
2074  if (lockOwners[i].owner == owner)
2075  {
2076  Assert(lockOwners[i].nLocks > 0);
2077  if (--lockOwners[i].nLocks == 0)
2078  {
2079  if (owner != NULL)
2080  ResourceOwnerForgetLock(owner, locallock);
2081  /* compact out unused slot */
2082  locallock->numLockOwners--;
2083  if (i < locallock->numLockOwners)
2084  lockOwners[i] = lockOwners[locallock->numLockOwners];
2085  }
2086  break;
2087  }
2088  }
2089  if (i < 0)
2090  {
2091  /* don't release a lock belonging to another owner */
2092  elog(WARNING, "you don't own a lock of type %s",
2093  lockMethodTable->lockModeNames[lockmode]);
2094  return false;
2095  }
2096  }
2097 
2098  /*
2099  * Decrease the total local count. If we're still holding the lock, we're
2100  * done.
2101  */
2102  locallock->nLocks--;
2103 
2104  if (locallock->nLocks > 0)
2105  return true;
2106 
2107  /*
2108  * At this point we can no longer suppose we are clear of invalidation
2109  * messages related to this lock. Although we'll delete the LOCALLOCK
2110  * object before any intentional return from this routine, it seems worth
2111  * the trouble to explicitly reset lockCleared right now, just in case
2112  * some error prevents us from deleting the LOCALLOCK.
2113  */
2114  locallock->lockCleared = false;
2115 
2116  /* Attempt fast release of any lock eligible for the fast path. */
2117  if (EligibleForRelationFastPath(locktag, lockmode) &&
2118  FastPathLocalUseCounts[FAST_PATH_REL_GROUP(locktag->locktag_field2)] > 0)
2119  {
2120  bool released;
2121 
2122  /*
2123  * We might not find the lock here, even if we originally entered it
2124  * here. Another backend may have moved it to the main table.
2125  */
2126  LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
2127  released = FastPathUnGrantRelationLock(locktag->locktag_field2,
2128  lockmode);
2129  LWLockRelease(&MyProc->fpInfoLock);
2130  if (released)
2131  {
2132  RemoveLocalLock(locallock);
2133  return true;
2134  }
2135  }
2136 
2137  /*
2138  * Otherwise we've got to mess with the shared lock table.
2139  */
2140  partitionLock = LockHashPartitionLock(locallock->hashcode);
2141 
2142  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2143 
2144  /*
2145  * Normally, we don't need to re-find the lock or proclock, since we kept
2146  * their addresses in the locallock table, and they couldn't have been
2147  * removed while we were holding a lock on them. But it's possible that
2148  * the lock was taken fast-path and has since been moved to the main hash
2149  * table by another backend, in which case we will need to look up the
2150  * objects here. We assume the lock field is NULL if so.
2151  */
2152  lock = locallock->lock;
2153  if (!lock)
2154  {
2155  PROCLOCKTAG proclocktag;
2156 
2157  Assert(EligibleForRelationFastPath(locktag, lockmode));
2158  lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
2159  locktag,
2160  locallock->hashcode,
2161  HASH_FIND,
2162  NULL);
2163  if (!lock)
2164  elog(ERROR, "failed to re-find shared lock object");
2165  locallock->lock = lock;
2166 
2167  proclocktag.myLock = lock;
2168  proclocktag.myProc = MyProc;
2169  locallock->proclock = (PROCLOCK *) hash_search(LockMethodProcLockHash,
2170  &proclocktag,
2171  HASH_FIND,
2172  NULL);
2173  if (!locallock->proclock)
2174  elog(ERROR, "failed to re-find shared proclock object");
2175  }
2176  LOCK_PRINT("LockRelease: found", lock, lockmode);
2177  proclock = locallock->proclock;
2178  PROCLOCK_PRINT("LockRelease: found", proclock);
2179 
2180  /*
2181  * Double-check that we are actually holding a lock of the type we want to
2182  * release.
2183  */
2184  if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
2185  {
2186  PROCLOCK_PRINT("LockRelease: WRONGTYPE", proclock);
2187  LWLockRelease(partitionLock);
2188  elog(WARNING, "you don't own a lock of type %s",
2189  lockMethodTable->lockModeNames[lockmode]);
2190  RemoveLocalLock(locallock);
2191  return false;
2192  }
2193 
2194  /*
2195  * Do the releasing. CleanUpLock will waken any now-wakable waiters.
2196  */
2197  wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
2198 
2199  CleanUpLock(lock, proclock,
2200  lockMethodTable, locallock->hashcode,
2201  wakeupNeeded);
2202 
2203  LWLockRelease(partitionLock);
2204 
2205  RemoveLocalLock(locallock);
2206  return true;
2207 }
2208 
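/*
 * Minimal caller-side sketch (illustrative only; "reloid" is a placeholder,
 * and real callers normally go through lmgr.c wrappers such as
 * LockRelation()/UnlockRelation() rather than calling these routines directly):
 *
 *     LOCKTAG     tag;
 *
 *     SET_LOCKTAG_RELATION(tag, MyDatabaseId, reloid);
 *     (void) LockAcquire(&tag, AccessShareLock, false, false);
 *     ... work with the relation ...
 *     LockRelease(&tag, AccessShareLock, false);
 *
 * With sessionLock = false the hold is counted under CurrentResourceOwner,
 * so anything not released explicitly is cleaned up at transaction end.
 */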
2209 /*
2210  * LockReleaseAll -- Release all locks of the specified lock method that
2211  * are held by the current process.
2212  *
2213  * Well, not necessarily *all* locks. The available behaviors are:
2214  * allLocks == true: release all locks including session locks.
2215  * allLocks == false: release all non-session locks.
2216  */
2217 void
2218 LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
2219 {
2220  HASH_SEQ_STATUS status;
2221  LockMethod lockMethodTable;
2222  int i,
2223  numLockModes;
2224  LOCALLOCK *locallock;
2225  LOCK *lock;
2226  int partition;
2227  bool have_fast_path_lwlock = false;
2228 
2229  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2230  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2231  lockMethodTable = LockMethods[lockmethodid];
2232 
2233 #ifdef LOCK_DEBUG
2234  if (*(lockMethodTable->trace_flag))
2235  elog(LOG, "LockReleaseAll: lockmethod=%d", lockmethodid);
2236 #endif
2237 
2238  /*
2239  * Get rid of our fast-path VXID lock, if appropriate. Note that this is
2240  * the only way that the lock we hold on our own VXID can ever get
2241  * released: it is always and only released when a toplevel transaction
2242  * ends.
2243  */
2244  if (lockmethodid == DEFAULT_LOCKMETHOD)
2245  VirtualXactLockTableCleanup();
2246 
2247  numLockModes = lockMethodTable->numLockModes;
2248 
2249  /*
2250  * First we run through the locallock table and get rid of unwanted
2251  * entries, then we scan the process's proclocks and get rid of those. We
2252  * do this separately because we may have multiple locallock entries
2253  * pointing to the same proclock, and we daren't end up with any dangling
2254  * pointers. Fast-path locks are cleaned up during the locallock table
2255  * scan, though.
2256  */
2257  hash_seq_init(&status, LockMethodLocalHash);
2258 
2259  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2260  {
2261  /*
2262  * If the LOCALLOCK entry is unused, we must've run out of shared
2263  * memory while trying to set up this lock. Just forget the local
2264  * entry.
2265  */
2266  if (locallock->nLocks == 0)
2267  {
2268  RemoveLocalLock(locallock);
2269  continue;
2270  }
2271 
2272  /* Ignore items that are not of the lockmethod to be removed */
2273  if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
2274  continue;
2275 
2276  /*
2277  * If we are asked to release all locks, we can just zap the entry.
2278  * Otherwise, must scan to see if there are session locks. We assume
2279  * there is at most one lockOwners entry for session locks.
2280  */
2281  if (!allLocks)
2282  {
2283  LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
2284 
2285  /* If session lock is above array position 0, move it down to 0 */
2286  for (i = 0; i < locallock->numLockOwners; i++)
2287  {
2288  if (lockOwners[i].owner == NULL)
2289  lockOwners[0] = lockOwners[i];
2290  else
2291  ResourceOwnerForgetLock(lockOwners[i].owner, locallock);
2292  }
2293 
2294  if (locallock->numLockOwners > 0 &&
2295  lockOwners[0].owner == NULL &&
2296  lockOwners[0].nLocks > 0)
2297  {
2298  /* Fix the locallock to show just the session locks */
2299  locallock->nLocks = lockOwners[0].nLocks;
2300  locallock->numLockOwners = 1;
2301  /* We aren't deleting this locallock, so done */
2302  continue;
2303  }
2304  else
2305  locallock->numLockOwners = 0;
2306  }
2307 
2308 #ifdef USE_ASSERT_CHECKING
2309 
2310  /*
2311  * Tuple locks are currently held only for short durations within a
2312  * transaction. Check that we didn't forget to release one.
2313  */
2314  if (LOCALLOCK_LOCKTAG(*locallock) == LOCKTAG_TUPLE && !allLocks)
2315  elog(WARNING, "tuple lock held at commit");
2316 #endif
2317 
2318  /*
2319  * If the lock or proclock pointers are NULL, this lock was taken via
2320  * the relation fast-path (and is not known to have been transferred).
2321  */
2322  if (locallock->proclock == NULL || locallock->lock == NULL)
2323  {
2324  LOCKMODE lockmode = locallock->tag.mode;
2325  Oid relid;
2326 
2327  /* Verify that a fast-path lock is what we've got. */
2328  if (!EligibleForRelationFastPath(&locallock->tag.lock, lockmode))
2329  elog(PANIC, "locallock table corrupted");
2330 
2331  /*
2332  * If we don't currently hold the LWLock that protects our
2333  * fast-path data structures, we must acquire it before attempting
2334  * to release the lock via the fast-path. We will continue to
2335  * hold the LWLock until we're done scanning the locallock table,
2336  * unless we hit a transferred fast-path lock. (XXX is this
2337  * really such a good idea? There could be a lot of entries ...)
2338  */
2339  if (!have_fast_path_lwlock)
2340  {
2341  LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
2342  have_fast_path_lwlock = true;
2343  }
2344 
2345  /* Attempt fast-path release. */
2346  relid = locallock->tag.lock.locktag_field2;
2347  if (FastPathUnGrantRelationLock(relid, lockmode))
2348  {
2349  RemoveLocalLock(locallock);
2350  continue;
2351  }
2352 
2353  /*
2354  * Our lock, originally taken via the fast path, has been
2355  * transferred to the main lock table. That's going to require
2356  * some extra work, so release our fast-path lock before starting.
2357  */
2358  LWLockRelease(&MyProc->fpInfoLock);
2359  have_fast_path_lwlock = false;
2360 
2361  /*
2362  * Now dump the lock. We haven't got a pointer to the LOCK or
2363  * PROCLOCK in this case, so we have to handle this a bit
2364  * differently than a normal lock release. Unfortunately, this
2365  * requires an extra LWLock acquire-and-release cycle on the
2366  * partitionLock, but hopefully it shouldn't happen often.
2367  */
2368  LockRefindAndRelease(lockMethodTable, MyProc,
2369  &locallock->tag.lock, lockmode, false);
2370  RemoveLocalLock(locallock);
2371  continue;
2372  }
2373 
2374  /* Mark the proclock to show we need to release this lockmode */
2375  if (locallock->nLocks > 0)
2376  locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
2377 
2378  /* And remove the locallock hashtable entry */
2379  RemoveLocalLock(locallock);
2380  }
2381 
2382  /* Done with the fast-path data structures */
2383  if (have_fast_path_lwlock)
2384  LWLockRelease(&MyProc->fpInfoLock);
2385 
2386  /*
2387  * Now, scan each lock partition separately.
2388  */
2389  for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
2390  {
2391  LWLock *partitionLock;
2392  dlist_head *procLocks = &MyProc->myProcLocks[partition];
2393  dlist_mutable_iter proclock_iter;
2394 
2395  partitionLock = LockHashPartitionLockByIndex(partition);
2396 
2397  /*
2398  * If the proclock list for this partition is empty, we can skip
2399  * acquiring the partition lock. This optimization is trickier than
2400  * it looks, because another backend could be in process of adding
2401  * something to our proclock list due to promoting one of our
2402  * fast-path locks. However, any such lock must be one that we
2403  * decided not to delete above, so it's okay to skip it again now;
2404  * we'd just decide not to delete it again. We must, however, be
2405  * careful to re-fetch the list header once we've acquired the
2406  * partition lock, to be sure we have a valid, up-to-date pointer.
2407  * (There is probably no significant risk if pointer fetch/store is
2408  * atomic, but we don't wish to assume that.)
2409  *
2410  * XXX This argument assumes that the locallock table correctly
2411  * represents all of our fast-path locks. While allLocks mode
2412  * guarantees to clean up all of our normal locks regardless of the
2413  * locallock situation, we lose that guarantee for fast-path locks.
2414  * This is not ideal.
2415  */
2416  if (dlist_is_empty(procLocks))
2417  continue; /* needn't examine this partition */
2418 
2419  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2420 
2421  dlist_foreach_modify(proclock_iter, procLocks)
2422  {
2423  PROCLOCK *proclock = dlist_container(PROCLOCK, procLink, proclock_iter.cur);
2424  bool wakeupNeeded = false;
2425 
2426  Assert(proclock->tag.myProc == MyProc);
2427 
2428  lock = proclock->tag.myLock;
2429 
2430  /* Ignore items that are not of the lockmethod to be removed */
2431  if (LOCK_LOCKMETHOD(*lock) != lockmethodid)
2432  continue;
2433 
2434  /*
2435  * In allLocks mode, force release of all locks even if locallock
2436  * table had problems
2437  */
2438  if (allLocks)
2439  proclock->releaseMask = proclock->holdMask;
2440  else
2441  Assert((proclock->releaseMask & ~proclock->holdMask) == 0);
2442 
2443  /*
2444  * Ignore items that have nothing to be released, unless they have
2445  * holdMask == 0 and are therefore recyclable
2446  */
2447  if (proclock->releaseMask == 0 && proclock->holdMask != 0)
2448  continue;
2449 
2450  PROCLOCK_PRINT("LockReleaseAll", proclock);
2451  LOCK_PRINT("LockReleaseAll", lock, 0);
2452  Assert(lock->nRequested >= 0);
2453  Assert(lock->nGranted >= 0);
2454  Assert(lock->nGranted <= lock->nRequested);
2455  Assert((proclock->holdMask & ~lock->grantMask) == 0);
2456 
2457  /*
2458  * Release the previously-marked lock modes
2459  */
2460  for (i = 1; i <= numLockModes; i++)
2461  {
2462  if (proclock->releaseMask & LOCKBIT_ON(i))
2463  wakeupNeeded |= UnGrantLock(lock, i, proclock,
2464  lockMethodTable);
2465  }
2466  Assert((lock->nRequested >= 0) && (lock->nGranted >= 0));
2467  Assert(lock->nGranted <= lock->nRequested);
2468  LOCK_PRINT("LockReleaseAll: updated", lock, 0);
2469 
2470  proclock->releaseMask = 0;
2471 
2472  /* CleanUpLock will wake up waiters if needed. */
2473  CleanUpLock(lock, proclock,
2474  lockMethodTable,
2475  LockTagHashCode(&lock->tag),
2476  wakeupNeeded);
2477  } /* loop over PROCLOCKs within this partition */
2478 
2479  LWLockRelease(partitionLock);
2480  } /* loop over partitions */
2481 
2482 #ifdef LOCK_DEBUG
2483  if (*(lockMethodTable->trace_flag))
2484  elog(LOG, "LockReleaseAll done");
2485 #endif
2486 }
2487 
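/*
 * For context, a hedged sketch of the usual caller (proc.c's
 * ProcReleaseLocks(), quoted from memory rather than from this file): at the
 * end of a top-level transaction it does roughly
 *
 *     LockReleaseAll(DEFAULT_LOCKMETHOD, !isCommit);   (session locks survive commit)
 *     LockReleaseAll(USER_LOCKMETHOD, false);          (transaction-level advisory locks)
 *
 * so ordinary locks are dropped wholesale here instead of one LockRelease()
 * call per lock.
 */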
2488 /*
2489  * LockReleaseSession -- Release all session locks of the specified lock method
2490  * that are held by the current process.
2491  */
2492 void
2493 LockReleaseSession(LOCKMETHODID lockmethodid)
2494 {
2495  HASH_SEQ_STATUS status;
2496  LOCALLOCK *locallock;
2497 
2498  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2499  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2500 
2501  hash_seq_init(&status, LockMethodLocalHash);
2502 
2503  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2504  {
2505  /* Ignore items that are not of the specified lock method */
2506  if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
2507  continue;
2508 
2509  ReleaseLockIfHeld(locallock, true);
2510  }
2511 }
2512 
2513 /*
2514  * LockReleaseCurrentOwner
2515  * Release all locks belonging to CurrentResourceOwner
2516  *
2517  * If the caller knows what those locks are, it can pass them as an array.
2518  * That speeds up the call significantly, when a lot of locks are held.
2519  * Otherwise, pass NULL for locallocks, and we'll traverse through our hash
2520  * table to find them.
2521  */
2522 void
2523 LockReleaseCurrentOwner(LOCALLOCK **locallocks, int nlocks)
2524 {
2525  if (locallocks == NULL)
2526  {
2527  HASH_SEQ_STATUS status;
2528  LOCALLOCK *locallock;
2529 
2530  hash_seq_init(&status, LockMethodLocalHash);
2531 
2532  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2533  ReleaseLockIfHeld(locallock, false);
2534  }
2535  else
2536  {
2537  int i;
2538 
2539  for (i = nlocks - 1; i >= 0; i--)
2540  ReleaseLockIfHeld(locallocks[i], false);
2541  }
2542 }
2543 
2544 /*
2545  * ReleaseLockIfHeld
2546  * Release any session-level locks on this lockable object if sessionLock
2547  * is true; else, release any locks held by CurrentResourceOwner.
2548  *
2549  * It is tempting to pass this a ResourceOwner pointer (or NULL for session
2550  * locks), but without refactoring LockRelease() we cannot support releasing
2551  * locks belonging to resource owners other than CurrentResourceOwner.
2552  * If we were to refactor, it'd be a good idea to fix it so we don't have to
2553  * do a hashtable lookup of the locallock, too. However, currently this
2554  * function isn't used heavily enough to justify refactoring for its
2555  * convenience.
2556  */
2557 static void
2558 ReleaseLockIfHeld(LOCALLOCK *locallock, bool sessionLock)
2559 {
2560  ResourceOwner owner;
2561  LOCALLOCKOWNER *lockOwners;
2562  int i;
2563 
2564  /* Identify owner for lock (must match LockRelease!) */
2565  if (sessionLock)
2566  owner = NULL;
2567  else
2568  owner = CurrentResourceOwner;
2569 
2570  /* Scan to see if there are any locks belonging to the target owner */
2571  lockOwners = locallock->lockOwners;
2572  for (i = locallock->numLockOwners - 1; i >= 0; i--)
2573  {
2574  if (lockOwners[i].owner == owner)
2575  {
2576  Assert(lockOwners[i].nLocks > 0);
2577  if (lockOwners[i].nLocks < locallock->nLocks)
2578  {
2579  /*
2580  * We will still hold this lock after forgetting this
2581  * ResourceOwner.
2582  */
2583  locallock->nLocks -= lockOwners[i].nLocks;
2584  /* compact out unused slot */
2585  locallock->numLockOwners--;
2586  if (owner != NULL)
2587  ResourceOwnerForgetLock(owner, locallock);
2588  if (i < locallock->numLockOwners)
2589  lockOwners[i] = lockOwners[locallock->numLockOwners];
2590  }
2591  else
2592  {
2593  Assert(lockOwners[i].nLocks == locallock->nLocks);
2594  /* We want to call LockRelease just once */
2595  lockOwners[i].nLocks = 1;
2596  locallock->nLocks = 1;
2597  if (!LockRelease(&locallock->tag.lock,
2598  locallock->tag.mode,
2599  sessionLock))
2600  elog(WARNING, "ReleaseLockIfHeld: failed??");
2601  }
2602  break;
2603  }
2604  }
2605 }
2606 
2607 /*
2608  * LockReassignCurrentOwner
2609  * Reassign all locks belonging to CurrentResourceOwner to belong
2610  * to its parent resource owner.
2611  *
2612  * If the caller knows what those locks are, it can pass them as an array.
2613  * That speeds up the call significantly, when a lot of locks are held
2614  * (e.g pg_dump with a large schema). Otherwise, pass NULL for locallocks,
2615  * and we'll traverse through our hash table to find them.
2616  */
2617 void
2618 LockReassignCurrentOwner(LOCALLOCK **locallocks, int nlocks)
2619 {
2620  ResourceOwner parent = ResourceOwnerGetParent(CurrentResourceOwner);
2621 
2622  Assert(parent != NULL);
2623 
2624  if (locallocks == NULL)
2625  {
2626  HASH_SEQ_STATUS status;
2627  LOCALLOCK *locallock;
2628 
2629  hash_seq_init(&status, LockMethodLocalHash);
2630 
2631  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2632  LockReassignOwner(locallock, parent);
2633  }
2634  else
2635  {
2636  int i;
2637 
2638  for (i = nlocks - 1; i >= 0; i--)
2639  LockReassignOwner(locallocks[i], parent);
2640  }
2641 }
2642 
2643 /*
2644  * Subroutine of LockReassignCurrentOwner. Reassigns a given lock belonging to
2645  * CurrentResourceOwner to its parent.
2646  */
2647 static void
2648 LockReassignOwner(LOCALLOCK *locallock, ResourceOwner parent)
2649 {
2650  LOCALLOCKOWNER *lockOwners;
2651  int i;
2652  int ic = -1;
2653  int ip = -1;
2654 
2655  /*
2656  * Scan to see if there are any locks belonging to current owner or its
2657  * parent
2658  */
2659  lockOwners = locallock->lockOwners;
2660  for (i = locallock->numLockOwners - 1; i >= 0; i--)
2661  {
2662  if (lockOwners[i].owner == CurrentResourceOwner)
2663  ic = i;
2664  else if (lockOwners[i].owner == parent)
2665  ip = i;
2666  }
2667 
2668  if (ic < 0)
2669  return; /* no current locks */
2670 
2671  if (ip < 0)
2672  {
2673  /* Parent has no slot, so just give it the child's slot */
2674  lockOwners[ic].owner = parent;
2675  ResourceOwnerRememberLock(parent, locallock);
2676  }
2677  else
2678  {
2679  /* Merge child's count with parent's */
2680  lockOwners[ip].nLocks += lockOwners[ic].nLocks;
2681  /* compact out unused slot */
2682  locallock->numLockOwners--;
2683  if (ic < locallock->numLockOwners)
2684  lockOwners[ic] = lockOwners[locallock->numLockOwners];
2685  }
2686  ResourceOwnerForgetLock(CurrentResourceOwner, locallock);
2687 }
2688 
2689 /*
2690  * FastPathGrantRelationLock
2691  * Grant lock using per-backend fast-path array, if there is space.
2692  */
2693 static bool
2694 FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode)
2695 {
2696  uint32 i;
2697  uint32 unused_slot = FP_LOCK_SLOTS_PER_BACKEND;
2698 
2699  /* fast-path group the lock belongs to */
2700  uint32 group = FAST_PATH_REL_GROUP(relid);
2701 
2702  /* Scan for existing entry for this relid, remembering empty slot. */
2703  for (i = 0; i < FP_LOCK_SLOTS_PER_GROUP; i++)
2704  {
2705  /* index into the whole per-backend array */
2706  uint32 f = FAST_PATH_SLOT(group, i);
2707 
2708  if (FAST_PATH_GET_BITS(MyProc, f) == 0)
2709  unused_slot = f;
2710  else if (MyProc->fpRelId[f] == relid)
2711  {
2712  Assert(!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode));
2713  FAST_PATH_SET_LOCKMODE(MyProc, f, lockmode);
2714  return true;
2715  }
2716  }
2717 
2718  /* If no existing entry, use any empty slot. */
2719  if (unused_slot < FP_LOCK_SLOTS_PER_BACKEND)
2720  {
2721  MyProc->fpRelId[unused_slot] = relid;
2722  FAST_PATH_SET_LOCKMODE(MyProc, unused_slot, lockmode);
2723  ++FastPathLocalUseCounts[group];
2724  return true;
2725  }
2726 
2727  /* No existing entry, and no empty slot. */
2728  return false;
2729 }
2730 
2731 /*
2732  * FastPathUnGrantRelationLock
2733  * Release fast-path lock, if present. Update backend-private local
2734  * use count, while we're at it.
2735  */
2736 static bool
2737 FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode)
2738 {
2739  uint32 i;
2740  bool result = false;
2741 
2742  /* fast-path group the lock belongs to */
2743  uint32 group = FAST_PATH_REL_GROUP(relid);
2744 
2745  FastPathLocalUseCounts[group] = 0;
2746  for (i = 0; i < FP_LOCK_SLOTS_PER_GROUP; i++)
2747  {
2748  /* index into the whole per-backend array */
2749  uint32 f = FAST_PATH_SLOT(group, i);
2750 
2751  if (MyProc->fpRelId[f] == relid
2752  && FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
2753  {
2754  Assert(!result);
2755  FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
2756  result = true;
2757  /* we continue iterating so as to update FastPathLocalUseCount */
2758  }
2759  if (FAST_PATH_GET_BITS(MyProc, f) != 0)
2760  ++FastPathLocalUseCounts[group];
2761  }
2762  return result;
2763 }
2764 
2765 /*
2766  * FastPathTransferRelationLocks
2767  * Transfer locks matching the given lock tag from per-backend fast-path
2768  * arrays to the shared hash table.
2769  *
2770  * Returns true if successful, false if ran out of shared memory.
2771  */
2772 static bool
2773 FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag,
2774  uint32 hashcode)
2775 {
2776  LWLock *partitionLock = LockHashPartitionLock(hashcode);
2777  Oid relid = locktag->locktag_field2;
2778  uint32 i;
2779 
2780  /*
2781  * Every PGPROC that can potentially hold a fast-path lock is present in
2782  * ProcGlobal->allProcs. Prepared transactions are not, but any
2783  * outstanding fast-path locks held by prepared transactions are
2784  * transferred to the main lock table.
2785  */
2786  for (i = 0; i < ProcGlobal->allProcCount; i++)
2787  {
2788  PGPROC *proc = &ProcGlobal->allProcs[i];
2789  uint32 j,
2790  group;
2791 
2792  LWLockAcquire(&proc->fpInfoLock, LW_EXCLUSIVE);
2793 
2794  /*
2795  * If the target backend isn't referencing the same database as the
2796  * lock, then we needn't examine the individual relation IDs at all;
2797  * none of them can be relevant.
2798  *
2799  * proc->databaseId is set at backend startup time and never changes
2800  * thereafter, so it might be safe to perform this test before
2801  * acquiring &proc->fpInfoLock. In particular, it's certainly safe to
2802  * assume that if the target backend holds any fast-path locks, it
2803  * must have performed a memory-fencing operation (in particular, an
2804  * LWLock acquisition) since setting proc->databaseId. However, it's
2805  * less clear that our backend is certain to have performed a memory
2806  * fencing operation since the other backend set proc->databaseId. So
2807  * for now, we test it after acquiring the LWLock just to be safe.
2808  */
2809  if (proc->databaseId != locktag->locktag_field1)
2810  {
2811  LWLockRelease(&proc->fpInfoLock);
2812  continue;
2813  }
2814 
2815  /* fast-path group the lock belongs to */
2816  group = FAST_PATH_REL_GROUP(relid);
2817 
2818  for (j = 0; j < FP_LOCK_SLOTS_PER_GROUP; j++)
2819  {
2820  uint32 lockmode;
2821 
2822  /* index into the whole per-backend array */
2823  uint32 f = FAST_PATH_SLOT(group, j);
2824 
2825  /* Look for an allocated slot matching the given relid. */
2826  if (relid != proc->fpRelId[f] || FAST_PATH_GET_BITS(proc, f) == 0)
2827  continue;
2828 
2829  /* Find or create lock object. */
2830  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2831  for (lockmode = FAST_PATH_LOCKNUMBER_OFFSET;
2832  lockmode < FAST_PATH_LOCKNUMBER_OFFSET + FAST_PATH_BITS_PER_SLOT;
2833  ++lockmode)
2834  {
2835  PROCLOCK *proclock;
2836 
2837  if (!FAST_PATH_CHECK_LOCKMODE(proc, f, lockmode))
2838  continue;
2839  proclock = SetupLockInTable(lockMethodTable, proc, locktag,
2840  hashcode, lockmode);
2841  if (!proclock)
2842  {
2843  LWLockRelease(partitionLock);
2844  LWLockRelease(&proc->fpInfoLock);
2845  return false;
2846  }
2847  GrantLock(proclock->tag.myLock, proclock, lockmode);
2848  FAST_PATH_CLEAR_LOCKMODE(proc, f, lockmode);
2849  }
2850  LWLockRelease(partitionLock);
2851 
2852  /* No need to examine remaining slots. */
2853  break;
2854  }
2855  LWLockRelease(&proc->fpInfoLock);
2856  }
2857  return true;
2858 }
2859 
2860 /*
2861  * FastPathGetRelationLockEntry
2862  * Return the PROCLOCK for a lock originally taken via the fast-path,
2863  * transferring it to the primary lock table if necessary.
2864  *
2865  * Note: caller takes care of updating the locallock object.
2866  */
2867 static PROCLOCK *
2868 FastPathGetRelationLockEntry(LOCALLOCK *locallock)
2869 {
2870  LockMethod lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];
2871  LOCKTAG *locktag = &locallock->tag.lock;
2872  PROCLOCK *proclock = NULL;
2873  LWLock *partitionLock = LockHashPartitionLock(locallock->hashcode);
2874  Oid relid = locktag->locktag_field2;
2875  uint32 i,
2876  group;
2877 
2878  /* fast-path group the lock belongs to */
2879  group = FAST_PATH_REL_GROUP(relid);
2880 
2881  LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
2882 
2883  for (i = 0; i < FP_LOCK_SLOTS_PER_GROUP; i++)
2884  {
2885  uint32 lockmode;
2886 
2887  /* index into the whole per-backend array */
2888  uint32 f = FAST_PATH_SLOT(group, i);
2889 
2890  /* Look for an allocated slot matching the given relid. */
2891  if (relid != MyProc->fpRelId[f] || FAST_PATH_GET_BITS(MyProc, f) == 0)
2892  continue;
2893 
2894  /* If we don't have a lock of the given mode, forget it! */
2895  lockmode = locallock->tag.mode;
2896  if (!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
2897  break;
2898 
2899  /* Find or create lock object. */
2900  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2901 
2902  proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
2903  locallock->hashcode, lockmode);
2904  if (!proclock)
2905  {
2906  LWLockRelease(partitionLock);
2907  LWLockRelease(&MyProc->fpInfoLock);
2908  ereport(ERROR,
2909  (errcode(ERRCODE_OUT_OF_MEMORY),
2910  errmsg("out of shared memory"),
2911  errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
2912  }
2913  GrantLock(proclock->tag.myLock, proclock, lockmode);
2914  FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
2915 
2916  LWLockRelease(partitionLock);
2917 
2918  /* No need to examine remaining slots. */
2919  break;
2920  }
2921 
2922  LWLockRelease(&MyProc->fpInfoLock);
2923 
2924  /* Lock may have already been transferred by some other backend. */
2925  if (proclock == NULL)
2926  {
2927  LOCK *lock;
2928  PROCLOCKTAG proclocktag;
2929  uint32 proclock_hashcode;
2930 
2931  LWLockAcquire(partitionLock, LW_SHARED);
2932 
2933  lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
2934  locktag,
2935  locallock->hashcode,
2936  HASH_FIND,
2937  NULL);
2938  if (!lock)
2939  elog(ERROR, "failed to re-find shared lock object");
2940 
2941  proclocktag.myLock = lock;
2942  proclocktag.myProc = MyProc;
2943 
2944  proclock_hashcode = ProcLockHashCode(&proclocktag, locallock->hashcode);
2945  proclock = (PROCLOCK *)
2946  hash_search_with_hash_value(LockMethodProcLockHash,
2947  &proclocktag,
2948  proclock_hashcode,
2949  HASH_FIND,
2950  NULL);
2951  if (!proclock)
2952  elog(ERROR, "failed to re-find shared proclock object");
2953  LWLockRelease(partitionLock);
2954  }
2955 
2956  return proclock;
2957 }
2958 
2959 /*
2960  * GetLockConflicts
2961  * Get an array of VirtualTransactionIds of xacts currently holding locks
2962  * that would conflict with the specified lock/lockmode.
2963  * xacts merely awaiting such a lock are NOT reported.
2964  *
2965  * The result array is palloc'd and is terminated with an invalid VXID.
2966  * *countp, if not null, is updated to the number of items set.
2967  *
2968  * Of course, the result could be out of date by the time it's returned, so
2969  * use of this function has to be thought about carefully. Similarly, a
2970  * PGPROC with no "lxid" will be considered non-conflicting regardless of any
2971  * lock it holds. Existing callers don't care about a locker after that
2972  * locker's pg_xact updates complete. CommitTransaction() clears "lxid" after
2973  * pg_xact updates and before releasing locks.
2974  *
2975  * Note we never include the current xact's vxid in the result array,
2976  * since an xact never blocks itself.
2977  */
2978 VirtualTransactionId *
2979 GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
2980 {
2981  static VirtualTransactionId *vxids;
2982  LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
2983  LockMethod lockMethodTable;
2984  LOCK *lock;
2985  LOCKMASK conflictMask;
2986  dlist_iter proclock_iter;
2987  PROCLOCK *proclock;
2988  uint32 hashcode;
2989  LWLock *partitionLock;
2990  int count = 0;
2991  int fast_count = 0;
2992 
2993  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2994  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2995  lockMethodTable = LockMethods[lockmethodid];
2996  if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
2997  elog(ERROR, "unrecognized lock mode: %d", lockmode);
2998 
2999  /*
3000  * Allocate memory to store results, and fill with InvalidVXID. We only
3001  * need enough space for MaxBackends + max_prepared_xacts + a terminator.
3002  * InHotStandby allocate once in TopMemoryContext.
3003  */
3004  if (InHotStandby)
3005  {
3006  if (vxids == NULL)
3007  vxids = (VirtualTransactionId *)
3008  MemoryContextAlloc(TopMemoryContext,
3009  sizeof(VirtualTransactionId) *
3010  (MaxBackends + max_prepared_xacts + 1));
3011  }
3012  else
3013  vxids = (VirtualTransactionId *)
3014  palloc0(sizeof(VirtualTransactionId) *
3015  (MaxBackends + max_prepared_xacts + 1));
3016 
3017  /* Compute hash code and partition lock, and look up conflicting modes. */
3018  hashcode = LockTagHashCode(locktag);
3019  partitionLock = LockHashPartitionLock(hashcode);
3020  conflictMask = lockMethodTable->conflictTab[lockmode];
3021 
3022  /*
3023  * Fast path locks might not have been entered in the primary lock table.
3024  * If the lock we're dealing with could conflict with such a lock, we must
3025  * examine each backend's fast-path array for conflicts.
3026  */
3027  if (ConflictsWithRelationFastPath(locktag, lockmode))
3028  {
3029  int i;
3030  Oid relid = locktag->locktag_field2;
3031  VirtualTransactionId vxid;
3032 
3033  /*
3034  * Iterate over relevant PGPROCs. Anything held by a prepared
3035  * transaction will have been transferred to the primary lock table,
3036  * so we need not worry about those. This is all a bit fuzzy, because
3037  * new locks could be taken after we've visited a particular
3038  * partition, but the callers had better be prepared to deal with that
3039  * anyway, since the locks could equally well be taken between the
3040  * time we return the value and the time the caller does something
3041  * with it.
3042  */
3043  for (i = 0; i < ProcGlobal->allProcCount; i++)
3044  {
3045  PGPROC *proc = &ProcGlobal->allProcs[i];
3046  uint32 j,
3047  group;
3048 
3049  /* A backend never blocks itself */
3050  if (proc == MyProc)
3051  continue;
3052 
3053  LWLockAcquire(&proc->fpInfoLock, LW_SHARED);
3054 
3055  /*
3056  * If the target backend isn't referencing the same database as
3057  * the lock, then we needn't examine the individual relation IDs
3058  * at all; none of them can be relevant.
3059  *
3060  * See FastPathTransferRelationLocks() for discussion of why we do
3061  * this test after acquiring the lock.
3062  */
3063  if (proc->databaseId != locktag->locktag_field1)
3064  {
3065  LWLockRelease(&proc->fpInfoLock);
3066  continue;
3067  }
3068 
3069  /* fast-path group the lock belongs to */
3070  group = FAST_PATH_REL_GROUP(relid);
3071 
3072  for (j = 0; j < FP_LOCK_SLOTS_PER_GROUP; j++)
3073  {
3074  uint32 lockmask;
3075 
3076  /* index into the whole per-backend array */
3077  uint32 f = FAST_PATH_SLOT(group, j);
3078 
3079  /* Look for an allocated slot matching the given relid. */
3080  if (relid != proc->fpRelId[f])
3081  continue;
3082  lockmask = FAST_PATH_GET_BITS(proc, f);
3083  if (!lockmask)
3084  continue;
3085  lockmask <<= FAST_PATH_LOCKNUMBER_OFFSET;
3086 
3087  /*
3088  * There can only be one entry per relation, so if we found it
3089  * and it doesn't conflict, we can skip the rest of the slots.
3090  */
3091  if ((lockmask & conflictMask) == 0)
3092  break;
3093 
3094  /* Conflict! */
3095  GET_VXID_FROM_PGPROC(vxid, *proc);
3096 
3097  if (VirtualTransactionIdIsValid(vxid))
3098  vxids[count++] = vxid;
3099  /* else, xact already committed or aborted */
3100 
3101  /* No need to examine remaining slots. */
3102  break;
3103  }
3104 
3105  LWLockRelease(&proc->fpInfoLock);
3106  }
3107  }
3108 
3109  /* Remember how many fast-path conflicts we found. */
3110  fast_count = count;
3111 
3112  /*
3113  * Look up the lock object matching the tag.
3114  */
3115  LWLockAcquire(partitionLock, LW_SHARED);
3116 
3117  lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
3118  locktag,
3119  hashcode,
3120  HASH_FIND,
3121  NULL);
3122  if (!lock)
3123  {
3124  /*
3125  * If the lock object doesn't exist, there is nothing holding a lock
3126  * on this lockable object.
3127  */
3128  LWLockRelease(partitionLock);
3129  vxids[count].procNumber = INVALID_PROC_NUMBER;
3130  vxids[count].localTransactionId = InvalidLocalTransactionId;
3131  if (countp)
3132  *countp = count;
3133  return vxids;
3134  }
3135 
3136  /*
3137  * Examine each existing holder (or awaiter) of the lock.
3138  */
3139  dlist_foreach(proclock_iter, &lock->procLocks)
3140  {
3141  proclock = dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
3142 
3143  if (conflictMask & proclock->holdMask)
3144  {
3145  PGPROC *proc = proclock->tag.myProc;
3146 
3147  /* A backend never blocks itself */
3148  if (proc != MyProc)
3149  {
3150  VirtualTransactionId vxid;
3151 
3152  GET_VXID_FROM_PGPROC(vxid, *proc);
3153 
3154  if (VirtualTransactionIdIsValid(vxid))
3155  {
3156  int i;
3157 
3158  /* Avoid duplicate entries. */
3159  for (i = 0; i < fast_count; ++i)
3160  if (VirtualTransactionIdEquals(vxids[i], vxid))
3161  break;
3162  if (i >= fast_count)
3163  vxids[count++] = vxid;
3164  }
3165  /* else, xact already committed or aborted */
3166  }
3167  }
3168  }
3169 
3170  LWLockRelease(partitionLock);
3171 
3172  if (count > MaxBackends + max_prepared_xacts) /* should never happen */
3173  elog(PANIC, "too many conflicting locks found");
3174 
3175  vxids[count].procNumber = INVALID_PROC_NUMBER;
3176  vxids[count].localTransactionId = InvalidLocalTransactionId;
3177  if (countp)
3178  *countp = count;
3179  return vxids;
3180 }
3181 
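/*
 * A usage sketch (this is the pattern used by lmgr.c's WaitForLockers
 * machinery, shown here only for illustration): take one snapshot of the
 * conflicting VXIDs, then wait each of them out individually.
 *
 *     VirtualTransactionId *vxids;
 *     int         i;
 *
 *     vxids = GetLockConflicts(&tag, AccessExclusiveLock, NULL);
 *     for (i = 0; VirtualTransactionIdIsValid(vxids[i]); i++)
 *         VirtualXactLock(vxids[i], true);    (blocks until that xact ends)
 */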
3182 /*
3183  * Find a lock in the shared lock table and release it. It is the caller's
3184  * responsibility to verify that this is a sane thing to do. (For example, it
3185  * would be bad to release a lock here if there might still be a LOCALLOCK
3186  * object with pointers to it.)
3187  *
3188  * We currently use this in two situations: first, to release locks held by
3189  * prepared transactions on commit (see lock_twophase_postcommit); and second,
3190  * to release locks taken via the fast-path, transferred to the main hash
3191  * table, and then released (see LockReleaseAll).
3192  */
3193 static void
3194 LockRefindAndRelease(LockMethod lockMethodTable, PGPROC *proc,
3195  LOCKTAG *locktag, LOCKMODE lockmode,
3196  bool decrement_strong_lock_count)
3197 {
3198  LOCK *lock;
3199  PROCLOCK *proclock;
3200  PROCLOCKTAG proclocktag;
3201  uint32 hashcode;
3202  uint32 proclock_hashcode;
3203  LWLock *partitionLock;
3204  bool wakeupNeeded;
3205 
3206  hashcode = LockTagHashCode(locktag);
3207  partitionLock = LockHashPartitionLock(hashcode);
3208 
3209  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
3210 
3211  /*
3212  * Re-find the lock object (it had better be there).
3213  */
3214  lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
3215  locktag,
3216  hashcode,
3217  HASH_FIND,
3218  NULL);
3219  if (!lock)
3220  elog(PANIC, "failed to re-find shared lock object");
3221 
3222  /*
3223  * Re-find the proclock object (ditto).
3224  */
3225  proclocktag.myLock = lock;
3226  proclocktag.myProc = proc;
3227 
3228  proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
3229 
3230  proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
3231  &proclocktag,
3232  proclock_hashcode,
3233  HASH_FIND,
3234  NULL);
3235  if (!proclock)
3236  elog(PANIC, "failed to re-find shared proclock object");
3237 
3238  /*
3239  * Double-check that we are actually holding a lock of the type we want to
3240  * release.
3241  */
3242  if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
3243  {
3244  PROCLOCK_PRINT("lock_twophase_postcommit: WRONGTYPE", proclock);
3245  LWLockRelease(partitionLock);
3246  elog(WARNING, "you don't own a lock of type %s",
3247  lockMethodTable->lockModeNames[lockmode]);
3248  return;
3249  }
3250 
3251  /*
3252  * Do the releasing. CleanUpLock will waken any now-wakable waiters.
3253  */
3254  wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
3255 
3256  CleanUpLock(lock, proclock,
3257  lockMethodTable, hashcode,
3258  wakeupNeeded);
3259 
3260  LWLockRelease(partitionLock);
3261 
3262  /*
3263  * Decrement strong lock count. This logic is needed only for 2PC.
3264  */
3265  if (decrement_strong_lock_count
3266  && ConflictsWithRelationFastPath(locktag, lockmode))
3267  {
3268  uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
3269 
3270  SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
3271  Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
3272  FastPathStrongRelationLocks->count[fasthashcode]--;
3273  SpinLockRelease(&FastPathStrongRelationLocks->mutex);
3274  }
3275 }
3276 
3277 /*
3278  * CheckForSessionAndXactLocks
3279  * Check to see if transaction holds both session-level and xact-level
3280  * locks on the same object; if so, throw an error.
3281  *
3282  * If we have both session- and transaction-level locks on the same object,
3283  * PREPARE TRANSACTION must fail. This should never happen with regular
3284  * locks, since we only take those at session level in some special operations
3285  * like VACUUM. It's possible to hit this with advisory locks, though.
3286  *
3287  * It would be nice if we could keep the session hold and give away the
3288  * transactional hold to the prepared xact. However, that would require two
3289  * PROCLOCK objects, and we cannot be sure that another PROCLOCK will be
3290  * available when it comes time for PostPrepare_Locks to do the deed.
3291  * So for now, we error out while we can still do so safely.
3292  *
3293  * Since the LOCALLOCK table stores a separate entry for each lockmode,
3294  * we can't implement this check by examining LOCALLOCK entries in isolation.
3295  * We must build a transient hashtable that is indexed by locktag only.
3296  */
3297 static void
3298 CheckForSessionAndXactLocks(void)
3299 {
3300  typedef struct
3301  {
3302  LOCKTAG lock; /* identifies the lockable object */
3303  bool sessLock; /* is any lockmode held at session level? */
3304  bool xactLock; /* is any lockmode held at xact level? */
3305  } PerLockTagEntry;
3306 
3307  HASHCTL hash_ctl;
3308  HTAB *lockhtab;
3309  HASH_SEQ_STATUS status;
3310  LOCALLOCK *locallock;
3311 
3312  /* Create a local hash table keyed by LOCKTAG only */
3313  hash_ctl.keysize = sizeof(LOCKTAG);
3314  hash_ctl.entrysize = sizeof(PerLockTagEntry);
3315  hash_ctl.hcxt = CurrentMemoryContext;
3316 
3317  lockhtab = hash_create("CheckForSessionAndXactLocks table",
3318  256, /* arbitrary initial size */
3319  &hash_ctl,
3320  HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
3321 
3322  /* Scan local lock table to find entries for each LOCKTAG */
3323  hash_seq_init(&status, LockMethodLocalHash);
3324 
3325  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3326  {
3327  LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3328  PerLockTagEntry *hentry;
3329  bool found;
3330  int i;
3331 
3332  /*
3333  * Ignore VXID locks. We don't want those to be held by prepared
3334  * transactions, since they aren't meaningful after a restart.
3335  */
3336  if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3337  continue;
3338 
3339  /* Ignore it if we don't actually hold the lock */
3340  if (locallock->nLocks <= 0)
3341  continue;
3342 
3343  /* Otherwise, find or make an entry in lockhtab */
3344  hentry = (PerLockTagEntry *) hash_search(lockhtab,
3345  &locallock->tag.lock,
3346  HASH_ENTER, &found);
3347  if (!found) /* initialize, if newly created */
3348  hentry->sessLock = hentry->xactLock = false;
3349 
3350  /* Scan to see if we hold lock at session or xact level or both */
3351  for (i = locallock->numLockOwners - 1; i >= 0; i--)
3352  {
3353  if (lockOwners[i].owner == NULL)
3354  hentry->sessLock = true;
3355  else
3356  hentry->xactLock = true;
3357  }
3358 
3359  /*
3360  * We can throw error immediately when we see both types of locks; no
3361  * need to wait around to see if there are more violations.
3362  */
3363  if (hentry->sessLock && hentry->xactLock)
3364  ereport(ERROR,
3365  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3366  errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3367  }
3368 
3369  /* Success, so clean up */
3370  hash_destroy(lockhtab);
3371 }
3372 
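/*
 * Illustration of the failure case described above (assumed SQL, not part of
 * this file).  Advisory locks are the usual way to hold both kinds of lock on
 * one object:
 *
 *     SELECT pg_advisory_lock(42);           -- session-level hold
 *     BEGIN;
 *     SELECT pg_advisory_xact_lock(42);      -- xact-level hold on the same tag
 *     PREPARE TRANSACTION 'demo';            -- fails with the error above
 *
 * Regular locks are taken at session level only by special operations such as
 * VACUUM, so they normally cannot reach this error.
 */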
3373 /*
3374  * AtPrepare_Locks
3375  * Do the preparatory work for a PREPARE: make 2PC state file records
3376  * for all locks currently held.
3377  *
3378  * Session-level locks are ignored, as are VXID locks.
3379  *
3380  * For the most part, we don't need to touch shared memory for this ---
3381  * all the necessary state information is in the locallock table.
3382  * Fast-path locks are an exception, however: we move any such locks to
3383  * the main table before allowing PREPARE TRANSACTION to succeed.
3384  */
3385 void
3386 AtPrepare_Locks(void)
3387 {
3388  HASH_SEQ_STATUS status;
3389  LOCALLOCK *locallock;
3390 
3391  /* First, verify there aren't locks of both xact and session level */
3392  CheckForSessionAndXactLocks();
3393 
3394  /* Now do the per-locallock cleanup work */
3395  hash_seq_init(&status, LockMethodLocalHash);
3396 
3397  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3398  {
3399  TwoPhaseLockRecord record;
3400  LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3401  bool haveSessionLock;
3402  bool haveXactLock;
3403  int i;
3404 
3405  /*
3406  * Ignore VXID locks. We don't want those to be held by prepared
3407  * transactions, since they aren't meaningful after a restart.
3408  */
3409  if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3410  continue;
3411 
3412  /* Ignore it if we don't actually hold the lock */
3413  if (locallock->nLocks <= 0)
3414  continue;
3415 
3416  /* Scan to see whether we hold it at session or transaction level */
3417  haveSessionLock = haveXactLock = false;
3418  for (i = locallock->numLockOwners - 1; i >= 0; i--)
3419  {
3420  if (lockOwners[i].owner == NULL)
3421  haveSessionLock = true;
3422  else
3423  haveXactLock = true;
3424  }
3425 
3426  /* Ignore it if we have only session lock */
3427  if (!haveXactLock)
3428  continue;
3429 
3430  /* This can't happen, because we already checked it */
3431  if (haveSessionLock)
3432  ereport(ERROR,
3433  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3434  errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3435 
3436  /*
3437  * If the local lock was taken via the fast-path, we need to move it
3438  * to the primary lock table, or just get a pointer to the existing
3439  * primary lock table entry if by chance it's already been
3440  * transferred.
3441  */
3442  if (locallock->proclock == NULL)
3443  {
3444  locallock->proclock = FastPathGetRelationLockEntry(locallock);
3445  locallock->lock = locallock->proclock->tag.myLock;
3446  }
3447 
3448  /*
3449  * Arrange to not release any strong lock count held by this lock
3450  * entry. We must retain the count until the prepared transaction is
3451  * committed or rolled back.
3452  */
3453  locallock->holdsStrongLockCount = false;
3454 
3455  /*
3456  * Create a 2PC record.
3457  */
3458  memcpy(&(record.locktag), &(locallock->tag.lock), sizeof(LOCKTAG));
3459  record.lockmode = locallock->tag.mode;
3460 
3461  RegisterTwoPhaseRecord(TWOPHASE_RM_LOCK_ID, 0,
3462  &record, sizeof(TwoPhaseLockRecord));
3463  }
3464 }
3465 
3466 /*
3467  * PostPrepare_Locks
3468  * Clean up after successful PREPARE
3469  *
3470  * Here, we want to transfer ownership of our locks to a dummy PGPROC
3471  * that's now associated with the prepared transaction, and we want to
3472  * clean out the corresponding entries in the LOCALLOCK table.
3473  *
3474  * Note: by removing the LOCALLOCK entries, we are leaving dangling
3475  * pointers in the transaction's resource owner. This is OK at the
3476  * moment since resowner.c doesn't try to free locks retail at a toplevel
3477  * transaction commit or abort. We could alternatively zero out nLocks
3478  * and leave the LOCALLOCK entries to be garbage-collected by LockReleaseAll,
3479  * but that probably costs more cycles.
3480  */
3481 void
3482 PostPrepare_Locks(TransactionId xid)
3483 {
3484  PGPROC *newproc = TwoPhaseGetDummyProc(xid, false);
3485  HASH_SEQ_STATUS status;
3486  LOCALLOCK *locallock;
3487  LOCK *lock;
3488  PROCLOCK *proclock;
3489  PROCLOCKTAG proclocktag;
3490  int partition;
3491 
3492  /* Can't prepare a lock group follower. */
3493  Assert(MyProc->lockGroupLeader == NULL ||
3494  MyProc->lockGroupLeader == MyProc);
3495 
3496  /* This is a critical section: any error means big trouble */
3497  START_CRIT_SECTION();
3498 
3499  /*
3500  * First we run through the locallock table and get rid of unwanted
3501  * entries, then we scan the process's proclocks and transfer them to the
3502  * target proc.
3503  *
3504  * We do this separately because we may have multiple locallock entries
3505  * pointing to the same proclock, and we daren't end up with any dangling
3506  * pointers.
3507  */
3508  hash_seq_init(&status, LockMethodLocalHash);
3509 
3510  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3511  {
3512  LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3513  bool haveSessionLock;
3514  bool haveXactLock;
3515  int i;
3516 
3517  if (locallock->proclock == NULL || locallock->lock == NULL)
3518  {
3519  /*
3520  * We must've run out of shared memory while trying to set up this
3521  * lock. Just forget the local entry.
3522  */
3523  Assert(locallock->nLocks == 0);
3524  RemoveLocalLock(locallock);
3525  continue;
3526  }
3527 
3528  /* Ignore VXID locks */
3529  if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3530  continue;
3531 
3532  /* Scan to see whether we hold it at session or transaction level */
3533  haveSessionLock = haveXactLock = false;
3534  for (i = locallock->numLockOwners - 1; i >= 0; i--)
3535  {
3536  if (lockOwners[i].owner == NULL)
3537  haveSessionLock = true;
3538  else
3539  haveXactLock = true;
3540  }
3541 
3542  /* Ignore it if we have only session lock */
3543  if (!haveXactLock)
3544  continue;
3545 
3546  /* This can't happen, because we already checked it */
3547  if (haveSessionLock)
3548  ereport(PANIC,
3549  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3550  errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3551 
3552  /* Mark the proclock to show we need to release this lockmode */
3553  if (locallock->nLocks > 0)
3554  locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
3555 
3556  /* And remove the locallock hashtable entry */
3557  RemoveLocalLock(locallock);
3558  }
3559 
3560  /*
3561  * Now, scan each lock partition separately.
3562  */
3563  for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
3564  {
3565  LWLock *partitionLock;
3566  dlist_head *procLocks = &(MyProc->myProcLocks[partition]);
3567  dlist_mutable_iter proclock_iter;
3568 
3569  partitionLock = LockHashPartitionLockByIndex(partition);
3570 
3571  /*
3572  * If the proclock list for this partition is empty, we can skip
3573  * acquiring the partition lock. This optimization is safer than the
3574  * situation in LockReleaseAll, because we got rid of any fast-path
3575  * locks during AtPrepare_Locks, so there cannot be any case where
3576  * another backend is adding something to our lists now. For safety,
3577  * though, we code this the same way as in LockReleaseAll.
3578  */
3579  if (dlist_is_empty(procLocks))
3580  continue; /* needn't examine this partition */
3581 
3582  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
3583 
3584  dlist_foreach_modify(proclock_iter, procLocks)
3585  {
3586  proclock = dlist_container(PROCLOCK, procLink, proclock_iter.cur);
3587 
3588  Assert(proclock->tag.myProc == MyProc);
3589 
3590  lock = proclock->tag.myLock;
3591 
3592  /* Ignore VXID locks */
3593  if (lock->tag.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3594  continue;
3595 
3596  PROCLOCK_PRINT("PostPrepare_Locks", proclock);
3597  LOCK_PRINT("PostPrepare_Locks", lock, 0);
3598  Assert(lock->nRequested >= 0);
3599  Assert(lock->nGranted >= 0);
3600  Assert(lock->nGranted <= lock->nRequested);
3601  Assert((proclock->holdMask & ~lock->grantMask) == 0);
3602 
3603  /* Ignore it if nothing to release (must be a session lock) */
3604  if (proclock->releaseMask == 0)
3605  continue;
3606 
3607  /* Else we should be releasing all locks */
3608  if (proclock->releaseMask != proclock->holdMask)
3609  elog(PANIC, "we seem to have dropped a bit somewhere");
3610 
3611  /*
3612  * We cannot simply modify proclock->tag.myProc to reassign
3613  * ownership of the lock, because that's part of the hash key and
3614  * the proclock would then be in the wrong hash chain. Instead
3615  * use hash_update_hash_key. (We used to create a new hash entry,
3616  * but that risks out-of-memory failure if other processes are
3617  * busy making proclocks too.) We must unlink the proclock from
3618  * our procLink chain and put it into the new proc's chain, too.
3619  *
3620  * Note: the updated proclock hash key will still belong to the
3621  * same hash partition, cf proclock_hash(). So the partition lock
3622  * we already hold is sufficient for this.
3623  */
3624  dlist_delete(&proclock->procLink);
3625 
3626  /*
3627  * Create the new hash key for the proclock.
3628  */
3629  proclocktag.myLock = lock;
3630  proclocktag.myProc = newproc;
3631 
3632  /*
3633  * Update groupLeader pointer to point to the new proc. (We'd
3634  * better not be a member of somebody else's lock group!)
3635  */
3636  Assert(proclock->groupLeader == proclock->tag.myProc);
3637  proclock->groupLeader = newproc;
3638 
3639  /*
3640  * Update the proclock. We should not find any existing entry for
3641  * the same hash key, since there can be only one entry for any
3642  * given lock with my own proc.
3643  */
3644  if (!hash_update_hash_key(LockMethodProcLockHash,
3645  proclock,
3646  &proclocktag))
3647  elog(PANIC, "duplicate entry found while reassigning a prepared transaction's locks");
3648 
3649  /* Re-link into the new proc's proclock list */
3650  dlist_push_tail(&newproc->myProcLocks[partition], &proclock->procLink);
3651 
3652  PROCLOCK_PRINT("PostPrepare_Locks: updated", proclock);
3653  } /* loop over PROCLOCKs within this partition */
3654 
3655  LWLockRelease(partitionLock);
3656  } /* loop over partitions */
3657 
3658  END_CRIT_SECTION();
3659 }
3660 
3661 
3662 /*
3663  * Estimate shared-memory space used for lock tables
3664  */
3665 Size
3666 LockManagerShmemSize(void)
3667 {
3668  Size size = 0;
3669  long max_table_size;
3670 
3671  /* lock hash table */
3672  max_table_size = NLOCKENTS();
3673  size = add_size(size, hash_estimate_size(max_table_size, sizeof(LOCK)));
3674 
3675  /* proclock hash table */
3676  max_table_size *= 2;
3677  size = add_size(size, hash_estimate_size(max_table_size, sizeof(PROCLOCK)));
3678 
3679  /*
3680  * Since NLOCKENTS is only an estimate, add 10% safety margin.
3681  */
3682  size = add_size(size, size / 10);
3683 
3684  return size;
3685 }
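/*
 * Editor's note: a minimal, standalone sketch (not part of lock.c) of the
 * sizing arithmetic above, using assumed, illustrative values.  The real
 * function uses the server's GUCs via NLOCKENTS() and hash_estimate_size()
 * for per-entry overhead; this only shows the entry counts that feed it.
 */
#include <stdio.h>

int
main(void)
{
	long		max_locks_per_xact = 64;	/* assumed GUC value */
	long		max_backends = 100;			/* assumed MaxBackends */
	long		max_prepared_xacts = 0; 	/* assumed GUC value */
	long		nlockents = max_locks_per_xact * (max_backends + max_prepared_xacts);

	/* one LOCK entry per NLOCKENTS() estimate, twice as many PROCLOCK entries */
	printf("estimated LOCK entries: %ld, PROCLOCK entries: %ld\n",
		   nlockents, nlockents * 2);
	return 0;
}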
3686 
3687 /*
3688  * GetLockStatusData - Return a summary of the lock manager's internal
3689  * status, for use in a user-level reporting function.
3690  *
3691  * The return data consists of an array of LockInstanceData objects,
3692  * which are a lightly abstracted version of the PROCLOCK data structures,
3693  * i.e. there is one entry for each unique lock and interested PGPROC.
3694  * It is the caller's responsibility to match up related items (such as
3695  * references to the same lockable object or PGPROC) if wanted.
3696  *
3697  * The design goal is to hold the LWLocks for as short a time as possible;
3698  * thus, this function simply makes a copy of the necessary data and releases
3699  * the locks, allowing the caller to contemplate and format the data for as
3700  * long as it pleases.
3701  */
3702 LockData *
3703 GetLockStatusData(void)
3704 {
3705  LockData *data;
3706  PROCLOCK *proclock;
3707  HASH_SEQ_STATUS seqstat;
3708  int els;
3709  int el;
3710  int i;
3711 
3712  data = (LockData *) palloc(sizeof(LockData));
3713 
3714  /* Guess how much space we'll need. */
3715  els = MaxBackends;
3716  el = 0;
3717  data->locks = (LockInstanceData *) palloc(sizeof(LockInstanceData) * els);
3718 
3719  /*
3720  * First, we iterate through the per-backend fast-path arrays, locking
3721  * them one at a time. This might produce an inconsistent picture of the
3722  * system state, but taking all of those LWLocks at the same time seems
3723  * impractical (in particular, note MAX_SIMUL_LWLOCKS). It shouldn't
3724  * matter too much, because none of these locks can be involved in lock
3725  * conflicts anyway - anything that might conflict must be present in the main lock
3726  * table. (For the same reason, we don't sweat about making leaderPid
3727  * completely valid. We cannot safely dereference another backend's
3728  * lockGroupLeader field without holding all lock partition locks, and
3729  * it's not worth that.)
3730  */
3731  for (i = 0; i < ProcGlobal->allProcCount; ++i)
3732  {
3733  PGPROC *proc = &ProcGlobal->allProcs[i];
3734  uint32 f;
3735 
3736  LWLockAcquire(&proc->fpInfoLock, LW_SHARED);
3737 
3738  for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; ++f)
3739  {
3740  LockInstanceData *instance;
3741  uint32 lockbits = FAST_PATH_GET_BITS(proc, f);
3742 
3743  /* Skip unallocated slots. */
3744  if (!lockbits)
3745  continue;
3746 
3747  if (el >= els)
3748  {
3749  els += MaxBackends;
3750  data->locks = (LockInstanceData *)
3751  repalloc(data->locks, sizeof(LockInstanceData) * els);
3752  }
3753 
3754  instance = &data->locks[el];
3755  SET_LOCKTAG_RELATION(instance->locktag, proc->databaseId,
3756  proc->fpRelId[f]);
3757  instance->holdMask = lockbits << FAST_PATH_LOCKNUMBER_OFFSET;
3758  instance->waitLockMode = NoLock;
3759  instance->vxid.procNumber = proc->vxid.procNumber;
3760  instance->vxid.localTransactionId = proc->vxid.lxid;
3761  instance->pid = proc->pid;
3762  instance->leaderPid = proc->pid;
3763  instance->fastpath = true;
3764 
3765  /*
3766  * Successfully taking fast path lock means there were no
3767  * conflicting locks.
3768  */
3769  instance->waitStart = 0;
3770 
3771  el++;
3772  }
3773 
3774  if (proc->fpVXIDLock)
3775  {
3776  VirtualTransactionId vxid;
3777  LockInstanceData *instance;
3778 
3779  if (el >= els)
3780  {
3781  els += MaxBackends;
3782  data->locks = (LockInstanceData *)
3783  repalloc(data->locks, sizeof(LockInstanceData) * els);
3784  }
3785 
3786  vxid.procNumber = proc->vxid.procNumber;
3787  vxid.localTransactionId = proc->fpLocalTransactionId;
3788 
3789  instance = &data->locks[el];
3790  SET_LOCKTAG_VIRTUALTRANSACTION(instance->locktag, vxid);
3791  instance->holdMask = LOCKBIT_ON(ExclusiveLock);
3792  instance->waitLockMode = NoLock;
3793  instance->vxid.procNumber = proc->vxid.procNumber;
3794  instance->vxid.localTransactionId = proc->vxid.lxid;
3795  instance->pid = proc->pid;
3796  instance->leaderPid = proc->pid;
3797  instance->fastpath = true;
3798  instance->waitStart = 0;
3799 
3800  el++;
3801  }
3802 
3803  LWLockRelease(&proc->fpInfoLock);
3804  }
3805 
3806  /*
3807  * Next, acquire lock on the entire shared lock data structure. We do
3808  * this so that, at least for locks in the primary lock table, the state
3809  * will be self-consistent.
3810  *
3811  * Since this is a read-only operation, we take shared instead of
3812  * exclusive lock. There's not a whole lot of point to this, because all
3813  * the normal operations require exclusive lock, but it doesn't hurt
3814  * anything either. It will at least allow two backends to do
3815  * GetLockStatusData in parallel.
3816  *
3817  * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
3818  */
3819  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
3820  LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);
3821 
3822  /* Now we can safely count the number of proclocks */
3823  data->nelements = el + hash_get_num_entries(LockMethodProcLockHash);
3824  if (data->nelements > els)
3825  {
3826  els = data->nelements;
3827  data->locks = (LockInstanceData *)
3828  repalloc(data->locks, sizeof(LockInstanceData) * els);
3829  }
3830 
3831  /* Now scan the tables to copy the data */
3832  hash_seq_init(&seqstat, LockMethodProcLockHash);
3833 
3834  while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
3835  {
3836  PGPROC *proc = proclock->tag.myProc;
3837  LOCK *lock = proclock->tag.myLock;
3838  LockInstanceData *instance = &data->locks[el];
3839 
3840  memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
3841  instance->holdMask = proclock->holdMask;
3842  if (proc->waitLock == proclock->tag.myLock)
3843  instance->waitLockMode = proc->waitLockMode;
3844  else
3845  instance->waitLockMode = NoLock;
3846  instance->vxid.procNumber = proc->vxid.procNumber;
3847  instance->vxid.localTransactionId = proc->vxid.lxid;
3848  instance->pid = proc->pid;
3849  instance->leaderPid = proclock->groupLeader->pid;
3850  instance->fastpath = false;
3851  instance->waitStart = (TimestampTz) pg_atomic_read_u64(&proc->waitStart);
3852 
3853  el++;
3854  }
3855 
3856  /*
3857  * And release locks. We do this in reverse order for two reasons: (1)
3858  * Anyone else who needs more than one of the locks will be trying to lock
3859  * them in increasing order; we don't want to release the other process
3860  * until it can get all the locks it needs. (2) This avoids O(N^2)
3861  * behavior inside LWLockRelease.
3862  */
3863  for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
3864  LWLockRelease(LockHashPartitionLockByIndex(i));
3865 
3866  Assert(el == data->nelements);
3867 
3868  return data;
3869 }
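/*
 * Editor's note: a minimal sketch (not part of lock.c) of a backend-side
 * caller consuming the returned LockData, in the spirit of pg_lock_status();
 * the function name and the LOG-level reporting are illustrative only.
 */
#include "postgres.h"

#include "storage/lock.h"

static void
report_lock_instances(void)
{
	LockData   *data = GetLockStatusData();

	for (int i = 0; i < data->nelements; i++)
	{
		LockInstanceData *instance = &data->locks[i];

		elog(LOG, "pid %d: holdMask %x, locktag type %u, fastpath %d",
			 instance->pid, (unsigned int) instance->holdMask,
			 (unsigned int) instance->locktag.locktag_type,
			 (int) instance->fastpath);
	}
}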
3870 
3871 /*
3872  * GetBlockerStatusData - Return a summary of the lock manager's state
3873  * concerning locks that are blocking the specified PID or any member of
3874  * the PID's lock group, for use in a user-level reporting function.
3875  *
3876  * For each PID within the lock group that is awaiting some heavyweight lock,
3877  * the return data includes an array of LockInstanceData objects, which are
3878  * the same data structure used by GetLockStatusData; but unlike that function,
3879  * this one reports only the PROCLOCKs associated with the lock that that PID
3880  * is blocked on. (Hence, all the locktags should be the same for any one
3881  * blocked PID.) In addition, we return an array of the PIDs of those backends
3882  * that are ahead of the blocked PID in the lock's wait queue. These can be
3883  * compared with the PIDs in the LockInstanceData objects to determine which
3884  * waiters are ahead of or behind the blocked PID in the queue.
3885  *
3886  * If blocked_pid isn't a valid backend PID or nothing in its lock group is
3887  * waiting on any heavyweight lock, return empty arrays.
3888  *
3889  * The design goal is to hold the LWLocks for as short a time as possible;
3890  * thus, this function simply makes a copy of the necessary data and releases
3891  * the locks, allowing the caller to contemplate and format the data for as
3892  * long as it pleases.
3893  */
3894 BlockedProcsData *
3895 GetBlockerStatusData(int blocked_pid)
3896 {
3897  BlockedProcsData *data;
3898  PGPROC *proc;
3899  int i;
3900 
3901  data = (BlockedProcsData *) palloc(sizeof(BlockedProcsData));
3902 
3903  /*
3904  * Guess how much space we'll need, and preallocate. Most of the time
3905  * this will avoid needing to do repalloc while holding the LWLocks. (We
3906  * assume, but check with an Assert, that MaxBackends is enough entries
3907  * for the procs[] array; the other two could need enlargement, though.)
3908  */
3909  data->nprocs = data->nlocks = data->npids = 0;
3910  data->maxprocs = data->maxlocks = data->maxpids = MaxBackends;
3911  data->procs = (BlockedProcData *) palloc(sizeof(BlockedProcData) * data->maxprocs);
3912  data->locks = (LockInstanceData *) palloc(sizeof(LockInstanceData) * data->maxlocks);
3913  data->waiter_pids = (int *) palloc(sizeof(int) * data->maxpids);
3914 
3915  /*
3916  * In order to search the ProcArray for blocked_pid and assume that that
3917  * entry won't immediately disappear under us, we must hold ProcArrayLock.
3918  * In addition, to examine the lock grouping fields of any other backend,
3919  * we must hold all the hash partition locks. (Only one of those locks is
3920  * actually relevant for any one lock group, but we can't know which one
3921  * ahead of time.) It's fairly annoying to hold all those locks
3922  * throughout this, but it's no worse than GetLockStatusData(), and it
3923  * does have the advantage that we're guaranteed to return a
3924  * self-consistent instantaneous state.
3925  */
3926  LWLockAcquire(ProcArrayLock, LW_SHARED);
3927 
3928  proc = BackendPidGetProcWithLock(blocked_pid);
3929 
3930  /* Nothing to do if it's gone */
3931  if (proc != NULL)
3932  {
3933  /*
3934  * Acquire lock on the entire shared lock data structure. See notes
3935  * in GetLockStatusData().
3936  */
3937  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
3938  LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);
3939 
3940  if (proc->lockGroupLeader == NULL)
3941  {
3942  /* Easy case, proc is not a lock group member */
3943  GetSingleProcBlockerStatusData(proc, data);
3944  }
3945  else
3946  {
3947  /* Examine all procs in proc's lock group */
3948  dlist_iter iter;
3949 
3950  dlist_foreach(iter, &proc->lockGroupLeader->lockGroupMembers)
3951  {
3952  PGPROC *memberProc;
3953 
3954  memberProc = dlist_container(PGPROC, lockGroupLink, iter.cur);
3955  GetSingleProcBlockerStatusData(memberProc, data);
3956  }
3957  }
3958 
3959  /*
3960  * And release locks. See notes in GetLockStatusData().
3961  */
3962  for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
3963  LWLockRelease(LockHashPartitionLockByIndex(i));
3964 
3965  Assert(data->nprocs <= data->maxprocs);
3966  }
3967 
3968  LWLockRelease(ProcArrayLock);
3969 
3970  return data;
3971 }
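/*
 * Editor's note: a minimal sketch (not part of lock.c) showing how the
 * parallel arrays in BlockedProcsData are meant to be sliced per blocked
 * process, much as pg_blocking_pids() does; the function name and the
 * LOG-level reporting are illustrative only.
 */
#include "postgres.h"

#include "storage/lock.h"

static void
report_blockers(int blocked_pid)
{
	BlockedProcsData *data = GetBlockerStatusData(blocked_pid);

	for (int i = 0; i < data->nprocs; i++)
	{
		BlockedProcData *bproc = &data->procs[i];

		/* this proc's lock instances live in locks[first_lock .. first_lock + num_locks - 1] */
		for (int j = 0; j < bproc->num_locks; j++)
			elog(LOG, "pid %d is blocked; holder/waiter pid %d",
				 bproc->pid, data->locks[bproc->first_lock + j].pid);

		/* PIDs queued ahead of it live in waiter_pids[first_waiter ...] */
		for (int j = 0; j < bproc->num_waiters; j++)
			elog(LOG, "pid %d is queued behind pid %d",
				 bproc->pid, data->waiter_pids[bproc->first_waiter + j]);
	}
}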
3972 
3973 /* Accumulate data about one possibly-blocked proc for GetBlockerStatusData */
3974 static void
3975 GetSingleProcBlockerStatusData(PGPROC *blocked_proc, BlockedProcsData *data)
3976 {
3977  LOCK *theLock = blocked_proc->waitLock;
3978  BlockedProcData *bproc;
3979  dlist_iter proclock_iter;
3980  dlist_iter proc_iter;
3981  dclist_head *waitQueue;
3982  int queue_size;
3983 
3984  /* Nothing to do if this proc is not blocked */
3985  if (theLock == NULL)
3986  return;
3987 
3988  /* Set up a procs[] element */
3989  bproc = &data->procs[data->nprocs++];
3990  bproc->pid = blocked_proc->pid;
3991  bproc->first_lock = data->nlocks;
3992  bproc->first_waiter = data->npids;
3993 
3994  /*
3995  * We may ignore the proc's fast-path arrays, since nothing in those could
3996  * be related to a contended lock.
3997  */
3998 
3999  /* Collect all PROCLOCKs associated with theLock */
4000  dlist_foreach(proclock_iter, &theLock->procLocks)
4001  {
4002  PROCLOCK *proclock =
4003  dlist_container(PROCLOCK, lockLink, proclock_iter.cur);
4004  PGPROC *proc = proclock->tag.myProc;
4005  LOCK *lock = proclock->tag.myLock;
4006  LockInstanceData *instance;
4007 
4008  if (data->nlocks >= data->maxlocks)
4009  {
4010  data->maxlocks += MaxBackends;
4011  data->locks = (LockInstanceData *)
4012  repalloc(data->locks, sizeof(LockInstanceData) * data->maxlocks);
4013  }
4014 
4015  instance = &data->locks[data->nlocks];
4016  memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
4017  instance->holdMask = proclock->holdMask;
4018  if (proc->waitLock == lock)
4019  instance->waitLockMode = proc->waitLockMode;
4020  else
4021  instance->waitLockMode = NoLock;
4022  instance->vxid.procNumber = proc->vxid.procNumber;
4023  instance->vxid.localTransactionId = proc->vxid.lxid;
4024  instance->pid = proc->pid;
4025  instance->leaderPid = proclock->groupLeader->pid;
4026  instance->fastpath = false;
4027  data->nlocks++;
4028  }
4029 
4030  /* Enlarge waiter_pids[] if it's too small to hold all wait queue PIDs */
4031  waitQueue = &(theLock->waitProcs);
4032  queue_size = dclist_count(waitQueue);
4033 
4034  if (queue_size > data->maxpids - data->npids)
4035  {
4036  data->maxpids = Max(data->maxpids + MaxBackends,
4037  data->npids + queue_size);
4038  data->waiter_pids = (int *) repalloc(data->waiter_pids,
4039  sizeof(int) * data->maxpids);
4040  }
4041 
4042  /* Collect PIDs from the lock's wait queue, stopping at blocked_proc */
4043  dclist_foreach(proc_iter, waitQueue)
4044  {
4045  PGPROC *queued_proc = dlist_container(PGPROC, links, proc_iter.cur);
4046 
4047  if (queued_proc == blocked_proc)
4048  break;
4049  data->waiter_pids[data->npids++] = queued_proc->pid;
4050  queued_proc = (PGPROC *) queued_proc->links.next;
4051  }
4052 
4053  bproc->num_locks = data->nlocks - bproc->first_lock;
4054  bproc->num_waiters = data->npids - bproc->first_waiter;
4055 }
4056 
4057 /*
4058  * Returns a list of currently held AccessExclusiveLocks, for use by
4059  * LogStandbySnapshot(). The result is a palloc'd array,
4060  * with the number of elements returned into *nlocks.
4061  *
4062  * XXX This currently takes a lock on all partitions of the lock table,
4063  * but it's possible to do better. By reference counting locks and storing
4064  * the value in the ProcArray entry for each backend we could tell if any
4065  * locks need recording without having to acquire the partition locks and
4066  * scan the lock table. Whether that's worth the additional overhead
4067  * is pretty dubious though.
4068  */
4069 xl_standby_lock *
4070 GetRunningTransactionLocks(int *nlocks)
4071 {
4072  xl_standby_lock *accessExclusiveLocks;
4073  PROCLOCK *proclock;
4074  HASH_SEQ_STATUS seqstat;
4075  int i;
4076  int index;
4077  int els;
4078 
4079  /*
4080  * Acquire lock on the entire shared lock data structure.
4081  *
4082  * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
4083  */
4084  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
4085  LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);
4086 
4087  /* Now we can safely count the number of proclocks */
4088  els = hash_get_num_entries(LockMethodProcLockHash);
4089 
4090  /*
4091  * Allocating enough space for all locks in the lock table is overkill,
4092  * but it's more convenient and faster than having to enlarge the array.
4093  */
4094  accessExclusiveLocks = palloc(els * sizeof(xl_standby_lock));
4095 
4096  /* Now scan the tables to copy the data */
4097  hash_seq_init(&seqstat, LockMethodProcLockHash);
4098 
4099  /*
4100  * If lock is a currently granted AccessExclusiveLock then it will have
4101  * just one proclock holder, so locks are never accessed twice in this
4102  * particular case. Don't copy this code for use elsewhere because in the
4103  * general case this will give you duplicate locks when looking at
4104  * non-exclusive lock types.
4105  */
4106  index = 0;
4107  while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
4108  {
4109  /* make sure this definition matches the one used in LockAcquire */
4110  if ((proclock->holdMask & LOCKBIT_ON(AccessExclusiveLock)) &&
4111  proclock->tag.myLock->tag.locktag_type == LOCKTAG_RELATION)
4112  {
4113  PGPROC *proc = proclock->tag.myProc;
4114  LOCK *lock = proclock->tag.myLock;
4115  TransactionId xid = proc->xid;
4116 
4117  /*
4118  * Don't record locks for transactions if we know they have
4119  * already issued their WAL record for commit but not yet released
4120  * lock. It is still possible that we see locks held by already
4121  * complete transactions, if they haven't yet zeroed their xids.
4122  */
4123  if (!TransactionIdIsValid(xid))
4124  continue;
4125 
4126  accessExclusiveLocks[index].xid = xid;
4127  accessExclusiveLocks[index].dbOid = lock->tag.locktag_field1;
4128  accessExclusiveLocks[index].relOid = lock->tag.locktag_field2;
4129 
4130  index++;
4131  }
4132  }
4133 
4134  Assert(index <= els);
4135 
4136  /*
4137  * And release locks. We do this in reverse order for two reasons: (1)
4138  * Anyone else who needs more than one of the locks will be trying to lock
4139  * them in increasing order; we don't want to release the other process
4140  * until it can get all the locks it needs. (2) This avoids O(N^2)
4141  * behavior inside LWLockRelease.
4142  */
4143  for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
4144  LWLockRelease(LockHashPartitionLockByIndex(i));
4145 
4146  *nlocks = index;
4147  return accessExclusiveLocks;
4148 }
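/*
 * Editor's note: a minimal sketch (not part of lock.c) of how a caller such
 * as LogStandbySnapshot() might walk the returned array; the function name
 * and the LOG-level reporting are illustrative only.
 */
#include "postgres.h"

#include "storage/lock.h"

static void
report_access_exclusive_locks(void)
{
	int			nlocks;
	xl_standby_lock *locks = GetRunningTransactionLocks(&nlocks);

	for (int i = 0; i < nlocks; i++)
		elog(LOG, "xid %u holds AccessExclusiveLock on relation %u in database %u",
			 locks[i].xid, locks[i].relOid, locks[i].dbOid);

	pfree(locks);
}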
4149 
4150 /* Provide the textual name of any lock mode */
4151 const char *
4152 GetLockmodeName(LOCKMETHODID lockmethodid, LOCKMODE mode)
4153 {
4154  Assert(lockmethodid > 0 && lockmethodid < lengthof(LockMethods));
4155  Assert(mode > 0 && mode <= LockMethods[lockmethodid]->numLockModes);
4156  return LockMethods[lockmethodid]->lockModeNames[mode];
4157 }
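/*
 * Editor's note: a trivial, hypothetical usage sketch (not part of lock.c);
 * with the standard lock method this returns the mode's display name,
 * e.g. "AccessExclusiveLock".
 */
#include "postgres.h"

#include "storage/lock.h"

static const char *
describe_access_exclusive(void)
{
	return GetLockmodeName(DEFAULT_LOCKMETHOD, AccessExclusiveLock);
}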
4158 
4159 #ifdef LOCK_DEBUG
4160 /*
4161  * Dump all locks in the given proc's myProcLocks lists.
4162  *
4163  * Caller is responsible for having acquired appropriate LWLocks.
4164  */
4165 void
4166 DumpLocks(PGPROC *proc)
4167 {
4168  int i;
4169 
4170  if (proc == NULL)
4171  return;
4172 
4173  if (proc->waitLock)
4174  LOCK_PRINT("DumpLocks: waiting on", proc->waitLock, 0);
4175 
4176  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
4177  {
4178  dlist_head *procLocks = &proc->myProcLocks[i];
4179  dlist_iter iter;
4180 
4181  dlist_foreach(iter, procLocks)
4182  {
4183  PROCLOCK *proclock = dlist_container(PROCLOCK, procLink, iter.cur);
4184  LOCK *lock = proclock->tag.myLock;
4185 
4186  Assert(proclock->tag.myProc == proc);
4187  PROCLOCK_PRINT("DumpLocks", proclock);
4188  LOCK_PRINT("DumpLocks", lock, 0);
4189  }
4190  }
4191 }
4192 
4193 /*
4194  * Dump all lmgr locks.
4195  *
4196  * Caller is responsible for having acquired appropriate LWLocks.
4197  */
4198 void
4199 DumpAllLocks(void)
4200 {
4201  PGPROC *proc;
4202  PROCLOCK *proclock;
4203  LOCK *lock;
4204  HASH_SEQ_STATUS status;
4205 
4206  proc = MyProc;
4207 
4208  if (proc && proc->waitLock)
4209  LOCK_PRINT("DumpAllLocks: waiting on", proc->waitLock, 0);
4210 
4211  hash_seq_init(&status, LockMethodProcLockHash);
4212 
4213  while ((proclock = (PROCLOCK *) hash_seq_search(&status)) != NULL)
4214  {
4215  PROCLOCK_PRINT("DumpAllLocks", proclock);
4216 
4217  lock = proclock->tag.myLock;
4218  if (lock)
4219  LOCK_PRINT("DumpAllLocks", lock, 0);
4220  else
4221  elog(LOG, "DumpAllLocks: proclock->tag.myLock = NULL");
4222  }
4223 }
4224 #endif /* LOCK_DEBUG */
4225 
4226 /*
4227  * LOCK 2PC resource manager's routines
4228  */
4229 
4230 /*
4231  * Re-acquire a lock belonging to a transaction that was prepared.
4232  *
4233  * Because this function is run at db startup, re-acquiring the locks should
4234  * never conflict with running transactions because there are none. We
4235  * assume that the lock state represented by the stored 2PC files is legal.
4236  *
4237  * When switching from Hot Standby mode to normal operation, the locks will
4238  * already be held by the startup process. The locks are acquired for the new
4239  * procs without checking for conflicts, so we don't get a conflict between the
4240  * startup process and the dummy procs, even though we will momentarily have
4241  * a situation where two procs are holding the same AccessExclusiveLock,
4242  * which isn't normally possible because of the conflict. If we're in standby
4243  * mode, but a recovery snapshot hasn't been established yet, it's possible
4244  * that some but not all of the locks are already held by the startup process.
4245  *
4246  * This approach is simple, but also a bit dangerous, because if there isn't
4247  * enough shared memory to acquire the locks, an error will be thrown, which
4248  * is promoted to FATAL and recovery will abort, bringing down the postmaster.
4249  * A safer approach would be to transfer the locks like we do in
4250  * AtPrepare_Locks, but then again, in hot standby mode it's possible for
4251  * read-only backends to use up all the shared lock memory anyway, so that
4252  * replaying the WAL record that needs to acquire a lock will throw an error
4253  * and PANIC anyway.
4254  */
4255 void
4256 lock_twophase_recover(TransactionId xid, uint16 info,
4257  void *recdata, uint32 len)
4258 {
4259  TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4260  PGPROC *proc = TwoPhaseGetDummyProc(xid, false);
4261  LOCKTAG *locktag;
4262  LOCKMODE lockmode;
4263  LOCKMETHODID lockmethodid;
4264  LOCK *lock;
4265  PROCLOCK *proclock;
4266  PROCLOCKTAG proclocktag;
4267  bool found;
4268  uint32 hashcode;
4269  uint32 proclock_hashcode;
4270  int partition;
4271  LWLock *partitionLock;
4272  LockMethod lockMethodTable;
4273 
4274  Assert(len == sizeof(TwoPhaseLockRecord));
4275  locktag = &rec->locktag;
4276  lockmode = rec->lockmode;
4277  lockmethodid = locktag->locktag_lockmethodid;
4278 
4279  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4280  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4281  lockMethodTable = LockMethods[lockmethodid];
4282 
4283  hashcode = LockTagHashCode(locktag);
4284  partition = LockHashPartition(hashcode);
4285  partitionLock = LockHashPartitionLock(hashcode);
4286 
4287  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4288 
4289  /*
4290  * Find or create a lock with this tag.
4291  */
4292  lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
4293  locktag,
4294  hashcode,
4295  HASH_ENTER_NULL,
4296  &found);
4297  if (!lock)
4298  {
4299  LWLockRelease(partitionLock);
4300  ereport(ERROR,
4301  (errcode(ERRCODE_OUT_OF_MEMORY),
4302  errmsg("out of shared memory"),
4303  errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
4304  }
4305 
4306  /*
4307  * if it's a new lock object, initialize it
4308  */
4309  if (!found)
4310  {
4311  lock->grantMask = 0;
4312  lock->waitMask = 0;
4313  dlist_init(&lock->procLocks);
4314  dclist_init(&lock->waitProcs);
4315  lock->nRequested = 0;
4316  lock->nGranted = 0;
4317  MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
4318  MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
4319  LOCK_PRINT("lock_twophase_recover: new", lock, lockmode);
4320  }
4321  else
4322  {
4323  LOCK_PRINT("lock_twophase_recover: found", lock, lockmode);
4324  Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
4325  Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
4326  Assert(lock->nGranted <= lock->nRequested);
4327  }
4328 
4329  /*
4330  * Create the hash key for the proclock table.
4331  */
4332  proclocktag.myLock = lock;
4333  proclocktag.myProc = proc;
4334 
4335  proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
4336 
4337  /*
4338  * Find or create a proclock entry with this tag
4339  */
4340  proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
4341  &proclocktag,
4342  proclock_hashcode,
4343  HASH_ENTER_NULL,
4344  &found);
4345  if (!proclock)
4346  {
4347  /* Oops, not enough shmem for the proclock */
4348  if (lock->nRequested == 0)
4349  {
4350  /*
4351  * There are no other requestors of this lock, so garbage-collect
4352  * the lock object. We *must* do this to avoid a permanent leak
4353  * of shared memory, because there won't be anything to cause
4354  * anyone to release the lock object later.
4355  */
4356  Assert(dlist_is_empty(&lock->procLocks));
4357  if (!hash_search_with_hash_value(LockMethodLockHash,
4358  &(lock->tag),
4359  hashcode,
4360  HASH_REMOVE,
4361  NULL))
4362  elog(PANIC, "lock table corrupted");
4363  }
4364  LWLockRelease(partitionLock);
4365  ereport(ERROR,
4366  (errcode(ERRCODE_OUT_OF_MEMORY),
4367  errmsg("out of shared memory"),
4368  errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
4369  }
4370 
4371  /*
4372  * If new, initialize the new entry
4373  */
4374  if (!found)
4375  {
4376  Assert(proc->lockGroupLeader == NULL);
4377  proclock->groupLeader = proc;
4378  proclock->holdMask = 0;
4379  proclock->releaseMask = 0;
4380  /* Add proclock to appropriate lists */
4381  dlist_push_tail(&lock->procLocks, &proclock->lockLink);
4382  dlist_push_tail(&proc->myProcLocks[partition],
4383  &proclock->procLink);
4384  PROCLOCK_PRINT("lock_twophase_recover: new", proclock);
4385  }
4386  else
4387  {
4388  PROCLOCK_PRINT("lock_twophase_recover: found", proclock);
4389  Assert((proclock->holdMask & ~lock->grantMask) == 0);
4390  }
4391 
4392  /*
4393  * lock->nRequested and lock->requested[] count the total number of
4394  * requests, whether granted or waiting, so increment those immediately.
4395  */
4396  lock->nRequested++;
4397  lock->requested[lockmode]++;
4398  Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
4399 
4400  /*
4401  * We shouldn't already hold the desired lock.
4402  */
4403  if (proclock->holdMask & LOCKBIT_ON(lockmode))
4404  elog(ERROR, "lock %s on object %u/%u/%u is already held",
4405  lockMethodTable->lockModeNames[lockmode],
4406  lock->tag.locktag_field1, lock->tag.locktag_field2,
4407  lock->tag.locktag_field3);
4408 
4409  /*
4410  * We ignore any possible conflicts and just grant ourselves the lock, not
4411  * only because we don't bother to check, but also to avoid deadlocks when
4412  * switching from standby to normal mode. See function comment.
4413  */
4414  GrantLock(lock, proclock, lockmode);
4415 
4416  /*
4417  * Bump strong lock count, to make sure any fast-path lock requests won't
4418  * be granted without consulting the primary lock table.
4419  */
4420  if (ConflictsWithRelationFastPath(&lock->tag, lockmode))
4421  {
4422  uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
4423 
4424  SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
4425  FastPathStrongRelationLocks->count[fasthashcode]++;
4426  SpinLockRelease(&FastPathStrongRelationLocks->mutex);
4427  }
4428 
4429  LWLockRelease(partitionLock);
4430 }
4431 
4432 /*
4433  * Re-acquire a lock belonging to a transaction that was prepared, when
4434  * starting up into hot standby mode.
4435  */
4436 void
4437 lock_twophase_standby_recover(TransactionId xid, uint16 info,
4438  void *recdata, uint32 len)
4439 {
4440  TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4441  LOCKTAG *locktag;
4442  LOCKMODE lockmode;
4443  LOCKMETHODID lockmethodid;
4444 
4445  Assert(len == sizeof(TwoPhaseLockRecord));
4446  locktag = &rec->locktag;
4447  lockmode = rec->lockmode;
4448  lockmethodid = locktag->locktag_lockmethodid;
4449 
4450  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4451  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4452 
4453  if (lockmode == AccessExclusiveLock &&
4454  locktag->locktag_type == LOCKTAG_RELATION)
4455  {
4456  StandbyAcquireAccessExclusiveLock(xid,
4457  locktag->locktag_field1 /* dboid */ ,
4458  locktag->locktag_field2 /* reloid */ );
4459  }
4460 }
4461 
4462 
4463 /*
4464  * 2PC processing routine for COMMIT PREPARED case.
4465  *
4466  * Find and release the lock indicated by the 2PC record.
4467  */
4468 void
4469 lock_twophase_postcommit(TransactionId xid, uint16 info,
4470  void *recdata, uint32 len)
4471 {
4472  TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4473  PGPROC *proc = TwoPhaseGetDummyProc(xid, true);
4474  LOCKTAG *locktag;
4475  LOCKMETHODID lockmethodid;
4476  LockMethod lockMethodTable;
4477 
4478  Assert(len == sizeof(TwoPhaseLockRecord));
4479  locktag = &rec->locktag;
4480  lockmethodid = locktag->locktag_lockmethodid;
4481 
4482  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4483  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4484  lockMethodTable = LockMethods[lockmethodid];
4485 
4486  LockRefindAndRelease(lockMethodTable, proc, locktag, rec->lockmode, true);
4487 }
4488 
4489 /*
4490  * 2PC processing routine for ROLLBACK PREPARED case.
4491  *
4492  * This is actually just the same as the COMMIT case.
4493  */
4494 void
4495 lock_twophase_postabort(TransactionId xid, uint16 info,
4496  void *recdata, uint32 len)
4497 {
4498  lock_twophase_postcommit(xid, info, recdata, len);
4499 }
4500 
4501 /*
4502  * VirtualXactLockTableInsert
4503  *
4504  * Take vxid lock via the fast-path. There can't be any pre-existing
4505  * lockers, as we haven't advertised this vxid via the ProcArray yet.
4506  *
4507  * Since MyProc->fpLocalTransactionId will normally contain the same data
4508  * as MyProc->vxid.lxid, you might wonder if we really need both. The
4509  * difference is that MyProc->vxid.lxid is set and cleared unlocked, and
4510  * examined by procarray.c, while fpLocalTransactionId is protected by
4511  * fpInfoLock and is used only by the locking subsystem. Doing it this
4512  * way makes it easier to verify that there are no funny race conditions.
4513  *
4514  * We don't bother recording this lock in the local lock table, since it's
4515  * only ever released at the end of a transaction. Instead,
4516  * LockReleaseAll() calls VirtualXactLockTableCleanup().
4517  */
4518 void
4519 VirtualXactLockTableInsert(VirtualTransactionId vxid)
4520 {
4521  Assert(VirtualTransactionIdIsValid(vxid));
4522 
4523  LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
4524 
4525  Assert(MyProc->vxid.procNumber == vxid.procNumber);
4526  Assert(MyProc->fpLocalTransactionId == InvalidLocalTransactionId);
4527  Assert(MyProc->fpVXIDLock == false);
4528 
4529  MyProc->fpVXIDLock = true;
4530  MyProc->fpLocalTransactionId = vxid.localTransactionId;
4531 
4532  LWLockRelease(&MyProc->fpInfoLock);
4533 }
4534 
4535 /*
4536  * VirtualXactLockTableCleanup
4537  *
4538  * Check whether a VXID lock has been materialized; if so, release it,
4539  * unblocking waiters.
4540  */
4541 void
4542 VirtualXactLockTableCleanup(void)
4543 {
4544  bool fastpath;
4545  LocalTransactionId lxid;
4546 
4547  Assert(MyProc->vxid.procNumber != INVALID_PROC_NUMBER);
4548 
4549  /*
4550  * Clean up shared memory state.
4551  */
4552  LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
4553 
4554  fastpath = MyProc->fpVXIDLock;
4555  lxid = MyProc->fpLocalTransactionId;
4556  MyProc->fpVXIDLock = false;
4557  MyProc->fpLocalTransactionId = InvalidLocalTransactionId;
4558 
4559  LWLockRelease(&MyProc->fpInfoLock);
4560 
4561  /*
4562  * If fpVXIDLock has been cleared without touching fpLocalTransactionId,
4563  * that means someone transferred the lock to the main lock table.
4564  */
4565  if (!fastpath && LocalTransactionIdIsValid(lxid))
4566  {
4567  VirtualTransactionId vxid;
4568  LOCKTAG locktag;
4569 
4570  vxid.procNumber = MyProcNumber;
4571  vxid.localTransactionId = lxid;
4572  SET_LOCKTAG_VIRTUALTRANSACTION(locktag, vxid);
4573 
4574  LockRefindAndRelease(LockMethods[DEFAULT_LOCKMETHOD], MyProc,
4575  &locktag, ExclusiveLock, false);
4576  }
4577 }
4578 
4579 /*
4580  * XactLockForVirtualXact
4581  *
4582  * If TransactionIdIsValid(xid), this is essentially XactLockTableWait(xid,
4583  * NULL, NULL, XLTW_None) or ConditionalXactLockTableWait(xid). Unlike those
4584  * functions, it assumes "xid" is never a subtransaction and that "xid" is
4585  * prepared, committed, or aborted.
4586  *
4587  * If !TransactionIdIsValid(xid), this locks every prepared XID having been
4588  * known as "vxid" before its PREPARE TRANSACTION.
4589  */
4590 static bool
4591 XactLockForVirtualXact(VirtualTransactionId vxid,
4592  TransactionId xid, bool wait)
4593 {
4594  bool more = false;
4595 
4596  /* There is no point in waiting for 2PCs if you have no 2PCs. */
4597  if (max_prepared_xacts == 0)
4598  return true;
4599 
4600  do
4601  {
4602  LockAcquireResult lar;
4603  LOCKTAG tag;
4604 
4605  /* Clear state from previous iterations. */
4606  if (more)
4607  {
4608  xid = InvalidTransactionId;
4609  more = false;
4610  }
4611 
4612  /* If we have no xid, try to find one. */
4613  if (!TransactionIdIsValid(xid))
4614  xid = TwoPhaseGetXidByVirtualXID(vxid, &more);
4615  if (!TransactionIdIsValid(xid))
4616  {
4617  Assert(!more);
4618  return true;
4619  }
4620 
4621  /* Check or wait for XID completion. */
4622  SET_LOCKTAG_TRANSACTION(tag, xid);
4623  lar = LockAcquire(&tag, ShareLock, false, !wait);
4624  if (lar == LOCKACQUIRE_NOT_AVAIL)
4625  return false;
4626  LockRelease(&tag, ShareLock, false);
4627  } while (more);
4628 
4629  return true;
4630 }
4631 
4632 /*
4633  * VirtualXactLock
4634  *
4635  * If wait = true, wait as long as the given VXID or any XID acquired by the
4636  * same transaction is still running. Then, return true.
4637  *
4638  * If wait = false, just check whether that VXID or one of those XIDs is still
4639  * running, and return true or false.
4640  */
4641 bool
4642 VirtualXactLock(VirtualTransactionId vxid, bool wait)
4643 {
4644  LOCKTAG tag;
4645  PGPROC *proc;
4646  TransactionId xid = InvalidTransactionId;
4647 
4648  Assert(VirtualTransactionIdIsValid(vxid));
4649 
4650  if (VirtualTransactionIdIsRecoveredPreparedXact(vxid))
4651  /* no vxid lock; localTransactionId is a normal, locked XID */
4652  return XactLockForVirtualXact(vxid, vxid.localTransactionId, wait);
4653 
4654  SET_LOCKTAG_VIRTUALTRANSACTION(tag, vxid);
4655 
4656  /*
4657  * If a lock table entry must be made, this is the PGPROC on whose behalf
4658  * it must be done. Note that the transaction might end or the PGPROC
4659  * might be reassigned to a new backend before we get around to examining
4660  * it, but it doesn't matter. If we find upon examination that the
4661  * relevant lxid is no longer running here, that's enough to prove that
4662  * it's no longer running anywhere.
4663  */
4664  proc = ProcNumberGetProc(vxid.procNumber);
4665  if (proc == NULL)
4666  return XactLockForVirtualXact(vxid, InvalidTransactionId, wait);
4667 
4668  /*
4669  * We must acquire this lock before checking the procNumber and lxid
4670  * against the ones we're waiting for. The target backend will only set
4671  * or clear lxid while holding this lock.
4672  */
4673  LWLockAcquire(&proc->fpInfoLock, LW_EXCLUSIVE);
4674 
4675  if (proc->vxid.procNumber != vxid.procNumber
4676  || proc->fpLocalTransactionId != vxid.localTransactionId)
4677  {
4678  /* VXID ended */
4679  LWLockRelease(&proc->fpInfoLock);
4680  return XactLockForVirtualXact(vxid, InvalidTransactionId, wait);
4681  }
4682 
4683  /*
4684  * If we aren't asked to wait, there's no need to set up a lock table
4685  * entry. The transaction is still in progress, so just return false.
4686  */
4687  if (!wait)
4688  {
4689  LWLockRelease(&proc->fpInfoLock);
4690  return false;
4691  }
4692 
4693  /*
4694  * OK, we're going to need to sleep on the VXID. But first, we must set
4695  * up the primary lock table entry, if needed (ie, convert the proc's
4696  * fast-path lock on its VXID to a regular lock).
4697  */
4698  if (proc->fpVXIDLock)
4699  {
4700  PROCLOCK *proclock;
4701  uint32 hashcode;
4702  LWLock *partitionLock;
4703 
4704  hashcode = LockTagHashCode(&tag);
4705 
4706  partitionLock = LockHashPartitionLock(hashcode);
4707  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4708 
4709  proclock = SetupLockInTable(LockMethods[DEFAULT_LOCKMETHOD], proc,
4710  &tag, hashcode, ExclusiveLock);
4711  if (!proclock)
4712  {
4713  LWLockRelease(partitionLock);
4714  LWLockRelease(&proc->fpInfoLock);
4715  ereport(ERROR,
4716  (errcode(ERRCODE_OUT_OF_MEMORY),
4717  errmsg("out of shared memory"),
4718  errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
4719  }
4720  GrantLock(proclock->tag.myLock, proclock, ExclusiveLock);
4721 
4722  LWLockRelease(partitionLock);
4723 
4724  proc->fpVXIDLock = false;
4725  }
4726 
4727  /*
4728  * If the proc has an XID now, we'll avoid a TwoPhaseGetXidByVirtualXID()
4729  * search. The proc might have assigned this XID but not yet locked it,
4730  * in which case the proc will lock this XID before releasing the VXID.
4731  * The fpInfoLock critical section excludes VirtualXactLockTableCleanup(),
4732  * so we won't save an XID of a different VXID. It doesn't matter whether
4733  * we save this before or after setting up the primary lock table entry.
4734  */
4735  xid = proc->xid;
4736 
4737  /* Done with proc->fpLockBits */
4738  LWLockRelease(&proc->fpInfoLock);
4739 
4740  /* Time to wait. */
4741  (void) LockAcquire(&tag, ShareLock, false, false);
4742 
4743  LockRelease(&tag, ShareLock, false);
4744  return XactLockForVirtualXact(vxid, xid, wait);
4745 }
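/*
 * Editor's note: a minimal sketch (not part of lock.c) of the usual
 * wait-for-conflicting-transactions pattern built on GetLockConflicts() and
 * VirtualXactLock(), similar in spirit to lmgr.c's WaitForLockers(); the
 * function name here is illustrative only.
 */
#include "postgres.h"

#include "storage/lock.h"

static void
wait_for_conflicting_vxids(const LOCKTAG *locktag, LOCKMODE lockmode)
{
	int			nvxids;
	VirtualTransactionId *vxids;

	/* Collect the VXIDs currently holding conflicting locks ... */
	vxids = GetLockConflicts(locktag, lockmode, &nvxids);

	/* ... and block until each of them has committed or aborted. */
	for (int i = 0; i < nvxids; i++)
		(void) VirtualXactLock(vxids[i], true);
}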
4746 
4747 /*
4748  * LockWaiterCount
4749  *
4750  * Find the number of lock requesters on this locktag
4751  */
4752 int
4753 LockWaiterCount(const LOCKTAG *locktag)
4754 {
4755  LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
4756  LOCK *lock;
4757  bool found;
4758  uint32 hashcode;
4759  LWLock *partitionLock;
4760  int waiters = 0;
4761 
4762  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4763  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4764 
4765  hashcode = LockTagHashCode(locktag);
4766  partitionLock = LockHashPartitionLock(hashcode);
4767  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4768 
4769  lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
4770  locktag,
4771  hashcode,
4772  HASH_FIND,
4773  &found);
4774  if (found)
4775  {
4776  Assert(lock != NULL);
4777  waiters = lock->nRequested;
4778  }
4779  LWLockRelease(partitionLock);
4780 
4781  return waiters;
4782 }
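/*
 * Editor's note: a minimal sketch (not part of lock.c) of the kind of caller
 * this function serves: checking how many backends are queued on a
 * relation-extension lock, as RelationExtensionLockWaiterCount() in lmgr.c
 * does; the function name here is illustrative only.
 */
#include "postgres.h"

#include "storage/lock.h"

static int
extension_lock_waiters(Oid dbid, Oid relid)
{
	LOCKTAG		tag;

	SET_LOCKTAG_RELATION_EXTEND(tag, dbid, relid);
	return LockWaiterCount(&tag);
}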
Definition: twophase.c:1280
int max_prepared_xacts
Definition: twophase.c:115
TransactionId TwoPhaseGetXidByVirtualXID(VirtualTransactionId vxid, bool *have_more)
Definition: twophase.c:852
PGPROC * TwoPhaseGetDummyProc(TransactionId xid, bool lock_held)
Definition: twophase.c:918
#define TWOPHASE_RM_LOCK_ID
Definition: twophase_rmgr.h:25
const char * type
bool RecoveryInProgress(void)
Definition: xlog.c:6333
#define XLogStandbyInfoActive()
Definition: xlog.h:123
bool InRecovery
Definition: xlogutils.c:50
#define InHotStandby
Definition: xlogutils.h:60
static struct link * links
Definition: zic.c:299