lock.c
1 /*-------------------------------------------------------------------------
2  *
3  * lock.c
4  * POSTGRES primary lock mechanism
5  *
6  * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  *
10  * IDENTIFICATION
11  * src/backend/storage/lmgr/lock.c
12  *
13  * NOTES
14  * A lock table is a shared memory hash table. When
15  * a process tries to acquire a lock of a type that conflicts
16  * with existing locks, it is put to sleep using the routines
17  * in storage/lmgr/proc.c.
18  *
19  * For the most part, this code should be invoked via lmgr.c
20  * or another lock-management module, not directly.
21  *
22  * Interface:
23  *
24  * InitLocks(), GetLocksMethodTable(), GetLockTagsMethodTable(),
25  * LockAcquire(), LockRelease(), LockReleaseAll(),
26  * LockCheckConflicts(), GrantLock()
27  *
28  *-------------------------------------------------------------------------
29  */
30 #include "postgres.h"
31 
32 #include <signal.h>
33 #include <unistd.h>
34 
35 #include "access/transam.h"
36 #include "access/twophase.h"
37 #include "access/twophase_rmgr.h"
38 #include "access/xact.h"
39 #include "access/xlog.h"
40 #include "miscadmin.h"
41 #include "pg_trace.h"
42 #include "pgstat.h"
43 #include "storage/proc.h"
44 #include "storage/procarray.h"
45 #include "storage/sinvaladt.h"
46 #include "storage/spin.h"
47 #include "storage/standby.h"
48 #include "utils/memutils.h"
49 #include "utils/ps_status.h"
50 #include "utils/resowner_private.h"
51 
52 
53 /* This configuration variable is used to set the lock table size */
54 int max_locks_per_xact; /* set by guc.c */
55 
56 #define NLOCKENTS() \
57  mul_size(max_locks_per_xact, add_size(MaxBackends, max_prepared_xacts))
58 
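/*
 * For example, with the default max_locks_per_xact = 64 and
 * max_prepared_xacts = 0, a cluster sized for MaxBackends = 100 (a value
 * chosen here only for illustration) requests NLOCKENTS() =
 * 64 * (100 + 0) = 6400 entries for the shared lock hash tables.
 */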
59 
60 /*
61  * Data structures defining the semantics of the standard lock methods.
62  *
63  * The conflict table defines the semantics of the various lock modes.
64  */
65 static const LOCKMASK LockConflicts[] = {
66  0,
67 
68  /* AccessShareLock */
69  LOCKBIT_ON(AccessExclusiveLock),
70 
71  /* RowShareLock */
72  LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
73 
74  /* RowExclusiveLock */
75  LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
76  LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
77 
78  /* ShareUpdateExclusiveLock */
79  LOCKBIT_ON(ShareUpdateExclusiveLock) |
80  LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
81  LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
82 
83  /* ShareLock */
84  LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
85  LOCKBIT_ON(ShareRowExclusiveLock) |
86  LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
87 
88  /* ShareRowExclusiveLock */
89  LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
90  LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
91  LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
92 
93  /* ExclusiveLock */
94  LOCKBIT_ON(RowShareLock) |
95  LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
96  LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
97  LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
98 
99  /* AccessExclusiveLock */
100  LOCKBIT_ON(AccessShareLock) | LOCKBIT_ON(RowShareLock) |
101  LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
102  LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
103  LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
104 
105 };
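/*
 * Example reading of the table: the RowExclusiveLock entry has bits set for
 * ShareLock, ShareRowExclusiveLock, ExclusiveLock and AccessExclusiveLock,
 * so ordinary INSERT/UPDATE/DELETE (RowExclusiveLock) blocks CREATE INDEX
 * (ShareLock) but not other concurrent INSERT/UPDATE/DELETE.
 */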
106 
107 /* Names of lock modes, for debug printouts */
108 static const char *const lock_mode_names[] =
109 {
110  "INVALID",
111  "AccessShareLock",
112  "RowShareLock",
113  "RowExclusiveLock",
114  "ShareUpdateExclusiveLock",
115  "ShareLock",
116  "ShareRowExclusiveLock",
117  "ExclusiveLock",
118  "AccessExclusiveLock"
119 };
120 
121 #ifndef LOCK_DEBUG
122 static bool Dummy_trace = false;
123 #endif
124 
125 static const LockMethodData default_lockmethod = {
126  AccessExclusiveLock, /* highest valid lock mode number */
127  LockConflicts,
128  lock_mode_names,
129 #ifdef LOCK_DEBUG
130  &Trace_locks
131 #else
132  &Dummy_trace
133 #endif
134 };
135 
136 static const LockMethodData user_lockmethod = {
137  AccessExclusiveLock, /* highest valid lock mode number */
138  LockConflicts,
139  lock_mode_names,
140 #ifdef LOCK_DEBUG
141  &Trace_userlocks
142 #else
143  &Dummy_trace
144 #endif
145 };
146 
147 /*
148  * map from lock method id to the lock table data structures
149  */
150 static const LockMethod LockMethods[] = {
151  NULL,
152  &default_lockmethod,
153  &user_lockmethod
154 };
155 
156 
157 /* Record that's written to 2PC state file when a lock is persisted */
158 typedef struct TwoPhaseLockRecord
159 {
160  LOCKTAG locktag;
161  LOCKMODE lockmode;
162 } TwoPhaseLockRecord;
163 
164 
165 /*
166  * Count of the number of fast path lock slots we believe to be used. This
167  * might be higher than the real number if another backend has transferred
168  * our locks to the primary lock table, but it can never be lower than the
169  * real value, since only we can acquire locks on our own behalf.
170  */
171 static int FastPathLocalUseCount = 0;
172 
173 /*
174  * Flag to indicate if the relation extension lock is held by this backend.
175  * This flag is used to ensure that while holding the relation extension lock
176  * we don't try to acquire a heavyweight lock on any other object. This
177  * restriction implies that the relation extension lock won't ever participate
178  * in the deadlock cycle because we can never wait for any other heavyweight
179  * lock after acquiring this lock.
180  *
181  * Such a restriction is okay for relation extension locks as unlike other
182  * heavyweight locks these are not held till the transaction end. These are
183  * taken for a short duration to extend a particular relation and then
184  * released.
185  */
186 static bool IsRelationExtensionLockHeld PG_USED_FOR_ASSERTS_ONLY = false;
187 
188 /*
189  * Flag to indicate if the page lock is held by this backend. We don't
190  * acquire any other heavyweight lock while holding the page lock except for
191  * relation extension. However, these locks are never taken in reverse order
192  * which implies that page locks will also never participate in the deadlock
193  * cycle.
194  *
195  * Similar to relation extension, page locks are also held for a short
196  * duration, so imposing such a restriction won't hurt.
197  */
198 static bool IsPageLockHeld PG_USED_FOR_ASSERTS_ONLY = false;
199 
200 /* Macros for manipulating proc->fpLockBits */
201 #define FAST_PATH_BITS_PER_SLOT 3
202 #define FAST_PATH_LOCKNUMBER_OFFSET 1
203 #define FAST_PATH_MASK ((1 << FAST_PATH_BITS_PER_SLOT) - 1)
204 #define FAST_PATH_GET_BITS(proc, n) \
205  (((proc)->fpLockBits >> (FAST_PATH_BITS_PER_SLOT * n)) & FAST_PATH_MASK)
206 #define FAST_PATH_BIT_POSITION(n, l) \
207  (AssertMacro((l) >= FAST_PATH_LOCKNUMBER_OFFSET), \
208  AssertMacro((l) < FAST_PATH_BITS_PER_SLOT+FAST_PATH_LOCKNUMBER_OFFSET), \
209  AssertMacro((n) < FP_LOCK_SLOTS_PER_BACKEND), \
210  ((l) - FAST_PATH_LOCKNUMBER_OFFSET + FAST_PATH_BITS_PER_SLOT * (n)))
211 #define FAST_PATH_SET_LOCKMODE(proc, n, l) \
212  (proc)->fpLockBits |= UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)
213 #define FAST_PATH_CLEAR_LOCKMODE(proc, n, l) \
214  (proc)->fpLockBits &= ~(UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l))
215 #define FAST_PATH_CHECK_LOCKMODE(proc, n, l) \
216  ((proc)->fpLockBits & (UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)))
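/*
 * Worked example of the bit layout: each fast-path slot uses
 * FAST_PATH_BITS_PER_SLOT = 3 bits of fpLockBits, one per eligible lock
 * mode (AccessShareLock = 1, RowShareLock = 2, RowExclusiveLock = 3).
 * For slot n = 2 and lockmode l = RowShareLock,
 * FAST_PATH_BIT_POSITION(2, 2) = (2 - 1) + 3 * 2 = 7, so
 * FAST_PATH_SET_LOCKMODE sets bit 7 of fpLockBits.
 */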
217 
218 /*
219  * The fast-path lock mechanism is concerned only with relation locks on
220  * unshared relations by backends bound to a database. The fast-path
221  * mechanism exists mostly to accelerate acquisition and release of locks
222  * that rarely conflict. Because ShareUpdateExclusiveLock is
223  * self-conflicting, it can't use the fast-path mechanism; but it also does
224  * not conflict with any of the locks that do, so we can ignore it completely.
225  */
226 #define EligibleForRelationFastPath(locktag, mode) \
227  ((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
228  (locktag)->locktag_type == LOCKTAG_RELATION && \
229  (locktag)->locktag_field1 == MyDatabaseId && \
230  MyDatabaseId != InvalidOid && \
231  (mode) < ShareUpdateExclusiveLock)
232 #define ConflictsWithRelationFastPath(locktag, mode) \
233  ((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
234  (locktag)->locktag_type == LOCKTAG_RELATION && \
235  (locktag)->locktag_field1 != InvalidOid && \
236  (mode) > ShareUpdateExclusiveLock)
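/*
 * Concretely: only AccessShareLock, RowShareLock and RowExclusiveLock (the
 * modes numbered below ShareUpdateExclusiveLock) on unshared relations of
 * the current database are eligible for the fast path, while ShareLock and
 * stronger modes are the "strong" modes that conflict with it.
 */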
237 
238 static bool FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode);
239 static bool FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode);
240 static bool FastPathTransferRelationLocks(LockMethod lockMethodTable,
241  const LOCKTAG *locktag, uint32 hashcode);
242 static PROCLOCK *FastPathGetRelationLockEntry(LOCALLOCK *locallock);
243 
244 /*
245  * To make the fast-path lock mechanism work, we must have some way of
246  * preventing the use of the fast-path when a conflicting lock might be present.
247  * We partition the locktag space into FAST_PATH_STRONG_LOCK_HASH_PARTITIONS,
248  * and maintain an integer count of the number of "strong" lockers
249  * in each partition. When any "strong" lockers are present (which is
250  * hopefully not very often), the fast-path mechanism can't be used, and we
251  * must fall back to the slower method of pushing matching locks directly
252  * into the main lock tables.
253  *
254  * The deadlock detector does not know anything about the fast path mechanism,
255  * so any locks that might be involved in a deadlock must be transferred from
256  * the fast-path queues to the main lock table.
257  */
258 
259 #define FAST_PATH_STRONG_LOCK_HASH_BITS 10
260 #define FAST_PATH_STRONG_LOCK_HASH_PARTITIONS \
261  (1 << FAST_PATH_STRONG_LOCK_HASH_BITS)
262 #define FastPathStrongLockHashPartition(hashcode) \
263  ((hashcode) % FAST_PATH_STRONG_LOCK_HASH_PARTITIONS)
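/*
 * For example, a locktag whose hashcode is 0x2C6F3ABE maps to strong-lock
 * partition 0x2C6F3ABE % 1024 = 702, since FAST_PATH_STRONG_LOCK_HASH_BITS
 * = 10 gives 1 << 10 = 1024 partitions.
 */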
264 
265 typedef struct
266 {
267  slock_t mutex;
268  uint32 count[FAST_PATH_STRONG_LOCK_HASH_PARTITIONS];
269 } FastPathStrongRelationLockData;
270 
271 static volatile FastPathStrongRelationLockData *FastPathStrongRelationLocks;
272 
273 
274 /*
275  * Pointers to hash tables containing lock state
276  *
277  * The LockMethodLockHash and LockMethodProcLockHash hash tables are in
278  * shared memory; LockMethodLocalHash is local to each backend.
279  */
280 static HTAB *LockMethodLockHash;
281 static HTAB *LockMethodProcLockHash;
282 static HTAB *LockMethodLocalHash;
283 
284 
285 /* private state for error cleanup */
286 static LOCALLOCK *StrongLockInProgress;
287 static LOCALLOCK *awaitedLock;
288 static ResourceOwner awaitedOwner;
289 
290 
291 #ifdef LOCK_DEBUG
292 
293 /*------
294  * The following configuration options are available for lock debugging:
295  *
296  * TRACE_LOCKS -- give a bunch of output what's going on in this file
297  * TRACE_USERLOCKS -- same but for user locks
298  * TRACE_LOCK_OIDMIN -- do not trace locks for tables below this oid
299  * (use to avoid output on system tables)
300  * TRACE_LOCK_TABLE -- trace locks on this table (oid) unconditionally
301  * DEBUG_DEADLOCKS -- currently dumps locks at untimely occasions ;)
302  *
303  * Furthermore, but in storage/lmgr/lwlock.c:
304  * TRACE_LWLOCKS -- trace lightweight locks (pretty useless)
305  *
306  * Define LOCK_DEBUG at compile time to get all these enabled.
307  * --------
308  */
309 
310 int Trace_lock_oidmin = FirstNormalObjectId;
311 bool Trace_locks = false;
312 bool Trace_userlocks = false;
313 int Trace_lock_table = 0;
314 bool Debug_deadlocks = false;
315 
316 
317 inline static bool
318 LOCK_DEBUG_ENABLED(const LOCKTAG *tag)
319 {
320  return
321  (*(LockMethods[tag->locktag_lockmethodid]->trace_flag) &&
322  ((Oid) tag->locktag_field2 >= (Oid) Trace_lock_oidmin))
323  || (Trace_lock_table &&
324  (tag->locktag_field2 == Trace_lock_table));
325 }
326 
327 
328 inline static void
329 LOCK_PRINT(const char *where, const LOCK *lock, LOCKMODE type)
330 {
331  if (LOCK_DEBUG_ENABLED(&lock->tag))
332  elog(LOG,
333  "%s: lock(%p) id(%u,%u,%u,%u,%u,%u) grantMask(%x) "
334  "req(%d,%d,%d,%d,%d,%d,%d)=%d "
335  "grant(%d,%d,%d,%d,%d,%d,%d)=%d wait(%d) type(%s)",
336  where, lock,
337  lock->tag.locktag_field1, lock->tag.locktag_field2,
338  lock->tag.locktag_field3, lock->tag.locktag_field4,
339  lock->tag.locktag_type, lock->tag.locktag_lockmethodid,
340  lock->grantMask,
341  lock->requested[1], lock->requested[2], lock->requested[3],
342  lock->requested[4], lock->requested[5], lock->requested[6],
343  lock->requested[7], lock->nRequested,
344  lock->granted[1], lock->granted[2], lock->granted[3],
345  lock->granted[4], lock->granted[5], lock->granted[6],
346  lock->granted[7], lock->nGranted,
347  lock->waitProcs.size,
348  LockMethods[LOCK_LOCKMETHOD(*lock)]->lockModeNames[type]);
349 }
350 
351 
352 inline static void
353 PROCLOCK_PRINT(const char *where, const PROCLOCK *proclockP)
354 {
355  if (LOCK_DEBUG_ENABLED(&proclockP->tag.myLock->tag))
356  elog(LOG,
357  "%s: proclock(%p) lock(%p) method(%u) proc(%p) hold(%x)",
358  where, proclockP, proclockP->tag.myLock,
359  PROCLOCK_LOCKMETHOD(*(proclockP)),
360  proclockP->tag.myProc, (int) proclockP->holdMask);
361 }
362 #else /* not LOCK_DEBUG */
363 
364 #define LOCK_PRINT(where, lock, type) ((void) 0)
365 #define PROCLOCK_PRINT(where, proclockP) ((void) 0)
366 #endif /* not LOCK_DEBUG */
367 
368 
369 static uint32 proclock_hash(const void *key, Size keysize);
370 static void RemoveLocalLock(LOCALLOCK *locallock);
371 static PROCLOCK *SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc,
372  const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode);
373 static void GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner);
374 static void BeginStrongLockAcquire(LOCALLOCK *locallock, uint32 fasthashcode);
375 static void FinishStrongLockAcquire(void);
376 static void WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner);
377 static void ReleaseLockIfHeld(LOCALLOCK *locallock, bool sessionLock);
378 static void LockReassignOwner(LOCALLOCK *locallock, ResourceOwner parent);
379 static bool UnGrantLock(LOCK *lock, LOCKMODE lockmode,
380  PROCLOCK *proclock, LockMethod lockMethodTable);
381 static void CleanUpLock(LOCK *lock, PROCLOCK *proclock,
382  LockMethod lockMethodTable, uint32 hashcode,
383  bool wakeupNeeded);
384 static void LockRefindAndRelease(LockMethod lockMethodTable, PGPROC *proc,
385  LOCKTAG *locktag, LOCKMODE lockmode,
386  bool decrement_strong_lock_count);
387 static void GetSingleProcBlockerStatusData(PGPROC *blocked_proc,
388  BlockedProcsData *data);
389 
390 
391 /*
392  * InitLocks -- Initialize the lock manager's data structures.
393  *
394  * This is called from CreateSharedMemoryAndSemaphores(), which see for
395  * more comments. In the normal postmaster case, the shared hash tables
396  * are created here, as well as a locallock hash table that will remain
397  * unused and empty in the postmaster itself. Backends inherit the pointers
398  * to the shared tables via fork(), and also inherit an image of the locallock
399  * hash table, which they proceed to use. In the EXEC_BACKEND case, each
400  * backend re-executes this code to obtain pointers to the already existing
401  * shared hash tables and to create its locallock hash table.
402  */
403 void
404 InitLocks(void)
405 {
406  HASHCTL info;
407  long init_table_size,
408  max_table_size;
409  bool found;
410 
411  /*
412  * Compute init/max size to request for lock hashtables. Note these
413  * calculations must agree with LockShmemSize!
414  */
415  max_table_size = NLOCKENTS();
416  init_table_size = max_table_size / 2;
417 
418  /*
419  * Allocate hash table for LOCK structs. This stores per-locked-object
420  * information.
421  */
422  info.keysize = sizeof(LOCKTAG);
423  info.entrysize = sizeof(LOCK);
424  info.num_partitions = NUM_LOCK_PARTITIONS;
425 
426  LockMethodLockHash = ShmemInitHash("LOCK hash",
427  init_table_size,
428  max_table_size,
429  &info,
430  HASH_ELEM | HASH_BLOBS | HASH_PARTITION);
431 
432  /* Assume an average of 2 holders per lock */
433  max_table_size *= 2;
434  init_table_size *= 2;
435 
436  /*
437  * Allocate hash table for PROCLOCK structs. This stores
438  * per-lock-per-holder information.
439  */
440  info.keysize = sizeof(PROCLOCKTAG);
441  info.entrysize = sizeof(PROCLOCK);
442  info.hash = proclock_hash;
443  info.num_partitions = NUM_LOCK_PARTITIONS;
444 
445  LockMethodProcLockHash = ShmemInitHash("PROCLOCK hash",
446  init_table_size,
447  max_table_size,
448  &info,
449  HASH_ELEM | HASH_FUNCTION | HASH_PARTITION);
450 
451  /*
452  * Allocate fast-path structures.
453  */
454  FastPathStrongRelationLocks =
455  ShmemInitStruct("Fast Path Strong Relation Lock Data",
456  sizeof(FastPathStrongRelationLockData), &found);
457  if (!found)
458  SpinLockInit(&FastPathStrongRelationLocks->mutex);
459 
460  /*
461  * Allocate non-shared hash table for LOCALLOCK structs. This stores lock
462  * counts and resource owner information.
463  *
464  * The non-shared table could already exist in this process (this occurs
465  * when the postmaster is recreating shared memory after a backend crash).
466  * If so, delete and recreate it. (We could simply leave it, since it
467  * ought to be empty in the postmaster, but for safety let's zap it.)
468  */
469  if (LockMethodLocalHash)
470  hash_destroy(LockMethodLocalHash);
471 
472  info.keysize = sizeof(LOCALLOCKTAG);
473  info.entrysize = sizeof(LOCALLOCK);
474 
475  LockMethodLocalHash = hash_create("LOCALLOCK hash",
476  16,
477  &info,
478  HASH_ELEM | HASH_BLOBS);
479 }
480 
481 
482 /*
483  * Fetch the lock method table associated with a given lock
484  */
485 LockMethod
486 GetLocksMethodTable(const LOCK *lock)
487 {
488  LOCKMETHODID lockmethodid = LOCK_LOCKMETHOD(*lock);
489 
490  Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
491  return LockMethods[lockmethodid];
492 }
493 
494 /*
495  * Fetch the lock method table associated with a given locktag
496  */
497 LockMethod
498 GetLockTagsMethodTable(const LOCKTAG *locktag)
499 {
500  LOCKMETHODID lockmethodid = (LOCKMETHODID) locktag->locktag_lockmethodid;
501 
502  Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
503  return LockMethods[lockmethodid];
504 }
505 
506 
507 /*
508  * Compute the hash code associated with a LOCKTAG.
509  *
510  * To avoid unnecessary recomputations of the hash code, we try to do this
511  * just once per function, and then pass it around as needed. Aside from
512  * passing the hashcode to hash_search_with_hash_value(), we can extract
513  * the lock partition number from the hashcode.
514  */
515 uint32
516 LockTagHashCode(const LOCKTAG *locktag)
517 {
518  return get_hash_value(LockMethodLockHash, (const void *) locktag);
519 }
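/*
 * The lock partition is then simply the low-order bits of this hashcode:
 * LockHashPartition() in lock.h reduces it modulo NUM_LOCK_PARTITIONS
 * (16 with the default LOG2_NUM_LOCK_PARTITIONS = 4), which is why
 * proclock_hash() below is careful not to disturb those bits.
 */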
520 
521 /*
522  * Compute the hash code associated with a PROCLOCKTAG.
523  *
524  * Because we want to use just one set of partition locks for both the
525  * LOCK and PROCLOCK hash tables, we have to make sure that PROCLOCKs
526  * fall into the same partition number as their associated LOCKs.
527  * dynahash.c expects the partition number to be the low-order bits of
528  * the hash code, and therefore a PROCLOCKTAG's hash code must have the
529  * same low-order bits as the associated LOCKTAG's hash code. We achieve
530  * this with this specialized hash function.
531  */
532 static uint32
533 proclock_hash(const void *key, Size keysize)
534 {
535  const PROCLOCKTAG *proclocktag = (const PROCLOCKTAG *) key;
536  uint32 lockhash;
537  Datum procptr;
538 
539  Assert(keysize == sizeof(PROCLOCKTAG));
540 
541  /* Look into the associated LOCK object, and compute its hash code */
542  lockhash = LockTagHashCode(&proclocktag->myLock->tag);
543 
544  /*
545  * To make the hash code also depend on the PGPROC, we xor the proc
546  * struct's address into the hash code, left-shifted so that the
547  * partition-number bits don't change. Since this is only a hash, we
548  * don't care if we lose high-order bits of the address; use an
549  * intermediate variable to suppress cast-pointer-to-int warnings.
550  */
551  procptr = PointerGetDatum(proclocktag->myProc);
552  lockhash ^= ((uint32) procptr) << LOG2_NUM_LOCK_PARTITIONS;
553 
554  return lockhash;
555 }
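/*
 * Example: if LockTagHashCode() yields 0x1234ABCD for the LOCK and the
 * PGPROC address, truncated to uint32, is 0x7F00, then with
 * LOG2_NUM_LOCK_PARTITIONS = 4 the PROCLOCK hash is
 * 0x1234ABCD ^ (0x7F00 << 4) = 0x1234ABCD ^ 0x0007F000. The low 4
 * partition bits (0xD) are unchanged, so the PROCLOCK falls in the same
 * partition as its LOCK.
 */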
556 
557 /*
558  * Compute the hash code associated with a PROCLOCKTAG, given the hashcode
559  * for its underlying LOCK.
560  *
561  * We use this just to avoid redundant calls of LockTagHashCode().
562  */
563 static inline uint32
564 ProcLockHashCode(const PROCLOCKTAG *proclocktag, uint32 hashcode)
565 {
566  uint32 lockhash = hashcode;
567  Datum procptr;
568 
569  /*
570  * This must match proclock_hash()!
571  */
572  procptr = PointerGetDatum(proclocktag->myProc);
573  lockhash ^= ((uint32) procptr) << LOG2_NUM_LOCK_PARTITIONS;
574 
575  return lockhash;
576 }
577 
578 /*
579  * Given two lock modes, return whether they would conflict.
580  */
581 bool
582 DoLockModesConflict(LOCKMODE mode1, LOCKMODE mode2)
583 {
584  LockMethod lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];
585 
586  if (lockMethodTable->conflictTab[mode1] & LOCKBIT_ON(mode2))
587  return true;
588 
589  return false;
590 }
591 
592 /*
593  * LockHeldByMe -- test whether lock 'locktag' is held with mode 'lockmode'
594  * by the current transaction
595  */
596 bool
597 LockHeldByMe(const LOCKTAG *locktag, LOCKMODE lockmode)
598 {
599  LOCALLOCKTAG localtag;
600  LOCALLOCK *locallock;
601 
602  /*
603  * See if there is a LOCALLOCK entry for this lock and lockmode
604  */
605  MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
606  localtag.lock = *locktag;
607  localtag.mode = lockmode;
608 
609  locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
610  (void *) &localtag,
611  HASH_FIND, NULL);
612 
613  return (locallock && locallock->nLocks > 0);
614 }
615 
616 #ifdef USE_ASSERT_CHECKING
617 /*
618  * GetLockMethodLocalHash -- return the hash of local locks, for modules that
619  * evaluate assertions based on all locks held.
620  */
621 HTAB *
622 GetLockMethodLocalHash(void)
623 {
624  return LockMethodLocalHash;
625 }
626 #endif
627 
628 /*
629  * LockHasWaiters -- look up 'locktag' and check if releasing this
630  * lock would wake up other processes waiting for it.
631  */
632 bool
633 LockHasWaiters(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
634 {
635  LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
636  LockMethod lockMethodTable;
637  LOCALLOCKTAG localtag;
638  LOCALLOCK *locallock;
639  LOCK *lock;
640  PROCLOCK *proclock;
641  LWLock *partitionLock;
642  bool hasWaiters = false;
643 
644  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
645  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
646  lockMethodTable = LockMethods[lockmethodid];
647  if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
648  elog(ERROR, "unrecognized lock mode: %d", lockmode);
649 
650 #ifdef LOCK_DEBUG
651  if (LOCK_DEBUG_ENABLED(locktag))
652  elog(LOG, "LockHasWaiters: lock [%u,%u] %s",
653  locktag->locktag_field1, locktag->locktag_field2,
654  lockMethodTable->lockModeNames[lockmode]);
655 #endif
656 
657  /*
658  * Find the LOCALLOCK entry for this lock and lockmode
659  */
660  MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
661  localtag.lock = *locktag;
662  localtag.mode = lockmode;
663 
664  locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
665  (void *) &localtag,
666  HASH_FIND, NULL);
667 
668  /*
669  * let the caller print its own error message, too. Do not ereport(ERROR).
670  */
671  if (!locallock || locallock->nLocks <= 0)
672  {
673  elog(WARNING, "you don't own a lock of type %s",
674  lockMethodTable->lockModeNames[lockmode]);
675  return false;
676  }
677 
678  /*
679  * Check the shared lock table.
680  */
681  partitionLock = LockHashPartitionLock(locallock->hashcode);
682 
683  LWLockAcquire(partitionLock, LW_SHARED);
684 
685  /*
686  * We don't need to re-find the lock or proclock, since we kept their
687  * addresses in the locallock table, and they couldn't have been removed
688  * while we were holding a lock on them.
689  */
690  lock = locallock->lock;
691  LOCK_PRINT("LockHasWaiters: found", lock, lockmode);
692  proclock = locallock->proclock;
693  PROCLOCK_PRINT("LockHasWaiters: found", proclock);
694 
695  /*
696  * Double-check that we are actually holding a lock of the type we want to
697  * release.
698  */
699  if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
700  {
701  PROCLOCK_PRINT("LockHasWaiters: WRONGTYPE", proclock);
702  LWLockRelease(partitionLock);
703  elog(WARNING, "you don't own a lock of type %s",
704  lockMethodTable->lockModeNames[lockmode]);
705  RemoveLocalLock(locallock);
706  return false;
707  }
708 
709  /*
710  * Do the checking.
711  */
712  if ((lockMethodTable->conflictTab[lockmode] & lock->waitMask) != 0)
713  hasWaiters = true;
714 
715  LWLockRelease(partitionLock);
716 
717  return hasWaiters;
718 }
719 
720 /*
721  * LockAcquire -- Check for lock conflicts, sleep if conflict found,
722  * set lock if/when no conflicts.
723  *
724  * Inputs:
725  * locktag: unique identifier for the lockable object
726  * lockmode: lock mode to acquire
727  * sessionLock: if true, acquire lock for session not current transaction
728  * dontWait: if true, don't wait to acquire lock
729  *
730  * Returns one of:
731  * LOCKACQUIRE_NOT_AVAIL lock not available, and dontWait=true
732  * LOCKACQUIRE_OK lock successfully acquired
733  * LOCKACQUIRE_ALREADY_HELD incremented count for lock already held
734  * LOCKACQUIRE_ALREADY_CLEAR incremented count for lock already clear
735  *
736  * In the normal case where dontWait=false and the caller doesn't need to
737  * distinguish a freshly acquired lock from one already taken earlier in
738  * this same transaction, there is no need to examine the return value.
739  *
740  * Side Effects: The lock is acquired and recorded in lock tables.
741  *
742  * NOTE: if we wait for the lock, there is no way to abort the wait
743  * short of aborting the transaction.
744  */
745 LockAcquireResult
746 LockAcquire(const LOCKTAG *locktag,
747  LOCKMODE lockmode,
748  bool sessionLock,
749  bool dontWait)
750 {
751  return LockAcquireExtended(locktag, lockmode, sessionLock, dontWait,
752  true, NULL);
753 }
754 
755 /*
756  * LockAcquireExtended - allows us to specify additional options
757  *
758  * reportMemoryError specifies whether a lock request that fills the lock
759  * table should generate an ERROR or not. Passing "false" allows the caller
760  * to attempt to recover from lock-table-full situations, perhaps by forcibly
761  * canceling other lock holders and then retrying. Note, however, that the
762  * return code for that is LOCKACQUIRE_NOT_AVAIL, so that it's unsafe to use
763  * in combination with dontWait = true, as the cause of failure couldn't be
764  * distinguished.
765  *
766  * If locallockp isn't NULL, *locallockp receives a pointer to the LOCALLOCK
767  * table entry if a lock is successfully acquired, or NULL if not.
768  */
769 LockAcquireResult
770 LockAcquireExtended(const LOCKTAG *locktag,
771  LOCKMODE lockmode,
772  bool sessionLock,
773  bool dontWait,
774  bool reportMemoryError,
775  LOCALLOCK **locallockp)
776 {
777  LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
778  LockMethod lockMethodTable;
779  LOCALLOCKTAG localtag;
780  LOCALLOCK *locallock;
781  LOCK *lock;
782  PROCLOCK *proclock;
783  bool found;
784  ResourceOwner owner;
785  uint32 hashcode;
786  LWLock *partitionLock;
787  bool found_conflict;
788  bool log_lock = false;
789 
790  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
791  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
792  lockMethodTable = LockMethods[lockmethodid];
793  if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
794  elog(ERROR, "unrecognized lock mode: %d", lockmode);
795 
796  if (RecoveryInProgress() && !InRecovery &&
797  (locktag->locktag_type == LOCKTAG_OBJECT ||
798  locktag->locktag_type == LOCKTAG_RELATION) &&
799  lockmode > RowExclusiveLock)
800  ereport(ERROR,
801  (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
802  errmsg("cannot acquire lock mode %s on database objects while recovery is in progress",
803  lockMethodTable->lockModeNames[lockmode]),
804  errhint("Only RowExclusiveLock or less can be acquired on database objects during recovery.")));
805 
806 #ifdef LOCK_DEBUG
807  if (LOCK_DEBUG_ENABLED(locktag))
808  elog(LOG, "LockAcquire: lock [%u,%u] %s",
809  locktag->locktag_field1, locktag->locktag_field2,
810  lockMethodTable->lockModeNames[lockmode]);
811 #endif
812 
813  /* Identify owner for lock */
814  if (sessionLock)
815  owner = NULL;
816  else
817  owner = CurrentResourceOwner;
818 
819  /*
820  * Find or create a LOCALLOCK entry for this lock and lockmode
821  */
822  MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
823  localtag.lock = *locktag;
824  localtag.mode = lockmode;
825 
826  locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
827  (void *) &localtag,
828  HASH_ENTER, &found);
829 
830  /*
831  * if it's a new locallock object, initialize it
832  */
833  if (!found)
834  {
835  locallock->lock = NULL;
836  locallock->proclock = NULL;
837  locallock->hashcode = LockTagHashCode(&(localtag.lock));
838  locallock->nLocks = 0;
839  locallock->holdsStrongLockCount = false;
840  locallock->lockCleared = false;
841  locallock->numLockOwners = 0;
842  locallock->maxLockOwners = 8;
843  locallock->lockOwners = NULL; /* in case next line fails */
844  locallock->lockOwners = (LOCALLOCKOWNER *)
845  MemoryContextAlloc(TopMemoryContext,
846  locallock->maxLockOwners * sizeof(LOCALLOCKOWNER));
847  }
848  else
849  {
850  /* Make sure there will be room to remember the lock */
851  if (locallock->numLockOwners >= locallock->maxLockOwners)
852  {
853  int newsize = locallock->maxLockOwners * 2;
854 
855  locallock->lockOwners = (LOCALLOCKOWNER *)
856  repalloc(locallock->lockOwners,
857  newsize * sizeof(LOCALLOCKOWNER));
858  locallock->maxLockOwners = newsize;
859  }
860  }
861  hashcode = locallock->hashcode;
862 
863  if (locallockp)
864  *locallockp = locallock;
865 
866  /*
867  * If we already hold the lock, we can just increase the count locally.
868  *
869  * If lockCleared is already set, caller need not worry about absorbing
870  * sinval messages related to the lock's object.
871  */
872  if (locallock->nLocks > 0)
873  {
874  GrantLockLocal(locallock, owner);
875  if (locallock->lockCleared)
876  return LOCKACQUIRE_ALREADY_CLEAR;
877  else
878  return LOCKACQUIRE_ALREADY_HELD;
879  }
880 
881  /*
882  * We don't acquire any other heavyweight lock while holding the relation
883  * extension lock. We do allow to acquire the same relation extension
884  * lock more than once but that case won't reach here.
885  */
886  Assert(!IsRelationExtensionLockHeld);
887 
888  /*
889  * We don't acquire any other heavyweight lock while holding the page lock
890  * except for relation extension.
891  */
892  Assert(!IsPageLockHeld ||
893  (locktag->locktag_type == LOCKTAG_RELATION_EXTEND));
894 
895  /*
896  * Prepare to emit a WAL record if acquisition of this lock needs to be
897  * replayed in a standby server.
898  *
899  * Here we prepare to log; after lock is acquired we'll issue log record.
900  * This arrangement simplifies error recovery in case the preparation step
901  * fails.
902  *
903  * Only AccessExclusiveLocks can conflict with lock types that read-only
904  * transactions can acquire in a standby server. Make sure this definition
905  * matches the one in GetRunningTransactionLocks().
906  */
907  if (lockmode >= AccessExclusiveLock &&
908  locktag->locktag_type == LOCKTAG_RELATION &&
909  !RecoveryInProgress() &&
910  XLogStandbyInfoActive())
911  {
912  LogAccessExclusiveLockPrepare();
913  log_lock = true;
914  }
915 
916  /*
917  * Attempt to take lock via fast path, if eligible. But if we remember
918  * having filled up the fast path array, we don't attempt to make any
919  * further use of it until we release some locks. It's possible that some
920  * other backend has transferred some of those locks to the shared hash
921  * table, leaving space free, but it's not worth acquiring the LWLock just
922  * to check. It's also possible that we're acquiring a second or third
923  * lock type on a relation we have already locked using the fast-path, but
924  * for now we don't worry about that case either.
925  */
926  if (EligibleForRelationFastPath(locktag, lockmode) &&
927  FastPathLocalUseCount < FP_LOCK_SLOTS_PER_BACKEND)
928  {
929  uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
930  bool acquired;
931 
932  /*
933  * LWLockAcquire acts as a memory sequencing point, so it's safe to
934  * assume that any strong locker whose increment to
935  * FastPathStrongRelationLocks->counts becomes visible after we test
936  * it has yet to begin to transfer fast-path locks.
937  */
938  LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
939  if (FastPathStrongRelationLocks->count[fasthashcode] != 0)
940  acquired = false;
941  else
942  acquired = FastPathGrantRelationLock(locktag->locktag_field2,
943  lockmode);
944  LWLockRelease(&MyProc->fpInfoLock);
945  if (acquired)
946  {
947  /*
948  * The locallock might contain stale pointers to some old shared
949  * objects; we MUST reset these to null before considering the
950  * lock to be acquired via fast-path.
951  */
952  locallock->lock = NULL;
953  locallock->proclock = NULL;
954  GrantLockLocal(locallock, owner);
955  return LOCKACQUIRE_OK;
956  }
957  }
958 
959  /*
960  * If this lock could potentially have been taken via the fast-path by
961  * some other backend, we must (temporarily) disable further use of the
962  * fast-path for this lock tag, and migrate any locks already taken via
963  * this method to the main lock table.
964  */
965  if (ConflictsWithRelationFastPath(locktag, lockmode))
966  {
967  uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
968 
969  BeginStrongLockAcquire(locallock, fasthashcode);
970  if (!FastPathTransferRelationLocks(lockMethodTable, locktag,
971  hashcode))
972  {
973  AbortStrongLockAcquire();
974  if (locallock->nLocks == 0)
975  RemoveLocalLock(locallock);
976  if (locallockp)
977  *locallockp = NULL;
978  if (reportMemoryError)
979  ereport(ERROR,
980  (errcode(ERRCODE_OUT_OF_MEMORY),
981  errmsg("out of shared memory"),
982  errhint("You might need to increase max_locks_per_transaction.")));
983  else
984  return LOCKACQUIRE_NOT_AVAIL;
985  }
986  }
987 
988  /*
989  * We didn't find the lock in our LOCALLOCK table, and we didn't manage to
990  * take it via the fast-path, either, so we've got to mess with the shared
991  * lock table.
992  */
993  partitionLock = LockHashPartitionLock(hashcode);
994 
995  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
996 
997  /*
998  * Find or create lock and proclock entries with this tag
999  *
1000  * Note: if the locallock object already existed, it might have a pointer
1001  * to the lock already ... but we should not assume that that pointer is
1002  * valid, since a lock object with zero hold and request counts can go
1003  * away anytime. So we have to use SetupLockInTable() to recompute the
1004  * lock and proclock pointers, even if they're already set.
1005  */
1006  proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
1007  hashcode, lockmode);
1008  if (!proclock)
1009  {
1010  AbortStrongLockAcquire();
1011  LWLockRelease(partitionLock);
1012  if (locallock->nLocks == 0)
1013  RemoveLocalLock(locallock);
1014  if (locallockp)
1015  *locallockp = NULL;
1016  if (reportMemoryError)
1017  ereport(ERROR,
1018  (errcode(ERRCODE_OUT_OF_MEMORY),
1019  errmsg("out of shared memory"),
1020  errhint("You might need to increase max_locks_per_transaction.")));
1021  else
1022  return LOCKACQUIRE_NOT_AVAIL;
1023  }
1024  locallock->proclock = proclock;
1025  lock = proclock->tag.myLock;
1026  locallock->lock = lock;
1027 
1028  /*
1029  * If lock requested conflicts with locks requested by waiters, must join
1030  * wait queue. Otherwise, check for conflict with already-held locks.
1031  * (That's last because most complex check.)
1032  */
1033  if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
1034  found_conflict = true;
1035  else
1036  found_conflict = LockCheckConflicts(lockMethodTable, lockmode,
1037  lock, proclock);
1038 
1039  if (!found_conflict)
1040  {
1041  /* No conflict with held or previously requested locks */
1042  GrantLock(lock, proclock, lockmode);
1043  GrantLockLocal(locallock, owner);
1044  }
1045  else
1046  {
1047  /*
1048  * We can't acquire the lock immediately. If caller specified no
1049  * blocking, remove useless table entries and return
1050  * LOCKACQUIRE_NOT_AVAIL without waiting.
1051  */
1052  if (dontWait)
1053  {
1054  AbortStrongLockAcquire();
1055  if (proclock->holdMask == 0)
1056  {
1057  uint32 proclock_hashcode;
1058 
1059  proclock_hashcode = ProcLockHashCode(&proclock->tag, hashcode);
1060  SHMQueueDelete(&proclock->lockLink);
1061  SHMQueueDelete(&proclock->procLink);
1062  if (!hash_search_with_hash_value(LockMethodProcLockHash,
1063  (void *) &(proclock->tag),
1064  proclock_hashcode,
1065  HASH_REMOVE,
1066  NULL))
1067  elog(PANIC, "proclock table corrupted");
1068  }
1069  else
1070  PROCLOCK_PRINT("LockAcquire: NOWAIT", proclock);
1071  lock->nRequested--;
1072  lock->requested[lockmode]--;
1073  LOCK_PRINT("LockAcquire: conditional lock failed", lock, lockmode);
1074  Assert((lock->nRequested > 0) && (lock->requested[lockmode] >= 0));
1075  Assert(lock->nGranted <= lock->nRequested);
1076  LWLockRelease(partitionLock);
1077  if (locallock->nLocks == 0)
1078  RemoveLocalLock(locallock);
1079  if (locallockp)
1080  *locallockp = NULL;
1081  return LOCKACQUIRE_NOT_AVAIL;
1082  }
1083 
1084  /*
1085  * Set bitmask of locks this process already holds on this object.
1086  */
1087  MyProc->heldLocks = proclock->holdMask;
1088 
1089  /*
1090  * Sleep till someone wakes me up.
1091  */
1092 
1093  TRACE_POSTGRESQL_LOCK_WAIT_START(locktag->locktag_field1,
1094  locktag->locktag_field2,
1095  locktag->locktag_field3,
1096  locktag->locktag_field4,
1097  locktag->locktag_type,
1098  lockmode);
1099 
1100  WaitOnLock(locallock, owner);
1101 
1102  TRACE_POSTGRESQL_LOCK_WAIT_DONE(locktag->locktag_field1,
1103  locktag->locktag_field2,
1104  locktag->locktag_field3,
1105  locktag->locktag_field4,
1106  locktag->locktag_type,
1107  lockmode);
1108 
1109  /*
1110  * NOTE: do not do any material change of state between here and
1111  * return. All required changes in locktable state must have been
1112  * done when the lock was granted to us --- see notes in WaitOnLock.
1113  */
1114 
1115  /*
1116  * Check the proclock entry status, in case something in the ipc
1117  * communication doesn't work correctly.
1118  */
1119  if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
1120  {
1121  AbortStrongLockAcquire();
1122  PROCLOCK_PRINT("LockAcquire: INCONSISTENT", proclock);
1123  LOCK_PRINT("LockAcquire: INCONSISTENT", lock, lockmode);
1124  /* Should we retry ? */
1125  LWLockRelease(partitionLock);
1126  elog(ERROR, "LockAcquire failed");
1127  }
1128  PROCLOCK_PRINT("LockAcquire: granted", proclock);
1129  LOCK_PRINT("LockAcquire: granted", lock, lockmode);
1130  }
1131 
1132  /*
1133  * Lock state is fully up-to-date now; if we error out after this, no
1134  * special error cleanup is required.
1135  */
1136  FinishStrongLockAcquire();
1137 
1138  LWLockRelease(partitionLock);
1139 
1140  /*
1141  * Emit a WAL record if acquisition of this lock needs to be replayed in a
1142  * standby server.
1143  */
1144  if (log_lock)
1145  {
1146  /*
1147  * Decode the locktag back to the original values, to avoid sending
1148  * lots of empty bytes with every message. See lock.h to check how a
1149  * locktag is defined for LOCKTAG_RELATION
1150  */
1151  LogAccessExclusiveLock(locktag->locktag_field1,
1152  locktag->locktag_field2);
1153  }
1154 
1155  return LOCKACQUIRE_OK;
1156 }
1157 
1158 /*
1159  * Find or create LOCK and PROCLOCK objects as needed for a new lock
1160  * request.
1161  *
1162  * Returns the PROCLOCK object, or NULL if we failed to create the objects
1163  * for lack of shared memory.
1164  *
1165  * The appropriate partition lock must be held at entry, and will be
1166  * held at exit.
1167  */
1168 static PROCLOCK *
1169 SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc,
1170  const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode)
1171 {
1172  LOCK *lock;
1173  PROCLOCK *proclock;
1174  PROCLOCKTAG proclocktag;
1175  uint32 proclock_hashcode;
1176  bool found;
1177 
1178  /*
1179  * Find or create a lock with this tag.
1180  */
1181  lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
1182  (const void *) locktag,
1183  hashcode,
1184  HASH_ENTER_NULL,
1185  &found);
1186  if (!lock)
1187  return NULL;
1188 
1189  /*
1190  * if it's a new lock object, initialize it
1191  */
1192  if (!found)
1193  {
1194  lock->grantMask = 0;
1195  lock->waitMask = 0;
1196  SHMQueueInit(&(lock->procLocks));
1197  ProcQueueInit(&(lock->waitProcs));
1198  lock->nRequested = 0;
1199  lock->nGranted = 0;
1200  MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
1201  MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
1202  LOCK_PRINT("LockAcquire: new", lock, lockmode);
1203  }
1204  else
1205  {
1206  LOCK_PRINT("LockAcquire: found", lock, lockmode);
1207  Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
1208  Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
1209  Assert(lock->nGranted <= lock->nRequested);
1210  }
1211 
1212  /*
1213  * Create the hash key for the proclock table.
1214  */
1215  proclocktag.myLock = lock;
1216  proclocktag.myProc = proc;
1217 
1218  proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
1219 
1220  /*
1221  * Find or create a proclock entry with this tag
1222  */
1223  proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
1224  (void *) &proclocktag,
1225  proclock_hashcode,
1226  HASH_ENTER_NULL,
1227  &found);
1228  if (!proclock)
1229  {
1230  /* Oops, not enough shmem for the proclock */
1231  if (lock->nRequested == 0)
1232  {
1233  /*
1234  * There are no other requestors of this lock, so garbage-collect
1235  * the lock object. We *must* do this to avoid a permanent leak
1236  * of shared memory, because there won't be anything to cause
1237  * anyone to release the lock object later.
1238  */
1239  Assert(SHMQueueEmpty(&(lock->procLocks)));
1240  if (!hash_search_with_hash_value(LockMethodLockHash,
1241  (void *) &(lock->tag),
1242  hashcode,
1243  HASH_REMOVE,
1244  NULL))
1245  elog(PANIC, "lock table corrupted");
1246  }
1247  return NULL;
1248  }
1249 
1250  /*
1251  * If new, initialize the new entry
1252  */
1253  if (!found)
1254  {
1255  uint32 partition = LockHashPartition(hashcode);
1256 
1257  /*
1258  * It might seem unsafe to access proclock->groupLeader without a
1259  * lock, but it's not really. Either we are initializing a proclock
1260  * on our own behalf, in which case our group leader isn't changing
1261  * because the group leader for a process can only ever be changed by
1262  * the process itself; or else we are transferring a fast-path lock to
1263  * the main lock table, in which case that process can't change its
1264  * lock group leader without first releasing all of its locks (and in
1265  * particular the one we are currently transferring).
1266  */
1267  proclock->groupLeader = proc->lockGroupLeader != NULL ?
1268  proc->lockGroupLeader : proc;
1269  proclock->holdMask = 0;
1270  proclock->releaseMask = 0;
1271  /* Add proclock to appropriate lists */
1272  SHMQueueInsertBefore(&lock->procLocks, &proclock->lockLink);
1273  SHMQueueInsertBefore(&(proc->myProcLocks[partition]),
1274  &proclock->procLink);
1275  PROCLOCK_PRINT("LockAcquire: new", proclock);
1276  }
1277  else
1278  {
1279  PROCLOCK_PRINT("LockAcquire: found", proclock);
1280  Assert((proclock->holdMask & ~lock->grantMask) == 0);
1281 
1282 #ifdef CHECK_DEADLOCK_RISK
1283 
1284  /*
1285  * Issue warning if we already hold a lower-level lock on this object
1286  * and do not hold a lock of the requested level or higher. This
1287  * indicates a deadlock-prone coding practice (eg, we'd have a
1288  * deadlock if another backend were following the same code path at
1289  * about the same time).
1290  *
1291  * This is not enabled by default, because it may generate log entries
1292  * about user-level coding practices that are in fact safe in context.
1293  * It can be enabled to help find system-level problems.
1294  *
1295  * XXX Doing numeric comparison on the lockmodes is a hack; it'd be
1296  * better to use a table. For now, though, this works.
1297  */
1298  {
1299  int i;
1300 
1301  for (i = lockMethodTable->numLockModes; i > 0; i--)
1302  {
1303  if (proclock->holdMask & LOCKBIT_ON(i))
1304  {
1305  if (i >= (int) lockmode)
1306  break; /* safe: we have a lock >= req level */
1307  elog(LOG, "deadlock risk: raising lock level"
1308  " from %s to %s on object %u/%u/%u",
1309  lockMethodTable->lockModeNames[i],
1310  lockMethodTable->lockModeNames[lockmode],
1311  lock->tag.locktag_field1, lock->tag.locktag_field2,
1312  lock->tag.locktag_field3);
1313  break;
1314  }
1315  }
1316  }
1317 #endif /* CHECK_DEADLOCK_RISK */
1318  }
1319 
1320  /*
1321  * lock->nRequested and lock->requested[] count the total number of
1322  * requests, whether granted or waiting, so increment those immediately.
1323  * The other counts don't increment till we get the lock.
1324  */
1325  lock->nRequested++;
1326  lock->requested[lockmode]++;
1327  Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
1328 
1329  /*
1330  * We shouldn't already hold the desired lock; else locallock table is
1331  * broken.
1332  */
1333  if (proclock->holdMask & LOCKBIT_ON(lockmode))
1334  elog(ERROR, "lock %s on object %u/%u/%u is already held",
1335  lockMethodTable->lockModeNames[lockmode],
1336  lock->tag.locktag_field1, lock->tag.locktag_field2,
1337  lock->tag.locktag_field3);
1338 
1339  return proclock;
1340 }
1341 
1342 /*
1343  * Check and set/reset the flag that we hold the relation extension/page lock.
1344  *
1345  * It is the caller's responsibility to call this function after
1346  * acquiring/releasing the relation extension/page lock.
1347  *
1348  * Pass acquired as true if lock is acquired, false otherwise.
1349  */
1350 static inline void
1351 CheckAndSetLockHeld(LOCALLOCK *locallock, bool acquired)
1352 {
1353 #ifdef USE_ASSERT_CHECKING
1354  if (LOCALLOCK_LOCKTAG(*locallock) == LOCKTAG_RELATION_EXTEND)
1355  IsRelationExtensionLockHeld = acquired;
1356  else if (LOCALLOCK_LOCKTAG(*locallock) == LOCKTAG_PAGE)
1357  IsPageLockHeld = acquired;
1358 
1359 #endif
1360 }
1361 
1362 /*
1363  * Subroutine to free a locallock entry
1364  */
1365 static void
1366 RemoveLocalLock(LOCALLOCK *locallock)
1367 {
1368  int i;
1369 
1370  for (i = locallock->numLockOwners - 1; i >= 0; i--)
1371  {
1372  if (locallock->lockOwners[i].owner != NULL)
1373  ResourceOwnerForgetLock(locallock->lockOwners[i].owner, locallock);
1374  }
1375  locallock->numLockOwners = 0;
1376  if (locallock->lockOwners != NULL)
1377  pfree(locallock->lockOwners);
1378  locallock->lockOwners = NULL;
1379 
1380  if (locallock->holdsStrongLockCount)
1381  {
1382  uint32 fasthashcode;
1383 
1384  fasthashcode = FastPathStrongLockHashPartition(locallock->hashcode);
1385 
1386  SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
1387  Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
1388  FastPathStrongRelationLocks->count[fasthashcode]--;
1389  locallock->holdsStrongLockCount = false;
1390  SpinLockRelease(&FastPathStrongRelationLocks->mutex);
1391  }
1392 
1393  if (!hash_search(LockMethodLocalHash,
1394  (void *) &(locallock->tag),
1395  HASH_REMOVE, NULL))
1396  elog(WARNING, "locallock table corrupted");
1397 
1398  /*
1399  * Indicate that the lock is released for certain types of locks
1400  */
1401  CheckAndSetLockHeld(locallock, false);
1402 }
1403 
1404 /*
1405  * LockCheckConflicts -- test whether requested lock conflicts
1406  * with those already granted
1407  *
1408  * Returns true if conflict, false if no conflict.
1409  *
1410  * NOTES:
1411  * Here's what makes this complicated: one process's locks don't
1412  * conflict with one another, no matter what purpose they are held for
1413  * (eg, session and transaction locks do not conflict). Nor do the locks
1414  * of one process in a lock group conflict with those of another process in
1415  * the same group. So, we must subtract off these locks when determining
1416  * whether the requested new lock conflicts with those already held.
1417  */
1418 bool
1419 LockCheckConflicts(LockMethod lockMethodTable,
1420  LOCKMODE lockmode,
1421  LOCK *lock,
1422  PROCLOCK *proclock)
1423 {
1424  int numLockModes = lockMethodTable->numLockModes;
1425  LOCKMASK myLocks;
1426  int conflictMask = lockMethodTable->conflictTab[lockmode];
1427  int conflictsRemaining[MAX_LOCKMODES];
1428  int totalConflictsRemaining = 0;
1429  int i;
1430  SHM_QUEUE *procLocks;
1431  PROCLOCK *otherproclock;
1432 
1433  /*
1434  * first check for global conflicts: If no locks conflict with my request,
1435  * then I get the lock.
1436  *
1437  * Checking for conflict: lock->grantMask represents the types of
1438  * currently held locks. conflictTable[lockmode] has a bit set for each
1439  * type of lock that conflicts with request. Bitwise compare tells if
1440  * there is a conflict.
1441  */
1442  if (!(conflictMask & lock->grantMask))
1443  {
1444  PROCLOCK_PRINT("LockCheckConflicts: no conflict", proclock);
1445  return false;
1446  }
1447 
1448  /*
1449  * Rats. Something conflicts. But it could still be my own lock, or a
1450  * lock held by another member of my locking group. First, figure out how
1451  * many conflicts remain after subtracting out any locks I hold myself.
1452  */
1453  myLocks = proclock->holdMask;
1454  for (i = 1; i <= numLockModes; i++)
1455  {
1456  if ((conflictMask & LOCKBIT_ON(i)) == 0)
1457  {
1458  conflictsRemaining[i] = 0;
1459  continue;
1460  }
1461  conflictsRemaining[i] = lock->granted[i];
1462  if (myLocks & LOCKBIT_ON(i))
1463  --conflictsRemaining[i];
1464  totalConflictsRemaining += conflictsRemaining[i];
1465  }
1466 
1467  /* If no conflicts remain, we get the lock. */
1468  if (totalConflictsRemaining == 0)
1469  {
1470  PROCLOCK_PRINT("LockCheckConflicts: resolved (simple)", proclock);
1471  return false;
1472  }
1473 
1474  /* If no group locking, it's definitely a conflict. */
1475  if (proclock->groupLeader == MyProc && MyProc->lockGroupLeader == NULL)
1476  {
1477  Assert(proclock->tag.myProc == MyProc);
1478  PROCLOCK_PRINT("LockCheckConflicts: conflicting (simple)",
1479  proclock);
1480  return true;
1481  }
1482 
1483  /*
1484  * Relation extension and page locks conflict even between members of the
1485  * same lock group.
1486  */
1487  if (LOCK_LOCKTAG(*lock) == LOCKTAG_RELATION_EXTEND ||
1488  (LOCK_LOCKTAG(*lock) == LOCKTAG_PAGE))
1489  {
1490  PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)",
1491  proclock);
1492  return true;
1493  }
1494 
1495  /*
1496  * Locks held in conflicting modes by members of our own lock group are
1497  * not real conflicts; we can subtract those out and see if we still have
1498  * a conflict. This is O(N) in the number of processes holding or
1499  * awaiting locks on this object. We could improve that by making the
1500  * shared memory state more complex (and larger) but it doesn't seem worth
1501  * it.
1502  */
1503  procLocks = &(lock->procLocks);
1504  otherproclock = (PROCLOCK *)
1505  SHMQueueNext(procLocks, procLocks, offsetof(PROCLOCK, lockLink));
1506  while (otherproclock != NULL)
1507  {
1508  if (proclock != otherproclock &&
1509  proclock->groupLeader == otherproclock->groupLeader &&
1510  (otherproclock->holdMask & conflictMask) != 0)
1511  {
1512  int intersectMask = otherproclock->holdMask & conflictMask;
1513 
1514  for (i = 1; i <= numLockModes; i++)
1515  {
1516  if ((intersectMask & LOCKBIT_ON(i)) != 0)
1517  {
1518  if (conflictsRemaining[i] <= 0)
1519  elog(PANIC, "proclocks held do not match lock");
1520  conflictsRemaining[i]--;
1521  totalConflictsRemaining--;
1522  }
1523  }
1524 
1525  if (totalConflictsRemaining == 0)
1526  {
1527  PROCLOCK_PRINT("LockCheckConflicts: resolved (group)",
1528  proclock);
1529  return false;
1530  }
1531  }
1532  otherproclock = (PROCLOCK *)
1533  SHMQueueNext(procLocks, &otherproclock->lockLink,
1534  offsetof(PROCLOCK, lockLink));
1535  }
1536 
1537  /* Nope, it's a real conflict. */
1538  PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)", proclock);
1539  return true;
1540 }
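/*
 * Example of the group case handled above: if a parallel worker requests
 * RowExclusiveLock on a relation on which its leader already holds
 * ShareLock, grantMask appears to conflict, but subtracting the leader's
 * ShareLock from conflictsRemaining drives the total to zero and the lock
 * is granted; group members never deadlock against each other's heavyweight
 * locks (relation extension and page locks excepted, as noted above).
 */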
1541 
1542 /*
1543  * GrantLock -- update the lock and proclock data structures to show
1544  * the lock request has been granted.
1545  *
1546  * NOTE: if proc was blocked, it also needs to be removed from the wait list
1547  * and have its waitLock/waitProcLock fields cleared. That's not done here.
1548  *
1549  * NOTE: the lock grant also has to be recorded in the associated LOCALLOCK
1550  * table entry; but since we may be awaking some other process, we can't do
1551  * that here; it's done by GrantLockLocal, instead.
1552  */
1553 void
1554 GrantLock(LOCK *lock, PROCLOCK *proclock, LOCKMODE lockmode)
1555 {
1556  lock->nGranted++;
1557  lock->granted[lockmode]++;
1558  lock->grantMask |= LOCKBIT_ON(lockmode);
1559  if (lock->granted[lockmode] == lock->requested[lockmode])
1560  lock->waitMask &= LOCKBIT_OFF(lockmode);
1561  proclock->holdMask |= LOCKBIT_ON(lockmode);
1562  LOCK_PRINT("GrantLock", lock, lockmode);
1563  Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
1564  Assert(lock->nGranted <= lock->nRequested);
1565 }
1566 
1567 /*
1568  * UnGrantLock -- opposite of GrantLock.
1569  *
1570  * Updates the lock and proclock data structures to show that the lock
1571  * is no longer held nor requested by the current holder.
1572  *
1573  * Returns true if there were any waiters waiting on the lock that
1574  * should now be woken up with ProcLockWakeup.
1575  */
1576 static bool
1577 UnGrantLock(LOCK *lock, LOCKMODE lockmode,
1578  PROCLOCK *proclock, LockMethod lockMethodTable)
1579 {
1580  bool wakeupNeeded = false;
1581 
1582  Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
1583  Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
1584  Assert(lock->nGranted <= lock->nRequested);
1585 
1586  /*
1587  * fix the general lock stats
1588  */
1589  lock->nRequested--;
1590  lock->requested[lockmode]--;
1591  lock->nGranted--;
1592  lock->granted[lockmode]--;
1593 
1594  if (lock->granted[lockmode] == 0)
1595  {
1596  /* change the conflict mask. No more of this lock type. */
1597  lock->grantMask &= LOCKBIT_OFF(lockmode);
1598  }
1599 
1600  LOCK_PRINT("UnGrantLock: updated", lock, lockmode);
1601 
1602  /*
1603  * We need only run ProcLockWakeup if the released lock conflicts with at
1604  * least one of the lock types requested by waiter(s). Otherwise whatever
1605  * conflict made them wait must still exist. NOTE: before MVCC, we could
1606  * skip wakeup if lock->granted[lockmode] was still positive. But that's
1607  * not true anymore, because the remaining granted locks might belong to
1608  * some waiter, who could now be awakened because he doesn't conflict with
1609  * his own locks.
1610  */
1611  if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
1612  wakeupNeeded = true;
1613 
1614  /*
1615  * Now fix the per-proclock state.
1616  */
1617  proclock->holdMask &= LOCKBIT_OFF(lockmode);
1618  PROCLOCK_PRINT("UnGrantLock: updated", proclock);
1619 
1620  return wakeupNeeded;
1621 }
1622 
1623 /*
1624  * CleanUpLock -- clean up after releasing a lock. We garbage-collect the
1625  * proclock and lock objects if possible, and call ProcLockWakeup if there
1626  * are remaining requests and the caller says it's OK. (Normally, this
1627  * should be called after UnGrantLock, and wakeupNeeded is the result from
1628  * UnGrantLock.)
1629  *
1630  * The appropriate partition lock must be held at entry, and will be
1631  * held at exit.
1632  */
1633 static void
1634 CleanUpLock(LOCK *lock, PROCLOCK *proclock,
1635  LockMethod lockMethodTable, uint32 hashcode,
1636  bool wakeupNeeded)
1637 {
1638  /*
1639  * If this was my last hold on this lock, delete my entry in the proclock
1640  * table.
1641  */
1642  if (proclock->holdMask == 0)
1643  {
1644  uint32 proclock_hashcode;
1645 
1646  PROCLOCK_PRINT("CleanUpLock: deleting", proclock);
1647  SHMQueueDelete(&proclock->lockLink);
1648  SHMQueueDelete(&proclock->procLink);
1649  proclock_hashcode = ProcLockHashCode(&proclock->tag, hashcode);
1650  if (!hash_search_with_hash_value(LockMethodProcLockHash,
1651  (void *) &(proclock->tag),
1652  proclock_hashcode,
1653  HASH_REMOVE,
1654  NULL))
1655  elog(PANIC, "proclock table corrupted");
1656  }
1657 
1658  if (lock->nRequested == 0)
1659  {
1660  /*
1661  * The caller just released the last lock, so garbage-collect the lock
1662  * object.
1663  */
1664  LOCK_PRINT("CleanUpLock: deleting", lock, 0);
1665  Assert(SHMQueueEmpty(&(lock->procLocks)));
1666  if (!hash_search_with_hash_value(LockMethodLockHash,
1667  (void *) &(lock->tag),
1668  hashcode,
1669  HASH_REMOVE,
1670  NULL))
1671  elog(PANIC, "lock table corrupted");
1672  }
1673  else if (wakeupNeeded)
1674  {
1675  /* There are waiters on this lock, so wake them up. */
1676  ProcLockWakeup(lockMethodTable, lock);
1677  }
1678 }
1679 
1680 /*
1681  * GrantLockLocal -- update the locallock data structures to show
1682  * the lock request has been granted.
1683  *
1684  * We expect that LockAcquire made sure there is room to add a new
1685  * ResourceOwner entry.
1686  */
1687 static void
1688 GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner)
1689 {
1690  LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
1691  int i;
1692 
1693  Assert(locallock->numLockOwners < locallock->maxLockOwners);
1694  /* Count the total */
1695  locallock->nLocks++;
1696  /* Count the per-owner lock */
1697  for (i = 0; i < locallock->numLockOwners; i++)
1698  {
1699  if (lockOwners[i].owner == owner)
1700  {
1701  lockOwners[i].nLocks++;
1702  return;
1703  }
1704  }
1705  lockOwners[i].owner = owner;
1706  lockOwners[i].nLocks = 1;
1707  locallock->numLockOwners++;
1708  if (owner != NULL)
1709  ResourceOwnerRememberLock(owner, locallock);
1710 
1711  /* Indicate that the lock is acquired for certain types of locks. */
1712  CheckAndSetLockHeld(locallock, true);
1713 }
1714 
1715 /*
1716  * BeginStrongLockAcquire - inhibit use of fastpath for a given LOCALLOCK,
1717  * and arrange for error cleanup if it fails
1718  */
1719 static void
1720 BeginStrongLockAcquire(LOCALLOCK *locallock, uint32 fasthashcode)
1721 {
1722  Assert(StrongLockInProgress == NULL);
1723  Assert(locallock->holdsStrongLockCount == false);
1724 
1725  /*
1726  * Adding to a memory location is not atomic, so we take a spinlock to
1727  * ensure we don't collide with someone else trying to bump the count at
1728  * the same time.
1729  *
1730  * XXX: It might be worth considering using an atomic fetch-and-add
1731  * instruction here, on architectures where that is supported.
1732  */
1733 
1734  SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
1735  FastPathStrongRelationLocks->count[fasthashcode]++;
1736  locallock->holdsStrongLockCount = true;
1737  StrongLockInProgress = locallock;
1738  SpinLockRelease(&FastPathStrongRelationLocks->mutex);
1739 }
1740 
1741 /*
1742  * FinishStrongLockAcquire - cancel pending cleanup for a strong lock
1743  * acquisition once it's no longer needed
1744  */
1745 static void
1746 FinishStrongLockAcquire(void)
1747 {
1748  StrongLockInProgress = NULL;
1749 }
1750 
1751 /*
1752  * AbortStrongLockAcquire - undo strong lock state changes performed by
1753  * BeginStrongLockAcquire.
1754  */
1755 void
1756 AbortStrongLockAcquire(void)
1757 {
1758  uint32 fasthashcode;
1759  LOCALLOCK *locallock = StrongLockInProgress;
1760 
1761  if (locallock == NULL)
1762  return;
1763 
1764  fasthashcode = FastPathStrongLockHashPartition(locallock->hashcode);
1765  Assert(locallock->holdsStrongLockCount == true);
1766  SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
1767  Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
1768  FastPathStrongRelationLocks->count[fasthashcode]--;
1769  locallock->holdsStrongLockCount = false;
1770  StrongLockInProgress = NULL;
1771  SpinLockRelease(&FastPathStrongRelationLocks->mutex);
1772 }
1773 
1774 /*
1775  * GrantAwaitedLock -- call GrantLockLocal for the lock we are doing
1776  * WaitOnLock on.
1777  *
1778  * proc.c needs this for the case where we are booted off the lock by
1779  * timeout, but discover that someone granted us the lock anyway.
1780  *
1781  * We could just export GrantLockLocal, but that would require including
1782  * resowner.h in lock.h, which creates circularity.
1783  */
1784 void
1785 GrantAwaitedLock(void)
1786 {
1787  GrantLockLocal(awaitedLock, awaitedOwner);
1788 }
1789 
1790 /*
1791  * MarkLockClear -- mark an acquired lock as "clear"
1792  *
1793  * This means that we know we have absorbed all sinval messages that other
1794  * sessions generated before we acquired this lock, and so we can confidently
1795  * assume we know about any catalog changes protected by this lock.
1796  */
1797 void
1798 MarkLockClear(LOCALLOCK *locallock)
1799 {
1800  Assert(locallock->nLocks > 0);
1801  locallock->lockCleared = true;
1802 }
1803 
1804 /*
1805  * WaitOnLock -- wait to acquire a lock
1806  *
1807  * Caller must have set MyProc->heldLocks to reflect locks already held
1808  * on the lockable object by this process.
1809  *
1810  * The appropriate partition lock must be held at entry.
1811  */
1812 static void
1813 WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner)
1814 {
1815  LOCKMETHODID lockmethodid = LOCALLOCK_LOCKMETHOD(*locallock);
1816  LockMethod lockMethodTable = LockMethods[lockmethodid];
1817  char *volatile new_status = NULL;
1818 
1819  LOCK_PRINT("WaitOnLock: sleeping on lock",
1820  locallock->lock, locallock->tag.mode);
1821 
1822  /* Report change to waiting status */
1823  if (update_process_title)
1824  {
1825  const char *old_status;
1826  int len;
1827 
1828  old_status = get_ps_display(&len);
1829  new_status = (char *) palloc(len + 8 + 1);
1830  memcpy(new_status, old_status, len);
1831  strcpy(new_status + len, " waiting");
1832  set_ps_display(new_status);
1833  new_status[len] = '\0'; /* truncate off " waiting" */
1834  }
1835 
1836  awaitedLock = locallock;
1837  awaitedOwner = owner;
1838 
1839  /*
1840  * NOTE: Think not to put any shared-state cleanup after the call to
1841  * ProcSleep, in either the normal or failure path. The lock state must
1842  * be fully set by the lock grantor, or by CheckDeadLock if we give up
1843  * waiting for the lock. This is necessary because of the possibility
1844  * that a cancel/die interrupt will interrupt ProcSleep after someone else
1845  * grants us the lock, but before we've noticed it. Hence, after granting,
1846  * the locktable state must fully reflect the fact that we own the lock;
1847  * we can't do additional work on return.
1848  *
1849  * We can and do use a PG_TRY block to try to clean up after failure, but
1850  * this still has a major limitation: elog(FATAL) can occur while waiting
1851  * (eg, a "die" interrupt), and then control won't come back here. So all
1852  * cleanup of essential state should happen in LockErrorCleanup, not here.
1853  * We can use PG_TRY to clear the "waiting" status flags, since doing that
1854  * is unimportant if the process exits.
1855  */
1856  PG_TRY();
1857  {
1858  if (ProcSleep(locallock, lockMethodTable) != PROC_WAIT_STATUS_OK)
1859  {
1860  /*
1861  * We failed as a result of a deadlock, see CheckDeadLock(). Quit
1862  * now.
1863  */
1864  awaitedLock = NULL;
1865  LOCK_PRINT("WaitOnLock: aborting on lock",
1866  locallock->lock, locallock->tag.mode);
1867  LWLockRelease(LockHashPartitionLock(locallock->hashcode));
1868 
1869  /*
1870  * Now that we aren't holding the partition lock, we can give an
1871  * error report including details about the detected deadlock.
1872  */
1873  DeadLockReport();
1874  /* not reached */
1875  }
1876  }
1877  PG_CATCH();
1878  {
1879  /* In this path, awaitedLock remains set until LockErrorCleanup */
1880 
1881  /* Report change to non-waiting status */
1882  if (update_process_title)
1883  {
1884  set_ps_display(new_status);
1885  pfree(new_status);
1886  }
1887 
1888  /* and propagate the error */
1889  PG_RE_THROW();
1890  }
1891  PG_END_TRY();
1892 
1893  awaitedLock = NULL;
1894 
1895  /* Report change to non-waiting status */
1896  if (update_process_title)
1897  {
1898  set_ps_display(new_status);
1899  pfree(new_status);
1900  }
1901 
1902  LOCK_PRINT("WaitOnLock: wakeup on lock",
1903  locallock->lock, locallock->tag.mode);
1904 }
1905 
1906 /*
1907  * Remove a proc from the wait-queue it is on (caller must know it is on one).
1908  * This is only used when the proc has failed to get the lock, so we set its
1909  * waitStatus to PROC_WAIT_STATUS_ERROR.
1910  *
1911  * Appropriate partition lock must be held by caller. Also, caller is
1912  * responsible for signaling the proc if needed.
1913  *
1914  * NB: this does not clean up any locallock object that may exist for the lock.
1915  */
1916 void
1917 RemoveFromWaitQueue(PGPROC *proc, uint32 hashcode)
1918 {
1919  LOCK *waitLock = proc->waitLock;
1920  PROCLOCK *proclock = proc->waitProcLock;
1921  LOCKMODE lockmode = proc->waitLockMode;
1922  LOCKMETHODID lockmethodid = LOCK_LOCKMETHOD(*waitLock);
1923 
1924  /* Make sure proc is waiting */
1925  Assert(proc->waitStatus == PROC_WAIT_STATUS_WAITING);
1926  Assert(proc->links.next != NULL);
1927  Assert(waitLock);
1928  Assert(waitLock->waitProcs.size > 0);
1929  Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
1930 
1931  /* Remove proc from lock's wait queue */
1932  SHMQueueDelete(&(proc->links));
1933  waitLock->waitProcs.size--;
1934 
1935  /* Undo increments of request counts by waiting process */
1936  Assert(waitLock->nRequested > 0);
1937  Assert(waitLock->nRequested > proc->waitLock->nGranted);
1938  waitLock->nRequested--;
1939  Assert(waitLock->requested[lockmode] > 0);
1940  waitLock->requested[lockmode]--;
1941  /* don't forget to clear waitMask bit if appropriate */
1942  if (waitLock->granted[lockmode] == waitLock->requested[lockmode])
1943  waitLock->waitMask &= LOCKBIT_OFF(lockmode);
1944 
1945  /* Clean up the proc's own state, and pass it the ok/fail signal */
1946  proc->waitLock = NULL;
1947  proc->waitProcLock = NULL;
1948  proc->waitStatus = PROC_WAIT_STATUS_ERROR;
1949 
1950  /*
1951  * Delete the proclock immediately if it represents no already-held locks.
1952  * (This must happen now because if the owner of the lock decides to
1953  * release it, and the requested/granted counts then go to zero,
1954  * LockRelease expects there to be no remaining proclocks.) Then see if
1955  * any other waiters for the lock can be woken up now.
1956  */
1957  CleanUpLock(waitLock, proclock,
1958  LockMethods[lockmethodid], hashcode,
1959  true);
1960 }
1961 
1962 /*
1963  * LockRelease -- look up 'locktag' and release one 'lockmode' lock on it.
1964  * Release a session lock if 'sessionLock' is true, else release a
1965  * regular transaction lock.
1966  *
1967  * Side Effects: find any waiting processes that are now wakable,
1968  * grant them their requested locks and awaken them.
1969  * (We have to grant the lock here to avoid a race between
1970  * the waking process and any new process to
1971  * come along and request the lock.)
1972  */
1973 bool
1974 LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
1975 {
1976  LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
1977  LockMethod lockMethodTable;
1978  LOCALLOCKTAG localtag;
1979  LOCALLOCK *locallock;
1980  LOCK *lock;
1981  PROCLOCK *proclock;
1982  LWLock *partitionLock;
1983  bool wakeupNeeded;
1984 
1985  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
1986  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
1987  lockMethodTable = LockMethods[lockmethodid];
1988  if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
1989  elog(ERROR, "unrecognized lock mode: %d", lockmode);
1990 
1991 #ifdef LOCK_DEBUG
1992  if (LOCK_DEBUG_ENABLED(locktag))
1993  elog(LOG, "LockRelease: lock [%u,%u] %s",
1994  locktag->locktag_field1, locktag->locktag_field2,
1995  lockMethodTable->lockModeNames[lockmode]);
1996 #endif
1997 
1998  /*
1999  * Find the LOCALLOCK entry for this lock and lockmode
2000  */
2001  MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
2002  localtag.lock = *locktag;
2003  localtag.mode = lockmode;
2004 
2005  locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
2006  (void *) &localtag,
2007  HASH_FIND, NULL);
2008 
2009  /*
2010  * let the caller print its own error message, too. Do not ereport(ERROR).
2011  */
2012  if (!locallock || locallock->nLocks <= 0)
2013  {
2014  elog(WARNING, "you don't own a lock of type %s",
2015  lockMethodTable->lockModeNames[lockmode]);
2016  return false;
2017  }
2018 
2019  /*
2020  * Decrease the count for the resource owner.
2021  */
2022  {
2023  LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
2024  ResourceOwner owner;
2025  int i;
2026 
2027  /* Identify owner for lock */
2028  if (sessionLock)
2029  owner = NULL;
2030  else
2031  owner = CurrentResourceOwner;
2032 
2033  for (i = locallock->numLockOwners - 1; i >= 0; i--)
2034  {
2035  if (lockOwners[i].owner == owner)
2036  {
2037  Assert(lockOwners[i].nLocks > 0);
2038  if (--lockOwners[i].nLocks == 0)
2039  {
2040  if (owner != NULL)
2041  ResourceOwnerForgetLock(owner, locallock);
2042  /* compact out unused slot */
2043  locallock->numLockOwners--;
2044  if (i < locallock->numLockOwners)
2045  lockOwners[i] = lockOwners[locallock->numLockOwners];
2046  }
2047  break;
2048  }
2049  }
2050  if (i < 0)
2051  {
2052  /* don't release a lock belonging to another owner */
2053  elog(WARNING, "you don't own a lock of type %s",
2054  lockMethodTable->lockModeNames[lockmode]);
2055  return false;
2056  }
2057  }
2058 
2059  /*
2060  * Decrease the total local count. If we're still holding the lock, we're
2061  * done.
2062  */
2063  locallock->nLocks--;
2064 
2065  if (locallock->nLocks > 0)
2066  return true;
2067 
2068  /*
2069  * At this point we can no longer suppose we are clear of invalidation
2070  * messages related to this lock. Although we'll delete the LOCALLOCK
2071  * object before any intentional return from this routine, it seems worth
2072  * the trouble to explicitly reset lockCleared right now, just in case
2073  * some error prevents us from deleting the LOCALLOCK.
2074  */
2075  locallock->lockCleared = false;
2076 
2077  /* Attempt fast release of any lock eligible for the fast path. */
2078  if (EligibleForRelationFastPath(locktag, lockmode) &&
2079  FastPathLocalUseCount > 0)
2080  {
2081  bool released;
2082 
2083  /*
2084  * We might not find the lock here, even if we originally entered it
2085  * here. Another backend may have moved it to the main table.
2086  */
2087  LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
2088  released = FastPathUnGrantRelationLock(locktag->locktag_field2,
2089  lockmode);
2090  LWLockRelease(&MyProc->fpInfoLock);
2091  if (released)
2092  {
2093  RemoveLocalLock(locallock);
2094  return true;
2095  }
2096  }
2097 
2098  /*
2099  * Otherwise we've got to mess with the shared lock table.
2100  */
2101  partitionLock = LockHashPartitionLock(locallock->hashcode);
2102 
2103  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2104 
2105  /*
2106  * Normally, we don't need to re-find the lock or proclock, since we kept
2107  * their addresses in the locallock table, and they couldn't have been
2108  * removed while we were holding a lock on them. But it's possible that
2109  * the lock was taken fast-path and has since been moved to the main hash
2110  * table by another backend, in which case we will need to look up the
2111  * objects here. We assume the lock field is NULL if so.
2112  */
2113  lock = locallock->lock;
2114  if (!lock)
2115  {
2116  PROCLOCKTAG proclocktag;
2117 
2118  Assert(EligibleForRelationFastPath(locktag, lockmode));
2119  lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
2120  (const void *) locktag,
2121  locallock->hashcode,
2122  HASH_FIND,
2123  NULL);
2124  if (!lock)
2125  elog(ERROR, "failed to re-find shared lock object");
2126  locallock->lock = lock;
2127 
2128  proclocktag.myLock = lock;
2129  proclocktag.myProc = MyProc;
2130  locallock->proclock = (PROCLOCK *) hash_search(LockMethodProcLockHash,
2131  (void *) &proclocktag,
2132  HASH_FIND,
2133  NULL);
2134  if (!locallock->proclock)
2135  elog(ERROR, "failed to re-find shared proclock object");
2136  }
2137  LOCK_PRINT("LockRelease: found", lock, lockmode);
2138  proclock = locallock->proclock;
2139  PROCLOCK_PRINT("LockRelease: found", proclock);
2140 
2141  /*
2142  * Double-check that we are actually holding a lock of the type we want to
2143  * release.
2144  */
2145  if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
2146  {
2147  PROCLOCK_PRINT("LockRelease: WRONGTYPE", proclock);
2148  LWLockRelease(partitionLock);
2149  elog(WARNING, "you don't own a lock of type %s",
2150  lockMethodTable->lockModeNames[lockmode]);
2151  RemoveLocalLock(locallock);
2152  return false;
2153  }
2154 
2155  /*
2156  * Do the releasing. CleanUpLock will waken any now-wakable waiters.
2157  */
2158  wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
2159 
2160  CleanUpLock(lock, proclock,
2161  lockMethodTable, locallock->hashcode,
2162  wakeupNeeded);
2163 
2164  LWLockRelease(partitionLock);
2165 
2166  RemoveLocalLock(locallock);
2167  return true;
2168 }
2169 
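/*
 * [Editorial illustration -- not part of lock.c] A hedged sketch of a direct
 * LockRelease() call.  In practice, callers normally go through the lmgr.c
 * wrappers (for example UnlockRelationOid) rather than building a LOCKTAG by
 * hand; the tag fields and lock mode below are only for demonstration.
 */
#include "storage/lock.h"

static void
release_relation_lock_sketch(Oid dbid, Oid relid)
{
	LOCKTAG		tag;

	SET_LOCKTAG_RELATION(tag, dbid, relid);

	/* sessionLock = false: release a transaction-level hold */
	(void) LockRelease(&tag, AccessShareLock, false);
}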
2170 /*
2171  * LockReleaseAll -- Release all locks of the specified lock method that
2172  * are held by the current process.
2173  *
2174  * Well, not necessarily *all* locks. The available behaviors are:
2175  * allLocks == true: release all locks including session locks.
2176  * allLocks == false: release all non-session locks.
2177  */
2178 void
2179 LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
2180 {
2181  HASH_SEQ_STATUS status;
2182  LockMethod lockMethodTable;
2183  int i,
2184  numLockModes;
2185  LOCALLOCK *locallock;
2186  LOCK *lock;
2187  PROCLOCK *proclock;
2188  int partition;
2189  bool have_fast_path_lwlock = false;
2190 
2191  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2192  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2193  lockMethodTable = LockMethods[lockmethodid];
2194 
2195 #ifdef LOCK_DEBUG
2196  if (*(lockMethodTable->trace_flag))
2197  elog(LOG, "LockReleaseAll: lockmethod=%d", lockmethodid);
2198 #endif
2199 
2200  /*
2201  * Get rid of our fast-path VXID lock, if appropriate. Note that this is
2202  * the only way that the lock we hold on our own VXID can ever get
2203  * released: it is always and only released when a toplevel transaction
2204  * ends.
2205  */
2206  if (lockmethodid == DEFAULT_LOCKMETHOD)
2207  VirtualXactLockTableCleanup();
2208 
2209  numLockModes = lockMethodTable->numLockModes;
2210 
2211  /*
2212  * First we run through the locallock table and get rid of unwanted
2213  * entries, then we scan the process's proclocks and get rid of those. We
2214  * do this separately because we may have multiple locallock entries
2215  * pointing to the same proclock, and we daren't end up with any dangling
2216  * pointers. Fast-path locks are cleaned up during the locallock table
2217  * scan, though.
2218  */
2219  hash_seq_init(&status, LockMethodLocalHash);
2220 
2221  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2222  {
2223  /*
2224  * If the LOCALLOCK entry is unused, we must've run out of shared
2225  * memory while trying to set up this lock. Just forget the local
2226  * entry.
2227  */
2228  if (locallock->nLocks == 0)
2229  {
2230  RemoveLocalLock(locallock);
2231  continue;
2232  }
2233 
2234  /* Ignore items that are not of the lockmethod to be removed */
2235  if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
2236  continue;
2237 
2238  /*
2239  * If we are asked to release all locks, we can just zap the entry.
2240  * Otherwise, must scan to see if there are session locks. We assume
2241  * there is at most one lockOwners entry for session locks.
2242  */
2243  if (!allLocks)
2244  {
2245  LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
2246 
2247  /* If session lock is above array position 0, move it down to 0 */
2248  for (i = 0; i < locallock->numLockOwners; i++)
2249  {
2250  if (lockOwners[i].owner == NULL)
2251  lockOwners[0] = lockOwners[i];
2252  else
2253  ResourceOwnerForgetLock(lockOwners[i].owner, locallock);
2254  }
2255 
2256  if (locallock->numLockOwners > 0 &&
2257  lockOwners[0].owner == NULL &&
2258  lockOwners[0].nLocks > 0)
2259  {
2260  /* Fix the locallock to show just the session locks */
2261  locallock->nLocks = lockOwners[0].nLocks;
2262  locallock->numLockOwners = 1;
2263  /* We aren't deleting this locallock, so done */
2264  continue;
2265  }
2266  else
2267  locallock->numLockOwners = 0;
2268  }
2269 
2270  /*
2271  * If the lock or proclock pointers are NULL, this lock was taken via
2272  * the relation fast-path (and is not known to have been transferred).
2273  */
2274  if (locallock->proclock == NULL || locallock->lock == NULL)
2275  {
2276  LOCKMODE lockmode = locallock->tag.mode;
2277  Oid relid;
2278 
2279  /* Verify that a fast-path lock is what we've got. */
2280  if (!EligibleForRelationFastPath(&locallock->tag.lock, lockmode))
2281  elog(PANIC, "locallock table corrupted");
2282 
2283  /*
2284  * If we don't currently hold the LWLock that protects our
2285  * fast-path data structures, we must acquire it before attempting
2286  * to release the lock via the fast-path. We will continue to
2287  * hold the LWLock until we're done scanning the locallock table,
2288  * unless we hit a transferred fast-path lock. (XXX is this
2289  * really such a good idea? There could be a lot of entries ...)
2290  */
2291  if (!have_fast_path_lwlock)
2292  {
2293  LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
2294  have_fast_path_lwlock = true;
2295  }
2296 
2297  /* Attempt fast-path release. */
2298  relid = locallock->tag.lock.locktag_field2;
2299  if (FastPathUnGrantRelationLock(relid, lockmode))
2300  {
2301  RemoveLocalLock(locallock);
2302  continue;
2303  }
2304 
2305  /*
2306  * Our lock, originally taken via the fast path, has been
2307  * transferred to the main lock table. That's going to require
2308  * some extra work, so release our fast-path lock before starting.
2309  */
2310  LWLockRelease(&MyProc->fpInfoLock);
2311  have_fast_path_lwlock = false;
2312 
2313  /*
2314  * Now dump the lock. We haven't got a pointer to the LOCK or
2315  * PROCLOCK in this case, so we have to handle this a bit
2316  * differently than a normal lock release. Unfortunately, this
2317  * requires an extra LWLock acquire-and-release cycle on the
2318  * partitionLock, but hopefully it shouldn't happen often.
2319  */
2320  LockRefindAndRelease(lockMethodTable, MyProc,
2321  &locallock->tag.lock, lockmode, false);
2322  RemoveLocalLock(locallock);
2323  continue;
2324  }
2325 
2326  /* Mark the proclock to show we need to release this lockmode */
2327  if (locallock->nLocks > 0)
2328  locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
2329 
2330  /* And remove the locallock hashtable entry */
2331  RemoveLocalLock(locallock);
2332  }
2333 
2334  /* Done with the fast-path data structures */
2335  if (have_fast_path_lwlock)
2336  LWLockRelease(&MyProc->fpInfoLock);
2337 
2338  /*
2339  * Now, scan each lock partition separately.
2340  */
2341  for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
2342  {
2343  LWLock *partitionLock;
2344  SHM_QUEUE *procLocks = &(MyProc->myProcLocks[partition]);
2345  PROCLOCK *nextplock;
2346 
2347  partitionLock = LockHashPartitionLockByIndex(partition);
2348 
2349  /*
2350  * If the proclock list for this partition is empty, we can skip
2351  * acquiring the partition lock. This optimization is trickier than
2352  * it looks, because another backend could be in process of adding
2353  * something to our proclock list due to promoting one of our
2354  * fast-path locks. However, any such lock must be one that we
2355  * decided not to delete above, so it's okay to skip it again now;
2356  * we'd just decide not to delete it again. We must, however, be
2357  * careful to re-fetch the list header once we've acquired the
2358  * partition lock, to be sure we have a valid, up-to-date pointer.
2359  * (There is probably no significant risk if pointer fetch/store is
2360  * atomic, but we don't wish to assume that.)
2361  *
2362  * XXX This argument assumes that the locallock table correctly
2363  * represents all of our fast-path locks. While allLocks mode
2364  * guarantees to clean up all of our normal locks regardless of the
2365  * locallock situation, we lose that guarantee for fast-path locks.
2366  * This is not ideal.
2367  */
2368  if (SHMQueueNext(procLocks, procLocks,
2369  offsetof(PROCLOCK, procLink)) == NULL)
2370  continue; /* needn't examine this partition */
2371 
2372  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2373 
2374  for (proclock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks,
2375  offsetof(PROCLOCK, procLink));
2376  proclock;
2377  proclock = nextplock)
2378  {
2379  bool wakeupNeeded = false;
2380 
2381  /* Get link first, since we may unlink/delete this proclock */
2382  nextplock = (PROCLOCK *)
2383  SHMQueueNext(procLocks, &proclock->procLink,
2384  offsetof(PROCLOCK, procLink));
2385 
2386  Assert(proclock->tag.myProc == MyProc);
2387 
2388  lock = proclock->tag.myLock;
2389 
2390  /* Ignore items that are not of the lockmethod to be removed */
2391  if (LOCK_LOCKMETHOD(*lock) != lockmethodid)
2392  continue;
2393 
2394  /*
2395  * In allLocks mode, force release of all locks even if locallock
2396  * table had problems
2397  */
2398  if (allLocks)
2399  proclock->releaseMask = proclock->holdMask;
2400  else
2401  Assert((proclock->releaseMask & ~proclock->holdMask) == 0);
2402 
2403  /*
2404  * Ignore items that have nothing to be released, unless they have
2405  * holdMask == 0 and are therefore recyclable
2406  */
2407  if (proclock->releaseMask == 0 && proclock->holdMask != 0)
2408  continue;
2409 
2410  PROCLOCK_PRINT("LockReleaseAll", proclock);
2411  LOCK_PRINT("LockReleaseAll", lock, 0);
2412  Assert(lock->nRequested >= 0);
2413  Assert(lock->nGranted >= 0);
2414  Assert(lock->nGranted <= lock->nRequested);
2415  Assert((proclock->holdMask & ~lock->grantMask) == 0);
2416 
2417  /*
2418  * Release the previously-marked lock modes
2419  */
2420  for (i = 1; i <= numLockModes; i++)
2421  {
2422  if (proclock->releaseMask & LOCKBIT_ON(i))
2423  wakeupNeeded |= UnGrantLock(lock, i, proclock,
2424  lockMethodTable);
2425  }
2426  Assert((lock->nRequested >= 0) && (lock->nGranted >= 0));
2427  Assert(lock->nGranted <= lock->nRequested);
2428  LOCK_PRINT("LockReleaseAll: updated", lock, 0);
2429 
2430  proclock->releaseMask = 0;
2431 
2432  /* CleanUpLock will wake up waiters if needed. */
2433  CleanUpLock(lock, proclock,
2434  lockMethodTable,
2435  LockTagHashCode(&lock->tag),
2436  wakeupNeeded);
2437  } /* loop over PROCLOCKs within this partition */
2438 
2439  LWLockRelease(partitionLock);
2440  } /* loop over partitions */
2441 
2442 #ifdef LOCK_DEBUG
2443  if (*(lockMethodTable->trace_flag))
2444  elog(LOG, "LockReleaseAll done");
2445 #endif
2446 }
2447 
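/*
 * [Editorial illustration -- not part of lock.c] The allLocks flag selects
 * between the two behaviors described above.  A hypothetical cleanup path
 * might do either of the following:
 */
static void
release_all_sketch(void)
{
	LockReleaseAll(DEFAULT_LOCKMETHOD, false);	/* keep session locks */
	LockReleaseAll(DEFAULT_LOCKMETHOD, true);	/* drop session locks too */
}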
2448 /*
2449  * LockReleaseSession -- Release all session locks of the specified lock method
2450  * that are held by the current process.
2451  */
2452 void
2453 LockReleaseSession(LOCKMETHODID lockmethodid)
2454 {
2455  HASH_SEQ_STATUS status;
2456  LOCALLOCK *locallock;
2457 
2458  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2459  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2460 
2461  hash_seq_init(&status, LockMethodLocalHash);
2462 
2463  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2464  {
2465  /* Ignore items that are not of the specified lock method */
2466  if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
2467  continue;
2468 
2469  ReleaseLockIfHeld(locallock, true);
2470  }
2471 }
2472 
2473 /*
2474  * LockReleaseCurrentOwner
2475  * Release all locks belonging to CurrentResourceOwner
2476  *
2477  * If the caller knows what those locks are, it can pass them as an array.
2478  * That speeds up the call significantly, when a lot of locks are held.
2479  * Otherwise, pass NULL for locallocks, and we'll traverse through our hash
2480  * table to find them.
2481  */
2482 void
2483 LockReleaseCurrentOwner(LOCALLOCK **locallocks, int nlocks)
2484 {
2485  if (locallocks == NULL)
2486  {
2487  HASH_SEQ_STATUS status;
2488  LOCALLOCK *locallock;
2489 
2490  hash_seq_init(&status, LockMethodLocalHash);
2491 
2492  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2493  ReleaseLockIfHeld(locallock, false);
2494  }
2495  else
2496  {
2497  int i;
2498 
2499  for (i = nlocks - 1; i >= 0; i--)
2500  ReleaseLockIfHeld(locallocks[i], false);
2501  }
2502 }
2503 
2504 /*
2505  * ReleaseLockIfHeld
2506  * Release any session-level locks on this lockable object if sessionLock
2507  * is true; else, release any locks held by CurrentResourceOwner.
2508  *
2509  * It is tempting to pass this a ResourceOwner pointer (or NULL for session
2510  * locks), but without refactoring LockRelease() we cannot support releasing
2511  * locks belonging to resource owners other than CurrentResourceOwner.
2512  * If we were to refactor, it'd be a good idea to fix it so we don't have to
2513  * do a hashtable lookup of the locallock, too. However, currently this
2514  * function isn't used heavily enough to justify refactoring for its
2515  * convenience.
2516  */
2517 static void
2518 ReleaseLockIfHeld(LOCALLOCK *locallock, bool sessionLock)
2519 {
2520  ResourceOwner owner;
2521  LOCALLOCKOWNER *lockOwners;
2522  int i;
2523 
2524  /* Identify owner for lock (must match LockRelease!) */
2525  if (sessionLock)
2526  owner = NULL;
2527  else
2528  owner = CurrentResourceOwner;
2529 
2530  /* Scan to see if there are any locks belonging to the target owner */
2531  lockOwners = locallock->lockOwners;
2532  for (i = locallock->numLockOwners - 1; i >= 0; i--)
2533  {
2534  if (lockOwners[i].owner == owner)
2535  {
2536  Assert(lockOwners[i].nLocks > 0);
2537  if (lockOwners[i].nLocks < locallock->nLocks)
2538  {
2539  /*
2540  * We will still hold this lock after forgetting this
2541  * ResourceOwner.
2542  */
2543  locallock->nLocks -= lockOwners[i].nLocks;
2544  /* compact out unused slot */
2545  locallock->numLockOwners--;
2546  if (owner != NULL)
2547  ResourceOwnerForgetLock(owner, locallock);
2548  if (i < locallock->numLockOwners)
2549  lockOwners[i] = lockOwners[locallock->numLockOwners];
2550  }
2551  else
2552  {
2553  Assert(lockOwners[i].nLocks == locallock->nLocks);
2554  /* We want to call LockRelease just once */
2555  lockOwners[i].nLocks = 1;
2556  locallock->nLocks = 1;
2557  if (!LockRelease(&locallock->tag.lock,
2558  locallock->tag.mode,
2559  sessionLock))
2560  elog(WARNING, "ReleaseLockIfHeld: failed??");
2561  }
2562  break;
2563  }
2564  }
2565 }
2566 
2567 /*
2568  * LockReassignCurrentOwner
2569  * Reassign all locks belonging to CurrentResourceOwner to belong
2570  * to its parent resource owner.
2571  *
2572  * If the caller knows what those locks are, it can pass them as an array.
2573  * That speeds up the call significantly, when a lot of locks are held
2574  * (e.g pg_dump with a large schema). Otherwise, pass NULL for locallocks,
2575  * and we'll traverse through our hash table to find them.
2576  */
2577 void
2578 LockReassignCurrentOwner(LOCALLOCK **locallocks, int nlocks)
2579 {
2580  ResourceOwner parent = ResourceOwnerGetParent(CurrentResourceOwner);
2581 
2582  Assert(parent != NULL);
2583 
2584  if (locallocks == NULL)
2585  {
2586  HASH_SEQ_STATUS status;
2587  LOCALLOCK *locallock;
2588 
2589  hash_seq_init(&status, LockMethodLocalHash);
2590 
2591  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2592  LockReassignOwner(locallock, parent);
2593  }
2594  else
2595  {
2596  int i;
2597 
2598  for (i = nlocks - 1; i >= 0; i--)
2599  LockReassignOwner(locallocks[i], parent);
2600  }
2601 }
2602 
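/*
 * [Editorial illustration -- not part of lock.c] When no cached array of
 * LOCALLOCKs is available, passing NULL makes the function scan the local
 * lock table, as described above.  This is only sensible while
 * CurrentResourceOwner has a parent owner to receive the locks.
 */
static void
reassign_to_parent_sketch(void)
{
	LockReassignCurrentOwner(NULL, 0);
}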
2603 /*
2604  * Subroutine of LockReassignCurrentOwner. Reassigns a given lock belonging to
2605  * CurrentResourceOwner to its parent.
2606  */
2607 static void
2608 LockReassignOwner(LOCALLOCK *locallock, ResourceOwner parent)
2609 {
2610  LOCALLOCKOWNER *lockOwners;
2611  int i;
2612  int ic = -1;
2613  int ip = -1;
2614 
2615  /*
2616  * Scan to see if there are any locks belonging to current owner or its
2617  * parent
2618  */
2619  lockOwners = locallock->lockOwners;
2620  for (i = locallock->numLockOwners - 1; i >= 0; i--)
2621  {
2622  if (lockOwners[i].owner == CurrentResourceOwner)
2623  ic = i;
2624  else if (lockOwners[i].owner == parent)
2625  ip = i;
2626  }
2627 
2628  if (ic < 0)
2629  return; /* no current locks */
2630 
2631  if (ip < 0)
2632  {
2633  /* Parent has no slot, so just give it the child's slot */
2634  lockOwners[ic].owner = parent;
2635  ResourceOwnerRememberLock(parent, locallock);
2636  }
2637  else
2638  {
2639  /* Merge child's count with parent's */
2640  lockOwners[ip].nLocks += lockOwners[ic].nLocks;
2641  /* compact out unused slot */
2642  locallock->numLockOwners--;
2643  if (ic < locallock->numLockOwners)
2644  lockOwners[ic] = lockOwners[locallock->numLockOwners];
2645  }
2646  ResourceOwnerForgetLock(CurrentResourceOwner, locallock);
2647 }
2648 
2649 /*
2650  * FastPathGrantRelationLock
2651  * Grant lock using per-backend fast-path array, if there is space.
2652  */
2653 static bool
2654 FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode)
2655 {
2656  uint32 f;
2657  uint32 unused_slot = FP_LOCK_SLOTS_PER_BACKEND;
2658 
2659  /* Scan for existing entry for this relid, remembering empty slot. */
2660  for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
2661  {
2662  if (FAST_PATH_GET_BITS(MyProc, f) == 0)
2663  unused_slot = f;
2664  else if (MyProc->fpRelId[f] == relid)
2665  {
2666  Assert(!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode));
2667  FAST_PATH_SET_LOCKMODE(MyProc, f, lockmode);
2668  return true;
2669  }
2670  }
2671 
2672  /* If no existing entry, use any empty slot. */
2673  if (unused_slot < FP_LOCK_SLOTS_PER_BACKEND)
2674  {
2675  MyProc->fpRelId[unused_slot] = relid;
2676  FAST_PATH_SET_LOCKMODE(MyProc, unused_slot, lockmode);
2677  ++FastPathLocalUseCount;
2678  return true;
2679  }
2680 
2681  /* No existing entry, and no empty slot. */
2682  return false;
2683 }
2684 
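/*
 * [Editorial illustration -- not part of lock.c] The acquire-side pattern,
 * assuming the caller already holds MyProc->fpInfoLock: try the per-backend
 * array first and fall back to the shared hash table only when it is full
 * or the lock is not eligible for the fast path.
 */
static bool
try_fast_path_sketch(Oid relid, LOCKMODE lockmode)
{
	if (FastPathGrantRelationLock(relid, lockmode))
		return true;			/* granted locally; no shared-memory entry */
	return false;				/* caller must fall back to the main table */
}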
2685 /*
2686  * FastPathUnGrantRelationLock
2687  * Release fast-path lock, if present. Update backend-private local
2688  * use count, while we're at it.
2689  */
2690 static bool
2691 FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode)
2692 {
2693  uint32 f;
2694  bool result = false;
2695 
2696  FastPathLocalUseCount = 0;
2697  for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
2698  {
2699  if (MyProc->fpRelId[f] == relid
2700  && FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
2701  {
2702  Assert(!result);
2703  FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
2704  result = true;
2705  /* we continue iterating so as to update FastPathLocalUseCount */
2706  }
2707  if (FAST_PATH_GET_BITS(MyProc, f) != 0)
2708  ++FastPathLocalUseCount;
2709  }
2710  return result;
2711 }
2712 
2713 /*
2714  * FastPathTransferRelationLocks
2715  * Transfer locks matching the given lock tag from per-backend fast-path
2716  * arrays to the shared hash table.
2717  *
2718  * Returns true if successful, false if ran out of shared memory.
2719  */
2720 static bool
2721 FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag,
2722  uint32 hashcode)
2723 {
2724  LWLock *partitionLock = LockHashPartitionLock(hashcode);
2725  Oid relid = locktag->locktag_field2;
2726  uint32 i;
2727 
2728  /*
2729  * Every PGPROC that can potentially hold a fast-path lock is present in
2730  * ProcGlobal->allProcs. Prepared transactions are not, but any
2731  * outstanding fast-path locks held by prepared transactions are
2732  * transferred to the main lock table.
2733  */
2734  for (i = 0; i < ProcGlobal->allProcCount; i++)
2735  {
2736  PGPROC *proc = &ProcGlobal->allProcs[i];
2737  uint32 f;
2738 
2739  LWLockAcquire(&proc->fpInfoLock, LW_EXCLUSIVE);
2740 
2741  /*
2742  * If the target backend isn't referencing the same database as the
2743  * lock, then we needn't examine the individual relation IDs at all;
2744  * none of them can be relevant.
2745  *
2746  * proc->databaseId is set at backend startup time and never changes
2747  * thereafter, so it might be safe to perform this test before
2748  * acquiring &proc->fpInfoLock. In particular, it's certainly safe to
2749  * assume that if the target backend holds any fast-path locks, it
2750  * must have performed a memory-fencing operation (in particular, an
2751  * LWLock acquisition) since setting proc->databaseId. However, it's
2752  * less clear that our backend is certain to have performed a memory
2753  * fencing operation since the other backend set proc->databaseId. So
2754  * for now, we test it after acquiring the LWLock just to be safe.
2755  */
2756  if (proc->databaseId != locktag->locktag_field1)
2757  {
2758  LWLockRelease(&proc->fpInfoLock);
2759  continue;
2760  }
2761 
2762  for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
2763  {
2764  uint32 lockmode;
2765 
2766  /* Look for an allocated slot matching the given relid. */
2767  if (relid != proc->fpRelId[f] || FAST_PATH_GET_BITS(proc, f) == 0)
2768  continue;
2769 
2770  /* Find or create lock object. */
2771  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2772  for (lockmode = FAST_PATH_LOCKNUMBER_OFFSET;
2773  lockmode < FAST_PATH_LOCKNUMBER_OFFSET + FAST_PATH_BITS_PER_SLOT;
2774  ++lockmode)
2775  {
2776  PROCLOCK *proclock;
2777 
2778  if (!FAST_PATH_CHECK_LOCKMODE(proc, f, lockmode))
2779  continue;
2780  proclock = SetupLockInTable(lockMethodTable, proc, locktag,
2781  hashcode, lockmode);
2782  if (!proclock)
2783  {
2784  LWLockRelease(partitionLock);
2785  LWLockRelease(&proc->fpInfoLock);
2786  return false;
2787  }
2788  GrantLock(proclock->tag.myLock, proclock, lockmode);
2789  FAST_PATH_CLEAR_LOCKMODE(proc, f, lockmode);
2790  }
2791  LWLockRelease(partitionLock);
2792 
2793  /* No need to examine remaining slots. */
2794  break;
2795  }
2796  LWLockRelease(&proc->fpInfoLock);
2797  }
2798  return true;
2799 }
2800 
2801 /*
2802  * FastPathGetRelationLockEntry
2803  * Return the PROCLOCK for a lock originally taken via the fast-path,
2804  * transferring it to the primary lock table if necessary.
2805  *
2806  * Note: caller takes care of updating the locallock object.
2807  */
2808 static PROCLOCK *
2809 FastPathGetRelationLockEntry(LOCALLOCK *locallock)
2810 {
2811  LockMethod lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];
2812  LOCKTAG *locktag = &locallock->tag.lock;
2813  PROCLOCK *proclock = NULL;
2814  LWLock *partitionLock = LockHashPartitionLock(locallock->hashcode);
2815  Oid relid = locktag->locktag_field2;
2816  uint32 f;
2817 
2818  LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
2819 
2820  for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
2821  {
2822  uint32 lockmode;
2823 
2824  /* Look for an allocated slot matching the given relid. */
2825  if (relid != MyProc->fpRelId[f] || FAST_PATH_GET_BITS(MyProc, f) == 0)
2826  continue;
2827 
2828  /* If we don't have a lock of the given mode, forget it! */
2829  lockmode = locallock->tag.mode;
2830  if (!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
2831  break;
2832 
2833  /* Find or create lock object. */
2834  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2835 
2836  proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
2837  locallock->hashcode, lockmode);
2838  if (!proclock)
2839  {
2840  LWLockRelease(partitionLock);
2841  LWLockRelease(&MyProc->fpInfoLock);
2842  ereport(ERROR,
2843  (errcode(ERRCODE_OUT_OF_MEMORY),
2844  errmsg("out of shared memory"),
2845  errhint("You might need to increase max_locks_per_transaction.")));
2846  }
2847  GrantLock(proclock->tag.myLock, proclock, lockmode);
2848  FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
2849 
2850  LWLockRelease(partitionLock);
2851 
2852  /* No need to examine remaining slots. */
2853  break;
2854  }
2855 
2856  LWLockRelease(&MyProc->fpInfoLock);
2857 
2858  /* Lock may have already been transferred by some other backend. */
2859  if (proclock == NULL)
2860  {
2861  LOCK *lock;
2862  PROCLOCKTAG proclocktag;
2863  uint32 proclock_hashcode;
2864 
2865  LWLockAcquire(partitionLock, LW_SHARED);
2866 
2867  lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
2868  (void *) locktag,
2869  locallock->hashcode,
2870  HASH_FIND,
2871  NULL);
2872  if (!lock)
2873  elog(ERROR, "failed to re-find shared lock object");
2874 
2875  proclocktag.myLock = lock;
2876  proclocktag.myProc = MyProc;
2877 
2878  proclock_hashcode = ProcLockHashCode(&proclocktag, locallock->hashcode);
2879  proclock = (PROCLOCK *)
2880  hash_search_with_hash_value(LockMethodProcLockHash,
2881  (void *) &proclocktag,
2882  proclock_hashcode,
2883  HASH_FIND,
2884  NULL);
2885  if (!proclock)
2886  elog(ERROR, "failed to re-find shared proclock object");
2887  LWLockRelease(partitionLock);
2888  }
2889 
2890  return proclock;
2891 }
2892 
2893 /*
2894  * GetLockConflicts
2895  * Get an array of VirtualTransactionIds of xacts currently holding locks
2896  * that would conflict with the specified lock/lockmode.
2897  * xacts merely awaiting such a lock are NOT reported.
2898  *
2899  * The result array is palloc'd and is terminated with an invalid VXID.
2900  * *countp, if not null, is updated to the number of items set.
2901  *
2902  * Of course, the result could be out of date by the time it's returned,
2903  * so use of this function has to be thought about carefully.
2904  *
2905  * Note we never include the current xact's vxid in the result array,
2906  * since an xact never blocks itself. Also, prepared transactions are
2907  * ignored, which is a bit more debatable but is appropriate for current
2908  * uses of the result.
2909  */
2910 VirtualTransactionId *
2911 GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
2912 {
2913  static VirtualTransactionId *vxids;
2914  LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
2915  LockMethod lockMethodTable;
2916  LOCK *lock;
2917  LOCKMASK conflictMask;
2918  SHM_QUEUE *procLocks;
2919  PROCLOCK *proclock;
2920  uint32 hashcode;
2921  LWLock *partitionLock;
2922  int count = 0;
2923  int fast_count = 0;
2924 
2925  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2926  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2927  lockMethodTable = LockMethods[lockmethodid];
2928  if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
2929  elog(ERROR, "unrecognized lock mode: %d", lockmode);
2930 
2931  /*
2932  * Allocate memory to store results, and fill with InvalidVXID. We only
2933  * need enough space for MaxBackends + a terminator, since prepared xacts
2934  * don't count. InHotStandby allocate once in TopMemoryContext.
2935  */
2936  if (InHotStandby)
2937  {
2938  if (vxids == NULL)
2939  vxids = (VirtualTransactionId *)
2940  MemoryContextAlloc(TopMemoryContext,
2941  sizeof(VirtualTransactionId) * (MaxBackends + 1));
2942  }
2943  else
2944  vxids = (VirtualTransactionId *)
2945  palloc0(sizeof(VirtualTransactionId) * (MaxBackends + 1));
2946 
2947  /* Compute hash code and partition lock, and look up conflicting modes. */
2948  hashcode = LockTagHashCode(locktag);
2949  partitionLock = LockHashPartitionLock(hashcode);
2950  conflictMask = lockMethodTable->conflictTab[lockmode];
2951 
2952  /*
2953  * Fast path locks might not have been entered in the primary lock table.
2954  * If the lock we're dealing with could conflict with such a lock, we must
2955  * examine each backend's fast-path array for conflicts.
2956  */
2957  if (ConflictsWithRelationFastPath(locktag, lockmode))
2958  {
2959  int i;
2960  Oid relid = locktag->locktag_field2;
2961  VirtualTransactionId vxid;
2962 
2963  /*
2964  * Iterate over relevant PGPROCs. Anything held by a prepared
2965  * transaction will have been transferred to the primary lock table,
2966  * so we need not worry about those. This is all a bit fuzzy, because
2967  * new locks could be taken after we've visited a particular
2968  * partition, but the callers had better be prepared to deal with that
2969  * anyway, since the locks could equally well be taken between the
2970  * time we return the value and the time the caller does something
2971  * with it.
2972  */
2973  for (i = 0; i < ProcGlobal->allProcCount; i++)
2974  {
2975  PGPROC *proc = &ProcGlobal->allProcs[i];
2976  uint32 f;
2977 
2978  /* A backend never blocks itself */
2979  if (proc == MyProc)
2980  continue;
2981 
2982  LWLockAcquire(&proc->fpInfoLock, LW_SHARED);
2983 
2984  /*
2985  * If the target backend isn't referencing the same database as
2986  * the lock, then we needn't examine the individual relation IDs
2987  * at all; none of them can be relevant.
2988  *
2989  * See FastPathTransferRelationLocks() for discussion of why we do
2990  * this test after acquiring the lock.
2991  */
2992  if (proc->databaseId != locktag->locktag_field1)
2993  {
2994  LWLockRelease(&proc->fpInfoLock);
2995  continue;
2996  }
2997 
2998  for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
2999  {
3000  uint32 lockmask;
3001 
3002  /* Look for an allocated slot matching the given relid. */
3003  if (relid != proc->fpRelId[f])
3004  continue;
3005  lockmask = FAST_PATH_GET_BITS(proc, f);
3006  if (!lockmask)
3007  continue;
3008  lockmask <<= FAST_PATH_LOCKNUMBER_OFFSET;
3009 
3010  /*
3011  * There can only be one entry per relation, so if we found it
3012  * and it doesn't conflict, we can skip the rest of the slots.
3013  */
3014  if ((lockmask & conflictMask) == 0)
3015  break;
3016 
3017  /* Conflict! */
3018  GET_VXID_FROM_PGPROC(vxid, *proc);
3019 
3020  /*
3021  * If we see an invalid VXID, then either the xact has already
3022  * committed (or aborted), or it's a prepared xact. In either
3023  * case we may ignore it.
3024  */
3025  if (VirtualTransactionIdIsValid(vxid))
3026  vxids[count++] = vxid;
3027 
3028  /* No need to examine remaining slots. */
3029  break;
3030  }
3031 
3032  LWLockRelease(&proc->fpInfoLock);
3033  }
3034  }
3035 
3036  /* Remember how many fast-path conflicts we found. */
3037  fast_count = count;
3038 
3039  /*
3040  * Look up the lock object matching the tag.
3041  */
3042  LWLockAcquire(partitionLock, LW_SHARED);
3043 
3044  lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
3045  (const void *) locktag,
3046  hashcode,
3047  HASH_FIND,
3048  NULL);
3049  if (!lock)
3050  {
3051  /*
3052  * If the lock object doesn't exist, there is nothing holding a lock
3053  * on this lockable object.
3054  */
3055  LWLockRelease(partitionLock);
3056  vxids[count].backendId = InvalidBackendId;
3057  vxids[count].localTransactionId = InvalidLocalTransactionId;
3058  if (countp)
3059  *countp = count;
3060  return vxids;
3061  }
3062 
3063  /*
3064  * Examine each existing holder (or awaiter) of the lock.
3065  */
3066 
3067  procLocks = &(lock->procLocks);
3068 
3069  proclock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks,
3070  offsetof(PROCLOCK, lockLink));
3071 
3072  while (proclock)
3073  {
3074  if (conflictMask & proclock->holdMask)
3075  {
3076  PGPROC *proc = proclock->tag.myProc;
3077 
3078  /* A backend never blocks itself */
3079  if (proc != MyProc)
3080  {
3081  VirtualTransactionId vxid;
3082 
3083  GET_VXID_FROM_PGPROC(vxid, *proc);
3084 
3085  /*
3086  * If we see an invalid VXID, then either the xact has already
3087  * committed (or aborted), or it's a prepared xact. In either
3088  * case we may ignore it.
3089  */
3090  if (VirtualTransactionIdIsValid(vxid))
3091  {
3092  int i;
3093 
3094  /* Avoid duplicate entries. */
3095  for (i = 0; i < fast_count; ++i)
3096  if (VirtualTransactionIdEquals(vxids[i], vxid))
3097  break;
3098  if (i >= fast_count)
3099  vxids[count++] = vxid;
3100  }
3101  }
3102  }
3103 
3104  proclock = (PROCLOCK *) SHMQueueNext(procLocks, &proclock->lockLink,
3105  offsetof(PROCLOCK, lockLink));
3106  }
3107 
3108  LWLockRelease(partitionLock);
3109 
3110  if (count > MaxBackends) /* should never happen */
3111  elog(PANIC, "too many conflicting locks found");
3112 
3113  vxids[count].backendId = InvalidBackendId;
3114  vxids[count].localTransactionId = InvalidLocalTransactionId;
3115  if (countp)
3116  *countp = count;
3117  return vxids;
3118 }
3119 
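/*
 * [Editorial illustration -- not part of lock.c] Consuming the result of
 * GetLockConflicts().  Because the array is terminated with an invalid VXID,
 * a caller (for example, recovery-conflict handling on a standby) can walk
 * it without knowing the count in advance:
 */
static void
report_conflicts_sketch(const LOCKTAG *locktag)
{
	VirtualTransactionId *vxids;
	int			i;

	vxids = GetLockConflicts(locktag, AccessExclusiveLock, NULL);
	for (i = 0; VirtualTransactionIdIsValid(vxids[i]); i++)
		elog(LOG, "vxid %d/%u holds a conflicting lock",
			 vxids[i].backendId, vxids[i].localTransactionId);
}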
3120 /*
3121  * Find a lock in the shared lock table and release it. It is the caller's
3122  * responsibility to verify that this is a sane thing to do. (For example, it
3123  * would be bad to release a lock here if there might still be a LOCALLOCK
3124  * object with pointers to it.)
3125  *
3126  * We currently use this in two situations: first, to release locks held by
3127  * prepared transactions on commit (see lock_twophase_postcommit); and second,
3128  * to release locks taken via the fast-path, transferred to the main hash
3129  * table, and then released (see LockReleaseAll).
3130  */
3131 static void
3132 LockRefindAndRelease(LockMethod lockMethodTable, PGPROC *proc,
3133  LOCKTAG *locktag, LOCKMODE lockmode,
3134  bool decrement_strong_lock_count)
3135 {
3136  LOCK *lock;
3137  PROCLOCK *proclock;
3138  PROCLOCKTAG proclocktag;
3139  uint32 hashcode;
3140  uint32 proclock_hashcode;
3141  LWLock *partitionLock;
3142  bool wakeupNeeded;
3143 
3144  hashcode = LockTagHashCode(locktag);
3145  partitionLock = LockHashPartitionLock(hashcode);
3146 
3147  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
3148 
3149  /*
3150  * Re-find the lock object (it had better be there).
3151  */
3152  lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
3153  (void *) locktag,
3154  hashcode,
3155  HASH_FIND,
3156  NULL);
3157  if (!lock)
3158  elog(PANIC, "failed to re-find shared lock object");
3159 
3160  /*
3161  * Re-find the proclock object (ditto).
3162  */
3163  proclocktag.myLock = lock;
3164  proclocktag.myProc = proc;
3165 
3166  proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
3167 
3168  proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
3169  (void *) &proclocktag,
3170  proclock_hashcode,
3171  HASH_FIND,
3172  NULL);
3173  if (!proclock)
3174  elog(PANIC, "failed to re-find shared proclock object");
3175 
3176  /*
3177  * Double-check that we are actually holding a lock of the type we want to
3178  * release.
3179  */
3180  if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
3181  {
3182  PROCLOCK_PRINT("lock_twophase_postcommit: WRONGTYPE", proclock);
3183  LWLockRelease(partitionLock);
3184  elog(WARNING, "you don't own a lock of type %s",
3185  lockMethodTable->lockModeNames[lockmode]);
3186  return;
3187  }
3188 
3189  /*
3190  * Do the releasing. CleanUpLock will waken any now-wakable waiters.
3191  */
3192  wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
3193 
3194  CleanUpLock(lock, proclock,
3195  lockMethodTable, hashcode,
3196  wakeupNeeded);
3197 
3198  LWLockRelease(partitionLock);
3199 
3200  /*
3201  * Decrement strong lock count. This logic is needed only for 2PC.
3202  */
3203  if (decrement_strong_lock_count
3204  && ConflictsWithRelationFastPath(locktag, lockmode))
3205  {
3206  uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
3207 
3208  SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
3209  Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
3210  FastPathStrongRelationLocks->count[fasthashcode]--;
3211  SpinLockRelease(&FastPathStrongRelationLocks->mutex);
3212  }
3213 }
3214 
3215 /*
3216  * AtPrepare_Locks
3217  * Do the preparatory work for a PREPARE: make 2PC state file records
3218  * for all locks currently held.
3219  *
3220  * Session-level locks are ignored, as are VXID locks.
3221  *
3222  * There are some special cases that we error out on: we can't be holding any
3223  * locks at both session and transaction level (since we must either keep or
3224  * give away the PROCLOCK object), and we can't be holding any locks on
3225  * temporary objects (since that would mess up the current backend if it tries
3226  * to exit before the prepared xact is committed).
3227  */
3228 void
3229 AtPrepare_Locks(void)
3230 {
3231  HASH_SEQ_STATUS status;
3232  LOCALLOCK *locallock;
3233 
3234  /*
3235  * For the most part, we don't need to touch shared memory for this ---
3236  * all the necessary state information is in the locallock table.
3237  * Fast-path locks are an exception, however: we move any such locks to
3238  * the main table before allowing PREPARE TRANSACTION to succeed.
3239  */
3240  hash_seq_init(&status, LockMethodLocalHash);
3241 
3242  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3243  {
3244  TwoPhaseLockRecord record;
3245  LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3246  bool haveSessionLock;
3247  bool haveXactLock;
3248  int i;
3249 
3250  /*
3251  * Ignore VXID locks. We don't want those to be held by prepared
3252  * transactions, since they aren't meaningful after a restart.
3253  */
3254  if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3255  continue;
3256 
3257  /* Ignore it if we don't actually hold the lock */
3258  if (locallock->nLocks <= 0)
3259  continue;
3260 
3261  /* Scan to see whether we hold it at session or transaction level */
3262  haveSessionLock = haveXactLock = false;
3263  for (i = locallock->numLockOwners - 1; i >= 0; i--)
3264  {
3265  if (lockOwners[i].owner == NULL)
3266  haveSessionLock = true;
3267  else
3268  haveXactLock = true;
3269  }
3270 
3271  /* Ignore it if we have only session lock */
3272  if (!haveXactLock)
3273  continue;
3274 
3275  /*
3276  * If we have both session- and transaction-level locks, fail. This
3277  * should never happen with regular locks, since we only take those at
3278  * session level in some special operations like VACUUM. It's
3279  * possible to hit this with advisory locks, though.
3280  *
3281  * It would be nice if we could keep the session hold and give away
3282  * the transactional hold to the prepared xact. However, that would
3283  * require two PROCLOCK objects, and we cannot be sure that another
3284  * PROCLOCK will be available when it comes time for PostPrepare_Locks
3285  * to do the deed. So for now, we error out while we can still do so
3286  * safely.
3287  */
3288  if (haveSessionLock)
3289  ereport(ERROR,
3290  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3291  errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3292 
3293  /*
3294  * If the local lock was taken via the fast-path, we need to move it
3295  * to the primary lock table, or just get a pointer to the existing
3296  * primary lock table entry if by chance it's already been
3297  * transferred.
3298  */
3299  if (locallock->proclock == NULL)
3300  {
3301  locallock->proclock = FastPathGetRelationLockEntry(locallock);
3302  locallock->lock = locallock->proclock->tag.myLock;
3303  }
3304 
3305  /*
3306  * Arrange to not release any strong lock count held by this lock
3307  * entry. We must retain the count until the prepared transaction is
3308  * committed or rolled back.
3309  */
3310  locallock->holdsStrongLockCount = false;
3311 
3312  /*
3313  * Create a 2PC record.
3314  */
3315  memcpy(&(record.locktag), &(locallock->tag.lock), sizeof(LOCKTAG));
3316  record.lockmode = locallock->tag.mode;
3317 
3318  RegisterTwoPhaseRecord(TWOPHASE_RM_LOCK_ID, 0,
3319  &record, sizeof(TwoPhaseLockRecord));
3320  }
3321 }
3322 
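/*
 * [Editorial note -- not part of lock.c] Each TwoPhaseLockRecord written
 * above is replayed when the prepared transaction is later committed or
 * rolled back: the two-phase resource manager callbacks registered under
 * TWOPHASE_RM_LOCK_ID (see lock_twophase_postcommit and friends) re-find the
 * lock and release it via LockRefindAndRelease().
 */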
3323 /*
3324  * PostPrepare_Locks
3325  * Clean up after successful PREPARE
3326  *
3327  * Here, we want to transfer ownership of our locks to a dummy PGPROC
3328  * that's now associated with the prepared transaction, and we want to
3329  * clean out the corresponding entries in the LOCALLOCK table.
3330  *
3331  * Note: by removing the LOCALLOCK entries, we are leaving dangling
3332  * pointers in the transaction's resource owner. This is OK at the
3333  * moment since resowner.c doesn't try to free locks retail at a toplevel
3334  * transaction commit or abort. We could alternatively zero out nLocks
3335  * and leave the LOCALLOCK entries to be garbage-collected by LockReleaseAll,
3336  * but that probably costs more cycles.
3337  */
3338 void
3339 PostPrepare_Locks(TransactionId xid)
3340 {
3341  PGPROC *newproc = TwoPhaseGetDummyProc(xid, false);
3342  HASH_SEQ_STATUS status;
3343  LOCALLOCK *locallock;
3344  LOCK *lock;
3345  PROCLOCK *proclock;
3346  PROCLOCKTAG proclocktag;
3347  int partition;
3348 
3349  /* Can't prepare a lock group follower. */
3350  Assert(MyProc->lockGroupLeader == NULL ||
3351  MyProc->lockGroupLeader == MyProc);
3352 
3353  /* This is a critical section: any error means big trouble */
3354  START_CRIT_SECTION();
3355 
3356  /*
3357  * First we run through the locallock table and get rid of unwanted
3358  * entries, then we scan the process's proclocks and transfer them to the
3359  * target proc.
3360  *
3361  * We do this separately because we may have multiple locallock entries
3362  * pointing to the same proclock, and we daren't end up with any dangling
3363  * pointers.
3364  */
3365  hash_seq_init(&status, LockMethodLocalHash);
3366 
3367  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3368  {
3369  LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3370  bool haveSessionLock;
3371  bool haveXactLock;
3372  int i;
3373 
3374  if (locallock->proclock == NULL || locallock->lock == NULL)
3375  {
3376  /*
3377  * We must've run out of shared memory while trying to set up this
3378  * lock. Just forget the local entry.
3379  */
3380  Assert(locallock->nLocks == 0);
3381  RemoveLocalLock(locallock);
3382  continue;
3383  }
3384 
3385  /* Ignore VXID locks */
3386  if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3387  continue;
3388 
3389  /* Scan to see whether we hold it at session or transaction level */
3390  haveSessionLock = haveXactLock = false;
3391  for (i = locallock->numLockOwners - 1; i >= 0; i--)
3392  {
3393  if (lockOwners[i].owner == NULL)
3394  haveSessionLock = true;
3395  else
3396  haveXactLock = true;
3397  }
3398 
3399  /* Ignore it if we have only session lock */
3400  if (!haveXactLock)
3401  continue;
3402 
3403  /* This can't happen, because we already checked it */
3404  if (haveSessionLock)
3405  ereport(PANIC,
3406  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3407  errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3408 
3409  /* Mark the proclock to show we need to release this lockmode */
3410  if (locallock->nLocks > 0)
3411  locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
3412 
3413  /* And remove the locallock hashtable entry */
3414  RemoveLocalLock(locallock);
3415  }
3416 
3417  /*
3418  * Now, scan each lock partition separately.
3419  */
3420  for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
3421  {
3422  LWLock *partitionLock;
3423  SHM_QUEUE *procLocks = &(MyProc->myProcLocks[partition]);
3424  PROCLOCK *nextplock;
3425 
3426  partitionLock = LockHashPartitionLockByIndex(partition);
3427 
3428  /*
3429  * If the proclock list for this partition is empty, we can skip
3430  * acquiring the partition lock. This optimization is safer than the
3431  * situation in LockReleaseAll, because we got rid of any fast-path
3432  * locks during AtPrepare_Locks, so there cannot be any case where
3433  * another backend is adding something to our lists now. For safety,
3434  * though, we code this the same way as in LockReleaseAll.
3435  */
3436  if (SHMQueueNext(procLocks, procLocks,
3437  offsetof(PROCLOCK, procLink)) == NULL)
3438  continue; /* needn't examine this partition */
3439 
3440  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
3441 
3442  for (proclock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks,
3443  offsetof(PROCLOCK, procLink));
3444  proclock;
3445  proclock = nextplock)
3446  {
3447  /* Get link first, since we may unlink/relink this proclock */
3448  nextplock = (PROCLOCK *)
3449  SHMQueueNext(procLocks, &proclock->procLink,
3450  offsetof(PROCLOCK, procLink));
3451 
3452  Assert(proclock->tag.myProc == MyProc);
3453 
3454  lock = proclock->tag.myLock;
3455 
3456  /* Ignore VXID locks */
3457  if (lock->tag.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3458  continue;
3459 
3460  PROCLOCK_PRINT("PostPrepare_Locks", proclock);
3461  LOCK_PRINT("PostPrepare_Locks", lock, 0);
3462  Assert(lock->nRequested >= 0);
3463  Assert(lock->nGranted >= 0);
3464  Assert(lock->nGranted <= lock->nRequested);
3465  Assert((proclock->holdMask & ~lock->grantMask) == 0);
3466 
3467  /* Ignore it if nothing to release (must be a session lock) */
3468  if (proclock->releaseMask == 0)
3469  continue;
3470 
3471  /* Else we should be releasing all locks */
3472  if (proclock->releaseMask != proclock->holdMask)
3473  elog(PANIC, "we seem to have dropped a bit somewhere");
3474 
3475  /*
3476  * We cannot simply modify proclock->tag.myProc to reassign
3477  * ownership of the lock, because that's part of the hash key and
3478  * the proclock would then be in the wrong hash chain. Instead
3479  * use hash_update_hash_key. (We used to create a new hash entry,
3480  * but that risks out-of-memory failure if other processes are
3481  * busy making proclocks too.) We must unlink the proclock from
3482  * our procLink chain and put it into the new proc's chain, too.
3483  *
3484  * Note: the updated proclock hash key will still belong to the
3485  * same hash partition, cf proclock_hash(). So the partition lock
3486  * we already hold is sufficient for this.
3487  */
3488  SHMQueueDelete(&proclock->procLink);
3489 
3490  /*
3491  * Create the new hash key for the proclock.
3492  */
3493  proclocktag.myLock = lock;
3494  proclocktag.myProc = newproc;
3495 
3496  /*
3497  * Update groupLeader pointer to point to the new proc. (We'd
3498  * better not be a member of somebody else's lock group!)
3499  */
3500  Assert(proclock->groupLeader == proclock->tag.myProc);
3501  proclock->groupLeader = newproc;
3502 
3503  /*
3504  * Update the proclock. We should not find any existing entry for
3505  * the same hash key, since there can be only one entry for any
3506  * given lock with my own proc.
3507  */
3508  if (!hash_update_hash_key(LockMethodProcLockHash,
3509  (void *) proclock,
3510  (void *) &proclocktag))
3511  elog(PANIC, "duplicate entry found while reassigning a prepared transaction's locks");
3512 
3513  /* Re-link into the new proc's proclock list */
3514  SHMQueueInsertBefore(&(newproc->myProcLocks[partition]),
3515  &proclock->procLink);
3516 
3517  PROCLOCK_PRINT("PostPrepare_Locks: updated", proclock);
3518  } /* loop over PROCLOCKs within this partition */
3519 
3520  LWLockRelease(partitionLock);
3521  } /* loop over partitions */
3522 
3523  END_CRIT_SECTION();
3524 }
3525 
3526 
3527 /*
3528  * Estimate shared-memory space used for lock tables
3529  */
3530 Size
3531 LockShmemSize(void)
3532 {
3533  Size size = 0;
3534  long max_table_size;
3535 
3536  /* lock hash table */
3537  max_table_size = NLOCKENTS();
3538  size = add_size(size, hash_estimate_size(max_table_size, sizeof(LOCK)));
3539 
3540  /* proclock hash table */
3541  max_table_size *= 2;
3542  size = add_size(size, hash_estimate_size(max_table_size, sizeof(PROCLOCK)));
3543 
3544  /*
3545  * Since NLOCKENTS is only an estimate, add 10% safety margin.
3546  */
3547  size = add_size(size, size / 10);
3548 
3549  return size;
3550 }
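As a rough illustration of the arithmetic above (with assumed, not authoritative, settings): for max_locks_per_xact = 64, MaxBackends = 100 and max_prepared_xacts = 0, NLOCKENTS() yields 64 * 100 = 6400, so the estimate covers 6400 LOCK entries plus 2 * 6400 = 12800 PROCLOCK entries, and the final add_size(size, size / 10) pads that total by 10%.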
3551 
3552 /*
3553  * GetLockStatusData - Return a summary of the lock manager's internal
3554  * status, for use in a user-level reporting function.
3555  *
3556  * The return data consists of an array of LockInstanceData objects,
3557  * which are a lightly abstracted version of the PROCLOCK data structures,
3558  * i.e. there is one entry for each unique lock and interested PGPROC.
3559  * It is the caller's responsibility to match up related items (such as
3560  * references to the same lockable object or PGPROC) if wanted.
3561  *
3562  * The design goal is to hold the LWLocks for as short a time as possible;
3563  * thus, this function simply makes a copy of the necessary data and releases
3564  * the locks, allowing the caller to contemplate and format the data for as
3565  * long as it pleases.
3566  */
3567 LockData *
3568 GetLockStatusData(void)
3569 {
3570  LockData *data;
3571  PROCLOCK *proclock;
3572  HASH_SEQ_STATUS seqstat;
3573  int els;
3574  int el;
3575  int i;
3576 
3577  data = (LockData *) palloc(sizeof(LockData));
3578 
3579  /* Guess how much space we'll need. */
3580  els = MaxBackends;
3581  el = 0;
3582  data->locks = (LockInstanceData *) palloc(sizeof(LockInstanceData) * els);
3583 
3584  /*
3585  * First, we iterate through the per-backend fast-path arrays, locking
3586  * them one at a time. This might produce an inconsistent picture of the
3587  * system state, but taking all of those LWLocks at the same time seems
3588  * impractical (in particular, note MAX_SIMUL_LWLOCKS). It shouldn't
3589  * matter too much, because none of these locks can be involved in lock
3590  * conflicts anyway - anything that might must be present in the main lock
3591  * table. (For the same reason, we don't sweat about making leaderPid
3592  * completely valid. We cannot safely dereference another backend's
3593  * lockGroupLeader field without holding all lock partition locks, and
3594  * it's not worth that.)
3595  */
3596  for (i = 0; i < ProcGlobal->allProcCount; ++i)
3597  {
3598  PGPROC *proc = &ProcGlobal->allProcs[i];
3599  uint32 f;
3600 
3601  LWLockAcquire(&proc->fpInfoLock, LW_SHARED);
3602 
3603  for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; ++f)
3604  {
3605  LockInstanceData *instance;
3606  uint32 lockbits = FAST_PATH_GET_BITS(proc, f);
3607 
3608  /* Skip unallocated slots. */
3609  if (!lockbits)
3610  continue;
3611 
3612  if (el >= els)
3613  {
3614  els += MaxBackends;
3615  data->locks = (LockInstanceData *)
3616  repalloc(data->locks, sizeof(LockInstanceData) * els);
3617  }
3618 
3619  instance = &data->locks[el];
3620  SET_LOCKTAG_RELATION(instance->locktag, proc->databaseId,
3621  proc->fpRelId[f]);
3622  instance->holdMask = lockbits << FAST_PATH_LOCKNUMBER_OFFSET;
3623  instance->waitLockMode = NoLock;
3624  instance->backend = proc->backendId;
3625  instance->lxid = proc->lxid;
3626  instance->pid = proc->pid;
3627  instance->leaderPid = proc->pid;
3628  instance->fastpath = true;
3629 
3630  el++;
3631  }
3632 
3633  if (proc->fpVXIDLock)
3634  {
3635  VirtualTransactionId vxid;
3636  LockInstanceData *instance;
3637 
3638  if (el >= els)
3639  {
3640  els += MaxBackends;
3641  data->locks = (LockInstanceData *)
3642  repalloc(data->locks, sizeof(LockInstanceData) * els);
3643  }
3644 
3645  vxid.backendId = proc->backendId;
3646  vxid.localTransactionId = proc->fpLocalTransactionId;
3647 
3648  instance = &data->locks[el];
3649  SET_LOCKTAG_VIRTUALTRANSACTION(instance->locktag, vxid);
3650  instance->holdMask = LOCKBIT_ON(ExclusiveLock);
3651  instance->waitLockMode = NoLock;
3652  instance->backend = proc->backendId;
3653  instance->lxid = proc->lxid;
3654  instance->pid = proc->pid;
3655  instance->leaderPid = proc->pid;
3656  instance->fastpath = true;
3657 
3658  el++;
3659  }
3660 
3661  LWLockRelease(&proc->fpInfoLock);
3662  }
3663 
3664  /*
3665  * Next, acquire lock on the entire shared lock data structure. We do
3666  * this so that, at least for locks in the primary lock table, the state
3667  * will be self-consistent.
3668  *
3669  * Since this is a read-only operation, we take shared instead of
3670  * exclusive lock. There's not a whole lot of point to this, because all
3671  * the normal operations require exclusive lock, but it doesn't hurt
3672  * anything either. It will at least allow two backends to do
3673  * GetLockStatusData in parallel.
3674  *
3675  * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
3676  */
3677  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
3678  LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);
3679 
3680  /* Now we can safely count the number of proclocks */
3681  data->nelements = el + hash_get_num_entries(LockMethodProcLockHash);
3682  if (data->nelements > els)
3683  {
3684  els = data->nelements;
3685  data->locks = (LockInstanceData *)
3686  repalloc(data->locks, sizeof(LockInstanceData) * els);
3687  }
3688 
3689  /* Now scan the tables to copy the data */
3690  hash_seq_init(&seqstat, LockMethodProcLockHash);
3691 
3692  while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
3693  {
3694  PGPROC *proc = proclock->tag.myProc;
3695  LOCK *lock = proclock->tag.myLock;
3696  LockInstanceData *instance = &data->locks[el];
3697 
3698  memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
3699  instance->holdMask = proclock->holdMask;
3700  if (proc->waitLock == proclock->tag.myLock)
3701  instance->waitLockMode = proc->waitLockMode;
3702  else
3703  instance->waitLockMode = NoLock;
3704  instance->backend = proc->backendId;
3705  instance->lxid = proc->lxid;
3706  instance->pid = proc->pid;
3707  instance->leaderPid = proclock->groupLeader->pid;
3708  instance->fastpath = false;
3709 
3710  el++;
3711  }
3712 
3713  /*
3714  * And release locks. We do this in reverse order for two reasons: (1)
3715  * Anyone else who needs more than one of the locks will be trying to lock
3716  * them in increasing order; we don't want to release the other process
3717  * until it can get all the locks it needs. (2) This avoids O(N^2)
3718  * behavior inside LWLockRelease.
3719  */
3720  for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
3721  LWLockRelease(LockHashPartitionLockByIndex(i));
3722 
3723  Assert(el == data->nelements);
3724 
3725  return data;
3726 }
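A minimal sketch of how a reporting function might consume the returned array (illustrative only; the in-tree consumer is pg_lock_status() in utils/adt/lockfuncs.c, and log_lock_summary below is a hypothetical name):

#include "postgres.h"
#include "storage/lock.h"

static void
log_lock_summary(void)
{
	LockData   *lockData = GetLockStatusData();
	int			i;

	for (i = 0; i < lockData->nelements; i++)
	{
		LockInstanceData *instance = &lockData->locks[i];

		/*
		 * holdMask is a bitmask of granted modes; waitLockMode is NoLock
		 * unless the reported backend is waiting on this very lock.
		 */
		elog(LOG, "pid %d holds mask %x, waits in mode %d (fastpath: %d)",
			 instance->pid, (unsigned) instance->holdMask,
			 instance->waitLockMode, (int) instance->fastpath);
	}
}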
3727 
3728 /*
3729  * GetBlockerStatusData - Return a summary of the lock manager's state
3730  * concerning locks that are blocking the specified PID or any member of
3731  * the PID's lock group, for use in a user-level reporting function.
3732  *
3733  * For each PID within the lock group that is awaiting some heavyweight lock,
3734  * the return data includes an array of LockInstanceData objects, which are
3735  * the same data structure used by GetLockStatusData; but unlike that function,
3736  * this one reports only the PROCLOCKs associated with the lock that that PID
3737  * is blocked on. (Hence, all the locktags should be the same for any one
3738  * blocked PID.) In addition, we return an array of the PIDs of those backends
3739  * that are ahead of the blocked PID in the lock's wait queue. These can be
3740  * compared with the PIDs in the LockInstanceData objects to determine which
3741  * waiters are ahead of or behind the blocked PID in the queue.
3742  *
3743  * If blocked_pid isn't a valid backend PID or nothing in its lock group is
3744  * waiting on any heavyweight lock, return empty arrays.
3745  *
3746  * The design goal is to hold the LWLocks for as short a time as possible;
3747  * thus, this function simply makes a copy of the necessary data and releases
3748  * the locks, allowing the caller to contemplate and format the data for as
3749  * long as it pleases.
3750  */
3751 BlockedProcsData *
3752 GetBlockerStatusData(int blocked_pid)
3753 {
3754  BlockedProcsData *data;
3755  PGPROC *proc;
3756  int i;
3757 
3758  data = (BlockedProcsData *) palloc(sizeof(BlockedProcsData));
3759 
3760  /*
3761  * Guess how much space we'll need, and preallocate. Most of the time
3762  * this will avoid needing to do repalloc while holding the LWLocks. (We
3763  * assume, but check with an Assert, that MaxBackends is enough entries
3764  * for the procs[] array; the other two could need enlargement, though.)
3765  */
3766  data->nprocs = data->nlocks = data->npids = 0;
3767  data->maxprocs = data->maxlocks = data->maxpids = MaxBackends;
3768  data->procs = (BlockedProcData *) palloc(sizeof(BlockedProcData) * data->maxprocs);
3769  data->locks = (LockInstanceData *) palloc(sizeof(LockInstanceData) * data->maxlocks);
3770  data->waiter_pids = (int *) palloc(sizeof(int) * data->maxpids);
3771 
3772  /*
3773  * In order to search the ProcArray for blocked_pid and assume that that
3774  * entry won't immediately disappear under us, we must hold ProcArrayLock.
3775  * In addition, to examine the lock grouping fields of any other backend,
3776  * we must hold all the hash partition locks. (Only one of those locks is
3777  * actually relevant for any one lock group, but we can't know which one
3778  * ahead of time.) It's fairly annoying to hold all those locks
3779  * throughout this, but it's no worse than GetLockStatusData(), and it
3780  * does have the advantage that we're guaranteed to return a
3781  * self-consistent instantaneous state.
3782  */
3783  LWLockAcquire(ProcArrayLock, LW_SHARED);
3784 
3785  proc = BackendPidGetProcWithLock(blocked_pid);
3786 
3787  /* Nothing to do if it's gone */
3788  if (proc != NULL)
3789  {
3790  /*
3791  * Acquire lock on the entire shared lock data structure. See notes
3792  * in GetLockStatusData().
3793  */
3794  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
3795  LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);
3796 
3797  if (proc->lockGroupLeader == NULL)
3798  {
3799  /* Easy case, proc is not a lock group member */
3800  GetSingleProcBlockerStatusData(proc, data);
3801  }
3802  else
3803  {
3804  /* Examine all procs in proc's lock group */
3805  dlist_iter iter;
3806 
3807  dlist_foreach(iter, &proc->lockGroupLeader->lockGroupMembers)
3808  {
3809  PGPROC *memberProc;
3810 
3811  memberProc = dlist_container(PGPROC, lockGroupLink, iter.cur);
3812  GetSingleProcBlockerStatusData(memberProc, data);
3813  }
3814  }
3815 
3816  /*
3817  * And release locks. See notes in GetLockStatusData().
3818  */
3819  for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
3820  LWLockRelease(LockHashPartitionLockByIndex(i));
3821 
3822  Assert(data->nprocs <= data->maxprocs);
3823  }
3824 
3825  LWLockRelease(ProcArrayLock);
3826 
3827  return data;
3828 }
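A sketch of walking the returned arrays (illustrative only; the in-tree consumer is pg_blocking_pids() in utils/adt/lockfuncs.c, and report_blockers below is a hypothetical name):

static void
report_blockers(int blocked_pid)
{
	BlockedProcsData *data = GetBlockerStatusData(blocked_pid);
	int			p;

	for (p = 0; p < data->nprocs; p++)
	{
		BlockedProcData *bproc = &data->procs[p];
		int			i;

		/* PROCLOCKs on the lock this group member is blocked on */
		for (i = 0; i < bproc->num_locks; i++)
		{
			LockInstanceData *instance = &data->locks[bproc->first_lock + i];

			elog(LOG, "pid %d is blocked on a lock involving pid %d (leader %d)",
				 bproc->pid, instance->pid, instance->leaderPid);
		}

		/* backends queued ahead of this member on that lock */
		for (i = 0; i < bproc->num_waiters; i++)
			elog(LOG, "pid %d: pid %d is ahead in the wait queue",
				 bproc->pid, data->waiter_pids[bproc->first_waiter + i]);
	}
}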
3829 
3830 /* Accumulate data about one possibly-blocked proc for GetBlockerStatusData */
3831 static void
3832 GetSingleProcBlockerStatusData(PGPROC *blocked_proc, BlockedProcsData *data)
3833 {
3834  LOCK *theLock = blocked_proc->waitLock;
3835  BlockedProcData *bproc;
3836  SHM_QUEUE *procLocks;
3837  PROCLOCK *proclock;
3838  PROC_QUEUE *waitQueue;
3839  PGPROC *proc;
3840  int queue_size;
3841  int i;
3842 
3843  /* Nothing to do if this proc is not blocked */
3844  if (theLock == NULL)
3845  return;
3846 
3847  /* Set up a procs[] element */
3848  bproc = &data->procs[data->nprocs++];
3849  bproc->pid = blocked_proc->pid;
3850  bproc->first_lock = data->nlocks;
3851  bproc->first_waiter = data->npids;
3852 
3853  /*
3854  * We may ignore the proc's fast-path arrays, since nothing in those could
3855  * be related to a contended lock.
3856  */
3857 
3858  /* Collect all PROCLOCKs associated with theLock */
3859  procLocks = &(theLock->procLocks);
3860  proclock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks,
3861  offsetof(PROCLOCK, lockLink));
3862  while (proclock)
3863  {
3864  PGPROC *proc = proclock->tag.myProc;
3865  LOCK *lock = proclock->tag.myLock;
3866  LockInstanceData *instance;
3867 
3868  if (data->nlocks >= data->maxlocks)
3869  {
3870  data->maxlocks += MaxBackends;
3871  data->locks = (LockInstanceData *)
3872  repalloc(data->locks, sizeof(LockInstanceData) * data->maxlocks);
3873  }
3874 
3875  instance = &data->locks[data->nlocks];
3876  memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
3877  instance->holdMask = proclock->holdMask;
3878  if (proc->waitLock == lock)
3879  instance->waitLockMode = proc->waitLockMode;
3880  else
3881  instance->waitLockMode = NoLock;
3882  instance->backend = proc->backendId;
3883  instance->lxid = proc->lxid;
3884  instance->pid = proc->pid;
3885  instance->leaderPid = proclock->groupLeader->pid;
3886  instance->fastpath = false;
3887  data->nlocks++;
3888 
3889  proclock = (PROCLOCK *) SHMQueueNext(procLocks, &proclock->lockLink,
3890  offsetof(PROCLOCK, lockLink));
3891  }
3892 
3893  /* Enlarge waiter_pids[] if it's too small to hold all wait queue PIDs */
3894  waitQueue = &(theLock->waitProcs);
3895  queue_size = waitQueue->size;
3896 
3897  if (queue_size > data->maxpids - data->npids)
3898  {
3899  data->maxpids = Max(data->maxpids + MaxBackends,
3900  data->npids + queue_size);
3901  data->waiter_pids = (int *) repalloc(data->waiter_pids,
3902  sizeof(int) * data->maxpids);
3903  }
3904 
3905  /* Collect PIDs from the lock's wait queue, stopping at blocked_proc */
3906  proc = (PGPROC *) waitQueue->links.next;
3907  for (i = 0; i < queue_size; i++)
3908  {
3909  if (proc == blocked_proc)
3910  break;
3911  data->waiter_pids[data->npids++] = proc->pid;
3912  proc = (PGPROC *) proc->links.next;
3913  }
3914 
3915  bproc->num_locks = data->nlocks - bproc->first_lock;
3916  bproc->num_waiters = data->npids - bproc->first_waiter;
3917 }
3918 
3919 /*
3920  * Returns a list of currently held AccessExclusiveLocks, for use by
3921  * LogStandbySnapshot(). The result is a palloc'd array,
3922  * with the number of elements returned into *nlocks.
3923  *
3924  * XXX This currently takes a lock on all partitions of the lock table,
3925  * but it's possible to do better. By reference counting locks and storing
3926  * the value in the ProcArray entry for each backend we could tell if any
3927  * locks need recording without having to acquire the partition locks and
3928  * scan the lock table. Whether that's worth the additional overhead
3929  * is pretty dubious though.
3930  */
3931 xl_standby_lock *
3932 GetRunningTransactionLocks(int *nlocks)
3933 {
3934  xl_standby_lock *accessExclusiveLocks;
3935  PROCLOCK *proclock;
3936  HASH_SEQ_STATUS seqstat;
3937  int i;
3938  int index;
3939  int els;
3940 
3941  /*
3942  * Acquire lock on the entire shared lock data structure.
3943  *
3944  * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
3945  */
3946  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
3947  LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);
3948 
3949  /* Now we can safely count the number of proclocks */
3950  els = hash_get_num_entries(LockMethodProcLockHash);
3951 
3952  /*
3953  * Allocating enough space for all locks in the lock table is overkill,
3954  * but it's more convenient and faster than having to enlarge the array.
3955  */
3956  accessExclusiveLocks = palloc(els * sizeof(xl_standby_lock));
3957 
3958  /* Now scan the tables to copy the data */
3959  hash_seq_init(&seqstat, LockMethodProcLockHash);
3960 
3961  /*
3962  * If lock is a currently granted AccessExclusiveLock then it will have
3963  * just one proclock holder, so locks are never accessed twice in this
3964  * particular case. Don't copy this code for use elsewhere because in the
3965  * general case this will give you duplicate locks when looking at
3966  * non-exclusive lock types.
3967  */
3968  index = 0;
3969  while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
3970  {
3971  /* make sure this definition matches the one used in LockAcquire */
3972  if ((proclock->holdMask & LOCKBIT_ON(AccessExclusiveLock)) &&
3973  proclock->tag.myLock->tag.locktag_type == LOCKTAG_RELATION)
3974  {
3975  PGPROC *proc = proclock->tag.myProc;
3976  LOCK *lock = proclock->tag.myLock;
3977  TransactionId xid = proc->xid;
3978 
3979  /*
3980  * Don't record locks for transactions if we know they have
3981  * already issued their WAL record for commit but not yet released
3982  * lock. It is still possible that we see locks held by already
3983  * complete transactions, if they haven't yet zeroed their xids.
3984  */
3985  if (!TransactionIdIsValid(xid))
3986  continue;
3987 
3988  accessExclusiveLocks[index].xid = xid;
3989  accessExclusiveLocks[index].dbOid = lock->tag.locktag_field1;
3990  accessExclusiveLocks[index].relOid = lock->tag.locktag_field2;
3991 
3992  index++;
3993  }
3994  }
3995 
3996  Assert(index <= els);
3997 
3998  /*
3999  * And release locks. We do this in reverse order for two reasons: (1)
4000  * Anyone else who needs more than one of the locks will be trying to lock
4001  * them in increasing order; we don't want to release the other process
4002  * until it can get all the locks it needs. (2) This avoids O(N^2)
4003  * behavior inside LWLockRelease.
4004  */
4005  for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
4006  LWLockRelease(LockHashPartitionLockByIndex(i));
4007 
4008  *nlocks = index;
4009  return accessExclusiveLocks;
4010 }
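A sketch of the expected consumption pattern (the in-tree caller is LogStandbySnapshot() in storage/ipc/standby.c, which WAL-logs the entries; the debug output below is purely illustrative):

	xl_standby_lock *locks;
	int			nlocks;

	locks = GetRunningTransactionLocks(&nlocks);
	for (int i = 0; i < nlocks; i++)
		elog(DEBUG1, "xid %u holds AccessExclusiveLock on %u/%u",
			 locks[i].xid, locks[i].dbOid, locks[i].relOid);
	pfree(locks);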
4011 
4012 /* Provide the textual name of any lock mode */
4013 const char *
4014 GetLockmodeName(LOCKMETHODID lockmethodid, LOCKMODE mode)
4015 {
4016  Assert(lockmethodid > 0 && lockmethodid < lengthof(LockMethods));
4017  Assert(mode > 0 && mode <= LockMethods[lockmethodid]->numLockModes);
4018  return LockMethods[lockmethodid]->lockModeNames[mode];
4019 }
4020 
4021 #ifdef LOCK_DEBUG
4022 /*
4023  * Dump all locks in the given proc's myProcLocks lists.
4024  *
4025  * Caller is responsible for having acquired appropriate LWLocks.
4026  */
4027 void
4028 DumpLocks(PGPROC *proc)
4029 {
4030  SHM_QUEUE *procLocks;
4031  PROCLOCK *proclock;
4032  LOCK *lock;
4033  int i;
4034 
4035  if (proc == NULL)
4036  return;
4037 
4038  if (proc->waitLock)
4039  LOCK_PRINT("DumpLocks: waiting on", proc->waitLock, 0);
4040 
4041  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
4042  {
4043  procLocks = &(proc->myProcLocks[i]);
4044 
4045  proclock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks,
4046  offsetof(PROCLOCK, procLink));
4047 
4048  while (proclock)
4049  {
4050  Assert(proclock->tag.myProc == proc);
4051 
4052  lock = proclock->tag.myLock;
4053 
4054  PROCLOCK_PRINT("DumpLocks", proclock);
4055  LOCK_PRINT("DumpLocks", lock, 0);
4056 
4057  proclock = (PROCLOCK *)
4058  SHMQueueNext(procLocks, &proclock->procLink,
4059  offsetof(PROCLOCK, procLink));
4060  }
4061  }
4062 }
4063 
4064 /*
4065  * Dump all lmgr locks.
4066  *
4067  * Caller is responsible for having acquired appropriate LWLocks.
4068  */
4069 void
4070 DumpAllLocks(void)
4071 {
4072  PGPROC *proc;
4073  PROCLOCK *proclock;
4074  LOCK *lock;
4075  HASH_SEQ_STATUS status;
4076 
4077  proc = MyProc;
4078 
4079  if (proc && proc->waitLock)
4080  LOCK_PRINT("DumpAllLocks: waiting on", proc->waitLock, 0);
4081 
4082  hash_seq_init(&status, LockMethodProcLockHash);
4083 
4084  while ((proclock = (PROCLOCK *) hash_seq_search(&status)) != NULL)
4085  {
4086  PROCLOCK_PRINT("DumpAllLocks", proclock);
4087 
4088  lock = proclock->tag.myLock;
4089  if (lock)
4090  LOCK_PRINT("DumpAllLocks", lock, 0);
4091  else
4092  elog(LOG, "DumpAllLocks: proclock->tag.myLock = NULL");
4093  }
4094 }
4095 #endif /* LOCK_DEBUG */
4096 
4097 /*
4098  * LOCK 2PC resource manager's routines
4099  */
4100 
4101 /*
4102  * Re-acquire a lock belonging to a transaction that was prepared.
4103  *
4104  * Because this function is run at db startup, re-acquiring the locks should
4105  * never conflict with running transactions because there are none. We
4106  * assume that the lock state represented by the stored 2PC files is legal.
4107  *
4108  * When switching from Hot Standby mode to normal operation, the locks will
4109  * be already held by the startup process. The locks are acquired for the new
4110  * procs without checking for conflicts, so we don't get a conflict between the
4111  * startup process and the dummy procs, even though we will momentarily have
4112  * a situation where two procs are holding the same AccessExclusiveLock,
4113  * which isn't normally possible because of the conflict. If we're in standby
4114  * mode, but a recovery snapshot hasn't been established yet, it's possible
4115  * that some but not all of the locks are already held by the startup process.
4116  *
4117  * This approach is simple, but also a bit dangerous, because if there isn't
4118  * enough shared memory to acquire the locks, an error will be thrown, which
4119  * is promoted to FATAL and recovery will abort, bringing down postmaster.
4120  * A safer approach would be to transfer the locks like we do in
4121  * AtPrepare_Locks, but then again, in hot standby mode it's possible for
4122  * read-only backends to use up all the shared lock memory anyway, so that
4123  * replaying the WAL record that needs to acquire a lock will throw an error
4124  * and PANIC anyway.
4125  */
4126 void
4127 lock_twophase_recover(TransactionId xid, uint16 info,
4128  void *recdata, uint32 len)
4129 {
4130  TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4131  PGPROC *proc = TwoPhaseGetDummyProc(xid, false);
4132  LOCKTAG *locktag;
4133  LOCKMODE lockmode;
4134  LOCKMETHODID lockmethodid;
4135  LOCK *lock;
4136  PROCLOCK *proclock;
4137  PROCLOCKTAG proclocktag;
4138  bool found;
4139  uint32 hashcode;
4140  uint32 proclock_hashcode;
4141  int partition;
4142  LWLock *partitionLock;
4143  LockMethod lockMethodTable;
4144 
4145  Assert(len == sizeof(TwoPhaseLockRecord));
4146  locktag = &rec->locktag;
4147  lockmode = rec->lockmode;
4148  lockmethodid = locktag->locktag_lockmethodid;
4149 
4150  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4151  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4152  lockMethodTable = LockMethods[lockmethodid];
4153 
4154  hashcode = LockTagHashCode(locktag);
4155  partition = LockHashPartition(hashcode);
4156  partitionLock = LockHashPartitionLock(hashcode);
4157 
4158  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4159 
4160  /*
4161  * Find or create a lock with this tag.
4162  */
4163  lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
4164  (void *) locktag,
4165  hashcode,
4166  HASH_ENTER_NULL,
4167  &found);
4168  if (!lock)
4169  {
4170  LWLockRelease(partitionLock);
4171  ereport(ERROR,
4172  (errcode(ERRCODE_OUT_OF_MEMORY),
4173  errmsg("out of shared memory"),
4174  errhint("You might need to increase max_locks_per_transaction.")));
4175  }
4176 
4177  /*
4178  * if it's a new lock object, initialize it
4179  */
4180  if (!found)
4181  {
4182  lock->grantMask = 0;
4183  lock->waitMask = 0;
4184  SHMQueueInit(&(lock->procLocks));
4185  ProcQueueInit(&(lock->waitProcs));
4186  lock->nRequested = 0;
4187  lock->nGranted = 0;
4188  MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
4189  MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
4190  LOCK_PRINT("lock_twophase_recover: new", lock, lockmode);
4191  }
4192  else
4193  {
4194  LOCK_PRINT("lock_twophase_recover: found", lock, lockmode);
4195  Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
4196  Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
4197  Assert(lock->nGranted <= lock->nRequested);
4198  }
4199 
4200  /*
4201  * Create the hash key for the proclock table.
4202  */
4203  proclocktag.myLock = lock;
4204  proclocktag.myProc = proc;
4205 
4206  proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
4207 
4208  /*
4209  * Find or create a proclock entry with this tag
4210  */
4211  proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
4212  (void *) &proclocktag,
4213  proclock_hashcode,
4214  HASH_ENTER_NULL,
4215  &found);
4216  if (!proclock)
4217  {
4218  /* Oops, not enough shmem for the proclock */
4219  if (lock->nRequested == 0)
4220  {
4221  /*
4222  * There are no other requestors of this lock, so garbage-collect
4223  * the lock object. We *must* do this to avoid a permanent leak
4224  * of shared memory, because there won't be anything to cause
4225  * anyone to release the lock object later.
4226  */
4227  Assert(SHMQueueEmpty(&(lock->procLocks)));
4228  if (!hash_search_with_hash_value(LockMethodLockHash,
4229  (void *) &(lock->tag),
4230  hashcode,
4231  HASH_REMOVE,
4232  NULL))
4233  elog(PANIC, "lock table corrupted");
4234  }
4235  LWLockRelease(partitionLock);
4236  ereport(ERROR,
4237  (errcode(ERRCODE_OUT_OF_MEMORY),
4238  errmsg("out of shared memory"),
4239  errhint("You might need to increase max_locks_per_transaction.")));
4240  }
4241 
4242  /*
4243  * If new, initialize the new entry
4244  */
4245  if (!found)
4246  {
4247  Assert(proc->lockGroupLeader == NULL);
4248  proclock->groupLeader = proc;
4249  proclock->holdMask = 0;
4250  proclock->releaseMask = 0;
4251  /* Add proclock to appropriate lists */
4252  SHMQueueInsertBefore(&lock->procLocks, &proclock->lockLink);
4253  SHMQueueInsertBefore(&(proc->myProcLocks[partition]),
4254  &proclock->procLink);
4255  PROCLOCK_PRINT("lock_twophase_recover: new", proclock);
4256  }
4257  else
4258  {
4259  PROCLOCK_PRINT("lock_twophase_recover: found", proclock);
4260  Assert((proclock->holdMask & ~lock->grantMask) == 0);
4261  }
4262 
4263  /*
4264  * lock->nRequested and lock->requested[] count the total number of
4265  * requests, whether granted or waiting, so increment those immediately.
4266  */
4267  lock->nRequested++;
4268  lock->requested[lockmode]++;
4269  Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
4270 
4271  /*
4272  * We shouldn't already hold the desired lock.
4273  */
4274  if (proclock->holdMask & LOCKBIT_ON(lockmode))
4275  elog(ERROR, "lock %s on object %u/%u/%u is already held",
4276  lockMethodTable->lockModeNames[lockmode],
4277  lock->tag.locktag_field1, lock->tag.locktag_field2,
4278  lock->tag.locktag_field3);
4279 
4280  /*
4281  * We ignore any possible conflicts and just grant ourselves the lock. Not
4282  * only because we don't bother, but also to avoid deadlocks when
4283  * switching from standby to normal mode. See function comment.
4284  */
4285  GrantLock(lock, proclock, lockmode);
4286 
4287  /*
4288  * Bump strong lock count, to make sure any fast-path lock requests won't
4289  * be granted without consulting the primary lock table.
4290  */
4291  if (ConflictsWithRelationFastPath(&lock->tag, lockmode))
4292  {
4293  uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
4294 
4295  SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
4296  FastPathStrongRelationLocks->count[fasthashcode]++;
4297  SpinLockRelease(&FastPathStrongRelationLocks->mutex);
4298  }
4299 
4300  LWLockRelease(partitionLock);
4301 }
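For context, the record being replayed here is the one AtPrepare_Locks() writes for each held lock at PREPARE TRANSACTION time, roughly as follows (a simplified sketch, not a verbatim excerpt; locallock stands for the LOCALLOCK being persisted):

	TwoPhaseLockRecord record;

	record.locktag = locallock->tag.lock;
	record.lockmode = locallock->tag.mode;

	RegisterTwoPhaseRecord(TWOPHASE_RM_LOCK_ID, 0,
						   &record, sizeof(TwoPhaseLockRecord));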
4302 
4303 /*
4304  * Re-acquire a lock belonging to a transaction that was prepared, when
4305  * starting up into hot standby mode.
4306  */
4307 void
4308 lock_twophase_standby_recover(TransactionId xid, uint16 info,
4309  void *recdata, uint32 len)
4310 {
4311  TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4312  LOCKTAG *locktag;
4313  LOCKMODE lockmode;
4314  LOCKMETHODID lockmethodid;
4315 
4316  Assert(len == sizeof(TwoPhaseLockRecord));
4317  locktag = &rec->locktag;
4318  lockmode = rec->lockmode;
4319  lockmethodid = locktag->locktag_lockmethodid;
4320 
4321  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4322  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4323 
4324  if (lockmode == AccessExclusiveLock &&
4325  locktag->locktag_type == LOCKTAG_RELATION)
4326  {
4327  StandbyAcquireAccessExclusiveLock(xid,
4328  locktag->locktag_field1 /* dboid */ ,
4329  locktag->locktag_field2 /* reloid */ );
4330  }
4331 }
4332 
4333 
4334 /*
4335  * 2PC processing routine for COMMIT PREPARED case.
4336  *
4337  * Find and release the lock indicated by the 2PC record.
4338  */
4339 void
4340 lock_twophase_postcommit(TransactionId xid, uint16 info,
4341  void *recdata, uint32 len)
4342 {
4343  TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4344  PGPROC *proc = TwoPhaseGetDummyProc(xid, true);
4345  LOCKTAG *locktag;
4346  LOCKMETHODID lockmethodid;
4347  LockMethod lockMethodTable;
4348 
4349  Assert(len == sizeof(TwoPhaseLockRecord));
4350  locktag = &rec->locktag;
4351  lockmethodid = locktag->locktag_lockmethodid;
4352 
4353  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4354  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4355  lockMethodTable = LockMethods[lockmethodid];
4356 
4357  LockRefindAndRelease(lockMethodTable, proc, locktag, rec->lockmode, true);
4358 }
4359 
4360 /*
4361  * 2PC processing routine for ROLLBACK PREPARED case.
4362  *
4363  * This is actually just the same as the COMMIT case.
4364  */
4365 void
4366 lock_twophase_postabort(TransactionId xid, uint16 info,
4367  void *recdata, uint32 len)
4368 {
4369  lock_twophase_postcommit(xid, info, recdata, len);
4370 }
4371 
4372 /*
4373  * VirtualXactLockTableInsert
4374  *
4375  * Take vxid lock via the fast-path. There can't be any pre-existing
4376  * lockers, as we haven't advertised this vxid via the ProcArray yet.
4377  *
4378  * Since MyProc->fpLocalTransactionId will normally contain the same data
4379  * as MyProc->lxid, you might wonder if we really need both. The
4380  * difference is that MyProc->lxid is set and cleared unlocked, and
4381  * examined by procarray.c, while fpLocalTransactionId is protected by
4382  * fpInfoLock and is used only by the locking subsystem. Doing it this
4383  * way makes it easier to verify that there are no funny race conditions.
4384  *
4385  * We don't bother recording this lock in the local lock table, since it's
4386  * only ever released at the end of a transaction. Instead,
4387  * LockReleaseAll() calls VirtualXactLockTableCleanup().
4388  */
4389 void
4390 VirtualXactLockTableInsert(VirtualTransactionId vxid)
4391 {
4392  Assert(VirtualTransactionIdIsValid(vxid));
4393 
4394  LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
4395 
4396  Assert(MyProc->backendId == vxid.backendId);
4397  Assert(MyProc->fpLocalTransactionId == InvalidLocalTransactionId);
4398  Assert(MyProc->fpVXIDLock == false);
4399 
4400  MyProc->fpVXIDLock = true;
4401  MyProc->fpLocalTransactionId = vxid.localTransactionId;
4402 
4403  LWLockRelease(&MyProc->fpInfoLock);
4404 }
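A simplified sketch of the typical call site at transaction start (cf. StartTransaction() in access/transam/xact.c; the surrounding bookkeeping is omitted):

	VirtualTransactionId vxid;

	vxid.backendId = MyBackendId;
	vxid.localTransactionId = GetNextLocalTransactionId();
	VirtualXactLockTableInsert(vxid);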
4405 
4406 /*
4407  * VirtualXactLockTableCleanup
4408  *
4409  * Check whether a VXID lock has been materialized; if so, release it,
4410  * unblocking waiters.
4411  */
4412 void
4413 VirtualXactLockTableCleanup(void)
4414 {
4415  bool fastpath;
4416  LocalTransactionId lxid;
4417 
4418  Assert(MyBackendId != InvalidBackendId);
4419 
4420  /*
4421  * Clean up shared memory state.
4422  */
4423  LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
4424 
4425  fastpath = MyProc->fpVXIDLock;
4426  lxid = MyProc->fpLocalTransactionId;
4427  MyProc->fpVXIDLock = false;
4428  MyProc->fpLocalTransactionId = InvalidLocalTransactionId;
4429 
4430  LWLockRelease(&MyProc->fpInfoLock);
4431 
4432  /*
4433  * If fpVXIDLock has been cleared without touching fpLocalTransactionId,
4434  * that means someone transferred the lock to the main lock table.
4435  */
4436  if (!fastpath && LocalTransactionIdIsValid(lxid))
4437  {
4438  VirtualTransactionId vxid;
4439  LOCKTAG locktag;
4440 
4441  vxid.backendId = MyBackendId;
4442  vxid.localTransactionId = lxid;
4443  SET_LOCKTAG_VIRTUALTRANSACTION(locktag, vxid);
4444 
4445  LockRefindAndRelease(LockMethods[DEFAULT_LOCKMETHOD], MyProc,
4446  &locktag, ExclusiveLock, false);
4447  }
4448 }
4449 
4450 /*
4451  * VirtualXactLock
4452  *
4453  * If wait = true, wait until the given VXID has been released, and then
4454  * return true.
4455  *
4456  * If wait = false, just check whether the VXID is still running, and return
4457  * true or false.
4458  */
4459 bool
4460 VirtualXactLock(VirtualTransactionId vxid, bool wait)
4461 {
4462  LOCKTAG tag;
4463  PGPROC *proc;
4464 
4465  Assert(VirtualTransactionIdIsValid(vxid));
4466 
4467  SET_LOCKTAG_VIRTUALTRANSACTION(tag, vxid);
4468 
4469  /*
4470  * If a lock table entry must be made, this is the PGPROC on whose behalf
4471  * it must be done. Note that the transaction might end or the PGPROC
4472  * might be reassigned to a new backend before we get around to examining
4473  * it, but it doesn't matter. If we find upon examination that the
4474  * relevant lxid is no longer running here, that's enough to prove that
4475  * it's no longer running anywhere.
4476  */
4477  proc = BackendIdGetProc(vxid.backendId);
4478  if (proc == NULL)
4479  return true;
4480 
4481  /*
4482  * We must acquire this lock before checking the backendId and lxid
4483  * against the ones we're waiting for. The target backend will only set
4484  * or clear lxid while holding this lock.
4485  */
4486  LWLockAcquire(&proc->fpInfoLock, LW_EXCLUSIVE);
4487 
4488  /* If the transaction has ended, our work here is done. */
4489  if (proc->backendId != vxid.backendId
4490  || proc->fpLocalTransactionId != vxid.localTransactionId)
4491  {
4492  LWLockRelease(&proc->fpInfoLock);
4493  return true;
4494  }
4495 
4496  /*
4497  * If we aren't asked to wait, there's no need to set up a lock table
4498  * entry. The transaction is still in progress, so just return false.
4499  */
4500  if (!wait)
4501  {
4502  LWLockRelease(&proc->fpInfoLock);
4503  return false;
4504  }
4505 
4506  /*
4507  * OK, we're going to need to sleep on the VXID. But first, we must set
4508  * up the primary lock table entry, if needed (ie, convert the proc's
4509  * fast-path lock on its VXID to a regular lock).
4510  */
4511  if (proc->fpVXIDLock)
4512  {
4513  PROCLOCK *proclock;
4514  uint32 hashcode;
4515  LWLock *partitionLock;
4516 
4517  hashcode = LockTagHashCode(&tag);
4518 
4519  partitionLock = LockHashPartitionLock(hashcode);
4520  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4521 
4522  proclock = SetupLockInTable(LockMethods[DEFAULT_LOCKMETHOD], proc,
4523  &tag, hashcode, ExclusiveLock);
4524  if (!proclock)
4525  {
4526  LWLockRelease(partitionLock);
4527  LWLockRelease(&proc->fpInfoLock);
4528  ereport(ERROR,
4529  (errcode(ERRCODE_OUT_OF_MEMORY),
4530  errmsg("out of shared memory"),
4531  errhint("You might need to increase max_locks_per_transaction.")));
4532  }
4533  GrantLock(proclock->tag.myLock, proclock, ExclusiveLock);
4534 
4535  LWLockRelease(partitionLock);
4536 
4537  proc->fpVXIDLock = false;
4538  }
4539 
4540  /* Done with proc->fpLockBits */
4541  LWLockRelease(&proc->fpInfoLock);
4542 
4543  /* Time to wait. */
4544  (void) LockAcquire(&tag, ShareLock, false, false);
4545 
4546  LockRelease(&tag, ShareLock, false);
4547  return true;
4548 }
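A sketch of the usual wait-out-the-conflicts pattern built on this function (illustrative only; heap_locktag stands for an assumed, already-initialized LOCKTAG). GetLockConflicts() returns a palloc'd array terminated by an invalid VXID, and each entry can then be waited out:

	VirtualTransactionId *vxids;
	int			i;

	vxids = GetLockConflicts(&heap_locktag, AccessExclusiveLock, NULL);
	for (i = 0; VirtualTransactionIdIsValid(vxids[i]); i++)
		(void) VirtualXactLock(vxids[i], true); /* true => block until it ends */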
4549 
4550 /*
4551  * LockWaiterCount
4552  *
4553  * Find the number of lock requesters on this locktag
4554  */
4555 int
4556 LockWaiterCount(const LOCKTAG *locktag)
4557 {
4558  LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
4559  LOCK *lock;
4560  bool found;
4561  uint32 hashcode;
4562  LWLock *partitionLock;
4563  int waiters = 0;
4564 
4565  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4566  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4567 
4568  hashcode = LockTagHashCode(locktag);
4569  partitionLock = LockHashPartitionLock(hashcode);
4570  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4571 
4572  lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
4573  (const void *) locktag,
4574  hashcode,
4575  HASH_FIND,
4576  &found);
4577  if (found)
4578  {
4579  Assert(lock != NULL);
4580  waiters = lock->nRequested;
4581  }
4582  LWLockRelease(partitionLock);
4583 
4584  return waiters;
4585 }
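A sketch of a typical probe (illustrative only; rel_oid stands for an assumed relation OID). In-tree, lmgr.c wraps this as RelationExtensionLockWaiterCount() so that relation extension can add more blocks at once when many backends are queued:

	LOCKTAG		tag;
	int			nwaiters;

	SET_LOCKTAG_RELATION(tag, MyDatabaseId, rel_oid);
	nwaiters = LockWaiterCount(&tag);	/* requesters, granted or waiting */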