lock.c
1 /*-------------------------------------------------------------------------
2  *
3  * lock.c
4  * POSTGRES primary lock mechanism
5  *
6  * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  *
10  * IDENTIFICATION
11  * src/backend/storage/lmgr/lock.c
12  *
13  * NOTES
14  * A lock table is a shared memory hash table. When
15  * a process tries to acquire a lock of a type that conflicts
16  * with existing locks, it is put to sleep using the routines
17  * in storage/lmgr/proc.c.
18  *
19  * For the most part, this code should be invoked via lmgr.c
20  * or another lock-management module, not directly.
21  *
22  * Interface:
23  *
24  * InitLocks(), GetLocksMethodTable(), GetLockTagsMethodTable(),
25  * LockAcquire(), LockRelease(), LockReleaseAll(),
26  * LockCheckConflicts(), GrantLock()
27  *
28  *-------------------------------------------------------------------------
29  */
30 #include "postgres.h"
31 
32 #include <signal.h>
33 #include <unistd.h>
34 
35 #include "access/transam.h"
36 #include "access/twophase.h"
37 #include "access/twophase_rmgr.h"
38 #include "access/xact.h"
39 #include "access/xlog.h"
40 #include "access/xlogutils.h"
41 #include "miscadmin.h"
42 #include "pg_trace.h"
43 #include "pgstat.h"
44 #include "storage/proc.h"
45 #include "storage/procarray.h"
46 #include "storage/sinvaladt.h"
47 #include "storage/spin.h"
48 #include "storage/standby.h"
49 #include "utils/memutils.h"
50 #include "utils/ps_status.h"
51 #include "utils/resowner_private.h"
52 
53 
54 /* This configuration variable is used to set the lock table size */
55 int max_locks_per_xact; /* set by guc.c */
56 
57 #define NLOCKENTS() \
58  mul_size(max_locks_per_xact, add_size(MaxBackends, max_prepared_xacts))
59 
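/*
 * For a rough sense of scale (stock defaults, so treat the numbers as
 * illustrative only): with max_locks_per_xact = 64, about 100 backends and
 * no prepared transactions, NLOCKENTS() sizes the shared tables for on the
 * order of 6400 lockable objects.  The limit is pool-wide, not enforced per
 * transaction: one backend may hold far more than max_locks_per_xact locks
 * as long as the shared tables have room.
 */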
60 
61 /*
62  * Data structures defining the semantics of the standard lock methods.
63  *
64  * The conflict table defines the semantics of the various lock modes.
65  */
66 static const LOCKMASK LockConflicts[] = {
67  0,
68 
69  /* AccessShareLock */
70  LOCKBIT_ON(AccessExclusiveLock),
71 
72  /* RowShareLock */
73  LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
74 
75  /* RowExclusiveLock */
76  LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
77  LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
78 
79  /* ShareUpdateExclusiveLock */
80  LOCKBIT_ON(ShareUpdateExclusiveLock) |
81  LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
82  LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
83 
84  /* ShareLock */
85  LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
86  LOCKBIT_ON(ShareRowExclusiveLock) |
87  LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
88 
89  /* ShareRowExclusiveLock */
90  LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
91  LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
92  LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
93 
94  /* ExclusiveLock */
95  LOCKBIT_ON(RowShareLock) |
96  LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
97  LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
98  LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),
99 
100  /* AccessExclusiveLock */
101  LOCKBIT_ON(AccessShareLock) | LOCKBIT_ON(RowShareLock) |
102  LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
103  LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
104  LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock)
105 
106 };
107 
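/*
 * Reading the table above: RowExclusiveLock (taken by INSERT/UPDATE/DELETE)
 * conflicts with ShareLock and stronger modes but not with other
 * RowExclusiveLock holders, while AccessExclusiveLock conflicts with every
 * mode including AccessShareLock, which is why it alone blocks plain SELECT.
 */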
108 /* Names of lock modes, for debug printouts */
109 static const char *const lock_mode_names[] =
110 {
111  "INVALID",
112  "AccessShareLock",
113  "RowShareLock",
114  "RowExclusiveLock",
115  "ShareUpdateExclusiveLock",
116  "ShareLock",
117  "ShareRowExclusiveLock",
118  "ExclusiveLock",
119  "AccessExclusiveLock"
120 };
121 
122 #ifndef LOCK_DEBUG
123 static bool Dummy_trace = false;
124 #endif
125 
126 static const LockMethodData default_lockmethod = {
127  AccessExclusiveLock, /* highest valid lock mode number */
128  LockConflicts,
129  lock_mode_names,
130 #ifdef LOCK_DEBUG
131  &Trace_locks
132 #else
133  &Dummy_trace
134 #endif
135 };
136 
137 static const LockMethodData user_lockmethod = {
138  AccessExclusiveLock, /* highest valid lock mode number */
139  LockConflicts,
140  lock_mode_names,
141 #ifdef LOCK_DEBUG
142  &Trace_userlocks
143 #else
144  &Dummy_trace
145 #endif
146 };
147 
148 /*
149  * map from lock method id to the lock table data structures
150  */
151 static const LockMethod LockMethods[] = {
152  NULL,
153  &default_lockmethod,
154  &user_lockmethod
155 };
156 
157 
158 /* Record that's written to 2PC state file when a lock is persisted */
159 typedef struct TwoPhaseLockRecord
160 {
161  LOCKTAG locktag;
162  LOCKMODE lockmode;
163 } TwoPhaseLockRecord;
164 
165 
166 /*
167  * Count of the number of fast path lock slots we believe to be used. This
168  * might be higher than the real number if another backend has transferred
169  * our locks to the primary lock table, but it can never be lower than the
170  * real value, since only we can acquire locks on our own behalf.
171  */
172 static int FastPathLocalUseCount = 0;
173 
174 /*
175  * Flag to indicate if the relation extension lock is held by this backend.
176  * This flag is used to ensure that while holding the relation extension lock
177  * we don't try to acquire a heavyweight lock on any other object. This
178  * restriction implies that the relation extension lock won't ever participate
179  * in the deadlock cycle because we can never wait for any other heavyweight
180  * lock after acquiring this lock.
181  *
182  * Such a restriction is okay for relation extension locks as unlike other
183  * heavyweight locks these are not held till the transaction end. These are
184  * taken for a short duration to extend a particular relation and then
185  * released.
186  */
187 static bool IsRelationExtensionLockHeld PG_USED_FOR_ASSERTS_ONLY = false;
188 
189 /*
190  * Flag to indicate if the page lock is held by this backend. We don't
191  * acquire any other heavyweight lock while holding the page lock except for
192  * relation extension. However, these locks are never taken in reverse order
193  * which implies that page locks will also never participate in the deadlock
194  * cycle.
195  *
196  * Similar to relation extension, page locks are also held for a short
197  * duration, so imposing such a restriction won't hurt.
198  */
199 static bool IsPageLockHeld PG_USED_FOR_ASSERTS_ONLY = false;
200 
201 /* Macros for manipulating proc->fpLockBits */
202 #define FAST_PATH_BITS_PER_SLOT 3
203 #define FAST_PATH_LOCKNUMBER_OFFSET 1
204 #define FAST_PATH_MASK ((1 << FAST_PATH_BITS_PER_SLOT) - 1)
205 #define FAST_PATH_GET_BITS(proc, n) \
206  (((proc)->fpLockBits >> (FAST_PATH_BITS_PER_SLOT * n)) & FAST_PATH_MASK)
207 #define FAST_PATH_BIT_POSITION(n, l) \
208  (AssertMacro((l) >= FAST_PATH_LOCKNUMBER_OFFSET), \
209  AssertMacro((l) < FAST_PATH_BITS_PER_SLOT+FAST_PATH_LOCKNUMBER_OFFSET), \
210  AssertMacro((n) < FP_LOCK_SLOTS_PER_BACKEND), \
211  ((l) - FAST_PATH_LOCKNUMBER_OFFSET + FAST_PATH_BITS_PER_SLOT * (n)))
212 #define FAST_PATH_SET_LOCKMODE(proc, n, l) \
213  (proc)->fpLockBits |= UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)
214 #define FAST_PATH_CLEAR_LOCKMODE(proc, n, l) \
215  (proc)->fpLockBits &= ~(UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l))
216 #define FAST_PATH_CHECK_LOCKMODE(proc, n, l) \
217  ((proc)->fpLockBits & (UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)))
218 
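/*
 * Worked example of the macros above: with FAST_PATH_BITS_PER_SLOT = 3 and
 * FAST_PATH_LOCKNUMBER_OFFSET = 1, slot n = 2 holding AccessShareLock (lock
 * mode 1) occupies bit (1 - 1) + 3 * 2 = 6 of fpLockBits, while
 * RowExclusiveLock (mode 3) in slot 0 occupies bit 2.  Each slot therefore
 * records at most the three weakest relation lock modes.
 */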
219 /*
220  * The fast-path lock mechanism is concerned only with relation locks on
221  * unshared relations by backends bound to a database. The fast-path
222  * mechanism exists mostly to accelerate acquisition and release of locks
223  * that rarely conflict. Because ShareUpdateExclusiveLock is
224  * self-conflicting, it can't use the fast-path mechanism; but it also does
225  * not conflict with any of the locks that do, so we can ignore it completely.
226  */
227 #define EligibleForRelationFastPath(locktag, mode) \
228  ((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
229  (locktag)->locktag_type == LOCKTAG_RELATION && \
230  (locktag)->locktag_field1 == MyDatabaseId && \
231  MyDatabaseId != InvalidOid && \
232  (mode) < ShareUpdateExclusiveLock)
233 #define ConflictsWithRelationFastPath(locktag, mode) \
234  ((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
235  (locktag)->locktag_type == LOCKTAG_RELATION && \
236  (locktag)->locktag_field1 != InvalidOid && \
237  (mode) > ShareUpdateExclusiveLock)
238 
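/*
 * In practice the split is: AccessShareLock, RowShareLock and
 * RowExclusiveLock (ordinary SELECT/DML on local, unshared relations) are
 * fast-path eligible, while ShareLock and stronger modes -- for example the
 * locks taken by CREATE INDEX or ALTER TABLE -- count as "strong" and must
 * force any fast-path entries for the same relation into the main table,
 * since they can conflict with them.
 */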
239 static bool FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode);
240 static bool FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode);
241 static bool FastPathTransferRelationLocks(LockMethod lockMethodTable,
242  const LOCKTAG *locktag, uint32 hashcode);
243 static PROCLOCK *FastPathGetRelationLockEntry(LOCALLOCK *locallock);
244 
245 /*
246  * To make the fast-path lock mechanism work, we must have some way of
247  * preventing the use of the fast-path when a conflicting lock might be present.
248  * We partition the locktag space into FAST_PATH_STRONG_LOCK_HASH_PARTITIONS,
249  * and maintain an integer count of the number of "strong" lockers
250  * in each partition. When any "strong" lockers are present (which is
251  * hopefully not very often), the fast-path mechanism can't be used, and we
252  * must fall back to the slower method of pushing matching locks directly
253  * into the main lock tables.
254  *
255  * The deadlock detector does not know anything about the fast path mechanism,
256  * so any locks that might be involved in a deadlock must be transferred from
257  * the fast-path queues to the main lock table.
258  */
259 
260 #define FAST_PATH_STRONG_LOCK_HASH_BITS 10
261 #define FAST_PATH_STRONG_LOCK_HASH_PARTITIONS \
262  (1 << FAST_PATH_STRONG_LOCK_HASH_BITS)
263 #define FastPathStrongLockHashPartition(hashcode) \
264  ((hashcode) % FAST_PATH_STRONG_LOCK_HASH_PARTITIONS)
265 
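/*
 * Sketch of the resulting protocol: BeginStrongLockAcquire() increments
 * count[FastPathStrongLockHashPartition(hashcode)] before a "strong" locker
 * transfers any existing fast-path locks on the same tag into the main
 * table, while would-be fast-path ("weak") lockers test that counter under
 * their per-backend fast-path LWLock and fall back to the main table
 * whenever it is nonzero.  With 1024 partitions, unrelated locktags rarely
 * share a counter, so false fallbacks are uncommon.
 */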
266 typedef struct
267 {
268  slock_t mutex;
269  uint32 count[FAST_PATH_STRONG_LOCK_HASH_PARTITIONS];
270 } FastPathStrongRelationLockData;
271 
272 static volatile FastPathStrongRelationLockData *FastPathStrongRelationLocks;
273 
274 
275 /*
276  * Pointers to hash tables containing lock state
277  *
278  * The LockMethodLockHash and LockMethodProcLockHash hash tables are in
279  * shared memory; LockMethodLocalHash is local to each backend.
280  */
281 static HTAB *LockMethodLockHash;
282 static HTAB *LockMethodProcLockHash;
283 static HTAB *LockMethodLocalHash;
284 
285 
286 /* private state for error cleanup */
287 static LOCALLOCK *StrongLockInProgress;
288 static LOCALLOCK *awaitedLock;
289 static ResourceOwner awaitedOwner;
290 
291 
292 #ifdef LOCK_DEBUG
293 
294 /*------
295  * The following configuration options are available for lock debugging:
296  *
297  * TRACE_LOCKS -- give a bunch of output about what's going on in this file
298  * TRACE_USERLOCKS -- same but for user locks
299  * TRACE_LOCK_OIDMIN -- do not trace locks for tables below this oid
300  * (use to avoid output on system tables)
301  * TRACE_LOCK_TABLE -- trace locks on this table (oid) unconditionally
302  * DEBUG_DEADLOCKS -- currently dumps locks at untimely occasions ;)
303  *
304  * Furthermore, but in storage/lmgr/lwlock.c:
305  * TRACE_LWLOCKS -- trace lightweight locks (pretty useless)
306  *
307  * Define LOCK_DEBUG at compile time to get all these enabled.
308  * --------
309  */
310 
311 int Trace_lock_oidmin = FirstNormalObjectId;
312 bool Trace_locks = false;
313 bool Trace_userlocks = false;
314 int Trace_lock_table = 0;
315 bool Debug_deadlocks = false;
316 
317 
318 inline static bool
319 LOCK_DEBUG_ENABLED(const LOCKTAG *tag)
320 {
321  return
322  (*(LockMethods[tag->locktag_lockmethodid]->trace_flag) &&
323  ((Oid) tag->locktag_field2 >= (Oid) Trace_lock_oidmin))
324  || (Trace_lock_table &&
325  (tag->locktag_field2 == Trace_lock_table));
326 }
327 
328 
329 inline static void
330 LOCK_PRINT(const char *where, const LOCK *lock, LOCKMODE type)
331 {
332  if (LOCK_DEBUG_ENABLED(&lock->tag))
333  elog(LOG,
334  "%s: lock(%p) id(%u,%u,%u,%u,%u,%u) grantMask(%x) "
335  "req(%d,%d,%d,%d,%d,%d,%d)=%d "
336  "grant(%d,%d,%d,%d,%d,%d,%d)=%d wait(%d) type(%s)",
337  where, lock,
338  lock->tag.locktag_field1, lock->tag.locktag_field2,
339  lock->tag.locktag_field3, lock->tag.locktag_field4,
340  lock->tag.locktag_type, lock->tag.locktag_lockmethodid,
341  lock->grantMask,
342  lock->requested[1], lock->requested[2], lock->requested[3],
343  lock->requested[4], lock->requested[5], lock->requested[6],
344  lock->requested[7], lock->nRequested,
345  lock->granted[1], lock->granted[2], lock->granted[3],
346  lock->granted[4], lock->granted[5], lock->granted[6],
347  lock->granted[7], lock->nGranted,
348  lock->waitProcs.size,
349  LockMethods[LOCK_LOCKMETHOD(*lock)]->lockModeNames[type]);
350 }
351 
352 
353 inline static void
354 PROCLOCK_PRINT(const char *where, const PROCLOCK *proclockP)
355 {
356  if (LOCK_DEBUG_ENABLED(&proclockP->tag.myLock->tag))
357  elog(LOG,
358  "%s: proclock(%p) lock(%p) method(%u) proc(%p) hold(%x)",
359  where, proclockP, proclockP->tag.myLock,
360  PROCLOCK_LOCKMETHOD(*(proclockP)),
361  proclockP->tag.myProc, (int) proclockP->holdMask);
362 }
363 #else /* not LOCK_DEBUG */
364 
365 #define LOCK_PRINT(where, lock, type) ((void) 0)
366 #define PROCLOCK_PRINT(where, proclockP) ((void) 0)
367 #endif /* not LOCK_DEBUG */
368 
369 
370 static uint32 proclock_hash(const void *key, Size keysize);
371 static void RemoveLocalLock(LOCALLOCK *locallock);
372 static PROCLOCK *SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc,
373  const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode);
374 static void GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner);
375 static void BeginStrongLockAcquire(LOCALLOCK *locallock, uint32 fasthashcode);
376 static void FinishStrongLockAcquire(void);
377 static void WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner);
378 static void ReleaseLockIfHeld(LOCALLOCK *locallock, bool sessionLock);
379 static void LockReassignOwner(LOCALLOCK *locallock, ResourceOwner parent);
380 static bool UnGrantLock(LOCK *lock, LOCKMODE lockmode,
381  PROCLOCK *proclock, LockMethod lockMethodTable);
382 static void CleanUpLock(LOCK *lock, PROCLOCK *proclock,
383  LockMethod lockMethodTable, uint32 hashcode,
384  bool wakeupNeeded);
385 static void LockRefindAndRelease(LockMethod lockMethodTable, PGPROC *proc,
386  LOCKTAG *locktag, LOCKMODE lockmode,
387  bool decrement_strong_lock_count);
388 static void GetSingleProcBlockerStatusData(PGPROC *blocked_proc,
389  BlockedProcsData *data);
390 
391 
392 /*
393  * InitLocks -- Initialize the lock manager's data structures.
394  *
395  * This is called from CreateSharedMemoryAndSemaphores(), which see for
396  * more comments. In the normal postmaster case, the shared hash tables
397  * are created here, as well as a locallock hash table that will remain
398  * unused and empty in the postmaster itself. Backends inherit the pointers
399  * to the shared tables via fork(), and also inherit an image of the locallock
400  * hash table, which they proceed to use. In the EXEC_BACKEND case, each
401  * backend re-executes this code to obtain pointers to the already existing
402  * shared hash tables and to create its locallock hash table.
403  */
404 void
405 InitLocks(void)
406 {
407  HASHCTL info;
408  long init_table_size,
409  max_table_size;
410  bool found;
411 
412  /*
413  * Compute init/max size to request for lock hashtables. Note these
414  * calculations must agree with LockShmemSize!
415  */
416  max_table_size = NLOCKENTS();
417  init_table_size = max_table_size / 2;
418 
419  /*
420  * Allocate hash table for LOCK structs. This stores per-locked-object
421  * information.
422  */
423  info.keysize = sizeof(LOCKTAG);
424  info.entrysize = sizeof(LOCK);
425  info.num_partitions = NUM_LOCK_PARTITIONS;
426 
427  LockMethodLockHash = ShmemInitHash("LOCK hash",
428  init_table_size,
429  max_table_size,
430  &info,
431  HASH_ELEM | HASH_BLOBS | HASH_PARTITION);
432 
433  /* Assume an average of 2 holders per lock */
434  max_table_size *= 2;
435  init_table_size *= 2;
436 
437  /*
438  * Allocate hash table for PROCLOCK structs. This stores
439  * per-lock-per-holder information.
440  */
441  info.keysize = sizeof(PROCLOCKTAG);
442  info.entrysize = sizeof(PROCLOCK);
443  info.hash = proclock_hash;
444  info.num_partitions = NUM_LOCK_PARTITIONS;
445 
446  LockMethodProcLockHash = ShmemInitHash("PROCLOCK hash",
447  init_table_size,
448  max_table_size,
449  &info,
450  HASH_ELEM | HASH_FUNCTION | HASH_PARTITION);
451 
452  /*
453  * Allocate fast-path structures.
454  */
455  FastPathStrongRelationLocks =
456  ShmemInitStruct("Fast Path Strong Relation Lock Data",
457  sizeof(FastPathStrongRelationLockData), &found);
458  if (!found)
459  SpinLockInit(&FastPathStrongRelationLocks->mutex);
460 
461  /*
462  * Allocate non-shared hash table for LOCALLOCK structs. This stores lock
463  * counts and resource owner information.
464  *
465  * The non-shared table could already exist in this process (this occurs
466  * when the postmaster is recreating shared memory after a backend crash).
467  * If so, delete and recreate it. (We could simply leave it, since it
468  * ought to be empty in the postmaster, but for safety let's zap it.)
469  */
470  if (LockMethodLocalHash)
471  hash_destroy(LockMethodLocalHash);
472 
473  info.keysize = sizeof(LOCALLOCKTAG);
474  info.entrysize = sizeof(LOCALLOCK);
475 
476  LockMethodLocalHash = hash_create("LOCALLOCK hash",
477  16,
478  &info,
479  HASH_ELEM | HASH_BLOBS);
480 }
481 
482 
483 /*
484  * Fetch the lock method table associated with a given lock
485  */
486 LockMethod
487 GetLocksMethodTable(const LOCK *lock)
488 {
489  LOCKMETHODID lockmethodid = LOCK_LOCKMETHOD(*lock);
490 
491  Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
492  return LockMethods[lockmethodid];
493 }
494 
495 /*
496  * Fetch the lock method table associated with a given locktag
497  */
498 LockMethod
499 GetLockTagsMethodTable(const LOCKTAG *locktag)
500 {
501  LOCKMETHODID lockmethodid = (LOCKMETHODID) locktag->locktag_lockmethodid;
502 
503  Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
504  return LockMethods[lockmethodid];
505 }
506 
507 
508 /*
509  * Compute the hash code associated with a LOCKTAG.
510  *
511  * To avoid unnecessary recomputations of the hash code, we try to do this
512  * just once per function, and then pass it around as needed. Aside from
513  * passing the hashcode to hash_search_with_hash_value(), we can extract
514  * the lock partition number from the hashcode.
515  */
516 uint32
517 LockTagHashCode(const LOCKTAG *locktag)
518 {
519  return get_hash_value(LockMethodLockHash, (const void *) locktag);
520 }
521 
522 /*
523  * Compute the hash code associated with a PROCLOCKTAG.
524  *
525  * Because we want to use just one set of partition locks for both the
526  * LOCK and PROCLOCK hash tables, we have to make sure that PROCLOCKs
527  * fall into the same partition number as their associated LOCKs.
528  * dynahash.c expects the partition number to be the low-order bits of
529  * the hash code, and therefore a PROCLOCKTAG's hash code must have the
530  * same low-order bits as the associated LOCKTAG's hash code. We achieve
531  * this with this specialized hash function.
532  */
533 static uint32
534 proclock_hash(const void *key, Size keysize)
535 {
536  const PROCLOCKTAG *proclocktag = (const PROCLOCKTAG *) key;
537  uint32 lockhash;
538  Datum procptr;
539 
540  Assert(keysize == sizeof(PROCLOCKTAG));
541 
542  /* Look into the associated LOCK object, and compute its hash code */
543  lockhash = LockTagHashCode(&proclocktag->myLock->tag);
544 
545  /*
546  * To make the hash code also depend on the PGPROC, we xor the proc
547  * struct's address into the hash code, left-shifted so that the
548  * partition-number bits don't change. Since this is only a hash, we
549  * don't care if we lose high-order bits of the address; use an
550  * intermediate variable to suppress cast-pointer-to-int warnings.
551  */
552  procptr = PointerGetDatum(proclocktag->myProc);
553  lockhash ^= ((uint32) procptr) << LOG2_NUM_LOCK_PARTITIONS;
554 
555  return lockhash;
556 }
557 
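/*
 * Since the PGPROC address is shifted left by LOG2_NUM_LOCK_PARTITIONS (4 by
 * default, i.e. 16 partitions) before being XORed in, the low-order
 * partition-selecting bits of the result equal those of LockTagHashCode(),
 * so LockHashPartitionLock() chooses the same partition lock for a PROCLOCK
 * as for its underlying LOCK.
 */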
558 /*
559  * Compute the hash code associated with a PROCLOCKTAG, given the hashcode
560  * for its underlying LOCK.
561  *
562  * We use this just to avoid redundant calls of LockTagHashCode().
563  */
564 static inline uint32
565 ProcLockHashCode(const PROCLOCKTAG *proclocktag, uint32 hashcode)
566 {
567  uint32 lockhash = hashcode;
568  Datum procptr;
569 
570  /*
571  * This must match proclock_hash()!
572  */
573  procptr = PointerGetDatum(proclocktag->myProc);
574  lockhash ^= ((uint32) procptr) << LOG2_NUM_LOCK_PARTITIONS;
575 
576  return lockhash;
577 }
578 
579 /*
580  * Given two lock modes, return whether they would conflict.
581  */
582 bool
583 DoLockModesConflict(LOCKMODE mode1, LOCKMODE mode2)
584 {
585  LockMethod lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];
586 
587  if (lockMethodTable->conflictTab[mode1] & LOCKBIT_ON(mode2))
588  return true;
589 
590  return false;
591 }
592 
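/*
 * For example, DoLockModesConflict(RowExclusiveLock, ShareLock) returns
 * true, while DoLockModesConflict(RowExclusiveLock, RowExclusiveLock)
 * returns false, per the conflict table at the top of this file.
 */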
593 /*
594  * LockHeldByMe -- test whether lock 'locktag' is held with mode 'lockmode'
595  * by the current transaction
596  */
597 bool
598 LockHeldByMe(const LOCKTAG *locktag, LOCKMODE lockmode)
599 {
600  LOCALLOCKTAG localtag;
601  LOCALLOCK *locallock;
602 
603  /*
604  * See if there is a LOCALLOCK entry for this lock and lockmode
605  */
606  MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
607  localtag.lock = *locktag;
608  localtag.mode = lockmode;
609 
610  locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
611  (void *) &localtag,
612  HASH_FIND, NULL);
613 
614  return (locallock && locallock->nLocks > 0);
615 }
616 
617 #ifdef USE_ASSERT_CHECKING
618 /*
619  * GetLockMethodLocalHash -- return the hash of local locks, for modules that
620  * evaluate assertions based on all locks held.
621  */
622 HTAB *
623 GetLockMethodLocalHash(void)
624 {
625  return LockMethodLocalHash;
626 }
627 #endif
628 
629 /*
630  * LockHasWaiters -- look up 'locktag' and check if releasing this
631  * lock would wake up other processes waiting for it.
632  */
633 bool
634 LockHasWaiters(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
635 {
636  LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
637  LockMethod lockMethodTable;
638  LOCALLOCKTAG localtag;
639  LOCALLOCK *locallock;
640  LOCK *lock;
641  PROCLOCK *proclock;
642  LWLock *partitionLock;
643  bool hasWaiters = false;
644 
645  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
646  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
647  lockMethodTable = LockMethods[lockmethodid];
648  if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
649  elog(ERROR, "unrecognized lock mode: %d", lockmode);
650 
651 #ifdef LOCK_DEBUG
652  if (LOCK_DEBUG_ENABLED(locktag))
653  elog(LOG, "LockHasWaiters: lock [%u,%u] %s",
654  locktag->locktag_field1, locktag->locktag_field2,
655  lockMethodTable->lockModeNames[lockmode]);
656 #endif
657 
658  /*
659  * Find the LOCALLOCK entry for this lock and lockmode
660  */
661  MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
662  localtag.lock = *locktag;
663  localtag.mode = lockmode;
664 
665  locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
666  (void *) &localtag,
667  HASH_FIND, NULL);
668 
669  /*
670  * let the caller print its own error message, too. Do not ereport(ERROR).
671  */
672  if (!locallock || locallock->nLocks <= 0)
673  {
674  elog(WARNING, "you don't own a lock of type %s",
675  lockMethodTable->lockModeNames[lockmode]);
676  return false;
677  }
678 
679  /*
680  * Check the shared lock table.
681  */
682  partitionLock = LockHashPartitionLock(locallock->hashcode);
683 
684  LWLockAcquire(partitionLock, LW_SHARED);
685 
686  /*
687  * We don't need to re-find the lock or proclock, since we kept their
688  * addresses in the locallock table, and they couldn't have been removed
689  * while we were holding a lock on them.
690  */
691  lock = locallock->lock;
692  LOCK_PRINT("LockHasWaiters: found", lock, lockmode);
693  proclock = locallock->proclock;
694  PROCLOCK_PRINT("LockHasWaiters: found", proclock);
695 
696  /*
697  * Double-check that we are actually holding a lock of the type we want to
698  * release.
699  */
700  if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
701  {
702  PROCLOCK_PRINT("LockHasWaiters: WRONGTYPE", proclock);
703  LWLockRelease(partitionLock);
704  elog(WARNING, "you don't own a lock of type %s",
705  lockMethodTable->lockModeNames[lockmode]);
706  RemoveLocalLock(locallock);
707  return false;
708  }
709 
710  /*
711  * Do the checking.
712  */
713  if ((lockMethodTable->conflictTab[lockmode] & lock->waitMask) != 0)
714  hasWaiters = true;
715 
716  LWLockRelease(partitionLock);
717 
718  return hasWaiters;
719 }
720 
721 /*
722  * LockAcquire -- Check for lock conflicts, sleep if conflict found,
723  * set lock if/when no conflicts.
724  *
725  * Inputs:
726  * locktag: unique identifier for the lockable object
727  * lockmode: lock mode to acquire
728  * sessionLock: if true, acquire lock for session not current transaction
729  * dontWait: if true, don't wait to acquire lock
730  *
731  * Returns one of:
732  * LOCKACQUIRE_NOT_AVAIL lock not available, and dontWait=true
733  * LOCKACQUIRE_OK lock successfully acquired
734  * LOCKACQUIRE_ALREADY_HELD incremented count for lock already held
735  * LOCKACQUIRE_ALREADY_CLEAR incremented count for lock already clear
736  *
737  * In the normal case where dontWait=false and the caller doesn't need to
738  * distinguish a freshly acquired lock from one already taken earlier in
739  * this same transaction, there is no need to examine the return value.
740  *
741  * Side Effects: The lock is acquired and recorded in lock tables.
742  *
743  * NOTE: if we wait for the lock, there is no way to abort the wait
744  * short of aborting the transaction.
745  */
746 LockAcquireResult
747 LockAcquire(const LOCKTAG *locktag,
748  LOCKMODE lockmode,
749  bool sessionLock,
750  bool dontWait)
751 {
752  return LockAcquireExtended(locktag, lockmode, sessionLock, dontWait,
753  true, NULL);
754 }
755 
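/*
 * Callers normally reach this through lmgr.c rather than building LOCKTAGs
 * themselves.  A minimal sketch of such a caller (illustrative only, not
 * the exact lmgr.c code):
 *
 *     LOCKTAG  tag;
 *
 *     SET_LOCKTAG_RELATION(tag, MyDatabaseId, relid);
 *     (void) LockAcquire(&tag, AccessShareLock, false, false);
 *
 * with the matching LockRelease() using the same tag and mode, or
 * LockReleaseAll() at transaction end.
 */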
756 /*
757  * LockAcquireExtended - allows us to specify additional options
758  *
759  * reportMemoryError specifies whether a lock request that fills the lock
760  * table should generate an ERROR or not. Passing "false" allows the caller
761  * to attempt to recover from lock-table-full situations, perhaps by forcibly
762  * canceling other lock holders and then retrying. Note, however, that the
763  * return code for that is LOCKACQUIRE_NOT_AVAIL, so that it's unsafe to use
764  * in combination with dontWait = true, as the cause of failure couldn't be
765  * distinguished.
766  *
767  * If locallockp isn't NULL, *locallockp receives a pointer to the LOCALLOCK
768  * table entry if a lock is successfully acquired, or NULL if not.
769  */
770 LockAcquireResult
771 LockAcquireExtended(const LOCKTAG *locktag,
772  LOCKMODE lockmode,
773  bool sessionLock,
774  bool dontWait,
775  bool reportMemoryError,
776  LOCALLOCK **locallockp)
777 {
778  LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
779  LockMethod lockMethodTable;
780  LOCALLOCKTAG localtag;
781  LOCALLOCK *locallock;
782  LOCK *lock;
783  PROCLOCK *proclock;
784  bool found;
785  ResourceOwner owner;
786  uint32 hashcode;
787  LWLock *partitionLock;
788  bool found_conflict;
789  bool log_lock = false;
790 
791  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
792  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
793  lockMethodTable = LockMethods[lockmethodid];
794  if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
795  elog(ERROR, "unrecognized lock mode: %d", lockmode);
796 
797  if (RecoveryInProgress() && !InRecovery &&
798  (locktag->locktag_type == LOCKTAG_OBJECT ||
799  locktag->locktag_type == LOCKTAG_RELATION) &&
800  lockmode > RowExclusiveLock)
801  ereport(ERROR,
802  (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
803  errmsg("cannot acquire lock mode %s on database objects while recovery is in progress",
804  lockMethodTable->lockModeNames[lockmode]),
805  errhint("Only RowExclusiveLock or less can be acquired on database objects during recovery.")));
806 
807 #ifdef LOCK_DEBUG
808  if (LOCK_DEBUG_ENABLED(locktag))
809  elog(LOG, "LockAcquire: lock [%u,%u] %s",
810  locktag->locktag_field1, locktag->locktag_field2,
811  lockMethodTable->lockModeNames[lockmode]);
812 #endif
813 
814  /* Identify owner for lock */
815  if (sessionLock)
816  owner = NULL;
817  else
818  owner = CurrentResourceOwner;
819 
820  /*
821  * Find or create a LOCALLOCK entry for this lock and lockmode
822  */
823  MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
824  localtag.lock = *locktag;
825  localtag.mode = lockmode;
826 
827  locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
828  (void *) &localtag,
829  HASH_ENTER, &found);
830 
831  /*
832  * if it's a new locallock object, initialize it
833  */
834  if (!found)
835  {
836  locallock->lock = NULL;
837  locallock->proclock = NULL;
838  locallock->hashcode = LockTagHashCode(&(localtag.lock));
839  locallock->nLocks = 0;
840  locallock->holdsStrongLockCount = false;
841  locallock->lockCleared = false;
842  locallock->numLockOwners = 0;
843  locallock->maxLockOwners = 8;
844  locallock->lockOwners = NULL; /* in case next line fails */
845  locallock->lockOwners = (LOCALLOCKOWNER *)
846  MemoryContextAlloc(TopMemoryContext,
847  locallock->maxLockOwners * sizeof(LOCALLOCKOWNER));
848  }
849  else
850  {
851  /* Make sure there will be room to remember the lock */
852  if (locallock->numLockOwners >= locallock->maxLockOwners)
853  {
854  int newsize = locallock->maxLockOwners * 2;
855 
856  locallock->lockOwners = (LOCALLOCKOWNER *)
857  repalloc(locallock->lockOwners,
858  newsize * sizeof(LOCALLOCKOWNER));
859  locallock->maxLockOwners = newsize;
860  }
861  }
862  hashcode = locallock->hashcode;
863 
864  if (locallockp)
865  *locallockp = locallock;
866 
867  /*
868  * If we already hold the lock, we can just increase the count locally.
869  *
870  * If lockCleared is already set, caller need not worry about absorbing
871  * sinval messages related to the lock's object.
872  */
873  if (locallock->nLocks > 0)
874  {
875  GrantLockLocal(locallock, owner);
876  if (locallock->lockCleared)
877  return LOCKACQUIRE_ALREADY_CLEAR;
878  else
879  return LOCKACQUIRE_ALREADY_HELD;
880  }
881 
882  /*
883  * We don't acquire any other heavyweight lock while holding the relation
884  * extension lock. We do allow acquiring the same relation extension
885  * lock more than once, but that case won't reach here.
886  */
887  Assert(!IsRelationExtensionLockHeld);
888 
889  /*
890  * We don't acquire any other heavyweight lock while holding the page lock
891  * except for relation extension.
892  */
893  Assert(!IsPageLockHeld ||
894  (locktag->locktag_type == LOCKTAG_RELATION_EXTEND));
895 
896  /*
897  * Prepare to emit a WAL record if acquisition of this lock needs to be
898  * replayed in a standby server.
899  *
900  * Here we prepare to log; after lock is acquired we'll issue log record.
901  * This arrangement simplifies error recovery in case the preparation step
902  * fails.
903  *
904  * Only AccessExclusiveLocks can conflict with lock types that read-only
905  * transactions can acquire in a standby server. Make sure this definition
906  * matches the one in GetRunningTransactionLocks().
907  */
908  if (lockmode >= AccessExclusiveLock &&
909  locktag->locktag_type == LOCKTAG_RELATION &&
910  !RecoveryInProgress() &&
911  XLogStandbyInfoActive())
912  {
913  LogAccessExclusiveLockPrepare();
914  log_lock = true;
915  }
916 
917  /*
918  * Attempt to take lock via fast path, if eligible. But if we remember
919  * having filled up the fast path array, we don't attempt to make any
920  * further use of it until we release some locks. It's possible that some
921  * other backend has transferred some of those locks to the shared hash
922  * table, leaving space free, but it's not worth acquiring the LWLock just
923  * to check. It's also possible that we're acquiring a second or third
924  * lock type on a relation we have already locked using the fast-path, but
925  * for now we don't worry about that case either.
926  */
927  if (EligibleForRelationFastPath(locktag, lockmode) &&
928  FastPathLocalUseCount < FP_LOCK_SLOTS_PER_BACKEND)
929  {
930  uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
931  bool acquired;
932 
933  /*
934  * LWLockAcquire acts as a memory sequencing point, so it's safe to
935  * assume that any strong locker whose increment to
936  * FastPathStrongRelationLocks->counts becomes visible after we test
937  * it has yet to begin to transfer fast-path locks.
938  */
939  LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
940  if (FastPathStrongRelationLocks->count[fasthashcode] != 0)
941  acquired = false;
942  else
943  acquired = FastPathGrantRelationLock(locktag->locktag_field2,
944  lockmode);
945  LWLockRelease(&MyProc->fpInfoLock);
946  if (acquired)
947  {
948  /*
949  * The locallock might contain stale pointers to some old shared
950  * objects; we MUST reset these to null before considering the
951  * lock to be acquired via fast-path.
952  */
953  locallock->lock = NULL;
954  locallock->proclock = NULL;
955  GrantLockLocal(locallock, owner);
956  return LOCKACQUIRE_OK;
957  }
958  }
959 
960  /*
961  * If this lock could potentially have been taken via the fast-path by
962  * some other backend, we must (temporarily) disable further use of the
963  * fast-path for this lock tag, and migrate any locks already taken via
964  * this method to the main lock table.
965  */
966  if (ConflictsWithRelationFastPath(locktag, lockmode))
967  {
968  uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
969 
970  BeginStrongLockAcquire(locallock, fasthashcode);
971  if (!FastPathTransferRelationLocks(lockMethodTable, locktag,
972  hashcode))
973  {
974  AbortStrongLockAcquire();
975  if (locallock->nLocks == 0)
976  RemoveLocalLock(locallock);
977  if (locallockp)
978  *locallockp = NULL;
979  if (reportMemoryError)
980  ereport(ERROR,
981  (errcode(ERRCODE_OUT_OF_MEMORY),
982  errmsg("out of shared memory"),
983  errhint("You might need to increase max_locks_per_transaction.")));
984  else
985  return LOCKACQUIRE_NOT_AVAIL;
986  }
987  }
988 
989  /*
990  * We didn't find the lock in our LOCALLOCK table, and we didn't manage to
991  * take it via the fast-path, either, so we've got to mess with the shared
992  * lock table.
993  */
994  partitionLock = LockHashPartitionLock(hashcode);
995 
996  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
997 
998  /*
999  * Find or create lock and proclock entries with this tag
1000  *
1001  * Note: if the locallock object already existed, it might have a pointer
1002  * to the lock already ... but we should not assume that that pointer is
1003  * valid, since a lock object with zero hold and request counts can go
1004  * away anytime. So we have to use SetupLockInTable() to recompute the
1005  * lock and proclock pointers, even if they're already set.
1006  */
1007  proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
1008  hashcode, lockmode);
1009  if (!proclock)
1010  {
1011  AbortStrongLockAcquire();
1012  LWLockRelease(partitionLock);
1013  if (locallock->nLocks == 0)
1014  RemoveLocalLock(locallock);
1015  if (locallockp)
1016  *locallockp = NULL;
1017  if (reportMemoryError)
1018  ereport(ERROR,
1019  (errcode(ERRCODE_OUT_OF_MEMORY),
1020  errmsg("out of shared memory"),
1021  errhint("You might need to increase max_locks_per_transaction.")));
1022  else
1023  return LOCKACQUIRE_NOT_AVAIL;
1024  }
1025  locallock->proclock = proclock;
1026  lock = proclock->tag.myLock;
1027  locallock->lock = lock;
1028 
1029  /*
1030  * If lock requested conflicts with locks requested by waiters, must join
1031  * wait queue. Otherwise, check for conflict with already-held locks.
1032  * (That's last because it is the most complex check.)
1033  */
1034  if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
1035  found_conflict = true;
1036  else
1037  found_conflict = LockCheckConflicts(lockMethodTable, lockmode,
1038  lock, proclock);
1039 
1040  if (!found_conflict)
1041  {
1042  /* No conflict with held or previously requested locks */
1043  GrantLock(lock, proclock, lockmode);
1044  GrantLockLocal(locallock, owner);
1045  }
1046  else
1047  {
1048  /*
1049  * We can't acquire the lock immediately. If caller specified no
1050  * blocking, remove useless table entries and return
1051  * LOCKACQUIRE_NOT_AVAIL without waiting.
1052  */
1053  if (dontWait)
1054  {
1056  if (proclock->holdMask == 0)
1057  {
1058  uint32 proclock_hashcode;
1059 
1060  proclock_hashcode = ProcLockHashCode(&proclock->tag, hashcode);
1061  SHMQueueDelete(&proclock->lockLink);
1062  SHMQueueDelete(&proclock->procLink);
1063  if (!hash_search_with_hash_value(LockMethodProcLockHash,
1064  (void *) &(proclock->tag),
1065  proclock_hashcode,
1066  HASH_REMOVE,
1067  NULL))
1068  elog(PANIC, "proclock table corrupted");
1069  }
1070  else
1071  PROCLOCK_PRINT("LockAcquire: NOWAIT", proclock);
1072  lock->nRequested--;
1073  lock->requested[lockmode]--;
1074  LOCK_PRINT("LockAcquire: conditional lock failed", lock, lockmode);
1075  Assert((lock->nRequested > 0) && (lock->requested[lockmode] >= 0));
1076  Assert(lock->nGranted <= lock->nRequested);
1077  LWLockRelease(partitionLock);
1078  if (locallock->nLocks == 0)
1079  RemoveLocalLock(locallock);
1080  if (locallockp)
1081  *locallockp = NULL;
1082  return LOCKACQUIRE_NOT_AVAIL;
1083  }
1084 
1085  /*
1086  * Set bitmask of locks this process already holds on this object.
1087  */
1088  MyProc->heldLocks = proclock->holdMask;
1089 
1090  /*
1091  * Sleep till someone wakes me up.
1092  */
1093 
1094  TRACE_POSTGRESQL_LOCK_WAIT_START(locktag->locktag_field1,
1095  locktag->locktag_field2,
1096  locktag->locktag_field3,
1097  locktag->locktag_field4,
1098  locktag->locktag_type,
1099  lockmode);
1100 
1101  WaitOnLock(locallock, owner);
1102 
1103  TRACE_POSTGRESQL_LOCK_WAIT_DONE(locktag->locktag_field1,
1104  locktag->locktag_field2,
1105  locktag->locktag_field3,
1106  locktag->locktag_field4,
1107  locktag->locktag_type,
1108  lockmode);
1109 
1110  /*
1111  * NOTE: do not do any material change of state between here and
1112  * return. All required changes in locktable state must have been
1113  * done when the lock was granted to us --- see notes in WaitOnLock.
1114  */
1115 
1116  /*
1117  * Check the proclock entry status, in case something in the ipc
1118  * communication doesn't work correctly.
1119  */
1120  if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
1121  {
1122  AbortStrongLockAcquire();
1123  PROCLOCK_PRINT("LockAcquire: INCONSISTENT", proclock);
1124  LOCK_PRINT("LockAcquire: INCONSISTENT", lock, lockmode);
1125  /* Should we retry ? */
1126  LWLockRelease(partitionLock);
1127  elog(ERROR, "LockAcquire failed");
1128  }
1129  PROCLOCK_PRINT("LockAcquire: granted", proclock);
1130  LOCK_PRINT("LockAcquire: granted", lock, lockmode);
1131  }
1132 
1133  /*
1134  * Lock state is fully up-to-date now; if we error out after this, no
1135  * special error cleanup is required.
1136  */
1137  FinishStrongLockAcquire();
1138 
1139  LWLockRelease(partitionLock);
1140 
1141  /*
1142  * Emit a WAL record if acquisition of this lock needs to be replayed in a
1143  * standby server.
1144  */
1145  if (log_lock)
1146  {
1147  /*
1148  * Decode the locktag back to the original values, to avoid sending
1149  * lots of empty bytes with every message. See lock.h to check how a
1150  * locktag is defined for LOCKTAG_RELATION
1151  */
1152  LogAccessExclusiveLock(locktag->locktag_field1,
1153  locktag->locktag_field2);
1154  }
1155 
1156  return LOCKACQUIRE_OK;
1157 }
1158 
1159 /*
1160  * Find or create LOCK and PROCLOCK objects as needed for a new lock
1161  * request.
1162  *
1163  * Returns the PROCLOCK object, or NULL if we failed to create the objects
1164  * for lack of shared memory.
1165  *
1166  * The appropriate partition lock must be held at entry, and will be
1167  * held at exit.
1168  */
1169 static PROCLOCK *
1170 SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc,
1171  const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode)
1172 {
1173  LOCK *lock;
1174  PROCLOCK *proclock;
1175  PROCLOCKTAG proclocktag;
1176  uint32 proclock_hashcode;
1177  bool found;
1178 
1179  /*
1180  * Find or create a lock with this tag.
1181  */
1182  lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
1183  (const void *) locktag,
1184  hashcode,
1185  HASH_ENTER_NULL,
1186  &found);
1187  if (!lock)
1188  return NULL;
1189 
1190  /*
1191  * if it's a new lock object, initialize it
1192  */
1193  if (!found)
1194  {
1195  lock->grantMask = 0;
1196  lock->waitMask = 0;
1197  SHMQueueInit(&(lock->procLocks));
1198  ProcQueueInit(&(lock->waitProcs));
1199  lock->nRequested = 0;
1200  lock->nGranted = 0;
1201  MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
1202  MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
1203  LOCK_PRINT("LockAcquire: new", lock, lockmode);
1204  }
1205  else
1206  {
1207  LOCK_PRINT("LockAcquire: found", lock, lockmode);
1208  Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
1209  Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
1210  Assert(lock->nGranted <= lock->nRequested);
1211  }
1212 
1213  /*
1214  * Create the hash key for the proclock table.
1215  */
1216  proclocktag.myLock = lock;
1217  proclocktag.myProc = proc;
1218 
1219  proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
1220 
1221  /*
1222  * Find or create a proclock entry with this tag
1223  */
1224  proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
1225  (void *) &proclocktag,
1226  proclock_hashcode,
1227  HASH_ENTER_NULL,
1228  &found);
1229  if (!proclock)
1230  {
1231  /* Oops, not enough shmem for the proclock */
1232  if (lock->nRequested == 0)
1233  {
1234  /*
1235  * There are no other requestors of this lock, so garbage-collect
1236  * the lock object. We *must* do this to avoid a permanent leak
1237  * of shared memory, because there won't be anything to cause
1238  * anyone to release the lock object later.
1239  */
1240  Assert(SHMQueueEmpty(&(lock->procLocks)));
1241  if (!hash_search_with_hash_value(LockMethodLockHash,
1242  (void *) &(lock->tag),
1243  hashcode,
1244  HASH_REMOVE,
1245  NULL))
1246  elog(PANIC, "lock table corrupted");
1247  }
1248  return NULL;
1249  }
1250 
1251  /*
1252  * If new, initialize the new entry
1253  */
1254  if (!found)
1255  {
1256  uint32 partition = LockHashPartition(hashcode);
1257 
1258  /*
1259  * It might seem unsafe to access proclock->groupLeader without a
1260  * lock, but it's not really. Either we are initializing a proclock
1261  * on our own behalf, in which case our group leader isn't changing
1262  * because the group leader for a process can only ever be changed by
1263  * the process itself; or else we are transferring a fast-path lock to
1264  * the main lock table, in which case that process can't change its
1265  * lock group leader without first releasing all of its locks (and in
1266  * particular the one we are currently transferring).
1267  */
1268  proclock->groupLeader = proc->lockGroupLeader != NULL ?
1269  proc->lockGroupLeader : proc;
1270  proclock->holdMask = 0;
1271  proclock->releaseMask = 0;
1272  /* Add proclock to appropriate lists */
1273  SHMQueueInsertBefore(&lock->procLocks, &proclock->lockLink);
1274  SHMQueueInsertBefore(&(proc->myProcLocks[partition]),
1275  &proclock->procLink);
1276  PROCLOCK_PRINT("LockAcquire: new", proclock);
1277  }
1278  else
1279  {
1280  PROCLOCK_PRINT("LockAcquire: found", proclock);
1281  Assert((proclock->holdMask & ~lock->grantMask) == 0);
1282 
1283 #ifdef CHECK_DEADLOCK_RISK
1284 
1285  /*
1286  * Issue warning if we already hold a lower-level lock on this object
1287  * and do not hold a lock of the requested level or higher. This
1288  * indicates a deadlock-prone coding practice (eg, we'd have a
1289  * deadlock if another backend were following the same code path at
1290  * about the same time).
1291  *
1292  * This is not enabled by default, because it may generate log entries
1293  * about user-level coding practices that are in fact safe in context.
1294  * It can be enabled to help find system-level problems.
1295  *
1296  * XXX Doing numeric comparison on the lockmodes is a hack; it'd be
1297  * better to use a table. For now, though, this works.
1298  */
1299  {
1300  int i;
1301 
1302  for (i = lockMethodTable->numLockModes; i > 0; i--)
1303  {
1304  if (proclock->holdMask & LOCKBIT_ON(i))
1305  {
1306  if (i >= (int) lockmode)
1307  break; /* safe: we have a lock >= req level */
1308  elog(LOG, "deadlock risk: raising lock level"
1309  " from %s to %s on object %u/%u/%u",
1310  lockMethodTable->lockModeNames[i],
1311  lockMethodTable->lockModeNames[lockmode],
1312  lock->tag.locktag_field1, lock->tag.locktag_field2,
1313  lock->tag.locktag_field3);
1314  break;
1315  }
1316  }
1317  }
1318 #endif /* CHECK_DEADLOCK_RISK */
1319  }
1320 
1321  /*
1322  * lock->nRequested and lock->requested[] count the total number of
1323  * requests, whether granted or waiting, so increment those immediately.
1324  * The other counts don't increment till we get the lock.
1325  */
1326  lock->nRequested++;
1327  lock->requested[lockmode]++;
1328  Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
1329 
1330  /*
1331  * We shouldn't already hold the desired lock; else locallock table is
1332  * broken.
1333  */
1334  if (proclock->holdMask & LOCKBIT_ON(lockmode))
1335  elog(ERROR, "lock %s on object %u/%u/%u is already held",
1336  lockMethodTable->lockModeNames[lockmode],
1337  lock->tag.locktag_field1, lock->tag.locktag_field2,
1338  lock->tag.locktag_field3);
1339 
1340  return proclock;
1341 }
1342 
1343 /*
1344  * Check and set/reset the flag that we hold the relation extension/page lock.
1345  *
1346  * It is the caller's responsibility to call this function after
1347  * acquiring/releasing the relation extension/page lock.
1348  *
1349  * Pass acquired as true if lock is acquired, false otherwise.
1350  */
1351 static inline void
1352 CheckAndSetLockHeld(LOCALLOCK *locallock, bool acquired)
1353 {
1354 #ifdef USE_ASSERT_CHECKING
1355  if (LOCALLOCK_LOCKTAG(*locallock) == LOCKTAG_RELATION_EXTEND)
1356  IsRelationExtensionLockHeld = acquired;
1357  else if (LOCALLOCK_LOCKTAG(*locallock) == LOCKTAG_PAGE)
1358  IsPageLockHeld = acquired;
1359 
1360 #endif
1361 }
1362 
1363 /*
1364  * Subroutine to free a locallock entry
1365  */
1366 static void
1367 RemoveLocalLock(LOCALLOCK *locallock)
1368 {
1369  int i;
1370 
1371  for (i = locallock->numLockOwners - 1; i >= 0; i--)
1372  {
1373  if (locallock->lockOwners[i].owner != NULL)
1374  ResourceOwnerForgetLock(locallock->lockOwners[i].owner, locallock);
1375  }
1376  locallock->numLockOwners = 0;
1377  if (locallock->lockOwners != NULL)
1378  pfree(locallock->lockOwners);
1379  locallock->lockOwners = NULL;
1380 
1381  if (locallock->holdsStrongLockCount)
1382  {
1383  uint32 fasthashcode;
1384 
1385  fasthashcode = FastPathStrongLockHashPartition(locallock->hashcode);
1386 
1387  SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
1388  Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
1389  FastPathStrongRelationLocks->count[fasthashcode]--;
1390  locallock->holdsStrongLockCount = false;
1391  SpinLockRelease(&FastPathStrongRelationLocks->mutex);
1392  }
1393 
1394  if (!hash_search(LockMethodLocalHash,
1395  (void *) &(locallock->tag),
1396  HASH_REMOVE, NULL))
1397  elog(WARNING, "locallock table corrupted");
1398 
1399  /*
1400  * Indicate that the lock is released for certain types of locks
1401  */
1402  CheckAndSetLockHeld(locallock, false);
1403 }
1404 
1405 /*
1406  * LockCheckConflicts -- test whether requested lock conflicts
1407  * with those already granted
1408  *
1409  * Returns true if conflict, false if no conflict.
1410  *
1411  * NOTES:
1412  * Here's what makes this complicated: one process's locks don't
1413  * conflict with one another, no matter what purpose they are held for
1414  * (eg, session and transaction locks do not conflict). Nor do the locks
1415  * of one process in a lock group conflict with those of another process in
1416  * the same group. So, we must subtract off these locks when determining
1417  * whether the requested new lock conflicts with those already held.
1418  */
1419 bool
1420 LockCheckConflicts(LockMethod lockMethodTable,
1421  LOCKMODE lockmode,
1422  LOCK *lock,
1423  PROCLOCK *proclock)
1424 {
1425  int numLockModes = lockMethodTable->numLockModes;
1426  LOCKMASK myLocks;
1427  int conflictMask = lockMethodTable->conflictTab[lockmode];
1428  int conflictsRemaining[MAX_LOCKMODES];
1429  int totalConflictsRemaining = 0;
1430  int i;
1431  SHM_QUEUE *procLocks;
1432  PROCLOCK *otherproclock;
1433 
1434  /*
1435  * first check for global conflicts: If no locks conflict with my request,
1436  * then I get the lock.
1437  *
1438  * Checking for conflict: lock->grantMask represents the types of
1439  * currently held locks. conflictTable[lockmode] has a bit set for each
1440  * type of lock that conflicts with request. Bitwise compare tells if
1441  * there is a conflict.
1442  */
1443  if (!(conflictMask & lock->grantMask))
1444  {
1445  PROCLOCK_PRINT("LockCheckConflicts: no conflict", proclock);
1446  return false;
1447  }
1448 
1449  /*
1450  * Rats. Something conflicts. But it could still be my own lock, or a
1451  * lock held by another member of my locking group. First, figure out how
1452  * many conflicts remain after subtracting out any locks I hold myself.
1453  */
1454  myLocks = proclock->holdMask;
1455  for (i = 1; i <= numLockModes; i++)
1456  {
1457  if ((conflictMask & LOCKBIT_ON(i)) == 0)
1458  {
1459  conflictsRemaining[i] = 0;
1460  continue;
1461  }
1462  conflictsRemaining[i] = lock->granted[i];
1463  if (myLocks & LOCKBIT_ON(i))
1464  --conflictsRemaining[i];
1465  totalConflictsRemaining += conflictsRemaining[i];
1466  }
1467 
1468  /* If no conflicts remain, we get the lock. */
1469  if (totalConflictsRemaining == 0)
1470  {
1471  PROCLOCK_PRINT("LockCheckConflicts: resolved (simple)", proclock);
1472  return false;
1473  }
1474 
1475  /* If no group locking, it's definitely a conflict. */
1476  if (proclock->groupLeader == MyProc && MyProc->lockGroupLeader == NULL)
1477  {
1478  Assert(proclock->tag.myProc == MyProc);
1479  PROCLOCK_PRINT("LockCheckConflicts: conflicting (simple)",
1480  proclock);
1481  return true;
1482  }
1483 
1484  /*
1485  * Relation extension and page locks conflict even between members of
1486  * the same lock group.
1487  */
1488  if (LOCK_LOCKTAG(*lock) == LOCKTAG_RELATION_EXTEND ||
1489  (LOCK_LOCKTAG(*lock) == LOCKTAG_PAGE))
1490  {
1491  PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)",
1492  proclock);
1493  return true;
1494  }
1495 
1496  /*
1497  * Locks held in conflicting modes by members of our own lock group are
1498  * not real conflicts; we can subtract those out and see if we still have
1499  * a conflict. This is O(N) in the number of processes holding or
1500  * awaiting locks on this object. We could improve that by making the
1501  * shared memory state more complex (and larger) but it doesn't seem worth
1502  * it.
1503  */
1504  procLocks = &(lock->procLocks);
1505  otherproclock = (PROCLOCK *)
1506  SHMQueueNext(procLocks, procLocks, offsetof(PROCLOCK, lockLink));
1507  while (otherproclock != NULL)
1508  {
1509  if (proclock != otherproclock &&
1510  proclock->groupLeader == otherproclock->groupLeader &&
1511  (otherproclock->holdMask & conflictMask) != 0)
1512  {
1513  int intersectMask = otherproclock->holdMask & conflictMask;
1514 
1515  for (i = 1; i <= numLockModes; i++)
1516  {
1517  if ((intersectMask & LOCKBIT_ON(i)) != 0)
1518  {
1519  if (conflictsRemaining[i] <= 0)
1520  elog(PANIC, "proclocks held do not match lock");
1521  conflictsRemaining[i]--;
1522  totalConflictsRemaining--;
1523  }
1524  }
1525 
1526  if (totalConflictsRemaining == 0)
1527  {
1528  PROCLOCK_PRINT("LockCheckConflicts: resolved (group)",
1529  proclock);
1530  return false;
1531  }
1532  }
1533  otherproclock = (PROCLOCK *)
1534  SHMQueueNext(procLocks, &otherproclock->lockLink,
1535  offsetof(PROCLOCK, lockLink));
1536  }
1537 
1538  /* Nope, it's a real conflict. */
1539  PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)", proclock);
1540  return true;
1541 }
1542 
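/*
 * Example of the group-locking subtraction above: if some member of our lock
 * group already holds ShareLock on this object (granted[ShareLock] == 1) and
 * we now request RowExclusiveLock, the initial conflict count is 1; the scan
 * of the PROCLOCK list finds that the conflicting holder shares our
 * groupLeader, the count drops to zero, and the request is granted instead
 * of deadlocking against our own cooperating process.
 */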
1543 /*
1544  * GrantLock -- update the lock and proclock data structures to show
1545  * the lock request has been granted.
1546  *
1547  * NOTE: if proc was blocked, it also needs to be removed from the wait list
1548  * and have its waitLock/waitProcLock fields cleared. That's not done here.
1549  *
1550  * NOTE: the lock grant also has to be recorded in the associated LOCALLOCK
1551  * table entry; but since we may be awaking some other process, we can't do
1552  * that here; it's done by GrantLockLocal, instead.
1553  */
1554 void
1555 GrantLock(LOCK *lock, PROCLOCK *proclock, LOCKMODE lockmode)
1556 {
1557  lock->nGranted++;
1558  lock->granted[lockmode]++;
1559  lock->grantMask |= LOCKBIT_ON(lockmode);
1560  if (lock->granted[lockmode] == lock->requested[lockmode])
1561  lock->waitMask &= LOCKBIT_OFF(lockmode);
1562  proclock->holdMask |= LOCKBIT_ON(lockmode);
1563  LOCK_PRINT("GrantLock", lock, lockmode);
1564  Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
1565  Assert(lock->nGranted <= lock->nRequested);
1566 }
1567 
1568 /*
1569  * UnGrantLock -- opposite of GrantLock.
1570  *
1571  * Updates the lock and proclock data structures to show that the lock
1572  * is no longer held nor requested by the current holder.
1573  *
1574  * Returns true if there were any waiters waiting on the lock that
1575  * should now be woken up with ProcLockWakeup.
1576  */
1577 static bool
1578 UnGrantLock(LOCK *lock, LOCKMODE lockmode,
1579  PROCLOCK *proclock, LockMethod lockMethodTable)
1580 {
1581  bool wakeupNeeded = false;
1582 
1583  Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
1584  Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
1585  Assert(lock->nGranted <= lock->nRequested);
1586 
1587  /*
1588  * fix the general lock stats
1589  */
1590  lock->nRequested--;
1591  lock->requested[lockmode]--;
1592  lock->nGranted--;
1593  lock->granted[lockmode]--;
1594 
1595  if (lock->granted[lockmode] == 0)
1596  {
1597  /* change the conflict mask. No more of this lock type. */
1598  lock->grantMask &= LOCKBIT_OFF(lockmode);
1599  }
1600 
1601  LOCK_PRINT("UnGrantLock: updated", lock, lockmode);
1602 
1603  /*
1604  * We need only run ProcLockWakeup if the released lock conflicts with at
1605  * least one of the lock types requested by waiter(s). Otherwise whatever
1606  * conflict made them wait must still exist. NOTE: before MVCC, we could
1607  * skip wakeup if lock->granted[lockmode] was still positive. But that's
1608  * not true anymore, because the remaining granted locks might belong to
1609  * some waiter, who could now be awakened because he doesn't conflict with
1610  * his own locks.
1611  */
1612  if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
1613  wakeupNeeded = true;
1614 
1615  /*
1616  * Now fix the per-proclock state.
1617  */
1618  proclock->holdMask &= LOCKBIT_OFF(lockmode);
1619  PROCLOCK_PRINT("UnGrantLock: updated", proclock);
1620 
1621  return wakeupNeeded;
1622 }
1623 
1624 /*
1625  * CleanUpLock -- clean up after releasing a lock. We garbage-collect the
1626  * proclock and lock objects if possible, and call ProcLockWakeup if there
1627  * are remaining requests and the caller says it's OK. (Normally, this
1628  * should be called after UnGrantLock, and wakeupNeeded is the result from
1629  * UnGrantLock.)
1630  *
1631  * The appropriate partition lock must be held at entry, and will be
1632  * held at exit.
1633  */
1634 static void
1635 CleanUpLock(LOCK *lock, PROCLOCK *proclock,
1636  LockMethod lockMethodTable, uint32 hashcode,
1637  bool wakeupNeeded)
1638 {
1639  /*
1640  * If this was my last hold on this lock, delete my entry in the proclock
1641  * table.
1642  */
1643  if (proclock->holdMask == 0)
1644  {
1645  uint32 proclock_hashcode;
1646 
1647  PROCLOCK_PRINT("CleanUpLock: deleting", proclock);
1648  SHMQueueDelete(&proclock->lockLink);
1649  SHMQueueDelete(&proclock->procLink);
1650  proclock_hashcode = ProcLockHashCode(&proclock->tag, hashcode);
1651  if (!hash_search_with_hash_value(LockMethodProcLockHash,
1652  (void *) &(proclock->tag),
1653  proclock_hashcode,
1654  HASH_REMOVE,
1655  NULL))
1656  elog(PANIC, "proclock table corrupted");
1657  }
1658 
1659  if (lock->nRequested == 0)
1660  {
1661  /*
1662  * The caller just released the last lock, so garbage-collect the lock
1663  * object.
1664  */
1665  LOCK_PRINT("CleanUpLock: deleting", lock, 0);
1666  Assert(SHMQueueEmpty(&(lock->procLocks)));
1667  if (!hash_search_with_hash_value(LockMethodLockHash,
1668  (void *) &(lock->tag),
1669  hashcode,
1670  HASH_REMOVE,
1671  NULL))
1672  elog(PANIC, "lock table corrupted");
1673  }
1674  else if (wakeupNeeded)
1675  {
1676  /* There are waiters on this lock, so wake them up. */
1677  ProcLockWakeup(lockMethodTable, lock);
1678  }
1679 }
1680 
1681 /*
1682  * GrantLockLocal -- update the locallock data structures to show
1683  * the lock request has been granted.
1684  *
1685  * We expect that LockAcquire made sure there is room to add a new
1686  * ResourceOwner entry.
1687  */
1688 static void
1689 GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner)
1690 {
1691  LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
1692  int i;
1693 
1694  Assert(locallock->numLockOwners < locallock->maxLockOwners);
1695  /* Count the total */
1696  locallock->nLocks++;
1697  /* Count the per-owner lock */
1698  for (i = 0; i < locallock->numLockOwners; i++)
1699  {
1700  if (lockOwners[i].owner == owner)
1701  {
1702  lockOwners[i].nLocks++;
1703  return;
1704  }
1705  }
1706  lockOwners[i].owner = owner;
1707  lockOwners[i].nLocks = 1;
1708  locallock->numLockOwners++;
1709  if (owner != NULL)
1710  ResourceOwnerRememberLock(owner, locallock);
1711 
1712  /* Indicate that the lock is acquired for certain types of locks. */
1713  CheckAndSetLockHeld(locallock, true);
1714 }
1715 
1716 /*
1717  * BeginStrongLockAcquire - inhibit use of fastpath for a given LOCALLOCK,
1718  * and arrange for error cleanup if it fails
1719  */
1720 static void
1721 BeginStrongLockAcquire(LOCALLOCK *locallock, uint32 fasthashcode)
1722 {
1723  Assert(StrongLockInProgress == NULL);
1724  Assert(locallock->holdsStrongLockCount == false);
1725 
1726  /*
1727  * Adding to a memory location is not atomic, so we take a spinlock to
1728  * ensure we don't collide with someone else trying to bump the count at
1729  * the same time.
1730  *
1731  * XXX: It might be worth considering using an atomic fetch-and-add
1732  * instruction here, on architectures where that is supported.
1733  */
1734 
1735  SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
1736  FastPathStrongRelationLocks->count[fasthashcode]++;
1737  locallock->holdsStrongLockCount = true;
1738  StrongLockInProgress = locallock;
1739  SpinLockRelease(&FastPathStrongRelationLocks->mutex);
1740 }
1741 
1742 /*
1743  * FinishStrongLockAcquire - cancel pending cleanup for a strong lock
1744  * acquisition once it's no longer needed
1745  */
1746 static void
1747 FinishStrongLockAcquire(void)
1748 {
1749  StrongLockInProgress = NULL;
1750 }
1751 
1752 /*
1753  * AbortStrongLockAcquire - undo strong lock state changes performed by
1754  * BeginStrongLockAcquire.
1755  */
1756 void
1757 AbortStrongLockAcquire(void)
1758 {
1759  uint32 fasthashcode;
1760  LOCALLOCK *locallock = StrongLockInProgress;
1761 
1762  if (locallock == NULL)
1763  return;
1764 
1765  fasthashcode = FastPathStrongLockHashPartition(locallock->hashcode);
1766  Assert(locallock->holdsStrongLockCount == true);
1767  SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
1768  Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
1769  FastPathStrongRelationLocks->count[fasthashcode]--;
1770  locallock->holdsStrongLockCount = false;
1771  StrongLockInProgress = NULL;
1772  SpinLockRelease(&FastPathStrongRelationLocks->mutex);
1773 }
1774 
1775 /*
1776  * GrantAwaitedLock -- call GrantLockLocal for the lock we are doing
1777  * WaitOnLock on.
1778  *
1779  * proc.c needs this for the case where we are booted off the lock by
1780  * timeout, but discover that someone granted us the lock anyway.
1781  *
1782  * We could just export GrantLockLocal, but that would require including
1783  * resowner.h in lock.h, which creates circularity.
1784  */
1785 void
1786 GrantAwaitedLock(void)
1787 {
1788  GrantLockLocal(awaitedLock, awaitedOwner);
1789 }
1790 
1791 /*
1792  * MarkLockClear -- mark an acquired lock as "clear"
1793  *
1794  * This means that we know we have absorbed all sinval messages that other
1795  * sessions generated before we acquired this lock, and so we can confidently
1796  * assume we know about any catalog changes protected by this lock.
1797  */
1798 void
1799 MarkLockClear(LOCALLOCK *locallock)
1800 {
1801  Assert(locallock->nLocks > 0);
1802  locallock->lockCleared = true;
1803 }
1804 
1805 /*
1806  * WaitOnLock -- wait to acquire a lock
1807  *
1808  * Caller must have set MyProc->heldLocks to reflect locks already held
1809  * on the lockable object by this process.
1810  *
1811  * The appropriate partition lock must be held at entry.
1812  */
1813 static void
1814 WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner)
1815 {
1816  LOCKMETHODID lockmethodid = LOCALLOCK_LOCKMETHOD(*locallock);
1817  LockMethod lockMethodTable = LockMethods[lockmethodid];
1818  char *volatile new_status = NULL;
1819 
1820  LOCK_PRINT("WaitOnLock: sleeping on lock",
1821  locallock->lock, locallock->tag.mode);
1822 
1823  /* Report change to waiting status */
1824  if (update_process_title)
1825  {
1826  const char *old_status;
1827  int len;
1828 
1829  old_status = get_ps_display(&len);
1830  new_status = (char *) palloc(len + 8 + 1);
1831  memcpy(new_status, old_status, len);
1832  strcpy(new_status + len, " waiting");
1833  set_ps_display(new_status);
1834  new_status[len] = '\0'; /* truncate off " waiting" */
1835  }
1836 
1837  awaitedLock = locallock;
1838  awaitedOwner = owner;
1839 
1840  /*
1841  * NOTE: Think not to put any shared-state cleanup after the call to
1842  * ProcSleep, in either the normal or failure path. The lock state must
1843  * be fully set by the lock grantor, or by CheckDeadLock if we give up
1844  * waiting for the lock. This is necessary because of the possibility
1845  * that a cancel/die interrupt will interrupt ProcSleep after someone else
1846  * grants us the lock, but before we've noticed it. Hence, after granting,
1847  * the locktable state must fully reflect the fact that we own the lock;
1848  * we can't do additional work on return.
1849  *
1850  * We can and do use a PG_TRY block to try to clean up after failure, but
1851  * this still has a major limitation: elog(FATAL) can occur while waiting
1852  * (eg, a "die" interrupt), and then control won't come back here. So all
1853  * cleanup of essential state should happen in LockErrorCleanup, not here.
1854  * We can use PG_TRY to clear the "waiting" status flags, since doing that
1855  * is unimportant if the process exits.
1856  */
1857  PG_TRY();
1858  {
1859  if (ProcSleep(locallock, lockMethodTable) != PROC_WAIT_STATUS_OK)
1860  {
1861  /*
1862  * We failed as a result of a deadlock, see CheckDeadLock(). Quit
1863  * now.
1864  */
1865  awaitedLock = NULL;
1866  LOCK_PRINT("WaitOnLock: aborting on lock",
1867  locallock->lock, locallock->tag.mode);
1868  LWLockRelease(LockHashPartitionLock(locallock->hashcode));
1869 
1870  /*
1871  * Now that we aren't holding the partition lock, we can give an
1872  * error report including details about the detected deadlock.
1873  */
1874  DeadLockReport();
1875  /* not reached */
1876  }
1877  }
1878  PG_CATCH();
1879  {
1880  /* In this path, awaitedLock remains set until LockErrorCleanup */
1881 
1882  /* Report change to non-waiting status */
1883  if (update_process_title)
1884  {
1885  set_ps_display(new_status);
1886  pfree(new_status);
1887  }
1888 
1889  /* and propagate the error */
1890  PG_RE_THROW();
1891  }
1892  PG_END_TRY();
1893 
1894  awaitedLock = NULL;
1895 
1896  /* Report change to non-waiting status */
1897  if (update_process_title)
1898  {
1899  set_ps_display(new_status);
1900  pfree(new_status);
1901  }
1902 
1903  LOCK_PRINT("WaitOnLock: wakeup on lock",
1904  locallock->lock, locallock->tag.mode);
1905 }
1906 
1907 /*
1908  * Remove a proc from the wait-queue it is on (caller must know it is on one).
1909  * This is only used when the proc has failed to get the lock, so we set its
1910  * waitStatus to PROC_WAIT_STATUS_ERROR.
1911  *
1912  * Appropriate partition lock must be held by caller. Also, caller is
1913  * responsible for signaling the proc if needed.
1914  *
1915  * NB: this does not clean up any locallock object that may exist for the lock.
1916  */
1917 void
1918 RemoveFromWaitQueue(PGPROC *proc, uint32 hashcode)
1919 {
1920  LOCK *waitLock = proc->waitLock;
1921  PROCLOCK *proclock = proc->waitProcLock;
1922  LOCKMODE lockmode = proc->waitLockMode;
1923  LOCKMETHODID lockmethodid = LOCK_LOCKMETHOD(*waitLock);
1924 
1925  /* Make sure proc is waiting */
1926  Assert(proc->waitStatus == PROC_WAIT_STATUS_WAITING);
1927  Assert(proc->links.next != NULL);
1928  Assert(waitLock);
1929  Assert(waitLock->waitProcs.size > 0);
1930  Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
1931 
1932  /* Remove proc from lock's wait queue */
1933  SHMQueueDelete(&(proc->links));
1934  waitLock->waitProcs.size--;
1935 
1936  /* Undo increments of request counts by waiting process */
1937  Assert(waitLock->nRequested > 0);
1938  Assert(waitLock->nRequested > proc->waitLock->nGranted);
1939  waitLock->nRequested--;
1940  Assert(waitLock->requested[lockmode] > 0);
1941  waitLock->requested[lockmode]--;
1942  /* don't forget to clear waitMask bit if appropriate */
1943  if (waitLock->granted[lockmode] == waitLock->requested[lockmode])
1944  waitLock->waitMask &= LOCKBIT_OFF(lockmode);
1945 
1946  /* Clean up the proc's own state, and pass it the ok/fail signal */
1947  proc->waitLock = NULL;
1948  proc->waitProcLock = NULL;
1949  proc->waitStatus = PROC_WAIT_STATUS_ERROR;
1950 
1951  /*
1952  * Delete the proclock immediately if it represents no already-held locks.
1953  * (This must happen now because if the owner of the lock decides to
1954  * release it, and the requested/granted counts then go to zero,
1955  * LockRelease expects there to be no remaining proclocks.) Then see if
1956  * any other waiters for the lock can be woken up now.
1957  */
1958  CleanUpLock(waitLock, proclock,
1959  LockMethods[lockmethodid], hashcode,
1960  true);
1961 }
1962 
1963 /*
1964  * LockRelease -- look up 'locktag' and release one 'lockmode' lock on it.
1965  * Release a session lock if 'sessionLock' is true, else release a
1966  * regular transaction lock.
1967  *
1968  * Side Effects: find any waiting processes that are now wakable,
1969  * grant them their requested locks and awaken them.
1970  * (We have to grant the lock here to avoid a race between
1971  * the waking process and any new process that comes
1972  * along and requests the lock.)
1973  */
1974 bool
1975 LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
1976 {
1977  LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
1978  LockMethod lockMethodTable;
1979  LOCALLOCKTAG localtag;
1980  LOCALLOCK *locallock;
1981  LOCK *lock;
1982  PROCLOCK *proclock;
1983  LWLock *partitionLock;
1984  bool wakeupNeeded;
1985 
1986  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
1987  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
1988  lockMethodTable = LockMethods[lockmethodid];
1989  if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
1990  elog(ERROR, "unrecognized lock mode: %d", lockmode);
1991 
1992 #ifdef LOCK_DEBUG
1993  if (LOCK_DEBUG_ENABLED(locktag))
1994  elog(LOG, "LockRelease: lock [%u,%u] %s",
1995  locktag->locktag_field1, locktag->locktag_field2,
1996  lockMethodTable->lockModeNames[lockmode]);
1997 #endif
1998 
1999  /*
2000  * Find the LOCALLOCK entry for this lock and lockmode
2001  */
2002  MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
2003  localtag.lock = *locktag;
2004  localtag.mode = lockmode;
2005 
2006  locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
2007  (void *) &localtag,
2008  HASH_FIND, NULL);
2009 
2010  /*
2011  * let the caller print its own error message, too. Do not ereport(ERROR).
2012  */
2013  if (!locallock || locallock->nLocks <= 0)
2014  {
2015  elog(WARNING, "you don't own a lock of type %s",
2016  lockMethodTable->lockModeNames[lockmode]);
2017  return false;
2018  }
2019 
2020  /*
2021  * Decrease the count for the resource owner.
2022  */
2023  {
2024  LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
2025  ResourceOwner owner;
2026  int i;
2027 
2028  /* Identify owner for lock */
2029  if (sessionLock)
2030  owner = NULL;
2031  else
2032  owner = CurrentResourceOwner;
2033 
2034  for (i = locallock->numLockOwners - 1; i >= 0; i--)
2035  {
2036  if (lockOwners[i].owner == owner)
2037  {
2038  Assert(lockOwners[i].nLocks > 0);
2039  if (--lockOwners[i].nLocks == 0)
2040  {
2041  if (owner != NULL)
2042  ResourceOwnerForgetLock(owner, locallock);
2043  /* compact out unused slot */
2044  locallock->numLockOwners--;
2045  if (i < locallock->numLockOwners)
2046  lockOwners[i] = lockOwners[locallock->numLockOwners];
2047  }
2048  break;
2049  }
2050  }
2051  if (i < 0)
2052  {
2053  /* don't release a lock belonging to another owner */
2054  elog(WARNING, "you don't own a lock of type %s",
2055  lockMethodTable->lockModeNames[lockmode]);
2056  return false;
2057  }
2058  }
2059 
2060  /*
2061  * Decrease the total local count. If we're still holding the lock, we're
2062  * done.
2063  */
2064  locallock->nLocks--;
2065 
2066  if (locallock->nLocks > 0)
2067  return true;
2068 
2069  /*
2070  * At this point we can no longer suppose we are clear of invalidation
2071  * messages related to this lock. Although we'll delete the LOCALLOCK
2072  * object before any intentional return from this routine, it seems worth
2073  * the trouble to explicitly reset lockCleared right now, just in case
2074  * some error prevents us from deleting the LOCALLOCK.
2075  */
2076  locallock->lockCleared = false;
2077 
2078  /* Attempt fast release of any lock eligible for the fast path. */
2079  if (EligibleForRelationFastPath(locktag, lockmode) &&
2080  FastPathLocalUseCount > 0)
2081  {
2082  bool released;
2083 
2084  /*
2085  * We might not find the lock here, even if we originally entered it
2086  * here. Another backend may have moved it to the main table.
2087  */
2088  LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
2089  released = FastPathUnGrantRelationLock(locktag->locktag_field2,
2090  lockmode);
2091  LWLockRelease(&MyProc->fpInfoLock);
2092  if (released)
2093  {
2094  RemoveLocalLock(locallock);
2095  return true;
2096  }
2097  }
2098 
2099  /*
2100  * Otherwise we've got to mess with the shared lock table.
2101  */
2102  partitionLock = LockHashPartitionLock(locallock->hashcode);
2103 
2104  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2105 
2106  /*
2107  * Normally, we don't need to re-find the lock or proclock, since we kept
2108  * their addresses in the locallock table, and they couldn't have been
2109  * removed while we were holding a lock on them. But it's possible that
2110  * the lock was taken fast-path and has since been moved to the main hash
2111  * table by another backend, in which case we will need to look up the
2112  * objects here. We assume the lock field is NULL if so.
2113  */
2114  lock = locallock->lock;
2115  if (!lock)
2116  {
2117  PROCLOCKTAG proclocktag;
2118 
2119  Assert(EligibleForRelationFastPath(locktag, lockmode));
2120  lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
2121  (const void *) locktag,
2122  locallock->hashcode,
2123  HASH_FIND,
2124  NULL);
2125  if (!lock)
2126  elog(ERROR, "failed to re-find shared lock object");
2127  locallock->lock = lock;
2128 
2129  proclocktag.myLock = lock;
2130  proclocktag.myProc = MyProc;
2131  locallock->proclock = (PROCLOCK *) hash_search(LockMethodProcLockHash,
2132  (void *) &proclocktag,
2133  HASH_FIND,
2134  NULL);
2135  if (!locallock->proclock)
2136  elog(ERROR, "failed to re-find shared proclock object");
2137  }
2138  LOCK_PRINT("LockRelease: found", lock, lockmode);
2139  proclock = locallock->proclock;
2140  PROCLOCK_PRINT("LockRelease: found", proclock);
2141 
2142  /*
2143  * Double-check that we are actually holding a lock of the type we want to
2144  * release.
2145  */
2146  if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
2147  {
2148  PROCLOCK_PRINT("LockRelease: WRONGTYPE", proclock);
2149  LWLockRelease(partitionLock);
2150  elog(WARNING, "you don't own a lock of type %s",
2151  lockMethodTable->lockModeNames[lockmode]);
2152  RemoveLocalLock(locallock);
2153  return false;
2154  }
2155 
2156  /*
2157  * Do the releasing. CleanUpLock will waken any now-wakable waiters.
2158  */
2159  wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
2160 
2161  CleanUpLock(lock, proclock,
2162  lockMethodTable, locallock->hashcode,
2163  wakeupNeeded);
2164 
2165  LWLockRelease(partitionLock);
2166 
2167  RemoveLocalLock(locallock);
2168  return true;
2169 }
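/*
 * [Editor's example, not part of lock.c] A minimal sketch of how a caller
 * pairs LockAcquire() with LockRelease() on a relation lock, using the
 * LOCKTAG setup macro from lock.h.  Real callers normally go through the
 * wrappers in lmgr.c (LockRelationOid/UnlockRelationOid) rather than calling
 * these routines directly; the macro and function names are real, but the
 * wrapper function below is illustrative only.
 */
#ifdef LOCK_C_USAGE_SKETCH			/* never defined; illustration only */
static void
example_acquire_and_release(Oid dbid, Oid relid)
{
	LOCKTAG		tag;

	SET_LOCKTAG_RELATION(tag, dbid, relid);

	/* take an ordinary (transaction-scoped) AccessShareLock, waiting if needed */
	(void) LockAcquire(&tag, AccessShareLock, false, false);

	/* ... use the relation ... */

	/* release one AccessShareLock; returns false if we didn't hold it */
	(void) LockRelease(&tag, AccessShareLock, false);
}
#endif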
2170 
2171 /*
2172  * LockReleaseAll -- Release all locks of the specified lock method that
2173  * are held by the current process.
2174  *
2175  * Well, not necessarily *all* locks. The available behaviors are:
2176  * allLocks == true: release all locks including session locks.
2177  * allLocks == false: release all non-session locks.
2178  */
2179 void
2180 LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
2181 {
2182  HASH_SEQ_STATUS status;
2183  LockMethod lockMethodTable;
2184  int i,
2185  numLockModes;
2186  LOCALLOCK *locallock;
2187  LOCK *lock;
2188  PROCLOCK *proclock;
2189  int partition;
2190  bool have_fast_path_lwlock = false;
2191 
2192  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2193  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2194  lockMethodTable = LockMethods[lockmethodid];
2195 
2196 #ifdef LOCK_DEBUG
2197  if (*(lockMethodTable->trace_flag))
2198  elog(LOG, "LockReleaseAll: lockmethod=%d", lockmethodid);
2199 #endif
2200 
2201  /*
2202  * Get rid of our fast-path VXID lock, if appropriate. Note that this is
2203  * the only way that the lock we hold on our own VXID can ever get
2204  * released: it is always and only released when a toplevel transaction
2205  * ends.
2206  */
2207  if (lockmethodid == DEFAULT_LOCKMETHOD)
2208  VirtualXactLockTableCleanup();
2209 
2210  numLockModes = lockMethodTable->numLockModes;
2211 
2212  /*
2213  * First we run through the locallock table and get rid of unwanted
2214  * entries, then we scan the process's proclocks and get rid of those. We
2215  * do this separately because we may have multiple locallock entries
2216  * pointing to the same proclock, and we daren't end up with any dangling
2217  * pointers. Fast-path locks are cleaned up during the locallock table
2218  * scan, though.
2219  */
2220  hash_seq_init(&status, LockMethodLocalHash);
2221 
2222  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2223  {
2224  /*
2225  * If the LOCALLOCK entry is unused, we must've run out of shared
2226  * memory while trying to set up this lock. Just forget the local
2227  * entry.
2228  */
2229  if (locallock->nLocks == 0)
2230  {
2231  RemoveLocalLock(locallock);
2232  continue;
2233  }
2234 
2235  /* Ignore items that are not of the lockmethod to be removed */
2236  if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
2237  continue;
2238 
2239  /*
2240  * If we are asked to release all locks, we can just zap the entry.
2241  * Otherwise, must scan to see if there are session locks. We assume
2242  * there is at most one lockOwners entry for session locks.
2243  */
2244  if (!allLocks)
2245  {
2246  LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
2247 
2248  /* If session lock is above array position 0, move it down to 0 */
2249  for (i = 0; i < locallock->numLockOwners; i++)
2250  {
2251  if (lockOwners[i].owner == NULL)
2252  lockOwners[0] = lockOwners[i];
2253  else
2254  ResourceOwnerForgetLock(lockOwners[i].owner, locallock);
2255  }
2256 
2257  if (locallock->numLockOwners > 0 &&
2258  lockOwners[0].owner == NULL &&
2259  lockOwners[0].nLocks > 0)
2260  {
2261  /* Fix the locallock to show just the session locks */
2262  locallock->nLocks = lockOwners[0].nLocks;
2263  locallock->numLockOwners = 1;
2264  /* We aren't deleting this locallock, so done */
2265  continue;
2266  }
2267  else
2268  locallock->numLockOwners = 0;
2269  }
2270 
2271  /*
2272  * If the lock or proclock pointers are NULL, this lock was taken via
2273  * the relation fast-path (and is not known to have been transferred).
2274  */
2275  if (locallock->proclock == NULL || locallock->lock == NULL)
2276  {
2277  LOCKMODE lockmode = locallock->tag.mode;
2278  Oid relid;
2279 
2280  /* Verify that a fast-path lock is what we've got. */
2281  if (!EligibleForRelationFastPath(&locallock->tag.lock, lockmode))
2282  elog(PANIC, "locallock table corrupted");
2283 
2284  /*
2285  * If we don't currently hold the LWLock that protects our
2286  * fast-path data structures, we must acquire it before attempting
2287  * to release the lock via the fast-path. We will continue to
2288  * hold the LWLock until we're done scanning the locallock table,
2289  * unless we hit a transferred fast-path lock. (XXX is this
2290  * really such a good idea? There could be a lot of entries ...)
2291  */
2292  if (!have_fast_path_lwlock)
2293  {
2294  LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
2295  have_fast_path_lwlock = true;
2296  }
2297 
2298  /* Attempt fast-path release. */
2299  relid = locallock->tag.lock.locktag_field2;
2300  if (FastPathUnGrantRelationLock(relid, lockmode))
2301  {
2302  RemoveLocalLock(locallock);
2303  continue;
2304  }
2305 
2306  /*
2307  * Our lock, originally taken via the fast path, has been
2308  * transferred to the main lock table. That's going to require
2309  * some extra work, so release our fast-path lock before starting.
2310  */
2311  LWLockRelease(&MyProc->fpInfoLock);
2312  have_fast_path_lwlock = false;
2313 
2314  /*
2315  * Now dump the lock. We haven't got a pointer to the LOCK or
2316  * PROCLOCK in this case, so we have to handle this a bit
2317  * differently than a normal lock release. Unfortunately, this
2318  * requires an extra LWLock acquire-and-release cycle on the
2319  * partitionLock, but hopefully it shouldn't happen often.
2320  */
2321  LockRefindAndRelease(lockMethodTable, MyProc,
2322  &locallock->tag.lock, lockmode, false);
2323  RemoveLocalLock(locallock);
2324  continue;
2325  }
2326 
2327  /* Mark the proclock to show we need to release this lockmode */
2328  if (locallock->nLocks > 0)
2329  locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
2330 
2331  /* And remove the locallock hashtable entry */
2332  RemoveLocalLock(locallock);
2333  }
2334 
2335  /* Done with the fast-path data structures */
2336  if (have_fast_path_lwlock)
2337  LWLockRelease(&MyProc->fpInfoLock);
2338 
2339  /*
2340  * Now, scan each lock partition separately.
2341  */
2342  for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
2343  {
2344  LWLock *partitionLock;
2345  SHM_QUEUE *procLocks = &(MyProc->myProcLocks[partition]);
2346  PROCLOCK *nextplock;
2347 
2348  partitionLock = LockHashPartitionLockByIndex(partition);
2349 
2350  /*
2351  * If the proclock list for this partition is empty, we can skip
2352  * acquiring the partition lock. This optimization is trickier than
2353  * it looks, because another backend could be in process of adding
2354  * something to our proclock list due to promoting one of our
2355  * fast-path locks. However, any such lock must be one that we
2356  * decided not to delete above, so it's okay to skip it again now;
2357  * we'd just decide not to delete it again. We must, however, be
2358  * careful to re-fetch the list header once we've acquired the
2359  * partition lock, to be sure we have a valid, up-to-date pointer.
2360  * (There is probably no significant risk if pointer fetch/store is
2361  * atomic, but we don't wish to assume that.)
2362  *
2363  * XXX This argument assumes that the locallock table correctly
2364  * represents all of our fast-path locks. While allLocks mode
2365  * guarantees to clean up all of our normal locks regardless of the
2366  * locallock situation, we lose that guarantee for fast-path locks.
2367  * This is not ideal.
2368  */
2369  if (SHMQueueNext(procLocks, procLocks,
2370  offsetof(PROCLOCK, procLink)) == NULL)
2371  continue; /* needn't examine this partition */
2372 
2373  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2374 
2375  for (proclock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks,
2376  offsetof(PROCLOCK, procLink));
2377  proclock;
2378  proclock = nextplock)
2379  {
2380  bool wakeupNeeded = false;
2381 
2382  /* Get link first, since we may unlink/delete this proclock */
2383  nextplock = (PROCLOCK *)
2384  SHMQueueNext(procLocks, &proclock->procLink,
2385  offsetof(PROCLOCK, procLink));
2386 
2387  Assert(proclock->tag.myProc == MyProc);
2388 
2389  lock = proclock->tag.myLock;
2390 
2391  /* Ignore items that are not of the lockmethod to be removed */
2392  if (LOCK_LOCKMETHOD(*lock) != lockmethodid)
2393  continue;
2394 
2395  /*
2396  * In allLocks mode, force release of all locks even if locallock
2397  * table had problems
2398  */
2399  if (allLocks)
2400  proclock->releaseMask = proclock->holdMask;
2401  else
2402  Assert((proclock->releaseMask & ~proclock->holdMask) == 0);
2403 
2404  /*
2405  * Ignore items that have nothing to be released, unless they have
2406  * holdMask == 0 and are therefore recyclable
2407  */
2408  if (proclock->releaseMask == 0 && proclock->holdMask != 0)
2409  continue;
2410 
2411  PROCLOCK_PRINT("LockReleaseAll", proclock);
2412  LOCK_PRINT("LockReleaseAll", lock, 0);
2413  Assert(lock->nRequested >= 0);
2414  Assert(lock->nGranted >= 0);
2415  Assert(lock->nGranted <= lock->nRequested);
2416  Assert((proclock->holdMask & ~lock->grantMask) == 0);
2417 
2418  /*
2419  * Release the previously-marked lock modes
2420  */
2421  for (i = 1; i <= numLockModes; i++)
2422  {
2423  if (proclock->releaseMask & LOCKBIT_ON(i))
2424  wakeupNeeded |= UnGrantLock(lock, i, proclock,
2425  lockMethodTable);
2426  }
2427  Assert((lock->nRequested >= 0) && (lock->nGranted >= 0));
2428  Assert(lock->nGranted <= lock->nRequested);
2429  LOCK_PRINT("LockReleaseAll: updated", lock, 0);
2430 
2431  proclock->releaseMask = 0;
2432 
2433  /* CleanUpLock will wake up waiters if needed. */
2434  CleanUpLock(lock, proclock,
2435  lockMethodTable,
2436  LockTagHashCode(&lock->tag),
2437  wakeupNeeded);
2438  } /* loop over PROCLOCKs within this partition */
2439 
2440  LWLockRelease(partitionLock);
2441  } /* loop over partitions */
2442 
2443 #ifdef LOCK_DEBUG
2444  if (*(lockMethodTable->trace_flag))
2445  elog(LOG, "LockReleaseAll done");
2446 #endif
2447 }
2448 
2449 /*
2450  * LockReleaseSession -- Release all session locks of the specified lock method
2451  * that are held by the current process.
2452  */
2453 void
2454 LockReleaseSession(LOCKMETHODID lockmethodid)
2455 {
2456  HASH_SEQ_STATUS status;
2457  LOCALLOCK *locallock;
2458 
2459  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2460  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2461 
2462  hash_seq_init(&status, LockMethodLocalHash);
2463 
2464  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2465  {
2466  /* Ignore items that are not of the specified lock method */
2467  if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
2468  continue;
2469 
2470  ReleaseLockIfHeld(locallock, true);
2471  }
2472 }
2473 
2474 /*
2475  * LockReleaseCurrentOwner
2476  * Release all locks belonging to CurrentResourceOwner
2477  *
2478  * If the caller knows what those locks are, it can pass them as an array.
2479  * That speeds up the call significantly, when a lot of locks are held.
2480  * Otherwise, pass NULL for locallocks, and we'll traverse through our hash
2481  * table to find them.
2482  */
2483 void
2484 LockReleaseCurrentOwner(LOCALLOCK **locallocks, int nlocks)
2485 {
2486  if (locallocks == NULL)
2487  {
2488  HASH_SEQ_STATUS status;
2489  LOCALLOCK *locallock;
2490 
2491  hash_seq_init(&status, LockMethodLocalHash);
2492 
2493  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2494  ReleaseLockIfHeld(locallock, false);
2495  }
2496  else
2497  {
2498  int i;
2499 
2500  for (i = nlocks - 1; i >= 0; i--)
2501  ReleaseLockIfHeld(locallocks[i], false);
2502  }
2503 }
2504 
2505 /*
2506  * ReleaseLockIfHeld
2507  * Release any session-level locks on this lockable object if sessionLock
2508  * is true; else, release any locks held by CurrentResourceOwner.
2509  *
2510  * It is tempting to pass this a ResourceOwner pointer (or NULL for session
2511  * locks), but without refactoring LockRelease() we cannot support releasing
2512  * locks belonging to resource owners other than CurrentResourceOwner.
2513  * If we were to refactor, it'd be a good idea to fix it so we don't have to
2514  * do a hashtable lookup of the locallock, too. However, currently this
2515  * function isn't used heavily enough to justify refactoring for its
2516  * convenience.
2517  */
2518 static void
2519 ReleaseLockIfHeld(LOCALLOCK *locallock, bool sessionLock)
2520 {
2521  ResourceOwner owner;
2522  LOCALLOCKOWNER *lockOwners;
2523  int i;
2524 
2525  /* Identify owner for lock (must match LockRelease!) */
2526  if (sessionLock)
2527  owner = NULL;
2528  else
2529  owner = CurrentResourceOwner;
2530 
2531  /* Scan to see if there are any locks belonging to the target owner */
2532  lockOwners = locallock->lockOwners;
2533  for (i = locallock->numLockOwners - 1; i >= 0; i--)
2534  {
2535  if (lockOwners[i].owner == owner)
2536  {
2537  Assert(lockOwners[i].nLocks > 0);
2538  if (lockOwners[i].nLocks < locallock->nLocks)
2539  {
2540  /*
2541  * We will still hold this lock after forgetting this
2542  * ResourceOwner.
2543  */
2544  locallock->nLocks -= lockOwners[i].nLocks;
2545  /* compact out unused slot */
2546  locallock->numLockOwners--;
2547  if (owner != NULL)
2548  ResourceOwnerForgetLock(owner, locallock);
2549  if (i < locallock->numLockOwners)
2550  lockOwners[i] = lockOwners[locallock->numLockOwners];
2551  }
2552  else
2553  {
2554  Assert(lockOwners[i].nLocks == locallock->nLocks);
2555  /* We want to call LockRelease just once */
2556  lockOwners[i].nLocks = 1;
2557  locallock->nLocks = 1;
2558  if (!LockRelease(&locallock->tag.lock,
2559  locallock->tag.mode,
2560  sessionLock))
2561  elog(WARNING, "ReleaseLockIfHeld: failed??");
2562  }
2563  break;
2564  }
2565  }
2566 }
2567 
2568 /*
2569  * LockReassignCurrentOwner
2570  * Reassign all locks belonging to CurrentResourceOwner to belong
2571  * to its parent resource owner.
2572  *
2573  * If the caller knows what those locks are, it can pass them as an array.
2574  * That speeds up the call significantly, when a lot of locks are held
2575  * (e.g pg_dump with a large schema). Otherwise, pass NULL for locallocks,
2576  * and we'll traverse through our hash table to find them.
2577  */
2578 void
2579 LockReassignCurrentOwner(LOCALLOCK **locallocks, int nlocks)
2580 {
2581  ResourceOwner parent = ResourceOwnerGetParent(CurrentResourceOwner);
2582 
2583  Assert(parent != NULL);
2584 
2585  if (locallocks == NULL)
2586  {
2587  HASH_SEQ_STATUS status;
2588  LOCALLOCK *locallock;
2589 
2590  hash_seq_init(&status, LockMethodLocalHash);
2591 
2592  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2593  LockReassignOwner(locallock, parent);
2594  }
2595  else
2596  {
2597  int i;
2598 
2599  for (i = nlocks - 1; i >= 0; i--)
2600  LockReassignOwner(locallocks[i], parent);
2601  }
2602 }
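/*
 * [Editor's note, not part of lock.c] LockReassignCurrentOwner is what
 * resowner.c invokes when a subtransaction commits: locks acquired under the
 * subtransaction's resource owner are handed up to the parent owner rather
 * than released, which is why locks taken in a committed subtransaction are
 * retained until the top-level transaction ends.
 */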
2603 
2604 /*
2605  * Subroutine of LockReassignCurrentOwner. Reassigns a given lock belonging to
2606  * CurrentResourceOwner to its parent.
2607  */
2608 static void
2609 LockReassignOwner(LOCALLOCK *locallock, ResourceOwner parent)
2610 {
2611  LOCALLOCKOWNER *lockOwners;
2612  int i;
2613  int ic = -1;
2614  int ip = -1;
2615 
2616  /*
2617  * Scan to see if there are any locks belonging to current owner or its
2618  * parent
2619  */
2620  lockOwners = locallock->lockOwners;
2621  for (i = locallock->numLockOwners - 1; i >= 0; i--)
2622  {
2623  if (lockOwners[i].owner == CurrentResourceOwner)
2624  ic = i;
2625  else if (lockOwners[i].owner == parent)
2626  ip = i;
2627  }
2628 
2629  if (ic < 0)
2630  return; /* no current locks */
2631 
2632  if (ip < 0)
2633  {
2634  /* Parent has no slot, so just give it the child's slot */
2635  lockOwners[ic].owner = parent;
2636  ResourceOwnerRememberLock(parent, locallock);
2637  }
2638  else
2639  {
2640  /* Merge child's count with parent's */
2641  lockOwners[ip].nLocks += lockOwners[ic].nLocks;
2642  /* compact out unused slot */
2643  locallock->numLockOwners--;
2644  if (ic < locallock->numLockOwners)
2645  lockOwners[ic] = lockOwners[locallock->numLockOwners];
2646  }
2647  ResourceOwnerForgetLock(CurrentResourceOwner, locallock);
2648 }
2649 
2650 /*
2651  * FastPathGrantRelationLock
2652  * Grant lock using per-backend fast-path array, if there is space.
2653  */
2654 static bool
2655 FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode)
2656 {
2657  uint32 f;
2658  uint32 unused_slot = FP_LOCK_SLOTS_PER_BACKEND;
2659 
2660  /* Scan for existing entry for this relid, remembering empty slot. */
2661  for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
2662  {
2663  if (FAST_PATH_GET_BITS(MyProc, f) == 0)
2664  unused_slot = f;
2665  else if (MyProc->fpRelId[f] == relid)
2666  {
2667  Assert(!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode));
2668  FAST_PATH_SET_LOCKMODE(MyProc, f, lockmode);
2669  return true;
2670  }
2671  }
2672 
2673  /* If no existing entry, use any empty slot. */
2674  if (unused_slot < FP_LOCK_SLOTS_PER_BACKEND)
2675  {
2676  MyProc->fpRelId[unused_slot] = relid;
2677  FAST_PATH_SET_LOCKMODE(MyProc, unused_slot, lockmode);
2678  ++FastPathLocalUseCount;
2679  return true;
2680  }
2681 
2682  /* No existing entry, and no empty slot. */
2683  return false;
2684 }
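/*
 * [Editor's note, not part of lock.c] Worked example of the fast-path slot
 * layout used above, per the FAST_PATH_* macros defined earlier in this file
 * (in this version, 16 slots of 3 bits each in MyProc->fpLockBits, covering
 * the weak modes AccessShareLock, RowShareLock and RowExclusiveLock).  If
 * relation 16384 occupies fpRelId[2] and we grant it AccessShareLock
 * (mode 1), the macro sets bit 3*2 + (1 - FAST_PATH_LOCKNUMBER_OFFSET) =
 * bit 6 of fpLockBits; granting RowExclusiveLock (mode 3) on the same slot
 * would additionally set bit 8.  FAST_PATH_GET_BITS(MyProc, 2) then returns
 * 0b101, and the slot counts as in use while any of its three bits are set.
 */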
2685 
2686 /*
2687  * FastPathUnGrantRelationLock
2688  * Release fast-path lock, if present. Update backend-private local
2689  * use count, while we're at it.
2690  */
2691 static bool
2692 FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode)
2693 {
2694  uint32 f;
2695  bool result = false;
2696 
2697  FastPathLocalUseCount = 0;
2698  for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
2699  {
2700  if (MyProc->fpRelId[f] == relid
2701  && FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
2702  {
2703  Assert(!result);
2704  FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
2705  result = true;
2706  /* we continue iterating so as to update FastPathLocalUseCount */
2707  }
2708  if (FAST_PATH_GET_BITS(MyProc, f) != 0)
2709  ++FastPathLocalUseCount;
2710  }
2711  return result;
2712 }
2713 
2714 /*
2715  * FastPathTransferRelationLocks
2716  * Transfer locks matching the given lock tag from per-backend fast-path
2717  * arrays to the shared hash table.
2718  *
2719  * Returns true if successful, false if ran out of shared memory.
2720  */
2721 static bool
2722 FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag,
2723  uint32 hashcode)
2724 {
2725  LWLock *partitionLock = LockHashPartitionLock(hashcode);
2726  Oid relid = locktag->locktag_field2;
2727  uint32 i;
2728 
2729  /*
2730  * Every PGPROC that can potentially hold a fast-path lock is present in
2731  * ProcGlobal->allProcs. Prepared transactions are not, but any
2732  * outstanding fast-path locks held by prepared transactions are
2733  * transferred to the main lock table.
2734  */
2735  for (i = 0; i < ProcGlobal->allProcCount; i++)
2736  {
2737  PGPROC *proc = &ProcGlobal->allProcs[i];
2738  uint32 f;
2739 
2740  LWLockAcquire(&proc->fpInfoLock, LW_EXCLUSIVE);
2741 
2742  /*
2743  * If the target backend isn't referencing the same database as the
2744  * lock, then we needn't examine the individual relation IDs at all;
2745  * none of them can be relevant.
2746  *
2747  * proc->databaseId is set at backend startup time and never changes
2748  * thereafter, so it might be safe to perform this test before
2749  * acquiring &proc->fpInfoLock. In particular, it's certainly safe to
2750  * assume that if the target backend holds any fast-path locks, it
2751  * must have performed a memory-fencing operation (in particular, an
2752  * LWLock acquisition) since setting proc->databaseId. However, it's
2753  * less clear that our backend is certain to have performed a memory
2754  * fencing operation since the other backend set proc->databaseId. So
2755  * for now, we test it after acquiring the LWLock just to be safe.
2756  */
2757  if (proc->databaseId != locktag->locktag_field1)
2758  {
2759  LWLockRelease(&proc->fpInfoLock);
2760  continue;
2761  }
2762 
2763  for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
2764  {
2765  uint32 lockmode;
2766 
2767  /* Look for an allocated slot matching the given relid. */
2768  if (relid != proc->fpRelId[f] || FAST_PATH_GET_BITS(proc, f) == 0)
2769  continue;
2770 
2771  /* Find or create lock object. */
2772  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2773  for (lockmode = FAST_PATH_LOCKNUMBER_OFFSET;
2774  lockmode < FAST_PATH_LOCKNUMBER_OFFSET + FAST_PATH_BITS_PER_SLOT;
2775  ++lockmode)
2776  {
2777  PROCLOCK *proclock;
2778 
2779  if (!FAST_PATH_CHECK_LOCKMODE(proc, f, lockmode))
2780  continue;
2781  proclock = SetupLockInTable(lockMethodTable, proc, locktag,
2782  hashcode, lockmode);
2783  if (!proclock)
2784  {
2785  LWLockRelease(partitionLock);
2786  LWLockRelease(&proc->fpInfoLock);
2787  return false;
2788  }
2789  GrantLock(proclock->tag.myLock, proclock, lockmode);
2790  FAST_PATH_CLEAR_LOCKMODE(proc, f, lockmode);
2791  }
2792  LWLockRelease(partitionLock);
2793 
2794  /* No need to examine remaining slots. */
2795  break;
2796  }
2797  LWLockRelease(&proc->fpInfoLock);
2798  }
2799  return true;
2800 }
2801 
2802 /*
2803  * FastPathGetRelationLockEntry
2804  * Return the PROCLOCK for a lock originally taken via the fast-path,
2805  * transferring it to the primary lock table if necessary.
2806  *
2807  * Note: caller takes care of updating the locallock object.
2808  */
2809 static PROCLOCK *
2810 FastPathGetRelationLockEntry(LOCALLOCK *locallock)
2811 {
2812  LockMethod lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];
2813  LOCKTAG *locktag = &locallock->tag.lock;
2814  PROCLOCK *proclock = NULL;
2815  LWLock *partitionLock = LockHashPartitionLock(locallock->hashcode);
2816  Oid relid = locktag->locktag_field2;
2817  uint32 f;
2818 
2819  LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
2820 
2821  for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
2822  {
2823  uint32 lockmode;
2824 
2825  /* Look for an allocated slot matching the given relid. */
2826  if (relid != MyProc->fpRelId[f] || FAST_PATH_GET_BITS(MyProc, f) == 0)
2827  continue;
2828 
2829  /* If we don't have a lock of the given mode, forget it! */
2830  lockmode = locallock->tag.mode;
2831  if (!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
2832  break;
2833 
2834  /* Find or create lock object. */
2835  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2836 
2837  proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
2838  locallock->hashcode, lockmode);
2839  if (!proclock)
2840  {
2841  LWLockRelease(partitionLock);
2842  LWLockRelease(&MyProc->fpInfoLock);
2843  ereport(ERROR,
2844  (errcode(ERRCODE_OUT_OF_MEMORY),
2845  errmsg("out of shared memory"),
2846  errhint("You might need to increase max_locks_per_transaction.")));
2847  }
2848  GrantLock(proclock->tag.myLock, proclock, lockmode);
2849  FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
2850 
2851  LWLockRelease(partitionLock);
2852 
2853  /* No need to examine remaining slots. */
2854  break;
2855  }
2856 
2857  LWLockRelease(&MyProc->fpInfoLock);
2858 
2859  /* Lock may have already been transferred by some other backend. */
2860  if (proclock == NULL)
2861  {
2862  LOCK *lock;
2863  PROCLOCKTAG proclocktag;
2864  uint32 proclock_hashcode;
2865 
2866  LWLockAcquire(partitionLock, LW_SHARED);
2867 
2868  lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
2869  (void *) locktag,
2870  locallock->hashcode,
2871  HASH_FIND,
2872  NULL);
2873  if (!lock)
2874  elog(ERROR, "failed to re-find shared lock object");
2875 
2876  proclocktag.myLock = lock;
2877  proclocktag.myProc = MyProc;
2878 
2879  proclock_hashcode = ProcLockHashCode(&proclocktag, locallock->hashcode);
2880  proclock = (PROCLOCK *)
2881  hash_search_with_hash_value(LockMethodProcLockHash,
2882  (void *) &proclocktag,
2883  proclock_hashcode,
2884  HASH_FIND,
2885  NULL);
2886  if (!proclock)
2887  elog(ERROR, "failed to re-find shared proclock object");
2888  LWLockRelease(partitionLock);
2889  }
2890 
2891  return proclock;
2892 }
2893 
2894 /*
2895  * GetLockConflicts
2896  * Get an array of VirtualTransactionIds of xacts currently holding locks
2897  * that would conflict with the specified lock/lockmode.
2898  * xacts merely awaiting such a lock are NOT reported.
2899  *
2900  * The result array is palloc'd and is terminated with an invalid VXID.
2901  * *countp, if not null, is updated to the number of items set.
2902  *
2903  * Of course, the result could be out of date by the time it's returned,
2904  * so use of this function has to be thought about carefully.
2905  *
2906  * Note we never include the current xact's vxid in the result array,
2907  * since an xact never blocks itself.
2908  */
2909 VirtualTransactionId *
2910 GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
2911 {
2912  static VirtualTransactionId *vxids;
2913  LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
2914  LockMethod lockMethodTable;
2915  LOCK *lock;
2916  LOCKMASK conflictMask;
2917  SHM_QUEUE *procLocks;
2918  PROCLOCK *proclock;
2919  uint32 hashcode;
2920  LWLock *partitionLock;
2921  int count = 0;
2922  int fast_count = 0;
2923 
2924  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2925  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2926  lockMethodTable = LockMethods[lockmethodid];
2927  if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
2928  elog(ERROR, "unrecognized lock mode: %d", lockmode);
2929 
2930  /*
2931  * Allocate memory to store results, and fill with InvalidVXID. We only
2932  * need enough space for MaxBackends + max_prepared_xacts + a terminator.
2933  * When in hot standby, allocate the array just once in TopMemoryContext and reuse it.
2934  */
2935  if (InHotStandby)
2936  {
2937  if (vxids == NULL)
2938  vxids = (VirtualTransactionId *)
2939  MemoryContextAlloc(TopMemoryContext,
2940  sizeof(VirtualTransactionId) *
2941  (MaxBackends + max_prepared_xacts + 1));
2942  }
2943  else
2944  vxids = (VirtualTransactionId *)
2945  palloc0(sizeof(VirtualTransactionId) *
2946  (MaxBackends + max_prepared_xacts + 1));
2947 
2948  /* Compute hash code and partition lock, and look up conflicting modes. */
2949  hashcode = LockTagHashCode(locktag);
2950  partitionLock = LockHashPartitionLock(hashcode);
2951  conflictMask = lockMethodTable->conflictTab[lockmode];
2952 
2953  /*
2954  * Fast path locks might not have been entered in the primary lock table.
2955  * If the lock we're dealing with could conflict with such a lock, we must
2956  * examine each backend's fast-path array for conflicts.
2957  */
2958  if (ConflictsWithRelationFastPath(locktag, lockmode))
2959  {
2960  int i;
2961  Oid relid = locktag->locktag_field2;
2962  VirtualTransactionId vxid;
2963 
2964  /*
2965  * Iterate over relevant PGPROCs. Anything held by a prepared
2966  * transaction will have been transferred to the primary lock table,
2967  * so we need not worry about those. This is all a bit fuzzy, because
2968  * new locks could be taken after we've visited a particular
2969  * partition, but the callers had better be prepared to deal with that
2970  * anyway, since the locks could equally well be taken between the
2971  * time we return the value and the time the caller does something
2972  * with it.
2973  */
2974  for (i = 0; i < ProcGlobal->allProcCount; i++)
2975  {
2976  PGPROC *proc = &ProcGlobal->allProcs[i];
2977  uint32 f;
2978 
2979  /* A backend never blocks itself */
2980  if (proc == MyProc)
2981  continue;
2982 
2983  LWLockAcquire(&proc->fpInfoLock, LW_SHARED);
2984 
2985  /*
2986  * If the target backend isn't referencing the same database as
2987  * the lock, then we needn't examine the individual relation IDs
2988  * at all; none of them can be relevant.
2989  *
2990  * See FastPathTransferRelationLocks() for discussion of why we do
2991  * this test after acquiring the lock.
2992  */
2993  if (proc->databaseId != locktag->locktag_field1)
2994  {
2995  LWLockRelease(&proc->fpInfoLock);
2996  continue;
2997  }
2998 
2999  for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
3000  {
3001  uint32 lockmask;
3002 
3003  /* Look for an allocated slot matching the given relid. */
3004  if (relid != proc->fpRelId[f])
3005  continue;
3006  lockmask = FAST_PATH_GET_BITS(proc, f);
3007  if (!lockmask)
3008  continue;
3009  lockmask <<= FAST_PATH_LOCKNUMBER_OFFSET;
3010 
3011  /*
3012  * There can only be one entry per relation, so if we found it
3013  * and it doesn't conflict, we can skip the rest of the slots.
3014  */
3015  if ((lockmask & conflictMask) == 0)
3016  break;
3017 
3018  /* Conflict! */
3019  GET_VXID_FROM_PGPROC(vxid, *proc);
3020 
3021  if (VirtualTransactionIdIsValid(vxid))
3022  vxids[count++] = vxid;
3023  /* else, xact already committed or aborted */
3024 
3025  /* No need to examine remaining slots. */
3026  break;
3027  }
3028 
3029  LWLockRelease(&proc->fpInfoLock);
3030  }
3031  }
3032 
3033  /* Remember how many fast-path conflicts we found. */
3034  fast_count = count;
3035 
3036  /*
3037  * Look up the lock object matching the tag.
3038  */
3039  LWLockAcquire(partitionLock, LW_SHARED);
3040 
3041  lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
3042  (const void *) locktag,
3043  hashcode,
3044  HASH_FIND,
3045  NULL);
3046  if (!lock)
3047  {
3048  /*
3049  * If the lock object doesn't exist, there is nothing holding a lock
3050  * on this lockable object.
3051  */
3052  LWLockRelease(partitionLock);
3053  vxids[count].backendId = InvalidBackendId;
3054  vxids[count].localTransactionId = InvalidLocalTransactionId;
3055  if (countp)
3056  *countp = count;
3057  return vxids;
3058  }
3059 
3060  /*
3061  * Examine each existing holder (or awaiter) of the lock.
3062  */
3063 
3064  procLocks = &(lock->procLocks);
3065 
3066  proclock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks,
3067  offsetof(PROCLOCK, lockLink));
3068 
3069  while (proclock)
3070  {
3071  if (conflictMask & proclock->holdMask)
3072  {
3073  PGPROC *proc = proclock->tag.myProc;
3074 
3075  /* A backend never blocks itself */
3076  if (proc != MyProc)
3077  {
3078  VirtualTransactionId vxid;
3079 
3080  GET_VXID_FROM_PGPROC(vxid, *proc);
3081 
3082  if (VirtualTransactionIdIsValid(vxid))
3083  {
3084  int i;
3085 
3086  /* Avoid duplicate entries. */
3087  for (i = 0; i < fast_count; ++i)
3088  if (VirtualTransactionIdEquals(vxids[i], vxid))
3089  break;
3090  if (i >= fast_count)
3091  vxids[count++] = vxid;
3092  }
3093  /* else, xact already committed or aborted */
3094  }
3095  }
3096 
3097  proclock = (PROCLOCK *) SHMQueueNext(procLocks, &proclock->lockLink,
3098  offsetof(PROCLOCK, lockLink));
3099  }
3100 
3101  LWLockRelease(partitionLock);
3102 
3103  if (count > MaxBackends + max_prepared_xacts) /* should never happen */
3104  elog(PANIC, "too many conflicting locks found");
3105 
3106  vxids[count].backendId = InvalidBackendId;
3107  vxids[count].localTransactionId = InvalidLocalTransactionId;
3108  if (countp)
3109  *countp = count;
3110  return vxids;
3111 }
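/*
 * [Editor's example, not part of lock.c] Sketch of how a caller typically
 * consumes the result of GetLockConflicts(): either use *countp, or walk the
 * array up to the invalid-VXID terminator.  VirtualXactLock() is the real
 * routine used to wait for each holder (e.g. by WaitForLockers in lmgr.c);
 * the wrapper function below is illustrative only.
 */
#ifdef LOCK_C_USAGE_SKETCH			/* never defined; illustration only */
static void
example_wait_for_conflicts(const LOCKTAG *locktag, LOCKMODE lockmode)
{
	VirtualTransactionId *vxids = GetLockConflicts(locktag, lockmode, NULL);
	int			i;

	/* the array is terminated by an entry that fails VirtualTransactionIdIsValid() */
	for (i = 0; VirtualTransactionIdIsValid(vxids[i]); i++)
		(void) VirtualXactLock(vxids[i], true); /* wait for each holder to finish */
}
#endif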
3112 
3113 /*
3114  * Find a lock in the shared lock table and release it. It is the caller's
3115  * responsibility to verify that this is a sane thing to do. (For example, it
3116  * would be bad to release a lock here if there might still be a LOCALLOCK
3117  * object with pointers to it.)
3118  *
3119  * We currently use this in two situations: first, to release locks held by
3120  * prepared transactions on commit (see lock_twophase_postcommit); and second,
3121  * to release locks taken via the fast-path, transferred to the main hash
3122  * table, and then released (see LockReleaseAll).
3123  */
3124 static void
3125 LockRefindAndRelease(LockMethod lockMethodTable, PGPROC *proc,
3126  LOCKTAG *locktag, LOCKMODE lockmode,
3127  bool decrement_strong_lock_count)
3128 {
3129  LOCK *lock;
3130  PROCLOCK *proclock;
3131  PROCLOCKTAG proclocktag;
3132  uint32 hashcode;
3133  uint32 proclock_hashcode;
3134  LWLock *partitionLock;
3135  bool wakeupNeeded;
3136 
3137  hashcode = LockTagHashCode(locktag);
3138  partitionLock = LockHashPartitionLock(hashcode);
3139 
3140  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
3141 
3142  /*
3143  * Re-find the lock object (it had better be there).
3144  */
3145  lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
3146  (void *) locktag,
3147  hashcode,
3148  HASH_FIND,
3149  NULL);
3150  if (!lock)
3151  elog(PANIC, "failed to re-find shared lock object");
3152 
3153  /*
3154  * Re-find the proclock object (ditto).
3155  */
3156  proclocktag.myLock = lock;
3157  proclocktag.myProc = proc;
3158 
3159  proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
3160 
3161  proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
3162  (void *) &proclocktag,
3163  proclock_hashcode,
3164  HASH_FIND,
3165  NULL);
3166  if (!proclock)
3167  elog(PANIC, "failed to re-find shared proclock object");
3168 
3169  /*
3170  * Double-check that we are actually holding a lock of the type we want to
3171  * release.
3172  */
3173  if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
3174  {
3175  PROCLOCK_PRINT("lock_twophase_postcommit: WRONGTYPE", proclock);
3176  LWLockRelease(partitionLock);
3177  elog(WARNING, "you don't own a lock of type %s",
3178  lockMethodTable->lockModeNames[lockmode]);
3179  return;
3180  }
3181 
3182  /*
3183  * Do the releasing. CleanUpLock will waken any now-wakable waiters.
3184  */
3185  wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
3186 
3187  CleanUpLock(lock, proclock,
3188  lockMethodTable, hashcode,
3189  wakeupNeeded);
3190 
3191  LWLockRelease(partitionLock);
3192 
3193  /*
3194  * Decrement strong lock count. This logic is needed only for 2PC.
3195  */
3196  if (decrement_strong_lock_count
3197  && ConflictsWithRelationFastPath(locktag, lockmode))
3198  {
3199  uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
3200 
3201  SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
3202  Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
3203  FastPathStrongRelationLocks->count[fasthashcode]--;
3204  SpinLockRelease(&FastPathStrongRelationLocks->mutex);
3205  }
3206 }
3207 
3208 /*
3209  * CheckForSessionAndXactLocks
3210  * Check to see if transaction holds both session-level and xact-level
3211  * locks on the same object; if so, throw an error.
3212  *
3213  * If we have both session- and transaction-level locks on the same object,
3214  * PREPARE TRANSACTION must fail. This should never happen with regular
3215  * locks, since we only take those at session level in some special operations
3216  * like VACUUM. It's possible to hit this with advisory locks, though.
3217  *
3218  * It would be nice if we could keep the session hold and give away the
3219  * transactional hold to the prepared xact. However, that would require two
3220  * PROCLOCK objects, and we cannot be sure that another PROCLOCK will be
3221  * available when it comes time for PostPrepare_Locks to do the deed.
3222  * So for now, we error out while we can still do so safely.
3223  *
3224  * Since the LOCALLOCK table stores a separate entry for each lockmode,
3225  * we can't implement this check by examining LOCALLOCK entries in isolation.
3226  * We must build a transient hashtable that is indexed by locktag only.
3227  */
3228 static void
3229 CheckForSessionAndXactLocks(void)
3230 {
3231  typedef struct
3232  {
3233  LOCKTAG lock; /* identifies the lockable object */
3234  bool sessLock; /* is any lockmode held at session level? */
3235  bool xactLock; /* is any lockmode held at xact level? */
3236  } PerLockTagEntry;
3237 
3238  HASHCTL hash_ctl;
3239  HTAB *lockhtab;
3240  HASH_SEQ_STATUS status;
3241  LOCALLOCK *locallock;
3242 
3243  /* Create a local hash table keyed by LOCKTAG only */
3244  hash_ctl.keysize = sizeof(LOCKTAG);
3245  hash_ctl.entrysize = sizeof(PerLockTagEntry);
3246  hash_ctl.hcxt = CurrentMemoryContext;
3247 
3248  lockhtab = hash_create("CheckForSessionAndXactLocks table",
3249  256, /* arbitrary initial size */
3250  &hash_ctl,
3251  HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
3252 
3253  /* Scan local lock table to find entries for each LOCKTAG */
3254  hash_seq_init(&status, LockMethodLocalHash);
3255 
3256  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3257  {
3258  LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3259  PerLockTagEntry *hentry;
3260  bool found;
3261  int i;
3262 
3263  /*
3264  * Ignore VXID locks. We don't want those to be held by prepared
3265  * transactions, since they aren't meaningful after a restart.
3266  */
3267  if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3268  continue;
3269 
3270  /* Ignore it if we don't actually hold the lock */
3271  if (locallock->nLocks <= 0)
3272  continue;
3273 
3274  /* Otherwise, find or make an entry in lockhtab */
3275  hentry = (PerLockTagEntry *) hash_search(lockhtab,
3276  (void *) &locallock->tag.lock,
3277  HASH_ENTER, &found);
3278  if (!found) /* initialize, if newly created */
3279  hentry->sessLock = hentry->xactLock = false;
3280 
3281  /* Scan to see if we hold lock at session or xact level or both */
3282  for (i = locallock->numLockOwners - 1; i >= 0; i--)
3283  {
3284  if (lockOwners[i].owner == NULL)
3285  hentry->sessLock = true;
3286  else
3287  hentry->xactLock = true;
3288  }
3289 
3290  /*
3291  * We can throw error immediately when we see both types of locks; no
3292  * need to wait around to see if there are more violations.
3293  */
3294  if (hentry->sessLock && hentry->xactLock)
3295  ereport(ERROR,
3296  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3297  errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3298  }
3299 
3300  /* Success, so clean up */
3301  hash_destroy(lockhtab);
3302 }
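/*
 * [Editor's note, not part of lock.c] The practical way to hit the error
 * above is with advisory locks: take pg_advisory_lock() (a session-level
 * lock) and pg_advisory_xact_lock() (a transaction-level lock) on the same
 * key within one transaction, then issue PREPARE TRANSACTION.  Regular
 * relation locks are only taken at session level by special operations such
 * as VACUUM, so they do not normally trigger this check.
 */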
3303 
3304 /*
3305  * AtPrepare_Locks
3306  * Do the preparatory work for a PREPARE: make 2PC state file records
3307  * for all locks currently held.
3308  *
3309  * Session-level locks are ignored, as are VXID locks.
3310  *
3311  * For the most part, we don't need to touch shared memory for this ---
3312  * all the necessary state information is in the locallock table.
3313  * Fast-path locks are an exception, however: we move any such locks to
3314  * the main table before allowing PREPARE TRANSACTION to succeed.
3315  */
3316 void
3317 AtPrepare_Locks(void)
3318 {
3319  HASH_SEQ_STATUS status;
3320  LOCALLOCK *locallock;
3321 
3322  /* First, verify there aren't locks of both xact and session level */
3323  CheckForSessionAndXactLocks();
3324 
3325  /* Now do the per-locallock cleanup work */
3326  hash_seq_init(&status, LockMethodLocalHash);
3327 
3328  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3329  {
3330  TwoPhaseLockRecord record;
3331  LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3332  bool haveSessionLock;
3333  bool haveXactLock;
3334  int i;
3335 
3336  /*
3337  * Ignore VXID locks. We don't want those to be held by prepared
3338  * transactions, since they aren't meaningful after a restart.
3339  */
3340  if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3341  continue;
3342 
3343  /* Ignore it if we don't actually hold the lock */
3344  if (locallock->nLocks <= 0)
3345  continue;
3346 
3347  /* Scan to see whether we hold it at session or transaction level */
3348  haveSessionLock = haveXactLock = false;
3349  for (i = locallock->numLockOwners - 1; i >= 0; i--)
3350  {
3351  if (lockOwners[i].owner == NULL)
3352  haveSessionLock = true;
3353  else
3354  haveXactLock = true;
3355  }
3356 
3357  /* Ignore it if we have only session lock */
3358  if (!haveXactLock)
3359  continue;
3360 
3361  /* This can't happen, because we already checked it */
3362  if (haveSessionLock)
3363  ereport(ERROR,
3364  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3365  errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3366 
3367  /*
3368  * If the local lock was taken via the fast-path, we need to move it
3369  * to the primary lock table, or just get a pointer to the existing
3370  * primary lock table entry if by chance it's already been
3371  * transferred.
3372  */
3373  if (locallock->proclock == NULL)
3374  {
3375  locallock->proclock = FastPathGetRelationLockEntry(locallock);
3376  locallock->lock = locallock->proclock->tag.myLock;
3377  }
3378 
3379  /*
3380  * Arrange to not release any strong lock count held by this lock
3381  * entry. We must retain the count until the prepared transaction is
3382  * committed or rolled back.
3383  */
3384  locallock->holdsStrongLockCount = false;
3385 
3386  /*
3387  * Create a 2PC record.
3388  */
3389  memcpy(&(record.locktag), &(locallock->tag.lock), sizeof(LOCKTAG));
3390  record.lockmode = locallock->tag.mode;
3391 
3392  RegisterTwoPhaseRecord(TWOPHASE_RM_LOCK_ID, 0,
3393  &record, sizeof(TwoPhaseLockRecord));
3394  }
3395 }
3396 
3397 /*
3398  * PostPrepare_Locks
3399  * Clean up after successful PREPARE
3400  *
3401  * Here, we want to transfer ownership of our locks to a dummy PGPROC
3402  * that's now associated with the prepared transaction, and we want to
3403  * clean out the corresponding entries in the LOCALLOCK table.
3404  *
3405  * Note: by removing the LOCALLOCK entries, we are leaving dangling
3406  * pointers in the transaction's resource owner. This is OK at the
3407  * moment since resowner.c doesn't try to free locks retail at a toplevel
3408  * transaction commit or abort. We could alternatively zero out nLocks
3409  * and leave the LOCALLOCK entries to be garbage-collected by LockReleaseAll,
3410  * but that probably costs more cycles.
3411  */
3412 void
3413 PostPrepare_Locks(TransactionId xid)
3414 {
3415  PGPROC *newproc = TwoPhaseGetDummyProc(xid, false);
3416  HASH_SEQ_STATUS status;
3417  LOCALLOCK *locallock;
3418  LOCK *lock;
3419  PROCLOCK *proclock;
3420  PROCLOCKTAG proclocktag;
3421  int partition;
3422 
3423  /* Can't prepare a lock group follower. */
3424  Assert(MyProc->lockGroupLeader == NULL ||
3425  MyProc->lockGroupLeader == MyProc);
3426 
3427  /* This is a critical section: any error means big trouble */
3428  START_CRIT_SECTION();
3429 
3430  /*
3431  * First we run through the locallock table and get rid of unwanted
3432  * entries, then we scan the process's proclocks and transfer them to the
3433  * target proc.
3434  *
3435  * We do this separately because we may have multiple locallock entries
3436  * pointing to the same proclock, and we daren't end up with any dangling
3437  * pointers.
3438  */
3439  hash_seq_init(&status, LockMethodLocalHash);
3440 
3441  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3442  {
3443  LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3444  bool haveSessionLock;
3445  bool haveXactLock;
3446  int i;
3447 
3448  if (locallock->proclock == NULL || locallock->lock == NULL)
3449  {
3450  /*
3451  * We must've run out of shared memory while trying to set up this
3452  * lock. Just forget the local entry.
3453  */
3454  Assert(locallock->nLocks == 0);
3455  RemoveLocalLock(locallock);
3456  continue;
3457  }
3458 
3459  /* Ignore VXID locks */
3460  if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3461  continue;
3462 
3463  /* Scan to see whether we hold it at session or transaction level */
3464  haveSessionLock = haveXactLock = false;
3465  for (i = locallock->numLockOwners - 1; i >= 0; i--)
3466  {
3467  if (lockOwners[i].owner == NULL)
3468  haveSessionLock = true;
3469  else
3470  haveXactLock = true;
3471  }
3472 
3473  /* Ignore it if we have only session lock */
3474  if (!haveXactLock)
3475  continue;
3476 
3477  /* This can't happen, because we already checked it */
3478  if (haveSessionLock)
3479  ereport(PANIC,
3480  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3481  errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3482 
3483  /* Mark the proclock to show we need to release this lockmode */
3484  if (locallock->nLocks > 0)
3485  locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
3486 
3487  /* And remove the locallock hashtable entry */
3488  RemoveLocalLock(locallock);
3489  }
3490 
3491  /*
3492  * Now, scan each lock partition separately.
3493  */
3494  for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
3495  {
3496  LWLock *partitionLock;
3497  SHM_QUEUE *procLocks = &(MyProc->myProcLocks[partition]);
3498  PROCLOCK *nextplock;
3499 
3500  partitionLock = LockHashPartitionLockByIndex(partition);
3501 
3502  /*
3503  * If the proclock list for this partition is empty, we can skip
3504  * acquiring the partition lock. This optimization is safer than the
3505  * situation in LockReleaseAll, because we got rid of any fast-path
3506  * locks during AtPrepare_Locks, so there cannot be any case where
3507  * another backend is adding something to our lists now. For safety,
3508  * though, we code this the same way as in LockReleaseAll.
3509  */
3510  if (SHMQueueNext(procLocks, procLocks,
3511  offsetof(PROCLOCK, procLink)) == NULL)
3512  continue; /* needn't examine this partition */
3513 
3514  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
3515 
3516  for (proclock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks,
3517  offsetof(PROCLOCK, procLink));
3518  proclock;
3519  proclock = nextplock)
3520  {
3521  /* Get link first, since we may unlink/relink this proclock */
3522  nextplock = (PROCLOCK *)
3523  SHMQueueNext(procLocks, &proclock->procLink,
3524  offsetof(PROCLOCK, procLink));
3525 
3526  Assert(proclock->tag.myProc == MyProc);
3527 
3528  lock = proclock->tag.myLock;
3529 
3530  /* Ignore VXID locks */
3531  if (lock->tag.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3532  continue;
3533 
3534  PROCLOCK_PRINT("PostPrepare_Locks", proclock);
3535  LOCK_PRINT("PostPrepare_Locks", lock, 0);
3536  Assert(lock->nRequested >= 0);
3537  Assert(lock->nGranted >= 0);
3538  Assert(lock->nGranted <= lock->nRequested);
3539  Assert((proclock->holdMask & ~lock->grantMask) == 0);
3540 
3541  /* Ignore it if nothing to release (must be a session lock) */
3542  if (proclock->releaseMask == 0)
3543  continue;
3544 
3545  /* Else we should be releasing all locks */
3546  if (proclock->releaseMask != proclock->holdMask)
3547  elog(PANIC, "we seem to have dropped a bit somewhere");
3548 
3549  /*
3550  * We cannot simply modify proclock->tag.myProc to reassign
3551  * ownership of the lock, because that's part of the hash key and
3552  * the proclock would then be in the wrong hash chain. Instead
3553  * use hash_update_hash_key. (We used to create a new hash entry,
3554  * but that risks out-of-memory failure if other processes are
3555  * busy making proclocks too.) We must unlink the proclock from
3556  * our procLink chain and put it into the new proc's chain, too.
3557  *
3558  * Note: the updated proclock hash key will still belong to the
3559  * same hash partition, cf proclock_hash(). So the partition lock
3560  * we already hold is sufficient for this.
3561  */
3562  SHMQueueDelete(&proclock->procLink);
3563 
3564  /*
3565  * Create the new hash key for the proclock.
3566  */
3567  proclocktag.myLock = lock;
3568  proclocktag.myProc = newproc;
3569 
3570  /*
3571  * Update groupLeader pointer to point to the new proc. (We'd
3572  * better not be a member of somebody else's lock group!)
3573  */
3574  Assert(proclock->groupLeader == proclock->tag.myProc);
3575  proclock->groupLeader = newproc;
3576 
3577  /*
3578  * Update the proclock. We should not find any existing entry for
3579  * the same hash key, since there can be only one entry for any
3580  * given lock with my own proc.
3581  */
3582  if (!hash_update_hash_key(LockMethodProcLockHash,
3583  (void *) proclock,
3584  (void *) &proclocktag))
3585  elog(PANIC, "duplicate entry found while reassigning a prepared transaction's locks");
3586 
3587  /* Re-link into the new proc's proclock list */
3588  SHMQueueInsertBefore(&(newproc->myProcLocks[partition]),
3589  &proclock->procLink);
3590 
3591  PROCLOCK_PRINT("PostPrepare_Locks: updated", proclock);
3592  } /* loop over PROCLOCKs within this partition */
3593 
3594  LWLockRelease(partitionLock);
3595  } /* loop over partitions */
3596 
3597  END_CRIT_SECTION();
3598 }
3599 
3600 
3601 /*
3602  * Estimate shared-memory space used for lock tables
3603  */
3604 Size
3605 LockShmemSize(void)
3606 {
3607  Size size = 0;
3608  long max_table_size;
3609 
3610  /* lock hash table */
3611  max_table_size = NLOCKENTS();
3612  size = add_size(size, hash_estimate_size(max_table_size, sizeof(LOCK)));
3613 
3614  /* proclock hash table */
3615  max_table_size *= 2;
3616  size = add_size(size, hash_estimate_size(max_table_size, sizeof(PROCLOCK)));
3617 
3618  /*
3619  * Since NLOCKENTS is only an estimate, add 10% safety margin.
3620  */
3621  size = add_size(size, size / 10);
3622 
3623  return size;
3624 }
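To make the arithmetic above concrete, here is a rough stand-alone sketch of the same estimate. The parameter and entry-size values are assumptions for illustration only; the real computation goes through hash_estimate_size(), which also charges for dynahash buckets and segments.

/*
 * Rough, stand-alone sketch of the LockShmemSize() arithmetic (illustrative
 * assumptions only, not the real sizing code).
 */
#include <stddef.h>

static size_t
rough_lock_table_size(size_t max_locks_per_xact, size_t max_backends,
                      size_t max_prepared_xacts,
                      size_t lock_entry_size, size_t proclock_entry_size)
{
    size_t nlockents = max_locks_per_xact * (max_backends + max_prepared_xacts);
    size_t size;

    size = nlockents * lock_entry_size;              /* lock hash table */
    size += (nlockents * 2) * proclock_entry_size;   /* proclock hash table, 2x entries */
    size += size / 10;                               /* 10% safety margin */
    return size;
}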
3625 
3626 /*
3627  * GetLockStatusData - Return a summary of the lock manager's internal
3628  * status, for use in a user-level reporting function.
3629  *
3630  * The return data consists of an array of LockInstanceData objects,
3631  * which are a lightly abstracted version of the PROCLOCK data structures,
3632  * i.e. there is one entry for each unique lock and interested PGPROC.
3633  * It is the caller's responsibility to match up related items (such as
3634  * references to the same lockable object or PGPROC) if wanted.
3635  *
3636  * The design goal is to hold the LWLocks for as short a time as possible;
3637  * thus, this function simply makes a copy of the necessary data and releases
3638  * the locks, allowing the caller to contemplate and format the data for as
3639  * long as it pleases.
3640  */
3641 LockData *
3642 GetLockStatusData(void)
3643 {
3644  LockData *data;
3645  PROCLOCK *proclock;
3646  HASH_SEQ_STATUS seqstat;
3647  int els;
3648  int el;
3649  int i;
3650 
3651  data = (LockData *) palloc(sizeof(LockData));
3652 
3653  /* Guess how much space we'll need. */
3654  els = MaxBackends;
3655  el = 0;
3656  data->locks = (LockInstanceData *) palloc(sizeof(LockInstanceData) * els);
3657 
3658  /*
3659  * First, we iterate through the per-backend fast-path arrays, locking
3660  * them one at a time. This might produce an inconsistent picture of the
3661  * system state, but taking all of those LWLocks at the same time seems
3662  * impractical (in particular, note MAX_SIMUL_LWLOCKS). It shouldn't
3663  * matter too much, because none of these locks can be involved in lock
3664  * conflicts anyway - anything that might be involved must be in the main lock
3665  * table. (For the same reason, we don't sweat about making leaderPid
3666  * completely valid. We cannot safely dereference another backend's
3667  * lockGroupLeader field without holding all lock partition locks, and
3668  * it's not worth that.)
3669  */
3670  for (i = 0; i < ProcGlobal->allProcCount; ++i)
3671  {
3672  PGPROC *proc = &ProcGlobal->allProcs[i];
3673  uint32 f;
3674 
3675  LWLockAcquire(&proc->fpInfoLock, LW_SHARED);
3676 
3677  for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; ++f)
3678  {
3679  LockInstanceData *instance;
3680  uint32 lockbits = FAST_PATH_GET_BITS(proc, f);
3681 
3682  /* Skip unallocated slots. */
3683  if (!lockbits)
3684  continue;
3685 
3686  if (el >= els)
3687  {
3688  els += MaxBackends;
3689  data->locks = (LockInstanceData *)
3690  repalloc(data->locks, sizeof(LockInstanceData) * els);
3691  }
3692 
3693  instance = &data->locks[el];
3694  SET_LOCKTAG_RELATION(instance->locktag, proc->databaseId,
3695  proc->fpRelId[f]);
3696  instance->holdMask = lockbits << FAST_PATH_LOCKNUMBER_OFFSET;
3697  instance->waitLockMode = NoLock;
3698  instance->backend = proc->backendId;
3699  instance->lxid = proc->lxid;
3700  instance->pid = proc->pid;
3701  instance->leaderPid = proc->pid;
3702  instance->fastpath = true;
3703 
3704  /*
3705  * Successfully taking fast path lock means there were no
3706  * conflicting locks.
3707  */
3708  instance->waitStart = 0;
3709 
3710  el++;
3711  }
3712 
3713  if (proc->fpVXIDLock)
3714  {
3715  VirtualTransactionId vxid;
3716  LockInstanceData *instance;
3717 
3718  if (el >= els)
3719  {
3720  els += MaxBackends;
3721  data->locks = (LockInstanceData *)
3722  repalloc(data->locks, sizeof(LockInstanceData) * els);
3723  }
3724 
3725  vxid.backendId = proc->backendId;
3726  vxid.localTransactionId = proc->fpLocalTransactionId;
3727 
3728  instance = &data->locks[el];
3729  SET_LOCKTAG_VIRTUALTRANSACTION(instance->locktag, vxid);
3730  instance->holdMask = LOCKBIT_ON(ExclusiveLock);
3731  instance->waitLockMode = NoLock;
3732  instance->backend = proc->backendId;
3733  instance->lxid = proc->lxid;
3734  instance->pid = proc->pid;
3735  instance->leaderPid = proc->pid;
3736  instance->fastpath = true;
3737  instance->waitStart = 0;
3738 
3739  el++;
3740  }
3741 
3742  LWLockRelease(&proc->fpInfoLock);
3743  }
3744 
3745  /*
3746  * Next, acquire lock on the entire shared lock data structure. We do
3747  * this so that, at least for locks in the primary lock table, the state
3748  * will be self-consistent.
3749  *
3750  * Since this is a read-only operation, we take shared instead of
3751  * exclusive lock. There's not a whole lot of point to this, because all
3752  * the normal operations require exclusive lock, but it doesn't hurt
3753  * anything either. It will at least allow two backends to do
3754  * GetLockStatusData in parallel.
3755  *
3756  * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
3757  */
3758  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
3759  LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);
3760 
3761  /* Now we can safely count the number of proclocks */
3762  data->nelements = el + hash_get_num_entries(LockMethodProcLockHash);
3763  if (data->nelements > els)
3764  {
3765  els = data->nelements;
3766  data->locks = (LockInstanceData *)
3767  repalloc(data->locks, sizeof(LockInstanceData) * els);
3768  }
3769 
3770  /* Now scan the tables to copy the data */
3771  hash_seq_init(&seqstat, LockMethodProcLockHash);
3772 
3773  while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
3774  {
3775  PGPROC *proc = proclock->tag.myProc;
3776  LOCK *lock = proclock->tag.myLock;
3777  LockInstanceData *instance = &data->locks[el];
3778 
3779  memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
3780  instance->holdMask = proclock->holdMask;
3781  if (proc->waitLock == proclock->tag.myLock)
3782  instance->waitLockMode = proc->waitLockMode;
3783  else
3784  instance->waitLockMode = NoLock;
3785  instance->backend = proc->backendId;
3786  instance->lxid = proc->lxid;
3787  instance->pid = proc->pid;
3788  instance->leaderPid = proclock->groupLeader->pid;
3789  instance->fastpath = false;
3790  instance->waitStart = (TimestampTz) pg_atomic_read_u64(&proc->waitStart);
3791 
3792  el++;
3793  }
3794 
3795  /*
3796  * And release locks. We do this in reverse order for two reasons: (1)
3797  * Anyone else who needs more than one of the locks will be trying to lock
3798  * them in increasing order; we don't want to release the other process
3799  * until it can get all the locks it needs. (2) This avoids O(N^2)
3800  * behavior inside LWLockRelease.
3801  */
3802  for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
3803  LWLockRelease(LockHashPartitionLockByIndex(i));
3804 
3805  Assert(el == data->nelements);
3806 
3807  return data;
3808 }
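As a consumption sketch (hypothetical helper, not part of lock.c; it assumes a backend context where elog() and the lock manager are available), a caller might walk the returned array like this:

/* Hypothetical consumer of GetLockStatusData(), for illustration only. */
static void
print_lock_summary(void)
{
    LockData   *lockData = GetLockStatusData();
    int         i;

    for (i = 0; i < lockData->nelements; i++)
    {
        LockInstanceData *instance = &lockData->locks[i];

        elog(LOG, "pid %d holds mask %x on locktag type %u (fastpath: %d)",
             instance->pid,
             (unsigned) instance->holdMask,
             (unsigned) instance->locktag.locktag_type,
             (int) instance->fastpath);
    }
}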
3809 
3810 /*
3811  * GetBlockerStatusData - Return a summary of the lock manager's state
3812  * concerning locks that are blocking the specified PID or any member of
3813  * the PID's lock group, for use in a user-level reporting function.
3814  *
3815  * For each PID within the lock group that is awaiting some heavyweight lock,
3816  * the return data includes an array of LockInstanceData objects, which are
3817  * the same data structure used by GetLockStatusData; but unlike that function,
3818  * this one reports only the PROCLOCKs associated with the lock that that PID
3819  * is blocked on. (Hence, all the locktags should be the same for any one
3820  * blocked PID.) In addition, we return an array of the PIDs of those backends
3821  * that are ahead of the blocked PID in the lock's wait queue. These can be
3822  * compared with the PIDs in the LockInstanceData objects to determine which
3823  * waiters are ahead of or behind the blocked PID in the queue.
3824  *
3825  * If blocked_pid isn't a valid backend PID or nothing in its lock group is
3826  * waiting on any heavyweight lock, return empty arrays.
3827  *
3828  * The design goal is to hold the LWLocks for as short a time as possible;
3829  * thus, this function simply makes a copy of the necessary data and releases
3830  * the locks, allowing the caller to contemplate and format the data for as
3831  * long as it pleases.
3832  */
3833 BlockedProcsData *
3834 GetBlockerStatusData(int blocked_pid)
3835 {
3836  BlockedProcsData *data;
3837  PGPROC *proc;
3838  int i;
3839 
3840  data = (BlockedProcsData *) palloc(sizeof(BlockedProcsData));
3841 
3842  /*
3843  * Guess how much space we'll need, and preallocate. Most of the time
3844  * this will avoid needing to do repalloc while holding the LWLocks. (We
3845  * assume, but check with an Assert, that MaxBackends is enough entries
3846  * for the procs[] array; the other two could need enlargement, though.)
3847  */
3848  data->nprocs = data->nlocks = data->npids = 0;
3849  data->maxprocs = data->maxlocks = data->maxpids = MaxBackends;
3850  data->procs = (BlockedProcData *) palloc(sizeof(BlockedProcData) * data->maxprocs);
3851  data->locks = (LockInstanceData *) palloc(sizeof(LockInstanceData) * data->maxlocks);
3852  data->waiter_pids = (int *) palloc(sizeof(int) * data->maxpids);
3853 
3854  /*
3855  * In order to search the ProcArray for blocked_pid and assume that that
3856  * entry won't immediately disappear under us, we must hold ProcArrayLock.
3857  * In addition, to examine the lock grouping fields of any other backend,
3858  * we must hold all the hash partition locks. (Only one of those locks is
3859  * actually relevant for any one lock group, but we can't know which one
3860  * ahead of time.) It's fairly annoying to hold all those locks
3861  * throughout this, but it's no worse than GetLockStatusData(), and it
3862  * does have the advantage that we're guaranteed to return a
3863  * self-consistent instantaneous state.
3864  */
3865  LWLockAcquire(ProcArrayLock, LW_SHARED);
3866 
3867  proc = BackendPidGetProcWithLock(blocked_pid);
3868 
3869  /* Nothing to do if it's gone */
3870  if (proc != NULL)
3871  {
3872  /*
3873  * Acquire lock on the entire shared lock data structure. See notes
3874  * in GetLockStatusData().
3875  */
3876  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
3877  LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);
3878 
3879  if (proc->lockGroupLeader == NULL)
3880  {
3881  /* Easy case, proc is not a lock group member */
3882  GetSingleProcBlockerStatusData(proc, data);
3883  }
3884  else
3885  {
3886  /* Examine all procs in proc's lock group */
3887  dlist_iter iter;
3888 
3889  dlist_foreach(iter, &proc->lockGroupLeader->lockGroupMembers)
3890  {
3891  PGPROC *memberProc;
3892 
3893  memberProc = dlist_container(PGPROC, lockGroupLink, iter.cur);
3894  GetSingleProcBlockerStatusData(memberProc, data);
3895  }
3896  }
3897 
3898  /*
3899  * And release locks. See notes in GetLockStatusData().
3900  */
3901  for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
3902  LWLockRelease(LockHashPartitionLockByIndex(i));
3903 
3904  Assert(data->nprocs <= data->maxprocs);
3905  }
3906 
3907  LWLockRelease(ProcArrayLock);
3908 
3909  return data;
3910 }
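A hypothetical consumer sketch, using the first_lock/num_locks and first_waiter/num_waiters cursors to slice the shared locks[] and waiter_pids[] arrays per blocked process (illustration only, assuming a backend context):

/* Hypothetical walk over the BlockedProcsData result, for illustration. */
static void
print_blockers(int blocked_pid)
{
    BlockedProcsData *bdata = GetBlockerStatusData(blocked_pid);
    int         p;

    for (p = 0; p < bdata->nprocs; p++)
    {
        BlockedProcData *bproc = &bdata->procs[p];
        int         i;

        /* PROCLOCKs on the lock this group member is waiting for */
        for (i = 0; i < bproc->num_locks; i++)
            elog(LOG, "pid %d: blocker pid %d",
                 bproc->pid, bdata->locks[bproc->first_lock + i].pid);

        /* backends queued ahead of it on the same lock */
        for (i = 0; i < bproc->num_waiters; i++)
            elog(LOG, "pid %d: ahead in queue: pid %d",
                 bproc->pid, bdata->waiter_pids[bproc->first_waiter + i]);
    }
}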
3911 
3912 /* Accumulate data about one possibly-blocked proc for GetBlockerStatusData */
3913 static void
3914 GetSingleProcBlockerStatusData(PGPROC *blocked_proc, BlockedProcsData *data)
3915 {
3916  LOCK *theLock = blocked_proc->waitLock;
3917  BlockedProcData *bproc;
3918  SHM_QUEUE *procLocks;
3919  PROCLOCK *proclock;
3920  PROC_QUEUE *waitQueue;
3921  PGPROC *proc;
3922  int queue_size;
3923  int i;
3924 
3925  /* Nothing to do if this proc is not blocked */
3926  if (theLock == NULL)
3927  return;
3928 
3929  /* Set up a procs[] element */
3930  bproc = &data->procs[data->nprocs++];
3931  bproc->pid = blocked_proc->pid;
3932  bproc->first_lock = data->nlocks;
3933  bproc->first_waiter = data->npids;
3934 
3935  /*
3936  * We may ignore the proc's fast-path arrays, since nothing in those could
3937  * be related to a contended lock.
3938  */
3939 
3940  /* Collect all PROCLOCKs associated with theLock */
3941  procLocks = &(theLock->procLocks);
3942  proclock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks,
3943  offsetof(PROCLOCK, lockLink));
3944  while (proclock)
3945  {
3946  PGPROC *proc = proclock->tag.myProc;
3947  LOCK *lock = proclock->tag.myLock;
3948  LockInstanceData *instance;
3949 
3950  if (data->nlocks >= data->maxlocks)
3951  {
3952  data->maxlocks += MaxBackends;
3953  data->locks = (LockInstanceData *)
3954  repalloc(data->locks, sizeof(LockInstanceData) * data->maxlocks);
3955  }
3956 
3957  instance = &data->locks[data->nlocks];
3958  memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
3959  instance->holdMask = proclock->holdMask;
3960  if (proc->waitLock == lock)
3961  instance->waitLockMode = proc->waitLockMode;
3962  else
3963  instance->waitLockMode = NoLock;
3964  instance->backend = proc->backendId;
3965  instance->lxid = proc->lxid;
3966  instance->pid = proc->pid;
3967  instance->leaderPid = proclock->groupLeader->pid;
3968  instance->fastpath = false;
3969  data->nlocks++;
3970 
3971  proclock = (PROCLOCK *) SHMQueueNext(procLocks, &proclock->lockLink,
3972  offsetof(PROCLOCK, lockLink));
3973  }
3974 
3975  /* Enlarge waiter_pids[] if it's too small to hold all wait queue PIDs */
3976  waitQueue = &(theLock->waitProcs);
3977  queue_size = waitQueue->size;
3978 
3979  if (queue_size > data->maxpids - data->npids)
3980  {
3981  data->maxpids = Max(data->maxpids + MaxBackends,
3982  data->npids + queue_size);
3983  data->waiter_pids = (int *) repalloc(data->waiter_pids,
3984  sizeof(int) * data->maxpids);
3985  }
3986 
3987  /* Collect PIDs from the lock's wait queue, stopping at blocked_proc */
3988  proc = (PGPROC *) waitQueue->links.next;
3989  for (i = 0; i < queue_size; i++)
3990  {
3991  if (proc == blocked_proc)
3992  break;
3993  data->waiter_pids[data->npids++] = proc->pid;
3994  proc = (PGPROC *) proc->links.next;
3995  }
3996 
3997  bproc->num_locks = data->nlocks - bproc->first_lock;
3998  bproc->num_waiters = data->npids - bproc->first_waiter;
3999 }
4000 
4001 /*
4002  * Returns a list of currently held AccessExclusiveLocks, for use by
4003  * LogStandbySnapshot(). The result is a palloc'd array,
4004  * with the number of elements returned into *nlocks.
4005  *
4006  * XXX This currently takes a lock on all partitions of the lock table,
4007  * but it's possible to do better. By reference counting locks and storing
4008  * the value in the ProcArray entry for each backend we could tell if any
4009  * locks need recording without having to acquire the partition locks and
4010  * scan the lock table. Whether that's worth the additional overhead
4011  * is pretty dubious though.
4012  */
4013 xl_standby_lock *
4014 GetRunningTransactionLocks(int *nlocks)
4015 {
4016  xl_standby_lock *accessExclusiveLocks;
4017  PROCLOCK *proclock;
4018  HASH_SEQ_STATUS seqstat;
4019  int i;
4020  int index;
4021  int els;
4022 
4023  /*
4024  * Acquire lock on the entire shared lock data structure.
4025  *
4026  * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
4027  */
4028  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
4029  LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);
4030 
4031  /* Now we can safely count the number of proclocks */
4032  els = hash_get_num_entries(LockMethodProcLockHash);
4033 
4034  /*
4035  * Allocating enough space for all locks in the lock table is overkill,
4036  * but it's more convenient and faster than having to enlarge the array.
4037  */
4038  accessExclusiveLocks = palloc(els * sizeof(xl_standby_lock));
4039 
4040  /* Now scan the tables to copy the data */
4041  hash_seq_init(&seqstat, LockMethodProcLockHash);
4042 
4043  /*
4044  * If lock is a currently granted AccessExclusiveLock then it will have
4045  * just one proclock holder, so locks are never accessed twice in this
4046  * particular case. Don't copy this code for use elsewhere because in the
4047  * general case this will give you duplicate locks when looking at
4048  * non-exclusive lock types.
4049  */
4050  index = 0;
4051  while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
4052  {
4053  /* make sure this definition matches the one used in LockAcquire */
4054  if ((proclock->holdMask & LOCKBIT_ON(AccessExclusiveLock)) &&
4055  proclock->tag.myLock->tag.locktag_type == LOCKTAG_RELATION)
4056  {
4057  PGPROC *proc = proclock->tag.myProc;
4058  LOCK *lock = proclock->tag.myLock;
4059  TransactionId xid = proc->xid;
4060 
4061  /*
4062  * Don't record locks for transactions if we know they have
4063  * already issued their WAL record for commit but not yet released
4064  * lock. It is still possible that we see locks held by already
4065  * complete transactions, if they haven't yet zeroed their xids.
4066  */
4067  if (!TransactionIdIsValid(xid))
4068  continue;
4069 
4070  accessExclusiveLocks[index].xid = xid;
4071  accessExclusiveLocks[index].dbOid = lock->tag.locktag_field1;
4072  accessExclusiveLocks[index].relOid = lock->tag.locktag_field2;
4073 
4074  index++;
4075  }
4076  }
4077 
4078  Assert(index <= els);
4079 
4080  /*
4081  * And release locks. We do this in reverse order for two reasons: (1)
4082  * Anyone else who needs more than one of the locks will be trying to lock
4083  * them in increasing order; we don't want to release the other process
4084  * until it can get all the locks it needs. (2) This avoids O(N^2)
4085  * behavior inside LWLockRelease.
4086  */
4087  for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
4088  LWLockRelease(LockHashPartitionLockByIndex(i));
4089 
4090  *nlocks = index;
4091  return accessExclusiveLocks;
4092 }
4093 
4094 /* Provide the textual name of any lock mode */
4095 const char *
4096 GetLockmodeName(LOCKMETHODID lockmethodid, LOCKMODE mode)
4097 {
4098  Assert(lockmethodid > 0 && lockmethodid < lengthof(LockMethods));
4099  Assert(mode > 0 && mode <= LockMethods[lockmethodid]->numLockModes);
4100  return LockMethods[lockmethodid]->lockModeNames[mode];
4101 }
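Minimal usage sketch (illustration only; DEFAULT_LOCKMETHOD selects the standard lock method table):

/* Illustration: map a numeric lock mode back to its display name. */
const char *modename = GetLockmodeName(DEFAULT_LOCKMETHOD, AccessExclusiveLock);
/* modename now points at the "AccessExclusiveLock" entry of lock_mode_names[] */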
4102 
4103 #ifdef LOCK_DEBUG
4104 /*
4105  * Dump all locks in the given proc's myProcLocks lists.
4106  *
4107  * Caller is responsible for having acquired appropriate LWLocks.
4108  */
4109 void
4110 DumpLocks(PGPROC *proc)
4111 {
4112  SHM_QUEUE *procLocks;
4113  PROCLOCK *proclock;
4114  LOCK *lock;
4115  int i;
4116 
4117  if (proc == NULL)
4118  return;
4119 
4120  if (proc->waitLock)
4121  LOCK_PRINT("DumpLocks: waiting on", proc->waitLock, 0);
4122 
4123  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
4124  {
4125  procLocks = &(proc->myProcLocks[i]);
4126 
4127  proclock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks,
4128  offsetof(PROCLOCK, procLink));
4129 
4130  while (proclock)
4131  {
4132  Assert(proclock->tag.myProc == proc);
4133 
4134  lock = proclock->tag.myLock;
4135 
4136  PROCLOCK_PRINT("DumpLocks", proclock);
4137  LOCK_PRINT("DumpLocks", lock, 0);
4138 
4139  proclock = (PROCLOCK *)
4140  SHMQueueNext(procLocks, &proclock->procLink,
4141  offsetof(PROCLOCK, procLink));
4142  }
4143  }
4144 }
4145 
4146 /*
4147  * Dump all lmgr locks.
4148  *
4149  * Caller is responsible for having acquired appropriate LWLocks.
4150  */
4151 void
4152 DumpAllLocks(void)
4153 {
4154  PGPROC *proc;
4155  PROCLOCK *proclock;
4156  LOCK *lock;
4157  HASH_SEQ_STATUS status;
4158 
4159  proc = MyProc;
4160 
4161  if (proc && proc->waitLock)
4162  LOCK_PRINT("DumpAllLocks: waiting on", proc->waitLock, 0);
4163 
4164  hash_seq_init(&status, LockMethodProcLockHash);
4165 
4166  while ((proclock = (PROCLOCK *) hash_seq_search(&status)) != NULL)
4167  {
4168  PROCLOCK_PRINT("DumpAllLocks", proclock);
4169 
4170  lock = proclock->tag.myLock;
4171  if (lock)
4172  LOCK_PRINT("DumpAllLocks", lock, 0);
4173  else
4174  elog(LOG, "DumpAllLocks: proclock->tag.myLock = NULL");
4175  }
4176 }
4177 #endif /* LOCK_DEBUG */
4178 
4179 /*
4180  * LOCK 2PC resource manager's routines
4181  */
4182 
4183 /*
4184  * Re-acquire a lock belonging to a transaction that was prepared.
4185  *
4186  * Because this function is run at db startup, re-acquiring the locks should
4187  * never conflict with running transactions because there are none. We
4188  * assume that the lock state represented by the stored 2PC files is legal.
4189  *
4190  * When switching from Hot Standby mode to normal operation, the locks will
4191  * be already held by the startup process. The locks are acquired for the new
4192  * procs without checking for conflicts, so we don't get a conflict between the
4193  * startup process and the dummy procs, even though we will momentarily have
4194  * a situation where two procs are holding the same AccessExclusiveLock,
4195  * which isn't normally possible because of the lock conflict. If we're in standby
4196  * mode, but a recovery snapshot hasn't been established yet, it's possible
4197  * that some but not all of the locks are already held by the startup process.
4198  *
4199  * This approach is simple, but also a bit dangerous, because if there isn't
4200  * enough shared memory to acquire the locks, an error will be thrown, which
4201  * is promoted to FATAL and recovery will abort, bringing down postmaster.
4202  * A safer approach would be to transfer the locks like we do in
4203  * AtPrepare_Locks, but then again, in hot standby mode it's possible for
4204  * read-only backends to use up all the shared lock memory anyway, so that
4205  * replaying the WAL record that needs to acquire a lock will throw an error
4206  * and PANIC anyway.
4207  */
4208 void
4209 lock_twophase_recover(TransactionId xid, uint16 info,
4210  void *recdata, uint32 len)
4211 {
4212  TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4213  PGPROC *proc = TwoPhaseGetDummyProc(xid, false);
4214  LOCKTAG *locktag;
4215  LOCKMODE lockmode;
4216  LOCKMETHODID lockmethodid;
4217  LOCK *lock;
4218  PROCLOCK *proclock;
4219  PROCLOCKTAG proclocktag;
4220  bool found;
4221  uint32 hashcode;
4222  uint32 proclock_hashcode;
4223  int partition;
4224  LWLock *partitionLock;
4225  LockMethod lockMethodTable;
4226 
4227  Assert(len == sizeof(TwoPhaseLockRecord));
4228  locktag = &rec->locktag;
4229  lockmode = rec->lockmode;
4230  lockmethodid = locktag->locktag_lockmethodid;
4231 
4232  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4233  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4234  lockMethodTable = LockMethods[lockmethodid];
4235 
4236  hashcode = LockTagHashCode(locktag);
4237  partition = LockHashPartition(hashcode);
4238  partitionLock = LockHashPartitionLock(hashcode);
4239 
4240  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4241 
4242  /*
4243  * Find or create a lock with this tag.
4244  */
4245  lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
4246  (void *) locktag,
4247  hashcode,
4248  HASH_ENTER_NULL,
4249  &found);
4250  if (!lock)
4251  {
4252  LWLockRelease(partitionLock);
4253  ereport(ERROR,
4254  (errcode(ERRCODE_OUT_OF_MEMORY),
4255  errmsg("out of shared memory"),
4256  errhint("You might need to increase max_locks_per_transaction.")));
4257  }
4258 
4259  /*
4260  * if it's a new lock object, initialize it
4261  */
4262  if (!found)
4263  {
4264  lock->grantMask = 0;
4265  lock->waitMask = 0;
4266  SHMQueueInit(&(lock->procLocks));
4267  ProcQueueInit(&(lock->waitProcs));
4268  lock->nRequested = 0;
4269  lock->nGranted = 0;
4270  MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
4271  MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
4272  LOCK_PRINT("lock_twophase_recover: new", lock, lockmode);
4273  }
4274  else
4275  {
4276  LOCK_PRINT("lock_twophase_recover: found", lock, lockmode);
4277  Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
4278  Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
4279  Assert(lock->nGranted <= lock->nRequested);
4280  }
4281 
4282  /*
4283  * Create the hash key for the proclock table.
4284  */
4285  proclocktag.myLock = lock;
4286  proclocktag.myProc = proc;
4287 
4288  proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
4289 
4290  /*
4291  * Find or create a proclock entry with this tag
4292  */
4293  proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
4294  (void *) &proclocktag,
4295  proclock_hashcode,
4296  HASH_ENTER_NULL,
4297  &found);
4298  if (!proclock)
4299  {
4300  /* Oops, not enough shmem for the proclock */
4301  if (lock->nRequested == 0)
4302  {
4303  /*
4304  * There are no other requestors of this lock, so garbage-collect
4305  * the lock object. We *must* do this to avoid a permanent leak
4306  * of shared memory, because there won't be anything to cause
4307  * anyone to release the lock object later.
4308  */
4309  Assert(SHMQueueEmpty(&(lock->procLocks)));
4310  if (!hash_search_with_hash_value(LockMethodLockHash,
4311  (void *) &(lock->tag),
4312  hashcode,
4313  HASH_REMOVE,
4314  NULL))
4315  elog(PANIC, "lock table corrupted");
4316  }
4317  LWLockRelease(partitionLock);
4318  ereport(ERROR,
4319  (errcode(ERRCODE_OUT_OF_MEMORY),
4320  errmsg("out of shared memory"),
4321  errhint("You might need to increase max_locks_per_transaction.")));
4322  }
4323 
4324  /*
4325  * If new, initialize the new entry
4326  */
4327  if (!found)
4328  {
4329  Assert(proc->lockGroupLeader == NULL);
4330  proclock->groupLeader = proc;
4331  proclock->holdMask = 0;
4332  proclock->releaseMask = 0;
4333  /* Add proclock to appropriate lists */
4334  SHMQueueInsertBefore(&lock->procLocks, &proclock->lockLink);
4335  SHMQueueInsertBefore(&(proc->myProcLocks[partition]),
4336  &proclock->procLink);
4337  PROCLOCK_PRINT("lock_twophase_recover: new", proclock);
4338  }
4339  else
4340  {
4341  PROCLOCK_PRINT("lock_twophase_recover: found", proclock);
4342  Assert((proclock->holdMask & ~lock->grantMask) == 0);
4343  }
4344 
4345  /*
4346  * lock->nRequested and lock->requested[] count the total number of
4347  * requests, whether granted or waiting, so increment those immediately.
4348  */
4349  lock->nRequested++;
4350  lock->requested[lockmode]++;
4351  Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
4352 
4353  /*
4354  * We shouldn't already hold the desired lock.
4355  */
4356  if (proclock->holdMask & LOCKBIT_ON(lockmode))
4357  elog(ERROR, "lock %s on object %u/%u/%u is already held",
4358  lockMethodTable->lockModeNames[lockmode],
4359  lock->tag.locktag_field1, lock->tag.locktag_field2,
4360  lock->tag.locktag_field3);
4361 
4362  /*
4363  * We ignore any possible conflicts and just grant ourselves the lock. Not
4364  * only because we don't bother, but also to avoid deadlocks when
4365  * switching from standby to normal mode. See function comment.
4366  */
4367  GrantLock(lock, proclock, lockmode);
4368 
4369  /*
4370  * Bump strong lock count, to make sure any fast-path lock requests won't
4371  * be granted without consulting the primary lock table.
4372  */
4373  if (ConflictsWithRelationFastPath(&lock->tag, lockmode))
4374  {
4375  uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
4376 
4377  SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
4378  FastPathStrongRelationLocks->count[fasthashcode]++;
4379  SpinLockRelease(&FastPathStrongRelationLocks->mutex);
4380  }
4381 
4382  LWLockRelease(partitionLock);
4383 }
4384 
4385 /*
4386  * Re-acquire a lock belonging to a transaction that was prepared, when
4387  * starting up into hot standby mode.
4388  */
4389 void
4390 lock_twophase_standby_recover(TransactionId xid, uint16 info,
4391  void *recdata, uint32 len)
4392 {
4393  TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4394  LOCKTAG *locktag;
4395  LOCKMODE lockmode;
4396  LOCKMETHODID lockmethodid;
4397 
4398  Assert(len == sizeof(TwoPhaseLockRecord));
4399  locktag = &rec->locktag;
4400  lockmode = rec->lockmode;
4401  lockmethodid = locktag->locktag_lockmethodid;
4402 
4403  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4404  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4405 
4406  if (lockmode == AccessExclusiveLock &&
4407  locktag->locktag_type == LOCKTAG_RELATION)
4408  {
4409  StandbyAcquireAccessExclusiveLock(xid,
4410  locktag->locktag_field1 /* dboid */ ,
4411  locktag->locktag_field2 /* reloid */ );
4412  }
4413 }
4414 
4415 
4416 /*
4417  * 2PC processing routine for COMMIT PREPARED case.
4418  *
4419  * Find and release the lock indicated by the 2PC record.
4420  */
4421 void
4422 lock_twophase_postcommit(TransactionId xid, uint16 info,
4423  void *recdata, uint32 len)
4424 {
4425  TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4426  PGPROC *proc = TwoPhaseGetDummyProc(xid, true);
4427  LOCKTAG *locktag;
4428  LOCKMETHODID lockmethodid;
4429  LockMethod lockMethodTable;
4430 
4431  Assert(len == sizeof(TwoPhaseLockRecord));
4432  locktag = &rec->locktag;
4433  lockmethodid = locktag->locktag_lockmethodid;
4434 
4435  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4436  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4437  lockMethodTable = LockMethods[lockmethodid];
4438 
4439  LockRefindAndRelease(lockMethodTable, proc, locktag, rec->lockmode, true);
4440 }
4441 
4442 /*
4443  * 2PC processing routine for ROLLBACK PREPARED case.
4444  *
4445  * This is actually just the same as the COMMIT case.
4446  */
4447 void
4448 lock_twophase_postabort(TransactionId xid, uint16 info,
4449  void *recdata, uint32 len)
4450 {
4451  lock_twophase_postcommit(xid, info, recdata, len);
4452 }
4453 
4454 /*
4455  * VirtualXactLockTableInsert
4456  *
4457  * Take vxid lock via the fast-path. There can't be any pre-existing
4458  * lockers, as we haven't advertised this vxid via the ProcArray yet.
4459  *
4460  * Since MyProc->fpLocalTransactionId will normally contain the same data
4461  * as MyProc->lxid, you might wonder if we really need both. The
4462  * difference is that MyProc->lxid is set and cleared unlocked, and
4463  * examined by procarray.c, while fpLocalTransactionId is protected by
4464  * fpInfoLock and is used only by the locking subsystem. Doing it this
4465  * way makes it easier to verify that there are no funny race conditions.
4466  *
4467  * We don't bother recording this lock in the local lock table, since it's
4468  * only ever released at the end of a transaction. Instead,
4469  * LockReleaseAll() calls VirtualXactLockTableCleanup().
4470  */
4471 void
4472 VirtualXactLockTableInsert(VirtualTransactionId vxid)
4473 {
4474  Assert(VirtualTransactionIdIsValid(vxid));
4475 
4476  LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
4477 
4478  Assert(MyProc->backendId == vxid.backendId);
4479  Assert(MyProc->fpLocalTransactionId == InvalidLocalTransactionId);
4480  Assert(MyProc->fpVXIDLock == false);
4481 
4482  MyProc->fpVXIDLock = true;
4483  MyProc->fpLocalTransactionId = vxid.localTransactionId;
4484 
4485  LWLockRelease(&MyProc->fpInfoLock);
4486 }
4487 
4488 /*
4489  * VirtualXactLockTableCleanup
4490  *
4491  * Check whether a VXID lock has been materialized; if so, release it,
4492  * unblocking waiters.
4493  */
4494 void
4495 VirtualXactLockTableCleanup(void)
4496 {
4497  bool fastpath;
4498  LocalTransactionId lxid;
4499 
4500  Assert(MyProc->backendId != InvalidBackendId);
4501 
4502  /*
4503  * Clean up shared memory state.
4504  */
4505  LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
4506 
4507  fastpath = MyProc->fpVXIDLock;
4508  lxid = MyProc->fpLocalTransactionId;
4509  MyProc->fpVXIDLock = false;
4510  MyProc->fpLocalTransactionId = InvalidLocalTransactionId;
4511 
4512  LWLockRelease(&MyProc->fpInfoLock);
4513 
4514  /*
4515  * If fpVXIDLock has been cleared without touching fpLocalTransactionId,
4516  * that means someone transferred the lock to the main lock table.
4517  */
4518  if (!fastpath && LocalTransactionIdIsValid(lxid))
4519  {
4520  VirtualTransactionId vxid;
4521  LOCKTAG locktag;
4522 
4523  vxid.backendId = MyBackendId;
4524  vxid.localTransactionId = lxid;
4525  SET_LOCKTAG_VIRTUALTRANSACTION(locktag, vxid);
4526 
4528  &locktag, ExclusiveLock, false);
4529  }
4530 }
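Schematic sketch of how these two routines pair up over a transaction's life. The real call sites are transaction start-up and LockReleaseAll(); the helper below and its ordering commentary are purely illustrative.

/*
 * Schematic only: the ordering VirtualXactLockTableInsert() and
 * VirtualXactLockTableCleanup() assume.  Not a real call site.
 */
static void
vxid_lock_lifecycle_sketch(void)
{
    VirtualTransactionId vxid;

    GET_VXID_FROM_PGPROC(vxid, *MyProc);    /* derive our own vxid */
    VirtualXactLockTableInsert(vxid);       /* advertise it via the fast path */

    /* ... transaction runs; a waiter may transfer the lock to the main table ... */

    VirtualXactLockTableCleanup();          /* release fast-path or main-table lock */
}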
4531 
4532 /*
4533  * VirtualXactLock
4534  *
4535  * If wait = true, wait until the given VXID has been released, and then
4536  * return true.
4537  *
4538  * If wait = false, just check whether the VXID is still running, and return
4539  * true or false.
4540  */
4541 bool
4542 VirtualXactLock(VirtualTransactionId vxid, bool wait)
4543 {
4544  LOCKTAG tag;
4545  PGPROC *proc;
4546 
4547  Assert(VirtualTransactionIdIsValid(vxid));
4548 
4549  if (VirtualTransactionIdIsPreparedXact(vxid))
4550  {
4551  LockAcquireResult lar;
4552 
4553  /*
4554  * Prepared transactions don't hold vxid locks. The
4555  * LocalTransactionId is always a normal, locked XID.
4556  */
4557  SET_LOCKTAG_TRANSACTION(tag, vxid.localTransactionId);
4558  lar = LockAcquire(&tag, ShareLock, false, !wait);
4559  if (lar != LOCKACQUIRE_NOT_AVAIL)
4560  LockRelease(&tag, ShareLock, false);
4561  return lar != LOCKACQUIRE_NOT_AVAIL;
4562  }
4563 
4564  SET_LOCKTAG_VIRTUALTRANSACTION(tag, vxid);
4565 
4566  /*
4567  * If a lock table entry must be made, this is the PGPROC on whose behalf
4568  * it must be done. Note that the transaction might end or the PGPROC
4569  * might be reassigned to a new backend before we get around to examining
4570  * it, but it doesn't matter. If we find upon examination that the
4571  * relevant lxid is no longer running here, that's enough to prove that
4572  * it's no longer running anywhere.
4573  */
4574  proc = BackendIdGetProc(vxid.backendId);
4575  if (proc == NULL)
4576  return true;
4577 
4578  /*
4579  * We must acquire this lock before checking the backendId and lxid
4580  * against the ones we're waiting for. The target backend will only set
4581  * or clear lxid while holding this lock.
4582  */
4583  LWLockAcquire(&proc->fpInfoLock, LW_SHARED);
4584 
4585  /* If the transaction has ended, our work here is done. */
4586  if (proc->backendId != vxid.backendId
4587  || proc->fpLocalTransactionId != vxid.localTransactionId)
4588  {
4589  LWLockRelease(&proc->fpInfoLock);
4590  return true;
4591  }
4592 
4593  /*
4594  * If we aren't asked to wait, there's no need to set up a lock table
4595  * entry. The transaction is still in progress, so just return false.
4596  */
4597  if (!wait)
4598  {
4599  LWLockRelease(&proc->fpInfoLock);
4600  return false;
4601  }
4602 
4603  /*
4604  * OK, we're going to need to sleep on the VXID. But first, we must set
4605  * up the primary lock table entry, if needed (ie, convert the proc's
4606  * fast-path lock on its VXID to a regular lock).
4607  */
4608  if (proc->fpVXIDLock)
4609  {
4610  PROCLOCK *proclock;
4611  uint32 hashcode;
4612  LWLock *partitionLock;
4613 
4614  hashcode = LockTagHashCode(&tag);
4615 
4616  partitionLock = LockHashPartitionLock(hashcode);
4617  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4618 
4619  proclock = SetupLockInTable(LockMethods[DEFAULT_LOCKMETHOD], proc,
4620  &tag, hashcode, ExclusiveLock);
4621  if (!proclock)
4622  {
4623  LWLockRelease(partitionLock);
4624  LWLockRelease(&proc->fpInfoLock);
4625  ereport(ERROR,
4626  (errcode(ERRCODE_OUT_OF_MEMORY),
4627  errmsg("out of shared memory"),
4628  errhint("You might need to increase max_locks_per_transaction.")));
4629  }
4630  GrantLock(proclock->tag.myLock, proclock, ExclusiveLock);
4631 
4632  LWLockRelease(partitionLock);
4633 
4634  proc->fpVXIDLock = false;
4635  }
4636 
4637  /* Done with proc->fpLockBits */
4638  LWLockRelease(&proc->fpInfoLock);
4639 
4640  /* Time to wait. */
4641  (void) LockAcquire(&tag, ShareLock, false, false);
4642 
4643  LockRelease(&tag, ShareLock, false);
4644  return true;
4645 }
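Sketch of the typical caller pattern: collect the VXIDs currently conflicting with a lock request using GetLockConflicts() and wait for each in turn. The helper name and the omission of error handling and progress reporting are illustrative assumptions, not a copy of any real caller.

/*
 * Illustration only: wait for every transaction currently conflicting with
 * the given lock request to finish.
 */
static void
wait_for_conflicting_vxids(const LOCKTAG *locktag, LOCKMODE lockmode)
{
    VirtualTransactionId *vxids;
    int         nvxids;
    int         i;

    vxids = GetLockConflicts(locktag, lockmode, &nvxids);

    for (i = 0; i < nvxids; i++)
        if (VirtualTransactionIdIsValid(vxids[i]))
            (void) VirtualXactLock(vxids[i], true);     /* wait = true */
}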
4646 
4647 /*
4648  * LockWaiterCount
4649  *
4650  * Find the number of lock requesters on this locktag
4651  */
4652 int
4653 LockWaiterCount(const LOCKTAG *locktag)
4654 {
4655  LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
4656  LOCK *lock;
4657  bool found;
4658  uint32 hashcode;
4659  LWLock *partitionLock;
4660  int waiters = 0;
4661 
4662  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4663  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4664 
4665  hashcode = LockTagHashCode(locktag);
4666  partitionLock = LockHashPartitionLock(hashcode);
4667  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4668 
4669  lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
4670  (const void *) locktag,
4671  hashcode,
4672  HASH_FIND,
4673  &found);
4674  if (found)
4675  {
4676  Assert(lock != NULL);
4677  waiters = lock->nRequested;
4678  }
4679  LWLockRelease(partitionLock);
4680 
4681  return waiters;
4682 }
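Minimal usage sketch (hypothetical helper; dboid and reloid are caller-supplied). Note that despite the name, the returned count includes granted holders as well as waiters, since it reports lock->nRequested.

/* Illustration: how many backends currently request a given relation lock. */
static int
relation_lock_requesters(Oid dboid, Oid reloid)
{
    LOCKTAG     tag;

    SET_LOCKTAG_RELATION(tag, dboid, reloid);   /* tag the relation */
    return LockWaiterCount(&tag);               /* granted + waiting requesters */
}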