/*-------------------------------------------------------------------------
 *
 * lock.c
 *    POSTGRES primary lock mechanism
 *
 * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *    src/backend/storage/lmgr/lock.c
 *
 * NOTES
 *    A lock table is a shared memory hash table.  When
 *    a process tries to acquire a lock of a type that conflicts
 *    with existing locks, it is put to sleep using the routines
 *    in storage/lmgr/proc.c.
 *
 *    For the most part, this code should be invoked via lmgr.c
 *    or another lock-management module, not directly.
 *
 *  Interface:
 *
 *  InitLocks(), GetLocksMethodTable(), GetLockTagsMethodTable(),
 *  LockAcquire(), LockRelease(), LockReleaseAll(),
 *  LockCheckConflicts(), GrantLock()
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include <signal.h>
#include <unistd.h>

#include "access/transam.h"
#include "access/twophase.h"
#include "access/twophase_rmgr.h"
#include "access/xact.h"
#include "access/xlog.h"
#include "miscadmin.h"
#include "pg_trace.h"
#include "pgstat.h"
#include "storage/proc.h"
#include "storage/procarray.h"
#include "storage/sinvaladt.h"
#include "storage/spin.h"
#include "storage/standby.h"
#include "utils/memutils.h"
#include "utils/ps_status.h"
#include "utils/resowner_private.h"


/* This configuration variable is used to set the lock table size */
int         max_locks_per_xact; /* set by guc.c */

#define NLOCKENTS() \
    mul_size(max_locks_per_xact, add_size(MaxBackends, max_prepared_xacts))

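/*
 * Illustrative sizing example (settings assumed, not taken from this file):
 * with the default max_locks_per_xact = 64 and, say, MaxBackends = 100 and
 * max_prepared_xacts = 0, NLOCKENTS() requests
 *
 *      64 * (100 + 0) = 6400
 *
 * shared lock-table entries.  mul_size() and add_size() are the
 * overflow-checking arithmetic helpers used for shared-memory sizing, so an
 * oversized configuration fails cleanly at startup instead of wrapping.
 */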

/*
 * Data structures defining the semantics of the standard lock methods.
 *
 * The conflict table defines the semantics of the various lock modes.
 */
static const LOCKMASK LockConflicts[] = {
    0,

    /* AccessShareLock */
    LOCKBIT_ON(AccessExclusiveLock),

    /* RowShareLock */
    LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),

    /* RowExclusiveLock */
    LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
    LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),

    /* ShareUpdateExclusiveLock */
    LOCKBIT_ON(ShareUpdateExclusiveLock) |
    LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
    LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),

    /* ShareLock */
    LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
    LOCKBIT_ON(ShareRowExclusiveLock) |
    LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),

    /* ShareRowExclusiveLock */
    LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
    LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
    LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),

    /* ExclusiveLock */
    LOCKBIT_ON(RowShareLock) |
    LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
    LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
    LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock),

    /* AccessExclusiveLock */
    LOCKBIT_ON(AccessShareLock) | LOCKBIT_ON(RowShareLock) |
    LOCKBIT_ON(RowExclusiveLock) | LOCKBIT_ON(ShareUpdateExclusiveLock) |
    LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ShareRowExclusiveLock) |
    LOCKBIT_ON(ExclusiveLock) | LOCKBIT_ON(AccessExclusiveLock)

};

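/*
 * Reading the conflict table (illustrative): each entry is a bitmask with
 * LOCKBIT_ON(m) set for every mode m that conflicts with the entry's mode.
 * For example,
 *
 *      LockConflicts[RowExclusiveLock] & LOCKBIT_ON(ShareLock)
 *
 * is nonzero (the two modes conflict), while
 * LockConflicts[RowExclusiveLock] & LOCKBIT_ON(RowExclusiveLock) is zero:
 * two transactions may hold RowExclusiveLock on the same relation at once.
 */
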
/* Names of lock modes, for debug printouts */
static const char *const lock_mode_names[] =
{
    "INVALID",
    "AccessShareLock",
    "RowShareLock",
    "RowExclusiveLock",
    "ShareUpdateExclusiveLock",
    "ShareLock",
    "ShareRowExclusiveLock",
    "ExclusiveLock",
    "AccessExclusiveLock"
};

#ifndef LOCK_DEBUG
static bool Dummy_trace = false;
#endif

static const LockMethodData default_lockmethod = {
    AccessExclusiveLock,        /* highest valid lock mode number */
    LockConflicts,
    lock_mode_names,
#ifdef LOCK_DEBUG
    &Trace_locks
#else
    &Dummy_trace
#endif
};

static const LockMethodData user_lockmethod = {
    AccessExclusiveLock,        /* highest valid lock mode number */
    LockConflicts,
    lock_mode_names,
#ifdef LOCK_DEBUG
    &Trace_userlocks
#else
    &Dummy_trace
#endif
};

/*
 * map from lock method id to the lock table data structures
 */
static const LockMethod LockMethods[] = {
    NULL,
    &default_lockmethod,
    &user_lockmethod
};


/* Record that's written to 2PC state file when a lock is persisted */
typedef struct TwoPhaseLockRecord
{
    LOCKTAG     locktag;
    LOCKMODE    lockmode;
} TwoPhaseLockRecord;

/*
 * Count of the number of fast path lock slots we believe to be used.  This
 * might be higher than the real number if another backend has transferred
 * our locks to the primary lock table, but it can never be lower than the
 * real value, since only we can acquire locks on our own behalf.
 */
static int  FastPathLocalUseCount = 0;

/* Macros for manipulating proc->fpLockBits */
#define FAST_PATH_BITS_PER_SLOT         3
#define FAST_PATH_LOCKNUMBER_OFFSET     1
#define FAST_PATH_MASK                  ((1 << FAST_PATH_BITS_PER_SLOT) - 1)
#define FAST_PATH_GET_BITS(proc, n) \
    (((proc)->fpLockBits >> (FAST_PATH_BITS_PER_SLOT * n)) & FAST_PATH_MASK)
#define FAST_PATH_BIT_POSITION(n, l) \
    (AssertMacro((l) >= FAST_PATH_LOCKNUMBER_OFFSET), \
     AssertMacro((l) < FAST_PATH_BITS_PER_SLOT+FAST_PATH_LOCKNUMBER_OFFSET), \
     AssertMacro((n) < FP_LOCK_SLOTS_PER_BACKEND), \
     ((l) - FAST_PATH_LOCKNUMBER_OFFSET + FAST_PATH_BITS_PER_SLOT * (n)))
#define FAST_PATH_SET_LOCKMODE(proc, n, l) \
     (proc)->fpLockBits |= UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)
#define FAST_PATH_CLEAR_LOCKMODE(proc, n, l) \
     (proc)->fpLockBits &= ~(UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l))
#define FAST_PATH_CHECK_LOCKMODE(proc, n, l) \
     ((proc)->fpLockBits & (UINT64CONST(1) << FAST_PATH_BIT_POSITION(n, l)))

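/*
 * Worked example of the fpLockBits layout (illustrative): each fast-path
 * slot gets FAST_PATH_BITS_PER_SLOT = 3 bits, one per fast-path-eligible
 * mode from AccessShareLock (1) through RowExclusiveLock (3).  For slot
 * n = 2 and l = AccessShareLock, FAST_PATH_BIT_POSITION(2, AccessShareLock)
 * evaluates to (1 - 1 + 3 * 2) = 6, so FAST_PATH_SET_LOCKMODE sets bit 6
 * of fpLockBits and FAST_PATH_CHECK_LOCKMODE tests that same bit.
 */
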
/*
 * The fast-path lock mechanism is concerned only with relation locks on
 * unshared relations by backends bound to a database.  The fast-path
 * mechanism exists mostly to accelerate acquisition and release of locks
 * that rarely conflict.  Because ShareUpdateExclusiveLock is
 * self-conflicting, it can't use the fast-path mechanism; but it also does
 * not conflict with any of the locks that do, so we can ignore it completely.
 */
#define EligibleForRelationFastPath(locktag, mode) \
    ((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
    (locktag)->locktag_type == LOCKTAG_RELATION && \
    (locktag)->locktag_field1 == MyDatabaseId && \
    MyDatabaseId != InvalidOid && \
    (mode) < ShareUpdateExclusiveLock)
#define ConflictsWithRelationFastPath(locktag, mode) \
    ((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
    (locktag)->locktag_type == LOCKTAG_RELATION && \
    (locktag)->locktag_field1 != InvalidOid && \
    (mode) > ShareUpdateExclusiveLock)

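/*
 * Examples (illustrative): AccessShareLock through RowExclusiveLock on an
 * ordinary table in our own database satisfy EligibleForRelationFastPath;
 * ShareUpdateExclusiveLock satisfies neither macro, per the note above; and
 * ShareLock and stronger satisfy ConflictsWithRelationFastPath, which is
 * what forces already-taken fast-path locks to be transferred to the main
 * lock table before such a lock can be granted.
 */
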
static bool FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode);
static bool FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode);
static bool FastPathTransferRelationLocks(LockMethod lockMethodTable,
                                          const LOCKTAG *locktag, uint32 hashcode);
static PROCLOCK *FastPathGetRelationLockEntry(LOCALLOCK *locallock);

/*
 * To make the fast-path lock mechanism work, we must have some way of
 * preventing the use of the fast-path when a conflicting lock might be present.
 * We partition the locktag space into FAST_PATH_STRONG_LOCK_HASH_PARTITIONS,
 * and maintain an integer count of the number of "strong" lockers
 * in each partition.  When any "strong" lockers are present (which is
 * hopefully not very often), the fast-path mechanism can't be used, and we
 * must fall back to the slower method of pushing matching locks directly
 * into the main lock tables.
 *
 * The deadlock detector does not know anything about the fast path mechanism,
 * so any locks that might be involved in a deadlock must be transferred from
 * the fast-path queues to the main lock table.
 */

#define FAST_PATH_STRONG_LOCK_HASH_BITS         10
#define FAST_PATH_STRONG_LOCK_HASH_PARTITIONS \
    (1 << FAST_PATH_STRONG_LOCK_HASH_BITS)
#define FastPathStrongLockHashPartition(hashcode) \
    ((hashcode) % FAST_PATH_STRONG_LOCK_HASH_PARTITIONS)

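/*
 * Illustrative mapping: FAST_PATH_STRONG_LOCK_HASH_BITS = 10 gives 1024
 * partitions, so a locktag whose hashcode is 0x2A517 (a made-up value)
 * falls in strong-lock partition 0x2A517 % 1024 = 0x117; only that one
 * counter needs to be checked or bumped for the tag.
 */
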
typedef struct
{
    slock_t     mutex;
    uint32      count[FAST_PATH_STRONG_LOCK_HASH_PARTITIONS];
} FastPathStrongRelationLockData;

static volatile FastPathStrongRelationLockData *FastPathStrongRelationLocks;


/*
 * Pointers to hash tables containing lock state
 *
 * The LockMethodLockHash and LockMethodProcLockHash hash tables are in
 * shared memory; LockMethodLocalHash is local to each backend.
 */
static HTAB *LockMethodLockHash;
static HTAB *LockMethodProcLockHash;
static HTAB *LockMethodLocalHash;


/* private state for error cleanup */
static LOCALLOCK *StrongLockInProgress;
static LOCALLOCK *awaitedLock;
static ResourceOwner awaitedOwner;

#ifdef LOCK_DEBUG

/*------
 * The following configuration options are available for lock debugging:
 *
 *     TRACE_LOCKS      -- give a bunch of output about what's going on in this file
 *     TRACE_USERLOCKS  -- same but for user locks
 *     TRACE_LOCK_OIDMIN -- do not trace locks for tables below this oid
 *                         (use to avoid output on system tables)
 *     TRACE_LOCK_TABLE -- trace locks on this table (oid) unconditionally
 *     DEBUG_DEADLOCKS  -- currently dumps locks at untimely occasions ;)
 *
 * Furthermore, but in storage/lmgr/lwlock.c:
 *     TRACE_LWLOCKS    -- trace lightweight locks (pretty useless)
 *
 * Define LOCK_DEBUG at compile time to get all these enabled.
 * --------
 */

int         Trace_lock_oidmin = FirstNormalObjectId;
bool        Trace_locks = false;
bool        Trace_userlocks = false;
int         Trace_lock_table = 0;
bool        Debug_deadlocks = false;


inline static bool
LOCK_DEBUG_ENABLED(const LOCKTAG *tag)
{
    return
        (*(LockMethods[tag->locktag_lockmethodid]->trace_flag) &&
         ((Oid) tag->locktag_field2 >= (Oid) Trace_lock_oidmin))
        || (Trace_lock_table &&
            (tag->locktag_field2 == Trace_lock_table));
}


inline static void
LOCK_PRINT(const char *where, const LOCK *lock, LOCKMODE type)
{
    if (LOCK_DEBUG_ENABLED(&lock->tag))
        elog(LOG,
             "%s: lock(%p) id(%u,%u,%u,%u,%u,%u) grantMask(%x) "
             "req(%d,%d,%d,%d,%d,%d,%d)=%d "
             "grant(%d,%d,%d,%d,%d,%d,%d)=%d wait(%d) type(%s)",
             where, lock,
             lock->tag.locktag_field1, lock->tag.locktag_field2,
             lock->tag.locktag_field3, lock->tag.locktag_field4,
             lock->tag.locktag_type, lock->tag.locktag_lockmethodid,
             lock->grantMask,
             lock->requested[1], lock->requested[2], lock->requested[3],
             lock->requested[4], lock->requested[5], lock->requested[6],
             lock->requested[7], lock->nRequested,
             lock->granted[1], lock->granted[2], lock->granted[3],
             lock->granted[4], lock->granted[5], lock->granted[6],
             lock->granted[7], lock->nGranted,
             lock->waitProcs.size,
             LockMethods[LOCK_LOCKMETHOD(*lock)]->lockModeNames[type]);
}


inline static void
PROCLOCK_PRINT(const char *where, const PROCLOCK *proclockP)
{
    if (LOCK_DEBUG_ENABLED(&proclockP->tag.myLock->tag))
        elog(LOG,
             "%s: proclock(%p) lock(%p) method(%u) proc(%p) hold(%x)",
             where, proclockP, proclockP->tag.myLock,
             PROCLOCK_LOCKMETHOD(*(proclockP)),
             proclockP->tag.myProc, (int) proclockP->holdMask);
}
#else                           /* not LOCK_DEBUG */

#define LOCK_PRINT(where, lock, type)  ((void) 0)
#define PROCLOCK_PRINT(where, proclockP)  ((void) 0)
#endif                          /* not LOCK_DEBUG */


static uint32 proclock_hash(const void *key, Size keysize);
static void RemoveLocalLock(LOCALLOCK *locallock);
static PROCLOCK *SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc,
                                  const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode);
static void GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner);
static void BeginStrongLockAcquire(LOCALLOCK *locallock, uint32 fasthashcode);
static void FinishStrongLockAcquire(void);
static void WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner);
static void ReleaseLockIfHeld(LOCALLOCK *locallock, bool sessionLock);
static void LockReassignOwner(LOCALLOCK *locallock, ResourceOwner parent);
static bool UnGrantLock(LOCK *lock, LOCKMODE lockmode,
                        PROCLOCK *proclock, LockMethod lockMethodTable);
static void CleanUpLock(LOCK *lock, PROCLOCK *proclock,
                        LockMethod lockMethodTable, uint32 hashcode,
                        bool wakeupNeeded);
static void LockRefindAndRelease(LockMethod lockMethodTable, PGPROC *proc,
                                 LOCKTAG *locktag, LOCKMODE lockmode,
                                 bool decrement_strong_lock_count);
static void GetSingleProcBlockerStatusData(PGPROC *blocked_proc,
                                           BlockedProcsData *data);


/*
 * InitLocks -- Initialize the lock manager's data structures.
 *
 * This is called from CreateSharedMemoryAndSemaphores(), which see for
 * more comments.  In the normal postmaster case, the shared hash tables
 * are created here, as well as a locallock hash table that will remain
 * unused and empty in the postmaster itself.  Backends inherit the pointers
 * to the shared tables via fork(), and also inherit an image of the locallock
 * hash table, which they proceed to use.  In the EXEC_BACKEND case, each
 * backend re-executes this code to obtain pointers to the already existing
 * shared hash tables and to create its locallock hash table.
 */
void
InitLocks(void)
{
    HASHCTL     info;
    long        init_table_size,
                max_table_size;
    bool        found;

    /*
     * Compute init/max size to request for lock hashtables.  Note these
     * calculations must agree with LockShmemSize!
     */
    max_table_size = NLOCKENTS();
    init_table_size = max_table_size / 2;

    /*
     * Allocate hash table for LOCK structs.  This stores per-locked-object
     * information.
     */
    MemSet(&info, 0, sizeof(info));
    info.keysize = sizeof(LOCKTAG);
    info.entrysize = sizeof(LOCK);
    info.num_partitions = NUM_LOCK_PARTITIONS;

    LockMethodLockHash = ShmemInitHash("LOCK hash",
                                       init_table_size,
                                       max_table_size,
                                       &info,
                                       HASH_ELEM | HASH_BLOBS | HASH_PARTITION);

    /* Assume an average of 2 holders per lock */
    max_table_size *= 2;
    init_table_size *= 2;

    /*
     * Allocate hash table for PROCLOCK structs.  This stores
     * per-lock-per-holder information.
     */
    info.keysize = sizeof(PROCLOCKTAG);
    info.entrysize = sizeof(PROCLOCK);
    info.hash = proclock_hash;
    info.num_partitions = NUM_LOCK_PARTITIONS;

    LockMethodProcLockHash = ShmemInitHash("PROCLOCK hash",
                                           init_table_size,
                                           max_table_size,
                                           &info,
                                           HASH_ELEM | HASH_FUNCTION | HASH_PARTITION);

    /*
     * Allocate fast-path structures.
     */
    FastPathStrongRelationLocks =
        ShmemInitStruct("Fast Path Strong Relation Lock Data",
                        sizeof(FastPathStrongRelationLockData), &found);
    if (!found)
        SpinLockInit(&FastPathStrongRelationLocks->mutex);

    /*
     * Allocate non-shared hash table for LOCALLOCK structs.  This stores lock
     * counts and resource owner information.
     *
     * The non-shared table could already exist in this process (this occurs
     * when the postmaster is recreating shared memory after a backend crash).
     * If so, delete and recreate it.  (We could simply leave it, since it
     * ought to be empty in the postmaster, but for safety let's zap it.)
     */
    if (LockMethodLocalHash)
        hash_destroy(LockMethodLocalHash);

    info.keysize = sizeof(LOCALLOCKTAG);
    info.entrysize = sizeof(LOCALLOCK);

    LockMethodLocalHash = hash_create("LOCALLOCK hash",
                                      16,
                                      &info,
                                      HASH_ELEM | HASH_BLOBS);
}


/*
 * Fetch the lock method table associated with a given lock
 */
LockMethod
GetLocksMethodTable(const LOCK *lock)
{
    LOCKMETHODID lockmethodid = LOCK_LOCKMETHOD(*lock);

    Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
    return LockMethods[lockmethodid];
}

/*
 * Fetch the lock method table associated with a given locktag
 */
LockMethod
GetLockTagsMethodTable(const LOCKTAG *locktag)
{
    LOCKMETHODID lockmethodid = (LOCKMETHODID) locktag->locktag_lockmethodid;

    Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
    return LockMethods[lockmethodid];
}


/*
 * Compute the hash code associated with a LOCKTAG.
 *
 * To avoid unnecessary recomputations of the hash code, we try to do this
 * just once per function, and then pass it around as needed.  Aside from
 * passing the hashcode to hash_search_with_hash_value(), we can extract
 * the lock partition number from the hashcode.
 */
uint32
LockTagHashCode(const LOCKTAG *locktag)
{
    return get_hash_value(LockMethodLockHash, (const void *) locktag);
}

/*
 * Compute the hash code associated with a PROCLOCKTAG.
 *
 * Because we want to use just one set of partition locks for both the
 * LOCK and PROCLOCK hash tables, we have to make sure that PROCLOCKs
 * fall into the same partition number as their associated LOCKs.
 * dynahash.c expects the partition number to be the low-order bits of
 * the hash code, and therefore a PROCLOCKTAG's hash code must have the
 * same low-order bits as the associated LOCKTAG's hash code.  We achieve
 * this with this specialized hash function.
 */
static uint32
proclock_hash(const void *key, Size keysize)
{
    const PROCLOCKTAG *proclocktag = (const PROCLOCKTAG *) key;
    uint32      lockhash;
    Datum       procptr;

    Assert(keysize == sizeof(PROCLOCKTAG));

    /* Look into the associated LOCK object, and compute its hash code */
    lockhash = LockTagHashCode(&proclocktag->myLock->tag);

    /*
     * To make the hash code also depend on the PGPROC, we xor the proc
     * struct's address into the hash code, left-shifted so that the
     * partition-number bits don't change.  Since this is only a hash, we
     * don't care if we lose high-order bits of the address; use an
     * intermediate variable to suppress cast-pointer-to-int warnings.
     */
    procptr = PointerGetDatum(proclocktag->myProc);
    lockhash ^= ((uint32) procptr) << LOG2_NUM_LOCK_PARTITIONS;

    return lockhash;
}

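/*
 * Consequence of the shift above (illustrative): the low-order
 * LOG2_NUM_LOCK_PARTITIONS bits are untouched, so for any PROCLOCKTAG tag
 *
 *      proclock_hash(&tag, sizeof(tag)) % NUM_LOCK_PARTITIONS ==
 *          LockTagHashCode(&tag.myLock->tag) % NUM_LOCK_PARTITIONS
 *
 * holds, which is exactly what lets LOCKs and PROCLOCKs share one set of
 * partition locks.
 */
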
/*
 * Compute the hash code associated with a PROCLOCKTAG, given the hashcode
 * for its underlying LOCK.
 *
 * We use this just to avoid redundant calls of LockTagHashCode().
 */
static inline uint32
ProcLockHashCode(const PROCLOCKTAG *proclocktag, uint32 hashcode)
{
    uint32      lockhash = hashcode;
    Datum       procptr;

    /*
     * This must match proclock_hash()!
     */
    procptr = PointerGetDatum(proclocktag->myProc);
    lockhash ^= ((uint32) procptr) << LOG2_NUM_LOCK_PARTITIONS;

    return lockhash;
}

/*
 * Given two lock modes, return whether they would conflict.
 */
bool
DoLockModesConflict(LOCKMODE mode1, LOCKMODE mode2)
{
    LockMethod  lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];

    if (lockMethodTable->conflictTab[mode1] & LOCKBIT_ON(mode2))
        return true;

    return false;
}

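/*
 * Usage sketch (illustrative): callers can ask about mode pairs directly,
 * e.g.
 *
 *      DoLockModesConflict(ShareLock, RowExclusiveLock)        == true
 *      DoLockModesConflict(AccessShareLock, RowExclusiveLock)  == false
 *
 * per the LockConflicts table above; the relation is symmetric.
 */
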
/*
 * LockHeldByMe -- test whether lock 'locktag' is held with mode 'lockmode'
 *      by the current transaction
 */
bool
LockHeldByMe(const LOCKTAG *locktag, LOCKMODE lockmode)
{
    LOCALLOCKTAG localtag;
    LOCALLOCK  *locallock;

    /*
     * See if there is a LOCALLOCK entry for this lock and lockmode
     */
    MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
    localtag.lock = *locktag;
    localtag.mode = lockmode;

    locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
                                          (void *) &localtag,
                                          HASH_FIND, NULL);

    return (locallock && locallock->nLocks > 0);
}

/*
 * LockHasWaiters -- look up 'locktag' and check if releasing this
 *      lock would wake up other processes waiting for it.
 */
bool
LockHasWaiters(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
{
    LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
    LockMethod  lockMethodTable;
    LOCALLOCKTAG localtag;
    LOCALLOCK  *locallock;
    LOCK       *lock;
    PROCLOCK   *proclock;
    LWLock     *partitionLock;
    bool        hasWaiters = false;

    if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
        elog(ERROR, "unrecognized lock method: %d", lockmethodid);
    lockMethodTable = LockMethods[lockmethodid];
    if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
        elog(ERROR, "unrecognized lock mode: %d", lockmode);

#ifdef LOCK_DEBUG
    if (LOCK_DEBUG_ENABLED(locktag))
        elog(LOG, "LockHasWaiters: lock [%u,%u] %s",
             locktag->locktag_field1, locktag->locktag_field2,
             lockMethodTable->lockModeNames[lockmode]);
#endif

    /*
     * Find the LOCALLOCK entry for this lock and lockmode
     */
    MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
    localtag.lock = *locktag;
    localtag.mode = lockmode;

    locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
                                          (void *) &localtag,
                                          HASH_FIND, NULL);

    /*
     * let the caller print its own error message, too.  Do not ereport(ERROR).
     */
    if (!locallock || locallock->nLocks <= 0)
    {
        elog(WARNING, "you don't own a lock of type %s",
             lockMethodTable->lockModeNames[lockmode]);
        return false;
    }

    /*
     * Check the shared lock table.
     */
    partitionLock = LockHashPartitionLock(locallock->hashcode);

    LWLockAcquire(partitionLock, LW_SHARED);

    /*
     * We don't need to re-find the lock or proclock, since we kept their
     * addresses in the locallock table, and they couldn't have been removed
     * while we were holding a lock on them.
     */
    lock = locallock->lock;
    LOCK_PRINT("LockHasWaiters: found", lock, lockmode);
    proclock = locallock->proclock;
    PROCLOCK_PRINT("LockHasWaiters: found", proclock);

    /*
     * Double-check that we are actually holding a lock of the type we want to
     * release.
     */
    if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
    {
        PROCLOCK_PRINT("LockHasWaiters: WRONGTYPE", proclock);
        LWLockRelease(partitionLock);
        elog(WARNING, "you don't own a lock of type %s",
             lockMethodTable->lockModeNames[lockmode]);
        RemoveLocalLock(locallock);
        return false;
    }

    /*
     * Do the checking.
     */
    if ((lockMethodTable->conflictTab[lockmode] & lock->waitMask) != 0)
        hasWaiters = true;

    LWLockRelease(partitionLock);

    return hasWaiters;
}

/*
 * LockAcquire -- Check for lock conflicts, sleep if conflict found,
 *      set lock if/when no conflicts.
 *
 * Inputs:
 *  locktag: unique identifier for the lockable object
 *  lockmode: lock mode to acquire
 *  sessionLock: if true, acquire lock for session not current transaction
 *  dontWait: if true, don't wait to acquire lock
 *
 * Returns one of:
 *      LOCKACQUIRE_NOT_AVAIL       lock not available, and dontWait=true
 *      LOCKACQUIRE_OK              lock successfully acquired
 *      LOCKACQUIRE_ALREADY_HELD    incremented count for lock already held
 *      LOCKACQUIRE_ALREADY_CLEAR   incremented count for lock already clear
 *
 * In the normal case where dontWait=false and the caller doesn't need to
 * distinguish a freshly acquired lock from one already taken earlier in
 * this same transaction, there is no need to examine the return value.
 *
 * Side Effects: The lock is acquired and recorded in lock tables.
 *
 * NOTE: if we wait for the lock, there is no way to abort the wait
 * short of aborting the transaction.
 */
LockAcquireResult
LockAcquire(const LOCKTAG *locktag,
            LOCKMODE lockmode,
            bool sessionLock,
            bool dontWait)
{
    return LockAcquireExtended(locktag, lockmode, sessionLock, dontWait,
                               true, NULL);
}

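/*
 * Usage sketch (illustrative, simplified): most callers go through lmgr.c
 * helpers such as LockRelationOid() rather than calling this directly, but
 * a direct call looks like
 *
 *      LOCKTAG     tag;
 *
 *      SET_LOCKTAG_RELATION(tag, MyDatabaseId, relid);
 *      (void) LockAcquire(&tag, AccessShareLock, false, false);
 *      ...
 *      LockRelease(&tag, AccessShareLock, false);
 *
 * where relid stands for the target relation's OID.
 */
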
/*
 * LockAcquireExtended - allows us to specify additional options
 *
 * reportMemoryError specifies whether a lock request that fills the lock
 * table should generate an ERROR or not.  Passing "false" allows the caller
 * to attempt to recover from lock-table-full situations, perhaps by forcibly
 * cancelling other lock holders and then retrying.  Note, however, that the
 * return code for that is LOCKACQUIRE_NOT_AVAIL, so that it's unsafe to use
 * in combination with dontWait = true, as the cause of failure couldn't be
 * distinguished.
 *
 * If locallockp isn't NULL, *locallockp receives a pointer to the LOCALLOCK
 * table entry if a lock is successfully acquired, or NULL if not.
 */
LockAcquireResult
LockAcquireExtended(const LOCKTAG *locktag,
                    LOCKMODE lockmode,
                    bool sessionLock,
                    bool dontWait,
                    bool reportMemoryError,
                    LOCALLOCK **locallockp)
{
    LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
    LockMethod  lockMethodTable;
    LOCALLOCKTAG localtag;
    LOCALLOCK  *locallock;
    LOCK       *lock;
    PROCLOCK   *proclock;
    bool        found;
    ResourceOwner owner;
    uint32      hashcode;
    LWLock     *partitionLock;
    bool        found_conflict;
    bool        log_lock = false;

    if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
        elog(ERROR, "unrecognized lock method: %d", lockmethodid);
    lockMethodTable = LockMethods[lockmethodid];
    if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
        elog(ERROR, "unrecognized lock mode: %d", lockmode);

    if (RecoveryInProgress() && !InRecovery &&
        (locktag->locktag_type == LOCKTAG_OBJECT ||
         locktag->locktag_type == LOCKTAG_RELATION) &&
        lockmode > RowExclusiveLock)
        ereport(ERROR,
                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
                 errmsg("cannot acquire lock mode %s on database objects while recovery is in progress",
                        lockMethodTable->lockModeNames[lockmode]),
                 errhint("Only RowExclusiveLock or less can be acquired on database objects during recovery.")));

#ifdef LOCK_DEBUG
    if (LOCK_DEBUG_ENABLED(locktag))
        elog(LOG, "LockAcquire: lock [%u,%u] %s",
             locktag->locktag_field1, locktag->locktag_field2,
             lockMethodTable->lockModeNames[lockmode]);
#endif

    /* Identify owner for lock */
    if (sessionLock)
        owner = NULL;
    else
        owner = CurrentResourceOwner;

    /*
     * Find or create a LOCALLOCK entry for this lock and lockmode
     */
    MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
    localtag.lock = *locktag;
    localtag.mode = lockmode;

    locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
                                          (void *) &localtag,
                                          HASH_ENTER, &found);

    /*
     * if it's a new locallock object, initialize it
     */
    if (!found)
    {
        locallock->lock = NULL;
        locallock->proclock = NULL;
        locallock->hashcode = LockTagHashCode(&(localtag.lock));
        locallock->nLocks = 0;
        locallock->holdsStrongLockCount = false;
        locallock->lockCleared = false;
        locallock->numLockOwners = 0;
        locallock->maxLockOwners = 8;
        locallock->lockOwners = NULL;   /* in case next line fails */
        locallock->lockOwners = (LOCALLOCKOWNER *)
            MemoryContextAlloc(TopMemoryContext,
                               locallock->maxLockOwners * sizeof(LOCALLOCKOWNER));
    }
    else
    {
        /* Make sure there will be room to remember the lock */
        if (locallock->numLockOwners >= locallock->maxLockOwners)
        {
            int         newsize = locallock->maxLockOwners * 2;

            locallock->lockOwners = (LOCALLOCKOWNER *)
                repalloc(locallock->lockOwners,
                         newsize * sizeof(LOCALLOCKOWNER));
            locallock->maxLockOwners = newsize;
        }
    }
    hashcode = locallock->hashcode;

    if (locallockp)
        *locallockp = locallock;

    /*
     * If we already hold the lock, we can just increase the count locally.
     *
     * If lockCleared is already set, caller need not worry about absorbing
     * sinval messages related to the lock's object.
     */
    if (locallock->nLocks > 0)
    {
        GrantLockLocal(locallock, owner);
        if (locallock->lockCleared)
            return LOCKACQUIRE_ALREADY_CLEAR;
        else
            return LOCKACQUIRE_ALREADY_HELD;
    }

    /*
     * Prepare to emit a WAL record if acquisition of this lock needs to be
     * replayed in a standby server.
     *
     * Here we prepare to log; after lock is acquired we'll issue log record.
     * This arrangement simplifies error recovery in case the preparation step
     * fails.
     *
     * Only AccessExclusiveLocks can conflict with lock types that read-only
     * transactions can acquire in a standby server. Make sure this definition
     * matches the one in GetRunningTransactionLocks().
     */
    if (lockmode >= AccessExclusiveLock &&
        locktag->locktag_type == LOCKTAG_RELATION &&
        !RecoveryInProgress() &&
        XLogStandbyInfoActive())
    {
        LogAccessExclusiveLockPrepare();
        log_lock = true;
    }

    /*
     * Attempt to take lock via fast path, if eligible.  But if we remember
     * having filled up the fast path array, we don't attempt to make any
     * further use of it until we release some locks.  It's possible that some
     * other backend has transferred some of those locks to the shared hash
     * table, leaving space free, but it's not worth acquiring the LWLock just
     * to check.  It's also possible that we're acquiring a second or third
     * lock type on a relation we have already locked using the fast-path, but
     * for now we don't worry about that case either.
     */
    if (EligibleForRelationFastPath(locktag, lockmode) &&
        FastPathLocalUseCount < FP_LOCK_SLOTS_PER_BACKEND)
    {
        uint32      fasthashcode = FastPathStrongLockHashPartition(hashcode);
        bool        acquired;

        /*
         * LWLockAcquire acts as a memory sequencing point, so it's safe to
         * assume that any strong locker whose increment to
         * FastPathStrongRelationLocks->counts becomes visible after we test
         * it has yet to begin to transfer fast-path locks.
         */
        LWLockAcquire(&MyProc->backendLock, LW_EXCLUSIVE);
        if (FastPathStrongRelationLocks->count[fasthashcode] != 0)
            acquired = false;
        else
            acquired = FastPathGrantRelationLock(locktag->locktag_field2,
                                                 lockmode);
        LWLockRelease(&MyProc->backendLock);
        if (acquired)
        {
            /*
             * The locallock might contain stale pointers to some old shared
             * objects; we MUST reset these to null before considering the
             * lock to be acquired via fast-path.
             */
            locallock->lock = NULL;
            locallock->proclock = NULL;
            GrantLockLocal(locallock, owner);
            return LOCKACQUIRE_OK;
        }
    }

    /*
     * If this lock could potentially have been taken via the fast-path by
     * some other backend, we must (temporarily) disable further use of the
     * fast-path for this lock tag, and migrate any locks already taken via
     * this method to the main lock table.
     */
    if (ConflictsWithRelationFastPath(locktag, lockmode))
    {
        uint32      fasthashcode = FastPathStrongLockHashPartition(hashcode);

        BeginStrongLockAcquire(locallock, fasthashcode);
        if (!FastPathTransferRelationLocks(lockMethodTable, locktag,
                                           hashcode))
        {
            AbortStrongLockAcquire();
            if (locallock->nLocks == 0)
                RemoveLocalLock(locallock);
            if (locallockp)
                *locallockp = NULL;
            if (reportMemoryError)
                ereport(ERROR,
                        (errcode(ERRCODE_OUT_OF_MEMORY),
                         errmsg("out of shared memory"),
                         errhint("You might need to increase max_locks_per_transaction.")));
            else
                return LOCKACQUIRE_NOT_AVAIL;
        }
    }

    /*
     * We didn't find the lock in our LOCALLOCK table, and we didn't manage to
     * take it via the fast-path, either, so we've got to mess with the shared
     * lock table.
     */
    partitionLock = LockHashPartitionLock(hashcode);

    LWLockAcquire(partitionLock, LW_EXCLUSIVE);

    /*
     * Find or create lock and proclock entries with this tag
     *
     * Note: if the locallock object already existed, it might have a pointer
     * to the lock already ... but we should not assume that that pointer is
     * valid, since a lock object with zero hold and request counts can go
     * away anytime.  So we have to use SetupLockInTable() to recompute the
     * lock and proclock pointers, even if they're already set.
     */
    proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
                                hashcode, lockmode);
    if (!proclock)
    {
        AbortStrongLockAcquire();
        LWLockRelease(partitionLock);
        if (locallock->nLocks == 0)
            RemoveLocalLock(locallock);
        if (locallockp)
            *locallockp = NULL;
        if (reportMemoryError)
            ereport(ERROR,
                    (errcode(ERRCODE_OUT_OF_MEMORY),
                     errmsg("out of shared memory"),
                     errhint("You might need to increase max_locks_per_transaction.")));
        else
            return LOCKACQUIRE_NOT_AVAIL;
    }
    locallock->proclock = proclock;
    lock = proclock->tag.myLock;
    locallock->lock = lock;

    /*
     * If lock requested conflicts with locks requested by waiters, must join
     * wait queue.  Otherwise, check for conflict with already-held locks.
     * (That's last because most complex check.)
     */
    if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
        found_conflict = true;
    else
        found_conflict = LockCheckConflicts(lockMethodTable, lockmode,
                                            lock, proclock);

    if (!found_conflict)
    {
        /* No conflict with held or previously requested locks */
        GrantLock(lock, proclock, lockmode);
        GrantLockLocal(locallock, owner);
    }
    else
    {
        /*
         * We can't acquire the lock immediately.  If caller specified no
         * blocking, remove useless table entries and return
         * LOCKACQUIRE_NOT_AVAIL without waiting.
         */
        if (dontWait)
        {
            AbortStrongLockAcquire();
            if (proclock->holdMask == 0)
            {
                uint32      proclock_hashcode;

                proclock_hashcode = ProcLockHashCode(&proclock->tag, hashcode);
                SHMQueueDelete(&proclock->lockLink);
                SHMQueueDelete(&proclock->procLink);
                if (!hash_search_with_hash_value(LockMethodProcLockHash,
                                                 (void *) &(proclock->tag),
                                                 proclock_hashcode,
                                                 HASH_REMOVE,
                                                 NULL))
                    elog(PANIC, "proclock table corrupted");
            }
            else
                PROCLOCK_PRINT("LockAcquire: NOWAIT", proclock);
            lock->nRequested--;
            lock->requested[lockmode]--;
            LOCK_PRINT("LockAcquire: conditional lock failed", lock, lockmode);
            Assert((lock->nRequested > 0) && (lock->requested[lockmode] >= 0));
            Assert(lock->nGranted <= lock->nRequested);
            LWLockRelease(partitionLock);
            if (locallock->nLocks == 0)
                RemoveLocalLock(locallock);
            if (locallockp)
                *locallockp = NULL;
            return LOCKACQUIRE_NOT_AVAIL;
        }

        /*
         * Set bitmask of locks this process already holds on this object.
         */
        MyProc->heldLocks = proclock->holdMask;

        /*
         * Sleep till someone wakes me up.
         */

        TRACE_POSTGRESQL_LOCK_WAIT_START(locktag->locktag_field1,
                                         locktag->locktag_field2,
                                         locktag->locktag_field3,
                                         locktag->locktag_field4,
                                         locktag->locktag_type,
                                         lockmode);

        WaitOnLock(locallock, owner);

        TRACE_POSTGRESQL_LOCK_WAIT_DONE(locktag->locktag_field1,
                                        locktag->locktag_field2,
                                        locktag->locktag_field3,
                                        locktag->locktag_field4,
                                        locktag->locktag_type,
                                        lockmode);

        /*
         * NOTE: do not do any material change of state between here and
         * return.  All required changes in locktable state must have been
         * done when the lock was granted to us --- see notes in WaitOnLock.
         */

        /*
         * Check the proclock entry status, in case something in the ipc
         * communication doesn't work correctly.
         */
        if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
        {
            AbortStrongLockAcquire();
            PROCLOCK_PRINT("LockAcquire: INCONSISTENT", proclock);
            LOCK_PRINT("LockAcquire: INCONSISTENT", lock, lockmode);
            /* Should we retry ? */
            LWLockRelease(partitionLock);
            elog(ERROR, "LockAcquire failed");
        }
        PROCLOCK_PRINT("LockAcquire: granted", proclock);
        LOCK_PRINT("LockAcquire: granted", lock, lockmode);
    }

    /*
     * Lock state is fully up-to-date now; if we error out after this, no
     * special error cleanup is required.
     */
    FinishStrongLockAcquire();

    LWLockRelease(partitionLock);

    /*
     * Emit a WAL record if acquisition of this lock needs to be replayed in a
     * standby server.
     */
    if (log_lock)
    {
        /*
         * Decode the locktag back to the original values, to avoid sending
         * lots of empty bytes with every message.  See lock.h to check how a
         * locktag is defined for LOCKTAG_RELATION
         */
        LogAccessExclusiveLock(locktag->locktag_field1,
                               locktag->locktag_field2);
    }

    return LOCKACQUIRE_OK;
}

/*
 * Find or create LOCK and PROCLOCK objects as needed for a new lock
 * request.
 *
 * Returns the PROCLOCK object, or NULL if we failed to create the objects
 * for lack of shared memory.
 *
 * The appropriate partition lock must be held at entry, and will be
 * held at exit.
 */
static PROCLOCK *
SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc,
                 const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode)
{
    LOCK       *lock;
    PROCLOCK   *proclock;
    PROCLOCKTAG proclocktag;
    uint32      proclock_hashcode;
    bool        found;

    /*
     * Find or create a lock with this tag.
     */
    lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
                                                (const void *) locktag,
                                                hashcode,
                                                HASH_ENTER_NULL,
                                                &found);
    if (!lock)
        return NULL;

    /*
     * if it's a new lock object, initialize it
     */
    if (!found)
    {
        lock->grantMask = 0;
        lock->waitMask = 0;
        SHMQueueInit(&(lock->procLocks));
        ProcQueueInit(&(lock->waitProcs));
        lock->nRequested = 0;
        lock->nGranted = 0;
        MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
        MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
        LOCK_PRINT("LockAcquire: new", lock, lockmode);
    }
    else
    {
        LOCK_PRINT("LockAcquire: found", lock, lockmode);
        Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
        Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
        Assert(lock->nGranted <= lock->nRequested);
    }

    /*
     * Create the hash key for the proclock table.
     */
    proclocktag.myLock = lock;
    proclocktag.myProc = proc;

    proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);

    /*
     * Find or create a proclock entry with this tag
     */
    proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
                                                        (void *) &proclocktag,
                                                        proclock_hashcode,
                                                        HASH_ENTER_NULL,
                                                        &found);
    if (!proclock)
    {
        /* Oops, not enough shmem for the proclock */
        if (lock->nRequested == 0)
        {
            /*
             * There are no other requestors of this lock, so garbage-collect
             * the lock object.  We *must* do this to avoid a permanent leak
             * of shared memory, because there won't be anything to cause
             * anyone to release the lock object later.
             */
            Assert(SHMQueueEmpty(&(lock->procLocks)));
            if (!hash_search_with_hash_value(LockMethodLockHash,
                                             (void *) &(lock->tag),
                                             hashcode,
                                             HASH_REMOVE,
                                             NULL))
                elog(PANIC, "lock table corrupted");
        }
        return NULL;
    }

    /*
     * If new, initialize the new entry
     */
    if (!found)
    {
        uint32      partition = LockHashPartition(hashcode);

        /*
         * It might seem unsafe to access proclock->groupLeader without a
         * lock, but it's not really.  Either we are initializing a proclock
         * on our own behalf, in which case our group leader isn't changing
         * because the group leader for a process can only ever be changed by
         * the process itself; or else we are transferring a fast-path lock to
         * the main lock table, in which case that process can't change its
         * lock group leader without first releasing all of its locks (and in
         * particular the one we are currently transferring).
         */
        proclock->groupLeader = proc->lockGroupLeader != NULL ?
            proc->lockGroupLeader : proc;
        proclock->holdMask = 0;
        proclock->releaseMask = 0;
        /* Add proclock to appropriate lists */
        SHMQueueInsertBefore(&lock->procLocks, &proclock->lockLink);
        SHMQueueInsertBefore(&(proc->myProcLocks[partition]),
                             &proclock->procLink);
        PROCLOCK_PRINT("LockAcquire: new", proclock);
    }
    else
    {
        PROCLOCK_PRINT("LockAcquire: found", proclock);
        Assert((proclock->holdMask & ~lock->grantMask) == 0);

#ifdef CHECK_DEADLOCK_RISK

        /*
         * Issue warning if we already hold a lower-level lock on this object
         * and do not hold a lock of the requested level or higher.  This
         * indicates a deadlock-prone coding practice (eg, we'd have a
         * deadlock if another backend were following the same code path at
         * about the same time).
         *
         * This is not enabled by default, because it may generate log entries
         * about user-level coding practices that are in fact safe in context.
         * It can be enabled to help find system-level problems.
         *
         * XXX Doing numeric comparison on the lockmodes is a hack; it'd be
         * better to use a table.  For now, though, this works.
         */
        {
            int         i;

            for (i = lockMethodTable->numLockModes; i > 0; i--)
            {
                if (proclock->holdMask & LOCKBIT_ON(i))
                {
                    if (i >= (int) lockmode)
                        break;  /* safe: we have a lock >= req level */
                    elog(LOG, "deadlock risk: raising lock level"
                         " from %s to %s on object %u/%u/%u",
                         lockMethodTable->lockModeNames[i],
                         lockMethodTable->lockModeNames[lockmode],
                         lock->tag.locktag_field1, lock->tag.locktag_field2,
                         lock->tag.locktag_field3);
                    break;
                }
            }
        }
#endif                          /* CHECK_DEADLOCK_RISK */
    }

    /*
     * lock->nRequested and lock->requested[] count the total number of
     * requests, whether granted or waiting, so increment those immediately.
     * The other counts don't increment till we get the lock.
     */
    lock->nRequested++;
    lock->requested[lockmode]++;
    Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));

    /*
     * We shouldn't already hold the desired lock; else locallock table is
     * broken.
     */
    if (proclock->holdMask & LOCKBIT_ON(lockmode))
        elog(ERROR, "lock %s on object %u/%u/%u is already held",
             lockMethodTable->lockModeNames[lockmode],
             lock->tag.locktag_field1, lock->tag.locktag_field2,
             lock->tag.locktag_field3);

    return proclock;
}

/*
 * Subroutine to free a locallock entry
 */
static void
RemoveLocalLock(LOCALLOCK *locallock)
{
    int         i;

    for (i = locallock->numLockOwners - 1; i >= 0; i--)
    {
        if (locallock->lockOwners[i].owner != NULL)
            ResourceOwnerForgetLock(locallock->lockOwners[i].owner, locallock);
    }
    locallock->numLockOwners = 0;
    if (locallock->lockOwners != NULL)
        pfree(locallock->lockOwners);
    locallock->lockOwners = NULL;

    if (locallock->holdsStrongLockCount)
    {
        uint32      fasthashcode;

        fasthashcode = FastPathStrongLockHashPartition(locallock->hashcode);

        SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
        Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
        FastPathStrongRelationLocks->count[fasthashcode]--;
        locallock->holdsStrongLockCount = false;
        SpinLockRelease(&FastPathStrongRelationLocks->mutex);
    }

    if (!hash_search(LockMethodLocalHash,
                     (void *) &(locallock->tag),
                     HASH_REMOVE, NULL))
        elog(WARNING, "locallock table corrupted");
}

/*
 * LockCheckConflicts -- test whether requested lock conflicts
 *      with those already granted
 *
 * Returns true if conflict, false if no conflict.
 *
 * NOTES:
 *      Here's what makes this complicated: one process's locks don't
 * conflict with one another, no matter what purpose they are held for
 * (eg, session and transaction locks do not conflict).  Nor do the locks
 * of one process in a lock group conflict with those of another process in
 * the same group.  So, we must subtract off these locks when determining
 * whether the requested new lock conflicts with those already held.
 */
bool
LockCheckConflicts(LockMethod lockMethodTable,
                   LOCKMODE lockmode,
                   LOCK *lock,
                   PROCLOCK *proclock)
{
    int         numLockModes = lockMethodTable->numLockModes;
    LOCKMASK    myLocks;
    int         conflictMask = lockMethodTable->conflictTab[lockmode];
    int         conflictsRemaining[MAX_LOCKMODES];
    int         totalConflictsRemaining = 0;
    int         i;
    SHM_QUEUE  *procLocks;
    PROCLOCK   *otherproclock;

    /*
     * first check for global conflicts: If no locks conflict with my request,
     * then I get the lock.
     *
     * Checking for conflict: lock->grantMask represents the types of
     * currently held locks.  conflictTable[lockmode] has a bit set for each
     * type of lock that conflicts with request.  Bitwise compare tells if
     * there is a conflict.
     */
    if (!(conflictMask & lock->grantMask))
    {
        PROCLOCK_PRINT("LockCheckConflicts: no conflict", proclock);
        return false;
    }

    /*
     * Rats.  Something conflicts.  But it could still be my own lock, or a
     * lock held by another member of my locking group.  First, figure out how
     * many conflicts remain after subtracting out any locks I hold myself.
     */
    myLocks = proclock->holdMask;
    for (i = 1; i <= numLockModes; i++)
    {
        if ((conflictMask & LOCKBIT_ON(i)) == 0)
        {
            conflictsRemaining[i] = 0;
            continue;
        }
        conflictsRemaining[i] = lock->granted[i];
        if (myLocks & LOCKBIT_ON(i))
            --conflictsRemaining[i];
        totalConflictsRemaining += conflictsRemaining[i];
    }

    /* If no conflicts remain, we get the lock. */
    if (totalConflictsRemaining == 0)
    {
        PROCLOCK_PRINT("LockCheckConflicts: resolved (simple)", proclock);
        return false;
    }

    /* If no group locking, it's definitely a conflict. */
    if (proclock->groupLeader == MyProc && MyProc->lockGroupLeader == NULL)
    {
        Assert(proclock->tag.myProc == MyProc);
        PROCLOCK_PRINT("LockCheckConflicts: conflicting (simple)",
                       proclock);
        return true;
    }

    /*
     * Locks held in conflicting modes by members of our own lock group are
     * not real conflicts; we can subtract those out and see if we still have
     * a conflict.  This is O(N) in the number of processes holding or
     * awaiting locks on this object.  We could improve that by making the
     * shared memory state more complex (and larger) but it doesn't seem worth
     * it.
     */
    procLocks = &(lock->procLocks);
    otherproclock = (PROCLOCK *)
        SHMQueueNext(procLocks, procLocks, offsetof(PROCLOCK, lockLink));
    while (otherproclock != NULL)
    {
        if (proclock != otherproclock &&
            proclock->groupLeader == otherproclock->groupLeader &&
            (otherproclock->holdMask & conflictMask) != 0)
        {
            int         intersectMask = otherproclock->holdMask & conflictMask;

            for (i = 1; i <= numLockModes; i++)
            {
                if ((intersectMask & LOCKBIT_ON(i)) != 0)
                {
                    if (conflictsRemaining[i] <= 0)
                        elog(PANIC, "proclocks held do not match lock");
                    conflictsRemaining[i]--;
                    totalConflictsRemaining--;
                }
            }

            if (totalConflictsRemaining == 0)
            {
                PROCLOCK_PRINT("LockCheckConflicts: resolved (group)",
                               proclock);
                return false;
            }
        }
        otherproclock = (PROCLOCK *)
            SHMQueueNext(procLocks, &otherproclock->lockLink,
                         offsetof(PROCLOCK, lockLink));
    }

    /* Nope, it's a real conflict. */
    PROCLOCK_PRINT("LockCheckConflicts: conflicting (group)", proclock);
    return true;
}

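/*
 * Worked example (illustrative): suppose we request ShareLock while
 * lock->granted[] shows two RowExclusiveLocks, one held by ourselves and
 * one by another member of our lock group.  The grantMask test above sees
 * a conflict, but the first pass subtracts our own RowExclusiveLock
 * (conflictsRemaining[RowExclusiveLock] drops from 2 to 1) and the group
 * pass subtracts our teammate's, leaving totalConflictsRemaining == 0, so
 * the request is granted despite the apparent conflict.
 */
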
/*
 * GrantLock -- update the lock and proclock data structures to show
 *      the lock request has been granted.
 *
 * NOTE: if proc was blocked, it also needs to be removed from the wait list
 * and have its waitLock/waitProcLock fields cleared.  That's not done here.
 *
 * NOTE: the lock grant also has to be recorded in the associated LOCALLOCK
 * table entry; but since we may be awaking some other process, we can't do
 * that here; it's done by GrantLockLocal, instead.
 */
void
GrantLock(LOCK *lock, PROCLOCK *proclock, LOCKMODE lockmode)
{
    lock->nGranted++;
    lock->granted[lockmode]++;
    lock->grantMask |= LOCKBIT_ON(lockmode);
    if (lock->granted[lockmode] == lock->requested[lockmode])
        lock->waitMask &= LOCKBIT_OFF(lockmode);
    proclock->holdMask |= LOCKBIT_ON(lockmode);
    LOCK_PRINT("GrantLock", lock, lockmode);
    Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
    Assert(lock->nGranted <= lock->nRequested);
}

/*
 * UnGrantLock -- opposite of GrantLock.
 *
 * Updates the lock and proclock data structures to show that the lock
 * is no longer held nor requested by the current holder.
 *
 * Returns true if there were any waiters waiting on the lock that
 * should now be woken up with ProcLockWakeup.
 */
static bool
UnGrantLock(LOCK *lock, LOCKMODE lockmode,
            PROCLOCK *proclock, LockMethod lockMethodTable)
{
    bool        wakeupNeeded = false;

    Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
    Assert((lock->nGranted > 0) && (lock->granted[lockmode] > 0));
    Assert(lock->nGranted <= lock->nRequested);

    /*
     * fix the general lock stats
     */
    lock->nRequested--;
    lock->requested[lockmode]--;
    lock->nGranted--;
    lock->granted[lockmode]--;

    if (lock->granted[lockmode] == 0)
    {
        /* change the conflict mask.  No more of this lock type. */
        lock->grantMask &= LOCKBIT_OFF(lockmode);
    }

    LOCK_PRINT("UnGrantLock: updated", lock, lockmode);

    /*
     * We need only run ProcLockWakeup if the released lock conflicts with at
     * least one of the lock types requested by waiter(s).  Otherwise whatever
     * conflict made them wait must still exist.  NOTE: before MVCC, we could
     * skip wakeup if lock->granted[lockmode] was still positive.  But that's
     * not true anymore, because the remaining granted locks might belong to
     * some waiter, who could now be awakened because he doesn't conflict with
     * his own locks.
     */
    if (lockMethodTable->conflictTab[lockmode] & lock->waitMask)
        wakeupNeeded = true;

    /*
     * Now fix the per-proclock state.
     */
    proclock->holdMask &= LOCKBIT_OFF(lockmode);
    PROCLOCK_PRINT("UnGrantLock: updated", proclock);

    return wakeupNeeded;
}

/*
 * CleanUpLock -- clean up after releasing a lock.  We garbage-collect the
 * proclock and lock objects if possible, and call ProcLockWakeup if there
 * are remaining requests and the caller says it's OK.  (Normally, this
 * should be called after UnGrantLock, and wakeupNeeded is the result from
 * UnGrantLock.)
 *
 * The appropriate partition lock must be held at entry, and will be
 * held at exit.
 */
static void
CleanUpLock(LOCK *lock, PROCLOCK *proclock,
            LockMethod lockMethodTable, uint32 hashcode,
            bool wakeupNeeded)
{
    /*
     * If this was my last hold on this lock, delete my entry in the proclock
     * table.
     */
    if (proclock->holdMask == 0)
    {
        uint32      proclock_hashcode;

        PROCLOCK_PRINT("CleanUpLock: deleting", proclock);
        SHMQueueDelete(&proclock->lockLink);
        SHMQueueDelete(&proclock->procLink);
        proclock_hashcode = ProcLockHashCode(&proclock->tag, hashcode);
        if (!hash_search_with_hash_value(LockMethodProcLockHash,
                                         (void *) &(proclock->tag),
                                         proclock_hashcode,
                                         HASH_REMOVE,
                                         NULL))
            elog(PANIC, "proclock table corrupted");
    }

    if (lock->nRequested == 0)
    {
        /*
         * The caller just released the last lock, so garbage-collect the lock
         * object.
         */
        LOCK_PRINT("CleanUpLock: deleting", lock, 0);
        Assert(SHMQueueEmpty(&(lock->procLocks)));
        if (!hash_search_with_hash_value(LockMethodLockHash,
                                         (void *) &(lock->tag),
                                         hashcode,
                                         HASH_REMOVE,
                                         NULL))
            elog(PANIC, "lock table corrupted");
    }
    else if (wakeupNeeded)
    {
        /* There are waiters on this lock, so wake them up. */
        ProcLockWakeup(lockMethodTable, lock);
    }
}

/*
 * GrantLockLocal -- update the locallock data structures to show
 *      the lock request has been granted.
 *
 * We expect that LockAcquire made sure there is room to add a new
 * ResourceOwner entry.
 */
static void
GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner)
{
    LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
    int         i;

    Assert(locallock->numLockOwners < locallock->maxLockOwners);
    /* Count the total */
    locallock->nLocks++;
    /* Count the per-owner lock */
    for (i = 0; i < locallock->numLockOwners; i++)
    {
        if (lockOwners[i].owner == owner)
        {
            lockOwners[i].nLocks++;
            return;
        }
    }
    lockOwners[i].owner = owner;
    lockOwners[i].nLocks = 1;
    locallock->numLockOwners++;
    if (owner != NULL)
        ResourceOwnerRememberLock(owner, locallock);
}

/*
 * BeginStrongLockAcquire - inhibit use of fastpath for a given LOCALLOCK,
 * and arrange for error cleanup if it fails
 */
static void
BeginStrongLockAcquire(LOCALLOCK *locallock, uint32 fasthashcode)
{
    Assert(StrongLockInProgress == NULL);
    Assert(locallock->holdsStrongLockCount == false);

    /*
     * Adding to a memory location is not atomic, so we take a spinlock to
     * ensure we don't collide with someone else trying to bump the count at
     * the same time.
     *
     * XXX: It might be worth considering using an atomic fetch-and-add
     * instruction here, on architectures where that is supported.
     */

    SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
    FastPathStrongRelationLocks->count[fasthashcode]++;
    locallock->holdsStrongLockCount = true;
    StrongLockInProgress = locallock;
    SpinLockRelease(&FastPathStrongRelationLocks->mutex);
}

/*
 * FinishStrongLockAcquire - cancel pending cleanup for a strong lock
 * acquisition once it's no longer needed
 */
static void
FinishStrongLockAcquire(void)
{
    StrongLockInProgress = NULL;
}

/*
 * AbortStrongLockAcquire - undo strong lock state changes performed by
 * BeginStrongLockAcquire.
 */
void
AbortStrongLockAcquire(void)
{
    uint32      fasthashcode;
    LOCALLOCK  *locallock = StrongLockInProgress;

    if (locallock == NULL)
        return;

    fasthashcode = FastPathStrongLockHashPartition(locallock->hashcode);
    Assert(locallock->holdsStrongLockCount == true);
    SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
    Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
    FastPathStrongRelationLocks->count[fasthashcode]--;
    locallock->holdsStrongLockCount = false;
    StrongLockInProgress = NULL;
    SpinLockRelease(&FastPathStrongRelationLocks->mutex);
}

/*
 * GrantAwaitedLock -- call GrantLockLocal for the lock we are doing
 *      WaitOnLock on.
 *
 * proc.c needs this for the case where we are booted off the lock by
 * timeout, but discover that someone granted us the lock anyway.
 *
 * We could just export GrantLockLocal, but that would require including
 * resowner.h in lock.h, which creates circularity.
 */
void
GrantAwaitedLock(void)
{
    GrantLockLocal(awaitedLock, awaitedOwner);
}

/*
 * MarkLockClear -- mark an acquired lock as "clear"
 *
 * This means that we know we have absorbed all sinval messages that other
 * sessions generated before we acquired this lock, and so we can confidently
 * assume we know about any catalog changes protected by this lock.
 */
void
MarkLockClear(LOCALLOCK *locallock)
{
    Assert(locallock->nLocks > 0);
    locallock->lockCleared = true;
}
1711 
1712 /*
1713  * WaitOnLock -- wait to acquire a lock
1714  *
1715  * Caller must have set MyProc->heldLocks to reflect locks already held
1716  * on the lockable object by this process.
1717  *
1718  * The appropriate partition lock must be held at entry.
1719  */
1720 static void
1722 {
1723  LOCKMETHODID lockmethodid = LOCALLOCK_LOCKMETHOD(*locallock);
1724  LockMethod lockMethodTable = LockMethods[lockmethodid];
1725  char *volatile new_status = NULL;
1726 
1727  LOCK_PRINT("WaitOnLock: sleeping on lock",
1728  locallock->lock, locallock->tag.mode);
1729 
1730  /* Report change to waiting status */
1732  {
1733  const char *old_status;
1734  int len;
1735 
1736  old_status = get_ps_display(&len);
1737  new_status = (char *) palloc(len + 8 + 1);
1738  memcpy(new_status, old_status, len);
1739  strcpy(new_status + len, " waiting");
1740  set_ps_display(new_status, false);
1741  new_status[len] = '\0'; /* truncate off " waiting" */
1742  }
1743 
1744  awaitedLock = locallock;
1745  awaitedOwner = owner;
1746 
1747  /*
1748  * NOTE: Think not to put any shared-state cleanup after the call to
1749  * ProcSleep, in either the normal or failure path. The lock state must
1750  * be fully set by the lock grantor, or by CheckDeadLock if we give up
1751  * waiting for the lock. This is necessary because of the possibility
1752  * that a cancel/die interrupt will interrupt ProcSleep after someone else
1753  * grants us the lock, but before we've noticed it. Hence, after granting,
1754  * the locktable state must fully reflect the fact that we own the lock;
1755  * we can't do additional work on return.
1756  *
1757  * We can and do use a PG_TRY block to try to clean up after failure, but
1758  * this still has a major limitation: elog(FATAL) can occur while waiting
1759  * (eg, a "die" interrupt), and then control won't come back here. So all
1760  * cleanup of essential state should happen in LockErrorCleanup, not here.
1761  * We can use PG_TRY to clear the "waiting" status flags, since doing that
1762  * is unimportant if the process exits.
1763  */
1764  PG_TRY();
1765  {
1766  if (ProcSleep(locallock, lockMethodTable) != STATUS_OK)
1767  {
1768  /*
1769  * We failed as a result of a deadlock, see CheckDeadLock(). Quit
1770  * now.
1771  */
1772  awaitedLock = NULL;
1773  LOCK_PRINT("WaitOnLock: aborting on lock",
1774  locallock->lock, locallock->tag.mode);
1775  LWLockRelease(LockHashPartitionLock(locallock->hashcode));
1776 
1777  /*
1778  * Now that we aren't holding the partition lock, we can give an
1779  * error report including details about the detected deadlock.
1780  */
1781  DeadLockReport();
1782  /* not reached */
1783  }
1784  }
1785  PG_CATCH();
1786  {
1787  /* In this path, awaitedLock remains set until LockErrorCleanup */
1788 
1789  /* Report change to non-waiting status */
1790  if (new_status)
1791  {
1792  set_ps_display(new_status, false);
1793  pfree(new_status);
1794  }
1795 
1796  /* and propagate the error */
1797  PG_RE_THROW();
1798  }
1799  PG_END_TRY();
1800 
1801  awaitedLock = NULL;
1802 
1803  /* Report change to non-waiting status */
1804  if (new_status)
1805  {
1806  set_ps_display(new_status, false);
1807  pfree(new_status);
1808  }
1809 
1810  LOCK_PRINT("WaitOnLock: wakeup on lock",
1811  locallock->lock, locallock->tag.mode);
1812 }
1813 
1814 /*
1815  * Remove a proc from the wait-queue it is on (caller must know it is on one).
1816  * This is only used when the proc has failed to get the lock, so we set its
1817  * waitStatus to STATUS_ERROR.
1818  *
1819  * Appropriate partition lock must be held by caller. Also, caller is
1820  * responsible for signaling the proc if needed.
1821  *
1822  * NB: this does not clean up any locallock object that may exist for the lock.
1823  */
1824 void
1825 RemoveFromWaitQueue(PGPROC *proc, uint32 hashcode)
1826 {
1827  LOCK *waitLock = proc->waitLock;
1828  PROCLOCK *proclock = proc->waitProcLock;
1829  LOCKMODE lockmode = proc->waitLockMode;
1830  LOCKMETHODID lockmethodid = LOCK_LOCKMETHOD(*waitLock);
1831 
1832  /* Make sure proc is waiting */
1833  Assert(proc->waitStatus == STATUS_WAITING);
1834  Assert(proc->links.next != NULL);
1835  Assert(waitLock);
1836  Assert(waitLock->waitProcs.size > 0);
1837  Assert(0 < lockmethodid && lockmethodid < lengthof(LockMethods));
1838 
1839  /* Remove proc from lock's wait queue */
1840  SHMQueueDelete(&(proc->links));
1841  waitLock->waitProcs.size--;
1842 
1843  /* Undo increments of request counts by waiting process */
1844  Assert(waitLock->nRequested > 0);
1845  Assert(waitLock->nRequested > proc->waitLock->nGranted);
1846  waitLock->nRequested--;
1847  Assert(waitLock->requested[lockmode] > 0);
1848  waitLock->requested[lockmode]--;
1849  /* don't forget to clear waitMask bit if appropriate */
1850  if (waitLock->granted[lockmode] == waitLock->requested[lockmode])
1851  waitLock->waitMask &= LOCKBIT_OFF(lockmode);
1852 
1853  /* Clean up the proc's own state, and pass it the ok/fail signal */
1854  proc->waitLock = NULL;
1855  proc->waitProcLock = NULL;
1856  proc->waitStatus = STATUS_ERROR;
1857 
1858  /*
1859  * Delete the proclock immediately if it represents no already-held locks.
1860  * (This must happen now because if the owner of the lock decides to
1861  * release it, and the requested/granted counts then go to zero,
1862  * LockRelease expects there to be no remaining proclocks.) Then see if
1863  * any other waiters for the lock can be woken up now.
1864  */
1865  CleanUpLock(waitLock, proclock,
1866  LockMethods[lockmethodid], hashcode,
1867  true);
1868 }
1869 
1870 /*
1871  * LockRelease -- look up 'locktag' and release one 'lockmode' lock on it.
1872  * Release a session lock if 'sessionLock' is true, else release a
1873  * regular transaction lock.
1874  *
1875  * Side Effects: find any waiting processes that are now wakable,
1876  * grant them their requested locks and awaken them.
1877  * (We have to grant the lock here to avoid a race between
1878  * the waking process and any new process to
1879  * come along and request the lock.)
1880  */
1881 bool
1882 LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
1883 {
1884  LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
1885  LockMethod lockMethodTable;
1886  LOCALLOCKTAG localtag;
1887  LOCALLOCK *locallock;
1888  LOCK *lock;
1889  PROCLOCK *proclock;
1890  LWLock *partitionLock;
1891  bool wakeupNeeded;
1892 
1893  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
1894  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
1895  lockMethodTable = LockMethods[lockmethodid];
1896  if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
1897  elog(ERROR, "unrecognized lock mode: %d", lockmode);
1898 
1899 #ifdef LOCK_DEBUG
1900  if (LOCK_DEBUG_ENABLED(locktag))
1901  elog(LOG, "LockRelease: lock [%u,%u] %s",
1902  locktag->locktag_field1, locktag->locktag_field2,
1903  lockMethodTable->lockModeNames[lockmode]);
1904 #endif
1905 
1906  /*
1907  * Find the LOCALLOCK entry for this lock and lockmode
1908  */
1909  MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
1910  localtag.lock = *locktag;
1911  localtag.mode = lockmode;
1912 
1913  locallock = (LOCALLOCK *) hash_search(LockMethodLocalHash,
1914  (void *) &localtag,
1915  HASH_FIND, NULL);
1916 
1917  /*
1918  * let the caller print its own error message, too. Do not ereport(ERROR).
1919  */
1920  if (!locallock || locallock->nLocks <= 0)
1921  {
1922  elog(WARNING, "you don't own a lock of type %s",
1923  lockMethodTable->lockModeNames[lockmode]);
1924  return false;
1925  }
1926 
1927  /*
1928  * Decrease the count for the resource owner.
1929  */
1930  {
1931  LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
1932  ResourceOwner owner;
1933  int i;
1934 
1935  /* Identify owner for lock */
1936  if (sessionLock)
1937  owner = NULL;
1938  else
1939  owner = CurrentResourceOwner;
1940 
1941  for (i = locallock->numLockOwners - 1; i >= 0; i--)
1942  {
1943  if (lockOwners[i].owner == owner)
1944  {
1945  Assert(lockOwners[i].nLocks > 0);
1946  if (--lockOwners[i].nLocks == 0)
1947  {
1948  if (owner != NULL)
1949  ResourceOwnerForgetLock(owner, locallock);
1950  /* compact out unused slot */
1951  locallock->numLockOwners--;
1952  if (i < locallock->numLockOwners)
1953  lockOwners[i] = lockOwners[locallock->numLockOwners];
1954  }
1955  break;
1956  }
1957  }
1958  if (i < 0)
1959  {
1960  /* don't release a lock belonging to another owner */
1961  elog(WARNING, "you don't own a lock of type %s",
1962  lockMethodTable->lockModeNames[lockmode]);
1963  return false;
1964  }
1965  }
1966 
1967  /*
1968  * Decrease the total local count. If we're still holding the lock, we're
1969  * done.
1970  */
1971  locallock->nLocks--;
1972 
1973  if (locallock->nLocks > 0)
1974  return true;
1975 
1976  /*
1977  * At this point we can no longer suppose we are clear of invalidation
1978  * messages related to this lock. Although we'll delete the LOCALLOCK
1979  * object before any intentional return from this routine, it seems worth
1980  * the trouble to explicitly reset lockCleared right now, just in case
1981  * some error prevents us from deleting the LOCALLOCK.
1982  */
1983  locallock->lockCleared = false;
1984 
1985  /* Attempt fast release of any lock eligible for the fast path. */
1986  if (EligibleForRelationFastPath(locktag, lockmode) &&
1987  FastPathLocalUseCount > 0)
1988  {
1989  bool released;
1990 
1991  /*
1992  * We might not find the lock here, even if we originally entered it
1993  * here. Another backend may have moved it to the main table.
1994  */
1995  LWLockAcquire(&MyProc->backendLock, LW_EXCLUSIVE);
1996  released = FastPathUnGrantRelationLock(locktag->locktag_field2,
1997  lockmode);
1998  LWLockRelease(&MyProc->backendLock);
1999  if (released)
2000  {
2001  RemoveLocalLock(locallock);
2002  return true;
2003  }
2004  }
2005 
2006  /*
2007  * Otherwise we've got to mess with the shared lock table.
2008  */
2009  partitionLock = LockHashPartitionLock(locallock->hashcode);
2010 
2011  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2012 
2013  /*
2014  * Normally, we don't need to re-find the lock or proclock, since we kept
2015  * their addresses in the locallock table, and they couldn't have been
2016  * removed while we were holding a lock on them. But it's possible that
2017  * the lock was taken fast-path and has since been moved to the main hash
2018  * table by another backend, in which case we will need to look up the
2019  * objects here. We assume the lock field is NULL if so.
2020  */
2021  lock = locallock->lock;
2022  if (!lock)
2023  {
2024  PROCLOCKTAG proclocktag;
2025 
2026  Assert(EligibleForRelationFastPath(locktag, lockmode));
2027  lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
2028  (const void *) locktag,
2029  locallock->hashcode,
2030  HASH_FIND,
2031  NULL);
2032  if (!lock)
2033  elog(ERROR, "failed to re-find shared lock object");
2034  locallock->lock = lock;
2035 
2036  proclocktag.myLock = lock;
2037  proclocktag.myProc = MyProc;
2038  locallock->proclock = (PROCLOCK *) hash_search(LockMethodProcLockHash,
2039  (void *) &proclocktag,
2040  HASH_FIND,
2041  NULL);
2042  if (!locallock->proclock)
2043  elog(ERROR, "failed to re-find shared proclock object");
2044  }
2045  LOCK_PRINT("LockRelease: found", lock, lockmode);
2046  proclock = locallock->proclock;
2047  PROCLOCK_PRINT("LockRelease: found", proclock);
2048 
2049  /*
2050  * Double-check that we are actually holding a lock of the type we want to
2051  * release.
2052  */
2053  if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
2054  {
2055  PROCLOCK_PRINT("LockRelease: WRONGTYPE", proclock);
2056  LWLockRelease(partitionLock);
2057  elog(WARNING, "you don't own a lock of type %s",
2058  lockMethodTable->lockModeNames[lockmode]);
2059  RemoveLocalLock(locallock);
2060  return false;
2061  }
2062 
2063  /*
2064  * Do the releasing. CleanUpLock will waken any now-wakable waiters.
2065  */
2066  wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
2067 
2068  CleanUpLock(lock, proclock,
2069  lockMethodTable, locallock->hashcode,
2070  wakeupNeeded);
2071 
2072  LWLockRelease(partitionLock);
2073 
2074  RemoveLocalLock(locallock);
2075  return true;
2076 }
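/*
 * Example (editorial sketch, not part of lock.c): a minimal acquire/release
 * round trip against this API.  Real backend code normally goes through the
 * lmgr.c wrappers (LockRelationOid/UnlockRelationOid and friends) rather
 * than building LOCKTAGs by hand.  The function name is hypothetical.
 */
#include "postgres.h"
#include "miscadmin.h"
#include "storage/lock.h"

static void
lock_roundtrip_sketch(Oid relid)
{
	LOCKTAG		tag;

	SET_LOCKTAG_RELATION(tag, MyDatabaseId, relid);
	(void) LockAcquire(&tag, AccessShareLock, false, false);

	/* ... inspect the relation while it cannot be dropped ... */

	if (!LockRelease(&tag, AccessShareLock, false))
		elog(WARNING, "AccessShareLock on relation was not held");
}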
2077 
2078 /*
2079  * LockReleaseAll -- Release all locks of the specified lock method that
2080  * are held by the current process.
2081  *
2082  * Well, not necessarily *all* locks. The available behaviors are:
2083  * allLocks == true: release all locks including session locks.
2084  * allLocks == false: release all non-session locks.
2085  */
2086 void
2087 LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
2088 {
2089  HASH_SEQ_STATUS status;
2090  LockMethod lockMethodTable;
2091  int i,
2092  numLockModes;
2093  LOCALLOCK *locallock;
2094  LOCK *lock;
2095  PROCLOCK *proclock;
2096  int partition;
2097  bool have_fast_path_lwlock = false;
2098 
2099  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2100  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2101  lockMethodTable = LockMethods[lockmethodid];
2102 
2103 #ifdef LOCK_DEBUG
2104  if (*(lockMethodTable->trace_flag))
2105  elog(LOG, "LockReleaseAll: lockmethod=%d", lockmethodid);
2106 #endif
2107 
2108  /*
2109  * Get rid of our fast-path VXID lock, if appropriate. Note that this is
2110  * the only way that the lock we hold on our own VXID can ever get
2111  * released: it is always and only released when a toplevel transaction
2112  * ends.
2113  */
2114  if (lockmethodid == DEFAULT_LOCKMETHOD)
2115  VirtualXactLockTableCleanup();
2116 
2117  numLockModes = lockMethodTable->numLockModes;
2118 
2119  /*
2120  * First we run through the locallock table and get rid of unwanted
2121  * entries, then we scan the process's proclocks and get rid of those. We
2122  * do this separately because we may have multiple locallock entries
2123  * pointing to the same proclock, and we daren't end up with any dangling
2124  * pointers. Fast-path locks are cleaned up during the locallock table
2125  * scan, though.
2126  */
2127  hash_seq_init(&status, LockMethodLocalHash);
2128 
2129  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2130  {
2131  /*
2132  * If the LOCALLOCK entry is unused, we must've run out of shared
2133  * memory while trying to set up this lock. Just forget the local
2134  * entry.
2135  */
2136  if (locallock->nLocks == 0)
2137  {
2138  RemoveLocalLock(locallock);
2139  continue;
2140  }
2141 
2142  /* Ignore items that are not of the lockmethod to be removed */
2143  if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
2144  continue;
2145 
2146  /*
2147  * If we are asked to release all locks, we can just zap the entry.
2148  * Otherwise, must scan to see if there are session locks. We assume
2149  * there is at most one lockOwners entry for session locks.
2150  */
2151  if (!allLocks)
2152  {
2153  LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
2154 
2155  /* If session lock is above array position 0, move it down to 0 */
2156  for (i = 0; i < locallock->numLockOwners; i++)
2157  {
2158  if (lockOwners[i].owner == NULL)
2159  lockOwners[0] = lockOwners[i];
2160  else
2161  ResourceOwnerForgetLock(lockOwners[i].owner, locallock);
2162  }
2163 
2164  if (locallock->numLockOwners > 0 &&
2165  lockOwners[0].owner == NULL &&
2166  lockOwners[0].nLocks > 0)
2167  {
2168  /* Fix the locallock to show just the session locks */
2169  locallock->nLocks = lockOwners[0].nLocks;
2170  locallock->numLockOwners = 1;
2171  /* We aren't deleting this locallock, so done */
2172  continue;
2173  }
2174  else
2175  locallock->numLockOwners = 0;
2176  }
2177 
2178  /*
2179  * If the lock or proclock pointers are NULL, this lock was taken via
2180  * the relation fast-path (and is not known to have been transferred).
2181  */
2182  if (locallock->proclock == NULL || locallock->lock == NULL)
2183  {
2184  LOCKMODE lockmode = locallock->tag.mode;
2185  Oid relid;
2186 
2187  /* Verify that a fast-path lock is what we've got. */
2188  if (!EligibleForRelationFastPath(&locallock->tag.lock, lockmode))
2189  elog(PANIC, "locallock table corrupted");
2190 
2191  /*
2192  * If we don't currently hold the LWLock that protects our
2193  * fast-path data structures, we must acquire it before attempting
2194  * to release the lock via the fast-path. We will continue to
2195  * hold the LWLock until we're done scanning the locallock table,
2196  * unless we hit a transferred fast-path lock. (XXX is this
2197  * really such a good idea? There could be a lot of entries ...)
2198  */
2199  if (!have_fast_path_lwlock)
2200  {
2201  LWLockAcquire(&MyProc->backendLock, LW_EXCLUSIVE);
2202  have_fast_path_lwlock = true;
2203  }
2204 
2205  /* Attempt fast-path release. */
2206  relid = locallock->tag.lock.locktag_field2;
2207  if (FastPathUnGrantRelationLock(relid, lockmode))
2208  {
2209  RemoveLocalLock(locallock);
2210  continue;
2211  }
2212 
2213  /*
2214  * Our lock, originally taken via the fast path, has been
2215  * transferred to the main lock table. That's going to require
2216  * some extra work, so release our fast-path lock before starting.
2217  */
2218  LWLockRelease(&MyProc->backendLock);
2219  have_fast_path_lwlock = false;
2220 
2221  /*
2222  * Now dump the lock. We haven't got a pointer to the LOCK or
2223  * PROCLOCK in this case, so we have to handle this a bit
2224  * differently than a normal lock release. Unfortunately, this
2225  * requires an extra LWLock acquire-and-release cycle on the
2226  * partitionLock, but hopefully it shouldn't happen often.
2227  */
2228  LockRefindAndRelease(lockMethodTable, MyProc,
2229  &locallock->tag.lock, lockmode, false);
2230  RemoveLocalLock(locallock);
2231  continue;
2232  }
2233 
2234  /* Mark the proclock to show we need to release this lockmode */
2235  if (locallock->nLocks > 0)
2236  locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
2237 
2238  /* And remove the locallock hashtable entry */
2239  RemoveLocalLock(locallock);
2240  }
2241 
2242  /* Done with the fast-path data structures */
2243  if (have_fast_path_lwlock)
2244  LWLockRelease(&MyProc->backendLock);
2245 
2246  /*
2247  * Now, scan each lock partition separately.
2248  */
2249  for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
2250  {
2251  LWLock *partitionLock;
2252  SHM_QUEUE *procLocks = &(MyProc->myProcLocks[partition]);
2253  PROCLOCK *nextplock;
2254 
2255  partitionLock = LockHashPartitionLockByIndex(partition);
2256 
2257  /*
2258  * If the proclock list for this partition is empty, we can skip
2259  * acquiring the partition lock. This optimization is trickier than
2260  * it looks, because another backend could be in process of adding
2261  * something to our proclock list due to promoting one of our
2262  * fast-path locks. However, any such lock must be one that we
2263  * decided not to delete above, so it's okay to skip it again now;
2264  * we'd just decide not to delete it again. We must, however, be
2265  * careful to re-fetch the list header once we've acquired the
2266  * partition lock, to be sure we have a valid, up-to-date pointer.
2267  * (There is probably no significant risk if pointer fetch/store is
2268  * atomic, but we don't wish to assume that.)
2269  *
2270  * XXX This argument assumes that the locallock table correctly
2271  * represents all of our fast-path locks. While allLocks mode
2272  * guarantees to clean up all of our normal locks regardless of the
2273  * locallock situation, we lose that guarantee for fast-path locks.
2274  * This is not ideal.
2275  */
2276  if (SHMQueueNext(procLocks, procLocks,
2277  offsetof(PROCLOCK, procLink)) == NULL)
2278  continue; /* needn't examine this partition */
2279 
2280  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2281 
2282  for (proclock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks,
2283  offsetof(PROCLOCK, procLink));
2284  proclock;
2285  proclock = nextplock)
2286  {
2287  bool wakeupNeeded = false;
2288 
2289  /* Get link first, since we may unlink/delete this proclock */
2290  nextplock = (PROCLOCK *)
2291  SHMQueueNext(procLocks, &proclock->procLink,
2292  offsetof(PROCLOCK, procLink));
2293 
2294  Assert(proclock->tag.myProc == MyProc);
2295 
2296  lock = proclock->tag.myLock;
2297 
2298  /* Ignore items that are not of the lockmethod to be removed */
2299  if (LOCK_LOCKMETHOD(*lock) != lockmethodid)
2300  continue;
2301 
2302  /*
2303  * In allLocks mode, force release of all locks even if locallock
2304  * table had problems
2305  */
2306  if (allLocks)
2307  proclock->releaseMask = proclock->holdMask;
2308  else
2309  Assert((proclock->releaseMask & ~proclock->holdMask) == 0);
2310 
2311  /*
2312  * Ignore items that have nothing to be released, unless they have
2313  * holdMask == 0 and are therefore recyclable
2314  */
2315  if (proclock->releaseMask == 0 && proclock->holdMask != 0)
2316  continue;
2317 
2318  PROCLOCK_PRINT("LockReleaseAll", proclock);
2319  LOCK_PRINT("LockReleaseAll", lock, 0);
2320  Assert(lock->nRequested >= 0);
2321  Assert(lock->nGranted >= 0);
2322  Assert(lock->nGranted <= lock->nRequested);
2323  Assert((proclock->holdMask & ~lock->grantMask) == 0);
2324 
2325  /*
2326  * Release the previously-marked lock modes
2327  */
2328  for (i = 1; i <= numLockModes; i++)
2329  {
2330  if (proclock->releaseMask & LOCKBIT_ON(i))
2331  wakeupNeeded |= UnGrantLock(lock, i, proclock,
2332  lockMethodTable);
2333  }
2334  Assert((lock->nRequested >= 0) && (lock->nGranted >= 0));
2335  Assert(lock->nGranted <= lock->nRequested);
2336  LOCK_PRINT("LockReleaseAll: updated", lock, 0);
2337 
2338  proclock->releaseMask = 0;
2339 
2340  /* CleanUpLock will wake up waiters if needed. */
2341  CleanUpLock(lock, proclock,
2342  lockMethodTable,
2343  LockTagHashCode(&lock->tag),
2344  wakeupNeeded);
2345  } /* loop over PROCLOCKs within this partition */
2346 
2347  LWLockRelease(partitionLock);
2348  } /* loop over partitions */
2349 
2350 #ifdef LOCK_DEBUG
2351  if (*(lockMethodTable->trace_flag))
2352  elog(LOG, "LockReleaseAll done");
2353 #endif
2354 }
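/*
 * Example (editorial sketch, not part of lock.c): roughly how proc.c's
 * ProcReleaseLocks() drives this function at the end of a transaction.
 * Session locks survive a commit but are thrown away on abort along with
 * everything else; transaction-level advisory locks go in a second pass.
 * The wrapper name here is hypothetical.
 */
#include "postgres.h"
#include "storage/lock.h"

static void
release_at_xact_end_sketch(bool isCommit)
{
	/* Regular locks; on abort, session locks are dropped too */
	LockReleaseAll(DEFAULT_LOCKMETHOD, !isCommit);
	/* Transaction-level advisory locks, but never session-level ones */
	LockReleaseAll(USER_LOCKMETHOD, false);
}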
2355 
2356 /*
2357  * LockReleaseSession -- Release all session locks of the specified lock method
2358  * that are held by the current process.
2359  */
2360 void
2361 LockReleaseSession(LOCKMETHODID lockmethodid)
2362 {
2363  HASH_SEQ_STATUS status;
2364  LOCALLOCK *locallock;
2365 
2366  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2367  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2368 
2369  hash_seq_init(&status, LockMethodLocalHash);
2370 
2371  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2372  {
2373  /* Ignore items that are not of the specified lock method */
2374  if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
2375  continue;
2376 
2377  ReleaseLockIfHeld(locallock, true);
2378  }
2379 }
2380 
2381 /*
2382  * LockReleaseCurrentOwner
2383  * Release all locks belonging to CurrentResourceOwner
2384  *
2385  * If the caller knows what those locks are, it can pass them as an array.
2386  * That speeds up the call significantly, when a lot of locks are held.
2387  * Otherwise, pass NULL for locallocks, and we'll traverse through our hash
2388  * table to find them.
2389  */
2390 void
2391 LockReleaseCurrentOwner(LOCALLOCK **locallocks, int nlocks)
2392 {
2393  if (locallocks == NULL)
2394  {
2395  HASH_SEQ_STATUS status;
2396  LOCALLOCK *locallock;
2397 
2398  hash_seq_init(&status, LockMethodLocalHash);
2399 
2400  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2401  ReleaseLockIfHeld(locallock, false);
2402  }
2403  else
2404  {
2405  int i;
2406 
2407  for (i = nlocks - 1; i >= 0; i--)
2408  ReleaseLockIfHeld(locallocks[i], false);
2409  }
2410 }
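/*
 * Example (editorial sketch, not part of lock.c): how resowner.c is
 * expected to drive this function and LockReassignCurrentOwner() below.
 * At subtransaction abort the child owner's locks are released outright;
 * at subtransaction commit they are handed up to the parent instead.  The
 * locallocks array is purely an optimization: pass the remembered locks
 * when the owner tracked few enough of them, or NULL to force a scan of
 * the backend's locallock hash table.  The function name is hypothetical.
 */
#include "postgres.h"
#include "storage/lock.h"

static void
subxact_end_locks_sketch(bool isCommit, LOCALLOCK **locallocks, int nlocks)
{
	if (isCommit)
		LockReassignCurrentOwner(locallocks, nlocks);
	else
		LockReleaseCurrentOwner(locallocks, nlocks);
}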
2411 
2412 /*
2413  * ReleaseLockIfHeld
2414  * Release any session-level locks on this lockable object if sessionLock
2415  * is true; else, release any locks held by CurrentResourceOwner.
2416  *
2417  * It is tempting to pass this a ResourceOwner pointer (or NULL for session
2418  * locks), but without refactoring LockRelease() we cannot support releasing
2419  * locks belonging to resource owners other than CurrentResourceOwner.
2420  * If we were to refactor, it'd be a good idea to fix it so we don't have to
2421  * do a hashtable lookup of the locallock, too. However, currently this
2422  * function isn't used heavily enough to justify refactoring for its
2423  * convenience.
2424  */
2425 static void
2426 ReleaseLockIfHeld(LOCALLOCK *locallock, bool sessionLock)
2427 {
2428  ResourceOwner owner;
2429  LOCALLOCKOWNER *lockOwners;
2430  int i;
2431 
2432  /* Identify owner for lock (must match LockRelease!) */
2433  if (sessionLock)
2434  owner = NULL;
2435  else
2436  owner = CurrentResourceOwner;
2437 
2438  /* Scan to see if there are any locks belonging to the target owner */
2439  lockOwners = locallock->lockOwners;
2440  for (i = locallock->numLockOwners - 1; i >= 0; i--)
2441  {
2442  if (lockOwners[i].owner == owner)
2443  {
2444  Assert(lockOwners[i].nLocks > 0);
2445  if (lockOwners[i].nLocks < locallock->nLocks)
2446  {
2447  /*
2448  * We will still hold this lock after forgetting this
2449  * ResourceOwner.
2450  */
2451  locallock->nLocks -= lockOwners[i].nLocks;
2452  /* compact out unused slot */
2453  locallock->numLockOwners--;
2454  if (owner != NULL)
2455  ResourceOwnerForgetLock(owner, locallock);
2456  if (i < locallock->numLockOwners)
2457  lockOwners[i] = lockOwners[locallock->numLockOwners];
2458  }
2459  else
2460  {
2461  Assert(lockOwners[i].nLocks == locallock->nLocks);
2462  /* We want to call LockRelease just once */
2463  lockOwners[i].nLocks = 1;
2464  locallock->nLocks = 1;
2465  if (!LockRelease(&locallock->tag.lock,
2466  locallock->tag.mode,
2467  sessionLock))
2468  elog(WARNING, "ReleaseLockIfHeld: failed??");
2469  }
2470  break;
2471  }
2472  }
2473 }
2474 
2475 /*
2476  * LockReassignCurrentOwner
2477  * Reassign all locks belonging to CurrentResourceOwner to belong
2478  * to its parent resource owner.
2479  *
2480  * If the caller knows what those locks are, it can pass them as an array.
2481  * That speeds up the call significantly, when a lot of locks are held
2482  * (e.g. pg_dump with a large schema). Otherwise, pass NULL for locallocks,
2483  * and we'll traverse through our hash table to find them.
2484  */
2485 void
2486 LockReassignCurrentOwner(LOCALLOCK **locallocks, int nlocks)
2487 {
2488  ResourceOwner parent = ResourceOwnerGetParent(CurrentResourceOwner);
2489 
2490  Assert(parent != NULL);
2491 
2492  if (locallocks == NULL)
2493  {
2494  HASH_SEQ_STATUS status;
2495  LOCALLOCK *locallock;
2496 
2497  hash_seq_init(&status, LockMethodLocalHash);
2498 
2499  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
2500  LockReassignOwner(locallock, parent);
2501  }
2502  else
2503  {
2504  int i;
2505 
2506  for (i = nlocks - 1; i >= 0; i--)
2507  LockReassignOwner(locallocks[i], parent);
2508  }
2509 }
2510 
2511 /*
2512  * Subroutine of LockReassignCurrentOwner. Reassigns a given lock belonging to
2513  * CurrentResourceOwner to its parent.
2514  */
2515 static void
2516 LockReassignOwner(LOCALLOCK *locallock, ResourceOwner parent)
2517 {
2518  LOCALLOCKOWNER *lockOwners;
2519  int i;
2520  int ic = -1;
2521  int ip = -1;
2522 
2523  /*
2524  * Scan to see if there are any locks belonging to current owner or its
2525  * parent
2526  */
2527  lockOwners = locallock->lockOwners;
2528  for (i = locallock->numLockOwners - 1; i >= 0; i--)
2529  {
2530  if (lockOwners[i].owner == CurrentResourceOwner)
2531  ic = i;
2532  else if (lockOwners[i].owner == parent)
2533  ip = i;
2534  }
2535 
2536  if (ic < 0)
2537  return; /* no current locks */
2538 
2539  if (ip < 0)
2540  {
2541  /* Parent has no slot, so just give it the child's slot */
2542  lockOwners[ic].owner = parent;
2543  ResourceOwnerRememberLock(parent, locallock);
2544  }
2545  else
2546  {
2547  /* Merge child's count with parent's */
2548  lockOwners[ip].nLocks += lockOwners[ic].nLocks;
2549  /* compact out unused slot */
2550  locallock->numLockOwners--;
2551  if (ic < locallock->numLockOwners)
2552  lockOwners[ic] = lockOwners[locallock->numLockOwners];
2553  }
2554  ResourceOwnerForgetLock(CurrentResourceOwner, locallock);
2555 }
2556 
2557 /*
2558  * FastPathGrantRelationLock
2559  * Grant lock using per-backend fast-path array, if there is space.
2560  */
2561 static bool
2562 FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode)
2563 {
2564  uint32 f;
2565  uint32 unused_slot = FP_LOCK_SLOTS_PER_BACKEND;
2566 
2567  /* Scan for existing entry for this relid, remembering empty slot. */
2568  for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
2569  {
2570  if (FAST_PATH_GET_BITS(MyProc, f) == 0)
2571  unused_slot = f;
2572  else if (MyProc->fpRelId[f] == relid)
2573  {
2574  Assert(!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode));
2575  FAST_PATH_SET_LOCKMODE(MyProc, f, lockmode);
2576  return true;
2577  }
2578  }
2579 
2580  /* If no existing entry, use any empty slot. */
2581  if (unused_slot < FP_LOCK_SLOTS_PER_BACKEND)
2582  {
2583  MyProc->fpRelId[unused_slot] = relid;
2584  FAST_PATH_SET_LOCKMODE(MyProc, unused_slot, lockmode);
2585  ++FastPathLocalUseCount;
2586  return true;
2587  }
2588 
2589  /* No existing entry, and no empty slot. */
2590  return false;
2591 }
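/*
 * Example (editorial sketch, not part of lock.c): a standalone model of the
 * bit layout behind the FAST_PATH_* macros used above.  Each of the 16
 * fpRelId[] slots owns a 3-bit group in the 64-bit fpLockBits word, one bit
 * per fast-path-eligible mode (AccessShareLock, RowShareLock,
 * RowExclusiveLock); a group of zero means the slot is free.  The MODEL_*
 * constants mirror FP_LOCK_SLOTS_PER_BACKEND and FAST_PATH_BITS_PER_SLOT.
 */
#include <stdint.h>
#include <stdbool.h>

#define MODEL_SLOTS			16
#define MODEL_BITS_PER_SLOT	3

static inline uint32_t
model_get_bits(uint64_t fpLockBits, int slot)
{
	/* cf. FAST_PATH_GET_BITS: extract this slot's 3-bit mode group */
	return (uint32_t) (fpLockBits >> (MODEL_BITS_PER_SLOT * slot)) &
		((1u << MODEL_BITS_PER_SLOT) - 1);
}

static inline bool
model_slot_is_free(uint64_t fpLockBits, int slot)
{
	return model_get_bits(fpLockBits, slot) == 0;
}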
2592 
2593 /*
2594  * FastPathUnGrantRelationLock
2595  * Release fast-path lock, if present. Update backend-private local
2596  * use count, while we're at it.
2597  */
2598 static bool
2599 FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode)
2600 {
2601  uint32 f;
2602  bool result = false;
2603 
2604  FastPathLocalUseCount = 0;
2605  for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
2606  {
2607  if (MyProc->fpRelId[f] == relid
2608  && FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
2609  {
2610  Assert(!result);
2611  FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
2612  result = true;
2613  /* we continue iterating so as to update FastPathLocalUseCount */
2614  }
2615  if (FAST_PATH_GET_BITS(MyProc, f) != 0)
2616  ++FastPathLocalUseCount;
2617  }
2618  return result;
2619 }
2620 
2621 /*
2622  * FastPathTransferRelationLocks
2623  * Transfer locks matching the given lock tag from per-backend fast-path
2624  * arrays to the shared hash table.
2625  *
2626  * Returns true if successful, false if ran out of shared memory.
2627  */
2628 static bool
2629 FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag,
2630  uint32 hashcode)
2631 {
2632  LWLock *partitionLock = LockHashPartitionLock(hashcode);
2633  Oid relid = locktag->locktag_field2;
2634  uint32 i;
2635 
2636  /*
2637  * Every PGPROC that can potentially hold a fast-path lock is present in
2638  * ProcGlobal->allProcs. Prepared transactions are not, but any
2639  * outstanding fast-path locks held by prepared transactions are
2640  * transferred to the main lock table.
2641  */
2642  for (i = 0; i < ProcGlobal->allProcCount; i++)
2643  {
2644  PGPROC *proc = &ProcGlobal->allProcs[i];
2645  uint32 f;
2646 
2647  LWLockAcquire(&proc->backendLock, LW_EXCLUSIVE);
2648 
2649  /*
2650  * If the target backend isn't referencing the same database as the
2651  * lock, then we needn't examine the individual relation IDs at all;
2652  * none of them can be relevant.
2653  *
2654  * proc->databaseId is set at backend startup time and never changes
2655  * thereafter, so it might be safe to perform this test before
2656  * acquiring &proc->backendLock. In particular, it's certainly safe
2657  * to assume that if the target backend holds any fast-path locks, it
2658  * must have performed a memory-fencing operation (in particular, an
2659  * LWLock acquisition) since setting proc->databaseId. However, it's
2660  * less clear that our backend is certain to have performed a memory
2661  * fencing operation since the other backend set proc->databaseId. So
2662  * for now, we test it after acquiring the LWLock just to be safe.
2663  */
2664  if (proc->databaseId != locktag->locktag_field1)
2665  {
2666  LWLockRelease(&proc->backendLock);
2667  continue;
2668  }
2669 
2670  for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
2671  {
2672  uint32 lockmode;
2673 
2674  /* Look for an allocated slot matching the given relid. */
2675  if (relid != proc->fpRelId[f] || FAST_PATH_GET_BITS(proc, f) == 0)
2676  continue;
2677 
2678  /* Find or create lock object. */
2679  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2680  for (lockmode = FAST_PATH_LOCKNUMBER_OFFSET;
2681  lockmode < FAST_PATH_LOCKNUMBER_OFFSET + FAST_PATH_BITS_PER_SLOT;
2682  ++lockmode)
2683  {
2684  PROCLOCK *proclock;
2685 
2686  if (!FAST_PATH_CHECK_LOCKMODE(proc, f, lockmode))
2687  continue;
2688  proclock = SetupLockInTable(lockMethodTable, proc, locktag,
2689  hashcode, lockmode);
2690  if (!proclock)
2691  {
2692  LWLockRelease(partitionLock);
2693  LWLockRelease(&proc->backendLock);
2694  return false;
2695  }
2696  GrantLock(proclock->tag.myLock, proclock, lockmode);
2697  FAST_PATH_CLEAR_LOCKMODE(proc, f, lockmode);
2698  }
2699  LWLockRelease(partitionLock);
2700 
2701  /* No need to examine remaining slots. */
2702  break;
2703  }
2704  LWLockRelease(&proc->backendLock);
2705  }
2706  return true;
2707 }
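/*
 * Example (editorial sketch, not part of lock.c): the mode split that makes
 * this transfer necessary.  Only the three "weak" relation lock modes ride
 * the fast path; a backend acquiring a "strong" (conflicting) mode first
 * bumps the FastPathStrongRelationLocks counter for the tag and then calls
 * FastPathTransferRelationLocks() so existing weak holders become visible
 * in the main hash table.  A standalone model of the classification, using
 * the standard mode numbering (AccessShareLock = 1, ...,
 * ShareUpdateExclusiveLock = 4, ..., AccessExclusiveLock = 8):
 */
#include <stdbool.h>

enum { MODEL_SHARE_UPDATE_EXCLUSIVE = 4 };

/* Weak: taken via the per-backend fast path when no strong lock exists */
static bool
model_mode_is_weak(int lockmode)
{
	return lockmode > 0 && lockmode < MODEL_SHARE_UPDATE_EXCLUSIVE;
}

/* Strong: forces a transfer and blocks new fast-path grants for the tag */
static bool
model_mode_is_strong(int lockmode)
{
	return lockmode > MODEL_SHARE_UPDATE_EXCLUSIVE;
}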
2708 
2709 /*
2710  * FastPathGetRelationLockEntry
2711  * Return the PROCLOCK for a lock originally taken via the fast-path,
2712  * transferring it to the primary lock table if necessary.
2713  *
2714  * Note: caller takes care of updating the locallock object.
2715  */
2716 static PROCLOCK *
2717 FastPathGetRelationLockEntry(LOCALLOCK *locallock)
2718 {
2719  LockMethod lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];
2720  LOCKTAG *locktag = &locallock->tag.lock;
2721  PROCLOCK *proclock = NULL;
2722  LWLock *partitionLock = LockHashPartitionLock(locallock->hashcode);
2723  Oid relid = locktag->locktag_field2;
2724  uint32 f;
2725 
2726  LWLockAcquire(&MyProc->backendLock, LW_EXCLUSIVE);
2727 
2728  for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
2729  {
2730  uint32 lockmode;
2731 
2732  /* Look for an allocated slot matching the given relid. */
2733  if (relid != MyProc->fpRelId[f] || FAST_PATH_GET_BITS(MyProc, f) == 0)
2734  continue;
2735 
2736  /* If we don't have a lock of the given mode, forget it! */
2737  lockmode = locallock->tag.mode;
2738  if (!FAST_PATH_CHECK_LOCKMODE(MyProc, f, lockmode))
2739  break;
2740 
2741  /* Find or create lock object. */
2742  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
2743 
2744  proclock = SetupLockInTable(lockMethodTable, MyProc, locktag,
2745  locallock->hashcode, lockmode);
2746  if (!proclock)
2747  {
2748  LWLockRelease(partitionLock);
2749  LWLockRelease(&MyProc->backendLock);
2750  ereport(ERROR,
2751  (errcode(ERRCODE_OUT_OF_MEMORY),
2752  errmsg("out of shared memory"),
2753  errhint("You might need to increase max_locks_per_transaction.")));
2754  }
2755  GrantLock(proclock->tag.myLock, proclock, lockmode);
2756  FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
2757 
2758  LWLockRelease(partitionLock);
2759 
2760  /* No need to examine remaining slots. */
2761  break;
2762  }
2763 
2764  LWLockRelease(&MyProc->backendLock);
2765 
2766  /* Lock may have already been transferred by some other backend. */
2767  if (proclock == NULL)
2768  {
2769  LOCK *lock;
2770  PROCLOCKTAG proclocktag;
2771  uint32 proclock_hashcode;
2772 
2773  LWLockAcquire(partitionLock, LW_SHARED);
2774 
2775  lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
2776  (void *) locktag,
2777  locallock->hashcode,
2778  HASH_FIND,
2779  NULL);
2780  if (!lock)
2781  elog(ERROR, "failed to re-find shared lock object");
2782 
2783  proclocktag.myLock = lock;
2784  proclocktag.myProc = MyProc;
2785 
2786  proclock_hashcode = ProcLockHashCode(&proclocktag, locallock->hashcode);
2787  proclock = (PROCLOCK *)
2788  hash_search_with_hash_value(LockMethodProcLockHash,
2789  (void *) &proclocktag,
2790  proclock_hashcode,
2791  HASH_FIND,
2792  NULL);
2793  if (!proclock)
2794  elog(ERROR, "failed to re-find shared proclock object");
2795  LWLockRelease(partitionLock);
2796  }
2797 
2798  return proclock;
2799 }
2800 
2801 /*
2802  * GetLockConflicts
2803  * Get an array of VirtualTransactionIds of xacts currently holding locks
2804  * that would conflict with the specified lock/lockmode.
2805  * xacts merely awaiting such a lock are NOT reported.
2806  *
2807  * The result array is palloc'd and is terminated with an invalid VXID.
2808  * *countp, if not null, is updated to the number of items set.
2809  *
2810  * Of course, the result could be out of date by the time it's returned,
2811  * so use of this function has to be thought about carefully.
2812  *
2813  * Note we never include the current xact's vxid in the result array,
2814  * since an xact never blocks itself. Also, prepared transactions are
2815  * ignored, which is a bit more debatable but is appropriate for current
2816  * uses of the result.
2817  */
2818 VirtualTransactionId *
2819 GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
2820 {
2821  static VirtualTransactionId *vxids;
2822  LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
2823  LockMethod lockMethodTable;
2824  LOCK *lock;
2825  LOCKMASK conflictMask;
2826  SHM_QUEUE *procLocks;
2827  PROCLOCK *proclock;
2828  uint32 hashcode;
2829  LWLock *partitionLock;
2830  int count = 0;
2831  int fast_count = 0;
2832 
2833  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
2834  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
2835  lockMethodTable = LockMethods[lockmethodid];
2836  if (lockmode <= 0 || lockmode > lockMethodTable->numLockModes)
2837  elog(ERROR, "unrecognized lock mode: %d", lockmode);
2838 
2839  /*
2840  * Allocate memory to store results, and fill with InvalidVXID. We only
2841  * need enough space for MaxBackends + a terminator, since prepared xacts
2842  * don't count. InHotStandby allocate once in TopMemoryContext.
2843  */
2844  if (InHotStandby)
2845  {
2846  if (vxids == NULL)
2847  vxids = (VirtualTransactionId *)
2848  MemoryContextAlloc(TopMemoryContext,
2849  sizeof(VirtualTransactionId) * (MaxBackends + 1));
2850  }
2851  else
2852  vxids = (VirtualTransactionId *)
2853  palloc0(sizeof(VirtualTransactionId) * (MaxBackends + 1));
2854 
2855  /* Compute hash code and partition lock, and look up conflicting modes. */
2856  hashcode = LockTagHashCode(locktag);
2857  partitionLock = LockHashPartitionLock(hashcode);
2858  conflictMask = lockMethodTable->conflictTab[lockmode];
2859 
2860  /*
2861  * Fast path locks might not have been entered in the primary lock table.
2862  * If the lock we're dealing with could conflict with such a lock, we must
2863  * examine each backend's fast-path array for conflicts.
2864  */
2865  if (ConflictsWithRelationFastPath(locktag, lockmode))
2866  {
2867  int i;
2868  Oid relid = locktag->locktag_field2;
2869  VirtualTransactionId vxid;
2870 
2871  /*
2872  * Iterate over relevant PGPROCs. Anything held by a prepared
2873  * transaction will have been transferred to the primary lock table,
2874  * so we need not worry about those. This is all a bit fuzzy, because
2875  * new locks could be taken after we've visited a particular
2876  * partition, but the callers had better be prepared to deal with that
2877  * anyway, since the locks could equally well be taken between the
2878  * time we return the value and the time the caller does something
2879  * with it.
2880  */
2881  for (i = 0; i < ProcGlobal->allProcCount; i++)
2882  {
2883  PGPROC *proc = &ProcGlobal->allProcs[i];
2884  uint32 f;
2885 
2886  /* A backend never blocks itself */
2887  if (proc == MyProc)
2888  continue;
2889 
2890  LWLockAcquire(&proc->backendLock, LW_SHARED);
2891 
2892  /*
2893  * If the target backend isn't referencing the same database as
2894  * the lock, then we needn't examine the individual relation IDs
2895  * at all; none of them can be relevant.
2896  *
2897  * See FastPathTransferRelationLocks() for discussion of why we do
2898  * this test after acquiring the lock.
2899  */
2900  if (proc->databaseId != locktag->locktag_field1)
2901  {
2902  LWLockRelease(&proc->backendLock);
2903  continue;
2904  }
2905 
2906  for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
2907  {
2908  uint32 lockmask;
2909 
2910  /* Look for an allocated slot matching the given relid. */
2911  if (relid != proc->fpRelId[f])
2912  continue;
2913  lockmask = FAST_PATH_GET_BITS(proc, f);
2914  if (!lockmask)
2915  continue;
2916  lockmask <<= FAST_PATH_LOCKNUMBER_OFFSET;
2917 
2918  /*
2919  * There can only be one entry per relation, so if we found it
2920  * and it doesn't conflict, we can skip the rest of the slots.
2921  */
2922  if ((lockmask & conflictMask) == 0)
2923  break;
2924 
2925  /* Conflict! */
2926  GET_VXID_FROM_PGPROC(vxid, *proc);
2927 
2928  /*
2929  * If we see an invalid VXID, then either the xact has already
2930  * committed (or aborted), or it's a prepared xact. In either
2931  * case we may ignore it.
2932  */
2933  if (VirtualTransactionIdIsValid(vxid))
2934  vxids[count++] = vxid;
2935 
2936  /* No need to examine remaining slots. */
2937  break;
2938  }
2939 
2940  LWLockRelease(&proc->backendLock);
2941  }
2942  }
2943 
2944  /* Remember how many fast-path conflicts we found. */
2945  fast_count = count;
2946 
2947  /*
2948  * Look up the lock object matching the tag.
2949  */
2950  LWLockAcquire(partitionLock, LW_SHARED);
2951 
2952  lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
2953  (const void *) locktag,
2954  hashcode,
2955  HASH_FIND,
2956  NULL);
2957  if (!lock)
2958  {
2959  /*
2960  * If the lock object doesn't exist, there is nothing holding a lock
2961  * on this lockable object.
2962  */
2963  LWLockRelease(partitionLock);
2964  vxids[count].backendId = InvalidBackendId;
2965  vxids[count].localTransactionId = InvalidLocalTransactionId;
2966  if (countp)
2967  *countp = count;
2968  return vxids;
2969  }
2970 
2971  /*
2972  * Examine each existing holder (or awaiter) of the lock.
2973  */
2974 
2975  procLocks = &(lock->procLocks);
2976 
2977  proclock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks,
2978  offsetof(PROCLOCK, lockLink));
2979 
2980  while (proclock)
2981  {
2982  if (conflictMask & proclock->holdMask)
2983  {
2984  PGPROC *proc = proclock->tag.myProc;
2985 
2986  /* A backend never blocks itself */
2987  if (proc != MyProc)
2988  {
2989  VirtualTransactionId vxid;
2990 
2991  GET_VXID_FROM_PGPROC(vxid, *proc);
2992 
2993  /*
2994  * If we see an invalid VXID, then either the xact has already
2995  * committed (or aborted), or it's a prepared xact. In either
2996  * case we may ignore it.
2997  */
2998  if (VirtualTransactionIdIsValid(vxid))
2999  {
3000  int i;
3001 
3002  /* Avoid duplicate entries. */
3003  for (i = 0; i < fast_count; ++i)
3004  if (VirtualTransactionIdEquals(vxids[i], vxid))
3005  break;
3006  if (i >= fast_count)
3007  vxids[count++] = vxid;
3008  }
3009  }
3010  }
3011 
3012  proclock = (PROCLOCK *) SHMQueueNext(procLocks, &proclock->lockLink,
3013  offsetof(PROCLOCK, lockLink));
3014  }
3015 
3016  LWLockRelease(partitionLock);
3017 
3018  if (count > MaxBackends) /* should never happen */
3019  elog(PANIC, "too many conflicting locks found");
3020 
3021  vxids[count].backendId = InvalidBackendId;
3022  vxids[count].localTransactionId = InvalidLocalTransactionId;
3023  if (countp)
3024  *countp = count;
3025  return vxids;
3026 }
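/*
 * Example (editorial sketch, not part of lock.c): the typical consumer,
 * modeled on lmgr.c's WaitForLockers().  The caller walks the returned
 * array up to its invalid-VXID terminator, waiting for each conflicting
 * transaction to finish.  The function name is hypothetical.
 */
#include "postgres.h"
#include "storage/lock.h"

static void
wait_for_conflicts_sketch(const LOCKTAG *locktag, LOCKMODE lockmode)
{
	VirtualTransactionId *old_lockholders;

	old_lockholders = GetLockConflicts(locktag, lockmode, NULL);
	while (VirtualTransactionIdIsValid(*old_lockholders))
	{
		(void) VirtualXactLock(*old_lockholders, true);	/* true = wait */
		old_lockholders++;
	}
}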
3027 
3028 /*
3029  * Find a lock in the shared lock table and release it. It is the caller's
3030  * responsibility to verify that this is a sane thing to do. (For example, it
3031  * would be bad to release a lock here if there might still be a LOCALLOCK
3032  * object with pointers to it.)
3033  *
3034  * We currently use this in two situations: first, to release locks held by
3035  * prepared transactions on commit (see lock_twophase_postcommit); and second,
3036  * to release locks taken via the fast-path, transferred to the main hash
3037  * table, and then released (see LockReleaseAll).
3038  */
3039 static void
3040 LockRefindAndRelease(LockMethod lockMethodTable, PGPROC *proc,
3041  LOCKTAG *locktag, LOCKMODE lockmode,
3042  bool decrement_strong_lock_count)
3043 {
3044  LOCK *lock;
3045  PROCLOCK *proclock;
3046  PROCLOCKTAG proclocktag;
3047  uint32 hashcode;
3048  uint32 proclock_hashcode;
3049  LWLock *partitionLock;
3050  bool wakeupNeeded;
3051 
3052  hashcode = LockTagHashCode(locktag);
3053  partitionLock = LockHashPartitionLock(hashcode);
3054 
3055  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
3056 
3057  /*
3058  * Re-find the lock object (it had better be there).
3059  */
3060  lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
3061  (void *) locktag,
3062  hashcode,
3063  HASH_FIND,
3064  NULL);
3065  if (!lock)
3066  elog(PANIC, "failed to re-find shared lock object");
3067 
3068  /*
3069  * Re-find the proclock object (ditto).
3070  */
3071  proclocktag.myLock = lock;
3072  proclocktag.myProc = proc;
3073 
3074  proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
3075 
3076  proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
3077  (void *) &proclocktag,
3078  proclock_hashcode,
3079  HASH_FIND,
3080  NULL);
3081  if (!proclock)
3082  elog(PANIC, "failed to re-find shared proclock object");
3083 
3084  /*
3085  * Double-check that we are actually holding a lock of the type we want to
3086  * release.
3087  */
3088  if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
3089  {
3090  PROCLOCK_PRINT("lock_twophase_postcommit: WRONGTYPE", proclock);
3091  LWLockRelease(partitionLock);
3092  elog(WARNING, "you don't own a lock of type %s",
3093  lockMethodTable->lockModeNames[lockmode]);
3094  return;
3095  }
3096 
3097  /*
3098  * Do the releasing. CleanUpLock will waken any now-wakable waiters.
3099  */
3100  wakeupNeeded = UnGrantLock(lock, lockmode, proclock, lockMethodTable);
3101 
3102  CleanUpLock(lock, proclock,
3103  lockMethodTable, hashcode,
3104  wakeupNeeded);
3105 
3106  LWLockRelease(partitionLock);
3107 
3108  /*
3109  * Decrement strong lock count. This logic is needed only for 2PC.
3110  */
3111  if (decrement_strong_lock_count
3112  && ConflictsWithRelationFastPath(locktag, lockmode))
3113  {
3114  uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
3115 
3116  SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
3117  Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
3118  FastPathStrongRelationLocks->count[fasthashcode]--;
3119  SpinLockRelease(&FastPathStrongRelationLocks->mutex);
3120  }
3121 }
3122 
3123 /*
3124  * AtPrepare_Locks
3125  * Do the preparatory work for a PREPARE: make 2PC state file records
3126  * for all locks currently held.
3127  *
3128  * Session-level locks are ignored, as are VXID locks.
3129  *
3130  * There are some special cases that we error out on: we can't be holding any
3131  * locks at both session and transaction level (since we must either keep or
3132  * give away the PROCLOCK object), and we can't be holding any locks on
3133  * temporary objects (since that would mess up the current backend if it tries
3134  * to exit before the prepared xact is committed).
3135  */
3136 void
3137 AtPrepare_Locks(void)
3138 {
3139  HASH_SEQ_STATUS status;
3140  LOCALLOCK *locallock;
3141 
3142  /*
3143  * For the most part, we don't need to touch shared memory for this ---
3144  * all the necessary state information is in the locallock table.
3145  * Fast-path locks are an exception, however: we move any such locks to
3146  * the main table before allowing PREPARE TRANSACTION to succeed.
3147  */
3148  hash_seq_init(&status, LockMethodLocalHash);
3149 
3150  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3151  {
3152  TwoPhaseLockRecord record;
3153  LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3154  bool haveSessionLock;
3155  bool haveXactLock;
3156  int i;
3157 
3158  /*
3159  * Ignore VXID locks. We don't want those to be held by prepared
3160  * transactions, since they aren't meaningful after a restart.
3161  */
3162  if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3163  continue;
3164 
3165  /* Ignore it if we don't actually hold the lock */
3166  if (locallock->nLocks <= 0)
3167  continue;
3168 
3169  /* Scan to see whether we hold it at session or transaction level */
3170  haveSessionLock = haveXactLock = false;
3171  for (i = locallock->numLockOwners - 1; i >= 0; i--)
3172  {
3173  if (lockOwners[i].owner == NULL)
3174  haveSessionLock = true;
3175  else
3176  haveXactLock = true;
3177  }
3178 
3179  /* Ignore it if we have only session lock */
3180  if (!haveXactLock)
3181  continue;
3182 
3183  /*
3184  * If we have both session- and transaction-level locks, fail. This
3185  * should never happen with regular locks, since we only take those at
3186  * session level in some special operations like VACUUM. It's
3187  * possible to hit this with advisory locks, though.
3188  *
3189  * It would be nice if we could keep the session hold and give away
3190  * the transactional hold to the prepared xact. However, that would
3191  * require two PROCLOCK objects, and we cannot be sure that another
3192  * PROCLOCK will be available when it comes time for PostPrepare_Locks
3193  * to do the deed. So for now, we error out while we can still do so
3194  * safely.
3195  */
3196  if (haveSessionLock)
3197  ereport(ERROR,
3198  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3199  errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3200 
3201  /*
3202  * If the local lock was taken via the fast-path, we need to move it
3203  * to the primary lock table, or just get a pointer to the existing
3204  * primary lock table entry if by chance it's already been
3205  * transferred.
3206  */
3207  if (locallock->proclock == NULL)
3208  {
3209  locallock->proclock = FastPathGetRelationLockEntry(locallock);
3210  locallock->lock = locallock->proclock->tag.myLock;
3211  }
3212 
3213  /*
3214  * Arrange to not release any strong lock count held by this lock
3215  * entry. We must retain the count until the prepared transaction is
3216  * committed or rolled back.
3217  */
3218  locallock->holdsStrongLockCount = false;
3219 
3220  /*
3221  * Create a 2PC record.
3222  */
3223  memcpy(&(record.locktag), &(locallock->tag.lock), sizeof(LOCKTAG));
3224  record.lockmode = locallock->tag.mode;
3225 
3226  RegisterTwoPhaseRecord(TWOPHASE_RM_LOCK_ID, 0,
3227  &record, sizeof(TwoPhaseLockRecord));
3228  }
3229 }
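/*
 * Example (editorial sketch, within this module's context): the consumer
 * side of the records written above.  At COMMIT PREPARED, the two-phase
 * machinery hands each record back to this module's
 * lock_twophase_postcommit() callback (defined later in this file), which
 * re-finds and releases the persisted lock roughly as below (lock-method
 * validation elided).  Note the true passed for
 * decrement_strong_lock_count, matching the strong-lock count that
 * AtPrepare_Locks arranged to retain.
 */
static void
postcommit_record_sketch(PGPROC *dummyproc, void *recdata, uint32 len)
{
	TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
	LOCKMETHODID lockmethodid = rec->locktag.locktag_lockmethodid;

	Assert(len == sizeof(TwoPhaseLockRecord));
	LockRefindAndRelease(LockMethods[lockmethodid], dummyproc,
						 &rec->locktag, rec->lockmode, true);
}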
3230 
3231 /*
3232  * PostPrepare_Locks
3233  * Clean up after successful PREPARE
3234  *
3235  * Here, we want to transfer ownership of our locks to a dummy PGPROC
3236  * that's now associated with the prepared transaction, and we want to
3237  * clean out the corresponding entries in the LOCALLOCK table.
3238  *
3239  * Note: by removing the LOCALLOCK entries, we are leaving dangling
3240  * pointers in the transaction's resource owner. This is OK at the
3241  * moment since resowner.c doesn't try to free locks retail at a toplevel
3242  * transaction commit or abort. We could alternatively zero out nLocks
3243  * and leave the LOCALLOCK entries to be garbage-collected by LockReleaseAll,
3244  * but that probably costs more cycles.
3245  */
3246 void
3247 PostPrepare_Locks(TransactionId xid)
3248 {
3249  PGPROC *newproc = TwoPhaseGetDummyProc(xid, false);
3250  HASH_SEQ_STATUS status;
3251  LOCALLOCK *locallock;
3252  LOCK *lock;
3253  PROCLOCK *proclock;
3254  PROCLOCKTAG proclocktag;
3255  int partition;
3256 
3257  /* Can't prepare a lock group follower. */
3258  Assert(MyProc->lockGroupLeader == NULL ||
3259  MyProc->lockGroupLeader == MyProc);
3260 
3261  /* This is a critical section: any error means big trouble */
3262  START_CRIT_SECTION();
3263 
3264  /*
3265  * First we run through the locallock table and get rid of unwanted
3266  * entries, then we scan the process's proclocks and transfer them to the
3267  * target proc.
3268  *
3269  * We do this separately because we may have multiple locallock entries
3270  * pointing to the same proclock, and we daren't end up with any dangling
3271  * pointers.
3272  */
3273  hash_seq_init(&status, LockMethodLocalHash);
3274 
3275  while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
3276  {
3277  LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
3278  bool haveSessionLock;
3279  bool haveXactLock;
3280  int i;
3281 
3282  if (locallock->proclock == NULL || locallock->lock == NULL)
3283  {
3284  /*
3285  * We must've run out of shared memory while trying to set up this
3286  * lock. Just forget the local entry.
3287  */
3288  Assert(locallock->nLocks == 0);
3289  RemoveLocalLock(locallock);
3290  continue;
3291  }
3292 
3293  /* Ignore VXID locks */
3294  if (locallock->tag.lock.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3295  continue;
3296 
3297  /* Scan to see whether we hold it at session or transaction level */
3298  haveSessionLock = haveXactLock = false;
3299  for (i = locallock->numLockOwners - 1; i >= 0; i--)
3300  {
3301  if (lockOwners[i].owner == NULL)
3302  haveSessionLock = true;
3303  else
3304  haveXactLock = true;
3305  }
3306 
3307  /* Ignore it if we have only session lock */
3308  if (!haveXactLock)
3309  continue;
3310 
3311  /* This can't happen, because we already checked it */
3312  if (haveSessionLock)
3313  ereport(PANIC,
3314  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
3315  errmsg("cannot PREPARE while holding both session-level and transaction-level locks on the same object")));
3316 
3317  /* Mark the proclock to show we need to release this lockmode */
3318  if (locallock->nLocks > 0)
3319  locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
3320 
3321  /* And remove the locallock hashtable entry */
3322  RemoveLocalLock(locallock);
3323  }
3324 
3325  /*
3326  * Now, scan each lock partition separately.
3327  */
3328  for (partition = 0; partition < NUM_LOCK_PARTITIONS; partition++)
3329  {
3330  LWLock *partitionLock;
3331  SHM_QUEUE *procLocks = &(MyProc->myProcLocks[partition]);
3332  PROCLOCK *nextplock;
3333 
3334  partitionLock = LockHashPartitionLockByIndex(partition);
3335 
3336  /*
3337  * If the proclock list for this partition is empty, we can skip
3338  * acquiring the partition lock. This optimization is safer than the
3339  * situation in LockReleaseAll, because we got rid of any fast-path
3340  * locks during AtPrepare_Locks, so there cannot be any case where
3341  * another backend is adding something to our lists now. For safety,
3342  * though, we code this the same way as in LockReleaseAll.
3343  */
3344  if (SHMQueueNext(procLocks, procLocks,
3345  offsetof(PROCLOCK, procLink)) == NULL)
3346  continue; /* needn't examine this partition */
3347 
3348  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
3349 
3350  for (proclock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks,
3351  offsetof(PROCLOCK, procLink));
3352  proclock;
3353  proclock = nextplock)
3354  {
3355  /* Get link first, since we may unlink/relink this proclock */
3356  nextplock = (PROCLOCK *)
3357  SHMQueueNext(procLocks, &proclock->procLink,
3358  offsetof(PROCLOCK, procLink));
3359 
3360  Assert(proclock->tag.myProc == MyProc);
3361 
3362  lock = proclock->tag.myLock;
3363 
3364  /* Ignore VXID locks */
3365  if (lock->tag.locktag_type == LOCKTAG_VIRTUALTRANSACTION)
3366  continue;
3367 
3368  PROCLOCK_PRINT("PostPrepare_Locks", proclock);
3369  LOCK_PRINT("PostPrepare_Locks", lock, 0);
3370  Assert(lock->nRequested >= 0);
3371  Assert(lock->nGranted >= 0);
3372  Assert(lock->nGranted <= lock->nRequested);
3373  Assert((proclock->holdMask & ~lock->grantMask) == 0);
3374 
3375  /* Ignore it if nothing to release (must be a session lock) */
3376  if (proclock->releaseMask == 0)
3377  continue;
3378 
3379  /* Else we should be releasing all locks */
3380  if (proclock->releaseMask != proclock->holdMask)
3381  elog(PANIC, "we seem to have dropped a bit somewhere");
3382 
3383  /*
3384  * We cannot simply modify proclock->tag.myProc to reassign
3385  * ownership of the lock, because that's part of the hash key and
3386  * the proclock would then be in the wrong hash chain. Instead
3387  * use hash_update_hash_key. (We used to create a new hash entry,
3388  * but that risks out-of-memory failure if other processes are
3389  * busy making proclocks too.) We must unlink the proclock from
3390  * our procLink chain and put it into the new proc's chain, too.
3391  *
3392  * Note: the updated proclock hash key will still belong to the
3393  * same hash partition, cf proclock_hash(). So the partition lock
3394  * we already hold is sufficient for this.
3395  */
3396  SHMQueueDelete(&proclock->procLink);
3397 
3398  /*
3399  * Create the new hash key for the proclock.
3400  */
3401  proclocktag.myLock = lock;
3402  proclocktag.myProc = newproc;
3403 
3404  /*
3405  * Update groupLeader pointer to point to the new proc. (We'd
3406  * better not be a member of somebody else's lock group!)
3407  */
3408  Assert(proclock->groupLeader == proclock->tag.myProc);
3409  proclock->groupLeader = newproc;
3410 
3411  /*
3412  * Update the proclock. We should not find any existing entry for
3413  * the same hash key, since there can be only one entry for any
3414  * given lock with my own proc.
3415  */
3416  if (!hash_update_hash_key(LockMethodProcLockHash,
3417  (void *) proclock,
3418  (void *) &proclocktag))
3419  elog(PANIC, "duplicate entry found while reassigning a prepared transaction's locks");
3420 
3421  /* Re-link into the new proc's proclock list */
3422  SHMQueueInsertBefore(&(newproc->myProcLocks[partition]),
3423  &proclock->procLink);
3424 
3425  PROCLOCK_PRINT("PostPrepare_Locks: updated", proclock);
3426  } /* loop over PROCLOCKs within this partition */
3427 
3428  LWLockRelease(partitionLock);
3429  } /* loop over partitions */
3430 
3431  END_CRIT_SECTION();
3432 }
3433 
3434 
3435 /*
3436  * Estimate shared-memory space used for lock tables
3437  */
3438 Size
3439 LockShmemSize(void)
3440 {
3441  Size size = 0;
3442  long max_table_size;
3443 
3444  /* lock hash table */
3445  max_table_size = NLOCKENTS();
3446  size = add_size(size, hash_estimate_size(max_table_size, sizeof(LOCK)));
3447 
3448  /* proclock hash table */
3449  max_table_size *= 2;
3450  size = add_size(size, hash_estimate_size(max_table_size, sizeof(PROCLOCK)));
3451 
3452  /*
3453  * Since NLOCKENTS is only an estimate, add 10% safety margin.
3454  */
3455  size = add_size(size, size / 10);
3456 
3457  return size;
3458 }
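/*
 * Example (editorial sketch, not part of lock.c): the sizing arithmetic
 * above with illustrative numbers.  Assuming max_locks_per_xact = 64,
 * MaxBackends = 100 and max_prepared_xacts = 0, NLOCKENTS() is
 * 64 * (100 + 0) = 6400, so the lock hash is sized for 6400 entries and
 * the proclock hash for twice that, 12800, with the combined estimate then
 * padded by 10%.  A standalone model, using entry-count-times-entry-size
 * as a crude stand-in for hash_estimate_size():
 */
#include <stddef.h>

static size_t
lock_shmem_size_model(size_t lock_entry_sz, size_t proclock_entry_sz,
					  int max_locks_per_xact, int max_backends,
					  int max_prepared_xacts)
{
	size_t	nlockents = (size_t) max_locks_per_xact *
		((size_t) max_backends + max_prepared_xacts);
	size_t	size = nlockents * lock_entry_sz +
		(nlockents * 2) * proclock_entry_sz;

	return size + size / 10;	/* 10% safety margin */
}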
3459 
3460 /*
3461  * GetLockStatusData - Return a summary of the lock manager's internal
3462  * status, for use in a user-level reporting function.
3463  *
3464  * The return data consists of an array of LockInstanceData objects,
3465  * which are a lightly abstracted version of the PROCLOCK data structures,
3466  * i.e. there is one entry for each unique lock and interested PGPROC.
3467  * It is the caller's responsibility to match up related items (such as
3468  * references to the same lockable object or PGPROC) if wanted.
3469  *
3470  * The design goal is to hold the LWLocks for as short a time as possible;
3471  * thus, this function simply makes a copy of the necessary data and releases
3472  * the locks, allowing the caller to contemplate and format the data for as
3473  * long as it pleases.
3474  */
3475 LockData *
3476 GetLockStatusData(void)
3477 {
3478  LockData *data;
3479  PROCLOCK *proclock;
3480  HASH_SEQ_STATUS seqstat;
3481  int els;
3482  int el;
3483  int i;
3484 
3485  data = (LockData *) palloc(sizeof(LockData));
3486 
3487  /* Guess how much space we'll need. */
3488  els = MaxBackends;
3489  el = 0;
3490  data->locks = (LockInstanceData *) palloc(sizeof(LockInstanceData) * els);
3491 
3492  /*
3493  * First, we iterate through the per-backend fast-path arrays, locking
3494  * them one at a time. This might produce an inconsistent picture of the
3495  * system state, but taking all of those LWLocks at the same time seems
3496  * impractical (in particular, note MAX_SIMUL_LWLOCKS). It shouldn't
3497  * matter too much, because none of these locks can be involved in lock
3498  * conflicts anyway - anything that might must be present in the main lock
3499  * table. (For the same reason, we don't sweat about making leaderPid
3500  * completely valid. We cannot safely dereference another backend's
3501  * lockGroupLeader field without holding all lock partition locks, and
3502  * it's not worth that.)
3503  */
3504  for (i = 0; i < ProcGlobal->allProcCount; ++i)
3505  {
3506  PGPROC *proc = &ProcGlobal->allProcs[i];
3507  uint32 f;
3508 
3509  LWLockAcquire(&proc->backendLock, LW_SHARED);
3510 
3511  for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; ++f)
3512  {
3513  LockInstanceData *instance;
3514  uint32 lockbits = FAST_PATH_GET_BITS(proc, f);
3515 
3516  /* Skip unallocated slots. */
3517  if (!lockbits)
3518  continue;
3519 
3520  if (el >= els)
3521  {
3522  els += MaxBackends;
3523  data->locks = (LockInstanceData *)
3524  repalloc(data->locks, sizeof(LockInstanceData) * els);
3525  }
3526 
3527  instance = &data->locks[el];
3528  SET_LOCKTAG_RELATION(instance->locktag, proc->databaseId,
3529  proc->fpRelId[f]);
3530  instance->holdMask = lockbits << FAST_PATH_LOCKNUMBER_OFFSET;
3531  instance->waitLockMode = NoLock;
3532  instance->backend = proc->backendId;
3533  instance->lxid = proc->lxid;
3534  instance->pid = proc->pid;
3535  instance->leaderPid = proc->pid;
3536  instance->fastpath = true;
3537 
3538  el++;
3539  }
3540 
3541  if (proc->fpVXIDLock)
3542  {
3543  VirtualTransactionId vxid;
3544  LockInstanceData *instance;
3545 
3546  if (el >= els)
3547  {
3548  els += MaxBackends;
3549  data->locks = (LockInstanceData *)
3550  repalloc(data->locks, sizeof(LockInstanceData) * els);
3551  }
3552 
3553  vxid.backendId = proc->backendId;
3554  vxid.localTransactionId = proc->fpLocalTransactionId;
3555 
3556  instance = &data->locks[el];
3557  SET_LOCKTAG_VIRTUALTRANSACTION(instance->locktag, vxid);
3558  instance->holdMask = LOCKBIT_ON(ExclusiveLock);
3559  instance->waitLockMode = NoLock;
3560  instance->backend = proc->backendId;
3561  instance->lxid = proc->lxid;
3562  instance->pid = proc->pid;
3563  instance->leaderPid = proc->pid;
3564  instance->fastpath = true;
3565 
3566  el++;
3567  }
3568 
3569  LWLockRelease(&proc->backendLock);
3570  }
3571 
3572  /*
3573  * Next, acquire lock on the entire shared lock data structure. We do
3574  * this so that, at least for locks in the primary lock table, the state
3575  * will be self-consistent.
3576  *
3577  * Since this is a read-only operation, we take shared instead of
3578  * exclusive lock. There's not a whole lot of point to this, because all
3579  * the normal operations require exclusive lock, but it doesn't hurt
3580  * anything either. It will at least allow two backends to do
3581  * GetLockStatusData in parallel.
3582  *
3583  * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
3584  */
3585  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
3586  LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);
3587 
3588  /* Now we can safely count the number of proclocks */
3589  data->nelements = el + hash_get_num_entries(LockMethodProcLockHash);
3590  if (data->nelements > els)
3591  {
3592  els = data->nelements;
3593  data->locks = (LockInstanceData *)
3594  repalloc(data->locks, sizeof(LockInstanceData) * els);
3595  }
3596 
3597  /* Now scan the tables to copy the data */
3598  hash_seq_init(&seqstat, LockMethodProcLockHash);
3599 
3600  while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
3601  {
3602  PGPROC *proc = proclock->tag.myProc;
3603  LOCK *lock = proclock->tag.myLock;
3604  LockInstanceData *instance = &data->locks[el];
3605 
3606  memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
3607  instance->holdMask = proclock->holdMask;
3608  if (proc->waitLock == proclock->tag.myLock)
3609  instance->waitLockMode = proc->waitLockMode;
3610  else
3611  instance->waitLockMode = NoLock;
3612  instance->backend = proc->backendId;
3613  instance->lxid = proc->lxid;
3614  instance->pid = proc->pid;
3615  instance->leaderPid = proclock->groupLeader->pid;
3616  instance->fastpath = false;
3617 
3618  el++;
3619  }
3620 
3621  /*
3622  * And release locks. We do this in reverse order for two reasons: (1)
3623  * Anyone else who needs more than one of the locks will be trying to lock
3624  * them in increasing order; we don't want to release the other process
3625  * until it can get all the locks it needs. (2) This avoids O(N^2)
3626  * behavior inside LWLockRelease.
3627  */
3628  for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
3629  LWLockRelease(LockHashPartitionLockByIndex(i));
3630 
3631  Assert(el == data->nelements);
3632 
3633  return data;
3634 }
3635 
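/*
 * Illustrative only, not part of lock.c: a minimal sketch of a consumer of
 * GetLockStatusData(), in the spirit of pg_lock_status() in
 * src/backend/utils/adt/lockfuncs.c.  The function name and the elog()
 * reporting are assumptions made for illustration.
 */
static void
report_all_locks(void)
{
	LockData   *lockData = GetLockStatusData();
	int			i;

	for (i = 0; i < lockData->nelements; i++)
	{
		LockInstanceData *instance = &lockData->locks[i];

		/* one entry per (lockable object, interested PGPROC) pair */
		elog(LOG, "pid %d: holdMask %x, waitLockMode %d, fastpath %d",
			 instance->pid, instance->holdMask,
			 instance->waitLockMode, (int) instance->fastpath);
	}
}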
3636 /*
3637  * GetBlockerStatusData - Return a summary of the lock manager's state
3638  * concerning locks that are blocking the specified PID or any member of
3639  * the PID's lock group, for use in a user-level reporting function.
3640  *
3641  * For each PID within the lock group that is awaiting some heavyweight lock,
3642  * the return data includes an array of LockInstanceData objects, which are
3643  * the same data structure used by GetLockStatusData; but unlike that function,
3644  * this one reports only the PROCLOCKs associated with the lock that that PID
3645  * is blocked on. (Hence, all the locktags should be the same for any one
3646  * blocked PID.) In addition, we return an array of the PIDs of those backends
3647  * that are ahead of the blocked PID in the lock's wait queue. These can be
3648  * compared with the PIDs in the LockInstanceData objects to determine which
3649  * waiters are ahead of or behind the blocked PID in the queue.
3650  *
3651  * If blocked_pid isn't a valid backend PID or nothing in its lock group is
3652  * waiting on any heavyweight lock, return empty arrays.
3653  *
3654  * The design goal is to hold the LWLocks for as short a time as possible;
3655  * thus, this function simply makes a copy of the necessary data and releases
3656  * the locks, allowing the caller to contemplate and format the data for as
3657  * long as it pleases.
3658  */
3659 BlockedProcsData *
3660 GetBlockerStatusData(int blocked_pid)
3661 {
3662  BlockedProcsData *data;
3663  PGPROC *proc;
3664  int i;
3665 
3666  data = (BlockedProcsData *) palloc(sizeof(BlockedProcsData));
3667 
3668  /*
3669  * Guess how much space we'll need, and preallocate. Most of the time
3670  * this will avoid needing to do repalloc while holding the LWLocks. (We
3671  * assume, but check with an Assert, that MaxBackends is enough entries
3672  * for the procs[] array; the other two could need enlargement, though.)
3673  */
3674  data->nprocs = data->nlocks = data->npids = 0;
3675  data->maxprocs = data->maxlocks = data->maxpids = MaxBackends;
3676  data->procs = (BlockedProcData *) palloc(sizeof(BlockedProcData) * data->maxprocs);
3677  data->locks = (LockInstanceData *) palloc(sizeof(LockInstanceData) * data->maxlocks);
3678  data->waiter_pids = (int *) palloc(sizeof(int) * data->maxpids);
3679 
3680  /*
3681  * In order to search the ProcArray for blocked_pid and assume that that
3682  * entry won't immediately disappear under us, we must hold ProcArrayLock.
3683  * In addition, to examine the lock grouping fields of any other backend,
3684  * we must hold all the hash partition locks. (Only one of those locks is
3685  * actually relevant for any one lock group, but we can't know which one
3686  * ahead of time.) It's fairly annoying to hold all those locks
3687  * throughout this, but it's no worse than GetLockStatusData(), and it
3688  * does have the advantage that we're guaranteed to return a
3689  * self-consistent instantaneous state.
3690  */
3691  LWLockAcquire(ProcArrayLock, LW_SHARED);
3692 
3693  proc = BackendPidGetProcWithLock(blocked_pid);
3694 
3695  /* Nothing to do if it's gone */
3696  if (proc != NULL)
3697  {
3698  /*
3699  * Acquire lock on the entire shared lock data structure. See notes
3700  * in GetLockStatusData().
3701  */
3702  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
3703  LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);
3704 
3705  if (proc->lockGroupLeader == NULL)
3706  {
3707  /* Easy case, proc is not a lock group member */
3708  GetSingleProcBlockerStatusData(proc, data);
3709  }
3710  else
3711  {
3712  /* Examine all procs in proc's lock group */
3713  dlist_iter iter;
3714 
3715  dlist_foreach(iter, &proc->lockGroupLeader->lockGroupMembers)
3716  {
3717  PGPROC *memberProc;
3718 
3719  memberProc = dlist_container(PGPROC, lockGroupLink, iter.cur);
3720  GetSingleProcBlockerStatusData(memberProc, data);
3721  }
3722  }
3723 
3724  /*
3725  * And release locks. See notes in GetLockStatusData().
3726  */
3727  for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
3728  LWLockRelease(LockHashPartitionLockByIndex(i));
3729 
3730  Assert(data->nprocs <= data->maxprocs);
3731  }
3732 
3733  LWLockRelease(ProcArrayLock);
3734 
3735  return data;
3736 }
3737 
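/*
 * Illustrative only, not part of lock.c: a sketch of walking the arrays
 * returned by GetBlockerStatusData().  Each procs[] entry addresses its own
 * slice of locks[] and waiter_pids[] through first_lock/num_locks and
 * first_waiter/num_waiters; pg_blocking_pids() in lockfuncs.c consumes the
 * data this way.  The function name is an assumption made for illustration.
 */
static void
report_blockers(int blocked_pid)
{
	BlockedProcsData *data = GetBlockerStatusData(blocked_pid);
	int			i;

	for (i = 0; i < data->nprocs; i++)
	{
		BlockedProcData *bproc = &data->procs[i];
		int			j;

		/* PROCLOCKs on the lock this group member is waiting for */
		for (j = 0; j < bproc->num_locks; j++)
			elog(LOG, "pid %d: wait target held/awaited by pid %d",
				 bproc->pid, data->locks[bproc->first_lock + j].pid);

		/* PIDs queued ahead of this group member */
		for (j = 0; j < bproc->num_waiters; j++)
			elog(LOG, "pid %d waits behind pid %d",
				 bproc->pid, data->waiter_pids[bproc->first_waiter + j]);
	}
}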
3738 /* Accumulate data about one possibly-blocked proc for GetBlockerStatusData */
3739 static void
3740 GetSingleProcBlockerStatusData(PGPROC *blocked_proc, BlockedProcsData *data)
3741 {
3742  LOCK *theLock = blocked_proc->waitLock;
3743  BlockedProcData *bproc;
3744  SHM_QUEUE *procLocks;
3745  PROCLOCK *proclock;
3746  PROC_QUEUE *waitQueue;
3747  PGPROC *proc;
3748  int queue_size;
3749  int i;
3750 
3751  /* Nothing to do if this proc is not blocked */
3752  if (theLock == NULL)
3753  return;
3754 
3755  /* Set up a procs[] element */
3756  bproc = &data->procs[data->nprocs++];
3757  bproc->pid = blocked_proc->pid;
3758  bproc->first_lock = data->nlocks;
3759  bproc->first_waiter = data->npids;
3760 
3761  /*
3762  * We may ignore the proc's fast-path arrays, since nothing in those could
3763  * be related to a contended lock.
3764  */
3765 
3766  /* Collect all PROCLOCKs associated with theLock */
3767  procLocks = &(theLock->procLocks);
3768  proclock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks,
3769  offsetof(PROCLOCK, lockLink));
3770  while (proclock)
3771  {
3772  PGPROC *proc = proclock->tag.myProc;
3773  LOCK *lock = proclock->tag.myLock;
3774  LockInstanceData *instance;
3775 
3776  if (data->nlocks >= data->maxlocks)
3777  {
3778  data->maxlocks += MaxBackends;
3779  data->locks = (LockInstanceData *)
3780  repalloc(data->locks, sizeof(LockInstanceData) * data->maxlocks);
3781  }
3782 
3783  instance = &data->locks[data->nlocks];
3784  memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
3785  instance->holdMask = proclock->holdMask;
3786  if (proc->waitLock == lock)
3787  instance->waitLockMode = proc->waitLockMode;
3788  else
3789  instance->waitLockMode = NoLock;
3790  instance->backend = proc->backendId;
3791  instance->lxid = proc->lxid;
3792  instance->pid = proc->pid;
3793  instance->leaderPid = proclock->groupLeader->pid;
3794  instance->fastpath = false;
3795  data->nlocks++;
3796 
3797  proclock = (PROCLOCK *) SHMQueueNext(procLocks, &proclock->lockLink,
3798  offsetof(PROCLOCK, lockLink));
3799  }
3800 
3801  /* Enlarge waiter_pids[] if it's too small to hold all wait queue PIDs */
3802  waitQueue = &(theLock->waitProcs);
3803  queue_size = waitQueue->size;
3804 
3805  if (queue_size > data->maxpids - data->npids)
3806  {
3807  data->maxpids = Max(data->maxpids + MaxBackends,
3808  data->npids + queue_size);
3809  data->waiter_pids = (int *) repalloc(data->waiter_pids,
3810  sizeof(int) * data->maxpids);
3811  }
3812 
3813  /* Collect PIDs from the lock's wait queue, stopping at blocked_proc */
3814  proc = (PGPROC *) waitQueue->links.next;
3815  for (i = 0; i < queue_size; i++)
3816  {
3817  if (proc == blocked_proc)
3818  break;
3819  data->waiter_pids[data->npids++] = proc->pid;
3820  proc = (PGPROC *) proc->links.next;
3821  }
3822 
3823  bproc->num_locks = data->nlocks - bproc->first_lock;
3824  bproc->num_waiters = data->npids - bproc->first_waiter;
3825 }
3826 
3827 /*
3828  * Returns a list of currently held AccessExclusiveLocks, for use by
3829  * LogStandbySnapshot(). The result is a palloc'd array,
3830  * with the number of elements returned into *nlocks.
3831  *
3832  * XXX This currently takes a lock on all partitions of the lock table,
3833  * but it's possible to do better. By reference counting locks and storing
3834  * the value in the ProcArray entry for each backend we could tell if any
3835  * locks need recording without having to acquire the partition locks and
3836  * scan the lock table. Whether that's worth the additional overhead
3837  * is pretty dubious though.
3838  */
3839 xl_standby_lock *
3840 GetRunningTransactionLocks(int *nlocks)
3841 {
3842  xl_standby_lock *accessExclusiveLocks;
3843  PROCLOCK *proclock;
3844  HASH_SEQ_STATUS seqstat;
3845  int i;
3846  int index;
3847  int els;
3848 
3849  /*
3850  * Acquire lock on the entire shared lock data structure.
3851  *
3852  * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
3853  */
3854  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
3855  LWLockAcquire(LockHashPartitionLockByIndex(i), LW_SHARED);
3856 
3857  /* Now we can safely count the number of proclocks */
3858  els = hash_get_num_entries(LockMethodProcLockHash);
3859 
3860  /*
3861  * Allocating enough space for all locks in the lock table is overkill,
3862  * but it's more convenient and faster than having to enlarge the array.
3863  */
3864  accessExclusiveLocks = palloc(els * sizeof(xl_standby_lock));
3865 
3866  /* Now scan the tables to copy the data */
3867  hash_seq_init(&seqstat, LockMethodProcLockHash);
3868 
3869  /*
3870  * If lock is a currently granted AccessExclusiveLock then it will have
3871  * just one proclock holder, so locks are never accessed twice in this
3872  * particular case. Don't copy this code for use elsewhere because in the
3873  * general case this will give you duplicate locks when looking at
3874  * non-exclusive lock types.
3875  */
3876  index = 0;
3877  while ((proclock = (PROCLOCK *) hash_seq_search(&seqstat)))
3878  {
3879  /* make sure this definition matches the one used in LockAcquire */
3880  if ((proclock->holdMask & LOCKBIT_ON(AccessExclusiveLock)) &&
3881  proclock->tag.myLock->tag.locktag_type == LOCKTAG_RELATION)
3882  {
3883  PGPROC *proc = proclock->tag.myProc;
3884  PGXACT *pgxact = &ProcGlobal->allPgXact[proc->pgprocno];
3885  LOCK *lock = proclock->tag.myLock;
3886  TransactionId xid = pgxact->xid;
3887 
3888  /*
3889  * Don't record locks for transactions if we know they have
3890  * already issued their WAL record for commit but not yet released the
3891  * lock. It is still possible that we see locks held by already
3892  * complete transactions, if they haven't yet zeroed their xids.
3893  */
3894  if (!TransactionIdIsValid(xid))
3895  continue;
3896 
3897  accessExclusiveLocks[index].xid = xid;
3898  accessExclusiveLocks[index].dbOid = lock->tag.locktag_field1;
3899  accessExclusiveLocks[index].relOid = lock->tag.locktag_field2;
3900 
3901  index++;
3902  }
3903  }
3904 
3905  Assert(index <= els);
3906 
3907  /*
3908  * And release locks. We do this in reverse order for two reasons: (1)
3909  * Anyone else who needs more than one of the locks will be trying to lock
3910  * them in increasing order; we don't want to release the other process
3911  * until it can get all the locks it needs. (2) This avoids O(N^2)
3912  * behavior inside LWLockRelease.
3913  */
3914  for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
3915  LWLockRelease(LockHashPartitionLockByIndex(i));
3916 
3917  *nlocks = index;
3918  return accessExclusiveLocks;
3919 }
3920 
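/*
 * Illustrative only, not part of lock.c: a sketch of how the result is
 * consumed, in the spirit of LogStandbySnapshot() in storage/ipc/standby.c,
 * which writes these entries to WAL for hot-standby conflict handling.
 * The function name is an assumption made for illustration.
 */
static void
report_running_xact_locks(void)
{
	xl_standby_lock *locks;
	int			nlocks;
	int			i;

	locks = GetRunningTransactionLocks(&nlocks);
	for (i = 0; i < nlocks; i++)
		elog(LOG, "xid %u holds AccessExclusiveLock on relation %u/%u",
			 locks[i].xid, locks[i].dbOid, locks[i].relOid);
	pfree(locks);
}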
3921 /* Provide the textual name of any lock mode */
3922 const char *
3923 GetLockmodeName(LOCKMETHODID lockmethodid, LOCKMODE mode)
3924 {
3925  Assert(lockmethodid > 0 && lockmethodid < lengthof(LockMethods));
3926  Assert(mode > 0 && mode <= LockMethods[lockmethodid]->numLockModes);
3927  return LockMethods[lockmethodid]->lockModeNames[mode];
3928 }
3929 
3930 #ifdef LOCK_DEBUG
3931 /*
3932  * Dump all locks in the given proc's myProcLocks lists.
3933  *
3934  * Caller is responsible for having acquired appropriate LWLocks.
3935  */
3936 void
3937 DumpLocks(PGPROC *proc)
3938 {
3939  SHM_QUEUE *procLocks;
3940  PROCLOCK *proclock;
3941  LOCK *lock;
3942  int i;
3943 
3944  if (proc == NULL)
3945  return;
3946 
3947  if (proc->waitLock)
3948  LOCK_PRINT("DumpLocks: waiting on", proc->waitLock, 0);
3949 
3950  for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
3951  {
3952  procLocks = &(proc->myProcLocks[i]);
3953 
3954  proclock = (PROCLOCK *) SHMQueueNext(procLocks, procLocks,
3955  offsetof(PROCLOCK, procLink));
3956 
3957  while (proclock)
3958  {
3959  Assert(proclock->tag.myProc == proc);
3960 
3961  lock = proclock->tag.myLock;
3962 
3963  PROCLOCK_PRINT("DumpLocks", proclock);
3964  LOCK_PRINT("DumpLocks", lock, 0);
3965 
3966  proclock = (PROCLOCK *)
3967  SHMQueueNext(procLocks, &proclock->procLink,
3968  offsetof(PROCLOCK, procLink));
3969  }
3970  }
3971 }
3972 
3973 /*
3974  * Dump all lmgr locks.
3975  *
3976  * Caller is responsible for having acquired appropriate LWLocks.
3977  */
3978 void
3979 DumpAllLocks(void)
3980 {
3981  PGPROC *proc;
3982  PROCLOCK *proclock;
3983  LOCK *lock;
3984  HASH_SEQ_STATUS status;
3985 
3986  proc = MyProc;
3987 
3988  if (proc && proc->waitLock)
3989  LOCK_PRINT("DumpAllLocks: waiting on", proc->waitLock, 0);
3990 
3991  hash_seq_init(&status, LockMethodProcLockHash);
3992 
3993  while ((proclock = (PROCLOCK *) hash_seq_search(&status)) != NULL)
3994  {
3995  PROCLOCK_PRINT("DumpAllLocks", proclock);
3996 
3997  lock = proclock->tag.myLock;
3998  if (lock)
3999  LOCK_PRINT("DumpAllLocks", lock, 0);
4000  else
4001  elog(LOG, "DumpAllLocks: proclock->tag.myLock = NULL");
4002  }
4003 }
4004 #endif /* LOCK_DEBUG */
4005 
4006 /*
4007  * LOCK 2PC resource manager's routines
4008  */
4009 
4010 /*
4011  * Re-acquire a lock belonging to a transaction that was prepared.
4012  *
4013  * Because this function is run at db startup, re-acquiring the locks should
4014  * never conflict with running transactions because there are none. We
4015  * assume that the lock state represented by the stored 2PC files is legal.
4016  *
4017  * When switching from Hot Standby mode to normal operation, the locks will
4018  * be already held by the startup process. The locks are acquired for the new
4019  * procs without checking for conflicts, so we don't get a conflict between the
4020  * startup process and the dummy procs, even though we will momentarily have
4021  * a situation where two procs are holding the same AccessExclusiveLock,
4022  * which isn't normally possible because of the conflict. If we're in standby
4023  * mode, but a recovery snapshot hasn't been established yet, it's possible
4024  * that some but not all of the locks are already held by the startup process.
4025  *
4026  * This approach is simple, but also a bit dangerous, because if there isn't
4027  * enough shared memory to acquire the locks, an error will be thrown, which
4028  * is promoted to FATAL and recovery will abort, bringing down the postmaster.
4029  * A safer approach would be to transfer the locks like we do in
4030  * AtPrepare_Locks, but then again, in hot standby mode it's possible for
4031  * read-only backends to use up all the shared lock memory anyway, so that
4032  * replaying the WAL record that needs to acquire a lock will throw an error
4033  * and PANIC anyway.
4034  */
4035 void
4036 lock_twophase_recover(TransactionId xid, uint16 info,
4037  void *recdata, uint32 len)
4038 {
4039  TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4040  PGPROC *proc = TwoPhaseGetDummyProc(xid, false);
4041  LOCKTAG *locktag;
4042  LOCKMODE lockmode;
4043  LOCKMETHODID lockmethodid;
4044  LOCK *lock;
4045  PROCLOCK *proclock;
4046  PROCLOCKTAG proclocktag;
4047  bool found;
4048  uint32 hashcode;
4049  uint32 proclock_hashcode;
4050  int partition;
4051  LWLock *partitionLock;
4052  LockMethod lockMethodTable;
4053 
4054  Assert(len == sizeof(TwoPhaseLockRecord));
4055  locktag = &rec->locktag;
4056  lockmode = rec->lockmode;
4057  lockmethodid = locktag->locktag_lockmethodid;
4058 
4059  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4060  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4061  lockMethodTable = LockMethods[lockmethodid];
4062 
4063  hashcode = LockTagHashCode(locktag);
4064  partition = LockHashPartition(hashcode);
4065  partitionLock = LockHashPartitionLock(hashcode);
4066 
4067  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4068 
4069  /*
4070  * Find or create a lock with this tag.
4071  */
4072  lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
4073  (void *) locktag,
4074  hashcode,
4075  HASH_ENTER_NULL,
4076  &found);
4077  if (!lock)
4078  {
4079  LWLockRelease(partitionLock);
4080  ereport(ERROR,
4081  (errcode(ERRCODE_OUT_OF_MEMORY),
4082  errmsg("out of shared memory"),
4083  errhint("You might need to increase max_locks_per_transaction.")));
4084  }
4085 
4086  /*
4087  * if it's a new lock object, initialize it
4088  */
4089  if (!found)
4090  {
4091  lock->grantMask = 0;
4092  lock->waitMask = 0;
4093  SHMQueueInit(&(lock->procLocks));
4094  ProcQueueInit(&(lock->waitProcs));
4095  lock->nRequested = 0;
4096  lock->nGranted = 0;
4097  MemSet(lock->requested, 0, sizeof(int) * MAX_LOCKMODES);
4098  MemSet(lock->granted, 0, sizeof(int) * MAX_LOCKMODES);
4099  LOCK_PRINT("lock_twophase_recover: new", lock, lockmode);
4100  }
4101  else
4102  {
4103  LOCK_PRINT("lock_twophase_recover: found", lock, lockmode);
4104  Assert((lock->nRequested >= 0) && (lock->requested[lockmode] >= 0));
4105  Assert((lock->nGranted >= 0) && (lock->granted[lockmode] >= 0));
4106  Assert(lock->nGranted <= lock->nRequested);
4107  }
4108 
4109  /*
4110  * Create the hash key for the proclock table.
4111  */
4112  proclocktag.myLock = lock;
4113  proclocktag.myProc = proc;
4114 
4115  proclock_hashcode = ProcLockHashCode(&proclocktag, hashcode);
4116 
4117  /*
4118  * Find or create a proclock entry with this tag
4119  */
4120  proclock = (PROCLOCK *) hash_search_with_hash_value(LockMethodProcLockHash,
4121  (void *) &proclocktag,
4122  proclock_hashcode,
4123  HASH_ENTER_NULL,
4124  &found);
4125  if (!proclock)
4126  {
4127  /* Oops, not enough shmem for the proclock */
4128  if (lock->nRequested == 0)
4129  {
4130  /*
4131  * There are no other requestors of this lock, so garbage-collect
4132  * the lock object. We *must* do this to avoid a permanent leak
4133  * of shared memory, because there won't be anything to cause
4134  * anyone to release the lock object later.
4135  */
4136  Assert(SHMQueueEmpty(&(lock->procLocks)));
4137  if (!hash_search_with_hash_value(LockMethodLockHash,
4138  (void *) &(lock->tag),
4139  hashcode,
4140  HASH_REMOVE,
4141  NULL))
4142  elog(PANIC, "lock table corrupted");
4143  }
4144  LWLockRelease(partitionLock);
4145  ereport(ERROR,
4146  (errcode(ERRCODE_OUT_OF_MEMORY),
4147  errmsg("out of shared memory"),
4148  errhint("You might need to increase max_locks_per_transaction.")));
4149  }
4150 
4151  /*
4152  * If new, initialize the new entry
4153  */
4154  if (!found)
4155  {
4156  Assert(proc->lockGroupLeader == NULL);
4157  proclock->groupLeader = proc;
4158  proclock->holdMask = 0;
4159  proclock->releaseMask = 0;
4160  /* Add proclock to appropriate lists */
4161  SHMQueueInsertBefore(&lock->procLocks, &proclock->lockLink);
4162  SHMQueueInsertBefore(&(proc->myProcLocks[partition]),
4163  &proclock->procLink);
4164  PROCLOCK_PRINT("lock_twophase_recover: new", proclock);
4165  }
4166  else
4167  {
4168  PROCLOCK_PRINT("lock_twophase_recover: found", proclock);
4169  Assert((proclock->holdMask & ~lock->grantMask) == 0);
4170  }
4171 
4172  /*
4173  * lock->nRequested and lock->requested[] count the total number of
4174  * requests, whether granted or waiting, so increment those immediately.
4175  */
4176  lock->nRequested++;
4177  lock->requested[lockmode]++;
4178  Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
4179 
4180  /*
4181  * We shouldn't already hold the desired lock.
4182  */
4183  if (proclock->holdMask & LOCKBIT_ON(lockmode))
4184  elog(ERROR, "lock %s on object %u/%u/%u is already held",
4185  lockMethodTable->lockModeNames[lockmode],
4186  lock->tag.locktag_field1, lock->tag.locktag_field2,
4187  lock->tag.locktag_field3);
4188 
4189  /*
4190  * We ignore any possible conflicts and just grant ourselves the lock. This
4191  * is partly for simplicity, but mainly to avoid deadlocks when
4192  * switching from standby to normal mode. See function comment.
4193  */
4194  GrantLock(lock, proclock, lockmode);
4195 
4196  /*
4197  * Bump strong lock count, to make sure any fast-path lock requests won't
4198  * be granted without consulting the primary lock table.
4199  */
4200  if (ConflictsWithRelationFastPath(&lock->tag, lockmode))
4201  {
4202  uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
4203 
4204  SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
4205  FastPathStrongRelationLocks->count[fasthashcode]++;
4206  SpinLockRelease(&FastPathStrongRelationLocks->mutex);
4207  }
4208 
4209  LWLockRelease(partitionLock);
4210 }
4211 
4212 /*
4213  * Re-acquire a lock belonging to a transaction that was prepared, when
4214  * starting up into hot standby mode.
4215  */
4216 void
4217 lock_twophase_standby_recover(TransactionId xid, uint16 info,
4218  void *recdata, uint32 len)
4219 {
4220  TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4221  LOCKTAG *locktag;
4222  LOCKMODE lockmode;
4223  LOCKMETHODID lockmethodid;
4224 
4225  Assert(len == sizeof(TwoPhaseLockRecord));
4226  locktag = &rec->locktag;
4227  lockmode = rec->lockmode;
4228  lockmethodid = locktag->locktag_lockmethodid;
4229 
4230  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4231  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4232 
4233  if (lockmode == AccessExclusiveLock &&
4234  locktag->locktag_type == LOCKTAG_RELATION)
4235  {
4236  StandbyAcquireAccessExclusiveLock(xid,
4237  locktag->locktag_field1 /* dboid */ ,
4238  locktag->locktag_field2 /* reloid */ );
4239  }
4240 }
4241 
4242 
4243 /*
4244  * 2PC processing routine for COMMIT PREPARED case.
4245  *
4246  * Find and release the lock indicated by the 2PC record.
4247  */
4248 void
4249 lock_twophase_postcommit(TransactionId xid, uint16 info,
4250  void *recdata, uint32 len)
4251 {
4252  TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
4253  PGPROC *proc = TwoPhaseGetDummyProc(xid, true);
4254  LOCKTAG *locktag;
4255  LOCKMETHODID lockmethodid;
4256  LockMethod lockMethodTable;
4257 
4258  Assert(len == sizeof(TwoPhaseLockRecord));
4259  locktag = &rec->locktag;
4260  lockmethodid = locktag->locktag_lockmethodid;
4261 
4262  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4263  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4264  lockMethodTable = LockMethods[lockmethodid];
4265 
4266  LockRefindAndRelease(lockMethodTable, proc, locktag, rec->lockmode, true);
4267 }
4268 
4269 /*
4270  * 2PC processing routine for ROLLBACK PREPARED case.
4271  *
4272  * This is actually just the same as the COMMIT case.
4273  */
4274 void
4275 lock_twophase_postabort(TransactionId xid, uint16 info,
4276  void *recdata, uint32 len)
4277 {
4278  lock_twophase_postcommit(xid, info, recdata, len);
4279 }
4280 
4281 /*
4282  * VirtualXactLockTableInsert
4283  *
4284  * Take vxid lock via the fast-path. There can't be any pre-existing
4285  * lockers, as we haven't advertised this vxid via the ProcArray yet.
4286  *
4287  * Since MyProc->fpLocalTransactionId will normally contain the same data
4288  * as MyProc->lxid, you might wonder if we really need both. The
4289  * difference is that MyProc->lxid is set and cleared unlocked, and
4290  * examined by procarray.c, while fpLocalTransactionId is protected by
4291  * backendLock and is used only by the locking subsystem. Doing it this
4292  * way makes it easier to verify that there are no funny race conditions.
4293  *
4294  * We don't bother recording this lock in the local lock table, since it's
4295  * only ever released at the end of a transaction. Instead,
4296  * LockReleaseAll() calls VirtualXactLockTableCleanup().
4297  */
4298 void
4299 VirtualXactLockTableInsert(VirtualTransactionId vxid)
4300 {
4301  Assert(VirtualTransactionIdIsValid(vxid));
4302 
4303  LWLockAcquire(&MyProc->backendLock, LW_EXCLUSIVE);
4304 
4305  Assert(MyProc->backendId == vxid.backendId);
4306  Assert(MyProc->fpLocalTransactionId == InvalidLocalTransactionId);
4307  Assert(MyProc->fpVXIDLock == false);
4308 
4309  MyProc->fpVXIDLock = true;
4310  MyProc->fpLocalTransactionId = vxid.localTransactionId;
4311 
4312  LWLockRelease(&MyProc->backendLock);
4313 }
4314 
4315 /*
4316  * VirtualXactLockTableCleanup
4317  *
4318  * Check whether a VXID lock has been materialized; if so, release it,
4319  * unblocking waiters.
4320  */
4321 void
4322 VirtualXactLockTableCleanup(void)
4323 {
4324  bool fastpath;
4325  LocalTransactionId lxid;
4326 
4327  Assert(MyBackendId != InvalidBackendId);
4328 
4329  /*
4330  * Clean up shared memory state.
4331  */
4332  LWLockAcquire(&MyProc->backendLock, LW_EXCLUSIVE);
4333 
4334  fastpath = MyProc->fpVXIDLock;
4335  lxid = MyProc->fpLocalTransactionId;
4336  MyProc->fpVXIDLock = false;
4337  MyProc->fpLocalTransactionId = InvalidLocalTransactionId;
4338 
4339  LWLockRelease(&MyProc->backendLock);
4340 
4341  /*
4342  * If fpVXIDLock has been cleared without touching fpLocalTransactionId,
4343  * that means someone transferred the lock to the main lock table.
4344  */
4345  if (!fastpath && LocalTransactionIdIsValid(lxid))
4346  {
4347  VirtualTransactionId vxid;
4348  LOCKTAG locktag;
4349 
4350  vxid.backendId = MyBackendId;
4351  vxid.localTransactionId = lxid;
4352  SET_LOCKTAG_VIRTUALTRANSACTION(locktag, vxid);
4353 
4354  LockRefindAndRelease(LockMethods[DEFAULT_LOCKMETHOD], MyProc,
4355  &locktag, ExclusiveLock, false);
4356  }
4357 }
4358 
4359 /*
4360  * VirtualXactLock
4361  *
4362  * If wait = true, wait until the given VXID has been released, and then
4363  * return true.
4364  *
4365  * If wait = false, just check whether the VXID is still running, and return
4366  * true or false.
4367  */
4368 bool
4369 VirtualXactLock(VirtualTransactionId vxid, bool wait)
4370 {
4371  LOCKTAG tag;
4372  PGPROC *proc;
4373 
4374  Assert(VirtualTransactionIdIsValid(vxid));
4375 
4376  SET_LOCKTAG_VIRTUALTRANSACTION(tag, vxid);
4377 
4378  /*
4379  * If a lock table entry must be made, this is the PGPROC on whose behalf
4380  * it must be done. Note that the transaction might end or the PGPROC
4381  * might be reassigned to a new backend before we get around to examining
4382  * it, but it doesn't matter. If we find upon examination that the
4383  * relevant lxid is no longer running here, that's enough to prove that
4384  * it's no longer running anywhere.
4385  */
4386  proc = BackendIdGetProc(vxid.backendId);
4387  if (proc == NULL)
4388  return true;
4389 
4390  /*
4391  * We must acquire this lock before checking the backendId and lxid
4392  * against the ones we're waiting for. The target backend will only set
4393  * or clear lxid while holding this lock.
4394  */
4395  LWLockAcquire(&proc->backendLock, LW_EXCLUSIVE);
4396 
4397  /* If the transaction has ended, our work here is done. */
4398  if (proc->backendId != vxid.backendId
4399  || proc->fpLocalTransactionId != vxid.localTransactionId)
4400  {
4401  LWLockRelease(&proc->backendLock);
4402  return true;
4403  }
4404 
4405  /*
4406  * If we aren't asked to wait, there's no need to set up a lock table
4407  * entry. The transaction is still in progress, so just return false.
4408  */
4409  if (!wait)
4410  {
4411  LWLockRelease(&proc->backendLock);
4412  return false;
4413  }
4414 
4415  /*
4416  * OK, we're going to need to sleep on the VXID. But first, we must set
4417  * up the primary lock table entry, if needed (ie, convert the proc's
4418  * fast-path lock on its VXID to a regular lock).
4419  */
4420  if (proc->fpVXIDLock)
4421  {
4422  PROCLOCK *proclock;
4423  uint32 hashcode;
4424  LWLock *partitionLock;
4425 
4426  hashcode = LockTagHashCode(&tag);
4427 
4428  partitionLock = LockHashPartitionLock(hashcode);
4429  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4430 
4431  proclock = SetupLockInTable(LockMethods[DEFAULT_LOCKMETHOD], proc,
4432  &tag, hashcode, ExclusiveLock);
4433  if (!proclock)
4434  {
4435  LWLockRelease(partitionLock);
4436  LWLockRelease(&proc->backendLock);
4437  ereport(ERROR,
4438  (errcode(ERRCODE_OUT_OF_MEMORY),
4439  errmsg("out of shared memory"),
4440  errhint("You might need to increase max_locks_per_transaction.")));
4441  }
4442  GrantLock(proclock->tag.myLock, proclock, ExclusiveLock);
4443 
4444  LWLockRelease(partitionLock);
4445 
4446  proc->fpVXIDLock = false;
4447  }
4448 
4449  /* Done with proc->fpLockBits */
4450  LWLockRelease(&proc->backendLock);
4451 
4452  /* Time to wait. */
4453  (void) LockAcquire(&tag, ShareLock, false, false);
4454 
4455  LockRelease(&tag, ShareLock, false);
4456  return true;
4457 }
4458 
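/*
 * Illustrative only, not part of lock.c: the usual calling pattern, in the
 * spirit of WaitForLockers() in lmgr.c -- collect conflicting vxids with
 * GetLockConflicts(), then wait each one out.  The function name is an
 * assumption made for illustration.
 */
static void
wait_for_conflicting_vxids(const LOCKTAG *locktag, LOCKMODE lockmode)
{
	VirtualTransactionId *vxids = GetLockConflicts(locktag, lockmode, NULL);
	int			i;

	/* the result array is terminated by an invalid vxid */
	for (i = 0; VirtualTransactionIdIsValid(vxids[i]); i++)
		(void) VirtualXactLock(vxids[i], true); /* sleeps until vxid ends */
}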
4459 /*
4460  * LockWaiterCount
4461  *
4462  * Find the number of lock requesters on this locktag
4463  */
4464 int
4465 LockWaiterCount(const LOCKTAG *locktag)
4466 {
4467  LOCKMETHODID lockmethodid = locktag->locktag_lockmethodid;
4468  LOCK *lock;
4469  bool found;
4470  uint32 hashcode;
4471  LWLock *partitionLock;
4472  int waiters = 0;
4473 
4474  if (lockmethodid <= 0 || lockmethodid >= lengthof(LockMethods))
4475  elog(ERROR, "unrecognized lock method: %d", lockmethodid);
4476 
4477  hashcode = LockTagHashCode(locktag);
4478  partitionLock = LockHashPartitionLock(hashcode);
4479  LWLockAcquire(partitionLock, LW_EXCLUSIVE);
4480 
4481  lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
4482  (const void *) locktag,
4483  hashcode,
4484  HASH_FIND,
4485  &found);
4486  if (found)
4487  {
4488  Assert(lock != NULL);
4489  waiters = lock->nRequested;
4490  }
4491  LWLockRelease(partitionLock);
4492 
4493  return waiters;
4494 }
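/*
 * Illustrative only, not part of lock.c: RelationExtensionLockWaiterCount()
 * in lmgr.c wraps LockWaiterCount() roughly like this, letting callers scale
 * work (e.g. how many pages to pre-extend a relation by) with contention.
 * The function name is an assumption made for illustration.
 */
static int
count_extension_lock_requesters(Oid dbid, Oid relid)
{
	LOCKTAG		tag;

	SET_LOCKTAG_RELATION_EXTEND(tag, dbid, relid);

	/* nRequested counts granted holders as well as sleeping waiters */
	return LockWaiterCount(&tag);
}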