lwlock.c
1 /*-------------------------------------------------------------------------
2  *
3  * lwlock.c
4  * Lightweight lock manager
5  *
6  * Lightweight locks are intended primarily to provide mutual exclusion of
7  * access to shared-memory data structures. Therefore, they offer both
8  * exclusive and shared lock modes (to support read/write and read-only
9  * access to a shared object). There are few other frammishes. User-level
10  * locking should be done with the full lock manager --- which depends on
11  * LWLocks to protect its shared state.
12  *
13  * In addition to exclusive and shared modes, lightweight locks can be used to
14  * wait until a variable changes value. The variable is not reset when the
15  * lock is acquired with LWLockAcquire, i.e. it keeps the value it was set
16  * to when the lock was last released, and can be updated
17  * without releasing the lock by calling LWLockUpdateVar. LWLockWaitForVar
18  * waits for the variable to be updated, or until the lock is free. When
19  * releasing the lock with LWLockReleaseClearVar() the value can be set to an
20  * appropriate value for a free lock. The meaning of the variable is up to
21  * the caller, the lightweight lock code just assigns and compares it.
22  *
23  * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
24  * Portions Copyright (c) 1994, Regents of the University of California
25  *
26  * IDENTIFICATION
27  * src/backend/storage/lmgr/lwlock.c
28  *
29  * NOTES:
30  *
31  * This used to be a pretty straightforward reader-writer lock
32  * implementation, in which the internal state was protected by a
33  * spinlock. Unfortunately the overhead of taking the spinlock proved to be
34  * too high for workloads/locks that were taken in shared mode very
35  * frequently. Often we were spinning in the (obviously exclusive) spinlock,
36  * while trying to acquire a shared lock that was actually free.
37  *
38  * Thus a new implementation was devised that provides wait-free shared lock
39  * acquisition for locks that aren't exclusively locked.
40  *
41  * The basic idea is to have a single atomic variable 'lockcount' instead of
42  * the formerly separate shared and exclusive counters and to use atomic
43  * operations to acquire the lock. That's fairly easy to do for plain
44  * rw-spinlocks, but a lot harder for something like LWLocks that want to wait
45  * in the OS.
46  *
47  * For lock acquisition we use an atomic compare-and-exchange on the lockcount
48  * variable. For exclusive lock we swap in a sentinel value
49  * (LW_VAL_EXCLUSIVE), for shared locks we count the number of holders.
50  *
51  * To release the lock we use an atomic decrement. If the
52  * new value is zero (we get that atomically), we know we can/have to release
53  * waiters.
54  *
55  * Obviously it is important that the sentinel value for exclusive locks
56  * doesn't conflict with the maximum number of possible share lockers -
57  * luckily MAX_BACKENDS makes that easily possible.
58  *
59  *
60  * The attentive reader might have noticed that naively doing the above has a
61  * glaring race condition: We try to lock using the atomic operations and
62  * notice that we have to wait. Unfortunately by the time we have finished
63  * queuing, the former locker very well might have already finished its
64  * work. That's problematic because we're now stuck waiting inside the OS.
65 
66  * To mitigate those races we use a two-phase attempt at locking:
67  * Phase 1: Try to do it atomically, if we succeed, nice
68  * Phase 2: Add ourselves to the waitqueue of the lock
69  * Phase 3: Try to grab the lock again, if we succeed, remove ourselves from
70  * the queue
71  * Phase 4: Sleep till wake-up, goto Phase 1
72  *
73  * This protects us against the problem from above, as nobody can release
74  * too quickly before we're queued, since after Phase 2 we're already queued.
75  * -------------------------------------------------------------------------
76  */
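/*
 * A minimal sketch of the two-phase protocol above (illustration only;
 * try_atomic_acquire, queue_self, dequeue_self and sleep_until_woken are
 * hypothetical stand-ins for LWLockAttemptLock, LWLockQueueSelf,
 * LWLockDequeueSelf and the semaphore wait further down in this file):
 *
 *	for (;;)
 *	{
 *		if (try_atomic_acquire(lock, mode))		-- Phase 1
 *			break;
 *		queue_self(lock, mode);					-- Phase 2
 *		if (try_atomic_acquire(lock, mode))		-- Phase 3
 *		{
 *			dequeue_self(lock);
 *			break;
 *		}
 *		sleep_until_woken();					-- Phase 4, then retry
 *	}
 */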
77 #include "postgres.h"
78 
79 #include "miscadmin.h"
80 #include "pgstat.h"
81 #include "pg_trace.h"
82 #include "postmaster/postmaster.h"
83 #include "replication/slot.h"
84 #include "storage/ipc.h"
85 #include "storage/predicate.h"
86 #include "storage/proc.h"
87 #include "storage/proclist.h"
88 #include "storage/spin.h"
89 #include "utils/memutils.h"
90 
91 #ifdef LWLOCK_STATS
92 #include "utils/hsearch.h"
93 #endif
94 
95 
96 /* We use the ShmemLock spinlock to protect LWLockCounter */
97 extern slock_t *ShmemLock;
98 
99 #define LW_FLAG_HAS_WAITERS ((uint32) 1 << 30)
100 #define LW_FLAG_RELEASE_OK ((uint32) 1 << 29)
101 #define LW_FLAG_LOCKED ((uint32) 1 << 28)
102 
103 #define LW_VAL_EXCLUSIVE ((uint32) 1 << 24)
104 #define LW_VAL_SHARED 1
105 
106 #define LW_LOCK_MASK ((uint32) ((1 << 25)-1))
107 /* Must be greater than MAX_BACKENDS - which is 2^23-1, so we're fine. */
108 #define LW_SHARED_MASK ((uint32) ((1 << 24)-1))
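/*
 * An illustrative view of the state word implied by the definitions above:
 * bit 30 = "has waiters", bit 29 = "release OK", bit 28 = "wait list
 * locked", bit 24 = the exclusive-lock sentinel, and bits 0-23 = the count
 * of shared holders. The shared count can never collide with
 * LW_VAL_EXCLUSIVE because it is bounded by MAX_BACKENDS (2^23-1).
 */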
109 
110 /*
111  * This is indexed by tranche ID and stores the names of all tranches known
112  * to the current backend.
113  */
114 static char **LWLockTrancheArray = NULL;
115 static int LWLockTranchesAllocated = 0;
116 
117 #define T_NAME(lock) \
118  (LWLockTrancheArray[(lock)->tranche])
119 
120 /*
121  * This points to the main array of LWLocks in shared memory. Backends inherit
122  * the pointer by fork from the postmaster (except in the EXEC_BACKEND case,
123  * where we have special measures to pass it down).
124  */
125 LWLockPadded *MainLWLockArray = NULL;
126 
127 /*
128  * We use this structure to keep track of locked LWLocks for release
129  * during error recovery. Normally, only a few will be held at once, but
130  * occasionally the number can be much higher; for example, the pg_buffercache
131  * extension locks all buffer partitions simultaneously.
132  */
133 #define MAX_SIMUL_LWLOCKS 200
134 
135 /* struct representing the LWLocks we're holding */
136 typedef struct LWLockHandle
137 {
138  LWLock *lock;
139  LWLockMode mode;
140 } LWLockHandle;
141 
142 static int num_held_lwlocks = 0;
143 static LWLockHandle held_lwlocks[MAX_SIMUL_LWLOCKS];
144 
145 /* struct representing the LWLock tranche request for named tranche */
146 typedef struct NamedLWLockTrancheRequest
147 {
148  char tranche_name[NAMEDATALEN];
149  int num_lwlocks;
150 } NamedLWLockTrancheRequest;
151 
152 NamedLWLockTrancheRequest *NamedLWLockTrancheRequestArray = NULL;
153 static int NamedLWLockTrancheRequestsAllocated = 0;
154 int NamedLWLockTrancheRequests = 0;
155 
156 NamedLWLockTranche *NamedLWLockTrancheArray = NULL;
157 
158 static bool lock_named_request_allowed = true;
159 
160 static void InitializeLWLocks(void);
161 static void RegisterLWLockTranches(void);
162 
163 static inline void LWLockReportWaitStart(LWLock *lock);
164 static inline void LWLockReportWaitEnd(void);
165 
166 #ifdef LWLOCK_STATS
167 typedef struct lwlock_stats_key
168 {
169  int tranche;
170  void *instance;
171 } lwlock_stats_key;
172 
173 typedef struct lwlock_stats
174 {
175  lwlock_stats_key key;
176  int sh_acquire_count;
177  int ex_acquire_count;
178  int block_count;
179  int dequeue_self_count;
180  int spin_delay_count;
181 } lwlock_stats;
182 
183 static HTAB *lwlock_stats_htab;
184 static lwlock_stats lwlock_stats_dummy;
185 #endif
186 
187 #ifdef LOCK_DEBUG
188 bool Trace_lwlocks = false;
189 
190 inline static void
191 PRINT_LWDEBUG(const char *where, LWLock *lock, LWLockMode mode)
192 {
193  /* hide statement & context here, otherwise the log is just too verbose */
194  if (Trace_lwlocks)
195  {
196  uint32 state = pg_atomic_read_u32(&lock->state);
197 
198  ereport(LOG,
199  (errhidestmt(true),
200  errhidecontext(true),
201  errmsg_internal("%d: %s(%s %p): excl %u shared %u haswaiters %u waiters %u rOK %d",
202  MyProcPid,
203  where, T_NAME(lock), lock,
204  (state & LW_VAL_EXCLUSIVE) != 0,
205  state & LW_SHARED_MASK,
206  (state & LW_FLAG_HAS_WAITERS) != 0,
207  pg_atomic_read_u32(&lock->nwaiters),
208  (state & LW_FLAG_RELEASE_OK) != 0)));
209  }
210 }
211 
212 inline static void
213 LOG_LWDEBUG(const char *where, LWLock *lock, const char *msg)
214 {
215  /* hide statement & context here, otherwise the log is just too verbose */
216  if (Trace_lwlocks)
217  {
218  ereport(LOG,
219  (errhidestmt(true),
220  errhidecontext(true),
221  errmsg_internal("%s(%s %p): %s", where,
222  T_NAME(lock), lock, msg)));
223  }
224 }
225 
226 #else /* not LOCK_DEBUG */
227 #define PRINT_LWDEBUG(a,b,c) ((void)0)
228 #define LOG_LWDEBUG(a,b,c) ((void)0)
229 #endif /* LOCK_DEBUG */
230 
231 #ifdef LWLOCK_STATS
232 
233 static void init_lwlock_stats(void);
234 static void print_lwlock_stats(int code, Datum arg);
235 static lwlock_stats * get_lwlock_stats_entry(LWLock *lockid);
236 
237 static void
238 init_lwlock_stats(void)
239 {
240  HASHCTL ctl;
241  static MemoryContext lwlock_stats_cxt = NULL;
242  static bool exit_registered = false;
243 
244  if (lwlock_stats_cxt != NULL)
245  MemoryContextDelete(lwlock_stats_cxt);
246 
247  /*
248  * The LWLock stats will be updated within a critical section, which
249  * requires allocating new hash entries. Allocations within a critical
250  * section are normally not allowed because running out of memory would
251  * lead to a PANIC, but LWLOCK_STATS is debugging code that's not normally
252  * turned on in production, so that's an acceptable risk. The hash entries
253  * are small, so the risk of running out of memory is minimal in practice.
254  */
255  lwlock_stats_cxt = AllocSetContextCreate(TopMemoryContext,
256  "LWLock stats",
257  ALLOCSET_DEFAULT_SIZES);
258  MemoryContextAllowInCriticalSection(lwlock_stats_cxt, true);
259 
260  MemSet(&ctl, 0, sizeof(ctl));
261  ctl.keysize = sizeof(lwlock_stats_key);
262  ctl.entrysize = sizeof(lwlock_stats);
263  ctl.hcxt = lwlock_stats_cxt;
264  lwlock_stats_htab = hash_create("lwlock stats", 16384, &ctl,
265  HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
266  if (!exit_registered)
267  {
268  on_shmem_exit(print_lwlock_stats, 0);
269  exit_registered = true;
270  }
271 }
272 
273 static void
274 print_lwlock_stats(int code, Datum arg)
275 {
276  HASH_SEQ_STATUS scan;
277  lwlock_stats *lwstats;
278 
279  hash_seq_init(&scan, lwlock_stats_htab);
280 
281  /* Grab an LWLock to keep different backends from mixing reports */
282  LWLockAcquire(&MainLWLockArray[0].lock, LW_EXCLUSIVE);
283 
284  while ((lwstats = (lwlock_stats *) hash_seq_search(&scan)) != NULL)
285  {
286  fprintf(stderr,
287  "PID %d lwlock %s %p: shacq %u exacq %u blk %u spindelay %u dequeue self %u\n",
288  MyProcPid, LWLockTrancheArray[lwstats->key.tranche],
289  lwstats->key.instance, lwstats->sh_acquire_count,
290  lwstats->ex_acquire_count, lwstats->block_count,
291  lwstats->spin_delay_count, lwstats->dequeue_self_count);
292  }
293 
294  LWLockRelease(&MainLWLockArray[0].lock);
295 }
296 
297 static lwlock_stats *
298 get_lwlock_stats_entry(LWLock *lock)
299 {
300  lwlock_stats_key key;
301  lwlock_stats *lwstats;
302  bool found;
303 
304  /*
305  * During shared memory initialization, the hash table doesn't exist yet.
306  * Stats of that phase aren't very interesting, so just collect operations
307  * on all locks in a single dummy entry.
308  */
309  if (lwlock_stats_htab == NULL)
310  return &lwlock_stats_dummy;
311 
312  /* Fetch or create the entry. */
313  key.tranche = lock->tranche;
314  key.instance = lock;
315  lwstats = hash_search(lwlock_stats_htab, &key, HASH_ENTER, &found);
316  if (!found)
317  {
318  lwstats->sh_acquire_count = 0;
319  lwstats->ex_acquire_count = 0;
320  lwstats->block_count = 0;
321  lwstats->dequeue_self_count = 0;
322  lwstats->spin_delay_count = 0;
323  }
324  return lwstats;
325 }
326 #endif /* LWLOCK_STATS */
327 
328 
329 /*
330  * Compute number of LWLocks required by named tranches. These will be
331  * allocated in the main array.
332  */
333 static int
334 NumLWLocksByNamedTranches(void)
335 {
336  int numLocks = 0;
337  int i;
338 
339  for (i = 0; i < NamedLWLockTrancheRequests; i++)
340  numLocks += NamedLWLockTrancheRequestArray[i].num_lwlocks;
341 
342  return numLocks;
343 }
344 
345 /*
346  * Compute shmem space needed for LWLocks and named tranches.
347  */
348 Size
349 LWLockShmemSize(void)
350 {
351  Size size;
352  int i;
353  int numLocks = NUM_FIXED_LWLOCKS;
354 
355  numLocks += NumLWLocksByNamedTranches();
356 
357  /* Space for the LWLock array. */
358  size = mul_size(numLocks, sizeof(LWLockPadded));
359 
360  /* Space for dynamic allocation counter, plus room for alignment. */
361  size = add_size(size, sizeof(int) + LWLOCK_PADDED_SIZE);
362 
363  /* space for named tranches. */
364  size = add_size(size, mul_size(NamedLWLockTrancheRequests, sizeof(NamedLWLockTranche)));
365 
366  /* space for name of each tranche. */
367  for (i = 0; i < NamedLWLockTrancheRequests; i++)
368  size = add_size(size, strlen(NamedLWLockTrancheRequestArray[i].tranche_name) + 1);
369 
370  /* Disallow named LWLocks' requests after startup */
371  lock_named_request_allowed = false;
372 
373  return size;
374 }
375 
376 /*
377  * Allocate shmem space for the main LWLock array and all tranches and
378  * initialize it. We also register all the LWLock tranches here.
379  */
380 void
381 CreateLWLocks(void)
382 {
383  StaticAssertExpr(LW_VAL_EXCLUSIVE > (uint32) MAX_BACKENDS,
384  "MAX_BACKENDS too big for lwlock.c");
385 
386  StaticAssertExpr(sizeof(LWLock) <= LWLOCK_MINIMAL_SIZE &&
387  sizeof(LWLock) <= LWLOCK_PADDED_SIZE,
388  "Miscalculated LWLock padding");
389 
390  if (!IsUnderPostmaster)
391  {
392  Size spaceLocks = LWLockShmemSize();
393  int *LWLockCounter;
394  char *ptr;
395 
396  /* Allocate space */
397  ptr = (char *) ShmemAlloc(spaceLocks);
398 
399  /* Leave room for dynamic allocation of tranches */
400  ptr += sizeof(int);
401 
402  /* Ensure desired alignment of LWLock array */
403  ptr += LWLOCK_PADDED_SIZE - ((uintptr_t) ptr) % LWLOCK_PADDED_SIZE;
404 
405  MainLWLockArray = (LWLockPadded *) ptr;
406 
407  /*
408  * Initialize the dynamic-allocation counter for tranches, which is
409  * stored just before the first LWLock.
410  */
411  LWLockCounter = (int *) ((char *) MainLWLockArray - sizeof(int));
412  *LWLockCounter = LWTRANCHE_FIRST_USER_DEFINED;
413 
414  /* Initialize all LWLocks */
415  InitializeLWLocks();
416  }
417 
418  /* Register all LWLock tranches */
419  RegisterLWLockTranches();
420 }
421 
422 /*
423  * Initialize LWLocks that are fixed and those belonging to named tranches.
424  */
425 static void
426 InitializeLWLocks(void)
427 {
428  int numNamedLocks = NumLWLocksByNamedTranches();
429  int id;
430  int i;
431  int j;
432  LWLockPadded *lock;
433 
434  /* Initialize all individual LWLocks in main array */
435  for (id = 0, lock = MainLWLockArray; id < NUM_INDIVIDUAL_LWLOCKS; id++, lock++)
436  LWLockInitialize(&lock->lock, id);
437 
438  /* Initialize buffer mapping LWLocks in main array */
439  lock = MainLWLockArray + NUM_INDIVIDUAL_LWLOCKS;
440  for (id = 0; id < NUM_BUFFER_PARTITIONS; id++, lock++)
441  LWLockInitialize(&lock->lock, LWTRANCHE_BUFFER_MAPPING);
442 
443  /* Initialize lmgrs' LWLocks in main array */
444  lock = MainLWLockArray + NUM_INDIVIDUAL_LWLOCKS + NUM_BUFFER_PARTITIONS;
445  for (id = 0; id < NUM_LOCK_PARTITIONS; id++, lock++)
446  LWLockInitialize(&lock->lock, LWTRANCHE_LOCK_MANAGER);
447 
448  /* Initialize predicate lmgrs' LWLocks in main array */
449  lock = MainLWLockArray + NUM_INDIVIDUAL_LWLOCKS +
450  NUM_BUFFER_PARTITIONS + NUM_LOCK_PARTITIONS;
451  for (id = 0; id < NUM_PREDICATELOCK_PARTITIONS; id++, lock++)
452  LWLockInitialize(&lock->lock, LWTRANCHE_PREDICATE_LOCK_MANAGER);
453 
454  /* Initialize named tranches. */
455  if (NamedLWLockTrancheRequests > 0)
456  {
457  char *trancheNames;
458 
459  NamedLWLockTrancheArray = (NamedLWLockTranche *)
460  &MainLWLockArray[NUM_FIXED_LWLOCKS + numNamedLocks];
461 
462  trancheNames = (char *) NamedLWLockTrancheArray +
463  (NamedLWLockTrancheRequests * sizeof(NamedLWLockTranche));
464  lock = &MainLWLockArray[NUM_FIXED_LWLOCKS];
465 
466  for (i = 0; i < NamedLWLockTrancheRequests; i++)
467  {
468  NamedLWLockTrancheRequest *request;
469  NamedLWLockTranche *tranche;
470  char *name;
471 
472  request = &NamedLWLockTrancheRequestArray[i];
473  tranche = &NamedLWLockTrancheArray[i];
474 
475  name = trancheNames;
476  trancheNames += strlen(request->tranche_name) + 1;
477  strcpy(name, request->tranche_name);
478  tranche->trancheId = LWLockNewTrancheId();
479  tranche->trancheName = name;
480 
481  for (j = 0; j < request->num_lwlocks; j++, lock++)
482  LWLockInitialize(&lock->lock, tranche->trancheId);
483  }
484  }
485 }
486 
487 /*
488  * Register named tranches and tranches for fixed LWLocks.
489  */
490 static void
491 RegisterLWLockTranches(void)
492 {
493  int i;
494 
495  if (LWLockTrancheArray == NULL)
496  {
497  LWLockTranchesAllocated = 128;
498  LWLockTrancheArray = (char **)
499  MemoryContextAllocZero(TopMemoryContext,
500  LWLockTranchesAllocated * sizeof(char *));
501  Assert(LWLockTranchesAllocated >= LWTRANCHE_FIRST_USER_DEFINED);
502  }
503 
504  for (i = 0; i < NUM_INDIVIDUAL_LWLOCKS; ++i)
505  LWLockRegisterTranche(i, MainLWLockNames[i]);
506 
507  LWLockRegisterTranche(LWTRANCHE_BUFFER_MAPPING, "buffer_mapping");
508  LWLockRegisterTranche(LWTRANCHE_LOCK_MANAGER, "lock_manager");
509  LWLockRegisterTranche(LWTRANCHE_PREDICATE_LOCK_MANAGER,
510  "predicate_lock_manager");
511  LWLockRegisterTranche(LWTRANCHE_PARALLEL_QUERY_DSA,
512  "parallel_query_dsa");
513  LWLockRegisterTranche(LWTRANCHE_TBM, "tbm");
514 
515  /* Register named tranches. */
516  for (i = 0; i < NamedLWLockTrancheRequests; i++)
517  LWLockRegisterTranche(NamedLWLockTrancheArray[i].trancheId,
518  NamedLWLockTrancheArray[i].trancheName);
519 }
520 
521 /*
522  * InitLWLockAccess - initialize backend-local state needed to hold LWLocks
523  */
524 void
525 InitLWLockAccess(void)
526 {
527 #ifdef LWLOCK_STATS
528  init_lwlock_stats();
529 #endif
530 }
531 
532 /*
533  * GetNamedLWLockTranche - returns the base address of LWLock from the
534  * specified tranche.
535  *
536  * Caller needs to retrieve the requested number of LWLocks starting from
537  * the base lock address returned by this API. This can be used for
538  * tranches that are requested by using RequestNamedLWLockTranche() API.
539  */
540 LWLockPadded *
541 GetNamedLWLockTranche(const char *tranche_name)
542 {
543  int lock_pos;
544  int i;
545 
546  /*
547  * Obtain the position of base address of LWLock belonging to requested
548  * tranche_name in MainLWLockArray. LWLocks for named tranches are placed
549  * in MainLWLockArray after fixed locks.
550  */
551  lock_pos = NUM_FIXED_LWLOCKS;
552  for (i = 0; i < NamedLWLockTrancheRequests; i++)
553  {
554  if (strcmp(NamedLWLockTrancheRequestArray[i].tranche_name,
555  tranche_name) == 0)
556  return &MainLWLockArray[lock_pos];
557 
558  lock_pos += NamedLWLockTrancheRequestArray[i].num_lwlocks;
559  }
560 
561  if (i >= NamedLWLockTrancheRequests)
562  elog(ERROR, "requested tranche is not registered");
563 
564  /* just to keep compiler quiet */
565  return NULL;
566 }
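/*
 * A minimal usage sketch for the named-tranche API, assuming an extension
 * loaded via shared_preload_libraries; the tranche name "my_ext" and the
 * lock count are illustrative only:
 *
 *	void
 *	_PG_init(void)
 *	{
 *		RequestNamedLWLockTranche("my_ext", 4);		-- postmaster startup
 *	}
 *
 *	-- later, once shared memory has been created:
 *	LWLockPadded *locks = GetNamedLWLockTranche("my_ext");
 *	LWLockAcquire(&locks[0].lock, LW_EXCLUSIVE);
 *	...
 *	LWLockRelease(&locks[0].lock);
 */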
567 
568 /*
569  * Allocate a new tranche ID.
570  */
571 int
572 LWLockNewTrancheId(void)
573 {
574  int result;
575  int *LWLockCounter;
576 
577  LWLockCounter = (int *) ((char *) MainLWLockArray - sizeof(int));
578  SpinLockAcquire(ShmemLock);
579  result = (*LWLockCounter)++;
580  SpinLockRelease(ShmemLock);
581 
582  return result;
583 }
584 
585 /*
586  * Register a tranche ID in the lookup table for the current process. This
587  * routine will save a pointer to the tranche name passed as an argument,
588  * so the name should be allocated in a backend-lifetime context
589  * (TopMemoryContext, static variable, or similar).
590  */
591 void
592 LWLockRegisterTranche(int tranche_id, char *tranche_name)
593 {
594  Assert(LWLockTrancheArray != NULL);
595 
596  if (tranche_id >= LWLockTranchesAllocated)
597  {
598  int i = LWLockTranchesAllocated;
599  int j = LWLockTranchesAllocated;
600 
601  while (i <= tranche_id)
602  i *= 2;
603 
604  LWLockTrancheArray = (char **)
605  repalloc(LWLockTrancheArray, i * sizeof(char *));
606  LWLockTranchesAllocated = i;
607  while (j < LWLockTranchesAllocated)
608  LWLockTrancheArray[j++] = NULL;
609  }
610 
611  LWLockTrancheArray[tranche_id] = tranche_name;
612 }
613 
614 /*
615  * RequestNamedLWLockTranche
616  * Request that extra LWLocks be allocated during postmaster
617  * startup.
618  *
619  * This is only useful for extensions if called from the _PG_init hook
620  * of a library that is loaded into the postmaster via
621  * shared_preload_libraries. Once shared memory has been allocated, calls
622  * will be ignored. (We could raise an error, but it seems better to make
623  * it a no-op, so that libraries containing such calls can be reloaded if
624  * needed.)
625  */
626 void
627 RequestNamedLWLockTranche(const char *tranche_name, int num_lwlocks)
628 {
629  NamedLWLockTrancheRequest *request;
630 
631  if (IsUnderPostmaster || !lock_named_request_allowed)
632  return; /* too late */
633 
634  if (NamedLWLockTrancheRequestArray == NULL)
635  {
636  NamedLWLockTrancheRequestsAllocated = 16;
637  NamedLWLockTrancheRequestArray = (NamedLWLockTrancheRequest *)
638  MemoryContextAlloc(TopMemoryContext,
639  NamedLWLockTrancheRequestsAllocated
640  * sizeof(NamedLWLockTrancheRequest));
641  }
642 
643  if (NamedLWLockTrancheRequests >= NamedLWLockTrancheRequestsAllocated)
644  {
645  int i = NamedLWLockTrancheRequestsAllocated;
646 
647  while (i <= NamedLWLockTrancheRequests)
648  i *= 2;
649 
650  NamedLWLockTrancheRequestArray = (NamedLWLockTrancheRequest *)
651  repalloc(NamedLWLockTrancheRequestArray,
652  i * sizeof(NamedLWLockTrancheRequest));
653  NamedLWLockTrancheRequestsAllocated = i;
654  }
655 
656  request = &NamedLWLockTrancheRequestArray[NamedLWLockTrancheRequests];
657  Assert(strlen(tranche_name) + 1 < NAMEDATALEN);
658  StrNCpy(request->tranche_name, tranche_name, NAMEDATALEN);
659  request->num_lwlocks = num_lwlocks;
660  NamedLWLockTrancheRequests++;
661 }
662 
663 /*
664  * LWLockInitialize - initialize a new lwlock; it's initially unlocked
665  */
666 void
667 LWLockInitialize(LWLock *lock, int tranche_id)
668 {
669  pg_atomic_init_u32(&lock->state, LW_FLAG_RELEASE_OK);
670 #ifdef LOCK_DEBUG
671  pg_atomic_init_u32(&lock->nwaiters, 0);
672 #endif
673  lock->tranche = tranche_id;
674  proclist_init(&lock->waiters);
675 }
676 
677 /*
678  * Report start of wait event for light-weight locks.
679  *
680  * This function is used by all the light-weight lock calls that need
681  * to wait to acquire the lock. It distinguishes the wait event based
682  * on the tranche and lock id.
683  */
684 static inline void
685 LWLockReportWaitStart(LWLock *lock)
686 {
687  pgstat_report_wait_start(PG_WAIT_LWLOCK | lock->tranche);
688 }
689 
690 /*
691  * Report end of wait event for light-weight locks.
692  */
693 static inline void
694 LWLockReportWaitEnd(void)
695 {
696  pgstat_report_wait_end();
697 }
698 
699 /*
700  * Return an identifier for an LWLock based on the wait class and event.
701  */
702 const char *
703 GetLWLockIdentifier(uint32 classId, uint16 eventId)
704 {
705  Assert(classId == PG_WAIT_LWLOCK);
706 
707  /*
708  * It is quite possible that the user has registered the tranche in one
709  * of the backends (e.g. by allocating lwlocks in dynamic shared memory)
710  * but not in all of them, so we can't assume the tranche is registered here.
711  */
712  if (eventId >= LWLockTranchesAllocated ||
713  LWLockTrancheArray[eventId] == NULL)
714  return "extension";
715 
716  return LWLockTrancheArray[eventId];
717 }
718 
719 /*
720  * Internal function that tries to atomically acquire the lwlock in the passed
721  * in mode.
722  *
723  * This function will not block waiting for a lock to become free - that's the
724  * the caller's job.
725  *
726  * Returns true if the lock isn't free and we need to wait.
727  */
728 static bool
729 LWLockAttemptLock(LWLock *lock, LWLockMode mode)
730 {
731  uint32 old_state;
732 
733  AssertArg(mode == LW_EXCLUSIVE || mode == LW_SHARED);
734 
735  /*
736  * Read once outside the loop, later iterations will get the newer value
737  * via compare & exchange.
738  */
739  old_state = pg_atomic_read_u32(&lock->state);
740 
741  /* loop until we've determined whether we could acquire the lock or not */
742  while (true)
743  {
744  uint32 desired_state;
745  bool lock_free;
746 
747  desired_state = old_state;
748 
749  if (mode == LW_EXCLUSIVE)
750  {
751  lock_free = (old_state & LW_LOCK_MASK) == 0;
752  if (lock_free)
753  desired_state += LW_VAL_EXCLUSIVE;
754  }
755  else
756  {
757  lock_free = (old_state & LW_VAL_EXCLUSIVE) == 0;
758  if (lock_free)
759  desired_state += LW_VAL_SHARED;
760  }
761 
762  /*
763  * Attempt to swap in the state we are expecting. If we didn't see the
764  * lock as free, that's just the old value. If we saw it as free,
765  * we'll attempt to mark it acquired. The reason that we always swap
766  * in the value is that this doubles as a memory barrier. We could try
767  * to be smarter and only swap in values if we saw the lock as free,
768  * but benchmarks haven't shown it to be beneficial so far.
769  *
770  * Retry if the value changed since we last looked at it.
771  */
772  if (pg_atomic_compare_exchange_u32(&lock->state,
773  &old_state, desired_state))
774  {
775  if (lock_free)
776  {
777  /* Great! Got the lock. */
778 #ifdef LOCK_DEBUG
779  if (mode == LW_EXCLUSIVE)
780  lock->owner = MyProc;
781 #endif
782  return false;
783  }
784  else
785  return true; /* somebody else has the lock */
786  }
787  }
788  pg_unreachable();
789 }
790 
791 /*
792  * Lock the LWLock's wait list against concurrent activity.
793  *
794  * NB: even though the wait list is locked, non-conflicting lock operations
795  * may still happen concurrently.
796  *
797  * Time spent holding mutex should be short!
798  */
799 static void
800 LWLockWaitListLock(LWLock *lock)
801 {
802  uint32 old_state;
803 #ifdef LWLOCK_STATS
804  lwlock_stats *lwstats;
805  uint32 delays = 0;
806 
807  lwstats = get_lwlock_stats_entry(lock);
808 #endif
809 
810  while (true)
811  {
812  /* always try once to acquire lock directly */
813  old_state = pg_atomic_fetch_or_u32(&lock->state, LW_FLAG_LOCKED);
814  if (!(old_state & LW_FLAG_LOCKED))
815  break; /* got lock */
816 
817  /* and then spin without atomic operations until lock is released */
818  {
819  SpinDelayStatus delayStatus;
820 
821  init_local_spin_delay(&delayStatus);
822 
823  while (old_state & LW_FLAG_LOCKED)
824  {
825  perform_spin_delay(&delayStatus);
826  old_state = pg_atomic_read_u32(&lock->state);
827  }
828 #ifdef LWLOCK_STATS
829  delays += delayStatus.delays;
830 #endif
831  finish_spin_delay(&delayStatus);
832  }
833 
834  /*
835  * Retry. The lock might well have been re-acquired by the time
836  * we attempt to get it again.
837  */
838  }
839 
840 #ifdef LWLOCK_STATS
841  lwstats->spin_delay_count += delays;
842 #endif
843 }
844 
845 /*
846  * Unlock the LWLock's wait list.
847  *
848  * Note that it can be more efficient to manipulate flags and release the
849  * locks in a single atomic operation.
850  */
851 static void
852 LWLockWaitListUnlock(LWLock *lock)
853 {
854  uint32 old_state PG_USED_FOR_ASSERTS_ONLY;
855 
856  old_state = pg_atomic_fetch_and_u32(&lock->state, ~LW_FLAG_LOCKED);
857 
858  Assert(old_state & LW_FLAG_LOCKED);
859 }
860 
861 /*
862  * Wakeup all the lockers that currently have a chance to acquire the lock.
863  */
864 static void
865 LWLockWakeup(LWLock *lock)
866 {
867  bool new_release_ok;
868  bool wokeup_somebody = false;
869  proclist_head wakeup;
870  proclist_mutable_iter iter;
871 
872  proclist_init(&wakeup);
873 
874  new_release_ok = true;
875 
876  /* lock wait list while collecting backends to wake up */
877  LWLockWaitListLock(lock);
878 
879  proclist_foreach_modify(iter, &lock->waiters, lwWaitLink)
880  {
881  PGPROC *waiter = GetPGProcByNumber(iter.cur);
882 
883  if (wokeup_somebody && waiter->lwWaitMode == LW_EXCLUSIVE)
884  continue;
885 
886  proclist_delete(&lock->waiters, iter.cur, lwWaitLink);
887  proclist_push_tail(&wakeup, iter.cur, lwWaitLink);
888 
889  if (waiter->lwWaitMode != LW_WAIT_UNTIL_FREE)
890  {
891  /*
892  * Prevent additional wakeups until retryer gets to run. Backends
893  * that are just waiting for the lock to become free don't retry
894  * automatically.
895  */
896  new_release_ok = false;
897 
898  /*
899  * Don't wakeup (further) exclusive locks.
900  */
901  wokeup_somebody = true;
902  }
903 
904  /*
905  * Once we've woken up an exclusive lock, there's no point in waking
906  * up anybody else.
907  */
908  if (waiter->lwWaitMode == LW_EXCLUSIVE)
909  break;
910  }
911 
912  Assert(proclist_is_empty(&wakeup) || pg_atomic_read_u32(&lock->state) & LW_FLAG_HAS_WAITERS);
913 
914  /* unset required flags, and release lock, in one fell swoop */
915  {
916  uint32 old_state;
917  uint32 desired_state;
918 
919  old_state = pg_atomic_read_u32(&lock->state);
920  while (true)
921  {
922  desired_state = old_state;
923 
924  /* compute desired flags */
925 
926  if (new_release_ok)
927  desired_state |= LW_FLAG_RELEASE_OK;
928  else
929  desired_state &= ~LW_FLAG_RELEASE_OK;
930 
931  if (proclist_is_empty(&wakeup))
932  desired_state &= ~LW_FLAG_HAS_WAITERS;
933 
934  desired_state &= ~LW_FLAG_LOCKED; /* release lock */
935 
936  if (pg_atomic_compare_exchange_u32(&lock->state, &old_state,
937  desired_state))
938  break;
939  }
940  }
941 
942  /* Awaken any waiters I removed from the queue. */
943  proclist_foreach_modify(iter, &wakeup, lwWaitLink)
944  {
945  PGPROC *waiter = GetPGProcByNumber(iter.cur);
946 
947  LOG_LWDEBUG("LWLockRelease", lock, "release waiter");
948  proclist_delete(&wakeup, iter.cur, lwWaitLink);
949 
950  /*
951  * Guarantee that lwWaiting being unset only becomes visible once the
952  * unlink from the list has completed. Otherwise the target backend
953  * could be woken up for another reason and enqueue for a new lock - if
954  * that happens before the list unlink happens, the list would end up
955  * being corrupted.
956  *
957  * The barrier pairs with the LWLockWaitListLock() when enqueuing for
958  * another lock.
959  */
960  pg_write_barrier();
961  waiter->lwWaiting = false;
962  PGSemaphoreUnlock(waiter->sem);
963  }
964 }
965 
966 /*
967  * Add ourselves to the end of the queue.
968  *
969  * NB: Mode can be LW_WAIT_UNTIL_FREE here!
970  */
971 static void
972 LWLockQueueSelf(LWLock *lock, LWLockMode mode)
973 {
974  /*
975  * If we don't have a PGPROC structure, there's no way to wait. This
976  * should never occur, since MyProc should only be null during shared
977  * memory initialization.
978  */
979  if (MyProc == NULL)
980  elog(PANIC, "cannot wait without a PGPROC structure");
981 
982  if (MyProc->lwWaiting)
983  elog(PANIC, "queueing for lock while waiting on another one");
984 
985  LWLockWaitListLock(lock);
986 
987  /* setting the flag is protected by the spinlock */
988  pg_atomic_fetch_or_u32(&lock->state, LW_FLAG_HAS_WAITERS);
989 
990  MyProc->lwWaiting = true;
991  MyProc->lwWaitMode = mode;
992 
993  /* LW_WAIT_UNTIL_FREE waiters are always at the front of the queue */
994  if (mode == LW_WAIT_UNTIL_FREE)
995  proclist_push_head(&lock->waiters, MyProc->pgprocno, lwWaitLink);
996  else
997  proclist_push_tail(&lock->waiters, MyProc->pgprocno, lwWaitLink);
998 
999  /* Can release the mutex now */
1000  LWLockWaitListUnlock(lock);
1001 
1002 #ifdef LOCK_DEBUG
1003  pg_atomic_fetch_add_u32(&lock->nwaiters, 1);
1004 #endif
1005 
1006 }
1007 
1008 /*
1009  * Remove ourselves from the waitlist.
1010  *
1011  * This is used if we queued ourselves because we thought we needed to sleep
1012  * but, after further checking, we discovered that we don't actually need to
1013  * do so.
1014  */
1015 static void
1016 LWLockDequeueSelf(LWLock *lock)
1017 {
1018  bool found = false;
1019  proclist_mutable_iter iter;
1020 
1021 #ifdef LWLOCK_STATS
1022  lwlock_stats *lwstats;
1023 
1024  lwstats = get_lwlock_stats_entry(lock);
1025 
1026  lwstats->dequeue_self_count++;
1027 #endif
1028 
1029  LWLockWaitListLock(lock);
1030 
1031  /*
1032  * We can't just remove ourselves from the list; we need to iterate over
1033  * all entries, as somebody else could already have dequeued us.
1034  */
1035  proclist_foreach_modify(iter, &lock->waiters, lwWaitLink)
1036  {
1037  if (iter.cur == MyProc->pgprocno)
1038  {
1039  found = true;
1040  proclist_delete(&lock->waiters, iter.cur, lwWaitLink);
1041  break;
1042  }
1043  }
1044 
1045  if (proclist_is_empty(&lock->waiters) &&
1046  (pg_atomic_read_u32(&lock->state) & LW_FLAG_HAS_WAITERS) != 0)
1047  {
1048  pg_atomic_fetch_and_u32(&lock->state, ~LW_FLAG_HAS_WAITERS);
1049  }
1050 
1051  /* XXX: combine with fetch_and above? */
1052  LWLockWaitListUnlock(lock);
1053 
1054  /* clear waiting state again, nice for debugging */
1055  if (found)
1056  MyProc->lwWaiting = false;
1057  else
1058  {
1059  int extraWaits = 0;
1060 
1061  /*
1062  * Somebody else dequeued us and has or will wake us up. Deal with the
1063  * superfluous absorption of a wakeup.
1064  */
1065 
1066  /*
1067  * Reset releaseOk if somebody woke us before we removed ourselves -
1068  * they'll have set it to false.
1069  */
1070  pg_atomic_fetch_or_u32(&lock->state, LW_FLAG_RELEASE_OK);
1071 
1072  /*
1073  * Now wait for the scheduled wakeup, otherwise our ->lwWaiting would
1074  * get reset at some inconvenient point later. Most of the time this
1075  * will immediately return.
1076  */
1077  for (;;)
1078  {
1079  PGSemaphoreLock(MyProc->sem);
1080  if (!MyProc->lwWaiting)
1081  break;
1082  extraWaits++;
1083  }
1084 
1085  /*
1086  * Fix the process wait semaphore's count for any absorbed wakeups.
1087  */
1088  while (extraWaits-- > 0)
1089  PGSemaphoreUnlock(MyProc->sem);
1090  }
1091 
1092 #ifdef LOCK_DEBUG
1093  {
1094  /* not waiting anymore */
1095  uint32 nwaiters PG_USED_FOR_ASSERTS_ONLY = pg_atomic_fetch_sub_u32(&lock->nwaiters, 1);
1096 
1097  Assert(nwaiters < MAX_BACKENDS);
1098  }
1099 #endif
1100 }
1101 
1102 /*
1103  * LWLockAcquire - acquire a lightweight lock in the specified mode
1104  *
1105  * If the lock is not available, sleep until it is. Returns true if the lock
1106  * was available immediately, false if we had to sleep.
1107  *
1108  * Side effect: cancel/die interrupts are held off until lock release.
1109  */
1110 bool
1111 LWLockAcquire(LWLock *lock, LWLockMode mode)
1112 {
1113  PGPROC *proc = MyProc;
1114  bool result = true;
1115  int extraWaits = 0;
1116 #ifdef LWLOCK_STATS
1117  lwlock_stats *lwstats;
1118 
1119  lwstats = get_lwlock_stats_entry(lock);
1120 #endif
1121 
1122  AssertArg(mode == LW_SHARED || mode == LW_EXCLUSIVE);
1123 
1124  PRINT_LWDEBUG("LWLockAcquire", lock, mode);
1125 
1126 #ifdef LWLOCK_STATS
1127  /* Count lock acquisition attempts */
1128  if (mode == LW_EXCLUSIVE)
1129  lwstats->ex_acquire_count++;
1130  else
1131  lwstats->sh_acquire_count++;
1132 #endif /* LWLOCK_STATS */
1133 
1134  /*
1135  * We can't wait if we haven't got a PGPROC. This should only occur
1136  * during bootstrap or shared memory initialization. Put an Assert here
1137  * to catch unsafe coding practices.
1138  */
1139  Assert(!(proc == NULL && IsUnderPostmaster));
1140 
1141  /* Ensure we will have room to remember the lock */
1142  if (num_held_lwlocks >= MAX_SIMUL_LWLOCKS)
1143  elog(ERROR, "too many LWLocks taken");
1144 
1145  /*
1146  * Lock out cancel/die interrupts until we exit the code section protected
1147  * by the LWLock. This ensures that interrupts will not interfere with
1148  * manipulations of data structures in shared memory.
1149  */
1150  HOLD_INTERRUPTS();
1151 
1152  /*
1153  * Loop here to try to acquire lock after each time we are signaled by
1154  * LWLockRelease.
1155  *
1156  * NOTE: it might seem better to have LWLockRelease actually grant us the
1157  * lock, rather than retrying and possibly having to go back to sleep. But
1158  * in practice that is no good because it means a process swap for every
1159  * lock acquisition when two or more processes are contending for the same
1160  * lock. Since LWLocks are normally used to protect not-very-long
1161  * sections of computation, a process needs to be able to acquire and
1162  * release the same lock many times during a single CPU time slice, even
1163  * in the presence of contention. The efficiency of being able to do that
1164  * outweighs the inefficiency of sometimes wasting a process dispatch
1165  * cycle because the lock is not free when a released waiter finally gets
1166  * to run. See pgsql-hackers archives for 29-Dec-01.
1167  */
1168  for (;;)
1169  {
1170  bool mustwait;
1171 
1172  /*
1173  * Try to grab the lock the first time, we're not in the waitqueue
1174  * yet/anymore.
1175  */
1176  mustwait = LWLockAttemptLock(lock, mode);
1177 
1178  if (!mustwait)
1179  {
1180  LOG_LWDEBUG("LWLockAcquire", lock, "immediately acquired lock");
1181  break; /* got the lock */
1182  }
1183 
1184  /*
1185  * Ok, at this point we couldn't grab the lock on the first try. We
1186  * cannot simply queue ourselves to the end of the list and wait to be
1187  * woken up because by now the lock could long have been released.
1188  * Instead add us to the queue and try to grab the lock again. If we
1189  * succeed we need to revert the queuing and be happy, otherwise we
1190  * recheck the lock. If we still couldn't grab it, we know that the
1191  * other locker will see our queue entries when releasing since they
1192  * existed before we checked for the lock.
1193  */
1194 
1195  /* add to the queue */
1196  LWLockQueueSelf(lock, mode);
1197 
1198  /* we're now guaranteed to be woken up if necessary */
1199  mustwait = LWLockAttemptLock(lock, mode);
1200 
1201  /* ok, grabbed the lock the second time round, need to undo queueing */
1202  if (!mustwait)
1203  {
1204  LOG_LWDEBUG("LWLockAcquire", lock, "acquired, undoing queue");
1205 
1206  LWLockDequeueSelf(lock);
1207  break;
1208  }
1209 
1210  /*
1211  * Wait until awakened.
1212  *
1213  * Since we share the process wait semaphore with the regular lock
1214  * manager and ProcWaitForSignal, and we may need to acquire an LWLock
1215  * while one of those is pending, it is possible that we get awakened
1216  * for a reason other than being signaled by LWLockRelease. If so,
1217  * loop back and wait again. Once we've gotten the LWLock,
1218  * re-increment the sema by the number of additional signals received,
1219  * so that the lock manager or signal manager will see the received
1220  * signal when it next waits.
1221  */
1222  LOG_LWDEBUG("LWLockAcquire", lock, "waiting");
1223 
1224 #ifdef LWLOCK_STATS
1225  lwstats->block_count++;
1226 #endif
1227 
1228  LWLockReportWaitStart(lock);
1229  TRACE_POSTGRESQL_LWLOCK_WAIT_START(T_NAME(lock), mode);
1230 
1231  for (;;)
1232  {
1233  PGSemaphoreLock(proc->sem);
1234  if (!proc->lwWaiting)
1235  break;
1236  extraWaits++;
1237  }
1238 
1239  /* Retrying, allow LWLockRelease to release waiters again. */
1240  pg_atomic_fetch_or_u32(&lock->state, LW_FLAG_RELEASE_OK);
1241 
1242 #ifdef LOCK_DEBUG
1243  {
1244  /* not waiting anymore */
1245  uint32 nwaiters PG_USED_FOR_ASSERTS_ONLY = pg_atomic_fetch_sub_u32(&lock->nwaiters, 1);
1246 
1247  Assert(nwaiters < MAX_BACKENDS);
1248  }
1249 #endif
1250 
1251  TRACE_POSTGRESQL_LWLOCK_WAIT_DONE(T_NAME(lock), mode);
1252  LWLockReportWaitEnd();
1253 
1254  LOG_LWDEBUG("LWLockAcquire", lock, "awakened");
1255 
1256  /* Now loop back and try to acquire lock again. */
1257  result = false;
1258  }
1259 
1260  TRACE_POSTGRESQL_LWLOCK_ACQUIRE(T_NAME(lock), mode);
1261 
1262  /* Add lock to list of locks held by this backend */
1263  held_lwlocks[num_held_lwlocks].lock = lock;
1264  held_lwlocks[num_held_lwlocks++].mode = mode;
1265 
1266  /*
1267  * Fix the process wait semaphore's count for any absorbed wakeups.
1268  */
1269  while (extraWaits-- > 0)
1270  PGSemaphoreUnlock(proc->sem);
1271 
1272  return result;
1273 }
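/*
 * A minimal caller-side sketch, assuming a hypothetical lock pointer
 * 'mylock' protecting a hypothetical shared-memory field 'shared->counter':
 *
 *	LWLockAcquire(mylock, LW_EXCLUSIVE);
 *	shared->counter++;				-- protected update
 *	LWLockRelease(mylock);
 *
 * Read-only accesses can use LW_SHARED instead; LWLockAttemptLock grants
 * shared locks without blocking whenever no exclusive holder is present.
 */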
1274 
1275 /*
1276  * LWLockConditionalAcquire - acquire a lightweight lock in the specified mode
1277  *
1278  * If the lock is not available, return FALSE with no side-effects.
1279  *
1280  * If successful, cancel/die interrupts are held off until lock release.
1281  */
1282 bool
1283 LWLockConditionalAcquire(LWLock *lock, LWLockMode mode)
1284 {
1285  bool mustwait;
1286 
1287  AssertArg(mode == LW_SHARED || mode == LW_EXCLUSIVE);
1288 
1289  PRINT_LWDEBUG("LWLockConditionalAcquire", lock, mode);
1290 
1291  /* Ensure we will have room to remember the lock */
1292  if (num_held_lwlocks >= MAX_SIMUL_LWLOCKS)
1293  elog(ERROR, "too many LWLocks taken");
1294 
1295  /*
1296  * Lock out cancel/die interrupts until we exit the code section protected
1297  * by the LWLock. This ensures that interrupts will not interfere with
1298  * manipulations of data structures in shared memory.
1299  */
1300  HOLD_INTERRUPTS();
1301 
1302  /* Check for the lock */
1303  mustwait = LWLockAttemptLock(lock, mode);
1304 
1305  if (mustwait)
1306  {
1307  /* Failed to get lock, so release interrupt holdoff */
1308  RESUME_INTERRUPTS();
1309 
1310  LOG_LWDEBUG("LWLockConditionalAcquire", lock, "failed");
1311  TRACE_POSTGRESQL_LWLOCK_CONDACQUIRE_FAIL(T_NAME(lock), mode);
1312  }
1313  else
1314  {
1315  /* Add lock to list of locks held by this backend */
1316  held_lwlocks[num_held_lwlocks].lock = lock;
1317  held_lwlocks[num_held_lwlocks++].mode = mode;
1318  TRACE_POSTGRESQL_LWLOCK_CONDACQUIRE(T_NAME(lock), mode);
1319  }
1320  return !mustwait;
1321 }
1322 
1323 /*
1324  * LWLockAcquireOrWait - Acquire lock, or wait until it's free
1325  *
1326  * The semantics of this function are a bit funky. If the lock is currently
1327  * free, it is acquired in the given mode, and the function returns true. If
1328  * the lock isn't immediately free, the function waits until it is released
1329  * and returns false, but does not acquire the lock.
1330  *
1331  * This is currently used for WALWriteLock: when a backend flushes the WAL,
1332  * holding WALWriteLock, it can flush the commit records of many other
1333  * backends as a side-effect. Those other backends need to wait until the
1334  * flush finishes, but don't need to acquire the lock anymore. They can just
1335  * wake up, observe that their records have already been flushed, and return.
1336  */
1337 bool
1338 LWLockAcquireOrWait(LWLock *lock, LWLockMode mode)
1339 {
1340  PGPROC *proc = MyProc;
1341  bool mustwait;
1342  int extraWaits = 0;
1343 #ifdef LWLOCK_STATS
1344  lwlock_stats *lwstats;
1345 
1346  lwstats = get_lwlock_stats_entry(lock);
1347 #endif
1348 
1349  Assert(mode == LW_SHARED || mode == LW_EXCLUSIVE);
1350 
1351  PRINT_LWDEBUG("LWLockAcquireOrWait", lock, mode);
1352 
1353  /* Ensure we will have room to remember the lock */
1354  if (num_held_lwlocks >= MAX_SIMUL_LWLOCKS)
1355  elog(ERROR, "too many LWLocks taken");
1356 
1357  /*
1358  * Lock out cancel/die interrupts until we exit the code section protected
1359  * by the LWLock. This ensures that interrupts will not interfere with
1360  * manipulations of data structures in shared memory.
1361  */
1362  HOLD_INTERRUPTS();
1363 
1364  /*
1365  * NB: We're using nearly the same twice-in-a-row lock acquisition
1366  * protocol as LWLockAcquire(). Check its comments for details.
1367  */
1368  mustwait = LWLockAttemptLock(lock, mode);
1369 
1370  if (mustwait)
1371  {
1372  LWLockQueueSelf(lock, LW_WAIT_UNTIL_FREE);
1373 
1374  mustwait = LWLockAttemptLock(lock, mode);
1375 
1376  if (mustwait)
1377  {
1378  /*
1379  * Wait until awakened. Like in LWLockAcquire, be prepared for
1380  * bogus wakeups, because we share the semaphore with
1381  * ProcWaitForSignal.
1382  */
1383  LOG_LWDEBUG("LWLockAcquireOrWait", lock, "waiting");
1384 
1385 #ifdef LWLOCK_STATS
1386  lwstats->block_count++;
1387 #endif
1388 
1389  LWLockReportWaitStart(lock);
1390  TRACE_POSTGRESQL_LWLOCK_WAIT_START(T_NAME(lock), mode);
1391 
1392  for (;;)
1393  {
1394  PGSemaphoreLock(proc->sem);
1395  if (!proc->lwWaiting)
1396  break;
1397  extraWaits++;
1398  }
1399 
1400 #ifdef LOCK_DEBUG
1401  {
1402  /* not waiting anymore */
1403  uint32 nwaiters PG_USED_FOR_ASSERTS_ONLY = pg_atomic_fetch_sub_u32(&lock->nwaiters, 1);
1404 
1405  Assert(nwaiters < MAX_BACKENDS);
1406  }
1407 #endif
1408  TRACE_POSTGRESQL_LWLOCK_WAIT_DONE(T_NAME(lock), mode);
1409  LWLockReportWaitEnd();
1410 
1411  LOG_LWDEBUG("LWLockAcquireOrWait", lock, "awakened");
1412  }
1413  else
1414  {
1415  LOG_LWDEBUG("LWLockAcquireOrWait", lock, "acquired, undoing queue");
1416 
1417  /*
1418  * Got lock in the second attempt, undo queueing. We need to treat
1419  * this as having successfully acquired the lock, otherwise we'd
1420  * not necessarily wake up people we've prevented from acquiring
1421  * the lock.
1422  */
1423  LWLockDequeueSelf(lock);
1424  }
1425  }
1426 
1427  /*
1428  * Fix the process wait semaphore's count for any absorbed wakeups.
1429  */
1430  while (extraWaits-- > 0)
1431  PGSemaphoreUnlock(proc->sem);
1432 
1433  if (mustwait)
1434  {
1435  /* Failed to get lock, so release interrupt holdoff */
1436  RESUME_INTERRUPTS();
1437  LOG_LWDEBUG("LWLockAcquireOrWait", lock, "failed");
1438  TRACE_POSTGRESQL_LWLOCK_ACQUIRE_OR_WAIT_FAIL(T_NAME(lock), mode);
1439  }
1440  else
1441  {
1442  LOG_LWDEBUG("LWLockAcquireOrWait", lock, "succeeded");
1443  /* Add lock to list of locks held by this backend */
1444  held_lwlocks[num_held_lwlocks].lock = lock;
1445  held_lwlocks[num_held_lwlocks++].mode = mode;
1446  TRACE_POSTGRESQL_LWLOCK_ACQUIRE_OR_WAIT(T_NAME(lock), mode);
1447  }
1448 
1449  return !mustwait;
1450 }
1451 
1452 /*
1453  * Does the lwlock in its current state need to wait for the variable value to
1454  * change?
1455  *
1456  * If we don't need to wait, and it's because the value of the variable has
1457  * changed, store the current value in newval.
1458  *
1459  * *result is set to true if the lock was free, and false otherwise.
1460  */
1461 static bool
1462 LWLockConflictsWithVar(LWLock *lock,
1463  uint64 *valptr, uint64 oldval, uint64 *newval,
1464  bool *result)
1465 {
1466  bool mustwait;
1467  uint64 value;
1468 
1469  /*
1470  * Test first to see if the slot is free right now.
1471  *
1472  * XXX: the caller uses a spinlock before this, so we don't need a memory
1473  * barrier here as far as the current usage is concerned. But that might
1474  * not be safe in general.
1475  */
1476  mustwait = (pg_atomic_read_u32(&lock->state) & LW_VAL_EXCLUSIVE) != 0;
1477 
1478  if (!mustwait)
1479  {
1480  *result = true;
1481  return false;
1482  }
1483 
1484  *result = false;
1485 
1486  /*
1487  * Read value using the lwlock's wait list lock, as we can't generally
1488  * rely on atomic 64 bit reads/stores. TODO: On platforms with a way to
1489  * do atomic 64 bit reads/writes the spinlock should be optimized away.
1490  */
1491  LWLockWaitListLock(lock);
1492  value = *valptr;
1493  LWLockWaitListUnlock(lock);
1494 
1495  if (value != oldval)
1496  {
1497  mustwait = false;
1498  *newval = value;
1499  }
1500  else
1501  {
1502  mustwait = true;
1503  }
1504 
1505  return mustwait;
1506 }
1507 
1508 /*
1509  * LWLockWaitForVar - Wait until lock is free, or a variable is updated.
1510  *
1511  * If the lock is held and *valptr equals oldval, waits until the lock is
1512  * either freed, or the lock holder updates *valptr by calling
1513  * LWLockUpdateVar. If the lock is free on exit (immediately or after
1514  * waiting), returns true. If the lock is still held, but *valptr no longer
1515  * matches oldval, returns false and sets *newval to the current value in
1516  * *valptr.
1517  *
1518  * Note: this function ignores shared lock holders; if the lock is held
1519  * in shared mode, returns 'true'.
1520  */
1521 bool
1522 LWLockWaitForVar(LWLock *lock, uint64 *valptr, uint64 oldval, uint64 *newval)
1523 {
1524  PGPROC *proc = MyProc;
1525  int extraWaits = 0;
1526  bool result = false;
1527 #ifdef LWLOCK_STATS
1528  lwlock_stats *lwstats;
1529 
1530  lwstats = get_lwlock_stats_entry(lock);
1531 #endif
1532 
1533  PRINT_LWDEBUG("LWLockWaitForVar", lock, LW_WAIT_UNTIL_FREE);
1534 
1535  /*
1536  * Lock out cancel/die interrupts while we sleep on the lock. There is no
1537  * cleanup mechanism to remove us from the wait queue if we got
1538  * interrupted.
1539  */
1540  HOLD_INTERRUPTS();
1541 
1542  /*
1543  * Loop here to check the lock's status after each time we are signaled.
1544  */
1545  for (;;)
1546  {
1547  bool mustwait;
1548 
1549  mustwait = LWLockConflictsWithVar(lock, valptr, oldval, newval,
1550  &result);
1551 
1552  if (!mustwait)
1553  break; /* the lock was free or value didn't match */
1554 
1555  /*
1556  * Add myself to wait queue. Note that this is racy, somebody else
1557  * could wakeup before we're finished queuing. NB: We're using nearly
1558  * the same twice-in-a-row lock acquisition protocol as
1559  * LWLockAcquire(). Check its comments for details. The only
1560  * difference is that we also have to check the variable's values when
1561  * checking the state of the lock.
1562  */
1563  LWLockQueueSelf(lock, LW_WAIT_UNTIL_FREE);
1564 
1565  /*
1566  * Set RELEASE_OK flag, to make sure we get woken up as soon as the
1567  * lock is released.
1568  */
1569  pg_atomic_fetch_or_u32(&lock->state, LW_FLAG_RELEASE_OK);
1570 
1571  /*
1572  * We're now guaranteed to be woken up if necessary. Recheck the lock
1573  * and variables state.
1574  */
1575  mustwait = LWLockConflictsWithVar(lock, valptr, oldval, newval,
1576  &result);
1577 
1578  /* Ok, no conflict after we queued ourselves. Undo queueing. */
1579  if (!mustwait)
1580  {
1581  LOG_LWDEBUG("LWLockWaitForVar", lock, "free, undoing queue");
1582 
1583  LWLockDequeueSelf(lock);
1584  break;
1585  }
1586 
1587  /*
1588  * Wait until awakened.
1589  *
1590  * Since we share the process wait semaphore with the regular lock
1591  * manager and ProcWaitForSignal, and we may need to acquire an LWLock
1592  * while one of those is pending, it is possible that we get awakened
1593  * for a reason other than being signaled by LWLockRelease. If so,
1594  * loop back and wait again. Once we've gotten the LWLock,
1595  * re-increment the sema by the number of additional signals received,
1596  * so that the lock manager or signal manager will see the received
1597  * signal when it next waits.
1598  */
1599  LOG_LWDEBUG("LWLockWaitForVar", lock, "waiting");
1600 
1601 #ifdef LWLOCK_STATS
1602  lwstats->block_count++;
1603 #endif
1604 
1605  LWLockReportWaitStart(lock);
1606  TRACE_POSTGRESQL_LWLOCK_WAIT_START(T_NAME(lock), LW_EXCLUSIVE);
1607 
1608  for (;;)
1609  {
1610  PGSemaphoreLock(proc->sem);
1611  if (!proc->lwWaiting)
1612  break;
1613  extraWaits++;
1614  }
1615 
1616 #ifdef LOCK_DEBUG
1617  {
1618  /* not waiting anymore */
1619  uint32 nwaiters PG_USED_FOR_ASSERTS_ONLY = pg_atomic_fetch_sub_u32(&lock->nwaiters, 1);
1620 
1621  Assert(nwaiters < MAX_BACKENDS);
1622  }
1623 #endif
1624 
1625  TRACE_POSTGRESQL_LWLOCK_WAIT_DONE(T_NAME(lock), LW_EXCLUSIVE);
1626  LWLockReportWaitEnd();
1627 
1628  LOG_LWDEBUG("LWLockWaitForVar", lock, "awakened");
1629 
1630  /* Now loop back and check the status of the lock again. */
1631  }
1632 
1633  TRACE_POSTGRESQL_LWLOCK_ACQUIRE(T_NAME(lock), LW_EXCLUSIVE);
1634 
1635  /*
1636  * Fix the process wait semaphore's count for any absorbed wakeups.
1637  */
1638  while (extraWaits-- > 0)
1639  PGSemaphoreUnlock(proc->sem);
1640 
1641  /*
1642  * Now okay to allow cancel/die interrupts.
1643  */
1644  RESUME_INTERRUPTS();
1645 
1646  return result;
1647 }
1648 
1649 
1650 /*
1651  * LWLockUpdateVar - Update a variable and wake up waiters atomically
1652  *
1653  * Sets *valptr to 'val', and wakes up all processes waiting for us with
1654  * LWLockWaitForVar(). Setting the value and waking up the processes happen
1655  * atomically so that any process calling LWLockWaitForVar() on the same lock
1656  * is guaranteed to see the new value, and act accordingly.
1657  *
1658  * The caller must be holding the lock in exclusive mode.
1659  */
1660 void
1661 LWLockUpdateVar(LWLock *lock, uint64 *valptr, uint64 val)
1662 {
1663  proclist_head wakeup;
1664  proclist_mutable_iter iter;
1665 
1666  PRINT_LWDEBUG("LWLockUpdateVar", lock, LW_EXCLUSIVE);
1667 
1668  proclist_init(&wakeup);
1669 
1670  LWLockWaitListLock(lock);
1671 
1672  Assert(pg_atomic_read_u32(&lock->state) & LW_VAL_EXCLUSIVE);
1673 
1674  /* Update the lock's value */
1675  *valptr = val;
1676 
1677  /*
1678  * See if there are any LW_WAIT_UNTIL_FREE waiters that need to be woken
1679  * up. They are always in the front of the queue.
1680  */
1681  proclist_foreach_modify(iter, &lock->waiters, lwWaitLink)
1682  {
1683  PGPROC *waiter = GetPGProcByNumber(iter.cur);
1684 
1685  if (waiter->lwWaitMode != LW_WAIT_UNTIL_FREE)
1686  break;
1687 
1688  proclist_delete(&lock->waiters, iter.cur, lwWaitLink);
1689  proclist_push_tail(&wakeup, iter.cur, lwWaitLink);
1690  }
1691 
1692  /* We are done updating shared state of the lock itself. */
1693  LWLockWaitListUnlock(lock);
1694 
1695  /*
1696  * Awaken any waiters I removed from the queue.
1697  */
1698  proclist_foreach_modify(iter, &wakeup, lwWaitLink)
1699  {
1700  PGPROC *waiter = GetPGProcByNumber(iter.cur);
1701 
1702  proclist_delete(&wakeup, iter.cur, lwWaitLink);
1703  /* check comment in LWLockWakeup() about this barrier */
1704  pg_write_barrier();
1705  waiter->lwWaiting = false;
1706  PGSemaphoreUnlock(waiter->sem);
1707  }
1708 }
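/*
 * A minimal sketch of the variable-wait protocol, assuming a hypothetical
 * uint64 'insertingAt' in shared memory associated with 'lock' (the names
 * are illustrative; WAL insertion uses this machinery in a similar way):
 *
 *	-- exclusive holder: publish progress without releasing the lock
 *	LWLockUpdateVar(lock, &insertingAt, newPosition);
 *
 *	-- waiter: sleep until the lock is free or the value changes
 *	uint64	cur;
 *	while (!LWLockWaitForVar(lock, &insertingAt, oldval, &cur))
 *		oldval = cur;				-- value advanced; keep waiting
 *	-- a true return means the lock was observed free
 */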
1709 
1710 
1711 /*
1712  * LWLockRelease - release a previously acquired lock
1713  */
1714 void
1715 LWLockRelease(LWLock *lock)
1716 {
1717  LWLockMode mode;
1718  uint32 oldstate;
1719  bool check_waiters;
1720  int i;
1721 
1722  /*
1723  * Remove lock from list of locks held. Usually, but not always, it will
1724  * be the latest-acquired lock; so search array backwards.
1725  */
1726  for (i = num_held_lwlocks; --i >= 0;)
1727  if (lock == held_lwlocks[i].lock)
1728  break;
1729 
1730  if (i < 0)
1731  elog(ERROR, "lock %s is not held", T_NAME(lock));
1732 
1733  mode = held_lwlocks[i].mode;
1734 
1735  num_held_lwlocks--;
1736  for (; i < num_held_lwlocks; i++)
1737  held_lwlocks[i] = held_lwlocks[i + 1];
1738 
1739  PRINT_LWDEBUG("LWLockRelease", lock, mode);
1740 
1741  /*
1742  * Release my hold on lock, after that it can immediately be acquired by
1743  * others, even if we still have to wakeup other waiters.
1744  */
1745  if (mode == LW_EXCLUSIVE)
1746  oldstate = pg_atomic_sub_fetch_u32(&lock->state, LW_VAL_EXCLUSIVE);
1747  else
1748  oldstate = pg_atomic_sub_fetch_u32(&lock->state, LW_VAL_SHARED);
1749 
1750  /* nobody else can have that kind of lock */
1751  Assert(!(oldstate & LW_VAL_EXCLUSIVE));
1752 
1753 
1754  /*
1755  * We're still waiting for backends to get scheduled, don't wake them up
1756  * again.
1757  */
1758  if ((oldstate & (LW_FLAG_HAS_WAITERS | LW_FLAG_RELEASE_OK)) ==
1759  (LW_FLAG_HAS_WAITERS | LW_FLAG_RELEASE_OK) &&
1760  (oldstate & LW_LOCK_MASK) == 0)
1761  check_waiters = true;
1762  else
1763  check_waiters = false;
1764 
1765  /*
1766  * As waking up waiters requires the spinlock to be acquired, only do so
1767  * if necessary.
1768  */
1769  if (check_waiters)
1770  {
1771  /* XXX: remove before commit? */
1772  LOG_LWDEBUG("LWLockRelease", lock, "releasing waiters");
1773  LWLockWakeup(lock);
1774  }
1775 
1776  TRACE_POSTGRESQL_LWLOCK_RELEASE(T_NAME(lock));
1777 
1778  /*
1779  * Now okay to allow cancel/die interrupts.
1780  */
1781  RESUME_INTERRUPTS();
1782 }
1783 
1784 /*
1785  * LWLockReleaseClearVar - release a previously acquired lock, reset variable
1786  */
1787 void
1788 LWLockReleaseClearVar(LWLock *lock, uint64 *valptr, uint64 val)
1789 {
1790  LWLockWaitListLock(lock);
1791 
1792  /*
1793  * Set the variable's value before releasing the lock; that prevents
1794  * a race condition wherein a new locker acquires the lock, but hasn't yet
1795  * set the variable's value.
1796  */
1797  *valptr = val;
1798  LWLockWaitListUnlock(lock);
1799 
1800  LWLockRelease(lock);
1801 }
1802 
1803 
1804 /*
1805  * LWLockReleaseAll - release all currently-held locks
1806  *
1807  * Used to clean up after ereport(ERROR). An important difference between this
1808  * function and retail LWLockRelease calls is that InterruptHoldoffCount is
1809  * unchanged by this operation. This is necessary since InterruptHoldoffCount
1810  * has been set to an appropriate level earlier in error recovery. We could
1811  * decrement it below zero if we allow it to drop for each released lock!
1812  */
1813 void
1814 LWLockReleaseAll(void)
1815 {
1816  while (num_held_lwlocks > 0)
1817  {
1818  HOLD_INTERRUPTS(); /* match the upcoming RESUME_INTERRUPTS */
1819 
1820  LWLockRelease(held_lwlocks[num_held_lwlocks - 1].lock);
1821  }
1822 }
1823 
1824 
1825 /*
1826  * LWLockHeldByMe - test whether my process holds a lock in any mode
1827  *
1828  * This is meant as debug support only.
1829  */
1830 bool
1831 LWLockHeldByMe(LWLock *l)
1832 {
1833  int i;
1834 
1835  for (i = 0; i < num_held_lwlocks; i++)
1836  {
1837  if (held_lwlocks[i].lock == l)
1838  return true;
1839  }
1840  return false;
1841 }
1842 
1843 /*
1844  * LWLockHeldByMeInMode - test whether my process holds a lock in given mode
1845  *
1846  * This is meant as debug support only.
1847  */
1848 bool
1849 LWLockHeldByMeInMode(LWLock *l, LWLockMode mode)
1850 {
1851  int i;
1852 
1853  for (i = 0; i < num_held_lwlocks; i++)
1854  {
1855  if (held_lwlocks[i].lock == l && held_lwlocks[i].mode == mode)
1856  return true;
1857  }
1858  return false;
1859 }