lwlock.c
1/*-------------------------------------------------------------------------
2 *
3 * lwlock.c
4 * Lightweight lock manager
5 *
6 * Lightweight locks are intended primarily to provide mutual exclusion of
7 * access to shared-memory data structures. Therefore, they offer both
8 * exclusive and shared lock modes (to support read/write and read-only
9 * access to a shared object). There are few other frammishes. User-level
10 * locking should be done with the full lock manager --- which depends on
11 * LWLocks to protect its shared state.
12 *
13 * In addition to exclusive and shared modes, lightweight locks can be used to
14 * wait until a variable changes value. The variable is initially not set
15 * when the lock is acquired with LWLockAcquire, i.e. it remains set to the
16 * value it was set to when the lock was released last, and can be updated
17 * without releasing the lock by calling LWLockUpdateVar. LWLockWaitForVar
18 * waits for the variable to be updated, or until the lock is free. When
19 * releasing the lock with LWLockReleaseClearVar() the value can be set to an
20 * appropriate value for a free lock. The meaning of the variable is up to
21 * the caller, the lightweight lock code just assigns and compares it.
22 *
23 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
24 * Portions Copyright (c) 1994, Regents of the University of California
25 *
26 * IDENTIFICATION
27 * src/backend/storage/lmgr/lwlock.c
28 *
29 * NOTES:
30 *
31 * This used to be a pretty straightforward reader-writer lock
32 * implementation, in which the internal state was protected by a
33 * spinlock. Unfortunately the overhead of taking the spinlock proved to be
34 * too high for workloads/locks that were taken in shared mode very
35 * frequently. Often we were spinning in the (obviously exclusive) spinlock,
36 * while trying to acquire a shared lock that was actually free.
37 *
38 * Thus a new implementation was devised that provides wait-free shared lock
39 * acquisition for locks that aren't exclusively locked.
40 *
41 * The basic idea is to have a single atomic variable 'lockcount' instead of
42 * the formerly separate shared and exclusive counters and to use atomic
43 * operations to acquire the lock. That's fairly easy to do for plain
44 * rw-spinlocks, but a lot harder for something like LWLocks that want to wait
45 * in the OS.
46 *
47 * For lock acquisition we use an atomic compare-and-exchange on the lockcount
48 * variable. For exclusive lock we swap in a sentinel value
49 * (LW_VAL_EXCLUSIVE), for shared locks we count the number of holders.
50 *
51 * To release the lock we use an atomic decrement. If the new value is
52 * zero (we get that atomically), we know we may have to wake up
53 * waiters.
54 *
55 * Obviously it is important that the sentinel value for exclusive locks
56 * doesn't conflict with the maximum number of possible share lockers -
57 * luckily MAX_BACKENDS makes that easily possible.
58 *
59 *
60 * The attentive reader might have noticed that naively doing the above has a
61 * glaring race condition: We try to lock using the atomic operations and
62 * notice that we have to wait. Unfortunately by the time we have finished
63 * queuing, the former locker very well might have already finished its
64 * work. That's problematic because we're now stuck waiting inside the OS.
65 *
66 * To mitigate those races we use a two-phased attempt at locking:
67 * Phase 1: Try to do it atomically, if we succeed, nice
68 * Phase 2: Add ourselves to the waitqueue of the lock
69 * Phase 3: Try to grab the lock again, if we succeed, remove ourselves from
70 * the queue
71 * Phase 4: Sleep till wake-up, goto Phase 1
72 *
73 * This protects us against the problem above: nobody can release too quickly
74 * before we're queued, since after Phase 2 we're already queued.
75 * -------------------------------------------------------------------------
76 */
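/*
 * A schematic sketch of the four phases above, with the queue, flag and
 * wakeup details omitted.  try_lock_atomically(), queue_self(),
 * dequeue_self() and sleep_until_woken() are placeholder names, not
 * functions that exist in this file.
 */
#if 0
static void
acquire_sketch(LWLock *lock, LWLockMode mode)
{
	for (;;)
	{
		/* Phase 1: try to grab the lock atomically */
		if (try_lock_atomically(lock, mode))
			return;

		/* Phase 2: add ourselves to the lock's wait queue */
		queue_self(lock, mode);

		/* Phase 3: retry; the holder may have released it in the meantime */
		if (try_lock_atomically(lock, mode))
		{
			dequeue_self(lock);
			return;
		}

		/* Phase 4: sleep until the releasing backend wakes us, then retry */
		sleep_until_woken();
	}
}
#endif
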
77#include "postgres.h"
78
79#include "miscadmin.h"
80#include "pg_trace.h"
81#include "pgstat.h"
82#include "port/pg_bitutils.h"
83#include "storage/proc.h"
84#include "storage/proclist.h"
85#include "storage/procnumber.h"
86#include "storage/spin.h"
87#include "utils/memutils.h"
88
89#ifdef LWLOCK_STATS
90#include "utils/hsearch.h"
91#endif
92
93
94#define LW_FLAG_HAS_WAITERS ((uint32) 1 << 31)
95#define LW_FLAG_RELEASE_OK ((uint32) 1 << 30)
96#define LW_FLAG_LOCKED ((uint32) 1 << 29)
97#define LW_FLAG_BITS 3
98#define LW_FLAG_MASK (((1<<LW_FLAG_BITS)-1)<<(32-LW_FLAG_BITS))
99
100/* assumes MAX_BACKENDS is a (power of 2) - 1, checked below */
101#define LW_VAL_EXCLUSIVE (MAX_BACKENDS + 1)
102#define LW_VAL_SHARED 1
103
104/* already (power of 2)-1, i.e. suitable for a mask */
105#define LW_SHARED_MASK MAX_BACKENDS
106#define LW_LOCK_MASK (MAX_BACKENDS | LW_VAL_EXCLUSIVE)
107
108
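/*
 * A small sketch of how a state word is interpreted using the masks above:
 * the low bits count shared holders (or contain LW_VAL_EXCLUSIVE), the top
 * three bits are the flag bits.  Purely illustrative; no such helper exists
 * in this file.
 */
#if 0
static void
describe_state(uint32 state)
{
	bool		exclusive = (state & LW_VAL_EXCLUSIVE) != 0;
	uint32		shared_holders = state & LW_SHARED_MASK;
	bool		has_waiters = (state & LW_FLAG_HAS_WAITERS) != 0;
	bool		release_ok = (state & LW_FLAG_RELEASE_OK) != 0;
	bool		waitlist_locked = (state & LW_FLAG_LOCKED) != 0;

	elog(DEBUG1, "excl %d shared %u haswaiters %d releaseok %d listlocked %d",
		 exclusive, shared_holders, has_waiters, release_ok, waitlist_locked);
}
#endif
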
109StaticAssertDecl(((MAX_BACKENDS + 1) & MAX_BACKENDS) == 0,
110 "MAX_BACKENDS + 1 needs to be a power of 2");
111
112StaticAssertDecl((MAX_BACKENDS & LW_FLAG_MASK) == 0,
113 "MAX_BACKENDS and LW_FLAG_MASK overlap");
114
115StaticAssertDecl((LW_VAL_EXCLUSIVE & LW_FLAG_MASK) == 0,
116 "LW_VAL_EXCLUSIVE and LW_FLAG_MASK overlap");
117
118/*
119 * There are three sorts of LWLock "tranches":
120 *
121 * 1. The individually-named locks defined in lwlocklist.h each have their
122 * own tranche. We absorb the names of these tranches from there into
123 * BuiltinTrancheNames here.
124 *
125 * 2. There are some predefined tranches for built-in groups of locks defined
126 * in lwlocklist.h. We absorb the names of these tranches, too.
127 *
128 * 3. Extensions can create new tranches, via either RequestNamedLWLockTranche
129 * or LWLockNewTrancheId. These names are stored in shared memory and can be
130 * accessed via LWLockTrancheNames.
131 *
132 * All these names are user-visible as wait event names, so choose with care
133 * ... and do not forget to update the documentation's list of wait events.
134 */
135static const char *const BuiltinTrancheNames[] = {
136#define PG_LWLOCK(id, lockname) [id] = CppAsString(lockname),
137#define PG_LWLOCKTRANCHE(id, lockname) [LWTRANCHE_##id] = CppAsString(lockname),
138#include "storage/lwlocklist.h"
139#undef PG_LWLOCK
140#undef PG_LWLOCKTRANCHE
141};
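/*
 * The array above is filled in with an "x-macro" include: each PG_LWLOCK()
 * and PG_LWLOCKTRANCHE() line in lwlocklist.h expands to a designated
 * initializer.  A tiny standalone sketch of the same pattern, using a made-up
 * list macro (MY_LOCK_LIST) instead of the real lwlocklist.h:
 */
#if 0
#define MY_LOCK_LIST \
	MY_LOCK(0, FooLock) \
	MY_LOCK(1, BarLock)

static const char *const MyLockNames[] = {
#define MY_LOCK(id, lockname) [id] = #lockname,
	MY_LOCK_LIST
#undef MY_LOCK
};
#endif
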
142
145 "missing entries in BuiltinTrancheNames[]");
146
147/*
148 * This is indexed by tranche ID minus LWTRANCHE_FIRST_USER_DEFINED, and
149 * points to the shared memory locations of the names of all
150 * dynamically-created tranches. Backends inherit the pointer by fork from the
151 * postmaster (except in the EXEC_BACKEND case, where we have special measures
152 * to pass it down).
153 */
154char **LWLockTrancheNames = NULL;
155
156/*
157 * This points to the main array of LWLocks in shared memory. Backends inherit
158 * the pointer by fork from the postmaster (except in the EXEC_BACKEND case,
159 * where we have special measures to pass it down).
160 */
162
163/*
164 * We use this structure to keep track of locked LWLocks for release
165 * during error recovery. Normally, only a few will be held at once, but
166 * occasionally the number can be much higher.
167 */
168#define MAX_SIMUL_LWLOCKS 200
169
170/* struct representing the LWLocks we're holding */
171typedef struct LWLockHandle
172{
176
177static int num_held_lwlocks = 0;
179
180/* struct representing the LWLock tranche request for named tranche */
182{
186
187/*
188 * NamedLWLockTrancheRequests is the valid length of the request array. These
189 * variables are non-static so that launch_backend.c can copy them to child
190 * processes in EXEC_BACKEND builds.
191 */
194
195/* postmaster's local copy of the request array */
197
198/* shared memory counter of registered tranches */
199int *LWLockCounter = NULL;
200
201/* backend-local counter of registered tranches */
203
204#define MAX_NAMED_TRANCHES 256
205
206static void InitializeLWLocks(void);
207static inline void LWLockReportWaitStart(LWLock *lock);
208static inline void LWLockReportWaitEnd(void);
209static const char *GetLWTrancheName(uint16 trancheId);
210
211#define T_NAME(lock) \
212 GetLWTrancheName((lock)->tranche)
213
214#ifdef LWLOCK_STATS
215typedef struct lwlock_stats_key
216{
217 int tranche;
218 void *instance;
219} lwlock_stats_key;
220
221typedef struct lwlock_stats
222{
223 lwlock_stats_key key;
224 int sh_acquire_count;
225 int ex_acquire_count;
226 int block_count;
227 int dequeue_self_count;
228 int spin_delay_count;
229} lwlock_stats;
230
231static HTAB *lwlock_stats_htab;
232static lwlock_stats lwlock_stats_dummy;
233#endif
234
235#ifdef LOCK_DEBUG
236bool Trace_lwlocks = false;
237
238inline static void
239PRINT_LWDEBUG(const char *where, LWLock *lock, LWLockMode mode)
240{
241 /* hide statement & context here, otherwise the log is just too verbose */
242 if (Trace_lwlocks)
243 {
245
246 ereport(LOG,
247 (errhidestmt(true),
248 errhidecontext(true),
249 errmsg_internal("%d: %s(%s %p): excl %u shared %u haswaiters %u waiters %u rOK %d",
250 MyProcPid,
251 where, T_NAME(lock), lock,
252 (state & LW_VAL_EXCLUSIVE) != 0,
254 (state & LW_FLAG_HAS_WAITERS) != 0,
255 pg_atomic_read_u32(&lock->nwaiters),
256 (state & LW_FLAG_RELEASE_OK) != 0)));
257 }
258}
259
260inline static void
261LOG_LWDEBUG(const char *where, LWLock *lock, const char *msg)
262{
263 /* hide statement & context here, otherwise the log is just too verbose */
264 if (Trace_lwlocks)
265 {
266 ereport(LOG,
267 (errhidestmt(true),
268 errhidecontext(true),
269 errmsg_internal("%s(%s %p): %s", where,
270 T_NAME(lock), lock, msg)));
271 }
272}
273
274#else /* not LOCK_DEBUG */
275#define PRINT_LWDEBUG(a,b,c) ((void)0)
276#define LOG_LWDEBUG(a,b,c) ((void)0)
277#endif /* LOCK_DEBUG */
278
279#ifdef LWLOCK_STATS
280
281static void init_lwlock_stats(void);
282static void print_lwlock_stats(int code, Datum arg);
283static lwlock_stats * get_lwlock_stats_entry(LWLock *lock);
284
285static void
286init_lwlock_stats(void)
287{
288 HASHCTL ctl;
289 static MemoryContext lwlock_stats_cxt = NULL;
290 static bool exit_registered = false;
291
292 if (lwlock_stats_cxt != NULL)
293 MemoryContextDelete(lwlock_stats_cxt);
294
295 /*
296 * The LWLock stats will be updated within a critical section, which
297 * requires allocating new hash entries. Allocations within a critical
298 * section are normally not allowed because running out of memory would
299 * lead to a PANIC, but LWLOCK_STATS is debugging code that's not normally
300 * turned on in production, so that's an acceptable risk. The hash entries
301 * are small, so the risk of running out of memory is minimal in practice.
302 */
303 lwlock_stats_cxt = AllocSetContextCreate(TopMemoryContext,
304 "LWLock stats",
306 MemoryContextAllowInCriticalSection(lwlock_stats_cxt, true);
307
308 ctl.keysize = sizeof(lwlock_stats_key);
309 ctl.entrysize = sizeof(lwlock_stats);
310 ctl.hcxt = lwlock_stats_cxt;
311 lwlock_stats_htab = hash_create("lwlock stats", 16384, &ctl,
313 if (!exit_registered)
314 {
315 on_shmem_exit(print_lwlock_stats, 0);
316 exit_registered = true;
317 }
318}
319
320static void
321print_lwlock_stats(int code, Datum arg)
322{
323 HASH_SEQ_STATUS scan;
324 lwlock_stats *lwstats;
325
326 hash_seq_init(&scan, lwlock_stats_htab);
327
328 /* Grab an LWLock to keep different backends from mixing reports */
330
331 while ((lwstats = (lwlock_stats *) hash_seq_search(&scan)) != NULL)
332 {
333 fprintf(stderr,
334 "PID %d lwlock %s %p: shacq %u exacq %u blk %u spindelay %u dequeue self %u\n",
335 MyProcPid, GetLWTrancheName(lwstats->key.tranche),
336 lwstats->key.instance, lwstats->sh_acquire_count,
337 lwstats->ex_acquire_count, lwstats->block_count,
338 lwstats->spin_delay_count, lwstats->dequeue_self_count);
339 }
340
342}
343
344static lwlock_stats *
345get_lwlock_stats_entry(LWLock *lock)
346{
347 lwlock_stats_key key;
348 lwlock_stats *lwstats;
349 bool found;
350
351 /*
352 * During shared memory initialization, the hash table doesn't exist yet.
353 * Stats of that phase aren't very interesting, so just collect operations
354 * on all locks in a single dummy entry.
355 */
356 if (lwlock_stats_htab == NULL)
357 return &lwlock_stats_dummy;
358
359 /* Fetch or create the entry. */
360 MemSet(&key, 0, sizeof(key));
361 key.tranche = lock->tranche;
362 key.instance = lock;
363 lwstats = hash_search(lwlock_stats_htab, &key, HASH_ENTER, &found);
364 if (!found)
365 {
366 lwstats->sh_acquire_count = 0;
367 lwstats->ex_acquire_count = 0;
368 lwstats->block_count = 0;
369 lwstats->dequeue_self_count = 0;
370 lwstats->spin_delay_count = 0;
371 }
372 return lwstats;
373}
374#endif /* LWLOCK_STATS */
375
376
377/*
378 * Compute number of LWLocks required by named tranches. These will be
379 * allocated in the main array.
380 */
381static int
383{
384 int numLocks = 0;
385 int i;
386
387 for (i = 0; i < NamedLWLockTrancheRequests; i++)
388 numLocks += NamedLWLockTrancheRequestArray[i].num_lwlocks;
389
390 return numLocks;
391}
392
393/*
394 * Compute shmem space needed for LWLocks and named tranches.
395 */
396Size
398{
399 Size size;
400 int numLocks = NUM_FIXED_LWLOCKS;
401
402 /*
403 * If re-initializing shared memory, the request array will no longer be
404 * accessible, so switch to the copy in postmaster's local memory. We'll
405 * copy it back into shared memory later when CreateLWLocks() is called
406 * again.
407 */
410
411 /* Calculate total number of locks needed in the main array. */
412 numLocks += NumLWLocksForNamedTranches();
413
414 /* Space for dynamic allocation counter. */
415 size = MAXALIGN(sizeof(int));
416
417 /* Space for named tranches. */
418 size = add_size(size, mul_size(MAX_NAMED_TRANCHES, sizeof(char *)));
420
421 /*
422 * Make space for named tranche requests. This is done for the benefit of
423 * EXEC_BACKEND builds, which otherwise wouldn't be able to call
424 * GetNamedLWLockTranche() outside postmaster.
425 */
428
429 /* Space for the LWLock array, plus room for cache line alignment. */
430 size = add_size(size, LWLOCK_PADDED_SIZE);
431 size = add_size(size, mul_size(numLocks, sizeof(LWLockPadded)));
432
433 return size;
434}
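
/*
 * Extensions size their own shared state the same way: compute the size with
 * add_size()/mul_size() (which check for overflow) and request it from the
 * shmem_request_hook installed at preload time.  A minimal sketch; the
 * MyStatsState type and MY_STATS_NELEMS are hypothetical.
 */
#if 0
static Size
my_stats_shmem_size(void)
{
	Size		size;

	size = MAXALIGN(sizeof(MyStatsState));
	size = add_size(size, mul_size(MY_STATS_NELEMS, sizeof(int)));

	return size;
}

/* installed as shmem_request_hook from _PG_init() (not shown) */
static void
my_stats_shmem_request(void)
{
	RequestAddinShmemSpace(my_stats_shmem_size());
}
#endif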
435
436/*
437 * Allocate shmem space for the main LWLock array and all tranches and
438 * initialize it.
439 */
440void
442{
444 {
445 Size spaceLocks = LWLockShmemSize();
446 char *ptr;
447
448 /* Allocate space */
449 ptr = (char *) ShmemAlloc(spaceLocks);
450
451 /* Initialize the dynamic-allocation counter for tranches */
452 LWLockCounter = (int *) ptr;
454 ptr += MAXALIGN(sizeof(int));
455
456 /* Initialize tranche names */
457 LWLockTrancheNames = (char **) ptr;
458 ptr += MAX_NAMED_TRANCHES * sizeof(char *);
459 for (int i = 0; i < MAX_NAMED_TRANCHES; i++)
460 {
461 LWLockTrancheNames[i] = ptr;
462 ptr += NAMEDATALEN;
463 }
464
465 /*
466 * Move named tranche requests to shared memory. This is done for the
467 * benefit of EXEC_BACKEND builds, which otherwise wouldn't be able to
468 * call GetNamedLWLockTranche() outside postmaster.
469 */
471 {
472 /*
473 * Save the pointer to the request array in postmaster's local
474 * memory. We'll need it if we ever need to re-initialize shared
475 * memory after a crash.
476 */
478
483 }
484
485 /* Ensure desired alignment of LWLock array */
486 ptr += LWLOCK_PADDED_SIZE - ((uintptr_t) ptr) % LWLOCK_PADDED_SIZE;
488
489 /* Initialize all LWLocks */
491 }
492}
493
494/*
495 * Initialize LWLocks that are fixed and those belonging to named tranches.
496 */
497static void
499{
500 int id;
501 int i;
502 int j;
503 LWLockPadded *lock;
504
505 /* Initialize all individual LWLocks in main array */
506 for (id = 0, lock = MainLWLockArray; id < NUM_INDIVIDUAL_LWLOCKS; id++, lock++)
507 LWLockInitialize(&lock->lock, id);
508
509 /* Initialize buffer mapping LWLocks in main array */
511 for (id = 0; id < NUM_BUFFER_PARTITIONS; id++, lock++)
512 LWLockInitialize(&lock->lock, LWTRANCHE_BUFFER_MAPPING);
513
514 /* Initialize lmgrs' LWLocks in main array */
516 for (id = 0; id < NUM_LOCK_PARTITIONS; id++, lock++)
517 LWLockInitialize(&lock->lock, LWTRANCHE_LOCK_MANAGER);
518
519 /* Initialize predicate lmgrs' LWLocks in main array */
521 for (id = 0; id < NUM_PREDICATELOCK_PARTITIONS; id++, lock++)
522 LWLockInitialize(&lock->lock, LWTRANCHE_PREDICATE_LOCK_MANAGER);
523
524 /*
525 * Copy the info about any named tranches into shared memory (so that
526 * other processes can see it), and initialize the requested LWLocks.
527 */
529 {
531
532 for (i = 0; i < NamedLWLockTrancheRequests; i++)
533 {
535 int tranche;
536
538 tranche = LWLockNewTrancheId(request->tranche_name);
539
540 for (j = 0; j < request->num_lwlocks; j++, lock++)
541 LWLockInitialize(&lock->lock, tranche);
542 }
543 }
544}
545
546/*
547 * InitLWLockAccess - initialize backend-local state needed to hold LWLocks
548 */
549void
551{
552#ifdef LWLOCK_STATS
553 init_lwlock_stats();
554#endif
555}
556
557/*
558 * GetNamedLWLockTranche - returns the base address of the LWLocks belonging
559 * to the specified tranche.
560 *
561 * The caller needs to retrieve the requested number of LWLocks starting from
562 * the base lock address returned by this API. This can be used for tranches
563 * that were requested via the RequestNamedLWLockTranche() API.
564 */
566GetNamedLWLockTranche(const char *tranche_name)
567{
568 int lock_pos;
569 int i;
570
571 /*
572 * Obtain the position of the base address of the LWLocks belonging to the
573 * requested tranche_name in MainLWLockArray. LWLocks for named tranches are
574 * placed in MainLWLockArray after the fixed locks.
575 */
576 lock_pos = NUM_FIXED_LWLOCKS;
577 for (i = 0; i < NamedLWLockTrancheRequests; i++)
578 {
579 if (strcmp(NamedLWLockTrancheRequestArray[i].tranche_name,
580 tranche_name) == 0)
581 return &MainLWLockArray[lock_pos];
582
584 }
585
586 elog(ERROR, "requested tranche is not registered");
587
588 /* just to keep compiler quiet */
589 return NULL;
590}
591
592/*
593 * Allocate a new tranche ID with the provided name.
594 */
595int
597{
598 int result;
599
600 if (!name)
602 (errcode(ERRCODE_INVALID_NAME),
603 errmsg("tranche name cannot be NULL")));
604
605 if (strlen(name) >= NAMEDATALEN)
607 (errcode(ERRCODE_NAME_TOO_LONG),
608 errmsg("tranche name too long"),
609 errdetail("LWLock tranche names must be no longer than %d bytes.",
610 NAMEDATALEN - 1)));
611
612 /*
613 * We use the ShmemLock spinlock to protect LWLockCounter and
614 * LWLockTrancheNames.
615 */
617
619 {
622 (errmsg("maximum number of tranches already registered"),
623 errdetail("No more than %d tranches may be registered.",
625 }
626
627 result = (*LWLockCounter)++;
630
632
633 return result;
634}
635
636/*
637 * RequestNamedLWLockTranche
638 * Request that extra LWLocks be allocated during postmaster
639 * startup.
640 *
641 * This may only be called via the shmem_request_hook of a library that is
642 * loaded into the postmaster via shared_preload_libraries. Calls from
643 * elsewhere will fail.
644 *
645 * The tranche name will be user-visible as a wait event name, so try to
646 * use a name that fits the style for those.
647 */
648void
649RequestNamedLWLockTranche(const char *tranche_name, int num_lwlocks)
650{
652 static int NamedLWLockTrancheRequestsAllocated;
653
655 elog(FATAL, "cannot request additional LWLocks outside shmem_request_hook");
656
657 if (!tranche_name)
659 (errcode(ERRCODE_INVALID_NAME),
660 errmsg("tranche name cannot be NULL")));
661
662 if (strlen(tranche_name) >= NAMEDATALEN)
664 (errcode(ERRCODE_NAME_TOO_LONG),
665 errmsg("tranche name too long"),
666 errdetail("LWLock tranche names must be no longer than %d bytes.",
667 NAMEDATALEN - 1)));
668
670 {
671 NamedLWLockTrancheRequestsAllocated = 16;
674 NamedLWLockTrancheRequestsAllocated
675 * sizeof(NamedLWLockTrancheRequest));
676 }
677
678 if (NamedLWLockTrancheRequests >= NamedLWLockTrancheRequestsAllocated)
679 {
681
684 i * sizeof(NamedLWLockTrancheRequest));
685 NamedLWLockTrancheRequestsAllocated = i;
686 }
687
689 strlcpy(request->tranche_name, tranche_name, NAMEDATALEN);
690 request->num_lwlocks = num_lwlocks;
692}
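
/*
 * Typical use from an extension: request the named tranche from the
 * shmem_request_hook at preload time, then fetch the locks once shared
 * memory exists.  A sketch only; the "my_ext" tranche and my_ext_locks are
 * hypothetical, and chaining to any previously installed hooks is omitted
 * for brevity.
 */
#if 0
static LWLockPadded *my_ext_locks;

static void
my_ext_shmem_request(void)
{
	RequestNamedLWLockTranche("my_ext", 4);
}

static void
my_ext_shmem_startup(void)
{
	my_ext_locks = GetNamedLWLockTranche("my_ext");
}

void
_PG_init(void)
{
	if (!process_shared_preload_libraries_in_progress)
		return;

	shmem_request_hook = my_ext_shmem_request;
	shmem_startup_hook = my_ext_shmem_startup;
}
#endif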
693
694/*
695 * LWLockInitialize - initialize a new lwlock; it's initially unlocked
696 */
697void
698LWLockInitialize(LWLock *lock, int tranche_id)
699{
700 /* verify the tranche_id is valid */
701 (void) GetLWTrancheName(tranche_id);
702
704#ifdef LOCK_DEBUG
705 pg_atomic_init_u32(&lock->nwaiters, 0);
706#endif
707 lock->tranche = tranche_id;
708 proclist_init(&lock->waiters);
709}
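
/*
 * For a lock embedded in an extension's own shared struct (rather than a
 * named tranche in the main array), the usual pattern is to allocate a
 * tranche ID and initialize the lock when the struct is first created, under
 * AddinShmemInitLock.  A sketch; MyExtState and my_ext_state are
 * hypothetical, and LWLockNewTrancheId() is used with the name argument it
 * takes in this file.
 */
#if 0
typedef struct MyExtState
{
	LWLock		lock;
	int			counter;
} MyExtState;

static MyExtState *my_ext_state;

/* installed as shmem_startup_hook (not shown) */
static void
my_ext_state_startup(void)
{
	bool		found;

	LWLockAcquire(AddinShmemInitLock, LW_EXCLUSIVE);

	my_ext_state = ShmemInitStruct("my_ext state", sizeof(MyExtState), &found);
	if (!found)
	{
		LWLockInitialize(&my_ext_state->lock, LWLockNewTrancheId("my_ext"));
		my_ext_state->counter = 0;
	}

	LWLockRelease(AddinShmemInitLock);
}
#endif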
710
711/*
712 * Report start of wait event for light-weight locks.
713 *
714 * This function is used by all the light-weight lock calls that need to
715 * wait to acquire the lock. It distinguishes the wait event based on the
716 * tranche and lock id.
717 */
718static inline void
720{
722}
723
724/*
725 * Report end of wait event for light-weight locks.
726 */
727static inline void
729{
731}
732
733/*
734 * Return the name of an LWLock tranche.
735 */
736static const char *
738{
739 /* Built-in tranche or individual LWLock? */
740 if (trancheId < LWTRANCHE_FIRST_USER_DEFINED)
741 return BuiltinTrancheNames[trancheId];
742
743 /*
744 * We only ever add new entries to LWLockTrancheNames, so most lookups can
745 * avoid taking the spinlock as long as the backend-local counter
746 * (LocalLWLockCounter) is greater than the requested tranche ID. Else,
747 * we need to first update the backend-local counter with ShmemLock held
748 * before attempting the lookup again. In practice, the latter case is
749 * probably rare.
750 */
751 if (trancheId >= LocalLWLockCounter)
752 {
756
757 if (trancheId >= LocalLWLockCounter)
758 elog(ERROR, "tranche %d is not registered", trancheId);
759 }
760
761 /*
762 * It's an extension tranche, so look in LWLockTrancheNames.
763 */
764 trancheId -= LWTRANCHE_FIRST_USER_DEFINED;
765
766 return LWLockTrancheNames[trancheId];
767}
768
769/*
770 * Return an identifier for an LWLock based on the wait class and event.
771 */
772const char *
774{
775 Assert(classId == PG_WAIT_LWLOCK);
776 /* The event IDs are just tranche numbers. */
777 return GetLWTrancheName(eventId);
778}
779
780/*
781 * Internal function that tries to atomically acquire the lwlock in the passed
782 * in mode.
783 *
784 * This function will not block waiting for a lock to become free - that's the
785 * caller's job.
786 *
787 * Returns true if the lock isn't free and we need to wait.
788 */
789static bool
791{
792 uint32 old_state;
793
795
796 /*
797 * Read once outside the loop, later iterations will get the newer value
798 * via compare & exchange.
799 */
800 old_state = pg_atomic_read_u32(&lock->state);
801
802 /* loop until we've determined whether we could acquire the lock or not */
803 while (true)
804 {
805 uint32 desired_state;
806 bool lock_free;
807
808 desired_state = old_state;
809
810 if (mode == LW_EXCLUSIVE)
811 {
812 lock_free = (old_state & LW_LOCK_MASK) == 0;
813 if (lock_free)
814 desired_state += LW_VAL_EXCLUSIVE;
815 }
816 else
817 {
818 lock_free = (old_state & LW_VAL_EXCLUSIVE) == 0;
819 if (lock_free)
820 desired_state += LW_VAL_SHARED;
821 }
822
823 /*
824 * Attempt to swap in the state we are expecting. If we didn't see the
825 * lock as free, that's just the old value. If we saw it as free,
826 * we'll attempt to mark it acquired. The reason that we always swap
827 * in the value is that this doubles as a memory barrier. We could try
828 * to be smarter and only swap in values if we saw the lock as free,
829 * but benchmarks haven't shown that to be beneficial so far.
830 *
831 * Retry if the value changed since we last looked at it.
832 */
834 &old_state, desired_state))
835 {
836 if (lock_free)
837 {
838 /* Great! Got the lock. */
839#ifdef LOCK_DEBUG
840 if (mode == LW_EXCLUSIVE)
841 lock->owner = MyProc;
842#endif
843 return false;
844 }
845 else
846 return true; /* somebody else has the lock */
847 }
848 }
850}
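
/*
 * The function above uses the common compare-and-exchange retry idiom: read
 * the word once, compute the desired value, and let a failed CAS hand back
 * the current value for the next attempt.  The same idiom on a plain atomic
 * counter, as a standalone sketch (clamped_add is a hypothetical helper):
 */
#if 0
static uint32
clamped_add(volatile pg_atomic_uint32 *counter, uint32 add, uint32 limit)
{
	uint32		old_val = pg_atomic_read_u32(counter);

	while (true)
	{
		uint32		new_val = Min(old_val + add, limit);

		/* on failure, old_val is refreshed with the current value; retry */
		if (pg_atomic_compare_exchange_u32(counter, &old_val, new_val))
			return new_val;
	}
}
#endif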
851
852/*
853 * Lock the LWLock's wait list against concurrent activity.
854 *
855 * NB: even though the wait list is locked, non-conflicting lock operations
856 * may still happen concurrently.
857 *
858 * Time spent holding mutex should be short!
859 */
860static void
862{
863 uint32 old_state;
864#ifdef LWLOCK_STATS
865 lwlock_stats *lwstats;
866 uint32 delays = 0;
867
868 lwstats = get_lwlock_stats_entry(lock);
869#endif
870
871 while (true)
872 {
873 /* always try once to acquire lock directly */
874 old_state = pg_atomic_fetch_or_u32(&lock->state, LW_FLAG_LOCKED);
875 if (!(old_state & LW_FLAG_LOCKED))
876 break; /* got lock */
877
878 /* and then spin without atomic operations until lock is released */
879 {
880 SpinDelayStatus delayStatus;
881
882 init_local_spin_delay(&delayStatus);
883
884 while (old_state & LW_FLAG_LOCKED)
885 {
886 perform_spin_delay(&delayStatus);
887 old_state = pg_atomic_read_u32(&lock->state);
888 }
889#ifdef LWLOCK_STATS
890 delays += delayStatus.delays;
891#endif
892 finish_spin_delay(&delayStatus);
893 }
894
895 /*
896 * Retry. The lock might well have been re-acquired by the time we
897 * attempt to get it again.
898 */
899 }
900
901#ifdef LWLOCK_STATS
902 lwstats->spin_delay_count += delays;
903#endif
904}
905
906/*
907 * Unlock the LWLock's wait list.
908 *
909 * Note that it can be more efficient to manipulate flags and release the
910 * locks in a single atomic operation.
911 */
912static void
914{
916
917 old_state = pg_atomic_fetch_and_u32(&lock->state, ~LW_FLAG_LOCKED);
918
919 Assert(old_state & LW_FLAG_LOCKED);
920}
921
922/*
923 * Wake up all the lockers that currently have a chance to acquire the lock.
924 */
925static void
927{
928 bool new_release_ok;
929 bool wokeup_somebody = false;
932
934
935 new_release_ok = true;
936
937 /* lock wait list while collecting backends to wake up */
938 LWLockWaitListLock(lock);
939
940 proclist_foreach_modify(iter, &lock->waiters, lwWaitLink)
941 {
942 PGPROC *waiter = GetPGProcByNumber(iter.cur);
943
944 if (wokeup_somebody && waiter->lwWaitMode == LW_EXCLUSIVE)
945 continue;
946
947 proclist_delete(&lock->waiters, iter.cur, lwWaitLink);
948 proclist_push_tail(&wakeup, iter.cur, lwWaitLink);
949
950 if (waiter->lwWaitMode != LW_WAIT_UNTIL_FREE)
951 {
952 /*
953 * Prevent additional wakeups until retryer gets to run. Backends
954 * that are just waiting for the lock to become free don't retry
955 * automatically.
956 */
957 new_release_ok = false;
958
959 /*
960 * Don't wakeup (further) exclusive locks.
961 */
962 wokeup_somebody = true;
963 }
964
965 /*
966 * Signal that the process isn't on the wait list anymore. This allows
967 * LWLockDequeueSelf() to remove itself from the waitlist with a
968 * proclist_delete(), rather than having to check whether it has been
969 * removed from the list.
970 */
971 Assert(waiter->lwWaiting == LW_WS_WAITING);
973
974 /*
975 * Once we've woken up an exclusive locker, there's no point in waking
976 * up anybody else.
977 */
978 if (waiter->lwWaitMode == LW_EXCLUSIVE)
979 break;
980 }
981
983
984 /* unset required flags, and release lock, in one fell swoop */
985 {
986 uint32 old_state;
987 uint32 desired_state;
988
989 old_state = pg_atomic_read_u32(&lock->state);
990 while (true)
991 {
992 desired_state = old_state;
993
994 /* compute desired flags */
995
996 if (new_release_ok)
997 desired_state |= LW_FLAG_RELEASE_OK;
998 else
999 desired_state &= ~LW_FLAG_RELEASE_OK;
1000
1002 desired_state &= ~LW_FLAG_HAS_WAITERS;
1003
1004 desired_state &= ~LW_FLAG_LOCKED; /* release lock */
1005
1006 if (pg_atomic_compare_exchange_u32(&lock->state, &old_state,
1007 desired_state))
1008 break;
1009 }
1010 }
1011
1012 /* Awaken any waiters I removed from the queue. */
1013 proclist_foreach_modify(iter, &wakeup, lwWaitLink)
1014 {
1015 PGPROC *waiter = GetPGProcByNumber(iter.cur);
1016
1017 LOG_LWDEBUG("LWLockRelease", lock, "release waiter");
1018 proclist_delete(&wakeup, iter.cur, lwWaitLink);
1019
1020 /*
1021 * Guarantee that lwWaiting being unset only becomes visible once the
1022 * unlink from the list has completed. Otherwise the target backend
1023 * could be woken up for some other reason and enqueue for a new lock - if
1024 * that happens before the list unlink happens, the list would end up
1025 * being corrupted.
1026 *
1027 * The barrier pairs with the LWLockWaitListLock() when enqueuing for
1028 * another lock.
1029 */
1031 waiter->lwWaiting = LW_WS_NOT_WAITING;
1032 PGSemaphoreUnlock(waiter->sem);
1033 }
1034}
1035
1036/*
1037 * Add ourselves to the end of the queue.
1038 *
1039 * NB: Mode can be LW_WAIT_UNTIL_FREE here!
1040 */
1041static void
1043{
1044 /*
1045 * If we don't have a PGPROC structure, there's no way to wait. This
1046 * should never occur, since MyProc should only be null during shared
1047 * memory initialization.
1048 */
1049 if (MyProc == NULL)
1050 elog(PANIC, "cannot wait without a PGPROC structure");
1051
1053 elog(PANIC, "queueing for lock while waiting on another one");
1054
1055 LWLockWaitListLock(lock);
1056
1057 /* setting the flag is protected by the spinlock */
1059
1062
1063 /* LW_WAIT_UNTIL_FREE waiters are always at the front of the queue */
1064 if (mode == LW_WAIT_UNTIL_FREE)
1065 proclist_push_head(&lock->waiters, MyProcNumber, lwWaitLink);
1066 else
1067 proclist_push_tail(&lock->waiters, MyProcNumber, lwWaitLink);
1068
1069 /* Can release the mutex now */
1071
1072#ifdef LOCK_DEBUG
1073 pg_atomic_fetch_add_u32(&lock->nwaiters, 1);
1074#endif
1075}
1076
1077/*
1078 * Remove ourselves from the waitlist.
1079 *
1080 * This is used if we queued ourselves because we thought we needed to sleep
1081 * but, after further checking, we discovered that we don't actually need to
1082 * do so.
1083 */
1084static void
1086{
1087 bool on_waitlist;
1088
1089#ifdef LWLOCK_STATS
1090 lwlock_stats *lwstats;
1091
1092 lwstats = get_lwlock_stats_entry(lock);
1093
1094 lwstats->dequeue_self_count++;
1095#endif
1096
1097 LWLockWaitListLock(lock);
1098
1099 /*
1100 * Remove ourselves from the waitlist, unless we've already been removed.
1101 * The removal happens with the wait list lock held, so there's no race in
1102 * this check.
1103 */
1104 on_waitlist = MyProc->lwWaiting == LW_WS_WAITING;
1105 if (on_waitlist)
1106 proclist_delete(&lock->waiters, MyProcNumber, lwWaitLink);
1107
1108 if (proclist_is_empty(&lock->waiters) &&
1110 {
1112 }
1113
1114 /* XXX: combine with fetch_and above? */
1116
1117 /* clear waiting state again, nice for debugging */
1118 if (on_waitlist)
1120 else
1121 {
1122 int extraWaits = 0;
1123
1124 /*
1125 * Somebody else dequeued us and has or will wake us up. Deal with the
1126 * superfluous absorption of a wakeup.
1127 */
1128
1129 /*
1130 * Reset RELEASE_OK flag if somebody woke us before we removed
1131 * ourselves - they'll have set it to false.
1132 */
1134
1135 /*
1136 * Now wait for the scheduled wakeup, otherwise our ->lwWaiting would
1137 * get reset at some inconvenient point later. Most of the time this
1138 * will immediately return.
1139 */
1140 for (;;)
1141 {
1144 break;
1145 extraWaits++;
1146 }
1147
1148 /*
1149 * Fix the process wait semaphore's count for any absorbed wakeups.
1150 */
1151 while (extraWaits-- > 0)
1153 }
1154
1155#ifdef LOCK_DEBUG
1156 {
1157 /* not waiting anymore */
1158 uint32 nwaiters PG_USED_FOR_ASSERTS_ONLY = pg_atomic_fetch_sub_u32(&lock->nwaiters, 1);
1159
1160 Assert(nwaiters < MAX_BACKENDS);
1161 }
1162#endif
1163}
1164
1165/*
1166 * LWLockAcquire - acquire a lightweight lock in the specified mode
1167 *
1168 * If the lock is not available, sleep until it is. Returns true if the lock
1169 * was available immediately, false if we had to sleep.
1170 *
1171 * Side effect: cancel/die interrupts are held off until lock release.
1172 */
1173bool
1175{
1176 PGPROC *proc = MyProc;
1177 bool result = true;
1178 int extraWaits = 0;
1179#ifdef LWLOCK_STATS
1180 lwlock_stats *lwstats;
1181
1182 lwstats = get_lwlock_stats_entry(lock);
1183#endif
1184
1186
1187 PRINT_LWDEBUG("LWLockAcquire", lock, mode);
1188
1189#ifdef LWLOCK_STATS
1190 /* Count lock acquisition attempts */
1191 if (mode == LW_EXCLUSIVE)
1192 lwstats->ex_acquire_count++;
1193 else
1194 lwstats->sh_acquire_count++;
1195#endif /* LWLOCK_STATS */
1196
1197 /*
1198 * We can't wait if we haven't got a PGPROC. This should only occur
1199 * during bootstrap or shared memory initialization. Put an Assert here
1200 * to catch unsafe coding practices.
1201 */
1202 Assert(!(proc == NULL && IsUnderPostmaster));
1203
1204 /* Ensure we will have room to remember the lock */
1206 elog(ERROR, "too many LWLocks taken");
1207
1208 /*
1209 * Lock out cancel/die interrupts until we exit the code section protected
1210 * by the LWLock. This ensures that interrupts will not interfere with
1211 * manipulations of data structures in shared memory.
1212 */
1214
1215 /*
1216 * Loop here to try to acquire lock after each time we are signaled by
1217 * LWLockRelease.
1218 *
1219 * NOTE: it might seem better to have LWLockRelease actually grant us the
1220 * lock, rather than retrying and possibly having to go back to sleep. But
1221 * in practice that is no good because it means a process swap for every
1222 * lock acquisition when two or more processes are contending for the same
1223 * lock. Since LWLocks are normally used to protect not-very-long
1224 * sections of computation, a process needs to be able to acquire and
1225 * release the same lock many times during a single CPU time slice, even
1226 * in the presence of contention. The efficiency of being able to do that
1227 * outweighs the inefficiency of sometimes wasting a process dispatch
1228 * cycle because the lock is not free when a released waiter finally gets
1229 * to run. See pgsql-hackers archives for 29-Dec-01.
1230 */
1231 for (;;)
1232 {
1233 bool mustwait;
1234
1235 /*
1236 * Try to grab the lock the first time, we're not in the waitqueue
1237 * yet/anymore.
1238 */
1239 mustwait = LWLockAttemptLock(lock, mode);
1240
1241 if (!mustwait)
1242 {
1243 LOG_LWDEBUG("LWLockAcquire", lock, "immediately acquired lock");
1244 break; /* got the lock */
1245 }
1246
1247 /*
1248 * Ok, at this point we couldn't grab the lock on the first try. We
1249 * cannot simply queue ourselves to the end of the list and wait to be
1250 * woken up, because by now the lock could long since have been released.
1251 * Instead we add ourselves to the queue and try to grab the lock again. If
1252 * we succeed we need to revert the queuing and be happy; otherwise we
1253 * recheck the lock. If we still couldn't grab it, we know that the
1254 * other locker will see our queue entries when releasing since they
1255 * existed before we checked for the lock.
1256 */
1257
1258 /* add to the queue */
1259 LWLockQueueSelf(lock, mode);
1260
1261 /* we're now guaranteed to be woken up if necessary */
1262 mustwait = LWLockAttemptLock(lock, mode);
1263
1264 /* ok, grabbed the lock the second time round, need to undo queueing */
1265 if (!mustwait)
1266 {
1267 LOG_LWDEBUG("LWLockAcquire", lock, "acquired, undoing queue");
1268
1269 LWLockDequeueSelf(lock);
1270 break;
1271 }
1272
1273 /*
1274 * Wait until awakened.
1275 *
1276 * It is possible that we get awakened for a reason other than being
1277 * signaled by LWLockRelease. If so, loop back and wait again. Once
1278 * we've gotten the LWLock, re-increment the sema by the number of
1279 * additional signals received.
1280 */
1281 LOG_LWDEBUG("LWLockAcquire", lock, "waiting");
1282
1283#ifdef LWLOCK_STATS
1284 lwstats->block_count++;
1285#endif
1286
1288 if (TRACE_POSTGRESQL_LWLOCK_WAIT_START_ENABLED())
1289 TRACE_POSTGRESQL_LWLOCK_WAIT_START(T_NAME(lock), mode);
1290
1291 for (;;)
1292 {
1293 PGSemaphoreLock(proc->sem);
1294 if (proc->lwWaiting == LW_WS_NOT_WAITING)
1295 break;
1296 extraWaits++;
1297 }
1298
1299 /* Retrying, allow LWLockRelease to release waiters again. */
1301
1302#ifdef LOCK_DEBUG
1303 {
1304 /* not waiting anymore */
1305 uint32 nwaiters PG_USED_FOR_ASSERTS_ONLY = pg_atomic_fetch_sub_u32(&lock->nwaiters, 1);
1306
1307 Assert(nwaiters < MAX_BACKENDS);
1308 }
1309#endif
1310
1311 if (TRACE_POSTGRESQL_LWLOCK_WAIT_DONE_ENABLED())
1312 TRACE_POSTGRESQL_LWLOCK_WAIT_DONE(T_NAME(lock), mode);
1314
1315 LOG_LWDEBUG("LWLockAcquire", lock, "awakened");
1316
1317 /* Now loop back and try to acquire lock again. */
1318 result = false;
1319 }
1320
1321 if (TRACE_POSTGRESQL_LWLOCK_ACQUIRE_ENABLED())
1322 TRACE_POSTGRESQL_LWLOCK_ACQUIRE(T_NAME(lock), mode);
1323
1324 /* Add lock to list of locks held by this backend */
1327
1328 /*
1329 * Fix the process wait semaphore's count for any absorbed wakeups.
1330 */
1331 while (extraWaits-- > 0)
1332 PGSemaphoreUnlock(proc->sem);
1333
1334 return result;
1335}
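
/*
 * Caller-side usage is simply acquire/operate/release, choosing LW_SHARED
 * for read-only access and LW_EXCLUSIVE for modification.  A sketch; the
 * my_ext_state struct is the hypothetical shared state from the earlier
 * sketches.
 */
#if 0
static int
my_ext_read_counter(void)
{
	int			result;

	LWLockAcquire(&my_ext_state->lock, LW_SHARED);
	result = my_ext_state->counter;
	LWLockRelease(&my_ext_state->lock);

	return result;
}

static void
my_ext_bump_counter(void)
{
	LWLockAcquire(&my_ext_state->lock, LW_EXCLUSIVE);
	my_ext_state->counter++;
	LWLockRelease(&my_ext_state->lock);
}
#endif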
1336
1337/*
1338 * LWLockConditionalAcquire - acquire a lightweight lock in the specified mode
1339 *
1340 * If the lock is not available, return false with no side-effects.
1341 *
1342 * If successful, cancel/die interrupts are held off until lock release.
1343 */
1344bool
1346{
1347 bool mustwait;
1348
1350
1351 PRINT_LWDEBUG("LWLockConditionalAcquire", lock, mode);
1352
1353 /* Ensure we will have room to remember the lock */
1355 elog(ERROR, "too many LWLocks taken");
1356
1357 /*
1358 * Lock out cancel/die interrupts until we exit the code section protected
1359 * by the LWLock. This ensures that interrupts will not interfere with
1360 * manipulations of data structures in shared memory.
1361 */
1363
1364 /* Check for the lock */
1365 mustwait = LWLockAttemptLock(lock, mode);
1366
1367 if (mustwait)
1368 {
1369 /* Failed to get lock, so release interrupt holdoff */
1371
1372 LOG_LWDEBUG("LWLockConditionalAcquire", lock, "failed");
1373 if (TRACE_POSTGRESQL_LWLOCK_CONDACQUIRE_FAIL_ENABLED())
1374 TRACE_POSTGRESQL_LWLOCK_CONDACQUIRE_FAIL(T_NAME(lock), mode);
1375 }
1376 else
1377 {
1378 /* Add lock to list of locks held by this backend */
1381 if (TRACE_POSTGRESQL_LWLOCK_CONDACQUIRE_ENABLED())
1382 TRACE_POSTGRESQL_LWLOCK_CONDACQUIRE(T_NAME(lock), mode);
1383 }
1384 return !mustwait;
1385}
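
/*
 * LWLockConditionalAcquire() suits opportunistic work that can simply be
 * skipped under contention.  A sketch using the hypothetical my_ext_state
 * from above.
 */
#if 0
static void
my_ext_try_bump_counter(void)
{
	if (LWLockConditionalAcquire(&my_ext_state->lock, LW_EXCLUSIVE))
	{
		my_ext_state->counter++;
		LWLockRelease(&my_ext_state->lock);
	}
	/* else: somebody else holds the lock; skip the update this time */
}
#endif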
1386
1387/*
1388 * LWLockAcquireOrWait - Acquire lock, or wait until it's free
1389 *
1390 * The semantics of this function are a bit funky. If the lock is currently
1391 * free, it is acquired in the given mode, and the function returns true. If
1392 * the lock isn't immediately free, the function waits until it is released
1393 * and returns false, but does not acquire the lock.
1394 *
1395 * This is currently used for WALWriteLock: when a backend flushes the WAL,
1396 * holding WALWriteLock, it can flush the commit records of many other
1397 * backends as a side-effect. Those other backends need to wait until the
1398 * flush finishes, but don't need to acquire the lock anymore. They can just
1399 * wake up, observe that their records have already been flushed, and return.
1400 */
1401bool
1403{
1404 PGPROC *proc = MyProc;
1405 bool mustwait;
1406 int extraWaits = 0;
1407#ifdef LWLOCK_STATS
1408 lwlock_stats *lwstats;
1409
1410 lwstats = get_lwlock_stats_entry(lock);
1411#endif
1412
1414
1415 PRINT_LWDEBUG("LWLockAcquireOrWait", lock, mode);
1416
1417 /* Ensure we will have room to remember the lock */
1419 elog(ERROR, "too many LWLocks taken");
1420
1421 /*
1422 * Lock out cancel/die interrupts until we exit the code section protected
1423 * by the LWLock. This ensures that interrupts will not interfere with
1424 * manipulations of data structures in shared memory.
1425 */
1427
1428 /*
1429 * NB: We're using nearly the same twice-in-a-row lock acquisition
1430 * protocol as LWLockAcquire(). Check its comments for details.
1431 */
1432 mustwait = LWLockAttemptLock(lock, mode);
1433
1434 if (mustwait)
1435 {
1437
1438 mustwait = LWLockAttemptLock(lock, mode);
1439
1440 if (mustwait)
1441 {
1442 /*
1443 * Wait until awakened. Like in LWLockAcquire, be prepared for
1444 * bogus wakeups.
1445 */
1446 LOG_LWDEBUG("LWLockAcquireOrWait", lock, "waiting");
1447
1448#ifdef LWLOCK_STATS
1449 lwstats->block_count++;
1450#endif
1451
1453 if (TRACE_POSTGRESQL_LWLOCK_WAIT_START_ENABLED())
1454 TRACE_POSTGRESQL_LWLOCK_WAIT_START(T_NAME(lock), mode);
1455
1456 for (;;)
1457 {
1458 PGSemaphoreLock(proc->sem);
1459 if (proc->lwWaiting == LW_WS_NOT_WAITING)
1460 break;
1461 extraWaits++;
1462 }
1463
1464#ifdef LOCK_DEBUG
1465 {
1466 /* not waiting anymore */
1467 uint32 nwaiters PG_USED_FOR_ASSERTS_ONLY = pg_atomic_fetch_sub_u32(&lock->nwaiters, 1);
1468
1469 Assert(nwaiters < MAX_BACKENDS);
1470 }
1471#endif
1472 if (TRACE_POSTGRESQL_LWLOCK_WAIT_DONE_ENABLED())
1473 TRACE_POSTGRESQL_LWLOCK_WAIT_DONE(T_NAME(lock), mode);
1475
1476 LOG_LWDEBUG("LWLockAcquireOrWait", lock, "awakened");
1477 }
1478 else
1479 {
1480 LOG_LWDEBUG("LWLockAcquireOrWait", lock, "acquired, undoing queue");
1481
1482 /*
1483 * Got lock in the second attempt, undo queueing. We need to treat
1484 * this as having successfully acquired the lock, otherwise we'd
1485 * not necessarily wake up people we've prevented from acquiring
1486 * the lock.
1487 */
1488 LWLockDequeueSelf(lock);
1489 }
1490 }
1491
1492 /*
1493 * Fix the process wait semaphore's count for any absorbed wakeups.
1494 */
1495 while (extraWaits-- > 0)
1496 PGSemaphoreUnlock(proc->sem);
1497
1498 if (mustwait)
1499 {
1500 /* Failed to get lock, so release interrupt holdoff */
1502 LOG_LWDEBUG("LWLockAcquireOrWait", lock, "failed");
1503 if (TRACE_POSTGRESQL_LWLOCK_ACQUIRE_OR_WAIT_FAIL_ENABLED())
1504 TRACE_POSTGRESQL_LWLOCK_ACQUIRE_OR_WAIT_FAIL(T_NAME(lock), mode);
1505 }
1506 else
1507 {
1508 LOG_LWDEBUG("LWLockAcquireOrWait", lock, "succeeded");
1509 /* Add lock to list of locks held by this backend */
1512 if (TRACE_POSTGRESQL_LWLOCK_ACQUIRE_OR_WAIT_ENABLED())
1513 TRACE_POSTGRESQL_LWLOCK_ACQUIRE_OR_WAIT(T_NAME(lock), mode);
1514 }
1515
1516 return !mustwait;
1517}
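
/*
 * A sketch of the WALWriteLock-style pattern described above: if we get the
 * lock we do the flush ourselves, otherwise we merely waited for somebody
 * else's flush and recheck how far that got us.  my_flushed_upto() and
 * my_flush_upto() are hypothetical helpers.
 */
#if 0
static void
my_ensure_flushed(XLogRecPtr target)
{
	while (my_flushed_upto() < target)
	{
		if (LWLockAcquireOrWait(WALWriteLock, LW_EXCLUSIVE))
		{
			/* got the lock: flush up to (at least) the target ourselves */
			my_flush_upto(target);
			LWLockRelease(WALWriteLock);
		}
		/* else: some other backend's flush finished; loop and recheck */
	}
}
#endif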
1518
1519/*
1520 * Does the lwlock in its current state need to wait for the variable value to
1521 * change?
1522 *
1523 * If we don't need to wait, and it's because the value of the variable has
1524 * changed, store the current value in newval.
1525 *
1526 * *result is set to true if the lock was free, and false otherwise.
1527 */
1528static bool
1530 uint64 *newval, bool *result)
1531{
1532 bool mustwait;
1533 uint64 value;
1534
1535 /*
1536 * Test first to see if the slot is free right now.
1537 *
1538 * XXX: the unique caller of this routine, WaitXLogInsertionsToFinish()
1539 * via LWLockWaitForVar(), uses an implied barrier with a spinlock before
1540 * this, so we don't need a memory barrier here as far as the current
1541 * usage is concerned. But that might not be safe in general.
1542 */
1543 mustwait = (pg_atomic_read_u32(&lock->state) & LW_VAL_EXCLUSIVE) != 0;
1544
1545 if (!mustwait)
1546 {
1547 *result = true;
1548 return false;
1549 }
1550
1551 *result = false;
1552
1553 /*
1554 * Reading this value atomically is safe even on platforms where uint64
1555 * cannot be read without observing a torn value.
1556 */
1557 value = pg_atomic_read_u64(valptr);
1558
1559 if (value != oldval)
1560 {
1561 mustwait = false;
1562 *newval = value;
1563 }
1564 else
1565 {
1566 mustwait = true;
1567 }
1568
1569 return mustwait;
1570}
1571
1572/*
1573 * LWLockWaitForVar - Wait until lock is free, or a variable is updated.
1574 *
1575 * If the lock is held and *valptr equals oldval, waits until the lock is
1576 * either freed, or the lock holder updates *valptr by calling
1577 * LWLockUpdateVar. If the lock is free on exit (immediately or after
1578 * waiting), returns true. If the lock is still held, but *valptr no longer
1579 * matches oldval, returns false and sets *newval to the current value in
1580 * *valptr.
1581 *
1582 * Note: this function ignores shared lock holders; if the lock is held
1583 * in shared mode, returns 'true'.
1584 *
1585 * Be aware that LWLockConflictsWithVar() does not include a memory barrier,
1586 * hence the caller of this function may want to rely on an explicit barrier or
1587 * an implied barrier via spinlock or LWLock to avoid memory ordering issues.
1588 */
1589bool
1591 uint64 *newval)
1592{
1593 PGPROC *proc = MyProc;
1594 int extraWaits = 0;
1595 bool result = false;
1596#ifdef LWLOCK_STATS
1597 lwlock_stats *lwstats;
1598
1599 lwstats = get_lwlock_stats_entry(lock);
1600#endif
1601
1602 PRINT_LWDEBUG("LWLockWaitForVar", lock, LW_WAIT_UNTIL_FREE);
1603
1604 /*
1605 * Lock out cancel/die interrupts while we sleep on the lock. There is no
1606 * cleanup mechanism to remove us from the wait queue if we got
1607 * interrupted.
1608 */
1610
1611 /*
1612 * Loop here to check the lock's status after each time we are signaled.
1613 */
1614 for (;;)
1615 {
1616 bool mustwait;
1617
1618 mustwait = LWLockConflictsWithVar(lock, valptr, oldval, newval,
1619 &result);
1620
1621 if (!mustwait)
1622 break; /* the lock was free or value didn't match */
1623
1624 /*
1625 * Add myself to wait queue. Note that this is racy, somebody else
1626 * could wakeup before we're finished queuing. NB: We're using nearly
1627 * the same twice-in-a-row lock acquisition protocol as
1628 * LWLockAcquire(). Check its comments for details. The only
1629 * difference is that we also have to check the variable's values when
1630 * checking the state of the lock.
1631 */
1633
1634 /*
1635 * Set RELEASE_OK flag, to make sure we get woken up as soon as the
1636 * lock is released.
1637 */
1639
1640 /*
1641 * We're now guaranteed to be woken up if necessary. Recheck the lock
1642 * and variables state.
1643 */
1644 mustwait = LWLockConflictsWithVar(lock, valptr, oldval, newval,
1645 &result);
1646
1647 /* Ok, no conflict after we queued ourselves. Undo queueing. */
1648 if (!mustwait)
1649 {
1650 LOG_LWDEBUG("LWLockWaitForVar", lock, "free, undoing queue");
1651
1652 LWLockDequeueSelf(lock);
1653 break;
1654 }
1655
1656 /*
1657 * Wait until awakened.
1658 *
1659 * It is possible that we get awakened for a reason other than being
1660 * signaled by LWLockRelease. If so, loop back and wait again. Once
1661 * we've gotten the LWLock, re-increment the sema by the number of
1662 * additional signals received.
1663 */
1664 LOG_LWDEBUG("LWLockWaitForVar", lock, "waiting");
1665
1666#ifdef LWLOCK_STATS
1667 lwstats->block_count++;
1668#endif
1669
1671 if (TRACE_POSTGRESQL_LWLOCK_WAIT_START_ENABLED())
1672 TRACE_POSTGRESQL_LWLOCK_WAIT_START(T_NAME(lock), LW_EXCLUSIVE);
1673
1674 for (;;)
1675 {
1676 PGSemaphoreLock(proc->sem);
1677 if (proc->lwWaiting == LW_WS_NOT_WAITING)
1678 break;
1679 extraWaits++;
1680 }
1681
1682#ifdef LOCK_DEBUG
1683 {
1684 /* not waiting anymore */
1685 uint32 nwaiters PG_USED_FOR_ASSERTS_ONLY = pg_atomic_fetch_sub_u32(&lock->nwaiters, 1);
1686
1687 Assert(nwaiters < MAX_BACKENDS);
1688 }
1689#endif
1690
1691 if (TRACE_POSTGRESQL_LWLOCK_WAIT_DONE_ENABLED())
1692 TRACE_POSTGRESQL_LWLOCK_WAIT_DONE(T_NAME(lock), LW_EXCLUSIVE);
1694
1695 LOG_LWDEBUG("LWLockWaitForVar", lock, "awakened");
1696
1697 /* Now loop back and check the status of the lock again. */
1698 }
1699
1700 /*
1701 * Fix the process wait semaphore's count for any absorbed wakeups.
1702 */
1703 while (extraWaits-- > 0)
1704 PGSemaphoreUnlock(proc->sem);
1705
1706 /*
1707 * Now okay to allow cancel/die interrupts.
1708 */
1710
1711 return result;
1712}
1713
1714
1715/*
1716 * LWLockUpdateVar - Update a variable and wake up waiters atomically
1717 *
1718 * Sets *valptr to 'val', and wakes up all processes waiting for us with
1719 * LWLockWaitForVar(). It first sets the value atomically and then wakes up
1720 * waiting processes so that any process calling LWLockWaitForVar() on the same
1721 * lock is guaranteed to see the new value, and act accordingly.
1722 *
1723 * The caller must be holding the lock in exclusive mode.
1724 */
1725void
1727{
1730
1731 PRINT_LWDEBUG("LWLockUpdateVar", lock, LW_EXCLUSIVE);
1732
1733 /*
1734 * Note that pg_atomic_exchange_u64 is a full barrier, so we're guaranteed
1735 * that the variable is updated before waking up waiters.
1736 */
1737 pg_atomic_exchange_u64(valptr, val);
1738
1740
1741 LWLockWaitListLock(lock);
1742
1744
1745 /*
1746 * See if there are any LW_WAIT_UNTIL_FREE waiters that need to be woken
1747 * up. They are always in the front of the queue.
1748 */
1749 proclist_foreach_modify(iter, &lock->waiters, lwWaitLink)
1750 {
1751 PGPROC *waiter = GetPGProcByNumber(iter.cur);
1752
1753 if (waiter->lwWaitMode != LW_WAIT_UNTIL_FREE)
1754 break;
1755
1756 proclist_delete(&lock->waiters, iter.cur, lwWaitLink);
1757 proclist_push_tail(&wakeup, iter.cur, lwWaitLink);
1758
1759 /* see LWLockWakeup() */
1760 Assert(waiter->lwWaiting == LW_WS_WAITING);
1762 }
1763
1764 /* We are done updating shared state of the lock itself. */
1766
1767 /*
1768 * Awaken any waiters I removed from the queue.
1769 */
1770 proclist_foreach_modify(iter, &wakeup, lwWaitLink)
1771 {
1772 PGPROC *waiter = GetPGProcByNumber(iter.cur);
1773
1774 proclist_delete(&wakeup, iter.cur, lwWaitLink);
1775 /* check comment in LWLockWakeup() about this barrier */
1777 waiter->lwWaiting = LW_WS_NOT_WAITING;
1778 PGSemaphoreUnlock(waiter->sem);
1779 }
1780}
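
/*
 * The variable-wait facility pairs three calls: the exclusive lock holder
 * publishes progress with LWLockUpdateVar() and clears it when done (e.g.
 * with LWLockReleaseClearVar()); other backends call LWLockWaitForVar() to
 * sleep until the lock is free or the published value has moved past what
 * they last saw.  A sketch of both sides; progress_lock and progress_pos are
 * hypothetical.
 */
#if 0
/* lock holder: advertise how far we have gotten, without releasing the lock */
static void
producer_step(LWLock *progress_lock, pg_atomic_uint64 *progress_pos,
			  uint64 new_pos)
{
	LWLockUpdateVar(progress_lock, progress_pos, new_pos);
}

/* another backend: wait until the holder finishes or moves past 'seen' */
static void
consumer_wait(LWLock *progress_lock, pg_atomic_uint64 *progress_pos,
			  uint64 seen)
{
	uint64		newest;

	if (LWLockWaitForVar(progress_lock, progress_pos, seen, &newest))
	{
		/* lock is free: the holder has finished its work */
	}
	else
	{
		/* still held, but progress advanced; 'newest' is the new position */
	}
}
#endif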
1781
1782
1783/*
1784 * Stop treating lock as held by current backend.
1785 *
1786 * This is the code that can be shared between actually releasing a lock
1787 * (LWLockRelease()) and just not tracking ownership of the lock anymore
1788 * without releasing the lock (LWLockDisown()).
1789 *
1790 * Returns the mode in which the lock was held by the current backend.
1791 *
1792 * NB: This does not call RESUME_INTERRUPTS(), but leaves that responsibility
1793 * to the caller.
1794 *
1795 * NB: This will leave lock->owner pointing to the current backend (if
1796 * LOCK_DEBUG is set). This is somewhat intentional, as it makes it easier to
1797 * debug cases of missing wakeups during lock release.
1798 */
1799static inline LWLockMode
1801{
1803 int i;
1804
1805 /*
1806 * Remove lock from list of locks held. Usually, but not always, it will
1807 * be the latest-acquired lock; so search array backwards.
1808 */
1809 for (i = num_held_lwlocks; --i >= 0;)
1810 if (lock == held_lwlocks[i].lock)
1811 break;
1812
1813 if (i < 0)
1814 elog(ERROR, "lock %s is not held", T_NAME(lock));
1815
1817
1819 for (; i < num_held_lwlocks; i++)
1820 held_lwlocks[i] = held_lwlocks[i + 1];
1821
1822 return mode;
1823}
1824
1825/*
1826 * Helper function to release lock, shared between LWLockRelease() and
1827 * LWLockReleaseDisowned().
1828 */
1829static void
1831{
1832 uint32 oldstate;
1833 bool check_waiters;
1834
1835 /*
1836 * Release my hold on the lock; after that it can immediately be acquired by
1837 * others, even if we still have to wake up other waiters.
1838 */
1839 if (mode == LW_EXCLUSIVE)
1841 else
1842 oldstate = pg_atomic_sub_fetch_u32(&lock->state, LW_VAL_SHARED);
1843
1844 /* nobody else can have that kind of lock */
1845 Assert(!(oldstate & LW_VAL_EXCLUSIVE));
1846
1847 if (TRACE_POSTGRESQL_LWLOCK_RELEASE_ENABLED())
1848 TRACE_POSTGRESQL_LWLOCK_RELEASE(T_NAME(lock));
1849
1850 /*
1851 * We're still waiting for backends to get scheduled, don't wake them up
1852 * again.
1853 */
1854 if ((oldstate & (LW_FLAG_HAS_WAITERS | LW_FLAG_RELEASE_OK)) ==
1856 (oldstate & LW_LOCK_MASK) == 0)
1857 check_waiters = true;
1858 else
1859 check_waiters = false;
1860
1861 /*
1862 * As waking up waiters requires the spinlock to be acquired, only do so
1863 * if necessary.
1864 */
1865 if (check_waiters)
1866 {
1867 /* XXX: remove before commit? */
1868 LOG_LWDEBUG("LWLockRelease", lock, "releasing waiters");
1869 LWLockWakeup(lock);
1870 }
1871}
1872
1873
1874/*
1875 * Stop treating lock as held by current backend.
1876 *
1877 * After calling this function it's the caller's responsibility to ensure that
1878 * the lock gets released (via LWLockReleaseDisowned()), even in case of an
1879 * error. This is only desirable if the lock is going to be released in a
1880 * different process than the one that acquired it.
1881 */
1882void
1884{
1886
1888}
1889
1890/*
1891 * LWLockRelease - release a previously acquired lock
1892 */
1893void
1895{
1897
1898 mode = LWLockDisownInternal(lock);
1899
1900 PRINT_LWDEBUG("LWLockRelease", lock, mode);
1901
1903
1904 /*
1905 * Now okay to allow cancel/die interrupts.
1906 */
1908}
1909
1910/*
1911 * Release lock previously disowned with LWLockDisown().
1912 */
1913void
1915{
1917}
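
/*
 * Disowning covers the rare case where one process acquires a lock but a
 * different process is responsible for releasing it; the acquirer must still
 * guarantee that the release eventually happens, even on error.  A schematic
 * sketch; the LWLockReleaseDisowned() signature (lock plus the mode it was
 * held in) is assumed here.
 */
#if 0
/* in the acquiring process */
static void
hand_off_lock(LWLock *lock)
{
	LWLockAcquire(lock, LW_EXCLUSIVE);
	/* ... set up work that some other process will complete ... */
	LWLockDisown(lock);
}

/* in the process that completes the work */
static void
finish_and_release(LWLock *lock)
{
	/* ... finish the work protected by the lock ... */
	LWLockReleaseDisowned(lock, LW_EXCLUSIVE);
}
#endif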
1918
1919/*
1920 * LWLockReleaseClearVar - release a previously acquired lock, reset variable
1921 */
1922void
1924{
1925 /*
1926 * Note that pg_atomic_exchange_u64 is a full barrier, so we're guaranteed
1927 * that the variable is updated before releasing the lock.
1928 */
1929 pg_atomic_exchange_u64(valptr, val);
1930
1931 LWLockRelease(lock);
1932}
1933
1934
1935/*
1936 * LWLockReleaseAll - release all currently-held locks
1937 *
1938 * Used to clean up after ereport(ERROR). An important difference between this
1939 * function and retail LWLockRelease calls is that InterruptHoldoffCount is
1940 * unchanged by this operation. This is necessary since InterruptHoldoffCount
1941 * has been set to an appropriate level earlier in error recovery. We could
1942 * decrement it below zero if we allow it to drop for each released lock!
1943 */
1944void
1946{
1947 while (num_held_lwlocks > 0)
1948 {
1949 HOLD_INTERRUPTS(); /* match the upcoming RESUME_INTERRUPTS */
1950
1952 }
1953}
1954
1955
1956/*
1957 * ForEachLWLockHeldByMe - run a callback for each held lock
1958 *
1959 * This is meant as debug support only.
1960 */
1961void
1963 void *context)
1964{
1965 int i;
1966
1967 for (i = 0; i < num_held_lwlocks; i++)
1968 callback(held_lwlocks[i].lock, held_lwlocks[i].mode, context);
1969}
1970
1971/*
1972 * LWLockHeldByMe - test whether my process holds a lock in any mode
1973 *
1974 * This is meant as debug support only.
1975 */
1976bool
1978{
1979 int i;
1980
1981 for (i = 0; i < num_held_lwlocks; i++)
1982 {
1983 if (held_lwlocks[i].lock == lock)
1984 return true;
1985 }
1986 return false;
1987}
1988
1989/*
1990 * LWLockAnyHeldByMe - test whether my process holds any of an array of locks
1991 *
1992 * This is meant as debug support only.
1993 */
1994bool
1995LWLockAnyHeldByMe(LWLock *lock, int nlocks, size_t stride)
1996{
1997 char *held_lock_addr;
1998 char *begin;
1999 char *end;
2000 int i;
2001
2002 begin = (char *) lock;
2003 end = begin + nlocks * stride;
2004 for (i = 0; i < num_held_lwlocks; i++)
2005 {
2006 held_lock_addr = (char *) held_lwlocks[i].lock;
2007 if (held_lock_addr >= begin &&
2008 held_lock_addr < end &&
2009 (held_lock_addr - begin) % stride == 0)
2010 return true;
2011 }
2012 return false;
2013}
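
/*
 * LWLockAnyHeldByMe() is convenient for assertions over an array of structs
 * that each embed an LWLock: pass the first lock, the element count and the
 * stride between elements.  A sketch; MyExtSlot, my_slots and MY_NSLOTS are
 * hypothetical.
 */
#if 0
typedef struct MyExtSlot
{
	LWLock		lock;
	int			payload;
} MyExtSlot;

static MyExtSlot *my_slots;		/* MY_NSLOTS entries in shared memory */

static void
assert_no_slot_lock_held(void)
{
	Assert(!LWLockAnyHeldByMe(&my_slots[0].lock, MY_NSLOTS,
							  sizeof(MyExtSlot)));
}
#endif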
2014
2015/*
2016 * LWLockHeldByMeInMode - test whether my process holds a lock in given mode
2017 *
2018 * This is meant as debug support only.
2019 */
2020bool
2022{
2023 int i;
2024
2025 for (i = 0; i < num_held_lwlocks; i++)
2026 {
2027 if (held_lwlocks[i].lock == lock && held_lwlocks[i].mode == mode)
2028 return true;
2029 }
2030 return false;
2031}
Definition: lwlock.c:698
static const char *const BuiltinTrancheNames[]
Definition: lwlock.c:135
NamedLWLockTrancheRequest * NamedLWLockTrancheRequestArray
Definition: lwlock.c:193
void LWLockReleaseDisowned(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1914
static void LWLockWaitListUnlock(LWLock *lock)
Definition: lwlock.c:913
#define LOG_LWDEBUG(a, b, c)
Definition: lwlock.c:276
bool LWLockConditionalAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1345
bool LWLockAcquireOrWait(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1402
static void LWLockQueueSelf(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1042
#define PRINT_LWDEBUG(a, b, c)
Definition: lwlock.c:275
static void LWLockReportWaitStart(LWLock *lock)
Definition: lwlock.c:719
LWLockPadded * MainLWLockArray
Definition: lwlock.c:161
StaticAssertDecl(((MAX_BACKENDS+1) &MAX_BACKENDS)==0, "MAX_BACKENDS + 1 needs to be a power of 2")
void ForEachLWLockHeldByMe(void(*callback)(LWLock *, LWLockMode, void *), void *context)
Definition: lwlock.c:1962
const char * GetLWLockIdentifier(uint32 classId, uint16 eventId)
Definition: lwlock.c:773
static void LWLockDequeueSelf(LWLock *lock)
Definition: lwlock.c:1085
int * LWLockCounter
Definition: lwlock.c:199
Size LWLockShmemSize(void)
Definition: lwlock.c:397
bool LWLockAnyHeldByMe(LWLock *lock, int nlocks, size_t stride)
Definition: lwlock.c:1995
#define MAX_NAMED_TRANCHES
Definition: lwlock.c:204
#define LW_SHARED_MASK
Definition: lwlock.c:105
static bool LWLockConflictsWithVar(LWLock *lock, pg_atomic_uint64 *valptr, uint64 oldval, uint64 *newval, bool *result)
Definition: lwlock.c:1529
void InitLWLockAccess(void)
Definition: lwlock.c:550
@ LW_WS_NOT_WAITING
Definition: lwlock.h:30
@ LW_WS_WAITING
Definition: lwlock.h:31
@ LW_WS_PENDING_WAKEUP
Definition: lwlock.h:32
#define LWLOCK_PADDED_SIZE
Definition: lwlock.h:62
#define BUFFER_MAPPING_LWLOCK_OFFSET
Definition: lwlock.h:102
#define NUM_LOCK_PARTITIONS
Definition: lwlock.h:95
@ LWTRANCHE_FIRST_USER_DEFINED
Definition: lwlock.h:186
#define LOCK_MANAGER_LWLOCK_OFFSET
Definition: lwlock.h:103
#define NUM_BUFFER_PARTITIONS
Definition: lwlock.h:91
#define PREDICATELOCK_MANAGER_LWLOCK_OFFSET
Definition: lwlock.h:105
#define NUM_FIXED_LWLOCKS
Definition: lwlock.h:107
LWLockMode
Definition: lwlock.h:111
@ LW_SHARED
Definition: lwlock.h:113
@ LW_WAIT_UNTIL_FREE
Definition: lwlock.h:114
@ LW_EXCLUSIVE
Definition: lwlock.h:112
#define NUM_PREDICATELOCK_PARTITIONS
Definition: lwlock.h:99
void * MemoryContextAlloc(MemoryContext context, Size size)
Definition: mcxt.c:1229
void * repalloc(void *pointer, Size size)
Definition: mcxt.c:1610
MemoryContext TopMemoryContext
Definition: mcxt.c:166
void MemoryContextDelete(MemoryContext context)
Definition: mcxt.c:469
void MemoryContextAllowInCriticalSection(MemoryContext context, bool allow)
Definition: mcxt.c:740
#define AllocSetContextCreate
Definition: memutils.h:129
#define ALLOCSET_DEFAULT_SIZES
Definition: memutils.h:160
#define RESUME_INTERRUPTS()
Definition: miscadmin.h:135
#define HOLD_INTERRUPTS()
Definition: miscadmin.h:133
bool process_shmem_requests_in_progress
Definition: miscinit.c:1790
void * arg
static uint32 pg_nextpower2_32(uint32 num)
Definition: pg_bitutils.h:189
static PgChecksumMode mode
Definition: pg_checksums.c:55
#define NAMEDATALEN
size_t strlcpy(char *dst, const char *src, size_t siz)
Definition: strlcpy.c:45
void PGSemaphoreUnlock(PGSemaphore sema)
Definition: posix_sema.c:339
void PGSemaphoreLock(PGSemaphore sema)
Definition: posix_sema.c:319
uint64_t Datum
Definition: postgres.h:70
#define GetPGProcByNumber(n)
Definition: proc.h:440
#define proclist_delete(list, procno, link_member)
Definition: proclist.h:187
static void proclist_init(proclist_head *list)
Definition: proclist.h:29
#define proclist_push_tail(list, procno, link_member)
Definition: proclist.h:191
#define proclist_push_head(list, procno, link_member)
Definition: proclist.h:189
#define proclist_foreach_modify(iter, lhead, link_member)
Definition: proclist.h:206
static bool proclist_is_empty(const proclist_head *list)
Definition: proclist.h:38
#define MAX_BACKENDS
Definition: procnumber.h:39
tree ctl
Definition: radixtree.h:1838
void perform_spin_delay(SpinDelayStatus *status)
Definition: s_lock.c:126
void finish_spin_delay(SpinDelayStatus *status)
Definition: s_lock.c:186
#define init_local_spin_delay(status)
Definition: s_lock.h:733
Size add_size(Size s1, Size s2)
Definition: shmem.c:493
Size mul_size(Size s1, Size s2)
Definition: shmem.c:510
void * ShmemAlloc(Size size)
Definition: shmem.c:152
slock_t * ShmemLock
Definition: shmem.c:88
#define SpinLockRelease(lock)
Definition: spin.h:61
#define SpinLockAcquire(lock)
Definition: spin.h:59
PGPROC * MyProc
Definition: proc.c:66
Definition: dynahash.c:222
LWLockMode mode
Definition: lwlock.c:174
LWLock * lock
Definition: lwlock.c:173
Definition: lwlock.h:42
pg_atomic_uint32 state
Definition: lwlock.h:44
uint16 tranche
Definition: lwlock.h:43
proclist_head waiters
Definition: lwlock.h:45
char tranche_name[NAMEDATALEN]
Definition: lwlock.c:183
Definition: proc.h:179
uint8 lwWaitMode
Definition: proc.h:241
PGSemaphore sem
Definition: proc.h:183
uint8 lwWaiting
Definition: proc.h:240
Definition: regguts.h:323
static void callback(struct sockaddr *addr, struct sockaddr *mask, void *unused)
Definition: test_ifaddrs.c:46
LWLock lock
Definition: lwlock.h:70
#define PG_WAIT_LWLOCK
Definition: wait_classes.h:18
static void pgstat_report_wait_start(uint32 wait_event_info)
Definition: wait_event.h:69
static void pgstat_report_wait_end(void)
Definition: wait_event.h:85
const char * name
static TimestampTz wakeup[NUM_WALRCV_WAKEUPS]
Definition: walreceiver.c:130