PostgreSQL Source Code (git master)
lwlock.c File Reference
#include "postgres.h"
#include "miscadmin.h"
#include "pg_trace.h"
#include "pgstat.h"
#include "port/pg_bitutils.h"
#include "storage/proc.h"
#include "storage/proclist.h"
#include "storage/procnumber.h"
#include "storage/spin.h"
#include "utils/memutils.h"
#include "storage/lwlocklist.h"

Go to the source code of this file.

Data Structures

struct  LWLockHandle
 
struct  NamedLWLockTrancheRequest
 

Macros

#define LW_FLAG_HAS_WAITERS   ((uint32) 1 << 31)
 
#define LW_FLAG_WAKE_IN_PROGRESS   ((uint32) 1 << 30)
 
#define LW_FLAG_LOCKED   ((uint32) 1 << 29)
 
#define LW_FLAG_BITS   3
 
#define LW_FLAG_MASK   (((1<<LW_FLAG_BITS)-1)<<(32-LW_FLAG_BITS))
 
#define LW_VAL_EXCLUSIVE   (MAX_BACKENDS + 1)
 
#define LW_VAL_SHARED   1
 
#define LW_SHARED_MASK   MAX_BACKENDS
 
#define LW_LOCK_MASK   (MAX_BACKENDS | LW_VAL_EXCLUSIVE)
 
#define PG_LWLOCK(id, lockname)   [id] = CppAsString(lockname),
 
#define PG_LWLOCKTRANCHE(id, lockname)   [LWTRANCHE_##id] = CppAsString(lockname),
 
#define MAX_SIMUL_LWLOCKS   200
 
#define MAX_NAMED_TRANCHES   256
 
#define T_NAME(lock)    GetLWTrancheName((lock)->tranche)
 
#define PRINT_LWDEBUG(a, b, c)   ((void)0)
 
#define LOG_LWDEBUG(a, b, c)   ((void)0)
 

Typedefs

typedef struct LWLockHandle LWLockHandle
 
typedef struct NamedLWLockTrancheRequest NamedLWLockTrancheRequest
 

Functions

 StaticAssertDecl (((MAX_BACKENDS + 1) & MAX_BACKENDS)==0, "MAX_BACKENDS + 1 needs to be a power of 2")
 
 StaticAssertDecl ((MAX_BACKENDS & LW_FLAG_MASK)==0, "MAX_BACKENDS and LW_FLAG_MASK overlap")
 
 StaticAssertDecl ((LW_VAL_EXCLUSIVE & LW_FLAG_MASK)==0, "LW_VAL_EXCLUSIVE and LW_FLAG_MASK overlap")
 
 StaticAssertDecl (lengthof(BuiltinTrancheNames)==LWTRANCHE_FIRST_USER_DEFINED, "missing entries in BuiltinTrancheNames[]")
 
static void InitializeLWLocks (void)
 
static void LWLockReportWaitStart (LWLock *lock)
 
static void LWLockReportWaitEnd (void)
 
static const char * GetLWTrancheName (uint16 trancheId)
 
static int NumLWLocksForNamedTranches (void)
 
Size LWLockShmemSize (void)
 
void CreateLWLocks (void)
 
void InitLWLockAccess (void)
 
LWLockPadded * GetNamedLWLockTranche (const char *tranche_name)
 
int LWLockNewTrancheId (const char *name)
 
void RequestNamedLWLockTranche (const char *tranche_name, int num_lwlocks)
 
void LWLockInitialize (LWLock *lock, int tranche_id)
 
const char * GetLWLockIdentifier (uint32 classId, uint16 eventId)
 
static bool LWLockAttemptLock (LWLock *lock, LWLockMode mode)
 
static void LWLockWaitListLock (LWLock *lock)
 
static void LWLockWaitListUnlock (LWLock *lock)
 
static void LWLockWakeup (LWLock *lock)
 
static void LWLockQueueSelf (LWLock *lock, LWLockMode mode)
 
static void LWLockDequeueSelf (LWLock *lock)
 
bool LWLockAcquire (LWLock *lock, LWLockMode mode)
 
bool LWLockConditionalAcquire (LWLock *lock, LWLockMode mode)
 
bool LWLockAcquireOrWait (LWLock *lock, LWLockMode mode)
 
static bool LWLockConflictsWithVar (LWLock *lock, pg_atomic_uint64 *valptr, uint64 oldval, uint64 *newval, bool *result)
 
bool LWLockWaitForVar (LWLock *lock, pg_atomic_uint64 *valptr, uint64 oldval, uint64 *newval)
 
void LWLockUpdateVar (LWLock *lock, pg_atomic_uint64 *valptr, uint64 val)
 
void LWLockRelease (LWLock *lock)
 
void LWLockReleaseClearVar (LWLock *lock, pg_atomic_uint64 *valptr, uint64 val)
 
void LWLockReleaseAll (void)
 
bool LWLockHeldByMe (LWLock *lock)
 
bool LWLockAnyHeldByMe (LWLock *lock, int nlocks, size_t stride)
 
bool LWLockHeldByMeInMode (LWLock *lock, LWLockMode mode)
 

Variables

static const char *const BuiltinTrancheNames []
 
char ** LWLockTrancheNames = NULL
 
LWLockPadded * MainLWLockArray = NULL
 
static int num_held_lwlocks = 0
 
static LWLockHandle held_lwlocks [MAX_SIMUL_LWLOCKS]
 
int NamedLWLockTrancheRequests = 0
 
NamedLWLockTrancheRequest * NamedLWLockTrancheRequestArray = NULL
 
static NamedLWLockTrancheRequest * LocalNamedLWLockTrancheRequestArray = NULL
 
int * LWLockCounter = NULL
 
static int LocalLWLockCounter
 

Macro Definition Documentation

◆ LOG_LWDEBUG

#define LOG_LWDEBUG (   a,
  b,
  c 
)    ((void)0)

Definition at line 276 of file lwlock.c.

◆ LW_FLAG_BITS

#define LW_FLAG_BITS   3

Definition at line 97 of file lwlock.c.

◆ LW_FLAG_HAS_WAITERS

#define LW_FLAG_HAS_WAITERS   ((uint32) 1 << 31)

Definition at line 94 of file lwlock.c.

◆ LW_FLAG_LOCKED

#define LW_FLAG_LOCKED   ((uint32) 1 << 29)

Definition at line 96 of file lwlock.c.

◆ LW_FLAG_MASK

#define LW_FLAG_MASK   (((1<<LW_FLAG_BITS)-1)<<(32-LW_FLAG_BITS))

Definition at line 98 of file lwlock.c.

◆ LW_FLAG_WAKE_IN_PROGRESS

#define LW_FLAG_WAKE_IN_PROGRESS   ((uint32) 1 << 30)

Definition at line 95 of file lwlock.c.

◆ LW_LOCK_MASK

#define LW_LOCK_MASK   (MAX_BACKENDS | LW_VAL_EXCLUSIVE)

Definition at line 106 of file lwlock.c.

◆ LW_SHARED_MASK

#define LW_SHARED_MASK   MAX_BACKENDS

Definition at line 105 of file lwlock.c.

◆ LW_VAL_EXCLUSIVE

#define LW_VAL_EXCLUSIVE   (MAX_BACKENDS + 1)

Definition at line 101 of file lwlock.c.

◆ LW_VAL_SHARED

#define LW_VAL_SHARED   1

Definition at line 102 of file lwlock.c.
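
Taken together, these macros partition the 32-bit lock state word: the low 18 bits (LW_SHARED_MASK, equal to MAX_BACKENDS) hold the shared-holder count, the next bit (LW_VAL_EXCLUSIVE) marks an exclusive holder, and the top LW_FLAG_BITS bits carry the wait-list flags. The following minimal sketch is not part of lwlock.c; it copies the constants above (and MAX_BACKENDS from procnumber.h) just to show how a snapshot of the state word decodes:

#include <stdint.h>
#include <stdio.h>

#define MAX_BACKENDS              0x3FFFF  /* from procnumber.h */
#define LW_FLAG_HAS_WAITERS       ((uint32_t) 1 << 31)
#define LW_FLAG_WAKE_IN_PROGRESS  ((uint32_t) 1 << 30)
#define LW_FLAG_LOCKED            ((uint32_t) 1 << 29)
#define LW_VAL_EXCLUSIVE          (MAX_BACKENDS + 1)
#define LW_SHARED_MASK            MAX_BACKENDS

int
main(void)
{
    /* hypothetical snapshot: three shared holders, with waiters queued */
    uint32_t state = LW_FLAG_HAS_WAITERS | 3;

    printf("shared holders:  %u\n", state & LW_SHARED_MASK);
    printf("exclusive:       %d\n", (state & LW_VAL_EXCLUSIVE) != 0);
    printf("has waiters:     %d\n", (state & LW_FLAG_HAS_WAITERS) != 0);
    printf("waitlist locked: %d\n", (state & LW_FLAG_LOCKED) != 0);
    return 0;
}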

◆ MAX_NAMED_TRANCHES

#define MAX_NAMED_TRANCHES   256

Definition at line 204 of file lwlock.c.

◆ MAX_SIMUL_LWLOCKS

#define MAX_SIMUL_LWLOCKS   200

Definition at line 168 of file lwlock.c.

◆ PG_LWLOCK

#define PG_LWLOCK (   id,
  lockname 
)    [id] = CppAsString(lockname),

◆ PG_LWLOCKTRANCHE

#define PG_LWLOCKTRANCHE (   id,
  lockname 
)    [LWTRANCHE_##id] = CppAsString(lockname),

◆ PRINT_LWDEBUG

#define PRINT_LWDEBUG (   a,
  b,
  c 
)    ((void)0)

Definition at line 275 of file lwlock.c.

◆ T_NAME

#define T_NAME (   lock)     GetLWTrancheName((lock)->tranche)

Definition at line 211 of file lwlock.c.

213#ifdef LWLOCK_STATS
214typedef struct lwlock_stats_key
215{
216 int tranche;
217 void *instance;
218} lwlock_stats_key;
219
220typedef struct lwlock_stats
221{
222 lwlock_stats_key key;
223 int sh_acquire_count;
224 int ex_acquire_count;
225 int block_count;
226 int dequeue_self_count;
227 int spin_delay_count;
228} lwlock_stats;
229
230static HTAB *lwlock_stats_htab;
231static lwlock_stats lwlock_stats_dummy;
232#endif
233
234#ifdef LOCK_DEBUG
235bool Trace_lwlocks = false;
236
237inline static void
238PRINT_LWDEBUG(const char *where, LWLock *lock, LWLockMode mode)
239{
240 /* hide statement & context here, otherwise the log is just too verbose */
241 if (Trace_lwlocks)
242 {
243 uint32 state = pg_atomic_read_u32(&lock->state);
244
245 ereport(LOG,
246 (errhidestmt(true),
247 errhidecontext(true),
248 errmsg_internal("%d: %s(%s %p): excl %u shared %u haswaiters %u waiters %u waking %d",
249 MyProcPid,
250 where, T_NAME(lock), lock,
251 (state & LW_VAL_EXCLUSIVE) != 0,
252 (state & LW_SHARED_MASK),
253 (state & LW_FLAG_HAS_WAITERS) != 0,
254 pg_atomic_read_u32(&lock->nwaiters),
255 (state & LW_FLAG_WAKE_IN_PROGRESS) != 0)));
256 }
257}
258
259inline static void
260LOG_LWDEBUG(const char *where, LWLock *lock, const char *msg)
261{
262 /* hide statement & context here, otherwise the log is just too verbose */
263 if (Trace_lwlocks)
264 {
265 ereport(LOG,
266 (errhidestmt(true),
267 errhidecontext(true),
268 errmsg_internal("%s(%s %p): %s", where,
269 T_NAME(lock), lock, msg)));
270 }
271}
272
273#else /* not LOCK_DEBUG */
274#define PRINT_LWDEBUG(a,b,c) ((void)0)
275#define LOG_LWDEBUG(a,b,c) ((void)0)
276#endif /* LOCK_DEBUG */
277
278#ifdef LWLOCK_STATS
279
280static void init_lwlock_stats(void);
281static void print_lwlock_stats(int code, Datum arg);
282static lwlock_stats *get_lwlock_stats(LWLock *lock);
283
284static void
285init_lwlock_stats(void)
286{
287 HASHCTL ctl;
288 static MemoryContext lwlock_stats_cxt = NULL;
289 static bool exit_registered = false;
290
291 if (lwlock_stats_cxt != NULL)
292 MemoryContextDelete(lwlock_stats_cxt);
293
294 /*
295 * The LWLock stats will be updated within a critical section, which
296 * requires allocating new hash entries. Allocations within a critical
297 * section are normally not allowed because running out of memory would
298 * lead to a PANIC, but LWLOCK_STATS is debugging code that's not normally
299 * turned on in production, so that's an acceptable risk. The hash entries
300 * are small, so the risk of running out of memory is minimal in practice.
301 */
303 "LWLock stats",
306
307 ctl.keysize = sizeof(lwlock_stats_key);
308 ctl.entrysize = sizeof(lwlock_stats);
309 ctl.hcxt = lwlock_stats_cxt;
310 lwlock_stats_htab = hash_create("lwlock stats", 16384, &ctl,
311 HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
312 if (!exit_registered)
313 {
314 on_shmem_exit(print_lwlock_stats, 0);
315 exit_registered = true;
316 }
317}
318
319static void
320print_lwlock_stats(int code, Datum arg)
321{
322 HASH_SEQ_STATUS scan;
323 lwlock_stats *lwstats;
324
325 hash_seq_init(&scan, lwlock_stats_htab);
326
327 /* Grab an LWLock to keep different backends from mixing reports */
328 LWLockAcquire(&MainLWLockArray[0].lock, LW_EXCLUSIVE);
329
330 while ((lwstats = (lwlock_stats *) hash_seq_search(&scan)) != NULL)
331 {
332 fprintf(stderr,
333 "PID %d lwlock %s %p: shacq %u exacq %u blk %u spindelay %u dequeue self %u\n",
334 MyProcPid, GetLWTrancheName(lwstats->key.tranche),
335 lwstats->key.instance, lwstats->sh_acquire_count,
336 lwstats->ex_acquire_count, lwstats->block_count,
337 lwstats->spin_delay_count, lwstats->dequeue_self_count);
338 }
339
340 LWLockRelease(&MainLWLockArray[0].lock);
341}
342
343static lwlock_stats *
344get_lwlock_stats(LWLock *lock)
345{
346 lwlock_stats_key key;
347 lwlock_stats *lwstats;
348 bool found;
349
350 /*
351 * During shared memory initialization, the hash table doesn't exist yet.
352 * Stats of that phase aren't very interesting, so just collect operations
353 * on all locks in a single dummy entry.
354 */
355 if (lwlock_stats_htab == NULL)
356 return &lwlock_stats_dummy;
357
358 /* Fetch or create the entry. */
359 MemSet(&key, 0, sizeof(key));
360 key.tranche = lock->tranche;
361 key.instance = lock;
362 lwstats = (lwlock_stats *) hash_search(lwlock_stats_htab, &key, HASH_ENTER, &found);
363 if (!found)
364 {
365 lwstats->sh_acquire_count = 0;
366 lwstats->ex_acquire_count = 0;
367 lwstats->block_count = 0;
368 lwstats->dequeue_self_count = 0;
369 lwstats->spin_delay_count = 0;
370 }
371 return lwstats;
372}
373#endif /* LWLOCK_STATS */
374
375
376/*
377 * Compute number of LWLocks required by named tranches. These will be
378 * allocated in the main array.
379 */
380static int
381NumLWLocksForNamedTranches(void)
382{
383 int numLocks = 0;
384 int i;
385
386 for (i = 0; i < NamedLWLockTrancheRequests; i++)
387 numLocks += NamedLWLockTrancheRequestArray[i].num_lwlocks;
388
389 return numLocks;
390}
391
392/*
393 * Compute shmem space needed for LWLocks and named tranches.
394 */
395Size
396LWLockShmemSize(void)
397{
398 Size size;
399 int numLocks = NUM_FIXED_LWLOCKS;
400
401 /*
402 * If re-initializing shared memory, the request array will no longer be
403 * accessible, so switch to the copy in postmaster's local memory. We'll
404 * copy it back into shared memory later when CreateLWLocks() is called
405 * again.
406 */
407 if (LocalNamedLWLockTrancheRequestArray != NULL)
408 NamedLWLockTrancheRequestArray = LocalNamedLWLockTrancheRequestArray;
409
410 /* Calculate total number of locks needed in the main array. */
411 numLocks += NumLWLocksForNamedTranches();
412
413 /* Space for dynamic allocation counter. */
414 size = MAXALIGN(sizeof(int));
415
416 /* Space for named tranches. */
417 size = add_size(size, mul_size(MAX_NAMED_TRANCHES, sizeof(char *)));
418 size = add_size(size, mul_size(MAX_NAMED_TRANCHES, NAMEDATALEN));
419
420 /*
421 * Make space for named tranche requests. This is done for the benefit of
422 * EXEC_BACKEND builds, which otherwise wouldn't be able to call
423 * GetNamedLWLockTranche() outside postmaster.
424 */
425 size = add_size(size, mul_size(NamedLWLockTrancheRequests,
426 sizeof(NamedLWLockTrancheRequest)));
427
428 /* Space for the LWLock array, plus room for cache line alignment. */
429 size = add_size(size, LWLOCK_PADDED_SIZE);
430 size = add_size(size, mul_size(numLocks, sizeof(LWLockPadded)));
431
432 return size;
433}
434
435/*
436 * Allocate shmem space for the main LWLock array and all tranches and
437 * initialize it.
438 */
439void
440CreateLWLocks(void)
441{
442 if (!IsUnderPostmaster)
443 {
444 Size spaceLocks = LWLockShmemSize();
445 char *ptr;
446
447 /* Allocate space */
448 ptr = (char *) ShmemAlloc(spaceLocks);
449
450 /* Initialize the dynamic-allocation counter for tranches */
451 LWLockCounter = (int *) ptr;
452 *LWLockCounter = LWTRANCHE_FIRST_USER_DEFINED;
453 ptr += MAXALIGN(sizeof(int));
454
455 /* Initialize tranche names */
456 LWLockTrancheNames = (char **) ptr;
457 ptr += MAX_NAMED_TRANCHES * sizeof(char *);
458 for (int i = 0; i < MAX_NAMED_TRANCHES; i++)
459 {
460 LWLockTrancheNames[i] = ptr;
461 ptr += NAMEDATALEN;
462 }
463
464 /*
465 * Move named tranche requests to shared memory. This is done for the
466 * benefit of EXEC_BACKEND builds, which otherwise wouldn't be able to
467 * call GetNamedLWLockTranche() outside postmaster.
468 */
469 if (NamedLWLockTrancheRequests > 0)
470 {
471 /*
472 * Save the pointer to the request array in postmaster's local
473 * memory. We'll need it if we ever need to re-initialize shared
474 * memory after a crash.
475 */
476 LocalNamedLWLockTrancheRequestArray = NamedLWLockTrancheRequestArray;
477
478 NamedLWLockTrancheRequestArray = (NamedLWLockTrancheRequest *) ptr;
479 memcpy(NamedLWLockTrancheRequestArray, LocalNamedLWLockTrancheRequestArray,
480 NamedLWLockTrancheRequests * sizeof(NamedLWLockTrancheRequest));
481 ptr += NamedLWLockTrancheRequests * sizeof(NamedLWLockTrancheRequest);
482 }
483
484 /* Ensure desired alignment of LWLock array */
485 ptr += LWLOCK_PADDED_SIZE - ((uintptr_t) ptr) % LWLOCK_PADDED_SIZE;
486 MainLWLockArray = (LWLockPadded *) ptr;
487
488 /* Initialize all LWLocks */
489 InitializeLWLocks();
490 }
491}
492
493/*
494 * Initialize LWLocks that are fixed and those belonging to named tranches.
495 */
496static void
497InitializeLWLocks(void)
498{
499 int id;
500 int i;
501 int j;
502 LWLockPadded *lock;
503
504 /* Initialize all individual LWLocks in main array */
505 for (id = 0, lock = MainLWLockArray; id < NUM_INDIVIDUAL_LWLOCKS; id++, lock++)
506 LWLockInitialize(&lock->lock, id);
507
508 /* Initialize buffer mapping LWLocks in main array */
509 lock = MainLWLockArray + BUFFER_MAPPING_LWLOCK_OFFSET;
510 for (id = 0; id < NUM_BUFFER_PARTITIONS; id++, lock++)
511 LWLockInitialize(&lock->lock, LWTRANCHE_BUFFER_MAPPING);
512
513 /* Initialize lmgrs' LWLocks in main array */
514 lock = MainLWLockArray + LOCK_MANAGER_LWLOCK_OFFSET;
515 for (id = 0; id < NUM_LOCK_PARTITIONS; id++, lock++)
516 LWLockInitialize(&lock->lock, LWTRANCHE_LOCK_MANAGER);
517
518 /* Initialize predicate lmgrs' LWLocks in main array */
519 lock = MainLWLockArray + PREDICATELOCK_MANAGER_LWLOCK_OFFSET;
520 for (id = 0; id < NUM_PREDICATELOCK_PARTITIONS; id++, lock++)
521 LWLockInitialize(&lock->lock, LWTRANCHE_PREDICATE_LOCK_MANAGER);
522
523 /*
524 * Copy the info about any named tranches into shared memory (so that
525 * other processes can see it), and initialize the requested LWLocks.
526 */
527 if (NamedLWLockTrancheRequests > 0)
528 {
529 lock = MainLWLockArray + NUM_FIXED_LWLOCKS;
530
531 for (i = 0; i < NamedLWLockTrancheRequests; i++)
532 {
533 NamedLWLockTrancheRequest *request;
534 int tranche;
535
536 request = &NamedLWLockTrancheRequestArray[i];
537 tranche = LWLockNewTrancheId(request->tranche_name);
538
539 for (j = 0; j < request->num_lwlocks; j++, lock++)
540 LWLockInitialize(&lock->lock, tranche);
541 }
542 }
543}
544
545/*
546 * InitLWLockAccess - initialize backend-local state needed to hold LWLocks
547 */
548void
549InitLWLockAccess(void)
550{
551#ifdef LWLOCK_STATS
552 init_lwlock_stats();
553#endif
554}
555
556/*
557 * GetNamedLWLockTranche - returns the base address of LWLock from the
558 * specified tranche.
559 *
560 * Caller needs to retrieve the requested number of LWLocks starting from
561 * the base lock address returned by this API. This can be used for
562 * tranches that are requested by using RequestNamedLWLockTranche() API.
563 */
564LWLockPadded *
565GetNamedLWLockTranche(const char *tranche_name)
566{
567 int lock_pos;
568 int i;
569
570 /*
571 * Obtain the position of base address of LWLock belonging to requested
572 * tranche_name in MainLWLockArray. LWLocks for named tranches are placed
573 * in MainLWLockArray after fixed locks.
574 */
575 lock_pos = NUM_FIXED_LWLOCKS;
576 for (i = 0; i < NamedLWLockTrancheRequests; i++)
577 {
578 if (strcmp(NamedLWLockTrancheRequestArray[i].tranche_name,
579 tranche_name) == 0)
580 return &MainLWLockArray[lock_pos];
581
582 lock_pos += NamedLWLockTrancheRequestArray[i].num_lwlocks;
583 }
584
585 elog(ERROR, "requested tranche is not registered");
586
587 /* just to keep compiler quiet */
588 return NULL;
589}
590
591/*
592 * Allocate a new tranche ID with the provided name.
593 */
594int
595LWLockNewTrancheId(const char *name)
596{
597 int result;
598
599 if (!name)
600 ereport(ERROR,
601 (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
602 errmsg("tranche name cannot be NULL")));
603
604 if (strlen(name) >= NAMEDATALEN)
605 ereport(ERROR,
606 (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
607 errmsg("tranche name too long"),
608 errdetail("LWLock tranche names must be no longer than %d bytes.",
609 NAMEDATALEN - 1)));
610
611 /*
612 * We use the ShmemLock spinlock to protect LWLockCounter and
613 * LWLockTrancheNames.
614 */
615 SpinLockAcquire(ShmemLock);
616
617 if (*LWLockCounter - LWTRANCHE_FIRST_USER_DEFINED >= MAX_NAMED_TRANCHES)
618 {
619 SpinLockRelease(ShmemLock);
620 ereport(ERROR,
621 (errmsg("maximum number of tranches already registered"),
622 errdetail("No more than %d tranches may be registered.",
623 MAX_NAMED_TRANCHES)));
624 }
625
626 result = (*LWLockCounter)++;
627 LocalLWLockCounter = *LWLockCounter;
628 strlcpy(LWLockTrancheNames[result - LWTRANCHE_FIRST_USER_DEFINED], name, NAMEDATALEN);
629
630 SpinLockRelease(ShmemLock);
631
632 return result;
633}
634
635/*
636 * RequestNamedLWLockTranche
637 * Request that extra LWLocks be allocated during postmaster
638 * startup.
639 *
640 * This may only be called via the shmem_request_hook of a library that is
641 * loaded into the postmaster via shared_preload_libraries. Calls from
642 * elsewhere will fail.
643 *
644 * The tranche name will be user-visible as a wait event name, so try to
645 * use a name that fits the style for those.
646 */
647void
648RequestNamedLWLockTranche(const char *tranche_name, int num_lwlocks)
649{
650 NamedLWLockTrancheRequest *request;
651 static int NamedLWLockTrancheRequestsAllocated = 0;
652
653 if (!process_shmem_requests_in_progress)
654 elog(FATAL, "cannot request additional LWLocks outside shmem_request_hook");
655
656 if (!tranche_name)
657 ereport(ERROR,
658 (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
659 errmsg("tranche name cannot be NULL")));
660
661 if (strlen(tranche_name) >= NAMEDATALEN)
662 ereport(ERROR,
663 (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
664 errmsg("tranche name too long"),
665 errdetail("LWLock tranche names must be no longer than %d bytes.",
666 NAMEDATALEN - 1)));
667
668 if (NamedLWLockTrancheRequestArray == NULL)
669 {
670 NamedLWLockTrancheRequestsAllocated = 16;
671 NamedLWLockTrancheRequestArray = (NamedLWLockTrancheRequest *)
672 MemoryContextAlloc(TopMemoryContext,
673 NamedLWLockTrancheRequestsAllocated
674 * sizeof(NamedLWLockTrancheRequest));
675 }
676
677 if (NamedLWLockTrancheRequests >= NamedLWLockTrancheRequestsAllocated)
678 {
679 int i = pg_nextpower2_32(NamedLWLockTrancheRequests + 1);
680
681 NamedLWLockTrancheRequestArray = (NamedLWLockTrancheRequest *)
682 repalloc(NamedLWLockTrancheRequestArray,
683 i * sizeof(NamedLWLockTrancheRequest));
684 NamedLWLockTrancheRequestsAllocated = i;
685 }
686
687 request = &NamedLWLockTrancheRequestArray[NamedLWLockTrancheRequests];
688 strlcpy(request->tranche_name, tranche_name, NAMEDATALEN);
689 request->num_lwlocks = num_lwlocks;
690 NamedLWLockTrancheRequests++;
691}
692
693/*
694 * LWLockInitialize - initialize a new lwlock; it's initially unlocked
695 */
696void
697LWLockInitialize(LWLock *lock, int tranche_id)
698{
699 /* verify the tranche_id is valid */
700 (void) GetLWTrancheName(tranche_id);
701
702 pg_atomic_init_u32(&lock->state, 0);
703#ifdef LOCK_DEBUG
704 pg_atomic_init_u32(&lock->nwaiters, 0);
705#endif
706 lock->tranche = tranche_id;
707 proclist_init(&lock->waiters);
708}
709
710/*
711 * Report start of wait event for light-weight locks.
712 *
713 * This function will be used by all the light-weight lock calls which
714 * needs to wait to acquire the lock. This function distinguishes wait
715 * event based on tranche and lock id.
716 */
717static inline void
718LWLockReportWaitStart(LWLock *lock)
719{
720 pgstat_report_wait_start(PG_WAIT_LWLOCK | lock->tranche);
721}
722
723/*
724 * Report end of wait event for light-weight locks.
725 */
726static inline void
727LWLockReportWaitEnd(void)
728{
729 pgstat_report_wait_end();
730}
731
732/*
733 * Return the name of an LWLock tranche.
734 */
735static const char *
736GetLWTrancheName(uint16 trancheId)
737{
738 /* Built-in tranche or individual LWLock? */
739 if (trancheId < LWTRANCHE_FIRST_USER_DEFINED)
740 return BuiltinTrancheNames[trancheId];
741
742 /*
743 * We only ever add new entries to LWLockTrancheNames, so most lookups can
744 * avoid taking the spinlock as long as the backend-local counter
745 * (LocalLWLockCounter) is greater than the requested tranche ID. Else,
746 * we need to first update the backend-local counter with ShmemLock held
747 * before attempting the lookup again. In practice, the latter case is
748 * probably rare.
749 */
750 if (trancheId >= LocalLWLockCounter)
751 {
752 SpinLockAcquire(ShmemLock);
753 LocalLWLockCounter = *LWLockCounter;
754 SpinLockRelease(ShmemLock);
755
756 if (trancheId >= LocalLWLockCounter)
757 elog(ERROR, "tranche %d is not registered", trancheId);
758 }
759
760 /*
761 * It's an extension tranche, so look in LWLockTrancheNames.
762 */
764
765 return LWLockTrancheNames[trancheId - LWTRANCHE_FIRST_USER_DEFINED];
766}
767
768/*
769 * Return an identifier for an LWLock based on the wait class and event.
770 */
771const char *
772GetLWLockIdentifier(uint32 classId, uint16 eventId)
773{
774 Assert(classId == PG_WAIT_LWLOCK);
775 /* The event IDs are just tranche numbers. */
776 return GetLWTrancheName(eventId);
777}
778
779/*
780 * Internal function that tries to atomically acquire the lwlock in the passed
781 * in mode.
782 *
783 * This function will not block waiting for a lock to become free - that's the
784 * caller's job.
785 *
786 * Returns true if the lock isn't free and we need to wait.
787 */
788static bool
789LWLockAttemptLock(LWLock *lock, LWLockMode mode)
790{
791 uint32 old_state;
792
793 Assert(mode == LW_EXCLUSIVE || mode == LW_SHARED);
794
795 /*
796 * Read once outside the loop, later iterations will get the newer value
797 * via compare & exchange.
798 */
799 old_state = pg_atomic_read_u32(&lock->state);
800
801 /* loop until we've determined whether we could acquire the lock or not */
802 while (true)
803 {
804 uint32 desired_state;
805 bool lock_free;
806
807 desired_state = old_state;
808
809 if (mode == LW_EXCLUSIVE)
810 {
811 lock_free = (old_state & LW_LOCK_MASK) == 0;
812 if (lock_free)
813 desired_state += LW_VAL_EXCLUSIVE;
814 }
815 else
816 {
817 lock_free = (old_state & LW_VAL_EXCLUSIVE) == 0;
818 if (lock_free)
819 desired_state += LW_VAL_SHARED;
820 }
821
822 /*
823 * Attempt to swap in the state we are expecting. If we didn't see
824 * lock to be free, that's just the old value. If we saw it as free,
825 * we'll attempt to mark it acquired. The reason that we always swap
826 * in the value is that this doubles as a memory barrier. We could try
827 * to be smarter and only swap in values if we saw the lock as free,
828 * but benchmark haven't shown it as beneficial so far.
829 *
830 * Retry if the value changed since we last looked at it.
831 */
832 if (pg_atomic_compare_exchange_u32(&lock->state,
833 &old_state, desired_state))
834 {
835 if (lock_free)
836 {
837 /* Great! Got the lock. */
838#ifdef LOCK_DEBUG
839 if (mode == LW_EXCLUSIVE)
840 lock->owner = MyProc;
841#endif
842 return false;
843 }
844 else
845 return true; /* somebody else has the lock */
846 }
847 }
848 pg_unreachable();
849}
850
851/*
852 * Lock the LWLock's wait list against concurrent activity.
853 *
854 * NB: even though the wait list is locked, non-conflicting lock operations
855 * may still happen concurrently.
856 *
857 * Time spent holding mutex should be short!
858 */
859static void
860LWLockWaitListLock(LWLock *lock)
861{
862 uint32 old_state;
863#ifdef LWLOCK_STATS
864 lwlock_stats *lwstats;
865 uint32 delays = 0;
866
867 lwstats = get_lwlock_stats(lock);
868#endif
869
870 while (true)
871 {
872 /*
873 * Always try once to acquire the lock directly, without setting up
874 * the spin-delay infrastructure. The work necessary for that shows up
875 * in profiles and is rarely necessary.
876 */
877 old_state = pg_atomic_fetch_or_u32(&lock->state, LW_FLAG_LOCKED);
878 if (!(old_state & LW_FLAG_LOCKED))
879 break; /* got lock */
880
881 /* and then spin without atomic operations until lock is released */
882 {
883 SpinDelayStatus delayStatus;
884
885 init_local_spin_delay(&delayStatus);
886
887 while (old_state & LW_FLAG_LOCKED)
888 {
889 perform_spin_delay(&delayStatus);
890 old_state = pg_atomic_read_u32(&lock->state);
891 }
892#ifdef LWLOCK_STATS
893 delays += delayStatus.delays;
894#endif
895 finish_spin_delay(&delayStatus);
896 }
897
898 /*
899 * Retry. The lock might obviously already be re-acquired by the time
900 * we're attempting to get it again.
901 */
902 }
903
904#ifdef LWLOCK_STATS
905 lwstats->spin_delay_count += delays;
906#endif
907}
908
909/*
910 * Unlock the LWLock's wait list.
911 *
912 * Note that it can be more efficient to manipulate flags and release the
913 * locks in a single atomic operation.
914 */
915static void
916LWLockWaitListUnlock(LWLock *lock)
917{
918 uint32 old_state PG_USED_FOR_ASSERTS_ONLY;
919
920 old_state = pg_atomic_fetch_and_u32(&lock->state, ~LW_FLAG_LOCKED);
921
922 Assert(old_state & LW_FLAG_LOCKED);
923}
924
925/*
926 * Wakeup all the lockers that currently have a chance to acquire the lock.
927 */
928static void
929LWLockWakeup(LWLock *lock)
930{
931 bool new_wake_in_progress = false;
932 bool wokeup_somebody = false;
933 proclist_head wakeup;
934 proclist_mutable_iter iter;
935
936 proclist_init(&wakeup);
937
938 /* lock wait list while collecting backends to wake up */
939 LWLockWaitListLock(lock);
940
941 proclist_foreach_modify(iter, &lock->waiters, lwWaitLink)
942 {
943 PGPROC *waiter = GetPGProcByNumber(iter.cur);
944
945 if (wokeup_somebody && waiter->lwWaitMode == LW_EXCLUSIVE)
946 continue;
947
948 proclist_delete(&lock->waiters, iter.cur, lwWaitLink);
949 proclist_push_tail(&wakeup, iter.cur, lwWaitLink);
950
951 if (waiter->lwWaitMode != LW_WAIT_UNTIL_FREE)
952 {
953 /*
954 * Prevent additional wakeups until retryer gets to run. Backends
955 * that are just waiting for the lock to become free don't retry
956 * automatically.
957 */
958 new_wake_in_progress = true;
959
960 /*
961 * Don't wakeup (further) exclusive locks.
962 */
963 wokeup_somebody = true;
964 }
965
966 /*
967 * Signal that the process isn't on the wait list anymore. This allows
968 * LWLockDequeueSelf() to remove itself of the waitlist with a
969 * proclist_delete(), rather than having to check if it has been
970 * removed from the list.
971 */
972 Assert(waiter->lwWaiting == LW_WS_WAITING);
973 waiter->lwWaiting = LW_WS_PENDING_WAKEUP;
974
975 /*
976 * Once we've woken up an exclusive lock, there's no point in waking
977 * up anybody else.
978 */
979 if (waiter->lwWaitMode == LW_EXCLUSIVE)
980 break;
981 }
982
983 Assert(proclist_is_empty(&wakeup) || pg_atomic_read_u32(&lock->state) & LW_FLAG_HAS_WAITERS);
984
985 /* unset required flags, and release lock, in one fell swoop */
986 {
987 uint32 old_state;
988 uint32 desired_state;
989
990 old_state = pg_atomic_read_u32(&lock->state);
991 while (true)
992 {
993 desired_state = old_state;
994
995 /* compute desired flags */
996
997 if (new_wake_in_progress)
998 desired_state |= LW_FLAG_WAKE_IN_PROGRESS;
999 else
1000 desired_state &= ~LW_FLAG_WAKE_IN_PROGRESS;
1001
1002 if (proclist_is_empty(&lock->waiters))
1003 desired_state &= ~LW_FLAG_HAS_WAITERS;
1004
1005 desired_state &= ~LW_FLAG_LOCKED; /* release lock */
1006
1007 if (pg_atomic_compare_exchange_u32(&lock->state, &old_state,
1008 desired_state))
1009 break;
1010 }
1011 }
1012
1013 /* Awaken any waiters I removed from the queue. */
1014 proclist_foreach_modify(iter, &wakeup, lwWaitLink)
1015 {
1016 PGPROC *waiter = GetPGProcByNumber(iter.cur);
1017
1018 LOG_LWDEBUG("LWLockRelease", lock, "release waiter");
1019 proclist_delete(&wakeup, iter.cur, lwWaitLink);
1020
1021 /*
1022 * Guarantee that lwWaiting being unset only becomes visible once the
1023 * unlink from the link has completed. Otherwise the target backend
1024 * could be woken up for other reason and enqueue for a new lock - if
1025 * that happens before the list unlink happens, the list would end up
1026 * being corrupted.
1027 *
1028 * The barrier pairs with the LWLockWaitListLock() when enqueuing for
1029 * another lock.
1030 */
1031 pg_write_barrier();
1032 waiter->lwWaiting = LW_WS_NOT_WAITING;
1033 PGSemaphoreUnlock(waiter->sem);
1034 }
1035}
1036
1037/*
1038 * Add ourselves to the end of the queue.
1039 *
1040 * NB: Mode can be LW_WAIT_UNTIL_FREE here!
1041 */
1042static void
1043LWLockQueueSelf(LWLock *lock, LWLockMode mode)
1044{
1045 /*
1046 * If we don't have a PGPROC structure, there's no way to wait. This
1047 * should never occur, since MyProc should only be null during shared
1048 * memory initialization.
1049 */
1050 if (MyProc == NULL)
1051 elog(PANIC, "cannot wait without a PGPROC structure");
1052
1053 if (MyProc->lwWaiting != LW_WS_NOT_WAITING)
1054 elog(PANIC, "queueing for lock while waiting on another one");
1055
1056 LWLockWaitListLock(lock);
1057
1058 /* setting the flag is protected by the spinlock */
1059 pg_atomic_fetch_or_u32(&lock->state, LW_FLAG_HAS_WAITERS);
1060
1061 MyProc->lwWaiting = LW_WS_WAITING;
1062 MyProc->lwWaitMode = mode;
1063
1064 /* LW_WAIT_UNTIL_FREE waiters are always at the front of the queue */
1065 if (mode == LW_WAIT_UNTIL_FREE)
1066 proclist_push_head(&lock->waiters, MyProcNumber, lwWaitLink);
1067 else
1068 proclist_push_tail(&lock->waiters, MyProcNumber, lwWaitLink);
1069
1070 /* Can release the mutex now */
1071 LWLockWaitListUnlock(lock);
1072
1073#ifdef LOCK_DEBUG
1074 pg_atomic_fetch_add_u32(&lock->nwaiters, 1);
1075#endif
1076}
1077
1078/*
1079 * Remove ourselves from the waitlist.
1080 *
1081 * This is used if we queued ourselves because we thought we needed to sleep
1082 * but, after further checking, we discovered that we don't actually need to
1083 * do so.
1084 */
1085static void
1086LWLockDequeueSelf(LWLock *lock)
1087{
1088 bool on_waitlist;
1089
1090#ifdef LWLOCK_STATS
1091 lwlock_stats *lwstats;
1092
1093 lwstats = get_lwlock_stats(lock);
1094
1095 lwstats->dequeue_self_count++;
1096#endif
1097
1098 LWLockWaitListLock(lock);
1099
1100 /*
1101 * Remove ourselves from the waitlist, unless we've already been removed.
1102 * The removal happens with the wait list lock held, so there's no race in
1103 * this check.
1104 */
1105 on_waitlist = MyProc->lwWaiting == LW_WS_WAITING;
1106 if (on_waitlist)
1107 proclist_delete(&lock->waiters, MyProcNumber, lwWaitLink);
1108
1109 if (proclist_is_empty(&lock->waiters) &&
1110 (pg_atomic_read_u32(&lock->state) & LW_FLAG_HAS_WAITERS) != 0)
1111 {
1112 pg_atomic_fetch_and_u32(&lock->state, ~LW_FLAG_HAS_WAITERS);
1113 }
1114
1115 /* XXX: combine with fetch_and above? */
1116 LWLockWaitListUnlock(lock);
1117
1118 /* clear waiting state again, nice for debugging */
1119 if (on_waitlist)
1120 MyProc->lwWaiting = LW_WS_NOT_WAITING;
1121 else
1122 {
1123 int extraWaits = 0;
1124
1125 /*
1126 * Somebody else dequeued us and has or will wake us up. Deal with the
1127 * superfluous absorption of a wakeup.
1128 */
1129
1130 /*
1131 * Clear LW_FLAG_WAKE_IN_PROGRESS if somebody woke us before we
1132 * removed ourselves - they'll have set it.
1133 */
1134 pg_atomic_fetch_and_u32(&lock->state, ~LW_FLAG_WAKE_IN_PROGRESS);
1135
1136 /*
1137 * Now wait for the scheduled wakeup, otherwise our ->lwWaiting would
1138 * get reset at some inconvenient point later. Most of the time this
1139 * will immediately return.
1140 */
1141 for (;;)
1142 {
1143 PGSemaphoreLock(MyProc->sem);
1144 if (MyProc->lwWaiting == LW_WS_NOT_WAITING)
1145 break;
1146 extraWaits++;
1147 }
1148
1149 /*
1150 * Fix the process wait semaphore's count for any absorbed wakeups.
1151 */
1152 while (extraWaits-- > 0)
1153 PGSemaphoreUnlock(MyProc->sem);
1154 }
1155
1156#ifdef LOCK_DEBUG
1157 {
1158 /* not waiting anymore */
1159 uint32 nwaiters PG_USED_FOR_ASSERTS_ONLY = pg_atomic_fetch_sub_u32(&lock->nwaiters, 1);
1160
1161 Assert(nwaiters < MAX_BACKENDS);
1162 }
1163#endif
1164}
1165
1166/*
1167 * LWLockAcquire - acquire a lightweight lock in the specified mode
1168 *
1169 * If the lock is not available, sleep until it is. Returns true if the lock
1170 * was available immediately, false if we had to sleep.
1171 *
1172 * Side effect: cancel/die interrupts are held off until lock release.
1173 */
1174bool
1175LWLockAcquire(LWLock *lock, LWLockMode mode)
1176{
1177 PGPROC *proc = MyProc;
1178 bool result = true;
1179 int extraWaits = 0;
1180#ifdef LWLOCK_STATS
1181 lwlock_stats *lwstats;
1182
1183 lwstats = get_lwlock_stats(lock);
1184#endif
1185
1186 Assert(mode == LW_EXCLUSIVE || mode == LW_SHARED);
1187
1188 PRINT_LWDEBUG("LWLockAcquire", lock, mode);
1189
1190#ifdef LWLOCK_STATS
1191 /* Count lock acquisition attempts */
1192 if (mode == LW_EXCLUSIVE)
1193 lwstats->ex_acquire_count++;
1194 else
1195 lwstats->sh_acquire_count++;
1196#endif /* LWLOCK_STATS */
1197
1198 /*
1199 * We can't wait if we haven't got a PGPROC. This should only occur
1200 * during bootstrap or shared memory initialization. Put an Assert here
1201 * to catch unsafe coding practices.
1202 */
1203 Assert(!(proc == NULL && IsUnderPostmaster));
1204
1205 /* Ensure we will have room to remember the lock */
1206 if (num_held_lwlocks >= MAX_SIMUL_LWLOCKS)
1207 elog(ERROR, "too many LWLocks taken");
1208
1209 /*
1210 * Lock out cancel/die interrupts until we exit the code section protected
1211 * by the LWLock. This ensures that interrupts will not interfere with
1212 * manipulations of data structures in shared memory.
1213 */
1214 HOLD_INTERRUPTS();
1215
1216 /*
1217 * Loop here to try to acquire lock after each time we are signaled by
1218 * LWLockRelease.
1219 *
1220 * NOTE: it might seem better to have LWLockRelease actually grant us the
1221 * lock, rather than retrying and possibly having to go back to sleep. But
1222 * in practice that is no good because it means a process swap for every
1223 * lock acquisition when two or more processes are contending for the same
1224 * lock. Since LWLocks are normally used to protect not-very-long
1225 * sections of computation, a process needs to be able to acquire and
1226 * release the same lock many times during a single CPU time slice, even
1227 * in the presence of contention. The efficiency of being able to do that
1228 * outweighs the inefficiency of sometimes wasting a process dispatch
1229 * cycle because the lock is not free when a released waiter finally gets
1230 * to run. See pgsql-hackers archives for 29-Dec-01.
1231 */
1232 for (;;)
1233 {
1234 bool mustwait;
1235
1236 /*
1237 * Try to grab the lock the first time, we're not in the waitqueue
1238 * yet/anymore.
1239 */
1240 mustwait = LWLockAttemptLock(lock, mode);
1241
1242 if (!mustwait)
1243 {
1244 LOG_LWDEBUG("LWLockAcquire", lock, "immediately acquired lock");
1245 break; /* got the lock */
1246 }
1247
1248 /*
1249 * Ok, at this point we couldn't grab the lock on the first try. We
1250 * cannot simply queue ourselves to the end of the list and wait to be
1251 * woken up because by now the lock could long have been released.
1252 * Instead add us to the queue and try to grab the lock again. If we
1253 * succeed we need to revert the queuing and be happy, otherwise we
1254 * recheck the lock. If we still couldn't grab it, we know that the
1255 * other locker will see our queue entries when releasing since they
1256 * existed before we checked for the lock.
1257 */
1258
1259 /* add to the queue */
1260 LWLockQueueSelf(lock, mode);
1261
1262 /* we're now guaranteed to be woken up if necessary */
1263 mustwait = LWLockAttemptLock(lock, mode);
1264
1265 /* ok, grabbed the lock the second time round, need to undo queueing */
1266 if (!mustwait)
1267 {
1268 LOG_LWDEBUG("LWLockAcquire", lock, "acquired, undoing queue");
1269
1270 LWLockDequeueSelf(lock);
1271 break;
1272 }
1273
1274 /*
1275 * Wait until awakened.
1276 *
1277 * It is possible that we get awakened for a reason other than being
1278 * signaled by LWLockRelease. If so, loop back and wait again. Once
1279 * we've gotten the LWLock, re-increment the sema by the number of
1280 * additional signals received.
1281 */
1282 LOG_LWDEBUG("LWLockAcquire", lock, "waiting");
1283
1284#ifdef LWLOCK_STATS
1285 lwstats->block_count++;
1286#endif
1287
1288 LWLockReportWaitStart(lock);
1289 if (TRACE_POSTGRESQL_LWLOCK_WAIT_START_ENABLED())
1290 TRACE_POSTGRESQL_LWLOCK_WAIT_START(T_NAME(lock), mode);
1291
1292 for (;;)
1293 {
1294 PGSemaphoreLock(proc->sem);
1295 if (proc->lwWaiting == LW_WS_NOT_WAITING)
1296 break;
1297 extraWaits++;
1298 }
1299
1300 /* Retrying, allow LWLockRelease to release waiters again. */
1301 pg_atomic_fetch_and_u32(&lock->state, ~LW_FLAG_WAKE_IN_PROGRESS);
1302
1303#ifdef LOCK_DEBUG
1304 {
1305 /* not waiting anymore */
1306 uint32 nwaiters PG_USED_FOR_ASSERTS_ONLY = pg_atomic_fetch_sub_u32(&lock->nwaiters, 1);
1307
1308 Assert(nwaiters < MAX_BACKENDS);
1309 }
1310#endif
1311
1312 if (TRACE_POSTGRESQL_LWLOCK_WAIT_DONE_ENABLED())
1313 TRACE_POSTGRESQL_LWLOCK_WAIT_DONE(T_NAME(lock), mode);
1314 LWLockReportWaitEnd();
1315
1316 LOG_LWDEBUG("LWLockAcquire", lock, "awakened");
1317
1318 /* Now loop back and try to acquire lock again. */
1319 result = false;
1320 }
1321
1322 if (TRACE_POSTGRESQL_LWLOCK_ACQUIRE_ENABLED())
1323 TRACE_POSTGRESQL_LWLOCK_ACQUIRE(T_NAME(lock), mode);
1324
1325 /* Add lock to list of locks held by this backend */
1326 held_lwlocks[num_held_lwlocks].lock = lock;
1327 held_lwlocks[num_held_lwlocks++].mode = mode;
1328
1329 /*
1330 * Fix the process wait semaphore's count for any absorbed wakeups.
1331 */
1332 while (extraWaits-- > 0)
1333 PGSemaphoreUnlock(proc->sem);
1334
1335 return result;
1336}
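/*
 * Example (not part of lwlock.c): the canonical caller pattern, with
 * hypothetical names. "MyShared" stands for any shared-memory structure
 * protected by an embedded LWLock.
 *
 *     LWLockAcquire(&MyShared->lock, LW_SHARED);   // LW_EXCLUSIVE to modify
 *     v = MyShared->counter;                       // touch the shared state
 *     LWLockRelease(&MyShared->lock);              // re-allows cancel/die interrupts
 */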
1337
1338/*
1339 * LWLockConditionalAcquire - acquire a lightweight lock in the specified mode
1340 *
1341 * If the lock is not available, return false with no side-effects.
1342 *
1343 * If successful, cancel/die interrupts are held off until lock release.
1344 */
1345bool
1346LWLockConditionalAcquire(LWLock *lock, LWLockMode mode)
1347{
1348 bool mustwait;
1349
1350 Assert(mode == LW_EXCLUSIVE || mode == LW_SHARED);
1351
1352 PRINT_LWDEBUG("LWLockConditionalAcquire", lock, mode);
1353
1354 /* Ensure we will have room to remember the lock */
1355 if (num_held_lwlocks >= MAX_SIMUL_LWLOCKS)
1356 elog(ERROR, "too many LWLocks taken");
1357
1358 /*
1359 * Lock out cancel/die interrupts until we exit the code section protected
1360 * by the LWLock. This ensures that interrupts will not interfere with
1361 * manipulations of data structures in shared memory.
1362 */
1363 HOLD_INTERRUPTS();
1364
1365 /* Check for the lock */
1366 mustwait = LWLockAttemptLock(lock, mode);
1367
1368 if (mustwait)
1369 {
1370 /* Failed to get lock, so release interrupt holdoff */
1371 RESUME_INTERRUPTS();
1372
1373 LOG_LWDEBUG("LWLockConditionalAcquire", lock, "failed");
1374 if (TRACE_POSTGRESQL_LWLOCK_CONDACQUIRE_FAIL_ENABLED())
1375 TRACE_POSTGRESQL_LWLOCK_CONDACQUIRE_FAIL(T_NAME(lock), mode);
1376 }
1377 else
1378 {
1379 /* Add lock to list of locks held by this backend */
1380 held_lwlocks[num_held_lwlocks].lock = lock;
1381 held_lwlocks[num_held_lwlocks++].mode = mode;
1382 if (TRACE_POSTGRESQL_LWLOCK_CONDACQUIRE_ENABLED())
1383 TRACE_POSTGRESQL_LWLOCK_CONDACQUIRE(T_NAME(lock), mode);
1384 }
1385 return !mustwait;
1386}
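/*
 * Example (not part of lwlock.c): opportunistic work with
 * LWLockConditionalAcquire(), hypothetical names; note that the lock is
 * released only when the acquisition succeeded.
 *
 *     if (LWLockConditionalAcquire(&MyShared->lock, LW_EXCLUSIVE))
 *     {
 *         MyShared->counter++;
 *         LWLockRelease(&MyShared->lock);
 *     }
 *     else
 *         ;   // skip the work, or fall back to a blocking LWLockAcquire()
 */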
1387
1388/*
1389 * LWLockAcquireOrWait - Acquire lock, or wait until it's free
1390 *
1391 * The semantics of this function are a bit funky. If the lock is currently
1392 * free, it is acquired in the given mode, and the function returns true. If
1393 * the lock isn't immediately free, the function waits until it is released
1394 * and returns false, but does not acquire the lock.
1395 *
1396 * This is currently used for WALWriteLock: when a backend flushes the WAL,
1397 * holding WALWriteLock, it can flush the commit records of many other
1398 * backends as a side-effect. Those other backends need to wait until the
1399 * flush finishes, but don't need to acquire the lock anymore. They can just
1400 * wake up, observe that their records have already been flushed, and return.
1401 */
1402bool
1403LWLockAcquireOrWait(LWLock *lock, LWLockMode mode)
1404{
1405 PGPROC *proc = MyProc;
1406 bool mustwait;
1407 int extraWaits = 0;
1408#ifdef LWLOCK_STATS
1409 lwlock_stats *lwstats;
1410
1411 lwstats = get_lwlock_stats(lock);
1412#endif
1413
1414 Assert(mode == LW_EXCLUSIVE || mode == LW_SHARED);
1415
1416 PRINT_LWDEBUG("LWLockAcquireOrWait", lock, mode);
1417
1418 /* Ensure we will have room to remember the lock */
1419 if (num_held_lwlocks >= MAX_SIMUL_LWLOCKS)
1420 elog(ERROR, "too many LWLocks taken");
1421
1422 /*
1423 * Lock out cancel/die interrupts until we exit the code section protected
1424 * by the LWLock. This ensures that interrupts will not interfere with
1425 * manipulations of data structures in shared memory.
1426 */
1427 HOLD_INTERRUPTS();
1428
1429 /*
1430 * NB: We're using nearly the same twice-in-a-row lock acquisition
1431 * protocol as LWLockAcquire(). Check its comments for details.
1432 */
1433 mustwait = LWLockAttemptLock(lock, mode);
1434
1435 if (mustwait)
1436 {
1437 LWLockQueueSelf(lock, LW_WAIT_UNTIL_FREE);
1438
1439 mustwait = LWLockAttemptLock(lock, mode);
1440
1441 if (mustwait)
1442 {
1443 /*
1444 * Wait until awakened. Like in LWLockAcquire, be prepared for
1445 * bogus wakeups.
1446 */
1447 LOG_LWDEBUG("LWLockAcquireOrWait", lock, "waiting");
1448
1449#ifdef LWLOCK_STATS
1450 lwstats->block_count++;
1451#endif
1452
1453 LWLockReportWaitStart(lock);
1454 if (TRACE_POSTGRESQL_LWLOCK_WAIT_START_ENABLED())
1455 TRACE_POSTGRESQL_LWLOCK_WAIT_START(T_NAME(lock), mode);
1456
1457 for (;;)
1458 {
1459 PGSemaphoreLock(proc->sem);
1460 if (proc->lwWaiting == LW_WS_NOT_WAITING)
1461 break;
1462 extraWaits++;
1463 }
1464
1465#ifdef LOCK_DEBUG
1466 {
1467 /* not waiting anymore */
1468 uint32 nwaiters PG_USED_FOR_ASSERTS_ONLY = pg_atomic_fetch_sub_u32(&lock->nwaiters, 1);
1469
1470 Assert(nwaiters < MAX_BACKENDS);
1471 }
1472#endif
1473 if (TRACE_POSTGRESQL_LWLOCK_WAIT_DONE_ENABLED())
1474 TRACE_POSTGRESQL_LWLOCK_WAIT_DONE(T_NAME(lock), mode);
1475 LWLockReportWaitEnd();
1476
1477 LOG_LWDEBUG("LWLockAcquireOrWait", lock, "awakened");
1478 }
1479 else
1480 {
1481 LOG_LWDEBUG("LWLockAcquireOrWait", lock, "acquired, undoing queue");
1482
1483 /*
1484 * Got lock in the second attempt, undo queueing. We need to treat
1485 * this as having successfully acquired the lock, otherwise we'd
1486 * not necessarily wake up people we've prevented from acquiring
1487 * the lock.
1488 */
1489 LWLockDequeueSelf(lock);
1490 }
1491 }
1492
1493 /*
1494 * Fix the process wait semaphore's count for any absorbed wakeups.
1495 */
1496 while (extraWaits-- > 0)
1497 PGSemaphoreUnlock(proc->sem);
1498
1499 if (mustwait)
1500 {
1501 /* Failed to get lock, so release interrupt holdoff */
1502 RESUME_INTERRUPTS();
1503 LOG_LWDEBUG("LWLockAcquireOrWait", lock, "failed");
1504 if (TRACE_POSTGRESQL_LWLOCK_ACQUIRE_OR_WAIT_FAIL_ENABLED())
1505 TRACE_POSTGRESQL_LWLOCK_ACQUIRE_OR_WAIT_FAIL(T_NAME(lock), mode);
1506 }
1507 else
1508 {
1509 LOG_LWDEBUG("LWLockAcquireOrWait", lock, "succeeded");
1510 /* Add lock to list of locks held by this backend */
1511 held_lwlocks[num_held_lwlocks].lock = lock;
1512 held_lwlocks[num_held_lwlocks++].mode = mode;
1513 if (TRACE_POSTGRESQL_LWLOCK_ACQUIRE_OR_WAIT_ENABLED())
1514 TRACE_POSTGRESQL_LWLOCK_ACQUIRE_OR_WAIT(T_NAME(lock), mode);
1515 }
1516
1517 return !mustwait;
1518}
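/*
 * Example (not part of lwlock.c): the group-flush pattern described in the
 * comment above, with hypothetical helpers FlushUpTo()/AlreadyFlushedUpTo().
 *
 *     if (LWLockAcquireOrWait(WALWriteLock, LW_EXCLUSIVE))
 *     {
 *         FlushUpTo(target);                // got the lock: flush ourselves
 *         LWLockRelease(WALWriteLock);
 *     }
 *     else if (AlreadyFlushedUpTo(target))
 *         return;                           // somebody else's flush covered us
 */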
1519
1520/*
1521 * Does the lwlock in its current state need to wait for the variable value to
1522 * change?
1523 *
1524 * If we don't need to wait, and it's because the value of the variable has
1525 * changed, store the current value in newval.
1526 *
1527 * *result is set to true if the lock was free, and false otherwise.
1528 */
1529static bool
1530LWLockConflictsWithVar(LWLock *lock, pg_atomic_uint64 *valptr, uint64 oldval,
1531 uint64 *newval, bool *result)
1532{
1533 bool mustwait;
1534 uint64 value;
1535
1536 /*
1537 * Test first to see if it the slot is free right now.
1538 *
1539 * XXX: the unique caller of this routine, WaitXLogInsertionsToFinish()
1540 * via LWLockWaitForVar(), uses an implied barrier with a spinlock before
1541 * this, so we don't need a memory barrier here as far as the current
1542 * usage is concerned. But that might not be safe in general.
1543 */
1544 mustwait = (pg_atomic_read_u32(&lock->state) & LW_VAL_EXCLUSIVE) != 0;
1545
1546 if (!mustwait)
1547 {
1548 *result = true;
1549 return false;
1550 }
1551
1552 *result = false;
1553
1554 /*
1555 * Reading this value atomically is safe even on platforms where uint64
1556 * cannot be read without observing a torn value.
1557 */
1558 value = pg_atomic_read_u64(valptr);
1559
1560 if (value != oldval)
1561 {
1562 mustwait = false;
1563 *newval = value;
1564 }
1565 else
1566 {
1567 mustwait = true;
1568 }
1569
1570 return mustwait;
1571}
1572
1573/*
1574 * LWLockWaitForVar - Wait until lock is free, or a variable is updated.
1575 *
1576 * If the lock is held and *valptr equals oldval, waits until the lock is
1577 * either freed, or the lock holder updates *valptr by calling
1578 * LWLockUpdateVar. If the lock is free on exit (immediately or after
1579 * waiting), returns true. If the lock is still held, but *valptr no longer
1580 * matches oldval, returns false and sets *newval to the current value in
1581 * *valptr.
1582 *
1583 * Note: this function ignores shared lock holders; if the lock is held
1584 * in shared mode, returns 'true'.
1585 *
1586 * Be aware that LWLockConflictsWithVar() does not include a memory barrier,
1587 * hence the caller of this function may want to rely on an explicit barrier or
1588 * an implied barrier via spinlock or LWLock to avoid memory ordering issues.
1589 */
1590bool
1591LWLockWaitForVar(LWLock *lock, pg_atomic_uint64 *valptr, uint64 oldval,
1592 uint64 *newval)
1593{
1594 PGPROC *proc = MyProc;
1595 int extraWaits = 0;
1596 bool result = false;
1597#ifdef LWLOCK_STATS
1598 lwlock_stats *lwstats;
1599
1600 lwstats = get_lwlock_stats(lock);
1601#endif
1602
1603 PRINT_LWDEBUG("LWLockWaitForVar", lock, LW_WAIT_UNTIL_FREE);
1604
1605 /*
1606 * Lock out cancel/die interrupts while we sleep on the lock. There is no
1607 * cleanup mechanism to remove us from the wait queue if we got
1608 * interrupted.
1609 */
1610 HOLD_INTERRUPTS();
1611
1612 /*
1613 * Loop here to check the lock's status after each time we are signaled.
1614 */
1615 for (;;)
1616 {
1617 bool mustwait;
1618
1619 mustwait = LWLockConflictsWithVar(lock, valptr, oldval, newval,
1620 &result);
1621
1622 if (!mustwait)
1623 break; /* the lock was free or value didn't match */
1624
1625 /*
1626 * Add myself to wait queue. Note that this is racy, somebody else
1627 * could wakeup before we're finished queuing. NB: We're using nearly
1628 * the same twice-in-a-row lock acquisition protocol as
1629 * LWLockAcquire(). Check its comments for details. The only
1630 * difference is that we also have to check the variable's values when
1631 * checking the state of the lock.
1632 */
1633 LWLockQueueSelf(lock, LW_WAIT_UNTIL_FREE);
1634
1635 /*
1636 * Clear LW_FLAG_WAKE_IN_PROGRESS flag, to make sure we get woken up
1637 * as soon as the lock is released.
1638 */
1639 pg_atomic_fetch_and_u32(&lock->state, ~LW_FLAG_WAKE_IN_PROGRESS);
1640
1641 /*
1642 * We're now guaranteed to be woken up if necessary. Recheck the lock
1643 * and variables state.
1644 */
1645 mustwait = LWLockConflictsWithVar(lock, valptr, oldval, newval,
1646 &result);
1647
1648 /* Ok, no conflict after we queued ourselves. Undo queueing. */
1649 if (!mustwait)
1650 {
1651 LOG_LWDEBUG("LWLockWaitForVar", lock, "free, undoing queue");
1652
1653 LWLockDequeueSelf(lock);
1654 break;
1655 }
1656
1657 /*
1658 * Wait until awakened.
1659 *
1660 * It is possible that we get awakened for a reason other than being
1661 * signaled by LWLockRelease. If so, loop back and wait again. Once
1662 * we've gotten the LWLock, re-increment the sema by the number of
1663 * additional signals received.
1664 */
1665 LOG_LWDEBUG("LWLockWaitForVar", lock, "waiting");
1666
1667#ifdef LWLOCK_STATS
1668 lwstats->block_count++;
1669#endif
1670
1671 LWLockReportWaitStart(lock);
1672 if (TRACE_POSTGRESQL_LWLOCK_WAIT_START_ENABLED())
1673 TRACE_POSTGRESQL_LWLOCK_WAIT_START(T_NAME(lock), LW_EXCLUSIVE);
1674
1675 for (;;)
1676 {
1677 PGSemaphoreLock(proc->sem);
1678 if (proc->lwWaiting == LW_WS_NOT_WAITING)
1679 break;
1680 extraWaits++;
1681 }
1682
1683#ifdef LOCK_DEBUG
1684 {
1685 /* not waiting anymore */
1686 uint32 nwaiters PG_USED_FOR_ASSERTS_ONLY = pg_atomic_fetch_sub_u32(&lock->nwaiters, 1);
1687
1688 Assert(nwaiters < MAX_BACKENDS);
1689 }
1690#endif
1691
1692 if (TRACE_POSTGRESQL_LWLOCK_WAIT_DONE_ENABLED())
1693 TRACE_POSTGRESQL_LWLOCK_WAIT_DONE(T_NAME(lock), LW_EXCLUSIVE);
1694 LWLockReportWaitEnd();
1695
1696 LOG_LWDEBUG("LWLockWaitForVar", lock, "awakened");
1697
1698 /* Now loop back and check the status of the lock again. */
1699 }
1700
1701 /*
1702 * Fix the process wait semaphore's count for any absorbed wakeups.
1703 */
1704 while (extraWaits-- > 0)
1705 PGSemaphoreUnlock(proc->sem);
1706
1707 /*
1708 * Now okay to allow cancel/die interrupts.
1709 */
1710 RESUME_INTERRUPTS();
1711
1712 return result;
1713}
1714
1715
1716/*
1717 * LWLockUpdateVar - Update a variable and wake up waiters atomically
1718 *
1719 * Sets *valptr to 'val', and wakes up all processes waiting for us with
1720 * LWLockWaitForVar(). It first sets the value atomically and then wakes up
1721 * waiting processes so that any process calling LWLockWaitForVar() on the same
1722 * lock is guaranteed to see the new value, and act accordingly.
1723 *
1724 * The caller must be holding the lock in exclusive mode.
1725 */
1726void
1727LWLockUpdateVar(LWLock *lock, pg_atomic_uint64 *valptr, uint64 val)
1728{
1729 proclist_head wakeup;
1730 proclist_mutable_iter iter;
1731
1732 PRINT_LWDEBUG("LWLockUpdateVar", lock, LW_EXCLUSIVE);
1733
1734 /*
1735 * Note that pg_atomic_exchange_u64 is a full barrier, so we're guaranteed
1736 * that the variable is updated before waking up waiters.
1737 */
1738 pg_atomic_exchange_u64(valptr, val);
1739
1740 proclist_init(&wakeup);
1741
1742 LWLockWaitListLock(lock);
1743
1744 Assert(pg_atomic_read_u32(&lock->state) & LW_VAL_EXCLUSIVE);
1745
1746 /*
1747 * See if there are any LW_WAIT_UNTIL_FREE waiters that need to be woken
1748 * up. They are always in the front of the queue.
1749 */
1750 proclist_foreach_modify(iter, &lock->waiters, lwWaitLink)
1751 {
1752 PGPROC *waiter = GetPGProcByNumber(iter.cur);
1753
1754 if (waiter->lwWaitMode != LW_WAIT_UNTIL_FREE)
1755 break;
1756
1757 proclist_delete(&lock->waiters, iter.cur, lwWaitLink);
1758 proclist_push_tail(&wakeup, iter.cur, lwWaitLink);
1759
1760 /* see LWLockWakeup() */
1761 Assert(waiter->lwWaiting == LW_WS_WAITING);
1762 waiter->lwWaiting = LW_WS_PENDING_WAKEUP;
1763 }
1764
1765 /* We are done updating shared state of the lock itself. */
1766 LWLockWaitListUnlock(lock);
1767
1768 /*
1769 * Awaken any waiters I removed from the queue.
1770 */
1771 proclist_foreach_modify(iter, &wakeup, lwWaitLink)
1772 {
1773 PGPROC *waiter = GetPGProcByNumber(iter.cur);
1774
1775 proclist_delete(&wakeup, iter.cur, lwWaitLink);
1776 /* check comment in LWLockWakeup() about this barrier */
1777 pg_write_barrier();
1778 waiter->lwWaiting = LW_WS_NOT_WAITING;
1779 PGSemaphoreUnlock(waiter->sem);
1780 }
1781}
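/*
 * Example (not part of lwlock.c): the holder/waiter protocol formed by
 * LWLockUpdateVar() and LWLockWaitForVar(), with hypothetical names that
 * echo the WAL-insertion usage mentioned above.
 *
 *     // exclusive holder, publishing progress without releasing the lock:
 *     LWLockUpdateVar(&slot->lock, &slot->insertingAt, currentPos);
 *
 *     // waiter, sleeping until the lock is free or the value moves past oldPos:
 *     if (!LWLockWaitForVar(&slot->lock, &slot->insertingAt, oldPos, &newPos))
 *         ;   // still locked, but insertingAt has advanced to newPos
 */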
1782
1783
1784/*
1785 * LWLockRelease - release a previously acquired lock
1786 *
1787 * NB: This will leave lock->owner pointing to the current backend (if
1788 * LOCK_DEBUG is set). This is somewhat intentional, as it makes it easier to
1789 * debug cases of missing wakeups during lock release.
1790 */
1791void
1792LWLockRelease(LWLock *lock)
1793{
1794 LWLockMode mode;
1795 uint32 oldstate;
1796 bool check_waiters;
1797 int i;
1798
1799 /*
1800 * Remove lock from list of locks held. Usually, but not always, it will
1801 * be the latest-acquired lock; so search array backwards.
1802 */
1803 for (i = num_held_lwlocks; --i >= 0;)
1804 if (lock == held_lwlocks[i].lock)
1805 break;
1806
1807 if (i < 0)
1808 elog(ERROR, "lock %s is not held", T_NAME(lock));
1809
1810 mode = held_lwlocks[i].mode;
1811
1812 num_held_lwlocks--;
1811
1813 for (; i < num_held_lwlocks; i++)
1814 held_lwlocks[i] = held_lwlocks[i + 1];
1815
1816 PRINT_LWDEBUG("LWLockRelease", lock, mode);
1817
1818 /*
1819 * Release my hold on lock, after that it can immediately be acquired by
1820 * others, even if we still have to wakeup other waiters.
1821 */
1822 if (mode == LW_EXCLUSIVE)
1823 oldstate = pg_atomic_sub_fetch_u32(&lock->state, LW_VAL_EXCLUSIVE);
1824 else
1825 oldstate = pg_atomic_sub_fetch_u32(&lock->state, LW_VAL_SHARED);
1826
1827 /* nobody else can have that kind of lock */
1828 Assert(!(oldstate & LW_VAL_EXCLUSIVE));
1829
1830 if (TRACE_POSTGRESQL_LWLOCK_RELEASE_ENABLED())
1831 TRACE_POSTGRESQL_LWLOCK_RELEASE(T_NAME(lock));
1832
1833 /*
1834 * Check if we're still waiting for backends to get scheduled, if so,
1835 * don't wake them up again.
1836 */
1837 if ((oldstate & LW_FLAG_HAS_WAITERS) &&
1838 (oldstate & LW_FLAG_WAKE_IN_PROGRESS) == 0 &&
1839 (oldstate & LW_LOCK_MASK) == 0)
1840 check_waiters = true;
1841 else
1842 check_waiters = false;
1843
1844 /*
1845 * As waking up waiters requires the spinlock to be acquired, only do so
1846 * if necessary.
1847 */
1848 if (check_waiters)
1849 {
1850 /* XXX: remove before commit? */
1851 LOG_LWDEBUG("LWLockRelease", lock, "releasing waiters");
1852 LWLockWakeup(lock);
1853 }
1854
1855 /*
1856 * Now okay to allow cancel/die interrupts.
1857 */
1858 RESUME_INTERRUPTS();
1859}
1860
1861/*
1862 * LWLockReleaseClearVar - release a previously acquired lock, reset variable
1863 */
1864void
1865LWLockReleaseClearVar(LWLock *lock, pg_atomic_uint64 *valptr, uint64 val)
1866{
1867 /*
1868 * Note that pg_atomic_exchange_u64 is a full barrier, so we're guaranteed
1869 * that the variable is updated before releasing the lock.
1870 */
1871 pg_atomic_exchange_u64(valptr, val);
1872
1873 LWLockRelease(lock);
1874}
1875
1876
1877/*
1878 * LWLockReleaseAll - release all currently-held locks
1879 *
1880 * Used to clean up after ereport(ERROR). An important difference between this
1881 * function and retail LWLockRelease calls is that InterruptHoldoffCount is
1882 * unchanged by this operation. This is necessary since InterruptHoldoffCount
1883 * has been set to an appropriate level earlier in error recovery. We could
1884 * decrement it below zero if we allow it to drop for each released lock!
1885 *
1886 * Note that this function must be safe to call even before the LWLock
1887 * subsystem has been initialized (e.g., during early startup failures).
1888 * In that case, num_held_lwlocks will be 0 and we do nothing.
1889 */
1890void
1891LWLockReleaseAll(void)
1892{
1893 while (num_held_lwlocks > 0)
1894 {
1895 HOLD_INTERRUPTS(); /* match the upcoming RESUME_INTERRUPTS */
1896
1897 LWLockRelease(held_lwlocks[num_held_lwlocks - 1].lock);
1898 }
1899
1901}
1902
1903
1904/*
1905 * LWLockHeldByMe - test whether my process holds a lock in any mode
1906 *
1907 * This is meant as debug support only.
1908 */
1909bool
1910LWLockHeldByMe(LWLock *lock)
1911{
1912 int i;
1913
1914 for (i = 0; i < num_held_lwlocks; i++)
1915 {
1916 if (held_lwlocks[i].lock == lock)
1917 return true;
1918 }
1919 return false;
1920}
1921
1922/*
1923 * LWLockAnyHeldByMe - test whether my process holds any of an array of locks
1924 *
1925 * This is meant as debug support only.
1926 */
1927bool
1928LWLockAnyHeldByMe(LWLock *lock, int nlocks, size_t stride)
1929{
1930 char *held_lock_addr;
1931 char *begin;
1932 char *end;
1933 int i;
1934
1935 begin = (char *) lock;
1936 end = begin + nlocks * stride;
1937 for (i = 0; i < num_held_lwlocks; i++)
1938 {
1939 held_lock_addr = (char *) held_lwlocks[i].lock;
1940 if (held_lock_addr >= begin &&
1941 held_lock_addr < end &&
1942 (held_lock_addr - begin) % stride == 0)
1943 return true;
1944 }
1945 return false;
1946}
1947
1948/*
1949 * LWLockHeldByMeInMode - test whether my process holds a lock in given mode
1950 *
1951 * This is meant as debug support only.
1952 */
1953bool
1954LWLockHeldByMeInMode(LWLock *lock, LWLockMode mode)
1955{
1956 int i;
1957
1958 for (i = 0; i < num_held_lwlocks; i++)
1959 {
1960 if (held_lwlocks[i].lock == lock && held_lwlocks[i].mode == mode)
1961 return true;
1962 }
1963 return false;
1964}
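/*
 * Example (not part of lwlock.c): these predicates are intended for
 * assertions in code that requires a lock to already be held, e.g.:
 *
 *     Assert(LWLockHeldByMeInMode(ProcArrayLock, LW_EXCLUSIVE));
 */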
static uint32 pg_atomic_fetch_and_u32(volatile pg_atomic_uint32 *ptr, uint32 and_)
Definition atomics.h:396
static bool pg_atomic_compare_exchange_u32(volatile pg_atomic_uint32 *ptr, uint32 *expected, uint32 newval)
Definition atomics.h:349
static uint32 pg_atomic_fetch_or_u32(volatile pg_atomic_uint32 *ptr, uint32 or_)
Definition atomics.h:410
static uint32 pg_atomic_sub_fetch_u32(volatile pg_atomic_uint32 *ptr, int32 sub_)
Definition atomics.h:439
static uint32 pg_atomic_fetch_sub_u32(volatile pg_atomic_uint32 *ptr, int32 sub_)
Definition atomics.h:381
static void pg_atomic_init_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
Definition atomics.h:219
#define pg_write_barrier()
Definition atomics.h:155
static uint32 pg_atomic_fetch_add_u32(volatile pg_atomic_uint32 *ptr, int32 add_)
Definition atomics.h:366
static uint32 pg_atomic_read_u32(volatile pg_atomic_uint32 *ptr)
Definition atomics.h:237
static uint64 pg_atomic_read_u64(volatile pg_atomic_uint64 *ptr)
Definition atomics.h:467
static uint64 pg_atomic_exchange_u64(volatile pg_atomic_uint64 *ptr, uint64 newval)
Definition atomics.h:513
#define likely(x)
Definition c.h:421
#define MAXALIGN(LEN)
Definition c.h:836
#define PG_USED_FOR_ASSERTS_ONLY
Definition c.h:223
#define Assert(condition)
Definition c.h:883
uint64_t uint64
Definition c.h:557
uint16_t uint16
Definition c.h:555
#define pg_unreachable()
Definition c.h:351
uint32_t uint32
Definition c.h:556
#define MemSet(start, val, len)
Definition c.h:1023
size_t Size
Definition c.h:629
#define fprintf(file, fmt, msg)
Definition cubescan.l:21
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition dynahash.c:952
HTAB * hash_create(const char *tabname, int64 nelem, const HASHCTL *info, int flags)
Definition dynahash.c:358
void * hash_seq_search(HASH_SEQ_STATUS *status)
Definition dynahash.c:1415
void hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp)
Definition dynahash.c:1380
int errmsg_internal(const char *fmt,...)
Definition elog.c:1170
int errhidestmt(bool hide_stmt)
Definition elog.c:1445
int errdetail(const char *fmt,...)
Definition elog.c:1216
int errhidecontext(bool hide_ctx)
Definition elog.c:1464
int errcode(int sqlerrcode)
Definition elog.c:863
int errmsg(const char *fmt,...)
Definition elog.c:1080
#define LOG
Definition elog.h:31
#define FATAL
Definition elog.h:41
#define PANIC
Definition elog.h:42
#define ERROR
Definition elog.h:39
#define elog(elevel,...)
Definition elog.h:226
#define ereport(elevel,...)
Definition elog.h:150
int MyProcPid
Definition globals.c:47
ProcNumber MyProcNumber
Definition globals.c:90
bool IsUnderPostmaster
Definition globals.c:120
#define newval
@ HASH_ENTER
Definition hsearch.h:114
#define HASH_CONTEXT
Definition hsearch.h:102
#define HASH_ELEM
Definition hsearch.h:95
#define HASH_BLOBS
Definition hsearch.h:97
long val
Definition informix.c:689
static struct @172 value
void on_shmem_exit(pg_on_exit_callback function, Datum arg)
Definition ipc.c:372
int j
Definition isn.c:78
int i
Definition isn.c:77
#define LW_VAL_EXCLUSIVE
Definition lwlock.c:101
void LWLockUpdateVar(LWLock *lock, pg_atomic_uint64 *valptr, uint64 val)
Definition lwlock.c:1728
static void LWLockWakeup(LWLock *lock)
Definition lwlock.c:930
#define LW_FLAG_LOCKED
Definition lwlock.c:96
bool LWLockHeldByMe(LWLock *lock)
Definition lwlock.c:1911
static int LocalLWLockCounter
Definition lwlock.c:202
static LWLockHandle held_lwlocks[MAX_SIMUL_LWLOCKS]
Definition lwlock.c:178
void LWLockReleaseClearVar(LWLock *lock, pg_atomic_uint64 *valptr, uint64 val)
Definition lwlock.c:1866
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
Definition lwlock.c:1176
void CreateLWLocks(void)
Definition lwlock.c:441
int LWLockNewTrancheId(const char *name)
Definition lwlock.c:596
#define LW_VAL_SHARED
Definition lwlock.c:102
static bool LWLockAttemptLock(LWLock *lock, LWLockMode mode)
Definition lwlock.c:790
static void LWLockWaitListLock(LWLock *lock)
Definition lwlock.c:861
LWLockPadded * GetNamedLWLockTranche(const char *tranche_name)
Definition lwlock.c:566
bool LWLockHeldByMeInMode(LWLock *lock, LWLockMode mode)
Definition lwlock.c:1955
static void LWLockReportWaitEnd(void)
Definition lwlock.c:728
char ** LWLockTrancheNames
Definition lwlock.c:154
bool LWLockWaitForVar(LWLock *lock, pg_atomic_uint64 *valptr, uint64 oldval, uint64 *newval)
Definition lwlock.c:1592
static const char * GetLWTrancheName(uint16 trancheId)
Definition lwlock.c:737
#define LW_LOCK_MASK
Definition lwlock.c:106
int NamedLWLockTrancheRequests
Definition lwlock.c:192
void RequestNamedLWLockTranche(const char *tranche_name, int num_lwlocks)
Definition lwlock.c:649
#define LW_FLAG_HAS_WAITERS
Definition lwlock.c:94
#define MAX_SIMUL_LWLOCKS
Definition lwlock.c:168
static int NumLWLocksForNamedTranches(void)
Definition lwlock.c:382
void LWLockRelease(LWLock *lock)
Definition lwlock.c:1793
static NamedLWLockTrancheRequest * LocalNamedLWLockTrancheRequestArray
Definition lwlock.c:196
#define T_NAME(lock)
Definition lwlock.c:211
static int num_held_lwlocks
Definition lwlock.c:177
void LWLockReleaseAll(void)
Definition lwlock.c:1892
static void InitializeLWLocks(void)
Definition lwlock.c:498
void LWLockInitialize(LWLock *lock, int tranche_id)
Definition lwlock.c:698
static const char *const BuiltinTrancheNames[]
Definition lwlock.c:135
NamedLWLockTrancheRequest * NamedLWLockTrancheRequestArray
Definition lwlock.c:193
static void LWLockWaitListUnlock(LWLock *lock)
Definition lwlock.c:917
#define LOG_LWDEBUG(a, b, c)
Definition lwlock.c:276
bool LWLockConditionalAcquire(LWLock *lock, LWLockMode mode)
Definition lwlock.c:1347
bool LWLockAcquireOrWait(LWLock *lock, LWLockMode mode)
Definition lwlock.c:1404
static void LWLockQueueSelf(LWLock *lock, LWLockMode mode)
Definition lwlock.c:1044
#define PRINT_LWDEBUG(a, b, c)
Definition lwlock.c:275
static void LWLockReportWaitStart(LWLock *lock)
Definition lwlock.c:719
LWLockPadded * MainLWLockArray
Definition lwlock.c:161
#define LW_FLAG_WAKE_IN_PROGRESS
Definition lwlock.c:95
const char * GetLWLockIdentifier(uint32 classId, uint16 eventId)
Definition lwlock.c:773
static void LWLockDequeueSelf(LWLock *lock)
Definition lwlock.c:1087
int * LWLockCounter
Definition lwlock.c:199
Size LWLockShmemSize(void)
Definition lwlock.c:397
bool LWLockAnyHeldByMe(LWLock *lock, int nlocks, size_t stride)
Definition lwlock.c:1929
#define MAX_NAMED_TRANCHES
Definition lwlock.c:204
#define LW_SHARED_MASK
Definition lwlock.c:105
static bool LWLockConflictsWithVar(LWLock *lock, pg_atomic_uint64 *valptr, uint64 oldval, uint64 *newval, bool *result)
Definition lwlock.c:1531
void InitLWLockAccess(void)
Definition lwlock.c:550
@ LW_WS_NOT_WAITING
Definition lwlock.h:30
@ LW_WS_WAITING
Definition lwlock.h:31
@ LW_WS_PENDING_WAKEUP
Definition lwlock.h:32
#define LWLOCK_PADDED_SIZE
Definition lwlock.h:62
#define BUFFER_MAPPING_LWLOCK_OFFSET
Definition lwlock.h:102
#define NUM_LOCK_PARTITIONS
Definition lwlock.h:95
@ LWTRANCHE_FIRST_USER_DEFINED
Definition lwlock.h:182
#define LOCK_MANAGER_LWLOCK_OFFSET
Definition lwlock.h:103
#define NUM_BUFFER_PARTITIONS
Definition lwlock.h:91
#define PREDICATELOCK_MANAGER_LWLOCK_OFFSET
Definition lwlock.h:105
#define NUM_FIXED_LWLOCKS
Definition lwlock.h:107
LWLockMode
Definition lwlock.h:111
@ LW_SHARED
Definition lwlock.h:113
@ LW_WAIT_UNTIL_FREE
Definition lwlock.h:114
@ LW_EXCLUSIVE
Definition lwlock.h:112
#define NUM_PREDICATELOCK_PARTITIONS
Definition lwlock.h:99
void * MemoryContextAlloc(MemoryContext context, Size size)
Definition mcxt.c:1232
void * repalloc(void *pointer, Size size)
Definition mcxt.c:1632
MemoryContext TopMemoryContext
Definition mcxt.c:166
void MemoryContextDelete(MemoryContext context)
Definition mcxt.c:472
void MemoryContextAllowInCriticalSection(MemoryContext context, bool allow)
Definition mcxt.c:743
#define AllocSetContextCreate
Definition memutils.h:129
#define ALLOCSET_DEFAULT_SIZES
Definition memutils.h:160
#define RESUME_INTERRUPTS()
Definition miscadmin.h:136
#define HOLD_INTERRUPTS()
Definition miscadmin.h:134
bool process_shmem_requests_in_progress
Definition miscinit.c:1790
void * arg
static uint32 pg_nextpower2_32(uint32 num)
static PgChecksumMode mode
#define NAMEDATALEN
size_t strlcpy(char *dst, const char *src, size_t siz)
Definition strlcpy.c:45
void PGSemaphoreUnlock(PGSemaphore sema)
Definition posix_sema.c:335
void PGSemaphoreLock(PGSemaphore sema)
Definition posix_sema.c:315
uint64_t Datum
Definition postgres.h:70
static int fb(int x)
#define GetPGProcByNumber(n)
Definition proc.h:446
#define proclist_delete(list, procno, link_member)
Definition proclist.h:187
static void proclist_init(proclist_head *list)
Definition proclist.h:29
#define proclist_push_tail(list, procno, link_member)
Definition proclist.h:191
#define proclist_push_head(list, procno, link_member)
Definition proclist.h:189
#define proclist_foreach_modify(iter, lhead, link_member)
Definition proclist.h:206
static bool proclist_is_empty(const proclist_head *list)
Definition proclist.h:38
#define MAX_BACKENDS
Definition procnumber.h:39
tree ctl
Definition radixtree.h:1838
void perform_spin_delay(SpinDelayStatus *status)
Definition s_lock.c:126
void finish_spin_delay(SpinDelayStatus *status)
Definition s_lock.c:186
#define init_local_spin_delay(status)
Definition s_lock.h:753
Size add_size(Size s1, Size s2)
Definition shmem.c:495
Size mul_size(Size s1, Size s2)
Definition shmem.c:510
void * ShmemAlloc(Size size)
Definition shmem.c:154
slock_t * ShmemLock
Definition shmem.c:90
#define SpinLockRelease(lock)
Definition spin.h:61
#define SpinLockAcquire(lock)
Definition spin.h:59
PGPROC * MyProc
Definition proc.c:67
LWLockMode mode
Definition lwlock.c:174
LWLock * lock
Definition lwlock.c:173
pg_atomic_uint32 state
Definition lwlock.h:44
uint16 tranche
Definition lwlock.h:43
proclist_head waiters
Definition lwlock.h:45
Definition proc.h:179
uint8 lwWaitMode
Definition proc.h:247
PGSemaphore sem
Definition proc.h:183
uint8 lwWaiting
Definition proc.h:246
LWLock lock
Definition lwlock.h:70
#define PG_WAIT_LWLOCK
static void pgstat_report_wait_start(uint32 wait_event_info)
Definition wait_event.h:69
static void pgstat_report_wait_end(void)
Definition wait_event.h:85
const char * name
static TimestampTz wakeup[NUM_WALRCV_WAKEUPS]

Typedef Documentation

◆ LWLockHandle

◆ NamedLWLockTrancheRequest

Function Documentation

◆ CreateLWLocks()

void CreateLWLocks ( void  )

Definition at line 441 of file lwlock.c.

442{
443 if (!IsUnderPostmaster)
444 {
445 Size spaceLocks = LWLockShmemSize();
446 char *ptr;
447
448 /* Allocate space */
449 ptr = (char *) ShmemAlloc(spaceLocks);
450
451 /* Initialize the dynamic-allocation counter for tranches */
452 LWLockCounter = (int *) ptr;
453 *LWLockCounter = LWTRANCHE_FIRST_USER_DEFINED;
454 ptr += MAXALIGN(sizeof(int));
455
456 /* Initialize tranche names */
457 LWLockTrancheNames = (char **) ptr;
458 ptr += MAX_NAMED_TRANCHES * sizeof(char *);
459 for (int i = 0; i < MAX_NAMED_TRANCHES; i++)
460 {
461 LWLockTrancheNames[i] = ptr;
462 ptr += NAMEDATALEN;
463 }
464
465 /*
466 * Move named tranche requests to shared memory. This is done for the
467 * benefit of EXEC_BACKEND builds, which otherwise wouldn't be able to
468 * call GetNamedLWLockTranche() outside postmaster.
469 */
470 if (NamedLWLockTrancheRequests > 0)
471 {
472 /*
473 * Save the pointer to the request array in postmaster's local
474 * memory. We'll need it if we ever need to re-initialize shared
475 * memory after a crash.
476 */
477 LocalNamedLWLockTrancheRequestArray = NamedLWLockTrancheRequestArray;
478
479 NamedLWLockTrancheRequestArray = (NamedLWLockTrancheRequest *) ptr;
480 memcpy(NamedLWLockTrancheRequestArray, LocalNamedLWLockTrancheRequestArray,
481 NamedLWLockTrancheRequests * sizeof(NamedLWLockTrancheRequest));
482 ptr += NamedLWLockTrancheRequests * sizeof(NamedLWLockTrancheRequest);
483 }
484
485 /* Ensure desired alignment of LWLock array */
486 ptr += LWLOCK_PADDED_SIZE - ((uintptr_t) ptr) % LWLOCK_PADDED_SIZE;
487 MainLWLockArray = (LWLockPadded *) ptr;
488
489 /* Initialize all LWLocks */
490 InitializeLWLocks();
491 }
492}

References fb(), i, InitializeLWLocks(), IsUnderPostmaster, LocalNamedLWLockTrancheRequestArray, LWLOCK_PADDED_SIZE, LWLockCounter, LWLockShmemSize(), LWLockTrancheNames, LWTRANCHE_FIRST_USER_DEFINED, MainLWLockArray, MAX_NAMED_TRANCHES, MAXALIGN, NAMEDATALEN, NamedLWLockTrancheRequestArray, NamedLWLockTrancheRequests, and ShmemAlloc().

Referenced by CreateOrAttachShmemStructs().
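
The function carves one ShmemAlloc() block into several regions. A rough sketch of the layout it builds, per the code above (region order from the code; widths illustrative, not authoritative):

 [ int LWLockCounter ] MAXALIGN(sizeof(int))
 [ char *LWLockTrancheNames[MAX_NAMED_TRANCHES] ] pointer array
 [ MAX_NAMED_TRANCHES name buffers, NAMEDATALEN bytes each ]
 [ named-tranche request array (kept in shmem for EXEC_BACKEND) ]
 [ padding up to the next LWLOCK_PADDED_SIZE boundary ]
 [ MainLWLockArray: fixed LWLocks followed by named-tranche LWLocks ]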

◆ GetLWLockIdentifier()

const char * GetLWLockIdentifier ( uint32  classId,
uint16  eventId 
)

Definition at line 773 of file lwlock.c.

774{
775 Assert(classId == PG_WAIT_LWLOCK);
776 /* The event IDs are just tranche numbers. */
777 return GetLWTrancheName(eventId);
778}

References Assert, GetLWTrancheName(), and PG_WAIT_LWLOCK.

Referenced by pgstat_get_wait_event().

◆ GetLWTrancheName()

static const char * GetLWTrancheName ( uint16  trancheId)
static

Definition at line 737 of file lwlock.c.

738{
739 /* Built-in tranche or individual LWLock? */
740 if (trancheId < LWTRANCHE_FIRST_USER_DEFINED)
741 return BuiltinTrancheNames[trancheId];
742
743 /*
744 * We only ever add new entries to LWLockTrancheNames, so most lookups can
745 * avoid taking the spinlock as long as the backend-local counter
746 * (LocalLWLockCounter) is greater than the requested tranche ID. Else,
747 * we need to first update the backend-local counter with ShmemLock held
748 * before attempting the lookup again. In practice, the latter case is
749 * probably rare.
750 */
751 if (trancheId >= LocalLWLockCounter)
752 {
753 SpinLockAcquire(ShmemLock);
754 LocalLWLockCounter = *LWLockCounter;
755 SpinLockRelease(ShmemLock);
756
757 if (trancheId >= LocalLWLockCounter)
758 elog(ERROR, "tranche %d is not registered", trancheId);
759 }
760
761 /*
762 * It's an extension tranche, so look in LWLockTrancheNames.
763 */
764 trancheId -= LWTRANCHE_FIRST_USER_DEFINED;
765
766 return LWLockTrancheNames[trancheId];
767}

References BuiltinTrancheNames, elog, ERROR, LocalLWLockCounter, LWLockCounter, LWLockTrancheNames, LWTRANCHE_FIRST_USER_DEFINED, ShmemLock, SpinLockAcquire, and SpinLockRelease.

Referenced by GetLWLockIdentifier(), and LWLockInitialize().

◆ GetNamedLWLockTranche()

LWLockPadded * GetNamedLWLockTranche ( const char tranche_name)

Definition at line 566 of file lwlock.c.

567{
568 int lock_pos;
569 int i;
570
571 /*
572 * Obtain the position of base address of LWLock belonging to requested
573 * tranche_name in MainLWLockArray. LWLocks for named tranches are placed
574 * in MainLWLockArray after fixed locks.
575 */
576 lock_pos = NUM_FIXED_LWLOCKS;
577 for (i = 0; i < NamedLWLockTrancheRequests; i++)
578 {
579 if (strcmp(NamedLWLockTrancheRequestArray[i].tranche_name,
580 tranche_name) == 0)
581 return &MainLWLockArray[lock_pos];
582
583 lock_pos += NamedLWLockTrancheRequestArray[i].num_lwlocks;
584 }
585
586 elog(ERROR, "requested tranche is not registered");
587
588 /* just to keep compiler quiet */
589 return NULL;
590}

References elog, ERROR, i, MainLWLockArray, NamedLWLockTrancheRequestArray, NamedLWLockTrancheRequests, NUM_FIXED_LWLOCKS, and NamedLWLockTrancheRequest::num_lwlocks.

Referenced by pgss_shmem_startup(), and test_lwlock_tranche_lookup().
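
Typical extension usage pairs this with RequestNamedLWLockTranche() (documented below): the tranche is requested from shmem_request_hook, then fetched once at shmem-startup time. A minimal sketch, assuming a hypothetical tranche named "my_ext" with one lock (none of these names come from lwlock.c):

#include "postgres.h"
#include "storage/lwlock.h"

static LWLock *my_ext_lock = NULL;

static void
my_ext_shmem_startup(void)
{
    /* Requested earlier via RequestNamedLWLockTranche("my_ext", 1);
     * GetNamedLWLockTranche() raises an ERROR if that never happened. */
    my_ext_lock = &GetNamedLWLockTranche("my_ext")[0].lock;
}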

◆ InitializeLWLocks()

static void InitializeLWLocks ( void  )
static

Definition at line 498 of file lwlock.c.

499{
500 int id;
501 int i;
502 int j;
503 LWLockPadded *lock;
504
505 /* Initialize all individual LWLocks in main array */
506 for (id = 0, lock = MainLWLockArray; id < NUM_INDIVIDUAL_LWLOCKS; id++, lock++)
507 LWLockInitialize(&lock->lock, id);
508
509 /* Initialize buffer mapping LWLocks in main array */
511 for (id = 0; id < NUM_BUFFER_PARTITIONS; id++, lock++)
513
514 /* Initialize lmgrs' LWLocks in main array */
516 for (id = 0; id < NUM_LOCK_PARTITIONS; id++, lock++)
518
519 /* Initialize predicate lmgrs' LWLocks in main array */
521 for (id = 0; id < NUM_PREDICATELOCK_PARTITIONS; id++, lock++)
523
524 /*
525 * Copy the info about any named tranches into shared memory (so that
526 * other processes can see it), and initialize the requested LWLocks.
527 */
529 {
531
532 for (i = 0; i < NamedLWLockTrancheRequests; i++)
533 {
535 int tranche;
536
538 tranche = LWLockNewTrancheId(request->tranche_name);
539
540 for (j = 0; j < request->num_lwlocks; j++, lock++)
541 LWLockInitialize(&lock->lock, tranche);
542 }
543 }
544}

References BUFFER_MAPPING_LWLOCK_OFFSET, i, j, LWLockPadded::lock, LOCK_MANAGER_LWLOCK_OFFSET, LWLockInitialize(), LWLockNewTrancheId(), MainLWLockArray, NamedLWLockTrancheRequestArray, NamedLWLockTrancheRequests, NUM_BUFFER_PARTITIONS, NUM_FIXED_LWLOCKS, NUM_LOCK_PARTITIONS, NUM_PREDICATELOCK_PARTITIONS, and PREDICATELOCK_MANAGER_LWLOCK_OFFSET.

Referenced by CreateLWLocks().

◆ InitLWLockAccess()

void InitLWLockAccess ( void  )

Definition at line 550 of file lwlock.c.

551{
552#ifdef LWLOCK_STATS
553 init_lwlock_stats();
554#endif
555}

References init_lwlock_stats().

Referenced by InitAuxiliaryProcess(), and InitProcess().

◆ LWLockAcquire()

bool LWLockAcquire ( LWLock lock,
LWLockMode  mode 
)

Definition at line 1176 of file lwlock.c.

1177{
1178 PGPROC *proc = MyProc;
1179 bool result = true;
1180 int extraWaits = 0;
1181#ifdef LWLOCK_STATS
1182 lwlock_stats *lwstats;
1183
1184 lwstats = get_lwlock_stats_entry(lock);
1185#endif
1186
1187 Assert(mode == LW_SHARED || mode == LW_EXCLUSIVE);
1188
1189 PRINT_LWDEBUG("LWLockAcquire", lock, mode);
1190
1191#ifdef LWLOCK_STATS
1192 /* Count lock acquisition attempts */
1193 if (mode == LW_EXCLUSIVE)
1194 lwstats->ex_acquire_count++;
1195 else
1196 lwstats->sh_acquire_count++;
1197#endif /* LWLOCK_STATS */
1198
1199 /*
1200 * We can't wait if we haven't got a PGPROC. This should only occur
1201 * during bootstrap or shared memory initialization. Put an Assert here
1202 * to catch unsafe coding practices.
1203 */
1204 Assert(!(proc == NULL && IsUnderPostmaster));
1205
1206 /* Ensure we will have room to remember the lock */
1207 if (num_held_lwlocks >= MAX_SIMUL_LWLOCKS)
1208 elog(ERROR, "too many LWLocks taken");
1209
1210 /*
1211 * Lock out cancel/die interrupts until we exit the code section protected
1212 * by the LWLock. This ensures that interrupts will not interfere with
1213 * manipulations of data structures in shared memory.
1214 */
1215 HOLD_INTERRUPTS();
1216
1217 /*
1218 * Loop here to try to acquire lock after each time we are signaled by
1219 * LWLockRelease.
1220 *
1221 * NOTE: it might seem better to have LWLockRelease actually grant us the
1222 * lock, rather than retrying and possibly having to go back to sleep. But
1223 * in practice that is no good because it means a process swap for every
1224 * lock acquisition when two or more processes are contending for the same
1225 * lock. Since LWLocks are normally used to protect not-very-long
1226 * sections of computation, a process needs to be able to acquire and
1227 * release the same lock many times during a single CPU time slice, even
1228 * in the presence of contention. The efficiency of being able to do that
1229 * outweighs the inefficiency of sometimes wasting a process dispatch
1230 * cycle because the lock is not free when a released waiter finally gets
1231 * to run. See pgsql-hackers archives for 29-Dec-01.
1232 */
1233 for (;;)
1234 {
1235 bool mustwait;
1236
1237 /*
1238 * Try to grab the lock the first time, we're not in the waitqueue
1239 * yet/anymore.
1240 */
1241 mustwait = LWLockAttemptLock(lock, mode);
1242
1243 if (!mustwait)
1244 {
1245 LOG_LWDEBUG("LWLockAcquire", lock, "immediately acquired lock");
1246 break; /* got the lock */
1247 }
1248
1249 /*
1250 * Ok, at this point we couldn't grab the lock on the first try. We
1251 * cannot simply queue ourselves to the end of the list and wait to be
1252 * woken up because by now the lock could long since have been released.
1253 * Instead add us to the queue and try to grab the lock again. If we
1254 * succeed we need to revert the queuing and be happy, otherwise we
1255 * recheck the lock. If we still couldn't grab it, we know that the
1256 * other locker will see our queue entries when releasing since they
1257 * existed before we checked for the lock.
1258 */
1259
1260 /* add to the queue */
1261 LWLockQueueSelf(lock, mode);
1262
1263 /* we're now guaranteed to be woken up if necessary */
1264 mustwait = LWLockAttemptLock(lock, mode);
1265
1266 /* ok, grabbed the lock the second time round, need to undo queueing */
1267 if (!mustwait)
1268 {
1269 LOG_LWDEBUG("LWLockAcquire", lock, "acquired, undoing queue");
1270
1271 LWLockDequeueSelf(lock);
1272 break;
1273 }
1274
1275 /*
1276 * Wait until awakened.
1277 *
1278 * It is possible that we get awakened for a reason other than being
1279 * signaled by LWLockRelease. If so, loop back and wait again. Once
1280 * we've gotten the LWLock, re-increment the sema by the number of
1281 * additional signals received.
1282 */
1283 LOG_LWDEBUG("LWLockAcquire", lock, "waiting");
1284
1285#ifdef LWLOCK_STATS
1286 lwstats->block_count++;
1287#endif
1288
1292
1293 for (;;)
1294 {
1295 PGSemaphoreLock(proc->sem);
1296 if (proc->lwWaiting == LW_WS_NOT_WAITING)
1297 break;
1298 extraWaits++;
1299 }
1300
1301 /* Retrying, allow LWLockRelease to release waiters again. */
1302 pg_atomic_fetch_and_u32(&lock->state, ~LW_FLAG_WAKE_IN_PROGRESS);
1303
1304#ifdef LOCK_DEBUG
1305 {
1306 /* not waiting anymore */
1308
1310 }
1311#endif
1312
1316
1317 LOG_LWDEBUG("LWLockAcquire", lock, "awakened");
1318
1319 /* Now loop back and try to acquire lock again. */
1320 result = false;
1321 }
1322
1325
1326 /* Add lock to list of locks held by this backend */
1327 held_lwlocks[num_held_lwlocks].lock = lock;
1328 held_lwlocks[num_held_lwlocks++].mode = mode;
1329
1330 /*
1331 * Fix the process wait semaphore's count for any absorbed wakeups.
1332 */
1333 while (extraWaits-- > 0)
1334 PGSemaphoreUnlock(proc->sem);
1335
1336 return result;
1337}

References Assert, elog, ERROR, held_lwlocks, HOLD_INTERRUPTS, IsUnderPostmaster, LWLockHandle::lock, LOG_LWDEBUG, LW_EXCLUSIVE, LW_FLAG_WAKE_IN_PROGRESS, LW_SHARED, LW_WS_NOT_WAITING, LWLockAttemptLock(), LWLockDequeueSelf(), LWLockQueueSelf(), LWLockReportWaitEnd(), LWLockReportWaitStart(), PGPROC::lwWaiting, MAX_BACKENDS, MAX_SIMUL_LWLOCKS, LWLockHandle::mode, mode, MyProc, num_held_lwlocks, pg_atomic_fetch_and_u32(), pg_atomic_fetch_sub_u32(), PG_USED_FOR_ASSERTS_ONLY, PGSemaphoreLock(), PGSemaphoreUnlock(), PRINT_LWDEBUG, PGPROC::sem, LWLock::state, and T_NAME.

Referenced by _bt_end_vacuum(), _bt_parallel_done(), _bt_parallel_primscan_schedule(), _bt_parallel_release(), _bt_parallel_seize(), _bt_start_vacuum(), _bt_vacuum_cycleid(), abort_logical_decoding_activation(), AbsorbSyncRequests(), ActivateCommitTs(), addLSNWaiter(), AdvanceNextFullTransactionIdPastXid(), AdvanceOldestClogXid(), AdvanceOldestCommitTsXid(), AdvanceXLInsertBuffer(), alloc_object(), AlterSystemSetConfigFile(), ApplyLauncherMain(), apw_detach_shmem(), apw_dump_now(), apw_load_buffers(), AsyncNotifyFreezeXids(), asyncQueueAddEntries(), asyncQueueAdvanceTail(), asyncQueueReadAllNotifications(), asyncQueueUnregister(), AtAbort_Twophase(), AtEOXact_LogicalRepWorkers(), AtPrepare_PredicateLocks(), attach_internal(), autoprewarm_main(), autoprewarm_start_worker(), AutoVacLauncherMain(), AutoVacuumRequestWork(), AutoVacWorkerMain(), BackendPidGetProc(), BackendXidGetPid(), BecomeLockGroupLeader(), BecomeLockGroupMember(), BecomeRegisteredListener(), btparallelrescan(), BufferAlloc(), CancelDBBackends(), check_for_freed_segments(), CheckDeadLock(), CheckForSerializableConflictOut(), CheckLogicalSlotExists(), CheckPointPredicate(), CheckPointRelationMap(), CheckPointReplicationOrigin(), CheckPointReplicationSlots(), CheckPointTwoPhase(), CheckTableForSerializableConflictIn(), CheckTargetForConflictsIn(), choose_next_subplan_for_leader(), choose_next_subplan_for_worker(), CleanupInvalidationState(), ClearOldPredicateLocks(), ComputeXidHorizons(), consume_xids_shortcut(), copy_replication_slot(), CountDBBackends(), CountDBConnections(), CountOtherDBBackends(), CountUserBackends(), CreateCheckPoint(), CreateEndOfRecoveryRecord(), CreateInitDecodingContext(), CreatePredicateLock(), CreateRestartPoint(), DeactivateCommitTs(), DeleteChildTargetLocks(), DeleteLockTarget(), deleteLSNWaiter(), destroy_superblock(), DisableLogicalDecoding(), DisableLogicalDecodingIfNecessary(), do_autovacuum(), do_pg_backup_start(), do_pg_backup_stop(), do_start_worker(), DropAllPredicateLocksFromTable(), DropTableSpace(), dsa_allocate_extended(), dsa_dump(), dsa_free(), dsa_get_total_size(), dsa_get_total_size_from_handle(), dsa_pin(), dsa_release_in_place(), dsa_set_size_limit(), dsa_trim(), dsa_unpin(), dshash_delete_key(), dshash_dump(), dshash_find(), dshash_find_or_insert(), dshash_seq_next(), dsm_attach(), dsm_create(), dsm_detach(), dsm_pin_segment(), dsm_unpin_segment(), EnableLogicalDecoding(), ensure_active_superblock(), entry_reset(), ExecParallelHashMergeCounters(), ExecParallelHashPopChunkQueue(), ExecParallelHashTupleAlloc(), ExecParallelHashTuplePrealloc(), ExpireAllKnownAssignedTransactionIds(), ExpireOldKnownAssignedTransactionIds(), ExpireTreeKnownAssignedTransactionIds(), ExtendBufferedRelShared(), ExtendCLOG(), ExtendCommitTs(), ExtendMultiXactMember(), ExtendMultiXactOffset(), ExtendSUBTRANS(), FastPathGetRelationLockEntry(), FastPathTransferRelationLocks(), FindAndDropRelationBuffers(), FindDeletedTupleInLocalRel(), FinishPreparedTransaction(), ForceTransactionIdLimitUpdate(), ForwardSyncRequest(), FreeWorkerInfo(), get_local_synced_slots(), get_val_in_shmem(), get_xid_status(), GetBackgroundWorkerPid(), GetBackgroundWorkerTypeByPid(), GetBlockerStatusData(), GetConflictingVirtualXIDs(), GetCurrentVirtualXIDs(), GetLastImportantRecPtr(), GetLastSegSwitchData(), GetLatestCommitTsData(), GetLeaderApplyWorkerPid(), GetLockConflicts(), GetLockStatusData(), GetMultiXactIdMembers(), GetMultiXactInfo(), GetNewMultiXactId(), GetNewObjectId(), GetNewTransactionId(), 
GetOldestActiveTransactionId(), GetOldestMultiXactId(), GetOldestRestartPoint(), GetOldestSafeDecodingTransactionId(), GetOldestUnsummarizedLSN(), GetPredicateLockStatusData(), GetPreparedTransactionList(), GetRunningTransactionData(), GetRunningTransactionLocks(), GetSafeSnapshot(), GetSafeSnapshotBlockingPids(), GetSerializableTransactionSnapshotInt(), GetSnapshotData(), GetStrictOldestNonRemovableTransactionId(), GetVirtualXIDsDelayingChkpt(), GetWaitEventCustomIdentifier(), GetWaitEventCustomNames(), GetWalSummarizerState(), HaveVirtualXIDsDelayingChkpt(), init_conflict_slot_xmin(), init_dsm_registry(), initGlobalChannelTable(), InitWalSender(), injection_shmem_startup(), InjectionPointAttach(), InjectionPointDetach(), InjectionPointList(), InstallXLogFileSegment(), InvalidateBuffer(), InvalidateObsoleteReplicationSlots(), InvalidatePossiblyObsoleteSlot(), InvalidateVictimBuffer(), IoWorkerMain(), IsInstallXLogFileSegmentActive(), IsLogicalDecodingEnabled(), IsXLogLogicalInfoEnabled(), KnownAssignedXidsCompress(), KnownAssignedXidsReset(), lock_twophase_recover(), LockAcquireExtended(), LockErrorCleanup(), LockGXact(), LockHasWaiters(), LockRefindAndRelease(), LockRelease(), LockReleaseAll(), LockWaiterCount(), logicalrep_launcher_attach_dshmem(), logicalrep_pa_worker_stop(), logicalrep_reset_seqsync_start_time(), logicalrep_worker_attach(), logicalrep_worker_detach(), logicalrep_worker_launch(), logicalrep_worker_stop(), logicalrep_worker_stop_internal(), logicalrep_worker_wakeup(), logicalrep_workers_find(), LookupGXact(), LookupGXactBySubid(), MarkAsPrepared(), MarkAsPreparing(), multixact_redo(), MultiXactAdvanceNextMXact(), MultiXactGetCheckptMulti(), MultiXactIdSetOldestMember(), MultiXactIdSetOldestVisible(), MultiXactSetNextMXact(), PageIsPredicateLocked(), perform_relmap_update(), pg_control_checkpoint(), pg_control_init(), pg_control_recovery(), pg_control_system(), pg_get_replication_slots(), pg_get_shmem_allocations(), pg_get_shmem_allocations_numa(), pg_notification_queue_usage(), pg_show_replication_origin_status(), pg_stat_get_subscription(), pg_stat_statements_internal(), pg_xact_status(), pgaio_worker_die(), pgaio_worker_register(), pgaio_worker_submit_internal(), pgss_shmem_startup(), pgss_store(), pgstat_archiver_reset_all_cb(), pgstat_archiver_snapshot_cb(), pgstat_bgwriter_reset_all_cb(), pgstat_bgwriter_snapshot_cb(), pgstat_build_snapshot(), pgstat_checkpointer_reset_all_cb(), pgstat_checkpointer_snapshot_cb(), pgstat_fetch_replslot(), pgstat_io_flush_cb(), pgstat_io_reset_all_cb(), pgstat_io_snapshot_cb(), pgstat_lock_entry(), pgstat_lock_entry_shared(), pgstat_reset_matching_entries(), pgstat_reset_replslot(), pgstat_reset_slru_counter_internal(), pgstat_slru_flush_cb(), pgstat_slru_snapshot_cb(), pgstat_wal_flush_cb(), pgstat_wal_reset_all_cb(), pgstat_wal_snapshot_cb(), PostPrepare_Locks(), PostPrepare_MultiXact(), PostPrepare_Twophase(), PreCommit_CheckForSerializationFailure(), PreCommit_Notify(), predicatelock_twophase_recover(), PredicateLockPageSplit(), PredicateLockTwoPhaseFinish(), PrefetchSharedBuffer(), PrescanPreparedTransactions(), ProcArrayAdd(), ProcArrayApplyRecoveryInfo(), ProcArrayApplyXidAssignment(), ProcArrayClearTransaction(), ProcArrayEndTransaction(), ProcArrayGetReplicationSlotXmin(), ProcArrayGroupClearXid(), ProcArrayInstallImportedXmin(), ProcArrayInstallRestoredXmin(), ProcArrayRemove(), ProcArraySetReplicationSlotXmin(), ProcessSequencesForSync(), ProcessSyncingTablesForApply(), ProcKill(), ProcNumberGetTransactionIds(), 
ProcSleep(), ReachedEndOfBackup(), read_relmap_file(), ReadMultiXactIdRange(), ReadNextFullTransactionId(), ReadNextMultiXactId(), ReadReplicationSlot(), RecordNewMultiXact(), RecoverPreparedTransactions(), RegisterDynamicBackgroundWorker(), RegisterPredicateLockingXid(), RelationCacheInitFilePreInvalidate(), RelationMapCopy(), RelationMapFinishBootstrap(), ReleaseOneSerializableXact(), ReleasePredicateLocks(), relmap_redo(), RemoveScratchTarget(), ReplicationSlotAcquire(), ReplicationSlotCleanup(), ReplicationSlotCreate(), ReplicationSlotDropPtr(), ReplicationSlotName(), ReplicationSlotRelease(), ReplicationSlotReserveWal(), ReplicationSlotsComputeLogicalRestartLSN(), ReplicationSlotsComputeRequiredLSN(), ReplicationSlotsComputeRequiredXmin(), ReplicationSlotsCountDBSlots(), ReplicationSlotsDropDBSlots(), replorigin_advance(), replorigin_get_progress(), replorigin_session_advance(), replorigin_session_get_progress(), replorigin_session_reset_internal(), replorigin_session_setup(), replorigin_state_clear(), RequestDisableLogicalDecoding(), ResetInstallXLogFileSegmentActive(), resize(), RestoreScratchTarget(), restoreTwoPhaseData(), SaveSlotToPath(), SearchNamedReplicationSlot(), SerialAdd(), SerialGetMinConflictCommitSeqNo(), SerialInit(), SerialSetActiveSerXmin(), set_indexsafe_procflags(), set_val_in_shmem(), SetCommitTsLimit(), SetInstallXLogFileSegmentActive(), SetMultiXactIdLimit(), SetNextObjectId(), SetOldestOffset(), SetTransactionIdLimit(), SetXidCommitTsInPage(), SharedInvalBackendInit(), ShmemInitStruct(), SICleanupQueue(), SIGetDataEntries(), SignalBackends(), SignalVirtualTransaction(), SIInsertDataEntries(), SimpleLruReadPage(), SimpleLruReadPage_ReadOnly(), SimpleLruTruncate(), SimpleLruWaitIO(), SimpleLruWriteAll(), SimpleLruZeroAndWritePage(), SlruDeleteSegment(), SlruInternalWritePage(), SnapBuildInitialSnapshot(), ss_get_location(), StandbyRecoverPreparedTransactions(), StandbySlotsHaveCaughtup(), StartupDecodingContext(), StartupSUBTRANS(), StartupXLOG(), sts_parallel_scan_next(), SubTransSetParent(), SummarizeOldestCommittedSxact(), SummarizeWAL(), SwitchIntoArchiveRecovery(), synchronize_one_slot(), SyncRepCancelWait(), SyncRepCleanupAtProcExit(), SyncRepReleaseWaiters(), SyncRepUpdateSyncStandbysDefined(), SyncRepWaitForLSN(), TablespaceCreateDbspace(), tbm_shared_iterate(), TerminateBackgroundWorker(), TerminateBackgroundWorkersForDatabase(), TerminateOtherDBBackends(), test_aio_shmem_startup(), test_custom_stats_fixed_reset_all_cb(), test_custom_stats_fixed_snapshot_cb(), test_custom_stats_fixed_update(), test_slru_page_exists(), test_slru_page_read(), test_slru_page_write(), TransactionGroupUpdateXidStatus(), TransactionIdGetCommitTsData(), TransactionIdIsInProgress(), TransactionIdSetPageStatus(), TransactionTreeSetCommitTsData(), TransferPredicateLocksToNewTarget(), TrimCLOG(), TrimMultiXact(), TruncateMultiXact(), TwoPhaseGetGXact(), TwoPhaseGetOldestXidInCommit(), TwoPhaseGetXidByVirtualXID(), update_cached_xid_range(), update_synced_slots_inactive_since(), UpdateLogicalDecodingStatusEndOfRecovery(), UpdateMinRecoveryPoint(), vac_truncate_clog(), vacuum_rel(), VacuumUpdateCosts(), VirtualXactLock(), VirtualXactLockTableCleanup(), VirtualXactLockTableInsert(), wait_for_table_state_change(), wait_for_worker_state_change(), WaitEventCustomNew(), WaitForReplicationWorkerAttach(), WaitForWalSummarization(), wakeupWaiters(), WakeupWalSummarizer(), WALInsertLockAcquire(), WALInsertLockAcquireExclusive(), WalSummarizerMain(), WalSummarizerShutdown(), 
write_relcache_init_file(), xact_redo(), XidCacheRemoveRunningXids(), xlog_redo(), XLogBackgroundFlush(), and XLogReportParameters().
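
The canonical usage is a short critical section bracketed by LWLockAcquire() and LWLockRelease(): shared mode for readers, exclusive mode for writers. A minimal sketch over a hypothetical shared-memory counter (MySharedState and its fields are illustrative, not part of lwlock.c):

#include "postgres.h"
#include "storage/lwlock.h"

typedef struct MySharedState
{
    LWLock     *lock;           /* e.g. obtained via GetNamedLWLockTranche() */
    uint64      counter;
} MySharedState;

static void
counter_add(MySharedState *state, uint64 delta)
{
    LWLockAcquire(state->lock, LW_EXCLUSIVE);   /* blocks until granted */
    state->counter += delta;                    /* keep the section short */
    LWLockRelease(state->lock);
}

static uint64
counter_read(MySharedState *state)
{
    uint64      v;

    LWLockAcquire(state->lock, LW_SHARED);      /* concurrent readers OK */
    v = state->counter;
    LWLockRelease(state->lock);
    return v;
}

Note the return value: LWLockAcquire() returns true if the lock was available immediately and false if it had to sleep, which callers can use as a cheap contention signal.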

◆ LWLockAcquireOrWait()

bool LWLockAcquireOrWait ( LWLock lock,
LWLockMode  mode 
)

Definition at line 1404 of file lwlock.c.

1405{
1406 PGPROC *proc = MyProc;
1407 bool mustwait;
1408 int extraWaits = 0;
1409#ifdef LWLOCK_STATS
1410 lwlock_stats *lwstats;
1411
1412 lwstats = get_lwlock_stats_entry(lock);
1413#endif
1414
1415 Assert(mode == LW_SHARED || mode == LW_EXCLUSIVE);
1416
1417 PRINT_LWDEBUG("LWLockAcquireOrWait", lock, mode);
1418
1419 /* Ensure we will have room to remember the lock */
1420 if (num_held_lwlocks >= MAX_SIMUL_LWLOCKS)
1421 elog(ERROR, "too many LWLocks taken");
1422
1423 /*
1424 * Lock out cancel/die interrupts until we exit the code section protected
1425 * by the LWLock. This ensures that interrupts will not interfere with
1426 * manipulations of data structures in shared memory.
1427 */
1428 HOLD_INTERRUPTS();
1429
1430 /*
1431 * NB: We're using nearly the same twice-in-a-row lock acquisition
1432 * protocol as LWLockAcquire(). Check its comments for details.
1433 */
1434 mustwait = LWLockAttemptLock(lock, mode);
1435
1436 if (mustwait)
1437 {
1438 LWLockQueueSelf(lock, LW_WAIT_UNTIL_FREE);
1439
1440 mustwait = LWLockAttemptLock(lock, mode);
1441
1442 if (mustwait)
1443 {
1444 /*
1445 * Wait until awakened. Like in LWLockAcquire, be prepared for
1446 * bogus wakeups.
1447 */
1448 LOG_LWDEBUG("LWLockAcquireOrWait", lock, "waiting");
1449
1450#ifdef LWLOCK_STATS
1451 lwstats->block_count++;
1452#endif
1453
1457
1458 for (;;)
1459 {
1460 PGSemaphoreLock(proc->sem);
1461 if (proc->lwWaiting == LW_WS_NOT_WAITING)
1462 break;
1463 extraWaits++;
1464 }
1465
1466#ifdef LOCK_DEBUG
1467 {
1468 /* not waiting anymore */
1470
1472 }
1473#endif
1477
1478 LOG_LWDEBUG("LWLockAcquireOrWait", lock, "awakened");
1479 }
1480 else
1481 {
1482 LOG_LWDEBUG("LWLockAcquireOrWait", lock, "acquired, undoing queue");
1483
1484 /*
1485 * Got lock in the second attempt, undo queueing. We need to treat
1486 * this as having successfully acquired the lock, otherwise we'd
1487 * not necessarily wake up people we've prevented from acquiring
1488 * the lock.
1489 */
1490 LWLockDequeueSelf(lock);
1491 }
1492 }
1493
1494 /*
1495 * Fix the process wait semaphore's count for any absorbed wakeups.
1496 */
1497 while (extraWaits-- > 0)
1498 PGSemaphoreUnlock(proc->sem);
1499
1500 if (mustwait)
1501 {
1502 /* Failed to get lock, so release interrupt holdoff */
1503 RESUME_INTERRUPTS();
1504 LOG_LWDEBUG("LWLockAcquireOrWait", lock, "failed");
1507 }
1508 else
1509 {
1510 LOG_LWDEBUG("LWLockAcquireOrWait", lock, "succeeded");
1511 /* Add lock to list of locks held by this backend */
1512 held_lwlocks[num_held_lwlocks].lock = lock;
1513 held_lwlocks[num_held_lwlocks++].mode = mode;
1516 }
1517
1518 return !mustwait;
1519}

References Assert, elog, ERROR, held_lwlocks, HOLD_INTERRUPTS, LWLockHandle::lock, LOG_LWDEBUG, LW_EXCLUSIVE, LW_SHARED, LW_WAIT_UNTIL_FREE, LW_WS_NOT_WAITING, LWLockAttemptLock(), LWLockDequeueSelf(), LWLockQueueSelf(), LWLockReportWaitEnd(), LWLockReportWaitStart(), PGPROC::lwWaiting, MAX_BACKENDS, MAX_SIMUL_LWLOCKS, LWLockHandle::mode, mode, MyProc, num_held_lwlocks, pg_atomic_fetch_sub_u32(), PG_USED_FOR_ASSERTS_ONLY, PGSemaphoreLock(), PGSemaphoreUnlock(), PRINT_LWDEBUG, RESUME_INTERRUPTS, PGPROC::sem, and T_NAME.

Referenced by XLogFlush().
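
The one in-tree caller, XLogFlush(), uses this to piggyback on other backends' work: if the lock could not be taken, somebody else held it and may already have flushed past the point we care about, so the caller rechecks progress instead of insisting on the lock. A hedged sketch of that shape (flush_lock, progress, do_flush(), and target are all hypothetical names):

for (;;)
{
    if (progress->flushed >= target)
        break;                          /* somebody else did our work */

    if (LWLockAcquireOrWait(flush_lock, LW_EXCLUSIVE))
    {
        /* Lock acquired: do the work ourselves, then stop looping. */
        do_flush(target);
        LWLockRelease(flush_lock);
        break;
    }

    /* Returned false: the previous holder released the lock; recheck. */
}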

◆ LWLockAnyHeldByMe()

bool LWLockAnyHeldByMe ( LWLock lock,
int  nlocks,
size_t  stride 
)

Definition at line 1929 of file lwlock.c.

1930{
1931 char *held_lock_addr;
1932 char *begin;
1933 char *end;
1934 int i;
1935
1936 begin = (char *) lock;
1937 end = begin + nlocks * stride;
1938 for (i = 0; i < num_held_lwlocks; i++)
1939 {
1940 held_lock_addr = (char *) held_lwlocks[i].lock;
1941 if (held_lock_addr >= begin &&
1942 held_lock_addr < end &&
1943 (held_lock_addr - begin) % stride == 0)
1944 return true;
1945 }
1946 return false;
1947}

References held_lwlocks, i, and num_held_lwlocks.
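
This exists for assertions over arrays of structs that each embed an LWLock at a fixed stride. A sketch with a hypothetical partition table (MyPartition and partitions are illustrative only):

typedef struct MyPartition
{
    LWLock      lock;
    int         nentries;
} MyPartition;

static MyPartition partitions[16];

/* e.g. before sleeping: we must not hold any of the 16 partition locks */
Assert(!LWLockAnyHeldByMe(&partitions[0].lock, 16, sizeof(MyPartition)));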

◆ LWLockAttemptLock()

static bool LWLockAttemptLock ( LWLock lock,
LWLockMode  mode 
)
static

Definition at line 790 of file lwlock.c.

791{
792 uint32 old_state;
793
794 Assert(mode == LW_EXCLUSIVE || mode == LW_SHARED);
795
796 /*
797 * Read once outside the loop, later iterations will get the newer value
798 * via compare & exchange.
799 */
800 old_state = pg_atomic_read_u32(&lock->state);
801
802 /* loop until we've determined whether we could acquire the lock or not */
803 while (true)
804 {
805 uint32 desired_state;
806 bool lock_free;
807
808 desired_state = old_state;
809
810 if (mode == LW_EXCLUSIVE)
811 {
812 lock_free = (old_state & LW_LOCK_MASK) == 0;
813 if (lock_free)
814 desired_state += LW_VAL_EXCLUSIVE;
815 }
816 else
817 {
818 lock_free = (old_state & LW_VAL_EXCLUSIVE) == 0;
819 if (lock_free)
820 desired_state += LW_VAL_SHARED;
821 }
822
823 /*
824 * Attempt to swap in the state we are expecting. If we didn't see
825 * lock to be free, that's just the old value. If we saw it as free,
826 * we'll attempt to mark it acquired. The reason that we always swap
827 * in the value is that this doubles as a memory barrier. We could try
828 * to be smarter and only swap in values if we saw the lock as free,
830 * but benchmarks haven't shown it to be beneficial so far.
830 *
831 * Retry if the value changed since we last looked at it.
832 */
833 if (pg_atomic_compare_exchange_u32(&lock->state,
834 &old_state, desired_state))
835 {
836 if (lock_free)
837 {
838 /* Great! Got the lock. */
839#ifdef LOCK_DEBUG
840 if (mode == LW_EXCLUSIVE)
841 lock->owner = MyProc;
842#endif
843 return false;
844 }
845 else
846 return true; /* somebody else has the lock */
847 }
848 }
849 pg_unreachable();
850}

References Assert, LW_EXCLUSIVE, LW_LOCK_MASK, LW_SHARED, LW_VAL_EXCLUSIVE, LW_VAL_SHARED, mode, MyProc, pg_atomic_compare_exchange_u32(), pg_atomic_read_u32(), pg_unreachable, and LWLock::state.

Referenced by LWLockAcquire(), LWLockAcquireOrWait(), and LWLockConditionalAcquire().

◆ LWLockConditionalAcquire()

bool LWLockConditionalAcquire ( LWLock lock,
LWLockMode  mode 
)

Definition at line 1347 of file lwlock.c.

1348{
1349 bool mustwait;
1350
1351 Assert(mode == LW_SHARED || mode == LW_EXCLUSIVE);
1352
1353 PRINT_LWDEBUG("LWLockConditionalAcquire", lock, mode);
1354
1355 /* Ensure we will have room to remember the lock */
1356 if (num_held_lwlocks >= MAX_SIMUL_LWLOCKS)
1357 elog(ERROR, "too many LWLocks taken");
1358
1359 /*
1360 * Lock out cancel/die interrupts until we exit the code section protected
1361 * by the LWLock. This ensures that interrupts will not interfere with
1362 * manipulations of data structures in shared memory.
1363 */
1364 HOLD_INTERRUPTS();
1365
1366 /* Check for the lock */
1367 mustwait = LWLockAttemptLock(lock, mode);
1368
1369 if (mustwait)
1370 {
1371 /* Failed to get lock, so release interrupt holdoff */
1372 RESUME_INTERRUPTS();
1373
1374 LOG_LWDEBUG("LWLockConditionalAcquire", lock, "failed");
1377 }
1378 else
1379 {
1380 /* Add lock to list of locks held by this backend */
1381 held_lwlocks[num_held_lwlocks].lock = lock;
1382 held_lwlocks[num_held_lwlocks++].mode = mode;
1385 }
1386 return !mustwait;
1387}

References Assert, elog, ERROR, held_lwlocks, HOLD_INTERRUPTS, LWLockHandle::lock, LOG_LWDEBUG, LW_EXCLUSIVE, LW_SHARED, LWLockAttemptLock(), MAX_SIMUL_LWLOCKS, LWLockHandle::mode, mode, num_held_lwlocks, PRINT_LWDEBUG, RESUME_INTERRUPTS, and T_NAME.

Referenced by pgstat_io_flush_cb(), pgstat_lock_entry(), pgstat_lock_entry_shared(), pgstat_slru_flush_cb(), pgstat_wal_flush_cb(), ProcArrayEndTransaction(), SimpleLruWaitIO(), ss_report_location(), TransactionIdSetPageStatus(), and XLogNeedsFlush().
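
As the callers above suggest, this suits work that can be postponed rather than block on the lock, e.g. the pgstat flush callbacks. A sketch under that assumption (try_flush(), MySharedState, and flush_pending() are hypothetical):

static bool
try_flush(MySharedState *state)
{
    if (!LWLockConditionalAcquire(state->lock, LW_EXCLUSIVE))
        return false;           /* contended: caller retries on next cycle */

    flush_pending(state);       /* hypothetical worker */
    LWLockRelease(state->lock);
    return true;
}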

◆ LWLockConflictsWithVar()

static bool LWLockConflictsWithVar ( LWLock lock,
pg_atomic_uint64 valptr,
uint64  oldval,
uint64 newval,
bool result 
)
static

Definition at line 1531 of file lwlock.c.

1533{
1534 bool mustwait;
1535 uint64 value;
1536
1537 /*
1538 * Test first to see if the slot is free right now.
1539 *
1540 * XXX: the unique caller of this routine, WaitXLogInsertionsToFinish()
1541 * via LWLockWaitForVar(), uses an implied barrier with a spinlock before
1542 * this, so we don't need a memory barrier here as far as the current
1543 * usage is concerned. But that might not be safe in general.
1544 */
1545 mustwait = (pg_atomic_read_u32(&lock->state) & LW_VAL_EXCLUSIVE) != 0;
1546
1547 if (!mustwait)
1548 {
1549 *result = true;
1550 return false;
1551 }
1552
1553 *result = false;
1554
1555 /*
1556 * Reading this value atomically is safe even on platforms where uint64
1557 * cannot be read without observing a torn value.
1558 */
1559 value = pg_atomic_read_u64(valptr);
1560
1561 if (value != oldval)
1562 {
1563 mustwait = false;
1564 *newval = value;
1565 }
1566 else
1567 {
1568 mustwait = true;
1569 }
1570
1571 return mustwait;
1572}

References LW_VAL_EXCLUSIVE, newval, pg_atomic_read_u32(), pg_atomic_read_u64(), LWLock::state, and value.

Referenced by LWLockWaitForVar().

◆ LWLockDequeueSelf()

static void LWLockDequeueSelf ( LWLock lock)
static

Definition at line 1087 of file lwlock.c.

1088{
1089 bool on_waitlist;
1090
1091#ifdef LWLOCK_STATS
1092 lwlock_stats *lwstats;
1093
1094 lwstats = get_lwlock_stats_entry(lock);
1095
1096 lwstats->dequeue_self_count++;
1097#endif
1098
1099 LWLockWaitListLock(lock);
1100
1101 /*
1102 * Remove ourselves from the waitlist, unless we've already been removed.
1103 * The removal happens with the wait list lock held, so there's no race in
1104 * this check.
1105 */
1106 on_waitlist = MyProc->lwWaiting == LW_WS_WAITING;
1107 if (on_waitlist)
1108 proclist_delete(&lock->waiters, MyProcNumber, lwWaitLink);
1109
1110 if (proclist_is_empty(&lock->waiters) &&
1111 (pg_atomic_read_u32(&lock->state) & LW_FLAG_HAS_WAITERS) != 0)
1112 {
1113 pg_atomic_fetch_and_u32(&lock->state, ~LW_FLAG_HAS_WAITERS);
1114 }
1115
1116 /* XXX: combine with fetch_and above? */
1117 LWLockWaitListUnlock(lock);
1118
1119 /* clear waiting state again, nice for debugging */
1120 if (on_waitlist)
1121 MyProc->lwWaiting = LW_WS_NOT_WAITING;
1122 else
1123 {
1124 int extraWaits = 0;
1125
1126 /*
1127 * Somebody else dequeued us and has or will wake us up. Deal with the
1128 * superfluous absorption of a wakeup.
1129 */
1130
1131 /*
1132 * Clear LW_FLAG_WAKE_IN_PROGRESS if somebody woke us before we
1133 * removed ourselves - they'll have set it.
1134 */
1135 pg_atomic_fetch_and_u32(&lock->state, ~LW_FLAG_WAKE_IN_PROGRESS);
1136
1137 /*
1138 * Now wait for the scheduled wakeup, otherwise our ->lwWaiting would
1139 * get reset at some inconvenient point later. Most of the time this
1140 * will immediately return.
1141 */
1142 for (;;)
1143 {
1144 PGSemaphoreLock(MyProc->sem);
1145 if (MyProc->lwWaiting == LW_WS_NOT_WAITING)
1146 break;
1147 extraWaits++;
1148 }
1149
1150 /*
1151 * Fix the process wait semaphore's count for any absorbed wakeups.
1152 */
1153 while (extraWaits-- > 0)
1154 PGSemaphoreUnlock(MyProc->sem);
1155 }
1156
1157#ifdef LOCK_DEBUG
1158 {
1159 /* not waiting anymore */
1161
1163 }
1164#endif
1165}

References Assert, LW_FLAG_HAS_WAITERS, LW_FLAG_WAKE_IN_PROGRESS, LW_WS_NOT_WAITING, LW_WS_WAITING, LWLockWaitListLock(), LWLockWaitListUnlock(), PGPROC::lwWaiting, MAX_BACKENDS, MyProc, MyProcNumber, pg_atomic_fetch_and_u32(), pg_atomic_fetch_sub_u32(), pg_atomic_read_u32(), PG_USED_FOR_ASSERTS_ONLY, PGSemaphoreLock(), PGSemaphoreUnlock(), proclist_delete, proclist_is_empty(), PGPROC::sem, LWLock::state, and LWLock::waiters.

Referenced by LWLockAcquire(), LWLockAcquireOrWait(), and LWLockWaitForVar().

◆ LWLockHeldByMe()

◆ LWLockHeldByMeInMode()

◆ LWLockInitialize()

◆ LWLockNewTrancheId()

int LWLockNewTrancheId ( const char name)

Definition at line 596 of file lwlock.c.

597{
598 int result;
599
600 if (!name)
603 errmsg("tranche name cannot be NULL")));
604
605 if (strlen(name) >= NAMEDATALEN)
608 errmsg("tranche name too long"),
609 errdetail("LWLock tranche names must be no longer than %d bytes.",
610 NAMEDATALEN - 1)));
611
612 /*
613 * We use the ShmemLock spinlock to protect LWLockCounter and
614 * LWLockTrancheNames.
615 */
616 SpinLockAcquire(ShmemLock);
617
618 if (*LWLockCounter - LWTRANCHE_FIRST_USER_DEFINED >= MAX_NAMED_TRANCHES)
619 {
620 SpinLockRelease(ShmemLock);
621 ereport(ERROR,
622 (errmsg("maximum number of tranches already registered"),
623 errdetail("No more than %d tranches may be registered.",
624 MAX_NAMED_TRANCHES)));
625 }
626
627 result = (*LWLockCounter)++;
628 strlcpy(LWLockTrancheNames[result - LWTRANCHE_FIRST_USER_DEFINED], name, NAMEDATALEN);
629 LocalLWLockCounter = *LWLockCounter;
630
631 SpinLockRelease(ShmemLock);
632
633 return result;
634}

References ereport, errcode(), errdetail(), errmsg(), ERROR, LocalLWLockCounter, LWLockCounter, LWLockTrancheNames, LWTRANCHE_FIRST_USER_DEFINED, MAX_NAMED_TRANCHES, name, NAMEDATALEN, ShmemLock, SpinLockAcquire, SpinLockRelease, and strlcpy().

Referenced by apw_init_state(), GetNamedDSA(), GetNamedDSHash(), init_tdr_dsm(), init_tranche(), InitializeLWLocks(), test_basic(), test_create(), test_empty(), test_lwlock_tranche_creation(), test_lwlock_tranches(), test_random(), and test_slru_shmem_startup().
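
Per the signature on this page, the name is registered in the same call (older releases used LWLockNewTrancheId(void) plus a separate LWLockRegisterTranche()). A sketch of initializing a lock that lives in mapped shared memory such as a DSM segment (MySharedArea and the tranche name are hypothetical):

typedef struct MySharedArea
{
    LWLock      lock;
    int         value;
} MySharedArea;

static void
my_area_init(MySharedArea *area)
{
    /* Allocate and name a tranche ID, then stamp it into the lock. */
    int         tranche_id = LWLockNewTrancheId("my_shared_area");

    LWLockInitialize(&area->lock, tranche_id);
}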

◆ LWLockQueueSelf()

static void LWLockQueueSelf ( LWLock lock,
LWLockMode  mode 
)
static

Definition at line 1044 of file lwlock.c.

1045{
1046 /*
1047 * If we don't have a PGPROC structure, there's no way to wait. This
1048 * should never occur, since MyProc should only be null during shared
1049 * memory initialization.
1050 */
1051 if (MyProc == NULL)
1052 elog(PANIC, "cannot wait without a PGPROC structure");
1053
1054 if (MyProc->lwWaiting != LW_WS_NOT_WAITING)
1055 elog(PANIC, "queueing for lock while waiting on another one");
1056
1057 LWLockWaitListLock(lock);
1058
1059 /* setting the flag is protected by the spinlock */
1060 pg_atomic_fetch_or_u32(&lock->state, LW_FLAG_HAS_WAITERS);
1061
1062 MyProc->lwWaiting = LW_WS_WAITING;
1063 MyProc->lwWaitMode = mode;
1064
1065 /* LW_WAIT_UNTIL_FREE waiters are always at the front of the queue */
1066 if (mode == LW_WAIT_UNTIL_FREE)
1067 proclist_push_head(&lock->waiters, MyProcNumber, lwWaitLink);
1068 else
1069 proclist_push_tail(&lock->waiters, MyProcNumber, lwWaitLink);
1070
1071 /* Can release the mutex now */
1072 LWLockWaitListUnlock(lock);
1073
1074#ifdef LOCK_DEBUG
1075 pg_atomic_fetch_add_u32(&lock->nwaiters, 1);
1076#endif
1077}

References elog, LW_FLAG_HAS_WAITERS, LW_WAIT_UNTIL_FREE, LW_WS_NOT_WAITING, LW_WS_WAITING, LWLockWaitListLock(), LWLockWaitListUnlock(), PGPROC::lwWaiting, PGPROC::lwWaitMode, mode, MyProc, MyProcNumber, PANIC, pg_atomic_fetch_add_u32(), pg_atomic_fetch_or_u32(), proclist_push_head, proclist_push_tail, LWLock::state, and LWLock::waiters.

Referenced by LWLockAcquire(), LWLockAcquireOrWait(), and LWLockWaitForVar().

◆ LWLockRelease()

void LWLockRelease ( LWLock lock)

Definition at line 1793 of file lwlock.c.

1794{
1795 LWLockMode mode;
1796 uint32 oldstate;
1797 bool check_waiters;
1798 int i;
1799
1800 /*
1801 * Remove lock from list of locks held. Usually, but not always, it will
1802 * be the latest-acquired lock; so search array backwards.
1803 */
1804 for (i = num_held_lwlocks; --i >= 0;)
1805 if (lock == held_lwlocks[i].lock)
1806 break;
1807
1808 if (i < 0)
1809 elog(ERROR, "lock %s is not held", T_NAME(lock));
1810
1811 mode = held_lwlocks[i].mode;
1812
1813 num_held_lwlocks--;
1814 for (; i < num_held_lwlocks; i++)
1815 held_lwlocks[i] = held_lwlocks[i + 1];
1816
1817 PRINT_LWDEBUG("LWLockRelease", lock, mode);
1818
1819 /*
1820 * Release my hold on lock, after that it can immediately be acquired by
1821 * others, even if we still have to wakeup other waiters.
1822 */
1823 if (mode == LW_EXCLUSIVE)
1824 oldstate = pg_atomic_sub_fetch_u32(&lock->state, LW_VAL_EXCLUSIVE);
1825 else
1826 oldstate = pg_atomic_sub_fetch_u32(&lock->state, LW_VAL_SHARED);
1827
1828 /* nobody else can have that kind of lock */
1829 Assert(!(oldstate & LW_VAL_EXCLUSIVE));
1830
1833
1834 /*
1835 * Check if we're still waiting for backends to get scheduled, if so,
1836 * don't wake them up again.
1837 */
1838 if ((oldstate & LW_FLAG_HAS_WAITERS) &&
1839 (oldstate & LW_FLAG_WAKE_IN_PROGRESS) == 0 &&
1840 (oldstate & LW_LOCK_MASK) == 0)
1841 check_waiters = true;
1842 else
1843 check_waiters = false;
1844
1845 /*
1846 * As waking up waiters requires the spinlock to be acquired, only do so
1847 * if necessary.
1848 */
1849 if (check_waiters)
1850 {
1851 /* XXX: remove before commit? */
1852 LOG_LWDEBUG("LWLockRelease", lock, "releasing waiters");
1853 LWLockWakeup(lock);
1854 }
1855
1856 /*
1857 * Now okay to allow cancel/die interrupts.
1858 */
1859 RESUME_INTERRUPTS();
1860}

References Assert, elog, ERROR, held_lwlocks, i, LOG_LWDEBUG, LW_EXCLUSIVE, LW_FLAG_HAS_WAITERS, LW_FLAG_WAKE_IN_PROGRESS, LW_LOCK_MASK, LW_VAL_EXCLUSIVE, LW_VAL_SHARED, LWLockWakeup(), LWLockHandle::mode, mode, num_held_lwlocks, pg_atomic_sub_fetch_u32(), PRINT_LWDEBUG, RESUME_INTERRUPTS, LWLock::state, and T_NAME.

Referenced by _bt_end_vacuum(), _bt_parallel_done(), _bt_parallel_primscan_schedule(), _bt_parallel_release(), _bt_parallel_seize(), _bt_start_vacuum(), _bt_vacuum_cycleid(), abort_logical_decoding_activation(), AbsorbSyncRequests(), ActivateCommitTs(), addLSNWaiter(), AdvanceNextFullTransactionIdPastXid(), AdvanceOldestClogXid(), AdvanceOldestCommitTsXid(), AdvanceXLInsertBuffer(), alloc_object(), AlterSystemSetConfigFile(), ApplyLauncherMain(), apw_detach_shmem(), apw_dump_now(), apw_load_buffers(), AsyncNotifyFreezeXids(), asyncQueueAddEntries(), asyncQueueAdvanceTail(), asyncQueueProcessPageEntries(), asyncQueueReadAllNotifications(), asyncQueueUnregister(), AtAbort_Twophase(), AtEOXact_LogicalRepWorkers(), AtPrepare_PredicateLocks(), attach_internal(), autoprewarm_main(), autoprewarm_start_worker(), AutoVacLauncherMain(), AutoVacuumRequestWork(), AutoVacWorkerMain(), BackendPidGetProc(), BackendXidGetPid(), BecomeLockGroupLeader(), BecomeLockGroupMember(), BecomeRegisteredListener(), btparallelrescan(), BufferAlloc(), CancelDBBackends(), check_for_freed_segments(), CheckDeadLock(), CheckForSerializableConflictOut(), CheckLogicalSlotExists(), CheckPointPredicate(), CheckPointRelationMap(), CheckPointReplicationOrigin(), CheckPointReplicationSlots(), CheckPointTwoPhase(), CheckTableForSerializableConflictIn(), CheckTargetForConflictsIn(), choose_next_subplan_for_leader(), choose_next_subplan_for_worker(), CleanupInvalidationState(), ClearOldPredicateLocks(), ComputeXidHorizons(), consume_xids_shortcut(), copy_replication_slot(), CountDBBackends(), CountDBConnections(), CountOtherDBBackends(), CountUserBackends(), CreateCheckPoint(), CreateEndOfRecoveryRecord(), CreateInitDecodingContext(), CreatePredicateLock(), CreateRestartPoint(), DeactivateCommitTs(), DeleteChildTargetLocks(), DeleteLockTarget(), deleteLSNWaiter(), destroy_superblock(), DisableLogicalDecoding(), DisableLogicalDecodingIfNecessary(), do_autovacuum(), do_pg_backup_start(), do_pg_backup_stop(), do_start_worker(), DropAllPredicateLocksFromTable(), DropTableSpace(), dsa_allocate_extended(), dsa_dump(), dsa_free(), dsa_get_total_size(), dsa_get_total_size_from_handle(), dsa_pin(), dsa_release_in_place(), dsa_set_size_limit(), dsa_trim(), dsa_unpin(), dshash_delete_entry(), dshash_delete_key(), dshash_dump(), dshash_find(), dshash_find_or_insert(), dshash_release_lock(), dshash_seq_next(), dshash_seq_term(), dsm_attach(), dsm_create(), dsm_detach(), dsm_pin_segment(), dsm_unpin_segment(), EnableLogicalDecoding(), ensure_active_superblock(), entry_reset(), ExecParallelHashMergeCounters(), ExecParallelHashPopChunkQueue(), ExecParallelHashTupleAlloc(), ExecParallelHashTuplePrealloc(), ExpireAllKnownAssignedTransactionIds(), ExpireOldKnownAssignedTransactionIds(), ExpireTreeKnownAssignedTransactionIds(), ExtendBufferedRelShared(), ExtendCLOG(), ExtendCommitTs(), ExtendMultiXactMember(), ExtendMultiXactOffset(), ExtendSUBTRANS(), FastPathGetRelationLockEntry(), FastPathTransferRelationLocks(), find_multixact_start(), FindAndDropRelationBuffers(), FindDeletedTupleInLocalRel(), FinishPreparedTransaction(), ForceTransactionIdLimitUpdate(), ForwardSyncRequest(), FreeWorkerInfo(), get_local_synced_slots(), get_val_in_shmem(), get_xid_status(), GetBackgroundWorkerPid(), GetBackgroundWorkerTypeByPid(), GetBlockerStatusData(), GetConflictingVirtualXIDs(), GetCurrentVirtualXIDs(), GetLastImportantRecPtr(), GetLastSegSwitchData(), GetLatestCommitTsData(), GetLeaderApplyWorkerPid(), GetLockConflicts(), GetLockStatusData(), 
GetMultiXactIdMembers(), GetMultiXactInfo(), GetNewMultiXactId(), GetNewObjectId(), GetNewTransactionId(), GetOldestActiveTransactionId(), GetOldestMultiXactId(), GetOldestRestartPoint(), GetOldestSafeDecodingTransactionId(), GetOldestUnsummarizedLSN(), GetPredicateLockStatusData(), GetPreparedTransactionList(), GetRunningTransactionLocks(), GetSafeSnapshot(), GetSafeSnapshotBlockingPids(), GetSerializableTransactionSnapshotInt(), GetSnapshotData(), GetStrictOldestNonRemovableTransactionId(), GetVirtualXIDsDelayingChkpt(), GetWaitEventCustomIdentifier(), GetWaitEventCustomNames(), GetWalSummarizerState(), HaveVirtualXIDsDelayingChkpt(), init_conflict_slot_xmin(), init_dsm_registry(), initGlobalChannelTable(), InitWalSender(), injection_shmem_startup(), InjectionPointAttach(), InjectionPointDetach(), InjectionPointList(), InstallXLogFileSegment(), InvalidateBuffer(), InvalidateObsoleteReplicationSlots(), InvalidatePossiblyObsoleteSlot(), InvalidateVictimBuffer(), IoWorkerMain(), IsInstallXLogFileSegmentActive(), IsLogicalDecodingEnabled(), IsXLogLogicalInfoEnabled(), KnownAssignedXidsCompress(), KnownAssignedXidsReset(), lock_twophase_recover(), LockAcquireExtended(), LockErrorCleanup(), LockGXact(), LockHasWaiters(), LockRefindAndRelease(), LockRelease(), LockReleaseAll(), LockWaiterCount(), logicalrep_launcher_attach_dshmem(), logicalrep_pa_worker_stop(), logicalrep_reset_seqsync_start_time(), logicalrep_worker_attach(), logicalrep_worker_detach(), logicalrep_worker_launch(), logicalrep_worker_stop(), logicalrep_worker_stop_internal(), logicalrep_worker_wakeup(), logicalrep_workers_find(), LogStandbySnapshot(), LookupGXact(), LookupGXactBySubid(), LWLockReleaseAll(), LWLockReleaseClearVar(), MarkAsPrepared(), MarkAsPreparing(), multixact_redo(), MultiXactAdvanceNextMXact(), MultiXactGetCheckptMulti(), MultiXactIdSetOldestMember(), MultiXactIdSetOldestVisible(), MultiXactSetNextMXact(), OnConflict_CheckForSerializationFailure(), PageIsPredicateLocked(), perform_relmap_update(), pg_control_checkpoint(), pg_control_init(), pg_control_recovery(), pg_control_system(), pg_get_replication_slots(), pg_get_shmem_allocations(), pg_get_shmem_allocations_numa(), pg_notification_queue_usage(), pg_show_replication_origin_status(), pg_stat_get_subscription(), pg_stat_statements_internal(), pg_xact_status(), pgaio_worker_die(), pgaio_worker_register(), pgaio_worker_submit_internal(), pgss_shmem_startup(), pgss_store(), pgstat_archiver_reset_all_cb(), pgstat_archiver_snapshot_cb(), pgstat_bgwriter_reset_all_cb(), pgstat_bgwriter_snapshot_cb(), pgstat_build_snapshot(), pgstat_checkpointer_reset_all_cb(), pgstat_checkpointer_snapshot_cb(), pgstat_fetch_replslot(), pgstat_io_flush_cb(), pgstat_io_reset_all_cb(), pgstat_io_snapshot_cb(), pgstat_reset_matching_entries(), pgstat_reset_replslot(), pgstat_reset_slru_counter_internal(), pgstat_slru_flush_cb(), pgstat_slru_snapshot_cb(), pgstat_unlock_entry(), pgstat_wal_flush_cb(), pgstat_wal_reset_all_cb(), pgstat_wal_snapshot_cb(), PostPrepare_Locks(), PostPrepare_MultiXact(), PostPrepare_Twophase(), PreCommit_CheckForSerializationFailure(), PreCommit_Notify(), predicatelock_twophase_recover(), PredicateLockPageSplit(), PredicateLockTwoPhaseFinish(), PrefetchSharedBuffer(), PrescanPreparedTransactions(), ProcArrayAdd(), ProcArrayApplyRecoveryInfo(), ProcArrayApplyXidAssignment(), ProcArrayClearTransaction(), ProcArrayEndTransaction(), ProcArrayGetReplicationSlotXmin(), ProcArrayGroupClearXid(), ProcArrayInstallImportedXmin(), ProcArrayInstallRestoredXmin(), 
ProcArrayRemove(), ProcArraySetReplicationSlotXmin(), ProcessSequencesForSync(), ProcessSyncingTablesForApply(), ProcKill(), ProcNumberGetTransactionIds(), ProcSleep(), ReachedEndOfBackup(), read_relmap_file(), ReadMultiXactIdRange(), ReadNextFullTransactionId(), ReadNextMultiXactId(), ReadReplicationSlot(), RecordNewMultiXact(), RecoverPreparedTransactions(), RegisterDynamicBackgroundWorker(), RegisterPredicateLockingXid(), RelationCacheInitFilePostInvalidate(), RelationMapCopy(), RelationMapFinishBootstrap(), ReleaseOneSerializableXact(), ReleasePredicateLocks(), relmap_redo(), RemoveScratchTarget(), ReplicationSlotAcquire(), ReplicationSlotCleanup(), ReplicationSlotCreate(), ReplicationSlotDropPtr(), ReplicationSlotName(), ReplicationSlotRelease(), ReplicationSlotReserveWal(), ReplicationSlotsComputeLogicalRestartLSN(), ReplicationSlotsComputeRequiredLSN(), ReplicationSlotsComputeRequiredXmin(), ReplicationSlotsCountDBSlots(), ReplicationSlotsDropDBSlots(), replorigin_advance(), replorigin_get_progress(), replorigin_session_advance(), replorigin_session_get_progress(), replorigin_session_reset_internal(), replorigin_session_setup(), replorigin_state_clear(), RequestDisableLogicalDecoding(), ResetInstallXLogFileSegmentActive(), resize(), RestoreScratchTarget(), restoreTwoPhaseData(), SaveSlotToPath(), SearchNamedReplicationSlot(), SerialAdd(), SerialGetMinConflictCommitSeqNo(), SerialInit(), SerialSetActiveSerXmin(), set_indexsafe_procflags(), set_val_in_shmem(), SetCommitTsLimit(), SetInstallXLogFileSegmentActive(), SetMultiXactIdLimit(), SetNextObjectId(), SetOldestOffset(), SetTransactionIdLimit(), SetXidCommitTsInPage(), SharedInvalBackendInit(), ShmemInitStruct(), SICleanupQueue(), SIGetDataEntries(), SignalBackends(), SignalVirtualTransaction(), SIInsertDataEntries(), SimpleLruReadPage(), SimpleLruReadPage_ReadOnly(), SimpleLruTruncate(), SimpleLruWaitIO(), SimpleLruWriteAll(), SimpleLruZeroAndWritePage(), SlruDeleteSegment(), SlruInternalWritePage(), SnapBuildInitialSnapshot(), ss_get_location(), ss_report_location(), StandbyRecoverPreparedTransactions(), StandbySlotsHaveCaughtup(), StartupDecodingContext(), StartupSUBTRANS(), StartupXLOG(), sts_parallel_scan_next(), SubTransGetParent(), SubTransSetParent(), SummarizeOldestCommittedSxact(), SummarizeWAL(), SwitchIntoArchiveRecovery(), synchronize_one_slot(), SyncRepCancelWait(), SyncRepCleanupAtProcExit(), SyncRepReleaseWaiters(), SyncRepUpdateSyncStandbysDefined(), SyncRepWaitForLSN(), TablespaceCreateDbspace(), tbm_shared_iterate(), TerminateBackgroundWorker(), TerminateBackgroundWorkersForDatabase(), TerminateOtherDBBackends(), test_aio_shmem_startup(), test_custom_stats_fixed_reset_all_cb(), test_custom_stats_fixed_snapshot_cb(), test_custom_stats_fixed_update(), test_slru_page_exists(), test_slru_page_read(), test_slru_page_readonly(), test_slru_page_write(), TransactionGroupUpdateXidStatus(), TransactionIdGetCommitTsData(), TransactionIdGetStatus(), TransactionIdIsInProgress(), TransactionIdSetPageStatus(), TransactionTreeSetCommitTsData(), TransferPredicateLocksToNewTarget(), TrimCLOG(), TrimMultiXact(), TruncateMultiXact(), TwoPhaseGetGXact(), TwoPhaseGetOldestXidInCommit(), TwoPhaseGetXidByVirtualXID(), update_cached_xid_range(), update_synced_slots_inactive_since(), UpdateLogicalDecodingStatusEndOfRecovery(), UpdateMinRecoveryPoint(), vac_truncate_clog(), vacuum_rel(), VacuumUpdateCosts(), VirtualXactLock(), VirtualXactLockTableCleanup(), VirtualXactLockTableInsert(), wait_for_table_state_change(), 
wait_for_worker_state_change(), WaitEventCustomNew(), WaitForReplicationWorkerAttach(), WaitForWalSummarization(), wakeupWaiters(), WakeupWalSummarizer(), WalSummarizerMain(), WalSummarizerShutdown(), write_relcache_init_file(), xact_redo(), XidCacheRemoveRunningXids(), xlog_redo(), XLogBackgroundFlush(), XLogFlush(), XLogNeedsFlush(), and XLogReportParameters().

◆ LWLockReleaseAll()

◆ LWLockReleaseClearVar()

void LWLockReleaseClearVar ( LWLock lock,
pg_atomic_uint64 valptr,
uint64  val 
)

Definition at line 1866 of file lwlock.c.

1867{
1868 /*
1869 * Note that pg_atomic_exchange_u64 is a full barrier, so we're guaranteed
1870 * that the variable is updated before releasing the lock.
1871 */
1872 pg_atomic_exchange_u64(valptr, val);
1873
1874 LWLockRelease(lock);
1875}

References LWLockRelease(), pg_atomic_exchange_u64(), and val.

Referenced by WALInsertLockRelease().
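
This is the release half of the protected-variable protocol (see LWLockUpdateVar() and LWLockWaitForVar() below). A one-line sketch with hypothetical slot/progress names:

/* Reset the published value and release the lock in one step; the atomic
 * exchange is a full barrier, so no waiter can observe the lock as free
 * while still reading the old value. */
LWLockReleaseClearVar(&slot->lock, &slot->progress, 0);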

◆ LWLockReportWaitEnd()

static void LWLockReportWaitEnd ( void  )
inlinestatic

Definition at line 728 of file lwlock.c.

729{
730 pgstat_report_wait_end();
731}

References pgstat_report_wait_end().

Referenced by LWLockAcquire(), LWLockAcquireOrWait(), and LWLockWaitForVar().

◆ LWLockReportWaitStart()

static void LWLockReportWaitStart ( LWLock lock)
inlinestatic

◆ LWLockShmemSize()

Size LWLockShmemSize ( void  )

Definition at line 397 of file lwlock.c.

398{
399 Size size;
400 int numLocks = NUM_FIXED_LWLOCKS;
401
402 /*
403 * If re-initializing shared memory, the request array will no longer be
404 * accessible, so switch to the copy in postmaster's local memory. We'll
405 * copy it back into shared memory later when CreateLWLocks() is called
406 * again.
407 */
410
411 /* Calculate total number of locks needed in the main array. */
412 numLocks += NumLWLocksForNamedTranches();
413
414 /* Space for dynamic allocation counter. */
415 size = MAXALIGN(sizeof(int));
416
417 /* Space for named tranches. */
418 size = add_size(size, mul_size(MAX_NAMED_TRANCHES, sizeof(char *)));
419 size = add_size(size, mul_size(MAX_NAMED_TRANCHES, NAMEDATALEN));
420
421 /*
422 * Make space for named tranche requests. This is done for the benefit of
423 * EXEC_BACKEND builds, which otherwise wouldn't be able to call
424 * GetNamedLWLockTranche() outside postmaster.
425 */
426 size = add_size(size, mul_size(NamedLWLockTrancheRequests,
427 sizeof(NamedLWLockTrancheRequest)));
428
429 /* Space for the LWLock array, plus room for cache line alignment. */
430 size = add_size(size, LWLOCK_PADDED_SIZE);
431 size = add_size(size, mul_size(numLocks, sizeof(LWLockPadded)));
432
433 return size;
434}

References add_size(), LocalNamedLWLockTrancheRequestArray, LWLOCK_PADDED_SIZE, MAX_NAMED_TRANCHES, MAXALIGN, mul_size(), NAMEDATALEN, NamedLWLockTrancheRequestArray, NamedLWLockTrancheRequests, NUM_FIXED_LWLOCKS, and NumLWLocksForNamedTranches().

Referenced by CalculateShmemSize(), and CreateLWLocks().

◆ LWLockUpdateVar()

void LWLockUpdateVar ( LWLock lock,
pg_atomic_uint64 valptr,
uint64  val 
)

Definition at line 1728 of file lwlock.c.

1729{
1730 proclist_head wakeup;
1731 proclist_mutable_iter iter;
1732
1733 PRINT_LWDEBUG("LWLockUpdateVar", lock, LW_EXCLUSIVE);
1734
1735 /*
1736 * Note that pg_atomic_exchange_u64 is a full barrier, so we're guaranteed
1737 * that the variable is updated before waking up waiters.
1738 */
1739 pg_atomic_exchange_u64(valptr, val);
1740
1741 proclist_init(&wakeup);
1742
1743 LWLockWaitListLock(lock);
1744
1745 Assert(pg_atomic_read_u32(&lock->state) & LW_VAL_EXCLUSIVE);
1746
1747 /*
1748 * See if there are any LW_WAIT_UNTIL_FREE waiters that need to be woken
1749 * up. They are always in the front of the queue.
1750 */
1751 proclist_foreach_modify(iter, &lock->waiters, lwWaitLink)
1752 {
1753 PGPROC *waiter = GetPGProcByNumber(iter.cur);
1754
1755 if (waiter->lwWaitMode != LW_WAIT_UNTIL_FREE)
1756 break;
1757
1758 proclist_delete(&lock->waiters, iter.cur, lwWaitLink);
1759 proclist_push_tail(&wakeup, iter.cur, lwWaitLink);
1760
1761 /* see LWLockWakeup() */
1762 Assert(waiter->lwWaiting == LW_WS_WAITING);
1763 waiter->lwWaiting = LW_WS_PENDING_WAKEUP;
1764 }
1765
1766 /* We are done updating shared state of the lock itself. */
1767 LWLockWaitListUnlock(lock);
1768
1769 /*
1770 * Awaken any waiters I removed from the queue.
1771 */
1772 proclist_foreach_modify(iter, &wakeup, lwWaitLink)
1773 {
1774 PGPROC *waiter = GetPGProcByNumber(iter.cur);
1775
1776 proclist_delete(&wakeup, iter.cur, lwWaitLink);
1777 /* check comment in LWLockWakeup() about this barrier */
1778 pg_write_barrier();
1779 waiter->lwWaiting = LW_WS_NOT_WAITING;
1780 PGSemaphoreUnlock(waiter->sem);
1781 }
1782}

References Assert, proclist_mutable_iter::cur, GetPGProcByNumber, LW_EXCLUSIVE, LW_VAL_EXCLUSIVE, LW_WAIT_UNTIL_FREE, LW_WS_NOT_WAITING, LW_WS_PENDING_WAKEUP, LW_WS_WAITING, LWLockWaitListLock(), LWLockWaitListUnlock(), PGPROC::lwWaiting, PGPROC::lwWaitMode, pg_atomic_exchange_u64(), pg_atomic_read_u32(), pg_write_barrier, PGSemaphoreUnlock(), PRINT_LWDEBUG, proclist_delete, proclist_foreach_modify, proclist_init(), proclist_push_tail, PGPROC::sem, LWLock::state, val, LWLock::waiters, and wakeup.

Referenced by WALInsertLockAcquireExclusive(), and WALInsertLockUpdateInsertingAt().
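
This is the holder side of the protocol, modeled on WAL insertion: an exclusive holder republishes its progress so LW_WAIT_UNTIL_FREE waiters can continue without the lock being released. Sketch, with hypothetical slot/progress names:

/* Caller must hold slot->lock in LW_EXCLUSIVE mode. Publishes the new
 * value and wakes any LW_WAIT_UNTIL_FREE waiters queued on the lock. */
LWLockUpdateVar(&slot->lock, &slot->progress, new_position);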

◆ LWLockWaitForVar()

bool LWLockWaitForVar ( LWLock lock,
pg_atomic_uint64 valptr,
uint64  oldval,
uint64 newval 
)

Definition at line 1592 of file lwlock.c.

1594{
1595 PGPROC *proc = MyProc;
1596 int extraWaits = 0;
1597 bool result = false;
1598#ifdef LWLOCK_STATS
1599 lwlock_stats *lwstats;
1600
1601 lwstats = get_lwlock_stats_entry(lock);
1602#endif
1603
1604 PRINT_LWDEBUG("LWLockWaitForVar", lock, LW_WAIT_UNTIL_FREE);
1605
1606 /*
1607 * Lock out cancel/die interrupts while we sleep on the lock. There is no
1608 * cleanup mechanism to remove us from the wait queue if we got
1609 * interrupted.
1610 */
1611 HOLD_INTERRUPTS();
1612
1613 /*
1614 * Loop here to check the lock's status after each time we are signaled.
1615 */
1616 for (;;)
1617 {
1618 bool mustwait;
1619
1620 mustwait = LWLockConflictsWithVar(lock, valptr, oldval, newval,
1621 &result);
1622
1623 if (!mustwait)
1624 break; /* the lock was free or value didn't match */
1625
1626 /*
1627 * Add myself to wait queue. Note that this is racy, somebody else
1628 * could wakeup before we're finished queuing. NB: We're using nearly
1629 * the same twice-in-a-row lock acquisition protocol as
1630 * LWLockAcquire(). Check its comments for details. The only
1631 * difference is that we also have to check the variable's values when
1632 * checking the state of the lock.
1633 */
1634 LWLockQueueSelf(lock, LW_WAIT_UNTIL_FREE);
1635
1636 /*
1637 * Clear LW_FLAG_WAKE_IN_PROGRESS flag, to make sure we get woken up
1638 * as soon as the lock is released.
1639 */
1640 pg_atomic_fetch_and_u32(&lock->state, ~LW_FLAG_WAKE_IN_PROGRESS);
1641
1642 /*
1643 * We're now guaranteed to be woken up if necessary. Recheck the lock
1644 * and variables state.
1645 */
1646 mustwait = LWLockConflictsWithVar(lock, valptr, oldval, newval,
1647 &result);
1648
1649 /* Ok, no conflict after we queued ourselves. Undo queueing. */
1650 if (!mustwait)
1651 {
1652 LOG_LWDEBUG("LWLockWaitForVar", lock, "free, undoing queue");
1653
1654 LWLockDequeueSelf(lock);
1655 break;
1656 }
1657
1658 /*
1659 * Wait until awakened.
1660 *
1661 * It is possible that we get awakened for a reason other than being
1662 * signaled by LWLockRelease. If so, loop back and wait again. Once
1663 * we've gotten the LWLock, re-increment the sema by the number of
1664 * additional signals received.
1665 */
1666 LOG_LWDEBUG("LWLockWaitForVar", lock, "waiting");
1667
1668#ifdef LWLOCK_STATS
1669 lwstats->block_count++;
1670#endif
1671
1675
1676 for (;;)
1677 {
1678 PGSemaphoreLock(proc->sem);
1679 if (proc->lwWaiting == LW_WS_NOT_WAITING)
1680 break;
1681 extraWaits++;
1682 }
1683
1684#ifdef LOCK_DEBUG
1685 {
1686 /* not waiting anymore */
1688
1690 }
1691#endif
1692
1696
1697 LOG_LWDEBUG("LWLockWaitForVar", lock, "awakened");
1698
1699 /* Now loop back and check the status of the lock again. */
1700 }
1701
1702 /*
1703 * Fix the process wait semaphore's count for any absorbed wakeups.
1704 */
1705 while (extraWaits-- > 0)
1706 PGSemaphoreUnlock(proc->sem);
1707
1708 /*
1709 * Now okay to allow cancel/die interrupts.
1710 */
1711 RESUME_INTERRUPTS();
1712
1713 return result;
1714}

References Assert, HOLD_INTERRUPTS, LOG_LWDEBUG, LW_EXCLUSIVE, LW_FLAG_WAKE_IN_PROGRESS, LW_WAIT_UNTIL_FREE, LW_WS_NOT_WAITING, LWLockConflictsWithVar(), LWLockDequeueSelf(), LWLockQueueSelf(), LWLockReportWaitEnd(), LWLockReportWaitStart(), PGPROC::lwWaiting, MAX_BACKENDS, MyProc, newval, pg_atomic_fetch_and_u32(), pg_atomic_fetch_sub_u32(), PG_USED_FOR_ASSERTS_ONLY, PGSemaphoreLock(), PGSemaphoreUnlock(), PRINT_LWDEBUG, RESUME_INTERRUPTS, PGPROC::sem, LWLock::state, and T_NAME.

Referenced by WaitXLogInsertionsToFinish().
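
The waiter side, mirroring WaitXLogInsertionsToFinish(): sleep until either the lock is free (returns true) or the protected variable has moved past the value we last saw (returns false, with *newval filled in). A sketch with hypothetical names:

uint64      seen = 0;           /* last progress value we observed */

while (seen < target)
{
    if (LWLockWaitForVar(&slot->lock, &slot->progress, seen, &seen))
        break;                  /* lock is free: the holder is done */

    /* Otherwise 'seen' now holds the holder's newer progress value. */
}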

◆ LWLockWaitListLock()

static void LWLockWaitListLock ( LWLock lock)
static

Definition at line 861 of file lwlock.c.

862{
863 uint32 old_state;
864#ifdef LWLOCK_STATS
865 lwlock_stats *lwstats;
866 uint32 delays = 0;
867
868 lwstats = get_lwlock_stats_entry(lock);
869#endif
870
871 while (true)
872 {
873 /*
874 * Always try once to acquire the lock directly, without setting up
875 * the spin-delay infrastructure. The work necessary for that shows up
876 * in profiles and is rarely necessary.
877 */
878 old_state = pg_atomic_fetch_or_u32(&lock->state, LW_FLAG_LOCKED);
879 if (likely(!(old_state & LW_FLAG_LOCKED)))
880 break; /* got lock */
881
882 /* and then spin without atomic operations until lock is released */
883 {
884 SpinDelayStatus delayStatus;
885
886 init_local_spin_delay(&delayStatus);
887
888 while (old_state & LW_FLAG_LOCKED)
889 {
890 perform_spin_delay(&delayStatus);
891 old_state = pg_atomic_read_u32(&lock->state);
892 }
893#ifdef LWLOCK_STATS
894 delays += delayStatus.delays;
895#endif
896 finish_spin_delay(&delayStatus);
897 }
898
899 /*
900 * Retry. The lock might obviously already be re-acquired by the time
901 * we're attempting to get it again.
902 */
903 }
904
905#ifdef LWLOCK_STATS
906 lwstats->spin_delay_count += delays;
907#endif
908}

References finish_spin_delay(), init_local_spin_delay, likely, LW_FLAG_LOCKED, perform_spin_delay(), pg_atomic_fetch_or_u32(), pg_atomic_read_u32(), and LWLock::state.

Referenced by LWLockDequeueSelf(), LWLockQueueSelf(), LWLockUpdateVar(), and LWLockWakeup().

◆ LWLockWaitListUnlock()

◆ LWLockWakeup()

static void LWLockWakeup ( LWLock lock)
static

Definition at line 930 of file lwlock.c.

931{
932 bool new_wake_in_progress = false;
933 bool wokeup_somebody = false;
934 proclist_head wakeup;
935 proclist_mutable_iter iter;
936
937 proclist_init(&wakeup);
938
939 /* lock wait list while collecting backends to wake up */
940 LWLockWaitListLock(lock);
941
942 proclist_foreach_modify(iter, &lock->waiters, lwWaitLink)
943 {
944 PGPROC *waiter = GetPGProcByNumber(iter.cur);
945
946 if (wokeup_somebody && waiter->lwWaitMode == LW_EXCLUSIVE)
947 continue;
948
949 proclist_delete(&lock->waiters, iter.cur, lwWaitLink);
950 proclist_push_tail(&wakeup, iter.cur, lwWaitLink);
951
952 if (waiter->lwWaitMode != LW_WAIT_UNTIL_FREE)
953 {
954 /*
955 * Prevent additional wakeups until retryer gets to run. Backends
956 * that are just waiting for the lock to become free don't retry
957 * automatically.
958 */
959 new_wake_in_progress = true;
960
961 /*
962 * Don't wakeup (further) exclusive locks.
963 */
964 wokeup_somebody = true;
965 }
966
967 /*
968 * Signal that the process isn't on the wait list anymore. This allows
969 * LWLockDequeueSelf() to remove itself from the waitlist with a
970 * proclist_delete(), rather than having to check if it has been
971 * removed from the list.
972 */
973 Assert(waiter->lwWaiting == LW_WS_WAITING);
974 waiter->lwWaiting = LW_WS_PENDING_WAKEUP;
975
976 /*
977 * Once we've woken up an exclusive lock, there's no point in waking
978 * up anybody else.
979 */
980 if (waiter->lwWaitMode == LW_EXCLUSIVE)
981 break;
982 }
983
985
986 /* unset required flags, and release lock, in one fell swoop */
987 {
988 uint32 old_state;
989 uint32 desired_state;
990
991 old_state = pg_atomic_read_u32(&lock->state);
992 while (true)
993 {
994 desired_state = old_state;
995
996 /* compute desired flags */
997
1000 else
1002
1003 if (proclist_is_empty(&lock->waiters))
1004 desired_state &= ~LW_FLAG_HAS_WAITERS;
1005
1006 desired_state &= ~LW_FLAG_LOCKED; /* release lock */
1007
1010 break;
1011 }
1012 }
1013
1014 /* Awaken any waiters I removed from the queue. */
1015 proclist_foreach_modify(iter, &wakeup, lwWaitLink)
1016 {
1017 PGPROC *waiter = GetPGProcByNumber(iter.cur);
1018
1019 LOG_LWDEBUG("LWLockRelease", lock, "release waiter");
1020 proclist_delete(&wakeup, iter.cur, lwWaitLink);
1021
1022 /*
1023 * Guarantee that lwWaiting being unset only becomes visible once the
1024 * unlink from the list has completed. Otherwise the target backend
1025 * could be woken up for some other reason and enqueue for a new lock - if
1026 * that happens before the list unlink happens, the list would end up
1027 * being corrupted.
1028 *
1029 * The barrier pairs with the LWLockWaitListLock() when enqueuing for
1030 * another lock.
1031 */
1032 pg_write_barrier();
1033 waiter->lwWaiting = LW_WS_NOT_WAITING;
1034 PGSemaphoreUnlock(waiter->sem);
1035 }
1036}

References Assert, proclist_mutable_iter::cur, GetPGProcByNumber, LOG_LWDEBUG, LW_EXCLUSIVE, LW_FLAG_HAS_WAITERS, LW_FLAG_WAKE_IN_PROGRESS, LW_WAIT_UNTIL_FREE, LW_WS_NOT_WAITING, LW_WS_PENDING_WAKEUP, LW_WS_WAITING, LWLockWaitListLock(), PGPROC::lwWaiting, PGPROC::lwWaitMode, pg_atomic_compare_exchange_u32(), pg_atomic_read_u32(), pg_write_barrier, PGSemaphoreUnlock(), proclist_delete, proclist_foreach_modify, proclist_init(), proclist_is_empty(), proclist_push_tail, PGPROC::sem, LWLock::state, LWLock::waiters, and wakeup.

Referenced by LWLockRelease().

◆ NumLWLocksForNamedTranches()

static int NumLWLocksForNamedTranches ( void  )
static

Definition at line 382 of file lwlock.c.

383{
384 int numLocks = 0;
385 int i;
386
387 for (i = 0; i < NamedLWLockTrancheRequests; i++)
388 numLocks += NamedLWLockTrancheRequestArray[i].num_lwlocks;
389
390 return numLocks;
391}

References i, NamedLWLockTrancheRequestArray, and NamedLWLockTrancheRequests.

Referenced by LWLockShmemSize().

◆ RequestNamedLWLockTranche()

void RequestNamedLWLockTranche ( const char tranche_name,
int  num_lwlocks 
)

Definition at line 649 of file lwlock.c.

650{
653
654 if (!process_shmem_requests_in_progress)
655 elog(FATAL, "cannot request additional LWLocks outside shmem_request_hook");
656
657 if (!tranche_name)
660 errmsg("tranche name cannot be NULL")));
661
662 if (strlen(tranche_name) >= NAMEDATALEN)
665 errmsg("tranche name too long"),
666 errdetail("LWLock tranche names must be no longer than %d bytes.",
667 NAMEDATALEN - 1)));
668
669 if (NamedLWLockTrancheRequestArray == NULL)
670 {
675 * sizeof(NamedLWLockTrancheRequest));
676 }
677
679 {
681
684 i * sizeof(NamedLWLockTrancheRequest));
686 }
687
688 request = &NamedLWLockTrancheRequestArray[NamedLWLockTrancheRequests];
689 strlcpy(request->tranche_name, tranche_name, NAMEDATALEN);
690 request->num_lwlocks = num_lwlocks;
691 NamedLWLockTrancheRequests++;
692}

References elog, ereport, errcode(), errdetail(), errmsg(), ERROR, FATAL, i, MemoryContextAlloc(), NAMEDATALEN, NamedLWLockTrancheRequestArray, NamedLWLockTrancheRequests, pg_nextpower2_32(), process_shmem_requests_in_progress, repalloc(), strlcpy(), and TopMemoryContext.

Referenced by pgss_shmem_request(), and test_lwlock_tranches_shmem_request().
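
The FATAL check above means this must run inside shmem_request_hook. A sketch of the request side of the named-tranche workflow, using the standard extension hook-chaining pattern (all "my_ext" names are hypothetical):

static shmem_request_hook_type prev_shmem_request_hook = NULL;

static void
my_ext_shmem_request(void)
{
    if (prev_shmem_request_hook)
        prev_shmem_request_hook();

    /* One tranche named "my_ext" containing 4 LWLocks; fetch them later
     * with GetNamedLWLockTranche("my_ext") at shmem-startup time. */
    RequestNamedLWLockTranche("my_ext", 4);
}

void
_PG_init(void)
{
    prev_shmem_request_hook = shmem_request_hook;
    shmem_request_hook = my_ext_shmem_request;
}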

◆ StaticAssertDecl() [1/4]

StaticAssertDecl(((MAX_BACKENDS + 1) & MAX_BACKENDS) == 0,
                 "MAX_BACKENDS + 1 needs to be a power of 2")

◆ StaticAssertDecl() [2/4]

StaticAssertDecl((LW_VAL_EXCLUSIVE & LW_FLAG_MASK) == 0,
                 "LW_VAL_EXCLUSIVE and LW_FLAG_MASK overlap")

◆ StaticAssertDecl() [3/4]

StaticAssertDecl((MAX_BACKENDS & LW_FLAG_MASK) == 0,
                 "MAX_BACKENDS and LW_FLAG_MASK overlap")

◆ StaticAssertDecl() [4/4]

StaticAssertDecl(lengthof(BuiltinTrancheNames) == LWTRANCHE_FIRST_USER_DEFINED,
                 "missing entries in BuiltinTrancheNames[]")

Variable Documentation

◆ BuiltinTrancheNames

const char* const BuiltinTrancheNames[]
static
Initial value:
= {
#define PG_LWLOCK(id, lockname)
#define PG_LWLOCKTRANCHE(id, lockname)
}

Definition at line 135 of file lwlock.c.

135 {
136#define PG_LWLOCK(id, lockname) [id] = CppAsString(lockname),
137#define PG_LWLOCKTRANCHE(id, lockname) [LWTRANCHE_##id] = CppAsString(lockname),
138#include "storage/lwlocklist.h"
139#undef PG_LWLOCK
140#undef PG_LWLOCKTRANCHE
141};

Referenced by GetLWTrancheName().

◆ held_lwlocks

◆ LocalLWLockCounter

int LocalLWLockCounter
static

Definition at line 202 of file lwlock.c.

Referenced by GetLWTrancheName(), and LWLockNewTrancheId().

◆ LocalNamedLWLockTrancheRequestArray

NamedLWLockTrancheRequest* LocalNamedLWLockTrancheRequestArray = NULL
static

Definition at line 196 of file lwlock.c.

Referenced by CreateLWLocks(), and LWLockShmemSize().

◆ LWLockCounter

int* LWLockCounter = NULL

Definition at line 199 of file lwlock.c.

Referenced by CreateLWLocks(), GetLWTrancheName(), and LWLockNewTrancheId().

◆ LWLockTrancheNames

char** LWLockTrancheNames = NULL

Definition at line 154 of file lwlock.c.

Referenced by CreateLWLocks(), GetLWTrancheName(), and LWLockNewTrancheId().

◆ MainLWLockArray

◆ NamedLWLockTrancheRequestArray

◆ NamedLWLockTrancheRequests

◆ num_held_lwlocks