PostgreSQL Source Code git master
Loading...
Searching...
No Matches
lwlock.c File Reference
#include "postgres.h"
#include "miscadmin.h"
#include "pg_trace.h"
#include "pgstat.h"
#include "port/pg_bitutils.h"
#include "storage/proc.h"
#include "storage/proclist.h"
#include "storage/procnumber.h"
#include "storage/spin.h"
#include "storage/subsystems.h"
#include "utils/memutils.h"
#include "utils/wait_event.h"
#include "storage/lwlocklist.h"
Include dependency graph for lwlock.c:

Go to the source code of this file.

Data Structures

struct  LWLockHandle
 
struct  LWLockTrancheShmemData
 
struct  NamedLWLockTrancheRequest
 

Macros

#define LW_FLAG_HAS_WAITERS   ((uint32) 1 << 31)
 
#define LW_FLAG_WAKE_IN_PROGRESS   ((uint32) 1 << 30)
 
#define LW_FLAG_LOCKED   ((uint32) 1 << 29)
 
#define LW_FLAG_BITS   3
 
#define LW_FLAG_MASK   (((1<<LW_FLAG_BITS)-1)<<(32-LW_FLAG_BITS))
 
#define LW_VAL_EXCLUSIVE   (MAX_BACKENDS + 1)
 
#define LW_VAL_SHARED   1
 
#define LW_SHARED_MASK   MAX_BACKENDS
 
#define LW_LOCK_MASK   (MAX_BACKENDS | LW_VAL_EXCLUSIVE)
 
#define PG_LWLOCK(id, lockname)   [id] = CppAsString(lockname),
 
#define PG_LWLOCKTRANCHE(id, lockname)   [LWTRANCHE_##id] = CppAsString(lockname),
 
#define MAX_SIMUL_LWLOCKS   200
 
#define MAX_USER_DEFINED_TRANCHES   256
 
#define T_NAME(lock)    GetLWTrancheName((lock)->tranche)
 
#define PRINT_LWDEBUG(a, b, c)   ((void)0)
 
#define LOG_LWDEBUG(a, b, c)   ((void)0)
 

Typedefs

typedef struct LWLockHandle LWLockHandle
 
typedef struct LWLockTrancheShmemData LWLockTrancheShmemData
 
typedef struct NamedLWLockTrancheRequest NamedLWLockTrancheRequest
 

Functions

 StaticAssertDecl (((MAX_BACKENDS + 1) & MAX_BACKENDS) == 0, "MAX_BACKENDS + 1 needs to be a power of 2")
 
 StaticAssertDecl ((MAX_BACKENDS & LW_FLAG_MASK) == 0, "MAX_BACKENDS and LW_FLAG_MASK overlap")
 
 StaticAssertDecl ((LW_VAL_EXCLUSIVE & LW_FLAG_MASK) == 0, "LW_VAL_EXCLUSIVE and LW_FLAG_MASK overlap")
 
 StaticAssertDecl (lengthof(BuiltinTrancheNames)==LWTRANCHE_FIRST_USER_DEFINED, "missing entries in BuiltinTrancheNames[]")
 
static void LWLockShmemRequest (void *arg)
 
static void LWLockShmemInit (void *arg)
 
static void LWLockReportWaitStart (LWLock *lock)
 
static void LWLockReportWaitEnd (void)
 
static const char * GetLWTrancheName (uint16 trancheId)
 
static int NumLWLocksForNamedTranches (void)
 
void InitLWLockAccess (void)
 
LWLockPadded * GetNamedLWLockTranche (const char *tranche_name)
 
int LWLockNewTrancheId (const char *name)
 
void RequestNamedLWLockTranche (const char *tranche_name, int num_lwlocks)
 
void LWLockInitialize (LWLock *lock, int tranche_id)
 
const char * GetLWLockIdentifier (uint32 classId, uint16 eventId)
 
static bool LWLockAttemptLock (LWLock *lock, LWLockMode mode)
 
static void LWLockWaitListLock (LWLock *lock)
 
static void LWLockWaitListUnlock (LWLock *lock)
 
static void LWLockWakeup (LWLock *lock)
 
static void LWLockQueueSelf (LWLock *lock, LWLockMode mode)
 
static void LWLockDequeueSelf (LWLock *lock)
 
bool LWLockAcquire (LWLock *lock, LWLockMode mode)
 
bool LWLockConditionalAcquire (LWLock *lock, LWLockMode mode)
 
bool LWLockAcquireOrWait (LWLock *lock, LWLockMode mode)
 
static bool LWLockConflictsWithVar (LWLock *lock, pg_atomic_uint64 *valptr, uint64 oldval, uint64 *newval, bool *result)
 
bool LWLockWaitForVar (LWLock *lock, pg_atomic_uint64 *valptr, uint64 oldval, uint64 *newval)
 
void LWLockUpdateVar (LWLock *lock, pg_atomic_uint64 *valptr, uint64 val)
 
void LWLockRelease (LWLock *lock)
 
void LWLockReleaseClearVar (LWLock *lock, pg_atomic_uint64 *valptr, uint64 val)
 
void LWLockReleaseAll (void)
 
bool LWLockHeldByMe (LWLock *lock)
 
bool LWLockAnyHeldByMe (LWLock *lock, int nlocks, size_t stride)
 
bool LWLockHeldByMeInMode (LWLock *lock, LWLockMode mode)
 

Variables

static const char *const BuiltinTrancheNames []
 
LWLockPadded * MainLWLockArray = NULL
 
static int num_held_lwlocks = 0
 
static LWLockHandle held_lwlocks [MAX_SIMUL_LWLOCKS]
 
static LWLockTrancheShmemData * LWLockTranches
 
static int LocalNumUserDefinedTranches
 
static List * NamedLWLockTrancheRequests = NIL
 
static int num_main_array_locks
 
const ShmemCallbacks LWLockCallbacks
 

Macro Definition Documentation

◆ LOG_LWDEBUG

#define LOG_LWDEBUG (   a,
  b,
  c 
)    ((void)0)

Definition at line 294 of file lwlock.c.

◆ LW_FLAG_BITS

#define LW_FLAG_BITS   3

Definition at line 99 of file lwlock.c.

◆ LW_FLAG_HAS_WAITERS

#define LW_FLAG_HAS_WAITERS   ((uint32) 1 << 31)

Definition at line 96 of file lwlock.c.

◆ LW_FLAG_LOCKED

#define LW_FLAG_LOCKED   ((uint32) 1 << 29)

Definition at line 98 of file lwlock.c.

◆ LW_FLAG_MASK

#define LW_FLAG_MASK   (((1<<LW_FLAG_BITS)-1)<<(32-LW_FLAG_BITS))

Definition at line 100 of file lwlock.c.

◆ LW_FLAG_WAKE_IN_PROGRESS

#define LW_FLAG_WAKE_IN_PROGRESS   ((uint32) 1 << 30)

Definition at line 97 of file lwlock.c.

◆ LW_LOCK_MASK

#define LW_LOCK_MASK   (MAX_BACKENDS | LW_VAL_EXCLUSIVE)

Definition at line 108 of file lwlock.c.

◆ LW_SHARED_MASK

#define LW_SHARED_MASK   MAX_BACKENDS

Definition at line 107 of file lwlock.c.

◆ LW_VAL_EXCLUSIVE

#define LW_VAL_EXCLUSIVE   (MAX_BACKENDS + 1)

Definition at line 103 of file lwlock.c.

◆ LW_VAL_SHARED

#define LW_VAL_SHARED   1

Definition at line 104 of file lwlock.c.

◆ MAX_SIMUL_LWLOCKS

#define MAX_SIMUL_LWLOCKS   200

Definition at line 157 of file lwlock.c.

◆ MAX_USER_DEFINED_TRANCHES

#define MAX_USER_DEFINED_TRANCHES   256

Definition at line 170 of file lwlock.c.

◆ PG_LWLOCK

#define PG_LWLOCK (   id,
  lockname 
)    [id] = CppAsString(lockname),

◆ PG_LWLOCKTRANCHE

#define PG_LWLOCKTRANCHE (   id,
  lockname 
)    [LWTRANCHE_##id] = CppAsString(lockname),

◆ PRINT_LWDEBUG

#define PRINT_LWDEBUG (   a,
  b,
  c 
)    ((void)0)

Definition at line 293 of file lwlock.c.

◆ T_NAME

#define T_NAME (   lock)     GetLWTrancheName((lock)->tranche)

Definition at line 229 of file lwlock.c.

233{
234 int tranche;
235 void *instance;
237
238typedef struct lwlock_stats
239{
243 int block_count;
247
248static HTAB *lwlock_stats_htab;
250#endif
251
252#ifdef LOCK_DEBUG
253bool Trace_lwlocks = false;
254
255inline static void
256PRINT_LWDEBUG(const char *where, LWLock *lock, LWLockMode mode)
257{
258 /* hide statement & context here, otherwise the log is just too verbose */
259 if (Trace_lwlocks)
260 {
262
263 ereport(LOG,
264 (errhidestmt(true),
265 errhidecontext(true),
266 errmsg_internal("%d: %s(%s %p): excl %u shared %u haswaiters %u waiters %u waking %d",
267 MyProcPid,
268 where, T_NAME(lock), lock,
269 (state & LW_VAL_EXCLUSIVE) != 0,
271 (state & LW_FLAG_HAS_WAITERS) != 0,
272 pg_atomic_read_u32(&lock->nwaiters),
274 }
275}
276
277inline static void
278LOG_LWDEBUG(const char *where, LWLock *lock, const char *msg)
279{
280 /* hide statement & context here, otherwise the log is just too verbose */
281 if (Trace_lwlocks)
282 {
283 ereport(LOG,
284 (errhidestmt(true),
285 errhidecontext(true),
286 errmsg_internal("%s(%s %p): %s", where,
287 T_NAME(lock), lock, msg)));
288 }
289}
290
291#else /* not LOCK_DEBUG */
292#define PRINT_LWDEBUG(a,b,c) ((void)0)
293#define LOG_LWDEBUG(a,b,c) ((void)0)
294#endif /* LOCK_DEBUG */
295
296#ifdef LWLOCK_STATS
297
298static void init_lwlock_stats(void);
299static void print_lwlock_stats(int code, Datum arg);
301
302static void
304{
305 HASHCTL ctl;
307 static bool exit_registered = false;
308
309 if (lwlock_stats_cxt != NULL)
311
312 /*
313 * The LWLock stats will be updated within a critical section, which
314 * requires allocating new hash entries. Allocations within a critical
315 * section are normally not allowed because running out of memory would
316 * lead to a PANIC, but LWLOCK_STATS is debugging code that's not normally
317 * turned on in production, so that's an acceptable risk. The hash entries
318 * are small, so the risk of running out of memory is minimal in practice.
319 */
321 "LWLock stats",
324
325 ctl.keysize = sizeof(lwlock_stats_key);
326 ctl.entrysize = sizeof(lwlock_stats);
327 ctl.hcxt = lwlock_stats_cxt;
328 lwlock_stats_htab = hash_create("lwlock stats", 16384, &ctl,
330 if (!exit_registered)
331 {
333 exit_registered = true;
334 }
335}
336
337static void
339{
340 HASH_SEQ_STATUS scan;
342
344
345 /* Grab an LWLock to keep different backends from mixing reports */
347
348 while ((lwstats = (lwlock_stats *) hash_seq_search(&scan)) != NULL)
349 {
351 "PID %d lwlock %s %p: shacq %u exacq %u blk %u spindelay %u dequeue self %u\n",
352 MyProcPid, GetLWTrancheName(lwstats->key.tranche),
353 lwstats->key.instance, lwstats->sh_acquire_count,
354 lwstats->ex_acquire_count, lwstats->block_count,
355 lwstats->spin_delay_count, lwstats->dequeue_self_count);
356 }
357
359}
360
361static lwlock_stats *
363{
366 bool found;
367
368 /*
369 * During shared memory initialization, the hash table doesn't exist yet.
370 * Stats of that phase aren't very interesting, so just collect operations
371 * on all locks in a single dummy entry.
372 */
373 if (lwlock_stats_htab == NULL)
374 return &lwlock_stats_dummy;
375
376 /* Fetch or create the entry. */
377 MemSet(&key, 0, sizeof(key));
378 key.tranche = lock->tranche;
379 key.instance = lock;
381 if (!found)
382 {
383 lwstats->sh_acquire_count = 0;
384 lwstats->ex_acquire_count = 0;
385 lwstats->block_count = 0;
386 lwstats->dequeue_self_count = 0;
387 lwstats->spin_delay_count = 0;
388 }
389 return lwstats;
390}
391#endif /* LWLOCK_STATS */
392
393
394/*
395 * Compute number of LWLocks required by user-defined tranches requested with
396 * RequestNamedLWLockTranche(). These will be allocated in the main array.
397 */
398static int
400{
401 int numLocks = 0;
402
404 {
405 numLocks += request->num_lwlocks;
406 }
407
408 return numLocks;
409}
410
411/*
412 * Request shmem space for user-defined tranches and the main LWLock array.
413 */
414static void
416{
417 size_t size;
418
419 /* Space for user-defined tranches */
420 ShmemRequestStruct(.name = "LWLock tranches",
421 .size = sizeof(LWLockTrancheShmemData),
422 .ptr = (void **) &LWLockTranches,
423 );
424
425 /* Space for the LWLock array */
427 {
429 size = num_main_array_locks * sizeof(LWLockPadded);
430 }
431 else
433
434 ShmemRequestStruct(.name = "Main LWLock array",
435 .size = size,
436 .ptr = (void **) &MainLWLockArray,
437 );
438}
439
440/*
441 * Initialize shmem space for user-defined tranches and the main LWLock array.
442 */
443static void
444LWLockShmemInit(void *arg)
445{
446 int pos;
447
448 /* Initialize the dynamic-allocation counter for tranches */
450
452
453 /*
454 * Allocate and initialize all LWLocks in the main array. It includes all
455 * LWLocks for built-in tranches and those requested with
456 * RequestNamedLWLockTranche().
457 */
458 pos = 0;
459
460 /* Initialize all individual LWLocks in main array */
461 for (int id = 0; id < NUM_INDIVIDUAL_LWLOCKS; id++)
462 LWLockInitialize(&MainLWLockArray[pos++].lock, id);
463
464 /* Initialize buffer mapping LWLocks in main array */
466 for (int i = 0; i < NUM_BUFFER_PARTITIONS; i++)
468
469 /* Initialize lmgrs' LWLocks in main array */
471 for (int i = 0; i < NUM_LOCK_PARTITIONS; i++)
473
474 /* Initialize predicate lmgrs' LWLocks in main array */
476 for (int i = 0; i < NUM_PREDICATELOCK_PARTITIONS; i++)
478
479 /*
480 * Copy the info about any user-defined tranches into shared memory (so
481 * that other processes can see it), and initialize the requested LWLocks.
482 */
485 {
487
489 request->tranche_name,
492
493 for (int i = 0; i < request->num_lwlocks; i++)
495 }
496
497 /* Cross-check that we agree on the total size with LWLockShmemRequest() */
499}
500
501/*
502 * InitLWLockAccess - initialize backend-local state needed to hold LWLocks
503 */
504void
506{
507#ifdef LWLOCK_STATS
509#endif
510}
511
512/*
513 * GetNamedLWLockTranche - returns the base address of LWLock from the
514 * specified tranche.
515 *
516 * Caller needs to retrieve the requested number of LWLocks starting from
517 * the base lock address returned by this API. This can be used for
518 * tranches that are requested by using RequestNamedLWLockTranche() API.
519 */
521GetNamedLWLockTranche(const char *tranche_name)
522{
526
527 /*
528 * Obtain the position of base address of LWLock belonging to requested
529 * tranche_name in MainLWLockArray. LWLocks for user-defined tranches
530 * requested with RequestNamedLWLockTranche() are placed in
531 * MainLWLockArray after fixed locks.
532 */
533 for (int i = 0; i < LocalNumUserDefinedTranches; i++)
534 {
536 tranche_name) == 0)
537 {
539
540 /*
541 * GetNamedLWLockTranche() should only be used for locks requested
542 * with RequestNamedLWLockTranche(), not those allocated with
543 * LWLockNewTrancheId().
544 */
545 if (lock_pos == -1)
546 elog(ERROR, "requested tranche was not registered with RequestNamedLWLockTranche()");
547 return &MainLWLockArray[lock_pos];
548 }
549 }
550
551 elog(ERROR, "requested tranche is not registered");
552
553 /* just to keep compiler quiet */
554 return NULL;
555}
556
557/*
558 * Allocate a new tranche ID with the provided name.
559 */
560int
561LWLockNewTrancheId(const char *name)
562{
563 int idx;
564
565 if (!name)
568 errmsg("tranche name cannot be NULL")));
569
570 if (strlen(name) >= NAMEDATALEN)
573 errmsg("tranche name too long"),
574 errdetail("LWLock tranche names must be no longer than %d bytes.",
575 NAMEDATALEN - 1)));
576
577 /* The counter and the tranche names are protected by the spinlock */
579
581 {
584 (errmsg("maximum number of tranches already registered"),
585 errdetail("No more than %d tranches may be registered.",
587 }
588
589 /* Allocate an entry in the user_defined array */
591
592 /* update our local copy while we're at it */
594
595 /* Initialize it */
597
598 /* the locks are not in the main array */
600
602
604}
605
606/*
607 * RequestNamedLWLockTranche
608 * Request that extra LWLocks be allocated during postmaster
609 * startup.
610 *
611 * This may only be called via the shmem_request_hook of a library that is
612 * loaded into the postmaster via shared_preload_libraries. Calls from
613 * elsewhere will fail.
614 *
615 * The tranche name will be user-visible as a wait event name, so try to
616 * use a name that fits the style for those.
617 */
618void
619RequestNamedLWLockTranche(const char *tranche_name, int num_lwlocks)
620{
622 MemoryContext oldcontext;
623
625 elog(FATAL, "cannot request additional LWLocks outside shmem_request_hook");
626
627 if (!tranche_name)
630 errmsg("tranche name cannot be NULL")));
631
632 if (strlen(tranche_name) >= NAMEDATALEN)
635 errmsg("tranche name too long"),
636 errdetail("LWLock tranche names must be no longer than %d bytes.",
637 NAMEDATALEN - 1)));
638
641 (errmsg("maximum number of tranches already registered"),
642 errdetail("No more than %d tranches may be registered.",
644
645 /* Check that the name isn't already in use */
647 {
648 if (strcmp(existing->tranche_name, tranche_name) == 0)
649 elog(ERROR, "requested tranche \"%s\" is already registered", tranche_name);
650 }
651
654 else
656
658 strlcpy(request->tranche_name, tranche_name, NAMEDATALEN);
659 request->num_lwlocks = num_lwlocks;
661
662 MemoryContextSwitchTo(oldcontext);
663}
664
665/*
666 * LWLockInitialize - initialize a new lwlock; it's initially unlocked
667 */
668void
669LWLockInitialize(LWLock *lock, int tranche_id)
670{
671 /* verify the tranche_id is valid */
672 (void) GetLWTrancheName(tranche_id);
673
674 pg_atomic_init_u32(&lock->state, 0);
675#ifdef LOCK_DEBUG
676 pg_atomic_init_u32(&lock->nwaiters, 0);
677#endif
678 lock->tranche = tranche_id;
679 proclist_init(&lock->waiters);
680}
681
682/*
683 * Report start of wait event for light-weight locks.
684 *
685 * This function will be used by all the light-weight lock calls which
686 * needs to wait to acquire the lock. This function distinguishes wait
687 * event based on tranche and lock id.
688 */
689static inline void
691{
693}
694
695/*
696 * Report end of wait event for light-weight locks.
697 */
698static inline void
700{
702}
703
704/*
705 * Return the name of an LWLock tranche.
706 */
707static const char *
709{
710 int idx;
711
712 /* Built-in tranche or individual LWLock? */
715
716 /*
717 * It's an extension tranche, so look in LWLockTranches->user_defined.
718 */
720
721 /*
722 * We only ever add new entries to LWLockTranches->user_defined, so most
723 * lookups can avoid taking the spinlock as long as the backend-local
724 * counter (LocalNumUserDefinedTranches) is greater than the requested
725 * tranche ID. Else, we need to first update the backend-local counter
726 * with the spinlock held before attempting the lookup again. In
727 * practice, the latter case is probably rare.
728 */
730 {
734
736 elog(ERROR, "tranche %d is not registered", trancheId);
737 }
738
740}
741
742/*
743 * Return an identifier for an LWLock based on the wait class and event.
744 */
745const char *
747{
748 Assert(classId == PG_WAIT_LWLOCK);
749 /* The event IDs are just tranche numbers. */
751}
752
753/*
754 * Internal function that tries to atomically acquire the lwlock in the passed
755 * in mode.
756 *
757 * This function will not block waiting for a lock to become free - that's the
758 * caller's job.
759 *
760 * Returns true if the lock isn't free and we need to wait.
761 */
762static bool
764{
766
768
769 /*
770 * Read once outside the loop, later iterations will get the newer value
771 * via compare & exchange.
772 */
774
775 /* loop until we've determined whether we could acquire the lock or not */
776 while (true)
777 {
779 bool lock_free;
780
782
783 if (mode == LW_EXCLUSIVE)
784 {
786 if (lock_free)
788 }
789 else
790 {
792 if (lock_free)
794 }
795
796 /*
797 * Attempt to swap in the state we are expecting. If we didn't see
798 * lock to be free, that's just the old value. If we saw it as free,
799 * we'll attempt to mark it acquired. The reason that we always swap
800 * in the value is that this doubles as a memory barrier. We could try
801 * to be smarter and only swap in values if we saw the lock as free,
802 * but benchmark haven't shown it as beneficial so far.
803 *
804 * Retry if the value changed since we last looked at it.
805 */
808 {
809 if (lock_free)
810 {
811 /* Great! Got the lock. */
812#ifdef LOCK_DEBUG
813 if (mode == LW_EXCLUSIVE)
814 lock->owner = MyProc;
815#endif
816 return false;
817 }
818 else
819 return true; /* somebody else has the lock */
820 }
821 }
823}
824
825/*
826 * Lock the LWLock's wait list against concurrent activity.
827 *
828 * NB: even though the wait list is locked, non-conflicting lock operations
829 * may still happen concurrently.
830 *
831 * Time spent holding mutex should be short!
832 */
833static void
835{
837#ifdef LWLOCK_STATS
839 uint32 delays = 0;
840
842#endif
843
844 while (true)
845 {
846 /*
847 * Always try once to acquire the lock directly, without setting up
848 * the spin-delay infrastructure. The work necessary for that shows up
849 * in profiles and is rarely necessary.
850 */
853 break; /* got lock */
854
855 /* and then spin without atomic operations until lock is released */
856 {
858
860
861 while (old_state & LW_FLAG_LOCKED)
862 {
865 }
866#ifdef LWLOCK_STATS
867 delays += delayStatus.delays;
868#endif
870 }
871
872 /*
873 * Retry. The lock might obviously already be re-acquired by the time
874 * we're attempting to get it again.
875 */
876 }
877
878#ifdef LWLOCK_STATS
879 lwstats->spin_delay_count += delays;
880#endif
881}
882
883/*
884 * Unlock the LWLock's wait list.
885 *
886 * Note that it can be more efficient to manipulate flags and release the
887 * locks in a single atomic operation.
888 */
889static void
891{
893
895
897}
898
899/*
900 * Wakeup all the lockers that currently have a chance to acquire the lock.
901 */
902static void
903LWLockWakeup(LWLock *lock)
904{
905 bool new_wake_in_progress = false;
906 bool wokeup_somebody = false;
909
911
912 /* lock wait list while collecting backends to wake up */
913 LWLockWaitListLock(lock);
914
915 proclist_foreach_modify(iter, &lock->waiters, lwWaitLink)
916 {
917 PGPROC *waiter = GetPGProcByNumber(iter.cur);
918
919 if (wokeup_somebody && waiter->lwWaitMode == LW_EXCLUSIVE)
920 continue;
921
922 proclist_delete(&lock->waiters, iter.cur, lwWaitLink);
923 proclist_push_tail(&wakeup, iter.cur, lwWaitLink);
924
925 if (waiter->lwWaitMode != LW_WAIT_UNTIL_FREE)
926 {
927 /*
928 * Prevent additional wakeups until retryer gets to run. Backends
929 * that are just waiting for the lock to become free don't retry
930 * automatically.
931 */
933
934 /*
935 * Don't wakeup (further) exclusive locks.
936 */
937 wokeup_somebody = true;
938 }
939
940 /*
941 * Signal that the process isn't on the wait list anymore. This allows
942 * LWLockDequeueSelf() to remove itself of the waitlist with a
943 * proclist_delete(), rather than having to check if it has been
944 * removed from the list.
945 */
946 Assert(waiter->lwWaiting == LW_WS_WAITING);
948
949 /*
950 * Once we've woken up an exclusive lock, there's no point in waking
951 * up anybody else.
952 */
953 if (waiter->lwWaitMode == LW_EXCLUSIVE)
954 break;
955 }
956
958
959 /* unset required flags, and release lock, in one fell swoop */
960 {
963
965 while (true)
966 {
968
969 /* compute desired flags */
970
973 else
975
976 if (proclist_is_empty(&lock->waiters))
978
979 desired_state &= ~LW_FLAG_LOCKED; /* release lock */
980
983 break;
984 }
985 }
986
987 /* Awaken any waiters I removed from the queue. */
988 proclist_foreach_modify(iter, &wakeup, lwWaitLink)
989 {
990 PGPROC *waiter = GetPGProcByNumber(iter.cur);
991
992 LOG_LWDEBUG("LWLockRelease", lock, "release waiter");
993 proclist_delete(&wakeup, iter.cur, lwWaitLink);
994
995 /*
996 * Guarantee that lwWaiting being unset only becomes visible once the
997 * unlink from the link has completed. Otherwise the target backend
998 * could be woken up for other reason and enqueue for a new lock - if
999 * that happens before the list unlink happens, the list would end up
1000 * being corrupted.
1001 *
1002 * The barrier pairs with the LWLockWaitListLock() when enqueuing for
1003 * another lock.
1004 */
1006 waiter->lwWaiting = LW_WS_NOT_WAITING;
1007 PGSemaphoreUnlock(waiter->sem);
1008 }
1009}
1010
1011/*
1012 * Add ourselves to the end of the queue.
1013 *
1014 * NB: Mode can be LW_WAIT_UNTIL_FREE here!
1015 */
1016static void
1018{
1019 /*
1020 * If we don't have a PGPROC structure, there's no way to wait. This
1021 * should never occur, since MyProc should only be null during shared
1022 * memory initialization.
1023 */
1024 if (MyProc == NULL)
1025 elog(PANIC, "cannot wait without a PGPROC structure");
1026
1028 elog(PANIC, "queueing for lock while waiting on another one");
1029
1030 LWLockWaitListLock(lock);
1031
1032 /* setting the flag is protected by the spinlock */
1034
1037
1038 /* LW_WAIT_UNTIL_FREE waiters are always at the front of the queue */
1039 if (mode == LW_WAIT_UNTIL_FREE)
1040 proclist_push_head(&lock->waiters, MyProcNumber, lwWaitLink);
1041 else
1042 proclist_push_tail(&lock->waiters, MyProcNumber, lwWaitLink);
1043
1044 /* Can release the mutex now */
1046
1047#ifdef LOCK_DEBUG
1048 pg_atomic_fetch_add_u32(&lock->nwaiters, 1);
1049#endif
1050}
1051
1052/*
1053 * Remove ourselves from the waitlist.
1054 *
1055 * This is used if we queued ourselves because we thought we needed to sleep
1056 * but, after further checking, we discovered that we don't actually need to
1057 * do so.
1058 */
1059static void
1061{
1062 bool on_waitlist;
1063
1064#ifdef LWLOCK_STATS
1066
1068
1069 lwstats->dequeue_self_count++;
1070#endif
1071
1072 LWLockWaitListLock(lock);
1073
1074 /*
1075 * Remove ourselves from the waitlist, unless we've already been removed.
1076 * The removal happens with the wait list lock held, so there's no race in
1077 * this check.
1078 */
1080 if (on_waitlist)
1081 proclist_delete(&lock->waiters, MyProcNumber, lwWaitLink);
1082
1083 if (proclist_is_empty(&lock->waiters) &&
1085 {
1087 }
1088
1089 /* XXX: combine with fetch_and above? */
1091
1092 /* clear waiting state again, nice for debugging */
1093 if (on_waitlist)
1095 else
1096 {
1097 int extraWaits = 0;
1098
1099 /*
1100 * Somebody else dequeued us and has or will wake us up. Deal with the
1101 * superfluous absorption of a wakeup.
1102 */
1103
1104 /*
1105 * Clear LW_FLAG_WAKE_IN_PROGRESS if somebody woke us before we
1106 * removed ourselves - they'll have set it.
1107 */
1109
1110 /*
1111 * Now wait for the scheduled wakeup, otherwise our ->lwWaiting would
1112 * get reset at some inconvenient point later. Most of the time this
1113 * will immediately return.
1114 */
1115 for (;;)
1116 {
1119 break;
1120 extraWaits++;
1121 }
1122
1123 /*
1124 * Fix the process wait semaphore's count for any absorbed wakeups.
1125 */
1126 while (extraWaits-- > 0)
1128 }
1129
1130#ifdef LOCK_DEBUG
1131 {
1132 /* not waiting anymore */
1134
1136 }
1137#endif
1138}
1139
1140/*
1141 * LWLockAcquire - acquire a lightweight lock in the specified mode
1142 *
1143 * If the lock is not available, sleep until it is. Returns true if the lock
1144 * was available immediately, false if we had to sleep.
1145 *
1146 * Side effect: cancel/die interrupts are held off until lock release.
1147 */
1148bool
1150{
1151 PGPROC *proc = MyProc;
1152 bool result = true;
1153 int extraWaits = 0;
1154#ifdef LWLOCK_STATS
1156
1158#endif
1159
1161
1162 PRINT_LWDEBUG("LWLockAcquire", lock, mode);
1163
1164#ifdef LWLOCK_STATS
1165 /* Count lock acquisition attempts */
1166 if (mode == LW_EXCLUSIVE)
1167 lwstats->ex_acquire_count++;
1168 else
1169 lwstats->sh_acquire_count++;
1170#endif /* LWLOCK_STATS */
1171
1172 /*
1173 * We can't wait if we haven't got a PGPROC. This should only occur
1174 * during bootstrap or shared memory initialization. Put an Assert here
1175 * to catch unsafe coding practices.
1176 */
1177 Assert(!(proc == NULL && IsUnderPostmaster));
1178
1179 /* Ensure we will have room to remember the lock */
1181 elog(ERROR, "too many LWLocks taken");
1182
1183 /*
1184 * Lock out cancel/die interrupts until we exit the code section protected
1185 * by the LWLock. This ensures that interrupts will not interfere with
1186 * manipulations of data structures in shared memory.
1187 */
1189
1190 /*
1191 * Loop here to try to acquire lock after each time we are signaled by
1192 * LWLockRelease.
1193 *
1194 * NOTE: it might seem better to have LWLockRelease actually grant us the
1195 * lock, rather than retrying and possibly having to go back to sleep. But
1196 * in practice that is no good because it means a process swap for every
1197 * lock acquisition when two or more processes are contending for the same
1198 * lock. Since LWLocks are normally used to protect not-very-long
1199 * sections of computation, a process needs to be able to acquire and
1200 * release the same lock many times during a single CPU time slice, even
1201 * in the presence of contention. The efficiency of being able to do that
1202 * outweighs the inefficiency of sometimes wasting a process dispatch
1203 * cycle because the lock is not free when a released waiter finally gets
1204 * to run. See pgsql-hackers archives for 29-Dec-01.
1205 */
1206 for (;;)
1207 {
1208 bool mustwait;
1209
1210 /*
1211 * Try to grab the lock the first time, we're not in the waitqueue
1212 * yet/anymore.
1213 */
1215
1216 if (!mustwait)
1217 {
1218 LOG_LWDEBUG("LWLockAcquire", lock, "immediately acquired lock");
1219 break; /* got the lock */
1220 }
1221
1222 /*
1223 * Ok, at this point we couldn't grab the lock on the first try. We
1224 * cannot simply queue ourselves to the end of the list and wait to be
1225 * woken up because by now the lock could long have been released.
1226 * Instead add us to the queue and try to grab the lock again. If we
1227 * succeed we need to revert the queuing and be happy, otherwise we
1228 * recheck the lock. If we still couldn't grab it, we know that the
1229 * other locker will see our queue entries when releasing since they
1230 * existed before we checked for the lock.
1231 */
1232
1233 /* add to the queue */
1234 LWLockQueueSelf(lock, mode);
1235
1236 /* we're now guaranteed to be woken up if necessary */
1238
1239 /* ok, grabbed the lock the second time round, need to undo queueing */
1240 if (!mustwait)
1241 {
1242 LOG_LWDEBUG("LWLockAcquire", lock, "acquired, undoing queue");
1243
1244 LWLockDequeueSelf(lock);
1245 break;
1246 }
1247
1248 /*
1249 * Wait until awakened.
1250 *
1251 * It is possible that we get awakened for a reason other than being
1252 * signaled by LWLockRelease. If so, loop back and wait again. Once
1253 * we've gotten the LWLock, re-increment the sema by the number of
1254 * additional signals received.
1255 */
1256 LOG_LWDEBUG("LWLockAcquire", lock, "waiting");
1257
1258#ifdef LWLOCK_STATS
1259 lwstats->block_count++;
1260#endif
1261
1265
1266 for (;;)
1267 {
1268 PGSemaphoreLock(proc->sem);
1269 if (proc->lwWaiting == LW_WS_NOT_WAITING)
1270 break;
1271 extraWaits++;
1272 }
1273
1274 /* Retrying, allow LWLockRelease to release waiters again. */
1276
1277#ifdef LOCK_DEBUG
1278 {
1279 /* not waiting anymore */
1281
1283 }
1284#endif
1285
1289
1290 LOG_LWDEBUG("LWLockAcquire", lock, "awakened");
1291
1292 /* Now loop back and try to acquire lock again. */
1293 result = false;
1294 }
1295
1298
1299 /* Add lock to list of locks held by this backend */
1302
1303 /*
1304 * Fix the process wait semaphore's count for any absorbed wakeups.
1305 */
1306 while (extraWaits-- > 0)
1307 PGSemaphoreUnlock(proc->sem);
1308
1309 return result;
1310}
1311
1312/*
1313 * LWLockConditionalAcquire - acquire a lightweight lock in the specified mode
1314 *
1315 * If the lock is not available, return false with no side-effects.
1316 *
1317 * If successful, cancel/die interrupts are held off until lock release.
1318 */
1319bool
1321{
1322 bool mustwait;
1323
1325
1326 PRINT_LWDEBUG("LWLockConditionalAcquire", lock, mode);
1327
1328 /* Ensure we will have room to remember the lock */
1330 elog(ERROR, "too many LWLocks taken");
1331
1332 /*
1333 * Lock out cancel/die interrupts until we exit the code section protected
1334 * by the LWLock. This ensures that interrupts will not interfere with
1335 * manipulations of data structures in shared memory.
1336 */
1338
1339 /* Check for the lock */
1341
1342 if (mustwait)
1343 {
1344 /* Failed to get lock, so release interrupt holdoff */
1346
1347 LOG_LWDEBUG("LWLockConditionalAcquire", lock, "failed");
1350 }
1351 else
1352 {
1353 /* Add lock to list of locks held by this backend */
1358 }
1359 return !mustwait;
1360}
1361
1362/*
1363 * LWLockAcquireOrWait - Acquire lock, or wait until it's free
1364 *
1365 * The semantics of this function are a bit funky. If the lock is currently
1366 * free, it is acquired in the given mode, and the function returns true. If
1367 * the lock isn't immediately free, the function waits until it is released
1368 * and returns false, but does not acquire the lock.
1369 *
1370 * This is currently used for WALWriteLock: when a backend flushes the WAL,
1371 * holding WALWriteLock, it can flush the commit records of many other
1372 * backends as a side-effect. Those other backends need to wait until the
1373 * flush finishes, but don't need to acquire the lock anymore. They can just
1374 * wake up, observe that their records have already been flushed, and return.
1375 */
1376bool
1378{
1379 PGPROC *proc = MyProc;
1380 bool mustwait;
1381 int extraWaits = 0;
1382#ifdef LWLOCK_STATS
1384
1386#endif
1387
1389
1390 PRINT_LWDEBUG("LWLockAcquireOrWait", lock, mode);
1391
1392 /* Ensure we will have room to remember the lock */
1394 elog(ERROR, "too many LWLocks taken");
1395
1396 /*
1397 * Lock out cancel/die interrupts until we exit the code section protected
1398 * by the LWLock. This ensures that interrupts will not interfere with
1399 * manipulations of data structures in shared memory.
1400 */
1402
1403 /*
1404 * NB: We're using nearly the same twice-in-a-row lock acquisition
1405 * protocol as LWLockAcquire(). Check its comments for details.
1406 */
1408
1409 if (mustwait)
1410 {
1412
1414
1415 if (mustwait)
1416 {
1417 /*
1418 * Wait until awakened. Like in LWLockAcquire, be prepared for
1419 * bogus wakeups.
1420 */
1421 LOG_LWDEBUG("LWLockAcquireOrWait", lock, "waiting");
1422
1423#ifdef LWLOCK_STATS
1424 lwstats->block_count++;
1425#endif
1426
1430
1431 for (;;)
1432 {
1433 PGSemaphoreLock(proc->sem);
1434 if (proc->lwWaiting == LW_WS_NOT_WAITING)
1435 break;
1436 extraWaits++;
1437 }
1438
1439#ifdef LOCK_DEBUG
1440 {
1441 /* not waiting anymore */
1443
1445 }
1446#endif
1450
1451 LOG_LWDEBUG("LWLockAcquireOrWait", lock, "awakened");
1452 }
1453 else
1454 {
1455 LOG_LWDEBUG("LWLockAcquireOrWait", lock, "acquired, undoing queue");
1456
1457 /*
1458 * Got lock in the second attempt, undo queueing. We need to treat
1459 * this as having successfully acquired the lock, otherwise we'd
1460 * not necessarily wake up people we've prevented from acquiring
1461 * the lock.
1462 */
1463 LWLockDequeueSelf(lock);
1464 }
1465 }
1466
1467 /*
1468 * Fix the process wait semaphore's count for any absorbed wakeups.
1469 */
1470 while (extraWaits-- > 0)
1471 PGSemaphoreUnlock(proc->sem);
1472
1473 if (mustwait)
1474 {
1475 /* Failed to get lock, so release interrupt holdoff */
1477 LOG_LWDEBUG("LWLockAcquireOrWait", lock, "failed");
1480 }
1481 else
1482 {
1483 LOG_LWDEBUG("LWLockAcquireOrWait", lock, "succeeded");
1484 /* Add lock to list of locks held by this backend */
1489 }
1490
1491 return !mustwait;
1492}
1493
1494/*
1495 * Does the lwlock in its current state need to wait for the variable value to
1496 * change?
1497 *
1498 * If we don't need to wait, and it's because the value of the variable has
1499 * changed, store the current value in newval.
1500 *
1501 * *result is set to true if the lock was free, and false otherwise.
1502 */
1503static bool
1505 uint64 *newval, bool *result)
1506{
1507 bool mustwait;
1508 uint64 value;
1509
1510 /*
 1511 * Test first to see if the slot is free right now.
1512 *
1513 * XXX: the unique caller of this routine, WaitXLogInsertionsToFinish()
1514 * via LWLockWaitForVar(), uses an implied barrier with a spinlock before
1515 * this, so we don't need a memory barrier here as far as the current
1516 * usage is concerned. But that might not be safe in general.
1517 */
1519
1520 if (!mustwait)
1521 {
1522 *result = true;
1523 return false;
1524 }
1525
1526 *result = false;
1527
1528 /*
1529 * Reading this value atomically is safe even on platforms where uint64
1530 * cannot be read without observing a torn value.
1531 */
1533
1534 if (value != oldval)
1535 {
1536 mustwait = false;
1537 *newval = value;
1538 }
1539 else
1540 {
1541 mustwait = true;
1542 }
1543
1544 return mustwait;
1545}
1546
1547/*
1548 * LWLockWaitForVar - Wait until lock is free, or a variable is updated.
1549 *
1550 * If the lock is held and *valptr equals oldval, waits until the lock is
1551 * either freed, or the lock holder updates *valptr by calling
1552 * LWLockUpdateVar. If the lock is free on exit (immediately or after
1553 * waiting), returns true. If the lock is still held, but *valptr no longer
1554 * matches oldval, returns false and sets *newval to the current value in
1555 * *valptr.
1556 *
1557 * Note: this function ignores shared lock holders; if the lock is held
1558 * in shared mode, returns 'true'.
1559 *
1560 * Be aware that LWLockConflictsWithVar() does not include a memory barrier,
1561 * hence the caller of this function may want to rely on an explicit barrier or
1562 * an implied barrier via spinlock or LWLock to avoid memory ordering issues.
1563 */
1564bool
1566 uint64 *newval)
1567{
1568 PGPROC *proc = MyProc;
1569 int extraWaits = 0;
1570 bool result = false;
1571#ifdef LWLOCK_STATS
1573
1575#endif
1576
1577 PRINT_LWDEBUG("LWLockWaitForVar", lock, LW_WAIT_UNTIL_FREE);
1578
1579 /*
1580 * Lock out cancel/die interrupts while we sleep on the lock. There is no
1581 * cleanup mechanism to remove us from the wait queue if we got
1582 * interrupted.
1583 */
1585
1586 /*
1587 * Loop here to check the lock's status after each time we are signaled.
1588 */
1589 for (;;)
1590 {
1591 bool mustwait;
1592
1594 &result);
1595
1596 if (!mustwait)
1597 break; /* the lock was free or value didn't match */
1598
1599 /*
1600 * Add myself to wait queue. Note that this is racy, somebody else
 1601 * could wake up before we're finished queuing. NB: We're using nearly
1602 * the same twice-in-a-row lock acquisition protocol as
1603 * LWLockAcquire(). Check its comments for details. The only
1604 * difference is that we also have to check the variable's values when
1605 * checking the state of the lock.
1606 */
1608
1609 /*
1610 * Clear LW_FLAG_WAKE_IN_PROGRESS flag, to make sure we get woken up
1611 * as soon as the lock is released.
1612 */
1614
1615 /*
1616 * We're now guaranteed to be woken up if necessary. Recheck the lock
1617 * and variables state.
1618 */
1620 &result);
1621
1622 /* Ok, no conflict after we queued ourselves. Undo queueing. */
1623 if (!mustwait)
1624 {
1625 LOG_LWDEBUG("LWLockWaitForVar", lock, "free, undoing queue");
1626
1627 LWLockDequeueSelf(lock);
1628 break;
1629 }
1630
1631 /*
1632 * Wait until awakened.
1633 *
1634 * It is possible that we get awakened for a reason other than being
1635 * signaled by LWLockRelease. If so, loop back and wait again. Once
1636 * we've gotten the LWLock, re-increment the sema by the number of
1637 * additional signals received.
1638 */
1639 LOG_LWDEBUG("LWLockWaitForVar", lock, "waiting");
1640
1641#ifdef LWLOCK_STATS
1642 lwstats->block_count++;
1643#endif
1644
1648
1649 for (;;)
1650 {
1651 PGSemaphoreLock(proc->sem);
1652 if (proc->lwWaiting == LW_WS_NOT_WAITING)
1653 break;
1654 extraWaits++;
1655 }
1656
1657#ifdef LOCK_DEBUG
1658 {
1659 /* not waiting anymore */
1661
1663 }
1664#endif
1665
1669
1670 LOG_LWDEBUG("LWLockWaitForVar", lock, "awakened");
1671
1672 /* Now loop back and check the status of the lock again. */
1673 }
1674
1675 /*
1676 * Fix the process wait semaphore's count for any absorbed wakeups.
1677 */
1678 while (extraWaits-- > 0)
1679 PGSemaphoreUnlock(proc->sem);
1680
1681 /*
1682 * Now okay to allow cancel/die interrupts.
1683 */
1685
1686 return result;
1687}
1688
1689
1690/*
1691 * LWLockUpdateVar - Update a variable and wake up waiters atomically
1692 *
1693 * Sets *valptr to 'val', and wakes up all processes waiting for us with
1694 * LWLockWaitForVar(). It first sets the value atomically and then wakes up
1695 * waiting processes so that any process calling LWLockWaitForVar() on the same
1696 * lock is guaranteed to see the new value, and act accordingly.
1697 *
1698 * The caller must be holding the lock in exclusive mode.
1699 */
1700void
1702{
1705
1706 PRINT_LWDEBUG("LWLockUpdateVar", lock, LW_EXCLUSIVE);
1707
1708 /*
1709 * Note that pg_atomic_exchange_u64 is a full barrier, so we're guaranteed
1710 * that the variable is updated before waking up waiters.
1711 */
1713
1715
1716 LWLockWaitListLock(lock);
1717
1719
1720 /*
1721 * See if there are any LW_WAIT_UNTIL_FREE waiters that need to be woken
1722 * up. They are always in the front of the queue.
1723 */
1724 proclist_foreach_modify(iter, &lock->waiters, lwWaitLink)
1725 {
1726 PGPROC *waiter = GetPGProcByNumber(iter.cur);
1727
1728 if (waiter->lwWaitMode != LW_WAIT_UNTIL_FREE)
1729 break;
1730
1731 proclist_delete(&lock->waiters, iter.cur, lwWaitLink);
1732 proclist_push_tail(&wakeup, iter.cur, lwWaitLink);
1733
1734 /* see LWLockWakeup() */
1735 Assert(waiter->lwWaiting == LW_WS_WAITING);
1737 }
1738
1739 /* We are done updating shared state of the lock itself. */
1741
1742 /*
1743 * Awaken any waiters I removed from the queue.
1744 */
1745 proclist_foreach_modify(iter, &wakeup, lwWaitLink)
1746 {
1747 PGPROC *waiter = GetPGProcByNumber(iter.cur);
1748
1749 proclist_delete(&wakeup, iter.cur, lwWaitLink);
1750 /* check comment in LWLockWakeup() about this barrier */
1752 waiter->lwWaiting = LW_WS_NOT_WAITING;
1753 PGSemaphoreUnlock(waiter->sem);
1754 }
1755}
1756
1757
1758/*
1759 * LWLockRelease - release a previously acquired lock
1760 *
1761 * NB: This will leave lock->owner pointing to the current backend (if
1762 * LOCK_DEBUG is set). This is somewhat intentional, as it makes it easier to
1763 * debug cases of missing wakeups during lock release.
1764 */
1765void
1766LWLockRelease(LWLock *lock)
1767{
1770 bool check_waiters;
1771 int i;
1772
1773 /*
1774 * Remove lock from list of locks held. Usually, but not always, it will
1775 * be the latest-acquired lock; so search array backwards.
1776 */
1777 for (i = num_held_lwlocks; --i >= 0;)
1778 if (lock == held_lwlocks[i].lock)
1779 break;
1780
1781 if (i < 0)
1782 elog(ERROR, "lock %s is not held", T_NAME(lock));
1783
1785
1787 for (; i < num_held_lwlocks; i++)
1788 held_lwlocks[i] = held_lwlocks[i + 1];
1789
1790 PRINT_LWDEBUG("LWLockRelease", lock, mode);
1791
1792 /*
1793 * Release my hold on lock, after that it can immediately be acquired by
1794 * others, even if we still have to wakeup other waiters.
1795 */
1796 if (mode == LW_EXCLUSIVE)
1798 else
1800
1801 /* nobody else can have that kind of lock */
1803
1806
1807 /*
1808 * Check if we're still waiting for backends to get scheduled, if so,
1809 * don't wake them up again.
1810 */
1811 if ((oldstate & LW_FLAG_HAS_WAITERS) &&
1813 (oldstate & LW_LOCK_MASK) == 0)
1814 check_waiters = true;
1815 else
1816 check_waiters = false;
1817
1818 /*
1819 * As waking up waiters requires the spinlock to be acquired, only do so
1820 * if necessary.
1821 */
1822 if (check_waiters)
1823 {
1824 /* XXX: remove before commit? */
1825 LOG_LWDEBUG("LWLockRelease", lock, "releasing waiters");
1826 LWLockWakeup(lock);
1827 }
1828
1829 /*
1830 * Now okay to allow cancel/die interrupts.
1831 */
1833}
1834
1835/*
1836 * LWLockReleaseClearVar - release a previously acquired lock, reset variable
1837 */
1838void
1840{
1841 /*
1842 * Note that pg_atomic_exchange_u64 is a full barrier, so we're guaranteed
1843 * that the variable is updated before releasing the lock.
1844 */
1846
1847 LWLockRelease(lock);
1848}
1849
1850
1851/*
1852 * LWLockReleaseAll - release all currently-held locks
1853 *
1854 * Used to clean up after ereport(ERROR). An important difference between this
1855 * function and retail LWLockRelease calls is that InterruptHoldoffCount is
1856 * unchanged by this operation. This is necessary since InterruptHoldoffCount
1857 * has been set to an appropriate level earlier in error recovery. We could
1858 * decrement it below zero if we allow it to drop for each released lock!
1859 *
1860 * Note that this function must be safe to call even before the LWLock
1861 * subsystem has been initialized (e.g., during early startup failures).
1862 * In that case, num_held_lwlocks will be 0 and we do nothing.
1863 */
1864void
1865LWLockReleaseAll(void)
1866{
1867 while (num_held_lwlocks > 0)
1868 {
1869 HOLD_INTERRUPTS(); /* match the upcoming RESUME_INTERRUPTS */
1870
1872 }
1873
1875}
1876
1877
1878/*
1879 * LWLockHeldByMe - test whether my process holds a lock in any mode
1880 *
1881 * This is meant as debug support only.
1882 */
1883bool
1885{
1886 int i;
1887
1888 for (i = 0; i < num_held_lwlocks; i++)
1889 {
1890 if (held_lwlocks[i].lock == lock)
1891 return true;
1892 }
1893 return false;
1894}
1895
1896/*
1897 * LWLockAnyHeldByMe - test whether my process holds any of an array of locks
1898 *
1899 * This is meant as debug support only.
1900 */
1901bool
1902LWLockAnyHeldByMe(LWLock *lock, int nlocks, size_t stride)
1903{
1904 char *held_lock_addr;
1905 char *begin;
1906 char *end;
1907 int i;
1908
1909 begin = (char *) lock;
1910 end = begin + nlocks * stride;
1911 for (i = 0; i < num_held_lwlocks; i++)
1912 {
1913 held_lock_addr = (char *) held_lwlocks[i].lock;
1914 if (held_lock_addr >= begin &&
1915 held_lock_addr < end &&
1916 (held_lock_addr - begin) % stride == 0)
1917 return true;
1918 }
1919 return false;
1920}
1921
1922/*
1923 * LWLockHeldByMeInMode - test whether my process holds a lock in given mode
1924 *
1925 * This is meant as debug support only.
1926 */
1927bool
1929{
1930 int i;
1931
1932 for (i = 0; i < num_held_lwlocks; i++)
1933 {
1934 if (held_lwlocks[i].lock == lock && held_lwlocks[i].mode == mode)
1935 return true;
1936 }
1937 return false;
1938}
Datum idx(PG_FUNCTION_ARGS)
Definition _int_op.c:262
static uint32 pg_atomic_fetch_and_u32(volatile pg_atomic_uint32 *ptr, uint32 and_)
Definition atomics.h:396
static bool pg_atomic_compare_exchange_u32(volatile pg_atomic_uint32 *ptr, uint32 *expected, uint32 newval)
Definition atomics.h:349
static uint32 pg_atomic_fetch_or_u32(volatile pg_atomic_uint32 *ptr, uint32 or_)
Definition atomics.h:410
static uint32 pg_atomic_sub_fetch_u32(volatile pg_atomic_uint32 *ptr, int32 sub_)
Definition atomics.h:439
static uint32 pg_atomic_fetch_sub_u32(volatile pg_atomic_uint32 *ptr, int32 sub_)
Definition atomics.h:381
static void pg_atomic_init_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
Definition atomics.h:219
#define pg_write_barrier()
Definition atomics.h:155
static uint32 pg_atomic_fetch_add_u32(volatile pg_atomic_uint32 *ptr, int32 add_)
Definition atomics.h:366
static uint32 pg_atomic_read_u32(volatile pg_atomic_uint32 *ptr)
Definition atomics.h:237
static uint64 pg_atomic_read_u64(volatile pg_atomic_uint64 *ptr)
Definition atomics.h:467
static uint64 pg_atomic_exchange_u64(volatile pg_atomic_uint64 *ptr, uint64 newval)
Definition atomics.h:513
#define likely(x)
Definition c.h:437
#define PG_USED_FOR_ASSERTS_ONLY
Definition c.h:249
#define Assert(condition)
Definition c.h:943
uint64_t uint64
Definition c.h:625
uint16_t uint16
Definition c.h:623
#define pg_unreachable()
Definition c.h:367
uint32_t uint32
Definition c.h:624
#define MemSet(start, val, len)
Definition c.h:1107
uint32 result
#define fprintf(file, fmt, msg)
Definition cubescan.l:21
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition dynahash.c:889
HTAB * hash_create(const char *tabname, int64 nelem, const HASHCTL *info, int flags)
Definition dynahash.c:360
void * hash_seq_search(HASH_SEQ_STATUS *status)
Definition dynahash.c:1352
void hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp)
Definition dynahash.c:1317
Datum arg
Definition elog.c:1322
int errcode(int sqlerrcode)
Definition elog.c:874
int int errhidestmt(bool hide_stmt)
#define LOG
Definition elog.h:32
int errdetail(const char *fmt,...) pg_attribute_printf(1
#define FATAL
Definition elog.h:42
int int errmsg_internal(const char *fmt,...) pg_attribute_printf(1
#define PANIC
Definition elog.h:44
#define ERROR
Definition elog.h:40
int errhidecontext(bool hide_ctx)
#define elog(elevel,...)
Definition elog.h:228
#define ereport(elevel,...)
Definition elog.h:152
int MyProcPid
Definition globals.c:49
ProcNumber MyProcNumber
Definition globals.c:92
bool IsUnderPostmaster
Definition globals.c:122
bool IsPostmasterEnvironment
Definition globals.c:121
#define newval
@ HASH_ENTER
Definition hsearch.h:109
#define HASH_CONTEXT
Definition hsearch.h:97
#define HASH_ELEM
Definition hsearch.h:90
#define HASH_BLOBS
Definition hsearch.h:92
long val
Definition informix.c:689
static struct @177 value
void on_shmem_exit(pg_on_exit_callback function, Datum arg)
Definition ipc.c:372
int i
Definition isn.c:77
List * lappend(List *list, void *datum)
Definition list.c:339
#define LW_VAL_EXCLUSIVE
Definition lwlock.c:103
void LWLockUpdateVar(LWLock *lock, pg_atomic_uint64 *valptr, uint64 val)
Definition lwlock.c:1702
static void LWLockWakeup(LWLock *lock)
Definition lwlock.c:904
#define LW_FLAG_LOCKED
Definition lwlock.c:98
bool LWLockHeldByMe(LWLock *lock)
Definition lwlock.c:1885
static LWLockHandle held_lwlocks[MAX_SIMUL_LWLOCKS]
Definition lwlock.c:167
void LWLockReleaseClearVar(LWLock *lock, pg_atomic_uint64 *valptr, uint64 val)
Definition lwlock.c:1840
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
Definition lwlock.c:1150
#define MAX_USER_DEFINED_TRANCHES
Definition lwlock.c:170
static List * NamedLWLockTrancheRequests
Definition lwlock.c:211
int LWLockNewTrancheId(const char *name)
Definition lwlock.c:562
#define LW_VAL_SHARED
Definition lwlock.c:104
static bool LWLockAttemptLock(LWLock *lock, LWLockMode mode)
Definition lwlock.c:764
static void LWLockWaitListLock(LWLock *lock)
Definition lwlock.c:835
LWLockPadded * GetNamedLWLockTranche(const char *tranche_name)
Definition lwlock.c:522
bool LWLockHeldByMeInMode(LWLock *lock, LWLockMode mode)
Definition lwlock.c:1929
static LWLockTrancheShmemData * LWLockTranches
Definition lwlock.c:195
static void LWLockReportWaitEnd(void)
Definition lwlock.c:700
static void LWLockShmemRequest(void *arg)
Definition lwlock.c:416
bool LWLockWaitForVar(LWLock *lock, pg_atomic_uint64 *valptr, uint64 oldval, uint64 *newval)
Definition lwlock.c:1566
static const char * GetLWTrancheName(uint16 trancheId)
Definition lwlock.c:709
#define LW_LOCK_MASK
Definition lwlock.c:108
void RequestNamedLWLockTranche(const char *tranche_name, int num_lwlocks)
Definition lwlock.c:620
static int LocalNumUserDefinedTranches
Definition lwlock.c:198
#define LW_FLAG_HAS_WAITERS
Definition lwlock.c:96
#define MAX_SIMUL_LWLOCKS
Definition lwlock.c:157
static int NumLWLocksForNamedTranches(void)
Definition lwlock.c:400
void LWLockRelease(LWLock *lock)
Definition lwlock.c:1767
#define T_NAME(lock)
Definition lwlock.c:229
static int num_held_lwlocks
Definition lwlock.c:166
void LWLockReleaseAll(void)
Definition lwlock.c:1866
void LWLockInitialize(LWLock *lock, int tranche_id)
Definition lwlock.c:670
static const char *const BuiltinTrancheNames[]
Definition lwlock.c:137
static void LWLockWaitListUnlock(LWLock *lock)
Definition lwlock.c:891
#define LOG_LWDEBUG(a, b, c)
Definition lwlock.c:294
bool LWLockConditionalAcquire(LWLock *lock, LWLockMode mode)
Definition lwlock.c:1321
bool LWLockAcquireOrWait(LWLock *lock, LWLockMode mode)
Definition lwlock.c:1378
static void LWLockQueueSelf(LWLock *lock, LWLockMode mode)
Definition lwlock.c:1018
#define PRINT_LWDEBUG(a, b, c)
Definition lwlock.c:293
static void LWLockReportWaitStart(LWLock *lock)
Definition lwlock.c:691
LWLockPadded * MainLWLockArray
Definition lwlock.c:150
static int num_main_array_locks
Definition lwlock.c:214
#define LW_FLAG_WAKE_IN_PROGRESS
Definition lwlock.c:97
const char * GetLWLockIdentifier(uint32 classId, uint16 eventId)
Definition lwlock.c:747
static void LWLockDequeueSelf(LWLock *lock)
Definition lwlock.c:1061
bool LWLockAnyHeldByMe(LWLock *lock, int nlocks, size_t stride)
Definition lwlock.c:1903
static void LWLockShmemInit(void *arg)
Definition lwlock.c:445
#define LW_SHARED_MASK
Definition lwlock.c:107
static bool LWLockConflictsWithVar(LWLock *lock, pg_atomic_uint64 *valptr, uint64 oldval, uint64 *newval, bool *result)
Definition lwlock.c:1505
void InitLWLockAccess(void)
Definition lwlock.c:506
@ LW_WS_NOT_WAITING
Definition lwlock.h:30
@ LW_WS_WAITING
Definition lwlock.h:31
@ LW_WS_PENDING_WAKEUP
Definition lwlock.h:32
#define BUFFER_MAPPING_LWLOCK_OFFSET
Definition lwlock.h:94
#define NUM_LOCK_PARTITIONS
Definition lwlock.h:87
@ LWTRANCHE_FIRST_USER_DEFINED
Definition lwlock.h:172
#define LOCK_MANAGER_LWLOCK_OFFSET
Definition lwlock.h:95
#define NUM_BUFFER_PARTITIONS
Definition lwlock.h:83
#define PREDICATELOCK_MANAGER_LWLOCK_OFFSET
Definition lwlock.h:97
#define NUM_FIXED_LWLOCKS
Definition lwlock.h:99
LWLockMode
Definition lwlock.h:103
@ LW_SHARED
Definition lwlock.h:105
@ LW_WAIT_UNTIL_FREE
Definition lwlock.h:106
@ LW_EXCLUSIVE
Definition lwlock.h:104
#define NUM_PREDICATELOCK_PARTITIONS
Definition lwlock.h:91
void * palloc0(Size size)
Definition mcxt.c:1417
MemoryContext TopMemoryContext
Definition mcxt.c:166
MemoryContext PostmasterContext
Definition mcxt.c:168
void MemoryContextDelete(MemoryContext context)
Definition mcxt.c:472
void MemoryContextAllowInCriticalSection(MemoryContext context, bool allow)
Definition mcxt.c:743
#define AllocSetContextCreate
Definition memutils.h:129
#define ALLOCSET_DEFAULT_SIZES
Definition memutils.h:160
#define RESUME_INTERRUPTS()
Definition miscadmin.h:138
#define HOLD_INTERRUPTS()
Definition miscadmin.h:136
bool process_shmem_requests_in_progress
Definition miscinit.c:1792
static char * errmsg
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
Definition palloc.h:124
static PgChecksumMode mode
#define NAMEDATALEN
static int list_length(const List *l)
Definition pg_list.h:152
#define foreach_ptr(type, var, lst)
Definition pg_list.h:501
size_t strlcpy(char *dst, const char *src, size_t siz)
Definition strlcpy.c:45
void PGSemaphoreUnlock(PGSemaphore sema)
Definition posix_sema.c:333
void PGSemaphoreLock(PGSemaphore sema)
Definition posix_sema.c:313
uint64_t Datum
Definition postgres.h:70
static int fb(int x)
#define GetPGProcByNumber(n)
Definition proc.h:504
#define proclist_delete(list, procno, link_member)
Definition proclist.h:187
static void proclist_init(proclist_head *list)
Definition proclist.h:29
#define proclist_push_tail(list, procno, link_member)
Definition proclist.h:191
#define proclist_push_head(list, procno, link_member)
Definition proclist.h:189
#define proclist_foreach_modify(iter, lhead, link_member)
Definition proclist.h:206
static bool proclist_is_empty(const proclist_head *list)
Definition proclist.h:38
#define MAX_BACKENDS
Definition procnumber.h:39
tree ctl
Definition radixtree.h:1838
void perform_spin_delay(SpinDelayStatus *status)
Definition s_lock.c:126
void finish_spin_delay(SpinDelayStatus *status)
Definition s_lock.c:186
#define init_local_spin_delay(status)
Definition s_lock.h:749
#define SHMEM_ATTACH_UNKNOWN_SIZE
Definition shmem.h:69
#define ShmemRequestStruct(...)
Definition shmem.h:176
static void SpinLockRelease(volatile slock_t *lock)
Definition spin.h:62
static void SpinLockAcquire(volatile slock_t *lock)
Definition spin.h:56
static void SpinLockInit(volatile slock_t *lock)
Definition spin.h:50
PGPROC * MyProc
Definition proc.c:71
LWLockMode mode
Definition lwlock.c:163
LWLock * lock
Definition lwlock.c:162
struct LWLockTrancheShmemData::@23 user_defined[MAX_USER_DEFINED_TRANCHES]
char name[NAMEDATALEN]
Definition lwlock.c:180
pg_atomic_uint32 state
Definition lwlock.h:44
uint16 tranche
Definition lwlock.h:43
proclist_head waiters
Definition lwlock.h:45
Definition proc.h:179
uint8 lwWaitMode
Definition proc.h:284
PGSemaphore sem
Definition proc.h:258
uint8 lwWaiting
Definition proc.h:283
#define PG_WAIT_LWLOCK
static void pgstat_report_wait_start(uint32 wait_event_info)
Definition wait_event.h:67
static void pgstat_report_wait_end(void)
Definition wait_event.h:83
const char * name
static TimestampTz wakeup[NUM_WALRCV_WAKEUPS]

Typedef Documentation

◆ LWLockHandle

◆ LWLockTrancheShmemData

◆ NamedLWLockTrancheRequest

Function Documentation

◆ GetLWLockIdentifier()

const char * GetLWLockIdentifier ( uint32  classId,
uint16  eventId 
)

Definition at line 747 of file lwlock.c.

748{
749 Assert(classId == PG_WAIT_LWLOCK);
750 /* The event IDs are just tranche numbers. */
752}

References Assert, fb(), GetLWTrancheName(), and PG_WAIT_LWLOCK.

Referenced by pgstat_get_wait_event(), test_lwlock_get_lwlock_identifier(), and test_startup_lwlocks().

◆ GetLWTrancheName()

static const char * GetLWTrancheName ( uint16  trancheId)
static

Definition at line 709 of file lwlock.c.

710{
711 int idx;
712
713 /* Built-in tranche or individual LWLock? */
716
717 /*
718 * It's an extension tranche, so look in LWLockTranches->user_defined.
719 */
721
722 /*
723 * We only ever add new entries to LWLockTranches->user_defined, so most
724 * lookups can avoid taking the spinlock as long as the backend-local
725 * counter (LocalNumUserDefinedTranches) is greater than the requested
726 * tranche ID. Else, we need to first update the backend-local counter
727 * with the spinlock held before attempting the lookup again. In
728 * practice, the latter case is probably rare.
729 */
731 {
735
737 elog(ERROR, "tranche %d is not registered", trancheId);
738 }
739
741}

References BuiltinTrancheNames, elog, ERROR, fb(), idx(), LocalNumUserDefinedTranches, LWLockTrancheShmemData::lock, LWLockTranches, LWTRANCHE_FIRST_USER_DEFINED, LWLockTrancheShmemData::name, LWLockTrancheShmemData::num_user_defined, SpinLockAcquire(), SpinLockRelease(), and LWLockTrancheShmemData::user_defined.

Referenced by GetLWLockIdentifier(), and LWLockInitialize().

◆ GetNamedLWLockTranche()

LWLockPadded * GetNamedLWLockTranche ( const char tranche_name)

Definition at line 522 of file lwlock.c.

523{
527
528 /*
529 * Obtain the position of base address of LWLock belonging to requested
530 * tranche_name in MainLWLockArray. LWLocks for user-defined tranches
531 * requested with RequestNamedLWLockTranche() are placed in
532 * MainLWLockArray after fixed locks.
533 */
534 for (int i = 0; i < LocalNumUserDefinedTranches; i++)
535 {
537 tranche_name) == 0)
538 {
540
541 /*
542 * GetNamedLWLockTranche() should only be used for locks requested
543 * with RequestNamedLWLockTranche(), not those allocated with
544 * LWLockNewTrancheId().
545 */
546 if (lock_pos == -1)
547 elog(ERROR, "requested tranche was not registered with RequestNamedLWLockTranche()");
548 return &MainLWLockArray[lock_pos];
549 }
550 }
551
552 elog(ERROR, "requested tranche is not registered");
553
554 /* just to keep compiler quiet */
555 return NULL;
556}

References elog, ERROR, fb(), i, LocalNumUserDefinedTranches, LWLockTrancheShmemData::lock, LWLockTranches, LWLockTrancheShmemData::main_array_idx, MainLWLockArray, LWLockTrancheShmemData::name, LWLockTrancheShmemData::num_user_defined, SpinLockAcquire(), SpinLockRelease(), and LWLockTrancheShmemData::user_defined.

Referenced by test_lwlock_tranche_lookup(), and test_startup_lwlocks().

◆ InitLWLockAccess()

void InitLWLockAccess ( void  )

Definition at line 506 of file lwlock.c.

507{
508#ifdef LWLOCK_STATS
510#endif
511}

References fb().

Referenced by InitAuxiliaryProcess(), and InitProcess().

◆ LWLockAcquire()

bool LWLockAcquire ( LWLock lock,
LWLockMode  mode 
)

Definition at line 1150 of file lwlock.c.

1151{
1152 PGPROC *proc = MyProc;
1153 bool result = true;
1154 int extraWaits = 0;
1155#ifdef LWLOCK_STATS
1157
1159#endif
1160
1162
1163 PRINT_LWDEBUG("LWLockAcquire", lock, mode);
1164
1165#ifdef LWLOCK_STATS
1166 /* Count lock acquisition attempts */
1167 if (mode == LW_EXCLUSIVE)
1168 lwstats->ex_acquire_count++;
1169 else
1170 lwstats->sh_acquire_count++;
1171#endif /* LWLOCK_STATS */
1172
1173 /*
1174 * We can't wait if we haven't got a PGPROC. This should only occur
1175 * during bootstrap or shared memory initialization. Put an Assert here
1176 * to catch unsafe coding practices.
1177 */
1178 Assert(!(proc == NULL && IsUnderPostmaster));
1179
1180 /* Ensure we will have room to remember the lock */
1182 elog(ERROR, "too many LWLocks taken");
1183
1184 /*
1185 * Lock out cancel/die interrupts until we exit the code section protected
1186 * by the LWLock. This ensures that interrupts will not interfere with
1187 * manipulations of data structures in shared memory.
1188 */
1190
1191 /*
1192 * Loop here to try to acquire lock after each time we are signaled by
1193 * LWLockRelease.
1194 *
1195 * NOTE: it might seem better to have LWLockRelease actually grant us the
1196 * lock, rather than retrying and possibly having to go back to sleep. But
1197 * in practice that is no good because it means a process swap for every
1198 * lock acquisition when two or more processes are contending for the same
1199 * lock. Since LWLocks are normally used to protect not-very-long
1200 * sections of computation, a process needs to be able to acquire and
1201 * release the same lock many times during a single CPU time slice, even
1202 * in the presence of contention. The efficiency of being able to do that
1203 * outweighs the inefficiency of sometimes wasting a process dispatch
1204 * cycle because the lock is not free when a released waiter finally gets
1205 * to run. See pgsql-hackers archives for 29-Dec-01.
1206 */
1207 for (;;)
1208 {
1209 bool mustwait;
1210
1211 /*
1212 * Try to grab the lock the first time, we're not in the waitqueue
1213 * yet/anymore.
1214 */
1216
1217 if (!mustwait)
1218 {
1219 LOG_LWDEBUG("LWLockAcquire", lock, "immediately acquired lock");
1220 break; /* got the lock */
1221 }
1222
1223 /*
1224 * Ok, at this point we couldn't grab the lock on the first try. We
1225 * cannot simply queue ourselves to the end of the list and wait to be
1226 * woken up because by now the lock could long have been released.
1227 * Instead add us to the queue and try to grab the lock again. If we
1228 * succeed we need to revert the queuing and be happy, otherwise we
1229 * recheck the lock. If we still couldn't grab it, we know that the
1230 * other locker will see our queue entries when releasing since they
1231 * existed before we checked for the lock.
1232 */
1233
1234 /* add to the queue */
1235 LWLockQueueSelf(lock, mode);
1236
1237 /* we're now guaranteed to be woken up if necessary */
1239
1240 /* ok, grabbed the lock the second time round, need to undo queueing */
1241 if (!mustwait)
1242 {
1243 LOG_LWDEBUG("LWLockAcquire", lock, "acquired, undoing queue");
1244
1245 LWLockDequeueSelf(lock);
1246 break;
1247 }
1248
1249 /*
1250 * Wait until awakened.
1251 *
1252 * It is possible that we get awakened for a reason other than being
1253 * signaled by LWLockRelease. If so, loop back and wait again. Once
1254 * we've gotten the LWLock, re-increment the sema by the number of
1255 * additional signals received.
1256 */
1257 LOG_LWDEBUG("LWLockAcquire", lock, "waiting");
1258
1259#ifdef LWLOCK_STATS
1260 lwstats->block_count++;
1261#endif
1262
1266
1267 for (;;)
1268 {
1269 PGSemaphoreLock(proc->sem);
1270 if (proc->lwWaiting == LW_WS_NOT_WAITING)
1271 break;
1272 extraWaits++;
1273 }
1274
1275 /* Retrying, allow LWLockRelease to release waiters again. */
1277
1278#ifdef LOCK_DEBUG
1279 {
1280 /* not waiting anymore */
1282
1284 }
1285#endif
1286
1290
1291 LOG_LWDEBUG("LWLockAcquire", lock, "awakened");
1292
1293 /* Now loop back and try to acquire lock again. */
1294 result = false;
1295 }
1296
1299
1300 /* Add lock to list of locks held by this backend */
1303
1304 /*
1305 * Fix the process wait semaphore's count for any absorbed wakeups.
1306 */
1307 while (extraWaits-- > 0)
1308 PGSemaphoreUnlock(proc->sem);
1309
1310 return result;
1311}

References Assert, elog, ERROR, fb(), held_lwlocks, HOLD_INTERRUPTS, IsUnderPostmaster, LWLockHandle::lock, LOG_LWDEBUG, LW_EXCLUSIVE, LW_FLAG_WAKE_IN_PROGRESS, LW_SHARED, LW_WS_NOT_WAITING, LWLockAttemptLock(), LWLockDequeueSelf(), LWLockQueueSelf(), LWLockReportWaitEnd(), LWLockReportWaitStart(), PGPROC::lwWaiting, MAX_BACKENDS, MAX_SIMUL_LWLOCKS, LWLockHandle::mode, mode, MyProc, num_held_lwlocks, pg_atomic_fetch_and_u32(), pg_atomic_fetch_sub_u32(), PG_USED_FOR_ASSERTS_ONLY, PGSemaphoreLock(), PGSemaphoreUnlock(), PRINT_LWDEBUG, result, PGPROC::sem, LWLock::state, and T_NAME.

Referenced by _bt_end_vacuum(), _bt_parallel_done(), _bt_parallel_primscan_schedule(), _bt_parallel_release(), _bt_parallel_seize(), _bt_start_vacuum(), _bt_vacuum_cycleid(), abort_logical_decoding_activation(), AbsorbSyncRequests(), ActivateCommitTs(), addLSNWaiter(), AdvanceNextFullTransactionIdPastXid(), AdvanceOldestClogXid(), AdvanceOldestCommitTsXid(), AdvanceXLInsertBuffer(), alloc_object(), AlterSystemSetConfigFile(), ApplyLauncherMain(), apw_detach_shmem(), apw_dump_now(), apw_load_buffers(), AsyncNotifyFreezeXids(), asyncQueueAddEntries(), asyncQueueAdvanceTail(), asyncQueueReadAllNotifications(), asyncQueueUnregister(), AtAbort_Twophase(), AtEOXact_LogicalRepWorkers(), AtPrepare_PredicateLocks(), attach_internal(), autoprewarm_main(), autoprewarm_start_worker(), AutoVacLauncherMain(), AutoVacuumRequestWork(), AutoVacWorkerMain(), BackendPidGetProc(), BackendXidGetPid(), BecomeLockGroupLeader(), BecomeLockGroupMember(), BecomeRegisteredListener(), btparallelrescan(), BufferAlloc(), CallShmemCallbacksAfterStartup(), check_for_freed_segments(), CheckDeadLock(), CheckForSerializableConflictOut(), CheckLogicalSlotExists(), CheckPointPredicate(), CheckPointRelationMap(), CheckPointReplicationOrigin(), CheckPointReplicationSlots(), CheckPointTwoPhase(), CheckTableForSerializableConflictIn(), CheckTargetForConflictsIn(), choose_next_subplan_for_leader(), choose_next_subplan_for_worker(), CleanupInvalidationState(), ClearOldPredicateLocks(), ComputeXidHorizons(), consume_xids_shortcut(), copy_replication_slot(), CountDBBackends(), CountDBConnections(), CountOtherDBBackends(), CountUserBackends(), CreateCheckPoint(), CreateEndOfRecoveryRecord(), CreateInitDecodingContext(), CreatePredicateLock(), CreateRestartPoint(), DataChecksumsWorkerLauncherMain(), DataChecksumsWorkerMain(), DeactivateCommitTs(), DeleteChildTargetLocks(), DeleteLockTarget(), deleteLSNWaiter(), destroy_superblock(), DisableLogicalDecoding(), DisableLogicalDecodingIfNecessary(), do_autovacuum(), 
do_pg_backup_start(), do_pg_backup_stop(), do_start_worker(), DropAllPredicateLocksFromTable(), DropTableSpace(), dsa_allocate_extended(), dsa_dump(), dsa_free(), dsa_get_total_size(), dsa_get_total_size_from_handle(), dsa_pin(), dsa_release_in_place(), dsa_set_size_limit(), dsa_trim(), dsa_unpin(), dshash_delete_key(), dshash_dump(), dshash_find(), dshash_find_or_insert_extended(), dshash_seq_next(), dsm_attach(), dsm_create(), dsm_detach(), dsm_pin_segment(), dsm_unpin_segment(), EnableLogicalDecoding(), ensure_active_superblock(), entry_reset(), ExecParallelHashMergeCounters(), ExecParallelHashPopChunkQueue(), ExecParallelHashTupleAlloc(), ExecParallelHashTuplePrealloc(), ExpireAllKnownAssignedTransactionIds(), ExpireOldKnownAssignedTransactionIds(), ExpireTreeKnownAssignedTransactionIds(), ExtendBufferedRelShared(), ExtendCLOG(), ExtendCommitTs(), ExtendMultiXactMember(), ExtendMultiXactOffset(), ExtendSUBTRANS(), FastPathGetRelationLockEntry(), FastPathTransferRelationLocks(), FindAndDropRelationBuffers(), FindDeletedTupleInLocalRel(), FinishPreparedTransaction(), ForceTransactionIdLimitUpdate(), ForwardSyncRequest(), FreeWorkerInfo(), get_local_synced_slots(), get_val_in_shmem(), get_xid_status(), GetBackgroundWorkerPid(), GetBackgroundWorkerTypeByPid(), GetBlockerStatusData(), GetConflictingVirtualXIDs(), GetCurrentVirtualXIDs(), GetLastImportantRecPtr(), GetLastSegSwitchData(), GetLatestCommitTsData(), GetLeaderApplyWorkerPid(), GetLockConflicts(), GetLockStatusData(), GetMultiXactIdMembers(), GetMultiXactInfo(), GetNewMultiXactId(), GetNewObjectId(), GetNewTransactionId(), GetOldestActiveTransactionId(), GetOldestMultiXactId(), GetOldestRestartPoint(), GetOldestSafeDecodingTransactionId(), GetOldestUnsummarizedLSN(), GetPredicateLockStatusData(), GetPreparedTransactionList(), GetRunningTransactionData(), GetRunningTransactionLocks(), GetSafeSnapshot(), GetSafeSnapshotBlockingPids(), GetSerializableTransactionSnapshotInt(), GetSnapshotData(), 
GetStrictOldestNonRemovableTransactionId(), GetVirtualXIDsDelayingChkpt(), GetWaitEventCustomIdentifier(), GetWaitEventCustomNames(), GetWalSummarizerState(), HaveVirtualXIDsDelayingChkpt(), init_conflict_slot_xmin(), init_dsm_registry(), initGlobalChannelTable(), InitWalSender(), InjectionPointAttach(), InjectionPointDetach(), InjectionPointList(), InstallXLogFileSegment(), InvalidateBuffer(), InvalidateObsoleteReplicationSlots(), InvalidatePossiblyObsoleteSlot(), InvalidateVictimBuffer(), IoWorkerMain(), IsInstallXLogFileSegmentActive(), IsLogicalDecodingEnabled(), IsXLogLogicalInfoEnabled(), KnownAssignedXidsCompress(), KnownAssignedXidsReset(), launcher_exit(), lock_twophase_recover(), LockAcquireExtended(), LockErrorCleanup(), LockGXact(), LockHasWaiters(), LockRefindAndRelease(), LockRelease(), LockReleaseAll(), LockWaiterCount(), logicalrep_launcher_attach_dshmem(), logicalrep_pa_worker_stop(), logicalrep_reset_seqsync_start_time(), logicalrep_worker_attach(), logicalrep_worker_detach(), logicalrep_worker_launch(), logicalrep_worker_stop(), logicalrep_worker_stop_internal(), logicalrep_worker_wakeup(), logicalrep_workers_find(), LookupGXact(), LookupGXactBySubid(), MarkAsPrepared(), MarkAsPreparing(), multixact_redo(), MultiXactAdvanceNextMXact(), MultiXactGetCheckptMulti(), MultiXactIdSetOldestMember(), MultiXactIdSetOldestVisible(), MultiXactSetNextMXact(), PageIsPredicateLocked(), perform_relmap_update(), pg_control_checkpoint(), pg_control_init(), pg_control_recovery(), pg_control_system(), pg_create_advice_stash(), pg_drop_advice_stash(), pg_get_replication_slots(), pg_get_shmem_allocations(), pg_get_shmem_allocations_numa(), pg_notification_queue_usage(), pg_set_stashed_advice(), pg_show_replication_origin_status(), pg_start_stash_advice_worker(), pg_stash_advice_worker_main(), pg_stat_get_subscription(), pg_stat_statements_internal(), pg_xact_status(), pgaio_worker_can_timeout(), pgaio_worker_die(), pgaio_worker_register(), pgsa_attach(), 
pgsa_detach_shmem(), pgsa_read_from_disk(), pgsa_restore_entries(), pgsa_restore_stashes(), pgss_store(), pgstat_archiver_reset_all_cb(), pgstat_archiver_snapshot_cb(), pgstat_bgwriter_reset_all_cb(), pgstat_bgwriter_snapshot_cb(), pgstat_build_snapshot(), pgstat_checkpointer_reset_all_cb(), pgstat_checkpointer_snapshot_cb(), pgstat_fetch_replslot(), pgstat_io_flush_cb(), pgstat_io_reset_all_cb(), pgstat_io_snapshot_cb(), pgstat_lock_entry(), pgstat_lock_entry_shared(), pgstat_lock_flush_cb(), pgstat_lock_reset_all_cb(), pgstat_lock_snapshot_cb(), pgstat_reset_matching_entries(), pgstat_reset_replslot(), pgstat_reset_slru_counter_internal(), pgstat_slru_flush_cb(), pgstat_slru_snapshot_cb(), pgstat_wal_flush_cb(), pgstat_wal_reset_all_cb(), pgstat_wal_snapshot_cb(), PostPrepare_Locks(), PostPrepare_MultiXact(), PostPrepare_Twophase(), PreCommit_CheckForSerializationFailure(), PreCommit_Notify(), predicatelock_twophase_recover(), PredicateLockPageSplit(), PredicateLockShmemInit(), PredicateLockTwoPhaseFinish(), PrefetchSharedBuffer(), PrescanPreparedTransactions(), ProcArrayAdd(), ProcArrayApplyRecoveryInfo(), ProcArrayApplyXidAssignment(), ProcArrayClearTransaction(), ProcArrayEndTransaction(), ProcArrayGetReplicationSlotXmin(), ProcArrayGroupClearXid(), ProcArrayInstallImportedXmin(), ProcArrayInstallRestoredXmin(), ProcArrayRemove(), ProcArraySetReplicationSlotXmin(), ProcessDatabase(), ProcessSequencesForSync(), ProcessSingleRelationFork(), ProcessSyncingTablesForApply(), ProcKill(), ProcNumberGetTransactionIds(), ProcSleep(), ReachedEndOfBackup(), read_relmap_file(), ReadMultiXactIdRange(), ReadNextFullTransactionId(), ReadNextMultiXactId(), ReadReplicationSlot(), RecordNewMultiXact(), RecoverPreparedTransactions(), RegisterDynamicBackgroundWorker(), RegisterPredicateLockingXid(), RelationCacheInitFilePreInvalidate(), RelationMapCopy(), RelationMapFinishBootstrap(), ReleaseOneSerializableXact(), ReleasePredicateLocks(), relmap_redo(), RemoveScratchTarget(), 
ReplicationSlotAcquire(), ReplicationSlotCleanup(), ReplicationSlotCreate(), ReplicationSlotDropPtr(), ReplicationSlotName(), ReplicationSlotRelease(), ReplicationSlotReserveWal(), ReplicationSlotsComputeLogicalRestartLSN(), ReplicationSlotsComputeRequiredLSN(), ReplicationSlotsComputeRequiredXmin(), ReplicationSlotsCountDBSlots(), ReplicationSlotsDropDBSlots(), replorigin_advance(), replorigin_get_progress(), replorigin_session_advance(), replorigin_session_get_progress(), replorigin_session_reset_internal(), replorigin_session_setup(), replorigin_state_clear(), RequestDisableLogicalDecoding(), reserve_wal_for_local_slot(), ResetInstallXLogFileSegmentActive(), resize(), RestoreScratchTarget(), restoreTwoPhaseData(), SaveSlotToPath(), SearchNamedReplicationSlot(), SerialAdd(), SerialGetMinConflictCommitSeqNo(), SerialSetActiveSerXmin(), set_indexsafe_procflags(), set_val_in_shmem(), SetCommitTsLimit(), SetDataChecksumsOff(), SetDataChecksumsOn(), SetDataChecksumsOnInProgress(), SetInstallXLogFileSegmentActive(), SetMultiXactIdLimit(), SetNextObjectId(), SetOldestOffset(), SetTransactionIdLimit(), SetXidCommitTsInPage(), SharedInvalBackendInit(), ShmemInitStruct(), SICleanupQueue(), SIGetDataEntries(), SignalBackends(), SignalRecoveryConflict(), SignalRecoveryConflictWithDatabase(), SignalRecoveryConflictWithVirtualXID(), SIInsertDataEntries(), SimpleLruReadPage(), SimpleLruReadPage_ReadOnly(), SimpleLruTruncate(), SimpleLruWaitIO(), SimpleLruWriteAll(), SimpleLruZeroAndWritePage(), SlruDeleteSegment(), SlruInternalWritePage(), SnapBuildInitialSnapshot(), ss_get_location(), StandbyRecoverPreparedTransactions(), StandbySlotsHaveCaughtup(), StartDataChecksumsWorkerLauncher(), StartupDecodingContext(), StartupSUBTRANS(), StartupXLOG(), sts_parallel_scan_next(), SubTransSetParent(), SummarizeOldestCommittedSxact(), SummarizeWAL(), SwitchIntoArchiveRecovery(), synchronize_one_slot(), SyncRepCancelWait(), SyncRepCleanupAtProcExit(), SyncRepReleaseWaiters(), 
SyncRepUpdateSyncStandbysDefined(), SyncRepWaitForLSN(), TablespaceCreateDbspace(), tbm_shared_iterate(), TerminateBackgroundWorker(), TerminateBackgroundWorkersForDatabase(), TerminateOtherDBBackends(), test_custom_stats_fixed_reset_all_cb(), test_custom_stats_fixed_snapshot_cb(), test_custom_stats_fixed_update(), test_slru_page_exists(), test_slru_page_read(), test_slru_page_write(), test_startup_lwlocks(), TransactionGroupUpdateXidStatus(), TransactionIdGetCommitTsData(), TransactionIdIsInProgress(), TransactionIdSetPageStatus(), TransactionTreeSetCommitTsData(), TransferPredicateLocksToNewTarget(), TrimCLOG(), TrimMultiXact(), TruncateMultiXact(), TwoPhaseGetGXact(), TwoPhaseGetOldestXidInCommit(), TwoPhaseGetXidByVirtualXID(), update_cached_xid_range(), update_synced_slots_inactive_since(), UpdateLogicalDecodingStatusEndOfRecovery(), UpdateMinRecoveryPoint(), vac_truncate_clog(), vacuum_rel(), VacuumUpdateCosts(), VirtualXactLock(), VirtualXactLockTableCleanup(), VirtualXactLockTableInsert(), wait_for_table_state_change(), wait_for_worker_state_change(), WaitEventCustomNew(), WaitForAllTransactionsToFinish(), WaitForReplicationWorkerAttach(), WaitForWalSummarization(), wakeupWaiters(), WakeupWalSummarizer(), WALInsertLockAcquire(), WALInsertLockAcquireExclusive(), WalSummarizerMain(), WalSummarizerShutdown(), write_relcache_init_file(), xact_redo(), XidCacheRemoveRunningXids(), xlog_redo(), XLogBackgroundFlush(), and XLogReportParameters().

◆ LWLockAcquireOrWait()

bool LWLockAcquireOrWait ( LWLock lock,
LWLockMode  mode 
)

Definition at line 1378 of file lwlock.c.

1379{
1380 PGPROC *proc = MyProc;
1381 bool mustwait;
1382 int extraWaits = 0;
1383#ifdef LWLOCK_STATS
1385
1387#endif
1388
1390
1391 PRINT_LWDEBUG("LWLockAcquireOrWait", lock, mode);
1392
1393 /* Ensure we will have room to remember the lock */
1395 elog(ERROR, "too many LWLocks taken");
1396
1397 /*
1398 * Lock out cancel/die interrupts until we exit the code section protected
1399 * by the LWLock. This ensures that interrupts will not interfere with
1400 * manipulations of data structures in shared memory.
1401 */
1403
1404 /*
1405 * NB: We're using nearly the same twice-in-a-row lock acquisition
1406 * protocol as LWLockAcquire(). Check its comments for details.
1407 */
1409
1410 if (mustwait)
1411 {
1413
1415
1416 if (mustwait)
1417 {
1418 /*
1419 * Wait until awakened. Like in LWLockAcquire, be prepared for
1420 * bogus wakeups.
1421 */
1422 LOG_LWDEBUG("LWLockAcquireOrWait", lock, "waiting");
1423
1424#ifdef LWLOCK_STATS
1425 lwstats->block_count++;
1426#endif
1427
1431
1432 for (;;)
1433 {
1434 PGSemaphoreLock(proc->sem);
1435 if (proc->lwWaiting == LW_WS_NOT_WAITING)
1436 break;
1437 extraWaits++;
1438 }
1439
1440#ifdef LOCK_DEBUG
1441 {
1442 /* not waiting anymore */
1444
1446 }
1447#endif
1451
1452 LOG_LWDEBUG("LWLockAcquireOrWait", lock, "awakened");
1453 }
1454 else
1455 {
1456 LOG_LWDEBUG("LWLockAcquireOrWait", lock, "acquired, undoing queue");
1457
1458 /*
1459 * Got lock in the second attempt, undo queueing. We need to treat
1460 * this as having successfully acquired the lock, otherwise we'd
1461 * not necessarily wake up people we've prevented from acquiring
1462 * the lock.
1463 */
1464 LWLockDequeueSelf(lock);
1465 }
1466 }
1467
1468 /*
1469 * Fix the process wait semaphore's count for any absorbed wakeups.
1470 */
1471 while (extraWaits-- > 0)
1472 PGSemaphoreUnlock(proc->sem);
1473
1474 if (mustwait)
1475 {
1476 /* Failed to get lock, so release interrupt holdoff */
1478 LOG_LWDEBUG("LWLockAcquireOrWait", lock, "failed");
1481 }
1482 else
1483 {
1484 LOG_LWDEBUG("LWLockAcquireOrWait", lock, "succeeded");
1485 /* Add lock to list of locks held by this backend */
1490 }
1491
1492 return !mustwait;
1493}

References Assert, elog, ERROR, fb(), held_lwlocks, HOLD_INTERRUPTS, LWLockHandle::lock, LOG_LWDEBUG, LW_EXCLUSIVE, LW_SHARED, LW_WAIT_UNTIL_FREE, LW_WS_NOT_WAITING, LWLockAttemptLock(), LWLockDequeueSelf(), LWLockQueueSelf(), LWLockReportWaitEnd(), LWLockReportWaitStart(), PGPROC::lwWaiting, MAX_BACKENDS, MAX_SIMUL_LWLOCKS, LWLockHandle::mode, mode, MyProc, num_held_lwlocks, pg_atomic_fetch_sub_u32(), PG_USED_FOR_ASSERTS_ONLY, PGSemaphoreLock(), PGSemaphoreUnlock(), PRINT_LWDEBUG, RESUME_INTERRUPTS, PGPROC::sem, and T_NAME.

Referenced by XLogFlush().

◆ LWLockAnyHeldByMe()

bool LWLockAnyHeldByMe ( LWLock lock,
int  nlocks,
size_t  stride 
)

Definition at line 1903 of file lwlock.c.

1904{
1905 char *held_lock_addr;
1906 char *begin;
1907 char *end;
1908 int i;
1909
1910 begin = (char *) lock;
1911 end = begin + nlocks * stride;
1912 for (i = 0; i < num_held_lwlocks; i++)
1913 {
1914 held_lock_addr = (char *) held_lwlocks[i].lock;
1915 if (held_lock_addr >= begin &&
1916 held_lock_addr < end &&
1917 (held_lock_addr - begin) % stride == 0)
1918 return true;
1919 }
1920 return false;
1921}

References fb(), held_lwlocks, i, and num_held_lwlocks.

◆ LWLockAttemptLock()

static bool LWLockAttemptLock ( LWLock lock,
LWLockMode  mode 
)
static

Definition at line 764 of file lwlock.c.

765{
767
769
770 /*
771 * Read once outside the loop, later iterations will get the newer value
772 * via compare & exchange.
773 */
775
776 /* loop until we've determined whether we could acquire the lock or not */
777 while (true)
778 {
780 bool lock_free;
781
783
784 if (mode == LW_EXCLUSIVE)
785 {
787 if (lock_free)
789 }
790 else
791 {
793 if (lock_free)
795 }
796
797 /*
798 * Attempt to swap in the state we are expecting. If we didn't see
799 * lock to be free, that's just the old value. If we saw it as free,
800 * we'll attempt to mark it acquired. The reason that we always swap
801 * in the value is that this doubles as a memory barrier. We could try
802 * to be smarter and only swap in values if we saw the lock as free,
803 * but benchmark haven't shown it as beneficial so far.
804 *
805 * Retry if the value changed since we last looked at it.
806 */
809 {
810 if (lock_free)
811 {
812 /* Great! Got the lock. */
813#ifdef LOCK_DEBUG
814 if (mode == LW_EXCLUSIVE)
815 lock->owner = MyProc;
816#endif
817 return false;
818 }
819 else
820 return true; /* somebody else has the lock */
821 }
822 }
824}

References Assert, fb(), LW_EXCLUSIVE, LW_LOCK_MASK, LW_SHARED, LW_VAL_EXCLUSIVE, LW_VAL_SHARED, mode, MyProc, pg_atomic_compare_exchange_u32(), pg_atomic_read_u32(), pg_unreachable, and LWLock::state.

Referenced by LWLockAcquire(), LWLockAcquireOrWait(), and LWLockConditionalAcquire().

◆ LWLockConditionalAcquire()

bool LWLockConditionalAcquire ( LWLock lock,
LWLockMode  mode 
)

Definition at line 1321 of file lwlock.c.

1322{
1323 bool mustwait;
1324
1326
1327 PRINT_LWDEBUG("LWLockConditionalAcquire", lock, mode);
1328
1329 /* Ensure we will have room to remember the lock */
1331 elog(ERROR, "too many LWLocks taken");
1332
1333 /*
1334 * Lock out cancel/die interrupts until we exit the code section protected
1335 * by the LWLock. This ensures that interrupts will not interfere with
1336 * manipulations of data structures in shared memory.
1337 */
1339
1340 /* Check for the lock */
1342
1343 if (mustwait)
1344 {
1345 /* Failed to get lock, so release interrupt holdoff */
1347
1348 LOG_LWDEBUG("LWLockConditionalAcquire", lock, "failed");
1351 }
1352 else
1353 {
1354 /* Add lock to list of locks held by this backend */
1359 }
1360 return !mustwait;
1361}

References Assert, elog, ERROR, fb(), held_lwlocks, HOLD_INTERRUPTS, LWLockHandle::lock, LOG_LWDEBUG, LW_EXCLUSIVE, LW_SHARED, LWLockAttemptLock(), MAX_SIMUL_LWLOCKS, LWLockHandle::mode, mode, num_held_lwlocks, PRINT_LWDEBUG, RESUME_INTERRUPTS, and T_NAME.

Referenced by pgaio_worker_submit(), pgstat_io_flush_cb(), pgstat_lock_entry(), pgstat_lock_entry_shared(), pgstat_lock_flush_cb(), pgstat_slru_flush_cb(), pgstat_wal_flush_cb(), ProcArrayEndTransaction(), SimpleLruWaitIO(), ss_report_location(), TransactionIdSetPageStatus(), and XLogNeedsFlush().

◆ LWLockConflictsWithVar()

static bool LWLockConflictsWithVar ( LWLock lock,
pg_atomic_uint64 valptr,
uint64  oldval,
uint64 newval,
bool result 
)
static

Definition at line 1505 of file lwlock.c.

1507{
1508 bool mustwait;
1509 uint64 value;
1510
1511 /*
1512 * Test first to see if the slot is free right now.
1513 *
1514 * XXX: the unique caller of this routine, WaitXLogInsertionsToFinish()
1515 * via LWLockWaitForVar(), uses an implied barrier with a spinlock before
1516 * this, so we don't need a memory barrier here as far as the current
1517 * usage is concerned. But that might not be safe in general.
1518 */
1520
1521 if (!mustwait)
1522 {
1523 *result = true;
1524 return false;
1525 }
1526
1527 *result = false;
1528
1529 /*
1530 * Reading this value atomically is safe even on platforms where uint64
1531 * cannot be read without observing a torn value.
1532 */
1534
1535 if (value != oldval)
1536 {
1537 mustwait = false;
1538 *newval = value;
1539 }
1540 else
1541 {
1542 mustwait = true;
1543 }
1544
1545 return mustwait;
1546}

References fb(), LW_VAL_EXCLUSIVE, newval, pg_atomic_read_u32(), pg_atomic_read_u64(), result, LWLock::state, and value.

Referenced by LWLockWaitForVar().

◆ LWLockDequeueSelf()

static void LWLockDequeueSelf ( LWLock lock)
static

Definition at line 1061 of file lwlock.c.

1062{
1063 bool on_waitlist;
1064
1065#ifdef LWLOCK_STATS
1067
1069
1070 lwstats->dequeue_self_count++;
1071#endif
1072
1073 LWLockWaitListLock(lock);
1074
1075 /*
1076 * Remove ourselves from the waitlist, unless we've already been removed.
1077 * The removal happens with the wait list lock held, so there's no race in
1078 * this check.
1079 */
1081 if (on_waitlist)
1082 proclist_delete(&lock->waiters, MyProcNumber, lwWaitLink);
1083
1084 if (proclist_is_empty(&lock->waiters) &&
1086 {
1088 }
1089
1090 /* XXX: combine with fetch_and above? */
1092
1093 /* clear waiting state again, nice for debugging */
1094 if (on_waitlist)
1096 else
1097 {
1098 int extraWaits = 0;
1099
1100 /*
1101 * Somebody else dequeued us and has or will wake us up. Deal with the
1102 * superfluous absorption of a wakeup.
1103 */
1104
1105 /*
1106 * Clear LW_FLAG_WAKE_IN_PROGRESS if somebody woke us before we
1107 * removed ourselves - they'll have set it.
1108 */
1110
1111 /*
1112 * Now wait for the scheduled wakeup, otherwise our ->lwWaiting would
1113 * get reset at some inconvenient point later. Most of the time this
1114 * will immediately return.
1115 */
1116 for (;;)
1117 {
1120 break;
1121 extraWaits++;
1122 }
1123
1124 /*
1125 * Fix the process wait semaphore's count for any absorbed wakeups.
1126 */
1127 while (extraWaits-- > 0)
1129 }
1130
1131#ifdef LOCK_DEBUG
1132 {
1133 /* not waiting anymore */
1135
1137 }
1138#endif
1139}

References Assert, fb(), LW_FLAG_HAS_WAITERS, LW_FLAG_WAKE_IN_PROGRESS, LW_WS_NOT_WAITING, LW_WS_WAITING, LWLockWaitListLock(), LWLockWaitListUnlock(), PGPROC::lwWaiting, MAX_BACKENDS, MyProc, MyProcNumber, pg_atomic_fetch_and_u32(), pg_atomic_fetch_sub_u32(), pg_atomic_read_u32(), PG_USED_FOR_ASSERTS_ONLY, PGSemaphoreLock(), PGSemaphoreUnlock(), proclist_delete, proclist_is_empty(), PGPROC::sem, LWLock::state, and LWLock::waiters.

Referenced by LWLockAcquire(), LWLockAcquireOrWait(), and LWLockWaitForVar().

◆ LWLockHeldByMe()

◆ LWLockHeldByMeInMode()

◆ LWLockInitialize()

◆ LWLockNewTrancheId()

int LWLockNewTrancheId ( const char name)

Definition at line 562 of file lwlock.c.

563{
564 int idx;
565
566 if (!name)
569 errmsg("tranche name cannot be NULL")));
570
571 if (strlen(name) >= NAMEDATALEN)
574 errmsg("tranche name too long"),
575 errdetail("LWLock tranche names must be no longer than %d bytes.",
576 NAMEDATALEN - 1)));
577
578 /* The counter and the tranche names are protected by the spinlock */
580
582 {
585 (errmsg("maximum number of tranches already registered"),
586 errdetail("No more than %d tranches may be registered.",
588 }
589
590 /* Allocate an entry in the user_defined array */
592
593 /* update our local copy while we're at it */
595
596 /* Initialize it */
598
599 /* the locks are not in the main array */
601
603
605}

References ereport, errcode(), errdetail(), errmsg, ERROR, fb(), idx(), LocalNumUserDefinedTranches, LWLockTrancheShmemData::lock, LWLockTranches, LWTRANCHE_FIRST_USER_DEFINED, LWLockTrancheShmemData::main_array_idx, MAX_USER_DEFINED_TRANCHES, LWLockTrancheShmemData::name, name, NAMEDATALEN, LWLockTrancheShmemData::num_user_defined, SpinLockAcquire(), SpinLockRelease(), strlcpy(), and LWLockTrancheShmemData::user_defined.

Referenced by apw_init_state(), GetNamedDSA(), GetNamedDSHash(), init_tdr_dsm(), init_tranche(), pgsa_init_shared_state(), pgss_shmem_init(), shmem_slru_init(), test_basic(), test_create(), test_empty(), test_lwlock_tranche_create(), and test_random().

◆ LWLockQueueSelf()

static void LWLockQueueSelf ( LWLock lock,
LWLockMode  mode 
)
static

Definition at line 1018 of file lwlock.c.

1019{
1020 /*
1021 * If we don't have a PGPROC structure, there's no way to wait. This
1022 * should never occur, since MyProc should only be null during shared
1023 * memory initialization.
1024 */
1025 if (MyProc == NULL)
1026 elog(PANIC, "cannot wait without a PGPROC structure");
1027
1029 elog(PANIC, "queueing for lock while waiting on another one");
1030
1031 LWLockWaitListLock(lock);
1032
1033 /* setting the flag is protected by the spinlock */
1035
1038
1039 /* LW_WAIT_UNTIL_FREE waiters are always at the front of the queue */
1040 if (mode == LW_WAIT_UNTIL_FREE)
1041 proclist_push_head(&lock->waiters, MyProcNumber, lwWaitLink);
1042 else
1043 proclist_push_tail(&lock->waiters, MyProcNumber, lwWaitLink);
1044
1045 /* Can release the mutex now */
1047
1048#ifdef LOCK_DEBUG
1049 pg_atomic_fetch_add_u32(&lock->nwaiters, 1);
1050#endif
1051}

References elog, fb(), LW_FLAG_HAS_WAITERS, LW_WAIT_UNTIL_FREE, LW_WS_NOT_WAITING, LW_WS_WAITING, LWLockWaitListLock(), LWLockWaitListUnlock(), PGPROC::lwWaiting, PGPROC::lwWaitMode, mode, MyProc, MyProcNumber, PANIC, pg_atomic_fetch_add_u32(), pg_atomic_fetch_or_u32(), proclist_push_head, proclist_push_tail, LWLock::state, and LWLock::waiters.

Referenced by LWLockAcquire(), LWLockAcquireOrWait(), and LWLockWaitForVar().

◆ LWLockRelease()

void LWLockRelease ( LWLock lock)

Definition at line 1767 of file lwlock.c.

1768{
1771 bool check_waiters;
1772 int i;
1773
1774 /*
1775 * Remove lock from list of locks held. Usually, but not always, it will
1776 * be the latest-acquired lock; so search array backwards.
1777 */
1778 for (i = num_held_lwlocks; --i >= 0;)
1779 if (lock == held_lwlocks[i].lock)
1780 break;
1781
1782 if (i < 0)
1783 elog(ERROR, "lock %s is not held", T_NAME(lock));
1784
1786
1788 for (; i < num_held_lwlocks; i++)
1789 held_lwlocks[i] = held_lwlocks[i + 1];
1790
1791 PRINT_LWDEBUG("LWLockRelease", lock, mode);
1792
1793 /*
1794 * Release my hold on lock, after that it can immediately be acquired by
1795 * others, even if we still have to wakeup other waiters.
1796 */
1797 if (mode == LW_EXCLUSIVE)
1799 else
1801
1802 /* nobody else can have that kind of lock */
1804
1807
1808 /*
1809 * Check if we're still waiting for backends to get scheduled, if so,
1810 * don't wake them up again.
1811 */
1812 if ((oldstate & LW_FLAG_HAS_WAITERS) &&
1814 (oldstate & LW_LOCK_MASK) == 0)
1815 check_waiters = true;
1816 else
1817 check_waiters = false;
1818
1819 /*
1820 * As waking up waiters requires the spinlock to be acquired, only do so
1821 * if necessary.
1822 */
1823 if (check_waiters)
1824 {
1825 /* XXX: remove before commit? */
1826 LOG_LWDEBUG("LWLockRelease", lock, "releasing waiters");
1827 LWLockWakeup(lock);
1828 }
1829
1830 /*
1831 * Now okay to allow cancel/die interrupts.
1832 */
1834}

References Assert, elog, ERROR, fb(), held_lwlocks, i, LOG_LWDEBUG, LW_EXCLUSIVE, LW_FLAG_HAS_WAITERS, LW_FLAG_WAKE_IN_PROGRESS, LW_LOCK_MASK, LW_VAL_EXCLUSIVE, LW_VAL_SHARED, LWLockWakeup(), LWLockHandle::mode, mode, num_held_lwlocks, pg_atomic_sub_fetch_u32(), PRINT_LWDEBUG, RESUME_INTERRUPTS, LWLock::state, and T_NAME.

Referenced by _bt_end_vacuum(), _bt_parallel_done(), _bt_parallel_primscan_schedule(), _bt_parallel_release(), _bt_parallel_seize(), _bt_start_vacuum(), _bt_vacuum_cycleid(), abort_logical_decoding_activation(), AbsorbSyncRequests(), ActivateCommitTs(), addLSNWaiter(), AdvanceNextFullTransactionIdPastXid(), AdvanceOldestClogXid(), AdvanceOldestCommitTsXid(), AdvanceXLInsertBuffer(), alloc_object(), AlterSystemSetConfigFile(), ApplyLauncherMain(), apw_detach_shmem(), apw_dump_now(), apw_load_buffers(), AsyncNotifyFreezeXids(), asyncQueueAddEntries(), asyncQueueAdvanceTail(), asyncQueueProcessPageEntries(), asyncQueueReadAllNotifications(), asyncQueueUnregister(), AtAbort_Twophase(), AtEOXact_LogicalRepWorkers(), AtPrepare_PredicateLocks(), attach_internal(), autoprewarm_main(), autoprewarm_start_worker(), AutoVacLauncherMain(), AutoVacuumRequestWork(), AutoVacWorkerMain(), BackendPidGetProc(), BackendXidGetPid(), BecomeLockGroupLeader(), BecomeLockGroupMember(), BecomeRegisteredListener(), btparallelrescan(), BufferAlloc(), CallShmemCallbacksAfterStartup(), check_for_freed_segments(), CheckDeadLock(), CheckForSerializableConflictOut(), CheckLogicalSlotExists(), CheckPointPredicate(), CheckPointRelationMap(), CheckPointReplicationOrigin(), CheckPointReplicationSlots(), CheckPointTwoPhase(), CheckTableForSerializableConflictIn(), CheckTargetForConflictsIn(), choose_next_subplan_for_leader(), choose_next_subplan_for_worker(), CleanupInvalidationState(), ClearOldPredicateLocks(), ComputeXidHorizons(), consume_xids_shortcut(), copy_replication_slot(), CountDBBackends(), CountDBConnections(), CountOtherDBBackends(), CountUserBackends(), CreateCheckPoint(), CreateEndOfRecoveryRecord(), CreateInitDecodingContext(), CreatePredicateLock(), CreateRestartPoint(), DataChecksumsWorkerLauncherMain(), DataChecksumsWorkerMain(), DeactivateCommitTs(), DeleteChildTargetLocks(), DeleteLockTarget(), deleteLSNWaiter(), destroy_superblock(), DisableLogicalDecoding(), 
DisableLogicalDecodingIfNecessary(), do_autovacuum(), do_pg_backup_start(), do_pg_backup_stop(), do_start_worker(), DropAllPredicateLocksFromTable(), DropTableSpace(), dsa_allocate_extended(), dsa_dump(), dsa_free(), dsa_get_total_size(), dsa_get_total_size_from_handle(), dsa_pin(), dsa_release_in_place(), dsa_set_size_limit(), dsa_trim(), dsa_unpin(), dshash_delete_entry(), dshash_delete_key(), dshash_dump(), dshash_find(), dshash_find_or_insert_extended(), dshash_release_lock(), dshash_seq_next(), dshash_seq_term(), dsm_attach(), dsm_create(), dsm_detach(), dsm_pin_segment(), dsm_unpin_segment(), EnableLogicalDecoding(), ensure_active_superblock(), entry_reset(), ExecParallelHashMergeCounters(), ExecParallelHashPopChunkQueue(), ExecParallelHashTupleAlloc(), ExecParallelHashTuplePrealloc(), ExpireAllKnownAssignedTransactionIds(), ExpireOldKnownAssignedTransactionIds(), ExpireTreeKnownAssignedTransactionIds(), ExtendBufferedRelShared(), ExtendCLOG(), ExtendCommitTs(), ExtendMultiXactMember(), ExtendMultiXactOffset(), ExtendSUBTRANS(), FastPathGetRelationLockEntry(), FastPathTransferRelationLocks(), find_multixact_start(), FindAndDropRelationBuffers(), FindDeletedTupleInLocalRel(), FinishPreparedTransaction(), ForceTransactionIdLimitUpdate(), ForwardSyncRequest(), FreeWorkerInfo(), get_local_synced_slots(), get_val_in_shmem(), get_xid_status(), GetBackgroundWorkerPid(), GetBackgroundWorkerTypeByPid(), GetBlockerStatusData(), GetConflictingVirtualXIDs(), GetCurrentVirtualXIDs(), GetLastImportantRecPtr(), GetLastSegSwitchData(), GetLatestCommitTsData(), GetLeaderApplyWorkerPid(), GetLockConflicts(), GetLockStatusData(), GetMultiXactIdMembers(), GetMultiXactInfo(), GetNewMultiXactId(), GetNewObjectId(), GetNewTransactionId(), GetOldestActiveTransactionId(), GetOldestMultiXactId(), GetOldestRestartPoint(), GetOldestSafeDecodingTransactionId(), GetOldestUnsummarizedLSN(), GetPredicateLockStatusData(), GetPreparedTransactionList(), GetRunningTransactionLocks(), 
GetSafeSnapshot(), GetSafeSnapshotBlockingPids(), GetSerializableTransactionSnapshotInt(), GetSnapshotData(), GetStrictOldestNonRemovableTransactionId(), GetVirtualXIDsDelayingChkpt(), GetWaitEventCustomIdentifier(), GetWaitEventCustomNames(), GetWalSummarizerState(), HaveVirtualXIDsDelayingChkpt(), init_conflict_slot_xmin(), init_dsm_registry(), initGlobalChannelTable(), InitWalSender(), InjectionPointAttach(), InjectionPointDetach(), InjectionPointList(), InstallXLogFileSegment(), InvalidateBuffer(), InvalidateObsoleteReplicationSlots(), InvalidatePossiblyObsoleteSlot(), InvalidateVictimBuffer(), IoWorkerMain(), IsInstallXLogFileSegmentActive(), IsLogicalDecodingEnabled(), IsXLogLogicalInfoEnabled(), KnownAssignedXidsCompress(), KnownAssignedXidsReset(), launcher_exit(), lock_twophase_recover(), LockAcquireExtended(), LockErrorCleanup(), LockGXact(), LockHasWaiters(), LockRefindAndRelease(), LockRelease(), LockReleaseAll(), LockWaiterCount(), logicalrep_launcher_attach_dshmem(), logicalrep_pa_worker_stop(), logicalrep_reset_seqsync_start_time(), logicalrep_worker_attach(), logicalrep_worker_detach(), logicalrep_worker_launch(), logicalrep_worker_stop(), logicalrep_worker_stop_internal(), logicalrep_worker_wakeup(), logicalrep_workers_find(), LogStandbySnapshot(), LookupGXact(), LookupGXactBySubid(), LWLockReleaseAll(), LWLockReleaseClearVar(), MarkAsPrepared(), MarkAsPreparing(), multixact_redo(), MultiXactAdvanceNextMXact(), MultiXactGetCheckptMulti(), MultiXactIdSetOldestMember(), MultiXactIdSetOldestVisible(), MultiXactSetNextMXact(), OnConflict_CheckForSerializationFailure(), PageIsPredicateLocked(), perform_relmap_update(), pg_control_checkpoint(), pg_control_init(), pg_control_recovery(), pg_control_system(), pg_create_advice_stash(), pg_drop_advice_stash(), pg_get_replication_slots(), pg_get_shmem_allocations(), pg_get_shmem_allocations_numa(), pg_notification_queue_usage(), pg_set_stashed_advice(), pg_show_replication_origin_status(), 
pg_start_stash_advice_worker(), pg_stash_advice_worker_main(), pg_stat_get_subscription(), pg_stat_statements_internal(), pg_xact_status(), pgaio_worker_can_timeout(), pgaio_worker_die(), pgaio_worker_register(), pgaio_worker_submit(), pgsa_attach(), pgsa_detach_shmem(), pgsa_read_from_disk(), pgsa_restore_entries(), pgsa_restore_stashes(), pgss_store(), pgstat_archiver_reset_all_cb(), pgstat_archiver_snapshot_cb(), pgstat_bgwriter_reset_all_cb(), pgstat_bgwriter_snapshot_cb(), pgstat_build_snapshot(), pgstat_checkpointer_reset_all_cb(), pgstat_checkpointer_snapshot_cb(), pgstat_fetch_replslot(), pgstat_io_flush_cb(), pgstat_io_reset_all_cb(), pgstat_io_snapshot_cb(), pgstat_lock_flush_cb(), pgstat_lock_reset_all_cb(), pgstat_lock_snapshot_cb(), pgstat_reset_matching_entries(), pgstat_reset_replslot(), pgstat_reset_slru_counter_internal(), pgstat_slru_flush_cb(), pgstat_slru_snapshot_cb(), pgstat_unlock_entry(), pgstat_wal_flush_cb(), pgstat_wal_reset_all_cb(), pgstat_wal_snapshot_cb(), PostPrepare_Locks(), PostPrepare_MultiXact(), PostPrepare_Twophase(), PreCommit_CheckForSerializationFailure(), PreCommit_Notify(), predicatelock_twophase_recover(), PredicateLockPageSplit(), PredicateLockShmemInit(), PredicateLockTwoPhaseFinish(), PrefetchSharedBuffer(), PrescanPreparedTransactions(), ProcArrayAdd(), ProcArrayApplyRecoveryInfo(), ProcArrayApplyXidAssignment(), ProcArrayClearTransaction(), ProcArrayEndTransaction(), ProcArrayGetReplicationSlotXmin(), ProcArrayGroupClearXid(), ProcArrayInstallImportedXmin(), ProcArrayInstallRestoredXmin(), ProcArrayRemove(), ProcArraySetReplicationSlotXmin(), ProcessDatabase(), ProcessSequencesForSync(), ProcessSingleRelationFork(), ProcessSyncingTablesForApply(), ProcKill(), ProcNumberGetTransactionIds(), ProcSleep(), ReachedEndOfBackup(), read_relmap_file(), ReadMultiXactIdRange(), ReadNextFullTransactionId(), ReadNextMultiXactId(), ReadReplicationSlot(), RecordNewMultiXact(), RecoverPreparedTransactions(), 
RegisterDynamicBackgroundWorker(), RegisterPredicateLockingXid(), RelationCacheInitFilePostInvalidate(), RelationMapCopy(), RelationMapFinishBootstrap(), ReleaseOneSerializableXact(), ReleasePredicateLocks(), relmap_redo(), RemoveScratchTarget(), ReplicationSlotAcquire(), ReplicationSlotCleanup(), ReplicationSlotCreate(), ReplicationSlotDropPtr(), ReplicationSlotName(), ReplicationSlotRelease(), ReplicationSlotReserveWal(), ReplicationSlotsComputeLogicalRestartLSN(), ReplicationSlotsComputeRequiredLSN(), ReplicationSlotsComputeRequiredXmin(), ReplicationSlotsCountDBSlots(), ReplicationSlotsDropDBSlots(), replorigin_advance(), replorigin_get_progress(), replorigin_session_advance(), replorigin_session_get_progress(), replorigin_session_reset_internal(), replorigin_session_setup(), replorigin_state_clear(), RequestDisableLogicalDecoding(), reserve_wal_for_local_slot(), ResetInstallXLogFileSegmentActive(), resize(), RestoreScratchTarget(), restoreTwoPhaseData(), SaveSlotToPath(), SearchNamedReplicationSlot(), SerialAdd(), SerialGetMinConflictCommitSeqNo(), SerialSetActiveSerXmin(), set_indexsafe_procflags(), set_val_in_shmem(), SetCommitTsLimit(), SetDataChecksumsOff(), SetDataChecksumsOn(), SetDataChecksumsOnInProgress(), SetInstallXLogFileSegmentActive(), SetMultiXactIdLimit(), SetNextObjectId(), SetOldestOffset(), SetTransactionIdLimit(), SetXidCommitTsInPage(), SharedInvalBackendInit(), ShmemInitStruct(), SICleanupQueue(), SIGetDataEntries(), SignalBackends(), SignalRecoveryConflict(), SignalRecoveryConflictWithDatabase(), SignalRecoveryConflictWithVirtualXID(), SIInsertDataEntries(), SimpleLruReadPage(), SimpleLruReadPage_ReadOnly(), SimpleLruTruncate(), SimpleLruWaitIO(), SimpleLruWriteAll(), SimpleLruZeroAndWritePage(), SlruDeleteSegment(), SlruInternalWritePage(), SnapBuildInitialSnapshot(), ss_get_location(), ss_report_location(), StandbyRecoverPreparedTransactions(), StandbySlotsHaveCaughtup(), StartDataChecksumsWorkerLauncher(), StartupDecodingContext(), 
StartupSUBTRANS(), StartupXLOG(), sts_parallel_scan_next(), SubTransGetParent(), SubTransSetParent(), SummarizeOldestCommittedSxact(), SummarizeWAL(), SwitchIntoArchiveRecovery(), synchronize_one_slot(), SyncRepCancelWait(), SyncRepCleanupAtProcExit(), SyncRepReleaseWaiters(), SyncRepUpdateSyncStandbysDefined(), SyncRepWaitForLSN(), TablespaceCreateDbspace(), tbm_shared_iterate(), TerminateBackgroundWorker(), TerminateBackgroundWorkersForDatabase(), TerminateOtherDBBackends(), test_custom_stats_fixed_reset_all_cb(), test_custom_stats_fixed_snapshot_cb(), test_custom_stats_fixed_update(), test_slru_page_exists(), test_slru_page_read(), test_slru_page_readonly(), test_slru_page_write(), test_startup_lwlocks(), TransactionGroupUpdateXidStatus(), TransactionIdGetCommitTsData(), TransactionIdGetStatus(), TransactionIdIsInProgress(), TransactionIdSetPageStatus(), TransactionTreeSetCommitTsData(), TransferPredicateLocksToNewTarget(), TrimCLOG(), TrimMultiXact(), TruncateMultiXact(), TwoPhaseGetGXact(), TwoPhaseGetOldestXidInCommit(), TwoPhaseGetXidByVirtualXID(), update_cached_xid_range(), update_synced_slots_inactive_since(), UpdateLogicalDecodingStatusEndOfRecovery(), UpdateMinRecoveryPoint(), vac_truncate_clog(), vacuum_rel(), VacuumUpdateCosts(), VirtualXactLock(), VirtualXactLockTableCleanup(), VirtualXactLockTableInsert(), wait_for_table_state_change(), wait_for_worker_state_change(), WaitEventCustomNew(), WaitForAllTransactionsToFinish(), WaitForReplicationWorkerAttach(), WaitForWalSummarization(), wakeupWaiters(), WakeupWalSummarizer(), WalSummarizerMain(), WalSummarizerShutdown(), write_relcache_init_file(), xact_redo(), XidCacheRemoveRunningXids(), xlog_redo(), XLogBackgroundFlush(), XLogFlush(), XLogNeedsFlush(), and XLogReportParameters().

◆ LWLockReleaseAll()

◆ LWLockReleaseClearVar()

void LWLockReleaseClearVar ( LWLock * lock,
pg_atomic_uint64 * valptr,
uint64  val 
)

Definition at line 1840 of file lwlock.c.

1841{
1842 /*
1843 * Note that pg_atomic_exchange_u64 is a full barrier, so we're guaranteed
1844 * that the variable is updated before releasing the lock.
1845 */
1847
1848 LWLockRelease(lock);
1849}

References fb(), LWLockRelease(), pg_atomic_exchange_u64(), and val.

Referenced by WALInsertLockRelease().

◆ LWLockReportWaitEnd()

static void LWLockReportWaitEnd ( void  )
inlinestatic

Definition at line 700 of file lwlock.c.

701{
703}

References pgstat_report_wait_end().

Referenced by LWLockAcquire(), LWLockAcquireOrWait(), and LWLockWaitForVar().

◆ LWLockReportWaitStart()

static void LWLockReportWaitStart ( LWLock * lock)
inlinestatic

◆ LWLockShmemInit()

static void LWLockShmemInit ( void * arg)
static

Definition at line 445 of file lwlock.c.

446{
447 int pos;
448
449 /* Initialize the dynamic-allocation counter for tranches */
451
453
454 /*
455 * Allocate and initialize all LWLocks in the main array. It includes all
456 * LWLocks for built-in tranches and those requested with
457 * RequestNamedLWLockTranche().
458 */
459 pos = 0;
460
461 /* Initialize all individual LWLocks in main array */
462 for (int id = 0; id < NUM_INDIVIDUAL_LWLOCKS; id++)
463 LWLockInitialize(&MainLWLockArray[pos++].lock, id);
464
465 /* Initialize buffer mapping LWLocks in main array */
467 for (int i = 0; i < NUM_BUFFER_PARTITIONS; i++)
469
470 /* Initialize lmgrs' LWLocks in main array */
472 for (int i = 0; i < NUM_LOCK_PARTITIONS; i++)
474
475 /* Initialize predicate lmgrs' LWLocks in main array */
477 for (int i = 0; i < NUM_PREDICATELOCK_PARTITIONS; i++)
479
480 /*
481 * Copy the info about any user-defined tranches into shared memory (so
482 * that other processes can see it), and initialize the requested LWLocks.
483 */
486 {
488
490 request->tranche_name,
493
494 for (int i = 0; i < request->num_lwlocks; i++)
496 }
497
498 /* Cross-check that we agree on the total size with LWLockShmemRequest() */
500}

References Assert, BUFFER_MAPPING_LWLOCK_OFFSET, fb(), foreach_ptr, i, idx(), LWLockTrancheShmemData::lock, LOCK_MANAGER_LWLOCK_OFFSET, LWLockInitialize(), LWLockTranches, LWTRANCHE_FIRST_USER_DEFINED, LWLockTrancheShmemData::main_array_idx, MainLWLockArray, LWLockTrancheShmemData::name, NAMEDATALEN, NamedLWLockTrancheRequests, NUM_BUFFER_PARTITIONS, NUM_FIXED_LWLOCKS, NUM_LOCK_PARTITIONS, num_main_array_locks, NUM_PREDICATELOCK_PARTITIONS, LWLockTrancheShmemData::num_user_defined, PREDICATELOCK_MANAGER_LWLOCK_OFFSET, SpinLockInit(), strlcpy(), and LWLockTrancheShmemData::user_defined.

◆ LWLockShmemRequest()

static void LWLockShmemRequest ( void * arg)
static

Definition at line 416 of file lwlock.c.

417{
418 size_t size;
419
420 /* Space for user-defined tranches */
421 ShmemRequestStruct(.name = "LWLock tranches",
422 .size = sizeof(LWLockTrancheShmemData),
423 .ptr = (void **) &LWLockTranches,
424 );
425
426 /* Space for the LWLock array */
428 {
430 size = num_main_array_locks * sizeof(LWLockPadded);
431 }
432 else
434
435 ShmemRequestStruct(.name = "Main LWLock array",
436 .size = size,
437 .ptr = (void **) &MainLWLockArray,
438 );
439}

References IsUnderPostmaster, LWLockTranches, MainLWLockArray, name, NUM_FIXED_LWLOCKS, num_main_array_locks, NumLWLocksForNamedTranches(), SHMEM_ATTACH_UNKNOWN_SIZE, and ShmemRequestStruct.

◆ LWLockUpdateVar()

void LWLockUpdateVar ( LWLock * lock,
pg_atomic_uint64 * valptr,
uint64  val 
)

Definition at line 1702 of file lwlock.c.

1703{
1706
1707 PRINT_LWDEBUG("LWLockUpdateVar", lock, LW_EXCLUSIVE);
1708
1709 /*
1710 * Note that pg_atomic_exchange_u64 is a full barrier, so we're guaranteed
1711 * that the variable is updated before waking up waiters.
1712 */
1714
1716
1717 LWLockWaitListLock(lock);
1718
1720
1721 /*
1722 * See if there are any LW_WAIT_UNTIL_FREE waiters that need to be woken
1723 * up. They are always in the front of the queue.
1724 */
1725 proclist_foreach_modify(iter, &lock->waiters, lwWaitLink)
1726 {
1727 PGPROC *waiter = GetPGProcByNumber(iter.cur);
1728
1729 if (waiter->lwWaitMode != LW_WAIT_UNTIL_FREE)
1730 break;
1731
1732 proclist_delete(&lock->waiters, iter.cur, lwWaitLink);
1733 proclist_push_tail(&wakeup, iter.cur, lwWaitLink);
1734
1735 /* see LWLockWakeup() */
1736 Assert(waiter->lwWaiting == LW_WS_WAITING);
1738 }
1739
1740 /* We are done updating shared state of the lock itself. */
1742
1743 /*
1744 * Awaken any waiters I removed from the queue.
1745 */
1746 proclist_foreach_modify(iter, &wakeup, lwWaitLink)
1747 {
1748 PGPROC *waiter = GetPGProcByNumber(iter.cur);
1749
1750 proclist_delete(&wakeup, iter.cur, lwWaitLink);
1751 /* check comment in LWLockWakeup() about this barrier */
1753 waiter->lwWaiting = LW_WS_NOT_WAITING;
1754 PGSemaphoreUnlock(waiter->sem);
1755 }
1756}

References Assert, proclist_mutable_iter::cur, fb(), GetPGProcByNumber, LW_EXCLUSIVE, LW_VAL_EXCLUSIVE, LW_WAIT_UNTIL_FREE, LW_WS_NOT_WAITING, LW_WS_PENDING_WAKEUP, LW_WS_WAITING, LWLockWaitListLock(), LWLockWaitListUnlock(), PGPROC::lwWaiting, PGPROC::lwWaitMode, pg_atomic_exchange_u64(), pg_atomic_read_u32(), pg_write_barrier, PGSemaphoreUnlock(), PRINT_LWDEBUG, proclist_delete, proclist_foreach_modify, proclist_init(), proclist_push_tail, PGPROC::sem, LWLock::state, val, LWLock::waiters, and wakeup.

Referenced by WALInsertLockAcquireExclusive(), and WALInsertLockUpdateInsertingAt().

◆ LWLockWaitForVar()

bool LWLockWaitForVar ( LWLock * lock,
pg_atomic_uint64 * valptr,
uint64  oldval,
uint64 * newval 
)

Definition at line 1566 of file lwlock.c.

1568{
1569 PGPROC *proc = MyProc;
1570 int extraWaits = 0;
1571 bool result = false;
1572#ifdef LWLOCK_STATS
1574
1576#endif
1577
1578 PRINT_LWDEBUG("LWLockWaitForVar", lock, LW_WAIT_UNTIL_FREE);
1579
1580 /*
1581 * Lock out cancel/die interrupts while we sleep on the lock. There is no
1582 * cleanup mechanism to remove us from the wait queue if we got
1583 * interrupted.
1584 */
1586
1587 /*
1588 * Loop here to check the lock's status after each time we are signaled.
1589 */
1590 for (;;)
1591 {
1592 bool mustwait;
1593
1595 &result);
1596
1597 if (!mustwait)
1598 break; /* the lock was free or value didn't match */
1599
1600 /*
1601 * Add myself to wait queue. Note that this is racy, somebody else
1602 * could wakeup before we're finished queuing. NB: We're using nearly
1603 * the same twice-in-a-row lock acquisition protocol as
1604 * LWLockAcquire(). Check its comments for details. The only
1605 * difference is that we also have to check the variable's values when
1606 * checking the state of the lock.
1607 */
1609
1610 /*
1611 * Clear LW_FLAG_WAKE_IN_PROGRESS flag, to make sure we get woken up
1612 * as soon as the lock is released.
1613 */
1615
1616 /*
1617 * We're now guaranteed to be woken up if necessary. Recheck the lock
1618 * and variables state.
1619 */
1621 &result);
1622
1623 /* Ok, no conflict after we queued ourselves. Undo queueing. */
1624 if (!mustwait)
1625 {
1626 LOG_LWDEBUG("LWLockWaitForVar", lock, "free, undoing queue");
1627
1628 LWLockDequeueSelf(lock);
1629 break;
1630 }
1631
1632 /*
1633 * Wait until awakened.
1634 *
1635 * It is possible that we get awakened for a reason other than being
1636 * signaled by LWLockRelease. If so, loop back and wait again. Once
1637 * we've gotten the LWLock, re-increment the sema by the number of
1638 * additional signals received.
1639 */
1640 LOG_LWDEBUG("LWLockWaitForVar", lock, "waiting");
1641
1642#ifdef LWLOCK_STATS
1643 lwstats->block_count++;
1644#endif
1645
1649
1650 for (;;)
1651 {
1652 PGSemaphoreLock(proc->sem);
1653 if (proc->lwWaiting == LW_WS_NOT_WAITING)
1654 break;
1655 extraWaits++;
1656 }
1657
1658#ifdef LOCK_DEBUG
1659 {
1660 /* not waiting anymore */
1662
1664 }
1665#endif
1666
1670
1671 LOG_LWDEBUG("LWLockWaitForVar", lock, "awakened");
1672
1673 /* Now loop back and check the status of the lock again. */
1674 }
1675
1676 /*
1677 * Fix the process wait semaphore's count for any absorbed wakeups.
1678 */
1679 while (extraWaits-- > 0)
1680 PGSemaphoreUnlock(proc->sem);
1681
1682 /*
1683 * Now okay to allow cancel/die interrupts.
1684 */
1686
1687 return result;
1688}

References Assert, fb(), HOLD_INTERRUPTS, LOG_LWDEBUG, LW_EXCLUSIVE, LW_FLAG_WAKE_IN_PROGRESS, LW_WAIT_UNTIL_FREE, LW_WS_NOT_WAITING, LWLockConflictsWithVar(), LWLockDequeueSelf(), LWLockQueueSelf(), LWLockReportWaitEnd(), LWLockReportWaitStart(), PGPROC::lwWaiting, MAX_BACKENDS, MyProc, newval, pg_atomic_fetch_and_u32(), pg_atomic_fetch_sub_u32(), PG_USED_FOR_ASSERTS_ONLY, PGSemaphoreLock(), PGSemaphoreUnlock(), PRINT_LWDEBUG, result, RESUME_INTERRUPTS, PGPROC::sem, LWLock::state, and T_NAME.

Referenced by WaitXLogInsertionsToFinish().

◆ LWLockWaitListLock()

static void LWLockWaitListLock ( LWLock * lock)
static

Definition at line 835 of file lwlock.c.

836{
838#ifdef LWLOCK_STATS
840 uint32 delays = 0;
841
843#endif
844
845 while (true)
846 {
847 /*
848 * Always try once to acquire the lock directly, without setting up
849 * the spin-delay infrastructure. The work necessary for that shows up
850 * in profiles and is rarely necessary.
851 */
854 break; /* got lock */
855
856 /* and then spin without atomic operations until lock is released */
857 {
859
861
862 while (old_state & LW_FLAG_LOCKED)
863 {
866 }
867#ifdef LWLOCK_STATS
868 delays += delayStatus.delays;
869#endif
871 }
872
873 /*
874 * Retry. The lock might obviously already be re-acquired by the time
875 * we're attempting to get it again.
876 */
877 }
878
879#ifdef LWLOCK_STATS
880 lwstats->spin_delay_count += delays;
881#endif
882}

References fb(), finish_spin_delay(), init_local_spin_delay, likely, LW_FLAG_LOCKED, perform_spin_delay(), pg_atomic_fetch_or_u32(), pg_atomic_read_u32(), and LWLock::state.

Referenced by LWLockDequeueSelf(), LWLockQueueSelf(), LWLockUpdateVar(), and LWLockWakeup().

◆ LWLockWaitListUnlock()

◆ LWLockWakeup()

static void LWLockWakeup ( LWLock * lock)
static

Definition at line 904 of file lwlock.c.

905{
906 bool new_wake_in_progress = false;
907 bool wokeup_somebody = false;
910
912
913 /* lock wait list while collecting backends to wake up */
914 LWLockWaitListLock(lock);
915
916 proclist_foreach_modify(iter, &lock->waiters, lwWaitLink)
917 {
918 PGPROC *waiter = GetPGProcByNumber(iter.cur);
919
920 if (wokeup_somebody && waiter->lwWaitMode == LW_EXCLUSIVE)
921 continue;
922
923 proclist_delete(&lock->waiters, iter.cur, lwWaitLink);
924 proclist_push_tail(&wakeup, iter.cur, lwWaitLink);
925
926 if (waiter->lwWaitMode != LW_WAIT_UNTIL_FREE)
927 {
928 /*
929 * Prevent additional wakeups until retryer gets to run. Backends
930 * that are just waiting for the lock to become free don't retry
931 * automatically.
932 */
934
935 /*
936 * Don't wakeup (further) exclusive locks.
937 */
938 wokeup_somebody = true;
939 }
940
941 /*
942 * Signal that the process isn't on the wait list anymore. This allows
943 * LWLockDequeueSelf() to remove itself of the waitlist with a
944 * proclist_delete(), rather than having to check if it has been
945 * removed from the list.
946 */
947 Assert(waiter->lwWaiting == LW_WS_WAITING);
949
950 /*
951 * Once we've woken up an exclusive lock, there's no point in waking
952 * up anybody else.
953 */
954 if (waiter->lwWaitMode == LW_EXCLUSIVE)
955 break;
956 }
957
959
960 /* unset required flags, and release lock, in one fell swoop */
961 {
964
966 while (true)
967 {
969
970 /* compute desired flags */
971
974 else
976
977 if (proclist_is_empty(&lock->waiters))
979
980 desired_state &= ~LW_FLAG_LOCKED; /* release lock */
981
984 break;
985 }
986 }
987
988 /* Awaken any waiters I removed from the queue. */
989 proclist_foreach_modify(iter, &wakeup, lwWaitLink)
990 {
991 PGPROC *waiter = GetPGProcByNumber(iter.cur);
992
993 LOG_LWDEBUG("LWLockRelease", lock, "release waiter");
994 proclist_delete(&wakeup, iter.cur, lwWaitLink);
995
996 /*
997 * Guarantee that lwWaiting being unset only becomes visible once the
998 * unlink from the link has completed. Otherwise the target backend
999 * could be woken up for other reason and enqueue for a new lock - if
1000 * that happens before the list unlink happens, the list would end up
1001 * being corrupted.
1002 *
1003 * The barrier pairs with the LWLockWaitListLock() when enqueuing for
1004 * another lock.
1005 */
1007 waiter->lwWaiting = LW_WS_NOT_WAITING;
1008 PGSemaphoreUnlock(waiter->sem);
1009 }
1010}

References Assert, proclist_mutable_iter::cur, fb(), GetPGProcByNumber, LOG_LWDEBUG, LW_EXCLUSIVE, LW_FLAG_HAS_WAITERS, LW_FLAG_WAKE_IN_PROGRESS, LW_WAIT_UNTIL_FREE, LW_WS_NOT_WAITING, LW_WS_PENDING_WAKEUP, LW_WS_WAITING, LWLockWaitListLock(), PGPROC::lwWaiting, PGPROC::lwWaitMode, pg_atomic_compare_exchange_u32(), pg_atomic_read_u32(), pg_write_barrier, PGSemaphoreUnlock(), proclist_delete, proclist_foreach_modify, proclist_init(), proclist_is_empty(), proclist_push_tail, PGPROC::sem, LWLock::state, LWLock::waiters, and wakeup.

Referenced by LWLockRelease().

◆ NumLWLocksForNamedTranches()

static int NumLWLocksForNamedTranches ( void  )
static

Definition at line 400 of file lwlock.c.

401{
402 int numLocks = 0;
403
405 {
406 numLocks += request->num_lwlocks;
407 }
408
409 return numLocks;
410}

References fb(), foreach_ptr, and NamedLWLockTrancheRequests.

Referenced by LWLockShmemRequest().

◆ RequestNamedLWLockTranche()

void RequestNamedLWLockTranche ( const char * tranche_name,
int  num_lwlocks 
)

Definition at line 620 of file lwlock.c.

621{
623 MemoryContext oldcontext;
624
626 elog(FATAL, "cannot request additional LWLocks outside shmem_request_hook");
627
628 if (!tranche_name)
631 errmsg("tranche name cannot be NULL")));
632
633 if (strlen(tranche_name) >= NAMEDATALEN)
636 errmsg("tranche name too long"),
637 errdetail("LWLock tranche names must be no longer than %d bytes.",
638 NAMEDATALEN - 1)));
639
642 (errmsg("maximum number of tranches already registered"),
643 errdetail("No more than %d tranches may be registered.",
645
646 /* Check that the name isn't already in use */
648 {
649 if (strcmp(existing->tranche_name, tranche_name) == 0)
650 elog(ERROR, "requested tranche \"%s\" is already registered", tranche_name);
651 }
652
655 else
657
659 strlcpy(request->tranche_name, tranche_name, NAMEDATALEN);
660 request->num_lwlocks = num_lwlocks;
662
663 MemoryContextSwitchTo(oldcontext);
664}

References elog, ereport, errcode(), errdetail(), errmsg, ERROR, FATAL, fb(), foreach_ptr, IsPostmasterEnvironment, lappend(), list_length(), MAX_USER_DEFINED_TRANCHES, MemoryContextSwitchTo(), NAMEDATALEN, NamedLWLockTrancheRequests, palloc0(), PostmasterContext, process_shmem_requests_in_progress, strlcpy(), and TopMemoryContext.

Referenced by test_lwlock_tranches_shmem_request().

◆ StaticAssertDecl() [1/4]

StaticAssertDecl ( ((MAX_BACKENDS + 1) & MAX_BACKENDS) == 0,
"MAX_BACKENDS + 1 needs to be a power of 2"
)

◆ StaticAssertDecl() [2/4]

StaticAssertDecl ( (LW_VAL_EXCLUSIVE & LW_FLAG_MASK) == 0,
"LW_VAL_EXCLUSIVE and LW_FLAG_MASK overlap"
)

◆ StaticAssertDecl() [3/4]

StaticAssertDecl ( (MAX_BACKENDS & LW_FLAG_MASK) == 0,
"MAX_BACKENDS and LW_FLAG_MASK overlap"
)

◆ StaticAssertDecl() [4/4]

StaticAssertDecl ( lengthof(BuiltinTrancheNames) == LWTRANCHE_FIRST_USER_DEFINED,
"missing entries in BuiltinTrancheNames[]"
)

Variable Documentation

◆ BuiltinTrancheNames

const char* const BuiltinTrancheNames[]
static
Initial value:
= {
#define PG_LWLOCK(id, lockname)
#define PG_LWLOCKTRANCHE(id, lockname)
}

Definition at line 137 of file lwlock.c.

137 {
138#define PG_LWLOCK(id, lockname) [id] = CppAsString(lockname),
139#define PG_LWLOCKTRANCHE(id, lockname) [LWTRANCHE_##id] = CppAsString(lockname),
140#include "storage/lwlocklist.h"
141#undef PG_LWLOCK
142#undef PG_LWLOCKTRANCHE
143};

Referenced by GetLWTrancheName().

◆ held_lwlocks

◆ LocalNumUserDefinedTranches

int LocalNumUserDefinedTranches
static

Definition at line 198 of file lwlock.c.

Referenced by GetLWTrancheName(), GetNamedLWLockTranche(), and LWLockNewTrancheId().

◆ LWLockCallbacks

const ShmemCallbacks LWLockCallbacks
Initial value:
= {
.request_fn = LWLockShmemRequest,
.init_fn = LWLockShmemInit,
}

Definition at line 219 of file lwlock.c.

219 {
220 .request_fn = LWLockShmemRequest,
221 .init_fn = LWLockShmemInit,
222};

◆ LWLockTranches

◆ MainLWLockArray

◆ NamedLWLockTrancheRequests

List* NamedLWLockTrancheRequests = NIL
static

◆ num_held_lwlocks

◆ num_main_array_locks

int num_main_array_locks
static

Definition at line 214 of file lwlock.c.

Referenced by LWLockShmemInit(), and LWLockShmemRequest().