#define LW_FLAG_HAS_WAITERS			((uint32) 1 << 30)
#define LW_FLAG_RELEASE_OK			((uint32) 1 << 29)
#define LW_FLAG_LOCKED				((uint32) 1 << 28)

#define LW_VAL_EXCLUSIVE			((uint32) 1 << 24)
#define LW_VAL_SHARED				1

#define LW_LOCK_MASK				((uint32) ((1 << 25)-1))
#define LW_SHARED_MASK				((uint32) ((1 << 24)-1))
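/*
 * Illustrative sketch (not part of lwlock.c): how the 32-bit state word is
 * partitioned by the definitions above.  The low 24 bits count shared
 * holders (each adds LW_VAL_SHARED, i.e. 1), bit 24 is the exclusive bit,
 * and the top bits are bookkeeping flags.  The helper names are hypothetical
 * and exist only to make the layout concrete.
 */
static inline bool
sketch_state_is_exclusive(uint32 state)
{
	return (state & LW_VAL_EXCLUSIVE) != 0;
}

static inline uint32
sketch_state_shared_count(uint32 state)
{
	return state & LW_SHARED_MASK;	/* number of shared holders */
}

static inline bool
sketch_state_has_waiters(uint32 state)
{
	return (state & LW_FLAG_HAS_WAITERS) != 0;
}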
112 "MAX_BACKENDS too big for lwlock.c");
142 "MultiXactOffsetBuffer",
144 "MultiXactMemberBuffer",
154 "ReplicationOriginState",
164 "PredicateLockManager",
172 "PerSessionRecordType",
174 "PerSessionRecordTypmod",
182 "PerXactPredicateList",
190 "LogicalRepLauncherDSA",
192 "LogicalRepLauncherHash",
197 "missing entries in BuiltinTrancheNames[]");
#define MAX_SIMUL_LWLOCKS	200
#define T_NAME(lock) \
	GetLWTrancheName((lock)->tranche)
typedef struct lwlock_stats_key
{
	int			tranche;
	void	   *instance;
}			lwlock_stats_key;

typedef struct lwlock_stats
{
	lwlock_stats_key key;
	int			sh_acquire_count;
	int			ex_acquire_count;
	int			block_count;
	int			dequeue_self_count;
	int			spin_delay_count;
}			lwlock_stats;

static MemoryContext lwlock_stats_cxt = NULL;
static HTAB *lwlock_stats_htab;
static lwlock_stats lwlock_stats_dummy;
bool		Trace_lwlocks = false;
				 errmsg_internal("%d: %s(%s %p): excl %u shared %u haswaiters %u waiters %u rOK %d",
								 MyProcPid,
								 where, T_NAME(lock), lock,
								 T_NAME(lock), lock, msg)));
#define PRINT_LWDEBUG(a,b,c) ((void)0)
#define LOG_LWDEBUG(a,b,c) ((void)0)
static void init_lwlock_stats(void);
static void print_lwlock_stats(int code, Datum arg);
static lwlock_stats *get_lwlock_stats_entry(LWLock *lock);
static void
init_lwlock_stats(void)
{
	HASHCTL		ctl;
	static bool exit_registered = false;

	if (lwlock_stats_cxt != NULL)
		MemoryContextDelete(lwlock_stats_cxt);

	lwlock_stats_cxt = AllocSetContextCreate(TopMemoryContext,
											 "LWLock stats",
											 ALLOCSET_DEFAULT_SIZES);
	MemoryContextAllowInCriticalSection(lwlock_stats_cxt, true);

	ctl.keysize = sizeof(lwlock_stats_key);
	ctl.entrysize = sizeof(lwlock_stats);
	ctl.hcxt = lwlock_stats_cxt;
	lwlock_stats_htab = hash_create("lwlock stats", 16384, &ctl,
									HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
	if (!exit_registered)
	{
		on_shmem_exit(print_lwlock_stats, 0);
		exit_registered = true;
	}
}
static void
print_lwlock_stats(int code, Datum arg)
{
	HASH_SEQ_STATUS scan;
	lwlock_stats *lwstats;

	hash_seq_init(&scan, lwlock_stats_htab);
	while ((lwstats = (lwlock_stats *) hash_seq_search(&scan)) != NULL)
		fprintf(stderr,
				"PID %d lwlock %s %p: shacq %u exacq %u blk %u spindelay %u dequeue self %u\n",
				MyProcPid, GetLWTrancheName(lwstats->key.tranche),
				lwstats->key.instance, lwstats->sh_acquire_count,
				lwstats->ex_acquire_count, lwstats->block_count,
				lwstats->spin_delay_count, lwstats->dequeue_self_count);
}
static lwlock_stats *
get_lwlock_stats_entry(LWLock *lock)
{
	lwlock_stats_key key;
	lwlock_stats *lwstats;
	bool		found;

	if (lwlock_stats_htab == NULL)
		return &lwlock_stats_dummy;

	MemSet(&key, 0, sizeof(key));
	key.tranche = lock->tranche;
	key.instance = lock;
	lwstats = (lwlock_stats *) hash_search(lwlock_stats_htab, &key,
										   HASH_ENTER, &found);
	if (!found)
	{
		lwstats->sh_acquire_count = 0;
		lwstats->ex_acquire_count = 0;
		lwstats->block_count = 0;
		lwstats->dequeue_self_count = 0;
		lwstats->spin_delay_count = 0;
	}
	return lwstats;
}
	for (id = 0, lock = MainLWLockArray; id < NUM_INDIVIDUAL_LWLOCKS; id++, lock++)
		elog(ERROR, "requested tranche is not registered");
	result = (*LWLockCounter)++;
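/*
 * Illustrative sketch (not part of lwlock.c): how an extension might use
 * LWLockNewTrancheId(), LWLockRegisterTranche() and LWLockInitialize() to
 * set up an LWLock in shared memory it allocates itself (e.g. in a DSM
 * segment).  The struct and the tranche name "my_extension" are
 * hypothetical; the three LWLock calls are the real API declared below.
 */
typedef struct my_shared_state
{
	LWLock		lock;
	int			counter;
} my_shared_state;

static void
my_shared_state_init(my_shared_state *state)
{
	int			tranche_id = LWLockNewTrancheId();	/* reserve a new tranche ID */

	/* give the tranche a display name (repeat in each backend that uses it) */
	LWLockRegisterTranche(tranche_id, "my_extension");

	/* put the lock into its initial, unlocked state */
	LWLockInitialize(&state->lock, tranche_id);
	state->counter = 0;
}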
								   newalloc * sizeof(char *));
		elog(FATAL, "cannot request additional LWLocks outside shmem_request_hook");
	if (trancheId < NUM_INDIVIDUAL_LWLOCKS)

		desired_state = old_state;

		if (pg_atomic_compare_exchange_u32(&lock->state,
										   &old_state, desired_state))
	lwlock_stats *lwstats;

	lwstats = get_lwlock_stats_entry(lock);

	delays += delayStatus.delays;

	lwstats->spin_delay_count += delays;
	bool		wokeup_somebody = false;

	new_release_ok = true;

	new_release_ok = false;

	wokeup_somebody = true;

	desired_state = old_state;
		LOG_LWDEBUG("LWLockRelease", lock, "release waiter");
		elog(PANIC, "cannot wait without a PGPROC structure");

		elog(PANIC, "queueing for lock while waiting on another one");
	lwlock_stats *lwstats;

	lwstats = get_lwlock_stats_entry(lock);

	lwstats->dequeue_self_count++;
	while (extraWaits-- > 0)
		PGSemaphoreUnlock(MyProc->sem);	/* fix semaphore count for absorbed wakeups */
	lwlock_stats *lwstats;

	lwstats = get_lwlock_stats_entry(lock);

	lwstats->ex_acquire_count++;

	lwstats->sh_acquire_count++;
		LOG_LWDEBUG("LWLockAcquire", lock, "immediately acquired lock");

		LOG_LWDEBUG("LWLockAcquire", lock, "acquired, undoing queue");
		lwstats->block_count++;

		if (TRACE_POSTGRESQL_LWLOCK_WAIT_START_ENABLED())
			TRACE_POSTGRESQL_LWLOCK_WAIT_START(T_NAME(lock), mode);

		if (TRACE_POSTGRESQL_LWLOCK_WAIT_DONE_ENABLED())
			TRACE_POSTGRESQL_LWLOCK_WAIT_DONE(T_NAME(lock), mode);

	if (TRACE_POSTGRESQL_LWLOCK_ACQUIRE_ENABLED())
		TRACE_POSTGRESQL_LWLOCK_ACQUIRE(T_NAME(lock), mode);
	while (extraWaits-- > 0)
		PGSemaphoreUnlock(proc->sem);	/* fix semaphore count for absorbed wakeups */
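/*
 * Illustrative sketch (not part of lwlock.c): the basic caller pattern for
 * LWLockAcquire()/LWLockRelease(), using the hypothetical my_shared_state
 * from the earlier sketch.  LW_EXCLUSIVE excludes all other holders;
 * LW_SHARED allows any number of concurrent shared holders.
 */
static void
my_update_counter(my_shared_state *state, int delta)
{
	LWLockAcquire(&state->lock, LW_EXCLUSIVE);	/* sleeps until granted */
	state->counter += delta;
	LWLockRelease(&state->lock);
}

static int
my_read_counter(my_shared_state *state)
{
	int			value;

	LWLockAcquire(&state->lock, LW_SHARED);
	value = state->counter;
	LWLockRelease(&state->lock);
	return value;
}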
		LOG_LWDEBUG("LWLockConditionalAcquire", lock, "failed");
		if (TRACE_POSTGRESQL_LWLOCK_CONDACQUIRE_FAIL_ENABLED())
			TRACE_POSTGRESQL_LWLOCK_CONDACQUIRE_FAIL(T_NAME(lock), mode);

		if (TRACE_POSTGRESQL_LWLOCK_CONDACQUIRE_ENABLED())
			TRACE_POSTGRESQL_LWLOCK_CONDACQUIRE(T_NAME(lock), mode);
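/*
 * Illustrative sketch (not part of lwlock.c): LWLockConditionalAcquire()
 * never blocks; it returns false when the lock cannot be taken immediately,
 * letting the caller skip optional work instead of waiting.  The my_* names
 * are hypothetical.
 */
static bool
my_try_update_counter(my_shared_state *state, int delta)
{
	if (!LWLockConditionalAcquire(&state->lock, LW_EXCLUSIVE))
		return false;			/* busy: caller can retry later or skip */

	state->counter += delta;
	LWLockRelease(&state->lock);
	return true;
}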
	lwlock_stats *lwstats;

	lwstats = get_lwlock_stats_entry(lock);
		LOG_LWDEBUG("LWLockAcquireOrWait", lock, "waiting");

		lwstats->block_count++;

		if (TRACE_POSTGRESQL_LWLOCK_WAIT_START_ENABLED())
			TRACE_POSTGRESQL_LWLOCK_WAIT_START(T_NAME(lock), mode);

		if (TRACE_POSTGRESQL_LWLOCK_WAIT_DONE_ENABLED())
			TRACE_POSTGRESQL_LWLOCK_WAIT_DONE(T_NAME(lock), mode);

		LOG_LWDEBUG("LWLockAcquireOrWait", lock, "awakened");

		LOG_LWDEBUG("LWLockAcquireOrWait", lock, "acquired, undoing queue");
	while (extraWaits-- > 0)
		PGSemaphoreUnlock(proc->sem);	/* fix semaphore count for absorbed wakeups */
		LOG_LWDEBUG("LWLockAcquireOrWait", lock, "failed");
		if (TRACE_POSTGRESQL_LWLOCK_ACQUIRE_OR_WAIT_FAIL_ENABLED())
			TRACE_POSTGRESQL_LWLOCK_ACQUIRE_OR_WAIT_FAIL(T_NAME(lock), mode);

		LOG_LWDEBUG("LWLockAcquireOrWait", lock, "succeeded");
		if (TRACE_POSTGRESQL_LWLOCK_ACQUIRE_OR_WAIT_ENABLED())
			TRACE_POSTGRESQL_LWLOCK_ACQUIRE_OR_WAIT(T_NAME(lock), mode);
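/*
 * Illustrative sketch (not part of lwlock.c): LWLockAcquireOrWait() either
 * acquires the lock (returns true) or waits until the lock is free and
 * returns false without acquiring it.  The typical pattern is "if somebody
 * else held the lock, they probably already did the work we wanted to do,
 * so recheck before retrying".  my_work_already_done() and
 * my_do_shared_work() are hypothetical.
 */
static void
my_ensure_work_done(my_shared_state *state, int target)
{
	while (!my_work_already_done(state, target))
	{
		if (LWLockAcquireOrWait(&state->lock, LW_EXCLUSIVE))
		{
			/* we got the lock: do the work ourselves */
			my_do_shared_work(state, target);
			LWLockRelease(&state->lock);
			break;
		}
		/* lock was busy; it is now free, so loop and recheck */
	}
}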
static bool
LWLockConflictsWithVar(LWLock *lock, pg_atomic_uint64 *valptr, uint64 oldval,
					   uint64 *newval, bool *result)

	if (value != oldval)
	bool		result = false;
	lwlock_stats *lwstats;

	lwstats = get_lwlock_stats_entry(lock);
		LOG_LWDEBUG("LWLockWaitForVar", lock, "free, undoing queue");

		lwstats->block_count++;

		if (TRACE_POSTGRESQL_LWLOCK_WAIT_START_ENABLED())

		if (TRACE_POSTGRESQL_LWLOCK_WAIT_DONE_ENABLED())

		LOG_LWDEBUG("LWLockWaitForVar", lock, "awakened");
	while (extraWaits-- > 0)
		PGSemaphoreUnlock(proc->sem);	/* fix semaphore count for absorbed wakeups */
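/*
 * Illustrative sketch (not part of lwlock.c): the LWLockWaitForVar() /
 * LWLockUpdateVar() protocol.  The exclusive lock holder advances a 64-bit
 * progress value with LWLockUpdateVar(), which also wakes variable waiters;
 * other backends call LWLockWaitForVar(), which returns true once the lock
 * is free, or false with *newval set when the value has moved past what the
 * caller last passed in.  The my_* names are hypothetical.
 */
static void
my_publish_progress(LWLock *lock, pg_atomic_uint64 *progress, uint64 value)
{
	/* caller must hold 'lock' in exclusive mode */
	LWLockUpdateVar(lock, progress, value);
}

static void
my_wait_for_progress(LWLock *lock, pg_atomic_uint64 *progress, uint64 target)
{
	uint64		seen = 0;

	for (;;)
	{
		if (LWLockWaitForVar(lock, progress, seen, &seen))
			break;				/* lock is free: no holder to wait for */
		if (seen >= target)
			break;				/* holder already advanced far enough */
		/* else: wait again until the value changes from 'seen' */
	}
}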
	if (TRACE_POSTGRESQL_LWLOCK_RELEASE_ENABLED())
		TRACE_POSTGRESQL_LWLOCK_RELEASE(T_NAME(lock));

		check_waiters = true;

		check_waiters = false;

		LOG_LWDEBUG("LWLockRelease", lock, "releasing waiters");
	char	   *held_lock_addr;

	begin = (char *) lock;
	end = begin + nlocks * stride;

	if (held_lock_addr >= begin &&
		held_lock_addr < end &&
		(held_lock_addr - begin) % stride == 0)
static uint32 pg_atomic_fetch_and_u32(volatile pg_atomic_uint32 *ptr, uint32 and_)
static bool pg_atomic_compare_exchange_u32(volatile pg_atomic_uint32 *ptr, uint32 *expected, uint32 newval)
static uint32 pg_atomic_fetch_or_u32(volatile pg_atomic_uint32 *ptr, uint32 or_)
static uint32 pg_atomic_sub_fetch_u32(volatile pg_atomic_uint32 *ptr, int32 sub_)
static uint32 pg_atomic_fetch_sub_u32(volatile pg_atomic_uint32 *ptr, int32 sub_)
static void pg_atomic_init_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
#define pg_write_barrier()
static uint32 pg_atomic_fetch_add_u32(volatile pg_atomic_uint32 *ptr, int32 add_)
static uint32 pg_atomic_read_u32(volatile pg_atomic_uint32 *ptr)
static uint64 pg_atomic_read_u64(volatile pg_atomic_uint64 *ptr)
static uint64 pg_atomic_exchange_u64(volatile pg_atomic_uint64 *ptr, uint64 newval)
#define PG_USED_FOR_ASSERTS_ONLY
#define MemSet(start, val, len)
elog(ERROR, "%s: %s", p2, msg)
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
HTAB * hash_create(const char *tabname, long nelem, const HASHCTL *info, int flags)
void * hash_seq_search(HASH_SEQ_STATUS *status)
void hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp)
int errmsg_internal(const char *fmt,...)
int errhidestmt(bool hide_stmt)
int errhidecontext(bool hide_ctx)
#define ereport(elevel,...)
void on_shmem_exit(pg_on_exit_callback function, Datum arg)
void LWLockUpdateVar(LWLock *lock, pg_atomic_uint64 *valptr, uint64 val)
StaticAssertDecl(LW_VAL_EXCLUSIVE > (uint32) MAX_BACKENDS, "MAX_BACKENDS too big for lwlock.c")
static void LWLockWakeup(LWLock *lock)
bool LWLockHeldByMe(LWLock *lock)
const char * GetLWLockIdentifier(uint32 classId, uint16 eventId)
LWLockPadded * GetNamedLWLockTranche(const char *tranche_name)
static LWLockHandle held_lwlocks[MAX_SIMUL_LWLOCKS]
static int LWLockTrancheNamesAllocated
void LWLockReleaseClearVar(LWLock *lock, pg_atomic_uint64 *valptr, uint64 val)
const char *const IndividualLWLockNames[]
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
NamedLWLockTranche * NamedLWLockTrancheArray
static bool LWLockAttemptLock(LWLock *lock, LWLockMode mode)
static void LWLockWaitListLock(LWLock *lock)
void LWLockRegisterTranche(int tranche_id, const char *tranche_name)
bool LWLockHeldByMeInMode(LWLock *lock, LWLockMode mode)
static void LWLockReportWaitEnd(void)
struct LWLockHandle LWLockHandle
bool LWLockWaitForVar(LWLock *lock, pg_atomic_uint64 *valptr, uint64 oldval, uint64 *newval)
int LWLockNewTrancheId(void)
static const char * GetLWTrancheName(uint16 trancheId)
int NamedLWLockTrancheRequests
void RequestNamedLWLockTranche(const char *tranche_name, int num_lwlocks)
#define LW_FLAG_RELEASE_OK
#define LW_FLAG_HAS_WAITERS
#define MAX_SIMUL_LWLOCKS
struct NamedLWLockTrancheRequest NamedLWLockTrancheRequest
static int NumLWLocksForNamedTranches(void)
void LWLockRelease(LWLock *lock)
static int num_held_lwlocks
void LWLockReleaseAll(void)
static void InitializeLWLocks(void)
void LWLockInitialize(LWLock *lock, int tranche_id)
static int NamedLWLockTrancheRequestsAllocated
static const char *const BuiltinTrancheNames[]
static NamedLWLockTrancheRequest * NamedLWLockTrancheRequestArray
static void LWLockWaitListUnlock(LWLock *lock)
static const char ** LWLockTrancheNames
#define LOG_LWDEBUG(a, b, c)
bool LWLockConditionalAcquire(LWLock *lock, LWLockMode mode)
bool LWLockAcquireOrWait(LWLock *lock, LWLockMode mode)
static void LWLockQueueSelf(LWLock *lock, LWLockMode mode)
#define PRINT_LWDEBUG(a, b, c)
static void LWLockReportWaitStart(LWLock *lock)
LWLockPadded * MainLWLockArray
static void LWLockDequeueSelf(LWLock *lock)
Size LWLockShmemSize(void)
bool LWLockAnyHeldByMe(LWLock *lock, int nlocks, size_t stride)
static bool LWLockConflictsWithVar(LWLock *lock, pg_atomic_uint64 *valptr, uint64 oldval, uint64 *newval, bool *result)
void InitLWLockAccess(void)
#define LWLOCK_PADDED_SIZE
#define BUFFER_MAPPING_LWLOCK_OFFSET
#define NUM_LOCK_PARTITIONS
@ LWTRANCHE_FIRST_USER_DEFINED
@ LWTRANCHE_PREDICATE_LOCK_MANAGER
@ LWTRANCHE_BUFFER_MAPPING
#define LOCK_MANAGER_LWLOCK_OFFSET
#define NUM_BUFFER_PARTITIONS
#define PREDICATELOCK_MANAGER_LWLOCK_OFFSET
#define NUM_FIXED_LWLOCKS
#define NUM_PREDICATELOCK_PARTITIONS
MemoryContext TopMemoryContext
void * MemoryContextAllocZero(MemoryContext context, Size size)
void * repalloc(void *pointer, Size size)
void * MemoryContextAlloc(MemoryContext context, Size size)
void MemoryContextDelete(MemoryContext context)
void MemoryContextAllowInCriticalSection(MemoryContext context, bool allow)
#define AllocSetContextCreate
#define ALLOCSET_DEFAULT_SIZES
#define RESUME_INTERRUPTS()
#define HOLD_INTERRUPTS()
bool process_shmem_requests_in_progress
#define repalloc0_array(pointer, type, oldcount, count)
static uint32 pg_nextpower2_32(uint32 num)
size_t strlcpy(char *dst, const char *src, size_t siz)
void PGSemaphoreUnlock(PGSemaphore sema)
void PGSemaphoreLock(PGSemaphore sema)
#define GetPGProcByNumber(n)
#define proclist_delete(list, procno, link_member)
static void proclist_init(proclist_head *list)
#define proclist_push_tail(list, procno, link_member)
#define proclist_push_head(list, procno, link_member)
#define proclist_foreach_modify(iter, lhead, link_member)
static bool proclist_is_empty(const proclist_head *list)
void perform_spin_delay(SpinDelayStatus *status)
void finish_spin_delay(SpinDelayStatus *status)
#define init_local_spin_delay(status)
void * ShmemAlloc(Size size)
Size add_size(Size s1, Size s2)
Size mul_size(Size s1, Size s2)
#define SpinLockRelease(lock)
#define SpinLockAcquire(lock)
char tranche_name[NAMEDATALEN]
static void pgstat_report_wait_start(uint32 wait_event_info)
static void pgstat_report_wait_end(void)