#define LW_FLAG_HAS_WAITERS			((uint32) 1 << 30)
#define LW_FLAG_RELEASE_OK			((uint32) 1 << 29)
#define LW_FLAG_LOCKED				((uint32) 1 << 28)

#define LW_VAL_EXCLUSIVE			((uint32) 1 << 24)
#define LW_VAL_SHARED				1

#define LW_LOCK_MASK				((uint32) ((1 << 25) - 1))

#define LW_SHARED_MASK				((uint32) ((1 << 24) - 1))
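/*
 * Illustrative sketch, not part of lwlock.c: how the 32-bit state word can be
 * decoded with the flag and mask definitions above.  The helper name and the
 * local variable names are hypothetical; only the bit layout comes from the
 * #defines.
 */
static inline void
sketch_decode_lwlock_state(uint32 state)
{
	bool		is_exclusive = (state & LW_VAL_EXCLUSIVE) != 0;		/* one exclusive holder */
	uint32		shared_holders = state & LW_SHARED_MASK;			/* LW_VAL_SHARED per holder */
	bool		has_waiters = (state & LW_FLAG_HAS_WAITERS) != 0;	/* wait queue non-empty */
	bool		release_ok = (state & LW_FLAG_RELEASE_OK) != 0;		/* wake waiters on release */
	bool		waitlist_locked = (state & LW_FLAG_LOCKED) != 0;	/* wait-list mutex bit */

	(void) is_exclusive;
	(void) shared_holders;
	(void) has_waiters;
	(void) release_ok;
	(void) waitlist_locked;
}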
139 "MultiXactOffsetBuffer",
141 "MultiXactMemberBuffer",
151 "ReplicationOriginState",
161 "PredicateLockManager",
169 "PerSessionRecordType",
171 "PerSessionRecordTypmod",
179 "PerXactPredicateList",
190 "missing entries in BuiltinTrancheNames[]");
#define MAX_SIMUL_LWLOCKS	200

#define T_NAME(lock) \
	GetLWTrancheName((lock)->tranche)
typedef struct lwlock_stats_key
{
	int			tranche;
	void	   *instance;
}			lwlock_stats_key;

typedef struct lwlock_stats
{
	lwlock_stats_key key;
	int			sh_acquire_count;
	int			ex_acquire_count;
	int			block_count;
	int			dequeue_self_count;
	int			spin_delay_count;
}			lwlock_stats;
static HTAB *lwlock_stats_htab;
static lwlock_stats lwlock_stats_dummy;

bool		Trace_lwlocks = false;
	/* In PRINT_LWDEBUG(): */
	errmsg_internal("%d: %s(%s %p): excl %u shared %u haswaiters %u waiters %u rOK %d",
					MyProcPid, where, T_NAME(lock), lock,
					(state & LW_VAL_EXCLUSIVE) != 0, state & LW_SHARED_MASK,
					(state & LW_FLAG_HAS_WAITERS) != 0,
					pg_atomic_read_u32(&lock->nwaiters),
					(state & LW_FLAG_RELEASE_OK) != 0)));

	/* In LOG_LWDEBUG(): */
	errmsg_internal("%d: %s(%s %p): %s", MyProcPid,
					where, T_NAME(lock), lock, msg)));
#define PRINT_LWDEBUG(a,b,c) ((void)0)
#define LOG_LWDEBUG(a,b,c) ((void)0)
static void init_lwlock_stats(void);
static void print_lwlock_stats(int code, Datum arg);
static lwlock_stats *get_lwlock_stats_entry(LWLock *lock);
static void
init_lwlock_stats(void)
{
	HASHCTL		ctl;
	static MemoryContext lwlock_stats_cxt = NULL;
	static bool exit_registered = false;

	if (lwlock_stats_cxt != NULL)
		MemoryContextDelete(lwlock_stats_cxt);

	/*
	 * The stats hash table lives in its own context so it can be rebuilt,
	 * and it must be allowed to allocate within critical sections.
	 */
	lwlock_stats_cxt = AllocSetContextCreate(TopMemoryContext,
											 "LWLock stats",
											 ALLOCSET_DEFAULT_SIZES);
	MemoryContextAllowInCriticalSection(lwlock_stats_cxt, true);

	ctl.keysize = sizeof(lwlock_stats_key);
	ctl.entrysize = sizeof(lwlock_stats);
	ctl.hcxt = lwlock_stats_cxt;
	lwlock_stats_htab = hash_create("lwlock stats", 16384, &ctl,
									HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
	if (!exit_registered)
	{
		on_shmem_exit(print_lwlock_stats, 0);
		exit_registered = true;
	}
}
static void
print_lwlock_stats(int code, Datum arg)
{
	HASH_SEQ_STATUS scan;
	lwlock_stats *lwstats;

	hash_seq_init(&scan, lwlock_stats_htab);

	/* Grab an LWLock to keep different backends from mixing reports */
	LWLockAcquire(&MainLWLockArray[0].lock, LW_EXCLUSIVE);
	while ((lwstats = (lwlock_stats *) hash_seq_search(&scan)) != NULL)
		fprintf(stderr,
				"PID %d lwlock %s %p: shacq %u exacq %u blk %u spindelay %u dequeue self %u\n",
				MyProcPid, GetLWTrancheName(lwstats->key.tranche),
				lwstats->key.instance, lwstats->sh_acquire_count,
				lwstats->ex_acquire_count, lwstats->block_count,
				lwstats->spin_delay_count, lwstats->dequeue_self_count);
	LWLockRelease(&MainLWLockArray[0].lock);
}
static lwlock_stats *
get_lwlock_stats_entry(LWLock *lock)
{
	lwlock_stats_key key;
	lwlock_stats *lwstats;
	bool		found;

	/* During shared memory startup the hash table may not exist yet. */
	if (lwlock_stats_htab == NULL)
		return &lwlock_stats_dummy;

	/* Fetch or create the entry. */
	MemSet(&key, 0, sizeof(key));
	key.tranche = lock->tranche;
	key.instance = lock;
	lwstats = hash_search(lwlock_stats_htab, &key, HASH_ENTER, &found);
	if (!found)
	{
		lwstats->sh_acquire_count = 0;
		lwstats->ex_acquire_count = 0;
		lwstats->block_count = 0;
		lwstats->dequeue_self_count = 0;
		lwstats->spin_delay_count = 0;
	}
	return lwstats;
}
470 "MAX_BACKENDS too big for lwlock.c");
473 "Miscalculated LWLock padding");
	/* Initialize all individual LWLocks in main array */
	for (id = 0, lock = MainLWLockArray; id < NUM_INDIVIDUAL_LWLOCKS; id++, lock++)
		LWLockInitialize(&lock->lock, id);
	/* In GetNamedLWLockTranche(): */
		elog(ERROR, "requested tranche is not registered");

	/* In LWLockNewTrancheId(): */
	result = (*LWLockCounter)++;
	/* In LWLockRegisterTranche(): grow the tranche-name array */
		if (LWLockTrancheNames == NULL)
			LWLockTrancheNames = (const char **)
				MemoryContextAllocZero(TopMemoryContext,
									   newalloc * sizeof(char *));
		else
			LWLockTrancheNames = (const char **)
				repalloc(LWLockTrancheNames, newalloc * sizeof(char *));
		elog(FATAL, "cannot request additional LWLocks outside shmem_request_hook");
	/* In GetLWTrancheName(): individual LWLocks have their own name array */
	if (trancheId < NUM_INDIVIDUAL_LWLOCKS)
		return IndividualLWLockNames[trancheId];
	/* In LWLockAttemptLock(): compute the new state and try to install it */
		desired_state = old_state;

		if (pg_atomic_compare_exchange_u32(&lock->state,
										   &old_state, desired_state))
	/* In LWLockWaitListLock(): spin-delay bookkeeping */
	lwlock_stats *lwstats;

	lwstats = get_lwlock_stats_entry(lock);

			delays += delayStatus.delays;

	lwstats->spin_delay_count += delays;
	/* In LWLockWakeup(): */
	bool		wokeup_somebody = false;

	new_release_ok = true;

			new_release_ok = false;

			wokeup_somebody = true;

		desired_state = old_state;
		LOG_LWDEBUG("LWLockRelease", lock, "release waiter");

		elog(PANIC, "cannot wait without a PGPROC structure");

		elog(PANIC, "queueing for lock while waiting on another one");
	/* In LWLockDequeueSelf(): */
	lwlock_stats *lwstats;

	lwstats = get_lwlock_stats_entry(lock);

	lwstats->dequeue_self_count++;

	/* Fix the process wait semaphore's count for any absorbed wakeups. */
	while (extraWaits-- > 0)
		PGSemaphoreUnlock(MyProc->sem);
	/* In LWLockAcquire(): */
	lwlock_stats *lwstats;

	lwstats = get_lwlock_stats_entry(lock);

	/* Count lock acquisition attempts */
	if (mode == LW_EXCLUSIVE)
		lwstats->ex_acquire_count++;
	else
		lwstats->sh_acquire_count++;
			LOG_LWDEBUG("LWLockAcquire", lock, "immediately acquired lock");

			LOG_LWDEBUG("LWLockAcquire", lock, "acquired, undoing queue");

		lwstats->block_count++;
		if (TRACE_POSTGRESQL_LWLOCK_WAIT_START_ENABLED())
			TRACE_POSTGRESQL_LWLOCK_WAIT_START(T_NAME(lock), mode);

		if (TRACE_POSTGRESQL_LWLOCK_WAIT_DONE_ENABLED())
			TRACE_POSTGRESQL_LWLOCK_WAIT_DONE(T_NAME(lock), mode);

	if (TRACE_POSTGRESQL_LWLOCK_ACQUIRE_ENABLED())
		TRACE_POSTGRESQL_LWLOCK_ACQUIRE(T_NAME(lock), mode);
	/* Fix the process wait semaphore's count for any absorbed wakeups. */
	while (extraWaits-- > 0)
		PGSemaphoreUnlock(proc->sem);
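/*
 * Illustrative sketch, not from lwlock.c: the normal calling pattern for the
 * acquire path above.  LWLockAcquire() sleeps until the lock is obtained, so
 * the caller simply brackets its access to the protected shared state with
 * acquire and release.  The helper name is hypothetical.
 */
static void
sketch_exclusive_critical_section(LWLock *lock)
{
	LWLockAcquire(lock, LW_EXCLUSIVE);
	/* ... read and modify the shared state the lock protects ... */
	LWLockRelease(lock);
}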
		LOG_LWDEBUG("LWLockConditionalAcquire", lock, "failed");
		if (TRACE_POSTGRESQL_LWLOCK_CONDACQUIRE_FAIL_ENABLED())
			TRACE_POSTGRESQL_LWLOCK_CONDACQUIRE_FAIL(T_NAME(lock), mode);

		if (TRACE_POSTGRESQL_LWLOCK_CONDACQUIRE_ENABLED())
			TRACE_POSTGRESQL_LWLOCK_CONDACQUIRE(T_NAME(lock), mode);
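/*
 * Illustrative sketch, not from lwlock.c: using LWLockConditionalAcquire(),
 * which never sleeps and returns true only if the lock was obtained.  The
 * helper name is hypothetical.
 */
static bool
sketch_try_shared_read(LWLock *lock)
{
	if (!LWLockConditionalAcquire(lock, LW_SHARED))
		return false;			/* lock busy: caller does something else */

	/* ... read the protected state ... */
	LWLockRelease(lock);
	return true;
}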
	/* In LWLockAcquireOrWait(): */
	lwlock_stats *lwstats;

	lwstats = get_lwlock_stats_entry(lock);

		LOG_LWDEBUG("LWLockAcquireOrWait", lock, "waiting");

		lwstats->block_count++;
		if (TRACE_POSTGRESQL_LWLOCK_WAIT_START_ENABLED())
			TRACE_POSTGRESQL_LWLOCK_WAIT_START(T_NAME(lock), mode);

		if (TRACE_POSTGRESQL_LWLOCK_WAIT_DONE_ENABLED())
			TRACE_POSTGRESQL_LWLOCK_WAIT_DONE(T_NAME(lock), mode);

		LOG_LWDEBUG("LWLockAcquireOrWait", lock, "awakened");

		LOG_LWDEBUG("LWLockAcquireOrWait", lock, "acquired, undoing queue");
	/* Fix the process wait semaphore's count for any absorbed wakeups. */
	while (extraWaits-- > 0)
		PGSemaphoreUnlock(proc->sem);
		LOG_LWDEBUG("LWLockAcquireOrWait", lock, "failed");
		if (TRACE_POSTGRESQL_LWLOCK_ACQUIRE_OR_WAIT_FAIL_ENABLED())
			TRACE_POSTGRESQL_LWLOCK_ACQUIRE_OR_WAIT_FAIL(T_NAME(lock), mode);

		LOG_LWDEBUG("LWLockAcquireOrWait", lock, "succeeded");
		if (TRACE_POSTGRESQL_LWLOCK_ACQUIRE_OR_WAIT_ENABLED())
			TRACE_POSTGRESQL_LWLOCK_ACQUIRE_OR_WAIT(T_NAME(lock), mode);
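/*
 * Illustrative sketch, not from lwlock.c: the intended use of
 * LWLockAcquireOrWait().  When the lock is free it is acquired and true is
 * returned; otherwise the call sleeps until the lock is released and returns
 * false WITHOUT acquiring it, so the caller can recheck whether the previous
 * holder already did the work.  do_flush() and flush_already_done() are
 * hypothetical helpers.
 */
static void do_flush(void);
static bool flush_already_done(void);

static void
sketch_group_flush(LWLock *lock)
{
	if (LWLockAcquireOrWait(lock, LW_EXCLUSIVE))
	{
		do_flush();
		LWLockRelease(lock);
	}
	else if (!flush_already_done())
	{
		/* waited, but our work is still pending: take the lock for real */
		LWLockAcquire(lock, LW_EXCLUSIVE);
		do_flush();
		LWLockRelease(lock);
	}
}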
static bool
LWLockConflictsWithVar(LWLock *lock,
					   uint64 *valptr, uint64 oldval, uint64 *newval,
					   bool *result)

	if (value != oldval)
	/* In LWLockWaitForVar(): */
	bool		result = false;

	lwlock_stats *lwstats;

	lwstats = get_lwlock_stats_entry(lock);
		LOG_LWDEBUG("LWLockWaitForVar", lock, "free, undoing queue");

		lwstats->block_count++;
		if (TRACE_POSTGRESQL_LWLOCK_WAIT_START_ENABLED())
			TRACE_POSTGRESQL_LWLOCK_WAIT_START(T_NAME(lock), LW_EXCLUSIVE);

		if (TRACE_POSTGRESQL_LWLOCK_WAIT_DONE_ENABLED())
			TRACE_POSTGRESQL_LWLOCK_WAIT_DONE(T_NAME(lock), LW_EXCLUSIVE);
		LOG_LWDEBUG("LWLockWaitForVar", lock, "awakened");

	/* Fix the process wait semaphore's count for any absorbed wakeups. */
	while (extraWaits-- > 0)
		PGSemaphoreUnlock(proc->sem);
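/*
 * Illustrative sketch, not from lwlock.c: the protocol behind
 * LWLockWaitForVar()/LWLockUpdateVar().  The exclusive holder advances a
 * 64-bit progress value under the lock; waiters sleep only until the lock is
 * released or that value moves past what they last saw.  progress_ptr and
 * the helper names are hypothetical.
 */
static void
sketch_advance_progress(LWLock *lock, uint64 *progress_ptr, uint64 new_value)
{
	/* caller must already hold 'lock' exclusively; wakes variable waiters */
	LWLockUpdateVar(lock, progress_ptr, new_value);
}

static void
sketch_wait_for_progress(LWLock *lock, uint64 *progress_ptr, uint64 last_seen)
{
	uint64		current;

	if (LWLockWaitForVar(lock, progress_ptr, last_seen, &current))
	{
		/* lock is (or became) free; caller may proceed or acquire it */
	}
	else
	{
		/* lock still held, but *progress_ptr changed; 'current' holds it */
	}
}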
	if (TRACE_POSTGRESQL_LWLOCK_RELEASE_ENABLED())
		TRACE_POSTGRESQL_LWLOCK_RELEASE(T_NAME(lock));

		check_waiters = true;

		check_waiters = false;

		LOG_LWDEBUG("LWLockRelease", lock, "releasing waiters");