PostgreSQL Source Code  git master
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros
lwlock.h
Go to the documentation of this file.
1 /*-------------------------------------------------------------------------
2  *
3  * lwlock.h
4  * Lightweight lock manager
5  *
6  *
7  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
8  * Portions Copyright (c) 1994, Regents of the University of California
9  *
10  * src/include/storage/lwlock.h
11  *
12  *-------------------------------------------------------------------------
13  */
14 #ifndef LWLOCK_H
15 #define LWLOCK_H
16 
17 #ifdef FRONTEND
18 #error "lwlock.h may not be included from frontend code"
19 #endif
20 
21 #include "lib/ilist.h"
22 #include "storage/s_lock.h"
23 #include "port/atomics.h"
24 
25 struct PGPROC;
26 
27 /*
28  * Prior to PostgreSQL 9.4, every lightweight lock in the system was stored
29  * in a single array. For convenience and for compatibility with past
30  * releases, we still have a main array, but it's now also permissible to
31  * store LWLocks elsewhere in the main shared memory segment or in a dynamic
32  * shared memory segment. Each array of lwlocks forms a separate "tranche".
33  *
34  * It's occasionally necessary to identify a particular LWLock "by name"; e.g.
35  * because we wish to report the lock to dtrace. We could store a name or
36  * other identifying information in the lock itself, but since it's common
37  * to have many nearly-identical locks (e.g. one per buffer) this would end
38  * up wasting significant amounts of memory. Instead, each lwlock stores a
39  * tranche ID which tells us which array it's part of. Based on that, we can
40  * figure out where the lwlock lies within the array using the data structure
41  * shown below; the lock is then identified based on the tranche name and
42  * computed array index. We need the array stride because the array might not
43  * be an array of lwlocks, but rather some larger data structure that includes
44  * one or more lwlocks per element.
45  */
46 typedef struct LWLockTranche
47 {
48  const char *name;
49  void *array_base;
52 
53 /*
54  * Code outside of lwlock.c should not manipulate the contents of this
55  * structure directly, but we have to declare it here to allow LWLocks to be
56  * incorporated into other data structures.
57  */
58 typedef struct LWLock
59 {
60  slock_t mutex; /* Protects LWLock and queue of PGPROCs */
61  uint16 tranche; /* tranche ID */
62 
63  pg_atomic_uint32 state; /* state of exclusive/nonexclusive lockers */
64 #ifdef LOCK_DEBUG
65  pg_atomic_uint32 nwaiters; /* number of waiters */
66 #endif
67  dlist_head waiters; /* list of waiting PGPROCs */
68 #ifdef LOCK_DEBUG
69  struct PGPROC *owner; /* last exclusive owner of the lock */
70 #endif
71 } LWLock;
72 
/*
 * In most cases, it's desirable to force each tranche of LWLocks to be aligned
 * on a cache line boundary and make the array stride a power of 2.  This saves
 * a few cycles in indexing, but more importantly ensures that individual
 * LWLocks don't cross cache line boundaries.  This reduces cache contention
 * problems, especially on AMD Opterons.  In some cases, it's useful to add
 * even more padding so that each LWLock takes up an entire cache line; this is
 * useful, for example, in the main LWLock array, where the overall number of
 * locks is small but some are heavily contended.
 *
 * When allocating a tranche that contains data other than LWLocks, it is
 * probably best to include a bare LWLock and then pad the resulting structure
 * as necessary for performance.  For an array that contains only LWLocks,
 * LWLockMinimallyPadded can be used for cases where we just want to ensure
 * that we don't cross cache line boundaries within a single lock, while
 * LWLockPadded can be used for cases where we want each lock to be an entire
 * cache line.
 *
 * On 32-bit platforms, an LWLockMinimallyPadded might actually contain more
 * than the absolute minimum amount of padding required to keep a lock from
 * crossing a cache line boundary, because an unpadded LWLock might fit into
 * 16 bytes.  We ignore that possibility when determining the minimal amount
 * of padding.  Older releases had larger LWLocks, so 32 really was the
 * minimum, and packing them in tighter might hurt performance.
 *
 * LWLOCK_MINIMAL_SIZE should be 32 on basically all common platforms, but
 * because slock_t is more than 2 bytes on some obscure platforms, we allow
 * for the possibility that it might be 64.
 */
#define LWLOCK_PADDED_SIZE	PG_CACHE_LINE_SIZE
#define LWLOCK_MINIMAL_SIZE (sizeof(LWLock) <= 32 ? 32 : 64)
104 
105 /* LWLock, padded to a full cache line size */
106 typedef union LWLockPadded
107 {
110 } LWLockPadded;
111 
112 /* LWLock, minimally padded */
114 {
118 
120 extern char *MainLWLockNames[];
121 
122 /* struct for storing named tranche information */
123 typedef struct NamedLWLockTranche
124 {
128 
131 
132 /* Names for fixed lwlocks */
133 #include "storage/lwlocknames.h"
134 
/*
 * It's a bit odd to declare NUM_BUFFER_PARTITIONS and NUM_LOCK_PARTITIONS
 * here, but we need them to figure out offsets within MainLWLockArray, and
 * having this file include lock.h or bufmgr.h would be backwards.
 */

/* Number of partitions of the shared buffer mapping hashtable */
#define NUM_BUFFER_PARTITIONS  128

/* Number of partitions the shared lock tables are divided into */
#define LOG2_NUM_LOCK_PARTITIONS  4
#define NUM_LOCK_PARTITIONS  (1 << LOG2_NUM_LOCK_PARTITIONS)

/* Number of partitions the shared predicate lock tables are divided into */
#define LOG2_NUM_PREDICATELOCK_PARTITIONS  4
#define NUM_PREDICATELOCK_PARTITIONS  (1 << LOG2_NUM_PREDICATELOCK_PARTITIONS)

/* Offsets for various chunks of preallocated lwlocks. */
#define BUFFER_MAPPING_LWLOCK_OFFSET	NUM_INDIVIDUAL_LWLOCKS
#define LOCK_MANAGER_LWLOCK_OFFSET \
	(BUFFER_MAPPING_LWLOCK_OFFSET + NUM_BUFFER_PARTITIONS)
#define PREDICATELOCK_MANAGER_LWLOCK_OFFSET \
	(LOCK_MANAGER_LWLOCK_OFFSET + NUM_LOCK_PARTITIONS)
#define NUM_FIXED_LWLOCKS \
	(PREDICATELOCK_MANAGER_LWLOCK_OFFSET + NUM_PREDICATELOCK_PARTITIONS)
160 
/*
 * Lock acquisition modes.
 *
 * NOTE(review): LW_EXCLUSIVE and LW_SHARED were truncated in the extracted
 * text; the surviving comment on LW_WAIT_UNTIL_FREE ("Not to be used as
 * LWLockAcquire argument") implies the acquire modes precede it.  Restored
 * from upstream -- verify order matches.
 */
typedef enum LWLockMode
{
	LW_EXCLUSIVE,				/* exclusive (writer) access */
	LW_SHARED,					/* shared (reader) access */
	LW_WAIT_UNTIL_FREE			/* A special mode used in PGPROC->lwlockMode,
								 * when waiting for lock to become free. Not
								 * to be used as LWLockAcquire argument */
} LWLockMode;
169 
170 
171 #ifdef LOCK_DEBUG
172 extern bool Trace_lwlocks;
173 #endif
174 
175 extern bool LWLockAcquire(LWLock *lock, LWLockMode mode);
176 extern bool LWLockConditionalAcquire(LWLock *lock, LWLockMode mode);
177 extern bool LWLockAcquireOrWait(LWLock *lock, LWLockMode mode);
178 extern void LWLockRelease(LWLock *lock);
179 extern void LWLockReleaseClearVar(LWLock *lock, uint64 *valptr, uint64 val);
180 extern void LWLockReleaseAll(void);
181 extern bool LWLockHeldByMe(LWLock *lock);
182 
183 extern bool LWLockWaitForVar(LWLock *lock, uint64 *valptr, uint64 oldval, uint64 *newval);
184 extern void LWLockUpdateVar(LWLock *lock, uint64 *valptr, uint64 value);
185 
186 extern Size LWLockShmemSize(void);
187 extern void CreateLWLocks(void);
188 extern void InitLWLockAccess(void);
189 
190 /*
191  * Extensions (or core code) can obtain an LWLocks by calling
192  * RequestNamedLWLockTranche() during postmaster startup. Subsequently,
193  * call GetNamedLWLockTranche() to obtain a pointer to an array containing
194  * the number of LWLocks requested.
195  */
196 extern void RequestNamedLWLockTranche(const char *tranche_name, int num_lwlocks);
197 extern LWLockPadded *GetNamedLWLockTranche(const char *tranche_name);
198 
199 extern LWLock *LWLockAssign(void);
200 
201 /*
202  * There is another, more flexible method of obtaining lwlocks. First, call
203  * LWLockNewTrancheId just once to obtain a tranche ID; this allocates from
204  * a shared counter. Next, each individual process using the tranche should
205  * call LWLockRegisterTranche() to associate that tranche ID with appropriate
206  * metadata. Finally, LWLockInitialize should be called just once per lwlock,
207  * passing the tranche ID as an argument.
208  *
209  * It may seem strange that each process using the tranche must register it
210  * separately, but dynamic shared memory segments aren't guaranteed to be
211  * mapped at the same address in all coordinating backends, so storing the
212  * registration in the main shared memory segment wouldn't work for that case.
213  */
214 extern int LWLockNewTrancheId(void);
215 extern void LWLockRegisterTranche(int tranche_id, LWLockTranche *tranche);
216 extern void LWLockInitialize(LWLock *lock, int tranche_id);
217 
218 /*
219  * We reserve a few predefined tranche IDs. A call to LWLockNewTrancheId
220  * will never return a value less than LWTRANCHE_FIRST_USER_DEFINED.
221  */
222 typedef enum BuiltinTrancheIds
223 {
240 
241 /*
242  * Prior to PostgreSQL 9.4, we used an enum type called LWLockId to refer
243  * to LWLocks. New code should instead use LWLock *. However, for the
244  * convenience of third-party code, we include the following typedef.
245  */
246 typedef LWLock *LWLockId;
247 
248 #endif /* LWLOCK_H */
int slock_t
Definition: s_lock.h:911
bool LWLockHeldByMe(LWLock *lock)
Definition: lwlock.c:1786
Definition: lwlock.h:58
LWLock * LWLockId
Definition: lwlock.h:246
char pad[LWLOCK_MINIMAL_SIZE]
Definition: lwlock.h:116
void LWLockRegisterTranche(int tranche_id, LWLockTranche *tranche)
Definition: lwlock.c:627
bool LWLockAcquireOrWait(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1268
Size LWLockShmemSize(void)
Definition: lwlock.c:394
LWLockMode
Definition: lwlock.h:161
void LWLockUpdateVar(LWLock *lock, uint64 *valptr, uint64 value)
Definition: lwlock.c:1598
void LWLockReleaseClearVar(LWLock *lock, uint64 *valptr, uint64 val)
Definition: lwlock.c:1736
struct LWLockTranche LWLockTranche
void LWLockInitialize(LWLock *lock, int tranche_id)
Definition: lwlock.c:700
void InitLWLockAccess(void)
Definition: lwlock.c:531
bool LWLockConditionalAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1213
pg_atomic_uint32 state
Definition: lwlock.h:63
struct NamedLWLockTranche NamedLWLockTranche
int LWLockNewTrancheId(void)
Definition: lwlock.c:607
void LWLockReleaseAll(void)
Definition: lwlock.c:1768
#define PGDLLIMPORT
Definition: c.h:1032
const char * name
Definition: lwlock.h:48
LWLockTranche lwLockTranche
Definition: lwlock.h:125
Size array_stride
Definition: lwlock.h:50
unsigned short uint16
Definition: c.h:253
#define LWLOCK_MINIMAL_SIZE
Definition: lwlock.h:103
void CreateLWLocks(void)
Definition: lwlock.c:423
union LWLockMinimallyPadded LWLockMinimallyPadded
uint16 tranche
Definition: lwlock.h:61
PGDLLIMPORT NamedLWLockTranche * NamedLWLockTrancheArray
Definition: lwlock.c:158
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1043
static struct @74 value
LWLock lock
Definition: lwlock.h:108
void * array_base
Definition: lwlock.h:49
size_t Size
Definition: c.h:341
dlist_head waiters
Definition: lwlock.h:67
#define newval
char * MainLWLockNames[]
BuiltinTrancheIds
Definition: lwlock.h:222
PGDLLIMPORT LWLockPadded * MainLWLockArray
Definition: lwlock.c:126
void RequestNamedLWLockTranche(const char *tranche_name, int num_lwlocks)
Definition: lwlock.c:660
bool LWLockWaitForVar(LWLock *lock, uint64 *valptr, uint64 oldval, uint64 *newval)
Definition: lwlock.c:1459
#define LWLOCK_PADDED_SIZE
Definition: lwlock.h:102
void LWLockRelease(LWLock *lock)
Definition: lwlock.c:1662
Definition: proc.h:83
char pad[LWLOCK_PADDED_SIZE]
Definition: lwlock.h:109
LWLockPadded * GetNamedLWLockTranche(const char *tranche_name)
Definition: lwlock.c:573
long val
Definition: informix.c:689
slock_t mutex
Definition: lwlock.h:60
struct LWLock LWLock
LWLock * LWLockAssign(void)
Definition: lwlock.c:547
union LWLockPadded LWLockPadded
PGDLLIMPORT int NamedLWLockTrancheRequests
Definition: lwlock.c:156