/*-------------------------------------------------------------------------
 *
 * atomics.h
 *	  Atomic operations.
 *
 * Hardware and compiler dependent functions for manipulating memory
 * atomically and dealing with cache coherency. Used to implement locking
 * facilities and lockless algorithms/data structures.
 *
 * To bring up postgres on a platform/compiler, at the very least
 * implementations for the following operations should be provided:
 * * pg_compiler_barrier(), pg_write_barrier(), pg_read_barrier()
 * * pg_atomic_compare_exchange_u32(), pg_atomic_fetch_add_u32()
 * * pg_atomic_test_set_flag(), pg_atomic_init_flag(), pg_atomic_clear_flag()
 * * PG_HAVE_8BYTE_SINGLE_COPY_ATOMICITY should be defined if appropriate.
 *
 * There exist generic, hardware-independent implementations for several
 * compilers which might be sufficient, although possibly not optimal, for a
 * new platform. If no such generic implementation is available, spinlocks
 * (or even OS provided semaphores) will be used to implement the API.
 *
 * Implement _u64 atomics if and only if your platform can use them
 * efficiently (and obviously correctly).
 *
 * Use higher level functionality (lwlocks, spinlocks, heavyweight locks)
 * whenever possible. Writing correct code using these facilities is hard.
 *
 * For an introduction to using memory barriers within the PostgreSQL backend,
 * see src/backend/storage/lmgr/README.barrier
 *
 * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * src/include/port/atomics.h
 *
 *-------------------------------------------------------------------------
 */
#ifndef ATOMICS_H
#define ATOMICS_H

#ifdef FRONTEND
#error "atomics.h may not be included from frontend code"
#endif

#define INSIDE_ATOMICS_H

#include <limits.h>

/*
 * First a set of architecture specific files is included.
 *
 * These files can provide the full set of atomics or can do pretty much
 * nothing if all the compilers commonly used on these platforms provide
 * usable generics.
 *
 * Don't add inline assembly for the actual atomic operations if all the
 * common implementations of your platform provide intrinsics. Intrinsics are
 * much easier to understand and potentially support more architectures.
 *
 * It will often make sense to define memory barrier semantics here, since
 * e.g. generic compiler intrinsics for x86 memory barriers can't know that
 * postgres doesn't need x86 read/write barriers to do anything more than a
 * compiler barrier.
 */
#if defined(__arm__) || defined(__arm) || \
	defined(__aarch64__) || defined(__aarch64)
#include "port/atomics/arch-arm.h"
#elif defined(__i386__) || defined(__i386) || defined(__x86_64__)
#include "port/atomics/arch-x86.h"
#elif defined(__ia64__) || defined(__ia64)
#include "port/atomics/arch-ia64.h"
#elif defined(__ppc__) || defined(__powerpc__) || defined(__ppc64__) || defined(__powerpc64__)
#include "port/atomics/arch-ppc.h"
#elif defined(__hppa) || defined(__hppa__)
#include "port/atomics/arch-hppa.h"
#endif

/*
 * Compiler specific, but architecture independent implementations.
 *
 * Provide architecture independent implementations of the atomic
 * facilities. At the very least compiler barriers should be provided, but a
 * full implementation of
 * * pg_compiler_barrier(), pg_write_barrier(), pg_read_barrier()
 * * pg_atomic_compare_exchange_u32(), pg_atomic_fetch_add_u32()
 * using compiler intrinsics is a good idea.
 */
/*
 * Given a gcc-compatible xlc compiler, prefer the xlc implementation. The
 * ppc64le "IBM XL C/C++ for Linux, V13.1.2" implements both interfaces, but
 * __sync_lock_test_and_set() of one-byte types elicits SIGSEGV.
 */
#if defined(__IBMC__) || defined(__IBMCPP__)
#include "port/atomics/generic-xlc.h"
/* gcc or compatible, including clang and icc */
#elif defined(__GNUC__) || defined(__INTEL_COMPILER)
#include "port/atomics/generic-gcc.h"
#elif defined(_MSC_VER)
#include "port/atomics/generic-msvc.h"
#elif defined(__hpux) && defined(__ia64) && !defined(__GNUC__)
#include "port/atomics/generic-acc.h"
#elif defined(__SUNPRO_C) && !defined(__GNUC__)
#include "port/atomics/generic-sunpro.h"
#else
/*
 * Unsupported compiler, we'll likely use slower fallbacks... At least
 * compiler barriers should really be provided.
 */
#endif

/*
 * Provide a full fallback of the pg_*_barrier(), pg_atomic_*_flag and
 * pg_atomic_* APIs for platforms without sufficient spinlock and/or atomics
 * support. In the case of spinlock backed atomics the emulation is expected
 * to be efficient, although less so than native atomics support.
 */
#include "port/atomics/fallback.h"

/*
 * Provide additional operations using supported infrastructure. These are
 * expected to be efficient if the underlying atomic operations are efficient.
 */
#include "port/atomics/generic.h"


/*
 * pg_compiler_barrier - prevent the compiler from moving code across the
 * barrier.
 *
 * A compiler barrier need not (and preferably should not) emit any actual
 * machine code, but must act as an optimization fence: the compiler must not
 * reorder loads or stores to main memory around the barrier. However, the
 * CPU may still reorder loads or stores at runtime, if the architecture's
 * memory model permits this.
 */
#define pg_compiler_barrier() pg_compiler_barrier_impl()

/*
 * pg_memory_barrier - prevent the CPU from reordering memory access
 *
 * A memory barrier must act as a compiler barrier, and in addition must
 * guarantee that all loads and stores issued prior to the barrier are
 * completed before any loads or stores issued after the barrier. Unless
 * loads and stores are totally ordered (which is not the case on most
 * architectures) this requires issuing some sort of memory fencing
 * instruction.
 */
#define pg_memory_barrier() pg_memory_barrier_impl()

/*
 * pg_(read|write)_barrier - prevent the CPU from reordering memory access
 *
 * A read barrier must act as a compiler barrier, and in addition must
 * guarantee that any loads issued prior to the barrier are completed before
 * any loads issued after the barrier. Similarly, a write barrier acts
 * as a compiler barrier, and also orders stores. Read and write barriers
 * are thus weaker than a full memory barrier, but stronger than a compiler
 * barrier. In practice, on machines with strong memory ordering, read and
 * write barriers may require nothing more than a compiler barrier.
 */
#define pg_read_barrier() pg_read_barrier_impl()
#define pg_write_barrier() pg_write_barrier_impl()

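/*
 * Illustrative sketch (not part of the API): typical paired use of the
 * read/write barriers defined above, with hypothetical shared variables
 * "shared_data" and "shared_flag". The write barrier keeps the data store
 * ahead of the flag store; the matching read barrier keeps the flag load
 * ahead of the data load. See src/backend/storage/lmgr/README.barrier for
 * the full discussion.
 *
 *	In the producing process:
 *		shared_data = 42;
 *		pg_write_barrier();
 *		shared_flag = 1;
 *
 *	In the consuming process:
 *		if (shared_flag)
 *		{
 *			pg_read_barrier();
 *			... use shared_data, guaranteed to see 42 ...
 *		}
 */
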
/*
 * Spinloop delay - Allow CPU to relax in busy loops
 */
#define pg_spin_delay() pg_spin_delay_impl()

/*
 * pg_atomic_init_flag - initialize atomic flag.
 *
 * No barrier semantics.
 */
static inline void
pg_atomic_init_flag(volatile pg_atomic_flag *ptr)
{
	AssertPointerAlignment(ptr, sizeof(*ptr));

	pg_atomic_init_flag_impl(ptr);
}

/*
 * pg_atomic_test_set_flag - TAS()
 *
 * Returns true if the flag has successfully been set, false otherwise.
 *
 * Acquire (including read barrier) semantics.
 */
static inline bool
pg_atomic_test_set_flag(volatile pg_atomic_flag *ptr)
{
	AssertPointerAlignment(ptr, sizeof(*ptr));

	return pg_atomic_test_set_flag_impl(ptr);
}

/*
 * pg_atomic_unlocked_test_flag - Check if the lock is free
 *
 * Returns true if the flag is currently not set, false otherwise.
 *
 * No barrier semantics.
 */
static inline bool
pg_atomic_unlocked_test_flag(volatile pg_atomic_flag *ptr)
{
	AssertPointerAlignment(ptr, sizeof(*ptr));

	return pg_atomic_unlocked_test_flag_impl(ptr);
}

/*
 * pg_atomic_clear_flag - release lock set by TAS()
 *
 * Release (including write barrier) semantics.
 */
static inline void
pg_atomic_clear_flag(volatile pg_atomic_flag *ptr)
{
	AssertPointerAlignment(ptr, sizeof(*ptr));

	pg_atomic_clear_flag_impl(ptr);
}

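/*
 * Illustrative sketch (not part of the API): the flag operations above
 * compose into a minimal test-and-set spinlock; "my_flag" is a hypothetical
 * shared pg_atomic_flag. Real code should normally use the spinlock or
 * lwlock facilities instead.
 *
 *	pg_atomic_init_flag(&my_flag);			... once, before concurrent use
 *
 *	while (!pg_atomic_test_set_flag(&my_flag))
 *		pg_spin_delay();					... busy-wait to acquire
 *	... critical section ...
 *	pg_atomic_clear_flag(&my_flag);			... release
 */
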
/*
 * pg_atomic_init_u32 - initialize atomic variable
 *
 * Has to be done before any concurrent usage.
 *
 * No barrier semantics.
 */
static inline void
pg_atomic_init_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
{
	AssertPointerAlignment(ptr, 4);

	pg_atomic_init_u32_impl(ptr, val);
}

/*
 * pg_atomic_read_u32 - unlocked read from atomic variable.
 *
 * The read is guaranteed to return a value that was written by this or
 * another process at some point in the past. There is, however, no cache
 * coherency interaction guaranteeing that the value hasn't since been
 * overwritten.
 *
 * No barrier semantics.
 */
static inline uint32
pg_atomic_read_u32(volatile pg_atomic_uint32 *ptr)
{
	AssertPointerAlignment(ptr, 4);
	return pg_atomic_read_u32_impl(ptr);
}

/*
 * pg_atomic_write_u32 - write to atomic variable.
 *
 * The write is guaranteed to succeed as a whole, i.e. it's not possible to
 * observe a partial write for any reader. Note that this correctly interacts
 * with pg_atomic_compare_exchange_u32, in contrast to
 * pg_atomic_unlocked_write_u32().
 *
 * No barrier semantics.
 */
static inline void
pg_atomic_write_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
{
	AssertPointerAlignment(ptr, 4);

	pg_atomic_write_u32_impl(ptr, val);
}

/*
 * pg_atomic_unlocked_write_u32 - unlocked write to atomic variable.
 *
 * The write is guaranteed to succeed as a whole, i.e. it's not possible to
 * observe a partial write for any reader. But note that writing this way is
 * not guaranteed to correctly interact with read-modify-write operations like
 * pg_atomic_compare_exchange_u32. This should only be used in cases where
 * minor performance regressions due to atomics emulation are unacceptable.
 *
 * No barrier semantics.
 */
static inline void
pg_atomic_unlocked_write_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
{
	AssertPointerAlignment(ptr, 4);

	pg_atomic_unlocked_write_u32_impl(ptr, val);
}

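/*
 * Illustrative sketch (not part of the API): pg_atomic_unlocked_write_u32()
 * is safe only while no concurrent read-modify-write operations on the same
 * variable are possible, e.g. when resetting a hypothetical counter during
 * single-backend initialization:
 *
 *	pg_atomic_unlocked_write_u32(&counter, 0);
 *
 * Once other processes may be running pg_atomic_fetch_add_u32() etc. on the
 * variable, use pg_atomic_write_u32() instead.
 */
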
/*
 * pg_atomic_exchange_u32 - exchange newval with current value
 *
 * Returns the old value of 'ptr' before the swap.
 *
 * Full barrier semantics.
 */
static inline uint32
pg_atomic_exchange_u32(volatile pg_atomic_uint32 *ptr, uint32 newval)
{
	AssertPointerAlignment(ptr, 4);

	return pg_atomic_exchange_u32_impl(ptr, newval);
}

/*
 * pg_atomic_compare_exchange_u32 - CAS operation
 *
 * Atomically compare the current value of ptr with *expected and store newval
 * iff ptr and *expected have the same value. The current value of *ptr will
 * always be stored in *expected.
 *
 * Return true if values have been exchanged, false otherwise.
 *
 * Full barrier semantics.
 */
static inline bool
pg_atomic_compare_exchange_u32(volatile pg_atomic_uint32 *ptr,
							   uint32 *expected, uint32 newval)
{
	AssertPointerAlignment(ptr, 4);
	AssertPointerAlignment(expected, 4);

	return pg_atomic_compare_exchange_u32_impl(ptr, expected, newval);
}

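/*
 * Illustrative sketch (not part of the API): the canonical retry loop for a
 * read-modify-write operation that has no dedicated helper, here a
 * saturating increment of a hypothetical pg_atomic_uint32 "v". On failure
 * the CAS stores the current value in "old", so no explicit re-read is
 * needed.
 *
 *	uint32		old = pg_atomic_read_u32(&v);
 *
 *	while (old != PG_UINT32_MAX &&
 *		   !pg_atomic_compare_exchange_u32(&v, &old, old + 1))
 *		;
 */
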
/*
 * pg_atomic_fetch_add_u32 - atomically add to variable
 *
 * Returns the value of ptr before the arithmetic operation.
 *
 * Full barrier semantics.
 */
static inline uint32
pg_atomic_fetch_add_u32(volatile pg_atomic_uint32 *ptr, int32 add_)
{
	AssertPointerAlignment(ptr, 4);
	return pg_atomic_fetch_add_u32_impl(ptr, add_);
}

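/*
 * Illustrative sketch (not part of the API): fetch-add hands out unique
 * values cheaply, e.g. ticket numbers from a hypothetical shared
 * pg_atomic_uint32 "next_ticket"; every caller receives a distinct
 * pre-increment value, with full barrier semantics as documented above.
 *
 *	uint32		my_ticket = pg_atomic_fetch_add_u32(&next_ticket, 1);
 */
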
/*
 * pg_atomic_fetch_sub_u32 - atomically subtract from variable
 *
 * Returns the value of ptr before the arithmetic operation. Note that sub_
 * may not be INT_MIN due to platform limitations.
 *
 * Full barrier semantics.
 */
static inline uint32
pg_atomic_fetch_sub_u32(volatile pg_atomic_uint32 *ptr, int32 sub_)
{
	AssertPointerAlignment(ptr, 4);
	Assert(sub_ != INT_MIN);
	return pg_atomic_fetch_sub_u32_impl(ptr, sub_);
}

/*
 * pg_atomic_fetch_and_u32 - atomically bit-and and_ with variable
 *
 * Returns the value of ptr before the arithmetic operation.
 *
 * Full barrier semantics.
 */
static inline uint32
pg_atomic_fetch_and_u32(volatile pg_atomic_uint32 *ptr, uint32 and_)
{
	AssertPointerAlignment(ptr, 4);
	return pg_atomic_fetch_and_u32_impl(ptr, and_);
}

/*
 * pg_atomic_fetch_or_u32 - atomically bit-or or_ with variable
 *
 * Returns the value of ptr before the arithmetic operation.
 *
 * Full barrier semantics.
 */
static inline uint32
pg_atomic_fetch_or_u32(volatile pg_atomic_uint32 *ptr, uint32 or_)
{
	AssertPointerAlignment(ptr, 4);
	return pg_atomic_fetch_or_u32_impl(ptr, or_);
}

/*
 * pg_atomic_add_fetch_u32 - atomically add to variable
 *
 * Returns the value of ptr after the arithmetic operation.
 *
 * Full barrier semantics.
 */
static inline uint32
pg_atomic_add_fetch_u32(volatile pg_atomic_uint32 *ptr, int32 add_)
{
	AssertPointerAlignment(ptr, 4);
	return pg_atomic_add_fetch_u32_impl(ptr, add_);
}

/*
 * pg_atomic_sub_fetch_u32 - atomically subtract from variable
 *
 * Returns the value of ptr after the arithmetic operation. Note that sub_ may
 * not be INT_MIN due to platform limitations.
 *
 * Full barrier semantics.
 */
static inline uint32
pg_atomic_sub_fetch_u32(volatile pg_atomic_uint32 *ptr, int32 sub_)
{
	AssertPointerAlignment(ptr, 4);
	Assert(sub_ != INT_MIN);
	return pg_atomic_sub_fetch_u32_impl(ptr, sub_);
}

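/*
 * Illustrative sketch (not part of the API): the add_fetch/sub_fetch
 * variants suit reference counting, where the caller needs the value after
 * the update. "refcount" and release_object() are hypothetical names for
 * this example only.
 *
 *	pg_atomic_add_fetch_u32(&refcount, 1);
 *	... use the object ...
 *	if (pg_atomic_sub_fetch_u32(&refcount, 1) == 0)
 *		release_object();					... last reference dropped
 */
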
/* ----
 * The 64-bit operations have the same semantics as their 32-bit counterparts
 * if they are available. Check the corresponding 32-bit function for
 * documentation.
 * ----
 */
static inline void
pg_atomic_init_u64(volatile pg_atomic_uint64 *ptr, uint64 val)
{
	/*
	 * Can't necessarily enforce alignment - and don't need it - when using
	 * the spinlock based fallback implementation. Therefore only assert when
	 * not using it.
	 */
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
	AssertPointerAlignment(ptr, 8);
#endif
	pg_atomic_init_u64_impl(ptr, val);
}

static inline uint64
pg_atomic_read_u64(volatile pg_atomic_uint64 *ptr)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
	AssertPointerAlignment(ptr, 8);
#endif
	return pg_atomic_read_u64_impl(ptr);
}

static inline void
pg_atomic_write_u64(volatile pg_atomic_uint64 *ptr, uint64 val)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
	AssertPointerAlignment(ptr, 8);
#endif
	pg_atomic_write_u64_impl(ptr, val);
}

static inline uint64
pg_atomic_exchange_u64(volatile pg_atomic_uint64 *ptr, uint64 newval)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
	AssertPointerAlignment(ptr, 8);
#endif
	return pg_atomic_exchange_u64_impl(ptr, newval);
}

static inline bool
pg_atomic_compare_exchange_u64(volatile pg_atomic_uint64 *ptr,
							   uint64 *expected, uint64 newval)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
	AssertPointerAlignment(ptr, 8);
	AssertPointerAlignment(expected, 8);
#endif
	return pg_atomic_compare_exchange_u64_impl(ptr, expected, newval);
}

static inline uint64
pg_atomic_fetch_add_u64(volatile pg_atomic_uint64 *ptr, int64 add_)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
	AssertPointerAlignment(ptr, 8);
#endif
	return pg_atomic_fetch_add_u64_impl(ptr, add_);
}

static inline uint64
pg_atomic_fetch_sub_u64(volatile pg_atomic_uint64 *ptr, int64 sub_)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
	AssertPointerAlignment(ptr, 8);
#endif
	Assert(sub_ != PG_INT64_MIN);
	return pg_atomic_fetch_sub_u64_impl(ptr, sub_);
}

static inline uint64
pg_atomic_fetch_and_u64(volatile pg_atomic_uint64 *ptr, uint64 and_)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
	AssertPointerAlignment(ptr, 8);
#endif
	return pg_atomic_fetch_and_u64_impl(ptr, and_);
}

static inline uint64
pg_atomic_fetch_or_u64(volatile pg_atomic_uint64 *ptr, uint64 or_)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
	AssertPointerAlignment(ptr, 8);
#endif
	return pg_atomic_fetch_or_u64_impl(ptr, or_);
}

static inline uint64
pg_atomic_add_fetch_u64(volatile pg_atomic_uint64 *ptr, int64 add_)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
	AssertPointerAlignment(ptr, 8);
#endif
	return pg_atomic_add_fetch_u64_impl(ptr, add_);
}

static inline uint64
pg_atomic_sub_fetch_u64(volatile pg_atomic_uint64 *ptr, int64 sub_)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
	AssertPointerAlignment(ptr, 8);
#endif
	Assert(sub_ != PG_INT64_MIN);
	return pg_atomic_sub_fetch_u64_impl(ptr, sub_);
}

#undef INSIDE_ATOMICS_H

#endif							/* ATOMICS_H */