PostgreSQL Source Code  git master
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros
atomics.h
Go to the documentation of this file.
1 /*-------------------------------------------------------------------------
2  *
3  * atomics.h
4  * Atomic operations.
5  *
6  * Hardware and compiler dependent functions for manipulating memory
7  * atomically and dealing with cache coherency. Used to implement locking
8  * facilities and lockless algorithms/data structures.
9  *
10  * To bring up postgres on a platform/compiler at the very least
11  * implementations for the following operations should be provided:
12  * * pg_compiler_barrier(), pg_write_barrier(), pg_read_barrier()
13  * * pg_atomic_compare_exchange_u32(), pg_atomic_fetch_add_u32()
14  * * pg_atomic_test_set_flag(), pg_atomic_init_flag(), pg_atomic_clear_flag()
15  *
16  * There exist generic, hardware independent, implementations for several
17  * compilers which might be sufficient, although possibly not optimal, for a
18  * new platform. If no such generic implementation is available spinlocks (or
19  * even OS provided semaphores) will be used to implement the API.
20  *
21  * Implement the _u64 variants if and only if your platform can use them
22  * efficiently (and obviously correctly).
23  *
24  * Use higher level functionality (lwlocks, spinlocks, heavyweight locks)
25  * whenever possible. Writing correct code using these facilities is hard.
26  *
27  * For an introduction to using memory barriers within the PostgreSQL backend,
28  * see src/backend/storage/lmgr/README.barrier
29  *
30  * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
31  * Portions Copyright (c) 1994, Regents of the University of California
32  *
33  * src/include/port/atomics.h
34  *
35  *-------------------------------------------------------------------------
36  */
37 #ifndef ATOMICS_H
38 #define ATOMICS_H
39 
40 #ifdef FRONTEND
41 #error "atomics.h may not be included from frontend code"
42 #endif
43 
44 #define INSIDE_ATOMICS_H
45 
46 #include <limits.h>
47 
48 /*
49  * First a set of architecture specific files is included.
50  *
51  * These files can provide the full set of atomics or can do pretty much
52  * nothing if all the compilers commonly used on these platforms provide
53  * usable generics.
54  *
55  * Don't add an inline assembly of the actual atomic operations if all the
56  * common implementations of your platform provide intrinsics. Intrinsics are
57  * much easier to understand and potentially support more architectures.
58  *
59  * It will often make sense to define memory barrier semantics here, since
60  * e.g. generic compiler intrinsics for x86 memory barriers can't know that
 61  * postgres doesn't need x86 read/write barriers to do anything more than a
62  * compiler barrier.
63  *
64  */
65 #if defined(__arm__) || defined(__arm) || \
66  defined(__aarch64__) || defined(__aarch64)
67 #include "port/atomics/arch-arm.h"
68 #elif defined(__i386__) || defined(__i386) || defined(__x86_64__)
69 #include "port/atomics/arch-x86.h"
70 #elif defined(__ia64__) || defined(__ia64)
71 #include "port/atomics/arch-ia64.h"
72 #elif defined(__ppc__) || defined(__powerpc__) || defined(__ppc64__) || defined(__powerpc64__)
73 #include "port/atomics/arch-ppc.h"
74 #elif defined(__hppa) || defined(__hppa__)
75 #include "port/atomics/arch-hppa.h"
76 #endif
77 
78 /*
79  * Compiler specific, but architecture independent implementations.
80  *
81  * Provide architecture independent implementations of the atomic
82  * facilities. At the very least compiler barriers should be provided, but a
83  * full implementation of
84  * * pg_compiler_barrier(), pg_write_barrier(), pg_read_barrier()
85  * * pg_atomic_compare_exchange_u32(), pg_atomic_fetch_add_u32()
86  * using compiler intrinsics are a good idea.
87  */
88 /*
89  * Given a gcc-compatible xlc compiler, prefer the xlc implementation. The
90  * ppc64le "IBM XL C/C++ for Linux, V13.1.2" implements both interfaces, but
91  * __sync_lock_test_and_set() of one-byte types elicits SIGSEGV.
92  */
93 #if defined(__IBMC__) || defined(__IBMCPP__)
95 /* gcc or compatible, including clang and icc */
96 #elif defined(__GNUC__) || defined(__INTEL_COMPILER)
98 #elif defined(WIN32_ONLY_COMPILER)
100 #elif defined(__hpux) && defined(__ia64) && !defined(__GNUC__)
102 #elif defined(__SUNPRO_C) && !defined(__GNUC__)
104 #else
105 /*
106  * Unsupported compiler, we'll likely use slower fallbacks... At least
107  * compiler barriers should really be provided.
108  */
109 #endif
110 
111 /*
112  * Provide a full fallback of the pg_*_barrier(), pg_atomic**_flag and
113  * pg_atomic_*_u32 APIs for platforms without sufficient spinlock and/or
114  * atomics support. In the case of spinlock backed atomics the emulation is
115  * expected to be efficient, although less so than native atomics support.
116  */
117 #include "port/atomics/fallback.h"
118 
119 /*
120  * Provide additional operations using supported infrastructure. These are
121  * expected to be efficient if the underlying atomic operations are efficient.
122  */
123 #include "port/atomics/generic.h"
124 
125 
126 /*
127  * pg_compiler_barrier - prevent the compiler from moving code across
128  *
129  * A compiler barrier need not (and preferably should not) emit any actual
130  * machine code, but must act as an optimization fence: the compiler must not
131  * reorder loads or stores to main memory around the barrier. However, the
132  * CPU may still reorder loads or stores at runtime, if the architecture's
133  * memory model permits this.
134  */
135 #define pg_compiler_barrier() pg_compiler_barrier_impl()
136 
137 /*
138  * pg_memory_barrier - prevent the CPU from reordering memory access
139  *
140  * A memory barrier must act as a compiler barrier, and in addition must
141  * guarantee that all loads and stores issued prior to the barrier are
142  * completed before any loads or stores issued after the barrier. Unless
143  * loads and stores are totally ordered (which is not the case on most
144  * architectures) this requires issuing some sort of memory fencing
145  * instruction.
146  */
147 #define pg_memory_barrier() pg_memory_barrier_impl()
148 
149 /*
150  * pg_(read|write)_barrier - prevent the CPU from reordering memory access
151  *
152  * A read barrier must act as a compiler barrier, and in addition must
153  * guarantee that any loads issued prior to the barrier are completed before
154  * any loads issued after the barrier. Similarly, a write barrier acts
155  * as a compiler barrier, and also orders stores. Read and write barriers
156  * are thus weaker than a full memory barrier, but stronger than a compiler
157  * barrier. In practice, on machines with strong memory ordering, read and
158  * write barriers may require nothing more than a compiler barrier.
159  */
160 #define pg_read_barrier() pg_read_barrier_impl()
161 #define pg_write_barrier() pg_write_barrier_impl()
162 
163 /*
164  * Spinloop delay - Allow CPU to relax in busy loops
165  */
166 #define pg_spin_delay() pg_spin_delay_impl()
167 
168 /*
169  * pg_atomic_init_flag - initialize atomic flag.
170  *
171  * No barrier semantics.
172  */
173 static inline void
175 {
176  AssertPointerAlignment(ptr, sizeof(*ptr));
177 
179 }
180 
181 /*
182  * pg_atomic_test_and_set_flag - TAS()
183  *
184  * Returns true if the flag has successfully been set, false otherwise.
185  *
186  * Acquire (including read barrier) semantics.
187  */
188 static inline bool
190 {
191  AssertPointerAlignment(ptr, sizeof(*ptr));
192 
193  return pg_atomic_test_set_flag_impl(ptr);
194 }
195 
196 /*
197  * pg_atomic_unlocked_test_flag - Check if the lock is free
198  *
199  * Returns true if the flag currently is not set, false otherwise.
200  *
201  * No barrier semantics.
202  */
203 static inline bool
205 {
206  AssertPointerAlignment(ptr, sizeof(*ptr));
207 
209 }
210 
211 /*
212  * pg_atomic_clear_flag - release lock set by TAS()
213  *
214  * Release (including write barrier) semantics.
215  */
216 static inline void
218 {
219  AssertPointerAlignment(ptr, sizeof(*ptr));
220 
222 }
223 
224 
225 /*
226  * pg_atomic_init_u32 - initialize atomic variable
227  *
228  * Has to be done before any concurrent usage..
229  *
230  * No barrier semantics.
231  */
232 static inline void
234 {
235  AssertPointerAlignment(ptr, 4);
236 
237  pg_atomic_init_u32_impl(ptr, val);
238 }
239 
240 /*
241  * pg_atomic_read_u32 - unlocked read from atomic variable.
242  *
243  * The read is guaranteed to return a value as it has been written by this or
244  * another process at some point in the past. There's however no cache
245  * coherency interaction guaranteeing the value hasn't since been written to
246  * again.
247  *
248  * No barrier semantics.
249  */
250 static inline uint32
252 {
253  AssertPointerAlignment(ptr, 4);
254  return pg_atomic_read_u32_impl(ptr);
255 }
256 
257 /*
258  * pg_atomic_write_u32 - write to atomic variable.
259  *
260  * The write is guaranteed to succeed as a whole, i.e. it's not possible to
261  * observe a partial write for any reader. Note that this correctly interacts
262  * with pg_atomic_compare_exchange_u32, in contrast to
263  * pg_atomic_unlocked_write_u32().
264  *
265  * No barrier semantics.
266  */
267 static inline void
269 {
270  AssertPointerAlignment(ptr, 4);
271 
272  pg_atomic_write_u32_impl(ptr, val);
273 }
274 
275 /*
276  * pg_atomic_unlocked_write_u32 - unlocked write to atomic variable.
277  *
278  * The write is guaranteed to succeed as a whole, i.e. it's not possible to
279  * observe a partial write for any reader. But note that writing this way is
280  * not guaranteed to correctly interact with read-modify-write operations like
281  * pg_atomic_compare_exchange_u32. This should only be used in cases where
282  * minor performance regressions due to atomics emulation are unacceptable.
283  *
284  * No barrier semantics.
285  */
286 static inline void
288 {
289  AssertPointerAlignment(ptr, 4);
290 
292 }
293 
294 /*
295  * pg_atomic_exchange_u32 - exchange newval with current value
296  *
297  * Returns the old value of 'ptr' before the swap.
298  *
299  * Full barrier semantics.
300  */
301 static inline uint32
303 {
304  AssertPointerAlignment(ptr, 4);
305 
306  return pg_atomic_exchange_u32_impl(ptr, newval);
307 }
308 
309 /*
310  * pg_atomic_compare_exchange_u32 - CAS operation
311  *
312  * Atomically compare the current value of ptr with *expected and store newval
313  * iff ptr and *expected have the same value. The current value of *ptr will
314  * always be stored in *expected.
315  *
316  * Return true if values have been exchanged, false otherwise.
317  *
318  * Full barrier semantics.
319  */
320 static inline bool
322  uint32 *expected, uint32 newval)
323 {
324  AssertPointerAlignment(ptr, 4);
325  AssertPointerAlignment(expected, 4);
326 
327  return pg_atomic_compare_exchange_u32_impl(ptr, expected, newval);
328 }
329 
330 /*
331  * pg_atomic_fetch_add_u32 - atomically add to variable
332  *
333  * Returns the value of ptr before the arithmetic operation.
334  *
335  * Full barrier semantics.
336  */
337 static inline uint32
339 {
340  AssertPointerAlignment(ptr, 4);
341  return pg_atomic_fetch_add_u32_impl(ptr, add_);
342 }
343 
344 /*
345  * pg_atomic_fetch_sub_u32 - atomically subtract from variable
346  *
347  * Returns the value of ptr before the arithmetic operation. Note that sub_
348  * may not be INT_MIN due to platform limitations.
349  *
350  * Full barrier semantics.
351  */
352 static inline uint32
354 {
355  AssertPointerAlignment(ptr, 4);
356  Assert(sub_ != INT_MIN);
357  return pg_atomic_fetch_sub_u32_impl(ptr, sub_);
358 }
359 
360 /*
361  * pg_atomic_fetch_and_u32 - atomically bit-and and_ with variable
362  *
363  * Returns the value of ptr before the arithmetic operation.
364  *
365  * Full barrier semantics.
366  */
367 static inline uint32
369 {
370  AssertPointerAlignment(ptr, 4);
371  return pg_atomic_fetch_and_u32_impl(ptr, and_);
372 }
373 
374 /*
375  * pg_atomic_fetch_or_u32 - atomically bit-or or_ with variable
376  *
377  * Returns the value of ptr before the arithmetic operation.
378  *
379  * Full barrier semantics.
380  */
381 static inline uint32
383 {
384  AssertPointerAlignment(ptr, 4);
385  return pg_atomic_fetch_or_u32_impl(ptr, or_);
386 }
387 
388 /*
389  * pg_atomic_add_fetch_u32 - atomically add to variable
390  *
391  * Returns the value of ptr after the arithmetic operation.
392  *
393  * Full barrier semantics.
394  */
395 static inline uint32
397 {
398  AssertPointerAlignment(ptr, 4);
399  return pg_atomic_add_fetch_u32_impl(ptr, add_);
400 }
401 
402 /*
403  * pg_atomic_sub_fetch_u32 - atomically subtract from variable
404  *
405  * Returns the value of ptr after the arithmetic operation. Note that sub_ may
406  * not be INT_MIN due to platform limitations.
407  *
408  * Full barrier semantics.
409  */
410 static inline uint32
412 {
413  AssertPointerAlignment(ptr, 4);
414  Assert(sub_ != INT_MIN);
415  return pg_atomic_sub_fetch_u32_impl(ptr, sub_);
416 }
417 
418 /* ----
419  * The 64 bit operations have the same semantics as their 32bit counterparts
420  * if they are available. Check the corresponding 32bit function for
421  * documentation.
422  * ----
423  */
424 #ifdef PG_HAVE_ATOMIC_U64_SUPPORT
425 
426 static inline void
427 pg_atomic_init_u64(volatile pg_atomic_uint64 *ptr, uint64 val)
428 {
429  AssertPointerAlignment(ptr, 8);
430 
431  pg_atomic_init_u64_impl(ptr, val);
432 }
433 
434 static inline uint64
435 pg_atomic_read_u64(volatile pg_atomic_uint64 *ptr)
436 {
437  AssertPointerAlignment(ptr, 8);
438  return pg_atomic_read_u64_impl(ptr);
439 }
440 
441 static inline void
442 pg_atomic_write_u64(volatile pg_atomic_uint64 *ptr, uint64 val)
443 {
444  AssertPointerAlignment(ptr, 8);
445  pg_atomic_write_u64_impl(ptr, val);
446 }
447 
448 static inline uint64
449 pg_atomic_exchange_u64(volatile pg_atomic_uint64 *ptr, uint64 newval)
450 {
451  AssertPointerAlignment(ptr, 8);
452 
453  return pg_atomic_exchange_u64_impl(ptr, newval);
454 }
455 
456 static inline bool
457 pg_atomic_compare_exchange_u64(volatile pg_atomic_uint64 *ptr,
458  uint64 *expected, uint64 newval)
459 {
460  AssertPointerAlignment(ptr, 8);
461  AssertPointerAlignment(expected, 8);
462  return pg_atomic_compare_exchange_u64_impl(ptr, expected, newval);
463 }
464 
465 static inline uint64
466 pg_atomic_fetch_add_u64(volatile pg_atomic_uint64 *ptr, int64 add_)
467 {
468  AssertPointerAlignment(ptr, 8);
469  return pg_atomic_fetch_add_u64_impl(ptr, add_);
470 }
471 
472 static inline uint64
473 pg_atomic_fetch_sub_u64(volatile pg_atomic_uint64 *ptr, int64 sub_)
474 {
475  AssertPointerAlignment(ptr, 8);
476  Assert(sub_ != PG_INT64_MIN);
477  return pg_atomic_fetch_sub_u64_impl(ptr, sub_);
478 }
479 
480 static inline uint64
481 pg_atomic_fetch_and_u64(volatile pg_atomic_uint64 *ptr, uint64 and_)
482 {
483  AssertPointerAlignment(ptr, 8);
484  return pg_atomic_fetch_and_u64_impl(ptr, and_);
485 }
486 
487 static inline uint64
488 pg_atomic_fetch_or_u64(volatile pg_atomic_uint64 *ptr, uint64 or_)
489 {
490  AssertPointerAlignment(ptr, 8);
491  return pg_atomic_fetch_or_u64_impl(ptr, or_);
492 }
493 
494 static inline uint64
495 pg_atomic_add_fetch_u64(volatile pg_atomic_uint64 *ptr, int64 add_)
496 {
497  AssertPointerAlignment(ptr, 8);
498  return pg_atomic_add_fetch_u64_impl(ptr, add_);
499 }
500 
501 static inline uint64
502 pg_atomic_sub_fetch_u64(volatile pg_atomic_uint64 *ptr, int64 sub_)
503 {
504  AssertPointerAlignment(ptr, 8);
505  Assert(sub_ != PG_INT64_MIN);
506  return pg_atomic_sub_fetch_u64_impl(ptr, sub_);
507 }
508 
509 #endif							/* PG_HAVE_ATOMIC_U64_SUPPORT */
510 
511 #undef INSIDE_ATOMICS_H
512 
513 #endif /* ATOMICS_H */
static uint32 pg_atomic_read_u32_impl(volatile pg_atomic_uint32 *ptr)
Definition: generic.h:46
static void pg_atomic_clear_flag(volatile pg_atomic_flag *ptr)
Definition: atomics.h:217
static uint32 pg_atomic_fetch_sub_u32(volatile pg_atomic_uint32 *ptr, int32 sub_)
Definition: atomics.h:353
static bool pg_atomic_compare_exchange_u32(volatile pg_atomic_uint32 *ptr, uint32 *expected, uint32 newval)
Definition: atomics.h:321
static uint32 pg_atomic_sub_fetch_u32(volatile pg_atomic_uint32 *ptr, int32 sub_)
Definition: atomics.h:411
void pg_atomic_write_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val)
Definition: atomics.c:107
static void pg_atomic_unlocked_write_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val)
Definition: generic.h:64
void pg_atomic_clear_flag_impl(volatile pg_atomic_flag *ptr)
Definition: atomics.c:80
bool pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 *expected, uint32 newval)
Definition: atomics.c:120
static uint32 pg_atomic_add_fetch_u32(volatile pg_atomic_uint32 *ptr, int32 add_)
Definition: atomics.h:396
signed int int32
Definition: c.h:256
static uint32 pg_atomic_exchange_u32(volatile pg_atomic_uint32 *ptr, uint32 newval)
Definition: atomics.h:302
uint32 pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_)
Definition: atomics.c:148
#define PG_INT64_MIN
Definition: c.h:342
static bool pg_atomic_test_set_flag(volatile pg_atomic_flag *ptr)
Definition: atomics.h:189
static uint32 pg_atomic_fetch_and_u32(volatile pg_atomic_uint32 *ptr, uint32 and_)
Definition: atomics.h:368
unsigned int uint32
Definition: c.h:268
void pg_atomic_init_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val_)
Definition: atomics.c:89
void pg_atomic_init_flag_impl(volatile pg_atomic_flag *ptr)
Definition: atomics.c:55
#define AssertPointerAlignment(ptr, bndr)
Definition: c.h:679
static uint32 pg_atomic_fetch_add_u32(volatile pg_atomic_uint32 *ptr, int32 add_)
Definition: atomics.h:338
#define Assert(condition)
Definition: c.h:675
#define newval
static bool pg_atomic_unlocked_test_flag(volatile pg_atomic_flag *ptr)
Definition: atomics.h:204
static void pg_atomic_unlocked_write_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
Definition: atomics.h:287
bool pg_atomic_test_set_flag_impl(volatile pg_atomic_flag *ptr)
Definition: atomics.c:74
static uint32 pg_atomic_fetch_or_u32(volatile pg_atomic_uint32 *ptr, uint32 or_)
Definition: atomics.h:382
static void pg_atomic_init_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
Definition: atomics.h:233
static void pg_atomic_write_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
Definition: atomics.h:268
static void pg_atomic_init_flag(volatile pg_atomic_flag *ptr)
Definition: atomics.h:174
long val
Definition: informix.c:689
static bool pg_atomic_unlocked_test_flag_impl(volatile pg_atomic_flag *ptr)
Definition: fallback.h:118
static uint32 pg_atomic_read_u32(volatile pg_atomic_uint32 *ptr)
Definition: atomics.h:251