atomics.h
/*-------------------------------------------------------------------------
 *
 * atomics.h
 *	  Atomic operations.
 *
 * Hardware and compiler dependent functions for manipulating memory
 * atomically and dealing with cache coherency. Used to implement locking
 * facilities and lockless algorithms/data structures.
 *
 * To bring up postgres on a new platform/compiler, at the very least
 * implementations of the following operations should be provided:
 * * pg_compiler_barrier(), pg_write_barrier(), pg_read_barrier()
 * * pg_atomic_compare_exchange_u32(), pg_atomic_fetch_add_u32()
 * * pg_atomic_test_set_flag(), pg_atomic_init_flag(), pg_atomic_clear_flag()
 * * PG_HAVE_8BYTE_SINGLE_COPY_ATOMICITY should be defined if appropriate.
 *
 * There exist generic, hardware independent, implementations for several
 * compilers which might be sufficient, although possibly not optimal, for a
 * new platform. If no such generic implementation is available, spinlocks
 * (or even OS provided semaphores) will be used to implement the API.
 *
 * Implement _u64 atomics if and only if your platform can use them
 * efficiently (and obviously correctly).
 *
 * Use higher level functionality (lwlocks, spinlocks, heavyweight locks)
 * whenever possible. Writing correct code using these facilities is hard.
 *
 * For an introduction to using memory barriers within the PostgreSQL backend,
 * see src/backend/storage/lmgr/README.barrier
 *
 * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * src/include/port/atomics.h
 *
 *-------------------------------------------------------------------------
 */
#ifndef ATOMICS_H
#define ATOMICS_H

#ifdef FRONTEND
#error "atomics.h may not be included from frontend code"
#endif

#define INSIDE_ATOMICS_H

#include <limits.h>

/*
 * First a set of architecture specific files is included.
 *
 * These files can provide the full set of atomics or can do pretty much
 * nothing if all the compilers commonly used on these platforms provide
 * usable generics.
 *
 * Don't add inline assembly for the actual atomic operations if all the
 * compilers commonly used on your platform provide intrinsics. Intrinsics
 * are much easier to understand and potentially support more architectures.
 *
 * It will often make sense to define memory barrier semantics here, since
 * e.g. generic compiler intrinsics for x86 memory barriers can't know that
 * postgres doesn't need x86 read/write barriers to do anything more than a
 * compiler barrier.
 */
#if defined(__arm__) || defined(__arm) || \
	defined(__aarch64__) || defined(__aarch64)
#include "port/atomics/arch-arm.h"
#elif defined(__i386__) || defined(__i386) || defined(__x86_64__)
#include "port/atomics/arch-x86.h"
#elif defined(__ia64__) || defined(__ia64)
#include "port/atomics/arch-ia64.h"
#elif defined(__ppc__) || defined(__powerpc__) || defined(__ppc64__) || defined(__powerpc64__)
#include "port/atomics/arch-ppc.h"
#elif defined(__hppa) || defined(__hppa__)
#include "port/atomics/arch-hppa.h"
#endif

/*
 * Compiler specific, but architecture independent implementations.
 *
 * Provide architecture independent implementations of the atomic
 * facilities. At the very least compiler barriers should be provided, but a
 * full implementation of
 * * pg_compiler_barrier(), pg_write_barrier(), pg_read_barrier()
 * * pg_atomic_compare_exchange_u32(), pg_atomic_fetch_add_u32()
 * using compiler intrinsics is a good idea.
 */
/*
 * gcc or compatible, including clang and icc. Exclude xlc. The ppc64le "IBM
 * XL C/C++ for Linux, V13.1.2" emulates gcc, but __sync_lock_test_and_set()
 * of one-byte types elicits SIGSEGV. That bug was gone by V13.1.5 (2016-12).
 */
#if (defined(__GNUC__) || defined(__INTEL_COMPILER)) && !(defined(__IBMC__) || defined(__IBMCPP__))
#include "port/atomics/generic-gcc.h"
#elif defined(_MSC_VER)
#include "port/atomics/generic-msvc.h"
#elif defined(__hpux) && defined(__ia64) && !defined(__GNUC__)
#include "port/atomics/generic-acc.h"
#elif defined(__SUNPRO_C) && !defined(__GNUC__)
#include "port/atomics/generic-sunpro.h"
#else
/*
 * Unsupported compiler, we'll likely use slower fallbacks... At least
 * compiler barriers should really be provided.
 */
#endif

/*
 * Provide a full fallback of the pg_*_barrier(), pg_atomic_*_flag and
 * pg_atomic_* APIs for platforms without sufficient spinlock and/or atomics
 * support. In the case of spinlock backed atomics the emulation is expected
 * to be efficient, although less so than native atomics support.
 */
#include "port/atomics/fallback.h"

/*
 * Provide additional operations using supported infrastructure. These are
 * expected to be efficient if the underlying atomic operations are efficient.
 */
#include "port/atomics/generic.h"


/*
 * pg_compiler_barrier - prevent the compiler from moving code across
 *
 * A compiler barrier need not (and preferably should not) emit any actual
 * machine code, but must act as an optimization fence: the compiler must not
 * reorder loads or stores to main memory around the barrier. However, the
 * CPU may still reorder loads or stores at runtime, if the architecture's
 * memory model permits this.
 */
#define pg_compiler_barrier() pg_compiler_barrier_impl()
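
/*
 * Usage sketch, for illustration only (the example_* names are
 * hypothetical): a compiler barrier is sufficient to order stores relative
 * to a signal handler running in the same process, since no second CPU is
 * involved.
 */
#ifdef NOT_USED
static volatile int example_state = 0;
static volatile int example_in_critical = 0;

static void
example_enter_critical(void)
{
	example_state = 1;			/* must be complete... */
	pg_compiler_barrier();		/* ...before the handler can see the flag */
	example_in_critical = 1;
}
#endif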

/*
 * pg_memory_barrier - prevent the CPU from reordering memory access
 *
 * A memory barrier must act as a compiler barrier, and in addition must
 * guarantee that all loads and stores issued prior to the barrier are
 * completed before any loads or stores issued after the barrier. Unless
 * loads and stores are totally ordered (which is not the case on most
 * architectures) this requires issuing some sort of memory fencing
 * instruction.
 */
#define pg_memory_barrier() pg_memory_barrier_impl()
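
/*
 * Usage sketch, for illustration only (the example_* names are
 * hypothetical): a full barrier is needed when a store must be ordered
 * before a later load, as in this Dekker-style handshake. With a matching
 * barrier on the other side, at most one of the two backends can see the
 * other's flag still unset.
 */
#ifdef NOT_USED
static volatile int example_flag_a = 0;
static volatile int example_flag_b = 0;

static bool
example_try_enter_a(void)
{
	example_flag_a = 1;			/* announce intent */
	pg_memory_barrier();		/* order the store above before the load below */
	return example_flag_b == 0; /* enter only if B hasn't announced */
}
#endif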

/*
 * pg_(read|write)_barrier - prevent the CPU from reordering memory access
 *
 * A read barrier must act as a compiler barrier, and in addition must
 * guarantee that any loads issued prior to the barrier are completed before
 * any loads issued after the barrier. Similarly, a write barrier acts
 * as a compiler barrier, and also orders stores. Read and write barriers
 * are thus weaker than a full memory barrier, but stronger than a compiler
 * barrier. In practice, on machines with strong memory ordering, read and
 * write barriers may require nothing more than a compiler barrier.
 */
#define pg_read_barrier() pg_read_barrier_impl()
#define pg_write_barrier() pg_write_barrier_impl()
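
/*
 * Usage sketch, for illustration only (the example_* names are
 * hypothetical): the canonical pairing described in README.barrier. The
 * producer issues a write barrier between filling in the payload and
 * setting the flag; the consumer issues a read barrier between seeing the
 * flag and reading the payload.
 */
#ifdef NOT_USED
static int	example_payload = 0;
static volatile bool example_published = false;

static void
example_produce(int value)
{
	example_payload = value;	/* fill in the data first */
	pg_write_barrier();			/* order payload store before flag store */
	example_published = true;
}

static bool
example_consume(int *value)
{
	if (!example_published)
		return false;
	pg_read_barrier();			/* order flag load before payload load */
	*value = example_payload;
	return true;
}
#endif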

/*
 * Spinloop delay - Allow CPU to relax in busy loops
 */
#define pg_spin_delay() pg_spin_delay_impl()

/*
 * pg_atomic_init_flag - initialize atomic flag.
 *
 * No barrier semantics.
 */
static inline void
pg_atomic_init_flag(volatile pg_atomic_flag *ptr)
{
	pg_atomic_init_flag_impl(ptr);
}

/*
 * pg_atomic_test_set_flag - TAS()
 *
 * Returns true if the flag has successfully been set, false otherwise.
 *
 * Acquire (including read barrier) semantics.
 */
static inline bool
pg_atomic_test_set_flag(volatile pg_atomic_flag *ptr)
{
	return pg_atomic_test_set_flag_impl(ptr);
}

/*
 * pg_atomic_unlocked_test_flag - Check if the lock is free
 *
 * Returns true if the flag currently is not set, false otherwise.
 *
 * No barrier semantics.
 */
static inline bool
pg_atomic_unlocked_test_flag(volatile pg_atomic_flag *ptr)
{
	return pg_atomic_unlocked_test_flag_impl(ptr);
}

/*
 * pg_atomic_clear_flag - release lock set by TAS()
 *
 * Release (including write barrier) semantics.
 */
static inline void
pg_atomic_clear_flag(volatile pg_atomic_flag *ptr)
{
	pg_atomic_clear_flag_impl(ptr);
}
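
/*
 * Usage sketch, for illustration only (the example_* names are
 * hypothetical): the flag operations compose into a minimal test-and-set
 * lock, with pg_spin_delay() relaxing the CPU while waiting. Real backend
 * code should use spinlocks or LWLocks instead.
 */
#ifdef NOT_USED
static pg_atomic_flag example_lock;

static void
example_lock_setup(void)
{
	pg_atomic_init_flag(&example_lock); /* before any concurrent use */
}

static void
example_lock_acquire(void)
{
	while (!pg_atomic_test_set_flag(&example_lock))
		pg_spin_delay();		/* already held; relax and retry */
}

static void
example_lock_release(void)
{
	pg_atomic_clear_flag(&example_lock); /* release semantics */
}
#endif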


/*
 * pg_atomic_init_u32 - initialize atomic variable
 *
 * Has to be done before any concurrent usage.
 *
 * No barrier semantics.
 */
static inline void
pg_atomic_init_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
{
	AssertPointerAlignment(ptr, 4);

	pg_atomic_init_u32_impl(ptr, val);
}

/*
 * pg_atomic_read_u32 - unlocked read from atomic variable.
 *
 * The read is guaranteed to return a value as it has been written by this or
 * another process at some point in the past. There is, however, no cache
 * coherency interaction guaranteeing that the value hasn't since been
 * written to again.
 *
 * No barrier semantics.
 */
static inline uint32
pg_atomic_read_u32(volatile pg_atomic_uint32 *ptr)
{
	AssertPointerAlignment(ptr, 4);
	return pg_atomic_read_u32_impl(ptr);
}

/*
 * pg_atomic_write_u32 - write to atomic variable.
 *
 * The write is guaranteed to succeed as a whole, i.e. it's not possible to
 * observe a partial write for any reader. Note that this correctly interacts
 * with pg_atomic_compare_exchange_u32, in contrast to
 * pg_atomic_unlocked_write_u32().
 *
 * No barrier semantics.
 */
static inline void
pg_atomic_write_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
{
	AssertPointerAlignment(ptr, 4);

	pg_atomic_write_u32_impl(ptr, val);
}

/*
 * pg_atomic_unlocked_write_u32 - unlocked write to atomic variable.
 *
 * The write is guaranteed to succeed as a whole, i.e. it's not possible to
 * observe a partial write for any reader. But note that writing this way is
 * not guaranteed to correctly interact with read-modify-write operations like
 * pg_atomic_compare_exchange_u32. This should only be used in cases where
 * minor performance regressions due to atomics emulation are unacceptable.
 *
 * No barrier semantics.
 */
static inline void
pg_atomic_unlocked_write_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
{
	AssertPointerAlignment(ptr, 4);

	pg_atomic_unlocked_write_u32_impl(ptr, val);
}
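
/*
 * Usage sketch, for illustration only (the function is hypothetical): the
 * unlocked write is only safe while no other process can touch the
 * variable, e.g. right after allocating shared memory.
 */
#ifdef NOT_USED
static void
example_startup_reset(pg_atomic_uint32 *counter)
{
	/* caller must guarantee there are no concurrent readers or writers */
	pg_atomic_unlocked_write_u32(counter, 0);
}
#endif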

/*
 * pg_atomic_exchange_u32 - exchange newval with current value
 *
 * Returns the old value of 'ptr' before the swap.
 *
 * Full barrier semantics.
 */
static inline uint32
pg_atomic_exchange_u32(volatile pg_atomic_uint32 *ptr, uint32 newval)
{
	AssertPointerAlignment(ptr, 4);

	return pg_atomic_exchange_u32_impl(ptr, newval);
}

/*
 * pg_atomic_compare_exchange_u32 - CAS operation
 *
 * Atomically compare the current value of ptr with *expected and store newval
 * iff ptr and *expected have the same value. The current value of *ptr will
 * always be stored in *expected.
 *
 * Return true if values have been exchanged, false otherwise.
 *
 * Full barrier semantics.
 */
static inline bool
pg_atomic_compare_exchange_u32(volatile pg_atomic_uint32 *ptr,
							   uint32 *expected, uint32 newval)
{
	AssertPointerAlignment(ptr, 4);
	AssertPointerAlignment(expected, 4);

	return pg_atomic_compare_exchange_u32_impl(ptr, expected, newval);
}
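
/*
 * Usage sketch, for illustration only (the function is hypothetical): the
 * usual compare-and-exchange retry loop, here advancing a value to a new
 * maximum. On failure *expected is refreshed with the value actually
 * found, so no separate re-read is needed before retrying.
 */
#ifdef NOT_USED
static void
example_fetch_max_u32(volatile pg_atomic_uint32 *ptr, uint32 newmax)
{
	uint32		cur = pg_atomic_read_u32(ptr);

	while (cur < newmax)
	{
		/* on failure, cur now holds the value that was found instead */
		if (pg_atomic_compare_exchange_u32(ptr, &cur, newmax))
			break;
	}
}
#endif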

/*
 * pg_atomic_fetch_add_u32 - atomically add to variable
 *
 * Returns the value of ptr before the arithmetic operation.
 *
 * Full barrier semantics.
 */
static inline uint32
pg_atomic_fetch_add_u32(volatile pg_atomic_uint32 *ptr, int32 add_)
{
	AssertPointerAlignment(ptr, 4);
	return pg_atomic_fetch_add_u32_impl(ptr, add_);
}
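
/*
 * Usage sketch, for illustration only (the function is hypothetical):
 * because fetch_add returns the pre-increment value, it can hand out
 * unique slot indexes to concurrent callers.
 */
#ifdef NOT_USED
static uint32
example_claim_slot(volatile pg_atomic_uint32 *next_slot)
{
	/* each caller gets a distinct old value as its private slot */
	return pg_atomic_fetch_add_u32(next_slot, 1);
}
#endif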

/*
 * pg_atomic_fetch_sub_u32 - atomically subtract from variable
 *
 * Returns the value of ptr before the arithmetic operation. Note that sub_
 * may not be INT_MIN due to platform limitations.
 *
 * Full barrier semantics.
 */
static inline uint32
pg_atomic_fetch_sub_u32(volatile pg_atomic_uint32 *ptr, int32 sub_)
{
	AssertPointerAlignment(ptr, 4);
	Assert(sub_ != INT_MIN);
	return pg_atomic_fetch_sub_u32_impl(ptr, sub_);
}

/*
 * pg_atomic_fetch_and_u32 - atomically bit-and and_ with variable
 *
 * Returns the value of ptr before the bitwise operation.
 *
 * Full barrier semantics.
 */
static inline uint32
pg_atomic_fetch_and_u32(volatile pg_atomic_uint32 *ptr, uint32 and_)
{
	AssertPointerAlignment(ptr, 4);
	return pg_atomic_fetch_and_u32_impl(ptr, and_);
}

/*
 * pg_atomic_fetch_or_u32 - atomically bit-or or_ with variable
 *
 * Returns the value of ptr before the bitwise operation.
 *
 * Full barrier semantics.
 */
static inline uint32
pg_atomic_fetch_or_u32(volatile pg_atomic_uint32 *ptr, uint32 or_)
{
	AssertPointerAlignment(ptr, 4);
	return pg_atomic_fetch_or_u32_impl(ptr, or_);
}

/*
 * pg_atomic_add_fetch_u32 - atomically add to variable
 *
 * Returns the value of ptr after the arithmetic operation.
 *
 * Full barrier semantics.
 */
static inline uint32
pg_atomic_add_fetch_u32(volatile pg_atomic_uint32 *ptr, int32 add_)
{
	AssertPointerAlignment(ptr, 4);
	return pg_atomic_add_fetch_u32_impl(ptr, add_);
}

/*
 * pg_atomic_sub_fetch_u32 - atomically subtract from variable
 *
 * Returns the value of ptr after the arithmetic operation. Note that sub_ may
 * not be INT_MIN due to platform limitations.
 *
 * Full barrier semantics.
 */
static inline uint32
pg_atomic_sub_fetch_u32(volatile pg_atomic_uint32 *ptr, int32 sub_)
{
	AssertPointerAlignment(ptr, 4);
	Assert(sub_ != INT_MIN);
	return pg_atomic_sub_fetch_u32_impl(ptr, sub_);
}
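
/*
 * Usage sketch, for illustration only (the function is hypothetical):
 * sub_fetch returns the post-decrement value, which is the natural shape
 * for reference counting.
 */
#ifdef NOT_USED
static bool
example_release_ref(volatile pg_atomic_uint32 *refcount)
{
	/* true when the caller dropped the last reference */
	return pg_atomic_sub_fetch_u32(refcount, 1) == 0;
}
#endif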

/* ----
 * The 64 bit operations have the same semantics as their 32 bit counterparts
 * if they are available. Check the corresponding 32 bit function for
 * documentation.
 * ----
 */
static inline void
pg_atomic_init_u64(volatile pg_atomic_uint64 *ptr, uint64 val)
{
	/*
	 * Can't necessarily enforce alignment - and don't need it - when using
	 * the spinlock based fallback implementation. Therefore only assert when
	 * not using it.
	 */
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
	AssertPointerAlignment(ptr, 8);
#endif
	pg_atomic_init_u64_impl(ptr, val);
}

static inline uint64
pg_atomic_read_u64(volatile pg_atomic_uint64 *ptr)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
	AssertPointerAlignment(ptr, 8);
#endif
	return pg_atomic_read_u64_impl(ptr);
}

static inline void
pg_atomic_write_u64(volatile pg_atomic_uint64 *ptr, uint64 val)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
	AssertPointerAlignment(ptr, 8);
#endif
	pg_atomic_write_u64_impl(ptr, val);
}

static inline uint64
pg_atomic_exchange_u64(volatile pg_atomic_uint64 *ptr, uint64 newval)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
	AssertPointerAlignment(ptr, 8);
#endif
	return pg_atomic_exchange_u64_impl(ptr, newval);
}

static inline bool
pg_atomic_compare_exchange_u64(volatile pg_atomic_uint64 *ptr,
							   uint64 *expected, uint64 newval)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
	AssertPointerAlignment(ptr, 8);
	AssertPointerAlignment(expected, 8);
#endif
	return pg_atomic_compare_exchange_u64_impl(ptr, expected, newval);
}

static inline uint64
pg_atomic_fetch_add_u64(volatile pg_atomic_uint64 *ptr, int64 add_)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
	AssertPointerAlignment(ptr, 8);
#endif
	return pg_atomic_fetch_add_u64_impl(ptr, add_);
}

static inline uint64
pg_atomic_fetch_sub_u64(volatile pg_atomic_uint64 *ptr, int64 sub_)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
	AssertPointerAlignment(ptr, 8);
#endif
	Assert(sub_ != PG_INT64_MIN);
	return pg_atomic_fetch_sub_u64_impl(ptr, sub_);
}

static inline uint64
pg_atomic_fetch_and_u64(volatile pg_atomic_uint64 *ptr, uint64 and_)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
	AssertPointerAlignment(ptr, 8);
#endif
	return pg_atomic_fetch_and_u64_impl(ptr, and_);
}

static inline uint64
pg_atomic_fetch_or_u64(volatile pg_atomic_uint64 *ptr, uint64 or_)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
	AssertPointerAlignment(ptr, 8);
#endif
	return pg_atomic_fetch_or_u64_impl(ptr, or_);
}

static inline uint64
pg_atomic_add_fetch_u64(volatile pg_atomic_uint64 *ptr, int64 add_)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
	AssertPointerAlignment(ptr, 8);
#endif
	return pg_atomic_add_fetch_u64_impl(ptr, add_);
}

static inline uint64
pg_atomic_sub_fetch_u64(volatile pg_atomic_uint64 *ptr, int64 sub_)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
	AssertPointerAlignment(ptr, 8);
#endif
	Assert(sub_ != PG_INT64_MIN);
	return pg_atomic_sub_fetch_u64_impl(ptr, sub_);
}
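
/*
 * Usage sketch, for illustration only (the example_* names are
 * hypothetical): the u64 variants are used just like their u32
 * counterparts, e.g. for a statistics counter too wide to wrap in
 * practice.
 */
#ifdef NOT_USED
static pg_atomic_uint64 example_bytes_written;

static void
example_count_write(uint64 nbytes)
{
	pg_atomic_fetch_add_u64(&example_bytes_written, (int64) nbytes);
}

static uint64
example_total_bytes(void)
{
	return pg_atomic_read_u64(&example_bytes_written);
}
#endif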

#undef INSIDE_ATOMICS_H

#endif							/* ATOMICS_H */