/*-------------------------------------------------------------------------
 *
 * atomics.h
 *    Atomic operations.
 *
 * Hardware and compiler dependent functions for manipulating memory
 * atomically and dealing with cache coherency. Used to implement locking
 * facilities and lockless algorithms/data structures.
 *
 * To bring up postgres on a platform/compiler, at the very least
 * implementations for the following operations should be provided:
 * * pg_compiler_barrier(), pg_write_barrier(), pg_read_barrier()
 * * pg_atomic_compare_exchange_u32(), pg_atomic_fetch_add_u32()
 * * pg_atomic_test_set_flag(), pg_atomic_init_flag(), pg_atomic_clear_flag()
 * * PG_HAVE_8BYTE_SINGLE_COPY_ATOMICITY should be defined if appropriate.
 *
 * There exist generic, hardware independent, implementations for several
 * compilers which might be sufficient, although possibly not optimal, for a
 * new platform. If no such generic implementation is available, spinlocks
 * (or even OS provided semaphores) will be used to implement the API.
 *
 * Implement _u64 atomics if and only if your platform can use them
 * efficiently (and obviously correctly).
 *
 * Use higher level functionality (lwlocks, spinlocks, heavyweight locks)
 * whenever possible. Writing correct code using these facilities is hard.
 *
 * For an introduction to using memory barriers within the PostgreSQL backend,
 * see src/backend/storage/lmgr/README.barrier
 *
 * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * src/include/port/atomics.h
 *
 *-------------------------------------------------------------------------
 */
#ifndef ATOMICS_H
#define ATOMICS_H

#ifdef FRONTEND
#error "atomics.h may not be included from frontend code"
#endif

#define INSIDE_ATOMICS_H

#include <limits.h>

/*
 * First a set of architecture specific files is included.
 *
 * These files can provide the full set of atomics or can do pretty much
 * nothing if all the compilers commonly used on these platforms provide
 * usable generics.
 *
 * Don't add inline assembly of the actual atomic operations if all the
 * common implementations of your platform provide intrinsics. Intrinsics are
 * much easier to understand and potentially support more architectures.
 *
 * It will often make sense to define memory barrier semantics here, since
 * e.g. generic compiler intrinsics for x86 memory barriers can't know that
 * postgres doesn't need x86 read/write barriers to do anything more than a
 * compiler barrier.
 */
#if defined(__arm__) || defined(__arm) || defined(__aarch64__)
#include "port/atomics/arch-arm.h"
#elif defined(__i386__) || defined(__i386) || defined(__x86_64__)
#include "port/atomics/arch-x86.h"
#elif defined(__ppc__) || defined(__powerpc__) || defined(__ppc64__) || defined(__powerpc64__)
#include "port/atomics/arch-ppc.h"
#elif defined(__hppa) || defined(__hppa__)
#include "port/atomics/arch-hppa.h"
#endif

/*
 * Compiler specific, but architecture independent implementations.
 *
 * Provide architecture independent implementations of the atomic
 * facilities. At the very least compiler barriers should be provided, but a
 * full implementation of
 * * pg_compiler_barrier(), pg_write_barrier(), pg_read_barrier()
 * * pg_atomic_compare_exchange_u32(), pg_atomic_fetch_add_u32()
 * using compiler intrinsics is a good idea.
 */
/*
 * gcc or compatible, including clang and icc. Exclude xlc. The ppc64le "IBM
 * XL C/C++ for Linux, V13.1.2" emulates gcc, but __sync_lock_test_and_set()
 * of one-byte types elicits SIGSEGV. That bug was gone by V13.1.5 (2016-12).
 */
#if (defined(__GNUC__) || defined(__INTEL_COMPILER)) && !(defined(__IBMC__) || defined(__IBMCPP__))
#include "port/atomics/generic-gcc.h"
#elif defined(_MSC_VER)
#include "port/atomics/generic-msvc.h"
#elif defined(__SUNPRO_C) && !defined(__GNUC__)
#include "port/atomics/generic-sunpro.h"
#else
/*
 * Unsupported compiler, we'll likely use slower fallbacks... At least
 * compiler barriers should really be provided.
 */
#endif

/*
 * Provide a full fallback of the pg_*_barrier(), pg_atomic_*_flag and
 * pg_atomic_* APIs for platforms without sufficient spinlock and/or atomics
 * support. In the case of spinlock backed atomics the emulation is expected
 * to be efficient, although less so than native atomics support.
 */
#include "port/atomics/fallback.h"

/*
 * Provide additional operations using supported infrastructure. These are
 * expected to be efficient if the underlying atomic operations are efficient.
 */
#include "port/atomics/generic.h"


/*
 * pg_compiler_barrier - prevent the compiler from moving code across
 *
 * A compiler barrier need not (and preferably should not) emit any actual
 * machine code, but must act as an optimization fence: the compiler must not
 * reorder loads or stores to main memory around the barrier. However, the
 * CPU may still reorder loads or stores at runtime, if the architecture's
 * memory model permits this.
 */
#define pg_compiler_barrier() pg_compiler_barrier_impl()

/*
 * pg_memory_barrier - prevent the CPU from reordering memory access
 *
 * A memory barrier must act as a compiler barrier, and in addition must
 * guarantee that all loads and stores issued prior to the barrier are
 * completed before any loads or stores issued after the barrier. Unless
 * loads and stores are totally ordered (which is not the case on most
 * architectures) this requires issuing some sort of memory fencing
 * instruction.
 */
#define pg_memory_barrier() pg_memory_barrier_impl()

/*
 * pg_(read|write)_barrier - prevent the CPU from reordering memory access
 *
 * A read barrier must act as a compiler barrier, and in addition must
 * guarantee that any loads issued prior to the barrier are completed before
 * any loads issued after the barrier. Similarly, a write barrier acts
 * as a compiler barrier, and also orders stores. Read and write barriers
 * are thus weaker than a full memory barrier, but stronger than a compiler
 * barrier. In practice, on machines with strong memory ordering, read and
 * write barriers may require nothing more than a compiler barrier.
 */
#define pg_read_barrier() pg_read_barrier_impl()
#define pg_write_barrier() pg_write_barrier_impl()

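/*
 * Editor's illustrative sketch, not part of the original header: a typical
 * read/write barrier pairing for single-writer publication. The example_*
 * functions, both shared variables, and the ATOMICS_EXAMPLES guard are
 * hypothetical.
 */
#ifdef ATOMICS_EXAMPLES
static inline void
example_publish(volatile uint32 *data, volatile uint32 *ready)
{
    *data = 42;                 /* write the payload first */
    pg_write_barrier();         /* order the payload store before the flag */
    *ready = 1;                 /* then announce it */
}

static inline bool
example_consume(volatile uint32 *data, volatile uint32 *ready, uint32 *out)
{
    if (*ready == 0)
        return false;           /* nothing published yet */
    pg_read_barrier();          /* order the flag read before the payload read */
    *out = *data;
    return true;
}
#endif
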
/*
 * Spinloop delay - Allow CPU to relax in busy loops
 */
#define pg_spin_delay() pg_spin_delay_impl()

/*
 * pg_atomic_init_flag - initialize atomic flag.
 *
 * No barrier semantics.
 */
static inline void
pg_atomic_init_flag(volatile pg_atomic_flag *ptr)
{
    pg_atomic_init_flag_impl(ptr);
}

/*
 * pg_atomic_test_set_flag - TAS()
 *
 * Returns true if the flag has successfully been set, false otherwise.
 *
 * Acquire (including read barrier) semantics.
 */
static inline bool
pg_atomic_test_set_flag(volatile pg_atomic_flag *ptr)
{
    return pg_atomic_test_set_flag_impl(ptr);
}

/*
 * pg_atomic_unlocked_test_flag - Check if the lock is free
 *
 * Returns true if the flag currently is not set, false otherwise.
 *
 * No barrier semantics.
 */
static inline bool
pg_atomic_unlocked_test_flag(volatile pg_atomic_flag *ptr)
{
    return pg_atomic_unlocked_test_flag_impl(ptr);
}

/*
 * pg_atomic_clear_flag - release lock set by TAS()
 *
 * Release (including write barrier) semantics.
 */
static inline void
pg_atomic_clear_flag(volatile pg_atomic_flag *ptr)
{
    pg_atomic_clear_flag_impl(ptr);
}

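/*
 * Editor's illustrative sketch, not part of the original header: the flag
 * API used as a minimal test-and-set lock. Real backend code should prefer
 * spinlocks or lwlocks; the example_* names and the ATOMICS_EXAMPLES guard
 * are hypothetical.
 */
#ifdef ATOMICS_EXAMPLES
static inline void
example_flag_lock(volatile pg_atomic_flag *lock)
{
    /* a true return means we set the flag, i.e. acquired the lock */
    while (!pg_atomic_test_set_flag(lock))
        pg_spin_delay();        /* let the CPU relax while we spin */
}

static inline void
example_flag_unlock(volatile pg_atomic_flag *lock)
{
    pg_atomic_clear_flag(lock); /* release semantics publish our writes */
}
#endif
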
/*
 * pg_atomic_init_u32 - initialize atomic variable
 *
 * Has to be done before any concurrent usage.
 *
 * No barrier semantics.
 */
static inline void
pg_atomic_init_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
{
    AssertPointerAlignment(ptr, 4);

    pg_atomic_init_u32_impl(ptr, val);
}

/*
 * pg_atomic_read_u32 - unlocked read from atomic variable.
 *
 * The read is guaranteed to return a value as it has been written by this or
 * another process at some point in the past. There's however no cache
 * coherency interaction guaranteeing the value hasn't since been written to
 * again.
 *
 * No barrier semantics.
 */
static inline uint32
pg_atomic_read_u32(volatile pg_atomic_uint32 *ptr)
{
    AssertPointerAlignment(ptr, 4);
    return pg_atomic_read_u32_impl(ptr);
}

/*
 * pg_atomic_write_u32 - write to atomic variable.
 *
 * The write is guaranteed to succeed as a whole, i.e. it's not possible to
 * observe a partial write for any reader. Note that this correctly interacts
 * with pg_atomic_compare_exchange_u32, in contrast to
 * pg_atomic_unlocked_write_u32().
 *
 * No barrier semantics.
 */
static inline void
pg_atomic_write_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
{
    AssertPointerAlignment(ptr, 4);

    pg_atomic_write_u32_impl(ptr, val);
}

/*
 * pg_atomic_unlocked_write_u32 - unlocked write to atomic variable.
 *
 * The write is guaranteed to succeed as a whole, i.e. it's not possible to
 * observe a partial write for any reader. But note that writing this way is
 * not guaranteed to correctly interact with read-modify-write operations like
 * pg_atomic_compare_exchange_u32. This should only be used in cases where
 * minor performance regressions due to atomics emulation are unacceptable.
 *
 * No barrier semantics.
 */
static inline void
pg_atomic_unlocked_write_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
{
    AssertPointerAlignment(ptr, 4);

    pg_atomic_unlocked_write_u32_impl(ptr, val);
}

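/*
 * Editor's illustrative sketch, not part of the original header: the
 * difference between the two write variants. Only pg_atomic_write_u32() is
 * guaranteed to interact correctly with concurrent read-modify-write
 * operations; the example function and guard are hypothetical.
 */
#ifdef ATOMICS_EXAMPLES
static inline void
example_reset(volatile pg_atomic_uint32 *v, bool rmw_racers_possible)
{
    if (rmw_racers_possible)
        pg_atomic_write_u32(v, 0);  /* safe against concurrent CAS/fetch_add */
    else
        pg_atomic_unlocked_write_u32(v, 0); /* cheaper; single writer, no RMW racers */
}
#endif
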
/*
 * pg_atomic_exchange_u32 - exchange newval with current value
 *
 * Returns the old value of 'ptr' before the swap.
 *
 * Full barrier semantics.
 */
static inline uint32
pg_atomic_exchange_u32(volatile pg_atomic_uint32 *ptr, uint32 newval)
{
    AssertPointerAlignment(ptr, 4);

    return pg_atomic_exchange_u32_impl(ptr, newval);
}

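/*
 * Editor's illustrative sketch, not part of the original header: exchange
 * as an atomic "take" - fetch the pending bits and reset them in one step.
 * The example function and guard are hypothetical.
 */
#ifdef ATOMICS_EXAMPLES
static inline uint32
example_take_pending(volatile pg_atomic_uint32 *pending)
{
    /* returns the old bits; bits set by concurrent setters are never lost */
    return pg_atomic_exchange_u32(pending, 0);
}
#endif
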
/*
 * pg_atomic_compare_exchange_u32 - CAS operation
 *
 * Atomically compare the current value of ptr with *expected and store newval
 * iff ptr and *expected have the same value. The current value of *ptr will
 * always be stored in *expected.
 *
 * Return true if values have been exchanged, false otherwise.
 *
 * Full barrier semantics.
 */
static inline bool
pg_atomic_compare_exchange_u32(volatile pg_atomic_uint32 *ptr,
                               uint32 *expected, uint32 newval)
{
    AssertPointerAlignment(ptr, 4);
    AssertPointerAlignment(expected, 4);

    return pg_atomic_compare_exchange_u32_impl(ptr, expected, newval);
}

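/*
 * Editor's illustrative sketch, not part of the original header: the
 * canonical CAS retry loop, here computing an atomic maximum. On failure
 * the CAS refreshes 'oldval' with the current value, so the loop converges.
 * The example function and guard are hypothetical.
 */
#ifdef ATOMICS_EXAMPLES
static inline uint32
example_fetch_max(volatile pg_atomic_uint32 *ptr, uint32 newmax)
{
    uint32      oldval = pg_atomic_read_u32(ptr);

    while (oldval < newmax &&
           !pg_atomic_compare_exchange_u32(ptr, &oldval, newmax))
        ;
    return oldval;              /* value observed before our (possible) store */
}
#endif
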
/*
 * pg_atomic_fetch_add_u32 - atomically add to variable
 *
 * Returns the value of ptr before the arithmetic operation.
 *
 * Full barrier semantics.
 */
static inline uint32
pg_atomic_fetch_add_u32(volatile pg_atomic_uint32 *ptr, int32 add_)
{
    AssertPointerAlignment(ptr, 4);
    return pg_atomic_fetch_add_u32_impl(ptr, add_);
}

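/*
 * Editor's illustrative sketch, not part of the original header: a shared
 * event counter; fetch_add returns the pre-increment value. The example
 * function and guard are hypothetical.
 */
#ifdef ATOMICS_EXAMPLES
static inline uint32
example_count_event(volatile pg_atomic_uint32 *nevents)
{
    return pg_atomic_fetch_add_u32(nevents, 1);     /* old count */
}
#endif
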
/*
 * pg_atomic_fetch_sub_u32 - atomically subtract from variable
 *
 * Returns the value of ptr before the arithmetic operation. Note that sub_
 * may not be INT_MIN due to platform limitations.
 *
 * Full barrier semantics.
 */
static inline uint32
pg_atomic_fetch_sub_u32(volatile pg_atomic_uint32 *ptr, int32 sub_)
{
    AssertPointerAlignment(ptr, 4);
    Assert(sub_ != INT_MIN);
    return pg_atomic_fetch_sub_u32_impl(ptr, sub_);
}

/*
 * pg_atomic_fetch_and_u32 - atomically bit-and and_ with variable
 *
 * Returns the value of ptr before the arithmetic operation.
 *
 * Full barrier semantics.
 */
static inline uint32
pg_atomic_fetch_and_u32(volatile pg_atomic_uint32 *ptr, uint32 and_)
{
    AssertPointerAlignment(ptr, 4);
    return pg_atomic_fetch_and_u32_impl(ptr, and_);
}

/*
 * pg_atomic_fetch_or_u32 - atomically bit-or or_ with variable
 *
 * Returns the value of ptr before the arithmetic operation.
 *
 * Full barrier semantics.
 */
static inline uint32
pg_atomic_fetch_or_u32(volatile pg_atomic_uint32 *ptr, uint32 or_)
{
    AssertPointerAlignment(ptr, 4);
    return pg_atomic_fetch_or_u32_impl(ptr, or_);
}

/*
 * pg_atomic_add_fetch_u32 - atomically add to variable
 *
 * Returns the value of ptr after the arithmetic operation.
 *
 * Full barrier semantics.
 */
static inline uint32
pg_atomic_add_fetch_u32(volatile pg_atomic_uint32 *ptr, int32 add_)
{
    AssertPointerAlignment(ptr, 4);
    return pg_atomic_add_fetch_u32_impl(ptr, add_);
}

/*
 * pg_atomic_sub_fetch_u32 - atomically subtract from variable
 *
 * Returns the value of ptr after the arithmetic operation. Note that sub_
 * may not be INT_MIN due to platform limitations.
 *
 * Full barrier semantics.
 */
static inline uint32
pg_atomic_sub_fetch_u32(volatile pg_atomic_uint32 *ptr, int32 sub_)
{
    AssertPointerAlignment(ptr, 4);
    Assert(sub_ != INT_MIN);
    return pg_atomic_sub_fetch_u32_impl(ptr, sub_);
}

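/*
 * Editor's illustrative sketch, not part of the original header: the
 * fetch_* variants return the value before the operation, while the *_fetch
 * variants return the value after it. The example function and guard are
 * hypothetical.
 */
#ifdef ATOMICS_EXAMPLES
static inline void
example_fetch_vs_fetch(volatile pg_atomic_uint32 *v)
{
    pg_atomic_write_u32(v, 10);
    Assert(pg_atomic_fetch_add_u32(v, 5) == 10);    /* old value */
    Assert(pg_atomic_add_fetch_u32(v, 5) == 20);    /* new value */
}
#endif
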
/* ----
 * The 64 bit operations have the same semantics as their 32bit counterparts
 * if they are available. Check the corresponding 32bit function for
 * documentation.
 * ----
 */
static inline void
pg_atomic_init_u64(volatile pg_atomic_uint64 *ptr, uint64 val)
{
    /*
     * Can't necessarily enforce alignment - and don't need it - when using
     * the spinlock based fallback implementation. Therefore only assert when
     * not using it.
     */
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
    AssertPointerAlignment(ptr, 8);
#endif
    pg_atomic_init_u64_impl(ptr, val);
}

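/*
 * Editor's illustrative sketch, not part of the original header: embedding
 * a 64-bit atomic in shared state. On platforms with native 64-bit atomics
 * the pg_atomic_uint64 type is normally declared with 8-byte alignment, so
 * an ordinary member suffices; the struct, field names, and guard are
 * hypothetical.
 */
#ifdef ATOMICS_EXAMPLES
typedef struct example_shared_stats
{
    pg_atomic_uint64 nbytes_written;
} example_shared_stats;

static inline void
example_stats_init(example_shared_stats *stats)
{
    pg_atomic_init_u64(&stats->nbytes_written, 0);
}
#endif
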
static inline uint64
pg_atomic_read_u64(volatile pg_atomic_uint64 *ptr)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
    AssertPointerAlignment(ptr, 8);
#endif
    return pg_atomic_read_u64_impl(ptr);
}

static inline void
pg_atomic_write_u64(volatile pg_atomic_uint64 *ptr, uint64 val)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
    AssertPointerAlignment(ptr, 8);
#endif
    pg_atomic_write_u64_impl(ptr, val);
}

static inline uint64
pg_atomic_exchange_u64(volatile pg_atomic_uint64 *ptr, uint64 newval)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
    AssertPointerAlignment(ptr, 8);
#endif
    return pg_atomic_exchange_u64_impl(ptr, newval);
}

static inline bool
pg_atomic_compare_exchange_u64(volatile pg_atomic_uint64 *ptr,
                               uint64 *expected, uint64 newval)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
    AssertPointerAlignment(ptr, 8);
    AssertPointerAlignment(expected, 8);
#endif
    return pg_atomic_compare_exchange_u64_impl(ptr, expected, newval);
}

static inline uint64
pg_atomic_fetch_add_u64(volatile pg_atomic_uint64 *ptr, int64 add_)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
    AssertPointerAlignment(ptr, 8);
#endif
    return pg_atomic_fetch_add_u64_impl(ptr, add_);
}

static inline uint64
pg_atomic_fetch_sub_u64(volatile pg_atomic_uint64 *ptr, int64 sub_)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
    AssertPointerAlignment(ptr, 8);
#endif
    Assert(sub_ != PG_INT64_MIN);
    return pg_atomic_fetch_sub_u64_impl(ptr, sub_);
}

static inline uint64
pg_atomic_fetch_and_u64(volatile pg_atomic_uint64 *ptr, uint64 and_)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
    AssertPointerAlignment(ptr, 8);
#endif
    return pg_atomic_fetch_and_u64_impl(ptr, and_);
}

static inline uint64
pg_atomic_fetch_or_u64(volatile pg_atomic_uint64 *ptr, uint64 or_)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
    AssertPointerAlignment(ptr, 8);
#endif
    return pg_atomic_fetch_or_u64_impl(ptr, or_);
}

static inline uint64
pg_atomic_add_fetch_u64(volatile pg_atomic_uint64 *ptr, int64 add_)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
    AssertPointerAlignment(ptr, 8);
#endif
    return pg_atomic_add_fetch_u64_impl(ptr, add_);
}

static inline uint64
pg_atomic_sub_fetch_u64(volatile pg_atomic_uint64 *ptr, int64 sub_)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
    AssertPointerAlignment(ptr, 8);
#endif
    Assert(sub_ != PG_INT64_MIN);
    return pg_atomic_sub_fetch_u64_impl(ptr, sub_);
}

#undef INSIDE_ATOMICS_H

#endif                          /* ATOMICS_H */