PostgreSQL Source Code git master
All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Pages
atomics.h
Go to the documentation of this file.
1/*-------------------------------------------------------------------------
2 *
3 * atomics.h
4 * Atomic operations.
5 *
6 * Hardware and compiler dependent functions for manipulating memory
7 * atomically and dealing with cache coherency. Used to implement locking
8 * facilities and lockless algorithms/data structures.
9 *
10 * To bring up postgres on a platform/compiler at the very least
11 * implementations for the following operations should be provided:
12 * * pg_compiler_barrier(), pg_write_barrier(), pg_read_barrier()
13 * * pg_atomic_compare_exchange_u32(), pg_atomic_fetch_add_u32()
14 * * pg_atomic_test_set_flag(), pg_atomic_init_flag(), pg_atomic_clear_flag()
15 * * PG_HAVE_8BYTE_SINGLE_COPY_ATOMICITY should be defined if appropriate.
16 *
17 * There exist generic, hardware independent, implementations for several
18 * compilers which might be sufficient, although possibly not optimal, for a
19 * new platform. If no such generic implementation is available spinlocks will
20 * be used to implement the 64-bit parts of the API.
21 *
22 * Implement _u64 atomics if and only if your platform can use them
23 * efficiently (and obviously correctly).
24 *
25 * Use higher level functionality (lwlocks, spinlocks, heavyweight locks)
26 * whenever possible. Writing correct code using these facilities is hard.
27 *
28 * For an introduction to using memory barriers within the PostgreSQL backend,
29 * see src/backend/storage/lmgr/README.barrier
30 *
31 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
32 * Portions Copyright (c) 1994, Regents of the University of California
33 *
34 * src/include/port/atomics.h
35 *
36 *-------------------------------------------------------------------------
37 */
38#ifndef ATOMICS_H
39#define ATOMICS_H
40
41#ifdef FRONTEND
42#error "atomics.h may not be included from frontend code"
43#endif
44
45#define INSIDE_ATOMICS_H
46
47#include <limits.h>
48
49/*
50 * First a set of architecture specific files is included.
51 *
52 * These files can provide the full set of atomics or can do pretty much
53 * nothing if all the compilers commonly used on these platforms provide
54 * usable generics.
55 *
56 * Don't add an inline assembly of the actual atomic operations if all the
57 * common implementations of your platform provide intrinsics. Intrinsics are
58 * much easier to understand and potentially support more architectures.
59 *
60 * It will often make sense to define memory barrier semantics here, since
61 * e.g. generic compiler intrinsics for x86 memory barriers can't know that
 62 * postgres doesn't need x86 read/write barriers to do anything more than a
63 * compiler barrier.
64 *
65 */
66#if defined(__arm__) || defined(__arm) || defined(__aarch64__)
68#elif defined(__i386__) || defined(__i386) || defined(__x86_64__)
70#elif defined(__ppc__) || defined(__powerpc__) || defined(__ppc64__) || defined(__powerpc64__)
72#endif
73
74/*
75 * Compiler specific, but architecture independent implementations.
76 *
77 * Provide architecture independent implementations of the atomic
78 * facilities. At the very least compiler barriers should be provided, but a
79 * full implementation of
80 * * pg_compiler_barrier(), pg_write_barrier(), pg_read_barrier()
81 * * pg_atomic_compare_exchange_u32(), pg_atomic_fetch_add_u32()
82 * using compiler intrinsics are a good idea.
83 */
84/*
85 * gcc or compatible, including clang and icc.
86 */
87#if defined(__GNUC__) || defined(__INTEL_COMPILER)
89#elif defined(_MSC_VER)
91#elif defined(__SUNPRO_C) && !defined(__GNUC__)
93#else
94/* Unknown compiler. */
95#endif
96
97/* Fail if we couldn't find implementations of required facilities. */
98#if !defined(PG_HAVE_ATOMIC_U32_SUPPORT)
99#error "could not find an implementation of pg_atomic_uint32"
100#endif
101#if !defined(pg_compiler_barrier_impl)
102#error "could not find an implementation of pg_compiler_barrier"
103#endif
104#if !defined(pg_memory_barrier_impl)
105#error "could not find an implementation of pg_memory_barrier_impl"
106#endif
107
108
109/*
110 * Provide a spinlock-based implementation of the 64 bit variants, if
111 * necessary.
112 */
114
115/*
116 * Provide additional operations using supported infrastructure. These are
117 * expected to be efficient if the underlying atomic operations are efficient.
118 */
119#include "port/atomics/generic.h"
120
121
122/*
123 * pg_compiler_barrier - prevent the compiler from moving code across
124 *
125 * A compiler barrier need not (and preferably should not) emit any actual
126 * machine code, but must act as an optimization fence: the compiler must not
127 * reorder loads or stores to main memory around the barrier. However, the
128 * CPU may still reorder loads or stores at runtime, if the architecture's
129 * memory model permits this.
130 */
131#define pg_compiler_barrier() pg_compiler_barrier_impl()
132
133/*
134 * pg_memory_barrier - prevent the CPU from reordering memory access
135 *
136 * A memory barrier must act as a compiler barrier, and in addition must
137 * guarantee that all loads and stores issued prior to the barrier are
138 * completed before any loads or stores issued after the barrier. Unless
139 * loads and stores are totally ordered (which is not the case on most
140 * architectures) this requires issuing some sort of memory fencing
141 * instruction.
142 */
143#define pg_memory_barrier() pg_memory_barrier_impl()
144
145/*
146 * pg_(read|write)_barrier - prevent the CPU from reordering memory access
147 *
148 * A read barrier must act as a compiler barrier, and in addition must
149 * guarantee that any loads issued prior to the barrier are completed before
150 * any loads issued after the barrier. Similarly, a write barrier acts
151 * as a compiler barrier, and also orders stores. Read and write barriers
152 * are thus weaker than a full memory barrier, but stronger than a compiler
153 * barrier. In practice, on machines with strong memory ordering, read and
154 * write barriers may require nothing more than a compiler barrier.
155 */
156#define pg_read_barrier() pg_read_barrier_impl()
157#define pg_write_barrier() pg_write_barrier_impl()
158
159/*
160 * Spinloop delay - Allow CPU to relax in busy loops
161 */
162#define pg_spin_delay() pg_spin_delay_impl()
163
164/*
165 * pg_atomic_init_flag - initialize atomic flag.
166 *
167 * No barrier semantics.
168 */
169static inline void
170pg_atomic_init_flag(volatile pg_atomic_flag *ptr)
171{
172 pg_atomic_init_flag_impl(ptr);
173}
174
175/*
176 * pg_atomic_test_set_flag - TAS()
177 *
178 * Returns true if the flag has successfully been set, false otherwise.
179 *
180 * Acquire (including read barrier) semantics.
181 */
182static inline bool
183pg_atomic_test_set_flag(volatile pg_atomic_flag *ptr)
184{
185 return pg_atomic_test_set_flag_impl(ptr);
186}
187
188/*
189 * pg_atomic_unlocked_test_flag - Check if the lock is free
190 *
191 * Returns true if the flag currently is not set, false otherwise.
192 *
193 * No barrier semantics.
194 */
195static inline bool
196pg_atomic_unlocked_test_flag(volatile pg_atomic_flag *ptr)
197{
198 return pg_atomic_unlocked_test_flag_impl(ptr);
199}
200
201/*
202 * pg_atomic_clear_flag - release lock set by TAS()
203 *
204 * Release (including write barrier) semantics.
205 */
206static inline void
207pg_atomic_clear_flag(volatile pg_atomic_flag *ptr)
208{
209 pg_atomic_clear_flag_impl(ptr);
210}
211
212
213/*
214 * pg_atomic_init_u32 - initialize atomic variable
215 *
216 * Has to be done before any concurrent usage..
217 *
218 * No barrier semantics.
219 */
220static inline void
222{
224
226}
227
228/*
229 * pg_atomic_read_u32 - unlocked read from atomic variable.
230 *
231 * The read is guaranteed to return a value as it has been written by this or
232 * another process at some point in the past. There's however no cache
233 * coherency interaction guaranteeing the value hasn't since been written to
234 * again.
235 *
236 * No barrier semantics.
237 */
238static inline uint32
240{
242 return pg_atomic_read_u32_impl(ptr);
243}
244
245/*
246 * pg_atomic_read_membarrier_u32 - read with barrier semantics.
247 *
248 * This read is guaranteed to return the current value, provided that the value
249 * is only ever updated via operations with barrier semantics, such as
250 * pg_atomic_compare_exchange_u32() and pg_atomic_write_membarrier_u32().
251 * While this may be less performant than pg_atomic_read_u32(), it may be
252 * easier to reason about correctness with this function in less performance-
253 * sensitive code.
254 *
255 * Full barrier semantics.
256 */
257static inline uint32
259{
261
262 return pg_atomic_read_membarrier_u32_impl(ptr);
263}
264
265/*
266 * pg_atomic_write_u32 - write to atomic variable.
267 *
268 * The write is guaranteed to succeed as a whole, i.e. it's not possible to
269 * observe a partial write for any reader. Note that this correctly interacts
270 * with pg_atomic_compare_exchange_u32, in contrast to
271 * pg_atomic_unlocked_write_u32().
272 *
273 * No barrier semantics.
274 */
275static inline void
277{
279
281}
282
283/*
284 * pg_atomic_unlocked_write_u32 - unlocked write to atomic variable.
285 *
286 * The write is guaranteed to succeed as a whole, i.e. it's not possible to
287 * observe a partial write for any reader. But note that writing this way is
288 * not guaranteed to correctly interact with read-modify-write operations like
289 * pg_atomic_compare_exchange_u32. This should only be used in cases where
290 * minor performance regressions due to atomics emulation are unacceptable.
291 *
292 * No barrier semantics.
293 */
294static inline void
296{
298
300}
301
302/*
303 * pg_atomic_write_membarrier_u32 - write with barrier semantics.
304 *
305 * The write is guaranteed to succeed as a whole, i.e., it's not possible to
306 * observe a partial write for any reader. Note that this correctly interacts
307 * with both pg_atomic_compare_exchange_u32() and
308 * pg_atomic_read_membarrier_u32(). While this may be less performant than
309 * pg_atomic_write_u32(), it may be easier to reason about correctness with
310 * this function in less performance-sensitive code.
311 *
312 * Full barrier semantics.
313 */
314static inline void
316{
318
319 pg_atomic_write_membarrier_u32_impl(ptr, val);
320}
321
322/*
323 * pg_atomic_exchange_u32 - exchange newval with current value
324 *
325 * Returns the old value of 'ptr' before the swap.
326 *
327 * Full barrier semantics.
328 */
329static inline uint32
331{
333
335}
336
337/*
338 * pg_atomic_compare_exchange_u32 - CAS operation
339 *
340 * Atomically compare the current value of ptr with *expected and store newval
341 * iff ptr and *expected have the same value. The current value of *ptr will
342 * always be stored in *expected.
343 *
344 * Return true if values have been exchanged, false otherwise.
345 *
346 * Full barrier semantics.
347 */
348static inline bool
350 uint32 *expected, uint32 newval)
351{
353 AssertPointerAlignment(expected, 4);
354
355 return pg_atomic_compare_exchange_u32_impl(ptr, expected, newval);
356}
357
358/*
359 * pg_atomic_fetch_add_u32 - atomically add to variable
360 *
361 * Returns the value of ptr before the arithmetic operation.
362 *
363 * Full barrier semantics.
364 */
365static inline uint32
367{
369 return pg_atomic_fetch_add_u32_impl(ptr, add_);
370}
371
372/*
373 * pg_atomic_fetch_sub_u32 - atomically subtract from variable
374 *
375 * Returns the value of ptr before the arithmetic operation. Note that sub_
376 * may not be INT_MIN due to platform limitations.
377 *
378 * Full barrier semantics.
379 */
380static inline uint32
382{
384 Assert(sub_ != INT_MIN);
385 return pg_atomic_fetch_sub_u32_impl(ptr, sub_);
386}
387
388/*
389 * pg_atomic_fetch_and_u32 - atomically bit-and and_ with variable
390 *
391 * Returns the value of ptr before the arithmetic operation.
392 *
393 * Full barrier semantics.
394 */
395static inline uint32
397{
399 return pg_atomic_fetch_and_u32_impl(ptr, and_);
400}
401
402/*
403 * pg_atomic_fetch_or_u32 - atomically bit-or or_ with variable
404 *
405 * Returns the value of ptr before the arithmetic operation.
406 *
407 * Full barrier semantics.
408 */
409static inline uint32
411{
413 return pg_atomic_fetch_or_u32_impl(ptr, or_);
414}
415
416/*
417 * pg_atomic_add_fetch_u32 - atomically add to variable
418 *
419 * Returns the value of ptr after the arithmetic operation.
420 *
421 * Full barrier semantics.
422 */
423static inline uint32
425{
427 return pg_atomic_add_fetch_u32_impl(ptr, add_);
428}
429
430/*
431 * pg_atomic_sub_fetch_u32 - atomically subtract from variable
432 *
433 * Returns the value of ptr after the arithmetic operation. Note that sub_ may
434 * not be INT_MIN due to platform limitations.
435 *
436 * Full barrier semantics.
437 */
438static inline uint32
440{
442 Assert(sub_ != INT_MIN);
443 return pg_atomic_sub_fetch_u32_impl(ptr, sub_);
444}
445
446/* ----
447 * The 64 bit operations have the same semantics as their 32bit counterparts
448 * if they are available. Check the corresponding 32bit function for
449 * documentation.
450 * ----
451 */
452static inline void
454{
455 /*
456 * Can't necessarily enforce alignment - and don't need it - when using
457 * the spinlock based fallback implementation. Therefore only assert when
458 * not using it.
459 */
460#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
462#endif
464}
465
466static inline uint64
468{
469#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
471#endif
472 return pg_atomic_read_u64_impl(ptr);
473}
474
475static inline uint64
477{
478#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
480#endif
481 return pg_atomic_read_membarrier_u64_impl(ptr);
482}
483
484static inline void
486{
487#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
489#endif
491}
492
493static inline void
495{
496#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
498#endif
499 pg_atomic_write_membarrier_u64_impl(ptr, val);
500}
501
502static inline uint64
504{
505#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
507#endif
508 return pg_atomic_exchange_u64_impl(ptr, newval);
509}
510
511static inline bool
513 uint64 *expected, uint64 newval)
514{
515#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
517#endif
518 return pg_atomic_compare_exchange_u64_impl(ptr, expected, newval);
519}
520
521static inline uint64
523{
524#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
526#endif
527 return pg_atomic_fetch_add_u64_impl(ptr, add_);
528}
529
530static inline uint64
532{
533#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
535#endif
536 Assert(sub_ != PG_INT64_MIN);
537 return pg_atomic_fetch_sub_u64_impl(ptr, sub_);
538}
539
540static inline uint64
542{
543#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
545#endif
546 return pg_atomic_fetch_and_u64_impl(ptr, and_);
547}
548
549static inline uint64
551{
552#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
554#endif
555 return pg_atomic_fetch_or_u64_impl(ptr, or_);
556}
557
558static inline uint64
560{
561#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
563#endif
564 return pg_atomic_add_fetch_u64_impl(ptr, add_);
565}
566
567static inline uint64
569{
570#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
572#endif
573 Assert(sub_ != PG_INT64_MIN);
574 return pg_atomic_sub_fetch_u64_impl(ptr, sub_);
575}
576
577/*
578 * Monotonically advance the given variable using only atomic operations until
579 * it's at least the target value. Returns the latest value observed, which
580 * may or may not be the target value.
581 *
582 * Full barrier semantics (even when value is unchanged).
583 */
584static inline uint64
586{
587 uint64 currval;
588
589#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
591#endif
592
593 currval = pg_atomic_read_u64_impl(ptr);
594 if (currval >= target)
595 {
597 return currval;
598 }
599
600 while (currval < target)
601 {
602 if (pg_atomic_compare_exchange_u64(ptr, &currval, target))
603 return target;
604 }
605
606 return currval;
607}
608
609#undef INSIDE_ATOMICS_H
610
611#endif /* ATOMICS_H */
static bool pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 *expected, uint32 newval)
Definition: arch-ppc.h:80
static uint32 pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_)
Definition: arch-ppc.h:131
uint64 pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
Definition: atomics.c:62
void pg_atomic_init_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 val_)
Definition: atomics.c:24
bool pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 *expected, uint64 newval)
Definition: atomics.c:34
static uint32 pg_atomic_fetch_and_u32(volatile pg_atomic_uint32 *ptr, uint32 and_)
Definition: atomics.h:396
static bool pg_atomic_compare_exchange_u32(volatile pg_atomic_uint32 *ptr, uint32 *expected, uint32 newval)
Definition: atomics.h:349
static void pg_atomic_write_u64(volatile pg_atomic_uint64 *ptr, uint64 val)
Definition: atomics.h:485
static void pg_atomic_clear_flag(volatile pg_atomic_flag *ptr)
Definition: atomics.h:207
static uint32 pg_atomic_fetch_or_u32(volatile pg_atomic_uint32 *ptr, uint32 or_)
Definition: atomics.h:410
static uint32 pg_atomic_sub_fetch_u32(volatile pg_atomic_uint32 *ptr, int32 sub_)
Definition: atomics.h:439
#define pg_memory_barrier()
Definition: atomics.h:143
static uint32 pg_atomic_read_membarrier_u32(volatile pg_atomic_uint32 *ptr)
Definition: atomics.h:258
static uint32 pg_atomic_fetch_sub_u32(volatile pg_atomic_uint32 *ptr, int32 sub_)
Definition: atomics.h:381
static bool pg_atomic_compare_exchange_u64(volatile pg_atomic_uint64 *ptr, uint64 *expected, uint64 newval)
Definition: atomics.h:512
static void pg_atomic_unlocked_write_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
Definition: atomics.h:295
static void pg_atomic_init_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
Definition: atomics.h:221
static uint64 pg_atomic_read_membarrier_u64(volatile pg_atomic_uint64 *ptr)
Definition: atomics.h:476
static uint32 pg_atomic_fetch_add_u32(volatile pg_atomic_uint32 *ptr, int32 add_)
Definition: atomics.h:366
static uint32 pg_atomic_add_fetch_u32(volatile pg_atomic_uint32 *ptr, int32 add_)
Definition: atomics.h:424
static uint64 pg_atomic_monotonic_advance_u64(volatile pg_atomic_uint64 *ptr, uint64 target)
Definition: atomics.h:585
static uint64 pg_atomic_fetch_add_u64(volatile pg_atomic_uint64 *ptr, int64 add_)
Definition: atomics.h:522
static bool pg_atomic_test_set_flag(volatile pg_atomic_flag *ptr)
Definition: atomics.h:183
static uint64 pg_atomic_sub_fetch_u64(volatile pg_atomic_uint64 *ptr, int64 sub_)
Definition: atomics.h:568
static bool pg_atomic_unlocked_test_flag(volatile pg_atomic_flag *ptr)
Definition: atomics.h:196
static void pg_atomic_write_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
Definition: atomics.h:276
static void pg_atomic_write_membarrier_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
Definition: atomics.h:315
static uint64 pg_atomic_fetch_and_u64(volatile pg_atomic_uint64 *ptr, uint64 and_)
Definition: atomics.h:541
static uint32 pg_atomic_read_u32(volatile pg_atomic_uint32 *ptr)
Definition: atomics.h:239
static uint64 pg_atomic_fetch_or_u64(volatile pg_atomic_uint64 *ptr, uint64 or_)
Definition: atomics.h:550
static uint64 pg_atomic_add_fetch_u64(volatile pg_atomic_uint64 *ptr, int64 add_)
Definition: atomics.h:559
static uint32 pg_atomic_exchange_u32(volatile pg_atomic_uint32 *ptr, uint32 newval)
Definition: atomics.h:330
static void pg_atomic_init_u64(volatile pg_atomic_uint64 *ptr, uint64 val)
Definition: atomics.h:453
static void pg_atomic_write_membarrier_u64(volatile pg_atomic_uint64 *ptr, uint64 val)
Definition: atomics.h:494
static uint64 pg_atomic_read_u64(volatile pg_atomic_uint64 *ptr)
Definition: atomics.h:467
static uint64 pg_atomic_fetch_sub_u64(volatile pg_atomic_uint64 *ptr, int64 sub_)
Definition: atomics.h:531
static uint64 pg_atomic_exchange_u64(volatile pg_atomic_uint64 *ptr, uint64 newval)
Definition: atomics.h:503
static void pg_atomic_init_flag(volatile pg_atomic_flag *ptr)
Definition: atomics.h:170
#define AssertPointerAlignment(ptr, bndr)
Definition: c.h:865
int64_t int64
Definition: c.h:499
int32_t int32
Definition: c.h:498
#define PG_INT64_MIN
Definition: c.h:562
uint64_t uint64
Definition: c.h:503
uint32_t uint32
Definition: c.h:502
static uint32 pg_atomic_exchange_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 newval)
Definition: generic-msvc.h:61
static void pg_atomic_unlocked_write_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val)
Definition: generic.h:64
static uint64 pg_atomic_read_u64_impl(volatile pg_atomic_uint64 *ptr)
Definition: generic.h:319
static void pg_atomic_write_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val)
Definition: generic.h:55
static void pg_atomic_write_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 val)
Definition: generic.h:288
static uint32 pg_atomic_read_u32_impl(volatile pg_atomic_uint32 *ptr)
Definition: generic.h:46
static void pg_atomic_init_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val_)
Definition: generic.h:151
#define newval
Assert(PointerIsAligned(start, uint64))
long val
Definition: informix.c:689