/*-------------------------------------------------------------------------
 *
 * generic-gcc.h
 *	  Atomic operations, implemented using gcc (or compatible) intrinsics.
 *
 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * NOTES:
 *
 * Documentation:
 * * Legacy __sync Built-in Functions for Atomic Memory Access
 *   https://gcc.gnu.org/onlinedocs/gcc-4.8.2/gcc/_005f_005fsync-Builtins.html
 * * Built-in functions for memory model aware atomic operations
 *   https://gcc.gnu.org/onlinedocs/gcc-4.8.2/gcc/_005f_005fatomic-Builtins.html
 *
 * src/include/port/atomics/generic-gcc.h
 *
 *-------------------------------------------------------------------------
 */

/* intentionally no include guards, should only be included by atomics.h */
#ifndef INSIDE_ATOMICS_H
#error "should be included via atomics.h"
#endif

/*
 * An empty asm block should be a sufficient compiler barrier.
 */
#define pg_compiler_barrier_impl()	__asm__ __volatile__("" ::: "memory")
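
/*
 * Illustrative sketch (not part of this file): a compiler barrier only
 * constrains the compiler; it emits no fence instruction.  It suffices,
 * e.g., to force a non-volatile variable to be re-read in a busy-wait
 * loop ("flag" is a hypothetical variable shared with another thread):
 *
 *	while (flag == 0)
 *		pg_compiler_barrier();	// load of flag can't be hoisted out of the loop
 */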

/*
 * If we're on GCC 4.1.0 or higher, we should be able to get a memory barrier
 * out of this compiler built-in. But we prefer to rely on platform-specific
 * definitions where possible, and use this only as a fallback.
 */
#if !defined(pg_memory_barrier_impl)
#	if defined(HAVE_GCC__ATOMIC_INT32_CAS)
#		define pg_memory_barrier_impl()		__atomic_thread_fence(__ATOMIC_SEQ_CST)
#	elif (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1))
#		define pg_memory_barrier_impl()		__sync_synchronize()
#	endif
#endif /* !defined(pg_memory_barrier_impl) */
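
/*
 * Illustrative sketch (not part of this file): a full memory barrier orders
 * a store before a subsequent load, which neither the read nor the write
 * barrier guarantees.  The classic case is a two-flag handshake (variable
 * names are hypothetical); each side runs:
 *
 *	my_flag = 1;
 *	pg_memory_barrier();
 *	if (other_flag == 0)
 *		... we won; enter the critical section ...
 *
 * Without the full barrier, the load of other_flag could be reordered
 * before the store to my_flag, and both sides could see 0.
 */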

#if !defined(pg_read_barrier_impl) && defined(HAVE_GCC__ATOMIC_INT32_CAS)
/* acquire semantics include read barrier semantics */
#	define pg_read_barrier_impl()		__atomic_thread_fence(__ATOMIC_ACQUIRE)
#endif

#if !defined(pg_write_barrier_impl) && defined(HAVE_GCC__ATOMIC_INT32_CAS)
/* release semantics include write barrier semantics */
#	define pg_write_barrier_impl()		__atomic_thread_fence(__ATOMIC_RELEASE)
#endif
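
/*
 * Illustrative sketch (not part of this file): the one-way barriers are
 * typically paired across processes ("data" and "ready" are hypothetical
 * shared variables).  The writer publishes:
 *
 *	data = 42;
 *	pg_write_barrier();		// data must be visible before ready
 *	ready = true;
 *
 * and the reader consumes:
 *
 *	if (ready)
 *	{
 *		pg_read_barrier();	// don't read data before seeing ready
 *		Assert(data == 42);
 *	}
 */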


/* generic gcc based atomic flag implementation */
#if !defined(PG_HAVE_ATOMIC_FLAG_SUPPORT) \
	&& (defined(HAVE_GCC__SYNC_INT32_TAS) || defined(HAVE_GCC__SYNC_CHAR_TAS))

#define PG_HAVE_ATOMIC_FLAG_SUPPORT
typedef struct pg_atomic_flag
{
	/*
	 * If we have a choice, use int-width TAS, because that is more efficient
	 * and/or more reliably implemented on most non-Intel platforms.  (Note
	 * that this code isn't used on x86[_64]; see arch-x86.h for that.)
	 */
#ifdef HAVE_GCC__SYNC_INT32_TAS
	volatile int value;
#else
	volatile char value;
#endif
} pg_atomic_flag;

#endif /* !ATOMIC_FLAG_SUPPORT && SYNC_INT32_TAS */

/* generic gcc based atomic uint32 implementation */
#if !defined(PG_HAVE_ATOMIC_U32_SUPPORT) \
	&& (defined(HAVE_GCC__ATOMIC_INT32_CAS) || defined(HAVE_GCC__SYNC_INT32_CAS))

#define PG_HAVE_ATOMIC_U32_SUPPORT
typedef struct pg_atomic_uint32
{
	volatile uint32 value;
} pg_atomic_uint32;

#endif /* defined(HAVE_GCC__ATOMIC_INT32_CAS) || defined(HAVE_GCC__SYNC_INT32_CAS) */

/* generic gcc based atomic uint64 implementation */
#if !defined(PG_HAVE_ATOMIC_U64_SUPPORT) \
	&& !defined(PG_DISABLE_64_BIT_ATOMICS) \
	&& (defined(HAVE_GCC__ATOMIC_INT64_CAS) || defined(HAVE_GCC__SYNC_INT64_CAS))

#define PG_HAVE_ATOMIC_U64_SUPPORT

typedef struct pg_atomic_uint64
{
	volatile uint64 value pg_attribute_aligned(8);
} pg_atomic_uint64;

#endif /* defined(HAVE_GCC__ATOMIC_INT64_CAS) || defined(HAVE_GCC__SYNC_INT64_CAS) */
#ifdef PG_HAVE_ATOMIC_FLAG_SUPPORT

#if defined(HAVE_GCC__SYNC_CHAR_TAS) || defined(HAVE_GCC__SYNC_INT32_TAS)

#ifndef PG_HAVE_ATOMIC_TEST_SET_FLAG
#define PG_HAVE_ATOMIC_TEST_SET_FLAG
static inline bool
pg_atomic_test_set_flag_impl(volatile pg_atomic_flag *ptr)
{
	/* NB: only an acquire barrier, not a full one */
	/* some platforms only support a 1 here */
	return __sync_lock_test_and_set(&ptr->value, 1) == 0;
}
#endif

#endif /* defined(HAVE_GCC__SYNC_*_TAS) */

#ifndef PG_HAVE_ATOMIC_UNLOCKED_TEST_FLAG
#define PG_HAVE_ATOMIC_UNLOCKED_TEST_FLAG
static inline bool
pg_atomic_unlocked_test_flag_impl(volatile pg_atomic_flag *ptr)
{
	return ptr->value == 0;
}
#endif

#ifndef PG_HAVE_ATOMIC_CLEAR_FLAG
#define PG_HAVE_ATOMIC_CLEAR_FLAG
static inline void
pg_atomic_clear_flag_impl(volatile pg_atomic_flag *ptr)
{
	__sync_lock_release(&ptr->value);
}
#endif

#ifndef PG_HAVE_ATOMIC_INIT_FLAG
#define PG_HAVE_ATOMIC_INIT_FLAG
static inline void
pg_atomic_init_flag_impl(volatile pg_atomic_flag *ptr)
{
	pg_atomic_clear_flag_impl(ptr);
}
#endif

#endif /* defined(PG_HAVE_ATOMIC_FLAG_SUPPORT) */
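
/*
 * Illustrative sketch (not part of this file): via the wrappers in
 * atomics.h, the flag operations above can serve as a minimal spinlock
 * ("lock" is a hypothetical variable):
 *
 *	static pg_atomic_flag lock;
 *
 *	pg_atomic_init_flag(&lock);				// once, before first use
 *
 *	while (!pg_atomic_test_set_flag(&lock))
 *		;									// spin; success implies acquire semantics
 *	... critical section ...
 *	pg_atomic_clear_flag(&lock);			// release
 */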

/* prefer __atomic, it has a better API */
#if !defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32) && defined(HAVE_GCC__ATOMIC_INT32_CAS)
#define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32
static inline bool
pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
									uint32 *expected, uint32 newval)
{
	/* FIXME: we can probably use a lower consistency model */
	return __atomic_compare_exchange_n(&ptr->value, expected, newval, false,
									   __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}
#endif
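
/*
 * Illustrative sketch (not part of this file): compare-and-exchange is
 * normally used in a retry loop.  On failure the implementation writes
 * the value actually found back into *expected, so the caller need not
 * re-read it ("counter" is a hypothetical pg_atomic_uint32):
 *
 *	uint32		old = pg_atomic_read_u32(&counter);
 *
 *	while (!pg_atomic_compare_exchange_u32(&counter, &old, old * 2))
 *		;							// old was refreshed; recompute and retry
 */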

#if !defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32) && defined(HAVE_GCC__SYNC_INT32_CAS)
#define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32
static inline bool
pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
									uint32 *expected, uint32 newval)
{
	bool		ret;
	uint32		current;
	current = __sync_val_compare_and_swap(&ptr->value, *expected, newval);
	ret = current == *expected;
	*expected = current;
	return ret;
}
#endif

/*
 * __sync_lock_test_and_set() only supports setting the value to 1 on some
 * platforms, so we only provide an __atomic implementation for
 * pg_atomic_exchange.
 *
 * We assume the availability of 32-bit __atomic_compare_exchange_n() implies
 * the availability of 32-bit __atomic_exchange_n().
 */
#if !defined(PG_HAVE_ATOMIC_EXCHANGE_U32) && defined(HAVE_GCC__ATOMIC_INT32_CAS)
#define PG_HAVE_ATOMIC_EXCHANGE_U32
static inline uint32
pg_atomic_exchange_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 newval)
{
	return __atomic_exchange_n(&ptr->value, newval, __ATOMIC_SEQ_CST);
}
#endif
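
/*
 * Illustrative sketch (not part of this file): unlike compare-and-exchange,
 * exchange swaps unconditionally, which suits take-ownership patterns
 * ("pending" is a hypothetical pg_atomic_uint32):
 *
 *	uint32		mine = pg_atomic_exchange_u32(&pending, 0);
 *
 *	// mine holds whatever was pending; concurrent readers now see 0
 */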

/* if we have 32-bit __sync_val_compare_and_swap, assume we have these too: */

#if !defined(PG_HAVE_ATOMIC_FETCH_ADD_U32) && defined(HAVE_GCC__SYNC_INT32_CAS)
#define PG_HAVE_ATOMIC_FETCH_ADD_U32
static inline uint32
pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_)
{
	return __sync_fetch_and_add(&ptr->value, add_);
}
#endif

#if !defined(PG_HAVE_ATOMIC_FETCH_SUB_U32) && defined(HAVE_GCC__SYNC_INT32_CAS)
#define PG_HAVE_ATOMIC_FETCH_SUB_U32
static inline uint32
pg_atomic_fetch_sub_u32_impl(volatile pg_atomic_uint32 *ptr, int32 sub_)
{
	return __sync_fetch_and_sub(&ptr->value, sub_);
}
#endif

#if !defined(PG_HAVE_ATOMIC_FETCH_AND_U32) && defined(HAVE_GCC__SYNC_INT32_CAS)
#define PG_HAVE_ATOMIC_FETCH_AND_U32
static inline uint32
pg_atomic_fetch_and_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 and_)
{
	return __sync_fetch_and_and(&ptr->value, and_);
}
#endif

#if !defined(PG_HAVE_ATOMIC_FETCH_OR_U32) && defined(HAVE_GCC__SYNC_INT32_CAS)
#define PG_HAVE_ATOMIC_FETCH_OR_U32
static inline uint32
pg_atomic_fetch_or_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 or_)
{
	return __sync_fetch_and_or(&ptr->value, or_);
}
#endif
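
/*
 * Illustrative sketch (not part of this file): the fetch-and-op functions
 * return the value as it was *before* the operation, which makes, e.g.,
 * id assignment race-free ("next_id" is a hypothetical pg_atomic_uint32):
 *
 *	uint32		my_id = pg_atomic_fetch_add_u32(&next_id, 1);
 *
 *	// every concurrent caller obtains a distinct my_id
 */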


#if !defined(PG_DISABLE_64_BIT_ATOMICS)

#if !defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64) && defined(HAVE_GCC__ATOMIC_INT64_CAS)
#define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64
static inline bool
pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
									uint64 *expected, uint64 newval)
{
	AssertPointerAlignment(expected, 8);
	return __atomic_compare_exchange_n(&ptr->value, expected, newval, false,
									   __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}
#endif

#if !defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64) && defined(HAVE_GCC__SYNC_INT64_CAS)
#define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64
static inline bool
pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
									uint64 *expected, uint64 newval)
{
	bool		ret;
	uint64		current;

	AssertPointerAlignment(expected, 8);
	current = __sync_val_compare_and_swap(&ptr->value, *expected, newval);
	ret = current == *expected;
	*expected = current;
	return ret;
}
#endif
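
/*
 * Illustrative sketch (not part of this file): the AssertPointerAlignment
 * calls above exist because 64-bit atomics need naturally aligned operands
 * on many platforms.  pg_atomic_uint64 carries pg_attribute_aligned(8)
 * itself, but the caller's "expected" pointer must be 8-byte aligned too;
 * a plain local variable qualifies ("val" is a hypothetical
 * pg_atomic_uint64):
 *
 *	uint64		expected = pg_atomic_read_u64(&val);
 *
 *	pg_atomic_compare_exchange_u64(&val, &expected, expected + 1);
 */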

/*
 * __sync_lock_test_and_set() only supports setting the value to 1 on some
 * platforms, so we only provide an __atomic implementation for
 * pg_atomic_exchange.
 *
 * We assume the availability of 64-bit __atomic_compare_exchange_n() implies
 * the availability of 64-bit __atomic_exchange_n().
 */
#if !defined(PG_HAVE_ATOMIC_EXCHANGE_U64) && defined(HAVE_GCC__ATOMIC_INT64_CAS)
#define PG_HAVE_ATOMIC_EXCHANGE_U64
static inline uint64
pg_atomic_exchange_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 newval)
{
	return __atomic_exchange_n(&ptr->value, newval, __ATOMIC_SEQ_CST);
}
#endif

/* if we have 64-bit __sync_val_compare_and_swap, assume we have these too: */

#if !defined(PG_HAVE_ATOMIC_FETCH_ADD_U64) && defined(HAVE_GCC__SYNC_INT64_CAS)
#define PG_HAVE_ATOMIC_FETCH_ADD_U64
static inline uint64
pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
{
	return __sync_fetch_and_add(&ptr->value, add_);
}
#endif

#if !defined(PG_HAVE_ATOMIC_FETCH_SUB_U64) && defined(HAVE_GCC__SYNC_INT64_CAS)
#define PG_HAVE_ATOMIC_FETCH_SUB_U64
static inline uint64
pg_atomic_fetch_sub_u64_impl(volatile pg_atomic_uint64 *ptr, int64 sub_)
{
	return __sync_fetch_and_sub(&ptr->value, sub_);
}
#endif

#if !defined(PG_HAVE_ATOMIC_FETCH_AND_U64) && defined(HAVE_GCC__SYNC_INT64_CAS)
#define PG_HAVE_ATOMIC_FETCH_AND_U64
static inline uint64
pg_atomic_fetch_and_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 and_)
{
	return __sync_fetch_and_and(&ptr->value, and_);
}
#endif

#if !defined(PG_HAVE_ATOMIC_FETCH_OR_U64) && defined(HAVE_GCC__SYNC_INT64_CAS)
#define PG_HAVE_ATOMIC_FETCH_OR_U64
static inline uint64
pg_atomic_fetch_or_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 or_)
{
	return __sync_fetch_and_or(&ptr->value, or_);
}
#endif

#endif /* !defined(PG_DISABLE_64_BIT_ATOMICS) */