generic.h
1 /*-------------------------------------------------------------------------
2  *
3  * generic.h
4  * Implement higher level operations based on some lower level atomic
5  * operations.
6  *
7  * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
8  * Portions Copyright (c) 1994, Regents of the University of California
9  *
10  * src/include/port/atomics/generic.h
11  *
12  *-------------------------------------------------------------------------
13  */
14 
15 /* intentionally no include guards, should only be included by atomics.h */
16 #ifndef INSIDE_ATOMICS_H
17 # error "should be included via atomics.h"
18 #endif
19 
20 /*
21  * If read or write barriers are undefined, we upgrade them to full memory
22  * barriers.
23  */
24 #if !defined(pg_read_barrier_impl)
25 # define pg_read_barrier_impl pg_memory_barrier_impl
26 #endif
27 #if !defined(pg_write_barrier_impl)
28 # define pg_write_barrier_impl pg_memory_barrier_impl
29 #endif
30 
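The fallback above is deliberately conservative: a full memory barrier orders both loads and stores, so it is always a safe substitute for a read-only or write-only barrier, just potentially slower. A minimal standalone C11 sketch of the same idea, using <stdatomic.h> fences purely for illustration (not part of generic.h):

#include <stdatomic.h>

/* Illustrative only: if no dedicated read (acquire) barrier exists,
 * a full sequentially-consistent fence is a correct, stronger stand-in. */
static inline void
demo_read_barrier(void)
{
    /* preferred when available: atomic_thread_fence(memory_order_acquire); */
    atomic_thread_fence(memory_order_seq_cst);
}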
31 #ifndef PG_HAVE_SPIN_DELAY
32 #define PG_HAVE_SPIN_DELAY
33 #define pg_spin_delay_impl() ((void)0)
34 #endif
35 
36 
37 /* provide fallback */
38 #if !defined(PG_HAVE_ATOMIC_FLAG_SUPPORT) && defined(PG_HAVE_ATOMIC_U32_SUPPORT)
39 #define PG_HAVE_ATOMIC_FLAG_SUPPORT
40 typedef pg_atomic_uint32 pg_atomic_flag;
41 #endif
42 
43 #ifndef PG_HAVE_ATOMIC_READ_U32
44 #define PG_HAVE_ATOMIC_READ_U32
45 static inline uint32
46 pg_atomic_read_u32_impl(volatile pg_atomic_uint32 *ptr)
47 {
48  return *(&ptr->value);
49 }
50 #endif
51 
52 #ifndef PG_HAVE_ATOMIC_WRITE_U32
53 #define PG_HAVE_ATOMIC_WRITE_U32
54 static inline void
55 pg_atomic_write_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val)
56 {
57  ptr->value = val;
58 }
59 #endif
60 
61 #ifndef PG_HAVE_ATOMIC_UNLOCKED_WRITE_U32
62 #define PG_HAVE_ATOMIC_UNLOCKED_WRITE_U32
63 static inline void
64 pg_atomic_unlocked_write_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val)
65 {
66  ptr->value = val;
67 }
68 #endif
69 
70 /*
71  * provide fallback for test_and_set using atomic_exchange if available
72  */
73 #if !defined(PG_HAVE_ATOMIC_TEST_SET_FLAG) && defined(PG_HAVE_ATOMIC_EXCHANGE_U32)
74 
75 #define PG_HAVE_ATOMIC_INIT_FLAG
76 static inline void
77 pg_atomic_init_flag_impl(volatile pg_atomic_flag *ptr)
78 {
79  pg_atomic_write_u32_impl(ptr, 0);
80 }
81 
82 #define PG_HAVE_ATOMIC_TEST_SET_FLAG
83 static inline bool
84 pg_atomic_test_set_flag_impl(volatile pg_atomic_flag *ptr)
85 {
86  return pg_atomic_exchange_u32_impl(ptr, 1) == 0;
87 }
88 
89 #define PG_HAVE_ATOMIC_UNLOCKED_TEST_FLAG
90 static inline bool
91 pg_atomic_unlocked_test_flag_impl(volatile pg_atomic_flag *ptr)
92 {
93  return pg_atomic_read_u32_impl(ptr) == 0;
94 }
95 
96 
97 #define PG_HAVE_ATOMIC_CLEAR_FLAG
98 static inline void
99 pg_atomic_clear_flag_impl(volatile pg_atomic_flag *ptr)
100 {
101  /* XXX: release semantics suffice? */
102  pg_memory_barrier_impl();
103  pg_atomic_write_u32_impl(ptr, 0);
104 }
105 
106 /*
107  * provide fallback for test_and_set using atomic_compare_exchange if
108  * available.
109  */
110 #elif !defined(PG_HAVE_ATOMIC_TEST_SET_FLAG) && defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32)
111 
112 #define PG_HAVE_ATOMIC_INIT_FLAG
113 static inline void
114 pg_atomic_init_flag_impl(volatile pg_atomic_flag *ptr)
115 {
116  pg_atomic_write_u32_impl(ptr, 0);
117 }
118 
119 #define PG_HAVE_ATOMIC_TEST_SET_FLAG
120 static inline bool
121 pg_atomic_test_set_flag_impl(volatile pg_atomic_flag *ptr)
122 {
123  uint32 value = 0;
124  return pg_atomic_compare_exchange_u32_impl(ptr, &value, 1);
125 }
126 
127 #define PG_HAVE_ATOMIC_UNLOCKED_TEST_FLAG
128 static inline bool
129 pg_atomic_unlocked_test_flag_impl(volatile pg_atomic_flag *ptr)
130 {
131  return pg_atomic_read_u32_impl(ptr) == 0;
132 }
133 
134 #define PG_HAVE_ATOMIC_CLEAR_FLAG
135 static inline void
136 pg_atomic_clear_flag_impl(volatile pg_atomic_flag *ptr)
137 {
138  /*
139  * Use a memory barrier + plain write if we have a native memory
140  * barrier. But don't do so if memory barriers use spinlocks - that'd lead
141  * to circularity if flags are used to implement spinlocks.
142  */
143 #ifndef PG_HAVE_MEMORY_BARRIER_EMULATION
144  /* XXX: release semantics suffice? */
145  pg_memory_barrier_impl();
146  pg_atomic_write_u32_impl(ptr, 0);
147 #else
148  uint32 value = 1;
149  pg_atomic_compare_exchange_u32_impl(ptr, &value, 0);
150 #endif
151 }
152 
153 #elif !defined(PG_HAVE_ATOMIC_TEST_SET_FLAG)
154 # error "No pg_atomic_test_and_set provided"
155 #endif /* !defined(PG_HAVE_ATOMIC_TEST_SET_FLAG) */
156 
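Both flag fallbacks above reduce test-and-set to a 32-bit primitive: the flag counts as acquired when the old value was 0. The compare-exchange branch also shows why clear_flag avoids the barrier-plus-plain-store shortcut when barriers are themselves emulated with spinlocks, since that would be circular. A self-contained C11 sketch of the exchange-based variant and its typical spinlock use (illustrative only; the demo_* names are not part of generic.h):

#include <stdatomic.h>
#include <stdbool.h>

/* A test-and-set flag emulated on top of a 32-bit atomic, mirroring the
 * pg_atomic_uint32-based fallback above. */
typedef atomic_uint demo_flag;

static inline bool
demo_test_set_flag(demo_flag *flag)
{
    /* true if we set the flag, i.e. the previous value was 0 */
    return atomic_exchange(flag, 1u) == 0u;
}

static inline void
demo_clear_flag(demo_flag *flag)
{
    atomic_store(flag, 0u);
}

/* Typical use: a minimal spinlock. */
static inline void
demo_spin_lock(demo_flag *flag)
{
    while (!demo_test_set_flag(flag))
        ;                       /* spin until we observe the flag clear */
}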
157 
158 #ifndef PG_HAVE_ATOMIC_INIT_U32
159 #define PG_HAVE_ATOMIC_INIT_U32
160 static inline void
161 pg_atomic_init_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val_)
162 {
163  pg_atomic_write_u32_impl(ptr, val_);
164 }
165 #endif
166 
167 #if !defined(PG_HAVE_ATOMIC_EXCHANGE_U32) && defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32)
168 #define PG_HAVE_ATOMIC_EXCHANGE_U32
169 static inline uint32
170 pg_atomic_exchange_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 xchg_)
171 {
172  uint32 old;
173  while (true)
174  {
175  old = pg_atomic_read_u32_impl(ptr);
176  if (pg_atomic_compare_exchange_u32_impl(ptr, &old, xchg_))
177  break;
178  }
179  return old;
180 }
181 #endif
182 
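The exchange fallback above, and the fetch_add/fetch_and/fetch_or fallbacks below, all use the same compare-exchange loop: read the current value, compute the desired value, and retry until the CAS succeeds, then return what was there before. A standalone C11 sketch of that loop (illustrative only, not part of generic.h):

#include <stdatomic.h>

/* Atomically replace *ptr with newval and return the previous value,
 * using only compare-exchange, as the fallbacks here do. */
static inline unsigned
demo_exchange_u32(atomic_uint *ptr, unsigned newval)
{
    unsigned old = atomic_load(ptr);

    /* On failure C11 writes the current value back into 'old', so the
     * loop retries with up-to-date information until it wins the race. */
    while (!atomic_compare_exchange_weak(ptr, &old, newval))
        ;
    return old;
}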
183 #if !defined(PG_HAVE_ATOMIC_FETCH_ADD_U32) && defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32)
184 #define PG_HAVE_ATOMIC_FETCH_ADD_U32
185 static inline uint32
186 pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_)
187 {
188  uint32 old;
189  while (true)
190  {
191  old = pg_atomic_read_u32_impl(ptr);
192  if (pg_atomic_compare_exchange_u32_impl(ptr, &old, old + add_))
193  break;
194  }
195  return old;
196 }
197 #endif
198 
199 #if !defined(PG_HAVE_ATOMIC_FETCH_SUB_U32) && defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32)
200 #define PG_HAVE_ATOMIC_FETCH_SUB_U32
201 static inline uint32
202 pg_atomic_fetch_sub_u32_impl(volatile pg_atomic_uint32 *ptr, int32 sub_)
203 {
204  return pg_atomic_fetch_add_u32_impl(ptr, -sub_);
205 }
206 #endif
207 
208 #if !defined(PG_HAVE_ATOMIC_FETCH_AND_U32) && defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32)
209 #define PG_HAVE_ATOMIC_FETCH_AND_U32
210 static inline uint32
211 pg_atomic_fetch_and_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 and_)
212 {
213  uint32 old;
214  while (true)
215  {
216  old = pg_atomic_read_u32_impl(ptr);
217  if (pg_atomic_compare_exchange_u32_impl(ptr, &old, old & and_))
218  break;
219  }
220  return old;
221 }
222 #endif
223 
224 #if !defined(PG_HAVE_ATOMIC_FETCH_OR_U32) && defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32)
225 #define PG_HAVE_ATOMIC_FETCH_OR_U32
226 static inline uint32
227 pg_atomic_fetch_or_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 or_)
228 {
229  uint32 old;
230  while (true)
231  {
232  old = pg_atomic_read_u32_impl(ptr);
233  if (pg_atomic_compare_exchange_u32_impl(ptr, &old, old | or_))
234  break;
235  }
236  return old;
237 }
238 #endif
239 
240 #if !defined(PG_HAVE_ATOMIC_ADD_FETCH_U32) && defined(PG_HAVE_ATOMIC_FETCH_ADD_U32)
241 #define PG_HAVE_ATOMIC_ADD_FETCH_U32
242 static inline uint32
243 pg_atomic_add_fetch_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_)
244 {
245  return pg_atomic_fetch_add_u32_impl(ptr, add_) + add_;
246 }
247 #endif
248 
249 #if !defined(PG_HAVE_ATOMIC_SUB_FETCH_U32) && defined(PG_HAVE_ATOMIC_FETCH_SUB_U32)
250 #define PG_HAVE_ATOMIC_SUB_FETCH_U32
251 static inline uint32
252 pg_atomic_sub_fetch_u32_impl(volatile pg_atomic_uint32 *ptr, int32 sub_)
253 {
254  return pg_atomic_fetch_sub_u32_impl(ptr, sub_) - sub_;
255 }
256 #endif
257 
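The add_fetch/sub_fetch variants above differ from fetch_add/fetch_sub only in their return value: the former return the value after the operation, which is why they can be derived by simply adding (or subtracting) the operand to the old value that fetch_add returns. A small self-contained C11 illustration (not part of generic.h):

#include <stdatomic.h>
#include <assert.h>

static void
demo_fetch_add_vs_add_fetch(void)
{
    atomic_uint counter;

    atomic_init(&counter, 10u);

    /* fetch_add returns the value *before* the addition ... */
    unsigned old = atomic_fetch_add(&counter, 5u);      /* old == 10 */

    /* ... so the "add_fetch" result is just old + addend. */
    unsigned after = old + 5u;                          /* after == 15 */

    assert(old == 10u);
    assert(after == 15u && atomic_load(&counter) == 15u);
}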
258 #if !defined(PG_HAVE_ATOMIC_EXCHANGE_U64) && defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64)
259 #define PG_HAVE_ATOMIC_EXCHANGE_U64
260 static inline uint64
261 pg_atomic_exchange_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 xchg_)
262 {
263  uint64 old;
264  while (true)
265  {
266  old = ptr->value;
267  if (pg_atomic_compare_exchange_u64_impl(ptr, &old, xchg_))
268  break;
269  }
270  return old;
271 }
272 #endif
273 
274 #ifndef PG_HAVE_ATOMIC_WRITE_U64
275 #define PG_HAVE_ATOMIC_WRITE_U64
276 
277 #if defined(PG_HAVE_8BYTE_SINGLE_COPY_ATOMICITY) && \
278  !defined(PG_HAVE_ATOMIC_U64_SIMULATION)
279 
280 static inline void
281 pg_atomic_write_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 val)
282 {
283  /*
284  * On this platform aligned 64bit writes are guaranteed to be atomic,
285  * except if using the fallback implementation, where we can't guarantee the
286  * required alignment.
287  */
288  AssertPointerAlignment(ptr, 8);
289  ptr->value = val;
290 }
291 
292 #else
293 
294 static inline void
295 pg_atomic_write_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 val)
296 {
297  /*
298  * 64 bit writes aren't safe on all platforms. In the generic
299  * implementation, implement them as an atomic exchange.
300  */
301  pg_atomic_exchange_u64_impl(ptr, val);
302 }
303 
304 #endif /* PG_HAVE_8BYTE_SINGLE_COPY_ATOMICITY && !PG_HAVE_ATOMIC_U64_SIMULATION */
305 #endif /* PG_HAVE_ATOMIC_WRITE_U64 */
306 
307 #ifndef PG_HAVE_ATOMIC_READ_U64
308 #define PG_HAVE_ATOMIC_READ_U64
309 
310 #if defined(PG_HAVE_8BYTE_SINGLE_COPY_ATOMICITY) && \
311  !defined(PG_HAVE_ATOMIC_U64_SIMULATION)
312 
313 static inline uint64
314 pg_atomic_read_u64_impl(volatile pg_atomic_uint64 *ptr)
315 {
316  /*
317  * On this platform aligned 64bit reads are guaranteed to be atomic,
318  * except if using the fallback implementation, where we can't guarantee the
319  * required alignment.
320  */
321  AssertPointerAlignment(ptr, 8);
322  return *(&ptr->value);
323 }
324 
325 #else
326 
327 static inline uint64
328 pg_atomic_read_u64_impl(volatile pg_atomic_uint64 *ptr)
329 {
330  uint64 old = 0;
331 
332  /*
333  * 64 bit reads aren't safe on all platforms. In the generic
334  * implementation, implement them as a compare/exchange with 0. That'll
335  * fail or succeed, but always return the old value. It might possibly
336  * store a 0, but only if the previous value also was a 0 - i.e. harmless.
337  */
338  pg_atomic_compare_exchange_u64_impl(ptr, &old, 0);
339 
340  return old;
341 }
342 #endif /* PG_HAVE_8BYTE_SINGLE_COPY_ATOMICITY && !PG_HAVE_ATOMIC_U64_SIMULATION */
343 #endif /* PG_HAVE_ATOMIC_READ_U64 */
344 
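The fallback read above leans on a useful property of compare-and-swap: succeed or fail, it always reports the value that was in memory. Comparing against 0 and conditionally storing 0 therefore either changes nothing (the value already was 0) or fails harmlessly, and in both cases the expected-value variable receives an atomic snapshot. A self-contained C11 sketch of the same trick (illustrative only, not part of generic.h):

#include <stdatomic.h>
#include <stdint.h>

/* Read a 64-bit value atomically using only compare-exchange, for
 * platforms where a plain 64-bit load could tear. */
static inline uint64_t
demo_read_u64(_Atomic uint64_t *ptr)
{
    uint64_t old = 0;

    /* If *ptr == 0 the CAS stores 0 again (no visible change); otherwise
     * it fails and copies the current value into 'old'. Either way 'old'
     * now holds an atomic snapshot of *ptr. */
    atomic_compare_exchange_strong(ptr, &old, 0);
    return old;
}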
345 #ifndef PG_HAVE_ATOMIC_INIT_U64
346 #define PG_HAVE_ATOMIC_INIT_U64
347 static inline void
348 pg_atomic_init_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 val_)
349 {
350  pg_atomic_write_u64_impl(ptr, val_);
351 }
352 #endif
353 
354 #if !defined(PG_HAVE_ATOMIC_FETCH_ADD_U64) && defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64)
355 #define PG_HAVE_ATOMIC_FETCH_ADD_U64
356 static inline uint64
357 pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
358 {
359  uint64 old;
360  while (true)
361  {
362  old = pg_atomic_read_u64_impl(ptr);
363  if (pg_atomic_compare_exchange_u64_impl(ptr, &old, old + add_))
364  break;
365  }
366  return old;
367 }
368 #endif
369 
370 #if !defined(PG_HAVE_ATOMIC_FETCH_SUB_U64) && defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64)
371 #define PG_HAVE_ATOMIC_FETCH_SUB_U64
372 static inline uint64
373 pg_atomic_fetch_sub_u64_impl(volatile pg_atomic_uint64 *ptr, int64 sub_)
374 {
375  return pg_atomic_fetch_add_u64_impl(ptr, -sub_);
376 }
377 #endif
378 
379 #if !defined(PG_HAVE_ATOMIC_FETCH_AND_U64) && defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64)
380 #define PG_HAVE_ATOMIC_FETCH_AND_U64
381 static inline uint64
382 pg_atomic_fetch_and_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 and_)
383 {
384  uint64 old;
385  while (true)
386  {
387  old = pg_atomic_read_u64_impl(ptr);
388  if (pg_atomic_compare_exchange_u64_impl(ptr, &old, old & and_))
389  break;
390  }
391  return old;
392 }
393 #endif
394 
395 #if !defined(PG_HAVE_ATOMIC_FETCH_OR_U64) && defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64)
396 #define PG_HAVE_ATOMIC_FETCH_OR_U64
397 static inline uint64
398 pg_atomic_fetch_or_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 or_)
399 {
400  uint64 old;
401  while (true)
402  {
403  old = pg_atomic_read_u64_impl(ptr);
404  if (pg_atomic_compare_exchange_u64_impl(ptr, &old, old | or_))
405  break;
406  }
407  return old;
408 }
409 #endif
410 
411 #if !defined(PG_HAVE_ATOMIC_ADD_FETCH_U64) && defined(PG_HAVE_ATOMIC_FETCH_ADD_U64)
412 #define PG_HAVE_ATOMIC_ADD_FETCH_U64
413 static inline uint64
414 pg_atomic_add_fetch_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
415 {
416  return pg_atomic_fetch_add_u64_impl(ptr, add_) + add_;
417 }
418 #endif
419 
420 #if !defined(PG_HAVE_ATOMIC_SUB_FETCH_U64) && defined(PG_HAVE_ATOMIC_FETCH_SUB_U64)
421 #define PG_HAVE_ATOMIC_SUB_FETCH_U64
422 static inline uint64
423 pg_atomic_sub_fetch_u64_impl(volatile pg_atomic_uint64 *ptr, int64 sub_)
424 {
425  return pg_atomic_fetch_sub_u64_impl(ptr, sub_) - sub_;
426 }
427 #endif