/*-------------------------------------------------------------------------
 *
 * atomics.h
 *	  Atomic operations.
 *
 * Hardware and compiler dependent functions for manipulating memory
 * atomically and dealing with cache coherency. Used to implement locking
 * facilities and lockless algorithms/data structures.
 *
 * To bring up postgres on a platform/compiler, at the very least
 * implementations for the following operations should be provided:
 * * pg_compiler_barrier(), pg_write_barrier(), pg_read_barrier()
 * * pg_atomic_compare_exchange_u32(), pg_atomic_fetch_add_u32()
 * * pg_atomic_test_set_flag(), pg_atomic_init_flag(), pg_atomic_clear_flag()
 * * PG_HAVE_8BYTE_SINGLE_COPY_ATOMICITY should be defined if appropriate.
 *
 * There exist generic, hardware independent, implementations for several
 * compilers which might be sufficient, although possibly not optimal, for a
 * new platform. If no such generic implementation is available, spinlocks
 * (or even OS provided semaphores) will be used to implement the API.
 *
 * Implement _u64 atomics if and only if your platform can use them
 * efficiently (and obviously correctly).
 *
 * Use higher level functionality (lwlocks, spinlocks, heavyweight locks)
 * whenever possible. Writing correct code using these facilities is hard.
 *
 * For an introduction to using memory barriers within the PostgreSQL backend,
 * see src/backend/storage/lmgr/README.barrier
 *
 * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * src/include/port/atomics.h
 *
 *-------------------------------------------------------------------------
 */
#ifndef ATOMICS_H
#define ATOMICS_H

#ifdef FRONTEND
#error "atomics.h may not be included from frontend code"
#endif

#define INSIDE_ATOMICS_H

#include <limits.h>

/*
 * First a set of architecture specific files is included.
 *
 * These files can provide the full set of atomics or can do pretty much
 * nothing if all the compilers commonly used on these platforms provide
 * usable generics.
 *
 * Don't add inline assembly for the actual atomic operations if all the
 * common implementations on your platform provide intrinsics. Intrinsics are
 * much easier to understand and potentially support more architectures.
 *
 * It will often make sense to define memory barrier semantics here, since
 * e.g. generic compiler intrinsics for x86 memory barriers can't know that
 * postgres doesn't need x86 read/write barriers to do anything more than a
 * compiler barrier.
 *
 */
#if defined(__arm__) || defined(__arm) || defined(__aarch64__)
#include "port/atomics/arch-arm.h"
#elif defined(__i386__) || defined(__i386) || defined(__x86_64__)
#include "port/atomics/arch-x86.h"
#elif defined(__ppc__) || defined(__powerpc__) || defined(__ppc64__) || defined(__powerpc64__)
#include "port/atomics/arch-ppc.h"
#endif

/*
 * Compiler specific, but architecture independent implementations.
 *
 * Provide architecture independent implementations of the atomic
 * facilities. At the very least compiler barriers should be provided, but a
 * full implementation of
 * * pg_compiler_barrier(), pg_write_barrier(), pg_read_barrier()
 * * pg_atomic_compare_exchange_u32(), pg_atomic_fetch_add_u32()
 * using compiler intrinsics is a good idea.
 */
/*
 * gcc or compatible, including clang and icc.
 */
#if defined(__GNUC__) || defined(__INTEL_COMPILER)
#include "port/atomics/generic-gcc.h"
#elif defined(_MSC_VER)
#include "port/atomics/generic-msvc.h"
#elif defined(__SUNPRO_C) && !defined(__GNUC__)
#include "port/atomics/generic-sunpro.h"
#else
/*
 * Unsupported compiler, we'll likely use slower fallbacks... At least
 * compiler barriers should really be provided.
 */
#endif

/*
 * Provide a full fallback of the pg_*_barrier(), pg_atomic**_flag and
 * pg_atomic_* APIs for platforms without sufficient spinlock and/or atomics
 * support. In the case of spinlock backed atomics the emulation is expected
 * to be efficient, although less so than native atomics support.
 */
#include "port/atomics/fallback.h"

/*
 * Provide additional operations using supported infrastructure. These are
 * expected to be efficient if the underlying atomic operations are efficient.
 */
#include "port/atomics/generic.h"


/*
 * pg_compiler_barrier - prevent the compiler from moving code across
 *
 * A compiler barrier need not (and preferably should not) emit any actual
 * machine code, but must act as an optimization fence: the compiler must not
 * reorder loads or stores to main memory around the barrier. However, the
 * CPU may still reorder loads or stores at runtime, if the architecture's
 * memory model permits this.
 */
#define pg_compiler_barrier() pg_compiler_barrier_impl()
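
/*
 * Illustrative sketch (not part of the original header): a compiler barrier
 * alone suffices when only the compiler, not the CPU, could reorder the
 * accesses, e.g. when publishing data to a signal handler running on the
 * same CPU in the same process.  The variable names are hypothetical.
 *
 *     result = compute_answer();
 *     pg_compiler_barrier();
 *     result_ready = true;    the compiler cannot hoist this store above
 *                             the store to "result"
 */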

/*
 * pg_memory_barrier - prevent the CPU from reordering memory access
 *
 * A memory barrier must act as a compiler barrier, and in addition must
 * guarantee that all loads and stores issued prior to the barrier are
 * completed before any loads or stores issued after the barrier. Unless
 * loads and stores are totally ordered (which is not the case on most
 * architectures) this requires issuing some sort of memory fencing
 * instruction.
 */
#define pg_memory_barrier() pg_memory_barrier_impl()
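
/*
 * Illustrative sketch (not part of the original header): a full memory
 * barrier is required when a store must become visible before a subsequent
 * load, as in a Dekker-style handshake.  "my_flag" and "their_flag" are
 * hypothetical shared variables.
 *
 *     pg_atomic_write_u32(&my_flag, 1);
 *     pg_memory_barrier();
 *     if (pg_atomic_read_u32(&their_flag) == 0)
 *         ... the peer has not yet set its flag ...
 */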

/*
 * pg_(read|write)_barrier - prevent the CPU from reordering memory access
 *
 * A read barrier must act as a compiler barrier, and in addition must
 * guarantee that any loads issued prior to the barrier are completed before
 * any loads issued after the barrier. Similarly, a write barrier acts
 * as a compiler barrier, and also orders stores. Read and write barriers
 * are thus weaker than a full memory barrier, but stronger than a compiler
 * barrier. In practice, on machines with strong memory ordering, read and
 * write barriers may require nothing more than a compiler barrier.
 */
#define pg_read_barrier() pg_read_barrier_impl()
#define pg_write_barrier() pg_write_barrier_impl()
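
/*
 * Illustrative sketch (not part of the original header), following the
 * pairing rule described in src/backend/storage/lmgr/README.barrier: a
 * write barrier in the producer pairs with a read barrier in the consumer.
 * All names are hypothetical.
 *
 *     producer:
 *         data = compute();
 *         pg_write_barrier();     order the data store before the flag store
 *         flag = 1;
 *
 *     consumer:
 *         if (flag == 1)
 *         {
 *             pg_read_barrier();  order the flag load before the data load
 *             use(data);
 *         }
 */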

/*
 * Spinloop delay - Allow CPU to relax in busy loops
 */
#define pg_spin_delay() pg_spin_delay_impl()

/*
 * pg_atomic_init_flag - initialize atomic flag.
 *
 * No barrier semantics.
 */
static inline void
pg_atomic_init_flag(volatile pg_atomic_flag *ptr)
{
    pg_atomic_init_flag_impl(ptr);
}

/*
 * pg_atomic_test_set_flag - TAS()
 *
 * Returns true if the flag has successfully been set, false otherwise.
 *
 * Acquire (including read barrier) semantics.
 */
static inline bool
pg_atomic_test_set_flag(volatile pg_atomic_flag *ptr)
{
    return pg_atomic_test_set_flag_impl(ptr);
}

/*
 * pg_atomic_unlocked_test_flag - Check if the lock is free
 *
 * Returns true if the flag currently is not set, false otherwise.
 *
 * No barrier semantics.
 */
static inline bool
pg_atomic_unlocked_test_flag(volatile pg_atomic_flag *ptr)
{
    return pg_atomic_unlocked_test_flag_impl(ptr);
}

/*
 * pg_atomic_clear_flag - release lock set by TAS()
 *
 * Release (including write barrier) semantics.
 */
static inline void
pg_atomic_clear_flag(volatile pg_atomic_flag *ptr)
{
    pg_atomic_clear_flag_impl(ptr);
}

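/*
 * Illustrative sketch (not part of the original header): using the flag
 * operations as a minimal test-and-set spinlock.  The variable "lock" is
 * hypothetical and would normally live in shared memory.
 *
 *     pg_atomic_init_flag(&lock);          once, before any concurrent use
 *
 *     while (!pg_atomic_test_set_flag(&lock))
 *         pg_spin_delay();                 busy-wait until acquired
 *     ... critical section ...
 *     pg_atomic_clear_flag(&lock);         release
 */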

/*
 * pg_atomic_init_u32 - initialize atomic variable
 *
 * Has to be done before any concurrent usage.
 *
 * No barrier semantics.
 */
static inline void
pg_atomic_init_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
{
    AssertPointerAlignment(ptr, 4);

    pg_atomic_init_u32_impl(ptr, val);
}

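/*
 * Illustrative sketch (not part of the original header): declaring and
 * initializing an atomic counter before any concurrent access.  The name
 * "nrequests" is hypothetical.
 *
 *     static pg_atomic_uint32 nrequests;   typically in shared memory
 *
 *     pg_atomic_init_u32(&nrequests, 0);   single-threaded initialization
 */
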
/*
 * pg_atomic_read_u32 - unlocked read from atomic variable.
 *
 * The read is guaranteed to return a value as it has been written by this or
 * another process at some point in the past. There's however no cache
 * coherency interaction guaranteeing the value hasn't since been written to
 * again.
 *
 * No barrier semantics.
 */
static inline uint32
pg_atomic_read_u32(volatile pg_atomic_uint32 *ptr)
{
    AssertPointerAlignment(ptr, 4);
    return pg_atomic_read_u32_impl(ptr);
}

/*
 * pg_atomic_read_membarrier_u32 - read with barrier semantics.
 *
 * This read is guaranteed to return the current value, provided that the value
 * is only ever updated via operations with barrier semantics, such as
 * pg_atomic_compare_exchange_u32() and pg_atomic_write_membarrier_u32().
 * While this may be less performant than pg_atomic_read_u32(), it may be
 * easier to reason about correctness with this function in less performance-
 * sensitive code.
 *
 * Full barrier semantics.
 */
static inline uint32
pg_atomic_read_membarrier_u32(volatile pg_atomic_uint32 *ptr)
{
    AssertPointerAlignment(ptr, 4);

    return pg_atomic_read_membarrier_u32_impl(ptr);
}

/*
 * pg_atomic_write_u32 - write to atomic variable.
 *
 * The write is guaranteed to succeed as a whole, i.e. it's not possible to
 * observe a partial write for any reader. Note that this correctly interacts
 * with pg_atomic_compare_exchange_u32, in contrast to
 * pg_atomic_unlocked_write_u32().
 *
 * No barrier semantics.
 */
static inline void
pg_atomic_write_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
{
    AssertPointerAlignment(ptr, 4);

    pg_atomic_write_u32_impl(ptr, val);
}

/*
 * pg_atomic_unlocked_write_u32 - unlocked write to atomic variable.
 *
 * The write is guaranteed to succeed as a whole, i.e. it's not possible to
 * observe a partial write for any reader. But note that writing this way is
 * not guaranteed to correctly interact with read-modify-write operations like
 * pg_atomic_compare_exchange_u32. This should only be used in cases where
 * minor performance regressions due to atomics emulation are unacceptable.
 *
 * No barrier semantics.
 */
static inline void
pg_atomic_unlocked_write_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
{
    AssertPointerAlignment(ptr, 4);

    pg_atomic_unlocked_write_u32_impl(ptr, val);
}

/*
 * pg_atomic_write_membarrier_u32 - write with barrier semantics.
 *
 * The write is guaranteed to succeed as a whole, i.e., it's not possible to
 * observe a partial write for any reader. Note that this correctly interacts
 * with both pg_atomic_compare_exchange_u32() and
 * pg_atomic_read_membarrier_u32(). While this may be less performant than
 * pg_atomic_write_u32(), it may be easier to reason about correctness with
 * this function in less performance-sensitive code.
 *
 * Full barrier semantics.
 */
static inline void
pg_atomic_write_membarrier_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
{
    AssertPointerAlignment(ptr, 4);

    pg_atomic_write_membarrier_u32_impl(ptr, val);
}

/*
 * pg_atomic_exchange_u32 - exchange newval with current value
 *
 * Returns the old value of 'ptr' before the swap.
 *
 * Full barrier semantics.
 */
static inline uint32
pg_atomic_exchange_u32(volatile pg_atomic_uint32 *ptr, uint32 newval)
{
    AssertPointerAlignment(ptr, 4);

    return pg_atomic_exchange_u32_impl(ptr, newval);
}

/*
 * pg_atomic_compare_exchange_u32 - CAS operation
 *
 * Atomically compare the current value of ptr with *expected and store newval
 * iff ptr and *expected have the same value. The current value of *ptr will
 * always be stored in *expected.
 *
 * Return true if values have been exchanged, false otherwise.
 *
 * Full barrier semantics.
 */
static inline bool
pg_atomic_compare_exchange_u32(volatile pg_atomic_uint32 *ptr,
                               uint32 *expected, uint32 newval)
{
    AssertPointerAlignment(ptr, 4);
    AssertPointerAlignment(expected, 4);

    return pg_atomic_compare_exchange_u32_impl(ptr, expected, newval);
}

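/*
 * Illustrative sketch (not part of the original header): the usual CAS
 * retry loop, here computing an atomic maximum.  Because a failed CAS
 * rewrites the current value into "old", the loop can simply retry.  The
 * variable names "v" and "newmax" are hypothetical.
 *
 *     uint32 old = pg_atomic_read_u32(&v);
 *
 *     while (old < newmax &&
 *            !pg_atomic_compare_exchange_u32(&v, &old, newmax))
 *         ;
 */
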
/*
 * pg_atomic_fetch_add_u32 - atomically add to variable
 *
 * Returns the value of ptr before the arithmetic operation.
 *
 * Full barrier semantics.
 */
static inline uint32
pg_atomic_fetch_add_u32(volatile pg_atomic_uint32 *ptr, int32 add_)
{
    AssertPointerAlignment(ptr, 4);
    return pg_atomic_fetch_add_u32_impl(ptr, add_);
}

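/*
 * Illustrative sketch (not part of the original header): because the value
 * before the addition is returned, fetch-add can hand out unique indexes to
 * concurrent callers.  "next_slot" and "myslot" are hypothetical.
 *
 *     myslot = pg_atomic_fetch_add_u32(&next_slot, 1);
 *                                      each caller gets a distinct index
 */
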
/*
 * pg_atomic_fetch_sub_u32 - atomically subtract from variable
 *
 * Returns the value of ptr before the arithmetic operation. Note that sub_
 * may not be INT_MIN due to platform limitations.
 *
 * Full barrier semantics.
 */
static inline uint32
pg_atomic_fetch_sub_u32(volatile pg_atomic_uint32 *ptr, int32 sub_)
{
    AssertPointerAlignment(ptr, 4);
    Assert(sub_ != INT_MIN);
    return pg_atomic_fetch_sub_u32_impl(ptr, sub_);
}

/*
 * pg_atomic_fetch_and_u32 - atomically bit-and and_ with variable
 *
 * Returns the value of ptr before the arithmetic operation.
 *
 * Full barrier semantics.
 */
static inline uint32
pg_atomic_fetch_and_u32(volatile pg_atomic_uint32 *ptr, uint32 and_)
{
    AssertPointerAlignment(ptr, 4);
    return pg_atomic_fetch_and_u32_impl(ptr, and_);
}

/*
 * pg_atomic_fetch_or_u32 - atomically bit-or or_ with variable
 *
 * Returns the value of ptr before the arithmetic operation.
 *
 * Full barrier semantics.
 */
static inline uint32
pg_atomic_fetch_or_u32(volatile pg_atomic_uint32 *ptr, uint32 or_)
{
    AssertPointerAlignment(ptr, 4);
    return pg_atomic_fetch_or_u32_impl(ptr, or_);
}

/*
 * pg_atomic_add_fetch_u32 - atomically add to variable
 *
 * Returns the value of ptr after the arithmetic operation.
 *
 * Full barrier semantics.
 */
static inline uint32
pg_atomic_add_fetch_u32(volatile pg_atomic_uint32 *ptr, int32 add_)
{
    AssertPointerAlignment(ptr, 4);
    return pg_atomic_add_fetch_u32_impl(ptr, add_);
}

/*
 * pg_atomic_sub_fetch_u32 - atomically subtract from variable
 *
 * Returns the value of ptr after the arithmetic operation. Note that sub_ may
 * not be INT_MIN due to platform limitations.
 *
 * Full barrier semantics.
 */
static inline uint32
pg_atomic_sub_fetch_u32(volatile pg_atomic_uint32 *ptr, int32 sub_)
{
    AssertPointerAlignment(ptr, 4);
    Assert(sub_ != INT_MIN);
    return pg_atomic_sub_fetch_u32_impl(ptr, sub_);
}

/* ----
 * The 64-bit operations have the same semantics as their 32-bit counterparts
 * if they are available. Check the corresponding 32-bit function for
 * documentation.
 * ----
 */
static inline void
pg_atomic_init_u64(volatile pg_atomic_uint64 *ptr, uint64 val)
{
    /*
     * Can't necessarily enforce alignment - and don't need it - when using
     * the spinlock based fallback implementation. Therefore only assert when
     * not using it.
     */
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
    AssertPointerAlignment(ptr, 8);
#endif
    pg_atomic_init_u64_impl(ptr, val);
}

static inline uint64
pg_atomic_read_u64(volatile pg_atomic_uint64 *ptr)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
    AssertPointerAlignment(ptr, 8);
#endif
    return pg_atomic_read_u64_impl(ptr);
}

static inline uint64
pg_atomic_read_membarrier_u64(volatile pg_atomic_uint64 *ptr)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
    AssertPointerAlignment(ptr, 8);
#endif
    return pg_atomic_read_membarrier_u64_impl(ptr);
}

static inline void
pg_atomic_write_u64(volatile pg_atomic_uint64 *ptr, uint64 val)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
    AssertPointerAlignment(ptr, 8);
#endif
    pg_atomic_write_u64_impl(ptr, val);
}

static inline void
pg_atomic_write_membarrier_u64(volatile pg_atomic_uint64 *ptr, uint64 val)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
    AssertPointerAlignment(ptr, 8);
#endif
    pg_atomic_write_membarrier_u64_impl(ptr, val);
}

static inline uint64
pg_atomic_exchange_u64(volatile pg_atomic_uint64 *ptr, uint64 newval)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
    AssertPointerAlignment(ptr, 8);
#endif
    return pg_atomic_exchange_u64_impl(ptr, newval);
}

static inline bool
pg_atomic_compare_exchange_u64(volatile pg_atomic_uint64 *ptr,
                               uint64 *expected, uint64 newval)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
    AssertPointerAlignment(ptr, 8);
#endif
    return pg_atomic_compare_exchange_u64_impl(ptr, expected, newval);
}

static inline uint64
pg_atomic_fetch_add_u64(volatile pg_atomic_uint64 *ptr, int64 add_)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
    AssertPointerAlignment(ptr, 8);
#endif
    return pg_atomic_fetch_add_u64_impl(ptr, add_);
}

static inline uint64
pg_atomic_fetch_sub_u64(volatile pg_atomic_uint64 *ptr, int64 sub_)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
    AssertPointerAlignment(ptr, 8);
#endif
    Assert(sub_ != PG_INT64_MIN);
    return pg_atomic_fetch_sub_u64_impl(ptr, sub_);
}

static inline uint64
pg_atomic_fetch_and_u64(volatile pg_atomic_uint64 *ptr, uint64 and_)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
    AssertPointerAlignment(ptr, 8);
#endif
    return pg_atomic_fetch_and_u64_impl(ptr, and_);
}

static inline uint64
pg_atomic_fetch_or_u64(volatile pg_atomic_uint64 *ptr, uint64 or_)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
    AssertPointerAlignment(ptr, 8);
#endif
    return pg_atomic_fetch_or_u64_impl(ptr, or_);
}

static inline uint64
pg_atomic_add_fetch_u64(volatile pg_atomic_uint64 *ptr, int64 add_)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
    AssertPointerAlignment(ptr, 8);
#endif
    return pg_atomic_add_fetch_u64_impl(ptr, add_);
}

static inline uint64
pg_atomic_sub_fetch_u64(volatile pg_atomic_uint64 *ptr, int64 sub_)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
    AssertPointerAlignment(ptr, 8);
#endif
    Assert(sub_ != PG_INT64_MIN);
    return pg_atomic_sub_fetch_u64_impl(ptr, sub_);
}

/*
 * Monotonically advance the given variable using only atomic operations until
 * it's at least the target value. Returns the latest value observed, which
 * may or may not be the target value.
 *
 * Full barrier semantics (even when the value is unchanged).
 */
static inline uint64
pg_atomic_monotonic_advance_u64(volatile pg_atomic_uint64 *ptr, uint64 target)
{
    uint64 currval;

#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
    AssertPointerAlignment(ptr, 8);
#endif

    currval = pg_atomic_read_u64_impl(ptr);
    if (currval >= target)
    {
        pg_memory_barrier();
        return currval;
    }

    while (currval < target)
    {
        if (pg_atomic_compare_exchange_u64(ptr, &currval, target))
            return target;
    }

    return currval;
}

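/*
 * Illustrative sketch (not part of the original header): advancing a shared
 * high-water mark without ever moving it backwards.  "shared_maxpos" and
 * "mypos" are hypothetical.
 *
 *     observed = pg_atomic_monotonic_advance_u64(&shared_maxpos, mypos);
 *     if (observed > mypos)
 *         ... some other backend already advanced past us ...
 */
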
#undef INSIDE_ATOMICS_H

#endif                          /* ATOMICS_H */