/*-------------------------------------------------------------------------
 *
 * atomics.h
 *	  Atomic operations.
 *
 * Hardware and compiler dependent functions for manipulating memory
 * atomically and dealing with cache coherency. Used to implement locking
 * facilities and lockless algorithms/data structures.
 *
 * To bring up postgres on a platform/compiler at the very least
 * implementations for the following operations should be provided:
 * * pg_compiler_barrier(), pg_write_barrier(), pg_read_barrier()
 * * pg_atomic_compare_exchange_u32(), pg_atomic_fetch_add_u32()
 * * pg_atomic_test_set_flag(), pg_atomic_init_flag(), pg_atomic_clear_flag()
 * * PG_HAVE_8BYTE_SINGLE_COPY_ATOMICITY should be defined if appropriate.
 *
 * There exist generic, hardware independent, implementations for several
 * compilers which might be sufficient, although possibly not optimal, for a
 * new platform. If no such generic implementation is available spinlocks (or
 * even OS provided semaphores) will be used to implement the API.
 *
 * Implement _u64 atomics if and only if your platform can use them
 * efficiently (and obviously correctly).
 *
 * Use higher level functionality (lwlocks, spinlocks, heavyweight locks)
 * whenever possible. Writing correct code using these facilities is hard.
 *
 * For an introduction to using memory barriers within the PostgreSQL backend,
 * see src/backend/storage/lmgr/README.barrier
 *
 * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * src/include/port/atomics.h
 *
 *-------------------------------------------------------------------------
 */
#ifndef ATOMICS_H
#define ATOMICS_H

#ifdef FRONTEND
#error "atomics.h may not be included from frontend code"
#endif

#define INSIDE_ATOMICS_H

#include <limits.h>

/*
 * First a set of architecture specific files is included.
 *
 * These files can provide the full set of atomics or can do pretty much
 * nothing if all the compilers commonly used on these platforms provide
 * usable generics.
 *
 * Don't add an inline-assembly implementation of the actual atomic
 * operations if all the compilers commonly used on your platform provide
 * intrinsics. Intrinsics are much easier to understand and potentially
 * support more architectures.
 *
 * It will often make sense to define memory barrier semantics here, since
 * e.g. generic compiler intrinsics for x86 memory barriers can't know that
 * postgres doesn't need x86 read/write barriers to do anything more than a
 * compiler barrier.
 */
#if defined(__arm__) || defined(__arm) || defined(__aarch64__)
#include "port/atomics/arch-arm.h"
#elif defined(__i386__) || defined(__i386) || defined(__x86_64__)
#include "port/atomics/arch-x86.h"
#elif defined(__ppc__) || defined(__powerpc__) || defined(__ppc64__) || defined(__powerpc64__)
#include "port/atomics/arch-ppc.h"
#elif defined(__hppa) || defined(__hppa__)
#include "port/atomics/arch-hppa.h"
#endif

/*
 * Compiler specific, but architecture independent implementations.
 *
 * Provide architecture independent implementations of the atomic
 * facilities. At the very least compiler barriers should be provided, but a
 * full implementation of
 * * pg_compiler_barrier(), pg_write_barrier(), pg_read_barrier()
 * * pg_atomic_compare_exchange_u32(), pg_atomic_fetch_add_u32()
 * using compiler intrinsics is a good idea.
 */
/*
 * gcc or compatible, including clang and icc.
 */
#if defined(__GNUC__) || defined(__INTEL_COMPILER)
#include "port/atomics/generic-gcc.h"
#elif defined(_MSC_VER)
#include "port/atomics/generic-msvc.h"
#elif defined(__SUNPRO_C) && !defined(__GNUC__)
#include "port/atomics/generic-sunpro.h"
#else
/*
 * Unsupported compiler, we'll likely use slower fallbacks... At least
 * compiler barriers should really be provided.
 */
#endif

/*
 * Provide a full fallback of the pg_*_barrier(), pg_atomic_*_flag and
 * pg_atomic_* APIs for platforms without sufficient spinlock and/or atomics
 * support. In the case of spinlock backed atomics the emulation is expected
 * to be efficient, although less so than native atomics support.
 */
#include "port/atomics/fallback.h"

/*
 * Provide additional operations using supported infrastructure. These are
 * expected to be efficient if the underlying atomic operations are efficient.
 */
#include "port/atomics/generic.h"

/*
 * pg_compiler_barrier - prevent the compiler from moving code across the
 * barrier
 *
 * A compiler barrier need not (and preferably should not) emit any actual
 * machine code, but must act as an optimization fence: the compiler must not
 * reorder loads or stores to main memory around the barrier. However, the
 * CPU may still reorder loads or stores at runtime, if the architecture's
 * memory model permits this.
 */
#define pg_compiler_barrier() pg_compiler_barrier_impl()
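
/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * a compiler barrier keeps the compiler from caching a value across the
 * fence, so it is reloaded on every iteration. The variable
 * "flag_set_elsewhere" is hypothetical and exists only for this example.
 *
 *		extern int	flag_set_elsewhere;
 *
 *		while (!flag_set_elsewhere)
 *		{
 *			pg_compiler_barrier();	-- forces a fresh load of the flag,
 *									-- but does not order CPU accesses
 *		}
 */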

/*
 * pg_memory_barrier - prevent the CPU from reordering memory access
 *
 * A memory barrier must act as a compiler barrier, and in addition must
 * guarantee that all loads and stores issued prior to the barrier are
 * completed before any loads or stores issued after the barrier. Unless
 * loads and stores are totally ordered (which is not the case on most
 * architectures) this requires issuing some sort of memory fencing
 * instruction.
 */
#define pg_memory_barrier() pg_memory_barrier_impl()
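
/*
 * Illustrative sketch (editor's addition): the store-then-load pattern is
 * the classic case that needs a full barrier, since neither a read nor a
 * write barrier orders a store against a later load. "my_intent" and
 * "their_intent" are hypothetical variables.
 *
 *		pg_atomic_write_u32(&my_intent, 1);
 *		pg_memory_barrier();	-- order our store before the following load
 *		if (pg_atomic_read_u32(&their_intent) == 0)
 *			... safe to proceed, the other side will see my_intent ...
 */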

/*
 * pg_(read|write)_barrier - prevent the CPU from reordering memory access
 *
 * A read barrier must act as a compiler barrier, and in addition must
 * guarantee that any loads issued prior to the barrier are completed before
 * any loads issued after the barrier. Similarly, a write barrier acts
 * as a compiler barrier, and also orders stores. Read and write barriers
 * are thus weaker than a full memory barrier, but stronger than a compiler
 * barrier. In practice, on machines with strong memory ordering, read and
 * write barriers may require nothing more than a compiler barrier.
 */
#define pg_read_barrier() pg_read_barrier_impl()
#define pg_write_barrier() pg_write_barrier_impl()
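
/*
 * Illustrative sketch (editor's addition): read and write barriers are used
 * in pairs, per src/backend/storage/lmgr/README.barrier. "buf" and "ready"
 * are hypothetical.
 *
 * Producer:
 *		buf[0] = msg;			-- fill in the data first
 *		pg_write_barrier();		-- order data stores before the flag store
 *		ready = true;
 *
 * Consumer:
 *		if (ready)
 *		{
 *			pg_read_barrier();	-- order the flag load before data loads
 *			consume(buf[0]);
 *		}
 */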

/*
 * Spinloop delay - Allow CPU to relax in busy loops
 */
#define pg_spin_delay() pg_spin_delay_impl()
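
/*
 * Illustrative sketch (editor's addition): pg_spin_delay() belongs inside
 * tight retry loops, letting the CPU (e.g. a hyperthread sibling) relax
 * while waiting. "lockword" is hypothetical.
 *
 *		while (pg_atomic_read_u32(&lockword) != 0)
 *			pg_spin_delay();
 */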

/*
 * pg_atomic_init_flag - initialize atomic flag.
 *
 * No barrier semantics.
 */
static inline void
pg_atomic_init_flag(volatile pg_atomic_flag *ptr)
{
    pg_atomic_init_flag_impl(ptr);
}

/*
 * pg_atomic_test_set_flag - TAS()
 *
 * Returns true if the flag has successfully been set, false otherwise.
 *
 * Acquire (including read barrier) semantics.
 */
static inline bool
pg_atomic_test_set_flag(volatile pg_atomic_flag *ptr)
{
    return pg_atomic_test_set_flag_impl(ptr);
}

/*
 * pg_atomic_unlocked_test_flag - Check if the lock is free
 *
 * Returns true if the flag currently is not set, false otherwise.
 *
 * No barrier semantics.
 */
static inline bool
pg_atomic_unlocked_test_flag(volatile pg_atomic_flag *ptr)
{
    return pg_atomic_unlocked_test_flag_impl(ptr);
}

/*
 * pg_atomic_clear_flag - release lock set by TAS()
 *
 * Release (including write barrier) semantics.
 */
static inline void
pg_atomic_clear_flag(volatile pg_atomic_flag *ptr)
{
    pg_atomic_clear_flag_impl(ptr);
}
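
/*
 * Illustrative sketch (editor's addition): together the flag operations
 * above form a minimal TAS lock. "mylock" is hypothetical; real code should
 * prefer spinlocks or LWLocks, as the header comment advises.
 *
 *		static pg_atomic_flag mylock;
 *
 *		pg_atomic_init_flag(&mylock);		-- once, before any concurrency
 *
 *		while (!pg_atomic_test_set_flag(&mylock))
 *			pg_spin_delay();				-- spin until we acquire it
 *		... critical section ...
 *		pg_atomic_clear_flag(&mylock);		-- release
 */
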
/*
 * pg_atomic_init_u32 - initialize atomic variable
 *
 * Has to be done before any concurrent usage.
 *
 * No barrier semantics.
 */
static inline void
pg_atomic_init_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
{
    AssertPointerAlignment(ptr, 4);

    pg_atomic_init_u32_impl(ptr, val);
}

/*
 * pg_atomic_read_u32 - unlocked read from atomic variable.
 *
 * The read is guaranteed to return a value that has been written by this or
 * another process at some point in the past. There's however no cache
 * coherency interaction guaranteeing the value hasn't since been written to
 * again.
 *
 * No barrier semantics.
 */
static inline uint32
pg_atomic_read_u32(volatile pg_atomic_uint32 *ptr)
{
    AssertPointerAlignment(ptr, 4);
    return pg_atomic_read_u32_impl(ptr);
}

/*
 * pg_atomic_read_membarrier_u32 - read with barrier semantics.
 *
 * This read is guaranteed to return the current value, provided that the value
 * is only ever updated via operations with barrier semantics, such as
 * pg_atomic_compare_exchange_u32() and pg_atomic_write_membarrier_u32().
 * While this may be less performant than pg_atomic_read_u32(), it may be
 * easier to reason about correctness with this function in less performance-
 * sensitive code.
 *
 * Full barrier semantics.
 */
static inline uint32
pg_atomic_read_membarrier_u32(volatile pg_atomic_uint32 *ptr)
{
    AssertPointerAlignment(ptr, 4);

    return pg_atomic_read_membarrier_u32_impl(ptr);
}

/*
 * pg_atomic_write_u32 - write to atomic variable.
 *
 * The write is guaranteed to succeed as a whole, i.e. it's not possible to
 * observe a partial write for any reader. Note that this correctly interacts
 * with pg_atomic_compare_exchange_u32, in contrast to
 * pg_atomic_unlocked_write_u32().
 *
 * No barrier semantics.
 */
static inline void
pg_atomic_write_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
{
    AssertPointerAlignment(ptr, 4);

    pg_atomic_write_u32_impl(ptr, val);
}

/*
 * pg_atomic_unlocked_write_u32 - unlocked write to atomic variable.
 *
 * The write is guaranteed to succeed as a whole, i.e. it's not possible to
 * observe a partial write for any reader. But note that writing this way is
 * not guaranteed to correctly interact with read-modify-write operations like
 * pg_atomic_compare_exchange_u32. This should only be used in cases where
 * minor performance regressions due to atomics emulation are unacceptable.
 *
 * No barrier semantics.
 */
static inline void
pg_atomic_unlocked_write_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
{
    AssertPointerAlignment(ptr, 4);

    pg_atomic_unlocked_write_u32_impl(ptr, val);
}

/*
 * pg_atomic_write_membarrier_u32 - write with barrier semantics.
 *
 * The write is guaranteed to succeed as a whole, i.e., it's not possible to
 * observe a partial write for any reader. Note that this correctly interacts
 * with both pg_atomic_compare_exchange_u32() and
 * pg_atomic_read_membarrier_u32(). While this may be less performant than
 * pg_atomic_write_u32(), it may be easier to reason about correctness with
 * this function in less performance-sensitive code.
 *
 * Full barrier semantics.
 */
static inline void
pg_atomic_write_membarrier_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
{
    AssertPointerAlignment(ptr, 4);

    pg_atomic_write_membarrier_u32_impl(ptr, val);
}
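
/*
 * Illustrative sketch (editor's addition): when a variable is only ever
 * updated through barrier-semantics operations, a reader may rely on
 * pg_atomic_read_membarrier_u32() seeing the current value. "state" and
 * "NEW_STATE" are hypothetical.
 *
 * Updater:
 *		pg_atomic_write_membarrier_u32(&state, NEW_STATE);
 *
 * Reader:
 *		if (pg_atomic_read_membarrier_u32(&state) == NEW_STATE)
 *			... everything written before the update is visible here ...
 */
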
/*
 * pg_atomic_exchange_u32 - exchange newval with current value
 *
 * Returns the old value of 'ptr' before the swap.
 *
 * Full barrier semantics.
 */
static inline uint32
pg_atomic_exchange_u32(volatile pg_atomic_uint32 *ptr, uint32 newval)
{
    AssertPointerAlignment(ptr, 4);

    return pg_atomic_exchange_u32_impl(ptr, newval);
}
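
/*
 * Illustrative sketch (editor's addition): exchange atomically takes
 * ownership of a value, e.g. draining a set of pending bits. "pending" is
 * hypothetical.
 *
 *		uint32	mine = pg_atomic_exchange_u32(&pending, 0);
 *		-- 'mine' holds every bit that was set; 'pending' is now zero
 */
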
/*
 * pg_atomic_compare_exchange_u32 - CAS operation
 *
 * Atomically compare the current value of ptr with *expected and store newval
 * iff ptr and *expected have the same value. The current value of *ptr will
 * always be stored in *expected.
 *
 * Return true if values have been exchanged, false otherwise.
 *
 * Full barrier semantics.
 */
static inline bool
pg_atomic_compare_exchange_u32(volatile pg_atomic_uint32 *ptr,
                               uint32 *expected, uint32 newval)
{
    AssertPointerAlignment(ptr, 4);
    AssertPointerAlignment(expected, 4);

    return pg_atomic_compare_exchange_u32_impl(ptr, expected, newval);
}
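
/*
 * Illustrative sketch (editor's addition): the canonical CAS retry loop.
 * Because *expected is overwritten with the current value on failure, no
 * explicit re-read is needed. "var" and "FLAG_BIT" are hypothetical.
 *
 *		uint32	old = pg_atomic_read_u32(&var);
 *
 *		while (!pg_atomic_compare_exchange_u32(&var, &old, old | FLAG_BIT))
 *		{
 *			-- 'old' now holds the value that beat us; just retry
 *		}
 */
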
/*
 * pg_atomic_fetch_add_u32 - atomically add to variable
 *
 * Returns the value of ptr before the arithmetic operation.
 *
 * Full barrier semantics.
 */
static inline uint32
pg_atomic_fetch_add_u32(volatile pg_atomic_uint32 *ptr, int32 add_)
{
    AssertPointerAlignment(ptr, 4);
    return pg_atomic_fetch_add_u32_impl(ptr, add_);
}
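
/*
 * Illustrative sketch (editor's addition): a shared counter where the
 * pre-increment value assigns a unique slot number to each caller.
 * "counter" is hypothetical.
 *
 *		uint32	myslot = pg_atomic_fetch_add_u32(&counter, 1);
 */
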
/*
 * pg_atomic_fetch_sub_u32 - atomically subtract from variable
 *
 * Returns the value of ptr before the arithmetic operation. Note that sub_
 * may not be INT_MIN due to platform limitations.
 *
 * Full barrier semantics.
 */
static inline uint32
pg_atomic_fetch_sub_u32(volatile pg_atomic_uint32 *ptr, int32 sub_)
{
    AssertPointerAlignment(ptr, 4);
    Assert(sub_ != INT_MIN);
    return pg_atomic_fetch_sub_u32_impl(ptr, sub_);
}

/*
 * pg_atomic_fetch_and_u32 - atomically bit-and and_ with variable
 *
 * Returns the value of ptr before the bitwise operation.
 *
 * Full barrier semantics.
 */
static inline uint32
pg_atomic_fetch_and_u32(volatile pg_atomic_uint32 *ptr, uint32 and_)
{
    AssertPointerAlignment(ptr, 4);
    return pg_atomic_fetch_and_u32_impl(ptr, and_);
}

/*
 * pg_atomic_fetch_or_u32 - atomically bit-or or_ with variable
 *
 * Returns the value of ptr before the bitwise operation.
 *
 * Full barrier semantics.
 */
static inline uint32
pg_atomic_fetch_or_u32(volatile pg_atomic_uint32 *ptr, uint32 or_)
{
    AssertPointerAlignment(ptr, 4);
    return pg_atomic_fetch_or_u32_impl(ptr, or_);
}
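
/*
 * Illustrative sketch (editor's addition): fetch_and/fetch_or set and clear
 * bits in a shared mask; the returned old value shows whether the bit
 * actually changed. "flags" and "MY_BIT" are hypothetical.
 *
 *		uint32	before = pg_atomic_fetch_or_u32(&flags, MY_BIT);
 *
 *		if ((before & MY_BIT) == 0)
 *			... we are the one who set it ...
 *
 *		pg_atomic_fetch_and_u32(&flags, ~MY_BIT);	-- clear it again
 */
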
/*
 * pg_atomic_add_fetch_u32 - atomically add to variable
 *
 * Returns the value of ptr after the arithmetic operation.
 *
 * Full barrier semantics.
 */
static inline uint32
pg_atomic_add_fetch_u32(volatile pg_atomic_uint32 *ptr, int32 add_)
{
    AssertPointerAlignment(ptr, 4);
    return pg_atomic_add_fetch_u32_impl(ptr, add_);
}

/*
 * pg_atomic_sub_fetch_u32 - atomically subtract from variable
 *
 * Returns the value of ptr after the arithmetic operation. Note that sub_ may
 * not be INT_MIN due to platform limitations.
 *
 * Full barrier semantics.
 */
static inline uint32
pg_atomic_sub_fetch_u32(volatile pg_atomic_uint32 *ptr, int32 sub_)
{
    AssertPointerAlignment(ptr, 4);
    Assert(sub_ != INT_MIN);
    return pg_atomic_sub_fetch_u32_impl(ptr, sub_);
}
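
/*
 * Illustrative sketch (editor's addition): the *_fetch variants return the
 * post-operation value, which suits reference counting. "refcount" is
 * hypothetical.
 *
 *		pg_atomic_add_fetch_u32(&refcount, 1);		-- take a reference
 *
 *		if (pg_atomic_sub_fetch_u32(&refcount, 1) == 0)
 *			... last reference dropped, safe to clean up ...
 */
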
/* ----
 * The 64 bit operations have the same semantics as their 32bit counterparts
 * if they are available. Check the corresponding 32bit function for
 * documentation.
 * ----
 */
static inline void
pg_atomic_init_u64(volatile pg_atomic_uint64 *ptr, uint64 val)
{
    /*
     * Can't necessarily enforce alignment - and don't need it - when using
     * the spinlock based fallback implementation. Therefore only assert when
     * not using it.
     */
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
    AssertPointerAlignment(ptr, 8);
#endif
    pg_atomic_init_u64_impl(ptr, val);
}

static inline uint64
pg_atomic_read_u64(volatile pg_atomic_uint64 *ptr)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
    AssertPointerAlignment(ptr, 8);
#endif
    return pg_atomic_read_u64_impl(ptr);
}

static inline uint64
pg_atomic_read_membarrier_u64(volatile pg_atomic_uint64 *ptr)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
    AssertPointerAlignment(ptr, 8);
#endif
    return pg_atomic_read_membarrier_u64_impl(ptr);
}

static inline void
pg_atomic_write_u64(volatile pg_atomic_uint64 *ptr, uint64 val)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
    AssertPointerAlignment(ptr, 8);
#endif
    pg_atomic_write_u64_impl(ptr, val);
}

static inline void
pg_atomic_write_membarrier_u64(volatile pg_atomic_uint64 *ptr, uint64 val)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
    AssertPointerAlignment(ptr, 8);
#endif
    pg_atomic_write_membarrier_u64_impl(ptr, val);
}

static inline uint64
pg_atomic_exchange_u64(volatile pg_atomic_uint64 *ptr, uint64 newval)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
    AssertPointerAlignment(ptr, 8);
#endif
    return pg_atomic_exchange_u64_impl(ptr, newval);
}

static inline bool
pg_atomic_compare_exchange_u64(volatile pg_atomic_uint64 *ptr,
                               uint64 *expected, uint64 newval)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
    AssertPointerAlignment(ptr, 8);
    AssertPointerAlignment(expected, 8);
#endif
    return pg_atomic_compare_exchange_u64_impl(ptr, expected, newval);
}

static inline uint64
pg_atomic_fetch_add_u64(volatile pg_atomic_uint64 *ptr, int64 add_)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
    AssertPointerAlignment(ptr, 8);
#endif
    return pg_atomic_fetch_add_u64_impl(ptr, add_);
}

static inline uint64
pg_atomic_fetch_sub_u64(volatile pg_atomic_uint64 *ptr, int64 sub_)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
    AssertPointerAlignment(ptr, 8);
#endif
    Assert(sub_ != PG_INT64_MIN);
    return pg_atomic_fetch_sub_u64_impl(ptr, sub_);
}

static inline uint64
pg_atomic_fetch_and_u64(volatile pg_atomic_uint64 *ptr, uint64 and_)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
    AssertPointerAlignment(ptr, 8);
#endif
    return pg_atomic_fetch_and_u64_impl(ptr, and_);
}

static inline uint64
pg_atomic_fetch_or_u64(volatile pg_atomic_uint64 *ptr, uint64 or_)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
    AssertPointerAlignment(ptr, 8);
#endif
    return pg_atomic_fetch_or_u64_impl(ptr, or_);
}

static inline uint64
pg_atomic_add_fetch_u64(volatile pg_atomic_uint64 *ptr, int64 add_)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
    AssertPointerAlignment(ptr, 8);
#endif
    return pg_atomic_add_fetch_u64_impl(ptr, add_);
}

static inline uint64
pg_atomic_sub_fetch_u64(volatile pg_atomic_uint64 *ptr, int64 sub_)
{
#ifndef PG_HAVE_ATOMIC_U64_SIMULATION
    AssertPointerAlignment(ptr, 8);
#endif
    Assert(sub_ != PG_INT64_MIN);
    return pg_atomic_sub_fetch_u64_impl(ptr, sub_);
}
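
/*
 * Illustrative sketch (editor's addition): the 64-bit variants behave like
 * their 32-bit counterparts, e.g. for a monotonically increasing byte
 * counter too large for 32 bits. "total_bytes" is hypothetical.
 *
 *		pg_atomic_fetch_add_u64(&total_bytes, nwritten);
 *		uint64	so_far = pg_atomic_read_u64(&total_bytes);
 */
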
#undef INSIDE_ATOMICS_H

#endif							/* ATOMICS_H */