PostgreSQL Source Code  git master
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros
fallback.h
Go to the documentation of this file.
/*-------------------------------------------------------------------------
 *
 * fallback.h
 *	  Fallback for platforms without spinlock and/or atomics support. Slower
 *	  than native atomics support, but not unusably slow.
 *
 * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * src/include/port/atomics/fallback.h
 *
 *-------------------------------------------------------------------------
 */
14 
15 /* intentionally no include guards, should only be included by atomics.h */
16 #ifndef INSIDE_ATOMICS_H
17 # error "should be included via atomics.h"
18 #endif
19 
#ifndef pg_memory_barrier_impl
/*
 * If we have no memory barrier implementation for this architecture, we
 * fall back to acquiring and releasing a spinlock. This might, in turn,
 * fall back to the semaphore-based spinlock implementation, which will be
 * amazingly slow.
 *
 * It's not self-evident that every possible legal implementation of a
 * spinlock acquire-and-release would be equivalent to a full memory barrier.
 * For example, I'm not sure that Itanium's acq and rel add up to a full
 * fence.  But all of our actual implementations seem OK in this regard.
 */
#define PG_HAVE_MEMORY_BARRIER_EMULATION

/* out-of-line: the function call itself doubles as a compiler barrier */
extern void pg_spinlock_barrier(void);
#define pg_memory_barrier_impl pg_spinlock_barrier
#endif
37 
#ifndef pg_compiler_barrier_impl
/*
 * If the compiler/arch combination does not provide compiler barriers,
 * provide a fallback.  The fallback simply consists of a function call into
 * an externally defined function.  That should guarantee compiler barrier
 * semantics except for compilers that do inter translation unit/global
 * optimization - those better provide an actual compiler barrier.
 *
 * A native compiler barrier for sure is a lot faster than this...
 */
#define PG_HAVE_COMPILER_BARRIER_EMULATION
extern void pg_extern_compiler_barrier(void);
#define pg_compiler_barrier_impl pg_extern_compiler_barrier
#endif
52 
53 
/*
 * If we have no atomics implementation for this platform, fall back to
 * providing the atomics API using a spinlock to protect the internal state.
 * Possibly the spinlock implementation uses semaphores internally...
 *
 * We have to be a bit careful here, as it's not guaranteed that atomic
 * variables are mapped to the same address in every process (e.g. dynamic
 * shared memory segments). We can't just hash the address and use that to map
 * to a spinlock. Instead assign a spinlock on initialization of the atomic
 * variable.
 */
#if !defined(PG_HAVE_ATOMIC_FLAG_SUPPORT) && !defined(PG_HAVE_ATOMIC_U32_SUPPORT)

#define PG_HAVE_ATOMIC_FLAG_SIMULATION
#define PG_HAVE_ATOMIC_FLAG_SUPPORT

typedef struct pg_atomic_flag
{
	/*
	 * To avoid circular includes we can't use s_lock as a type here. Instead
	 * just reserve enough space for all spinlock types. Some platforms would
	 * be content with just one byte instead of 4, but that's not too much
	 * waste.
	 */
#if defined(__hppa) || defined(__hppa__)	/* HP PA-RISC, GCC and HP compilers */
	int			sema[4];
#else
	int			sema;
#endif
} pg_atomic_flag;

#endif							/* PG_HAVE_ATOMIC_FLAG_SUPPORT */
86 
87 #if !defined(PG_HAVE_ATOMIC_U32_SUPPORT)
88 
89 #define PG_HAVE_ATOMIC_U32_SIMULATION
90 
91 #define PG_HAVE_ATOMIC_U32_SUPPORT
92 typedef struct pg_atomic_uint32
93 {
94  /* Check pg_atomic_flag's definition above for an explanation */
95 #if defined(__hppa) || defined(__hppa__) /* HP PA-RISC, GCC and HP compilers */
96  int sema[4];
97 #else
98  int sema;
99 #endif
100  volatile uint32 value;
102 
103 #endif /* PG_HAVE_ATOMIC_U32_SUPPORT */
104 
105 #if !defined(PG_HAVE_ATOMIC_U64_SUPPORT)
106 
107 #define PG_HAVE_ATOMIC_U64_SIMULATION
108 
109 #define PG_HAVE_ATOMIC_U64_SUPPORT
110 typedef struct pg_atomic_uint64
111 {
112  /* Check pg_atomic_flag's definition above for an explanation */
113 #if defined(__hppa) || defined(__hppa__) /* HP PA-RISC, GCC and HP compilers */
114  int sema[4];
115 #else
116  int sema;
117 #endif
118  volatile uint64 value;
120 
121 #endif /* PG_HAVE_ATOMIC_U64_SUPPORT */
122 
#ifdef PG_HAVE_ATOMIC_FLAG_SIMULATION

#define PG_HAVE_ATOMIC_INIT_FLAG
extern void pg_atomic_init_flag_impl(volatile pg_atomic_flag *ptr);

#define PG_HAVE_ATOMIC_TEST_SET_FLAG
extern bool pg_atomic_test_set_flag_impl(volatile pg_atomic_flag *ptr);

#define PG_HAVE_ATOMIC_CLEAR_FLAG
extern void pg_atomic_clear_flag_impl(volatile pg_atomic_flag *ptr);

#define PG_HAVE_ATOMIC_UNLOCKED_TEST_FLAG
static inline bool
pg_atomic_unlocked_test_flag_impl(volatile pg_atomic_flag *ptr)
{
	/*
	 * Can't do this efficiently in the semaphore based implementation - we'd
	 * have to try to acquire the semaphore - so always return true. That's
	 * correct, because this is only an unlocked test anyway. Do this in the
	 * header so compilers can optimize the test away.
	 */
	return true;
}

#endif							/* PG_HAVE_ATOMIC_FLAG_SIMULATION */
148 
#ifdef PG_HAVE_ATOMIC_U32_SIMULATION

#define PG_HAVE_ATOMIC_INIT_U32
extern void pg_atomic_init_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val_);

#define PG_HAVE_ATOMIC_WRITE_U32
extern void pg_atomic_write_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val);

#define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32
extern bool pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
								uint32 *expected, uint32 newval);

#define PG_HAVE_ATOMIC_FETCH_ADD_U32
extern uint32 pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_);

#endif							/* PG_HAVE_ATOMIC_U32_SIMULATION */
165 
166 
#ifdef PG_HAVE_ATOMIC_U64_SIMULATION

#define PG_HAVE_ATOMIC_INIT_U64
extern void pg_atomic_init_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 val_);

#define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64
extern bool pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
								uint64 *expected, uint64 newval);

#define PG_HAVE_ATOMIC_FETCH_ADD_U64
extern uint64 pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_);

#endif							/* PG_HAVE_ATOMIC_U64_SIMULATION */
struct pg_atomic_flag pg_atomic_flag
bool pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 *expected, uint64 newval)
Definition: atomics.c:183
void pg_atomic_init_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val_)
Definition: atomics.c:89
struct pg_atomic_uint32 pg_atomic_uint32
void pg_atomic_init_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 val_)
Definition: atomics.c:165
struct pg_atomic_uint64 pg_atomic_uint64
volatile uint32 value
Definition: fallback.h:100
uint32 pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_)
Definition: atomics.c:148
void pg_atomic_init_flag_impl(volatile pg_atomic_flag *ptr)
Definition: atomics.c:55
signed int int32
Definition: c.h:256
void pg_spinlock_barrier(void)
Definition: atomics.c:29
bool pg_atomic_test_set_flag_impl(volatile pg_atomic_flag *ptr)
Definition: atomics.c:74
unsigned int uint32
Definition: c.h:268
volatile uint64 value
Definition: fallback.h:118
void pg_atomic_write_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val)
Definition: atomics.c:107
void pg_atomic_clear_flag_impl(volatile pg_atomic_flag *ptr)
Definition: atomics.c:80
uint64 pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
Definition: atomics.c:211
void pg_extern_compiler_barrier(void)
Definition: atomics.c:45
#define newval
bool pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 *expected, uint32 newval)
Definition: atomics.c:120
long val
Definition: informix.c:689
static bool pg_atomic_unlocked_test_flag_impl(volatile pg_atomic_flag *ptr)
Definition: fallback.h:136