#ifndef _URCU_QSBR_STATIC_H
#define _URCU_QSBR_STATIC_H

/*
 * urcu-qsbr-static.h
 *
 * Userspace RCU QSBR header.
 *
 * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See urcu-qsbr.h for linking
 * dynamically with the userspace rcu QSBR library.
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */

#include <stdlib.h>
#include <pthread.h>
#include <assert.h>
#include <limits.h>
#include <syscall.h>
#include <unistd.h>

#include <urcu/compiler.h>
#include <urcu/arch.h>

/*
 * Identify a shared load. A smp_rmc() or smp_mc() should come before the load.
 */
#define _LOAD_SHARED(p)		ACCESS_ONCE(p)

/*
 * Load data from shared memory, doing a cache flush if required.
 */
#define LOAD_SHARED(p) \
	({ \
		smp_rmc(); \
		_LOAD_SHARED(p); \
	})

/*
 * Identify a shared store. A smp_wmc() or smp_mc() should follow the store.
 */
#define _STORE_SHARED(x, v)	({ ACCESS_ONCE(x) = (v); })

/*
 * Store v into x, where x is located in shared memory. Performs the required
 * cache flush after writing. Returns v.
 */
#define STORE_SHARED(x, v) \
	({ \
		_STORE_SHARED(x, v); \
		smp_wmc(); \
		(v); \
	})
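
/*
 * Illustrative sketch (not part of the original header): a writer publishes a
 * value with STORE_SHARED() and a reader polls it with LOAD_SHARED(), so the
 * smp_wmc()/smp_rmc() cache-flush primitives are emitted on architectures
 * that need them. The variable name "shared_flag" is made up for this example.
 */
#if 0
static int shared_flag;

static void example_writer(void)
{
	STORE_SHARED(shared_flag, 1);		/* store, then smp_wmc() */
}

static int example_reader(void)
{
	return LOAD_SHARED(shared_flag);	/* smp_rmc(), then load */
}
#endif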

/**
 * _rcu_dereference - reads (copies) an RCU-protected pointer to a local
 * variable within an RCU read-side critical section. The pointer can later be
 * safely dereferenced within the critical section.
 *
 * This ensures that the pointer copy is invariant throughout the whole
 * critical section.
 *
 * Inserts memory barriers on architectures that require them (currently only
 * Alpha) and documents which pointers are protected by RCU.
 *
 * Should match rcu_assign_pointer() or rcu_xchg_pointer().
 */

#define _rcu_dereference(p)	({					\
				typeof(p) _________p1 = LOAD_SHARED(p); \
				smp_read_barrier_depends();		\
				(_________p1);				\
				})
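
/*
 * Illustrative sketch (not part of the original header): reading an
 * RCU-protected pointer. "struct mynode" and "rcu_node_ptr" are hypothetical
 * names; the pointer is assumed to be published with _rcu_assign_pointer()
 * (defined below).
 */
#if 0
struct mynode {
	int value;
};

static struct mynode *rcu_node_ptr;

static int example_read_value(void)
{
	struct mynode *node;

	node = _rcu_dereference(rcu_node_ptr);	/* stable copy of the pointer */
	return node ? node->value : -1;
}
#endif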

#define futex(...)		syscall(__NR_futex, __VA_ARGS__)
#define FUTEX_WAIT		0
#define FUTEX_WAKE		1

/*
 * This code section can only be included in LGPL 2.1 compatible source code.
 * See below for the function call wrappers which can be used in code meant to
 * be only linked with the Userspace RCU library. This comes with a small
 * performance degradation on the read-side due to the added function calls.
 * This is required to permit relinking with newer versions of the library.
 */

/*
 * If a reader is really non-cooperative and refuses to commit its
 * rcu_reader_qs_gp count to memory (there is no barrier in the reader
 * per se), kick it after a few loops waiting for it.
 */
#define KICK_READER_LOOPS 10000

/*
 * Number of active attempts to check for reader Q.S. before calling futex().
 */
#define RCU_QS_ACTIVE_ATTEMPTS 100

#ifdef DEBUG_RCU
#define rcu_assert(args...)	assert(args)
#else
#define rcu_assert(args...)
#endif

#ifdef DEBUG_YIELD
#include <sched.h>
#include <time.h>
#include <pthread.h>
#include <unistd.h>

#define YIELD_READ	(1 << 0)
#define YIELD_WRITE	(1 << 1)

/* maximum sleep delay, in us */
#define MAX_SLEEP 50

extern unsigned int yield_active;
extern unsigned int __thread rand_yield;

static inline void debug_yield_read(void)
{
	if (yield_active & YIELD_READ)
		if (rand_r(&rand_yield) & 0x1)
			usleep(rand_r(&rand_yield) % MAX_SLEEP);
}

static inline void debug_yield_write(void)
{
	if (yield_active & YIELD_WRITE)
		if (rand_r(&rand_yield) & 0x1)
			usleep(rand_r(&rand_yield) % MAX_SLEEP);
}

static inline void debug_yield_init(void)
{
	rand_yield = time(NULL) ^ pthread_self();
}
#else
static inline void debug_yield_read(void)
{
}

static inline void debug_yield_write(void)
{
}

static inline void debug_yield_init(void)
{
}
#endif

static inline void reader_barrier()
{
	smp_mb();
}

#define RCU_GP_ONLINE		(1UL << 0)
#define RCU_GP_CTR		(1UL << 1)

/*
 * Global quiescent period counter with low-order bits unused.
 * Using an int rather than a char to eliminate false register dependencies
 * causing stalls on some architectures.
 */
extern unsigned long urcu_gp_ctr;

extern unsigned long __thread rcu_reader_qs_gp;

extern int gp_futex;

/*
 * Wake up a waiting synchronize_rcu(). Called from many concurrent threads.
 */
static inline void wake_up_gp(void)
{
	if (unlikely(uatomic_read(&gp_futex) == -1)) {
		uatomic_set(&gp_futex, 0);
		futex(&gp_futex, FUTEX_WAKE, 1,
		      NULL, NULL, 0);
	}
}

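/*
 * rcu_gp_ongoing() checks whether the reader whose per-thread counter is
 * pointed to by *value can still be holding up the current grace period: it
 * returns non-zero when the reader is online (non-zero counter) and has not
 * yet observed the current value of urcu_gp_ctr. A NULL pointer or an offline
 * reader (counter set to 0) never delays a grace period. On 32-bit, the check
 * compares the RCU_GP_CTR toggle bit; on 64-bit, the free-running counter is
 * compared with wraparound-safe unsigned arithmetic.
 */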
#if (BITS_PER_LONG < 64)
static inline int rcu_gp_ongoing(unsigned long *value)
{
	unsigned long reader_gp;

	if (value == NULL)
		return 0;
	reader_gp = LOAD_SHARED(*value);
	return reader_gp && ((reader_gp ^ urcu_gp_ctr) & RCU_GP_CTR);
}
#else /* !(BITS_PER_LONG < 64) */
static inline int rcu_gp_ongoing(unsigned long *value)
{
	unsigned long reader_gp;

	if (value == NULL)
		return 0;
	reader_gp = LOAD_SHARED(*value);
	return reader_gp && (reader_gp - urcu_gp_ctr > ULONG_MAX / 2);
}
#endif /* !(BITS_PER_LONG < 64) */
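
/*
 * Rough sketch of how the update side is expected to use the pieces above
 * (simplified, hypothetical code; the real loop lives in urcu-qsbr.c and also
 * handles registry locking and memory barriers). It polls a registered
 * reader with rcu_gp_ongoing() for RCU_QS_ACTIVE_ATTEMPTS iterations, then
 * arms gp_futex and blocks in futex() until wake_up_gp() is called.
 */
#if 0
static void example_wait_for_reader(unsigned long *reader_qs_gp)
{
	int attempts = 0;

	while (rcu_gp_ongoing(reader_qs_gp)) {
		if (++attempts < RCU_QS_ACTIVE_ATTEMPTS) {
			cpu_relax();	/* active polling */
			continue;
		}
		/* Arm the futex, then re-check before sleeping. */
		uatomic_set(&gp_futex, -1);
		if (!rcu_gp_ongoing(reader_qs_gp)) {
			uatomic_set(&gp_futex, 0);
			break;
		}
		futex(&gp_futex, FUTEX_WAIT, -1, NULL, NULL, 0);
	}
}
#endif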
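
/*
 * QSBR read-side critical sections cost nothing: _rcu_read_lock() only checks
 * (under DEBUG_RCU) that the calling thread is online, and _rcu_read_unlock()
 * is a no-op. The thread must not report a quiescent state between the two.
 */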
static inline void _rcu_read_lock(void)
{
	rcu_assert(rcu_reader_qs_gp);
}

static inline void _rcu_read_unlock(void)
{
}

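/*
 * Announce a quiescent state: copy the global grace-period counter into the
 * per-thread counter so that a concurrent synchronize_rcu() no longer waits
 * on this thread, then wake it up through gp_futex if it is sleeping.
 */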
static inline void _rcu_quiescent_state(void)
{
	smp_mb();
	_STORE_SHARED(rcu_reader_qs_gp, _LOAD_SHARED(urcu_gp_ctr));
	smp_mb();	/* write rcu_reader_qs_gp before read futex */
	wake_up_gp();
	smp_mb();
}

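/*
 * Mark the thread offline (extended quiescent state): a zero per-thread
 * counter tells rcu_gp_ongoing() to ignore this thread until it comes back
 * online. No RCU read-side critical section may be entered while offline.
 */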
static inline void _rcu_thread_offline(void)
{
	smp_mb();
	STORE_SHARED(rcu_reader_qs_gp, 0);
	smp_mb();	/* write rcu_reader_qs_gp before read futex */
	wake_up_gp();
}

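/*
 * Mark the thread online again by taking a fresh snapshot of the global
 * counter; the following memory barrier orders the snapshot before any
 * subsequent read-side accesses.
 */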
static inline void _rcu_thread_online(void)
{
	_STORE_SHARED(rcu_reader_qs_gp, LOAD_SHARED(urcu_gp_ctr));
	smp_mb();
}
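
/*
 * Illustrative sketch of a QSBR reader thread (hypothetical code, not part of
 * the original header): the thread registers itself, periodically announces
 * quiescent states between read-side critical sections, and goes offline
 * around blocking calls. rcu_register_thread()/rcu_unregister_thread() are
 * assumed to be provided by the library as declared in urcu-qsbr.h; struct
 * mynode and rcu_node_ptr come from the hypothetical sketch above.
 */
#if 0
static void *example_reader_thread(void *arg)
{
	int i;

	rcu_register_thread();		/* starts the thread online */
	for (i = 0; i < 1000000; i++) {
		struct mynode *node;

		_rcu_read_lock();
		node = _rcu_dereference(rcu_node_ptr);
		/* ... use node ... */
		_rcu_read_unlock();

		_rcu_quiescent_state();	/* announce a quiescent state */
	}
	_rcu_thread_offline();
	sleep(1);			/* blocking while offline is allowed */
	_rcu_thread_online();
	rcu_unregister_thread();
	return NULL;
}
#endif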

/**
 * _rcu_assign_pointer - assign (publicize) a pointer to a new data structure
 * meant to be read by RCU read-side critical sections. Returns the assigned
 * value.
 *
 * Documents which pointers will be dereferenced by RCU read-side critical
 * sections and adds the required memory barriers on architectures requiring
 * them. It also makes sure the compiler does not reorder code initializing the
 * data structure before its publication.
 *
 * Should match rcu_dereference().
 */

#define _rcu_assign_pointer(p, v)			\
	({						\
		if (!__builtin_constant_p(v) ||		\
		    ((v) != NULL))			\
			wmb();				\
		STORE_SHARED(p, v);			\
	})
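
/*
 * Illustrative sketch of the update side (hypothetical code): initialize the
 * new node completely before publishing it with _rcu_assign_pointer(), and
 * only free the old node after synchronize_rcu() has shown that all
 * pre-existing readers have passed through a quiescent state. Reuses the
 * hypothetical struct mynode and rcu_node_ptr from the sketches above;
 * synchronize_rcu() is assumed to be provided by the library.
 */
#if 0
static void example_update_value(int value)
{
	struct mynode *new_node, *old_node;

	new_node = malloc(sizeof(*new_node));
	if (!new_node)
		return;
	new_node->value = value;	/* init before publication */

	old_node = rcu_node_ptr;	/* assumes a single updater */
	_rcu_assign_pointer(rcu_node_ptr, new_node);

	synchronize_rcu();		/* wait for pre-existing readers */
	free(old_node);
}
#endif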

/**
 * _rcu_cmpxchg_pointer - same as rcu_assign_pointer, but tests whether the
 * pointer matches "old". On success, returns the previous pointer to the data
 * structure, which can be safely freed after waiting for a quiescent state
 * using synchronize_rcu(). On failure (unexpected value), returns old (which
 * should not be freed!).
 */

#define _rcu_cmpxchg_pointer(p, old, _new)		\
	({						\
		if (!__builtin_constant_p(_new) ||	\
		    ((_new) != NULL))			\
			wmb();				\
		uatomic_cmpxchg(p, old, _new);		\
	})

/**
 * _rcu_xchg_pointer - same as rcu_assign_pointer, but returns the previous
 * pointer to the data structure, which can be safely freed after waiting for a
 * quiescent state using synchronize_rcu().
 */

#define _rcu_xchg_pointer(p, v)				\
	({						\
		if (!__builtin_constant_p(v) ||		\
		    ((v) != NULL))			\
			wmb();				\
		uatomic_xchg(p, v);			\
	})

/*
 * Exchanges the pointer and waits for quiescent state.
 * The pointer returned can be freed.
 */
#define _rcu_publish_content(p, v)			\
	({						\
		void *oldptr;				\
		oldptr = _rcu_xchg_pointer(p, v);	\
		synchronize_rcu();			\
		oldptr;					\
	})

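/*
 * Usage sketch (hypothetical names, see the sketches above):
 * _rcu_publish_content() combines the pointer exchange with the grace period,
 * so the returned old pointer can be freed right away:
 *
 *	old_node = _rcu_publish_content(&rcu_node_ptr, new_node);
 *	free(old_node);
 */
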
#endif /* _URCU_QSBR_STATIC_H */