#ifndef _URCU_STATIC_H
#define _URCU_STATIC_H

/*
 * urcu-static.h
 *
 * Userspace RCU header.
 *
 * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See urcu.h for linking
 * dynamically with the userspace rcu library.
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */

#include <stdlib.h>
#include <pthread.h>

#include <compiler.h>
#include <arch.h>

/*
 * Identify a shared load. An smp_rmc() or smp_mc() should come before the load.
 */
#define _LOAD_SHARED(p) ACCESS_ONCE(p)

/*
 * Load data from shared memory, doing a cache flush if required.
 */
#define LOAD_SHARED(p) \
	({ \
		smp_rmc(); \
		_LOAD_SHARED(p); \
	})

/*
 * Identify a shared store. An smp_wmc() or smp_mc() should follow the store.
 */
#define _STORE_SHARED(x, v) ({ ACCESS_ONCE(x) = (v); })

/*
 * Store v into x, where x is located in shared memory. Performs the required
 * cache flush after writing. Returns v.
 */
#define STORE_SHARED(x, v) \
	({ \
		_STORE_SHARED(x, v); \
		smp_wmc(); \
		(v); \
	})
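
/*
 * Usage sketch (illustrative only; "ready" is a hypothetical shared flag,
 * not part of this header):
 *
 *	writer:	STORE_SHARED(ready, 1);
 *	reader:	while (!LOAD_SHARED(ready))
 *			continue;	(spin until the store becomes visible)
 */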

/**
 * _rcu_dereference - reads (copies) an RCU-protected pointer to a local
 * variable inside an RCU read-side critical section. The pointer can later be
 * safely dereferenced within the critical section.
 *
 * This ensures that the pointer copy is invariant throughout the whole
 * critical section.
 *
 * Inserts memory barriers on architectures that require them (currently only
 * Alpha) and documents which pointers are protected by RCU.
 *
 * The compiler memory barrier in LOAD_SHARED() ensures that value-speculative
 * optimizations (e.g. VSS: Value Speculation Scheduling) do not perform the
 * data read before the pointer read by speculating the value of the pointer.
 * Correct ordering is ensured because the pointer is read as a volatile
 * access. This acts as a global side-effect operation, which forbids
 * reordering of dependent memory operations. Note that such concerns about
 * dependency-breaking optimizations will eventually be taken care of by the
 * "memory_order_consume" addition to the forthcoming C++ standard.
 *
 * Should match rcu_assign_pointer() or rcu_xchg_pointer().
 */

#define _rcu_dereference(p) ({ \
		typeof(p) _________p1 = LOAD_SHARED(p); \
		smp_read_barrier_depends(); \
		(_________p1); \
	})
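
/*
 * Usage sketch (illustrative only; "struct mynode" and the global pointer
 * "gp" are hypothetical): copy the protected pointer once, then use the
 * local copy for the rest of the critical section.
 *
 *	struct mynode *p;
 *
 *	_rcu_read_lock();
 *	p = _rcu_dereference(gp);
 *	if (p)
 *		use(p->data);
 *	_rcu_read_unlock();
 */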

/*
 * This code section can only be included in LGPL 2.1 compatible source code.
 * See below for the function call wrappers which can be used in code meant to
 * be only linked with the Userspace RCU library. This comes with a small
 * performance degradation on the read-side due to the added function calls.
 * This is required to permit relinking with newer versions of the library.
 */

/*
 * The signal number used by the RCU library can be overridden with
 * -DSIGURCU= when compiling the library.
 */
#ifndef SIGURCU
#define SIGURCU SIGUSR1
#endif
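
/*
 * For instance (illustrative command line; file names may differ):
 *
 *	cc -DSIGURCU=SIGUSR2 -c urcu.c
 */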

/*
 * If a reader is really non-cooperative and refuses to commit its
 * urcu_active_readers count to memory (there is no barrier in the reader
 * per se), kick it after a few loops waiting for it.
 */
#define KICK_READER_LOOPS 10000

#ifdef DEBUG_RCU
#include <assert.h>	/* rcu_assert() expands to assert() */
#define rcu_assert(args...)	assert(args)
#else
#define rcu_assert(args...)
#endif

#ifdef DEBUG_YIELD
#include <sched.h>
#include <time.h>
#include <pthread.h>
#include <unistd.h>

#define YIELD_READ	(1 << 0)
#define YIELD_WRITE	(1 << 1)

/*
 * Updates without CONFIG_URCU_AVOID_SIGNALS are much slower. Account for this
 * in the delay.
 */
#ifdef CONFIG_URCU_AVOID_SIGNALS
/* maximum sleep delay, in us */
#define MAX_SLEEP 50
#else
#define MAX_SLEEP 30000
#endif

extern unsigned int yield_active;
extern unsigned int __thread rand_yield;

static inline void debug_yield_read(void)
{
	if (yield_active & YIELD_READ)
		if (rand_r(&rand_yield) & 0x1)
			usleep(rand_r(&rand_yield) % MAX_SLEEP);
}

static inline void debug_yield_write(void)
{
	if (yield_active & YIELD_WRITE)
		if (rand_r(&rand_yield) & 0x1)
			usleep(rand_r(&rand_yield) % MAX_SLEEP);
}

static inline void debug_yield_init(void)
{
	rand_yield = time(NULL) ^ pthread_self();
}
#else
static inline void debug_yield_read(void)
{
}

static inline void debug_yield_write(void)
{
}

static inline void debug_yield_init(void)
{
}
#endif

#ifdef CONFIG_URCU_AVOID_SIGNALS
static inline void reader_barrier(void)
{
	smp_mb();
}
#else
static inline void reader_barrier(void)
{
	barrier();
}
#endif
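
/*
 * Informal rationale: with CONFIG_URCU_AVOID_SIGNALS, the updater cannot
 * force memory barriers in reader threads through signals, so each reader
 * must issue a real smp_mb() itself. In the signal-based flavor, the
 * updater's force_mb_all_threads() runs the barrier on the readers' behalf,
 * and a compiler barrier() suffices on the read side.
 */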

/*
 * The trick here is that the position of RCU_GP_CTR_BIT must be a multiple of
 * 8 so we can use a full 8-bit, 16-bit or 32-bit bitmask for the lower order
 * bits.
 */
#define RCU_GP_COUNT		(1UL << 0)
/* Use a number of bits equal to half of the architecture long size */
#define RCU_GP_CTR_BIT		(1UL << (sizeof(long) << 2))
#define RCU_GP_CTR_NEST_MASK	(RCU_GP_CTR_BIT - 1)
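
/*
 * Layout example (assuming 64-bit longs): RCU_GP_CTR_BIT is 1UL << 32, so
 * bits 0..31 hold the per-thread nesting count (RCU_GP_CTR_NEST_MASK ==
 * 0xffffffff) and bit 32 is the grace period phase bit toggled by the
 * updater. With 32-bit longs, the split is at bit 16 instead.
 */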

/*
 * Global quiescent period counter with low-order bits unused.
 * Using a long rather than a char to eliminate false register dependencies
 * causing stalls on some architectures.
 */
extern long urcu_gp_ctr;

extern long __thread urcu_active_readers;

static inline int rcu_old_gp_ongoing(long *value)
{
	long v;

	if (value == NULL)
		return 0;
	/*
	 * Make sure both tests below are done on the same version of *value
	 * to ensure consistency.
	 */
	v = LOAD_SHARED(*value);
	return (v & RCU_GP_CTR_NEST_MASK) &&
		((v ^ urcu_gp_ctr) & RCU_GP_CTR_BIT);
}
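
/*
 * Informal reading of the two tests above: a non-zero nest count means the
 * thread is currently inside a read-side critical section; the phase bit
 * differing from the global urcu_gp_ctr means that critical section started
 * before the current grace period phase and must therefore be waited for.
 */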

static inline void _rcu_read_lock(void)
{
	long tmp;

	tmp = urcu_active_readers;
	/* urcu_gp_ctr = RCU_GP_COUNT | (~RCU_GP_CTR_BIT or RCU_GP_CTR_BIT) */
	if (likely(!(tmp & RCU_GP_CTR_NEST_MASK))) {
		_STORE_SHARED(urcu_active_readers, _LOAD_SHARED(urcu_gp_ctr));
		/*
		 * Set active readers count for outermost nesting level before
		 * accessing the pointer. See force_mb_all_threads().
		 */
		reader_barrier();
	} else {
		_STORE_SHARED(urcu_active_readers, tmp + RCU_GP_COUNT);
	}
}

static inline void _rcu_read_unlock(void)
{
	reader_barrier();
	/*
	 * Finish using rcu before decrementing the reader count.
	 * See force_mb_all_threads().
	 * Formally only needed for outermost nesting level, but leave barrier
	 * in place for nested unlocks to remove a branch from the common case
	 * (no nesting).
	 */
	_STORE_SHARED(urcu_active_readers, urcu_active_readers - RCU_GP_COUNT);
}
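
/*
 * Nesting sketch (illustrative only): read-side critical sections may nest.
 * Only the outermost lock snapshots urcu_gp_ctr; inner levels just bump the
 * nesting count.
 *
 *	_rcu_read_lock();	(outermost: copies urcu_gp_ctr)
 *	_rcu_read_lock();	(nested: adds RCU_GP_COUNT)
 *	...
 *	_rcu_read_unlock();
 *	_rcu_read_unlock();	(nest count back to zero: quiescent)
 */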

/**
 * _rcu_assign_pointer - assign (publicize) a pointer to a new data structure
 * meant to be read by RCU read-side critical sections. Returns the assigned
 * value.
 *
 * Documents which pointers will be dereferenced by RCU read-side critical
 * sections and adds the required memory barriers on architectures requiring
 * them. It also makes sure the compiler does not reorder code initializing the
 * data structure before its publication.
 *
 * Should match rcu_dereference().
 */

#define _rcu_assign_pointer(p, v) \
	({ \
		if (!__builtin_constant_p(v) || \
		    ((v) != NULL)) \
			wmb(); \
		STORE_SHARED(p, v); \
	})
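
/*
 * Publication sketch (illustrative only; "struct mynode" and "gp" are
 * hypothetical):
 *
 *	struct mynode *p = malloc(sizeof(*p));
 *
 *	p->data = 42;			(initialize before publishing)
 *	_rcu_assign_pointer(gp, p);	(wmb() orders the init before the store)
 */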

/**
 * _rcu_xchg_pointer - same as rcu_assign_pointer, but returns the previous
 * pointer to the data structure, which can be safely freed after waiting for a
 * quiescent state using synchronize_rcu().
 */

#define _rcu_xchg_pointer(p, v) \
	({ \
		if (!__builtin_constant_p(v) || \
		    ((v) != NULL)) \
			wmb(); \
		xchg(p, v); \
	})

/*
 * Exchanges the pointer and waits for quiescent state.
 * The pointer returned can be freed.
 */
#define _rcu_publish_content(p, v) \
	({ \
		void *oldptr; \
		oldptr = _rcu_xchg_pointer(p, v); \
		synchronize_rcu(); \
		oldptr; \
	})
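
/*
 * Update sketch (illustrative only; "gp" and "newp" are hypothetical):
 * replace the published structure, then reclaim the old copy once no reader
 * can still hold a reference to it.
 *
 *	struct mynode *old;
 *
 *	old = _rcu_publish_content(&gp, newp);
 *	free(old);	(safe: synchronize_rcu() already completed)
 */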

#endif /* _URCU_STATIC_H */