Implement sched_yield UP support
[userspace-rcu.git] / urcu-static.h
#ifndef _URCU_STATIC_H
#define _URCU_STATIC_H

/*
 * urcu-static.h
 *
 * Userspace RCU header.
 *
 * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See urcu.h for linking
 * dynamically with the userspace rcu library.
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */

#include <stdlib.h>
#include <pthread.h>
#include <sched.h>

#include <compiler.h>
#include <arch.h>
/*
 * Identify a shared load. A smp_rmc() or smp_mc() should come before the load.
 */
#define _LOAD_SHARED(p)		ACCESS_ONCE(p)

/*
 * Load data from shared memory, doing a cache flush if required.
 */
#define LOAD_SHARED(p) \
	({ \
		smp_rmc(); \
		_LOAD_SHARED(p); \
	})

/*
 * Identify a shared store. A smp_wmc() or smp_mc() should follow the store.
 */
#define _STORE_SHARED(x, v)	({ ACCESS_ONCE(x) = (v); })

/*
 * Store v into x, where x is located in shared memory. Performs the required
 * cache flush after writing. Returns v.
 */
#define STORE_SHARED(x, v) \
	({ \
		_STORE_SHARED(x, v); \
		smp_wmc(); \
		(v); \
	})
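/*
 * Illustrative sketch (not part of the original header): LOAD_SHARED() and
 * STORE_SHARED() are meant to be used in matching pairs, so the cache flush
 * performed after the store pairs with the cache flush performed before the
 * load. The variable name "ready" is hypothetical.
 *
 *	extern int ready;
 *
 *	void signal_ready(void)
 *	{
 *		STORE_SHARED(ready, 1);		// store, then smp_wmc()
 *	}
 *
 *	int check_ready(void)
 *	{
 *		return LOAD_SHARED(ready);	// smp_rmc(), then load
 *	}
 */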
/**
 * _rcu_dereference - reads (copies) an RCU-protected pointer to a local
 * variable inside an RCU read-side critical section. The pointer can later be
 * safely dereferenced within the critical section.
 *
 * This ensures that the pointer copy is invariant throughout the whole
 * critical section.
 *
 * Inserts memory barriers on architectures that require them (currently only
 * Alpha) and documents which pointers are protected by RCU.
 *
 * The compiler memory barrier in LOAD_SHARED() ensures that value-speculative
 * optimizations (e.g. VSS: Value Speculation Scheduling) do not perform the
 * data read before the pointer read by speculating the value of the pointer.
 * Correct ordering is ensured because the pointer is read as a volatile
 * access. This acts as a global side-effect operation, which forbids
 * reordering of dependent memory operations. Note that such concerns about
 * dependency-breaking optimizations will eventually be taken care of by the
 * "memory_order_consume" addition to the forthcoming C++ standard.
 *
 * Should match rcu_assign_pointer() or rcu_xchg_pointer().
 */

#define _rcu_dereference(p) ({ \
		typeof(p) _________p1 = LOAD_SHARED(p); \
		smp_read_barrier_depends(); \
		(_________p1); \
	})
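/*
 * Illustrative sketch (assumptions: "struct foo" and the global pointer
 * "foo_ptr" are hypothetical; rcu_read_lock()/rcu_read_unlock() come from
 * urcu.h): a reader copies the RCU-protected pointer once with
 * _rcu_dereference() and only dereferences that local copy inside the
 * read-side critical section.
 *
 *	struct foo { int a; };
 *	extern struct foo *foo_ptr;
 *
 *	int read_a(void)
 *	{
 *		struct foo *p;
 *		int a = -1;
 *
 *		rcu_read_lock();
 *		p = _rcu_dereference(foo_ptr);
 *		if (p)
 *			a = p->a;
 *		rcu_read_unlock();
 *		return a;
 *	}
 */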
/*
 * This code section can only be included in LGPL 2.1 compatible source code.
 * See below for the function call wrappers which can be used in code meant to
 * be only linked with the Userspace RCU library. This comes with a small
 * performance degradation on the read-side due to the added function calls.
 * This is required to permit relinking with newer versions of the library.
 */

/*
 * The signal number used by the RCU library can be overridden with
 * -DSIGURCU= when compiling the library.
 */
#ifndef SIGURCU
#define SIGURCU SIGUSR1
#endif

/*
 * If a reader is really non-cooperative and refuses to commit its
 * urcu_active_readers count to memory (there is no barrier in the reader
 * per se), kick it after a few loops waiting for it.
 */
#define KICK_READER_LOOPS 10000

/*
 * Number of active attempts to check for a reader quiescent state (Q.S.)
 * before calling sched_yield().
 */
#define RCU_QS_ACTIVE_ATTEMPTS 100

#ifdef DEBUG_RCU
#include <assert.h>
#define rcu_assert(args...)	assert(args)
#else
#define rcu_assert(args...)
#endif
#ifdef DEBUG_YIELD
#include <sched.h>
#include <time.h>
#include <pthread.h>
#include <unistd.h>

#define YIELD_READ	(1 << 0)
#define YIELD_WRITE	(1 << 1)

/*
 * Updates without URCU_MB are much slower. Account for this in
 * the delay.
 */
#ifdef URCU_MB
/* maximum sleep delay, in us */
#define MAX_SLEEP 50
#else
#define MAX_SLEEP 30000
#endif

extern unsigned int yield_active;
extern unsigned int __thread rand_yield;

static inline void debug_yield_read(void)
{
	if (yield_active & YIELD_READ)
		if (rand_r(&rand_yield) & 0x1)
			usleep(rand_r(&rand_yield) % MAX_SLEEP);
}

static inline void debug_yield_write(void)
{
	if (yield_active & YIELD_WRITE)
		if (rand_r(&rand_yield) & 0x1)
			usleep(rand_r(&rand_yield) % MAX_SLEEP);
}

static inline void debug_yield_init(void)
{
	rand_yield = time(NULL) ^ pthread_self();
}
#else
static inline void debug_yield_read(void)
{
}

static inline void debug_yield_write(void)
{
}

static inline void debug_yield_init(void)
{

}
#endif
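/*
 * Illustrative sketch (assumption: this is how a test program might use the
 * DEBUG_YIELD facility; the function name "test_setup" is hypothetical):
 * build with -DDEBUG_YIELD, then enable the delay injection points before
 * starting the reader and writer threads.
 *
 *	void test_setup(void)
 *	{
 *		yield_active = YIELD_READ | YIELD_WRITE;
 *		debug_yield_init();	// called per-thread: seeds rand_yield
 *	}
 */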
#ifdef URCU_MB
static inline void reader_barrier(void)
{
	smp_mb();
}
#else
static inline void reader_barrier(void)
{
	barrier();
}
#endif

/*
 * The trick here is that RCU_GP_CTR_BIT must be a multiple of 8 so we can use
 * a full 8-bit, 16-bit or 32-bit bitmask for the lower order bits.
 */
#define RCU_GP_COUNT		(1UL << 0)
/* Use the amount of bits equal to half of the architecture long size */
#define RCU_GP_CTR_BIT		(1UL << (sizeof(long) << 2))
#define RCU_GP_CTR_NEST_MASK	(RCU_GP_CTR_BIT - 1)
#define RCU_GP_ONGOING		(RCU_GP_CTR_BIT << 1)
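/*
 * Worked example (illustrative): with a 64-bit long, sizeof(long) << 2 == 32,
 * so RCU_GP_CTR_BIT == 1UL << 32. The lower 32 bits (RCU_GP_CTR_NEST_MASK)
 * count read-side nesting, bit 32 is the grace-period phase bit flipped by
 * the writer, and bit 33 (RCU_GP_ONGOING) marks that a grace period is in
 * progress. With a 32-bit long, the same split applies to 16-bit halves.
 */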
/*
 * Global quiescent period counter with low-order bits unused.
 * Using a word-sized counter rather than a char to eliminate false register
 * dependencies causing stalls on some architectures.
 */
extern long urcu_gp_ctr;

extern long __thread urcu_active_readers;

static inline int rcu_old_gp_ongoing(long *value)
{
	long v;

	if (value == NULL)
		return 0;
	/*
	 * Make sure both tests below are done on the same version of *value
	 * to ensure consistency.
	 */
	v = LOAD_SHARED(*value);
	return (v & RCU_GP_CTR_NEST_MASK) &&
		((v ^ urcu_gp_ctr) & RCU_GP_CTR_BIT);
}
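/*
 * Worked example (illustrative, 64-bit long): a reader that entered its
 * outermost critical section while urcu_gp_ctr was (RCU_GP_COUNT | 0) has
 * urcu_active_readers == 1. Once the writer flips the phase bit,
 * urcu_gp_ctr becomes (RCU_GP_COUNT | RCU_GP_CTR_BIT), so
 * (v ^ urcu_gp_ctr) & RCU_GP_CTR_BIT is non-zero while the nesting mask is
 * also non-zero: the reader is still in the old grace period and must be
 * waited for. A thread with a zero nesting count is never waited for.
 */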
static inline void _rcu_read_lock(void)
{
	long tmp, gp_ctr;

	tmp = urcu_active_readers;
	/* urcu_gp_ctr = RCU_GP_COUNT | (~RCU_GP_CTR_BIT or RCU_GP_CTR_BIT) */
	if (likely(!(tmp & RCU_GP_CTR_NEST_MASK))) {
		gp_ctr = _LOAD_SHARED(urcu_gp_ctr);
		if (unlikely(gp_ctr & RCU_GP_ONGOING)) {
			sched_yield();
			gp_ctr = _LOAD_SHARED(urcu_gp_ctr);
		}
		_STORE_SHARED(urcu_active_readers, gp_ctr);
		/*
		 * Set active readers count for outermost nesting level before
		 * accessing the pointer. See force_mb_all_threads().
		 */
		reader_barrier();
	} else {
		_STORE_SHARED(urcu_active_readers, tmp + RCU_GP_COUNT);
	}
}

static inline void _rcu_read_unlock(void)
{
	/*
	 * Finish using rcu before decrementing the active readers count.
	 * See force_mb_all_threads().
	 * Formally only needed for outermost nesting level, but leave barrier
	 * in place for nested unlocks to remove a branch from the common case
	 * (no nesting).
	 */
	reader_barrier();
	_STORE_SHARED(urcu_active_readers, urcu_active_readers - RCU_GP_COUNT);
}
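/*
 * Illustrative sketch (assumptions: the node type "struct node" and the
 * global "head" pointer are hypothetical): a reader brackets its accesses
 * with _rcu_read_lock()/_rcu_read_unlock(). Note the sched_yield() UP
 * support above: an outermost _rcu_read_lock() that observes RCU_GP_ONGOING
 * yields once so the writer can finish the ongoing grace period rather than
 * being busy-waited on.
 *
 *	struct node { int key; struct node *next; };
 *	extern struct node *head;
 *
 *	int find_key(int key)
 *	{
 *		struct node *n;
 *		int found = 0;
 *
 *		_rcu_read_lock();
 *		for (n = _rcu_dereference(head); n; n = _rcu_dereference(n->next))
 *			if (n->key == key) {
 *				found = 1;
 *				break;
 *			}
 *		_rcu_read_unlock();
 *		return found;
 *	}
 */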
/**
 * _rcu_assign_pointer - assign (publicize) a pointer to a new data structure
 * meant to be read by RCU read-side critical sections. Returns the assigned
 * value.
 *
 * Documents which pointers will be dereferenced by RCU read-side critical
 * sections and adds the required memory barriers on architectures requiring
 * them. It also makes sure the compiler does not reorder code initializing the
 * data structure past its publication.
 *
 * Should match rcu_dereference().
 */

#define _rcu_assign_pointer(p, v) \
	({ \
		if (!__builtin_constant_p(v) || \
		    ((v) != NULL)) \
			wmb(); \
		STORE_SHARED(p, v); \
	})
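/*
 * Illustrative sketch (assumptions: "struct foo", "foo_ptr" and the update
 * function are hypothetical, a single updater or an external lock is assumed,
 * and synchronize_rcu() is declared in urcu.h): the writer fully initializes
 * the new structure before publishing it, then waits for a grace period
 * before freeing the old copy.
 *
 *	struct foo { int a; };
 *	extern struct foo *foo_ptr;
 *
 *	void update_foo(int a)
 *	{
 *		struct foo *new_foo, *old_foo;
 *
 *		new_foo = malloc(sizeof(*new_foo));
 *		new_foo->a = a;			// init before publication
 *		old_foo = foo_ptr;
 *		_rcu_assign_pointer(foo_ptr, new_foo);
 *		synchronize_rcu();		// wait for pre-existing readers
 *		free(old_foo);
 *	}
 */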
/**
 * _rcu_cmpxchg_pointer - same as rcu_assign_pointer, but tests if the pointer
 * matches the expected "old" value. If it succeeds, it returns the previous
 * pointer to the data structure, which can be safely freed after waiting for
 * a quiescent state using synchronize_rcu(). If it fails (unexpected value),
 * it returns "old" (which should not be freed!).
 */

#define _rcu_cmpxchg_pointer(p, old, _new) \
	({ \
		if (!__builtin_constant_p(_new) || \
		    ((_new) != NULL)) \
			wmb(); \
		cmpxchg(p, old, _new); \
	})
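/*
 * Illustrative sketch (assumptions: "foo_ptr", "struct foo" and the helper
 * are hypothetical; the address of the published pointer is passed, as
 * suggested by the underlying cmpxchg()): conditionally replace the pointer
 * only if it still holds the expected old value, then reclaim the old
 * structure after a grace period.
 *
 *	int try_replace_foo(struct foo *expected, struct foo *new_foo)
 *	{
 *		struct foo *ret;
 *
 *		ret = _rcu_cmpxchg_pointer(&foo_ptr, expected, new_foo);
 *		if (ret != expected)
 *			return 0;	// lost the race, nothing to free
 *		synchronize_rcu();
 *		free(ret);		// ret == expected, now safe to free
 *		return 1;
 *	}
 */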
/**
 * _rcu_xchg_pointer - same as rcu_assign_pointer, but returns the previous
 * pointer to the data structure, which can be safely freed after waiting for a
 * quiescent state using synchronize_rcu().
 */

#define _rcu_xchg_pointer(p, v) \
	({ \
		if (!__builtin_constant_p(v) || \
		    ((v) != NULL)) \
			wmb(); \
		xchg(p, v); \
	})
/*
 * Exchanges the pointer and waits for a quiescent state.
 * The pointer returned can be freed.
 */
#define _rcu_publish_content(p, v) \
	({ \
		void *oldptr; \
		oldptr = _rcu_xchg_pointer(p, v); \
		synchronize_rcu(); \
		oldptr; \
	})
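/*
 * Illustrative sketch (assumptions: "foo_ptr" and "struct foo" are
 * hypothetical; synchronize_rcu() is declared in urcu.h):
 * _rcu_publish_content() combines the pointer exchange and the grace-period
 * wait, so the returned old pointer can be freed immediately.
 *
 *	void replace_foo(struct foo *new_foo)
 *	{
 *		struct foo *old_foo;
 *
 *		old_foo = _rcu_publish_content(&foo_ptr, new_foo);
 *		free(old_foo);
 *	}
 */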
#endif /* _URCU_STATIC_H */