Only make the threads for which we are waiting call sched_yield()
[urcu.git] / urcu-qsbr-static.h
#ifndef _URCU_QSBR_STATIC_H
#define _URCU_QSBR_STATIC_H

/*
 * urcu-qsbr-static.h
 *
 * Userspace RCU QSBR header.
 *
 * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See urcu-qsbr.h for linking
 * dynamically with the userspace rcu QSBR library.
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */

#include <stdlib.h>
#include <pthread.h>
#include <assert.h>
#include <limits.h>
#include <sched.h>

#include <compiler.h>
#include <arch.h>

/*
 * Identify a shared load. A smp_rmc() or smp_mc() should come before the load.
 */
#define _LOAD_SHARED(p)		ACCESS_ONCE(p)

/*
 * Load data from shared memory, doing a cache flush if required.
 */
#define LOAD_SHARED(p) \
	({ \
		smp_rmc(); \
		_LOAD_SHARED(p); \
	})

/*
 * Identify a shared store. A smp_wmc() or smp_mc() should follow the store.
 */
#define _STORE_SHARED(x, v)	({ ACCESS_ONCE(x) = (v); })

/*
 * Store v into x, where x is located in shared memory. Performs the required
 * cache flush after writing. Returns v.
 */
#define STORE_SHARED(x, v) \
	({ \
		_STORE_SHARED(x, v); \
		smp_wmc(); \
		(v); \
	})

/**
 * _rcu_dereference - reads (copies) an RCU-protected pointer to a local
 * variable inside an RCU read-side critical section. The pointer can later be
 * safely dereferenced within the critical section.
 *
 * This ensures that the pointer copy is invariant throughout the whole
 * critical section.
 *
 * Inserts memory barriers on architectures that require them (currently only
 * Alpha) and documents which pointers are protected by RCU.
 *
 * Should match rcu_assign_pointer() or rcu_xchg_pointer().
 */

#define _rcu_dereference(p)	({ \
				typeof(p) _________p1 = LOAD_SHARED(p); \
				smp_read_barrier_depends(); \
				(_________p1); \
				})

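/*
 * Illustrative sketch (not part of this header): typical read-side use,
 * assuming urcu-qsbr.h exposes rcu_read_lock(), rcu_read_unlock() and
 * rcu_dereference() wrappers for the underscore-prefixed primitives below.
 * The "struct node" type, the "shared_head" pointer and the use_node()
 * helper are hypothetical names used only for this example; the reader
 * thread is assumed to be registered and online.
 *
 *	struct node *snapshot;
 *
 *	rcu_read_lock();
 *	snapshot = rcu_dereference(shared_head);
 *	if (snapshot)
 *		use_node(snapshot);
 *	rcu_read_unlock();
 */
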
/*
 * This code section can only be included in LGPL 2.1 compatible source code.
 * See below for the function call wrappers which can be used in code meant to
 * be only linked with the Userspace RCU library. This comes with a small
 * performance degradation on the read-side due to the added function calls.
 * This is required to permit relinking with newer versions of the library.
 */

/*
 * If a reader is really non-cooperative and refuses to commit its
 * urcu_reader_status.qs_gp count to memory (there is no barrier in the reader
 * per se), kick it after a few loops waiting for it.
 */
#define KICK_READER_LOOPS 10000

/*
 * Active attempts to check for reader Q.S. before calling sched_yield().
 */
#define RCU_QS_ACTIVE_ATTEMPTS 100

#ifdef DEBUG_RCU
#define rcu_assert(args...)	assert(args)
#else
#define rcu_assert(args...)
#endif

#ifdef DEBUG_YIELD
#include <sched.h>
#include <time.h>
#include <pthread.h>
#include <unistd.h>

#define YIELD_READ	(1 << 0)
#define YIELD_WRITE	(1 << 1)

/* maximum sleep delay, in us */
#define MAX_SLEEP 50

extern unsigned int yield_active;
extern unsigned int __thread rand_yield;

static inline void debug_yield_read(void)
{
	if (yield_active & YIELD_READ)
		if (rand_r(&rand_yield) & 0x1)
			usleep(rand_r(&rand_yield) % MAX_SLEEP);
}

static inline void debug_yield_write(void)
{
	if (yield_active & YIELD_WRITE)
		if (rand_r(&rand_yield) & 0x1)
			usleep(rand_r(&rand_yield) % MAX_SLEEP);
}

static inline void debug_yield_init(void)
{
	rand_yield = time(NULL) ^ pthread_self();
}
#else
static inline void debug_yield_read(void)
{
}

static inline void debug_yield_write(void)
{
}

static inline void debug_yield_init(void)
{
}
#endif

static inline void reader_barrier(void)
{
	smp_mb();
}

#define RCU_GP_ONLINE		(1UL << 0)
#define RCU_GP_ONGOING		(1UL << 1)
#define RCU_GP_CTR		(1UL << 2)

/*
 * Global quiescent period counter with low-order bits unused.
 * Using an unsigned long rather than a char to eliminate false register
 * dependencies causing stalls on some architectures.
 */
extern unsigned long urcu_gp_ctr;

struct urcu_reader_status {
	unsigned long qs_gp;
	unsigned long gp_waiting;
};

extern struct urcu_reader_status __thread urcu_reader_status;

#if (BITS_PER_LONG < 64)
static inline int rcu_gp_ongoing(unsigned long *value)
{
	unsigned long reader_gp;

	if (value == NULL)
		return 0;
	reader_gp = LOAD_SHARED(*value);
	return reader_gp && ((reader_gp ^ urcu_gp_ctr) & RCU_GP_CTR);
}
#else /* !(BITS_PER_LONG < 64) */
static inline int rcu_gp_ongoing(unsigned long *value)
{
	unsigned long reader_gp;

	if (value == NULL)
		return 0;
	reader_gp = LOAD_SHARED(*value);
	return reader_gp && (reader_gp - urcu_gp_ctr > ULONG_MAX / 2);
}
#endif /* !(BITS_PER_LONG < 64) */
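
/*
 * Added commentary on the two variants above, assuming the usual QSBR
 * write-side in urcu-qsbr.c: on 32-bit, the grace-period counter is expected
 * to toggle the RCU_GP_CTR bit, so a reader still holds up the grace period
 * while its qs_gp snapshot disagrees with urcu_gp_ctr on that bit. On 64-bit,
 * the counter is free-running and "reader_gp - urcu_gp_ctr > ULONG_MAX / 2"
 * is a wraparound-safe test for "the reader's snapshot is older than the
 * current global counter". A reader_gp of 0 means the reader is offline and
 * never holds up a grace period.
 */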

static inline void _rcu_read_lock(void)
{
	rcu_assert(urcu_reader_status.qs_gp);
}

static inline void _rcu_read_unlock(void)
{
}

static inline void _rcu_quiescent_state(void)
{
	long gp_ctr;

	smp_mb();
	/*
	 * volatile accesses can be reordered by the compiler when put in the
	 * same expression.
	 */
	if (unlikely((gp_ctr = LOAD_SHARED(urcu_gp_ctr)) & RCU_GP_ONGOING) &&
	    unlikely(urcu_reader_status.gp_waiting)) {
		_STORE_SHARED(urcu_reader_status.qs_gp, gp_ctr);
		sched_yield();
	} else {
		_STORE_SHARED(urcu_reader_status.qs_gp, gp_ctr);
	}
	smp_mb();
}

static inline void _rcu_thread_offline(void)
{
	smp_mb();
	STORE_SHARED(urcu_reader_status.qs_gp, 0);
}

static inline void _rcu_thread_online(void)
{
	long gp_ctr;

	if (unlikely((gp_ctr = LOAD_SHARED(urcu_gp_ctr)) & RCU_GP_ONGOING) &&
	    unlikely(urcu_reader_status.gp_waiting)) {
		sched_yield();
		gp_ctr = LOAD_SHARED(urcu_gp_ctr);
	}
	_STORE_SHARED(urcu_reader_status.qs_gp, gp_ctr);
	smp_mb();
}

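/*
 * Illustrative sketch (not part of this header): how a QSBR reader thread is
 * typically expected to drive the primitives above, assuming urcu-qsbr.h
 * exposes rcu_quiescent_state(), rcu_thread_offline() and rcu_thread_online()
 * wrappers and the thread is already registered with the library. The
 * process_work() and wait_for_work() helpers are hypothetical names used only
 * for this example.
 *
 *	for (;;) {
 *		rcu_read_lock();
 *		process_work();			(may use rcu_dereference())
 *		rcu_read_unlock();
 *		rcu_quiescent_state();		(announce a quiescent state)
 *
 *		rcu_thread_offline();		(extended Q.S. around blocking)
 *		wait_for_work();
 *		rcu_thread_online();
 *	}
 */
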
/**
 * _rcu_assign_pointer - assign (publicize) a pointer to a new data structure
 * meant to be read by RCU read-side critical sections. Returns the assigned
 * value.
 *
 * Documents which pointers will be dereferenced by RCU read-side critical
 * sections and adds the required memory barriers on architectures requiring
 * them. It also makes sure the compiler does not reorder code initializing the
 * data structure before its publication.
 *
 * Should match rcu_dereference().
 */

#define _rcu_assign_pointer(p, v) \
	({ \
		if (!__builtin_constant_p(v) || \
		    ((v) != NULL)) \
			wmb(); \
		STORE_SHARED(p, v); \
	})

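/*
 * Illustrative sketch (not part of this header): publishing a new structure,
 * assuming urcu-qsbr.h exposes a rcu_assign_pointer() wrapper for
 * _rcu_assign_pointer(). The "struct node" type and the "shared_head" pointer
 * are hypothetical names used only for this example.
 *
 *	struct node *new = malloc(sizeof(*new));
 *
 *	new->data = 42;				(initialize before publication)
 *	rcu_assign_pointer(shared_head, new);	(wmb() then shared store)
 *
 * Replacing and reclaiming an already-published structure is covered by
 * rcu_xchg_pointer()/rcu_publish_content() below.
 */
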
/**
 * _rcu_cmpxchg_pointer - same as rcu_assign_pointer, but tests if the pointer
 * matches the expected value "old". If it succeeds, returns the previous
 * pointer to the data structure, which can be safely freed after waiting for
 * a quiescent state using synchronize_rcu(). If it fails (unexpected value),
 * returns "old" (which should not be freed!).
 */

#define _rcu_cmpxchg_pointer(p, old, _new) \
	({ \
		if (!__builtin_constant_p(_new) || \
		    ((_new) != NULL)) \
			wmb(); \
		cmpxchg(p, old, _new); \
	})

/**
 * _rcu_xchg_pointer - same as rcu_assign_pointer, but returns the previous
 * pointer to the data structure, which can be safely freed after waiting for a
 * quiescent state using synchronize_rcu().
 */

#define _rcu_xchg_pointer(p, v) \
	({ \
		if (!__builtin_constant_p(v) || \
		    ((v) != NULL)) \
			wmb(); \
		xchg(p, v); \
	})

/*
 * Exchanges the pointer and waits for a quiescent state.
 * The pointer returned can then be freed.
 */
#define _rcu_publish_content(p, v) \
	({ \
		void *oldptr; \
		oldptr = _rcu_xchg_pointer(p, v); \
		synchronize_rcu(); \
		oldptr; \
	})

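/*
 * Illustrative sketch (not part of this header): replacing a published
 * structure and reclaiming the old copy, assuming urcu-qsbr.h exposes a
 * rcu_publish_content() wrapper for _rcu_publish_content(). Names are
 * hypothetical; whether the first argument is the pointer variable or its
 * address follows the xchg() primitive provided by arch.h, and the address
 * form is assumed here. synchronize_rcu() has already waited for pre-existing
 * readers when the call returns, so the old pointer can be freed directly.
 *
 *	struct node *old;
 *
 *	old = rcu_publish_content(&shared_head, new_node);
 *	free(old);
 */
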
#endif /* _URCU_QSBR_STATIC_H */