Android: implement rand_r()
[urcu.git] / urcu/static/urcu.h
#ifndef _URCU_STATIC_H
#define _URCU_STATIC_H

/*
 * urcu-static.h
 *
 * Userspace RCU header.
 *
 * TO BE INCLUDED ONLY IN CODE THAT IS TO BE RECOMPILED ON EACH LIBURCU
 * RELEASE. See urcu.h for linking dynamically with the userspace rcu library.
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */

#include <stdlib.h>
#include <pthread.h>
#include <unistd.h>
#include <stdint.h>

#include <urcu/compiler.h>
#include <urcu/arch.h>
#include <urcu/system.h>
#include <urcu/uatomic.h>
#include <urcu/list.h>
#include <urcu/futex.h>
#include <urcu/tls-compat.h>
#include <urcu/rand-compat.h>

#ifdef __cplusplus
extern "C" {
#endif

/* Default is RCU_MEMBARRIER */
#if !defined(RCU_MEMBARRIER) && !defined(RCU_MB) && !defined(RCU_SIGNAL)
#define RCU_MEMBARRIER
#endif

/*
 * This code section can only be included in LGPL 2.1 compatible source code.
 * See below for the function call wrappers which can be used in code meant to
 * be only linked with the Userspace RCU library. This comes with a small
 * performance degradation on the read-side due to the added function calls.
 * This is required to permit relinking with newer versions of the library.
 */

/*
 * The signal number used by the RCU library can be overridden with
 * -DSIGRCU= when compiling the library.
 * Provide backward compatibility for liburcu 0.3.x SIGURCU.
 */
#ifdef SIGURCU
#define SIGRCU SIGURCU
#endif

#ifndef SIGRCU
#define SIGRCU SIGUSR1
#endif
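
/*
 * For example (purely illustrative), a signal-flavour build overriding the
 * default could be compiled along the lines of:
 *
 *	cc -DRCU_SIGNAL -DSIGRCU=SIGUSR2 -c urcu.c
 *
 * SIGUSR2 here is only an example; whichever signal is chosen is taken over
 * by the library and should not be used by the application for other
 * purposes.
 */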

enum rcu_state {
	RCU_READER_ACTIVE_CURRENT,
	RCU_READER_ACTIVE_OLD,
	RCU_READER_INACTIVE,
};

#ifdef DEBUG_RCU
#include <assert.h>
#define rcu_assert(args...)	assert(args)
#else
#define rcu_assert(args...)
#endif

#ifdef DEBUG_YIELD
#include <sched.h>
#include <time.h>
#include <pthread.h>
#include <unistd.h>

#define RCU_YIELD_READ	(1 << 0)
#define RCU_YIELD_WRITE	(1 << 1)

/*
 * Updates with RCU_SIGNAL are much slower. Account for this in the delay.
 */
#ifdef RCU_SIGNAL
/* maximum sleep delay, in us */
#define MAX_SLEEP 30000
#else
#define MAX_SLEEP 50
#endif

extern unsigned int rcu_yield_active;
extern DECLARE_URCU_TLS(unsigned int, rcu_rand_yield);

static inline void rcu_debug_yield_read(void)
{
	if (rcu_yield_active & RCU_YIELD_READ)
		if (rand_r(&URCU_TLS(rcu_rand_yield)) & 0x1)
			usleep(rand_r(&URCU_TLS(rcu_rand_yield)) % MAX_SLEEP);
}

static inline void rcu_debug_yield_write(void)
{
	if (rcu_yield_active & RCU_YIELD_WRITE)
		if (rand_r(&URCU_TLS(rcu_rand_yield)) & 0x1)
			usleep(rand_r(&URCU_TLS(rcu_rand_yield)) % MAX_SLEEP);
}

static inline void rcu_debug_yield_init(void)
{
	URCU_TLS(rcu_rand_yield) = time(NULL) ^ (unsigned long) pthread_self();
}
#else
static inline void rcu_debug_yield_read(void)
{
}

static inline void rcu_debug_yield_write(void)
{
}

static inline void rcu_debug_yield_init(void)
{
}
#endif

/*
 * RCU memory barrier broadcast group. Currently, only broadcast to all process
 * threads is supported (group 0).
 *
 * Slave barriers are only guaranteed to be ordered wrt master barriers.
 *
 * The pair ordering is detailed as (O: ordered, X: not ordered) :
 *               slave  master
 *        slave    X      O
 *        master   O      O
 */

#define MB_GROUP_ALL		0
#define RCU_MB_GROUP		MB_GROUP_ALL

#ifdef RCU_MEMBARRIER
extern int rcu_has_sys_membarrier;

static inline void smp_mb_slave(int group)
{
	if (caa_likely(rcu_has_sys_membarrier))
		cmm_barrier();
	else
		cmm_smp_mb();
}
#endif

#ifdef RCU_MB
static inline void smp_mb_slave(int group)
{
	cmm_smp_mb();
}
#endif

#ifdef RCU_SIGNAL
static inline void smp_mb_slave(int group)
{
	cmm_barrier();
}
#endif

/*
 * The trick here is that RCU_GP_CTR_PHASE must be a multiple of 8 so we can use
 * a full 8-bits, 16-bits or 32-bits bitmask for the lower order bits.
 */
#define RCU_GP_COUNT		(1UL << 0)
/* Use the amount of bits equal to half of the architecture long size */
#define RCU_GP_CTR_PHASE	(1UL << (sizeof(unsigned long) << 2))
#define RCU_GP_CTR_NEST_MASK	(RCU_GP_CTR_PHASE - 1)
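
/*
 * Worked example (illustrative): on an LP64 architecture sizeof(unsigned
 * long) == 8, so RCU_GP_CTR_PHASE == (1UL << 32) and RCU_GP_CTR_NEST_MASK ==
 * 0xffffffffUL.  A reader counter value of 0x100000002 then decodes as
 * "phase bit set, nesting depth 2", while 0x3 decodes as "phase bit clear,
 * nesting depth 3".
 */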

struct rcu_gp {
	/*
	 * Global grace period counter.
	 * Contains the current RCU_GP_CTR_PHASE.
	 * Also has a RCU_GP_COUNT of 1, to accelerate the reader fast path.
	 * Written to only by writer with mutex taken.
	 * Read by both writer and readers.
	 */
	unsigned long ctr;

	int32_t futex;
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));

extern struct rcu_gp rcu_gp;

struct rcu_reader {
	/* Data used by both reader and synchronize_rcu() */
	unsigned long ctr;
	char need_mb;
	/* Data used for registry */
	struct cds_list_head node __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
	pthread_t tid;
};

extern DECLARE_URCU_TLS(struct rcu_reader, rcu_reader);

/*
 * Wake up the waiting synchronize_rcu(). Called from many concurrent threads.
 */
static inline void wake_up_gp(void)
{
	if (caa_unlikely(uatomic_read(&rcu_gp.futex) == -1)) {
		uatomic_set(&rcu_gp.futex, 0);
		futex_async(&rcu_gp.futex, FUTEX_WAKE, 1,
				NULL, NULL, 0);
	}
}
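
/*
 * This is the reader-side half of the grace-period futex handshake.  A
 * simplified sketch of the update side (the real code lives in urcu.c; this
 * is an illustration of the protocol, not the actual implementation):
 *
 *	uatomic_set(&rcu_gp.futex, -1);
 *	... re-scan the reader registry, then sleep only if still needed ...
 *	if (uatomic_read(&rcu_gp.futex) == -1)
 *		futex_async(&rcu_gp.futex, FUTEX_WAIT, -1, NULL, NULL, 0);
 *
 * A reader leaving its outermost critical section observes the -1, resets
 * the futex to 0 and issues FUTEX_WAKE, as done above.
 */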

static inline enum rcu_state rcu_reader_state(unsigned long *ctr)
{
	unsigned long v;

	/*
	 * Make sure both tests below are done on the same version of *ctr
	 * to ensure consistency.
	 */
	v = CMM_LOAD_SHARED(*ctr);
	if (!(v & RCU_GP_CTR_NEST_MASK))
		return RCU_READER_INACTIVE;
	if (!((v ^ rcu_gp.ctr) & RCU_GP_CTR_PHASE))
		return RCU_READER_ACTIVE_CURRENT;
	return RCU_READER_ACTIVE_OLD;
}
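
/*
 * Sketch of how the update side can use this classification (the real loop
 * lives in urcu.c and also handles futex-based waiting; "registry" names the
 * writer-side list of registered struct rcu_reader and is used here purely
 * for illustration):
 *
 *	struct rcu_reader *index;
 *	int old_readers_remain = 0;
 *
 *	cds_list_for_each_entry(index, &registry, node) {
 *		if (rcu_reader_state(&index->ctr) == RCU_READER_ACTIVE_OLD)
 *			old_readers_remain = 1;
 *	}
 *
 * The writer keeps re-scanning (sleeping on rcu_gp.futex in between) until
 * no reader is left in RCU_READER_ACTIVE_OLD; readers classified as
 * RCU_READER_INACTIVE or RCU_READER_ACTIVE_CURRENT cannot extend the grace
 * period and need not be waited for.
 */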

/*
 * Helper for _rcu_read_lock().  The format of rcu_gp.ctr (as well as
 * the per-thread rcu_reader.ctr) has the upper bits containing a count of
 * _rcu_read_lock() nesting, and a lower-order bit that contains either zero
 * or RCU_GP_CTR_PHASE.  The smp_mb_slave() ensures that the accesses in
 * _rcu_read_lock() happen before the subsequent read-side critical section.
 */
static inline void _rcu_read_lock_update(unsigned long tmp)
{
	if (caa_likely(!(tmp & RCU_GP_CTR_NEST_MASK))) {
		_CMM_STORE_SHARED(URCU_TLS(rcu_reader).ctr, _CMM_LOAD_SHARED(rcu_gp.ctr));
		smp_mb_slave(RCU_MB_GROUP);
	} else
		_CMM_STORE_SHARED(URCU_TLS(rcu_reader).ctr, tmp + RCU_GP_COUNT);
}

/*
 * Enter an RCU read-side critical section.
 *
 * The first cmm_barrier() call ensures that the compiler does not reorder
 * the body of _rcu_read_lock() with a mutex.
 *
 * This function and its helper are both less than 10 lines long.  The
 * intent is that this function meets the 10-line criterion in LGPL,
 * allowing this function to be invoked directly from non-LGPL code.
 */
static inline void _rcu_read_lock(void)
{
	unsigned long tmp;

	cmm_barrier();
	tmp = URCU_TLS(rcu_reader).ctr;
	_rcu_read_lock_update(tmp);
}

/*
 * This is a helper function for _rcu_read_unlock().
 *
 * The first smp_mb_slave() call ensures that the critical section is
 * seen to precede the store to rcu_reader.ctr.
 * The second smp_mb_slave() call ensures that we write to rcu_reader.ctr
 * before reading the update-side futex.
 */
static inline void _rcu_read_unlock_update_and_wakeup(unsigned long tmp)
{
	if (caa_likely((tmp & RCU_GP_CTR_NEST_MASK) == RCU_GP_COUNT)) {
		smp_mb_slave(RCU_MB_GROUP);
		_CMM_STORE_SHARED(URCU_TLS(rcu_reader).ctr, URCU_TLS(rcu_reader).ctr - RCU_GP_COUNT);
		smp_mb_slave(RCU_MB_GROUP);
		wake_up_gp();
	} else
		_CMM_STORE_SHARED(URCU_TLS(rcu_reader).ctr, URCU_TLS(rcu_reader).ctr - RCU_GP_COUNT);
}

/*
 * Exit an RCU read-side critical section.  Both this function and its
 * helper are smaller than 10 lines of code, and are intended to be
 * usable by non-LGPL code, as called out in LGPL.
 */
static inline void _rcu_read_unlock(void)
{
	unsigned long tmp;

	tmp = URCU_TLS(rcu_reader).ctr;
	_rcu_read_unlock_update_and_wakeup(tmp);
	cmm_barrier();	/* Ensure the compiler does not reorder us with mutex */
}
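
/*
 * Nesting, traced step by step (illustrative; assume rcu_gp.ctr ==
 * RCU_GP_COUNT with the phase bit clear): the outermost _rcu_read_lock()
 * copies rcu_gp.ctr, so rcu_reader.ctr becomes 1 (nesting 1, current phase);
 * a nested _rcu_read_lock() just adds RCU_GP_COUNT, giving 2; the inner
 * _rcu_read_unlock() brings it back to 1, and the outermost one to 0, at
 * which point rcu_reader_state() reports RCU_READER_INACTIVE again.
 */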

/*
 * Returns whether within an RCU read-side critical section.
 *
 * This function is less than 10 lines long.  The intent is that this
 * function meets the 10-line criterion for LGPL, allowing this function
 * to be invoked directly from non-LGPL code.
 */
static inline int _rcu_read_ongoing(void)
{
	return URCU_TLS(rcu_reader).ctr & RCU_GP_CTR_NEST_MASK;
}
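
/*
 * Typical read side, as a sketch.  Applications normally reach these inlines
 * through the public rcu_read_lock()/rcu_read_unlock() names provided by
 * urcu.h (directly when built with _LGPL_SOURCE, otherwise via the library
 * wrappers); "foo_head" and do_something_with() are illustrative only:
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(foo_head);
 *	if (p)
 *		do_something_with(p);
 *	rcu_read_unlock();
 *
 * Each reader thread must have called rcu_register_thread() beforehand, so
 * that its struct rcu_reader is part of the registry that synchronize_rcu()
 * scans.
 */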

#ifdef __cplusplus
}
#endif

#endif /* _URCU_STATIC_H */