#ifndef _URCU_STATIC_H
#define _URCU_STATIC_H

/*
 * urcu-static.h
 *
 * Userspace RCU header.
 *
 * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See urcu.h for linking
 * dynamically with the userspace rcu library.
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */

#include <stdlib.h>
#include <pthread.h>
#include <syscall.h>
#include <unistd.h>

#include <urcu/compiler.h>
#include <urcu/arch.h>
#include <urcu/system.h>
#include <urcu/uatomic_arch.h>
#include <urcu/list.h>
#include <urcu/urcu-futex.h>

#ifdef __cplusplus
extern "C" {
#endif

/* Default is RCU_MEMBARRIER */
#if !defined(RCU_MEMBARRIER) && !defined(RCU_MB) && !defined(RCU_SIGNAL)
#define RCU_MEMBARRIER
#endif

#ifdef RCU_MEMBARRIER
#include <unistd.h>
#include <sys/syscall.h>

/* If the headers do not support SYS_membarrier, statically use RCU_MB */
#ifdef SYS_membarrier
# define MEMBARRIER_EXPEDITED	(1 << 0)
# define MEMBARRIER_DELAYED	(1 << 1)
# define MEMBARRIER_QUERY	(1 << 16)
# define membarrier(...)	syscall(SYS_membarrier, __VA_ARGS__)
#else
# undef RCU_MEMBARRIER
# define RCU_MB
#endif
#endif
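
/*
 * Illustration only (not part of the original header): runtime detection
 * of SYS_membarrier support lives in the library implementation (urcu.c),
 * not here.  A minimal sketch of such a probe, assuming the
 * MEMBARRIER_QUERY command above returns a non-negative value when the
 * syscall is available, could look like:
 *
 *	static void rcu_detect_membarrier(void)
 *	{
 *		has_sys_membarrier = (membarrier(MEMBARRIER_QUERY) >= 0);
 *	}
 *
 * The function name and the exact query semantics are assumptions; see
 * urcu.c for the actual initialization code.
 */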

/*
 * This code section can only be included in LGPL 2.1 compatible source code.
 * See below for the function call wrappers which can be used in code meant to
 * be only linked with the Userspace RCU library. This comes with a small
 * performance degradation on the read-side due to the added function calls.
 * This is required to permit relinking with newer versions of the library.
 */

/*
 * The signal number used by the RCU library can be overridden with
 * -DSIGRCU= when compiling the library.
 * Provide backward compatibility for liburcu 0.3.x SIGURCU.
 */
#ifdef SIGURCU
#define SIGRCU SIGURCU
#endif

#ifndef SIGRCU
#define SIGRCU SIGUSR1
#endif

/*
 * If a reader is really non-cooperative and refuses to commit its
 * rcu_reader.ctr count to memory (there is no barrier in the reader
 * per se), kick it after a few loops waiting for it.
 */
#define KICK_READER_LOOPS 10000

/*
 * Active attempts to check for reader quiescent states before calling futex().
 */
#define RCU_QS_ACTIVE_ATTEMPTS 100

#ifdef DEBUG_RCU
#include <assert.h>
#define rcu_assert(args...)	assert(args)
#else
#define rcu_assert(args...)
#endif

#ifdef DEBUG_YIELD
#include <sched.h>
#include <time.h>
#include <pthread.h>
#include <unistd.h>

#define YIELD_READ	(1 << 0)
#define YIELD_WRITE	(1 << 1)

/*
 * Updates with RCU_SIGNAL are much slower. Account this in the delay.
 */
#ifdef RCU_SIGNAL
/* maximum sleep delay, in us */
#define MAX_SLEEP 30000
#else
#define MAX_SLEEP 50
#endif

extern unsigned int yield_active;
extern unsigned int __thread rand_yield;

static inline void debug_yield_read(void)
{
	if (yield_active & YIELD_READ)
		if (rand_r(&rand_yield) & 0x1)
			usleep(rand_r(&rand_yield) % MAX_SLEEP);
}

static inline void debug_yield_write(void)
{
	if (yield_active & YIELD_WRITE)
		if (rand_r(&rand_yield) & 0x1)
			usleep(rand_r(&rand_yield) % MAX_SLEEP);
}

static inline void debug_yield_init(void)
{
	rand_yield = time(NULL) ^ pthread_self();
}
#else
static inline void debug_yield_read(void)
{
}

static inline void debug_yield_write(void)
{
}

static inline void debug_yield_init(void)
{
}
#endif

/*
 * RCU memory barrier broadcast group. Currently, only broadcast to all process
 * threads is supported (group 0).
 *
 * Slave barriers are only guaranteed to be ordered wrt master barriers.
 *
 * The pair ordering is detailed as (O: ordered, X: not ordered):
 *
 *           slave  master
 *   slave     X      O
 *   master    O      O
 */

#define MB_GROUP_ALL		0
#define RCU_MB_GROUP		MB_GROUP_ALL

#ifdef RCU_MEMBARRIER
extern int has_sys_membarrier;

static inline void smp_mb_slave(int group)
{
	if (likely(has_sys_membarrier))
		barrier();
	else
		smp_mb();
}
#endif

#ifdef RCU_MB
static inline void smp_mb_slave(int group)
{
	smp_mb();
}
#endif

#ifdef RCU_SIGNAL
static inline void smp_mb_slave(int group)
{
	barrier();
}
#endif
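
/*
 * Note (added for clarity): smp_mb_slave() is the read-side half of the
 * slave/master pairing described above; the matching smp_mb_master() is
 * defined in the library implementation (urcu.c).  With RCU_MEMBARRIER
 * and a working sys_membarrier(), or with RCU_SIGNAL, the slave side can
 * be relaxed to a compiler barrier() because the master side forces a
 * memory barrier on every reader thread (via the system call or the
 * SIGRCU signal handler).  With RCU_MB, both sides issue smp_mb().
 */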

/*
 * The trick here is that the bit position of RCU_GP_CTR_PHASE must be a
 * multiple of 8 so we can use a full 8-bit, 16-bit or 32-bit bitmask for
 * the lower order bits.
 */
#define RCU_GP_COUNT		(1UL << 0)
/* Use a number of bits equal to half of the architecture long size. */
#define RCU_GP_CTR_PHASE	(1UL << (sizeof(unsigned long) << 2))
#define RCU_GP_CTR_NEST_MASK	(RCU_GP_CTR_PHASE - 1)
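
/*
 * Worked example (illustration): on an LP64 architecture,
 * sizeof(unsigned long) == 8, so RCU_GP_CTR_PHASE == 1UL << 32 and
 * RCU_GP_CTR_NEST_MASK == 0xffffffffUL.  A reader counter then looks like:
 *
 *	bits 63 .. 33 | bit 32 | bits 31 .. 0
 *	    unused    | phase  | nesting count
 *
 * On a 32-bit long, the phase bit is bit 16 and the nesting count uses
 * the low 16 bits.
 */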

/*
 * Global quiescent period counter with low-order bits unused.
 * Using an unsigned long rather than a char to eliminate false register
 * dependencies causing stalls on some architectures.
 */
extern unsigned long rcu_gp_ctr;

struct rcu_reader {
	/* Data used by both reader and synchronize_rcu() */
	unsigned long ctr;
	char need_mb;
	/* Data used for registry */
	struct list_head node __attribute__((aligned(CACHE_LINE_SIZE)));
	pthread_t tid;
};

extern struct rcu_reader __thread rcu_reader;

extern int gp_futex;

/*
 * Wake up the waiting synchronize_rcu(). Called from many concurrent threads.
 */
static inline void wake_up_gp(void)
{
	if (unlikely(uatomic_read(&gp_futex) == -1)) {
		uatomic_set(&gp_futex, 0);
		futex_async(&gp_futex, FUTEX_WAKE, 1,
			    NULL, NULL, 0);
	}
}
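
/*
 * Note (added for clarity): this pairs with the grace-period waiter in the
 * library implementation (urcu.c), which is expected to set gp_futex to -1
 * before blocking in futex(FUTEX_WAIT).  A reader that observes the -1
 * value here resets it to 0 and issues FUTEX_WAKE, so that synchronize_rcu()
 * does not keep sleeping after the readers have quiesced.
 */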

static inline int rcu_gp_ongoing(unsigned long *ctr)
{
	unsigned long v;

	/*
	 * Make sure both tests below are done on the same version of *ctr
	 * to ensure consistency.
	 */
	v = LOAD_SHARED(*ctr);
	return (v & RCU_GP_CTR_NEST_MASK) &&
		((v ^ rcu_gp_ctr) & RCU_GP_CTR_PHASE);
}
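
/*
 * Worked example (illustration, using the 64-bit layout above): suppose
 * rcu_gp_ctr currently has phase bit 32 set, while a reader published
 * ctr == 0x1 (nesting count 1, old phase).  Then v & RCU_GP_CTR_NEST_MASK
 * is non-zero (the reader is inside a critical section) and
 * (v ^ rcu_gp_ctr) & RCU_GP_CTR_PHASE is non-zero (it entered before the
 * current phase flip), so rcu_gp_ongoing() returns true and the grace
 * period must keep waiting for this reader.
 */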

static inline void _rcu_read_lock(void)
{
	unsigned long tmp;

	tmp = rcu_reader.ctr;
	/*
	 * rcu_gp_ctr is
	 *   RCU_GP_COUNT | (~RCU_GP_CTR_PHASE or RCU_GP_CTR_PHASE)
	 */
	if (likely(!(tmp & RCU_GP_CTR_NEST_MASK))) {
		_STORE_SHARED(rcu_reader.ctr, _LOAD_SHARED(rcu_gp_ctr));
		/*
		 * Set active readers count for outermost nesting level before
		 * accessing the pointer. See smp_mb_master().
		 */
		smp_mb_slave(RCU_MB_GROUP);
	} else {
		_STORE_SHARED(rcu_reader.ctr, tmp + RCU_GP_COUNT);
	}
}

static inline void _rcu_read_unlock(void)
{
	unsigned long tmp;

	tmp = rcu_reader.ctr;
	/*
	 * Finish using rcu before decrementing the active readers count.
	 * See smp_mb_master().
	 */
	if (likely((tmp & RCU_GP_CTR_NEST_MASK) == RCU_GP_COUNT)) {
		smp_mb_slave(RCU_MB_GROUP);
		_STORE_SHARED(rcu_reader.ctr, rcu_reader.ctr - RCU_GP_COUNT);
		/* write rcu_reader.ctr before reading the futex */
		smp_mb_slave(RCU_MB_GROUP);
		wake_up_gp();
	} else {
		_STORE_SHARED(rcu_reader.ctr, rcu_reader.ctr - RCU_GP_COUNT);
	}
}
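
/*
 * Usage sketch (illustration, not part of the original header): a
 * reader-side critical section built on the static inlines above.
 * Assumes the thread has been registered with rcu_register_thread() and
 * that _rcu_dereference() is available (see urcu-pointer-static.h).
 *
 *	struct foo *p;
 *
 *	_rcu_read_lock();
 *	p = _rcu_dereference(global_foo_ptr);
 *	if (p)
 *		do_something_with(p);
 *	_rcu_read_unlock();
 *
 * struct foo, global_foo_ptr and do_something_with() are hypothetical
 * names used only for this example.
 */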

#ifdef __cplusplus
}
#endif

#endif /* _URCU_STATIC_H */