Add SIGURCU backward compatibility
#ifndef _URCU_STATIC_H
#define _URCU_STATIC_H

/*
 * urcu-static.h
 *
 * Userspace RCU header.
 *
 * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See urcu.h for linking
 * dynamically with the userspace rcu library.
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */

#include <stdlib.h>
#include <pthread.h>
#include <syscall.h>
#include <unistd.h>

#include <urcu/compiler.h>
#include <urcu/arch.h>
#include <urcu/system.h>
#include <urcu/uatomic_arch.h>
#include <urcu/list.h>
#include <urcu/urcu-futex.h>

#ifdef __cplusplus
extern "C" {
#endif

/* Default is RCU_MEMBARRIER */
#if !defined(RCU_MEMBARRIER) && !defined(RCU_MB) && !defined(RCU_SIGNAL)
#define RCU_MEMBARRIER
#endif

#ifdef RCU_MEMBARRIER
#include <unistd.h>
#include <sys/syscall.h>

/* If the headers do not support SYS_membarrier, statically use RCU_MB */
#ifdef SYS_membarrier
# define MEMBARRIER_EXPEDITED   (1 << 0)
# define MEMBARRIER_DELAYED     (1 << 1)
# define MEMBARRIER_QUERY       (1 << 16)
# define membarrier(...)        syscall(__NR_membarrier, __VA_ARGS__)
#else
# undef RCU_MEMBARRIER
# define RCU_MB
#endif
#endif
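
/*
 * Availability of sys_membarrier() is probed once at library init; the probe
 * itself lives in urcu.c, not in this header.  A minimal sketch of what such
 * a probe looks like, assuming the MEMBARRIER_QUERY/MEMBARRIER_EXPEDITED
 * flags above and the has_sys_membarrier flag declared further down (the
 * function name below is illustrative and the exact code in urcu.c may
 * differ):
 *
 *      #if defined(RCU_MEMBARRIER) && defined(SYS_membarrier)
 *      static void rcu_init_membarrier(void)
 *      {
 *              int mask = membarrier(MEMBARRIER_QUERY);
 *
 *              if (mask >= 0 && (mask & MEMBARRIER_EXPEDITED))
 *                      has_sys_membarrier = 1;
 *      }
 *      #endif
 *
 * When the probe fails, readers fall back to a full smp_mb() in
 * smp_mb_light() below.
 */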

/*
 * This code section can only be included in LGPL 2.1 compatible source code.
 * See below for the function call wrappers which can be used in code meant to
 * be only linked with the Userspace RCU library. This comes with a small
 * performance degradation on the read-side due to the added function calls.
 * This is required to permit relinking with newer versions of the library.
 */

/*
 * The signal number used by the RCU library can be overridden with
 * -DSIGRCU= when compiling the library.
 * Provide backward compatibility for liburcu 0.3.x SIGURCU.
 */
#ifdef SIGURCU
#define SIGRCU SIGURCU
#endif

#ifndef SIGRCU
#define SIGRCU SIGUSR1
#endif
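
/*
 * For example (illustrative values only): building the library with
 * -DSIGRCU=SIGUSR2 makes the signal-based flavor use SIGUSR2, while a legacy
 * liburcu 0.3.x build flag such as -DSIGURCU=SIGUSR2 is mapped onto SIGRCU by
 * the compatibility #define above.  With neither flag, SIGRCU defaults to
 * SIGUSR1.
 */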

/*
 * If a reader is really non-cooperative and refuses to commit its
 * rcu_active_readers count to memory (there is no barrier in the reader
 * per se), kick it after a few loops waiting for it.
 */
#define KICK_READER_LOOPS 10000

/*
 * Active attempts to check for reader Q.S. before calling futex().
 */
#define RCU_QS_ACTIVE_ATTEMPTS 100

#ifdef DEBUG_RCU
#include <assert.h>
#define rcu_assert(args...)     assert(args)
#else
#define rcu_assert(args...)
#endif

#ifdef DEBUG_YIELD
#include <sched.h>
#include <time.h>
#include <pthread.h>
#include <unistd.h>

#define YIELD_READ      (1 << 0)
#define YIELD_WRITE     (1 << 1)

/*
 * Updates with RCU_SIGNAL are much slower. Account for this in the delay.
 */
#ifdef RCU_SIGNAL
/* maximum sleep delay, in us */
#define MAX_SLEEP 30000
#else
#define MAX_SLEEP 50
#endif

extern unsigned int yield_active;
extern unsigned int __thread rand_yield;

static inline void debug_yield_read(void)
{
        if (yield_active & YIELD_READ)
                if (rand_r(&rand_yield) & 0x1)
                        usleep(rand_r(&rand_yield) % MAX_SLEEP);
}

static inline void debug_yield_write(void)
{
        if (yield_active & YIELD_WRITE)
                if (rand_r(&rand_yield) & 0x1)
                        usleep(rand_r(&rand_yield) % MAX_SLEEP);
}

static inline void debug_yield_init(void)
{
        rand_yield = time(NULL) ^ pthread_self();
}
#else
static inline void debug_yield_read(void)
{
}

static inline void debug_yield_write(void)
{
}

static inline void debug_yield_init(void)
{
}
#endif

#ifdef RCU_MEMBARRIER
extern int has_sys_membarrier;

static inline void smp_mb_light(void)
{
        if (likely(has_sys_membarrier))
                barrier();
        else
                smp_mb();
}
#endif

#ifdef RCU_MB
static inline void smp_mb_light(void)
{
        smp_mb();
}
#endif

#ifdef RCU_SIGNAL
static inline void smp_mb_light(void)
{
        barrier();
}
#endif
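
/*
 * smp_mb_light() above is the cheap, read-side half of a barrier pair; the
 * expensive half (referred to as smp_mb_heavy() in the comments further down)
 * is issued by the updater in urcu.c.  A rough sketch of what that
 * updater-side barrier could look like for the RCU_MEMBARRIER flavor,
 * assuming the membarrier() wrapper defined above (the real code in urcu.c
 * may be structured differently, and the RCU_SIGNAL flavor instead sends
 * SIGRCU to every registered reader):
 *
 *      static void smp_mb_heavy(void)
 *      {
 *              if (likely(has_sys_membarrier))
 *                      (void) membarrier(MEMBARRIER_EXPEDITED);
 *              else
 *                      smp_mb();
 *      }
 */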

/*
 * The trick here is that the RCU_GP_CTR_PHASE bit position must be a multiple
 * of 8 so we can use a full 8-bit, 16-bit or 32-bit bitmask for the lower
 * order bits.
 */
#define RCU_GP_COUNT            (1UL << 0)
/* Use the number of bits equal to half of the architecture long size */
#define RCU_GP_CTR_PHASE        (1UL << (sizeof(long) << 2))
#define RCU_GP_CTR_NEST_MASK    (RCU_GP_CTR_PHASE - 1)
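
/*
 * Worked example of the resulting layout (values for common ABIs): on a
 * 64-bit target, RCU_GP_CTR_PHASE == 1UL << 32 and RCU_GP_CTR_NEST_MASK ==
 * 0xffffffffUL, so the low 32 bits of a reader's counter hold the read-side
 * nesting count and bit 32 holds the grace-period phase.  On a 32-bit target
 * the split is 16/16: RCU_GP_CTR_PHASE == 1UL << 16 and RCU_GP_CTR_NEST_MASK
 * == 0xffffUL.
 */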

/*
 * Global quiescent period counter with low-order bits unused.
 * Using an int rather than a char to eliminate false register dependencies
 * causing stalls on some architectures.
 */
extern long rcu_gp_ctr;

struct rcu_reader {
        /* Data used by both reader and synchronize_rcu() */
        long ctr;
        char need_mb;
        /* Data used for registry */
        struct list_head head __attribute__((aligned(CACHE_LINE_SIZE)));
        pthread_t tid;
};

extern struct rcu_reader __thread rcu_reader;
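
/*
 * Each reader thread owns one such thread-local rcu_reader instance.  Its
 * list head is what rcu_register_thread()/rcu_unregister_thread()
 * (implemented in urcu.c) link into the global reader registry that
 * synchronize_rcu() walks.  A hand-wavy sketch, assuming a registry list and
 * its protecting mutex in urcu.c (the names registry and registry_lock are
 * illustrative, not the actual symbols):
 *
 *      void rcu_register_thread(void)
 *      {
 *              rcu_reader.tid = pthread_self();
 *              mutex_lock(&registry_lock);
 *              list_add(&rcu_reader.head, &registry);
 *              mutex_unlock(&registry_lock);
 *      }
 */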

extern int gp_futex;

/*
 * Wake up the waiting synchronize_rcu(). Called from many concurrent threads.
 */
static inline void wake_up_gp(void)
{
        if (unlikely(uatomic_read(&gp_futex) == -1)) {
                uatomic_set(&gp_futex, 0);
                futex_async(&gp_futex, FUTEX_WAKE, 1,
                        NULL, NULL, 0);
        }
}
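
/*
 * This wake-up pairs with the futex wait performed by synchronize_rcu() in
 * urcu.c when readers take too long to reach a quiescent state.  A rough
 * sketch of the waiting side, assuming the gp_futex protocol above (set to -1
 * before sleeping, reset to 0 by the waker); the actual helper in urcu.c may
 * be structured differently:
 *
 *      static void wait_gp(void)
 *      {
 *              if (uatomic_read(&gp_futex) == -1)
 *                      futex_async(&gp_futex, FUTEX_WAIT, -1,
 *                              NULL, NULL, 0);
 *      }
 */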

static inline int rcu_old_gp_ongoing(long *value)
{
        long v;

        if (value == NULL)
                return 0;
        /*
         * Make sure both tests below are done on the same version of *value
         * to ensure consistency.
         */
        v = LOAD_SHARED(*value);
        return (v & RCU_GP_CTR_NEST_MASK) &&
                ((v ^ rcu_gp_ctr) & RCU_GP_CTR_PHASE);
}
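
/*
 * In other words, a reader only holds up the grace period if both conditions
 * hold: its nesting count is non-zero (it is inside a read-side critical
 * section) and the phase bit it snapshotted differs from the current
 * rcu_gp_ctr phase (its critical section started before the updater flipped
 * the phase).  Worked example on a 64-bit target with rcu_gp_ctr ==
 * RCU_GP_CTR_PHASE | RCU_GP_COUNT: a reader whose counter is 0x1 (old phase,
 * nesting 1) keeps the grace period waiting, while readers with counter 0
 * (quiescent) or RCU_GP_CTR_PHASE + 1 (started after the flip) do not.
 */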

static inline void _rcu_read_lock(void)
{
        long tmp;

        tmp = rcu_reader.ctr;
        /*
         * rcu_gp_ctr is
         *   RCU_GP_COUNT | (~RCU_GP_CTR_PHASE or RCU_GP_CTR_PHASE)
         */
        if (likely(!(tmp & RCU_GP_CTR_NEST_MASK))) {
                _STORE_SHARED(rcu_reader.ctr, _LOAD_SHARED(rcu_gp_ctr));
                /*
                 * Set active readers count for outermost nesting level before
                 * accessing the pointer. See smp_mb_heavy().
                 */
                smp_mb_light();
        } else {
                _STORE_SHARED(rcu_reader.ctr, tmp + RCU_GP_COUNT);
        }
}

static inline void _rcu_read_unlock(void)
{
        long tmp;

        tmp = rcu_reader.ctr;
        /*
         * Finish using rcu before decrementing the reader count.
         * See smp_mb_heavy().
         */
        if (likely((tmp & RCU_GP_CTR_NEST_MASK) == RCU_GP_COUNT)) {
                smp_mb_light();
                _STORE_SHARED(rcu_reader.ctr, rcu_reader.ctr - RCU_GP_COUNT);
                /* write rcu_reader.ctr before read futex */
                smp_mb_light();
                wake_up_gp();
        } else {
                _STORE_SHARED(rcu_reader.ctr, rcu_reader.ctr - RCU_GP_COUNT);
        }
}
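
/*
 * Typical read-side usage from LGPL-compatible code, where rcu_read_lock()
 * and rcu_read_unlock() are mapped onto the _rcu_read_lock()/_rcu_read_unlock()
 * inlines above (see urcu.h).  The rcu_dereference() helper comes from the
 * companion pointer headers; the data structure and helper names below are
 * purely illustrative:
 *
 *      struct mystruct *p;
 *
 *      rcu_read_lock();
 *      p = rcu_dereference(global_ptr);
 *      if (p)
 *              do_something_with(p);
 *      rcu_read_unlock();
 *
 * The reader thread must also have called rcu_register_thread() beforehand so
 * that synchronize_rcu() sees its rcu_reader.ctr in the registry.
 */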

#ifdef __cplusplus
}
#endif

#endif /* _URCU_STATIC_H */