urcu.git: urcu/static/urcu.h
commit: Fix undefined NULL pointer arithmetic
#ifndef _URCU_STATIC_H
#define _URCU_STATIC_H

/*
 * urcu-static.h
 *
 * Userspace RCU header.
 *
 * TO BE INCLUDED ONLY IN CODE THAT IS TO BE RECOMPILED ON EACH LIBURCU
 * RELEASE. See urcu.h for linking dynamically with the userspace rcu library.
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */

#include <stdlib.h>
#include <pthread.h>
#include <unistd.h>
#include <stdint.h>

#include <urcu/compiler.h>
#include <urcu/arch.h>
#include <urcu/system.h>
#include <urcu/uatomic.h>
#include <urcu/list.h>
#include <urcu/futex.h>
#include <urcu/tls-compat.h>
#include <urcu/rand-compat.h>

#ifdef __cplusplus
extern "C" {
#endif

/* Default is RCU_MEMBARRIER */
#if !defined(RCU_MEMBARRIER) && !defined(RCU_MB) && !defined(RCU_SIGNAL)
#define RCU_MEMBARRIER
#endif

/*
 * This code section can only be included in LGPL 2.1 compatible source code.
 * See below for the function call wrappers which can be used in code meant to
 * be only linked with the Userspace RCU library. This comes with a small
 * performance degradation on the read-side due to the added function calls.
 * This is required to permit relinking with newer versions of the library.
 */

/*
 * The signal number used by the RCU library can be overridden with
 * -DSIGRCU= when compiling the library.
 * Provide backward compatibility for liburcu 0.3.x SIGURCU.
 */
#ifdef SIGURCU
#define SIGRCU SIGURCU
#endif

#ifndef SIGRCU
#define SIGRCU SIGUSR1
#endif
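
/*
 * For example, building with a flag such as -DSIGRCU=SIGUSR2 selects a
 * different signal (SIGUSR2 here is purely illustrative; any signal not
 * otherwise used by the application and its libraries should do).
 */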

enum rcu_state {
	RCU_READER_ACTIVE_CURRENT,
	RCU_READER_ACTIVE_OLD,
	RCU_READER_INACTIVE,
};

#ifdef DEBUG_RCU
#include <assert.h>	/* needed: rcu_assert() expands to assert() */
#define rcu_assert(args...)	assert(args)
#else
#define rcu_assert(args...)
#endif

#ifdef DEBUG_YIELD
#include <sched.h>
#include <time.h>
#include <pthread.h>
#include <unistd.h>

#define RCU_YIELD_READ	(1 << 0)
#define RCU_YIELD_WRITE	(1 << 1)

/*
 * Updates with RCU_SIGNAL are much slower. Account this in the delay.
 */
#ifdef RCU_SIGNAL
/* maximum sleep delay, in us */
#define MAX_SLEEP 30000
#else
#define MAX_SLEEP 50
#endif

extern unsigned int rcu_yield_active;
extern DECLARE_URCU_TLS(unsigned int, rcu_rand_yield);

static inline void rcu_debug_yield_read(void)
{
	if (rcu_yield_active & RCU_YIELD_READ)
		if (rand_r(&URCU_TLS(rcu_rand_yield)) & 0x1)
			usleep(rand_r(&URCU_TLS(rcu_rand_yield)) % MAX_SLEEP);
}

static inline void rcu_debug_yield_write(void)
{
	if (rcu_yield_active & RCU_YIELD_WRITE)
		if (rand_r(&URCU_TLS(rcu_rand_yield)) & 0x1)
			usleep(rand_r(&URCU_TLS(rcu_rand_yield)) % MAX_SLEEP);
}

static inline void rcu_debug_yield_init(void)
{
	URCU_TLS(rcu_rand_yield) = time(NULL) ^ (unsigned long) pthread_self();
}
#else
static inline void rcu_debug_yield_read(void)
{
}

static inline void rcu_debug_yield_write(void)
{
}

static inline void rcu_debug_yield_init(void)
{

}
#endif

/*
 * RCU memory barrier broadcast group. Currently, only broadcast to all process
 * threads is supported (group 0).
 *
 * Slave barriers are only guaranteed to be ordered wrt master barriers.
 *
 * The pair ordering is detailed as (O: ordered, X: not ordered) :
 *               slave     master
 *   slave         X         O
 *   master        O         O
 */

#define MB_GROUP_ALL		0
#define RCU_MB_GROUP		MB_GROUP_ALL

#ifdef RCU_MEMBARRIER
extern int rcu_has_sys_membarrier;

static inline void smp_mb_slave(int group)
{
	if (caa_likely(rcu_has_sys_membarrier))
		cmm_barrier();
	else
		cmm_smp_mb();
}
#endif

#ifdef RCU_MB
static inline void smp_mb_slave(int group)
{
	cmm_smp_mb();
}
#endif

#ifdef RCU_SIGNAL
static inline void smp_mb_slave(int group)
{
	cmm_barrier();
}
#endif

/*
 * The trick here is that RCU_GP_CTR_PHASE must be a multiple of 8 so we can use
 * a full 8-bits, 16-bits or 32-bits bitmask for the lower order bits.
 */
#define RCU_GP_COUNT		(1UL << 0)
/* Use the amount of bits equal to half of the architecture long size */
#define RCU_GP_CTR_PHASE	(1UL << (sizeof(unsigned long) << 2))
#define RCU_GP_CTR_NEST_MASK	(RCU_GP_CTR_PHASE - 1)
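
/*
 * Illustration, assuming an LP64 target where sizeof(unsigned long) == 8
 * (a 32-bit long gives 1UL << 16 and a 16-bit mask instead):
 *
 *   RCU_GP_COUNT         == 0x0000000000000001
 *   RCU_GP_CTR_PHASE     == 0x0000000100000000	(1UL << 32)
 *   RCU_GP_CTR_NEST_MASK == 0x00000000ffffffff
 *
 * The low half of the counter thus holds the read-side nesting count,
 * while the phase bit sits immediately above it.
 */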

struct rcu_gp {
	/*
	 * Global grace period counter.
	 * Contains the current RCU_GP_CTR_PHASE.
	 * Also has a RCU_GP_COUNT of 1, to accelerate the reader fast path.
	 * Written to only by writer with mutex taken.
	 * Read by both writer and readers.
	 */
	unsigned long ctr;

	int32_t futex;
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));

extern struct rcu_gp rcu_gp;

struct rcu_reader {
	/* Data used by both reader and synchronize_rcu() */
	unsigned long ctr;
	char need_mb;
	/* Data used for registry */
	struct cds_list_head node __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
	pthread_t tid;
};

extern DECLARE_URCU_TLS(struct rcu_reader, rcu_reader);

/*
 * Wake-up waiting synchronize_rcu(). Called from many concurrent threads.
 */
static inline void wake_up_gp(void)
{
	if (caa_unlikely(uatomic_read(&rcu_gp.futex) == -1)) {
		uatomic_set(&rcu_gp.futex, 0);
		futex_async(&rcu_gp.futex, FUTEX_WAKE, 1,
			NULL, NULL, 0);
	}
}

static inline enum rcu_state rcu_reader_state(unsigned long *ctr)
{
	unsigned long v;

	/*
	 * Make sure both tests below are done on the same version of *ctr
	 * to ensure consistency.
	 */
	v = CMM_LOAD_SHARED(*ctr);
	if (!(v & RCU_GP_CTR_NEST_MASK))
		return RCU_READER_INACTIVE;
	if (!((v ^ rcu_gp.ctr) & RCU_GP_CTR_PHASE))
		return RCU_READER_ACTIVE_CURRENT;
	return RCU_READER_ACTIVE_OLD;
}

/*
 * Helper for _rcu_read_lock(). The format of rcu_gp.ctr (as well as
 * the per-thread rcu_reader.ctr) has the lower-order bits containing a count
 * of _rcu_read_lock() nesting, and a single higher-order bit that contains
 * either zero or RCU_GP_CTR_PHASE. The smp_mb_slave() ensures that the
 * accesses in _rcu_read_lock() happen before the subsequent read-side
 * critical section.
 */
static inline void _rcu_read_lock_update(unsigned long tmp)
{
	if (caa_likely(!(tmp & RCU_GP_CTR_NEST_MASK))) {
		_CMM_STORE_SHARED(URCU_TLS(rcu_reader).ctr, _CMM_LOAD_SHARED(rcu_gp.ctr));
		smp_mb_slave(RCU_MB_GROUP);
	} else
		_CMM_STORE_SHARED(URCU_TLS(rcu_reader).ctr, tmp + RCU_GP_COUNT);
}
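
/*
 * Worked example, assuming LP64 and a current rcu_gp.ctr of RCU_GP_COUNT
 * (phase bit clear):
 *
 *   initial URCU_TLS(rcu_reader).ctr        0x0000000000000000  (inactive)
 *   after outermost _rcu_read_lock()        0x0000000000000001  (snapshot of rcu_gp.ctr)
 *   after one nested _rcu_read_lock()       0x0000000000000002  (tmp + RCU_GP_COUNT)
 *   after the inner _rcu_read_unlock()      0x0000000000000001
 *   after the outermost _rcu_read_unlock()  0x0000000000000000  (inactive again)
 *
 * Had the writer flipped the phase first, the outermost lock would snapshot
 * a value such as 0x0000000100000001 instead.
 */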

/*
 * Enter an RCU read-side critical section.
 *
 * The first cmm_barrier() call ensures that the compiler does not reorder
 * the body of _rcu_read_lock() with a mutex.
 *
 * This function and its helper are both less than 10 lines long. The
 * intent is that this function meets the 10-line criterion in LGPL,
 * allowing this function to be invoked directly from non-LGPL code.
 */
static inline void _rcu_read_lock(void)
{
	unsigned long tmp;

	cmm_barrier();
	tmp = URCU_TLS(rcu_reader).ctr;
	_rcu_read_lock_update(tmp);
}
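
/*
 * Minimal read-side usage sketch. It assumes the thread has already been
 * registered (rcu_register_thread() from urcu.h) and that rcu_dereference()
 * is available from urcu-pointer.h; struct foo, shared_ptr and use() are
 * placeholders:
 *
 *	struct foo *p;
 *
 *	_rcu_read_lock();
 *	p = rcu_dereference(shared_ptr);
 *	if (p)
 *		use(p);
 *	_rcu_read_unlock();
 */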

/*
 * This is a helper function for _rcu_read_unlock().
 *
 * The first smp_mb_slave() call ensures that the critical section is
 * seen to precede the store to rcu_reader.ctr.
 * The second smp_mb_slave() call ensures that we write to rcu_reader.ctr
 * before reading the update-side futex.
 */
static inline void _rcu_read_unlock_update_and_wakeup(unsigned long tmp)
{
	if (caa_likely((tmp & RCU_GP_CTR_NEST_MASK) == RCU_GP_COUNT)) {
		smp_mb_slave(RCU_MB_GROUP);
		_CMM_STORE_SHARED(URCU_TLS(rcu_reader).ctr, URCU_TLS(rcu_reader).ctr - RCU_GP_COUNT);
		smp_mb_slave(RCU_MB_GROUP);
		wake_up_gp();
	} else
		_CMM_STORE_SHARED(URCU_TLS(rcu_reader).ctr, URCU_TLS(rcu_reader).ctr - RCU_GP_COUNT);
}

/*
 * Exit an RCU read-side critical section. Both this function and its
 * helper are smaller than 10 lines of code, and are intended to be
 * usable by non-LGPL code, as called out in LGPL.
 */
static inline void _rcu_read_unlock(void)
{
	unsigned long tmp;

	tmp = URCU_TLS(rcu_reader).ctr;
	_rcu_read_unlock_update_and_wakeup(tmp);
	cmm_barrier();	/* Ensure the compiler does not reorder us with mutex */
}

/*
 * Returns whether within an RCU read-side critical section.
 *
 * This function is less than 10 lines long. The intent is that this
 * function meets the 10-line criterion for LGPL, allowing this function
 * to be invoked directly from non-LGPL code.
 */
static inline int _rcu_read_ongoing(void)
{
	return URCU_TLS(rcu_reader).ctr & RCU_GP_CTR_NEST_MASK;
}
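
/*
 * A typical (hypothetical) use is a debug-build check that a lookup helper
 * really runs under the read lock:
 *
 *	rcu_assert(_rcu_read_ongoing());
 */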

#ifdef __cplusplus
}
#endif

#endif /* _URCU_STATIC_H */