#ifndef _URCU_STATIC_H
#define _URCU_STATIC_H

/*
 * urcu-static.h
 *
 * Userspace RCU header.
 *
 * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See urcu.h for linking
 * dynamically with the userspace rcu library.
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */

#include <stdlib.h>
#include <pthread.h>
#include <unistd.h>
#include <stdint.h>

#include <urcu/compiler.h>
#include <urcu/arch.h>
#include <urcu/system.h>
#include <urcu/uatomic.h>
#include <urcu/list.h>
#include <urcu/futex.h>

#ifdef __cplusplus
extern "C" {
#endif

/* Default is RCU_MEMBARRIER */
#if !defined(RCU_MEMBARRIER) && !defined(RCU_MB) && !defined(RCU_SIGNAL)
#define RCU_MEMBARRIER
#endif

/*
 * RCU_MEMBARRIER is only possibly available on Linux. Fall back to RCU_MB
 * otherwise.
 */
#if !defined(__linux__) && defined(RCU_MEMBARRIER)
#undef RCU_MEMBARRIER
#define RCU_MB
#endif

#ifdef RCU_MEMBARRIER
#include <syscall.h>

/* If the headers do not provide SYS_membarrier, statically fall back to RCU_MB */
#ifdef SYS_membarrier
# define MEMBARRIER_EXPEDITED	(1 << 0)
# define MEMBARRIER_DELAYED	(1 << 1)
# define MEMBARRIER_QUERY	(1 << 16)
# define membarrier(...)	syscall(SYS_membarrier, __VA_ARGS__)
#else
# undef RCU_MEMBARRIER
# define RCU_MB
#endif
#endif

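/*
 * Illustration (hypothetical, not part of this header's API): a minimal
 * sketch of how initialization code could probe at run time whether the
 * kernel actually implements the membarrier system call wrapped above.
 * The name example_probe_sys_membarrier() is made up, and the sketch only
 * assumes the generic syscall convention that an unimplemented syscall
 * returns -1 (ENOSYS); the real library's detection logic may differ.
 */
static inline int example_probe_sys_membarrier(void)
{
#if defined(RCU_MEMBARRIER) && defined(SYS_membarrier)
	/* A query call that does not fail means the syscall exists. */
	return membarrier(MEMBARRIER_QUERY) != -1;
#else
	return 0;
#endif
}
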
/*
 * This code section can only be included in LGPL 2.1 compatible source code.
 * See urcu.h for the function call wrappers which can be used in code meant to
 * be only linked with the Userspace RCU library. This comes with a small
 * performance degradation on the read-side due to the added function calls.
 * This is required to permit relinking with newer versions of the library.
 */

/*
 * The signal number used by the RCU library can be overridden with
 * -DSIGRCU= when compiling the library.
 * Provide backward compatibility for liburcu 0.3.x SIGURCU.
 */
#ifdef SIGURCU
#define SIGRCU SIGURCU
#endif

#ifndef SIGRCU
#define SIGRCU SIGUSR1
#endif

#ifdef DEBUG_RCU
#include <assert.h>
#define rcu_assert(args...)	assert(args)
#else
#define rcu_assert(args...)
#endif

#ifdef DEBUG_YIELD
#include <sched.h>
#include <time.h>
#include <pthread.h>
#include <unistd.h>

#define YIELD_READ	(1 << 0)
#define YIELD_WRITE	(1 << 1)

/*
 * Updates with RCU_SIGNAL are much slower. Account for this in the delay.
 */
#ifdef RCU_SIGNAL
/* maximum sleep delay, in us */
#define MAX_SLEEP 30000
#else
#define MAX_SLEEP 50
#endif

extern unsigned int yield_active;
extern unsigned int __thread rand_yield;

static inline void debug_yield_read(void)
{
	if (yield_active & YIELD_READ)
		if (rand_r(&rand_yield) & 0x1)
			usleep(rand_r(&rand_yield) % MAX_SLEEP);
}

static inline void debug_yield_write(void)
{
	if (yield_active & YIELD_WRITE)
		if (rand_r(&rand_yield) & 0x1)
			usleep(rand_r(&rand_yield) % MAX_SLEEP);
}

static inline void debug_yield_init(void)
{
	rand_yield = time(NULL) ^ (unsigned long) pthread_self();
}
#else
static inline void debug_yield_read(void)
{
}

static inline void debug_yield_write(void)
{
}

static inline void debug_yield_init(void)
{
}
#endif

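/*
 * Illustration (hypothetical helper, not part of this header's API): how a
 * stress test built with -DDEBUG_YIELD could enable the yield points above
 * to widen race windows. Without -DDEBUG_YIELD, the debug_yield_*() calls
 * compile to empty inline functions, so instrumented call sites cost
 * nothing in regular builds.
 */
static inline void example_enable_debug_yields(void)
{
#ifdef DEBUG_YIELD
	/* Let both read-side and write-side paths sleep at random. */
	yield_active = YIELD_READ | YIELD_WRITE;
#endif
	debug_yield_init();
}
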
/*
 * RCU memory barrier broadcast group. Currently, only broadcast to all process
 * threads is supported (group 0).
 *
 * Slave barriers are only guaranteed to be ordered wrt master barriers.
 *
 * The pair ordering is detailed as (O: ordered, X: not ordered):
 *			slave	master
 *	slave		X	O
 *	master		O	O
 */

#define MB_GROUP_ALL		0
#define RCU_MB_GROUP		MB_GROUP_ALL

#ifdef RCU_MEMBARRIER
extern int has_sys_membarrier;

static inline void smp_mb_slave(int group)
{
	if (caa_likely(has_sys_membarrier))
		cmm_barrier();
	else
		cmm_smp_mb();
}
#endif

#ifdef RCU_MB
static inline void smp_mb_slave(int group)
{
	cmm_smp_mb();
}
#endif

#ifdef RCU_SIGNAL
static inline void smp_mb_slave(int group)
{
	cmm_barrier();
}
#endif

/*
 * The trick here is that the RCU_GP_CTR_PHASE bit position must be a multiple
 * of 8 so we can use a full 8-bit, 16-bit or 32-bit bitmask for the lower-order
 * bits.
 */
#define RCU_GP_COUNT		(1UL << 0)
/* Use the number of bits equal to half of the architecture long size */
#define RCU_GP_CTR_PHASE	(1UL << (sizeof(unsigned long) << 2))
#define RCU_GP_CTR_NEST_MASK	(RCU_GP_CTR_PHASE - 1)

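/*
 * Illustration (hypothetical helpers, not part of this header's API): how a
 * per-reader counter value splits under the macros above. On an LP64 target
 * RCU_GP_CTR_PHASE is bit 32 and RCU_GP_CTR_NEST_MASK covers the low 32 bits;
 * on ILP32 the split is at bit 16.
 */
static inline unsigned long example_nesting_depth(unsigned long ctr)
{
	return ctr & RCU_GP_CTR_NEST_MASK;	/* read-side lock nesting count */
}

static inline unsigned long example_gp_phase(unsigned long ctr)
{
	return ctr & RCU_GP_CTR_PHASE;		/* grace-period parity bit */
}
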
/*
 * Global quiescent period counter with low-order bits unused.
 * Using a full-width integer rather than a char to eliminate false register
 * dependencies causing stalls on some architectures.
 */
extern unsigned long rcu_gp_ctr;

struct rcu_reader {
	/* Data used by both reader and synchronize_rcu() */
	unsigned long ctr;
	char need_mb;
	/* Data used for registry */
	struct cds_list_head node __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
	pthread_t tid;
};

extern struct rcu_reader __thread rcu_reader;

extern int32_t gp_futex;

/*
 * Wake up waiting synchronize_rcu(). Called from many concurrent threads.
 */
static inline void wake_up_gp(void)
{
	if (caa_unlikely(uatomic_read(&gp_futex) == -1)) {
		uatomic_set(&gp_futex, 0);
		futex_async(&gp_futex, FUTEX_WAKE, 1,
			NULL, NULL, 0);
	}
}

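/*
 * Illustration (simplified sketch, NOT the library's actual wait code): the
 * other side of the futex protocol. The grace-period waiter publishes -1 in
 * gp_futex before blocking, and readers leaving their outermost critical
 * section call wake_up_gp() above. Real code must re-check reader state
 * between the store and the futex wait to avoid missing a wakeup.
 */
static inline void example_wait_gp(void)
{
	uatomic_set(&gp_futex, -1);
	cmm_smp_mb();	/* order the store before the (omitted) reader re-check */
	futex_async(&gp_futex, FUTEX_WAIT, -1, NULL, NULL, 0);
}
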
static inline int rcu_gp_ongoing(unsigned long *ctr)
{
	unsigned long v;

	/*
	 * Make sure both tests below are done on the same version of *ctr
	 * to ensure consistency.
	 */
	v = CMM_LOAD_SHARED(*ctr);
	return (v & RCU_GP_CTR_NEST_MASK) &&
		((v ^ rcu_gp_ctr) & RCU_GP_CTR_PHASE);
}

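/*
 * Illustration (simplified sketch, NOT the library's synchronize_rcu()): the
 * writer side applies rcu_gp_ongoing() to each registered reader's ctr to
 * decide whether that reader is still inside a critical section started
 * before the current grace-period phase. Busy-waiting here stands in for the
 * real implementation's adaptive wait and futex-based sleep.
 */
static inline void example_wait_for_reader(struct rcu_reader *reader)
{
	while (rcu_gp_ongoing(&reader->ctr))
		caa_cpu_relax();
}
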
static inline void _rcu_read_lock(void)
{
	unsigned long tmp;

	cmm_barrier();	/* Ensure the compiler does not reorder us with mutex */
	tmp = rcu_reader.ctr;
	/*
	 * rcu_gp_ctr is
	 *   RCU_GP_COUNT | (~RCU_GP_CTR_PHASE or RCU_GP_CTR_PHASE)
	 */
	if (caa_likely(!(tmp & RCU_GP_CTR_NEST_MASK))) {
		_CMM_STORE_SHARED(rcu_reader.ctr, _CMM_LOAD_SHARED(rcu_gp_ctr));
		/*
		 * Set active readers count for outermost nesting level before
		 * accessing the pointer. See smp_mb_master().
		 */
		smp_mb_slave(RCU_MB_GROUP);
	} else {
		_CMM_STORE_SHARED(rcu_reader.ctr, tmp + RCU_GP_COUNT);
	}
}

static inline void _rcu_read_unlock(void)
{
	unsigned long tmp;

	tmp = rcu_reader.ctr;
	/*
	 * Finish using rcu before decrementing the reader count.
	 * See smp_mb_master().
	 */
	if (caa_likely((tmp & RCU_GP_CTR_NEST_MASK) == RCU_GP_COUNT)) {
		smp_mb_slave(RCU_MB_GROUP);
		_CMM_STORE_SHARED(rcu_reader.ctr, rcu_reader.ctr - RCU_GP_COUNT);
		/* write rcu_reader.ctr before read futex */
		smp_mb_slave(RCU_MB_GROUP);
		wake_up_gp();
	} else {
		_CMM_STORE_SHARED(rcu_reader.ctr, rcu_reader.ctr - RCU_GP_COUNT);
	}
	cmm_barrier();	/* Ensure the compiler does not reorder us with mutex */
}

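/*
 * Illustration (hypothetical usage, not part of this header's API): a minimal
 * read-side critical section built on the static entry points above. The
 * struct example_node type and example_read() helper are made up; real
 * applications normally go through the rcu_read_lock()/rcu_read_unlock() and
 * rcu_dereference() mappings from the public headers, and the calling thread
 * must have been registered with rcu_register_thread().
 */
struct example_node {
	int value;
};

static inline int example_read(struct example_node **shared)
{
	struct example_node *node;
	int v = -1;

	_rcu_read_lock();
	node = CMM_LOAD_SHARED(*shared);	/* stand-in for rcu_dereference() */
	if (node)
		v = node->value;
	_rcu_read_unlock();
	return v;
}
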
#ifdef __cplusplus
}
#endif

#endif /* _URCU_STATIC_H */