uatomic: update atomic set/read, use STORE_SHARED/LOAD_SHARED
[urcu.git] / urcu-static.h
#ifndef _URCU_STATIC_H
#define _URCU_STATIC_H

/*
 * urcu-static.h
 *
 * Userspace RCU header.
 *
 * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See urcu.h for linking
 * dynamically with the userspace rcu library.
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */

#include <stdlib.h>
#include <pthread.h>
#include <syscall.h>
#include <unistd.h>

#include <urcu/compiler.h>
#include <urcu/arch.h>
#include <urcu/system.h>
#include <urcu/uatomic_arch.h>
#include <urcu/list.h>
#include <urcu/urcu-futex.h>

/*
 * This code section can only be included in LGPL 2.1 compatible source code.
 * See urcu.h for the function call wrappers to use in code meant to be linked
 * only against the Userspace RCU library. Those wrappers come with a small
 * read-side performance degradation due to the added function calls, but they
 * permit relinking with newer versions of the library.
 */

/*
 * The signal number used by the RCU library can be overridden with
 * -DSIGURCU= when compiling the library.
 */
#ifndef SIGURCU
#define SIGURCU SIGUSR1
#endif
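/*
 * For example, building the library with -DSIGURCU=SIGUSR2 selects a
 * different signal (SIGUSR2 is only an illustrative choice); the application
 * must then leave that signal unused.
 */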

/*
 * If a reader is really non-cooperative and refuses to commit its
 * urcu_reader.ctr count to memory (there is no barrier in the reader
 * per-se), kick it after a few loops waiting for it.
 */
#define KICK_READER_LOOPS 10000

/*
 * Active attempts to check for reader Q.S. before calling futex().
 */
#define RCU_QS_ACTIVE_ATTEMPTS 100

#ifdef DEBUG_RCU
#include <assert.h>
#define rcu_assert(args...) assert(args)
#else
#define rcu_assert(args...)
#endif

#ifdef DEBUG_YIELD
#include <sched.h>
#include <time.h>
#include <pthread.h>
#include <unistd.h>

#define YIELD_READ (1 << 0)
#define YIELD_WRITE (1 << 1)

/*
 * Updates without URCU_MB are much slower. Account for this in
 * the delay.
 */
#ifdef URCU_MB
/* maximum sleep delay, in us */
#define MAX_SLEEP 50
#else
#define MAX_SLEEP 30000
#endif

extern unsigned int yield_active;
extern unsigned int __thread rand_yield;

static inline void debug_yield_read(void)
{
	if (yield_active & YIELD_READ)
		if (rand_r(&rand_yield) & 0x1)
			usleep(rand_r(&rand_yield) % MAX_SLEEP);
}

static inline void debug_yield_write(void)
{
	if (yield_active & YIELD_WRITE)
		if (rand_r(&rand_yield) & 0x1)
			usleep(rand_r(&rand_yield) % MAX_SLEEP);
}

static inline void debug_yield_init(void)
{
	rand_yield = time(NULL) ^ pthread_self();
}
#else
static inline void debug_yield_read(void)
{
}

static inline void debug_yield_write(void)
{
}

static inline void debug_yield_init(void)
{
}
#endif

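/*
 * Read-side ordering differs between the two library flavors: with URCU_MB,
 * each reader issues a full smp_mb(), so updaters need no reader cooperation.
 * Without it (the signal-based flavor), a compiler barrier() suffices on the
 * read side because force_mb_all_threads() (referenced below) has updaters
 * signal every registered reader with SIGURCU to force equivalent ordering.
 */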
#ifdef URCU_MB
static inline void reader_barrier()
{
	smp_mb();
}
#else
static inline void reader_barrier()
{
	barrier();
}
#endif

/*
 * The trick here is that RCU_GP_CTR_BIT must be a multiple of 8 so we can use
 * a full 8-bit, 16-bit or 32-bit bitmask for the lower order bits.
 */
#define RCU_GP_COUNT (1UL << 0)
/* Use a number of bits equal to half of the architecture long size */
#define RCU_GP_CTR_BIT (1UL << (sizeof(long) << 2))
#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
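/*
 * Worked example of the counter layout (illustrative): on an LP64 target,
 * sizeof(long) == 8, so RCU_GP_CTR_BIT == 1UL << 32 and RCU_GP_CTR_NEST_MASK
 * == 0xffffffffUL: bits 0-31 hold the read-side nesting count and bit 32 is
 * the grace-period phase bit. With a 32-bit long, the split is at bit 16
 * (RCU_GP_CTR_BIT == 1UL << 16, mask == 0xffffUL). Both 16 and 32 are
 * multiples of 8, which is what the comment above relies on.
 */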

/*
 * Global quiescent period counter with low-order bits unused.
 * Using an int rather than a char to eliminate false register dependencies
 * causing stalls on some architectures.
 */
extern long urcu_gp_ctr;

struct urcu_reader {
	/* Data used by both reader and synchronize_rcu() */
	long ctr;
	char need_mb;
	/* Data used for registry */
	struct list_head head __attribute__((aligned(CACHE_LINE_SIZE)));
	pthread_t tid;
};

extern struct urcu_reader __thread urcu_reader;

extern int gp_futex;

/*
 * Wake-up waiting synchronize_rcu(). Called from many concurrent threads.
 */
static inline void wake_up_gp(void)
{
	if (unlikely(uatomic_read(&gp_futex) == -1)) {
		uatomic_set(&gp_futex, 0);
		futex_async(&gp_futex, FUTEX_WAKE, 1,
			    NULL, NULL, 0);
	}
}
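/*
 * Protocol assumed here (sketch): the update side sets gp_futex to -1 once
 * its RCU_QS_ACTIVE_ATTEMPTS polling attempts are exhausted and then blocks
 * on the futex. A reader observing -1 resets the word to 0 and issues
 * FUTEX_WAKE so the waiter re-examines the reader registry.
 */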

static inline int rcu_old_gp_ongoing(long *value)
{
	long v;

	if (value == NULL)
		return 0;
	/*
	 * Make sure both tests below are done on the same version of *value
	 * to ensure consistency.
	 */
	v = LOAD_SHARED(*value);
	return (v & RCU_GP_CTR_NEST_MASK) &&
		((v ^ urcu_gp_ctr) & RCU_GP_CTR_BIT);
}
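/*
 * In other words: the return value is non-zero iff the reader owning *value
 * is inside a read-side critical section (non-zero nesting count) that began
 * before the current grace-period phase (its snapshot of RCU_GP_CTR_BIT
 * differs from urcu_gp_ctr), i.e. a reader the grace period must still wait
 * for.
 */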

static inline void _rcu_read_lock(void)
{
	long tmp;

	tmp = urcu_reader.ctr;
	/* urcu_gp_ctr = RCU_GP_COUNT | (~RCU_GP_CTR_BIT or RCU_GP_CTR_BIT) */
	if (likely(!(tmp & RCU_GP_CTR_NEST_MASK))) {
		_STORE_SHARED(urcu_reader.ctr, _LOAD_SHARED(urcu_gp_ctr));
		/*
		 * Set active readers count for outermost nesting level before
		 * accessing the pointer. See force_mb_all_threads().
		 */
		reader_barrier();
	} else {
		_STORE_SHARED(urcu_reader.ctr, tmp + RCU_GP_COUNT);
	}
}
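/*
 * Note on nesting: urcu_gp_ctr always has RCU_GP_COUNT set (see the comment
 * above), so the outermost lock leaves a nesting count of 1 together with the
 * current phase bit, while nested locks merely add RCU_GP_COUNT. Only the
 * outermost level needs the barrier, since only it makes the thread visible
 * as an active reader.
 */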

static inline void _rcu_read_unlock(void)
{
	long tmp;

	tmp = urcu_reader.ctr;
	/*
	 * Finish using rcu before decrementing the reader nesting count.
	 * See force_mb_all_threads().
	 */
	if (likely((tmp & RCU_GP_CTR_NEST_MASK) == RCU_GP_COUNT)) {
		reader_barrier();
		_STORE_SHARED(urcu_reader.ctr, urcu_reader.ctr - RCU_GP_COUNT);
		/* write urcu_reader.ctr before reading the futex word */
		reader_barrier();
		wake_up_gp();
	} else {
		_STORE_SHARED(urcu_reader.ctr, urcu_reader.ctr - RCU_GP_COUNT);
	}
}

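/*
 * Usage sketch (illustrative only): a read-side critical section built on
 * these primitives typically looks like the following, where struct foo,
 * foo_ptr and do_something_with() are hypothetical names and the pointer is
 * assumed to be published elsewhere with the library's rcu_assign_pointer():
 *
 *	struct foo *p;
 *
 *	_rcu_read_lock();
 *	p = LOAD_SHARED(foo_ptr);	// or the public rcu_dereference()
 *	if (p)
 *		do_something_with(p);
 *	_rcu_read_unlock();
 */
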
#endif /* _URCU_STATIC_H */