#ifndef _URCU_STATIC_H
#define _URCU_STATIC_H

/*
 * urcu-static.h
 *
 * Userspace RCU header.
 *
 * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See urcu.h for linking
 * dynamically with the userspace rcu library.
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */

#include <stdlib.h>
#include <pthread.h>
#include <syscall.h>
#include <unistd.h>
#include <signal.h>
#include <assert.h>

#include <urcu/compiler.h>
#include <urcu/arch.h>

/*
 * Identify a shared load. A smp_rmc() or smp_mc() should come before the load.
 */
#define _LOAD_SHARED(p)	ACCESS_ONCE(p)

/*
 * Load data from shared memory, doing a cache flush if required.
 */
#define LOAD_SHARED(p)			\
	({				\
		smp_rmc();		\
		_LOAD_SHARED(p);	\
	})

/*
 * Identify a shared store. A smp_wmc() or smp_mc() should follow the store.
 */
#define _STORE_SHARED(x, v)	({ ACCESS_ONCE(x) = (v); })

/*
 * Store v into x, where x is located in shared memory. Performs the required
 * cache flush after writing. Returns v.
 */
#define STORE_SHARED(x, v)		\
	({				\
		_STORE_SHARED(x, v);	\
		smp_wmc();		\
		(v);			\
	})
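
/*
 * Usage sketch (illustrative only; "flag" is a hypothetical variable
 * shared between threads, and cpu_relax() comes from urcu/arch.h).
 * Producer side:
 *
 *	extern int flag;
 *
 *	STORE_SHARED(flag, 1);
 *
 * Consumer side:
 *
 *	while (!LOAD_SHARED(flag))
 *		cpu_relax();
 *
 * Note that these primitives only provide cache coherency management
 * and volatile access; they are not full memory barriers.
 */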

/**
 * _rcu_dereference - reads (copies) an RCU-protected pointer into a local
 * variable within an RCU read-side critical section. The pointer can later be
 * safely dereferenced within the critical section.
 *
 * This ensures that the pointer copy is invariant throughout the whole
 * critical section.
 *
 * Inserts memory barriers on architectures that require them (currently only
 * Alpha) and documents which pointers are protected by RCU.
 *
 * The compiler memory barrier in LOAD_SHARED() ensures that value-speculative
 * optimizations (e.g. VSS: Value Speculation Scheduling) do not perform the
 * data read before the pointer read by speculating the value of the pointer.
 * Correct ordering is ensured because the pointer is read as a volatile
 * access. This acts as a global side-effect operation, which forbids
 * reordering of dependent memory operations. Note that such concerns about
 * dependency-breaking optimizations will eventually be taken care of by the
 * "memory_order_consume" addition to the forthcoming C++ standard.
 *
 * Should match rcu_assign_pointer() or rcu_xchg_pointer().
 */

#define _rcu_dereference(p)	({					\
			typeof(p) _________p1 = LOAD_SHARED(p);		\
			smp_read_barrier_depends();			\
			(_________p1);					\
			})
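
/*
 * Example (illustrative only; "gp" is a hypothetical RCU-protected
 * global pointer, and the surrounding read-side critical section is
 * assumed):
 *
 *	struct foo { int a; };
 *	extern struct foo *gp;
 *
 *	struct foo *p = _rcu_dereference(gp);
 *
 *	if (p)
 *		use(p->a);
 */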

#define futex(...)	syscall(__NR_futex, __VA_ARGS__)
#define FUTEX_WAIT	0
#define FUTEX_WAKE	1

/*
 * This code section can only be included in LGPL 2.1 compatible source code.
 * See urcu.h for the function call wrappers which can be used in code meant
 * only to be linked with the userspace RCU library. Those wrappers come with
 * a small performance degradation on the read-side due to the added function
 * calls. This indirection is required to permit relinking with newer versions
 * of the library.
 */

/*
 * The signal number used by the RCU library can be overridden with
 * -DSIGURCU= when compiling the library.
 */
#ifndef SIGURCU
#define SIGURCU SIGUSR1
#endif

/*
 * If a reader is really non-cooperative and refuses to commit its
 * urcu_active_readers count to memory (there is no barrier in the reader
 * per se), kick it after a few loops waiting for it.
 */
#define KICK_READER_LOOPS 10000

/*
 * Number of active-polling attempts to check for reader quiescent states
 * before calling futex().
 */
#define RCU_QS_ACTIVE_ATTEMPTS 100

#ifdef DEBUG_RCU
#define rcu_assert(args...)	assert(args)
#else
#define rcu_assert(args...)
#endif

#ifdef DEBUG_YIELD
#include <sched.h>
#include <time.h>
#include <pthread.h>
#include <unistd.h>

#define YIELD_READ	(1 << 0)
#define YIELD_WRITE	(1 << 1)

/*
 * Updates without URCU_MB are much slower. Account for this in
 * the delay.
 */
#ifdef URCU_MB
/* maximum sleep delay, in us */
#define MAX_SLEEP 50
#else
#define MAX_SLEEP 30000
#endif

extern unsigned int yield_active;
extern unsigned int __thread rand_yield;

static inline void debug_yield_read(void)
{
	if (yield_active & YIELD_READ)
		if (rand_r(&rand_yield) & 0x1)
			usleep(rand_r(&rand_yield) % MAX_SLEEP);
}

static inline void debug_yield_write(void)
{
	if (yield_active & YIELD_WRITE)
		if (rand_r(&rand_yield) & 0x1)
			usleep(rand_r(&rand_yield) % MAX_SLEEP);
}

static inline void debug_yield_init(void)
{
	rand_yield = time(NULL) ^ pthread_self();
}
#else
static inline void debug_yield_read(void)
{
}

static inline void debug_yield_write(void)
{
}

static inline void debug_yield_init(void)
{
}
#endif

#ifdef URCU_MB
static inline void reader_barrier(void)
{
	smp_mb();
}
#else
static inline void reader_barrier(void)
{
	barrier();
}
#endif

/*
 * The trick here is that the bit position of RCU_GP_CTR_BIT must be a
 * multiple of 8, so we can use a full 8-bit, 16-bit or 32-bit bitmask for
 * the lower-order bits.
 */
#define RCU_GP_COUNT		(1UL << 0)
/* Use a number of bits equal to half of the architecture long size */
#define RCU_GP_CTR_BIT		(1UL << (sizeof(long) << 2))
#define RCU_GP_CTR_NEST_MASK	(RCU_GP_CTR_BIT - 1)
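
/*
 * Worked example of the constants above (assuming an LP64 target, where
 * sizeof(long) == 8):
 *
 *	RCU_GP_COUNT         == 0x0000000000000001
 *	RCU_GP_CTR_BIT       == 0x0000000100000000	(1UL << 32)
 *	RCU_GP_CTR_NEST_MASK == 0x00000000ffffffff
 *
 * The low 32 bits of a per-thread counter hold the read-side nesting
 * count; the bit above them encodes the grace-period phase. On a 32-bit
 * long, the split is 16/16.
 */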

/*
 * Global quiescent period counter with low-order bits unused.
 * Using a full-word integer rather than a char eliminates false register
 * dependencies causing stalls on some architectures.
 */
extern long urcu_gp_ctr;

extern long __thread urcu_active_readers;

extern int gp_futex;

/*
 * Wake up a waiting synchronize_rcu(). Called from many concurrent threads.
 */
static inline void wake_up_gp(void)
{
	if (unlikely(uatomic_read(&gp_futex) == -1)) {
		uatomic_set(&gp_futex, 0);
		futex(&gp_futex, FUTEX_WAKE, 1,
		      NULL, NULL, 0);
	}
}
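
/*
 * For reference, a sketch of the matching waiter side (the actual code
 * lives in urcu.c; this assumes it follows the usual futex protocol):
 * the updater stores -1 into gp_futex, re-checks that some reader is
 * still active, then calls
 *
 *	futex(&gp_futex, FUTEX_WAIT, -1, NULL, NULL, 0);
 *
 * which blocks only if gp_futex still reads -1, so a concurrent
 * wake_up_gp() storing 0 can never be missed.
 */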

/*
 * Check whether the reader whose count is at *value is in an RCU read-side
 * critical section that began before the current grace period.
 */
static inline int rcu_old_gp_ongoing(long *value)
{
	long v;

	if (value == NULL)
		return 0;
	/*
	 * Make sure both tests below are done on the same version of *value
	 * to ensure consistency.
	 */
	v = LOAD_SHARED(*value);
	/*
	 * Ongoing if the nesting count is non-zero and the grace-period
	 * phase bit differs from the global counter's.
	 */
	return (v & RCU_GP_CTR_NEST_MASK) &&
		((v ^ urcu_gp_ctr) & RCU_GP_CTR_BIT);
}
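
/*
 * Illustrative updater-side sketch (the reader registry below is
 * hypothetical; the real registry lives in urcu.c):
 *
 *	struct reader_registry { long *urcu_active_readers; };
 *
 *	static void wait_for_reader(struct reader_registry *index)
 *	{
 *		while (rcu_old_gp_ongoing(index->urcu_active_readers))
 *			cpu_relax();
 *	}
 */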

static inline void _rcu_read_lock(void)
{
	long tmp;

	tmp = urcu_active_readers;
	/* urcu_gp_ctr = RCU_GP_COUNT | (~RCU_GP_CTR_BIT or RCU_GP_CTR_BIT) */
	if (likely(!(tmp & RCU_GP_CTR_NEST_MASK))) {
		_STORE_SHARED(urcu_active_readers, _LOAD_SHARED(urcu_gp_ctr));
		/*
		 * Set the active readers count for the outermost nesting
		 * level before accessing the pointer.
		 * See force_mb_all_threads().
		 */
		reader_barrier();
	} else {
		/* Nested read-side critical section: just bump the count. */
		_STORE_SHARED(urcu_active_readers, tmp + RCU_GP_COUNT);
	}
}

static inline void _rcu_read_unlock(void)
{
	long tmp;

	tmp = urcu_active_readers;
	/*
	 * Finish using RCU before decrementing the active readers count.
	 * See force_mb_all_threads().
	 */
	if (likely((tmp & RCU_GP_CTR_NEST_MASK) == RCU_GP_COUNT)) {
		reader_barrier();
		_STORE_SHARED(urcu_active_readers,
			      urcu_active_readers - RCU_GP_COUNT);
		/* write urcu_active_readers before reading futex */
		reader_barrier();
		wake_up_gp();
	} else {
		_STORE_SHARED(urcu_active_readers,
			      urcu_active_readers - RCU_GP_COUNT);
	}
}
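
/*
 * Reader-side usage sketch (illustrative only; "struct mydata" and
 * "shared_ptr" are hypothetical):
 *
 *	struct mydata { int a; };
 *	extern struct mydata *shared_ptr;
 *
 *	static int read_a(void)
 *	{
 *		struct mydata *p;
 *		int ret;
 *
 *		_rcu_read_lock();
 *		p = _rcu_dereference(shared_ptr);
 *		ret = p ? p->a : -1;
 *		_rcu_read_unlock();
 *		return ret;
 *	}
 */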

/**
 * _rcu_assign_pointer - assign (publicize) a pointer to a new data structure
 * meant to be read by RCU read-side critical sections. Returns the assigned
 * value.
 *
 * Documents which pointers will be dereferenced by RCU read-side critical
 * sections and adds the required memory barriers on architectures requiring
 * them. It also makes sure the compiler does not reorder code initializing the
 * data structure before its publication.
 *
 * Should match rcu_dereference().
 */

#define _rcu_assign_pointer(p, v)		\
	({					\
		if (!__builtin_constant_p(v) ||	\
		    ((v) != NULL))		\
			wmb();			\
		STORE_SHARED(p, v);		\
	})
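
/*
 * Updater-side usage sketch (illustrative only; "struct mydata" and
 * "shared_ptr" are hypothetical, and reclamation of the previous value
 * is omitted). The structure is fully initialized before it is
 * published:
 *
 *	static void publish_a(int a)
 *	{
 *		struct mydata *p = malloc(sizeof(*p));
 *
 *		p->a = a;
 *		_rcu_assign_pointer(shared_ptr, p);
 *	}
 */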

/**
 * _rcu_cmpxchg_pointer - same as rcu_assign_pointer(), but performs the
 * assignment only if the pointer currently equals "old". On success, returns
 * the previous pointer to the data structure, which can be safely freed after
 * waiting for a quiescent state using synchronize_rcu(). On failure
 * (unexpected value), returns the value observed, which differs from "old"
 * and must not be freed, since readers may still hold references to it.
 */

#define _rcu_cmpxchg_pointer(p, old, _new)		\
	({						\
		if (!__builtin_constant_p(_new) ||	\
		    ((_new) != NULL))			\
			wmb();				\
		uatomic_cmpxchg(p, old, _new);		\
	})
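
/*
 * Usage sketch (illustrative only; the names are hypothetical): swap in
 * newp only if shared_ptr still points to oldp, then reclaim:
 *
 *	struct mydata *observed;
 *
 *	observed = _rcu_cmpxchg_pointer(&shared_ptr, oldp, newp);
 *	if (observed == oldp) {
 *		synchronize_rcu();
 *		free(oldp);
 *	}
 */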

/**
 * _rcu_xchg_pointer - same as rcu_assign_pointer(), but returns the previous
 * pointer to the data structure, which can be safely freed after waiting for a
 * quiescent state using synchronize_rcu().
 */

#define _rcu_xchg_pointer(p, v)			\
	({					\
		if (!__builtin_constant_p(v) ||	\
		    ((v) != NULL))		\
			wmb();			\
		uatomic_xchg(p, v);		\
	})
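
/*
 * Usage sketch (illustrative only; the names are hypothetical):
 * unconditionally swap in a new structure, then free the old one after
 * a grace period:
 *
 *	struct mydata *oldp;
 *
 *	oldp = _rcu_xchg_pointer(&shared_ptr, newp);
 *	synchronize_rcu();
 *	free(oldp);
 */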

/*
 * Exchanges the pointer and waits for a grace period, after which all
 * pre-existing readers are guaranteed to have completed.
 * The pointer returned can then be freed.
 */
#define _rcu_publish_content(p, v)			\
	({						\
		void *oldptr;				\
		oldptr = _rcu_xchg_pointer(p, v);	\
		synchronize_rcu();			\
		oldptr;					\
	})
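
/*
 * Usage sketch (illustrative only; the names are hypothetical): the
 * xchg/synchronize_rcu/free sequence above, with the grace period
 * handled internally:
 *
 *	struct mydata *oldp = _rcu_publish_content(&shared_ptr, newp);
 *
 *	free(oldp);
 */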

#endif /* _URCU_STATIC_H */