X-Git-Url: http://git.liburcu.org/?p=urcu.git;a=blobdiff_plain;f=urcu-static.h;h=3caa0f93d55058528a07c5fb20ba3a0e80191e79;hp=c47a759d132357a7af23301ece58c238a200d2ec;hb=ba59a0c7b244a0939a2298fc76a9002436ef9674;hpb=adcfce542ec1c28796c3c641021a6d37f97b6f6f

diff --git a/urcu-static.h b/urcu-static.h
index c47a759..3caa0f9 100644
--- a/urcu-static.h
+++ b/urcu-static.h
@@ -4,20 +4,35 @@
 /*
  * urcu-static.h
  *
- * Userspace RCU header, to be included only in LGPL-compatible code.
+ * Userspace RCU header.
  *
- * Copyright February 2009 - Mathieu Desnoyers
+ * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See urcu.h for linking
+ * dynamically with the userspace rcu library.
  *
- * Credits for Paul E. McKenney
- * for inspiration coming from the Linux kernel RCU and rcu-preempt.
+ * Copyright (c) 2009 Mathieu Desnoyers
+ * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
  *
- * Distributed under LGPLv2.1
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  *
  * IBM's contributions to this file may be relicensed under LGPLv2 or later.
  */
 
 #include <stdlib.h>
 #include <pthread.h>
+#include <syscall.h>
+#include <unistd.h>
 
 #include <compiler.h>
 #include <arch.h>
@@ -63,6 +78,15 @@
  * Inserts memory barriers on architectures that require them (currently only
  * Alpha) and documents which pointers are protected by RCU.
  *
+ * The compiler memory barrier in LOAD_SHARED() ensures that value-speculative
+ * optimizations (e.g. VSS: Value Speculation Scheduling) do not perform the
+ * data read before the pointer read by speculating the value of the pointer.
+ * Correct ordering is ensured because the pointer is read as a volatile access.
+ * This acts as a global side-effect operation, which forbids reordering of
+ * dependent memory operations. Note that this concern about dependency-breaking
+ * optimizations will eventually be addressed by the "memory_order_consume"
+ * addition to the forthcoming C++ standard.
+ *
  * Should match rcu_assign_pointer() or rcu_xchg_pointer().
  */
 
@@ -72,6 +96,10 @@
 		(_________p1);				\
 	})
 
+#define futex(...)		syscall(__NR_futex, __VA_ARGS__)
+#define FUTEX_WAIT		0
+#define FUTEX_WAKE		1
+
 /*
  * This code section can only be included in LGPL 2.1 compatible source code.
  * See below for the function call wrappers which can be used in code meant to
@@ -95,6 +123,17 @@
  */
 #define KICK_READER_LOOPS 10000
 
+/*
+ * Active attempts to check for reader quiescent state before calling futex().
+ */
+#define RCU_QS_ACTIVE_ATTEMPTS 100
+
+#ifdef DEBUG_RCU
+#define rcu_assert(args...)	assert(args)
+#else
+#define rcu_assert(args...)
+#endif
+
 #ifdef DEBUG_YIELD
 #include <sched.h>
 #include <time.h>
@@ -104,8 +143,11 @@
 #define YIELD_READ	(1 << 0)
 #define YIELD_WRITE	(1 << 1)
 
-/* Updates without DEBUG_FULL_MB are much slower. Account this in the delay */
-#ifdef DEBUG_FULL_MB
+/*
+ * Updates without URCU_MB are much slower. Account for this in
+ * the delay.
+ */
+#ifdef URCU_MB
 /* maximum sleep delay, in us */
 #define MAX_SLEEP 50
 #else
@@ -148,7 +190,7 @@ static inline void debug_yield_init(void)
 }
 #endif
 
-#ifdef DEBUG_FULL_MB
+#ifdef URCU_MB
 static inline void reader_barrier()
 {
 	smp_mb();
@@ -178,6 +220,20 @@ extern long urcu_gp_ctr;
 
 extern long __thread urcu_active_readers;
 
+extern int gp_futex;
+
+/*
+ * Wake up the waiting synchronize_rcu(). Called from many concurrent threads.
+ */
+static inline void wake_up_gp(void)
+{
+	if (unlikely(atomic_read(&gp_futex) == -1)) {
+		atomic_set(&gp_futex, 0);
+		futex(&gp_futex, FUTEX_WAKE, 1,
+		      NULL, NULL, 0);
+	}
+}
+
 static inline int rcu_old_gp_ongoing(long *value)
 {
 	long v;
@@ -199,32 +255,38 @@ static inline void _rcu_read_lock(void)
 
 	tmp = urcu_active_readers;
 	/* urcu_gp_ctr = RCU_GP_COUNT | (~RCU_GP_CTR_BIT or RCU_GP_CTR_BIT) */
-	/*
-	 * The data dependency "read urcu_gp_ctr, write urcu_active_readers",
-	 * serializes those two memory operations. The memory barrier in the
-	 * signal handler ensures we receive the proper memory commit barriers
-	 * required by _STORE_SHARED and _LOAD_SHARED whenever communication
-	 * with the writer is needed.
-	 */
-	if (likely(!(tmp & RCU_GP_CTR_NEST_MASK)))
+	if (likely(!(tmp & RCU_GP_CTR_NEST_MASK))) {
 		_STORE_SHARED(urcu_active_readers, _LOAD_SHARED(urcu_gp_ctr));
-	else
+		/*
+		 * Set active readers count for outermost nesting level before
+		 * accessing the pointer. See force_mb_all_threads().
+		 */
+		reader_barrier();
+	} else {
 		_STORE_SHARED(urcu_active_readers, tmp + RCU_GP_COUNT);
-	/*
-	 * Increment active readers count before accessing the pointer.
-	 * See force_mb_all_threads().
-	 */
-	reader_barrier();
+	}
 }
 
 static inline void _rcu_read_unlock(void)
 {
-	reader_barrier();
+	long tmp;
+
+	tmp = urcu_active_readers;
 	/*
 	 * Finish using rcu before decrementing the pointer.
 	 * See force_mb_all_threads().
 	 */
-	_STORE_SHARED(urcu_active_readers, urcu_active_readers - RCU_GP_COUNT);
+	if (likely((tmp & RCU_GP_CTR_NEST_MASK) == RCU_GP_COUNT)) {
+		reader_barrier();
+		_STORE_SHARED(urcu_active_readers,
+			      urcu_active_readers - RCU_GP_COUNT);
+		/* write urcu_active_readers before read futex */
+		reader_barrier();
+		wake_up_gp();
+	} else {
+		_STORE_SHARED(urcu_active_readers,
+			      urcu_active_readers - RCU_GP_COUNT);
+	}
 }
 
 /**
@@ -248,9 +310,25 @@ static inline void _rcu_read_unlock(void)
 		STORE_SHARED(p, v);			\
 	})
 
+/**
+ * _rcu_cmpxchg_pointer - same as rcu_assign_pointer, but tests whether the
+ * pointer matches "old". If it succeeds, it returns the previous pointer to
+ * the data structure, which can be safely freed after waiting for a quiescent
+ * state using synchronize_rcu(). If it fails (unexpected value), it returns
+ * old (which should not be freed!).
+ */
+
+#define _rcu_cmpxchg_pointer(p, old, _new)		\
+	({						\
+		if (!__builtin_constant_p(_new) ||	\
+		    ((_new) != NULL))			\
+			wmb();				\
+		cmpxchg(p, old, _new);			\
+	})
+
 /**
  * _rcu_xchg_pointer - same as rcu_assign_pointer, but returns the previous
- * pointer to the data structure, which can be safely freed after waitin for a
+ * pointer to the data structure, which can be safely freed after waiting for a
  * quiescent state using synchronize_rcu().
  */
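
The wake_up_gp() / gp_futex pair added above is the reader side of an adaptive wait: per the RCU_QS_ACTIVE_ATTEMPTS comment, the grace-period waiter first polls for reader quiescence a bounded number of times and only then sleeps on the futex, which the last reader leaving its outermost critical section wakes. The stand-alone sketch below illustrates that pattern; it is not liburcu code, and the demo_* names, the single global active_readers flag (instead of the per-thread urcu_active_readers counter), and the GCC __atomic builtins are simplifying assumptions.

/*
 * demo_gp_futex.c - illustrative sketch only, not part of liburcu.
 * Build (Linux, GCC/Clang): cc -pthread demo_gp_futex.c -o demo_gp_futex
 */
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <pthread.h>
#include <stdio.h>

#define DEMO_QS_ACTIVE_ATTEMPTS	100	/* mirrors RCU_QS_ACTIVE_ATTEMPTS */

static int gp_futex;		/* 0: no waiter, -1: waiter may be asleep */
static int active_readers;	/* stand-in for the per-thread reader counts */

static long sys_futex(int *uaddr, int op, int val)
{
	/* Same calling convention as the futex() macro in the patch above. */
	return syscall(SYS_futex, uaddr, op, val, NULL, NULL, 0);
}

/* Reader side: leave the critical section, then wake a sleeping waiter. */
static void demo_reader_exit(void)
{
	__atomic_store_n(&active_readers, 0, __ATOMIC_SEQ_CST);
	if (__atomic_load_n(&gp_futex, __ATOMIC_SEQ_CST) == -1) {
		__atomic_store_n(&gp_futex, 0, __ATOMIC_SEQ_CST);
		sys_futex(&gp_futex, FUTEX_WAKE, 1);
	}
}

/* Writer side: poll a bounded number of times, then block on the futex. */
static void demo_wait_for_readers(void)
{
	int attempts = 0;

	while (__atomic_load_n(&active_readers, __ATOMIC_SEQ_CST)) {
		if (++attempts < DEMO_QS_ACTIVE_ATTEMPTS)
			continue;
		__atomic_store_n(&gp_futex, -1, __ATOMIC_SEQ_CST);
		/* Re-check after publishing -1 so a wake-up cannot be missed. */
		if (__atomic_load_n(&active_readers, __ATOMIC_SEQ_CST))
			sys_futex(&gp_futex, FUTEX_WAIT, -1);
		__atomic_store_n(&gp_futex, 0, __ATOMIC_SEQ_CST);
		attempts = 0;
	}
}

static void *demo_reader(void *arg)
{
	(void)arg;
	usleep(1000);		/* pretend to read RCU-protected data */
	demo_reader_exit();
	return NULL;
}

int main(void)
{
	pthread_t tid;

	active_readers = 1;	/* one reader is inside its critical section */
	pthread_create(&tid, NULL, demo_reader, NULL);
	demo_wait_for_readers();	/* returns once the reader is quiescent */
	pthread_join(tid, NULL);
	printf("grace period complete\n");
	return 0;
}

Bounding the spin with DEMO_QS_ACTIVE_ATTEMPTS keeps short grace periods free of syscalls, while still letting the waiter block on the futex instead of burning CPU when readers stay active for a long time.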