X-Git-Url: https://git.liburcu.org/?p=urcu.git;a=blobdiff_plain;f=urcu.h;h=c6c54e7c7c3bcbc83d88b279e20ab35f64a0806f;hp=9e9fea2b96fafa5c4688a1b1bf696284587df25f;hb=eaf2c3f4b9041d2a6a85d7f9e56f6014603367d4;hpb=128166c908bfaa915c76e060522abd20241ac69c

diff --git a/urcu.h b/urcu.h
index 9e9fea2..c6c54e7 100644
--- a/urcu.h
+++ b/urcu.h
@@ -6,273 +6,98 @@
  *
  * Userspace RCU header
  *
- * Copyright February 2009 - Mathieu Desnoyers
+ * Copyright (c) 2009 Mathieu Desnoyers
+ * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
  *
- * Credits for Paul e. McKenney
- * for inspiration coming from the Linux kernel RCU and rcu-preempt.
+ * LGPL-compatible code should include this header with :
  *
- * The barrier, mb, rmb, wmb, atomic_inc, smp_read_barrier_depends, ACCESS_ONCE
- * and rcu_dereference primitives come from the Linux kernel.
+ * #define _LGPL_SOURCE
+ * #include <urcu.h>
  *
- * Distributed under GPLv2
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * IBM's contributions to this file may be relicensed under LGPLv2 or later.
  */
 
 #include <stdlib.h>
 #include <pthread.h>
 
-/* The "volatile" is due to gcc bugs */
-#define barrier() __asm__ __volatile__("": : :"memory")
-
-#define likely(x) __builtin_expect(!!(x), 1)
-#define unlikely(x) __builtin_expect(!!(x), 0)
-
-/* x86 32/64 specific */
-#define mb() asm volatile("mfence":::"memory")
-#define rmb() asm volatile("lfence":::"memory")
-#define wmb() asm volatile("sfence" ::: "memory")
-
-static inline void atomic_inc(int *v)
-{
-	asm volatile("lock; incl %0"
-		     : "+m" (*v));
-}
-
-#define xchg(ptr, v) \
-	((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), sizeof(*(ptr))))
-
-struct __xchg_dummy {
-	unsigned long a[100];
-};
-#define __xg(x) ((struct __xchg_dummy *)(x))
-
 /*
- * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
- * Note 2: xchg has side effect, so that attribute volatile is necessary,
- *	   but generally the primitive is invalid, *ptr is output argument. --ANK
+ * See urcu-pointer.h and urcu-pointer-static.h for pointer publication headers.
  */
-static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
-				   int size)
-{
-	switch (size) {
-	case 1:
-		asm volatile("xchgb %b0,%1"
-			     : "=q" (x)
-			     : "m" (*__xg(ptr)), "0" (x)
-			     : "memory");
-		break;
-	case 2:
-		asm volatile("xchgw %w0,%1"
-			     : "=r" (x)
-			     : "m" (*__xg(ptr)), "0" (x)
-			     : "memory");
-		break;
-	case 4:
-		asm volatile("xchgl %k0,%1"
-			     : "=r" (x)
-			     : "m" (*__xg(ptr)), "0" (x)
-			     : "memory");
-		break;
-	case 8:
-		asm volatile("xchgq %0,%1"
-			     : "=r" (x)
-			     : "m" (*__xg(ptr)), "0" (x)
-			     : "memory");
-		break;
-	}
-	return x;
-}
+#include <urcu-pointer.h>
 
-/* Nop everywhere except on alpha. */
-#define smp_read_barrier_depends()
+#ifdef __cplusplus
+extern "C" {
+#endif
 
 /*
- * Prevent the compiler from merging or refetching accesses. The compiler
- * is also forbidden from reordering successive instances of ACCESS_ONCE(),
- * but only when the compiler is aware of some particular ordering. One way
- * to make the compiler aware of ordering is to put the two invocations of
- * ACCESS_ONCE() in different C statements.
- *
- * This macro does absolutely -nothing- to prevent the CPU from reordering,
- * merging, or refetching absolutely anything at any time. Its main intended
- * use is to mediate communication between process-level code and irq/NMI
- * handlers, all running on the same CPU.
- */
-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
-
-/**
- * rcu_dereference - fetch an RCU-protected pointer in an
- * RCU read-side critical section. This pointer may later
- * be safely dereferenced.
+ * Important !
  *
- * Inserts memory barriers on architectures that require them
- * (currently only the Alpha), and, more importantly, documents
- * exactly which pointers are protected by RCU.
+ * Each thread containing read-side critical sections must be registered
+ * with rcu_register_thread() before calling rcu_read_lock().
+ * rcu_unregister_thread() should be called before the thread exits.
  */
-#define rcu_dereference(p) ({ \
-	typeof(p) _________p1 = ACCESS_ONCE(p); \
-	smp_read_barrier_depends(); \
-	(_________p1); \
-	})
-
-#define SIGURCU SIGUSR1
-
-#ifdef DEBUG_YIELD
-#include <sched.h>
-#include <time.h>
-#include <pthread.h>
-
-#define YIELD_READ (1 << 0)
-#define YIELD_WRITE (1 << 1)
+#ifdef _LGPL_SOURCE
 
-extern unsigned int yield_active;
-extern unsigned int __thread rand_yield;
-
-static inline void debug_yield_read(void)
-{
-	if (yield_active & YIELD_READ)
-		if (rand_r(&rand_yield) & 0x1)
-			sched_yield();
-}
-
-static inline void debug_yield_write(void)
-{
-	if (yield_active & YIELD_WRITE)
-		if (rand_r(&rand_yield) & 0x1)
-			sched_yield();
-}
-
-static inline void debug_yield_init(void)
-{
-	rand_yield = time(NULL) ^ pthread_self();
-}
-#else
-static inline void debug_yield_read(void)
-{
-}
-
-static inline void debug_yield_write(void)
-{
-}
-
-static inline void debug_yield_init(void)
-{
-
-}
-#endif
+#include <urcu-static.h>
 
 /*
- * The trick here is that RCU_GP_CTR_BIT must be a multiple of 8 so we can use a
- * full 8-bits, 16-bits or 32-bits bitmask for the lower order bits.
+ * Mappings for static use of the userspace RCU library.
+ * Should only be used in LGPL-compatible code.
  */
-#define RCU_GP_COUNT (1UL << 0)
-/* Use the amount of bits equal to half of the architecture long size */
-#define RCU_GP_CTR_BIT (1UL << (sizeof(long) << 2))
-#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
 
 /*
- * Global quiescent period counter with low-order bits unused.
- * Using a int rather than a char to eliminate false register dependencies
- * causing stalls on some architectures.
+ * rcu_read_lock()
+ * rcu_read_unlock()
+ *
+ * Mark the beginning and end of a read-side critical section.
+ * DON'T FORGET TO USE RCU_REGISTER/UNREGISTER_THREAD() FOR EACH THREAD WITH
+ * READ-SIDE CRITICAL SECTION.
  */
-extern long urcu_gp_ctr;
-
-extern long __thread urcu_active_readers;
-
-static inline int rcu_old_gp_ongoing(long *value)
-{
-	long v;
-
-	if (value == NULL)
-		return 0;
-	debug_yield_write();
-	v = ACCESS_ONCE(*value);
-	debug_yield_write();
-	return (v & RCU_GP_CTR_NEST_MASK) &&
-		((v ^ ACCESS_ONCE(urcu_gp_ctr)) & RCU_GP_CTR_BIT);
-}
+#define rcu_read_lock() _rcu_read_lock()
+#define rcu_read_unlock() _rcu_read_unlock()
 
-static inline void rcu_read_lock(void)
-{
-	long tmp;
+#else /* !_LGPL_SOURCE */
 
-	debug_yield_read();
-	tmp = urcu_active_readers;
-	debug_yield_read();
-	if (likely(!(tmp & RCU_GP_CTR_NEST_MASK)))
-		urcu_active_readers = urcu_gp_ctr;
-	else
-		urcu_active_readers = tmp + RCU_GP_COUNT;
-	debug_yield_read();
-	/*
-	 * Increment active readers count before accessing the pointer.
-	 * See force_mb_all_threads().
-	 */
-	barrier();
-	debug_yield_read();
-}
-
-static inline void rcu_read_unlock(void)
-{
-	debug_yield_read();
-	barrier();
-	debug_yield_read();
-	/*
-	 * Finish using rcu before decrementing the pointer.
-	 * See force_mb_all_threads().
-	 */
-	urcu_active_readers -= RCU_GP_COUNT;
-	debug_yield_read();
-}
-
-/**
- * rcu_assign_pointer - assign (publicize) a pointer to a newly
- * initialized structure that will be dereferenced by RCU read-side
- * critical sections. Returns the value assigned.
- *
- * Inserts memory barriers on architectures that require them
- * (pretty much all of them other than x86), and also prevents
- * the compiler from reordering the code that initializes the
- * structure after the pointer assignment. More importantly, this
- * call documents which pointers will be dereferenced by RCU read-side
- * code.
+/*
+ * library wrappers to be used by non-LGPL compatible source code.
+ * See LGPL-only urcu-pointer-static.h for documentation.
  */
-#define rcu_assign_pointer(p, v) \
-	({ \
-		if (!__builtin_constant_p(v) || \
-		    ((v) != NULL)) \
-			wmb(); \
-		(p) = (v); \
-	})
+extern void rcu_read_lock(void);
+extern void rcu_read_unlock(void);
 
-#define rcu_xchg_pointer(p, v) \
-	({ \
-		if (!__builtin_constant_p(v) || \
-		    ((v) != NULL)) \
-			wmb(); \
-		xchg(p, v); \
-	})
+#endif /* !_LGPL_SOURCE */
 
 extern void synchronize_rcu(void);
 
 /*
- * Exchanges the pointer and waits for quiescent state.
- * The pointer returned can be freed.
+ * Reader thread registration.
  */
-#define urcu_publish_content(p, v) \
-	({ \
-		void *oldptr; \
-		debug_yield_write(); \
-		oldptr = rcu_xchg_pointer(p, v); \
-		synchronize_rcu(); \
-		oldptr; \
-	})
+extern void rcu_register_thread(void);
+extern void rcu_unregister_thread(void);
 
 /*
- * Reader thread registration.
+ * Explicit rcu initialization, for "early" use within library constructors.
  */
-extern void urcu_register_thread(void);
-extern void urcu_unregister_thread(void);
+extern void rcu_init(void);
+
+#ifdef __cplusplus
+}
+#endif
 
 #endif /* _URCU_H */
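
Usage sketch (editorial addition, not part of the patch): the fragment below shows how the API declared by the new urcu.h is meant to be combined. Each reader thread registers itself with rcu_register_thread() before its first rcu_read_lock() and unregisters before exiting, the shared pointer is only dereferenced inside the read-side critical section, and the updater publishes a new version and waits for a grace period with synchronize_rcu() before freeing the old one. rcu_dereference() and rcu_assign_pointer() are assumed to be provided by urcu-pointer.h, which urcu.h now includes; struct config, reader() and update() are made-up names for this example. Build with something like: gcc example.c -lurcu -lpthread.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#include <urcu.h>	/* rcu_read_lock(), synchronize_rcu(), ... */

struct config {
	int value;
};

static struct config *shared_config;	/* RCU-protected pointer */

static void *reader(void *arg)
{
	struct config *cfg;

	(void)arg;
	rcu_register_thread();	/* required before the first rcu_read_lock() */

	rcu_read_lock();
	cfg = rcu_dereference(shared_config);
	if (cfg)
		printf("read value %d\n", cfg->value);
	rcu_read_unlock();

	rcu_unregister_thread();	/* required before the thread exits */
	return NULL;
}

static void update(int value)
{
	struct config *new_cfg, *old_cfg;

	new_cfg = malloc(sizeof(*new_cfg));
	if (!new_cfg)
		abort();
	new_cfg->value = value;

	old_cfg = shared_config;	/* single updater: a plain read is enough */
	rcu_assign_pointer(shared_config, new_cfg);	/* publish the new version */
	synchronize_rcu();	/* wait until no reader can still hold old_cfg */
	free(old_cfg);
}

int main(void)
{
	pthread_t tid;

	update(42);
	pthread_create(&tid, NULL, reader, NULL);
	pthread_join(tid, NULL);
	return 0;
}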
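
Two smaller points from the header, again as an illustrative sketch rather than part of the patch: LGPL-compatible code selects the inline fast path by defining _LGPL_SOURCE before the include, and rcu_init() is only needed when RCU must be ready before main() runs, for example from a library constructor. The my_lib_init name is hypothetical.

/*
 * LGPL-compatible code: defining _LGPL_SOURCE before including urcu.h maps
 * rcu_read_lock()/rcu_read_unlock() to the inline implementations pulled in
 * through urcu-static.h; without it, the extern library wrappers are used.
 */
#define _LGPL_SOURCE
#include <urcu.h>

/*
 * Hypothetical library constructor: initialize RCU explicitly so that other
 * constructors running later can already open read-side critical sections.
 */
static void __attribute__((constructor)) my_lib_init(void)
{
	rcu_init();
}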