X-Git-Url: http://git.liburcu.org/?p=urcu.git;a=blobdiff_plain;f=urcu.h;h=0ff0877ac4ac45d6966d674d100b3b47fb7ecbbd;hp=d72cbf61ac1df31d62a8c7a2fe00895d55216d9d;hb=3db2d75b432e617014976239f694b91de2bc0d7d;hpb=de4f917ee256706282146e21b29c10a96fa27ec0

diff --git a/urcu.h b/urcu.h
index d72cbf6..0ff0877 100644
--- a/urcu.h
+++ b/urcu.h
@@ -26,62 +26,10 @@
 #define likely(x)	__builtin_expect(!!(x), 1)
 #define unlikely(x)	__builtin_expect(!!(x), 0)
 
-/*
- * Assume the architecture has coherent caches. Blackfin will want this unset.
- */
-#define CONFIG_HAVE_MEM_COHERENCY 1
-
-/* Assume P4 or newer */
-#define CONFIG_HAVE_FENCE 1
-
 /* Assume SMP machine, given we don't have this information */
 #define CONFIG_SMP 1
 
-#ifdef CONFIG_HAVE_MEM_COHERENCY
-/*
- * Caches are coherent, no need to flush them.
- */
-#define mc()	barrier()
-#define rmc()	barrier()
-#define wmc()	barrier()
-#else
-#error "The architecture must create its own cache flush primitives"
-#define mc()	arch_cache_flush()
-#define rmc()	arch_cache_flush_read()
-#define wmc()	arch_cache_flush_write()
-#endif
-
-
-#ifdef CONFIG_HAVE_MEM_COHERENCY
-
-/* x86 32/64 specific */
-#ifdef CONFIG_HAVE_FENCE
-#define mb()    asm volatile("mfence":::"memory")
-#define rmb()   asm volatile("lfence":::"memory")
-#define wmb()   asm volatile("sfence"::: "memory")
-#else
-/*
- * Some non-Intel clones support out of order store. wmb() ceases to be a
- * nop for these.
- */
-#define mb()    asm volatile("lock; addl $0,0(%%esp)":::"memory")
-#define rmb()   asm volatile("lock; addl $0,0(%%esp)":::"memory")
-#define wmb()   asm volatile("lock; addl $0,0(%%esp)"::: "memory")
-#endif
-
-#else /* !CONFIG_HAVE_MEM_COHERENCY */
-
-/*
- * Without cache coherency, the memory barriers become cache flushes.
- */
-#define mb()    mc()
-#define rmb()   rmc()
-#define wmb()   wmc()
-
-#endif /* !CONFIG_HAVE_MEM_COHERENCY */
-
-
 #ifdef CONFIG_SMP
 #define smp_mb()	mb()
 #define smp_rmb()	rmb()
@@ -98,69 +46,7 @@
 #define smp_wmc()	barrier()
 #endif
 
-/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
-static inline void rep_nop(void)
-{
-	asm volatile("rep; nop" ::: "memory");
-}
-
-static inline void cpu_relax(void)
-{
-	rep_nop();
-}
-
-static inline void atomic_inc(int *v)
-{
-	asm volatile("lock; incl %0"
-		     : "+m" (*v));
-}
-
-#define xchg(ptr, v) \
-	((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), sizeof(*(ptr))))
-
-struct __xchg_dummy {
-	unsigned long a[100];
-};
-#define __xg(x) ((struct __xchg_dummy *)(x))
-
-/*
- * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
- * Note 2: xchg has side effect, so that attribute volatile is necessary,
- *	  but generally the primitive is invalid, *ptr is output argument. --ANK
- * x is considered local, ptr is considered remote.
- */
-static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
-				   int size)
-{
-	switch (size) {
-	case 1:
-		asm volatile("xchgb %b0,%1"
-			     : "=q" (x)
-			     : "m" (*__xg(ptr)), "0" (x)
-			     : "memory");
-		break;
-	case 2:
-		asm volatile("xchgw %w0,%1"
-			     : "=r" (x)
-			     : "m" (*__xg(ptr)), "0" (x)
-			     : "memory");
-		break;
-	case 4:
-		asm volatile("xchgl %k0,%1"
-			     : "=r" (x)
-			     : "m" (*__xg(ptr)), "0" (x)
-			     : "memory");
-		break;
-	case 8:
-		asm volatile("xchgq %0,%1"
-			     : "=r" (x)
-			     : "m" (*__xg(ptr)), "0" (x)
-			     : "memory");
-		break;
-	}
-	smp_wmc();
-	return x;
-}
+#include "arch.h"
 
 /* Nop everywhere except on alpha. */
 #define smp_read_barrier_depends()
@@ -180,22 +66,36 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
 
 #define ACCESS_ONCE(x)	(*(volatile typeof(x) *)&(x))
 
 /*
- * Load a data from remote memory, doing a cache flush if required.
+ * Identify a shared load. A smp_rmc() or smp_mc() should come before the load.
  */
-#define LOAD_REMOTE(p)	       ({ \
-				smp_rmc(); \
-				typeof(p) _________p1 = ACCESS_ONCE(p); \
-				(_________p1); \
-				})
+#define _LOAD_SHARED(p)	       ACCESS_ONCE(p)
 
 /*
- * Store v into x, where x is located in remote memory. Performs the required
- * cache flush after writing.
+ * Load a data from shared memory, doing a cache flush if required.
+ */
+#define LOAD_SHARED(p) \
+	({ \
+		smp_rmc(); \
+		_LOAD_SHARED(p); \
+	})
+
+
+/*
+ * Identify a shared store. A smp_wmc() or smp_mc() should follow the store.
  */
-#define STORE_REMOTE(x, v) \
+#define _STORE_SHARED(x, v) \
 	do { \
 		(x) = (v); \
-		smp_wmc; \
+	} while (0)
+
+/*
+ * Store v into x, where x is located in shared memory. Performs the required
+ * cache flush after writing.
+ */
+#define STORE_SHARED(x, v) \
+	do { \
+		_STORE_SHARED(x, v); \
+		smp_wmc(); \
 	} while (0)
 
 /**
@@ -209,13 +109,11 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
  */
 
 #define rcu_dereference(p)     ({ \
-				typeof(p) _________p1 = LOAD_REMOTE(p); \
+				typeof(p) _________p1 = LOAD_SHARED(p); \
 				smp_read_barrier_depends(); \
 				(_________p1); \
 				})
 
-
-
 #define SIGURCU SIGUSR1
 
 /*
@@ -318,7 +216,7 @@ static inline int rcu_old_gp_ongoing(long *value)
 	 * Make sure both tests below are done on the same version of *value
 	 * to insure consistency.
 	 */
-	v = LOAD_REMOTE(*value);
+	v = LOAD_SHARED(*value);
 	return (v & RCU_GP_CTR_NEST_MASK) &&
 		 ((v ^ urcu_gp_ctr) & RCU_GP_CTR_BIT);
 }
@@ -331,15 +229,15 @@ static inline void rcu_read_lock(void)
 	/* urcu_gp_ctr = RCU_GP_COUNT | (~RCU_GP_CTR_BIT or RCU_GP_CTR_BIT) */
 	/*
 	 * The data dependency "read urcu_gp_ctr, write urcu_active_readers",
-	 * serializes those two memory operations. We are not using STORE_REMOTE
-	 * and LOAD_REMOTE here (although we should) because the writer will
-	 * wake us up with a signal which does a flush in its handler to perform
-	 * urcu_gp_ctr re-read and urcu_active_readers commit to main memory.
+	 * serializes those two memory operations. The memory barrier in the
+	 * signal handler ensures we receive the proper memory commit barriers
+	 * required by _STORE_SHARED and _LOAD_SHARED whenever communication
+	 * with the writer is needed.
 	 */
 	if (likely(!(tmp & RCU_GP_CTR_NEST_MASK)))
-		urcu_active_readers = ACCESS_ONCE(urcu_gp_ctr);
+		_STORE_SHARED(urcu_active_readers, _LOAD_SHARED(urcu_gp_ctr));
 	else
-		urcu_active_readers = tmp + RCU_GP_COUNT;
+		_STORE_SHARED(urcu_active_readers, tmp + RCU_GP_COUNT);
 	/*
 	 * Increment active readers count before accessing the pointer.
 	 * See force_mb_all_threads().
@@ -354,7 +252,7 @@ static inline void rcu_read_unlock(void)
 	 * Finish using rcu before decrementing the pointer.
 	 * See force_mb_all_threads().
 	 */
-	urcu_active_readers -= RCU_GP_COUNT;
+	_STORE_SHARED(urcu_active_readers, urcu_active_readers - RCU_GP_COUNT);
 }
 
 /**
@@ -375,8 +273,7 @@ static inline void rcu_read_unlock(void)
 		if (!__builtin_constant_p(v) || \
 		    ((v) != NULL)) \
 			wmb(); \
-		(p) = (v); \
-		smp_wmc(); \
+		STORE_SHARED(p, v); \
 	})
 
 #define rcu_xchg_pointer(p, v) \
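
Illustration (not part of the patch): the sketch below shows how the new _LOAD_SHARED/_STORE_SHARED "identify" macros compose with their cache-flushing LOAD_SHARED/STORE_SHARED wrappers introduced above. It assumes the cache-coherent SMP case, where smp_rmc() and smp_wmc() reduce to compiler barriers (as the removed x86 code did, and as arch.h is expected to provide on x86). The barrier()/smp_*mc() definitions, the shared_counter variable, and the main() driver are hypothetical, added only to make the example self-contained; it requires GNU C (typeof, statement expressions, inline asm).

/* sketch.c -- minimal, self-contained illustration of the shared-access macros */
#include <stdio.h>

#define barrier()	__asm__ __volatile__("" ::: "memory")
#define smp_rmc()	barrier()	/* cache-coherent case: compiler barrier only */
#define smp_wmc()	barrier()

#define ACCESS_ONCE(x)	(*(volatile typeof(x) *)&(x))

/* Identify a shared load; a smp_rmc() or smp_mc() should come before it. */
#define _LOAD_SHARED(p)		ACCESS_ONCE(p)

/* Load from shared memory, doing the cache flush/ordering if required. */
#define LOAD_SHARED(p) \
	({ \
		smp_rmc(); \
		_LOAD_SHARED(p); \
	})

/* Identify a shared store; a smp_wmc() or smp_mc() should follow it. */
#define _STORE_SHARED(x, v) \
	do { \
		(x) = (v); \
	} while (0)

/* Store to shared memory, doing the cache flush/ordering after the write. */
#define STORE_SHARED(x, v) \
	do { \
		_STORE_SHARED(x, v); \
		smp_wmc(); \
	} while (0)

static long shared_counter;	/* hypothetical variable shared between threads */

int main(void)
{
	/* Writer side: publish a value, then order/flush it. */
	STORE_SHARED(shared_counter, 42);

	/* Reader side: order/flush first, then read the shared value. */
	printf("%ld\n", LOAD_SHARED(shared_counter));
	return 0;
}

The split mirrors how the patch uses the macros in rcu_read_lock()/rcu_read_unlock(): the bare _STORE_SHARED/_LOAD_SHARED forms mark shared accesses without emitting the cache-flush primitive, because on the read side that ordering is supplied by the writer's signal handler rather than inline.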