X-Git-Url: http://git.liburcu.org/?p=urcu.git;a=blobdiff_plain;f=urcu.h;h=df270326675dafdf78a237ac72bceebc6b673274;hp=277b7d2f45ae565610e4db9ee89457174fafa667;hb=8895e525a86e679e775b8267546799a0e7bc569f;hpb=3a9e6e9df706b8d39af94d2f027210e2e7d4106e
diff --git a/urcu.h b/urcu.h
index 277b7d2..df27032 100644
--- a/urcu.h
+++ b/urcu.h
@@ -26,10 +26,88 @@
 #define likely(x) __builtin_expect(!!(x), 1)
 #define unlikely(x) __builtin_expect(!!(x), 0)
 
+/*
+ * Assume the architecture has coherent caches. Blackfin will want this unset.
+ */
+#define CONFIG_HAVE_MEM_COHERENCY 1
+
+/* Assume P4 or newer */
+#define CONFIG_HAVE_FENCE 1
+
+/* Assume SMP machine, given we don't have this information */
+#define CONFIG_SMP 1
+
+
+#ifdef CONFIG_HAVE_MEM_COHERENCY
+/*
+ * Caches are coherent, no need to flush them.
+ */
+#define mc() barrier()
+#define rmc() barrier()
+#define wmc() barrier()
+#else
+#error "The architecture must create its own cache flush primitives"
+#define mc() arch_cache_flush()
+#define rmc() arch_cache_flush_read()
+#define wmc() arch_cache_flush_write()
+#endif
+
+
+#ifdef CONFIG_HAVE_MEM_COHERENCY
+
 /* x86 32/64 specific */
+#ifdef CONFIG_HAVE_FENCE
 #define mb() asm volatile("mfence":::"memory")
 #define rmb() asm volatile("lfence":::"memory")
-#define wmb() asm volatile("sfence" ::: "memory")
+#define wmb() asm volatile("sfence"::: "memory")
+#else
+/*
+ * Some non-Intel clones support out of order store. wmb() ceases to be a
+ * nop for these.
+ */
+#define mb() asm volatile("lock; addl $0,0(%%esp)":::"memory")
+#define rmb() asm volatile("lock; addl $0,0(%%esp)":::"memory")
+#define wmb() asm volatile("lock; addl $0,0(%%esp)"::: "memory")
+#endif
+
+#else /* !CONFIG_HAVE_MEM_COHERENCY */
+
+/*
+ * Without cache coherency, the memory barriers become cache flushes.
+ */
+#define mb() mc()
+#define rmb() rmc()
+#define wmb() wmc()
+
+#endif /* !CONFIG_HAVE_MEM_COHERENCY */
+
+
+#ifdef CONFIG_SMP
+#define smp_mb() mb()
+#define smp_rmb() rmb()
+#define smp_wmb() wmb()
+#define smp_mc() mc()
+#define smp_rmc() rmc()
+#define smp_wmc() wmc()
+#else
+#define smp_mb() barrier()
+#define smp_rmb() barrier()
+#define smp_wmb() barrier()
+#define smp_mc() barrier()
+#define smp_rmc() barrier()
+#define smp_wmc() barrier()
+#endif
+
+/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
+static inline void rep_nop(void)
+{
+	asm volatile("rep; nop" ::: "memory");
+}
+
+static inline void cpu_relax(void)
+{
+	rep_nop();
+}
 
 static inline void atomic_inc(int *v)
 {
@@ -49,6 +127,7 @@ struct __xchg_dummy {
  * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
  * Note 2: xchg has side effect, so that attribute volatile is necessary,
  *	  but generally the primitive is invalid, *ptr is output argument. --ANK
+ * x is considered local, ptr is considered remote.
  */
 static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
 				   int size)
@@ -79,6 +158,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
 			: "memory");
 		break;
 	}
+	smp_wmc();
 	return x;
 }
 
@@ -99,6 +179,25 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
  */
 #define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
 
+/*
+ * Load data from shared memory, doing a cache flush if required.
+ */
+#define LOAD_SHARED(p)	({ \
+			smp_rmc(); \
+			typeof(p) _________p1 = ACCESS_ONCE(p); \
+			(_________p1); \
+			})
+
+/*
+ * Store v into x, where x is located in shared memory. Performs the required
+ * cache flush after writing.
+ */
+#define STORE_SHARED(x, v) \
+	do { \
+		(x) = (v); \
+		smp_wmc(); \
+	} while (0)
+
 /**
  * rcu_dereference - fetch an RCU-protected pointer in an
  * RCU read-side critical section. This pointer may later
@@ -110,21 +209,39 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
  */
 
 #define rcu_dereference(p) ({ \
-				typeof(p) _________p1 = ACCESS_ONCE(p); \
+				typeof(p) _________p1 = LOAD_SHARED(p); \
 				smp_read_barrier_depends(); \
 				(_________p1); \
 				})
+
+
 
 #define SIGURCU SIGUSR1
 
+/*
+ * If a reader is really non-cooperative and refuses to commit its
+ * urcu_active_readers count to memory (there is no barrier in the reader
+ * per se), kick it after a few loops waiting for it.
+ */
+#define KICK_READER_LOOPS 10000
+
 #ifdef DEBUG_YIELD
 #include <sched.h>
 #include <time.h>
 #include <pthread.h>
+#include <unistd.h>
 
 #define YIELD_READ (1 << 0)
 #define YIELD_WRITE (1 << 1)
 
+/* Updates without DEBUG_FULL_MB are much slower. Account for this in the delay */
+#ifdef DEBUG_FULL_MB
+/* maximum sleep delay, in us */
+#define MAX_SLEEP 50
+#else
+#define MAX_SLEEP 30000
+#endif
+
 extern unsigned int yield_active;
 extern unsigned int __thread rand_yield;
 
@@ -132,14 +249,14 @@ static inline void debug_yield_read(void)
 {
 	if (yield_active & YIELD_READ)
 		if (rand_r(&rand_yield) & 0x1)
-			sched_yield();
+			usleep(rand_r(&rand_yield) % MAX_SLEEP);
 }
 
 static inline void debug_yield_write(void)
 {
 	if (yield_active & YIELD_WRITE)
 		if (rand_r(&rand_yield) & 0x1)
-			sched_yield();
+			usleep(rand_r(&rand_yield) % MAX_SLEEP);
 }
 
 static inline void debug_yield_init(void)
@@ -161,6 +278,18 @@ static inline void debug_yield_init(void)
 }
 #endif
 
+#ifdef DEBUG_FULL_MB
+static inline void reader_barrier()
+{
+	smp_mb();
+}
+#else
+static inline void reader_barrier()
+{
+	barrier();
+}
+#endif
+
 /*
  * The trick here is that RCU_GP_CTR_BIT must be a multiple of 8 so we can use a
  * full 8-bits, 16-bits or 32-bits bitmask for the lower order bits.
@@ -185,45 +314,47 @@ static inline int rcu_old_gp_ongoing(long *value)
 
 	if (value == NULL)
 		return 0;
-	debug_yield_write();
-	v = ACCESS_ONCE(*value);
-	debug_yield_write();
+	/*
+	 * Make sure both tests below are done on the same version of *value
+	 * to ensure consistency.
+	 */
+	v = LOAD_SHARED(*value);
 	return (v & RCU_GP_CTR_NEST_MASK) &&
-		((v ^ ACCESS_ONCE(urcu_gp_ctr)) & RCU_GP_CTR_BIT);
+		((v ^ urcu_gp_ctr) & RCU_GP_CTR_BIT);
 }
 
 static inline void rcu_read_lock(void)
 {
 	long tmp;
 
-	debug_yield_read();
 	tmp = urcu_active_readers;
-	debug_yield_read();
 	/* urcu_gp_ctr = RCU_GP_COUNT | (~RCU_GP_CTR_BIT or RCU_GP_CTR_BIT) */
+	/*
+	 * The data dependency "read urcu_gp_ctr, write urcu_active_readers"
+	 * serializes those two memory operations. We are not using STORE_SHARED
+	 * and LOAD_SHARED here (although we should) because the writer will
+	 * wake us up with a signal whose handler does a cache flush to re-read
+	 * urcu_gp_ctr and commit urcu_active_readers to main memory.
+	 */
 	if (likely(!(tmp & RCU_GP_CTR_NEST_MASK)))
-		urcu_active_readers = urcu_gp_ctr;
+		urcu_active_readers = ACCESS_ONCE(urcu_gp_ctr);
	else
 		urcu_active_readers = tmp + RCU_GP_COUNT;
-	debug_yield_read();
 	/*
 	 * Increment active readers count before accessing the pointer.
 	 * See force_mb_all_threads().
 	 */
-	barrier();
-	debug_yield_read();
+	reader_barrier();
 }
 
 static inline void rcu_read_unlock(void)
 {
-	debug_yield_read();
-	barrier();
-	debug_yield_read();
+	reader_barrier();
 	/*
 	 * Finish using rcu before decrementing the pointer.
 	 * See force_mb_all_threads().
 	 */
 	urcu_active_readers -= RCU_GP_COUNT;
-	debug_yield_read();
 }
 
 /**
@@ -245,6 +376,7 @@ static inline void rcu_read_unlock(void)
 			((v) != NULL)) \
 				wmb(); \
 		(p) = (v); \
+		smp_wmc(); \
 	})
 
 #define rcu_xchg_pointer(p, v) \
@@ -264,7 +396,6 @@ extern void synchronize_rcu(void);
 #define urcu_publish_content(p, v) \
 	({ \
 		void *oldptr; \
-		debug_yield_write(); \
 		oldptr = rcu_xchg_pointer(p, v); \
 		synchronize_rcu(); \
 		oldptr; \
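
For reference, here is a minimal reader-side sketch built on the primitives this patch touches (rcu_read_lock(), rcu_dereference(), rcu_read_unlock()). The struct foo / rcu_ptr names and the reader() function are hypothetical, the calling thread is assumed to already be registered with the library, and this code is an illustration, not part of urcu.h or of the patch:

#include <stdio.h>
#include "urcu.h"

struct foo {
	int a;
};

extern struct foo *rcu_ptr;	/* hypothetical pointer published by a writer */

void reader(void)
{
	struct foo *p;

	rcu_read_lock();		/* mark this thread as an active reader */
	p = rcu_dereference(rcu_ptr);	/* LOAD_SHARED + smp_read_barrier_depends() */
	if (p)
		printf("%d\n", p->a);
	rcu_read_unlock();		/* reader_barrier(), then drop the nesting count */
}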
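A matching writer-side sketch, under the same caveats: urcu_publish_content() and synchronize_rcu() come from the patched header and library, while struct foo, rcu_ptr and update() are again made-up names for illustration only:

#include <stdlib.h>
#include "urcu.h"

struct foo {
	int a;
};

struct foo *rcu_ptr;		/* hypothetical shared pointer, read by readers */

void update(int newval)
{
	struct foo *newp, *oldp;

	newp = malloc(sizeof(*newp));
	if (!newp)
		return;
	newp->a = newval;
	/*
	 * urcu_publish_content() exchanges the pointer and then calls
	 * synchronize_rcu(), so oldp can be freed once no pre-existing
	 * reader can still hold a reference to it.
	 */
	oldp = urcu_publish_content(&rcu_ptr, newp);
	free(oldp);
}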
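Finally, a simplified sketch of how a writer-side wait loop might use rcu_old_gp_ongoing(), cpu_relax() and KICK_READER_LOOPS together. The library's actual loop is not in this header; it presumably also walks its list of registered readers and, per the KICK_READER_LOOPS comment above, kicks a non-cooperative reader instead of spinning forever. wait_for_reader() and active_readers are hypothetical names:

#include "urcu.h"

/* Spin until one reader's counter has left the previous grace period. */
static void wait_for_reader(long *active_readers)
{
	int wait_loops = 0;

	while (rcu_old_gp_ongoing(active_readers)) {
		if (++wait_loops >= KICK_READER_LOOPS) {
			/* A real implementation would kick (signal) the reader here. */
			wait_loops = 0;
		}
		cpu_relax();	/* rep; nop, polite busy-waiting */
	}
}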