X-Git-Url: http://git.liburcu.org/?p=urcu.git;a=blobdiff_plain;f=urcu.h;h=03764ab33dbe241b643b72ef9cbfd6757662fa3b;hp=277b7d2f45ae565610e4db9ee89457174fafa667;hb=bb48818526ec4317f9e6daeb0aa1cd64d528f754;hpb=3a9e6e9df706b8d39af94d2f027210e2e7d4106e

diff --git a/urcu.h b/urcu.h
index 277b7d2..03764ab 100644
--- a/urcu.h
+++ b/urcu.h
@@ -121,10 +121,19 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
 #include <sched.h>
 #include <time.h>
 #include <pthread.h>
+#include <unistd.h>
 
 #define YIELD_READ (1 << 0)
 #define YIELD_WRITE (1 << 1)
 
+/* Updates without DEBUG_FULL_MB are much slower. Account this in the delay */
+#ifdef DEBUG_FULL_MB
+/* maximum sleep delay, in us */
+#define MAX_SLEEP 50
+#else
+#define MAX_SLEEP 30000
+#endif
+
 extern unsigned int yield_active;
 extern unsigned int __thread rand_yield;
 
@@ -132,14 +141,14 @@ static inline void debug_yield_read(void)
 {
 	if (yield_active & YIELD_READ)
 		if (rand_r(&rand_yield) & 0x1)
-			sched_yield();
+			usleep(rand_r(&rand_yield) % MAX_SLEEP);
 }
 
 static inline void debug_yield_write(void)
 {
 	if (yield_active & YIELD_WRITE)
 		if (rand_r(&rand_yield) & 0x1)
-			sched_yield();
+			usleep(rand_r(&rand_yield) % MAX_SLEEP);
 }
 
 static inline void debug_yield_init(void)
@@ -161,6 +170,18 @@ static inline void debug_yield_init(void)
 }
 #endif
+#ifdef DEBUG_FULL_MB
+static inline void read_barrier()
+{
+	mb();
+}
+#else
+static inline void read_barrier()
+{
+	barrier();
+}
+#endif
+
 /*
  * The trick here is that RCU_GP_CTR_BIT must be a multiple of 8 so we can use a
  * full 8-bits, 16-bits or 32-bits bitmask for the lower order bits.
  */
@@ -209,14 +230,14 @@ static inline void rcu_read_lock(void)
 	 * Increment active readers count before accessing the pointer.
 	 * See force_mb_all_threads().
 	 */
-	barrier();
+	read_barrier();
 	debug_yield_read();
 }
 
 static inline void rcu_read_unlock(void)
 {
 	debug_yield_read();
-	barrier();
+	read_barrier();
 	debug_yield_read();
 	/*
 	 * Finish using rcu before decrementing the pointer.
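
For context: read_barrier() turns the read-side ordering points into a full memory barrier (mb()) when DEBUG_FULL_MB is defined and leaves them as plain compiler barriers otherwise, while the debug yield helpers now sleep a random number of microseconds bounded by MAX_SLEEP instead of calling sched_yield(); the bound is far larger without DEBUG_FULL_MB, matching the patch comment that updates are much slower in that configuration. The standalone sketch below only illustrates this pattern under stated assumptions: an x86 mfence-based mb(), glibc rand_r()/usleep(), and invented helper names (debug_delay, main) that are not part of urcu.h.

/* Illustrative sketch only; build with or without -DDEBUG_FULL_MB. */
#define _DEFAULT_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define barrier()	__asm__ __volatile__("" : : : "memory")		/* compiler barrier only */
#define mb()		__asm__ __volatile__("mfence" : : : "memory")	/* full fence (x86 assumption) */

#ifdef DEBUG_FULL_MB
#define MAX_SLEEP 50		/* us: reader-side fences keep updates fast, short delays suffice */
static inline void read_barrier(void) { mb(); }
#else
#define MAX_SLEEP 30000		/* us: updates are much slower here, so allow longer random delays */
static inline void read_barrier(void) { barrier(); }
#endif

static unsigned int rand_yield;

/* Random delay bounded by MAX_SLEEP, mirroring the debug_yield_*() helpers. */
static void debug_delay(void)
{
	usleep(rand_r(&rand_yield) % MAX_SLEEP);
}

int main(void)
{
	rand_yield = 42;	/* any per-thread seed works for the sketch */
	read_barrier();		/* ordering point around a (simulated) read-side access */
	debug_delay();		/* widen race windows during torture testing */
	printf("MAX_SLEEP = %d us\n", MAX_SLEEP);
	return 0;
}

Compiling the sketch once with -DDEBUG_FULL_MB and once without shows the two barrier flavours and the two sleep bounds side by side.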