#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
+/*
+ * Assume the architecture has coherent caches. Blackfin will want this unset.
+ */
+#define CONFIG_HAVE_MEM_COHERENCY 1
+
+/* Assume P4 or newer */
+#define CONFIG_HAVE_FENCE 1
+
+/* Assume an SMP machine, given we don't have this information */
+#define CONFIG_SMP 1
+
+
+#ifdef CONFIG_HAVE_MEM_COHERENCY
+/*
+ * Caches are coherent, no need to flush them.
+ */
+#define mc() barrier()
+#define rmc() barrier()
+#define wmc() barrier()
+#else
+#error "The architecture must create its own cache flush primitives"
+#define mc() arch_cache_flush()
+#define rmc() arch_cache_flush_read()
+#define wmc() arch_cache_flush_write()
+#endif
+
+
+#ifdef CONFIG_HAVE_MEM_COHERENCY
+
/* x86 32/64 specific */
+#ifdef CONFIG_HAVE_FENCE
#define mb() asm volatile("mfence":::"memory")
#define rmb() asm volatile("lfence":::"memory")
-#define wmb() asm volatile("sfence" ::: "memory")
+#define wmb() asm volatile("sfence":::"memory")
+#else
+/*
+ * Some non-Intel clones support out of order store. wmb() ceases to be a
+ * nop for these.
+ */
+#define mb() asm volatile("lock; addl $0,0(%%esp)":::"memory")
+#define rmb() asm volatile("lock; addl $0,0(%%esp)":::"memory")
+#define wmb() asm volatile("lock; addl $0,0(%%esp)":::"memory")
+#endif
+
+#else /* !CONFIG_HAVE_MEM_COHERENCY */
+
+/*
+ * Without cache coherency, the memory barriers become cache flushes.
+ */
+#define mb() mc()
+#define rmb() rmc()
+#define wmb() wmc()
+
+#endif /* !CONFIG_HAVE_MEM_COHERENCY */
+
+
+#ifdef CONFIG_SMP
+#define smp_mb() mb()
+#define smp_rmb() rmb()
+#define smp_wmb() wmb()
+#define smp_mc() mc()
+#define smp_rmc() rmc()
+#define smp_wmc() wmc()
+#else
+#define smp_mb() barrier()
+#define smp_rmb() barrier()
+#define smp_wmb() barrier()
+#define smp_mc() barrier()
+#define smp_rmc() barrier()
+#define smp_wmc() barrier()
+#endif
+
+/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
+static inline void rep_nop(void)
+{
+ asm volatile("rep; nop" ::: "memory");
+}
+
+static inline void cpu_relax(void)
+{
+ rep_nop();
+}
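
A small illustration of the intended use (the `ready` flag and the function are hypothetical): cpu_relax() belongs in the body of a busy-wait loop.

static volatile int ready;		/* hypothetical flag set by another thread */

static void wait_until_ready(void)
{
	while (!ready)
		cpu_relax();		/* emit PAUSE on each spin iteration */
}
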
static inline void atomic_inc(int *v)
{
* Note: no "lock" prefix even on SMP: xchg always implies lock anyway
* Note 2: xchg has side effect, so that attribute volatile is necessary,
* but generally the primitive is invalid, *ptr is output argument. --ANK
+ * x is considered local, ptr is considered remote.
*/
static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
int size)
: "memory");
break;
}
+	/* Flush the store to *ptr (compiler barrier only when caches are coherent) */
+	smp_wmc();
return x;
}
*/
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
+/*
+ * Identify a shared load. A smp_rmc() or smp_mc() should come before the load.
+ */
+#define _LOAD_SHARED(p) ACCESS_ONCE(p)
+
+/*
+ * Load data from shared memory, doing a cache flush if required.
+ */
+#define LOAD_SHARED(p) \
+ ({ \
+ smp_rmc(); \
+ _LOAD_SHARED(p); \
+ })
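
A small usage sketch, with `shared_flag` as an assumed shared variable: LOAD_SHARED() issues the read-side cache flush and then a volatile load, so the compiler cannot reuse a stale cached value.

extern int shared_flag;			/* illustrative shared variable */

static int flag_is_set(void)
{
	return LOAD_SHARED(shared_flag);	/* smp_rmc(), then volatile load */
}
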
+
+
+/*
+ * Identify a shared store. A smp_wmc() or smp_mc() should follow the store.
+ */
+#define _STORE_SHARED(x, v) \
+ do { \
+ (x) = (v); \
+ } while (0)
+
+/*
+ * Store v into x, where x is located in shared memory. Performs the required
+ * cache flush after writing.
+ */
+#define STORE_SHARED(x, v) \
+ do { \
+ _STORE_SHARED(x, v); \
+ smp_wmc(); \
+ } while (0)
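
The matching writer side for the same assumed variable: the plain store comes first, then smp_wmc() pushes it toward memory.

static void set_flag(void)
{
	STORE_SHARED(shared_flag, 1);	/* store, then smp_wmc() */
}
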
+
/**
* rcu_dereference - fetch an RCU-protected pointer in an
* RCU read-side critical section. This pointer may later
*/
#define rcu_dereference(p) ({ \
- typeof(p) _________p1 = ACCESS_ONCE(p); \
+ typeof(p) _________p1 = LOAD_SHARED(p); \
smp_read_barrier_depends(); \
(_________p1); \
})
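
A reader-side sketch with a hypothetical `struct foo` and RCU-protected pointer `gp`: the pointer is fetched once through rcu_dereference() and only used inside the read-side critical section.

struct foo { int a; };			/* hypothetical payload */
struct foo *gp;				/* hypothetical RCU-protected pointer */

static int read_foo(void)
{
	struct foo *p;
	int ret = -1;

	rcu_read_lock();
	p = rcu_dereference(gp);
	if (p)
		ret = p->a;
	rcu_read_unlock();
	return ret;
}
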
#define SIGURCU SIGUSR1
+/*
+ * If a reader is really non-cooperative and refuses to commit its
+ * urcu_active_readers count to memory (there is no barrier in the reader
+ * per se), kick it after a few loops waiting for it.
+ */
+#define KICK_READER_LOOPS 10000
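
A hedged sketch of how the writer's wait loop might apply this threshold; the `reader` structure, its fields, and the pthread_kill() kick are assumptions about urcu.c, not something defined in this header.

int wait_loops = 0;

while (rcu_old_gp_ongoing(reader->urcu_active_readers)) {
	if (wait_loops++ == KICK_READER_LOOPS) {
		pthread_kill(reader->tid, SIGURCU);	/* force the reader to commit */
		wait_loops = 0;
	} else {
		cpu_relax();
	}
}
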
+
#ifdef DEBUG_YIELD
#include <sched.h>
#include <time.h>
#include <pthread.h>
+#include <unistd.h>
#define YIELD_READ (1 << 0)
#define YIELD_WRITE (1 << 1)
+/* Updates without DEBUG_FULL_MB are much slower. Account for this in the delay. */
+#ifdef DEBUG_FULL_MB
+/* maximum sleep delay, in us */
+#define MAX_SLEEP 50
+#else
+#define MAX_SLEEP 30000
+#endif
+
extern unsigned int yield_active;
extern unsigned int __thread rand_yield;
{
if (yield_active & YIELD_READ)
if (rand_r(&rand_yield) & 0x1)
- sched_yield();
+ usleep(rand_r(&rand_yield) % MAX_SLEEP);
}
static inline void debug_yield_write(void)
{
if (yield_active & YIELD_WRITE)
if (rand_r(&rand_yield) & 0x1)
- sched_yield();
+ usleep(rand_r(&rand_yield) % MAX_SLEEP);
}
static inline void debug_yield_init(void)
}
#endif
+#ifdef DEBUG_FULL_MB
+static inline void reader_barrier(void)
+{
+ smp_mb();
+}
+#else
+static inline void reader_barrier(void)
+{
+ barrier();
+}
+#endif
+
/*
* The trick here is that RCU_GP_CTR_BIT must be a multiple of 8 so we can use a
 * full 8-bit, 16-bit or 32-bit bitmask for the lower order bits.
*/
-#define RCU_GP_COUNT (1U << 0)
+#define RCU_GP_COUNT (1UL << 0)
/* Use the amount of bits equal to half of the architecture long size */
-#define RCU_GP_CTR_BIT (sizeof(long) << 2)
+#define RCU_GP_CTR_BIT (1UL << (sizeof(long) << 2))
#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)
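
For concreteness, with 8-byte longs these definitions evaluate to:

/*
 *   RCU_GP_COUNT         == 0x0000000000000001
 *   RCU_GP_CTR_BIT       == 0x0000000100000000   (grace-period phase bit)
 *   RCU_GP_CTR_NEST_MASK == 0x00000000ffffffff   (read-side nesting count)
 */
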
/*
 * Using an int rather than a char to eliminate false register dependencies
* causing stalls on some architectures.
*/
-extern int urcu_gp_ctr;
+extern long urcu_gp_ctr;
-extern int __thread urcu_active_readers;
+extern long __thread urcu_active_readers;
-static inline int rcu_old_gp_ongoing(int *value)
+static inline int rcu_old_gp_ongoing(long *value)
{
- int v;
+ long v;
if (value == NULL)
return 0;
- debug_yield_write();
- v = ACCESS_ONCE(*value);
- debug_yield_write();
+ /*
+ * Make sure both tests below are done on the same version of *value
+	 * to ensure consistency.
+ */
+ v = LOAD_SHARED(*value);
return (v & RCU_GP_CTR_NEST_MASK) &&
- ((v ^ ACCESS_ONCE(urcu_gp_ctr)) & RCU_GP_CTR_BIT);
+ ((v ^ urcu_gp_ctr) & RCU_GP_CTR_BIT);
}
static inline void rcu_read_lock(void)
{
- int tmp;
+ long tmp;
- debug_yield_read();
tmp = urcu_active_readers;
- debug_yield_read();
+	/* urcu_gp_ctr = RCU_GP_COUNT | (RCU_GP_CTR_BIT either set or cleared) */
+ /*
+	 * The data dependency "read urcu_gp_ctr, write urcu_active_readers"
+ * serializes those two memory operations. The memory barrier in the
+ * signal handler ensures we receive the proper memory commit barriers
+ * required by _STORE_SHARED and _LOAD_SHARED whenever communication
+ * with the writer is needed.
+ */
if (likely(!(tmp & RCU_GP_CTR_NEST_MASK)))
- urcu_active_readers = urcu_gp_ctr + RCU_GP_COUNT;
+ _STORE_SHARED(urcu_active_readers, _LOAD_SHARED(urcu_gp_ctr));
else
- urcu_active_readers = tmp + RCU_GP_COUNT;
- debug_yield_read();
+ _STORE_SHARED(urcu_active_readers, tmp + RCU_GP_COUNT);
/*
* Increment active readers count before accessing the pointer.
* See force_mb_all_threads().
*/
- barrier();
- debug_yield_read();
+ reader_barrier();
}
static inline void rcu_read_unlock(void)
{
- debug_yield_read();
- barrier();
- debug_yield_read();
+ reader_barrier();
/*
* Finish using rcu before decrementing the pointer.
* See force_mb_all_threads().
*/
- urcu_active_readers -= RCU_GP_COUNT;
- debug_yield_read();
+ _STORE_SHARED(urcu_active_readers, urcu_active_readers - RCU_GP_COUNT);
}
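
A worked example of the counter arithmetic, assuming 8-byte longs and that the writer's phase bit happens to be set:

/*
 * urcu_gp_ctr = 0x100000001 (RCU_GP_COUNT | RCU_GP_CTR_BIT)
 *
 *   outermost rcu_read_lock()  : urcu_active_readers = 0x100000001
 *   nested    rcu_read_lock()  : urcu_active_readers = 0x100000002
 *   nested    rcu_read_unlock(): urcu_active_readers = 0x100000001
 *   outermost rcu_read_unlock(): urcu_active_readers = 0x100000000
 *
 * rcu_old_gp_ongoing() then sees a zero nest count and reports the reader
 * as quiescent; a non-zero nest count only delays the writer when the
 * phase bit differs from the current urcu_gp_ctr.
 */
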
/**
if (!__builtin_constant_p(v) || \
((v) != NULL)) \
wmb(); \
- (p) = (v); \
+ STORE_SHARED(p, v); \
})
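
The writer-side counterpart, reusing the hypothetical `struct foo` and `gp` from the reader sketch above (error handling omitted): the node is fully initialized before rcu_assign_pointer() publishes it, so the wmb() in the macro orders the initialization before the pointer store.

static void publish_foo(int a)
{
	struct foo *new = malloc(sizeof(*new));	/* illustration only: no error check */

	new->a = a;			/* initialize before publication */
	rcu_assign_pointer(gp, new);	/* wmb(), then STORE_SHARED(gp, new) */
}
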
#define rcu_xchg_pointer(p, v) \
#define urcu_publish_content(p, v) \
({ \
void *oldptr; \
- debug_yield_write(); \
oldptr = rcu_xchg_pointer(p, v); \
synchronize_rcu(); \
oldptr; \