#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
+/*
+ * Assume the architecture has coherent caches. Blackfin will want this unset.
+ */
+#define CONFIG_HAVE_MEM_COHERENCY 1
+
+/* Assume P4 or newer: mfence/lfence/sfence are available */
+#define CONFIG_HAVE_FENCE 1
+
+/* Assume SMP machine, given we don't have this information */
+#define CONFIG_SMP 1
+
+
+#ifdef CONFIG_HAVE_MEM_COHERENCY
+/*
+ * Caches are coherent; no need to flush them.
+ */
+#define mc() barrier()
+#define rmc() barrier()
+#define wmc() barrier()
+#else
+#error "The architecture must create its own cache flush primitives"
+#define mc() arch_cache_flush()
+#define rmc() arch_cache_flush_read()
+#define wmc() arch_cache_flush_write()
+#endif
+
+
+#ifdef CONFIG_HAVE_MEM_COHERENCY
+
/* x86 32/64 specific */
+#ifdef CONFIG_HAVE_FENCE
#define mb() asm volatile("mfence":::"memory")
#define rmb() asm volatile("lfence":::"memory")
-#define wmb() asm volatile("sfence" ::: "memory")
+#define wmb() asm volatile("sfence":::"memory")
+#else
+/*
+ * Some non-Intel clones support out-of-order stores, so wmb() ceases to be
+ * a nop for these.
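+ *
+ * A lock-prefixed read-modify-write such as "lock; addl $0,0(%%esp)" acts
+ * as a full memory barrier on x86, which is why it can stand in for the
+ * fence instructions on CPUs that lack them.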
+ */
+#define mb() asm volatile("lock; addl $0,0(%%esp)":::"memory")
+#define rmb() asm volatile("lock; addl $0,0(%%esp)":::"memory")
+#define wmb() asm volatile("lock; addl $0,0(%%esp)":::"memory")
+#endif
+
+#else /* !CONFIG_HAVE_MEM_COHERENCY */
+
+/*
+ * Without cache coherency, the memory barriers become cache flushes.
+ */
+#define mb() mc()
+#define rmb() rmc()
+#define wmb() wmc()
+
+#endif /* !CONFIG_HAVE_MEM_COHERENCY */
-/* Assume SMP machine, given we don't have this information */
-#define CONFIG_SMP 1
#ifdef CONFIG_SMP
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
+#define smp_mc() mc()
+#define smp_rmc() rmc()
+#define smp_wmc() wmc()
#else
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
+#define smp_mc() barrier()
+#define smp_rmc() barrier()
+#define smp_wmc() barrier()
#endif
+/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
+static inline void rep_nop(void)
+{
+ asm volatile("rep; nop" ::: "memory");
+}
+
+static inline void cpu_relax(void)
+{
+ rep_nop();
+}
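+
+/*
+ * Illustrative use of cpu_relax() in a busy-wait loop (`flag' is a
+ * hypothetical shared variable; LOAD_SHARED() is defined below):
+ *
+ *	while (!LOAD_SHARED(flag))
+ *		cpu_relax();
+ */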
+
static inline void atomic_inc(int *v)
{
asm volatile("lock; incl %0"
* Note: no "lock" prefix even on SMP: xchg always implies lock anyway
* Note 2: xchg has side effect, so that attribute volatile is necessary,
* but generally the primitive is invalid, *ptr is output argument. --ANK
+ * x is considered local, ptr is considered remote.
*/
static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
int size)
: "memory");
break;
}
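+ /* Commit the store to the shared location *ptr (cache flush where needed). */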
+ smp_wmc();
return x;
}
*/
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
+/*
+ * Identify a shared load. An smp_rmc() or smp_mc() should come before the load.
+ */
+#define _LOAD_SHARED(p) ACCESS_ONCE(p)
+
+/*
+ * Load a value from shared memory, performing a cache flush if required.
+ */
+#define LOAD_SHARED(p) \
+ ({ \
+ smp_rmc(); \
+ _LOAD_SHARED(p); \
+ })
+
+
+/*
+ * Identify a shared store. An smp_wmc() or smp_mc() should follow the store.
+ */
+#define _STORE_SHARED(x, v) \
+ do { \
+ (x) = (v); \
+ } while (0)
+
+/*
+ * Store v into x, where x is located in shared memory. Performs the required
+ * cache flush after writing.
+ */
+#define STORE_SHARED(x, v) \
+ do { \
+ _STORE_SHARED(x, v); \
+ smp_wmc(); \
+ } while (0)
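+
+/*
+ * Illustrative pairing (variables are hypothetical): the writer publishes
+ * with STORE_SHARED() and the reader fetches with LOAD_SHARED(), so both
+ * sides emit the cache flushes needed on non-coherent architectures.
+ *
+ *	writer: STORE_SHARED(shared_count, count);
+ *	reader: v = LOAD_SHARED(shared_count);
+ */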
+
/**
* rcu_dereference - fetch an RCU-protected pointer in an
* RCU read-side critical section. This pointer may later
*/
#define rcu_dereference(p) ({ \
- typeof(p) _________p1 = ACCESS_ONCE(p); \
+ typeof(p) _________p1 = LOAD_SHARED(p); \
smp_read_barrier_depends(); \
(_________p1); \
})
#endif
#ifdef DEBUG_FULL_MB
-static inline void read_barrier()
+static inline void reader_barrier()
{
smp_mb();
}
#else
-static inline void read_barrier()
+static inline void reader_barrier()
{
barrier();
}
* Make sure both tests below are done on the same version of *value
 * to ensure consistency.
*/
- v = ACCESS_ONCE(*value);
+ v = LOAD_SHARED(*value);
return (v & RCU_GP_CTR_NEST_MASK) &&
((v ^ urcu_gp_ctr) & RCU_GP_CTR_BIT);
}
tmp = urcu_active_readers;
/* urcu_gp_ctr = RCU_GP_COUNT | (~RCU_GP_CTR_BIT or RCU_GP_CTR_BIT) */
- /* The data dependency "read urcu_gp_ctr, write urcu_active_readers",
- * serializes those two memory operations. */
+ /*
+ * The data dependency "read urcu_gp_ctr, write urcu_active_readers"
+ * serializes those two memory operations. The memory barrier in the
+ * signal handler ensures we receive the proper memory commit barriers
+ * required by _STORE_SHARED and _LOAD_SHARED whenever communication
+ * with the writer is needed.
+ */
if (likely(!(tmp & RCU_GP_CTR_NEST_MASK)))
- urcu_active_readers = urcu_gp_ctr;
+ _STORE_SHARED(urcu_active_readers, _LOAD_SHARED(urcu_gp_ctr));
else
- urcu_active_readers = tmp + RCU_GP_COUNT;
+ _STORE_SHARED(urcu_active_readers, tmp + RCU_GP_COUNT);
/*
* Increment active readers count before accessing the pointer.
* See force_mb_all_threads().
*/
- read_barrier();
+ reader_barrier();
}
static inline void rcu_read_unlock(void)
{
- read_barrier();
+ reader_barrier();
/*
* Finish using rcu before decrementing the pointer.
* See force_mb_all_threads().
*/
- urcu_active_readers -= RCU_GP_COUNT;
+ _STORE_SHARED(urcu_active_readers, urcu_active_readers - RCU_GP_COUNT);
}
/**
if (!__builtin_constant_p(v) || \
((v) != NULL)) \
wmb(); \
- (p) = (v); \
+ STORE_SHARED(p, v); \
})
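+
+/*
+ * Illustrative publish/read pairing (`gp', `new_foo' and the reader code are
+ * hypothetical): the writer publishes a new structure with
+ * rcu_assign_pointer(), and readers fetch it with rcu_dereference() inside
+ * an rcu_read_lock()/rcu_read_unlock() section.
+ *
+ *	writer: rcu_assign_pointer(gp, new_foo);
+ *	reader: rcu_read_lock();
+ *		p = rcu_dereference(gp);
+ *		if (p)
+ *			use(p);
+ *		rcu_read_unlock();
+ */
+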
#define rcu_xchg_pointer(p, v) \