#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
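/*
 * Usage sketch (editor's illustration, not part of the patch): the !!(x)
 * normalizes any scalar to 0 or 1 so it matches __builtin_expect's
 * expected value, letting the compiler move the cold path out of line:
 *
 *	if (unlikely(ret < 0))
 *		return ret;
 */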
-/*
- * Assume the architecture has coherent caches. Blackfin will want this unset.
- */
-#define CONFIG_HAVE_MEM_COHERENCY 1
-
-/* Assume P4 or newer */
-#define CONFIG_HAVE_FENCE 1
-
/* Assume SMP machine, given we don't have this information */
#define CONFIG_SMP 1
-#ifdef CONFIG_HAVE_MEM_COHERENCY
-/*
- * Caches are coherent, no need to flush them.
- */
-#define mc() barrier()
-#define rmc() barrier()
-#define wmc() barrier()
-#else
-#error "The architecture must create its own cache flush primitives"
-#define mc() arch_cache_flush()
-#define rmc() arch_cache_flush_read()
-#define wmc() arch_cache_flush_write()
-#endif
-
-
-#ifdef CONFIG_HAVE_MEM_COHERENCY
-
-/* x86 32/64 specific */
-#ifdef CONFIG_HAVE_FENCE
-#define mb() asm volatile("mfence":::"memory")
-#define rmb() asm volatile("lfence":::"memory")
-#define wmb() asm volatile("sfence"::: "memory")
-#else
-/*
- * Some non-Intel clones support out of order store. wmb() ceases to be a
- * nop for these.
- */
-#define mb() asm volatile("lock; addl $0,0(%%esp)":::"memory")
-#define rmb() asm volatile("lock; addl $0,0(%%esp)":::"memory")
-#define wmb() asm volatile("lock; addl $0,0(%%esp)"::: "memory")
-#endif
-
-#else /* !CONFIG_HAVE_MEM_COHERENCY */
-
-/*
- * Without cache coherency, the memory barriers become cache flushes.
- */
-#define mb() mc()
-#define rmb() rmc()
-#define wmb() wmc()
-
-#endif /* !CONFIG_HAVE_MEM_COHERENCY */
-
-
#ifdef CONFIG_SMP
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#define smp_mc() mc()
#define smp_rmc() rmc()
#define smp_wmc() wmc()
#endif
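/*
 * Illustration (editor's sketch, not part of the patch): on a
 * coherent-cache machine the cache-management variants reduce to compiler
 * barriers, so a writer/reader pair is expected to interleave as:
 *
 *	writer:				reader:
 *	shared_val = 1;			smp_rmc();
 *	smp_wmc();			v = shared_val;
 *
 * shared_val is a hypothetical shared variable used only for this sketch.
 */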
-/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
-static inline void rep_nop(void)
-{
- asm volatile("rep; nop" ::: "memory");
-}
-
-static inline void cpu_relax(void)
-{
- rep_nop();
-}
-
-static inline void atomic_inc(int *v)
-{
- asm volatile("lock; incl %0"
- : "+m" (*v));
-}
-
-#define xchg(ptr, v) \
- ((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), sizeof(*(ptr))))
-
-struct __xchg_dummy {
- unsigned long a[100];
-};
-#define __xg(x) ((struct __xchg_dummy *)(x))
-
-/*
- * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
- * Note 2: xchg has side effect, so that attribute volatile is necessary,
- * but generally the primitive is invalid, *ptr is output argument. --ANK
- * x is considered local, ptr is considered remote.
- */
-static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
- int size)
-{
- switch (size) {
- case 1:
- asm volatile("xchgb %b0,%1"
- : "=q" (x)
- : "m" (*__xg(ptr)), "0" (x)
- : "memory");
- break;
- case 2:
- asm volatile("xchgw %w0,%1"
- : "=r" (x)
- : "m" (*__xg(ptr)), "0" (x)
- : "memory");
- break;
- case 4:
- asm volatile("xchgl %k0,%1"
- : "=r" (x)
- : "m" (*__xg(ptr)), "0" (x)
- : "memory");
- break;
- case 8:
- asm volatile("xchgq %0,%1"
- : "=r" (x)
- : "m" (*__xg(ptr)), "0" (x)
- : "memory");
- break;
- }
- smp_wmc();
- return x;
-}
+#include "arch.h"
/* Nop everywhere except on alpha. */
#define smp_read_barrier_depends()
/*
 * Prevent the compiler from merging or refetching accesses. This
 * constrains only the compiler, not CPU reordering.
 */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
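/*
 * Example (editor's sketch): the volatile cast forces one real load per
 * use, so a busy-wait loop cannot be optimized into a single hoisted read:
 *
 *	while (!ACCESS_ONCE(stop_flag))
 *		cpu_relax();
 *
 * stop_flag is a hypothetical shared variable for illustration.
 */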
+/*
+ * Identify a shared load. A smp_rmc() or smp_mc() should come before the load.
+ */
+#define _LOAD_SHARED(p) ACCESS_ONCE(p)
+
/*
 * Load data from shared memory, doing a cache flush if required.
*/
-#define LOAD_SHARED(p) ({ \
- smp_rmc(); \
- typeof(p) _________p1 = ACCESS_ONCE(p); \
- (_________p1); \
- })
+#define LOAD_SHARED(p) \
+ ({ \
+ smp_rmc(); \
+ _LOAD_SHARED(p); \
+ })
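/*
 * Example (editor's sketch, not part of the patch): LOAD_SHARED() is the
 * self-contained form for ordinary call sites; _LOAD_SHARED() is for
 * paths where the required smp_rmc() has already been issued, as in the
 * read-lock fast path below:
 *
 *	long gen = LOAD_SHARED(urcu_gp_ctr);
 */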
+
+
+/*
+ * Identify a shared store. A smp_wmc() or smp_mc() should follow the store.
+ */
+#define _STORE_SHARED(x, v) \
+ do { \
+ (x) = (v); \
+ } while (0)
/*
 * Store v into x, where x is located in shared memory. Performs the required
 * cache flush after writing.
 */
#define STORE_SHARED(x, v) \
do { \
- (x) = (v); \
- smp_wmc(); \
+ _STORE_SHARED(x, v); \
+ smp_wmc(); \
} while (0)
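/*
 * Example (editor's sketch): STORE_SHARED() bundles the store with the
 * write-side flush, expanding to _STORE_SHARED(x, v) followed by
 * smp_wmc(). ready_flag is a hypothetical publication flag:
 *
 *	STORE_SHARED(ready_flag, 1);
 */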
/**
 * rcu_dereference(): fetch an RCU-protected pointer, inserting the
 * data-dependency barrier required on Alpha, and document exactly
 * which pointers are protected by RCU.
 */
#define rcu_dereference(p) ({ \
		typeof(p) _________p1 = LOAD_SHARED(p); \
		smp_read_barrier_depends(); \
		(_________p1); \
		})
-
-
#define SIGURCU SIGUSR1
static inline void rcu_read_lock(void)
{
	long tmp;

	tmp = urcu_active_readers;
/* urcu_gp_ctr = RCU_GP_COUNT | (~RCU_GP_CTR_BIT or RCU_GP_CTR_BIT) */
/*
* The data dependency "read urcu_gp_ctr, write urcu_active_readers",
- * serializes those two memory operations. We are not using STORE_SHARED
- * and LOAD_SHARED here (although we should) because the writer will
- * wake us up with a signal which does a flush in its handler to perform
- * urcu_gp_ctr re-read and urcu_active_readers commit to main memory.
+ * serializes those two memory operations. The memory barrier in the
+ * signal handler ensures we receive the proper memory commit barriers
+ * required by _STORE_SHARED and _LOAD_SHARED whenever communication
+ * with the writer is needed.
*/
if (likely(!(tmp & RCU_GP_CTR_NEST_MASK)))
- urcu_active_readers = ACCESS_ONCE(urcu_gp_ctr);
+ _STORE_SHARED(urcu_active_readers, _LOAD_SHARED(urcu_gp_ctr));
else
- urcu_active_readers = tmp + RCU_GP_COUNT;
+ _STORE_SHARED(urcu_active_readers, tmp + RCU_GP_COUNT);
	/*
	 * Increment active readers count before accessing the pointer.
	 * See force_mb_all_threads().
	 */
}

static inline void rcu_read_unlock(void)
{
	/*
	 * Finish using rcu before decrementing the pointer.
	 * See force_mb_all_threads().
	 */
- urcu_active_readers -= RCU_GP_COUNT;
+ _STORE_SHARED(urcu_active_readers, urcu_active_readers - RCU_GP_COUNT);
}
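/*
 * Reader usage sketch (editor's illustration; consume() and shared_ptr
 * are hypothetical):
 *
 *	rcu_read_lock();
 *	struct foo *p = rcu_dereference(shared_ptr);
 *	if (p)
 *		consume(p);
 *	rcu_read_unlock();
 *
 * Nesting works because only the outermost rcu_read_lock() snapshots
 * urcu_gp_ctr; inner sections merely add RCU_GP_COUNT to the nest count.
 */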
/**
 * rcu_assign_pointer(): assign (publicize) a pointer to a newly
 * initialized structure that will be dereferenced by RCU read-side
 * critical sections.
 */
#define rcu_assign_pointer(p, v) \
	({ \
if (!__builtin_constant_p(v) || \
((v) != NULL)) \
wmb(); \
- (p) = (v); \
- smp_wmc(); \
+ STORE_SHARED(p, v); \
})
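/*
 * Writer usage sketch (editor's illustration; the node layout is
 * hypothetical):
 *
 *	struct node *n = malloc(sizeof(*n));
 *	n->data = compute();
 *	rcu_assign_pointer(shared_ptr, n);
 *
 * The wmb() orders the node's initialization before the pointer
 * publication; the __builtin_constant_p()/NULL test skips the barrier
 * only when assigning a compile-time NULL, where no prior stores need
 * ordering.
 */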
#define rcu_xchg_pointer(p, v) \