From e62b2f86c5ec06ed41d33ed578e66fad426ff215 Mon Sep 17 00:00:00 2001
From: Mathieu Desnoyers
Date: Thu, 22 Sep 2011 11:00:14 -0400
Subject: [PATCH] powerpc: use __NO_LWSYNC__ check to use appropriate lwsync/sync opcode

We already used it in uatomic code, move it to arch ppc.

Signed-off-by: Mathieu Desnoyers
---
 urcu/arch/ppc.h    | 10 ++++++++--
 urcu/uatomic/ppc.h |  6 ------
 2 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/urcu/arch/ppc.h b/urcu/arch/ppc.h
index 048b217..2fcbf56 100644
--- a/urcu/arch/ppc.h
+++ b/urcu/arch/ppc.h
@@ -32,6 +32,12 @@ extern "C" {
 /* Include size of POWER5+ L3 cache lines: 256 bytes */
 #define CAA_CACHE_LINE_SIZE 256
 
+#ifdef __NO_LWSYNC__
+#define LWSYNC_OPCODE "sync\n"
+#else
+#define LWSYNC_OPCODE "lwsync\n"
+#endif
+
 /*
  * Use sync for all cmm_mb/rmb/wmb barriers because lwsync does not
  * preserve ordering of cacheable vs. non-cacheable accesses, so it
@@ -48,8 +54,8 @@ extern "C" {
  * Therefore, use it for barriers ordering accesses to cacheable memory
  * only.
  */
-#define cmm_smp_rmb() asm volatile("lwsync":::"memory")
-#define cmm_smp_wmb() asm volatile("lwsync":::"memory")
+#define cmm_smp_rmb() asm volatile(LWSYNC_OPCODE:::"memory")
+#define cmm_smp_wmb() asm volatile(LWSYNC_OPCODE:::"memory")
 
 #define mftbl() \
 	({ \
diff --git a/urcu/uatomic/ppc.h b/urcu/uatomic/ppc.h
index 16dbd0c..3180750 100644
--- a/urcu/uatomic/ppc.h
+++ b/urcu/uatomic/ppc.h
@@ -27,12 +27,6 @@ extern "C" {
 #endif
 
-#ifdef __NO_LWSYNC__
-#define LWSYNC_OPCODE "sync\n"
-#else
-#define LWSYNC_OPCODE "lwsync\n"
-#endif
-
 #define ILLEGAL_INSTR ".long 0xd00d00"
 
 /*
-- 
2.34.1
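
For context on the opcode selection the patch centralizes: lwsync orders loads
and stores to cacheable memory against each other, which is all that
cmm_smp_rmb()/cmm_smp_wmb() require, while the heavier sync also orders
non-cacheable (e.g. MMIO) accesses. On PowerPC targets where the compiler
predefines __NO_LWSYNC__ (cores lacking the lightweight barrier), sync is the
only available substitute. Below is a minimal, self-contained sketch of the
resulting pattern; the macro names match the patch, but the single-threaded
harness around them is illustrative only and not part of the patch:

/* Sketch of the LWSYNC_OPCODE selection from urcu/arch/ppc.h.
 * Compile on ppc/ppc64 to exercise the real barrier; a plain
 * compiler barrier stands in on other hosts so the file still builds.
 */
#include <stdio.h>

#ifdef __NO_LWSYNC__
#define LWSYNC_OPCODE "sync\n"   /* target lacks lwsync: use full sync */
#else
#define LWSYNC_OPCODE "lwsync\n" /* lightweight barrier available */
#endif

#if defined(__powerpc__) || defined(__powerpc64__)
/* Same definitions the patch introduces in urcu/arch/ppc.h. */
#define cmm_smp_rmb() asm volatile(LWSYNC_OPCODE:::"memory")
#define cmm_smp_wmb() asm volatile(LWSYNC_OPCODE:::"memory")
#else
/* Illustrative fallback for non-PowerPC hosts: compiler barrier only. */
#define cmm_smp_rmb() asm volatile("":::"memory")
#define cmm_smp_wmb() asm volatile("":::"memory")
#endif

static int data, flag;

int main(void)
{
	data = 42;
	cmm_smp_wmb();          /* order the data store before the flag store */
	flag = 1;

	if (flag) {
		cmm_smp_rmb();  /* order the flag load before the data load */
		printf("data = %d\n", data);
	}
	return 0;
}

A single-threaded harness obviously cannot demonstrate the ordering itself; it
only verifies that the macros expand and assemble on the chosen target.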