From: Mathieu Desnoyers
Date: Thu, 22 Sep 2011 15:00:14 +0000 (-0400)
Subject: powerpc: use __NO_LWSYNC__ check to use appropriate lwsync/sync opcode
X-Git-Tag: v0.6.5~10
X-Git-Url: https://git.liburcu.org/?p=urcu.git;a=commitdiff_plain;h=e62b2f86c5ec06ed41d33ed578e66fad426ff215

powerpc: use __NO_LWSYNC__ check to use appropriate lwsync/sync opcode

We already used it in the uatomic code; move it to arch ppc.

Signed-off-by: Mathieu Desnoyers
---

diff --git a/urcu/arch/ppc.h b/urcu/arch/ppc.h
index 048b217..2fcbf56 100644
--- a/urcu/arch/ppc.h
+++ b/urcu/arch/ppc.h
@@ -32,6 +32,12 @@ extern "C" {
 /* Include size of POWER5+ L3 cache lines: 256 bytes */
 #define CAA_CACHE_LINE_SIZE	256
 
+#ifdef __NO_LWSYNC__
+#define LWSYNC_OPCODE	"sync\n"
+#else
+#define LWSYNC_OPCODE	"lwsync\n"
+#endif
+
 /*
  * Use sync for all cmm_mb/rmb/wmb barriers because lwsync does not
  * preserve ordering of cacheable vs. non-cacheable accesses, so it
@@ -48,8 +54,8 @@ extern "C" {
  * Therefore, use it for barriers ordering accesses to cacheable memory
  * only.
  */
-#define cmm_smp_rmb()	asm volatile("lwsync":::"memory")
-#define cmm_smp_wmb()	asm volatile("lwsync":::"memory")
+#define cmm_smp_rmb()	asm volatile(LWSYNC_OPCODE:::"memory")
+#define cmm_smp_wmb()	asm volatile(LWSYNC_OPCODE:::"memory")
 
 #define mftbl()						\
 	({ \
diff --git a/urcu/uatomic/ppc.h b/urcu/uatomic/ppc.h
index 16dbd0c..3180750 100644
--- a/urcu/uatomic/ppc.h
+++ b/urcu/uatomic/ppc.h
@@ -27,12 +27,6 @@ extern "C" {
 #endif
 
-#ifdef __NO_LWSYNC__
-#define LWSYNC_OPCODE	"sync\n"
-#else
-#define LWSYNC_OPCODE	"lwsync\n"
-#endif
-
 #define ILLEGAL_INSTR	".long	0xd00d00"
 
 /*
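
For context, a minimal standalone sketch (not part of the patch, powerpc-only) of how the selected opcode ends up being used as a write barrier. GCC's powerpc backend defines __NO_LWSYNC__ when the target processor lacks lwsync, so the heavier full "sync" is substituted; the smp_wmb/producer names below are illustrative, not liburcu API.

/*
 * Sketch: select the barrier opcode the same way the patch does, then
 * use it to order a data store before a flag store.
 */
#ifdef __NO_LWSYNC__
#define LWSYNC_OPCODE	"sync\n"	/* target has no lwsync; fall back to sync */
#else
#define LWSYNC_OPCODE	"lwsync\n"	/* lighter barrier for cacheable memory */
#endif

/* Hypothetical write barrier built on the selected opcode. */
#define smp_wmb()	asm volatile(LWSYNC_OPCODE:::"memory")

int data, flag;

void producer(void)
{
	data = 42;
	smp_wmb();	/* order the store to data before the store to flag */
	flag = 1;
}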