diff --git a/urcu/arch/ppc.h b/urcu/arch/ppc.h
index 2fcbf56..1784ccd 100644
--- a/urcu/arch/ppc.h
+++ b/urcu/arch/ppc.h
@@ -24,6 +24,7 @@
 
 #include <urcu/compiler.h>
 #include <urcu/config.h>
+#include <urcu/syscall-compat.h>
 
 #ifdef __cplusplus
 extern "C" {
@@ -46,7 +47,7 @@ extern "C" {
  * order cacheable and non-cacheable memory operations separately---i.e.
  * not the latter against the former.
  */
-#define cmm_mb()         asm volatile("sync":::"memory")
+#define cmm_mb()         __asm__ __volatile__ ("sync":::"memory")
 
 /*
  * lwsync orders loads in cacheable memory with respect to other loads,
@@ -54,27 +55,30 @@ extern "C" {
  * Therefore, use it for barriers ordering accesses to cacheable memory
  * only.
  */
-#define cmm_smp_rmb()    asm volatile(LWSYNC_OPCODE:::"memory")
-#define cmm_smp_wmb()    asm volatile(LWSYNC_OPCODE:::"memory")
+#define cmm_smp_rmb()    __asm__ __volatile__ (LWSYNC_OPCODE:::"memory")
+#define cmm_smp_wmb()    __asm__ __volatile__ (LWSYNC_OPCODE:::"memory")
 
 #define mftbl()                                         \
+	__extension__                                   \
 	({                                              \
 		unsigned long rval;                     \
-		asm volatile("mftbl %0" : "=r" (rval)); \
+		__asm__ __volatile__ ("mftbl %0" : "=r" (rval)); \
 		rval;                                   \
 	})
 
 #define mftbu()                                         \
+	__extension__                                   \
 	({                                              \
 		unsigned long rval;                     \
-		asm volatile("mftbu %0" : "=r" (rval)); \
+		__asm__ __volatile__ ("mftbu %0" : "=r" (rval)); \
 		rval;                                   \
 	})
 
 #define mftb()                                          \
+	__extension__                                   \
 	({                                              \
 		unsigned long long rval;                \
-		asm volatile("mftb %0" : "=r" (rval));  \
+		__asm__ __volatile__ ("mftb %0" : "=r" (rval)); \
 		rval;                                   \
 	})
 
@@ -101,6 +105,14 @@ static inline cycles_t caa_get_cycles(void)
 }
 #endif
 
+/*
+ * Define the membarrier system call number if not yet available in the
+ * system headers.
+ */
+#ifndef __NR_membarrier
+#define __NR_membarrier		365
+#endif
+
 #ifdef __cplusplus
 }
 #endif
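
Background on the sync/lwsync split in the hunks above: lwsync is cheaper
than sync but only orders accesses to cacheable memory, which is exactly the
contract of cmm_smp_rmb()/cmm_smp_wmb(); cmm_mb() keeps the full sync for
cases that also involve non-cacheable (e.g. device) memory. A minimal sketch
of the store/load pairing these barriers exist for, with illustrative
variables payload and flag (real liburcu code would additionally wrap the
shared accesses in CMM_STORE_SHARED()/CMM_LOAD_SHARED()):

	int payload;	/* illustrative shared data */
	int flag;	/* illustrative publication flag */

	/* Writer: make the payload visible before the flag. */
	payload = 42;
	cmm_smp_wmb();	/* order the payload store before the flag store */
	flag = 1;

	/* Reader: check the flag before touching the payload. */
	if (flag) {
		cmm_smp_rmb();	/* order the flag load before the payload load */
		/* payload is guaranteed to read 42 here */
	}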
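
Why the header provides mftbu() alongside mftbl(): on 32-bit PowerPC the
64-bit timebase is read as two halves, and a carry can propagate into the
upper half between the two reads. The standard fix is an upper/lower/upper
retry loop, which is what this header's 32-bit caa_get_cycles() branch does;
a sketch of the same idea under the hypothetical name read_timebase(), using
cmm_barrier() from urcu/compiler.h:

	static inline unsigned long long read_timebase(void)
	{
		unsigned long h, l;

		for (;;) {
			h = mftbu();		/* upper 32 bits */
			cmm_barrier();		/* compiler barrier between reads */
			l = mftbl();		/* lower 32 bits */
			cmm_barrier();
			if (mftbu() == h)	/* upper half unchanged: no torn read */
				return (((unsigned long long) h) << 32) + l;
			/* a carry hit the upper half between reads: retry */
		}
	}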
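
On the new __NR_membarrier fallback: 365 is the powerpc syscall number for
membarrier(2), which was merged in Linux 4.3, so the #ifndef guard only fills
in the value when older kernel headers lack it while deferring to the system
definition when present. A minimal sketch of probing for the call at run
time; has_membarrier() is an illustrative name, and MEMBARRIER_CMD_QUERY
(value 0) comes from <linux/membarrier.h> on kernels that ship it:

	#include <unistd.h>
	#include <sys/syscall.h>

	#ifndef MEMBARRIER_CMD_QUERY
	#define MEMBARRIER_CMD_QUERY	0	/* from <linux/membarrier.h> */
	#endif

	static int has_membarrier(void)
	{
		/*
		 * MEMBARRIER_CMD_QUERY returns a bitmask of supported
		 * commands; a kernel without the call fails with ENOSYS.
		 */
		return syscall(__NR_membarrier, MEMBARRIER_CMD_QUERY, 0) >= 0;
	}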