X-Git-Url: http://git.liburcu.org/?a=blobdiff_plain;f=urcu%2Farch%2Fppc.h;h=791529ebb1a08ff3db124103474522b77bf0ad15;hb=1a186a881c195c0f240c0baa813056b0cc1eab91;hp=048b217392cc7cd471647625ca1dd128e6627e00;hpb=8c35d699cf442f91fbba3c99beaa41a083ef7bff;p=urcu.git

diff --git a/urcu/arch/ppc.h b/urcu/arch/ppc.h
index 048b217..791529e 100644
--- a/urcu/arch/ppc.h
+++ b/urcu/arch/ppc.h
@@ -24,14 +24,22 @@
 
 #include <urcu/compiler.h>
 #include <urcu/config.h>
+#include <urcu/syscall-compat.h>
+#include <stdint.h>
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
 /* Include size of POWER5+ L3 cache lines: 256 bytes */
 #define CAA_CACHE_LINE_SIZE	256
 
+#ifdef __NO_LWSYNC__
+#define LWSYNC_OPCODE	"sync\n"
+#else
+#define LWSYNC_OPCODE	"lwsync\n"
+#endif
+
 /*
  * Use sync for all cmm_mb/rmb/wmb barriers because lwsync does not
  * preserve ordering of cacheable vs. non-cacheable accesses, so it
@@ -40,7 +48,7 @@ extern "C" {
  * order cacheable and non-cacheable memory operations separately---i.e.
  * not the latter against the former.
  */
-#define cmm_mb()	asm volatile("sync":::"memory")
+#define cmm_mb()	__asm__ __volatile__ ("sync":::"memory")
 
 /*
  * lwsync orders loads in cacheable memory with respect to other loads,
@@ -48,39 +56,44 @@ extern "C" {
  * Therefore, use it for barriers ordering accesses to cacheable memory
  * only.
  */
-#define cmm_smp_rmb()	asm volatile("lwsync":::"memory")
-#define cmm_smp_wmb()	asm volatile("lwsync":::"memory")
+#define cmm_smp_rmb()	__asm__ __volatile__ (LWSYNC_OPCODE:::"memory")
+#define cmm_smp_wmb()	__asm__ __volatile__ (LWSYNC_OPCODE:::"memory")
 
 #define mftbl()						\
+	__extension__					\
 	({						\
 		unsigned long rval;			\
-		asm volatile("mftbl %0" : "=r" (rval));	\
+		__asm__ __volatile__ ("mftbl %0" : "=r" (rval));	\
 		rval;					\
 	})
 
 #define mftbu()						\
+	__extension__					\
 	({						\
 		unsigned long rval;			\
-		asm volatile("mftbu %0" : "=r" (rval));	\
+		__asm__ __volatile__ ("mftbu %0" : "=r" (rval));	\
 		rval;					\
 	})
 
 #define mftb()						\
+	__extension__					\
 	({						\
 		unsigned long long rval;		\
-		asm volatile("mftb %0" : "=r" (rval));	\
+		__asm__ __volatile__ ("mftb %0" : "=r" (rval));	\
 		rval;					\
 	})
 
-typedef unsigned long long cycles_t;
+#define HAS_CAA_GET_CYCLES
+
+typedef uint64_t caa_cycles_t;
 
 #ifdef __powerpc64__
-static inline cycles_t caa_get_cycles(void)
+static inline caa_cycles_t caa_get_cycles(void)
 {
-	return (cycles_t) mftb();
+	return (caa_cycles_t) mftb();
 }
 #else
-static inline cycles_t caa_get_cycles(void)
+static inline caa_cycles_t caa_get_cycles(void)
 {
 	unsigned long h, l;
 
@@ -90,12 +103,20 @@ static inline cycles_t caa_get_cycles(void)
 		l = mftbl();
 		cmm_barrier();
 		if (mftbu() == h)
-			return (((cycles_t) h) << 32) + l;
+			return (((caa_cycles_t) h) << 32) + l;
 	}
 }
 #endif
 
-#ifdef __cplusplus
+/*
+ * On Linux, define the membarrier system call number if not yet available in
+ * the system headers.
+ */
+#if (defined(__linux__) && !defined(__NR_membarrier))
+#define __NR_membarrier		365
+#endif
+
+#ifdef __cplusplus
 }
 #endif
 
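
The sync/lwsync split above matters to callers pairing cmm_smp_wmb() with
cmm_smp_rmb() on cacheable memory. The following pairing is not from the
patch; it is an illustrative sketch assuming the liburcu public headers
(urcu/arch.h, urcu/system.h), with hypothetical function names:

#include <urcu/arch.h>		/* cmm_smp_wmb()/cmm_smp_rmb() */
#include <urcu/system.h>	/* CMM_STORE_SHARED()/CMM_LOAD_SHARED() */

static int payload;
static int ready;

/* Producer: order the payload store before the flag store. */
static void publish(int value)
{
	CMM_STORE_SHARED(payload, value);
	cmm_smp_wmb();		/* lwsync on PowerPC: both accesses are cacheable */
	CMM_STORE_SHARED(ready, 1);
}

/* Consumer: order the flag load before the payload load. */
static int try_consume(int *out)
{
	if (!CMM_LOAD_SHARED(ready))
		return 0;
	cmm_smp_rmb();		/* pairs with the producer's cmm_smp_wmb() */
	*out = CMM_LOAD_SHARED(payload);
	return 1;
}

Because both sides touch only cacheable memory, lwsync suffices here;
ordering against non-cacheable (MMIO) accesses would need the full sync
that cmm_mb() emits.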
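
The 32-bit caa_get_cycles() retry loop in the patch is the standard way to
read the 64-bit PowerPC timebase when mftbu/mftbl can each fetch only 32
bits: if the upper half is unchanged after reading the lower half, no carry
occurred between the two reads and the combined value is consistent. A
self-contained sketch of the same technique, assuming a GCC-compatible
compiler targeting 32-bit PowerPC (helper names are hypothetical):

#include <stdint.h>

static inline unsigned long read_tbu(void)	/* upper 32 bits of the timebase */
{
	unsigned long rval;

	__asm__ __volatile__ ("mftbu %0" : "=r" (rval));
	return rval;
}

static inline unsigned long read_tbl(void)	/* lower 32 bits of the timebase */
{
	unsigned long rval;

	__asm__ __volatile__ ("mftbl %0" : "=r" (rval));
	return rval;
}

static inline uint64_t read_timebase(void)
{
	unsigned long h, l;

	for (;;) {
		h = read_tbu();
		__asm__ __volatile__ ("" ::: "memory");	/* compiler barrier, as cmm_barrier() */
		l = read_tbl();
		__asm__ __volatile__ ("" ::: "memory");
		/* Retry if the upper half changed while we read the lower half. */
		if (read_tbu() == h)
			return (((uint64_t) h) << 32) + l;
	}
}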
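
The __NR_membarrier fallback definition lets liburcu issue the Linux
membarrier(2) system call (number 365 on powerpc) even when building
against pre-4.3 kernel headers that do not define it. A sketch of how a
caller might probe for the call; the helper names are illustrative, and
the command values are those of linux/membarrier.h:

#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_membarrier
#define __NR_membarrier	365	/* powerpc value, as in the patch above */
#endif

/* Command values from linux/membarrier.h (Linux >= 4.3). */
enum {
	MEMBARRIER_CMD_QUERY	= 0,
	MEMBARRIER_CMD_SHARED	= (1 << 0),
};

static int membarrier(int cmd, int flags)
{
	return syscall(__NR_membarrier, cmd, flags);
}

/*
 * MEMBARRIER_CMD_QUERY returns a bitmask of supported commands, or -1
 * (with errno set to ENOSYS) on kernels without the system call.
 */
static int has_membarrier_shared(void)
{
	int mask = membarrier(MEMBARRIER_CMD_QUERY, 0);

	return mask >= 0 && (mask & MEMBARRIER_CMD_SHARED);
}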