X-Git-Url: http://git.liburcu.org/?p=urcu.git;a=blobdiff_plain;f=urcu%2Farch%2Fx86.h;h=aac8ca1384b63f4ce321680cffbcaca661a0f16c;hp=c1e2e072ff752582a93e90cb7189e45b14283870;hb=2af1c19e6a553878fcb2a5106f050d5ed7ac0f54;hpb=bce63dfd0a2306452c9e39f5df01789e77f3f44a

diff --git a/urcu/arch/x86.h b/urcu/arch/x86.h
index c1e2e07..aac8ca1 100644
--- a/urcu/arch/x86.h
+++ b/urcu/arch/x86.h
@@ -24,23 +24,25 @@
 
 #include <urcu/compiler.h>
 #include <urcu/config.h>
+#include <urcu/syscall-compat.h>
+#include <stdint.h>
 
 #ifdef __cplusplus
 extern "C" {
-#endif 
+#endif
 
 #define CAA_CACHE_LINE_SIZE 128
 
 #ifdef CONFIG_RCU_HAVE_FENCE
-#define cmm_mb() asm volatile("mfence":::"memory")
+#define cmm_mb() __asm__ __volatile__ ("mfence":::"memory")
 
 /*
  * Define cmm_rmb/cmm_wmb to "strict" barriers that may be needed when
  * using SSE or working with I/O areas. cmm_smp_rmb/cmm_smp_wmb are
  * only compiler barriers, which is enough for general use.
  */
-#define cmm_rmb() asm volatile("lfence":::"memory")
-#define cmm_wmb() asm volatile("sfence"::: "memory")
+#define cmm_rmb() __asm__ __volatile__ ("lfence":::"memory")
+#define cmm_wmb() __asm__ __volatile__ ("sfence"::: "memory")
 #define cmm_smp_rmb() cmm_barrier()
 #define cmm_smp_wmb() cmm_barrier()
 #else
@@ -55,32 +57,52 @@ extern "C" {
  * IDT WinChip supports weak store ordering, and the kernel may enable it
  * under our feet; cmm_smp_wmb() ceases to be a nop for these processors.
  */
-#define cmm_mb() asm volatile("lock; addl $0,0(%%esp)":::"memory")
-#define cmm_rmb() asm volatile("lock; addl $0,0(%%esp)":::"memory")
-#define cmm_wmb() asm volatile("lock; addl $0,0(%%esp)"::: "memory")
+#if (CAA_BITS_PER_LONG == 32)
+#define cmm_mb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)":::"memory")
+#define cmm_rmb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)":::"memory")
+#define cmm_wmb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)":::"memory")
+#else
+#define cmm_mb() __asm__ __volatile__ ("lock; addl $0,0(%%rsp)":::"memory")
+#define cmm_rmb() __asm__ __volatile__ ("lock; addl $0,0(%%rsp)":::"memory")
+#define cmm_wmb() __asm__ __volatile__ ("lock; addl $0,0(%%rsp)":::"memory")
+#endif
 #endif
 
-#define caa_cpu_relax() asm volatile("rep; nop" : : : "memory");
+#define caa_cpu_relax() __asm__ __volatile__ ("rep; nop" : : : "memory")
+
+#define HAS_CAA_GET_CYCLES
 
 #define rdtscll(val)						\
 	do {							\
 	     unsigned int __a, __d;				\
-	     asm volatile("rdtsc" : "=a" (__a), "=d" (__d));	\
+	     __asm__ __volatile__ ("rdtsc" : "=a" (__a), "=d" (__d)); \
 	     (val) = ((unsigned long long)__a)			\
 			| (((unsigned long long)__d) << 32);	\
 	} while(0)
 
-typedef unsigned long long cycles_t;
+typedef uint64_t caa_cycles_t;
 
-static inline cycles_t caa_get_cycles(void)
+static inline caa_cycles_t caa_get_cycles(void)
 {
-	cycles_t ret = 0;
+	caa_cycles_t ret = 0;
 
 	rdtscll(ret);
 	return ret;
 }
 
-#ifdef __cplusplus 
+/*
+ * On Linux, define the membarrier system call number if not yet available in
+ * the system headers.
+ */
+#if (defined(__linux__) && !defined(__NR_membarrier))
+#if (CAA_BITS_PER_LONG == 32)
+#define __NR_membarrier 375
+#else
+#define __NR_membarrier 324
+#endif
+#endif
+
+#ifdef __cplusplus
 }
 #endif
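Note (not part of the patch above): the distinction the header comments draw between cmm_rmb()/cmm_wmb() (real fence or locked instructions) and cmm_smp_rmb()/cmm_smp_wmb() (compiler barriers on fence-capable x86) is what makes the usual publish/consume pairing cheap on this architecture. A minimal sketch of that pairing, assuming ordinary cacheable memory and the CMM_LOAD_SHARED()/CMM_STORE_SHARED() accessors from <urcu/system.h>, follows; the producer()/consumer() helpers and the data/ready variables are illustrative, not liburcu code.

    /* Illustrative producer/consumer pairing, not part of the patch. */
    #include <urcu/arch.h>
    #include <urcu/system.h>

    static int data;
    static int ready;

    static void producer(void)
    {
        CMM_STORE_SHARED(data, 42);
        cmm_smp_wmb();    /* order the data store before the flag store */
        CMM_STORE_SHARED(ready, 1);
    }

    /* Returns -1 if not yet published, otherwise the published value. */
    static int consumer(void)
    {
        if (!CMM_LOAD_SHARED(ready))
            return -1;
        cmm_smp_rmb();    /* order the flag load before the data load */
        return CMM_LOAD_SHARED(data);
    }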
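Note (not part of the patch above): a minimal sketch of how the caa_cycles_t/caa_get_cycles() pair introduced here might be used to take coarse timestamps around a busy loop. It assumes an x86 build of liburcu where <urcu/arch.h> pulls in this header; the time_busy_loop() helper is hypothetical, and raw rdtsc values are neither serializing nor guaranteed to be synchronized across cores, so this is illustrative only.

    /* Hypothetical timing sketch, not part of the patch. */
    #include <stdio.h>
    #include <urcu/arch.h>    /* selects urcu/arch/x86.h on x86 builds */

    static void time_busy_loop(void)
    {
        caa_cycles_t start, end;
        unsigned long i;

        start = caa_get_cycles();
        for (i = 0; i < 1000000; i++)
            caa_cpu_relax();    /* "rep; nop" CPU pause hint */
        end = caa_get_cycles();

        printf("elapsed: %llu cycles\n",
               (unsigned long long)(end - start));
    }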
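Note (not part of the patch above): the __NR_membarrier fallback definitions are only useful together with a raw syscall(2) invocation. The sketch below shows one plausible way a caller could probe for and issue the membarrier system call; the membarrier() wrapper and membarrier_shared_available() helper are illustrative assumptions, while the MEMBARRIER_CMD_QUERY/MEMBARRIER_CMD_SHARED values come from the Linux membarrier(2) UAPI.

    /* Hypothetical usage sketch, not part of the patch. */
    #define _GNU_SOURCE
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <errno.h>

    /* Command values from the Linux membarrier(2) UAPI. */
    enum {
        MEMBARRIER_CMD_QUERY  = 0,
        MEMBARRIER_CMD_SHARED = (1 << 0),
    };

    static int membarrier(int cmd, int flags)
    {
    #ifdef __NR_membarrier
        return syscall(__NR_membarrier, cmd, flags);
    #else
        errno = ENOSYS;
        return -1;
    #endif
    }

    /* Nonzero if the running kernel supports MEMBARRIER_CMD_SHARED. */
    static int membarrier_shared_available(void)
    {
        int mask = membarrier(MEMBARRIER_CMD_QUERY, 0);

        return mask >= 0 && (mask & MEMBARRIER_CMD_SHARED);
    }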