X-Git-Url: http://git.liburcu.org/?p=urcu.git;a=blobdiff_plain;f=urcu%2Farch_x86.h;h=d0a58e80caa5abfc980a58ad51407f82b0ceeee6;hp=4ba08f7a288796186aeda16679d3fe8a7d558506;hb=5481ddb381061bda64aebc039900d21cac6a6caf;hpb=49617de1d80d771ac9a1d45b1710b16fac65e0aa

diff --git a/urcu/arch_x86.h b/urcu/arch_x86.h
index 4ba08f7..d0a58e8 100644
--- a/urcu/arch_x86.h
+++ b/urcu/arch_x86.h
@@ -5,13 +5,13 @@
  * arch_x86.h: trivial definitions for the x86 architecture.
  *
  * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
- * Copyright (c) 2009 Mathieu Desnoyers
+ * Copyright (c) 2009 Mathieu Desnoyers
  *
  * This library is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Lesser General Public
  * License as published by the Free Software Foundation; either
  * version 2.1 of the License, or (at your option) any later version.
-*
+ *
  * This library is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
@@ -25,91 +25,27 @@
 #include <urcu/compiler.h>
 #include <urcu/config.h>
 
-#define CONFIG_HAVE_MEM_COHERENCY
+#ifdef __cplusplus
+extern "C" {
+#endif
 
 #define CACHE_LINE_SIZE 128
 
-#ifndef BITS_PER_LONG
-#define BITS_PER_LONG (__SIZEOF_LONG__ * 8)
-#endif
-
-#ifdef CONFIG_URCU_HAVE_FENCE
-#define mb()    asm volatile("mfence":::"memory")
-#define rmb()   asm volatile("lfence":::"memory")
-#define wmb()   asm volatile("sfence"::: "memory")
+#ifdef CONFIG_RCU_HAVE_FENCE
+#define cmm_mb()    asm volatile("mfence":::"memory")
+#define cmm_rmb()   asm volatile("lfence":::"memory")
+#define cmm_wmb()   asm volatile("sfence"::: "memory")
 #else
 /*
- * Some non-Intel clones support out of order store. wmb() ceases to be a
+ * Some non-Intel clones support out of order store. cmm_wmb() ceases to be a
  * nop for these.
  */
-#define mb()    asm volatile("lock; addl $0,0(%%esp)":::"memory")
-#define rmb()   asm volatile("lock; addl $0,0(%%esp)":::"memory")
-#define wmb()   asm volatile("lock; addl $0,0(%%esp)"::: "memory")
-#endif
-
-/*
- * Architectures without cache coherency need something like the following:
- *
- * #define mb()  mc()
- * #define rmb() rmc()
- * #define wmb() wmc()
- * #define mc()  arch_cache_flush()
- * #define rmc() arch_cache_flush_read()
- * #define wmc() arch_cache_flush_write()
- */
-
-#define mc()  barrier()
-#define rmc() barrier()
-#define wmc() barrier()
-
-#ifdef CONFIG_URCU_SMP
-#define smp_mb()  mb()
-#define smp_rmb() rmb()
-#define smp_wmb() wmb()
-#define smp_mc()  mc()
-#define smp_rmc() rmc()
-#define smp_wmc() wmc()
-#else
-#define smp_mb()  barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#define smp_mc()  barrier()
-#define smp_rmc() barrier()
-#define smp_wmc() barrier()
+#define cmm_mb()    asm volatile("lock; addl $0,0(%%esp)":::"memory")
+#define cmm_rmb()   asm volatile("lock; addl $0,0(%%esp)":::"memory")
+#define cmm_wmb()   asm volatile("lock; addl $0,0(%%esp)"::: "memory")
 #endif
 
-/* Nop everywhere except on alpha. */
-#define smp_read_barrier_depends()
-
-static inline void rep_nop(void)
-{
-	asm volatile("rep; nop" : : : "memory");
-}
-
-static inline void cpu_relax(void)
-{
-	rep_nop();
-}
-
-/*
- * Serialize core instruction execution. Also acts as a compiler barrier.
- */
-#ifdef __PIC__
-/*
- * Cannot use cpuid because it clobbers the ebx register and clashes
- * with -fPIC :
- * error: PIC register 'ebx' clobbered in 'asm'
- */
-static inline void sync_core(void)
-{
-	mb();
-}
-#else
-static inline void sync_core(void)
-{
-	asm volatile("cpuid" : : : "memory", "eax", "ebx", "ecx", "edx");
-}
-#endif
+#define cpu_relax() asm volatile("rep; nop" : : : "memory");
 
 #define rdtscll(val) \
 	do { \
@@ -129,4 +65,10 @@ static inline cycles_t get_cycles(void)
 	return ret;
 }
 
+#ifdef __cplusplus
+}
+#endif
+
+#include <urcu/arch_generic.h>
+
 #endif /* _URCU_ARCH_X86_H */
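The net effect of the patch is to reduce arch_x86.h to the genuinely x86-specific pieces (the cmm_mb()/cmm_rmb()/cmm_wmb() fences, cpu_relax() and the TSC accessors), with the generic smp_* wrappers now expected to come from the newly included urcu/arch_generic.h. As a rough illustration of how the renamed primitives pair up, here is a minimal single-producer/single-consumer sketch. It assumes the post-rename headers are installed and reachable as <urcu/arch.h>; msg, msg_ready, produce() and consume() are hypothetical names for this example, not library API.

/* Sketch only: publish a payload with cmm_wmb(), consume it with cmm_rmb(). */
#include <urcu/arch.h>

static int msg;                 /* payload, written before the flag */
static volatile int msg_ready;  /* publication flag */

static void produce(int v)
{
	msg = v;
	cmm_wmb();      /* order the payload store before the flag store */
	msg_ready = 1;
}

static int consume(void)
{
	while (!msg_ready) {
		cpu_relax();    /* "rep; nop" (PAUSE) keeps the spin loop polite */
	}
	cmm_rmb();      /* order the flag load before the payload load */
	return msg;
}

When CONFIG_RCU_HAVE_FENCE is set, cmm_wmb() and cmm_rmb() above compile to sfence and lfence; on CPUs lacking the fence instructions, all three barriers fall back to the locked add to the top of stack, which acts as a full barrier.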