#ifndef _URCU_ARCH_X86_H
#define _URCU_ARCH_X86_H

/*
 * arch/x86.h: trivial definitions for the x86 architecture.
 *
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <urcu/compiler.h>
#include <urcu/config.h>

#ifdef __cplusplus
extern "C" {
#endif
/*
 * Assume 128-byte cache lines: several Intel processors prefetch or
 * sector cache lines in 64-byte pairs, so aligning to 128 bytes keeps
 * independently-written data from sharing a prefetched pair.
 */
#define CAA_CACHE_LINE_SIZE	128
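
/*
 * Example (illustrative sketch only, not part of this header): padding a
 * frequently-written shared variable to a full cache line with
 * CAA_CACHE_LINE_SIZE to avoid false sharing.  The struct name is
 * hypothetical.
 *
 *	struct percpu_count {
 *		unsigned long count;
 *	} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
 */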
#ifdef CONFIG_RCU_HAVE_FENCE
#define cmm_mb()	asm volatile("mfence":::"memory")

/*
 * Define cmm_rmb/cmm_wmb as "strict" barriers that may be needed when
 * using weakly-ordered SSE operations (e.g. non-temporal stores) or
 * working with I/O areas.  cmm_smp_rmb/cmm_smp_wmb are only compiler
 * barriers, which is sufficient for ordinary cache-coherent memory
 * accesses.  (A pairing sketch follows this #ifdef/#else block.)
 */
#define cmm_rmb()	asm volatile("lfence":::"memory")
#define cmm_wmb()	asm volatile("sfence":::"memory")
#define cmm_smp_rmb()	cmm_barrier()
#define cmm_smp_wmb()	cmm_barrier()
#else
/*
 * We leave smp_rmb/smp_wmb as full barriers for processors that do not have
 * fence instructions.
 *
 * An empty cmm_smp_rmb() may not be enough on old PentiumPro multiprocessor
 * systems, due to an erratum.  The Linux kernel says that "Even distro
 * kernels should think twice before enabling this", but for now let's
 * be conservative and leave the full barrier on 32-bit processors.  Also,
 * IDT WinChip supports weak store ordering, and the kernel may enable it
 * under our feet; cmm_smp_wmb() ceases to be a nop for these processors.
 */
#define cmm_mb()	asm volatile("lock; addl $0,0(%%esp)":::"memory")
#define cmm_rmb()	asm volatile("lock; addl $0,0(%%esp)":::"memory")
#define cmm_wmb()	asm volatile("lock; addl $0,0(%%esp)":::"memory")
#endif
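
/*
 * Example (illustrative sketch only, not part of this header): pairing
 * cmm_smp_wmb()/cmm_smp_rmb() for a flag-based handoff between two
 * threads.  The producer/consumer functions and the "data"/"ready"
 * variables are hypothetical; CMM_STORE_SHARED/CMM_LOAD_SHARED come
 * from <urcu/system.h>.
 *
 *	static int data;
 *	static int ready;
 *
 *	void producer(void)
 *	{
 *		data = 42;
 *		cmm_smp_wmb();	// order the data store before the flag store
 *		CMM_STORE_SHARED(ready, 1);
 *	}
 *
 *	void consumer(void)
 *	{
 *		while (!CMM_LOAD_SHARED(ready))
 *			caa_cpu_relax();
 *		cmm_smp_rmb();	// order the flag load before the data load
 *		assert(data == 42);
 *	}
 */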

/* "rep; nop" is the PAUSE instruction: a spin-wait hint to the CPU. */
#define caa_cpu_relax()	asm volatile("rep; nop" : : : "memory")

/*
 * Read the 64-bit time-stamp counter: rdtsc returns the low 32 bits in
 * EAX and the high 32 bits in EDX, which are recombined here.
 */
#define rdtscll(val)						\
	do {							\
		unsigned int __a, __d;				\
		asm volatile("rdtsc" : "=a" (__a), "=d" (__d));	\
		(val) = ((unsigned long long)__a)		\
			| (((unsigned long long)__d) << 32);	\
	} while (0)

typedef unsigned long long cycles_t;

/* Return the current time-stamp counter value. */
static inline cycles_t caa_get_cycles(void)
{
	cycles_t ret = 0;

	rdtscll(ret);
	return ret;
}
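
/*
 * Example (illustrative sketch only): measuring a rough interval in
 * cycles with caa_get_cycles().  Note that rdtsc is not a serializing
 * instruction and TSC behavior varies across CPUs and frequency states,
 * so treat the result as approximate.  do_work() is hypothetical.
 *
 *	cycles_t start, end;
 *
 *	start = caa_get_cycles();
 *	do_work();
 *	end = caa_get_cycles();
 *	printf("elapsed: %llu cycles\n", (unsigned long long)(end - start));
 */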

#ifdef __cplusplus
}
#endif

#include <urcu/arch/generic.h>

#endif /* _URCU_ARCH_X86_H */