#ifndef _ARCH_X86_H
#define _ARCH_X86_H

/*
 * arch_x86.h: Definitions for the x86 architecture, derived from Linux.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; but only version 2 of the License given
 * that this comes from the Linux kernel.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 */

#include <compiler.h>

/* Assume P4 or newer */
#define CONFIG_HAVE_FENCE 1
#define CONFIG_HAVE_MEM_COHERENCY

#ifdef CONFIG_HAVE_FENCE
#define mb() asm volatile("mfence":::"memory")
#define rmb() asm volatile("lfence":::"memory")
#define wmb() asm volatile("sfence":::"memory")
#else
/*
 * Some non-Intel clones support out of order store. wmb() ceases to be a
 * nop for these.
 */
#define mb() asm volatile("lock; addl $0,0(%%esp)":::"memory")
#define rmb() asm volatile("lock; addl $0,0(%%esp)":::"memory")
#define wmb() asm volatile("lock; addl $0,0(%%esp)":::"memory")
#endif

/*
 * Architectures without cache coherency need something like the following:
 *
 * #define mb()  mc()
 * #define rmb() rmc()
 * #define wmb() wmc()
 * #define mc()  arch_cache_flush()
 * #define rmc() arch_cache_flush_read()
 * #define wmc() arch_cache_flush_write()
 */

#define mc() barrier()
#define rmc() barrier()
#define wmc() barrier()

/* Assume SMP machine, given we don't have this information */
#define CONFIG_SMP 1

#ifdef CONFIG_SMP
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#define smp_mc() mc()
#define smp_rmc() rmc()
#define smp_wmc() wmc()
#else
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define smp_mc() barrier()
#define smp_rmc() barrier()
#define smp_wmc() barrier()
#endif

/* Nop everywhere except on alpha. */
#define smp_read_barrier_depends()

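/*
 * Example (a minimal sketch, not part of the original header): pairing
 * smp_wmb() on the producer side with smp_rmb() on the consumer side to
 * publish a payload through a flag. The example_* names are hypothetical.
 */
static int example_data;
static int example_ready;

static inline void example_publish(int v)
{
	example_data = v;	/* store the payload */
	smp_wmb();		/* order payload store before flag store */
	example_ready = 1;	/* signal the consumer */
}

static inline int example_consume(int *v)
{
	if (!example_ready)	/* flag not set yet: nothing to read */
		return 0;
	smp_rmb();		/* order flag load before payload load */
	*v = example_data;	/* payload store is now guaranteed visible */
	return 1;
}
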
/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
	asm volatile("rep; nop" ::: "memory");
}

static inline void cpu_relax(void)
{
	rep_nop();
}

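/*
 * Example (a hedged sketch): cpu_relax() inside a busy-wait loop. PAUSE
 * reduces power consumption and avoids memory-order mis-speculation
 * penalties when the wait ends. The example_spin_until() name and its
 * flag parameter are hypothetical.
 */
static inline void example_spin_until(volatile int *flag)
{
	while (!*flag)		/* spin until another CPU sets the flag */
		cpu_relax();	/* issue PAUSE to be polite to the core */
}
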
#ifndef _INCLUDE_API_H

static inline void atomic_inc(int *v)
{
	asm volatile("lock; incl %0"
		     : "+m" (*v));
}

#endif /* #ifndef _INCLUDE_API_H */

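/*
 * Example (a hedged sketch): atomic_inc() as a statistics counter that
 * several threads may bump concurrently without locking; the lock prefix
 * makes the increment atomic across CPUs. The example_event_count name
 * is hypothetical.
 */
static int example_event_count;

static inline void example_count_event(void)
{
	atomic_inc(&example_event_count);
}
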
#define xchg(ptr, v) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), sizeof(*(ptr))))

struct __xchg_dummy {
	unsigned long a[100];
};
#define __xg(x) ((struct __xchg_dummy *)(x))

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
 * Note 2: xchg has side effect, so that attribute volatile is necessary,
 * but generally the primitive is invalid, *ptr is output argument. --ANK
 * x is considered local, ptr is considered remote.
 */
static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
				   int size)
{
	switch (size) {
	case 1:
		asm volatile("xchgb %b0,%1"
			     : "=q" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	case 2:
		asm volatile("xchgw %w0,%1"
			     : "=r" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	case 4:
		asm volatile("xchgl %k0,%1"
			     : "=r" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	case 8:
		asm volatile("xchgq %0,%1"
			     : "=r" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	}
	smp_wmc();
	return x;
}

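/*
 * Example (a hedged sketch): a test-and-set spinlock built on xchg().
 * Since xchg implies lock, the swap is a full barrier on x86 and needs no
 * extra fence on acquire. The example_lock_* names are hypothetical.
 */
static inline void example_lock_acquire(unsigned int *lock)
{
	while (xchg(lock, 1))	/* swap in 1; old value 1 means lock was held */
		cpu_relax();	/* spin politely until the holder releases */
}

static inline void example_lock_release(unsigned int *lock)
{
	smp_mb();	/* keep critical-section accesses before the release */
	*lock = 0;	/* releasing store */
}
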
#define rdtscll(val) do { \
	unsigned int __a, __d; \
	asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \
	(val) = ((unsigned long long)__a) | (((unsigned long long)__d) << 32); \
} while (0)

typedef unsigned long long cycles_t;

static inline cycles_t get_cycles(void)
{
	unsigned long long ret = 0;

	rdtscll(ret);
	return ret;
}
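
/*
 * Example (a hedged sketch): timing a code section with get_cycles().
 * RDTSC is not a serializing instruction and TSC rates can differ across
 * cores on older hardware, so treat the result as approximate. The
 * example_time_section() name and its fn parameter are hypothetical.
 */
static inline cycles_t example_time_section(void (*fn)(void))
{
	cycles_t start, end;

	start = get_cycles();	/* read TSC before the measured work */
	fn();			/* the code being measured */
	end = get_cycles();	/* read TSC after */
	return end - start;	/* elapsed cycles, approximately */
}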

#endif /* _ARCH_X86_H */