#ifndef _ARCH_ATOMIC_X86_H
#define _ARCH_ATOMIC_X86_H

/*
 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
 * Copyright (c) 2009 Mathieu Desnoyers
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 * Code inspired from libatomic_ops-1.2, inherited in part from the
 * Boehm-Demers-Weiser conservative garbage collector.
 */

#ifndef BITS_PER_LONG
#define BITS_PER_LONG	(__SIZEOF_LONG__ * 8)
#endif

#ifndef _INCLUDE_API_H

/*
 * Derived from AO_compare_and_swap() and AO_test_and_set_full().
 */

struct __atomic_dummy {
	unsigned long v[10];
};
#define __hp(x)	((struct __atomic_dummy *)(x))
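
/*
 * Note on __hp(): casting the address to a deliberately oversized dummy
 * struct appears to be the classic libatomic_ops idiom for memory
 * operands: it gives the "m"/"+m" asm constraints an lvalue wide enough
 * for every operand size, so the compiler knows the asm accesses the
 * actual location behind addr rather than a single byte.
 */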

/* cmpxchg */

static inline __attribute__((always_inline))
unsigned long _atomic_cmpxchg(volatile void *addr, unsigned long old,
			      unsigned long _new, int len)
{
	switch (len) {
	case 1:
	{
		unsigned char result = old;
		__asm__ __volatile__(
		"lock; cmpxchgb %2, %1"
			: "+a"(result), "+m"(*__hp(addr))
			: "q"((unsigned char)_new)
			: "memory");
		return result;
	}
	case 2:
	{
		unsigned short result = old;
		__asm__ __volatile__(
		"lock; cmpxchgw %2, %1"
			: "+a"(result), "+m"(*__hp(addr))
			: "r"((unsigned short)_new)
			: "memory");
		return result;
	}
	case 4:
	{
		unsigned int result = old;
		__asm__ __volatile__(
		"lock; cmpxchgl %2, %1"
			: "+a"(result), "+m"(*__hp(addr))
			: "r"((unsigned int)_new)
			: "memory");
		return result;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result = old;
		__asm__ __volatile__(
		"lock; cmpxchgq %2, %1"
			: "+a"(result), "+m"(*__hp(addr))
			: "r"((unsigned long)_new)
			: "memory");
		return result;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__("ud2");
	return 0;
}

#define cmpxchg(addr, old, _new)					\
	((__typeof__(*(addr))) _atomic_cmpxchg((addr), (unsigned long)(old),\
						(unsigned long)(_new),	\
						sizeof(*(addr))))
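
/*
 * Usage sketch for cmpxchg() (hypothetical caller code, not part of this
 * header). cmpxchg() returns the value found in *addr before the
 * operation, so the swap happened iff that value equals the expected
 * "old" argument:
 *
 *	static unsigned long lock_word;		(assumed variable)
 *
 *	if (cmpxchg(&lock_word, 0UL, 1UL) == 0UL)
 *		... we atomically changed 0 to 1: lock acquired ...
 */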

/* xchg */

static inline __attribute__((always_inline))
unsigned long _atomic_exchange(volatile void *addr, unsigned long val, int len)
{
	/* Note: the "xchg" instruction does not need a "lock" prefix:
	 * it is implicitly locked whenever one operand is in memory. */
	switch (len) {
	case 1:
	{
		unsigned char result;
		__asm__ __volatile__(
		"xchgb %0, %1"
			: "=q"(result), "+m"(*__hp(addr))
			: "0" ((unsigned char)val)
			: "memory");
		return result;
	}
	case 2:
	{
		unsigned short result;
		__asm__ __volatile__(
		"xchgw %0, %1"
			: "=r"(result), "+m"(*__hp(addr))
			: "0" ((unsigned short)val)
			: "memory");
		return result;
	}
	case 4:
	{
		unsigned int result;
		__asm__ __volatile__(
		"xchgl %0, %1"
			: "=r"(result), "+m"(*__hp(addr))
			: "0" ((unsigned int)val)
			: "memory");
		return result;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result;
		__asm__ __volatile__(
		"xchgq %0, %1"
			: "=r"(result), "+m"(*__hp(addr))
			: "0" ((unsigned long)val)
			: "memory");
		return result;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__("ud2");
	return 0;
}

#define xchg(addr, v)							\
	((__typeof__(*(addr))) _atomic_exchange((addr), (unsigned long)(v), \
						sizeof(*(addr))))
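
/*
 * Usage sketch for xchg() (hypothetical caller, not part of this header).
 * xchg() atomically stores the new value and returns the previous one,
 * which is a common way to build a test-and-set spinlock:
 *
 *	static unsigned int busy;	(assumed flag, 0 = free, 1 = taken)
 *
 *	while (xchg(&busy, 1U) == 1U)
 *		... spin: the flag was already taken ...
 *	... critical section ...
 *	busy = 0;	(release; real code would also need a compiler
 *			 barrier before this plain store)
 */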

/* atomic_add, atomic_sub */

static inline __attribute__((always_inline))
void _atomic_add(volatile void *addr, unsigned long val, int len)
{
	switch (len) {
	case 1:
	{
		__asm__ __volatile__(
		"lock; addb %1, %0"
			: "=m"(*__hp(addr))
			: "iq" ((unsigned char)val)
			: "memory");
		return;
	}
	case 2:
	{
		__asm__ __volatile__(
		"lock; addw %1, %0"
			: "=m"(*__hp(addr))
			: "ir" ((unsigned short)val)
			: "memory");
		return;
	}
	case 4:
	{
		__asm__ __volatile__(
		"lock; addl %1, %0"
			: "=m"(*__hp(addr))
			: "ir" ((unsigned int)val)
			: "memory");
		return;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		__asm__ __volatile__(
		"lock; addq %1, %0"
			: "=m"(*__hp(addr))
			: "er" ((unsigned long)val)
			: "memory");
		return;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__("ud2");
	return;
}

#define atomic_add(addr, v)						\
	(_atomic_add((addr), (unsigned long)(v), sizeof(*(addr))))

#define atomic_sub(addr, v)	atomic_add((addr), -(v))
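
/*
 * Usage sketch for atomic_add()/atomic_sub() (hypothetical caller, not
 * part of this header). Both expand to a single "lock; add" on the
 * location and return nothing; the "memory" clobber in the asm is what
 * keeps the compiler from caching the location across the operation:
 *
 *	static unsigned long nr_events;	(assumed counter)
 *
 *	atomic_add(&nr_events, 3);	(nr_events += 3, atomically)
 *	atomic_sub(&nr_events, 1);	(nr_events -= 1, atomically)
 */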

/* atomic_inc */

static inline __attribute__((always_inline))
void _atomic_inc(volatile void *addr, int len)
{
	switch (len) {
	case 1:
	{
		__asm__ __volatile__(
		"lock; incb %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
	case 2:
	{
		__asm__ __volatile__(
		"lock; incw %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
	case 4:
	{
		__asm__ __volatile__(
		"lock; incl %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		__asm__ __volatile__(
		"lock; incq %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__("ud2");
	return;
}

#define atomic_inc(addr)	(_atomic_inc((addr), sizeof(*(addr))))

/* atomic_dec */

static inline __attribute__((always_inline))
void _atomic_dec(volatile void *addr, int len)
{
	switch (len) {
	case 1:
	{
		__asm__ __volatile__(
		"lock; decb %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
	case 2:
	{
		__asm__ __volatile__(
		"lock; decw %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
	case 4:
	{
		__asm__ __volatile__(
		"lock; decl %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		__asm__ __volatile__(
		"lock; decq %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__("ud2");
	return;
}

#define atomic_dec(addr)	(_atomic_dec((addr), sizeof(*(addr))))
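
/*
 * Usage sketch for atomic_inc()/atomic_dec() (hypothetical caller, not
 * part of this header). The operand size is picked from sizeof(*addr):
 *
 *	static unsigned int refcount = 1;	(assumed reference count)
 *
 *	atomic_inc(&refcount);	(emits "lock; incl" for a 4-byte type)
 *	atomic_dec(&refcount);	(emits "lock; decl")
 *
 * Note that neither returns the new value; a caller that must observe
 * the result (e.g. "did the refcount hit zero?") would need a returning
 * primitive such as cmpxchg(), which this header does provide.
 */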

#endif /* #ifndef _INCLUDE_API_H */

#endif /* _ARCH_ATOMIC_X86_H */