update x86 and ppc atomic ops
[urcu.git] / arch_atomic_x86.h
#ifndef _ARCH_ATOMIC_X86_H
#define _ARCH_ATOMIC_X86_H

/*
 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
 * Copyright (c) 2009 Mathieu Desnoyers
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 * Code inspired from libatomic_ops-1.2, inherited in part from the
 * Boehm-Demers-Weiser conservative garbage collector.
 */

#include <compiler.h>

#ifndef BITS_PER_LONG
#define BITS_PER_LONG	(__SIZEOF_LONG__ * 8)
#endif

#ifndef _INCLUDE_API_H

/*
 * Derived from AO_compare_and_swap() and AO_test_and_set_full().
 */

struct __atomic_dummy {
	unsigned long v[10];
};
#define __hp(x)	((struct __atomic_dummy *)(x))
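
/*
 * Added note (not in the original header): __hp() casts the target address
 * to a pointer to an oversized dummy structure, so that the "m" memory
 * constraints in the asm statements below tell the compiler that the
 * location designated by addr is read and/or written by the instruction,
 * whatever the actual operand width used for that call.
 */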

#define atomic_set(addr, v)				\
do {							\
	ACCESS_ONCE(*(addr)) = (v);			\
} while (0)

#define atomic_read(addr)	ACCESS_ONCE(*(addr))

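/*
 * Usage note (illustrative, not part of the original header): atomic_set()
 * and atomic_read() only force the compiler to perform the access exactly
 * once, through ACCESS_ONCE(); they imply no memory barrier. For example:
 *
 *	long flag;
 *	atomic_set(&flag, 1);			single store, no reordering fence
 *	if (atomic_read(&flag)) { ... }		single load, no reordering fence
 */
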
/* cmpxchg */

static inline __attribute__((always_inline))
unsigned long _atomic_cmpxchg(volatile void *addr, unsigned long old,
			      unsigned long _new, int len)
{
	switch (len) {
	case 1:
	{
		unsigned char result = old;

		__asm__ __volatile__(
		"lock; cmpxchgb %2, %1"
			: "+a"(result), "+m"(*__hp(addr))
			: "q"((unsigned char)_new)
			: "memory");
		return result;
	}
	case 2:
	{
		unsigned short result = old;

		__asm__ __volatile__(
		"lock; cmpxchgw %2, %1"
			: "+a"(result), "+m"(*__hp(addr))
			: "r"((unsigned short)_new)
			: "memory");
		return result;
	}
	case 4:
	{
		unsigned int result = old;

		__asm__ __volatile__(
		"lock; cmpxchgl %2, %1"
			: "+a"(result), "+m"(*__hp(addr))
			: "r"((unsigned int)_new)
			: "memory");
		return result;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result = old;

		__asm__ __volatile__(
		"lock; cmpxchgq %2, %1"
			: "+a"(result), "+m"(*__hp(addr))
			: "r"((unsigned long)_new)
			: "memory");
		return result;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__("ud2");
	return 0;
}

#define cmpxchg(addr, old, _new)					      \
	((__typeof__(*(addr))) _atomic_cmpxchg((addr), (unsigned long)(old), \
						(unsigned long)(_new),	      \
						sizeof(*(addr))))

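/*
 * Illustrative usage sketch (not part of the original header): cmpxchg()
 * returns the value previously stored at addr; the update happened only if
 * that return value equals old. The names my_lock and my_trylock below are
 * assumed for the example:
 *
 *	static int my_lock;
 *
 *	static int my_trylock(void)
 *	{
 *		return cmpxchg(&my_lock, 0, 1) == 0;
 *	}
 */
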
/* xchg */

static inline __attribute__((always_inline))
unsigned long _atomic_exchange(volatile void *addr, unsigned long val, int len)
{
	/* Note: the "xchg" instruction does not need a "lock" prefix,
	 * since it is implicitly locked when one operand is in memory. */
	switch (len) {
	case 1:
	{
		unsigned char result;
		__asm__ __volatile__(
		"xchgb %0, %1"
			: "=q"(result), "+m"(*__hp(addr))
			: "0" ((unsigned char)val)
			: "memory");
		return result;
	}
	case 2:
	{
		unsigned short result;
		__asm__ __volatile__(
		"xchgw %0, %1"
			: "=r"(result), "+m"(*__hp(addr))
			: "0" ((unsigned short)val)
			: "memory");
		return result;
	}
	case 4:
	{
		unsigned int result;
		__asm__ __volatile__(
		"xchgl %0, %1"
			: "=r"(result), "+m"(*__hp(addr))
			: "0" ((unsigned int)val)
			: "memory");
		return result;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result;
		__asm__ __volatile__(
		"xchgq %0, %1"
			: "=r"(result), "+m"(*__hp(addr))
			: "0" ((unsigned long)val)
			: "memory");
		return result;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__("ud2");
	return 0;
}

#define xchg(addr, v)							      \
	((__typeof__(*(addr))) _atomic_exchange((addr), (unsigned long)(v),  \
						sizeof(*(addr))))

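/*
 * Illustrative usage sketch (not part of the original header): xchg()
 * stores v and returns the previous value in a single atomic step, so it
 * can, for instance, atomically steal a pointer. struct item and head are
 * assumed example names:
 *
 *	struct item *head;
 *	struct item *stolen;
 *
 *	stolen = xchg(&head, NULL);
 */
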
/* atomic_add_return, atomic_sub_return */

static inline __attribute__((always_inline))
unsigned long _atomic_add_return(volatile void *addr, unsigned long val,
				 int len)
{
	switch (len) {
	case 1:
	{
		unsigned char result = val;

		__asm__ __volatile__(
		"lock; xaddb %1, %0"
			: "+m"(*__hp(addr)), "+q" (result)
			:
			: "memory");
		return result + (unsigned char)val;
	}
	case 2:
	{
		unsigned short result = val;

		__asm__ __volatile__(
		"lock; xaddw %1, %0"
			: "+m"(*__hp(addr)), "+r" (result)
			:
			: "memory");
		return result + (unsigned short)val;
	}
	case 4:
	{
		unsigned int result = val;

		__asm__ __volatile__(
		"lock; xaddl %1, %0"
			: "+m"(*__hp(addr)), "+r" (result)
			:
			: "memory");
		return result + (unsigned int)val;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result = val;

		__asm__ __volatile__(
		"lock; xaddq %1, %0"
			: "+m"(*__hp(addr)), "+r" (result)
			:
			: "memory");
		return result + (unsigned long)val;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__("ud2");
	return 0;
}

#define atomic_add_return(addr, v)					      \
	((__typeof__(*(addr))) _atomic_add_return((addr),		      \
						  (unsigned long)(v),	      \
						  sizeof(*(addr))))

#define atomic_sub_return(addr, v)	atomic_add_return((addr), -(v))

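/*
 * Illustrative usage sketch (not part of the original header):
 * atomic_add_return() returns the new value after the addition (the xadd
 * result plus val), which makes it suitable for reference counting. The
 * struct obj type and free_obj() destructor below are assumed for the
 * example:
 *
 *	struct obj { long refcount; };
 *	struct obj *p;
 *
 *	if (atomic_sub_return(&p->refcount, 1) == 0)
 *		free_obj(p);
 */
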
/* atomic_add, atomic_sub */

static inline __attribute__((always_inline))
void _atomic_add(volatile void *addr, unsigned long val, int len)
{
	switch (len) {
	case 1:
	{
		__asm__ __volatile__(
		"lock; addb %1, %0"
			: "=m"(*__hp(addr))
			: "iq" ((unsigned char)val)
			: "memory");
		return;
	}
	case 2:
	{
		__asm__ __volatile__(
		"lock; addw %1, %0"
			: "=m"(*__hp(addr))
			: "ir" ((unsigned short)val)
			: "memory");
		return;
	}
	case 4:
	{
		__asm__ __volatile__(
		"lock; addl %1, %0"
			: "=m"(*__hp(addr))
			: "ir" ((unsigned int)val)
			: "memory");
		return;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		__asm__ __volatile__(
		"lock; addq %1, %0"
			: "=m"(*__hp(addr))
			: "er" ((unsigned long)val)
			: "memory");
		return;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__("ud2");
	return;
}

#define atomic_add(addr, v)						      \
	(_atomic_add((addr), (unsigned long)(v), sizeof(*(addr))))

#define atomic_sub(addr, v)	atomic_add((addr), -(v))
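
/*
 * Illustrative usage sketch (not part of the original header): atomic_add()
 * and atomic_sub() return no value, so they fit counters that are only ever
 * read with atomic_read(). nr_events is an assumed example variable:
 *
 *	static long nr_events;
 *
 *	atomic_add(&nr_events, 42);
 *	printf("%ld\n", atomic_read(&nr_events));
 */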


/* atomic_inc */

static inline __attribute__((always_inline))
void _atomic_inc(volatile void *addr, int len)
{
	switch (len) {
	case 1:
	{
		__asm__ __volatile__(
		"lock; incb %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
	case 2:
	{
		__asm__ __volatile__(
		"lock; incw %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
	case 4:
	{
		__asm__ __volatile__(
		"lock; incl %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		__asm__ __volatile__(
		"lock; incq %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__("ud2");
	return;
}

#define atomic_inc(addr)	(_atomic_inc((addr), sizeof(*(addr))))

/* atomic_dec */

static inline __attribute__((always_inline))
void _atomic_dec(volatile void *addr, int len)
{
	switch (len) {
	case 1:
	{
		__asm__ __volatile__(
		"lock; decb %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
	case 2:
	{
		__asm__ __volatile__(
		"lock; decw %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
	case 4:
	{
		__asm__ __volatile__(
		"lock; decl %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		__asm__ __volatile__(
		"lock; decq %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__("ud2");
	return;
}

#define atomic_dec(addr)	(_atomic_dec((addr), sizeof(*(addr))))
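
/*
 * Illustrative usage sketch (not part of the original header): atomic_inc()
 * and atomic_dec() are convenience wrappers around the locked inc/dec
 * instructions. Like atomic_add()/atomic_sub(), they do not return the new
 * value, so pair them with atomic_read(), or use atomic_add_return() when
 * the updated value is needed. active_readers is an assumed example
 * variable:
 *
 *	static long active_readers;
 *
 *	atomic_inc(&active_readers);
 *	...
 *	atomic_dec(&active_readers);
 */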

#endif /* #ifndef _INCLUDE_API_H */

#endif /* _ARCH_ATOMIC_X86_H */