#ifndef _URCU_ARCH_UATOMIC_X86_H
#define _URCU_ARCH_UATOMIC_X86_H

/*
 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
 * Copyright (c) 2009 Mathieu Desnoyers
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 * Code inspired from libuatomic_ops-1.2, inherited in part from the
 * Boehm-Demers-Weiser conservative garbage collector.
 */

#include <urcu/compiler.h>
#include <urcu/system.h>

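/*
 * x86 has lock-prefixed instructions for byte and short operands, so
 * tell the generic layer (urcu/uatomic_generic.h, included below) that
 * 1- and 2-byte atomics are available natively.
 */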
#define UATOMIC_HAS_ATOMIC_BYTE
#define UATOMIC_HAS_ATOMIC_SHORT

#ifdef __cplusplus
extern "C" {
#endif

#ifndef __SIZEOF_LONG__
#if defined(__x86_64__) || defined(__amd64__)
#define __SIZEOF_LONG__ 8
#else
#define __SIZEOF_LONG__ 4
#endif
#endif

#ifndef BITS_PER_LONG
#define BITS_PER_LONG	(__SIZEOF_LONG__ * 8)
#endif

/*
 * Derived from AO_compare_and_swap() and AO_test_and_set_full().
 */

struct __uatomic_dummy {
	unsigned long v[10];
};
#define __hp(x)	((struct __uatomic_dummy *)(x))
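/*
 * __hp() turns the void * argument into an lvalue usable as an "m"
 * operand in the asm statements below. The dummy structure is made
 * wide (10 longs) so the compiler conservatively assumes the asm may
 * access the whole target location, whatever the actual operand size.
 */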

#define _uatomic_set(addr, v)	STORE_SHARED(*(addr), (v))

/* cmpxchg */

static inline __attribute__((always_inline))
unsigned long __uatomic_cmpxchg(void *addr, unsigned long old,
				unsigned long _new, int len)
{
	switch (len) {
	case 1:
	{
		unsigned char result = old;

		__asm__ __volatile__(
		"lock; cmpxchgb %2, %1"
			: "+a"(result), "+m"(*__hp(addr))
			: "q"((unsigned char)_new)
			: "memory");
		return result;
	}
	case 2:
	{
		unsigned short result = old;

		__asm__ __volatile__(
		"lock; cmpxchgw %2, %1"
			: "+a"(result), "+m"(*__hp(addr))
			: "r"((unsigned short)_new)
			: "memory");
		return result;
	}
	case 4:
	{
		unsigned int result = old;

		__asm__ __volatile__(
		"lock; cmpxchgl %2, %1"
			: "+a"(result), "+m"(*__hp(addr))
			: "r"((unsigned int)_new)
			: "memory");
		return result;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result = old;

		__asm__ __volatile__(
		"lock; cmpxchgq %2, %1"
			: "+a"(result), "+m"(*__hp(addr))
			: "r"((unsigned long)_new)
			: "memory");
		return result;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__("ud2");
	return 0;
}

#define _uatomic_cmpxchg(addr, old, _new)				       \
	((__typeof__(*(addr))) __uatomic_cmpxchg((addr), (unsigned long)(old),\
						(unsigned long)(_new),	       \
						sizeof(*(addr))))

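/*
 * Illustrative use (hypothetical caller, not part of this header):
 * _uatomic_cmpxchg() returns the previous value of *addr, so a
 * successful swap is detected by comparing the return value with the
 * expected old value.
 *
 *	static int lock;
 *	...
 *	if (_uatomic_cmpxchg(&lock, 0, 1) == 0)
 *		critical_section();
 */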
/* xchg */

static inline __attribute__((always_inline))
unsigned long __uatomic_exchange(void *addr, unsigned long val, int len)
{
	/* Note: the "xchg" instruction does not need a "lock" prefix. */
	switch (len) {
	case 1:
	{
		unsigned char result;
		__asm__ __volatile__(
		"xchgb %0, %1"
			: "=q"(result), "+m"(*__hp(addr))
			: "0" ((unsigned char)val)
			: "memory");
		return result;
	}
	case 2:
	{
		unsigned short result;
		__asm__ __volatile__(
		"xchgw %0, %1"
			: "=r"(result), "+m"(*__hp(addr))
			: "0" ((unsigned short)val)
			: "memory");
		return result;
	}
	case 4:
	{
		unsigned int result;
		__asm__ __volatile__(
		"xchgl %0, %1"
			: "=r"(result), "+m"(*__hp(addr))
			: "0" ((unsigned int)val)
			: "memory");
		return result;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result;
		__asm__ __volatile__(
		"xchgq %0, %1"
			: "=r"(result), "+m"(*__hp(addr))
			: "0" ((unsigned long)val)
			: "memory");
		return result;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__("ud2");
	return 0;
}

#define _uatomic_xchg(addr, v)						       \
	((__typeof__(*(addr))) __uatomic_exchange((addr), (unsigned long)(v), \
						sizeof(*(addr))))

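/*
 * Illustrative use (hypothetical caller): _uatomic_xchg() atomically
 * stores the new value and returns the one it replaced, e.g. to steal
 * a list head in one step:
 *
 *	struct node *head;
 *	...
 *	struct node *snapshot = _uatomic_xchg(&head, NULL);
 */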
/* uatomic_add_return */

static inline __attribute__((always_inline))
unsigned long __uatomic_add_return(void *addr, unsigned long val,
				   int len)
{
	/*
	 * xadd leaves the previous value of *addr in "result"; adding
	 * "val" back yields the value after the atomic addition.
	 */
	switch (len) {
	case 1:
	{
		unsigned char result = val;

		__asm__ __volatile__(
		"lock; xaddb %1, %0"
			: "+m"(*__hp(addr)), "+q" (result)
			:
			: "memory");
		return result + (unsigned char)val;
	}
	case 2:
	{
		unsigned short result = val;

		__asm__ __volatile__(
		"lock; xaddw %1, %0"
			: "+m"(*__hp(addr)), "+r" (result)
			:
			: "memory");
		return result + (unsigned short)val;
	}
	case 4:
	{
		unsigned int result = val;

		__asm__ __volatile__(
		"lock; xaddl %1, %0"
			: "+m"(*__hp(addr)), "+r" (result)
			:
			: "memory");
		return result + (unsigned int)val;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result = val;

		__asm__ __volatile__(
		"lock; xaddq %1, %0"
			: "+m"(*__hp(addr)), "+r" (result)
			:
			: "memory");
		return result + (unsigned long)val;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__("ud2");
	return 0;
}

#define _uatomic_add_return(addr, v)					   \
	((__typeof__(*(addr))) __uatomic_add_return((addr),		   \
						  (unsigned long)(v),	   \
						  sizeof(*(addr))))

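/*
 * Illustrative use (hypothetical caller): _uatomic_add_return() yields
 * the value *after* the addition, which makes dropping a reference and
 * testing for zero a single atomic step:
 *
 *	static long refcount = 1;
 *	...
 *	if (_uatomic_add_return(&refcount, -1) == 0)
 *		release_resource();
 */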
/* uatomic_add */

static inline __attribute__((always_inline))
void __uatomic_add(void *addr, unsigned long val, int len)
{
	/*
	 * Unlike __uatomic_add_return(), the result is discarded, so a
	 * plain "lock; add" (which also accepts immediate operands) is
	 * enough; no "xadd" needed.
	 */
	switch (len) {
	case 1:
	{
		__asm__ __volatile__(
		"lock; addb %1, %0"
			: "=m"(*__hp(addr))
			: "iq" ((unsigned char)val)
			: "memory");
		return;
	}
	case 2:
	{
		__asm__ __volatile__(
		"lock; addw %1, %0"
			: "=m"(*__hp(addr))
			: "ir" ((unsigned short)val)
			: "memory");
		return;
	}
	case 4:
	{
		__asm__ __volatile__(
		"lock; addl %1, %0"
			: "=m"(*__hp(addr))
			: "ir" ((unsigned int)val)
			: "memory");
		return;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		__asm__ __volatile__(
		"lock; addq %1, %0"
			: "=m"(*__hp(addr))
			: "er" ((unsigned long)val)
			: "memory");
		return;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__("ud2");
	return;
}

#define _uatomic_add(addr, v)						   \
	(__uatomic_add((addr), (unsigned long)(v), sizeof(*(addr))))

/* uatomic_inc */

static inline __attribute__((always_inline))
void __uatomic_inc(void *addr, int len)
{
	switch (len) {
	case 1:
	{
		__asm__ __volatile__(
		"lock; incb %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
	case 2:
	{
		__asm__ __volatile__(
		"lock; incw %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
	case 4:
	{
		__asm__ __volatile__(
		"lock; incl %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		__asm__ __volatile__(
		"lock; incq %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__("ud2");
	return;
}

#define _uatomic_inc(addr)	(__uatomic_inc((addr), sizeof(*(addr))))

/* uatomic_dec */

static inline __attribute__((always_inline))
void __uatomic_dec(void *addr, int len)
{
	switch (len) {
	case 1:
	{
		__asm__ __volatile__(
		"lock; decb %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
	case 2:
	{
		__asm__ __volatile__(
		"lock; decw %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
	case 4:
	{
		__asm__ __volatile__(
		"lock; decl %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		__asm__ __volatile__(
		"lock; decq %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__("ud2");
	return;
}

#define _uatomic_dec(addr)	(__uatomic_dec((addr), sizeof(*(addr))))

#if ((BITS_PER_LONG != 64) && defined(CONFIG_RCU_COMPAT_ARCH))
extern int __rcu_cas_avail;
extern int __rcu_cas_init(void);

/*
 * UATOMIC_COMPAT() dispatches at runtime: __rcu_cas_avail > 0 means
 * cmpxchg is known to be available, < 0 means it has not been probed
 * yet (probe it with __rcu_cas_init()), and 0 means it is missing, in
 * which case the compat_uatomic_*() fallbacks are used.
 */
#define UATOMIC_COMPAT(insn)						       \
	((likely(__rcu_cas_avail > 0))					       \
	? (_uatomic_##insn)						       \
	: ((unlikely(__rcu_cas_avail < 0)				       \
		? ((__rcu_cas_init() > 0)				       \
			? (_uatomic_##insn)				       \
			: (compat_uatomic_##insn))			       \
		: (compat_uatomic_##insn))))

extern unsigned long _compat_uatomic_set(void *addr,
					 unsigned long _new, int len);
#define compat_uatomic_set(addr, _new)				       \
	((__typeof__(*(addr))) _compat_uatomic_set((addr),	       \
						(unsigned long)(_new), \
						sizeof(*(addr))))


extern unsigned long _compat_uatomic_xchg(void *addr,
					  unsigned long _new, int len);
#define compat_uatomic_xchg(addr, _new)				       \
	((__typeof__(*(addr))) _compat_uatomic_xchg((addr),	       \
						(unsigned long)(_new), \
						sizeof(*(addr))))

extern unsigned long _compat_uatomic_cmpxchg(void *addr, unsigned long old,
					     unsigned long _new, int len);
#define compat_uatomic_cmpxchg(addr, old, _new)			       \
	((__typeof__(*(addr))) _compat_uatomic_cmpxchg((addr),	       \
						(unsigned long)(old),  \
						(unsigned long)(_new), \
						sizeof(*(addr))))

extern unsigned long _compat_uatomic_add_return(void *addr,
						unsigned long v, int len);
#define compat_uatomic_add_return(addr, v)			       \
	((__typeof__(*(addr))) _compat_uatomic_add_return((addr),     \
						(unsigned long)(v),    \
						sizeof(*(addr))))

#define compat_uatomic_add(addr, v)				       \
	((void)compat_uatomic_add_return((addr), (v)))
#define compat_uatomic_inc(addr)				       \
	(compat_uatomic_add((addr), 1))
#define compat_uatomic_dec(addr)				       \
	(compat_uatomic_add((addr), -1))

#else
#define UATOMIC_COMPAT(insn)	(_uatomic_##insn)
#endif

/* Read is atomic even in compat mode */
#define uatomic_set(addr, v)			\
		UATOMIC_COMPAT(set(addr, v))

#define uatomic_cmpxchg(addr, old, _new)	\
		UATOMIC_COMPAT(cmpxchg(addr, old, _new))
#define uatomic_xchg(addr, v)			\
		UATOMIC_COMPAT(xchg(addr, v))
#define uatomic_add_return(addr, v)		\
		UATOMIC_COMPAT(add_return(addr, v))

#define uatomic_add(addr, v)	UATOMIC_COMPAT(add(addr, v))
#define uatomic_inc(addr)	UATOMIC_COMPAT(inc(addr))
#define uatomic_dec(addr)	UATOMIC_COMPAT(dec(addr))

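/*
 * Putting it together (hypothetical caller): the public macros above
 * route through UATOMIC_COMPAT(), so the same code runs on i386 compat
 * mode and on cmpxchg-capable processors:
 *
 *	static unsigned long count = 1;
 *	...
 *	uatomic_inc(&count);
 *	uatomic_add(&count, 16);
 *	if (uatomic_add_return(&count, -1) == 0)
 *		last_user_cleanup();
 */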
#ifdef __cplusplus
}
#endif

#include <urcu/uatomic_generic.h>

#endif /* _URCU_ARCH_UATOMIC_X86_H */