uatomic: add uatomic_or
[urcu.git] / urcu / uatomic_arch_x86.h
#ifndef _URCU_ARCH_UATOMIC_X86_H
#define _URCU_ARCH_UATOMIC_X86_H

/*
 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
 * Copyright (c) 2009 Mathieu Desnoyers
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 * Code inspired from libuatomic_ops-1.2, inherited in part from the
 * Boehm-Demers-Weiser conservative garbage collector.
 */

#include <urcu/compiler.h>
#include <urcu/system.h>

#define UATOMIC_HAS_ATOMIC_BYTE
#define UATOMIC_HAS_ATOMIC_SHORT

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Derived from AO_compare_and_swap() and AO_test_and_set_full().
 */

struct __uatomic_dummy {
	unsigned long v[10];
};
#define __hp(x)	((struct __uatomic_dummy *)(x))

#define _uatomic_set(addr, v)	CMM_STORE_SHARED(*(addr), (v))

/* cmpxchg */

static inline __attribute__((always_inline))
unsigned long __uatomic_cmpxchg(void *addr, unsigned long old,
				unsigned long _new, int len)
{
	switch (len) {
	case 1:
	{
		unsigned char result = old;

		__asm__ __volatile__(
		"lock; cmpxchgb %2, %1"
			: "+a"(result), "+m"(*__hp(addr))
			: "q"((unsigned char)_new)
			: "memory");
		return result;
	}
	case 2:
	{
		unsigned short result = old;

		__asm__ __volatile__(
		"lock; cmpxchgw %2, %1"
			: "+a"(result), "+m"(*__hp(addr))
			: "r"((unsigned short)_new)
			: "memory");
		return result;
	}
	case 4:
	{
		unsigned int result = old;

		__asm__ __volatile__(
		"lock; cmpxchgl %2, %1"
			: "+a"(result), "+m"(*__hp(addr))
			: "r"((unsigned int)_new)
			: "memory");
		return result;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result = old;

		__asm__ __volatile__(
		"lock; cmpxchgq %2, %1"
			: "+a"(result), "+m"(*__hp(addr))
			: "r"((unsigned long)_new)
			: "memory");
		return result;
	}
#endif
	}
	/*
	 * Generate an illegal instruction. Cannot catch this with
	 * linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__("ud2");
	return 0;
}

#define _uatomic_cmpxchg(addr, old, _new)				       \
	((__typeof__(*(addr))) __uatomic_cmpxchg((addr), (unsigned long)(old),\
						(unsigned long)(_new),	       \
						sizeof(*(addr))))

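/*
 * Illustrative usage sketch (names below are hypothetical, not part of this
 * header): the public uatomic_cmpxchg() wrapper defined at the end of this
 * file normally maps onto _uatomic_cmpxchg(). It returns the value that was
 * in *addr before the operation, so a successful swap is detected by
 * comparing the return value with the expected old value.
 *
 *	unsigned long lock_word = 0;
 *
 *	if (uatomic_cmpxchg(&lock_word, 0, 1) == 0) {
 *		// lock_word was atomically changed from 0 to 1
 *	}
 */
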
/* xchg */

static inline __attribute__((always_inline))
unsigned long __uatomic_exchange(void *addr, unsigned long val, int len)
{
	/* Note: the "xchg" instruction does not need a "lock" prefix. */
	switch (len) {
	case 1:
	{
		unsigned char result;
		__asm__ __volatile__(
		"xchgb %0, %1"
			: "=q"(result), "+m"(*__hp(addr))
			: "0" ((unsigned char)val)
			: "memory");
		return result;
	}
	case 2:
	{
		unsigned short result;
		__asm__ __volatile__(
		"xchgw %0, %1"
			: "=r"(result), "+m"(*__hp(addr))
			: "0" ((unsigned short)val)
			: "memory");
		return result;
	}
	case 4:
	{
		unsigned int result;
		__asm__ __volatile__(
		"xchgl %0, %1"
			: "=r"(result), "+m"(*__hp(addr))
			: "0" ((unsigned int)val)
			: "memory");
		return result;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result;
		__asm__ __volatile__(
		"xchgq %0, %1"
			: "=r"(result), "+m"(*__hp(addr))
			: "0" ((unsigned long)val)
			: "memory");
		return result;
	}
#endif
	}
	/*
	 * Generate an illegal instruction. Cannot catch this with
	 * linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__("ud2");
	return 0;
}

#define _uatomic_xchg(addr, v)						       \
	((__typeof__(*(addr))) __uatomic_exchange((addr), (unsigned long)(v), \
						sizeof(*(addr))))

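/*
 * Illustrative usage sketch (hypothetical names): uatomic_xchg() stores the
 * new value and returns the previous content of *addr in a single atomic
 * step, which is convenient for stealing a pointer out of a shared slot.
 *
 *	struct node *head;
 *	struct node *stolen = uatomic_xchg(&head, NULL);
 */
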
/* uatomic_add_return */

static inline __attribute__((always_inline))
unsigned long __uatomic_add_return(void *addr, unsigned long val,
				   int len)
{
	switch (len) {
	case 1:
	{
		unsigned char result = val;

		__asm__ __volatile__(
		"lock; xaddb %1, %0"
			: "+m"(*__hp(addr)), "+q" (result)
			:
			: "memory");
		return result + (unsigned char)val;
	}
	case 2:
	{
		unsigned short result = val;

		__asm__ __volatile__(
		"lock; xaddw %1, %0"
			: "+m"(*__hp(addr)), "+r" (result)
			:
			: "memory");
		return result + (unsigned short)val;
	}
	case 4:
	{
		unsigned int result = val;

		__asm__ __volatile__(
		"lock; xaddl %1, %0"
			: "+m"(*__hp(addr)), "+r" (result)
			:
			: "memory");
		return result + (unsigned int)val;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result = val;

		__asm__ __volatile__(
		"lock; xaddq %1, %0"
			: "+m"(*__hp(addr)), "+r" (result)
			:
			: "memory");
		return result + (unsigned long)val;
	}
#endif
	}
	/*
	 * Generate an illegal instruction. Cannot catch this with
	 * linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__("ud2");
	return 0;
}

#define _uatomic_add_return(addr, v)					       \
	((__typeof__(*(addr))) __uatomic_add_return((addr),		       \
						(unsigned long)(v),	       \
						sizeof(*(addr))))

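/*
 * Illustrative usage sketch (hypothetical names): xadd leaves the old value
 * in the register operand, so the helpers above return old + val, i.e. the
 * value of the counter after the addition.
 *
 *	unsigned long refcount = 1;
 *
 *	if (uatomic_add_return(&refcount, -1) == 0)
 *		free_object();
 */
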
/* uatomic_or */

static inline __attribute__((always_inline))
void __uatomic_or(void *addr, unsigned long val, int len)
{
	switch (len) {
	case 1:
	{
		__asm__ __volatile__(
		"lock; orb %1, %0"
			: "=m"(*__hp(addr))
			: "iq" ((unsigned char)val)
			: "memory");
		return;
	}
	case 2:
	{
		__asm__ __volatile__(
		"lock; orw %1, %0"
			: "=m"(*__hp(addr))
			: "ir" ((unsigned short)val)
			: "memory");
		return;
	}
	case 4:
	{
		__asm__ __volatile__(
		"lock; orl %1, %0"
			: "=m"(*__hp(addr))
			: "ir" ((unsigned int)val)
			: "memory");
		return;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		__asm__ __volatile__(
		"lock; orq %1, %0"
			: "=m"(*__hp(addr))
			: "er" ((unsigned long)val)
			: "memory");
		return;
	}
#endif
	}
	/*
	 * Generate an illegal instruction. Cannot catch this with
	 * linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__("ud2");
	return;
}

#define _uatomic_or(addr, v)						       \
	(__uatomic_or((addr), (unsigned long)(v), sizeof(*(addr))))

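/*
 * Illustrative usage sketch (hypothetical names): uatomic_or() atomically
 * sets bits in a shared word without returning the previous value, which is
 * cheaper than a cmpxchg loop when only the final state matters.
 *
 *	#define FLAG_WAKEUP_PENDING	(1UL << 0)
 *
 *	unsigned long flags;
 *	uatomic_or(&flags, FLAG_WAKEUP_PENDING);
 */
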
/* uatomic_add */

static inline __attribute__((always_inline))
void __uatomic_add(void *addr, unsigned long val, int len)
{
	switch (len) {
	case 1:
	{
		__asm__ __volatile__(
		"lock; addb %1, %0"
			: "=m"(*__hp(addr))
			: "iq" ((unsigned char)val)
			: "memory");
		return;
	}
	case 2:
	{
		__asm__ __volatile__(
		"lock; addw %1, %0"
			: "=m"(*__hp(addr))
			: "ir" ((unsigned short)val)
			: "memory");
		return;
	}
	case 4:
	{
		__asm__ __volatile__(
		"lock; addl %1, %0"
			: "=m"(*__hp(addr))
			: "ir" ((unsigned int)val)
			: "memory");
		return;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		__asm__ __volatile__(
		"lock; addq %1, %0"
			: "=m"(*__hp(addr))
			: "er" ((unsigned long)val)
			: "memory");
		return;
	}
#endif
	}
	/*
	 * Generate an illegal instruction. Cannot catch this with
	 * linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__("ud2");
	return;
}

#define _uatomic_add(addr, v)						       \
	(__uatomic_add((addr), (unsigned long)(v), sizeof(*(addr))))

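/*
 * Illustrative usage sketch (hypothetical names): uatomic_add(), together
 * with the uatomic_inc()/uatomic_dec() helpers defined below, is the
 * fire-and-forget variant; use uatomic_add_return() when the updated value
 * is needed.
 *
 *	unsigned long nr_events;
 *	uatomic_add(&nr_events, batch_size);
 *	uatomic_inc(&nr_events);
 */
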
/* uatomic_inc */

static inline __attribute__((always_inline))
void __uatomic_inc(void *addr, int len)
{
	switch (len) {
	case 1:
	{
		__asm__ __volatile__(
		"lock; incb %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
	case 2:
	{
		__asm__ __volatile__(
		"lock; incw %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
	case 4:
	{
		__asm__ __volatile__(
		"lock; incl %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		__asm__ __volatile__(
		"lock; incq %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
#endif
	}
	/*
	 * Generate an illegal instruction. Cannot catch this with
	 * linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__("ud2");
	return;
}

#define _uatomic_inc(addr)	(__uatomic_inc((addr), sizeof(*(addr))))

/* uatomic_dec */

static inline __attribute__((always_inline))
void __uatomic_dec(void *addr, int len)
{
	switch (len) {
	case 1:
	{
		__asm__ __volatile__(
		"lock; decb %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
	case 2:
	{
		__asm__ __volatile__(
		"lock; decw %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
	case 4:
	{
		__asm__ __volatile__(
		"lock; decl %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		__asm__ __volatile__(
		"lock; decq %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
#endif
	}
	/*
	 * Generate an illegal instruction. Cannot catch this with
	 * linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__("ud2");
	return;
}

#define _uatomic_dec(addr)	(__uatomic_dec((addr), sizeof(*(addr))))

#if ((CAA_BITS_PER_LONG != 64) && defined(CONFIG_RCU_COMPAT_ARCH))
extern int __rcu_cas_avail;
extern int __rcu_cas_init(void);

#define UATOMIC_COMPAT(insn)						       \
	((likely(__rcu_cas_avail > 0))					       \
	? (_uatomic_##insn)						       \
	: ((unlikely(__rcu_cas_avail < 0)				       \
		? ((__rcu_cas_init() > 0)				       \
			? (_uatomic_##insn)				       \
			: (compat_uatomic_##insn))			       \
		: (compat_uatomic_##insn))))

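/*
 * Illustrative note (a summary of the dispatch above): on 32-bit builds with
 * CONFIG_RCU_COMPAT_ARCH, UATOMIC_COMPAT(inc(addr)) expands to a conditional
 * that uses the lock-prefixed _uatomic_inc() when a suitable cmpxchg is known
 * to be available (__rcu_cas_avail > 0), probes once via __rcu_cas_init() on
 * first use (__rcu_cas_avail < 0), and otherwise falls back to the
 * compat_uatomic_inc() implementation provided by the compatibility layer.
 */
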
extern unsigned long _compat_uatomic_set(void *addr,
					 unsigned long _new, int len);
#define compat_uatomic_set(addr, _new)					       \
	((__typeof__(*(addr))) _compat_uatomic_set((addr),		       \
						(unsigned long)(_new),	       \
						sizeof(*(addr))))


extern unsigned long _compat_uatomic_xchg(void *addr,
					  unsigned long _new, int len);
#define compat_uatomic_xchg(addr, _new)					       \
	((__typeof__(*(addr))) _compat_uatomic_xchg((addr),		       \
						(unsigned long)(_new),	       \
						sizeof(*(addr))))

extern unsigned long _compat_uatomic_cmpxchg(void *addr, unsigned long old,
					     unsigned long _new, int len);
#define compat_uatomic_cmpxchg(addr, old, _new)				       \
	((__typeof__(*(addr))) _compat_uatomic_cmpxchg((addr),		       \
						(unsigned long)(old),	       \
						(unsigned long)(_new),	       \
						sizeof(*(addr))))

extern unsigned long _compat_uatomic_or(void *addr,
					unsigned long _new, int len);
#define compat_uatomic_or(addr, v)					       \
	((__typeof__(*(addr))) _compat_uatomic_or((addr),		       \
						(unsigned long)(v),	       \
						sizeof(*(addr))))

extern unsigned long _compat_uatomic_add_return(void *addr,
						unsigned long _new, int len);
#define compat_uatomic_add_return(addr, v)				       \
	((__typeof__(*(addr))) _compat_uatomic_add_return((addr),	       \
						(unsigned long)(v),	       \
						sizeof(*(addr))))

#define compat_uatomic_add(addr, v)					       \
	((void)compat_uatomic_add_return((addr), (v)))
#define compat_uatomic_inc(addr)					       \
	(compat_uatomic_add((addr), 1))
#define compat_uatomic_dec(addr)					       \
	(compat_uatomic_add((addr), -1))

#else
#define UATOMIC_COMPAT(insn) (_uatomic_##insn)
#endif

/* Read is atomic even in compat mode */
#define uatomic_set(addr, v)			\
		UATOMIC_COMPAT(set(addr, v))

#define uatomic_cmpxchg(addr, old, _new)	\
		UATOMIC_COMPAT(cmpxchg(addr, old, _new))
#define uatomic_xchg(addr, v)			\
		UATOMIC_COMPAT(xchg(addr, v))
#define uatomic_or(addr, v)			\
		UATOMIC_COMPAT(or(addr, v))
#define uatomic_add_return(addr, v)		\
		UATOMIC_COMPAT(add_return(addr, v))

#define uatomic_add(addr, v)	UATOMIC_COMPAT(add(addr, v))
#define uatomic_inc(addr)	UATOMIC_COMPAT(inc(addr))
#define uatomic_dec(addr)	UATOMIC_COMPAT(dec(addr))

#ifdef __cplusplus
}
#endif

#include <urcu/uatomic_generic.h>

#endif /* _URCU_ARCH_UATOMIC_X86_H */