// SPDX-FileCopyrightText: 1991-1994 by Xerox Corporation. All rights reserved.
// SPDX-FileCopyrightText: 1996-1999 by Silicon Graphics. All rights reserved.
// SPDX-FileCopyrightText: 1999-2004 Hewlett-Packard Development Company, L.P.
// SPDX-FileCopyrightText: 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
// SPDX-FileCopyrightText: 2010 Paolo Bonzini
//
// SPDX-License-Identifier: LicenseRef-Boehm-GC

#ifndef _URCU_UATOMIC_GENERIC_H
#define _URCU_UATOMIC_GENERIC_H

/*
 * Code inspired from libuatomic_ops-1.2, inherited in part from the
 * Boehm-Demers-Weiser conservative garbage collector.
 */

#include <stdint.h>
#include <urcu/compiler.h>
#include <urcu/system.h>

#ifdef __cplusplus
extern "C" {
#endif

#ifndef uatomic_set
#define uatomic_set(addr, v)	((void) CMM_STORE_SHARED(*(addr), (v)))
#endif

extern void abort(void);

#define uatomic_load_store_return_op(op, addr, v, mo)		\
	__extension__						\
	({							\
		switch (mo) {					\
		case CMM_ACQUIRE:				\
		case CMM_CONSUME:				\
		case CMM_RELAXED:				\
			break;					\
		case CMM_RELEASE:				\
		case CMM_ACQ_REL:				\
		case CMM_SEQ_CST:				\
		case CMM_SEQ_CST_FENCE:				\
			cmm_smp_mb();				\
			break;					\
		default:					\
			abort();				\
		}						\
								\
		__typeof__(*(addr)) _value = op(addr, v);	\
								\
		switch (mo) {					\
		case CMM_CONSUME:				\
			cmm_smp_read_barrier_depends();		\
			break;					\
		case CMM_ACQUIRE:				\
		case CMM_ACQ_REL:				\
		case CMM_SEQ_CST:				\
		case CMM_SEQ_CST_FENCE:				\
			cmm_smp_mb();				\
			break;					\
		case CMM_RELAXED:				\
		case CMM_RELEASE:				\
			break;					\
		default:					\
			abort();				\
		}						\
		_value;						\
	})
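
/*
 * Illustrative sketch (not part of the API): uatomic_xchg_mo below is built
 * from this macro.  With mo == CMM_ACQ_REL, the expansion brackets the
 * underlying uatomic_xchg() with cmm_smp_mb() on both sides, e.g.:
 *
 *	static unsigned long pending;
 *
 *	static unsigned long drain_pending(void)
 *	{
 *		return uatomic_load_store_return_op(uatomic_xchg,
 *				&pending, 0UL, CMM_ACQ_REL);
 *	}
 *
 * "pending" and drain_pending() are hypothetical names used only for this
 * example.
 */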

#define uatomic_load_store_op(op, addr, v, mo)			\
	do {							\
		switch (mo) {					\
		case CMM_ACQUIRE:				\
		case CMM_CONSUME:				\
		case CMM_RELAXED:				\
			break;					\
		case CMM_RELEASE:				\
		case CMM_ACQ_REL:				\
		case CMM_SEQ_CST:				\
		case CMM_SEQ_CST_FENCE:				\
			cmm_smp_mb();				\
			break;					\
		default:					\
			abort();				\
		}						\
								\
		op(addr, v);					\
								\
		switch (mo) {					\
		case CMM_CONSUME:				\
			cmm_smp_read_barrier_depends();		\
			break;					\
		case CMM_ACQUIRE:				\
		case CMM_ACQ_REL:				\
		case CMM_SEQ_CST:				\
		case CMM_SEQ_CST_FENCE:				\
			cmm_smp_mb();				\
			break;					\
		case CMM_RELAXED:				\
		case CMM_RELEASE:				\
			break;					\
		default:					\
			abort();				\
		}						\
	} while (0)

#define uatomic_store(addr, v, mo)				\
	do {							\
		switch (mo) {					\
		case CMM_RELAXED:				\
			break;					\
		case CMM_RELEASE:				\
		case CMM_SEQ_CST:				\
		case CMM_SEQ_CST_FENCE:				\
			cmm_smp_mb();				\
			break;					\
		default:					\
			abort();				\
		}						\
								\
		uatomic_set(addr, v);				\
								\
		switch (mo) {					\
		case CMM_RELAXED:				\
		case CMM_RELEASE:				\
			break;					\
		case CMM_SEQ_CST:				\
		case CMM_SEQ_CST_FENCE:				\
			cmm_smp_mb();				\
			break;					\
		default:					\
			abort();				\
		}						\
	} while (0)
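
/*
 * Usage sketch (hypothetical example, not part of this header): publish data
 * with a release store, paired with an acquire load on the reader side (see
 * uatomic_load further below):
 *
 *	static int payload;
 *	static int ready;
 *
 *	static void producer(void)
 *	{
 *		payload = 42;
 *		uatomic_store(&ready, 1, CMM_RELEASE);
 *	}
 */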

#define uatomic_and_mo(addr, v, mo)			\
	uatomic_load_store_op(uatomic_and, addr, v, mo)

#define uatomic_or_mo(addr, v, mo)			\
	uatomic_load_store_op(uatomic_or, addr, v, mo)

#define uatomic_add_mo(addr, v, mo)			\
	uatomic_load_store_op(uatomic_add, addr, v, mo)

#define uatomic_sub_mo(addr, v, mo)			\
	uatomic_load_store_op(uatomic_sub, addr, v, mo)

#define uatomic_inc_mo(addr, mo)			\
	uatomic_load_store_op(uatomic_add, addr, 1, mo)

#define uatomic_dec_mo(addr, mo)			\
	uatomic_load_store_op(uatomic_add, addr, -1, mo)
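
/*
 * Example (illustrative): a statistics counter that needs atomicity but no
 * ordering can use CMM_RELAXED; "nr_events" is a hypothetical counter:
 *
 *	uatomic_inc_mo(&nr_events, CMM_RELAXED);
 */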

/*
 * NOTE: we cannot simply do switch (_value == (old) ? mos : mof), otherwise
 * the compiler emits a -Wduplicated-cond warning.
 */
#define uatomic_cmpxchg_mo(addr, old, new, mos, mof)			\
	__extension__							\
	({								\
		switch (mos) {						\
		case CMM_ACQUIRE:					\
		case CMM_CONSUME:					\
		case CMM_RELAXED:					\
			break;						\
		case CMM_RELEASE:					\
		case CMM_ACQ_REL:					\
		case CMM_SEQ_CST:					\
		case CMM_SEQ_CST_FENCE:					\
			cmm_smp_mb();					\
			break;						\
		default:						\
			abort();					\
		}							\
									\
		__typeof__(*(addr)) _value = uatomic_cmpxchg(addr, old, \
							new);		\
									\
		if (_value == (old)) {					\
			switch (mos) {					\
			case CMM_CONSUME:				\
				cmm_smp_read_barrier_depends();		\
				break;					\
			case CMM_ACQUIRE:				\
			case CMM_ACQ_REL:				\
			case CMM_SEQ_CST:				\
			case CMM_SEQ_CST_FENCE:				\
				cmm_smp_mb();				\
				break;					\
			case CMM_RELAXED:				\
			case CMM_RELEASE:				\
				break;					\
			default:					\
				abort();				\
			}						\
		} else {						\
			switch (mof) {					\
			case CMM_CONSUME:				\
				cmm_smp_read_barrier_depends();		\
				break;					\
			case CMM_ACQUIRE:				\
			case CMM_ACQ_REL:				\
			case CMM_SEQ_CST:				\
			case CMM_SEQ_CST_FENCE:				\
				cmm_smp_mb();				\
				break;					\
			case CMM_RELAXED:				\
			case CMM_RELEASE:				\
				break;					\
			default:					\
				abort();				\
			}						\
		}							\
		_value;							\
	})
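
/*
 * Usage sketch (hypothetical): a lock-free stack push retrying on CAS
 * failure.  Success publishes the node (CMM_RELEASE); failure only re-reads
 * the head, so CMM_RELAXED suffices on that path:
 *
 *	struct node { struct node *next; };
 *	static struct node *top;
 *
 *	static void push(struct node *node)
 *	{
 *		struct node *head, *old;
 *
 *		do {
 *			head = uatomic_load(&top, CMM_RELAXED);
 *			node->next = head;
 *			old = uatomic_cmpxchg_mo(&top, head, node,
 *					CMM_RELEASE, CMM_RELAXED);
 *		} while (old != head);
 *	}
 */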

#define uatomic_xchg_mo(addr, v, mo)				\
	uatomic_load_store_return_op(uatomic_xchg, addr, v, mo)

#define uatomic_add_return_mo(addr, v, mo)			\
	uatomic_load_store_return_op(uatomic_add_return, addr, v, mo)

#define uatomic_sub_return_mo(addr, v, mo)			\
	uatomic_load_store_return_op(uatomic_sub_return, addr, v, mo)

#ifndef uatomic_read
#define uatomic_read(addr)	CMM_LOAD_SHARED(*(addr))
#endif

#define uatomic_load(addr, mo)						\
	__extension__							\
	({								\
		switch (mo) {						\
		case CMM_ACQUIRE:					\
		case CMM_CONSUME:					\
		case CMM_RELAXED:					\
			break;						\
		case CMM_SEQ_CST:					\
		case CMM_SEQ_CST_FENCE:					\
			cmm_smp_mb();					\
			break;						\
		default:						\
			abort();					\
		}							\
									\
		__typeof__(*(addr)) _rcu_value = uatomic_read(addr);	\
									\
		switch (mo) {						\
		case CMM_RELAXED:					\
			break;						\
		case CMM_CONSUME:					\
			cmm_smp_read_barrier_depends();			\
			break;						\
		case CMM_ACQUIRE:					\
		case CMM_SEQ_CST:					\
		case CMM_SEQ_CST_FENCE:					\
			cmm_smp_mb();					\
			break;						\
		default:						\
			abort();					\
		}							\
									\
		_rcu_value;						\
	})

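/*
 * Usage sketch (hypothetical): the consumer side of the release store shown
 * above for uatomic_store(); an acquire load makes the preceding write to
 * "payload" visible.  do_something() is a placeholder:
 *
 *	static void consumer(void)
 *	{
 *		if (uatomic_load(&ready, CMM_ACQUIRE))
 *			do_something(payload);
 *	}
 *
 * CMM_CONSUME would suffice when the consumer only dereferences a pointer
 * loaded from the shared location.
 */

/*
 * Explanatory note: when the compiler optimizes, calls to
 * _uatomic_link_error() on unsupported operand sizes are unreachable and get
 * eliminated; any call that survives produces a link error.  Without
 * optimizations the call cannot be eliminated, so it traps at run time
 * instead.
 */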
#if !defined __OPTIMIZE__ || defined UATOMIC_NO_LINK_ERROR
#ifdef ILLEGAL_INSTR
static inline __attribute__((always_inline))
void _uatomic_link_error(void)
{
	/*
	 * Generate an illegal instruction.  Cannot catch this with
	 * linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__(ILLEGAL_INSTR);
}
#else
static inline __attribute__((always_inline, __noreturn__))
void _uatomic_link_error(void)
{
	__builtin_trap();
}
#endif

#else /* #if !defined __OPTIMIZE__ || defined UATOMIC_NO_LINK_ERROR */
extern void _uatomic_link_error(void);
#endif /* #else #if !defined __OPTIMIZE__ || defined UATOMIC_NO_LINK_ERROR */

/* cmpxchg */

#ifndef uatomic_cmpxchg
static inline __attribute__((always_inline))
unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
			       unsigned long _new, int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
		return __sync_val_compare_and_swap_1((uint8_t *) addr, old,
				_new);
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
		return __sync_val_compare_and_swap_2((uint16_t *) addr, old,
				_new);
#endif
	case 4:
		return __sync_val_compare_and_swap_4((uint32_t *) addr, old,
				_new);
#if (CAA_BITS_PER_LONG == 64)
	case 8:
		return __sync_val_compare_and_swap_8((uint64_t *) addr, old,
				_new);
#endif
	}
	_uatomic_link_error();
	return 0;
}


#define uatomic_cmpxchg(addr, old, _new)				      \
	((__typeof__(*(addr))) _uatomic_cmpxchg((addr),			      \
						caa_cast_long_keep_sign(old), \
						caa_cast_long_keep_sign(_new),\
						sizeof(*(addr))))
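
/*
 * Illustrative use (hypothetical variable): the dispatch on sizeof(*(addr))
 * lets the same macro handle different object sizes:
 *
 *	static uint32_t flag;
 *	...
 *	uint32_t prev = uatomic_cmpxchg(&flag, 0, 1);
 *	// prev == 0 iff this thread performed the 0 -> 1 transition
 */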

/* uatomic_and */

#ifndef uatomic_and
static inline __attribute__((always_inline))
void _uatomic_and(void *addr, unsigned long val,
		  int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
		__sync_and_and_fetch_1((uint8_t *) addr, val);
		return;
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
		__sync_and_and_fetch_2((uint16_t *) addr, val);
		return;
#endif
	case 4:
		__sync_and_and_fetch_4((uint32_t *) addr, val);
		return;
#if (CAA_BITS_PER_LONG == 64)
	case 8:
		__sync_and_and_fetch_8((uint64_t *) addr, val);
		return;
#endif
	}
	_uatomic_link_error();
}

#define uatomic_and(addr, v)			\
	(_uatomic_and((addr),			\
		caa_cast_long_keep_sign(v),	\
		sizeof(*(addr))))
#define cmm_smp_mb__before_uatomic_and()	cmm_barrier()
#define cmm_smp_mb__after_uatomic_and()		cmm_barrier()

#endif

/* uatomic_or */

#ifndef uatomic_or
static inline __attribute__((always_inline))
void _uatomic_or(void *addr, unsigned long val,
		 int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
		__sync_or_and_fetch_1((uint8_t *) addr, val);
		return;
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
		__sync_or_and_fetch_2((uint16_t *) addr, val);
		return;
#endif
	case 4:
		__sync_or_and_fetch_4((uint32_t *) addr, val);
		return;
#if (CAA_BITS_PER_LONG == 64)
	case 8:
		__sync_or_and_fetch_8((uint64_t *) addr, val);
		return;
#endif
	}
	_uatomic_link_error();
}

#define uatomic_or(addr, v)			\
	(_uatomic_or((addr),			\
		caa_cast_long_keep_sign(v),	\
		sizeof(*(addr))))
#define cmm_smp_mb__before_uatomic_or()		cmm_barrier()
#define cmm_smp_mb__after_uatomic_or()		cmm_barrier()

#endif

/* uatomic_add_return */

#ifndef uatomic_add_return
static inline __attribute__((always_inline))
unsigned long _uatomic_add_return(void *addr, unsigned long val,
				  int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
		return __sync_add_and_fetch_1((uint8_t *) addr, val);
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
		return __sync_add_and_fetch_2((uint16_t *) addr, val);
#endif
	case 4:
		return __sync_add_and_fetch_4((uint32_t *) addr, val);
#if (CAA_BITS_PER_LONG == 64)
	case 8:
		return __sync_add_and_fetch_8((uint64_t *) addr, val);
#endif
	}
	_uatomic_link_error();
	return 0;
}

#define uatomic_add_return(addr, v)					    \
	((__typeof__(*(addr))) _uatomic_add_return((addr),		    \
						caa_cast_long_keep_sign(v), \
						sizeof(*(addr))))
#endif /* #ifndef uatomic_add_return */

#ifndef uatomic_xchg
/* xchg */

static inline __attribute__((always_inline))
unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
	{
		uint8_t old;

		do {
			old = uatomic_read((uint8_t *) addr);
		} while (!__sync_bool_compare_and_swap_1((uint8_t *) addr,
				old, val));

		return old;
	}
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
	{
		uint16_t old;

		do {
			old = uatomic_read((uint16_t *) addr);
		} while (!__sync_bool_compare_and_swap_2((uint16_t *) addr,
				old, val));

		return old;
	}
#endif
	case 4:
	{
		uint32_t old;

		do {
			old = uatomic_read((uint32_t *) addr);
		} while (!__sync_bool_compare_and_swap_4((uint32_t *) addr,
				old, val));

		return old;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		uint64_t old;

		do {
			old = uatomic_read((uint64_t *) addr);
		} while (!__sync_bool_compare_and_swap_8((uint64_t *) addr,
				old, val));

		return old;
	}
#endif
	}
	_uatomic_link_error();
	return 0;
}

#define uatomic_xchg(addr, v)						    \
	((__typeof__(*(addr))) _uatomic_exchange((addr),		    \
						caa_cast_long_keep_sign(v), \
						sizeof(*(addr))))
#endif /* #ifndef uatomic_xchg */

#else /* #ifndef uatomic_cmpxchg */

#ifndef uatomic_and
/* uatomic_and */

static inline __attribute__((always_inline))
void _uatomic_and(void *addr, unsigned long val, int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
	{
		uint8_t old, oldt;

		oldt = uatomic_read((uint8_t *) addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old & val, 1);
		} while (oldt != old);

		return;
	}
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
	{
		uint16_t old, oldt;

		oldt = uatomic_read((uint16_t *) addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old & val, 2);
		} while (oldt != old);

		return;
	}
#endif
	case 4:
	{
		uint32_t old, oldt;

		oldt = uatomic_read((uint32_t *) addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old & val, 4);
		} while (oldt != old);

		return;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		uint64_t old, oldt;

		oldt = uatomic_read((uint64_t *) addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old & val, 8);
		} while (oldt != old);

		return;
	}
#endif
	}
	_uatomic_link_error();
}

#define uatomic_and(addr, v)			\
	(_uatomic_and((addr),			\
		caa_cast_long_keep_sign(v),	\
		sizeof(*(addr))))
#define cmm_smp_mb__before_uatomic_and()	cmm_barrier()
#define cmm_smp_mb__after_uatomic_and()		cmm_barrier()

#endif /* #ifndef uatomic_and */

#ifndef uatomic_or
/* uatomic_or */

static inline __attribute__((always_inline))
void _uatomic_or(void *addr, unsigned long val, int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
	{
		uint8_t old, oldt;

		oldt = uatomic_read((uint8_t *) addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old | val, 1);
		} while (oldt != old);

		return;
	}
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
	{
		uint16_t old, oldt;

		oldt = uatomic_read((uint16_t *) addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old | val, 2);
		} while (oldt != old);

		return;
	}
#endif
	case 4:
	{
		uint32_t old, oldt;

		oldt = uatomic_read((uint32_t *) addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old | val, 4);
		} while (oldt != old);

		return;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		uint64_t old, oldt;

		oldt = uatomic_read((uint64_t *) addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old | val, 8);
		} while (oldt != old);

		return;
	}
#endif
	}
	_uatomic_link_error();
}

#define uatomic_or(addr, v)			\
	(_uatomic_or((addr),			\
		caa_cast_long_keep_sign(v),	\
		sizeof(*(addr))))
#define cmm_smp_mb__before_uatomic_or()		cmm_barrier()
#define cmm_smp_mb__after_uatomic_or()		cmm_barrier()

#endif /* #ifndef uatomic_or */

#ifndef uatomic_add_return
/* uatomic_add_return */

static inline __attribute__((always_inline))
unsigned long _uatomic_add_return(void *addr, unsigned long val, int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
	{
		uint8_t old, oldt;

		oldt = uatomic_read((uint8_t *) addr);
		do {
			old = oldt;
			oldt = uatomic_cmpxchg((uint8_t *) addr,
					old, old + val);
		} while (oldt != old);

		return old + val;
	}
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
	{
		uint16_t old, oldt;

		oldt = uatomic_read((uint16_t *) addr);
		do {
			old = oldt;
			oldt = uatomic_cmpxchg((uint16_t *) addr,
					old, old + val);
		} while (oldt != old);

		return old + val;
	}
#endif
	case 4:
	{
		uint32_t old, oldt;

		oldt = uatomic_read((uint32_t *) addr);
		do {
			old = oldt;
			oldt = uatomic_cmpxchg((uint32_t *) addr,
					old, old + val);
		} while (oldt != old);

		return old + val;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		uint64_t old, oldt;

		oldt = uatomic_read((uint64_t *) addr);
		do {
			old = oldt;
			oldt = uatomic_cmpxchg((uint64_t *) addr,
					old, old + val);
		} while (oldt != old);

		return old + val;
	}
#endif
	}
	_uatomic_link_error();
	return 0;
}

#define uatomic_add_return(addr, v)					    \
	((__typeof__(*(addr))) _uatomic_add_return((addr),		    \
						caa_cast_long_keep_sign(v), \
						sizeof(*(addr))))
#endif /* #ifndef uatomic_add_return */

#ifndef uatomic_xchg
/* xchg */

static inline __attribute__((always_inline))
unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
	{
		uint8_t old, oldt;

		oldt = uatomic_read((uint8_t *) addr);
		do {
			old = oldt;
			oldt = uatomic_cmpxchg((uint8_t *) addr,
					old, val);
		} while (oldt != old);

		return old;
	}
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
	{
		uint16_t old, oldt;

		oldt = uatomic_read((uint16_t *) addr);
		do {
			old = oldt;
			oldt = uatomic_cmpxchg((uint16_t *) addr,
					old, val);
		} while (oldt != old);

		return old;
	}
#endif
	case 4:
	{
		uint32_t old, oldt;

		oldt = uatomic_read((uint32_t *) addr);
		do {
			old = oldt;
			oldt = uatomic_cmpxchg((uint32_t *) addr,
					old, val);
		} while (oldt != old);

		return old;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		uint64_t old, oldt;

		oldt = uatomic_read((uint64_t *) addr);
		do {
			old = oldt;
			oldt = uatomic_cmpxchg((uint64_t *) addr,
					old, val);
		} while (oldt != old);

		return old;
	}
#endif
	}
	_uatomic_link_error();
	return 0;
}

#define uatomic_xchg(addr, v)						    \
	((__typeof__(*(addr))) _uatomic_exchange((addr),		    \
						caa_cast_long_keep_sign(v), \
						sizeof(*(addr))))
#endif /* #ifndef uatomic_xchg */

#endif /* #else #ifndef uatomic_cmpxchg */
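
/*
 * Usage sketch (hypothetical): a minimal test-and-set spinlock built from
 * uatomic_xchg() and uatomic_store().  caa_cpu_relax() is assumed to come
 * from urcu/arch.h:
 *
 *	static int lock;
 *
 *	static void spin_lock(void)
 *	{
 *		while (uatomic_xchg(&lock, 1))
 *			caa_cpu_relax();
 *	}
 *
 *	static void spin_unlock(void)
 *	{
 *		uatomic_store(&lock, 0, CMM_RELEASE);
 *	}
 */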

/* uatomic_sub_return, uatomic_add, uatomic_sub, uatomic_inc, uatomic_dec */

#ifndef uatomic_add
#define uatomic_add(addr, v)		(void)uatomic_add_return((addr), (v))
#define cmm_smp_mb__before_uatomic_add()	cmm_barrier()
#define cmm_smp_mb__after_uatomic_add()		cmm_barrier()
#endif

#define uatomic_sub_return(addr, v)	\
	uatomic_add_return((addr), -(caa_cast_long_keep_sign(v)))
#define uatomic_sub(addr, v)		\
	uatomic_add((addr), -(caa_cast_long_keep_sign(v)))
#define cmm_smp_mb__before_uatomic_sub()	cmm_smp_mb__before_uatomic_add()
#define cmm_smp_mb__after_uatomic_sub()		cmm_smp_mb__after_uatomic_add()

#ifndef uatomic_inc
#define uatomic_inc(addr)		uatomic_add((addr), 1)
#define cmm_smp_mb__before_uatomic_inc()	cmm_smp_mb__before_uatomic_add()
#define cmm_smp_mb__after_uatomic_inc()		cmm_smp_mb__after_uatomic_add()
#endif

#ifndef uatomic_dec
#define uatomic_dec(addr)		uatomic_add((addr), -1)
#define cmm_smp_mb__before_uatomic_dec()	cmm_smp_mb__before_uatomic_add()
#define cmm_smp_mb__after_uatomic_dec()		cmm_smp_mb__after_uatomic_add()
#endif
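
/*
 * Example (illustrative): the cmm_smp_mb__before/after helpers provide
 * ordering around operations that do not themselves imply a full barrier;
 * "refcount" is a hypothetical counter:
 *
 *	cmm_smp_mb__before_uatomic_inc();
 *	uatomic_inc(&refcount);
 *	cmm_smp_mb__after_uatomic_inc();
 */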

#ifdef __cplusplus
}
#endif

#endif /* _URCU_UATOMIC_GENERIC_H */