uatomic/generic: Fix redundant declaration warning
[urcu.git] / include / urcu / uatomic / generic.h
// SPDX-FileCopyrightText: 1991-1994 by Xerox Corporation. All rights reserved.
// SPDX-FileCopyrightText: 1996-1999 by Silicon Graphics. All rights reserved.
// SPDX-FileCopyrightText: 1999-2004 Hewlett-Packard Development Company, L.P.
// SPDX-FileCopyrightText: 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
// SPDX-FileCopyrightText: 2010 Paolo Bonzini
//
// SPDX-License-Identifier: LicenseRef-Boehm-GC

#ifndef _URCU_UATOMIC_GENERIC_H
#define _URCU_UATOMIC_GENERIC_H

/*
 * Code inspired by libuatomic_ops-1.2, inherited in part from the
 * Boehm-Demers-Weiser conservative garbage collector.
 */

#include <stdint.h>
#include <stdlib.h>
#include <urcu/compiler.h>
#include <urcu/system.h>

#ifdef __cplusplus
extern "C" {
#endif

#ifndef uatomic_set
#define uatomic_set(addr, v)	((void) CMM_STORE_SHARED(*(addr), (v)))
#endif

#define uatomic_load_store_return_op(op, addr, v, mo)		\
	__extension__						\
	({							\
		switch (mo) {					\
		case CMM_ACQUIRE:				\
		case CMM_CONSUME:				\
		case CMM_RELAXED:				\
			break;					\
		case CMM_RELEASE:				\
		case CMM_ACQ_REL:				\
		case CMM_SEQ_CST:				\
		case CMM_SEQ_CST_FENCE:				\
			cmm_smp_mb();				\
			break;					\
		default:					\
			abort();				\
		}						\
								\
		__typeof__(*(addr)) _value = op(addr, v);	\
								\
		switch (mo) {					\
		case CMM_CONSUME:				\
			cmm_smp_read_barrier_depends();		\
			break;					\
		case CMM_ACQUIRE:				\
		case CMM_ACQ_REL:				\
		case CMM_SEQ_CST:				\
		case CMM_SEQ_CST_FENCE:				\
			cmm_smp_mb();				\
			break;					\
		case CMM_RELAXED:				\
		case CMM_RELEASE:				\
			break;					\
		default:					\
			abort();				\
		}						\
		_value;						\
	})

#define uatomic_load_store_op(op, addr, v, mo)			\
	do {							\
		switch (mo) {					\
		case CMM_ACQUIRE:				\
		case CMM_CONSUME:				\
		case CMM_RELAXED:				\
			break;					\
		case CMM_RELEASE:				\
		case CMM_ACQ_REL:				\
		case CMM_SEQ_CST:				\
		case CMM_SEQ_CST_FENCE:				\
			cmm_smp_mb();				\
			break;					\
		default:					\
			abort();				\
		}						\
								\
		op(addr, v);					\
								\
		switch (mo) {					\
		case CMM_CONSUME:				\
			cmm_smp_read_barrier_depends();		\
			break;					\
		case CMM_ACQUIRE:				\
		case CMM_ACQ_REL:				\
		case CMM_SEQ_CST:				\
		case CMM_SEQ_CST_FENCE:				\
			cmm_smp_mb();				\
			break;					\
		case CMM_RELAXED:				\
		case CMM_RELEASE:				\
			break;					\
		default:					\
			abort();				\
		}						\
	} while (0)
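
/*
 * The two helpers above bracket a primitive with the barriers implied
 * by the requested memory order: a full barrier before the operation
 * for release-style orders, and a full barrier (or, for CMM_CONSUME, a
 * read-barrier-depends) after it for acquire-style orders.  An invalid
 * memory order aborts at run time.  Since "mo" is expected to be a
 * compile-time constant, the switches should fold away at compile time.
 */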

#define uatomic_store(addr, v, mo)			\
	do {						\
		switch (mo) {				\
		case CMM_RELAXED:			\
			break;				\
		case CMM_RELEASE:			\
		case CMM_SEQ_CST:			\
		case CMM_SEQ_CST_FENCE:			\
			cmm_smp_mb();			\
			break;				\
		default:				\
			abort();			\
		}					\
							\
		uatomic_set(addr, v);			\
							\
		switch (mo) {				\
		case CMM_RELAXED:			\
		case CMM_RELEASE:			\
			break;				\
		case CMM_SEQ_CST:			\
		case CMM_SEQ_CST_FENCE:			\
			cmm_smp_mb();			\
			break;				\
		default:				\
			abort();			\
		}					\
	} while (0)
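
/*
 * Example (illustrative sketch only, compiled out; not part of this
 * header's API): a release store pairing with an acquire load, so that
 * a reader observing "ready" also observes the preceding write to
 * "payload".  The variable names are hypothetical.
 */
#if 0
static int payload;
static int ready;

static void publish(void)
{
	payload = 42;
	uatomic_store(&ready, 1, CMM_RELEASE);
}

static int try_consume(void)
{
	if (uatomic_load(&ready, CMM_ACQUIRE))
		return payload;
	return -1;
}
#endif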

#define uatomic_and_mo(addr, v, mo)			\
	uatomic_load_store_op(uatomic_and, addr, v, mo)

#define uatomic_or_mo(addr, v, mo)			\
	uatomic_load_store_op(uatomic_or, addr, v, mo)

#define uatomic_add_mo(addr, v, mo)			\
	uatomic_load_store_op(uatomic_add, addr, v, mo)

#define uatomic_sub_mo(addr, v, mo)			\
	uatomic_load_store_op(uatomic_sub, addr, v, mo)

#define uatomic_inc_mo(addr, mo)			\
	uatomic_load_store_op(uatomic_add, addr, 1, mo)

#define uatomic_dec_mo(addr, mo)			\
	uatomic_load_store_op(uatomic_add, addr, -1, mo)
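
/*
 * Example (illustrative sketch only, compiled out): a statistics
 * counter that needs atomicity but no ordering can use the relaxed
 * variants.  The counter name is hypothetical.
 */
#if 0
static unsigned long nr_events;

static void account_event(void)
{
	uatomic_inc_mo(&nr_events, CMM_RELAXED);
}
#endif
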
/*
 * NOTE: We cannot just do switch (_value == (old) ? mos : mof), otherwise the
 * compiler emits a -Wduplicated-cond warning.
 */
#define uatomic_cmpxchg_mo(addr, old, new, mos, mof)		\
	__extension__						\
	({							\
		switch (mos) {					\
		case CMM_ACQUIRE:				\
		case CMM_CONSUME:				\
		case CMM_RELAXED:				\
			break;					\
		case CMM_RELEASE:				\
		case CMM_ACQ_REL:				\
		case CMM_SEQ_CST:				\
		case CMM_SEQ_CST_FENCE:				\
			cmm_smp_mb();				\
			break;					\
		default:					\
			abort();				\
		}						\
								\
		__typeof__(*(addr)) _value = uatomic_cmpxchg(addr, old, \
							new);	\
								\
		if (_value == (old)) {				\
			switch (mos) {				\
			case CMM_CONSUME:			\
				cmm_smp_read_barrier_depends();	\
				break;				\
			case CMM_ACQUIRE:			\
			case CMM_ACQ_REL:			\
			case CMM_SEQ_CST:			\
			case CMM_SEQ_CST_FENCE:			\
				cmm_smp_mb();			\
				break;				\
			case CMM_RELAXED:			\
			case CMM_RELEASE:			\
				break;				\
			default:				\
				abort();			\
			}					\
		} else {					\
			switch (mof) {				\
			case CMM_CONSUME:			\
				cmm_smp_read_barrier_depends();	\
				break;				\
			case CMM_ACQUIRE:			\
			case CMM_ACQ_REL:			\
			case CMM_SEQ_CST:			\
			case CMM_SEQ_CST_FENCE:			\
				cmm_smp_mb();			\
				break;				\
			case CMM_RELAXED:			\
			case CMM_RELEASE:			\
				break;				\
			default:				\
				abort();			\
			}					\
		}						\
		_value;						\
	})
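
/*
 * Example (illustrative sketch only, compiled out): a simple spinlock
 * built on uatomic_cmpxchg_mo(), taking acquire ordering on success
 * and relaxed ordering on failure.  The lock variable is hypothetical;
 * caa_cpu_relax() comes from urcu/arch.h, which this header does not
 * include.
 */
#if 0
static int lock;

static void spin_lock(void)
{
	while (uatomic_cmpxchg_mo(&lock, 0, 1,
				  CMM_ACQUIRE, CMM_RELAXED) != 0)
		caa_cpu_relax();
}

static void spin_unlock(void)
{
	uatomic_store(&lock, 0, CMM_RELEASE);
}
#endif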

#define uatomic_xchg_mo(addr, v, mo)				\
	uatomic_load_store_return_op(uatomic_xchg, addr, v, mo)

#define uatomic_add_return_mo(addr, v, mo)			\
	uatomic_load_store_return_op(uatomic_add_return, addr, v, mo)

#define uatomic_sub_return_mo(addr, v, mo)			\
	uatomic_load_store_return_op(uatomic_sub_return, addr, v, mo)

#ifndef uatomic_read
#define uatomic_read(addr)	CMM_LOAD_SHARED(*(addr))
#endif

#define uatomic_load(addr, mo)						\
	__extension__							\
	({								\
		switch (mo) {						\
		case CMM_ACQUIRE:					\
		case CMM_CONSUME:					\
		case CMM_RELAXED:					\
			break;						\
		case CMM_SEQ_CST:					\
		case CMM_SEQ_CST_FENCE:					\
			cmm_smp_mb();					\
			break;						\
		default:						\
			abort();					\
		}							\
									\
		__typeof__(*(addr)) _rcu_value = uatomic_read(addr);	\
									\
		switch (mo) {						\
		case CMM_RELAXED:					\
			break;						\
		case CMM_CONSUME:					\
			cmm_smp_read_barrier_depends();			\
			break;						\
		case CMM_ACQUIRE:					\
		case CMM_SEQ_CST:					\
		case CMM_SEQ_CST_FENCE:					\
			cmm_smp_mb();					\
			break;						\
		default:						\
			abort();					\
		}							\
									\
		_rcu_value;						\
	})

#if !defined __OPTIMIZE__ || defined UATOMIC_NO_LINK_ERROR
#ifdef ILLEGAL_INSTR
static inline __attribute__((always_inline))
void _uatomic_link_error(void)
{
	/*
	 * Generate an illegal instruction.  Cannot catch this with
	 * linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__(ILLEGAL_INSTR);
}
#else
static inline __attribute__((always_inline, __noreturn__))
void _uatomic_link_error(void)
{
	__builtin_trap();
}
#endif

#else /* #if !defined __OPTIMIZE__ || defined UATOMIC_NO_LINK_ERROR */
extern void _uatomic_link_error(void);
#endif /* #else #if !defined __OPTIMIZE__ || defined UATOMIC_NO_LINK_ERROR */

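/*
 * How the size check works: each generic primitive below ends with a
 * call to _uatomic_link_error() on the fall-through path of its size
 * switch.  With optimizations enabled, that path is eliminated as dead
 * code for every supported operand size, so the undefined extern symbol
 * is only referenced, and the build only fails at link time, when a
 * caller uses an unsupported size.  Without optimizations, dead-code
 * elimination cannot be relied upon, so the inline definitions above
 * trap at run time instead.
 */
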
/* cmpxchg */

#ifndef uatomic_cmpxchg
static inline __attribute__((always_inline))
unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
			       unsigned long _new, int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
		return __sync_val_compare_and_swap_1((uint8_t *) addr, old,
				_new);
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
		return __sync_val_compare_and_swap_2((uint16_t *) addr, old,
				_new);
#endif
	case 4:
		return __sync_val_compare_and_swap_4((uint32_t *) addr, old,
				_new);
#if (CAA_BITS_PER_LONG == 64)
	case 8:
		return __sync_val_compare_and_swap_8((uint64_t *) addr, old,
				_new);
#endif
	}
	_uatomic_link_error();
	return 0;
}

#define uatomic_cmpxchg(addr, old, _new)				      \
	((__typeof__(*(addr))) _uatomic_cmpxchg((addr),			      \
						caa_cast_long_keep_sign(old), \
						caa_cast_long_keep_sign(_new),\
						sizeof(*(addr))))

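/*
 * Example (illustrative sketch only, compiled out): uatomic_cmpxchg()
 * returns the value that was in *addr before the operation; the swap
 * happened if and only if that value equals the expected "old"
 * argument.  The flag name is hypothetical.
 */
#if 0
static unsigned long once;

static void run_once(void (*fn)(void))
{
	if (uatomic_cmpxchg(&once, 0, 1) == 0)
		fn();	/* first caller only */
}
#endif
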
/* uatomic_and */

#ifndef uatomic_and
static inline __attribute__((always_inline))
void _uatomic_and(void *addr, unsigned long val,
		  int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
		__sync_and_and_fetch_1((uint8_t *) addr, val);
		return;
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
		__sync_and_and_fetch_2((uint16_t *) addr, val);
		return;
#endif
	case 4:
		__sync_and_and_fetch_4((uint32_t *) addr, val);
		return;
#if (CAA_BITS_PER_LONG == 64)
	case 8:
		__sync_and_and_fetch_8((uint64_t *) addr, val);
		return;
#endif
	}
	_uatomic_link_error();
}

#define uatomic_and(addr, v)			\
	(_uatomic_and((addr),			\
		caa_cast_long_keep_sign(v),	\
		sizeof(*(addr))))
#define cmm_smp_mb__before_uatomic_and()	cmm_barrier()
#define cmm_smp_mb__after_uatomic_and()		cmm_barrier()

#endif

/* uatomic_or */

#ifndef uatomic_or
static inline __attribute__((always_inline))
void _uatomic_or(void *addr, unsigned long val,
		 int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
		__sync_or_and_fetch_1((uint8_t *) addr, val);
		return;
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
		__sync_or_and_fetch_2((uint16_t *) addr, val);
		return;
#endif
	case 4:
		__sync_or_and_fetch_4((uint32_t *) addr, val);
		return;
#if (CAA_BITS_PER_LONG == 64)
	case 8:
		__sync_or_and_fetch_8((uint64_t *) addr, val);
		return;
#endif
	}
	_uatomic_link_error();
}

#define uatomic_or(addr, v)			\
	(_uatomic_or((addr),			\
		caa_cast_long_keep_sign(v),	\
		sizeof(*(addr))))
#define cmm_smp_mb__before_uatomic_or()		cmm_barrier()
#define cmm_smp_mb__after_uatomic_or()		cmm_barrier()

#endif

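/*
 * Example (illustrative sketch only, compiled out): setting and
 * clearing flag bits atomically.  uatomic_and()/uatomic_or() provide
 * no ordering by themselves; the cmm_smp_mb__before/after helpers add
 * it when needed.  The flag word and bit values are hypothetical.
 */
#if 0
#define FLAG_DIRTY	0x1UL

static unsigned long flags;

static void mark_dirty(void)
{
	uatomic_or(&flags, FLAG_DIRTY);
	cmm_smp_mb__after_uatomic_or();		/* order against later accesses */
}

static void clear_dirty(void)
{
	cmm_smp_mb__before_uatomic_and();	/* order against earlier accesses */
	uatomic_and(&flags, ~FLAG_DIRTY);
}
#endif
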
/* uatomic_add_return */

#ifndef uatomic_add_return
static inline __attribute__((always_inline))
unsigned long _uatomic_add_return(void *addr, unsigned long val,
				  int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
		return __sync_add_and_fetch_1((uint8_t *) addr, val);
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
		return __sync_add_and_fetch_2((uint16_t *) addr, val);
#endif
	case 4:
		return __sync_add_and_fetch_4((uint32_t *) addr, val);
#if (CAA_BITS_PER_LONG == 64)
	case 8:
		return __sync_add_and_fetch_8((uint64_t *) addr, val);
#endif
	}
	_uatomic_link_error();
	return 0;
}

#define uatomic_add_return(addr, v)					    \
	((__typeof__(*(addr))) _uatomic_add_return((addr),		    \
						caa_cast_long_keep_sign(v), \
						sizeof(*(addr))))
#endif /* #ifndef uatomic_add_return */

#ifndef uatomic_xchg
/* xchg */

static inline __attribute__((always_inline))
unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
	{
		uint8_t old;

		do {
			old = uatomic_read((uint8_t *) addr);
		} while (!__sync_bool_compare_and_swap_1((uint8_t *) addr,
				old, val));

		return old;
	}
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
	{
		uint16_t old;

		do {
			old = uatomic_read((uint16_t *) addr);
		} while (!__sync_bool_compare_and_swap_2((uint16_t *) addr,
				old, val));

		return old;
	}
#endif
	case 4:
	{
		uint32_t old;

		do {
			old = uatomic_read((uint32_t *) addr);
		} while (!__sync_bool_compare_and_swap_4((uint32_t *) addr,
				old, val));

		return old;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		uint64_t old;

		do {
			old = uatomic_read((uint64_t *) addr);
		} while (!__sync_bool_compare_and_swap_8((uint64_t *) addr,
				old, val));

		return old;
	}
#endif
	}
	_uatomic_link_error();
	return 0;
}

#define uatomic_xchg(addr, v)						    \
	((__typeof__(*(addr))) _uatomic_exchange((addr),		    \
						caa_cast_long_keep_sign(v), \
						sizeof(*(addr))))
#endif /* #ifndef uatomic_xchg */
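
/*
 * Note on the loops above: the GCC __sync builtins provide exchange
 * only through compare-and-swap, so _uatomic_exchange() retries
 * __sync_bool_compare_and_swap_N() until the value read has not
 * changed between the read and the swap, then returns that old value.
 */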

#else /* #ifndef uatomic_cmpxchg */

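/*
 * In this branch the architecture provides uatomic_cmpxchg() directly;
 * each remaining primitive below is emulated with a compare-and-swap
 * retry loop.
 */
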
#ifndef uatomic_and
/* uatomic_and */

static inline __attribute__((always_inline))
void _uatomic_and(void *addr, unsigned long val, int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
	{
		uint8_t old, oldt;

		oldt = uatomic_read((uint8_t *) addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old & val, 1);
		} while (oldt != old);

		return;
	}
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
	{
		uint16_t old, oldt;

		oldt = uatomic_read((uint16_t *) addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old & val, 2);
		} while (oldt != old);

		return;
	}
#endif
	case 4:
	{
		uint32_t old, oldt;

		oldt = uatomic_read((uint32_t *) addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old & val, 4);
		} while (oldt != old);

		return;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		uint64_t old, oldt;

		oldt = uatomic_read((uint64_t *) addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old & val, 8);
		} while (oldt != old);

		return;
	}
#endif
	}
	_uatomic_link_error();
}

#define uatomic_and(addr, v)			\
	(_uatomic_and((addr),			\
		caa_cast_long_keep_sign(v),	\
		sizeof(*(addr))))
#define cmm_smp_mb__before_uatomic_and()	cmm_barrier()
#define cmm_smp_mb__after_uatomic_and()		cmm_barrier()

#endif /* #ifndef uatomic_and */

#ifndef uatomic_or
/* uatomic_or */

static inline __attribute__((always_inline))
void _uatomic_or(void *addr, unsigned long val, int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
	{
		uint8_t old, oldt;

		oldt = uatomic_read((uint8_t *) addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old | val, 1);
		} while (oldt != old);

		return;
	}
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
	{
		uint16_t old, oldt;

		oldt = uatomic_read((uint16_t *) addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old | val, 2);
		} while (oldt != old);

		return;
	}
#endif
	case 4:
	{
		uint32_t old, oldt;

		oldt = uatomic_read((uint32_t *) addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old | val, 4);
		} while (oldt != old);

		return;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		uint64_t old, oldt;

		oldt = uatomic_read((uint64_t *) addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old | val, 8);
		} while (oldt != old);

		return;
	}
#endif
	}
	_uatomic_link_error();
}

#define uatomic_or(addr, v)			\
	(_uatomic_or((addr),			\
		caa_cast_long_keep_sign(v),	\
		sizeof(*(addr))))
#define cmm_smp_mb__before_uatomic_or()		cmm_barrier()
#define cmm_smp_mb__after_uatomic_or()		cmm_barrier()

#endif /* #ifndef uatomic_or */

#ifndef uatomic_add_return
/* uatomic_add_return */

static inline __attribute__((always_inline))
unsigned long _uatomic_add_return(void *addr, unsigned long val, int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
	{
		uint8_t old, oldt;

		oldt = uatomic_read((uint8_t *) addr);
		do {
			old = oldt;
			oldt = uatomic_cmpxchg((uint8_t *) addr,
					       old, old + val);
		} while (oldt != old);

		return old + val;
	}
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
	{
		uint16_t old, oldt;

		oldt = uatomic_read((uint16_t *) addr);
		do {
			old = oldt;
			oldt = uatomic_cmpxchg((uint16_t *) addr,
					       old, old + val);
		} while (oldt != old);

		return old + val;
	}
#endif
	case 4:
	{
		uint32_t old, oldt;

		oldt = uatomic_read((uint32_t *) addr);
		do {
			old = oldt;
			oldt = uatomic_cmpxchg((uint32_t *) addr,
					       old, old + val);
		} while (oldt != old);

		return old + val;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		uint64_t old, oldt;

		oldt = uatomic_read((uint64_t *) addr);
		do {
			old = oldt;
			oldt = uatomic_cmpxchg((uint64_t *) addr,
					       old, old + val);
		} while (oldt != old);

		return old + val;
	}
#endif
	}
	_uatomic_link_error();
	return 0;
}

#define uatomic_add_return(addr, v)					    \
	((__typeof__(*(addr))) _uatomic_add_return((addr),		    \
						caa_cast_long_keep_sign(v), \
						sizeof(*(addr))))
#endif /* #ifndef uatomic_add_return */

#ifndef uatomic_xchg
/* xchg */

static inline __attribute__((always_inline))
unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
	{
		uint8_t old, oldt;

		oldt = uatomic_read((uint8_t *) addr);
		do {
			old = oldt;
			oldt = uatomic_cmpxchg((uint8_t *) addr,
					       old, val);
		} while (oldt != old);

		return old;
	}
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
	{
		uint16_t old, oldt;

		oldt = uatomic_read((uint16_t *) addr);
		do {
			old = oldt;
			oldt = uatomic_cmpxchg((uint16_t *) addr,
					       old, val);
		} while (oldt != old);

		return old;
	}
#endif
	case 4:
	{
		uint32_t old, oldt;

		oldt = uatomic_read((uint32_t *) addr);
		do {
			old = oldt;
			oldt = uatomic_cmpxchg((uint32_t *) addr,
					       old, val);
		} while (oldt != old);

		return old;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		uint64_t old, oldt;

		oldt = uatomic_read((uint64_t *) addr);
		do {
			old = oldt;
			oldt = uatomic_cmpxchg((uint64_t *) addr,
					       old, val);
		} while (oldt != old);

		return old;
	}
#endif
	}
	_uatomic_link_error();
	return 0;
}

#define uatomic_xchg(addr, v)						    \
	((__typeof__(*(addr))) _uatomic_exchange((addr),		    \
						caa_cast_long_keep_sign(v), \
						sizeof(*(addr))))
#endif /* #ifndef uatomic_xchg */

#endif /* #else #ifndef uatomic_cmpxchg */

/* uatomic_sub_return, uatomic_add, uatomic_sub, uatomic_inc, uatomic_dec */

#ifndef uatomic_add
#define uatomic_add(addr, v)	(void)uatomic_add_return((addr), (v))
#define cmm_smp_mb__before_uatomic_add()	cmm_barrier()
#define cmm_smp_mb__after_uatomic_add()		cmm_barrier()
#endif

#define uatomic_sub_return(addr, v)	\
	uatomic_add_return((addr), -(caa_cast_long_keep_sign(v)))
#define uatomic_sub(addr, v)		\
	uatomic_add((addr), -(caa_cast_long_keep_sign(v)))
#define cmm_smp_mb__before_uatomic_sub()	cmm_smp_mb__before_uatomic_add()
#define cmm_smp_mb__after_uatomic_sub()		cmm_smp_mb__after_uatomic_add()

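/*
 * Note: uatomic_sub()/uatomic_sub_return() are implemented as addition
 * of the negated operand; caa_cast_long_keep_sign() preserves the sign
 * of the value so the negation behaves as expected through the
 * unsigned long plumbing of the generic helpers.
 */
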
#ifndef uatomic_inc
#define uatomic_inc(addr)	uatomic_add((addr), 1)
#define cmm_smp_mb__before_uatomic_inc()	cmm_smp_mb__before_uatomic_add()
#define cmm_smp_mb__after_uatomic_inc()		cmm_smp_mb__after_uatomic_add()
#endif

#ifndef uatomic_dec
#define uatomic_dec(addr)	uatomic_add((addr), -1)
#define cmm_smp_mb__before_uatomic_dec()	cmm_smp_mb__before_uatomic_add()
#define cmm_smp_mb__after_uatomic_dec()		cmm_smp_mb__after_uatomic_add()
#endif

#ifdef __cplusplus
}
#endif

#endif /* _URCU_UATOMIC_GENERIC_H */