uatomic: Fix header guard comment
[urcu.git] / include / urcu / uatomic / generic.h
// SPDX-FileCopyrightText: 1991-1994 by Xerox Corporation. All rights reserved.
// SPDX-FileCopyrightText: 1996-1999 by Silicon Graphics. All rights reserved.
// SPDX-FileCopyrightText: 1999-2004 Hewlett-Packard Development Company, L.P.
// SPDX-FileCopyrightText: 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
// SPDX-FileCopyrightText: 2010 Paolo Bonzini
//
// SPDX-License-Identifier: LicenseRef-Boehm-GC

#ifndef _URCU_UATOMIC_GENERIC_H
#define _URCU_UATOMIC_GENERIC_H

/*
 * Code inspired by libuatomic_ops-1.2, inherited in part from the
 * Boehm-Demers-Weiser conservative garbage collector.
 */

#include <stdint.h>
#include <urcu/compiler.h>
#include <urcu/system.h>

#ifdef __cplusplus
extern "C" {
#endif

#ifndef uatomic_set
#define uatomic_set(addr, v) ((void) CMM_STORE_SHARED(*(addr), (v)))
#endif

/*
 * Can be defined for the architecture.
 *
 * What needs to be emitted _before_ the `operation' with memory ordering `mo'.
 */
#ifndef _cmm_compat_c11_smp_mb__before_mo
# define _cmm_compat_c11_smp_mb__before_mo(operation, mo) cmm_smp_mb()
#endif

/*
 * Can be defined for the architecture.
 *
 * What needs to be emitted _after_ the `operation' with memory ordering `mo'.
 */
#ifndef _cmm_compat_c11_smp_mb__after_mo
# define _cmm_compat_c11_smp_mb__after_mo(operation, mo) cmm_smp_mb()
#endif
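
/*
 * Illustrative sketch only (not part of this header): an architecture that
 * provides the required ordering natively could override these hooks so that
 * relaxed accesses emit no extra fence. The mapping below is hypothetical and
 * assumes the CMM_RELAXED memory order constant from <urcu/uatomic.h>; it is
 * not the definition used by any particular architecture:
 *
 *	#define _cmm_compat_c11_smp_mb__before_mo(operation, mo)	\
 *		do {							\
 *			if ((mo) != CMM_RELAXED)			\
 *				cmm_smp_mb();				\
 *		} while (0)
 */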

#define uatomic_load_store_return_op(op, addr, v, mo) \
	__extension__ \
	({ \
		_cmm_compat_c11_smp_mb__before_mo(op, mo); \
		__typeof__(*(addr)) _value = op(addr, v); \
		_cmm_compat_c11_smp_mb__after_mo(op, mo); \
		\
		_value; \
	})

#define uatomic_load_store_op(op, addr, v, mo) \
	do { \
		_cmm_compat_c11_smp_mb__before_mo(op, mo); \
		op(addr, v); \
		_cmm_compat_c11_smp_mb__after_mo(op, mo); \
	} while (0)

#define uatomic_store(addr, v, mo) \
	do { \
		_cmm_compat_c11_smp_mb__before_mo(uatomic_set, mo); \
		uatomic_set(addr, v); \
		_cmm_compat_c11_smp_mb__after_mo(uatomic_set, mo); \
	} while (0)

#define uatomic_and_mo(addr, v, mo) \
	uatomic_load_store_op(uatomic_and, addr, v, mo)

#define uatomic_or_mo(addr, v, mo) \
	uatomic_load_store_op(uatomic_or, addr, v, mo)

#define uatomic_add_mo(addr, v, mo) \
	uatomic_load_store_op(uatomic_add, addr, v, mo)

#define uatomic_sub_mo(addr, v, mo) \
	uatomic_load_store_op(uatomic_sub, addr, v, mo)

#define uatomic_inc_mo(addr, mo) \
	uatomic_load_store_op(uatomic_add, addr, 1, mo)

#define uatomic_dec_mo(addr, mo) \
	uatomic_load_store_op(uatomic_add, addr, -1, mo)

/*
 * NOTE: We cannot simply select the barrier with
 * "switch (_value == (old) ? mos : mof)", otherwise the compiler emits a
 * -Wduplicated-cond warning.
 */
#define uatomic_cmpxchg_mo(addr, old, new, mos, mof) \
	__extension__ \
	({ \
		_cmm_compat_c11_smp_mb__before_mo(uatomic_cmpxchg, mos); \
		__typeof__(*(addr)) _value = uatomic_cmpxchg(addr, old, \
				new); \
		\
		if (_value == (old)) { \
			_cmm_compat_c11_smp_mb__after_mo(uatomic_cmpxchg, mos); \
		} else { \
			_cmm_compat_c11_smp_mb__after_mo(uatomic_cmpxchg, mof); \
		} \
		_value; \
	})
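
/*
 * Usage sketch (illustrative only): hand off a slot only if we still own it,
 * with release ordering on success and relaxed ordering on failure. The
 * "slot", "owner" and "my_id" names are hypothetical, and CMM_RELEASE /
 * CMM_RELAXED are assumed to be the memory order constants from
 * <urcu/uatomic.h>:
 *
 *	unsigned long observed;
 *
 *	observed = uatomic_cmpxchg_mo(&slot->owner, my_id, 0,
 *			CMM_RELEASE, CMM_RELAXED);
 *	if (observed == my_id)
 *		... the slot was released by this thread ...
 */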

#define uatomic_xchg_mo(addr, v, mo) \
	uatomic_load_store_return_op(uatomic_xchg, addr, v, mo)

#define uatomic_add_return_mo(addr, v, mo) \
	uatomic_load_store_return_op(uatomic_add_return, addr, v, mo)

#define uatomic_sub_return_mo(addr, v, mo) \
	uatomic_load_store_return_op(uatomic_sub_return, addr, v, mo)

#ifndef uatomic_read
#define uatomic_read(addr) CMM_LOAD_SHARED(*(addr))
#endif

#define uatomic_load(addr, mo) \
	__extension__ \
	({ \
		_cmm_compat_c11_smp_mb__before_mo(uatomic_read, mo); \
		__typeof__(*(addr)) _rcu_value = uatomic_read(addr); \
		_cmm_compat_c11_smp_mb__after_mo(uatomic_read, mo); \
		\
		_rcu_value; \
	})
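
/*
 * Usage sketch (illustrative only): publish a value with a release store and
 * observe it with an acquire load. The "data", "ready" and use() names are
 * hypothetical, and CMM_RELEASE / CMM_ACQUIRE are assumed to be the memory
 * order constants from <urcu/uatomic.h>:
 *
 *	// Producer:
 *	data = 42;
 *	uatomic_store(&ready, 1, CMM_RELEASE);
 *
 *	// Consumer:
 *	if (uatomic_load(&ready, CMM_ACQUIRE))
 *		use(data);
 */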

#if !defined __OPTIMIZE__ || defined UATOMIC_NO_LINK_ERROR
#ifdef ILLEGAL_INSTR
static inline __attribute__((always_inline))
void _uatomic_link_error(void)
{
	/*
	 * Generate an illegal instruction: we cannot catch this with
	 * linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__(ILLEGAL_INSTR);
}
#else
static inline __attribute__((always_inline, __noreturn__))
void _uatomic_link_error(void)
{
	__builtin_trap();
}
#endif

#else /* #if !defined __OPTIMIZE__ || defined UATOMIC_NO_LINK_ERROR */
extern void _uatomic_link_error(void);
#endif /* #else #if !defined __OPTIMIZE__ || defined UATOMIC_NO_LINK_ERROR */
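
/*
 * Note on the link-error mechanism (an explanatory sketch, not upstream
 * documentation): with optimizations enabled, the dead branches of the size
 * switches below are folded away, so an operand size without a matching case
 * leaves an unresolved reference to _uatomic_link_error() and the build fails
 * at link time. For example (hypothetical, on an architecture that does not
 * define UATOMIC_HAS_ATOMIC_SHORT):
 *
 *	uint16_t counter;
 *
 *	uatomic_inc(&counter);	// undefined reference to _uatomic_link_error
 *
 * Without optimizations the reference cannot be folded away, so the inline
 * definitions above trap at run time instead.
 */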

/* cmpxchg */

#ifndef uatomic_cmpxchg
static inline __attribute__((always_inline))
unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
		unsigned long _new, int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
		return __sync_val_compare_and_swap_1((uint8_t *) addr, old,
				_new);
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
		return __sync_val_compare_and_swap_2((uint16_t *) addr, old,
				_new);
#endif
	case 4:
		return __sync_val_compare_and_swap_4((uint32_t *) addr, old,
				_new);
#if (CAA_BITS_PER_LONG == 64)
	case 8:
		return __sync_val_compare_and_swap_8((uint64_t *) addr, old,
				_new);
#endif
	}
	_uatomic_link_error();
	return 0;
}


#define uatomic_cmpxchg(addr, old, _new) \
	((__typeof__(*(addr))) _uatomic_cmpxchg((addr), \
		caa_cast_long_keep_sign(old), \
		caa_cast_long_keep_sign(_new), \
		sizeof(*(addr))))


/* uatomic_and */

#ifndef uatomic_and
static inline __attribute__((always_inline))
void _uatomic_and(void *addr, unsigned long val,
		int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
		__sync_and_and_fetch_1((uint8_t *) addr, val);
		return;
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
		__sync_and_and_fetch_2((uint16_t *) addr, val);
		return;
#endif
	case 4:
		__sync_and_and_fetch_4((uint32_t *) addr, val);
		return;
#if (CAA_BITS_PER_LONG == 64)
	case 8:
		__sync_and_and_fetch_8((uint64_t *) addr, val);
		return;
#endif
	}
	_uatomic_link_error();
}

#define uatomic_and(addr, v) \
	(_uatomic_and((addr), \
		caa_cast_long_keep_sign(v), \
		sizeof(*(addr))))
#define cmm_smp_mb__before_uatomic_and() cmm_barrier()
#define cmm_smp_mb__after_uatomic_and() cmm_barrier()

#endif

/* uatomic_or */

#ifndef uatomic_or
static inline __attribute__((always_inline))
void _uatomic_or(void *addr, unsigned long val,
		int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
		__sync_or_and_fetch_1((uint8_t *) addr, val);
		return;
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
		__sync_or_and_fetch_2((uint16_t *) addr, val);
		return;
#endif
	case 4:
		__sync_or_and_fetch_4((uint32_t *) addr, val);
		return;
#if (CAA_BITS_PER_LONG == 64)
	case 8:
		__sync_or_and_fetch_8((uint64_t *) addr, val);
		return;
#endif
	}
	_uatomic_link_error();
}

#define uatomic_or(addr, v) \
	(_uatomic_or((addr), \
		caa_cast_long_keep_sign(v), \
		sizeof(*(addr))))
#define cmm_smp_mb__before_uatomic_or() cmm_barrier()
#define cmm_smp_mb__after_uatomic_or() cmm_barrier()

#endif


/* uatomic_add_return */

#ifndef uatomic_add_return
static inline __attribute__((always_inline))
unsigned long _uatomic_add_return(void *addr, unsigned long val,
		int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
		return __sync_add_and_fetch_1((uint8_t *) addr, val);
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
		return __sync_add_and_fetch_2((uint16_t *) addr, val);
#endif
	case 4:
		return __sync_add_and_fetch_4((uint32_t *) addr, val);
#if (CAA_BITS_PER_LONG == 64)
	case 8:
		return __sync_add_and_fetch_8((uint64_t *) addr, val);
#endif
	}
	_uatomic_link_error();
	return 0;
}


#define uatomic_add_return(addr, v) \
	((__typeof__(*(addr))) _uatomic_add_return((addr), \
		caa_cast_long_keep_sign(v), \
		sizeof(*(addr))))
#endif /* #ifndef uatomic_add_return */

#ifndef uatomic_xchg
/* xchg */

static inline __attribute__((always_inline))
unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
	{
		uint8_t old;

		do {
			old = uatomic_read((uint8_t *) addr);
		} while (!__sync_bool_compare_and_swap_1((uint8_t *) addr,
				old, val));

		return old;
	}
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
	{
		uint16_t old;

		do {
			old = uatomic_read((uint16_t *) addr);
		} while (!__sync_bool_compare_and_swap_2((uint16_t *) addr,
				old, val));

		return old;
	}
#endif
	case 4:
	{
		uint32_t old;

		do {
			old = uatomic_read((uint32_t *) addr);
		} while (!__sync_bool_compare_and_swap_4((uint32_t *) addr,
				old, val));

		return old;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		uint64_t old;

		do {
			old = uatomic_read((uint64_t *) addr);
		} while (!__sync_bool_compare_and_swap_8((uint64_t *) addr,
				old, val));

		return old;
	}
#endif
	}
	_uatomic_link_error();
	return 0;
}

#define uatomic_xchg(addr, v) \
	((__typeof__(*(addr))) _uatomic_exchange((addr), \
		caa_cast_long_keep_sign(v), \
		sizeof(*(addr))))
#endif /* #ifndef uatomic_xchg */

#else /* #ifndef uatomic_cmpxchg */

#ifndef uatomic_and
/* uatomic_and */

static inline __attribute__((always_inline))
void _uatomic_and(void *addr, unsigned long val, int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
	{
		uint8_t old, oldt;

		oldt = uatomic_read((uint8_t *) addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old & val, 1);
		} while (oldt != old);

		return;
	}
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
	{
		uint16_t old, oldt;

		oldt = uatomic_read((uint16_t *) addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old & val, 2);
		} while (oldt != old);

		return;
	}
#endif
	case 4:
	{
		uint32_t old, oldt;

		oldt = uatomic_read((uint32_t *) addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old & val, 4);
		} while (oldt != old);

		return;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		uint64_t old, oldt;

		oldt = uatomic_read((uint64_t *) addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old & val, 8);
		} while (oldt != old);

		return;
	}
#endif
	}
	_uatomic_link_error();
}

#define uatomic_and(addr, v) \
	(_uatomic_and((addr), \
		caa_cast_long_keep_sign(v), \
		sizeof(*(addr))))
#define cmm_smp_mb__before_uatomic_and() cmm_barrier()
#define cmm_smp_mb__after_uatomic_and() cmm_barrier()

#endif /* #ifndef uatomic_and */

#ifndef uatomic_or
/* uatomic_or */

static inline __attribute__((always_inline))
void _uatomic_or(void *addr, unsigned long val, int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
	{
		uint8_t old, oldt;

		oldt = uatomic_read((uint8_t *) addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old | val, 1);
		} while (oldt != old);

		return;
	}
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
	{
		uint16_t old, oldt;

		oldt = uatomic_read((uint16_t *) addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old | val, 2);
		} while (oldt != old);

		return;
	}
#endif
	case 4:
	{
		uint32_t old, oldt;

		oldt = uatomic_read((uint32_t *) addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old | val, 4);
		} while (oldt != old);

		return;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		uint64_t old, oldt;

		oldt = uatomic_read((uint64_t *) addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old | val, 8);
		} while (oldt != old);

		return;
	}
#endif
	}
	_uatomic_link_error();
}

#define uatomic_or(addr, v) \
	(_uatomic_or((addr), \
		caa_cast_long_keep_sign(v), \
		sizeof(*(addr))))
#define cmm_smp_mb__before_uatomic_or() cmm_barrier()
#define cmm_smp_mb__after_uatomic_or() cmm_barrier()

#endif /* #ifndef uatomic_or */

#ifndef uatomic_add_return
/* uatomic_add_return */

static inline __attribute__((always_inline))
unsigned long _uatomic_add_return(void *addr, unsigned long val, int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
	{
		uint8_t old, oldt;

		oldt = uatomic_read((uint8_t *) addr);
		do {
			old = oldt;
			oldt = uatomic_cmpxchg((uint8_t *) addr,
					old, old + val);
		} while (oldt != old);

		return old + val;
	}
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
	{
		uint16_t old, oldt;

		oldt = uatomic_read((uint16_t *) addr);
		do {
			old = oldt;
			oldt = uatomic_cmpxchg((uint16_t *) addr,
					old, old + val);
		} while (oldt != old);

		return old + val;
	}
#endif
	case 4:
	{
		uint32_t old, oldt;

		oldt = uatomic_read((uint32_t *) addr);
		do {
			old = oldt;
			oldt = uatomic_cmpxchg((uint32_t *) addr,
					old, old + val);
		} while (oldt != old);

		return old + val;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		uint64_t old, oldt;

		oldt = uatomic_read((uint64_t *) addr);
		do {
			old = oldt;
			oldt = uatomic_cmpxchg((uint64_t *) addr,
					old, old + val);
		} while (oldt != old);

		return old + val;
	}
#endif
	}
	_uatomic_link_error();
	return 0;
}

#define uatomic_add_return(addr, v) \
	((__typeof__(*(addr))) _uatomic_add_return((addr), \
		caa_cast_long_keep_sign(v), \
		sizeof(*(addr))))
#endif /* #ifndef uatomic_add_return */

#ifndef uatomic_xchg
/* xchg */

static inline __attribute__((always_inline))
unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
	{
		uint8_t old, oldt;

		oldt = uatomic_read((uint8_t *) addr);
		do {
			old = oldt;
			oldt = uatomic_cmpxchg((uint8_t *) addr,
					old, val);
		} while (oldt != old);

		return old;
	}
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
	{
		uint16_t old, oldt;

		oldt = uatomic_read((uint16_t *) addr);
		do {
			old = oldt;
			oldt = uatomic_cmpxchg((uint16_t *) addr,
					old, val);
		} while (oldt != old);

		return old;
	}
#endif
	case 4:
	{
		uint32_t old, oldt;

		oldt = uatomic_read((uint32_t *) addr);
		do {
			old = oldt;
			oldt = uatomic_cmpxchg((uint32_t *) addr,
					old, val);
		} while (oldt != old);

		return old;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		uint64_t old, oldt;

		oldt = uatomic_read((uint64_t *) addr);
		do {
			old = oldt;
			oldt = uatomic_cmpxchg((uint64_t *) addr,
					old, val);
		} while (oldt != old);

		return old;
	}
#endif
	}
	_uatomic_link_error();
	return 0;
}

#define uatomic_xchg(addr, v) \
	((__typeof__(*(addr))) _uatomic_exchange((addr), \
		caa_cast_long_keep_sign(v), \
		sizeof(*(addr))))
#endif /* #ifndef uatomic_xchg */

#endif /* #else #ifndef uatomic_cmpxchg */

/* uatomic_sub_return, uatomic_add, uatomic_sub, uatomic_inc, uatomic_dec */

#ifndef uatomic_add
#define uatomic_add(addr, v) (void)uatomic_add_return((addr), (v))
#define cmm_smp_mb__before_uatomic_add() cmm_barrier()
#define cmm_smp_mb__after_uatomic_add() cmm_barrier()
#endif

#define uatomic_sub_return(addr, v) \
	uatomic_add_return((addr), -(caa_cast_long_keep_sign(v)))
#define uatomic_sub(addr, v) \
	uatomic_add((addr), -(caa_cast_long_keep_sign(v)))
#define cmm_smp_mb__before_uatomic_sub() cmm_smp_mb__before_uatomic_add()
#define cmm_smp_mb__after_uatomic_sub() cmm_smp_mb__after_uatomic_add()

#ifndef uatomic_inc
#define uatomic_inc(addr) uatomic_add((addr), 1)
#define cmm_smp_mb__before_uatomic_inc() cmm_smp_mb__before_uatomic_add()
#define cmm_smp_mb__after_uatomic_inc() cmm_smp_mb__after_uatomic_add()
#endif

#ifndef uatomic_dec
#define uatomic_dec(addr) uatomic_add((addr), -1)
#define cmm_smp_mb__before_uatomic_dec() cmm_smp_mb__before_uatomic_add()
#define cmm_smp_mb__after_uatomic_dec() cmm_smp_mb__after_uatomic_add()
#endif
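
/*
 * Usage sketch (illustrative only; the refcount variable and helper names are
 * hypothetical): a simple reference count built on the generic helpers
 * defined above:
 *
 *	static long refcount = 1;
 *
 *	static void ref_get(void)
 *	{
 *		uatomic_inc(&refcount);
 *	}
 *
 *	static int ref_put(void)	// nonzero when the last reference is dropped
 *	{
 *		return uatomic_sub_return(&refcount, 1) == 0;
 *	}
 */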

#ifdef __cplusplus
}
#endif

#endif /* _URCU_UATOMIC_GENERIC_H */