// SPDX-FileCopyrightText: 1991-1994 by Xerox Corporation. All rights reserved.
// SPDX-FileCopyrightText: 1996-1999 by Silicon Graphics. All rights reserved.
// SPDX-FileCopyrightText: 1999-2004 Hewlett-Packard Development Company, L.P.
// SPDX-FileCopyrightText: 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
// SPDX-FileCopyrightText: 2010 Paolo Bonzini
//
// SPDX-License-Identifier: LicenseRef-Boehm-GC

#ifndef _URCU_UATOMIC_GENERIC_H
#define _URCU_UATOMIC_GENERIC_H
/*
 * Code inspired from libuatomic_ops-1.2, inherited in part from the
 * Boehm-Demers-Weiser conservative garbage collector.
 */
#include <stdint.h>
#include <stdlib.h>
#include <urcu/compiler.h>
#include <urcu/system.h>
#define uatomic_set(addr, v)	((void) CMM_STORE_SHARED(*(addr), (v)))
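
/*
 * uatomic_set() is a plain shared store through CMM_STORE_SHARED(): no
 * memory barrier is emitted. Ordering, when needed, comes from the *_mo
 * variants or the explicit barrier helpers defined below.
 */
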
#define uatomic_load_store_return_op(op, addr, v, mo)                   \
        __extension__                                                   \
        ({                                                              \
                switch (mo) {                                           \
                case CMM_ACQUIRE: case CMM_CONSUME: case CMM_RELAXED:   \
                        break;                                          \
                case CMM_RELEASE: case CMM_ACQ_REL:                     \
                case CMM_SEQ_CST: case CMM_SEQ_CST_FENCE:               \
                        cmm_smp_mb();                                   \
                        break;                                          \
                default:                                                \
                        abort();                                        \
                }                                                       \
                                                                        \
                __typeof__((*addr)) _value = op(addr, v);               \
                                                                        \
                switch (mo) {                                           \
                case CMM_CONSUME:                                       \
                        cmm_smp_read_barrier_depends();                 \
                        break;                                          \
                case CMM_ACQUIRE: case CMM_ACQ_REL:                     \
                case CMM_SEQ_CST: case CMM_SEQ_CST_FENCE:               \
                        cmm_smp_mb();                                   \
                        break;                                          \
                case CMM_RELAXED: case CMM_RELEASE:                     \
                        break;                                          \
                default:                                                \
                        abort();                                        \
                }                                                       \
                _value;                                                 \
        })
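
/*
 * Note on the mapping above: CMM_RELEASE (and stronger) orders emit a full
 * barrier before the operation, CMM_ACQUIRE (and stronger) emit one after
 * it, and CMM_CONSUME only needs a data-dependency barrier after the
 * operation. This conservatively emulates the C11 memory orders on top of
 * cmm_smp_mb().
 */
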
#define uatomic_load_store_op(op, addr, v, mo)                          \
        do {                                                            \
                switch (mo) {                                           \
                case CMM_ACQUIRE: case CMM_CONSUME: case CMM_RELAXED:   \
                        break;                                          \
                case CMM_RELEASE: case CMM_ACQ_REL:                     \
                case CMM_SEQ_CST: case CMM_SEQ_CST_FENCE:               \
                        cmm_smp_mb();                                   \
                        break;                                          \
                default:                                                \
                        abort();                                        \
                }                                                       \
                                                                        \
                op(addr, v);                                            \
                                                                        \
                switch (mo) {                                           \
                case CMM_CONSUME:                                       \
                        cmm_smp_read_barrier_depends();                 \
                        break;                                          \
                case CMM_ACQUIRE: case CMM_ACQ_REL:                     \
                case CMM_SEQ_CST: case CMM_SEQ_CST_FENCE:               \
                        cmm_smp_mb();                                   \
                        break;                                          \
                case CMM_RELAXED: case CMM_RELEASE:                     \
                        break;                                          \
                default:                                                \
                        abort();                                        \
                }                                                       \
        } while (0)
#define uatomic_store(addr, v, mo)                                      \
        do {                                                            \
                switch (mo) {                                           \
                case CMM_RELAXED:                                       \
                        break;                                          \
                case CMM_RELEASE: case CMM_SEQ_CST:                     \
                case CMM_SEQ_CST_FENCE:                                 \
                        cmm_smp_mb();                                   \
                        break;                                          \
                default:                                                \
                        abort();                                        \
                }                                                       \
                                                                        \
                uatomic_set(addr, v);                                   \
                                                                        \
                switch (mo) {                                           \
                case CMM_RELAXED: case CMM_RELEASE:                     \
                        break;                                          \
                case CMM_SEQ_CST: case CMM_SEQ_CST_FENCE:               \
                        cmm_smp_mb();                                   \
                        break;                                          \
                default:                                                \
                        abort();                                        \
                }                                                       \
        } while (0)
#define uatomic_and_mo(addr, v, mo)                             \
        uatomic_load_store_op(uatomic_and, addr, v, mo)

#define uatomic_or_mo(addr, v, mo)                              \
        uatomic_load_store_op(uatomic_or, addr, v, mo)

#define uatomic_add_mo(addr, v, mo)                             \
        uatomic_load_store_op(uatomic_add, addr, v, mo)

#define uatomic_sub_mo(addr, v, mo)                             \
        uatomic_load_store_op(uatomic_sub, addr, v, mo)

#define uatomic_inc_mo(addr, mo)                                \
        uatomic_load_store_op(uatomic_add, addr, 1, mo)

#define uatomic_dec_mo(addr, mo)                                \
        uatomic_load_store_op(uatomic_add, addr, -1, mo)
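
/*
 * Illustrative sketch (hypothetical caller, not part of this header): the
 * *_mo wrappers combine a relaxed architecture primitive with the barriers
 * requested by the caller, e.g.:
 *
 *	static unsigned long nr_events;		// hypothetical counter
 *
 *	uatomic_add_mo(&nr_events, 1, CMM_RELAXED);
 *	uatomic_inc_mo(&nr_events, CMM_RELAXED);
 */
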
/*
 * NOTE: We cannot just do switch (_value == (old) ? mos : mof), otherwise
 * the compiler emits a -Wduplicated-cond warning.
 */
#define uatomic_cmpxchg_mo(addr, old, new, mos, mof)                    \
        __extension__                                                   \
        ({                                                              \
                switch (mos) {                                          \
                case CMM_ACQUIRE: case CMM_CONSUME: case CMM_RELAXED:   \
                        break;                                          \
                case CMM_RELEASE: case CMM_ACQ_REL:                     \
                case CMM_SEQ_CST: case CMM_SEQ_CST_FENCE:               \
                        cmm_smp_mb();                                   \
                        break;                                          \
                default:                                                \
                        abort();                                        \
                }                                                       \
                                                                        \
                __typeof__(*(addr)) _value = uatomic_cmpxchg(addr, old, \
                                                        new);           \
                                                                        \
                if (_value == (old)) {                                  \
                        switch (mos) {                                  \
                        case CMM_CONSUME:                               \
                                cmm_smp_read_barrier_depends();         \
                                break;                                  \
                        case CMM_ACQUIRE: case CMM_ACQ_REL:             \
                        case CMM_SEQ_CST: case CMM_SEQ_CST_FENCE:       \
                                cmm_smp_mb();                           \
                                break;                                  \
                        case CMM_RELAXED: case CMM_RELEASE:             \
                                break;                                  \
                        default:                                        \
                                abort();                                \
                        }                                               \
                } else {                                                \
                        switch (mof) {                                  \
                        case CMM_CONSUME:                               \
                                cmm_smp_read_barrier_depends();         \
                                break;                                  \
                        case CMM_ACQUIRE: case CMM_ACQ_REL:             \
                        case CMM_SEQ_CST: case CMM_SEQ_CST_FENCE:       \
                                cmm_smp_mb();                           \
                                break;                                  \
                        case CMM_RELAXED: case CMM_RELEASE:             \
                                break;                                  \
                        default:                                        \
                                abort();                                \
                        }                                               \
                }                                                       \
                _value;                                                 \
        })
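
/*
 * Illustrative sketch (hypothetical caller): uatomic_cmpxchg_mo() takes a
 * memory order for the success path (mos) and one for the failure path
 * (mof), and returns the value that was read, e.g. a simple try-lock:
 *
 *	static unsigned long lock;	// hypothetical flag, 0 == unlocked
 *
 *	if (uatomic_cmpxchg_mo(&lock, 0, 1, CMM_ACQUIRE, CMM_RELAXED) == 0) {
 *		// lock acquired
 *	}
 */
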
#define uatomic_xchg_mo(addr, v, mo)                            \
        uatomic_load_store_return_op(uatomic_xchg, addr, v, mo)

#define uatomic_add_return_mo(addr, v, mo)                      \
        uatomic_load_store_return_op(uatomic_add_return, addr, v, mo)

#define uatomic_sub_return_mo(addr, v, mo)                      \
        uatomic_load_store_return_op(uatomic_sub_return, addr, v, mo)
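
/*
 * Illustrative sketch (hypothetical caller), continuing the try-lock above:
 *
 *	(void) uatomic_xchg_mo(&lock, 0, CMM_RELEASE);	// release the flag
 */
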
#define uatomic_read(addr)	CMM_LOAD_SHARED(*(addr))
#define uatomic_load(addr, mo)                                          \
        __extension__                                                   \
        ({                                                              \
                switch (mo) {                                           \
                case CMM_ACQUIRE: case CMM_CONSUME: case CMM_RELAXED:   \
                        break;                                          \
                case CMM_SEQ_CST: case CMM_SEQ_CST_FENCE:               \
                        cmm_smp_mb();                                   \
                        break;                                          \
                default:                                                \
                        abort();                                        \
                }                                                       \
                                                                        \
                __typeof__(*(addr)) _rcu_value = uatomic_read(addr);    \
                                                                        \
                switch (mo) {                                           \
                case CMM_RELAXED:                                       \
                        break;                                          \
                case CMM_CONSUME:                                       \
                        cmm_smp_read_barrier_depends();                 \
                        break;                                          \
                case CMM_ACQUIRE:                                       \
                case CMM_SEQ_CST: case CMM_SEQ_CST_FENCE:               \
                        cmm_smp_mb();                                   \
                        break;                                          \
                default:                                                \
                        abort();                                        \
                }                                                       \
                                                                        \
                _rcu_value;                                             \
        })
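
/*
 * Illustrative sketch (hypothetical caller): a release store paired with an
 * acquire load, so that writes made before the store are visible to the
 * reader that observes the flag:
 *
 *	static unsigned long data, ready;	// hypothetical
 *
 *	// writer
 *	uatomic_set(&data, 42);
 *	uatomic_store(&ready, 1, CMM_RELEASE);
 *
 *	// reader
 *	if (uatomic_load(&ready, CMM_ACQUIRE))
 *		(void) uatomic_read(&data);	// guaranteed to observe 42
 */
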
#if !defined __OPTIMIZE__  || defined UATOMIC_NO_LINK_ERROR
#ifdef ILLEGAL_INSTR
static inline __attribute__((always_inline))
void _uatomic_link_error(void)
{
	/*
	 * generate an illegal instruction. Cannot catch this with
	 * linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__(ILLEGAL_INSTR);
}
#else
static inline __attribute__((always_inline, __noreturn__))
void _uatomic_link_error(void)
{
	__builtin_trap();
}
#endif

#else /* #if !defined __OPTIMIZE__ || defined UATOMIC_NO_LINK_ERROR */
extern void _uatomic_link_error(void);
#endif /* #else #if !defined __OPTIMIZE__ || defined UATOMIC_NO_LINK_ERROR */
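
/*
 * _uatomic_link_error() is only reached when sizeof(*addr) is not a size the
 * architecture can handle atomically: without optimization it traps at run
 * time, with optimization the call is expected to be eliminated or to fail
 * at link time (hence the name).
 */
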
/* cmpxchg */

#ifndef uatomic_cmpxchg
static inline __attribute__((always_inline))
unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
		unsigned long _new, int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
		return __sync_val_compare_and_swap_1((uint8_t *) addr, old,
				_new);
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
		return __sync_val_compare_and_swap_2((uint16_t *) addr, old,
				_new);
#endif
	case 4:
		return __sync_val_compare_and_swap_4((uint32_t *) addr, old,
				_new);
#if (CAA_BITS_PER_LONG == 64)
	case 8:
		return __sync_val_compare_and_swap_8((uint64_t *) addr, old,
				_new);
#endif
	}
	_uatomic_link_error();
	return 0;
}

#define uatomic_cmpxchg(addr, old, _new)                                      \
	((__typeof__(*(addr))) _uatomic_cmpxchg((addr),                       \
						caa_cast_long_keep_sign(old), \
						caa_cast_long_keep_sign(_new),\
						sizeof(*(addr))))
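
/*
 * Illustrative sketch (hypothetical caller): uatomic_cmpxchg() returns the
 * value that was in *addr, so success is detected by comparing against the
 * expected old value:
 *
 *	static unsigned long refcount = 1;	// hypothetical
 *	unsigned long old;
 *
 *	old = uatomic_read(&refcount);
 *	while (old != 0) {
 *		unsigned long seen = uatomic_cmpxchg(&refcount, old, old + 1);
 *		if (seen == old)
 *			break;		// incremented
 *		old = seen;		// raced, retry with the value seen
 *	}
 */
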
/* uatomic_and */

#ifndef uatomic_and
static inline __attribute__((always_inline))
void _uatomic_and(void *addr, unsigned long val,
		int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
		__sync_and_and_fetch_1((uint8_t *) addr, val);
		return;
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
		__sync_and_and_fetch_2((uint16_t *) addr, val);
		return;
#endif
	case 4:
		__sync_and_and_fetch_4((uint32_t *) addr, val);
		return;
#if (CAA_BITS_PER_LONG == 64)
	case 8:
		__sync_and_and_fetch_8((uint64_t *) addr, val);
		return;
#endif
	}
	_uatomic_link_error();
}

#define uatomic_and(addr, v)                    \
	(_uatomic_and((addr),                   \
		caa_cast_long_keep_sign(v),     \
		sizeof(*(addr))))
#define cmm_smp_mb__before_uatomic_and()	cmm_barrier()
#define cmm_smp_mb__after_uatomic_and()		cmm_barrier()

#endif /* #ifndef uatomic_and */
/* uatomic_or */

#ifndef uatomic_or
static inline __attribute__((always_inline))
void _uatomic_or(void *addr, unsigned long val,
		int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
		__sync_or_and_fetch_1((uint8_t *) addr, val);
		return;
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
		__sync_or_and_fetch_2((uint16_t *) addr, val);
		return;
#endif
	case 4:
		__sync_or_and_fetch_4((uint32_t *) addr, val);
		return;
#if (CAA_BITS_PER_LONG == 64)
	case 8:
		__sync_or_and_fetch_8((uint64_t *) addr, val);
		return;
#endif
	}
	_uatomic_link_error();
}

#define uatomic_or(addr, v)                     \
	(_uatomic_or((addr),                    \
		caa_cast_long_keep_sign(v),     \
		sizeof(*(addr))))
#define cmm_smp_mb__before_uatomic_or()		cmm_barrier()
#define cmm_smp_mb__after_uatomic_or()		cmm_barrier()

#endif /* #ifndef uatomic_or */
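
/*
 * Illustrative sketch (hypothetical caller): the portable contract of
 * uatomic_and()/uatomic_or() is that they provide no ordering guarantee on
 * their own; use the cmm_smp_mb__before/after_uatomic_*() helpers around
 * them when ordering is required (here they reduce to compiler barriers
 * because the __sync builtins already imply a full barrier):
 *
 *	static unsigned long flags;	// hypothetical bit mask
 *
 *	uatomic_or(&flags, 0x1UL);		// set a bit
 *	cmm_smp_mb__after_uatomic_or();		// order it before later accesses
 *	uatomic_and(&flags, ~0x1UL);		// clear it again
 */
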
/* uatomic_add_return */

#ifndef uatomic_add_return
static inline __attribute__((always_inline))
unsigned long _uatomic_add_return(void *addr, unsigned long val,
		int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
		return __sync_add_and_fetch_1((uint8_t *) addr, val);
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
		return __sync_add_and_fetch_2((uint16_t *) addr, val);
#endif
	case 4:
		return __sync_add_and_fetch_4((uint32_t *) addr, val);
#if (CAA_BITS_PER_LONG == 64)
	case 8:
		return __sync_add_and_fetch_8((uint64_t *) addr, val);
#endif
	}
	_uatomic_link_error();
	return 0;
}

#define uatomic_add_return(addr, v)                                         \
	((__typeof__(*(addr))) _uatomic_add_return((addr),                  \
						caa_cast_long_keep_sign(v), \
						sizeof(*(addr))))
#endif /* #ifndef uatomic_add_return */
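
/*
 * Illustrative sketch (hypothetical caller): uatomic_add_return() returns
 * the new value, which makes it convenient for e.g. reference counting:
 *
 *	static unsigned long refs;	// hypothetical
 *
 *	if (uatomic_add_return(&refs, -1) == 0)
 *		cleanup();		// hypothetical: last reference dropped
 */
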
/* xchg */

#ifndef uatomic_xchg
static inline __attribute__((always_inline))
unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
	{
		uint8_t old;

		do {
			old = uatomic_read((uint8_t *) addr);
		} while (!__sync_bool_compare_and_swap_1((uint8_t *) addr,
				old, val));

		return old;
	}
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
	{
		uint16_t old;

		do {
			old = uatomic_read((uint16_t *) addr);
		} while (!__sync_bool_compare_and_swap_2((uint16_t *) addr,
				old, val));

		return old;
	}
#endif
	case 4:
	{
		uint32_t old;

		do {
			old = uatomic_read((uint32_t *) addr);
		} while (!__sync_bool_compare_and_swap_4((uint32_t *) addr,
				old, val));

		return old;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		uint64_t old;

		do {
			old = uatomic_read((uint64_t *) addr);
		} while (!__sync_bool_compare_and_swap_8((uint64_t *) addr,
				old, val));

		return old;
	}
#endif
	}
	_uatomic_link_error();
	return 0;
}

#define uatomic_xchg(addr, v)                                               \
	((__typeof__(*(addr))) _uatomic_exchange((addr),                    \
						caa_cast_long_keep_sign(v), \
						sizeof(*(addr))))
#endif /* #ifndef uatomic_xchg */
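
/*
 * Illustrative sketch (hypothetical caller): uatomic_xchg() unconditionally
 * stores the new value and returns the previous one:
 *
 *	static unsigned long pending;	// hypothetical
 *
 *	unsigned long grabbed = uatomic_xchg(&pending, 0);	// take all pending work
 */
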
#else /* #ifndef uatomic_cmpxchg */
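
/*
 * The architecture already provides uatomic_cmpxchg(): the operations it
 * does not provide are emulated below with compare-and-swap retry loops
 * (read the old value, compute the new one, retry until the CAS succeeds).
 */
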
/* uatomic_and */

#ifndef uatomic_and
static inline __attribute__((always_inline))
void _uatomic_and(void *addr, unsigned long val, int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
	{
		uint8_t old, oldt;

		oldt = uatomic_read((uint8_t *) addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old & val, 1);
		} while (oldt != old);

		return;
	}
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
	{
		uint16_t old, oldt;

		oldt = uatomic_read((uint16_t *) addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old & val, 2);
		} while (oldt != old);

		return;
	}
#endif
	case 4:
	{
		uint32_t old, oldt;

		oldt = uatomic_read((uint32_t *) addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old & val, 4);
		} while (oldt != old);

		return;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		uint64_t old, oldt;

		oldt = uatomic_read((uint64_t *) addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old & val, 8);
		} while (oldt != old);

		return;
	}
#endif
	}
	_uatomic_link_error();
}

#define uatomic_and(addr, v)                    \
	(_uatomic_and((addr),                   \
		caa_cast_long_keep_sign(v),     \
		sizeof(*(addr))))
#define cmm_smp_mb__before_uatomic_and()	cmm_barrier()
#define cmm_smp_mb__after_uatomic_and()		cmm_barrier()

#endif /* #ifndef uatomic_and */
/* uatomic_or */

#ifndef uatomic_or
static inline __attribute__((always_inline))
void _uatomic_or(void *addr, unsigned long val, int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
	{
		uint8_t old, oldt;

		oldt = uatomic_read((uint8_t *) addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old | val, 1);
		} while (oldt != old);

		return;
	}
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
	{
		uint16_t old, oldt;

		oldt = uatomic_read((uint16_t *) addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old | val, 2);
		} while (oldt != old);

		return;
	}
#endif
	case 4:
	{
		uint32_t old, oldt;

		oldt = uatomic_read((uint32_t *) addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old | val, 4);
		} while (oldt != old);

		return;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		uint64_t old, oldt;

		oldt = uatomic_read((uint64_t *) addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old | val, 8);
		} while (oldt != old);

		return;
	}
#endif
	}
	_uatomic_link_error();
}

#define uatomic_or(addr, v)                     \
	(_uatomic_or((addr),                    \
		caa_cast_long_keep_sign(v),     \
		sizeof(*(addr))))
#define cmm_smp_mb__before_uatomic_or()		cmm_barrier()
#define cmm_smp_mb__after_uatomic_or()		cmm_barrier()

#endif /* #ifndef uatomic_or */
#ifndef uatomic_add_return
/* uatomic_add_return */

static inline __attribute__((always_inline))
unsigned long _uatomic_add_return(void *addr, unsigned long val, int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
	{
		uint8_t old, oldt;

		oldt = uatomic_read((uint8_t *) addr);
		do {
			old = oldt;
			oldt = uatomic_cmpxchg((uint8_t *) addr,
					old, old + val);
		} while (oldt != old);

		return old + val;
	}
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
	{
		uint16_t old, oldt;

		oldt = uatomic_read((uint16_t *) addr);
		do {
			old = oldt;
			oldt = uatomic_cmpxchg((uint16_t *) addr,
					old, old + val);
		} while (oldt != old);

		return old + val;
	}
#endif
	case 4:
	{
		uint32_t old, oldt;

		oldt = uatomic_read((uint32_t *) addr);
		do {
			old = oldt;
			oldt = uatomic_cmpxchg((uint32_t *) addr,
					old, old + val);
		} while (oldt != old);

		return old + val;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		uint64_t old, oldt;

		oldt = uatomic_read((uint64_t *) addr);
		do {
			old = oldt;
			oldt = uatomic_cmpxchg((uint64_t *) addr,
					old, old + val);
		} while (oldt != old);

		return old + val;
	}
#endif
	}
	_uatomic_link_error();
	return 0;
}

#define uatomic_add_return(addr, v)                                         \
	((__typeof__(*(addr))) _uatomic_add_return((addr),                  \
						caa_cast_long_keep_sign(v), \
						sizeof(*(addr))))
#endif /* #ifndef uatomic_add_return */
/* xchg */

#ifndef uatomic_xchg
static inline __attribute__((always_inline))
unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
	{
		uint8_t old, oldt;

		oldt = uatomic_read((uint8_t *) addr);
		do {
			old = oldt;
			oldt = uatomic_cmpxchg((uint8_t *) addr,
					old, val);
		} while (oldt != old);

		return old;
	}
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
	{
		uint16_t old, oldt;

		oldt = uatomic_read((uint16_t *) addr);
		do {
			old = oldt;
			oldt = uatomic_cmpxchg((uint16_t *) addr,
					old, val);
		} while (oldt != old);

		return old;
	}
#endif
	case 4:
	{
		uint32_t old, oldt;

		oldt = uatomic_read((uint32_t *) addr);
		do {
			old = oldt;
			oldt = uatomic_cmpxchg((uint32_t *) addr,
					old, val);
		} while (oldt != old);

		return old;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		uint64_t old, oldt;

		oldt = uatomic_read((uint64_t *) addr);
		do {
			old = oldt;
			oldt = uatomic_cmpxchg((uint64_t *) addr,
					old, val);
		} while (oldt != old);

		return old;
	}
#endif
	}
	_uatomic_link_error();
	return 0;
}

#define uatomic_xchg(addr, v)                                               \
	((__typeof__(*(addr))) _uatomic_exchange((addr),                    \
						caa_cast_long_keep_sign(v), \
						sizeof(*(addr))))
#endif /* #ifndef uatomic_xchg */
#endif /* #else #ifndef uatomic_cmpxchg */
/* uatomic_sub_return, uatomic_add, uatomic_sub, uatomic_inc, uatomic_dec */

#define uatomic_add(addr, v)		(void)uatomic_add_return((addr), (v))
#define cmm_smp_mb__before_uatomic_add()	cmm_barrier()
#define cmm_smp_mb__after_uatomic_add()		cmm_barrier()

#define uatomic_sub_return(addr, v)	\
	uatomic_add_return((addr), -(caa_cast_long_keep_sign(v)))
#define uatomic_sub(addr, v)		\
	uatomic_add((addr), -(caa_cast_long_keep_sign(v)))
#define cmm_smp_mb__before_uatomic_sub()	cmm_smp_mb__before_uatomic_add()
#define cmm_smp_mb__after_uatomic_sub()		cmm_smp_mb__after_uatomic_add()

#define uatomic_inc(addr)		uatomic_add((addr), 1)
#define cmm_smp_mb__before_uatomic_inc()	cmm_smp_mb__before_uatomic_add()
#define cmm_smp_mb__after_uatomic_inc()		cmm_smp_mb__after_uatomic_add()

#define uatomic_dec(addr)		uatomic_add((addr), -1)
#define cmm_smp_mb__before_uatomic_dec()	cmm_smp_mb__before_uatomic_add()
#define cmm_smp_mb__after_uatomic_dec()		cmm_smp_mb__after_uatomic_add()
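
/*
 * Illustrative sketch (hypothetical caller): the derived helpers are thin
 * wrappers around uatomic_add_return()/uatomic_add():
 *
 *	static unsigned long count;	// hypothetical
 *
 *	uatomic_inc(&count);			// count += 1
 *	uatomic_sub(&count, 2);			// count -= 2
 *	(void) uatomic_sub_return(&count, 1);	// count -= 1, returns new value
 *	uatomic_dec(&count);			// count -= 1
 */
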
#endif /* _URCU_UATOMIC_GENERIC_H */