// SPDX-FileCopyrightText: 2023 Olivier Dion <odion@efficios.com>
//
// SPDX-License-Identifier: LGPL-2.1-or-later

/*
 * urcu/uatomic/builtins-generic.h
 */

#ifndef _URCU_UATOMIC_BUILTINS_GENERIC_H
#define _URCU_UATOMIC_BUILTINS_GENERIC_H

#include <urcu/compiler.h>
#include <urcu/system.h>

#define uatomic_store(addr, v, mo)					\
	do {								\
		__atomic_store_n(cmm_cast_volatile(addr), v,		\
				cmm_to_c11(mo));			\
		cmm_seq_cst_fence_after_atomic(mo);			\
	} while (0)

#define uatomic_set(addr, v)						\
	do {								\
		uatomic_store(addr, v, CMM_RELAXED);			\
	} while (0)

#define uatomic_load(addr, mo)						\
	__extension__ ({						\
		__typeof__(*(addr)) _value =				\
			__atomic_load_n(cmm_cast_volatile(addr),	\
					cmm_to_c11(mo));		\
		cmm_seq_cst_fence_after_atomic(mo);			\
		_value;							\
	})

#define uatomic_read(addr)						\
	uatomic_load(addr, CMM_RELAXED)
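
/*
 * Illustrative usage sketch, not part of the API definition.  The variables
 * `data` and `ready` are hypothetical.  uatomic_set()/uatomic_read() are the
 * relaxed convenience forms; uatomic_store()/uatomic_load() take an explicit
 * CMM_* memory order, e.g. for release/acquire pairing:
 *
 *	static int data;
 *	static int ready;
 *
 *	// Publisher
 *	uatomic_set(&data, 42);
 *	uatomic_store(&ready, 1, CMM_RELEASE);
 *
 *	// Consumer
 *	if (uatomic_load(&ready, CMM_ACQUIRE))
 *		assert(uatomic_read(&data) == 42);
 */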

#define uatomic_cmpxchg_mo(addr, old, new, mos, mof)			\
	__extension__ ({						\
		__typeof__(*(addr)) _old = (__typeof__(*(addr)))old;	\
		if (__atomic_compare_exchange_n(cmm_cast_volatile(addr), \
				&_old, new, 0, cmm_to_c11(mos),		\
				cmm_to_c11(mof)))			\
			cmm_seq_cst_fence_after_atomic(mos);		\
		else							\
			cmm_seq_cst_fence_after_atomic(mof);		\
		_old;							\
	})

#define uatomic_cmpxchg(addr, old, new)					\
	uatomic_cmpxchg_mo(addr, old, new, CMM_SEQ_CST_FENCE, CMM_RELAXED)
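
/*
 * Illustrative sketch of a compare-and-swap retry loop built on
 * uatomic_cmpxchg(); the counter `v` is hypothetical.  The macro returns the
 * value found in memory before the attempt, so the exchange succeeded when
 * the return value equals the expected old value:
 *
 *	unsigned long old, new;
 *
 *	do {
 *		old = uatomic_read(&v);
 *		new = old * 2 + 1;
 *	} while (uatomic_cmpxchg(&v, old, new) != old);
 */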

#define uatomic_xchg_mo(addr, v, mo)					\
	__extension__ ({						\
		__typeof__(*(addr)) _old =				\
			__atomic_exchange_n(cmm_cast_volatile(addr), v,	\
					cmm_to_c11(mo));		\
		cmm_seq_cst_fence_after_atomic(mo);			\
		_old;							\
	})

#define uatomic_xchg(addr, v)						\
	uatomic_xchg_mo(addr, v, CMM_SEQ_CST_FENCE)
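
/*
 * Illustrative sketch (hypothetical stack head `head`): uatomic_xchg()
 * stores the new value and returns the previous one atomically, e.g. to
 * detach a lock-free list in a single step:
 *
 *	struct node *snapshot = uatomic_xchg(&head, NULL);
 */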

#define uatomic_add_return_mo(addr, v, mo)				\
	__extension__ ({						\
		__typeof__(*(addr)) _old =				\
			__atomic_add_fetch(cmm_cast_volatile(addr), v,	\
					cmm_to_c11(mo));		\
		cmm_seq_cst_fence_after_atomic(mo);			\
		_old;							\
	})

#define uatomic_add_return(addr, v)					\
	uatomic_add_return_mo(addr, v, CMM_SEQ_CST_FENCE)
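
/*
 * Illustrative sketch (hypothetical counter `nr_events`): uatomic_add_return()
 * yields the value *after* the addition, which suits ticket or sequence
 * number allocation:
 *
 *	unsigned long ticket = uatomic_add_return(&nr_events, 1);
 */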

#define uatomic_sub_return_mo(addr, v, mo)				\
	__extension__ ({						\
		__typeof__(*(addr)) _old =				\
			__atomic_sub_fetch(cmm_cast_volatile(addr), v,	\
					cmm_to_c11(mo));		\
		cmm_seq_cst_fence_after_atomic(mo);			\
		_old;							\
	})

#define uatomic_sub_return(addr, v)					\
	uatomic_sub_return_mo(addr, v, CMM_SEQ_CST_FENCE)

#define uatomic_and_mo(addr, mask, mo)					\
	do {								\
		(void) __atomic_and_fetch(cmm_cast_volatile(addr), mask, \
					cmm_to_c11(mo));		\
		cmm_seq_cst_fence_after_atomic(mo);			\
	} while (0)

#define uatomic_and(addr, mask)						\
	uatomic_and_mo(addr, mask, CMM_SEQ_CST)

#define uatomic_or_mo(addr, mask, mo)					\
	do {								\
		(void) __atomic_or_fetch(cmm_cast_volatile(addr), mask, \
					cmm_to_c11(mo));		\
		cmm_seq_cst_fence_after_atomic(mo);			\
	} while (0)

#define uatomic_or(addr, mask)						\
	uatomic_or_mo(addr, mask, CMM_RELAXED)
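
/*
 * Illustrative sketch (hypothetical flag word `flags` and bit FLAG_DIRTY):
 * uatomic_or() sets bits and uatomic_and() clears them; both discard the
 * resulting value:
 *
 *	uatomic_or(&flags, FLAG_DIRTY);
 *	uatomic_and(&flags, ~FLAG_DIRTY);
 */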

#define uatomic_add_mo(addr, v, mo)					\
	(void) uatomic_add_return_mo(addr, v, mo)

#define uatomic_add(addr, v)						\
	uatomic_add_mo(addr, v, CMM_RELAXED)

#define uatomic_sub_mo(addr, v, mo)					\
	(void) uatomic_sub_return_mo(addr, v, mo)

#define uatomic_sub(addr, v)						\
	uatomic_sub_mo(addr, v, CMM_RELAXED)

#define uatomic_inc_mo(addr, mo)					\
	uatomic_add_mo(addr, 1, mo)

#define uatomic_inc(addr)						\
	uatomic_inc_mo(addr, CMM_RELAXED)

#define uatomic_dec_mo(addr, mo)					\
	uatomic_sub_mo(addr, 1, mo)

#define uatomic_dec(addr)						\
	uatomic_dec_mo(addr, CMM_RELAXED)

#define cmm_smp_mb__before_uatomic_and() cmm_smp_mb()
#define cmm_smp_mb__after_uatomic_and() cmm_smp_mb()

#define cmm_smp_mb__before_uatomic_or() cmm_smp_mb()
#define cmm_smp_mb__after_uatomic_or() cmm_smp_mb()

#define cmm_smp_mb__before_uatomic_add() cmm_smp_mb()
#define cmm_smp_mb__after_uatomic_add() cmm_smp_mb()

#define cmm_smp_mb__before_uatomic_sub() cmm_smp_mb()
#define cmm_smp_mb__after_uatomic_sub() cmm_smp_mb()

#define cmm_smp_mb__before_uatomic_inc() cmm_smp_mb()
#define cmm_smp_mb__after_uatomic_inc() cmm_smp_mb()

#define cmm_smp_mb__before_uatomic_dec() cmm_smp_mb()
#define cmm_smp_mb__after_uatomic_dec() cmm_smp_mb()
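
/*
 * Illustrative sketch: on this builtins backend several of the legacy forms
 * above (e.g. uatomic_inc(), uatomic_add(), uatomic_or()) default to
 * CMM_RELAXED, so callers that need full ordering bracket the operation with
 * the matching helpers, which expand to cmm_smp_mb() here (the counter
 * `hits` is hypothetical):
 *
 *	cmm_smp_mb__before_uatomic_inc();
 *	uatomic_inc(&hits);
 *	cmm_smp_mb__after_uatomic_inc();
 */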

#endif	/* _URCU_UATOMIC_BUILTINS_GENERIC_H */