#ifndef _URCU_ARCH_UATOMIC_PPC_H
#define _URCU_ARCH_UATOMIC_PPC_H

/*
 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
 * Copyright (c) 2009 Mathieu Desnoyers
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 * Code inspired from libuatomic_ops-1.2, inherited in part from the
 * Boehm-Demers-Weiser conservative garbage collector.
 */

#include <urcu/compiler.h>
#include <urcu/system.h>

#ifdef __cplusplus
extern "C" {
#endif

#ifndef __SIZEOF_LONG__
#ifdef __powerpc64__
#define __SIZEOF_LONG__ 8
#else
#define __SIZEOF_LONG__ 4
#endif
#endif

#ifdef __NO_LWSYNC__
#define LWSYNC_OPCODE	"sync\n"
#else
#define LWSYNC_OPCODE	"lwsync\n"
#endif

#ifndef BITS_PER_LONG
#define BITS_PER_LONG	(__SIZEOF_LONG__ * 8)
#endif

#define ILLEGAL_INSTR	".long 0xd00d00"

#define uatomic_set(addr, v)	STORE_SHARED(*(addr), (v))
#define uatomic_read(addr)	LOAD_SHARED(*(addr))

/*
 * Using an isync as second barrier for exchange to provide acquire semantic.
 * According to uatomic_ops/sysdeps/gcc/powerpc.h, the documentation is "fairly
 * explicit that this also has acquire semantics."
 * Derived from AO_compare_and_swap(), but removed the comparison.
 */

/* xchg */

static inline __attribute__((always_inline))
unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
{
	switch (len) {
	case 4:
	{
		unsigned int result;

		__asm__ __volatile__(
			LWSYNC_OPCODE
		"1:\t"	"lwarx %0,0,%1\n"	/* load and reserve */
			"stwcx. %2,0,%1\n"	/* else store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"isync\n"
			: "=&r"(result)
			: "r"(addr), "r"(val)
			: "memory", "cc");

		return result;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result;

		__asm__ __volatile__(
			LWSYNC_OPCODE
		"1:\t"	"ldarx %0,0,%1\n"	/* load and reserve */
			"stdcx. %2,0,%1\n"	/* else store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"isync\n"
			: "=&r"(result)
			: "r"(addr), "r"(val)
			: "memory", "cc");

		return result;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__(ILLEGAL_INSTR);
	return 0;
}

#define uatomic_xchg(addr, v)						      \
	((__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \
						 sizeof(*(addr))))
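
/*
 * Illustrative usage sketch (not part of the original header): a
 * hypothetical caller could use uatomic_xchg() to atomically install a new
 * value and retrieve the previous one, for instance when handing a buffer
 * pointer over to another thread.
 *
 *	static void *current_buf;
 *
 *	static void *swap_buffer(void *new_buf)
 *	{
 *		return uatomic_xchg(&current_buf, new_buf);
 *	}
 */
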
/* cmpxchg */

static inline __attribute__((always_inline))
unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
			       unsigned long _new, int len)
{
	switch (len) {
	case 4:
	{
		unsigned int old_val;

		__asm__ __volatile__(
			LWSYNC_OPCODE
		"1:\t"	"lwarx %0,0,%1\n"	/* load and reserve */
			"cmpw %0,%3\n"		/* if load is not equal to */
			"bne 2f\n"		/* old, fail */
			"stwcx. %2,0,%1\n"	/* else store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"isync\n"
		"2:\n"
			: "=&r"(old_val)
			: "r"(addr), "r"((unsigned int)_new),
			  "r"((unsigned int)old)
			: "memory", "cc");

		return old_val;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long old_val;

		__asm__ __volatile__(
			LWSYNC_OPCODE
		"1:\t"	"ldarx %0,0,%1\n"	/* load and reserve */
			"cmpd %0,%3\n"		/* if load is not equal to */
			"bne 2f\n"		/* old, fail */
			"stdcx. %2,0,%1\n"	/* else store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"isync\n"
		"2:\n"
			: "=&r"(old_val)
			: "r"(addr), "r"((unsigned long)_new),
			  "r"((unsigned long)old)
			: "memory", "cc");

		return old_val;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__(ILLEGAL_INSTR);
	return 0;
}


#define uatomic_cmpxchg(addr, old, _new)				      \
	((__typeof__(*(addr))) _uatomic_cmpxchg((addr), (unsigned long)(old),\
						(unsigned long)(_new),	      \
						sizeof(*(addr))))

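/*
 * Illustrative usage sketch (not part of the original header):
 * uatomic_cmpxchg() returns the value found at the address, so the update
 * took effect only when that return value equals the expected old value.
 * A hypothetical caller can build a retry loop on top of it, e.g. to
 * increment a counter while saturating at a maximum:
 *
 *	static unsigned int count;
 *
 *	static void inc_saturating(unsigned int max)
 *	{
 *		unsigned int old, newval;
 *
 *		do {
 *			old = uatomic_read(&count);
 *			if (old == max)
 *				return;
 *			newval = old + 1;
 *		} while (uatomic_cmpxchg(&count, old, newval) != old);
 *	}
 */
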
/* uatomic_add_return */

static inline __attribute__((always_inline))
unsigned long _uatomic_add_return(void *addr, unsigned long val,
				  int len)
{
	switch (len) {
	case 4:
	{
		unsigned int result;

		__asm__ __volatile__(
			LWSYNC_OPCODE
		"1:\t"	"lwarx %0,0,%1\n"	/* load and reserve */
			"add %0,%2,%0\n"	/* add val to value loaded */
			"stwcx. %0,0,%1\n"	/* store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"isync\n"
			: "=&r"(result)
			: "r"(addr), "r"(val)
			: "memory", "cc");

		return result;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result;

		__asm__ __volatile__(
			LWSYNC_OPCODE
		"1:\t"	"ldarx %0,0,%1\n"	/* load and reserve */
			"add %0,%2,%0\n"	/* add val to value loaded */
			"stdcx. %0,0,%1\n"	/* store conditional */
			"bne- 1b\n"		/* retry if lost reservation */
			"isync\n"
			: "=&r"(result)
			: "r"(addr), "r"(val)
			: "memory", "cc");

		return result;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__(ILLEGAL_INSTR);
	return 0;
}


#define uatomic_add_return(addr, v)					\
	((__typeof__(*(addr))) _uatomic_add_return((addr),		\
						   (unsigned long)(v),	\
						   sizeof(*(addr))))

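/*
 * Illustrative usage sketch (not part of the original header):
 * uatomic_add_return() yields the value after the addition, which lets a
 * hypothetical caller hand out unique, monotonically increasing ticket
 * numbers without any lock:
 *
 *	static unsigned long next_ticket;
 *
 *	static unsigned long take_ticket(void)
 *	{
 *		return uatomic_add_return(&next_ticket, 1);
 *	}
 */
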
/* uatomic_sub_return, uatomic_add, uatomic_sub, uatomic_inc, uatomic_dec */

#define uatomic_sub_return(addr, v)	uatomic_add_return((addr), -(v))

#define uatomic_add(addr, v)		(void)uatomic_add_return((addr), (v))
#define uatomic_sub(addr, v)		(void)uatomic_sub_return((addr), (v))

#define uatomic_inc(addr)		uatomic_add((addr), 1)
#define uatomic_dec(addr)		uatomic_add((addr), -1)

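/*
 * Illustrative usage sketch (not part of the original header): the derived
 * helpers are convenient when the updated value is not needed, e.g. for a
 * simple event counter; a reference-count drop, where the caller must know
 * whether it released the last reference, would keep using
 * uatomic_add_return(). Both snippets below are hypothetical.
 *
 *	static unsigned long events;
 *	static long refcount = 1;
 *
 *	static void on_event(void)
 *	{
 *		uatomic_inc(&events);
 *	}
 *
 *	static int drop_ref(void)
 *	{
 *		return uatomic_add_return(&refcount, -1) == 0;
 *	}
 */
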
#ifdef __cplusplus
}
#endif

#endif /* _URCU_ARCH_UATOMIC_PPC_H */