#ifndef _URCU_ARCH_UATOMIC_X86_H
#define _URCU_ARCH_UATOMIC_X86_H

/*
 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
 * Copyright (c) 2009 Mathieu Desnoyers
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 * Code inspired from libuatomic_ops-1.2, inherited in part from the
 * Boehm-Demers-Weiser conservative garbage collector.
 */

#include <urcu/compiler.h>
#include <urcu/system.h>

#ifndef __SIZEOF_LONG__
#if defined(__x86_64__) || defined(__amd64__)
#define __SIZEOF_LONG__ 8
#else
#define __SIZEOF_LONG__ 4
#endif
#endif

#ifndef BITS_PER_LONG
#define BITS_PER_LONG	(__SIZEOF_LONG__ * 8)
#endif

/*
 * Derived from AO_compare_and_swap() and AO_test_and_set_full().
 */

struct __uatomic_dummy {
	unsigned long v[10];
};
#define __hp(x)	((struct __uatomic_dummy *)(x))
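
/*
 * The dummy-structure cast lets the "m" asm operands below refer to the
 * target location directly, without committing to a particular operand
 * size, so the compiler knows the inline assembly may read and/or write
 * that memory.
 */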

#define _uatomic_set(addr, v)	STORE_SHARED(*(addr), (v))
#define _uatomic_read(addr)	LOAD_SHARED(*(addr))

/* cmpxchg */

static inline __attribute__((always_inline))
unsigned long __uatomic_cmpxchg(void *addr, unsigned long old,
				unsigned long _new, int len)
{
	switch (len) {
	case 1:
	{
		unsigned char result = old;

		__asm__ __volatile__(
		"lock; cmpxchgb %2, %1"
			: "+a"(result), "+m"(*__hp(addr))
			: "q"((unsigned char)_new)
			: "memory");
		return result;
	}
	case 2:
	{
		unsigned short result = old;

		__asm__ __volatile__(
		"lock; cmpxchgw %2, %1"
			: "+a"(result), "+m"(*__hp(addr))
			: "r"((unsigned short)_new)
			: "memory");
		return result;
	}
	case 4:
	{
		unsigned int result = old;

		__asm__ __volatile__(
		"lock; cmpxchgl %2, %1"
			: "+a"(result), "+m"(*__hp(addr))
			: "r"((unsigned int)_new)
			: "memory");
		return result;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result = old;

		__asm__ __volatile__(
		"lock; cmpxchgq %2, %1"
			: "+a"(result), "+m"(*__hp(addr))
			: "r"((unsigned long)_new)
			: "memory");
		return result;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__("ud2");
	return 0;
}

#define _uatomic_cmpxchg(addr, old, _new)				       \
	((__typeof__(*(addr))) __uatomic_cmpxchg((addr), (unsigned long)(old),\
						(unsigned long)(_new),	       \
						sizeof(*(addr))))
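
/*
 * Illustrative use (caller-side sketch; "counter" and "expected" are
 * hypothetical names, not part of this header). The primitive returns the
 * value observed in memory, so a successful update is detected by comparing
 * that value against the expected one:
 *
 *	unsigned long expected = 0, observed;
 *
 *	observed = _uatomic_cmpxchg(&counter, expected, 1UL);
 *	if (observed == expected) {
 *		// the word was atomically changed from 0 to 1
 *	} else {
 *		// lost the race; "observed" holds the current value
 *	}
 */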

/* xchg */

static inline __attribute__((always_inline))
unsigned long __uatomic_exchange(void *addr, unsigned long val, int len)
{
	/* Note: the "xchg" instruction does not need a "lock" prefix. */
	switch (len) {
	case 1:
	{
		unsigned char result;
		__asm__ __volatile__(
		"xchgb %0, %1"
			: "=q"(result), "+m"(*__hp(addr))
			: "0" ((unsigned char)val)
			: "memory");
		return result;
	}
	case 2:
	{
		unsigned short result;
		__asm__ __volatile__(
		"xchgw %0, %1"
			: "=r"(result), "+m"(*__hp(addr))
			: "0" ((unsigned short)val)
			: "memory");
		return result;
	}
	case 4:
	{
		unsigned int result;
		__asm__ __volatile__(
		"xchgl %0, %1"
			: "=r"(result), "+m"(*__hp(addr))
			: "0" ((unsigned int)val)
			: "memory");
		return result;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result;
		__asm__ __volatile__(
		"xchgq %0, %1"
			: "=r"(result), "+m"(*__hp(addr))
			: "0" ((unsigned long)val)
			: "memory");
		return result;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__("ud2");
	return 0;
}

#define _uatomic_xchg(addr, v)						       \
	((__typeof__(*(addr))) __uatomic_exchange((addr), (unsigned long)(v), \
						sizeof(*(addr))))
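
/*
 * Illustrative use (caller-side sketch; "list_head" and "new_head" are
 * hypothetical). xchg unconditionally stores the new value and returns
 * whatever was there before:
 *
 *	struct node *prev;
 *
 *	prev = _uatomic_xchg(&list_head, new_head);
 *	// "prev" is the pointer previously published in list_head
 */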

/* uatomic_add_return, uatomic_sub_return */

static inline __attribute__((always_inline))
unsigned long __uatomic_add_return(void *addr, unsigned long val,
				   int len)
{
	switch (len) {
	case 1:
	{
		unsigned char result = val;

		__asm__ __volatile__(
		"lock; xaddb %1, %0"
			: "+m"(*__hp(addr)), "+q" (result)
			:
			: "memory");
		return result + (unsigned char)val;
	}
	case 2:
	{
		unsigned short result = val;

		__asm__ __volatile__(
		"lock; xaddw %1, %0"
			: "+m"(*__hp(addr)), "+r" (result)
			:
			: "memory");
		return result + (unsigned short)val;
	}
	case 4:
	{
		unsigned int result = val;

		__asm__ __volatile__(
		"lock; xaddl %1, %0"
			: "+m"(*__hp(addr)), "+r" (result)
			:
			: "memory");
		return result + (unsigned int)val;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long result = val;

		__asm__ __volatile__(
		"lock; xaddq %1, %0"
			: "+m"(*__hp(addr)), "+r" (result)
			:
			: "memory");
		return result + (unsigned long)val;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__("ud2");
	return 0;
}

#define _uatomic_add_return(addr, v)					       \
	((__typeof__(*(addr))) __uatomic_add_return((addr),		       \
						(unsigned long)(v),	       \
						sizeof(*(addr))))

#define _uatomic_sub_return(addr, v)	_uatomic_add_return((addr), -(v))
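
/*
 * Illustrative use (caller-side sketch; "refcount", "obj" and "release" are
 * hypothetical). add_return/sub_return yield the value *after* the update,
 * which makes them convenient for reference counting:
 *
 *	_uatomic_add_return(&refcount, 1);
 *	...
 *	if (_uatomic_sub_return(&refcount, 1) == 0)
 *		release(obj);
 */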

/* uatomic_add, uatomic_sub */

static inline __attribute__((always_inline))
void __uatomic_add(void *addr, unsigned long val, int len)
{
	switch (len) {
	case 1:
	{
		__asm__ __volatile__(
		"lock; addb %1, %0"
			: "=m"(*__hp(addr))
			: "iq" ((unsigned char)val)
			: "memory");
		return;
	}
	case 2:
	{
		__asm__ __volatile__(
		"lock; addw %1, %0"
			: "=m"(*__hp(addr))
			: "ir" ((unsigned short)val)
			: "memory");
		return;
	}
	case 4:
	{
		__asm__ __volatile__(
		"lock; addl %1, %0"
			: "=m"(*__hp(addr))
			: "ir" ((unsigned int)val)
			: "memory");
		return;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		__asm__ __volatile__(
		"lock; addq %1, %0"
			: "=m"(*__hp(addr))
			: "er" ((unsigned long)val)
			: "memory");
		return;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__("ud2");
	return;
}

#define _uatomic_add(addr, v)						       \
	(__uatomic_add((addr), (unsigned long)(v), sizeof(*(addr))))

#define _uatomic_sub(addr, v)	_uatomic_add((addr), -(v))
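
/*
 * Unlike the *_return variants above, _uatomic_add()/_uatomic_sub() discard
 * the result; they fit statistics-style counters where only the update
 * matters (caller-side sketch, hypothetical names):
 *
 *	_uatomic_add(&stats.bytes, len);
 *	_uatomic_sub(&stats.in_flight, 1);
 */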


/* uatomic_inc */

static inline __attribute__((always_inline))
void __uatomic_inc(void *addr, int len)
{
	switch (len) {
	case 1:
	{
		__asm__ __volatile__(
		"lock; incb %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
	case 2:
	{
		__asm__ __volatile__(
		"lock; incw %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
	case 4:
	{
		__asm__ __volatile__(
		"lock; incl %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		__asm__ __volatile__(
		"lock; incq %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__("ud2");
	return;
}

#define _uatomic_inc(addr)	(__uatomic_inc((addr), sizeof(*(addr))))

/* uatomic_dec */

static inline __attribute__((always_inline))
void __uatomic_dec(void *addr, int len)
{
	switch (len) {
	case 1:
	{
		__asm__ __volatile__(
		"lock; decb %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
	case 2:
	{
		__asm__ __volatile__(
		"lock; decw %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
	case 4:
	{
		__asm__ __volatile__(
		"lock; decl %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
#if (BITS_PER_LONG == 64)
	case 8:
	{
		__asm__ __volatile__(
		"lock; decq %0"
			: "=m"(*__hp(addr))
			:
			: "memory");
		return;
	}
#endif
	}
	/* generate an illegal instruction. Cannot catch this with linker tricks
	 * when optimizations are disabled. */
	__asm__ __volatile__("ud2");
	return;
}

#define _uatomic_dec(addr)	(__uatomic_dec((addr), sizeof(*(addr))))
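
/*
 * _uatomic_inc()/_uatomic_dec() are the add/sub operations specialized for
 * a constant 1 (caller-side sketch; "active_readers" is hypothetical):
 *
 *	_uatomic_inc(&active_readers);
 *	...
 *	_uatomic_dec(&active_readers);
 */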

#if ((BITS_PER_LONG != 64) && defined(CONFIG_URCU_COMPAT_ARCH))
extern int __urcu_cas_avail;
extern int __urcu_cas_init(void);

#define UATOMIC_COMPAT(insn)						       \
	((likely(__urcu_cas_avail > 0))					       \
	? (_uatomic_##insn)						       \
	: ((unlikely(__urcu_cas_avail < 0)				       \
		? ((__urcu_cas_init() > 0)				       \
			? (_uatomic_##insn)				       \
			: (compat_uatomic_##insn))			       \
		: (compat_uatomic_##insn))))
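
/*
 * Reading of the dispatch above: __urcu_cas_avail > 0 means the CPU has
 * already been probed and the native cmpxchg-based primitives can be used,
 * < 0 means it has not been probed yet (so __urcu_cas_init() runs first),
 * and 0 means the probe failed, in which case the compat_uatomic_*()
 * fallbacks below are used instead of the inline _uatomic_*() primitives.
 */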

extern unsigned long _compat_uatomic_set(void *addr,
					 unsigned long _new, int len);
#define compat_uatomic_set(addr, _new)					       \
	((__typeof__(*(addr))) _compat_uatomic_set((addr),		       \
						(unsigned long)(_new),	       \
						sizeof(*(addr))))


extern unsigned long _compat_uatomic_xchg(void *addr,
					  unsigned long _new, int len);
#define compat_uatomic_xchg(addr, _new)					       \
	((__typeof__(*(addr))) _compat_uatomic_xchg((addr),		       \
						(unsigned long)(_new),	       \
						sizeof(*(addr))))

extern unsigned long _compat_uatomic_cmpxchg(void *addr, unsigned long old,
					     unsigned long _new, int len);
#define compat_uatomic_cmpxchg(addr, old, _new)				       \
	((__typeof__(*(addr))) _compat_uatomic_cmpxchg((addr),		       \
						(unsigned long)(old),	       \
						(unsigned long)(_new),	       \
						sizeof(*(addr))))

extern unsigned long _compat_uatomic_add_return(void *addr,
						unsigned long v, int len);
#define compat_uatomic_add_return(addr, v)				       \
	((__typeof__(*(addr))) _compat_uatomic_add_return((addr),	       \
						(unsigned long)(v),	       \
						sizeof(*(addr))))

#define compat_uatomic_sub_return(addr, v)				       \
	compat_uatomic_add_return((addr), -(v))
#define compat_uatomic_add(addr, v)					       \
	((void)compat_uatomic_add_return((addr), (v)))
#define compat_uatomic_sub(addr, v)					       \
	((void)compat_uatomic_sub_return((addr), (v)))
#define compat_uatomic_inc(addr)					       \
	(compat_uatomic_add((addr), 1))
#define compat_uatomic_dec(addr)					       \
	(compat_uatomic_sub((addr), 1))

#else
#define UATOMIC_COMPAT(insn)	(_uatomic_##insn)
#endif

/* Read is atomic even in compat mode */
#define uatomic_read(addr)	_uatomic_read(addr)

#define uatomic_set(addr, v)			\
		UATOMIC_COMPAT(set(addr, v))
#define uatomic_cmpxchg(addr, old, _new)	\
		UATOMIC_COMPAT(cmpxchg(addr, old, _new))
#define uatomic_xchg(addr, v)			\
		UATOMIC_COMPAT(xchg(addr, v))
#define uatomic_add_return(addr, v)		\
		UATOMIC_COMPAT(add_return(addr, v))
#define uatomic_sub_return(addr, v)		\
		UATOMIC_COMPAT(sub_return(addr, v))
#define uatomic_add(addr, v)		UATOMIC_COMPAT(add(addr, v))
#define uatomic_sub(addr, v)		UATOMIC_COMPAT(sub(addr, v))
#define uatomic_inc(addr)		UATOMIC_COMPAT(inc(addr))
#define uatomic_dec(addr)		UATOMIC_COMPAT(dec(addr))
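
/*
 * Illustrative use of the public wrappers (caller-side sketch; "count" is a
 * hypothetical variable). On the native x86 path the lock-prefixed
 * operations and xchg above each imply a full memory barrier, while
 * uatomic_read()/uatomic_set() are plain shared-memory accesses:
 *
 *	unsigned long count;
 *
 *	uatomic_set(&count, 10);
 *	uatomic_inc(&count);				// count == 11
 *	if (uatomic_cmpxchg(&count, 11, 42) == 11) {
 *		// count is now 42
 *	}
 *	... uatomic_read(&count) ...
 */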

#endif /* _URCU_ARCH_UATOMIC_X86_H */