urcu/uatomic/generic.h

#ifndef _URCU_UATOMIC_GENERIC_H
#define _URCU_UATOMIC_GENERIC_H

/*
 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
 * Copyright (c) 2009 Mathieu Desnoyers
 * Copyright (c) 2010 Paolo Bonzini
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 * Code inspired from libuatomic_ops-1.2, inherited in part from the
 * Boehm-Demers-Weiser conservative garbage collector.
 */

#include <urcu/compiler.h>
#include <urcu/system.h>

#ifdef __cplusplus
extern "C" {
#endif

#ifndef uatomic_set
#define uatomic_set(addr, v)	CMM_STORE_SHARED(*(addr), (v))
#endif

#ifndef uatomic_read
#define uatomic_read(addr)	CMM_LOAD_SHARED(*(addr))
#endif

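/*
 * Minimal usage sketch for uatomic_read()/uatomic_set(), assuming a
 * caller-defined "ready" variable.  Both expand to CMM_LOAD_SHARED() /
 * CMM_STORE_SHARED() accesses; no ordering is assumed here beyond what
 * those macros provide.
 *
 *	static unsigned long ready;
 *
 *	uatomic_set(&ready, 1UL);
 *	while (!uatomic_read(&ready))
 *		;	(busy-wait, illustrative only)
 */
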
#if !defined __OPTIMIZE__ || defined UATOMIC_NO_LINK_ERROR
static inline __attribute__((always_inline))
void _uatomic_link_error()
{
#ifdef ILLEGAL_INSTR
	/*
	 * Generate an illegal instruction.  Cannot catch this with linker
	 * tricks when optimizations are disabled.
	 */
	__asm__ __volatile__(ILLEGAL_INSTR);
#else
	__builtin_trap();
#endif
}

#else /* #if !defined __OPTIMIZE__ || defined UATOMIC_NO_LINK_ERROR */
extern void _uatomic_link_error();
#endif /* #else #if !defined __OPTIMIZE__ || defined UATOMIC_NO_LINK_ERROR */

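/*
 * Note on _uatomic_link_error(): in optimized builds it is only declared,
 * never defined, so a call reaching an unsupported operand size below is
 * expected to be removed as dead code; if it survives, the build fails at
 * link time.  In unoptimized builds (or with UATOMIC_NO_LINK_ERROR) the
 * inline version above traps at run time instead.
 */
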
/* cmpxchg */

#ifndef uatomic_cmpxchg
static inline __attribute__((always_inline))
unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
			       unsigned long _new, int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
		return __sync_val_compare_and_swap_1(addr, old, _new);
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
		return __sync_val_compare_and_swap_2(addr, old, _new);
#endif
	case 4:
		return __sync_val_compare_and_swap_4(addr, old, _new);
#if (CAA_BITS_PER_LONG == 64)
	case 8:
		return __sync_val_compare_and_swap_8(addr, old, _new);
#endif
	}
	_uatomic_link_error();
	return 0;
}


#define uatomic_cmpxchg(addr, old, _new)				      \
	((__typeof__(*(addr))) _uatomic_cmpxchg((addr), (unsigned long)(old),\
						(unsigned long)(_new),	      \
						sizeof(*(addr))))

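/*
 * Minimal usage sketch for uatomic_cmpxchg(), assuming a caller-defined
 * "counter" variable.  The macro returns the value observed in memory
 * before the attempt; the update took place only if that value equals the
 * expected old value.
 *
 *	unsigned long old, res;
 *
 *	do {
 *		old = uatomic_read(&counter);
 *		res = uatomic_cmpxchg(&counter, old, old + 1);
 *	} while (res != old);
 */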

/* uatomic_and */

#ifndef uatomic_and
static inline __attribute__((always_inline))
void _uatomic_and(void *addr, unsigned long val,
		  int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
		__sync_and_and_fetch_1(addr, val);
		return;
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
		__sync_and_and_fetch_2(addr, val);
		return;
#endif
	case 4:
		__sync_and_and_fetch_4(addr, val);
		return;
#if (CAA_BITS_PER_LONG == 64)
	case 8:
		__sync_and_and_fetch_8(addr, val);
		return;
#endif
	}
	_uatomic_link_error();
}

#define uatomic_and(addr, v)			\
	(_uatomic_and((addr),			\
		      (unsigned long)(v),	\
		      sizeof(*(addr))))
#endif

/* uatomic_or */

#ifndef uatomic_or
static inline __attribute__((always_inline))
void _uatomic_or(void *addr, unsigned long val,
		 int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
		__sync_or_and_fetch_1(addr, val);
		return;
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
		__sync_or_and_fetch_2(addr, val);
		return;
#endif
	case 4:
		__sync_or_and_fetch_4(addr, val);
		return;
#if (CAA_BITS_PER_LONG == 64)
	case 8:
		__sync_or_and_fetch_8(addr, val);
		return;
#endif
	}
	_uatomic_link_error();
	return;
}

#define uatomic_or(addr, v)			\
	(_uatomic_or((addr),			\
		     (unsigned long)(v),	\
		     sizeof(*(addr))))
#endif

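/*
 * Usage sketch for uatomic_or()/uatomic_and(), assuming a caller-defined
 * "flags" word and a hypothetical FLAG_RUNNING bit.  Both are atomic
 * read-modify-write operations returning void.
 *
 *	uatomic_or(&flags, FLAG_RUNNING);	(set the bit)
 *	uatomic_and(&flags, ~FLAG_RUNNING);	(clear the bit)
 */
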
/* uatomic_add_return */

#ifndef uatomic_add_return
static inline __attribute__((always_inline))
unsigned long _uatomic_add_return(void *addr, unsigned long val,
				  int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
		return __sync_add_and_fetch_1(addr, val);
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
		return __sync_add_and_fetch_2(addr, val);
#endif
	case 4:
		return __sync_add_and_fetch_4(addr, val);
#if (CAA_BITS_PER_LONG == 64)
	case 8:
		return __sync_add_and_fetch_8(addr, val);
#endif
	}
	_uatomic_link_error();
	return 0;
}


#define uatomic_add_return(addr, v)					\
	((__typeof__(*(addr))) _uatomic_add_return((addr),		\
						   (unsigned long)(v),	\
						   sizeof(*(addr))))
#endif /* #ifndef uatomic_add_return */

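/*
 * Usage sketch for uatomic_add_return(), assuming a caller-defined
 * "refcount" and a hypothetical free_object() helper.  The returned value
 * is the counter after the addition, which suits release-on-last-put logic.
 *
 *	if (uatomic_add_return(&refcount, -1) == 0)
 *		free_object(obj);
 */
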
#ifndef uatomic_xchg
/* xchg */

static inline __attribute__((always_inline))
unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
	{
		unsigned char old;

		do {
			old = uatomic_read((unsigned char *)addr);
		} while (!__sync_bool_compare_and_swap_1(addr, old, val));

		return old;
	}
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
	{
		unsigned short old;

		do {
			old = uatomic_read((unsigned short *)addr);
		} while (!__sync_bool_compare_and_swap_2(addr, old, val));

		return old;
	}
#endif
	case 4:
	{
		unsigned int old;

		do {
			old = uatomic_read((unsigned int *)addr);
		} while (!__sync_bool_compare_and_swap_4(addr, old, val));

		return old;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long old;

		do {
			old = uatomic_read((unsigned long *)addr);
		} while (!__sync_bool_compare_and_swap_8(addr, old, val));

		return old;
	}
#endif
	}
	_uatomic_link_error();
	return 0;
}

#define uatomic_xchg(addr, v)						      \
	((__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \
						 sizeof(*(addr))))
#endif /* #ifndef uatomic_xchg */

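/*
 * Usage sketch for uatomic_xchg(), assuming a caller-defined "slot" pointer
 * (the struct item type and handle() helper are hypothetical): atomically
 * take the current value while publishing a replacement.
 *
 *	struct item *prev;
 *
 *	prev = uatomic_xchg(&slot, NULL);
 *	if (prev)
 *		handle(prev);
 */
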
#else /* #ifndef uatomic_cmpxchg */

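/*
 * In this branch the architecture header supplies uatomic_cmpxchg() but not
 * the other primitives, so the operations below are emulated with a
 * compare-and-swap retry loop.  Sketch of the pattern (update() stands for
 * the per-operation computation, e.g. "old & val" or "old + val"):
 *
 *	oldt = uatomic_read(addr);
 *	do {
 *		old = oldt;
 *		oldt = uatomic_cmpxchg(addr, old, update(old));
 *	} while (oldt != old);
 */
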
#ifndef uatomic_and
/* uatomic_and */

static inline __attribute__((always_inline))
void _uatomic_and(void *addr, unsigned long val, int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
	{
		unsigned char old, oldt;

		oldt = uatomic_read((unsigned char *)addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old & val, 1);
		} while (oldt != old);

		return;
	}
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
	{
		unsigned short old, oldt;

		oldt = uatomic_read((unsigned short *)addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old & val, 2);
		} while (oldt != old);

		return;
	}
#endif
	case 4:
	{
		unsigned int old, oldt;

		oldt = uatomic_read((unsigned int *)addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old & val, 4);
		} while (oldt != old);

		return;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long old, oldt;

		oldt = uatomic_read((unsigned long *)addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old & val, 8);
		} while (oldt != old);

		return;
	}
#endif
	}
	_uatomic_link_error();
}

#define uatomic_and(addr, v)			\
	(_uatomic_and((addr),			\
		      (unsigned long)(v),	\
		      sizeof(*(addr))))
#endif /* #ifndef uatomic_and */

#ifndef uatomic_or
/* uatomic_or */

static inline __attribute__((always_inline))
void _uatomic_or(void *addr, unsigned long val, int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
	{
		unsigned char old, oldt;

		oldt = uatomic_read((unsigned char *)addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old | val, 1);
		} while (oldt != old);

		return;
	}
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
	{
		unsigned short old, oldt;

		oldt = uatomic_read((unsigned short *)addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old | val, 2);
		} while (oldt != old);

		return;
	}
#endif
	case 4:
	{
		unsigned int old, oldt;

		oldt = uatomic_read((unsigned int *)addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old | val, 4);
		} while (oldt != old);

		return;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long old, oldt;

		oldt = uatomic_read((unsigned long *)addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old | val, 8);
		} while (oldt != old);

		return;
	}
#endif
	}
	_uatomic_link_error();
}

#define uatomic_or(addr, v)			\
	(_uatomic_or((addr),			\
		     (unsigned long)(v),	\
		     sizeof(*(addr))))
#endif /* #ifndef uatomic_or */

#ifndef uatomic_add_return
/* uatomic_add_return */

static inline __attribute__((always_inline))
unsigned long _uatomic_add_return(void *addr, unsigned long val, int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
	{
		unsigned char old, oldt;

		oldt = uatomic_read((unsigned char *)addr);
		do {
			old = oldt;
			oldt = uatomic_cmpxchg((unsigned char *)addr,
					       old, old + val);
		} while (oldt != old);

		return old + val;
	}
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
	{
		unsigned short old, oldt;

		oldt = uatomic_read((unsigned short *)addr);
		do {
			old = oldt;
			oldt = uatomic_cmpxchg((unsigned short *)addr,
					       old, old + val);
		} while (oldt != old);

		return old + val;
	}
#endif
	case 4:
	{
		unsigned int old, oldt;

		oldt = uatomic_read((unsigned int *)addr);
		do {
			old = oldt;
			oldt = uatomic_cmpxchg((unsigned int *)addr,
					       old, old + val);
		} while (oldt != old);

		return old + val;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long old, oldt;

		oldt = uatomic_read((unsigned long *)addr);
		do {
			old = oldt;
			oldt = uatomic_cmpxchg((unsigned long *)addr,
					       old, old + val);
		} while (oldt != old);

		return old + val;
	}
#endif
	}
	_uatomic_link_error();
	return 0;
}

#define uatomic_add_return(addr, v)					\
	((__typeof__(*(addr))) _uatomic_add_return((addr),		\
						   (unsigned long)(v),	\
						   sizeof(*(addr))))
#endif /* #ifndef uatomic_add_return */

#ifndef uatomic_xchg
/* xchg */

static inline __attribute__((always_inline))
unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
	{
		unsigned char old, oldt;

		oldt = uatomic_read((unsigned char *)addr);
		do {
			old = oldt;
			oldt = uatomic_cmpxchg((unsigned char *)addr,
					       old, val);
		} while (oldt != old);

		return old;
	}
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
	{
		unsigned short old, oldt;

		oldt = uatomic_read((unsigned short *)addr);
		do {
			old = oldt;
			oldt = uatomic_cmpxchg((unsigned short *)addr,
					       old, val);
		} while (oldt != old);

		return old;
	}
#endif
	case 4:
	{
		unsigned int old, oldt;

		oldt = uatomic_read((unsigned int *)addr);
		do {
			old = oldt;
			oldt = uatomic_cmpxchg((unsigned int *)addr,
					       old, val);
		} while (oldt != old);

		return old;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long old, oldt;

		oldt = uatomic_read((unsigned long *)addr);
		do {
			old = oldt;
			oldt = uatomic_cmpxchg((unsigned long *)addr,
					       old, val);
		} while (oldt != old);

		return old;
	}
#endif
	}
	_uatomic_link_error();
	return 0;
}

#define uatomic_xchg(addr, v)						      \
	((__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \
						 sizeof(*(addr))))
#endif /* #ifndef uatomic_xchg */

#endif /* #else #ifndef uatomic_cmpxchg */

/* uatomic_sub_return, uatomic_add, uatomic_sub, uatomic_inc, uatomic_dec */

#ifndef uatomic_add
#define uatomic_add(addr, v)		(void)uatomic_add_return((addr), (v))
#endif

#define uatomic_sub_return(addr, v)	uatomic_add_return((addr), -(v))
#define uatomic_sub(addr, v)		uatomic_add((addr), -(v))

#ifndef uatomic_inc
#define uatomic_inc(addr)		uatomic_add((addr), 1)
#endif

#ifndef uatomic_dec
#define uatomic_dec(addr)		uatomic_add((addr), -1)
#endif

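/*
 * Usage sketch for the derived helpers, assuming a caller-defined
 * "nr_readers" counter and a hypothetical wake_up_writer() helper.  Note
 * that uatomic_sub()/uatomic_sub_return()/uatomic_dec() simply add the
 * negated value.
 *
 *	uatomic_inc(&nr_readers);
 *	...
 *	if (uatomic_sub_return(&nr_readers, 1) == 0)
 *		wake_up_writer();
 */
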
#ifdef __cplusplus
}
#endif

#endif /* _URCU_UATOMIC_GENERIC_H */