#ifndef _URCU_UATOMIC_GENERIC_H
#define _URCU_UATOMIC_GENERIC_H

/*
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
 * Copyright (c) 2009      Mathieu Desnoyers
 * Copyright (c) 2010      Paolo Bonzini
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 * Code inspired from libuatomic_ops-1.2, inherited in part from the
 * Boehm-Demers-Weiser conservative garbage collector.
 */

#include <urcu/compiler.h>
#include <urcu/system.h>

#ifdef __cplusplus
extern "C" {
#endif

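/*
 * Generic uatomic_*() primitives for liburcu: each definition below is
 * guarded by #ifndef, so an architecture header that already provides an
 * operation keeps its own version, and only the missing ones fall back to
 * the GCC __sync_*() builtins or to uatomic_cmpxchg() retry loops.
 */
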
#ifndef uatomic_set
#define uatomic_set(addr, v)	CMM_STORE_SHARED(*(addr), (v))
#endif

#ifndef uatomic_read
#define uatomic_read(addr)	CMM_LOAD_SHARED(*(addr))
#endif

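/*
 * _uatomic_link_error() is the generic "unsupported size" handler: calls
 * with a size handled by the switch statements below are resolved at
 * compile time and the error path is optimized out.  When building with
 * optimizations, an unsupported size therefore shows up as a link error
 * against this never-defined symbol; without optimizations (or with
 * UATOMIC_NO_LINK_ERROR), it traps at run time instead.
 */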
#if !defined __OPTIMIZE__ || defined UATOMIC_NO_LINK_ERROR
static inline __attribute__((always_inline))
void _uatomic_link_error()
{
#ifdef ILLEGAL_INSTR
	/*
	 * Generate an illegal instruction: this cannot be caught with
	 * linker tricks when optimizations are disabled.
	 */
	__asm__ __volatile__(ILLEGAL_INSTR);
#else
	__builtin_trap();
#endif
}

#else /* #if !defined __OPTIMIZE__ || defined UATOMIC_NO_LINK_ERROR */
extern void _uatomic_link_error();
#endif /* #else #if !defined __OPTIMIZE__ || defined UATOMIC_NO_LINK_ERROR */

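/*
 * Per-size support: the 1- and 2-byte variants are only compiled in when
 * the architecture header defines UATOMIC_HAS_ATOMIC_BYTE and
 * UATOMIC_HAS_ATOMIC_SHORT, and the 8-byte variants only exist on 64-bit
 * builds (CAA_BITS_PER_LONG == 64).  All helpers dispatch on
 * sizeof(*(addr)) and funnel unsupported sizes into _uatomic_link_error().
 */
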
/* cmpxchg */

#ifndef uatomic_cmpxchg
static inline __attribute__((always_inline))
unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
			       unsigned long _new, int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
		return __sync_val_compare_and_swap_1(addr, old, _new);
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
		return __sync_val_compare_and_swap_2(addr, old, _new);
#endif
	case 4:
		return __sync_val_compare_and_swap_4(addr, old, _new);
#if (CAA_BITS_PER_LONG == 64)
	case 8:
		return __sync_val_compare_and_swap_8(addr, old, _new);
#endif
	}
	_uatomic_link_error();
	return 0;
}


#define uatomic_cmpxchg(addr, old, _new)				      \
	((__typeof__(*(addr))) _uatomic_cmpxchg((addr), (unsigned long)(old),\
						(unsigned long)(_new),	      \
						sizeof(*(addr))))


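/*
 * Illustrative use (not part of this header), assuming a caller-side
 * counter variable: uatomic_cmpxchg() returns the value found at the
 * address, so success is detected by comparing that return value with the
 * expected old value:
 *
 *	unsigned long seen, expected = uatomic_read(&counter);
 *
 *	seen = uatomic_cmpxchg(&counter, expected, expected + 1);
 *	if (seen == expected)
 *		... the update was applied ...
 */
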
/* uatomic_and */

#ifndef uatomic_and
static inline __attribute__((always_inline))
void _uatomic_and(void *addr, unsigned long val,
		  int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
		__sync_and_and_fetch_1(addr, val);
		return;
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
		__sync_and_and_fetch_2(addr, val);
		return;
#endif
	case 4:
		__sync_and_and_fetch_4(addr, val);
		return;
#if (CAA_BITS_PER_LONG == 64)
	case 8:
		__sync_and_and_fetch_8(addr, val);
		return;
#endif
	}
	_uatomic_link_error();
}

#define uatomic_and(addr, v)			\
	(_uatomic_and((addr),			\
		      (unsigned long)(v),	\
		      sizeof(*(addr))))
#endif

/* uatomic_or */

#ifndef uatomic_or
static inline __attribute__((always_inline))
void _uatomic_or(void *addr, unsigned long val,
		 int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
		__sync_or_and_fetch_1(addr, val);
		return;
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
		__sync_or_and_fetch_2(addr, val);
		return;
#endif
	case 4:
		__sync_or_and_fetch_4(addr, val);
		return;
#if (CAA_BITS_PER_LONG == 64)
	case 8:
		__sync_or_and_fetch_8(addr, val);
		return;
#endif
	}
	_uatomic_link_error();
}

#define uatomic_or(addr, v)			\
	(_uatomic_or((addr),			\
		     (unsigned long)(v),	\
		     sizeof(*(addr))))
#endif

/* uatomic_add_return */

#ifndef uatomic_add_return
static inline __attribute__((always_inline))
unsigned long _uatomic_add_return(void *addr, unsigned long val,
				  int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
		return __sync_add_and_fetch_1(addr, val);
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
		return __sync_add_and_fetch_2(addr, val);
#endif
	case 4:
		return __sync_add_and_fetch_4(addr, val);
#if (CAA_BITS_PER_LONG == 64)
	case 8:
		return __sync_add_and_fetch_8(addr, val);
#endif
	}
	_uatomic_link_error();
	return 0;
}


#define uatomic_add_return(addr, v)					    \
	((__typeof__(*(addr))) _uatomic_add_return((addr),		    \
						   (unsigned long)(v),	    \
						   sizeof(*(addr))))
#endif /* #ifndef uatomic_add_return */

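/*
 * Note: uatomic_add_return() has add-and-fetch semantics, i.e. it returns
 * the value of the target *after* the addition has been applied.
 */
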
#ifndef uatomic_xchg
/* xchg */

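/*
 * Exchange is emulated with a compare-and-swap retry loop built on the
 * __sync_bool_compare_and_swap_N() builtins: reread the current value
 * until the CAS installs the new one, then return the value that was
 * replaced.  (A presumable reason for not using __sync_lock_test_and_set()
 * here is that it is only guaranteed to be an acquire barrier and is not a
 * full exchange on every target.)
 */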
static inline __attribute__((always_inline))
unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
	{
		unsigned char old;

		do {
			old = uatomic_read((unsigned char *)addr);
		} while (!__sync_bool_compare_and_swap_1(addr, old, val));

		return old;
	}
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
	{
		unsigned short old;

		do {
			old = uatomic_read((unsigned short *)addr);
		} while (!__sync_bool_compare_and_swap_2(addr, old, val));

		return old;
	}
#endif
	case 4:
	{
		unsigned int old;

		do {
			old = uatomic_read((unsigned int *)addr);
		} while (!__sync_bool_compare_and_swap_4(addr, old, val));

		return old;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long old;

		do {
			old = uatomic_read((unsigned long *)addr);
		} while (!__sync_bool_compare_and_swap_8(addr, old, val));

		return old;
	}
#endif
	}
	_uatomic_link_error();
	return 0;
}

#define uatomic_xchg(addr, v)						    \
	((__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \
						 sizeof(*(addr))))
#endif /* #ifndef uatomic_xchg */

#else /* #ifndef uatomic_cmpxchg */

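/*
 * The architecture header provided uatomic_cmpxchg(); any primitive it did
 * not also provide is emulated below with a cmpxchg retry loop.
 */
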
#ifndef uatomic_and
/* uatomic_and */

static inline __attribute__((always_inline))
void _uatomic_and(void *addr, unsigned long val, int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
	{
		unsigned char old, oldt;

		oldt = uatomic_read((unsigned char *)addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old & val, 1);
		} while (oldt != old);

		return;
	}
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
	{
		unsigned short old, oldt;

		oldt = uatomic_read((unsigned short *)addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old & val, 2);
		} while (oldt != old);

		return;
	}
#endif
	case 4:
	{
		unsigned int old, oldt;

		oldt = uatomic_read((unsigned int *)addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old & val, 4);
		} while (oldt != old);

		return;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long old, oldt;

		oldt = uatomic_read((unsigned long *)addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old & val, 8);
		} while (oldt != old);

		return;
	}
#endif
	}
	_uatomic_link_error();
}

#define uatomic_and(addr, v)			\
	(_uatomic_and((addr),			\
		      (unsigned long)(v),	\
		      sizeof(*(addr))))
#endif /* #ifndef uatomic_and */

#ifndef uatomic_or
/* uatomic_or */

static inline __attribute__((always_inline))
void _uatomic_or(void *addr, unsigned long val, int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
	{
		unsigned char old, oldt;

		oldt = uatomic_read((unsigned char *)addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old | val, 1);
		} while (oldt != old);

		return;
	}
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
	{
		unsigned short old, oldt;

		oldt = uatomic_read((unsigned short *)addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old | val, 2);
		} while (oldt != old);

		return;
	}
#endif
	case 4:
	{
		unsigned int old, oldt;

		oldt = uatomic_read((unsigned int *)addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old | val, 4);
		} while (oldt != old);

		return;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long old, oldt;

		oldt = uatomic_read((unsigned long *)addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old | val, 8);
		} while (oldt != old);

		return;
	}
#endif
	}
	_uatomic_link_error();
}

#define uatomic_or(addr, v)			\
	(_uatomic_or((addr),			\
		     (unsigned long)(v),	\
		     sizeof(*(addr))))
#endif /* #ifndef uatomic_or */

#ifndef uatomic_add_return
/* uatomic_add_return */

static inline __attribute__((always_inline))
unsigned long _uatomic_add_return(void *addr, unsigned long val, int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
	{
		unsigned char old, oldt;

		oldt = uatomic_read((unsigned char *)addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old + val, 1);
		} while (oldt != old);

		return old + val;
	}
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
	{
		unsigned short old, oldt;

		oldt = uatomic_read((unsigned short *)addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old + val, 2);
		} while (oldt != old);

		return old + val;
	}
#endif
	case 4:
	{
		unsigned int old, oldt;

		oldt = uatomic_read((unsigned int *)addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old + val, 4);
		} while (oldt != old);

		return old + val;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long old, oldt;

		oldt = uatomic_read((unsigned long *)addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, old + val, 8);
		} while (oldt != old);

		return old + val;
	}
#endif
	}
	_uatomic_link_error();
	return 0;
}

#define uatomic_add_return(addr, v)					    \
	((__typeof__(*(addr))) _uatomic_add_return((addr),		    \
						   (unsigned long)(v),	    \
						   sizeof(*(addr))))
#endif /* #ifndef uatomic_add_return */

#ifndef uatomic_xchg
/* xchg */

static inline __attribute__((always_inline))
unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
{
	switch (len) {
#ifdef UATOMIC_HAS_ATOMIC_BYTE
	case 1:
	{
		unsigned char old, oldt;

		oldt = uatomic_read((unsigned char *)addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, val, 1);
		} while (oldt != old);

		return old;
	}
#endif
#ifdef UATOMIC_HAS_ATOMIC_SHORT
	case 2:
	{
		unsigned short old, oldt;

		oldt = uatomic_read((unsigned short *)addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, val, 2);
		} while (oldt != old);

		return old;
	}
#endif
	case 4:
	{
		unsigned int old, oldt;

		oldt = uatomic_read((unsigned int *)addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, val, 4);
		} while (oldt != old);

		return old;
	}
#if (CAA_BITS_PER_LONG == 64)
	case 8:
	{
		unsigned long old, oldt;

		oldt = uatomic_read((unsigned long *)addr);
		do {
			old = oldt;
			oldt = _uatomic_cmpxchg(addr, old, val, 8);
		} while (oldt != old);

		return old;
	}
#endif
	}
	_uatomic_link_error();
	return 0;
}

#define uatomic_xchg(addr, v)						    \
	((__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \
						 sizeof(*(addr))))
#endif /* #ifndef uatomic_xchg */

#endif /* #else #ifndef uatomic_cmpxchg */

/* uatomic_sub_return, uatomic_add, uatomic_sub, uatomic_inc, uatomic_dec */

#ifndef uatomic_add
#define uatomic_add(addr, v)		(void)uatomic_add_return((addr), (v))
#endif

#define uatomic_sub_return(addr, v)	uatomic_add_return((addr), -(v))
#define uatomic_sub(addr, v)		uatomic_add((addr), -(v))

#ifndef uatomic_inc
#define uatomic_inc(addr)		uatomic_add((addr), 1)
#endif

#ifndef uatomic_dec
#define uatomic_dec(addr)		uatomic_add((addr), -1)
#endif

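/*
 * Illustrative use of the derived helpers (not part of this header),
 * assuming a caller-side reference count of an integer type supported
 * above:
 *
 *	unsigned long refcount;
 *
 *	uatomic_set(&refcount, 1);
 *	uatomic_inc(&refcount);				// refcount == 2
 *	if (uatomic_sub_return(&refcount, 1) == 0)	// returns new value
 *		... the last reference was just dropped ...
 */
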
#ifdef __cplusplus
}
#endif

#endif /* _URCU_UATOMIC_GENERIC_H */