include_HEADERS = urcu.h $(top_srcdir)/urcu-*.h
nobase_dist_include_HEADERS = urcu/compiler.h urcu/hlist.h urcu/list.h \
urcu/rculist.h urcu/rcuhlist.h urcu/system.h urcu/urcu-futex.h \
- urcu/uatomic_generic.h urcu/arch/generic.h urcu/wfstack.h \
+ urcu/uatomic/generic.h urcu/arch/generic.h urcu/wfstack.h \
urcu/wfqueue.h urcu/rculfstack.h urcu/rculfqueue.h \
urcu/urcu_ref.h urcu/map/*.h urcu/static/*.h
-nobase_nodist_include_HEADERS = urcu/arch.h urcu/uatomic_arch.h urcu/config.h
+nobase_nodist_include_HEADERS = urcu/arch.h urcu/uatomic.h urcu/config.h
-EXTRA_DIST = $(top_srcdir)/urcu/arch/*.h $(top_srcdir)/urcu/uatomic_arch_*.h \
+EXTRA_DIST = $(top_srcdir)/urcu/arch/*.h $(top_srcdir)/urcu/uatomic/*.h \
gpl-2.0.txt lgpl-2.1.txt lgpl-relicensing.txt \
README LICENSE compat_arch_x86.c
#include <pthread.h>
#include <signal.h>
#include <assert.h>
-#include <urcu/uatomic_arch.h>
+#include <urcu/uatomic.h>
/*
* It does not really matter if the constructor is called before using
)
fi
-UATOMICSRC=urcu/uatomic_arch_$ARCHTYPE.h
+UATOMICSRC=urcu/uatomic/$ARCHTYPE.h
ARCHSRC=urcu/arch/$ARCHTYPE.h
if test "x$ARCHTYPE" != xx86 -a "x$ARCHTYPE" != xppc; then
APISRC=tests/api_gcc.h
AC_CONFIG_LINKS([
urcu/arch.h:$ARCHSRC
- urcu/uatomic_arch.h:$UATOMICSRC
+ urcu/uatomic.h:$UATOMICSRC
tests/api.h:$APISRC
])
AC_CONFIG_FILES([
#include <stdio.h>
#include <assert.h>
-#include <urcu/uatomic_arch.h>
+#include <urcu/uatomic.h>
struct testvals {
unsigned char c;
#include <urcu-bp.h>
#endif
-#include <urcu/uatomic_arch.h>
+#include <urcu/uatomic.h>
#include <urcu/rculist.h>
#include "rcutorture.h"
#include <urcu/compiler.h>
#include <urcu/arch.h>
-#include <urcu/uatomic_arch.h>
+#include <urcu/uatomic.h>
#include <urcu/list.h>
#include <urcu/system.h>
* IBM's contributions to this file may be relicensed under LGPLv2 or later.
*/
-#include <urcu/uatomic_arch.h>
+#include <urcu/uatomic.h>
#include "urcu/static/urcu-pointer.h"
/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#include <urcu/compiler.h>
#include <urcu/arch.h>
-#include <urcu/uatomic_arch.h>
+#include <urcu/uatomic.h>
#ifdef __cplusplus
extern "C" {
*/
#include <urcu/urcu_ref.h>
-#include <urcu/uatomic_arch.h>
+#include <urcu/uatomic.h>
#include <assert.h>
/* A urcu implementation header should be already included. */
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
-#include <urcu/uatomic_arch.h>
+#include <urcu/uatomic.h>
/* A urcu implementation header should be already included. */
#ifdef __cplusplus
#include <urcu/compiler.h>
#include <urcu/arch.h>
#include <urcu/system.h>
-#include <urcu/uatomic_arch.h>
+#include <urcu/uatomic.h>
#include <urcu/list.h>
/*
#include <urcu/compiler.h>
#include <urcu/arch.h>
#include <urcu/system.h>
-#include <urcu/uatomic_arch.h>
+#include <urcu/uatomic.h>
#ifdef __cplusplus
extern "C" {
#include <urcu/compiler.h>
#include <urcu/arch.h>
#include <urcu/system.h>
-#include <urcu/uatomic_arch.h>
+#include <urcu/uatomic.h>
#include <urcu/list.h>
#include <urcu/urcu-futex.h>
#include <urcu/compiler.h>
#include <urcu/arch.h>
#include <urcu/system.h>
-#include <urcu/uatomic_arch.h>
+#include <urcu/uatomic.h>
#include <urcu/list.h>
#include <urcu/urcu-futex.h>
#include <assert.h>
#include <poll.h>
#include <urcu/compiler.h>
-#include <urcu/uatomic_arch.h>
+#include <urcu/uatomic.h>
#ifdef __cplusplus
extern "C" {
#include <assert.h>
#include <poll.h>
#include <urcu/compiler.h>
-#include <urcu/uatomic_arch.h>
+#include <urcu/uatomic.h>
#ifdef __cplusplus
extern "C" {
--- /dev/null
+#ifndef _URCU_UATOMIC_ARCH_ALPHA_H
+#define _URCU_UATOMIC_ARCH_ALPHA_H
+
+/*
+ * Atomic exchange operations for the Alpha architecture. Let GCC do it.
+ *
+ * Copyright (c) 2010 Paolo Bonzini <pbonzini@redhat.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <urcu/compiler.h>
+#include <urcu/system.h>
+#include <urcu/uatomic/generic.h>
+
+#endif /* _URCU_UATOMIC_ARCH_ALPHA_H */
--- /dev/null
+#ifndef _URCU_ARCH_UATOMIC_ARM_H
+#define _URCU_ARCH_UATOMIC_ARM_H
+
+/*
+ * Atomics for ARM. This approach is usable on kernels back to 2.6.15.
+ *
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
+ * Copyright (c) 2009 Mathieu Desnoyers
+ * Copyright (c) 2010 Paul E. McKenney, IBM Corporation
+ * (Adapted from uatomic_arch_ppc.h)
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ * Code inspired from libuatomic_ops-1.2, inherited in part from the
+ * Boehm-Demers-Weiser conservative garbage collector.
+ */
+
+#include <urcu/compiler.h>
+#include <urcu/system.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* xchg */
+#define uatomic_xchg(addr, v) __sync_lock_test_and_set(addr, v)
+
+#ifdef __cplusplus
+}
+#endif
+
+#include <urcu/uatomic/generic.h>
+
+#endif /* _URCU_ARCH_UATOMIC_ARM_H */
--- /dev/null
+#ifndef _URCU_ARCH_UATOMIC_GCC_H
+#define _URCU_ARCH_UATOMIC_GCC_H
+
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
+ * Copyright (c) 2009 Mathieu Desnoyers
+ * Copyright (c) 2010 Paul E. McKenney, IBM Corporation
+ * (Adapted from uatomic_arch_ppc.h)
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ * Code inspired from libuatomic_ops-1.2, inherited in part from the
+ * Boehm-Demers-Weiser conservative garbage collector.
+ */
+
+#include <urcu/compiler.h>
+#include <urcu/system.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * If your platform doesn't have a full set of atomics, you will need
+ * a separate urcu/uatomic/*.h file for your architecture. Otherwise,
+ * just rely on the definitions in uatomic/generic.h.
+ */
+#define UATOMIC_HAS_ATOMIC_BYTE
+#define UATOMIC_HAS_ATOMIC_SHORT
+
+#ifdef __cplusplus
+}
+#endif
+
+#include <urcu/uatomic/generic.h>
+
+#endif /* _URCU_ARCH_UATOMIC_GCC_H */
--- /dev/null
+#ifndef _URCU_UATOMIC_GENERIC_H
+#define _URCU_UATOMIC_GENERIC_H
+
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
+ * Copyright (c) 2009 Mathieu Desnoyers
+ * Copyright (c) 2010 Paolo Bonzini
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ * Code inspired from libuatomic_ops-1.2, inherited in part from the
+ * Boehm-Demers-Weiser conservative garbage collector.
+ */
+
+#include <urcu/compiler.h>
+#include <urcu/system.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef uatomic_set
+#define uatomic_set(addr, v) CMM_STORE_SHARED(*(addr), (v))
+#endif
+
+#ifndef uatomic_read
+#define uatomic_read(addr) CMM_LOAD_SHARED(*(addr))
+#endif
+
+#if !defined __OPTIMIZE__ || defined UATOMIC_NO_LINK_ERROR
+static inline __attribute__((always_inline))
+void _uatomic_link_error()
+{
+#ifdef ILLEGAL_INSTR
+ /* generate an illegal instruction. Cannot catch this with linker tricks
+ * when optimizations are disabled. */
+ __asm__ __volatile__(ILLEGAL_INSTR);
+#else
+ __builtin_trap ();
+#endif
+}
+
+#else /* #if !defined __OPTIMIZE__ || defined UATOMIC_NO_LINK_ERROR */
+extern void _uatomic_link_error ();
+#endif /* #else #if !defined __OPTIMIZE__ || defined UATOMIC_NO_LINK_ERROR */
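+
+/*
+ * Size-checking scheme: when the callers below are built with optimization,
+ * an unsupported operand size leaves a call to the undefined extern symbol
+ * _uatomic_link_error(), so the mistake is reported at link time; for
+ * supported sizes the call is unreachable and optimized away. Without
+ * optimization the call cannot be proven dead, so the inline variant above
+ * traps at run time instead (ILLEGAL_INSTR or __builtin_trap()).
+ */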
+
+/* cmpxchg */
+
+#ifndef uatomic_cmpxchg
+static inline __attribute__((always_inline))
+unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
+ unsigned long _new, int len)
+{
+ switch (len) {
+#ifdef UATOMIC_HAS_ATOMIC_BYTE
+ case 1:
+ return __sync_val_compare_and_swap_1(addr, old, _new);
+#endif
+#ifdef UATOMIC_HAS_ATOMIC_SHORT
+ case 2:
+ return __sync_val_compare_and_swap_2(addr, old, _new);
+#endif
+ case 4:
+ return __sync_val_compare_and_swap_4(addr, old, _new);
+#if (CAA_BITS_PER_LONG == 64)
+ case 8:
+ return __sync_val_compare_and_swap_8(addr, old, _new);
+#endif
+ }
+ _uatomic_link_error();
+ return 0;
+}
+
+
+#define uatomic_cmpxchg(addr, old, _new) \
+ ((__typeof__(*(addr))) _uatomic_cmpxchg((addr), (unsigned long)(old),\
+ (unsigned long)(_new), \
+ sizeof(*(addr))))
+
+
+/* uatomic_and */
+
+#ifndef uatomic_and
+static inline __attribute__((always_inline))
+void _uatomic_and(void *addr, unsigned long val,
+ int len)
+{
+ switch (len) {
+#ifdef UATOMIC_HAS_ATOMIC_BYTE
+	case 1:
+		__sync_and_and_fetch_1(addr, val);
+		return;
+#endif
+#ifdef UATOMIC_HAS_ATOMIC_SHORT
+	case 2:
+		__sync_and_and_fetch_2(addr, val);
+		return;
+#endif
+	case 4:
+		__sync_and_and_fetch_4(addr, val);
+		return;
+#if (CAA_BITS_PER_LONG == 64)
+	case 8:
+		__sync_and_and_fetch_8(addr, val);
+		return;
+#endif
+	}
+	_uatomic_link_error();
+}
+
+#define uatomic_and(addr, v) \
+ (_uatomic_and((addr), \
+ (unsigned long)(v), \
+ sizeof(*(addr))))
+#endif
+
+/* uatomic_or */
+
+#ifndef uatomic_or
+static inline __attribute__((always_inline))
+void _uatomic_or(void *addr, unsigned long val,
+ int len)
+{
+ switch (len) {
+#ifdef UATOMIC_HAS_ATOMIC_BYTE
+	case 1:
+		__sync_or_and_fetch_1(addr, val);
+		return;
+#endif
+#ifdef UATOMIC_HAS_ATOMIC_SHORT
+	case 2:
+		__sync_or_and_fetch_2(addr, val);
+		return;
+#endif
+	case 4:
+		__sync_or_and_fetch_4(addr, val);
+		return;
+#if (CAA_BITS_PER_LONG == 64)
+	case 8:
+		__sync_or_and_fetch_8(addr, val);
+		return;
+#endif
+	}
+	_uatomic_link_error();
+}
+
+#define uatomic_or(addr, v) \
+ (_uatomic_or((addr), \
+ (unsigned long)(v), \
+ sizeof(*(addr))))
+#endif
+
+/* uatomic_add_return */
+
+#ifndef uatomic_add_return
+static inline __attribute__((always_inline))
+unsigned long _uatomic_add_return(void *addr, unsigned long val,
+ int len)
+{
+ switch (len) {
+#ifdef UATOMIC_HAS_ATOMIC_BYTE
+ case 1:
+ return __sync_add_and_fetch_1(addr, val);
+#endif
+#ifdef UATOMIC_HAS_ATOMIC_SHORT
+ case 2:
+ return __sync_add_and_fetch_2(addr, val);
+#endif
+ case 4:
+ return __sync_add_and_fetch_4(addr, val);
+#if (CAA_BITS_PER_LONG == 64)
+ case 8:
+ return __sync_add_and_fetch_8(addr, val);
+#endif
+ }
+ _uatomic_link_error();
+ return 0;
+}
+
+
+#define uatomic_add_return(addr, v) \
+ ((__typeof__(*(addr))) _uatomic_add_return((addr), \
+ (unsigned long)(v), \
+ sizeof(*(addr))))
+#endif /* #ifndef uatomic_add_return */
+
+#ifndef uatomic_xchg
+/* xchg */
+
+static inline __attribute__((always_inline))
+unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
+{
+ switch (len) {
+#ifdef UATOMIC_HAS_ATOMIC_BYTE
+ case 1:
+ {
+ unsigned char old;
+
+ do {
+ old = uatomic_read((unsigned char *)addr);
+ } while (!__sync_bool_compare_and_swap_1(addr, old, val));
+
+ return old;
+ }
+#endif
+#ifdef UATOMIC_HAS_ATOMIC_SHORT
+ case 2:
+ {
+ unsigned short old;
+
+ do {
+ old = uatomic_read((unsigned short *)addr);
+ } while (!__sync_bool_compare_and_swap_2(addr, old, val));
+
+ return old;
+ }
+#endif
+ case 4:
+ {
+ unsigned int old;
+
+ do {
+ old = uatomic_read((unsigned int *)addr);
+ } while (!__sync_bool_compare_and_swap_4(addr, old, val));
+
+ return old;
+ }
+#if (CAA_BITS_PER_LONG == 64)
+ case 8:
+ {
+ unsigned long old;
+
+ do {
+ old = uatomic_read((unsigned long *)addr);
+ } while (!__sync_bool_compare_and_swap_8(addr, old, val));
+
+ return old;
+ }
+#endif
+ }
+ _uatomic_link_error();
+ return 0;
+}
+
+#define uatomic_xchg(addr, v) \
+ ((__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \
+ sizeof(*(addr))))
+#endif /* #ifndef uatomic_xchg */
+
+#else /* #ifndef uatomic_cmpxchg */
+
+#ifndef uatomic_and
+/* uatomic_and */
+
+static inline __attribute__((always_inline))
+void _uatomic_and(void *addr, unsigned long val, int len)
+{
+ switch (len) {
+#ifdef UATOMIC_HAS_ATOMIC_BYTE
+ case 1:
+ {
+ unsigned char old, oldt;
+
+ oldt = uatomic_read((unsigned char *)addr);
+ do {
+ old = oldt;
+ oldt = _uatomic_cmpxchg(addr, old, old & val, 1);
+ } while (oldt != old);
+
+		return;
+	}
+#endif
+#ifdef UATOMIC_HAS_ATOMIC_SHORT
+ case 2:
+ {
+ unsigned short old, oldt;
+
+ oldt = uatomic_read((unsigned short *)addr);
+ do {
+ old = oldt;
+ oldt = _uatomic_cmpxchg(addr, old, old & val, 2);
+ } while (oldt != old);
+
+		return;
+	}
+#endif
+ case 4:
+ {
+ unsigned int old, oldt;
+
+ oldt = uatomic_read((unsigned int *)addr);
+ do {
+ old = oldt;
+ oldt = _uatomic_cmpxchg(addr, old, old & val, 4);
+ } while (oldt != old);
+
+		return;
+	}
+#if (CAA_BITS_PER_LONG == 64)
+ case 8:
+ {
+ unsigned long old, oldt;
+
+ oldt = uatomic_read((unsigned long *)addr);
+ do {
+ old = oldt;
+ oldt = _uatomic_cmpxchg(addr, old, old & val, 8);
+ } while (oldt != old);
+
+		return;
+	}
+#endif
+ }
+ _uatomic_link_error();
+}
+
+#define uatomic_and(addr, v)		\
+	(_uatomic_and((addr),		\
+		(unsigned long)(v),	\
+		sizeof(*(addr))))
+#endif /* #ifndef uatomic_and */
+
+#ifndef uatomic_or
+/* uatomic_or */
+
+static inline __attribute__((always_inline))
+void _uatomic_or(void *addr, unsigned long val, int len)
+{
+ switch (len) {
+#ifdef UATOMIC_HAS_ATOMIC_BYTE
+ case 1:
+ {
+ unsigned char old, oldt;
+
+ oldt = uatomic_read((unsigned char *)addr);
+ do {
+ old = oldt;
+ oldt = _uatomic_cmpxchg(addr, old, old | val, 1);
+ } while (oldt != old);
+
+		return;
+	}
+#endif
+#ifdef UATOMIC_HAS_ATOMIC_SHORT
+ case 2:
+ {
+ unsigned short old, oldt;
+
+ oldt = uatomic_read((unsigned short *)addr);
+ do {
+ old = oldt;
+ oldt = _uatomic_cmpxchg(addr, old, old | val, 2);
+ } while (oldt != old);
+
+		return;
+	}
+#endif
+ case 4:
+ {
+ unsigned int old, oldt;
+
+ oldt = uatomic_read((unsigned int *)addr);
+ do {
+ old = oldt;
+ oldt = _uatomic_cmpxchg(addr, old, old | val, 4);
+ } while (oldt != old);
+
+		return;
+	}
+#if (CAA_BITS_PER_LONG == 64)
+ case 8:
+ {
+ unsigned long old, oldt;
+
+ oldt = uatomic_read((unsigned long *)addr);
+ do {
+ old = oldt;
+ oldt = _uatomic_cmpxchg(addr, old, old | val, 8);
+ } while (oldt != old);
+
+		return;
+	}
+#endif
+ }
+ _uatomic_link_error();
+}
+
+#define uatomic_or(addr, v)		\
+	(_uatomic_or((addr),		\
+		(unsigned long)(v),	\
+		sizeof(*(addr))))
+#endif /* #ifndef uatomic_or */
+
+#ifndef uatomic_add_return
+/* uatomic_add_return */
+
+static inline __attribute__((always_inline))
+unsigned long _uatomic_add_return(void *addr, unsigned long val, int len)
+{
+ switch (len) {
+#ifdef UATOMIC_HAS_ATOMIC_BYTE
+ case 1:
+ {
+ unsigned char old, oldt;
+
+ oldt = uatomic_read((unsigned char *)addr);
+ do {
+ old = oldt;
+ oldt = uatomic_cmpxchg((unsigned char *)addr,
+ old, old + val);
+ } while (oldt != old);
+
+ return old + val;
+ }
+#endif
+#ifdef UATOMIC_HAS_ATOMIC_SHORT
+ case 2:
+ {
+ unsigned short old, oldt;
+
+ oldt = uatomic_read((unsigned short *)addr);
+ do {
+ old = oldt;
+ oldt = uatomic_cmpxchg((unsigned short *)addr,
+ old, old + val);
+ } while (oldt != old);
+
+ return old + val;
+ }
+#endif
+ case 4:
+ {
+ unsigned int old, oldt;
+
+ oldt = uatomic_read((unsigned int *)addr);
+ do {
+ old = oldt;
+ oldt = uatomic_cmpxchg((unsigned int *)addr,
+ old, old + val);
+ } while (oldt != old);
+
+ return old + val;
+ }
+#if (CAA_BITS_PER_LONG == 64)
+ case 8:
+ {
+ unsigned long old, oldt;
+
+ oldt = uatomic_read((unsigned long *)addr);
+ do {
+ old = oldt;
+ oldt = uatomic_cmpxchg((unsigned long *)addr,
+ old, old + val);
+ } while (oldt != old);
+
+ return old + val;
+ }
+#endif
+ }
+ _uatomic_link_error();
+ return 0;
+}
+
+#define uatomic_add_return(addr, v) \
+ ((__typeof__(*(addr))) _uatomic_add_return((addr), \
+ (unsigned long)(v), \
+ sizeof(*(addr))))
+#endif /* #ifndef uatomic_add_return */
+
+#ifndef uatomic_xchg
+/* xchg */
+
+static inline __attribute__((always_inline))
+unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
+{
+ switch (len) {
+#ifdef UATOMIC_HAS_ATOMIC_BYTE
+ case 1:
+ {
+ unsigned char old, oldt;
+
+ oldt = uatomic_read((unsigned char *)addr);
+ do {
+ old = oldt;
+ oldt = uatomic_cmpxchg((unsigned char *)addr,
+ old, val);
+ } while (oldt != old);
+
+ return old;
+ }
+#endif
+#ifdef UATOMIC_HAS_ATOMIC_SHORT
+ case 2:
+ {
+ unsigned short old, oldt;
+
+ oldt = uatomic_read((unsigned short *)addr);
+ do {
+ old = oldt;
+ oldt = uatomic_cmpxchg((unsigned short *)addr,
+ old, val);
+ } while (oldt != old);
+
+ return old;
+ }
+#endif
+ case 4:
+ {
+ unsigned int old, oldt;
+
+ oldt = uatomic_read((unsigned int *)addr);
+ do {
+ old = oldt;
+ oldt = uatomic_cmpxchg((unsigned int *)addr,
+ old, val);
+ } while (oldt != old);
+
+ return old;
+ }
+#if (CAA_BITS_PER_LONG == 64)
+ case 8:
+ {
+ unsigned long old, oldt;
+
+ oldt = uatomic_read((unsigned long *)addr);
+ do {
+ old = oldt;
+ oldt = uatomic_cmpxchg((unsigned long *)addr,
+ old, val);
+ } while (oldt != old);
+
+ return old;
+ }
+#endif
+ }
+ _uatomic_link_error();
+ return 0;
+}
+
+#define uatomic_xchg(addr, v) \
+ ((__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \
+ sizeof(*(addr))))
+#endif /* #ifndef uatomic_xchg */
+
+#endif /* #else #ifndef uatomic_cmpxchg */
+
+/* uatomic_sub_return, uatomic_add, uatomic_sub, uatomic_inc, uatomic_dec */
+
+#ifndef uatomic_add
+#define uatomic_add(addr, v) (void)uatomic_add_return((addr), (v))
+#endif
+
+#define uatomic_sub_return(addr, v) uatomic_add_return((addr), -(v))
+#define uatomic_sub(addr, v) uatomic_add((addr), -(v))
+
+#ifndef uatomic_inc
+#define uatomic_inc(addr) uatomic_add((addr), 1)
+#endif
+
+#ifndef uatomic_dec
+#define uatomic_dec(addr) uatomic_add((addr), -1)
+#endif
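+
+/*
+ * Minimal usage sketch (illustrative): every operation takes a pointer to an
+ * aligned 1-, 2-, 4- or 8-byte integer object (subject to the
+ * UATOMIC_HAS_ATOMIC_* defines above), for example:
+ *
+ *	static unsigned long refcount;
+ *
+ *	uatomic_set(&refcount, 1);
+ *	uatomic_inc(&refcount);
+ *	if (uatomic_cmpxchg(&refcount, 2, 10) == 2)
+ *		;	/* refcount was 2, it is now 10 */
+ *	(void) uatomic_add_return(&refcount, -2);
+ */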
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _URCU_UATOMIC_GENERIC_H */
--- /dev/null
+#ifndef _URCU_ARCH_UATOMIC_PPC_H
+#define _URCU_ARCH_UATOMIC_PPC_H
+
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
+ * Copyright (c) 2009 Mathieu Desnoyers
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ * Code inspired from libuatomic_ops-1.2, inherited in part from the
+ * Boehm-Demers-Weiser conservative garbage collector.
+ */
+
+#include <urcu/compiler.h>
+#include <urcu/system.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef __NO_LWSYNC__
+#define LWSYNC_OPCODE "sync\n"
+#else
+#define LWSYNC_OPCODE "lwsync\n"
+#endif
+
+#define ILLEGAL_INSTR ".long 0xd00d00"
+
+/*
+ * Using an isync as the second barrier for exchange, to provide acquire
+ * semantics. According to uatomic_ops/sysdeps/gcc/powerpc.h, the
+ * documentation is "fairly explicit that this also has acquire semantics."
+ * Derived from AO_compare_and_swap(), with the comparison removed.
+ */
+
+/* xchg */
+
+static inline __attribute__((always_inline))
+unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
+{
+ switch (len) {
+ case 4:
+ {
+ unsigned int result;
+
+ __asm__ __volatile__(
+ LWSYNC_OPCODE
+ "1:\t" "lwarx %0,0,%1\n" /* load and reserve */
+ "stwcx. %2,0,%1\n" /* else store conditional */
+ "bne- 1b\n" /* retry if lost reservation */
+ "isync\n"
+ : "=&r"(result)
+ : "r"(addr), "r"(val)
+ : "memory", "cc");
+
+ return result;
+ }
+#if (CAA_BITS_PER_LONG == 64)
+ case 8:
+ {
+ unsigned long result;
+
+ __asm__ __volatile__(
+ LWSYNC_OPCODE
+ "1:\t" "ldarx %0,0,%1\n" /* load and reserve */
+ "stdcx. %2,0,%1\n" /* else store conditional */
+ "bne- 1b\n" /* retry if lost reservation */
+ "isync\n"
+ : "=&r"(result)
+ : "r"(addr), "r"(val)
+ : "memory", "cc");
+
+ return result;
+ }
+#endif
+ }
+ /* generate an illegal instruction. Cannot catch this with linker tricks
+ * when optimizations are disabled. */
+ __asm__ __volatile__(ILLEGAL_INSTR);
+ return 0;
+}
+
+#define uatomic_xchg(addr, v) \
+ ((__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \
+ sizeof(*(addr))))
+/* cmpxchg */
+
+static inline __attribute__((always_inline))
+unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
+ unsigned long _new, int len)
+{
+ switch (len) {
+ case 4:
+ {
+ unsigned int old_val;
+
+ __asm__ __volatile__(
+ LWSYNC_OPCODE
+ "1:\t" "lwarx %0,0,%1\n" /* load and reserve */
+ "cmpw %0,%3\n" /* if load is not equal to */
+ "bne 2f\n" /* old, fail */
+ "stwcx. %2,0,%1\n" /* else store conditional */
+ "bne- 1b\n" /* retry if lost reservation */
+ "isync\n"
+ "2:\n"
+ : "=&r"(old_val)
+ : "r"(addr), "r"((unsigned int)_new),
+ "r"((unsigned int)old)
+ : "memory", "cc");
+
+ return old_val;
+ }
+#if (CAA_BITS_PER_LONG == 64)
+ case 8:
+ {
+ unsigned long old_val;
+
+ __asm__ __volatile__(
+ LWSYNC_OPCODE
+ "1:\t" "ldarx %0,0,%1\n" /* load and reserve */
+ "cmpd %0,%3\n" /* if load is not equal to */
+ "bne 2f\n" /* old, fail */
+ "stdcx. %2,0,%1\n" /* else store conditional */
+ "bne- 1b\n" /* retry if lost reservation */
+ "isync\n"
+ "2:\n"
+ : "=&r"(old_val)
+ : "r"(addr), "r"((unsigned long)_new),
+ "r"((unsigned long)old)
+ : "memory", "cc");
+
+ return old_val;
+ }
+#endif
+ }
+ /* generate an illegal instruction. Cannot catch this with linker tricks
+ * when optimizations are disabled. */
+ __asm__ __volatile__(ILLEGAL_INSTR);
+ return 0;
+}
+
+
+#define uatomic_cmpxchg(addr, old, _new) \
+ ((__typeof__(*(addr))) _uatomic_cmpxchg((addr), (unsigned long)(old),\
+ (unsigned long)(_new), \
+ sizeof(*(addr))))
+
+/* uatomic_add_return */
+
+static inline __attribute__((always_inline))
+unsigned long _uatomic_add_return(void *addr, unsigned long val,
+ int len)
+{
+ switch (len) {
+ case 4:
+ {
+ unsigned int result;
+
+ __asm__ __volatile__(
+ LWSYNC_OPCODE
+ "1:\t" "lwarx %0,0,%1\n" /* load and reserve */
+ "add %0,%2,%0\n" /* add val to value loaded */
+ "stwcx. %0,0,%1\n" /* store conditional */
+ "bne- 1b\n" /* retry if lost reservation */
+ "isync\n"
+ : "=&r"(result)
+ : "r"(addr), "r"(val)
+ : "memory", "cc");
+
+ return result;
+ }
+#if (CAA_BITS_PER_LONG == 64)
+ case 8:
+ {
+ unsigned long result;
+
+ __asm__ __volatile__(
+ LWSYNC_OPCODE
+ "1:\t" "ldarx %0,0,%1\n" /* load and reserve */
+ "add %0,%2,%0\n" /* add val to value loaded */
+ "stdcx. %0,0,%1\n" /* store conditional */
+ "bne- 1b\n" /* retry if lost reservation */
+ "isync\n"
+ : "=&r"(result)
+ : "r"(addr), "r"(val)
+ : "memory", "cc");
+
+ return result;
+ }
+#endif
+ }
+ /* generate an illegal instruction. Cannot catch this with linker tricks
+ * when optimizations are disabled. */
+ __asm__ __volatile__(ILLEGAL_INSTR);
+ return 0;
+}
+
+
+#define uatomic_add_return(addr, v) \
+ ((__typeof__(*(addr))) _uatomic_add_return((addr), \
+ (unsigned long)(v), \
+ sizeof(*(addr))))
+
+#ifdef __cplusplus
+}
+#endif
+
+#include <urcu/uatomic/generic.h>
+
+#endif /* _URCU_ARCH_UATOMIC_PPC_H */
--- /dev/null
+#ifndef _URCU_UATOMIC_ARCH_S390_H
+#define _URCU_UATOMIC_ARCH_S390_H
+
+/*
+ * Atomic exchange operations for the S390 architecture. Based on information
+ * taken from the Principles of Operation Appendix A "Conditional Swapping
+ * Instructions (CS, CDS)".
+ *
+ * Copyright (c) 2009 Novell, Inc.
+ * Author: Jan Blunck <jblunck@suse.de>
+ * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <urcu/compiler.h>
+#include <urcu/system.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
+#define COMPILER_HAVE_SHORT_MEM_OPERAND
+#endif
+
+/*
+ * MEMOP assembler operand rules:
+ * - op refers to the MEMOP_IN operand
+ * - MEMOP_IN can expand to more than a single operand. Use it only at the
+ *   end of the operand list.
+ */
+
+#ifdef COMPILER_HAVE_SHORT_MEM_OPERAND
+
+#define MEMOP_OUT(addr) "=Q" (*(addr))
+#define MEMOP_IN(addr) "Q" (*(addr))
+#define MEMOP_REF(op) #op /* op refer to MEMOP_IN operand */
+
+#else /* !COMPILER_HAVE_SHORT_MEM_OPERAND */
+
+#define MEMOP_OUT(addr) "=m" (*(addr))
+#define MEMOP_IN(addr) "a" (addr), "m" (*(addr))
+#define MEMOP_REF(op) "0(" #op ")" /* op refer to MEMOP_IN operand */
+
+#endif /* !COMPILER_HAVE_SHORT_MEM_OPERAND */
+
+struct __uatomic_dummy {
+ unsigned long v[10];
+};
+#define __hp(x) ((struct __uatomic_dummy *)(x))
+
+/* xchg */
+
+static inline __attribute__((always_inline))
+unsigned long _uatomic_exchange(volatile void *addr, unsigned long val, int len)
+{
+ switch (len) {
+ case 4:
+ {
+ unsigned int old_val;
+
+ __asm__ __volatile__(
+ "0: cs %0,%2," MEMOP_REF(%3) "\n"
+ " brc 4,0b\n"
+ : "=&r" (old_val), MEMOP_OUT (__hp(addr))
+ : "r" (val), MEMOP_IN (__hp(addr))
+ : "memory", "cc");
+ return old_val;
+ }
+#if (CAA_BITS_PER_LONG == 64)
+ case 8:
+ {
+ unsigned long old_val;
+
+ __asm__ __volatile__(
+ "0: csg %0,%2," MEMOP_REF(%3) "\n"
+ " brc 4,0b\n"
+ : "=&r" (old_val), MEMOP_OUT (__hp(addr))
+ : "r" (val), MEMOP_IN (__hp(addr))
+ : "memory", "cc");
+ return old_val;
+ }
+#endif
+ default:
+ __asm__ __volatile__(".long 0xd00d00");
+ }
+
+ return 0;
+}
+
+#define uatomic_xchg(addr, v) \
+ (__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \
+ sizeof(*(addr)))
+
+/* cmpxchg */
+
+static inline __attribute__((always_inline))
+unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
+ unsigned long _new, int len)
+{
+ switch (len) {
+ case 4:
+ {
+ unsigned int old_val = (unsigned int)old;
+
+ __asm__ __volatile__(
+ " cs %0,%2," MEMOP_REF(%3) "\n"
+ : "+r" (old_val), MEMOP_OUT (__hp(addr))
+ : "r" (_new), MEMOP_IN (__hp(addr))
+ : "memory", "cc");
+ return old_val;
+ }
+#if (CAA_BITS_PER_LONG == 64)
+ case 8:
+ {
+ __asm__ __volatile__(
+ " csg %0,%2," MEMOP_REF(%3) "\n"
+ : "+r" (old), MEMOP_OUT (__hp(addr))
+ : "r" (_new), MEMOP_IN (__hp(addr))
+ : "memory", "cc");
+ return old;
+ }
+#endif
+ default:
+ __asm__ __volatile__(".long 0xd00d00");
+ }
+
+ return 0;
+}
+
+#define uatomic_cmpxchg(addr, old, _new) \
+ (__typeof__(*(addr))) _uatomic_cmpxchg((addr), \
+ (unsigned long)(old), \
+ (unsigned long)(_new), \
+ sizeof(*(addr)))
+
+#ifdef __cplusplus
+}
+#endif
+
+#include <urcu/uatomic/generic.h>
+
+#endif /* _URCU_UATOMIC_ARCH_S390_H */
--- /dev/null
+#ifndef _URCU_ARCH_UATOMIC_SPARC64_H
+#define _URCU_ARCH_UATOMIC_SPARC64_H
+
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved.
+ * Copyright (c) 2009 Mathieu Desnoyers
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ * Code inspired from libuatomic_ops-1.2, inherited in part from the
+ * Boehm-Demers-Weiser conservative garbage collector.
+ */
+
+#include <urcu/compiler.h>
+#include <urcu/system.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* cmpxchg */
+
+static inline __attribute__((always_inline))
+unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
+ unsigned long _new, int len)
+{
+ switch (len) {
+ case 4:
+ {
+ __asm__ __volatile__ (
+ "membar #StoreLoad | #LoadLoad\n\t"
+ "cas [%1],%2,%0\n\t"
+ "membar #StoreLoad | #StoreStore\n\t"
+ : "+&r" (_new)
+ : "r" (addr), "r" (old)
+ : "memory");
+
+ return _new;
+ }
+#if (CAA_BITS_PER_LONG == 64)
+ case 8:
+ {
+ __asm__ __volatile__ (
+ "membar #StoreLoad | #LoadLoad\n\t"
+ "casx [%1],%2,%0\n\t"
+ "membar #StoreLoad | #StoreStore\n\t"
+ : "+&r" (_new)
+ : "r" (addr), "r" (old)
+ : "memory");
+
+ return _new;
+ }
+#endif
+ }
+ __builtin_trap();
+ return 0;
+}
+
+
+#define uatomic_cmpxchg(addr, old, _new) \
+ ((__typeof__(*(addr))) _uatomic_cmpxchg((addr), (unsigned long)(old),\
+ (unsigned long)(_new), \
+ sizeof(*(addr))))
+
+#ifdef __cplusplus
+}
+#endif
+
+#include <urcu/uatomic/generic.h>
+
+#endif /* _URCU_ARCH_UATOMIC_SPARC64_H */
--- /dev/null
+#ifndef _URCU_ARCH_UATOMIC_UNKNOWN_H
+#define _URCU_ARCH_UATOMIC_UNKNOWN_H
+
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
+ * Copyright (c) 2009 Mathieu Desnoyers
+ * Copyright (c) 2010 Paul E. McKenney, IBM Corporation
+ * (Adapted from uatomic_arch_ppc.h)
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+
+/* See configure.ac for the list of recognized architectures. */
+#error "Cannot build: unrecognized architecture detected."
+
+#endif /* _URCU_ARCH_UATOMIC_UNKNOWN_H */
--- /dev/null
+#ifndef _URCU_ARCH_UATOMIC_X86_H
+#define _URCU_ARCH_UATOMIC_X86_H
+
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
+ * Copyright (c) 2009 Mathieu Desnoyers
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ * Code inspired from libuatomic_ops-1.2, inherited in part from the
+ * Boehm-Demers-Weiser conservative garbage collector.
+ */
+
+#include <urcu/compiler.h>
+#include <urcu/system.h>
+
+#define UATOMIC_HAS_ATOMIC_BYTE
+#define UATOMIC_HAS_ATOMIC_SHORT
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Derived from AO_compare_and_swap() and AO_test_and_set_full().
+ */
+
+struct __uatomic_dummy {
+ unsigned long v[10];
+};
+#define __hp(x) ((struct __uatomic_dummy *)(x))
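+
+/*
+ * The dummy structure gives the memory operands below a type wide enough to
+ * cover any of the operand sizes handled here, so the constraints describe
+ * the full addressed object rather than a single word.
+ */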
+
+#define _uatomic_set(addr, v) CMM_STORE_SHARED(*(addr), (v))
+
+/* cmpxchg */
+
+static inline __attribute__((always_inline))
+unsigned long __uatomic_cmpxchg(void *addr, unsigned long old,
+ unsigned long _new, int len)
+{
+ switch (len) {
+ case 1:
+ {
+ unsigned char result = old;
+
+ __asm__ __volatile__(
+ "lock; cmpxchgb %2, %1"
+ : "+a"(result), "+m"(*__hp(addr))
+ : "q"((unsigned char)_new)
+ : "memory");
+ return result;
+ }
+ case 2:
+ {
+ unsigned short result = old;
+
+ __asm__ __volatile__(
+ "lock; cmpxchgw %2, %1"
+ : "+a"(result), "+m"(*__hp(addr))
+ : "r"((unsigned short)_new)
+ : "memory");
+ return result;
+ }
+ case 4:
+ {
+ unsigned int result = old;
+
+ __asm__ __volatile__(
+ "lock; cmpxchgl %2, %1"
+ : "+a"(result), "+m"(*__hp(addr))
+ : "r"((unsigned int)_new)
+ : "memory");
+ return result;
+ }
+#if (CAA_BITS_PER_LONG == 64)
+ case 8:
+ {
+ unsigned long result = old;
+
+ __asm__ __volatile__(
+ "lock; cmpxchgq %2, %1"
+ : "+a"(result), "+m"(*__hp(addr))
+ : "r"((unsigned long)_new)
+ : "memory");
+ return result;
+ }
+#endif
+ }
+ /* generate an illegal instruction. Cannot catch this with linker tricks
+ * when optimizations are disabled. */
+ __asm__ __volatile__("ud2");
+ return 0;
+}
+
+#define _uatomic_cmpxchg(addr, old, _new) \
+ ((__typeof__(*(addr))) __uatomic_cmpxchg((addr), (unsigned long)(old),\
+ (unsigned long)(_new), \
+ sizeof(*(addr))))
+
+/* xchg */
+
+static inline __attribute__((always_inline))
+unsigned long __uatomic_exchange(void *addr, unsigned long val, int len)
+{
+ /* Note: the "xchg" instruction does not need a "lock" prefix. */
+ switch (len) {
+ case 1:
+ {
+ unsigned char result;
+ __asm__ __volatile__(
+ "xchgb %0, %1"
+ : "=q"(result), "+m"(*__hp(addr))
+ : "0" ((unsigned char)val)
+ : "memory");
+ return result;
+ }
+ case 2:
+ {
+ unsigned short result;
+ __asm__ __volatile__(
+ "xchgw %0, %1"
+ : "=r"(result), "+m"(*__hp(addr))
+ : "0" ((unsigned short)val)
+ : "memory");
+ return result;
+ }
+ case 4:
+ {
+ unsigned int result;
+ __asm__ __volatile__(
+ "xchgl %0, %1"
+ : "=r"(result), "+m"(*__hp(addr))
+ : "0" ((unsigned int)val)
+ : "memory");
+ return result;
+ }
+#if (CAA_BITS_PER_LONG == 64)
+ case 8:
+ {
+ unsigned long result;
+ __asm__ __volatile__(
+ "xchgq %0, %1"
+ : "=r"(result), "+m"(*__hp(addr))
+ : "0" ((unsigned long)val)
+ : "memory");
+ return result;
+ }
+#endif
+ }
+ /* generate an illegal instruction. Cannot catch this with linker tricks
+ * when optimizations are disabled. */
+ __asm__ __volatile__("ud2");
+ return 0;
+}
+
+#define _uatomic_xchg(addr, v) \
+ ((__typeof__(*(addr))) __uatomic_exchange((addr), (unsigned long)(v), \
+ sizeof(*(addr))))
+
+/* uatomic_add_return */
+
+static inline __attribute__((always_inline))
+unsigned long __uatomic_add_return(void *addr, unsigned long val,
+ int len)
+{
+ switch (len) {
+ case 1:
+ {
+ unsigned char result = val;
+
+ __asm__ __volatile__(
+ "lock; xaddb %1, %0"
+ : "+m"(*__hp(addr)), "+q" (result)
+ :
+ : "memory");
+ return result + (unsigned char)val;
+ }
+ case 2:
+ {
+ unsigned short result = val;
+
+ __asm__ __volatile__(
+ "lock; xaddw %1, %0"
+ : "+m"(*__hp(addr)), "+r" (result)
+ :
+ : "memory");
+ return result + (unsigned short)val;
+ }
+ case 4:
+ {
+ unsigned int result = val;
+
+ __asm__ __volatile__(
+ "lock; xaddl %1, %0"
+ : "+m"(*__hp(addr)), "+r" (result)
+ :
+ : "memory");
+ return result + (unsigned int)val;
+ }
+#if (CAA_BITS_PER_LONG == 64)
+ case 8:
+ {
+ unsigned long result = val;
+
+ __asm__ __volatile__(
+ "lock; xaddq %1, %0"
+ : "+m"(*__hp(addr)), "+r" (result)
+ :
+ : "memory");
+ return result + (unsigned long)val;
+ }
+#endif
+ }
+ /* generate an illegal instruction. Cannot catch this with linker tricks
+ * when optimizations are disabled. */
+ __asm__ __volatile__("ud2");
+ return 0;
+}
+
+#define _uatomic_add_return(addr, v) \
+ ((__typeof__(*(addr))) __uatomic_add_return((addr), \
+ (unsigned long)(v), \
+ sizeof(*(addr))))
+
+/* uatomic_and */
+
+static inline __attribute__((always_inline))
+void __uatomic_and(void *addr, unsigned long val, int len)
+{
+ switch (len) {
+ case 1:
+ {
+ __asm__ __volatile__(
+ "lock; andb %1, %0"
+ : "=m"(*__hp(addr))
+ : "iq" ((unsigned char)val)
+ : "memory");
+ return;
+ }
+ case 2:
+ {
+ __asm__ __volatile__(
+ "lock; andw %1, %0"
+ : "=m"(*__hp(addr))
+ : "ir" ((unsigned short)val)
+ : "memory");
+ return;
+ }
+ case 4:
+ {
+ __asm__ __volatile__(
+ "lock; andl %1, %0"
+ : "=m"(*__hp(addr))
+ : "ir" ((unsigned int)val)
+ : "memory");
+ return;
+ }
+#if (CAA_BITS_PER_LONG == 64)
+ case 8:
+ {
+ __asm__ __volatile__(
+ "lock; andq %1, %0"
+ : "=m"(*__hp(addr))
+ : "er" ((unsigned long)val)
+ : "memory");
+ return;
+ }
+#endif
+ }
+ /* generate an illegal instruction. Cannot catch this with linker tricks
+ * when optimizations are disabled. */
+ __asm__ __volatile__("ud2");
+ return;
+}
+
+#define _uatomic_and(addr, v) \
+ (__uatomic_and((addr), (unsigned long)(v), sizeof(*(addr))))
+
+/* uatomic_or */
+
+static inline __attribute__((always_inline))
+void __uatomic_or(void *addr, unsigned long val, int len)
+{
+ switch (len) {
+ case 1:
+ {
+ __asm__ __volatile__(
+ "lock; orb %1, %0"
+ : "=m"(*__hp(addr))
+ : "iq" ((unsigned char)val)
+ : "memory");
+ return;
+ }
+ case 2:
+ {
+ __asm__ __volatile__(
+ "lock; orw %1, %0"
+ : "=m"(*__hp(addr))
+ : "ir" ((unsigned short)val)
+ : "memory");
+ return;
+ }
+ case 4:
+ {
+ __asm__ __volatile__(
+ "lock; orl %1, %0"
+ : "=m"(*__hp(addr))
+ : "ir" ((unsigned int)val)
+ : "memory");
+ return;
+ }
+#if (CAA_BITS_PER_LONG == 64)
+ case 8:
+ {
+ __asm__ __volatile__(
+ "lock; orq %1, %0"
+ : "=m"(*__hp(addr))
+ : "er" ((unsigned long)val)
+ : "memory");
+ return;
+ }
+#endif
+ }
+ /* generate an illegal instruction. Cannot catch this with linker tricks
+ * when optimizations are disabled. */
+ __asm__ __volatile__("ud2");
+ return;
+}
+
+#define _uatomic_or(addr, v) \
+ (__uatomic_or((addr), (unsigned long)(v), sizeof(*(addr))))
+
+/* uatomic_add */
+
+static inline __attribute__((always_inline))
+void __uatomic_add(void *addr, unsigned long val, int len)
+{
+ switch (len) {
+ case 1:
+ {
+ __asm__ __volatile__(
+ "lock; addb %1, %0"
+ : "=m"(*__hp(addr))
+ : "iq" ((unsigned char)val)
+ : "memory");
+ return;
+ }
+ case 2:
+ {
+ __asm__ __volatile__(
+ "lock; addw %1, %0"
+ : "=m"(*__hp(addr))
+ : "ir" ((unsigned short)val)
+ : "memory");
+ return;
+ }
+ case 4:
+ {
+ __asm__ __volatile__(
+ "lock; addl %1, %0"
+ : "=m"(*__hp(addr))
+ : "ir" ((unsigned int)val)
+ : "memory");
+ return;
+ }
+#if (CAA_BITS_PER_LONG == 64)
+ case 8:
+ {
+ __asm__ __volatile__(
+ "lock; addq %1, %0"
+ : "=m"(*__hp(addr))
+ : "er" ((unsigned long)val)
+ : "memory");
+ return;
+ }
+#endif
+ }
+ /* generate an illegal instruction. Cannot catch this with linker tricks
+ * when optimizations are disabled. */
+ __asm__ __volatile__("ud2");
+ return;
+}
+
+#define _uatomic_add(addr, v) \
+ (__uatomic_add((addr), (unsigned long)(v), sizeof(*(addr))))
+
+
+/* uatomic_inc */
+
+static inline __attribute__((always_inline))
+void __uatomic_inc(void *addr, int len)
+{
+ switch (len) {
+ case 1:
+ {
+ __asm__ __volatile__(
+ "lock; incb %0"
+ : "=m"(*__hp(addr))
+ :
+ : "memory");
+ return;
+ }
+ case 2:
+ {
+ __asm__ __volatile__(
+ "lock; incw %0"
+ : "=m"(*__hp(addr))
+ :
+ : "memory");
+ return;
+ }
+ case 4:
+ {
+ __asm__ __volatile__(
+ "lock; incl %0"
+ : "=m"(*__hp(addr))
+ :
+ : "memory");
+ return;
+ }
+#if (CAA_BITS_PER_LONG == 64)
+ case 8:
+ {
+ __asm__ __volatile__(
+ "lock; incq %0"
+ : "=m"(*__hp(addr))
+ :
+ : "memory");
+ return;
+ }
+#endif
+ }
+ /* generate an illegal instruction. Cannot catch this with linker tricks
+ * when optimizations are disabled. */
+ __asm__ __volatile__("ud2");
+ return;
+}
+
+#define _uatomic_inc(addr) (__uatomic_inc((addr), sizeof(*(addr))))
+
+/* uatomic_dec */
+
+static inline __attribute__((always_inline))
+void __uatomic_dec(void *addr, int len)
+{
+ switch (len) {
+ case 1:
+ {
+ __asm__ __volatile__(
+ "lock; decb %0"
+ : "=m"(*__hp(addr))
+ :
+ : "memory");
+ return;
+ }
+ case 2:
+ {
+ __asm__ __volatile__(
+ "lock; decw %0"
+ : "=m"(*__hp(addr))
+ :
+ : "memory");
+ return;
+ }
+ case 4:
+ {
+ __asm__ __volatile__(
+ "lock; decl %0"
+ : "=m"(*__hp(addr))
+ :
+ : "memory");
+ return;
+ }
+#if (CAA_BITS_PER_LONG == 64)
+ case 8:
+ {
+ __asm__ __volatile__(
+ "lock; decq %0"
+ : "=m"(*__hp(addr))
+ :
+ : "memory");
+ return;
+ }
+#endif
+ }
+ /* generate an illegal instruction. Cannot catch this with linker tricks
+ * when optimizations are disabled. */
+ __asm__ __volatile__("ud2");
+ return;
+}
+
+#define _uatomic_dec(addr) (__uatomic_dec((addr), sizeof(*(addr))))
+
+#if ((CAA_BITS_PER_LONG != 64) && defined(CONFIG_RCU_COMPAT_ARCH))
+extern int __rcu_cas_avail;
+extern int __rcu_cas_init(void);
+
+#define UATOMIC_COMPAT(insn) \
+ ((likely(__rcu_cas_avail > 0)) \
+ ? (_uatomic_##insn) \
+ : ((unlikely(__rcu_cas_avail < 0) \
+ ? ((__rcu_cas_init() > 0) \
+ ? (_uatomic_##insn) \
+ : (compat_uatomic_##insn)) \
+ : (compat_uatomic_##insn))))
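+
+/*
+ * Dispatch sketch (illustrative): with the wrappers defined below, a 32-bit
+ * build with CONFIG_RCU_COMPAT_ARCH expands uatomic_inc(&x) roughly into
+ *
+ *	__rcu_cas_avail > 0
+ *		? _uatomic_inc(&x)		(cmpxchg-capable CPU)
+ *		: compat_uatomic_inc(&x)	(i386 fallback),
+ *
+ * where a negative __rcu_cas_avail makes __rcu_cas_init() probe the CPU
+ * once before the branch is chosen.
+ */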
+
+extern unsigned long _compat_uatomic_set(void *addr,
+ unsigned long _new, int len);
+#define compat_uatomic_set(addr, _new) \
+ ((__typeof__(*(addr))) _compat_uatomic_set((addr), \
+ (unsigned long)(_new), \
+ sizeof(*(addr))))
+
+
+extern unsigned long _compat_uatomic_xchg(void *addr,
+ unsigned long _new, int len);
+#define compat_uatomic_xchg(addr, _new) \
+ ((__typeof__(*(addr))) _compat_uatomic_xchg((addr), \
+ (unsigned long)(_new), \
+ sizeof(*(addr))))
+
+extern unsigned long _compat_uatomic_cmpxchg(void *addr, unsigned long old,
+ unsigned long _new, int len);
+#define compat_uatomic_cmpxchg(addr, old, _new) \
+ ((__typeof__(*(addr))) _compat_uatomic_cmpxchg((addr), \
+ (unsigned long)(old), \
+ (unsigned long)(_new), \
+ sizeof(*(addr))))
+
+extern unsigned long _compat_uatomic_and(void *addr,
+ unsigned long _new, int len);
+#define compat_uatomic_and(addr, v) \
+ ((__typeof__(*(addr))) _compat_uatomic_and((addr), \
+ (unsigned long)(v), \
+ sizeof(*(addr))))
+
+extern unsigned long _compat_uatomic_or(void *addr,
+ unsigned long _new, int len);
+#define compat_uatomic_or(addr, v) \
+ ((__typeof__(*(addr))) _compat_uatomic_or((addr), \
+ (unsigned long)(v), \
+ sizeof(*(addr))))
+
+extern unsigned long _compat_uatomic_add_return(void *addr,
+ unsigned long _new, int len);
+#define compat_uatomic_add_return(addr, v) \
+ ((__typeof__(*(addr))) _compat_uatomic_add_return((addr), \
+ (unsigned long)(v), \
+ sizeof(*(addr))))
+
+#define compat_uatomic_add(addr, v) \
+ ((void)compat_uatomic_add_return((addr), (v)))
+#define compat_uatomic_inc(addr) \
+ (compat_uatomic_add((addr), 1))
+#define compat_uatomic_dec(addr) \
+ (compat_uatomic_add((addr), -1))
+
+#else
+#define UATOMIC_COMPAT(insn) (_uatomic_##insn)
+#endif
+
+/* Read is atomic even in compat mode */
+#define uatomic_set(addr, v) \
+ UATOMIC_COMPAT(set(addr, v))
+
+#define uatomic_cmpxchg(addr, old, _new) \
+ UATOMIC_COMPAT(cmpxchg(addr, old, _new))
+#define uatomic_xchg(addr, v) \
+ UATOMIC_COMPAT(xchg(addr, v))
+#define uatomic_and(addr, v) \
+ UATOMIC_COMPAT(and(addr, v))
+#define uatomic_or(addr, v) \
+ UATOMIC_COMPAT(or(addr, v))
+#define uatomic_add_return(addr, v) \
+ UATOMIC_COMPAT(add_return(addr, v))
+
+#define uatomic_add(addr, v) UATOMIC_COMPAT(add(addr, v))
+#define uatomic_inc(addr) UATOMIC_COMPAT(inc(addr))
+#define uatomic_dec(addr) UATOMIC_COMPAT(dec(addr))
+
+#ifdef __cplusplus
+}
+#endif
+
+#include <urcu/uatomic/generic.h>
+
+#endif /* _URCU_ARCH_UATOMIC_X86_H */
+++ /dev/null
-#ifndef _URCU_UATOMIC_ARCH_ALPHA_H
-#define _URCU_UATOMIC_ARCH_ALPHA_H
-
-/*
- * Atomic exchange operations for the Alpha architecture. Let GCC do it.
- *
- * Copyright (c) 2010 Paolo Bonzini <pbonzini@redhat.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#include <urcu/compiler.h>
-#include <urcu/system.h>
-#include <urcu/uatomic_generic.h>
-
-#endif /* _URCU_UATOMIC_ARCH_ALPHA_H */
+++ /dev/null
-#ifndef _URCU_ARCH_UATOMIC_ARM_H
-#define _URCU_ARCH_UATOMIC_ARM_H
-
-/*
- * Atomics for ARM. This approach is usable on kernels back to 2.6.15.
- *
- * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
- * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
- * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
- * Copyright (c) 2009 Mathieu Desnoyers
- * Copyright (c) 2010 Paul E. McKenney, IBM Corporation
- * (Adapted from uatomic_arch_ppc.h)
- *
- * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
- * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
- *
- * Permission is hereby granted to use or copy this program
- * for any purpose, provided the above notices are retained on all copies.
- * Permission to modify the code and to distribute modified code is granted,
- * provided the above notices are retained, and a notice that the code was
- * modified is included with the above copyright notice.
- *
- * Code inspired from libuatomic_ops-1.2, inherited in part from the
- * Boehm-Demers-Weiser conservative garbage collector.
- */
-
-#include <urcu/compiler.h>
-#include <urcu/system.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* xchg */
-#define uatomic_xchg(addr, v) __sync_lock_test_and_set(addr, v)
-
-#ifdef __cplusplus
-}
-#endif
-
-#include <urcu/uatomic_generic.h>
-
-#endif /* _URCU_ARCH_UATOMIC_ARM_H */
+++ /dev/null
-#ifndef _URCU_ARCH_UATOMIC_GCC_H
-#define _URCU_ARCH_UATOMIC_GCC_H
-
-/*
- * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
- * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
- * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
- * Copyright (c) 2009 Mathieu Desnoyers
- * Copyright (c) 2010 Paul E. McKenney, IBM Corporation
- * (Adapted from uatomic_arch_ppc.h)
- *
- * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
- * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
- *
- * Permission is hereby granted to use or copy this program
- * for any purpose, provided the above notices are retained on all copies.
- * Permission to modify the code and to distribute modified code is granted,
- * provided the above notices are retained, and a notice that the code was
- * modified is included with the above copyright notice.
- *
- * Code inspired from libuatomic_ops-1.2, inherited in part from the
- * Boehm-Demers-Weiser conservative garbage collector.
- */
-
-#include <urcu/compiler.h>
-#include <urcu/system.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/*
- * If your platform doesn't have a full set of atomics, you will need
- * a separate uatomic_arch_*.h file for your architecture. Otherwise,
- * just rely on the definitions in uatomic_generic.h.
- */
-#define UATOMIC_HAS_ATOMIC_BYTE
-#define UATOMIC_HAS_ATOMIC_SHORT
-
-#ifdef __cplusplus
-}
-#endif
-
-#include <urcu/uatomic_generic.h>
-
-#endif /* _URCU_ARCH_UATOMIC_GCC_H */
+++ /dev/null
-#ifndef _URCU_ARCH_UATOMIC_PPC_H
-#define _URCU_ARCH_UATOMIC_PPC_H
-
-/*
- * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
- * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
- * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
- * Copyright (c) 2009 Mathieu Desnoyers
- *
- * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
- * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
- *
- * Permission is hereby granted to use or copy this program
- * for any purpose, provided the above notices are retained on all copies.
- * Permission to modify the code and to distribute modified code is granted,
- * provided the above notices are retained, and a notice that the code was
- * modified is included with the above copyright notice.
- *
- * Code inspired from libuatomic_ops-1.2, inherited in part from the
- * Boehm-Demers-Weiser conservative garbage collector.
- */
-
-#include <urcu/compiler.h>
-#include <urcu/system.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#ifdef __NO_LWSYNC__
-#define LWSYNC_OPCODE "sync\n"
-#else
-#define LWSYNC_OPCODE "lwsync\n"
-#endif
-
-#define ILLEGAL_INSTR ".long 0xd00d00"
-
-/*
- * Using a isync as second barrier for exchange to provide acquire semantic.
- * According to uatomic_ops/sysdeps/gcc/powerpc.h, the documentation is "fairly
- * explicit that this also has acquire semantics."
- * Derived from AO_compare_and_swap(), but removed the comparison.
- */
-
-/* xchg */
-
-static inline __attribute__((always_inline))
-unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
-{
- switch (len) {
- case 4:
- {
- unsigned int result;
-
- __asm__ __volatile__(
- LWSYNC_OPCODE
- "1:\t" "lwarx %0,0,%1\n" /* load and reserve */
- "stwcx. %2,0,%1\n" /* else store conditional */
- "bne- 1b\n" /* retry if lost reservation */
- "isync\n"
- : "=&r"(result)
- : "r"(addr), "r"(val)
- : "memory", "cc");
-
- return result;
- }
-#if (CAA_BITS_PER_LONG == 64)
- case 8:
- {
- unsigned long result;
-
- __asm__ __volatile__(
- LWSYNC_OPCODE
- "1:\t" "ldarx %0,0,%1\n" /* load and reserve */
- "stdcx. %2,0,%1\n" /* else store conditional */
- "bne- 1b\n" /* retry if lost reservation */
- "isync\n"
- : "=&r"(result)
- : "r"(addr), "r"(val)
- : "memory", "cc");
-
- return result;
- }
-#endif
- }
- /* generate an illegal instruction. Cannot catch this with linker tricks
- * when optimizations are disabled. */
- __asm__ __volatile__(ILLEGAL_INSTR);
- return 0;
-}
-
-#define uatomic_xchg(addr, v) \
- ((__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \
- sizeof(*(addr))))
-/* cmpxchg */
-
-static inline __attribute__((always_inline))
-unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
- unsigned long _new, int len)
-{
- switch (len) {
- case 4:
- {
- unsigned int old_val;
-
- __asm__ __volatile__(
- LWSYNC_OPCODE
- "1:\t" "lwarx %0,0,%1\n" /* load and reserve */
- "cmpw %0,%3\n" /* if load is not equal to */
- "bne 2f\n" /* old, fail */
- "stwcx. %2,0,%1\n" /* else store conditional */
- "bne- 1b\n" /* retry if lost reservation */
- "isync\n"
- "2:\n"
- : "=&r"(old_val)
- : "r"(addr), "r"((unsigned int)_new),
- "r"((unsigned int)old)
- : "memory", "cc");
-
- return old_val;
- }
-#if (CAA_BITS_PER_LONG == 64)
- case 8:
- {
- unsigned long old_val;
-
- __asm__ __volatile__(
- LWSYNC_OPCODE
- "1:\t" "ldarx %0,0,%1\n" /* load and reserve */
- "cmpd %0,%3\n" /* if load is not equal to */
- "bne 2f\n" /* old, fail */
- "stdcx. %2,0,%1\n" /* else store conditional */
- "bne- 1b\n" /* retry if lost reservation */
- "isync\n"
- "2:\n"
- : "=&r"(old_val)
- : "r"(addr), "r"((unsigned long)_new),
- "r"((unsigned long)old)
- : "memory", "cc");
-
- return old_val;
- }
-#endif
- }
- /* generate an illegal instruction. Cannot catch this with linker tricks
- * when optimizations are disabled. */
- __asm__ __volatile__(ILLEGAL_INSTR);
- return 0;
-}
-
-
-#define uatomic_cmpxchg(addr, old, _new) \
- ((__typeof__(*(addr))) _uatomic_cmpxchg((addr), (unsigned long)(old),\
- (unsigned long)(_new), \
- sizeof(*(addr))))
-
-/* uatomic_add_return */
-
-static inline __attribute__((always_inline))
-unsigned long _uatomic_add_return(void *addr, unsigned long val,
- int len)
-{
- switch (len) {
- case 4:
- {
- unsigned int result;
-
- __asm__ __volatile__(
- LWSYNC_OPCODE
- "1:\t" "lwarx %0,0,%1\n" /* load and reserve */
- "add %0,%2,%0\n" /* add val to value loaded */
- "stwcx. %0,0,%1\n" /* store conditional */
- "bne- 1b\n" /* retry if lost reservation */
- "isync\n"
- : "=&r"(result)
- : "r"(addr), "r"(val)
- : "memory", "cc");
-
- return result;
- }
-#if (CAA_BITS_PER_LONG == 64)
- case 8:
- {
- unsigned long result;
-
- __asm__ __volatile__(
- LWSYNC_OPCODE
- "1:\t" "ldarx %0,0,%1\n" /* load and reserve */
- "add %0,%2,%0\n" /* add val to value loaded */
- "stdcx. %0,0,%1\n" /* store conditional */
- "bne- 1b\n" /* retry if lost reservation */
- "isync\n"
- : "=&r"(result)
- : "r"(addr), "r"(val)
- : "memory", "cc");
-
- return result;
- }
-#endif
- }
- /* generate an illegal instruction. Cannot catch this with linker tricks
- * when optimizations are disabled. */
- __asm__ __volatile__(ILLEGAL_INSTR);
- return 0;
-}
-
-
-#define uatomic_add_return(addr, v) \
- ((__typeof__(*(addr))) _uatomic_add_return((addr), \
- (unsigned long)(v), \
- sizeof(*(addr))))
-
-#ifdef __cplusplus
-}
-#endif
-
-#include <urcu/uatomic_generic.h>
-
-#endif /* _URCU_ARCH_UATOMIC_PPC_H */
+++ /dev/null
-#ifndef _URCU_UATOMIC_ARCH_S390_H
-#define _URCU_UATOMIC_ARCH_S390_H
-
-/*
- * Atomic exchange operations for the S390 architecture. Based on information
- * taken from the Principles of Operation Appendix A "Conditional Swapping
- * Instructions (CS, CDS)".
- *
- * Copyright (c) 2009 Novell, Inc.
- * Author: Jan Blunck <jblunck@suse.de>
- * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#include <urcu/compiler.h>
-#include <urcu/system.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
-#define COMPILER_HAVE_SHORT_MEM_OPERAND
-#endif
-
-/*
- * MEMOP assembler operand rules:
- * - "op" refers to the MEMOP_IN operand.
- * - MEMOP_IN can expand to more than a single operand. Use it only at the
- *   end of the operand list.
- */
-
-#ifdef COMPILER_HAVE_SHORT_MEM_OPERAND
-
-#define MEMOP_OUT(addr) "=Q" (*(addr))
-#define MEMOP_IN(addr) "Q" (*(addr))
-#define MEMOP_REF(op) #op /* op refers to the MEMOP_IN operand */
-
-#else /* !COMPILER_HAVE_SHORT_MEM_OPERAND */
-
-#define MEMOP_OUT(addr) "=m" (*(addr))
-#define MEMOP_IN(addr) "a" (addr), "m" (*(addr))
-#define MEMOP_REF(op) "0(" #op ")" /* op refers to the MEMOP_IN operand */
-
-#endif /* !COMPILER_HAVE_SHORT_MEM_OPERAND */
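To make the rule above concrete (illustration only; the expansions are approximate), the same cs instruction ends up with different operand forms depending on which branch is taken:

    /*
     * With COMPILER_HAVE_SHORT_MEM_OPERAND (gcc > 3.2):
     *     "0: cs %0,%2,%3\n"      and "Q" (*addr) is the memory operand.
     * Without it:
     *     "0: cs %0,%2,0(%3)\n"   and "a" (addr), "m" (*addr) are passed,
     * which is why MEMOP_IN() can expand to two operands and must come last.
     */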
-
-struct __uatomic_dummy {
- unsigned long v[10];
-};
-#define __hp(x) ((struct __uatomic_dummy *)(x))
-
-/* xchg */
-
-static inline __attribute__((always_inline))
-unsigned long _uatomic_exchange(volatile void *addr, unsigned long val, int len)
-{
- switch (len) {
- case 4:
- {
- unsigned int old_val;
-
- __asm__ __volatile__(
- "0: cs %0,%2," MEMOP_REF(%3) "\n"
- " brc 4,0b\n"
- : "=&r" (old_val), MEMOP_OUT (__hp(addr))
- : "r" (val), MEMOP_IN (__hp(addr))
- : "memory", "cc");
- return old_val;
- }
-#if (CAA_BITS_PER_LONG == 64)
- case 8:
- {
- unsigned long old_val;
-
- __asm__ __volatile__(
- "0: csg %0,%2," MEMOP_REF(%3) "\n"
- " brc 4,0b\n"
- : "=&r" (old_val), MEMOP_OUT (__hp(addr))
- : "r" (val), MEMOP_IN (__hp(addr))
- : "memory", "cc");
- return old_val;
- }
-#endif
- default:
- __asm__ __volatile__(".long 0xd00d00");
- }
-
- return 0;
-}
-
-#define uatomic_xchg(addr, v) \
- (__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \
- sizeof(*(addr)))
-
-/* cmpxchg */
-
-static inline __attribute__((always_inline))
-unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
- unsigned long _new, int len)
-{
- switch (len) {
- case 4:
- {
- unsigned int old_val = (unsigned int)old;
-
- __asm__ __volatile__(
- " cs %0,%2," MEMOP_REF(%3) "\n"
- : "+r" (old_val), MEMOP_OUT (__hp(addr))
- : "r" (_new), MEMOP_IN (__hp(addr))
- : "memory", "cc");
- return old_val;
- }
-#if (CAA_BITS_PER_LONG == 64)
- case 8:
- {
- __asm__ __volatile__(
- " csg %0,%2," MEMOP_REF(%3) "\n"
- : "+r" (old), MEMOP_OUT (__hp(addr))
- : "r" (_new), MEMOP_IN (__hp(addr))
- : "memory", "cc");
- return old;
- }
-#endif
- default:
- __asm__ __volatile__(".long 0xd00d00");
- }
-
- return 0;
-}
-
-#define uatomic_cmpxchg(addr, old, _new) \
- (__typeof__(*(addr))) _uatomic_cmpxchg((addr), \
- (unsigned long)(old), \
- (unsigned long)(_new), \
- sizeof(*(addr)))
-
-#ifdef __cplusplus
-}
-#endif
-
-#include <urcu/uatomic_generic.h>
-
-#endif /* _URCU_UATOMIC_ARCH_S390_H */
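A usage note, as an illustrative sketch rather than part of this header: because uatomic_xchg() returns the previous value, it can consume a shared flag in a single step:

    #include <urcu/uatomic.h>

    static int need_flush;    /* hypothetical flag set by writer threads */

    /* Returns non-zero if a flush was requested, and clears the request. */
    static int test_and_clear_flush(void)
    {
            return uatomic_xchg(&need_flush, 0);
    }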
+++ /dev/null
-#ifndef _URCU_ARCH_UATOMIC_SPARC64_H
-#define _URCU_ARCH_UATOMIC_SPARC64_H
-
-/*
- * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
- * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
- * Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved.
- * Copyright (c) 2009 Mathieu Desnoyers
- *
- * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
- * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
- *
- * Permission is hereby granted to use or copy this program
- * for any purpose, provided the above notices are retained on all copies.
- * Permission to modify the code and to distribute modified code is granted,
- * provided the above notices are retained, and a notice that the code was
- * modified is included with the above copyright notice.
- *
- * Code inspired from libuatomic_ops-1.2, inherited in part from the
- * Boehm-Demers-Weiser conservative garbage collector.
- */
-
-#include <urcu/compiler.h>
-#include <urcu/system.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* cmpxchg */
-
-static inline __attribute__((always_inline))
-unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
- unsigned long _new, int len)
-{
- switch (len) {
- case 4:
- {
- __asm__ __volatile__ (
- "membar #StoreLoad | #LoadLoad\n\t"
- "cas [%1],%2,%0\n\t"
- "membar #StoreLoad | #StoreStore\n\t"
- : "+&r" (_new)
- : "r" (addr), "r" (old)
- : "memory");
-
- return _new;
- }
-#if (CAA_BITS_PER_LONG == 64)
- case 8:
- {
- __asm__ __volatile__ (
- "membar #StoreLoad | #LoadLoad\n\t"
- "casx [%1],%2,%0\n\t"
- "membar #StoreLoad | #StoreStore\n\t"
- : "+&r" (_new)
- : "r" (addr), "r" (old)
- : "memory");
-
- return _new;
- }
-#endif
- }
- __builtin_trap();
- return 0;
-}
-
-
-#define uatomic_cmpxchg(addr, old, _new) \
- ((__typeof__(*(addr))) _uatomic_cmpxchg((addr), (unsigned long)(old),\
- (unsigned long)(_new), \
- sizeof(*(addr))))
-
-#ifdef __cplusplus
-}
-#endif
-
-#include <urcu/uatomic_generic.h>
-
-#endif /* _URCU_ARCH_UATOMIC_SPARC64_H */
+++ /dev/null
-#ifndef _URCU_ARCH_UATOMIC_UNKNOWN_H
-#define _URCU_ARCH_UATOMIC_UNKNOWN_H
-
-/*
- * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
- * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
- * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
- * Copyright (c) 2009 Mathieu Desnoyers
- * Copyright (c) 2010 Paul E. McKenney, IBM Corporation
- * (Adapted from uatomic_arch_ppc.h)
- *
- * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
- * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
- *
- * Permission is hereby granted to use or copy this program
- * for any purpose, provided the above notices are retained on all copies.
- * Permission to modify the code and to distribute modified code is granted,
- * provided the above notices are retained, and a notice that the code was
- * modified is included with the above copyright notice.
- */
-
-/* See configure.ac for the list of recognized architectures. */
-#error "Cannot build: unrecognized architecture detected."
-
-#endif /* _URCU_ARCH_UATOMIC_UNKNOWN_H */
+++ /dev/null
-#ifndef _URCU_ARCH_UATOMIC_X86_H
-#define _URCU_ARCH_UATOMIC_X86_H
-
-/*
- * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
- * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
- * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
- * Copyright (c) 2009 Mathieu Desnoyers
- *
- * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
- * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
- *
- * Permission is hereby granted to use or copy this program
- * for any purpose, provided the above notices are retained on all copies.
- * Permission to modify the code and to distribute modified code is granted,
- * provided the above notices are retained, and a notice that the code was
- * modified is included with the above copyright notice.
- *
- * Code inspired from libuatomic_ops-1.2, inherited in part from the
- * Boehm-Demers-Weiser conservative garbage collector.
- */
-
-#include <urcu/compiler.h>
-#include <urcu/system.h>
-
-#define UATOMIC_HAS_ATOMIC_BYTE
-#define UATOMIC_HAS_ATOMIC_SHORT
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/*
- * Derived from AO_compare_and_swap() and AO_test_and_set_full().
- */
-
-struct __uatomic_dummy {
- unsigned long v[10];
-};
-#define __hp(x) ((struct __uatomic_dummy *)(x))
-
-#define _uatomic_set(addr, v) CMM_STORE_SHARED(*(addr), (v))
-
-/* cmpxchg */
-
-static inline __attribute__((always_inline))
-unsigned long __uatomic_cmpxchg(void *addr, unsigned long old,
- unsigned long _new, int len)
-{
- switch (len) {
- case 1:
- {
- unsigned char result = old;
-
- __asm__ __volatile__(
- "lock; cmpxchgb %2, %1"
- : "+a"(result), "+m"(*__hp(addr))
- : "q"((unsigned char)_new)
- : "memory");
- return result;
- }
- case 2:
- {
- unsigned short result = old;
-
- __asm__ __volatile__(
- "lock; cmpxchgw %2, %1"
- : "+a"(result), "+m"(*__hp(addr))
- : "r"((unsigned short)_new)
- : "memory");
- return result;
- }
- case 4:
- {
- unsigned int result = old;
-
- __asm__ __volatile__(
- "lock; cmpxchgl %2, %1"
- : "+a"(result), "+m"(*__hp(addr))
- : "r"((unsigned int)_new)
- : "memory");
- return result;
- }
-#if (CAA_BITS_PER_LONG == 64)
- case 8:
- {
- unsigned long result = old;
-
- __asm__ __volatile__(
- "lock; cmpxchgq %2, %1"
- : "+a"(result), "+m"(*__hp(addr))
- : "r"((unsigned long)_new)
- : "memory");
- return result;
- }
-#endif
- }
- /* generate an illegal instruction. Cannot catch this with linker tricks
- * when optimizations are disabled. */
- __asm__ __volatile__("ud2");
- return 0;
-}
-
-#define _uatomic_cmpxchg(addr, old, _new) \
- ((__typeof__(*(addr))) __uatomic_cmpxchg((addr), (unsigned long)(old),\
- (unsigned long)(_new), \
- sizeof(*(addr))))
-
-/* xchg */
-
-static inline __attribute__((always_inline))
-unsigned long __uatomic_exchange(void *addr, unsigned long val, int len)
-{
- /* Note: the "xchg" instruction does not need a "lock" prefix. */
- switch (len) {
- case 1:
- {
- unsigned char result;
- __asm__ __volatile__(
- "xchgb %0, %1"
- : "=q"(result), "+m"(*__hp(addr))
- : "0" ((unsigned char)val)
- : "memory");
- return result;
- }
- case 2:
- {
- unsigned short result;
- __asm__ __volatile__(
- "xchgw %0, %1"
- : "=r"(result), "+m"(*__hp(addr))
- : "0" ((unsigned short)val)
- : "memory");
- return result;
- }
- case 4:
- {
- unsigned int result;
- __asm__ __volatile__(
- "xchgl %0, %1"
- : "=r"(result), "+m"(*__hp(addr))
- : "0" ((unsigned int)val)
- : "memory");
- return result;
- }
-#if (CAA_BITS_PER_LONG == 64)
- case 8:
- {
- unsigned long result;
- __asm__ __volatile__(
- "xchgq %0, %1"
- : "=r"(result), "+m"(*__hp(addr))
- : "0" ((unsigned long)val)
- : "memory");
- return result;
- }
-#endif
- }
- /* generate an illegal instruction. Cannot catch this with linker tricks
- * when optimizations are disabled. */
- __asm__ __volatile__("ud2");
- return 0;
-}
-
-#define _uatomic_xchg(addr, v) \
- ((__typeof__(*(addr))) __uatomic_exchange((addr), (unsigned long)(v), \
- sizeof(*(addr))))
-
-/* uatomic_add_return */
-
-static inline __attribute__((always_inline))
-unsigned long __uatomic_add_return(void *addr, unsigned long val,
- int len)
-{
- switch (len) {
- case 1:
- {
- unsigned char result = val;
-
- __asm__ __volatile__(
- "lock; xaddb %1, %0"
- : "+m"(*__hp(addr)), "+q" (result)
- :
- : "memory");
- return result + (unsigned char)val;
- }
- case 2:
- {
- unsigned short result = val;
-
- __asm__ __volatile__(
- "lock; xaddw %1, %0"
- : "+m"(*__hp(addr)), "+r" (result)
- :
- : "memory");
- return result + (unsigned short)val;
- }
- case 4:
- {
- unsigned int result = val;
-
- __asm__ __volatile__(
- "lock; xaddl %1, %0"
- : "+m"(*__hp(addr)), "+r" (result)
- :
- : "memory");
- return result + (unsigned int)val;
- }
-#if (CAA_BITS_PER_LONG == 64)
- case 8:
- {
- unsigned long result = val;
-
- __asm__ __volatile__(
- "lock; xaddq %1, %0"
- : "+m"(*__hp(addr)), "+r" (result)
- :
- : "memory");
- return result + (unsigned long)val;
- }
-#endif
- }
- /* generate an illegal instruction. Cannot catch this with linker tricks
- * when optimizations are disabled. */
- __asm__ __volatile__("ud2");
- return 0;
-}
-
-#define _uatomic_add_return(addr, v) \
- ((__typeof__(*(addr))) __uatomic_add_return((addr), \
- (unsigned long)(v), \
- sizeof(*(addr))))
-
-/* uatomic_and */
-
-static inline __attribute__((always_inline))
-void __uatomic_and(void *addr, unsigned long val, int len)
-{
- switch (len) {
- case 1:
- {
- __asm__ __volatile__(
- "lock; andb %1, %0"
- : "=m"(*__hp(addr))
- : "iq" ((unsigned char)val)
- : "memory");
- return;
- }
- case 2:
- {
- __asm__ __volatile__(
- "lock; andw %1, %0"
- : "=m"(*__hp(addr))
- : "ir" ((unsigned short)val)
- : "memory");
- return;
- }
- case 4:
- {
- __asm__ __volatile__(
- "lock; andl %1, %0"
- : "=m"(*__hp(addr))
- : "ir" ((unsigned int)val)
- : "memory");
- return;
- }
-#if (CAA_BITS_PER_LONG == 64)
- case 8:
- {
- __asm__ __volatile__(
- "lock; andq %1, %0"
- : "=m"(*__hp(addr))
- : "er" ((unsigned long)val)
- : "memory");
- return;
- }
-#endif
- }
- /* generate an illegal instruction. Cannot catch this with linker tricks
- * when optimizations are disabled. */
- __asm__ __volatile__("ud2");
- return;
-}
-
-#define _uatomic_and(addr, v) \
- (__uatomic_and((addr), (unsigned long)(v), sizeof(*(addr))))
-
-/* uatomic_or */
-
-static inline __attribute__((always_inline))
-void __uatomic_or(void *addr, unsigned long val, int len)
-{
- switch (len) {
- case 1:
- {
- __asm__ __volatile__(
- "lock; orb %1, %0"
- : "=m"(*__hp(addr))
- : "iq" ((unsigned char)val)
- : "memory");
- return;
- }
- case 2:
- {
- __asm__ __volatile__(
- "lock; orw %1, %0"
- : "=m"(*__hp(addr))
- : "ir" ((unsigned short)val)
- : "memory");
- return;
- }
- case 4:
- {
- __asm__ __volatile__(
- "lock; orl %1, %0"
- : "=m"(*__hp(addr))
- : "ir" ((unsigned int)val)
- : "memory");
- return;
- }
-#if (CAA_BITS_PER_LONG == 64)
- case 8:
- {
- __asm__ __volatile__(
- "lock; orq %1, %0"
- : "=m"(*__hp(addr))
- : "er" ((unsigned long)val)
- : "memory");
- return;
- }
-#endif
- }
- /* generate an illegal instruction. Cannot catch this with linker tricks
- * when optimizations are disabled. */
- __asm__ __volatile__("ud2");
- return;
-}
-
-#define _uatomic_or(addr, v) \
- (__uatomic_or((addr), (unsigned long)(v), sizeof(*(addr))))
-
-/* uatomic_add */
-
-static inline __attribute__((always_inline))
-void __uatomic_add(void *addr, unsigned long val, int len)
-{
- switch (len) {
- case 1:
- {
- __asm__ __volatile__(
- "lock; addb %1, %0"
- : "=m"(*__hp(addr))
- : "iq" ((unsigned char)val)
- : "memory");
- return;
- }
- case 2:
- {
- __asm__ __volatile__(
- "lock; addw %1, %0"
- : "=m"(*__hp(addr))
- : "ir" ((unsigned short)val)
- : "memory");
- return;
- }
- case 4:
- {
- __asm__ __volatile__(
- "lock; addl %1, %0"
- : "=m"(*__hp(addr))
- : "ir" ((unsigned int)val)
- : "memory");
- return;
- }
-#if (CAA_BITS_PER_LONG == 64)
- case 8:
- {
- __asm__ __volatile__(
- "lock; addq %1, %0"
- : "=m"(*__hp(addr))
- : "er" ((unsigned long)val)
- : "memory");
- return;
- }
-#endif
- }
- /* generate an illegal instruction. Cannot catch this with linker tricks
- * when optimizations are disabled. */
- __asm__ __volatile__("ud2");
- return;
-}
-
-#define _uatomic_add(addr, v) \
- (__uatomic_add((addr), (unsigned long)(v), sizeof(*(addr))))
-
-
-/* uatomic_inc */
-
-static inline __attribute__((always_inline))
-void __uatomic_inc(void *addr, int len)
-{
- switch (len) {
- case 1:
- {
- __asm__ __volatile__(
- "lock; incb %0"
- : "=m"(*__hp(addr))
- :
- : "memory");
- return;
- }
- case 2:
- {
- __asm__ __volatile__(
- "lock; incw %0"
- : "=m"(*__hp(addr))
- :
- : "memory");
- return;
- }
- case 4:
- {
- __asm__ __volatile__(
- "lock; incl %0"
- : "=m"(*__hp(addr))
- :
- : "memory");
- return;
- }
-#if (CAA_BITS_PER_LONG == 64)
- case 8:
- {
- __asm__ __volatile__(
- "lock; incq %0"
- : "=m"(*__hp(addr))
- :
- : "memory");
- return;
- }
-#endif
- }
- /* generate an illegal instruction. Cannot catch this with linker tricks
- * when optimizations are disabled. */
- __asm__ __volatile__("ud2");
- return;
-}
-
-#define _uatomic_inc(addr) (__uatomic_inc((addr), sizeof(*(addr))))
-
-/* uatomic_dec */
-
-static inline __attribute__((always_inline))
-void __uatomic_dec(void *addr, int len)
-{
- switch (len) {
- case 1:
- {
- __asm__ __volatile__(
- "lock; decb %0"
- : "=m"(*__hp(addr))
- :
- : "memory");
- return;
- }
- case 2:
- {
- __asm__ __volatile__(
- "lock; decw %0"
- : "=m"(*__hp(addr))
- :
- : "memory");
- return;
- }
- case 4:
- {
- __asm__ __volatile__(
- "lock; decl %0"
- : "=m"(*__hp(addr))
- :
- : "memory");
- return;
- }
-#if (CAA_BITS_PER_LONG == 64)
- case 8:
- {
- __asm__ __volatile__(
- "lock; decq %0"
- : "=m"(*__hp(addr))
- :
- : "memory");
- return;
- }
-#endif
- }
- /* generate an illegal instruction. Cannot catch this with linker tricks
- * when optimizations are disabled. */
- __asm__ __volatile__("ud2");
- return;
-}
-
-#define _uatomic_dec(addr) (__uatomic_dec((addr), sizeof(*(addr))))
-
-#if ((CAA_BITS_PER_LONG != 64) && defined(CONFIG_RCU_COMPAT_ARCH))
-extern int __rcu_cas_avail;
-extern int __rcu_cas_init(void);
-
-#define UATOMIC_COMPAT(insn) \
- ((likely(__rcu_cas_avail > 0)) \
- ? (_uatomic_##insn) \
- : ((unlikely(__rcu_cas_avail < 0) \
- ? ((__rcu_cas_init() > 0) \
- ? (_uatomic_##insn) \
- : (compat_uatomic_##insn)) \
- : (compat_uatomic_##insn))))
-
-extern unsigned long _compat_uatomic_set(void *addr,
- unsigned long _new, int len);
-#define compat_uatomic_set(addr, _new) \
- ((__typeof__(*(addr))) _compat_uatomic_set((addr), \
- (unsigned long)(_new), \
- sizeof(*(addr))))
-
-
-extern unsigned long _compat_uatomic_xchg(void *addr,
- unsigned long _new, int len);
-#define compat_uatomic_xchg(addr, _new) \
- ((__typeof__(*(addr))) _compat_uatomic_xchg((addr), \
- (unsigned long)(_new), \
- sizeof(*(addr))))
-
-extern unsigned long _compat_uatomic_cmpxchg(void *addr, unsigned long old,
- unsigned long _new, int len);
-#define compat_uatomic_cmpxchg(addr, old, _new) \
- ((__typeof__(*(addr))) _compat_uatomic_cmpxchg((addr), \
- (unsigned long)(old), \
- (unsigned long)(_new), \
- sizeof(*(addr))))
-
-extern unsigned long _compat_uatomic_and(void *addr,
- unsigned long _new, int len);
-#define compat_uatomic_and(addr, v) \
- ((__typeof__(*(addr))) _compat_uatomic_and((addr), \
- (unsigned long)(v), \
- sizeof(*(addr))))
-
-extern unsigned long _compat_uatomic_or(void *addr,
- unsigned long _new, int len);
-#define compat_uatomic_or(addr, v) \
- ((__typeof__(*(addr))) _compat_uatomic_or((addr), \
- (unsigned long)(v), \
- sizeof(*(addr))))
-
-extern unsigned long _compat_uatomic_add_return(void *addr,
- unsigned long _new, int len);
-#define compat_uatomic_add_return(addr, v) \
- ((__typeof__(*(addr))) _compat_uatomic_add_return((addr), \
- (unsigned long)(v), \
- sizeof(*(addr))))
-
-#define compat_uatomic_add(addr, v) \
- ((void)compat_uatomic_add_return((addr), (v)))
-#define compat_uatomic_inc(addr) \
- (compat_uatomic_add((addr), 1))
-#define compat_uatomic_dec(addr) \
- (compat_uatomic_add((addr), -1))
-
-#else
-#define UATOMIC_COMPAT(insn) (_uatomic_##insn)
-#endif
-
-/* Reads are atomic even in compat mode, so only updates need the compat wrapper. */
-#define uatomic_set(addr, v) \
- UATOMIC_COMPAT(set(addr, v))
-
-#define uatomic_cmpxchg(addr, old, _new) \
- UATOMIC_COMPAT(cmpxchg(addr, old, _new))
-#define uatomic_xchg(addr, v) \
- UATOMIC_COMPAT(xchg(addr, v))
-#define uatomic_and(addr, v) \
- UATOMIC_COMPAT(and(addr, v))
-#define uatomic_or(addr, v) \
- UATOMIC_COMPAT(or(addr, v))
-#define uatomic_add_return(addr, v) \
- UATOMIC_COMPAT(add_return(addr, v))
-
-#define uatomic_add(addr, v) UATOMIC_COMPAT(add(addr, v))
-#define uatomic_inc(addr) UATOMIC_COMPAT(inc(addr))
-#define uatomic_dec(addr) UATOMIC_COMPAT(dec(addr))
-
-#ifdef __cplusplus
-}
-#endif
-
-#include <urcu/uatomic_generic.h>
-
-#endif /* _URCU_ARCH_UATOMIC_X86_H */
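The UATOMIC_COMPAT() block above dispatches at run time: an unprobed __rcu_cas_avail triggers __rcu_cas_init(), after which each operation goes either to the native lock-prefixed code or to the fallbacks in compat_arch_x86.c (for i386-class CPUs lacking cmpxchg). The shape of that dispatch, as a self-contained sketch with made-up names rather than the project's actual compat code:

    #include <stdio.h>

    /* Hypothetical stand-ins for the native and compat code paths. */
    static long native_add_return(long *addr, long v)
    {
            return __sync_add_and_fetch(addr, v);    /* gcc builtin */
    }

    static long compat_add_return(long *addr, long v)
    {
            return *addr += v;   /* placeholder; the real fallback stays atomic */
    }

    static int cas_avail = -1;           /* -1: not probed, 0: no, 1: yes */

    static int probe_cas(void)
    {
            /* A real probe would use cpuid; assume support here. */
            return cas_avail = 1;
    }

    #define ADD_RETURN(addr, v)                                     \
            ((cas_avail > 0) ? native_add_return((addr), (v))       \
             : ((cas_avail < 0 && probe_cas() > 0)                  \
                    ? native_add_return((addr), (v))                \
                    : compat_add_return((addr), (v))))

    int main(void)
    {
            long x = 0;
            printf("%ld\n", ADD_RETURN(&x, 5));      /* prints 5 */
            return 0;
    }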
+++ /dev/null
-#ifndef _URCU_UATOMIC_GENERIC_H
-#define _URCU_UATOMIC_GENERIC_H
-
-/*
- * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
- * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
- * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
- * Copyright (c) 2009 Mathieu Desnoyers
- * Copyright (c) 2010 Paolo Bonzini
- *
- * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
- * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
- *
- * Permission is hereby granted to use or copy this program
- * for any purpose, provided the above notices are retained on all copies.
- * Permission to modify the code and to distribute modified code is granted,
- * provided the above notices are retained, and a notice that the code was
- * modified is included with the above copyright notice.
- *
- * Code inspired from libuatomic_ops-1.2, inherited in part from the
- * Boehm-Demers-Weiser conservative garbage collector.
- */
-
-#include <urcu/compiler.h>
-#include <urcu/system.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#ifndef uatomic_set
-#define uatomic_set(addr, v) CMM_STORE_SHARED(*(addr), (v))
-#endif
-
-#ifndef uatomic_read
-#define uatomic_read(addr) CMM_LOAD_SHARED(*(addr))
-#endif
-
-#if !defined __OPTIMIZE__ || defined UATOMIC_NO_LINK_ERROR
-static inline __attribute__((always_inline))
-void _uatomic_link_error()
-{
-#ifdef ILLEGAL_INSTR
- /* generate an illegal instruction. Cannot catch this with linker tricks
- * when optimizations are disabled. */
- __asm__ __volatile__(ILLEGAL_INSTR);
-#else
- __builtin_trap ();
-#endif
-}
-
-#else /* #if !defined __OPTIMIZE__ || defined UATOMIC_NO_LINK_ERROR */
-extern void _uatomic_link_error ();
-#endif /* #else #if !defined __OPTIMIZE__ || defined UATOMIC_NO_LINK_ERROR */
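The intent of the extern declaration above: when optimizing, a supported size constant-folds into one of the switch cases, the call to the never-defined function is eliminated, and the program links; an unsupported sizeof(*(addr)) leaves the reference in place and fails at link time. At -O0 the dead branch survives, hence the trap fallback. A stand-alone sketch of the same trick with hypothetical names:

    /* Never defined anywhere: any surviving reference is a link error. */
    extern void bad_atomic_size(void);

    static inline unsigned long load_by_size(void *p, int len)
    {
            switch (len) {
            case 4:
                    return *(volatile unsigned int *)p;
            case 8:
                    return *(volatile unsigned long *)p;
            }
            bad_atomic_size();   /* optimized away whenever len is valid */
            return 0;
    }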
-
-/* cmpxchg */
-
-#ifndef uatomic_cmpxchg
-static inline __attribute__((always_inline))
-unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
- unsigned long _new, int len)
-{
- switch (len) {
-#ifdef UATOMIC_HAS_ATOMIC_BYTE
- case 1:
- return __sync_val_compare_and_swap_1(addr, old, _new);
-#endif
-#ifdef UATOMIC_HAS_ATOMIC_SHORT
- case 2:
- return __sync_val_compare_and_swap_2(addr, old, _new);
-#endif
- case 4:
- return __sync_val_compare_and_swap_4(addr, old, _new);
-#if (CAA_BITS_PER_LONG == 64)
- case 8:
- return __sync_val_compare_and_swap_8(addr, old, _new);
-#endif
- }
- _uatomic_link_error();
- return 0;
-}
-
-
-#define uatomic_cmpxchg(addr, old, _new) \
- ((__typeof__(*(addr))) _uatomic_cmpxchg((addr), (unsigned long)(old),\
- (unsigned long)(_new), \
- sizeof(*(addr))))
-
-
-/* uatomic_and */
-
-#ifndef uatomic_and
-static inline __attribute__((always_inline))
-void _uatomic_and(void *addr, unsigned long val,
- int len)
-{
- switch (len) {
-#ifdef UATOMIC_HAS_ATOMIC_BYTE
- case 1:
- __sync_and_and_fetch_1(addr, val);
- return;
-#endif
-#ifdef UATOMIC_HAS_ATOMIC_SHORT
- case 2:
- __sync_and_and_fetch_2(addr, val);
- return;
-#endif
- case 4:
- __sync_and_and_fetch_4(addr, val);
- return;
-#if (CAA_BITS_PER_LONG == 64)
- case 8:
- __sync_and_and_fetch_8(addr, val);
- return;
-#endif
- }
- _uatomic_link_error();
-}
-
-#define uatomic_and(addr, v) \
- (_uatomic_and((addr), \
- (unsigned long)(v), \
- sizeof(*(addr))))
-#endif
-
-/* uatomic_or */
-
-#ifndef uatomic_or
-static inline __attribute__((always_inline))
-void _uatomic_or(void *addr, unsigned long val,
- int len)
-{
- switch (len) {
-#ifdef UATOMIC_HAS_ATOMIC_BYTE
- case 1:
- __sync_or_and_fetch_1(addr, val);
- return;
-#endif
-#ifdef UATOMIC_HAS_ATOMIC_SHORT
- case 2:
- __sync_or_and_fetch_2(addr, val);
- return;
-#endif
- case 4:
- __sync_or_and_fetch_4(addr, val);
- return;
-#if (CAA_BITS_PER_LONG == 64)
- case 8:
- __sync_or_and_fetch_8(addr, val);
- return;
-#endif
- }
- _uatomic_link_error();
-}
-
-#define uatomic_or(addr, v) \
- (_uatomic_or((addr), \
- (unsigned long)(v), \
- sizeof(*(addr))))
-#endif
-
-/* uatomic_add_return */
-
-#ifndef uatomic_add_return
-static inline __attribute__((always_inline))
-unsigned long _uatomic_add_return(void *addr, unsigned long val,
- int len)
-{
- switch (len) {
-#ifdef UATOMIC_HAS_ATOMIC_BYTE
- case 1:
- return __sync_add_and_fetch_1(addr, val);
-#endif
-#ifdef UATOMIC_HAS_ATOMIC_SHORT
- case 2:
- return __sync_add_and_fetch_2(addr, val);
-#endif
- case 4:
- return __sync_add_and_fetch_4(addr, val);
-#if (CAA_BITS_PER_LONG == 64)
- case 8:
- return __sync_add_and_fetch_8(addr, val);
-#endif
- }
- _uatomic_link_error();
- return 0;
-}
-
-
-#define uatomic_add_return(addr, v) \
- ((__typeof__(*(addr))) _uatomic_add_return((addr), \
- (unsigned long)(v), \
- sizeof(*(addr))))
-#endif /* #ifndef uatomic_add_return */
-
-#ifndef uatomic_xchg
-/* xchg */
-
-static inline __attribute__((always_inline))
-unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
-{
- switch (len) {
-#ifdef UATOMIC_HAS_ATOMIC_BYTE
- case 1:
- {
- unsigned char old;
-
- do {
- old = uatomic_read((unsigned char *)addr);
- } while (!__sync_bool_compare_and_swap_1(addr, old, val));
-
- return old;
- }
-#endif
-#ifdef UATOMIC_HAS_ATOMIC_SHORT
- case 2:
- {
- unsigned short old;
-
- do {
- old = uatomic_read((unsigned short *)addr);
- } while (!__sync_bool_compare_and_swap_2(addr, old, val));
-
- return old;
- }
-#endif
- case 4:
- {
- unsigned int old;
-
- do {
- old = uatomic_read((unsigned int *)addr);
- } while (!__sync_bool_compare_and_swap_4(addr, old, val));
-
- return old;
- }
-#if (CAA_BITS_PER_LONG == 64)
- case 8:
- {
- unsigned long old;
-
- do {
- old = uatomic_read((unsigned long *)addr);
- } while (!__sync_bool_compare_and_swap_8(addr, old, val));
-
- return old;
- }
-#endif
- }
- _uatomic_link_error();
- return 0;
-}
-
-#define uatomic_xchg(addr, v) \
- ((__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \
- sizeof(*(addr))))
-#endif /* #ifndef uatomic_xchg */
-
-#else /* #ifndef uatomic_cmpxchg */
-
-#ifndef uatomic_and
-/* uatomic_and */
-
-static inline __attribute__((always_inline))
-void _uatomic_and(void *addr, unsigned long val, int len)
-{
- switch (len) {
-#ifdef UATOMIC_HAS_ATOMIC_BYTE
- case 1:
- {
- unsigned char old, oldt;
-
- oldt = uatomic_read((unsigned char *)addr);
- do {
- old = oldt;
- oldt = _uatomic_cmpxchg(addr, old, old & val, 1);
- } while (oldt != old);
- return;
- }
-#endif
-#ifdef UATOMIC_HAS_ATOMIC_SHORT
- case 2:
- {
- unsigned short old, oldt;
-
- oldt = uatomic_read((unsigned short *)addr);
- do {
- old = oldt;
- oldt = _uatomic_cmpxchg(addr, old, old & val, 2);
- } while (oldt != old);
- return;
- }
-#endif
- case 4:
- {
- unsigned int old, oldt;
-
- oldt = uatomic_read((unsigned int *)addr);
- do {
- old = oldt;
- oldt = _uatomic_cmpxchg(addr, old, old & val, 4);
- } while (oldt != old);
- return;
- }
-#if (CAA_BITS_PER_LONG == 64)
- case 8:
- {
- unsigned long old, oldt;
-
- oldt = uatomic_read((unsigned long *)addr);
- do {
- old = oldt;
- oldt = _uatomic_cmpxchg(addr, old, old & val, 8);
- } while (oldt != old);
- return;
- }
-#endif
- }
- _uatomic_link_error();
-}
-
-#define uatomic_and(addr, v) \
- (_uatomic_and((addr), \
- (unsigned long)(v), \
- sizeof(*(addr))))
-#endif /* #ifndef uatomic_and */
-
-#ifndef uatomic_or
-/* uatomic_or */
-
-static inline __attribute__((always_inline))
-void _uatomic_or(void *addr, unsigned long val, int len)
-{
- switch (len) {
-#ifdef UATOMIC_HAS_ATOMIC_BYTE
- case 1:
- {
- unsigned char old, oldt;
-
- oldt = uatomic_read((unsigned char *)addr);
- do {
- old = oldt;
- oldt = _uatomic_cmpxchg(addr, old, old | val, 1);
- } while (oldt != old);
- return;
- }
-#endif
-#ifdef UATOMIC_HAS_ATOMIC_SHORT
- case 2:
- {
- unsigned short old, oldt;
-
- oldt = uatomic_read((unsigned short *)addr);
- do {
- old = oldt;
- oldt = _uatomic_cmpxchg(addr, old, old | val, 2);
- } while (oldt != old);
- return;
- }
-#endif
- case 4:
- {
- unsigned int old, oldt;
-
- oldt = uatomic_read((unsigned int *)addr);
- do {
- old = oldt;
- oldt = _uatomic_cmpxchg(addr, old, old | val, 4);
- } while (oldt != old);
- return;
- }
-#if (CAA_BITS_PER_LONG == 64)
- case 8:
- {
- unsigned long old, oldt;
-
- oldt = uatomic_read((unsigned long *)addr);
- do {
- old = oldt;
- oldt = _uatomic_cmpxchg(addr, old, old | val, 8);
- } while (oldt != old);
- return;
- }
-#endif
- }
- _uatomic_link_error();
-}
-
-#define uatomic_or(addr, v) \
- (_uatomic_or((addr), \
- (unsigned long)(v), \
- sizeof(*(addr))))
-#endif /* #ifndef uatomic_or */
-
-#ifndef uatomic_add_return
-/* uatomic_add_return */
-
-static inline __attribute__((always_inline))
-unsigned long _uatomic_add_return(void *addr, unsigned long val, int len)
-{
- switch (len) {
-#ifdef UATOMIC_HAS_ATOMIC_BYTE
- case 1:
- {
- unsigned char old, oldt;
-
- oldt = uatomic_read((unsigned char *)addr);
- do {
- old = oldt;
- oldt = uatomic_cmpxchg((unsigned char *)addr,
- old, old + val);
- } while (oldt != old);
-
- return old + val;
- }
-#endif
-#ifdef UATOMIC_HAS_ATOMIC_SHORT
- case 2:
- {
- unsigned short old, oldt;
-
- oldt = uatomic_read((unsigned short *)addr);
- do {
- old = oldt;
- oldt = uatomic_cmpxchg((unsigned short *)addr,
- old, old + val);
- } while (oldt != old);
-
- return old + val;
- }
-#endif
- case 4:
- {
- unsigned int old, oldt;
-
- oldt = uatomic_read((unsigned int *)addr);
- do {
- old = oldt;
- oldt = uatomic_cmpxchg((unsigned int *)addr,
- old, old + val);
- } while (oldt != old);
-
- return old + val;
- }
-#if (CAA_BITS_PER_LONG == 64)
- case 8:
- {
- unsigned long old, oldt;
-
- oldt = uatomic_read((unsigned long *)addr);
- do {
- old = oldt;
- oldt = uatomic_cmpxchg((unsigned long *)addr,
- old, old + val);
- } while (oldt != old);
-
- return old + val;
- }
-#endif
- }
- _uatomic_link_error();
- return 0;
-}
-
-#define uatomic_add_return(addr, v) \
- ((__typeof__(*(addr))) _uatomic_add_return((addr), \
- (unsigned long)(v), \
- sizeof(*(addr))))
-#endif /* #ifndef uatomic_add_return */
-
-#ifndef uatomic_xchg
-/* xchg */
-
-static inline __attribute__((always_inline))
-unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
-{
- switch (len) {
-#ifdef UATOMIC_HAS_ATOMIC_BYTE
- case 1:
- {
- unsigned char old, oldt;
-
- oldt = uatomic_read((unsigned char *)addr);
- do {
- old = oldt;
- oldt = uatomic_cmpxchg((unsigned char *)addr,
- old, val);
- } while (oldt != old);
-
- return old;
- }
-#endif
-#ifdef UATOMIC_HAS_ATOMIC_SHORT
- case 2:
- {
- unsigned short old, oldt;
-
- oldt = uatomic_read((unsigned short *)addr);
- do {
- old = oldt;
- oldt = uatomic_cmpxchg((unsigned short *)addr,
- old, val);
- } while (oldt != old);
-
- return old;
- }
-#endif
- case 4:
- {
- unsigned int old, oldt;
-
- oldt = uatomic_read((unsigned int *)addr);
- do {
- old = oldt;
- oldt = uatomic_cmpxchg((unsigned int *)addr,
- old, val);
- } while (oldt != old);
-
- return old;
- }
-#if (CAA_BITS_PER_LONG == 64)
- case 8:
- {
- unsigned long old, oldt;
-
- oldt = uatomic_read((unsigned long *)addr);
- do {
- old = oldt;
- oldt = uatomic_cmpxchg((unsigned long *)addr,
- old, val);
- } while (oldt != old);
-
- return old;
- }
-#endif
- }
- _uatomic_link_error();
- return 0;
-}
-
-#define uatomic_xchg(addr, v) \
- ((__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \
- sizeof(*(addr))))
-#endif /* #ifndef uatomic_xchg */
-
-#endif /* #else #ifndef uatomic_cmpxchg */
-
-/* uatomic_sub_return, uatomic_add, uatomic_sub, uatomic_inc, uatomic_dec */
-
-#ifndef uatomic_add
-#define uatomic_add(addr, v) (void)uatomic_add_return((addr), (v))
-#endif
-
-#define uatomic_sub_return(addr, v) uatomic_add_return((addr), -(v))
-#define uatomic_sub(addr, v) uatomic_add((addr), -(v))
-
-#ifndef uatomic_inc
-#define uatomic_inc(addr) uatomic_add((addr), 1)
-#endif
-
-#ifndef uatomic_dec
-#define uatomic_dec(addr) uatomic_add((addr), -1)
-#endif
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _URCU_UATOMIC_GENERIC_H */
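When an architecture header provides only uatomic_cmpxchg(), the generic header above synthesizes xchg, and, or and add_return from compare-and-swap retry loops. Any other read-modify-write operation can be layered the same way; a minimal sketch of a hypothetical atomic-maximum helper, assuming <urcu/uatomic.h>:

    #include <urcu/uatomic.h>

    /* Hypothetical helper: raise *addr to at least val, return the final value. */
    static inline unsigned long uatomic_max(unsigned long *addr, unsigned long val)
    {
            unsigned long old, oldt;

            oldt = uatomic_read(addr);
            for (;;) {
                    old = oldt;
                    if (old >= val)
                            return old;      /* already large enough */
                    oldt = uatomic_cmpxchg(addr, old, val);
                    if (oldt == old)
                            return val;      /* our update won the race */
            }
    }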
*/
#include <assert.h>
-#include <urcu/uatomic_arch.h>
+#include <urcu/uatomic.h>
struct urcu_ref {
long refcount; /* ATOMIC */