urcu-yield.o
tests/api.h
urcu/arch.h
-urcu/uatomic_arch.h
+urcu/uatomic.h
liburcu-defer.so
liburcu-mb.so
liburcu-qsbr.so
--- /dev/null
+2011-07-21 Userspace RCU 0.6.4
+ * uatomic: Fix ARM build errors in uatomic.
+ * urcu tests: hold mutex across use of custom allocator.
+ * Portability fixes to support FreeBSD 8.2.
+
+2011-06-27 Userspace RCU 0.6.3
+ * uatomic: Fix i386 compatibility build errors in uatomic.
+
+2011-06-13 Userspace RCU 0.6.2
+ * Fix build on powerpc.
+
+2011-06-13 Userspace RCU 0.6.1
+ * Add missing headers into release tarball:
+ urcu-call-rcu-impl.h and urcu-defer-impl.h
+
+2011-06-10 Userspace RCU 0.6.0
+ * Added call_rcu() support, graciously contributed by Paul E. McKenney.
+ * Added urcu/cds.h and merged all "Concurrent Data Containers" (CDS)
+ into a single .so: liburcu-cds.so to have a single header/shared
+ object holding all concurrency-aware data containers. It provides
+ support for RCU lists, queues and stacks at the moment.
+ * liburcu 0.6.0 now allows linking a single application with multiple
+ flavors of RCU. This required a library API change which is
+ incompatible with older 0.5.x liburcu. The .so version number
+ is therefore bumped from 0 to 1 (e.g. liburcu.so.1).
+ * Added "atfork" handling API, documented in README. Only useful for
+ processes using fork() without following exec().
+ * Renaming the following files, keeping the old files (producing a
+ compiler warning):
+ urcu/uatomic_arch.h -> urcu/uatomic.h
+ urcu/urcu-futex.h -> urcu/futex.h
+ urcu/urcu_ref.h -> urcu/ref.h
+
+2011-03-04 Userspace RCU 0.5.4
+ * urcu-bp: Update fork() handling
+
+ Introduce
+
+ extern void rcu_bp_before_fork(void);
+ extern void rcu_bp_after_fork_parent(void);
+ extern void rcu_bp_after_fork_child(void);
+
+ to handle fork gracefully. These disable signals and hold
+ the registry mutex across forks. (this flavor of the liburcu
+ library ("bp" for "bulletproof") is mainly used by UST, the
+ user-space tracer).
+
+2011-03-03 Userspace RCU 0.5.3
+	* Add support for older ARM single-CPU architectures (pre-ARMv7)
+ (thanks to Jason Wessel from WindRiver).
+ * Fix ARMv7 cmm_rmb/cmm_wmb primitives (use a dmb barrier).
+ * Remove leftover list_t type from urcu/list.h (namespace cleanup).
+ * urcu_defer: handle malloc error value.
+ * Update README file to describe urcu interaction with fork() when not
+	  followed
+ by an exec() system call.
+
+2010-11-18 Userspace RCU 0.5.2
+ * Fix renaming error introduced in 0.5.0.
+
+2010-11-18 Userspace RCU 0.5.1
+ * Fix renaming error introduced in 0.5.0. (incomplete fix)
+
+2010-11-18 Userspace RCU 0.5.0
+ * Version 0.5.0 changes the API presented by memory model,
+ architecture abstraction and data structure primitives in
+ headers. The prefixes are, respectively:
+ - cmm_ : Concurrent Memory Model
+ - caa_ : Concurrent Architecture Abstraction
+ - cds_ : Concurrent Data Structures
+
+2010-03-04 Userspace RCU 0.4.2
+ * Add generic uatomic ops support.
+ * Bugfix in urcu-bp. (rare garbage collection bug occurring in
+ multithreaded environment). Only affects urcu-bp users (UST).
+
+2010-02-12 Userspace RCU 0.4.1
+ * s390: compilation fix.
+
+2010-01-30 Userspace RCU 0.4.0
+ * API change: SIGURCU -> SIGRCU
+ * API changes: standardize on rcu_ prefix.
+ * API change: urcu_init -> rcu_init.
+ * urcu/config.h options renamed to CONFIG_RCU_HAVE_FENCE,
+ CONFIG_RCU_HAVE_FUTEX, CONFIG_RCU_SMP, CONFIG_RCU_COMPAT_ARCH.
+	  (moving from URCU -> RCU).
+ * library names changes: liburcu becomes the "default" version, using
+ sys_membarrier() when available, and using liburcu-mb as a
+ fallback.
+ * The signal-based liburcu is renamed from liburcu to
+ liburcu-signal. People previously using the signal-based
+ "liburcu" will automatically be moved to the "default" liburcu
+ version.
+
+2009-11-16 Userspace RCU 0.3.1
+ * Add support for sparcv9 32-bit build.
+ * Update build system to use --host instead of --target.
+
+2009-11-03 Userspace RCU 0.3.0
+ * API change for the "deferred work" interface.
+
+2009-10-14 Userspace RCU 0.2.3
+ * Move to autotools.
+ * Automated architecture detection, with i386 fallback.
+ * Detect if NR_futex is on the system, fallback on portable
+ alternatives.
+ * Add configure mode for UP-only systems.
+
+2009-10-02 Userspace RCU 0.2.2
+	* Phases out rcu_publish_content() API.
+ * Adds type checking to urcu-pointer.h pointer exchange primitives.
+
+2009-10-02 Userspace RCU 0.2.1
+ * small header dependency fix for rculist.h.
+ * new "liburcu-bp.so" : "Bulletproof RCU", made especially for
+ the UST userspace tracer. It's a library that sacrifices a bit
+ of read-side performance for automatically monitoring thread
+ creation/removal. See README for details.
+
+2009-10-01 Userspace RCU 0.2
+ * Clarify usage of rcu_cmpxchg_pointer, rcu_xchg_pointer,
+ rcu_set_pointer.
+
+2009-09-29 Userspace RCU 0.1
+ * Initial release.
INCLUDES = -I$(top_builddir)/urcu
-AM_LDFLAGS=-lpthread
+# Add the -version-info directly here since we are only building
+# libraries that use the version-info
+AM_LDFLAGS=-lpthread -version-info $(URCU_LIBRARY_VERSION)
AM_CFLAGS=-Wall
SUBDIRS = . tests
-include_HEADERS = urcu.h $(top_srcdir)/urcu-*.h
+include_HEADERS = urcu.h urcu-bp.h urcu-call-rcu.h urcu-defer.h \
+ urcu-pointer.h urcu-qsbr.h
nobase_dist_include_HEADERS = urcu/compiler.h urcu/hlist.h urcu/list.h \
- urcu/rculist.h urcu/rcuhlist.h urcu/system.h urcu/urcu-futex.h \
- urcu/uatomic_generic.h urcu/arch_generic.h urcu/wfstack.h \
+ urcu/rculist.h urcu/rcuhlist.h urcu/system.h urcu/futex.h \
+ urcu/uatomic/generic.h urcu/arch/generic.h urcu/wfstack.h \
urcu/wfqueue.h urcu/rculfstack.h urcu/rculfqueue.h \
- urcu/wfqueue-static.h urcu/wfstack-static.h \
- urcu/rculfqueue-static.h urcu/rculfstack-static.h \
- urcu/urcu_ref.h urcu/rcurbtree.h
-nobase_nodist_include_HEADERS = urcu/arch.h urcu/uatomic_arch.h urcu/config.h
+ urcu/ref.h urcu/map/*.h urcu/static/*.h urcu/cds.h \
+ urcu/urcu_ref.h urcu/urcu-futex.h urcu/uatomic_arch.h \
+ urcu/rcurbtree.h
+nobase_nodist_include_HEADERS = urcu/arch.h urcu/uatomic.h urcu/config.h
-EXTRA_DIST = $(top_srcdir)/urcu/arch_*.h $(top_srcdir)/urcu/uatomic_arch_*.h \
+EXTRA_DIST = $(top_srcdir)/urcu/arch/*.h $(top_srcdir)/urcu/uatomic/*.h \
gpl-2.0.txt lgpl-2.1.txt lgpl-relicensing.txt \
- README LICENSE compat_arch_x86.c
+ README LICENSE compat_arch_x86.c \
+ urcu-call-rcu-impl.h urcu-defer-impl.h \
+ ChangeLog
if COMPAT_ARCH
COMPAT=compat_arch_@ARCHTYPE@.c
COMPAT+=compat_futex.c
endif
-lib_LTLIBRARIES = liburcu.la liburcu-qsbr.la liburcu-mb.la liburcu-signal.la \
- liburcu-bp.la liburcu-rbtree.la \
- libwfqueue.la libwfstack.la librculfqueue.la librculfstack.la
+lib_LTLIBRARIES = liburcu-cds-common.la \
+ liburcu.la liburcu-qsbr.la \
+ liburcu-mb.la liburcu-signal.la liburcu-bp.la \
+ liburcu-cds.la liburcu-cds-qsbr.la \
+ liburcu-cds-mb.la liburcu-cds-signal.la liburcu-cds-bp.la \
+ liburcu-rbtree.la
+
+liburcu_cds_common_la_SOURCES = wfqueue.c wfstack.c $(COMPAT)
liburcu_la_SOURCES = urcu.c urcu-pointer.c $(COMPAT)
-liburcu_la_LIBADD = -lwfqueue
-liburcu_la_DEPENDENCIES = libwfqueue.la
+liburcu_la_LIBADD = liburcu-cds-common.la
+liburcu_cds_la_SOURCES = rculfqueue.c rculfstack.c $(COMPAT)
liburcu_qsbr_la_SOURCES = urcu-qsbr.c urcu-pointer.c $(COMPAT)
-liburcu_qsbr_la_LIBADD = -lwfqueue
-liburcu_qsbr_la_DEPENDENCIES = libwfqueue.la
+liburcu_qsbr_la_LIBADD = liburcu-cds-common.la
+liburcu_cds_qsbr_la_SOURCES = rculfqueue.c rculfstack.c $(COMPAT)
+liburcu_cds_qsbr_la_CFLAGS = -DRCU_QSBR
liburcu_mb_la_SOURCES = urcu.c urcu-pointer.c $(COMPAT)
liburcu_mb_la_CFLAGS = -DRCU_MB
-liburcu_mb_la_LIBADD = -lwfqueue
-liburcu_mb_la_DEPENDENCIES = libwfqueue.la
+liburcu_mb_la_LIBADD = liburcu-cds-common.la
+liburcu_cds_mb_la_SOURCES = rculfqueue.c rculfstack.c $(COMPAT)
+liburcu_cds_mb_la_CFLAGS = -DRCU_MB
liburcu_signal_la_SOURCES = urcu.c urcu-pointer.c $(COMPAT)
liburcu_signal_la_CFLAGS = -DRCU_SIGNAL
-liburcu_signal_la_LIBADD = -lwfqueue
-liburcu_signal_la_DEPENDENCIES = libwfqueue.la
+liburcu_signal_la_LIBADD = liburcu-cds-common.la
+liburcu_cds_signal_la_SOURCES = rculfqueue.c rculfstack.c $(COMPAT)
+liburcu_cds_signal_la_CFLAGS = -DRCU_SIGNAL
liburcu_bp_la_SOURCES = urcu-bp.c urcu-pointer.c $(COMPAT)
-liburcu_bp_la_CFLAGS = -lwfqueue
-liburcu_bp_la_DEPENDENCIES = libwfqueue.la
+liburcu_bp_la_LIBADD = liburcu-cds-common.la
+liburcu_cds_bp_la_SOURCES = rculfqueue.c rculfstack.c $(COMPAT)
+liburcu_cds_bp_la_CFLAGS = -DRCU_BP
-libwfqueue_la_SOURCES = wfqueue.c $(COMPAT)
-libwfstack_la_SOURCES = wfstack.c $(COMPAT)
-librculfqueue_la_SOURCES = rculfqueue.c $(COMPAT)
-librculfstack_la_SOURCES = rculfstack.c $(COMPAT)
liburcu_rbtree_la_SOURCES = urcu-rbtree.c $(COMPAT)
pkgconfigdir = $(libdir)/pkgconfig
-pkgconfig_DATA = liburcu.pc liburcu-bp.pc
+pkgconfig_DATA = liburcu-cds.pc liburcu.pc liburcu-bp.pc liburcu-qsbr.pc \
+ liburcu-signal.pc liburcu-mb.pc
./configure
make
make install
+ ldconfig
Hints: Forcing 32-bit build:
* CFLAGS="-m32 -g -O2" ./configure
therefore not compatible with liburcu on x86 32-bit (i386, i486, i586, i686).
The problem has been reported to the gcc community:
http://www.mail-archive.com/gcc-bugs@gcc.gnu.org/msg281255.html
+- gcc 3.3 cannot match the "xchg" instruction on 32-bit x86 build.
+ See: http://kerneltrap.org/node/7507
- Alpha, ia64 and ARM architectures depend on 4.x gcc with atomic builtins
support.
#include <pthread.h>
#include <signal.h>
#include <assert.h>
-#include <urcu/uatomic_arch.h>
+#include <urcu/uatomic.h>
/*
* It does not really matter if the constructor is called before using
* generate an illegal instruction. Cannot catch this with
* linker tricks when optimizations are disabled.
*/
+ result = 0;
__asm__ __volatile__("ud2");
}
mutex_lock_signal_restore(&compat_mutex, &mask);
- return _new;
+ return result;
}
unsigned long _compat_uatomic_xchg(void *addr, unsigned long _new, int len)
* generate an illegal instruction. Cannot catch this with
* linker tricks when optimizations are disabled.
*/
+ retval = 0; /* silence gcc warnings */
__asm__ __volatile__("ud2");
}
mutex_lock_signal_restore(&compat_mutex, &mask);
* generate an illegal instruction. Cannot catch this with
* linker tricks when optimizations are disabled.
*/
+ retval = 0; /* silence gcc warnings */
__asm__ __volatile__("ud2");
}
mutex_lock_signal_restore(&compat_mutex, &mask);
* generate an illegal instruction. Cannot catch this with
* linker tricks when optimizations are disabled.
*/
+ result = 0; /* silence gcc warnings */
__asm__ __volatile__("ud2");
}
mutex_lock_signal_restore(&compat_mutex, &mask);
#include <assert.h>
#include <errno.h>
#include <poll.h>
+#include <stdint.h>
#include <urcu/arch.h>
-#include <urcu/urcu-futex.h>
+#include <urcu/futex.h>
static pthread_mutex_t compat_futex_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t compat_futex_cond = PTHREAD_COND_INITIALIZER;
* Waiter will relinquish the CPU until woken up.
*/
-int compat_futex_noasync(int *uaddr, int op, int val,
- const struct timespec *timeout, int *uaddr2, int val3)
+int compat_futex_noasync(int32_t *uaddr, int op, int32_t val,
+ const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
{
int ret, i, gret = 0;
* Waiter will busy-loop trying to read the condition.
*/
-int compat_futex_async(int *uaddr, int op, int val,
- const struct timespec *timeout, int *uaddr2, int val3)
+int compat_futex_async(int32_t *uaddr, int op, int32_t val,
+ const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
{
- int ret, i;
-
/*
* Check if NULL. Don't let users expect that they are taken into
* account.
default:
return -EINVAL;
}
+ return 0;
}
# Process this file with autoconf to produce a configure script.
-AC_INIT([userspace-rcu], [0.5.4], [mathieu dot desnoyers at efficios dot com])
+AC_INIT([userspace-rcu], [0.6.4], [mathieu dot desnoyers at efficios dot com])
+
+# Following the numbering scheme proposed by libtool for the library version
+# http://www.gnu.org/software/libtool/manual/html_node/Updating-version-info.html
+AC_SUBST([URCU_LIBRARY_VERSION], [1:0:0])
+
AC_CONFIG_AUX_DIR([config])
AC_CANONICAL_TARGET
AC_CANONICAL_HOST
)
fi
-UATOMICSRC=urcu/uatomic_arch_$ARCHTYPE.h
-ARCHSRC=urcu/arch_$ARCHTYPE.h
-if test "x$ARCHTYPE" != xx86 -a "x$ARCHTYPE" != xppc; then
- APISRC=tests/api_gcc.h
-else
- APISRC=tests/api_$ARCHTYPE.h
-fi
+UATOMICSRC=urcu/uatomic/$ARCHTYPE.h
+ARCHSRC=urcu/arch/$ARCHTYPE.h
if test "$ARCHTYPE" == "armv7l"; then
CFLAGS="-mcpu=cortex-a9 -mtune=cortex-a9 -O"
fi
AC_CONFIG_LINKS([
urcu/arch.h:$ARCHSRC
- urcu/uatomic_arch.h:$UATOMICSRC
- tests/api.h:$APISRC
+ urcu/uatomic.h:$UATOMICSRC
])
AC_CONFIG_FILES([
Makefile
tests/Makefile
+ liburcu-cds.pc
liburcu.pc
liburcu-bp.pc
liburcu-qsbr.pc
--- /dev/null
+prefix=@prefix@
+exec_prefix=@exec_prefix@
+libdir=@libdir@
+includedir=@includedir@
+
+Name: Userspace RCU Concurrent Data Structures
+Description: Data structures leveraging RCU and atomic operations to provide efficient concurrency-aware storage
+Version: @PACKAGE_VERSION@
+Requires:
+Libs: -L${libdir} -lurcu-cds
+Cflags: -I${includedir}
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
-/* Use the urcu symbols to select the appropriate rcu flavor at link time */
-#include "urcu.h"
+#define _LGPL_SOURCE
+
+#ifdef RCU_QSBR
+# include "urcu-qsbr.h"
+#elif defined(RCU_BP)
+# include "urcu-bp.h"
+#else
+# include "urcu.h"
+#endif
+
+#undef _LGPL_SOURCE
/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#include "urcu/rculfqueue.h"
-#include "urcu/rculfqueue-static.h"
+#include "urcu/static/rculfqueue.h"
/*
* library wrappers to be used by non-LGPL compatible source code.
_cds_lfq_init_rcu(q);
}
+int cds_lfq_destroy_rcu(struct cds_lfq_queue_rcu *q)
+{
+ return _cds_lfq_destroy_rcu(q);
+}
+
void cds_lfq_enqueue_rcu(struct cds_lfq_queue_rcu *q, struct cds_lfq_node_rcu *node)
{
_cds_lfq_enqueue_rcu(q, node);
}
struct cds_lfq_node_rcu *
-cds_lfq_dequeue_rcu(struct cds_lfq_queue_rcu *q, void (*release)(struct urcu_ref *))
+cds_lfq_dequeue_rcu(struct cds_lfq_queue_rcu *q)
{
- return _cds_lfq_dequeue_rcu(q, release);
+ return _cds_lfq_dequeue_rcu(q);
}
*/
/* Use the urcu symbols to select the appropriate rcu flavor at link time */
-#include "urcu.h"
+#define _LGPL_SOURCE
+
+#ifdef RCU_QSBR
+# include "urcu-qsbr.h"
+#elif defined(RCU_BP)
+# include "urcu-bp.h"
+#else
+# include "urcu.h"
+#endif
+
+#undef _LGPL_SOURCE
/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#include "urcu/rculfstack.h"
-#include "urcu/rculfstack-static.h"
+#include "urcu/static/rculfstack.h"
/*
* library wrappers to be used by non-LGPL compatible source code.
URCU_DEFER=$(top_srcdir)/urcu.c $(top_srcdir)/urcu-pointer.c $(top_srcdir)/wfqueue.c $(COMPAT)
URCU_LIB=$(top_builddir)/liburcu.la
+URCU_CDS_LIB=$(top_builddir)/liburcu-cds.la
URCU_QSBR_LIB=$(top_builddir)/liburcu-qsbr.la
+URCU_CDS_QSBR_LIB=$(top_builddir)/liburcu-cds-qsbr.la
URCU_MB_LIB=$(top_builddir)/liburcu-mb.la
+URCU_CDS_MB_LIB=$(top_builddir)/liburcu-cds-mb.la
URCU_SIGNAL_LIB=$(top_builddir)/liburcu-signal.la
+URCU_CDS_SIGNAL_LIB=$(top_builddir)/liburcu-cds-signal.la
URCU_BP_LIB=$(top_builddir)/liburcu-bp.la
-WFQUEUE_LIB=$(top_builddir)/libwfqueue.la
-WFSTACK_LIB=$(top_builddir)/libwfstack.la
-RCULFQUEUE_LIB=$(top_builddir)/librculfqueue.la
-RCULFSTACK_LIB=$(top_builddir)/librculfstack.la
-URCU_RBTREE=$(URCU) $(top_srcdir)/urcu-rbtree.c
+URCU_CDS_BP_LIB=$(top_builddir)/liburcu-cds-bp.la
+URCU_CDS_COMMON_LIB=$(top_builddir)/liburcu-cds-common.la
-EXTRA_DIST = $(top_srcdir)/tests/api_*.h
+URCU_RBTREE=$(URCU) $(top_srcdir)/urcu-rbtree.c
+EXTRA_DIST = $(top_srcdir)/tests/api.h runall.sh
test_urcu_SOURCES = test_urcu.c $(URCU)
rcutorture_urcu_SOURCES = urcutorture.c
rcutorture_urcu_CFLAGS = -DRCU_MEMBARRIER $(AM_CFLAGS)
-rcutorture_urcu_LDADD = $(URCU) $(WFQUEUE_LIB)
+rcutorture_urcu_LDADD = $(URCU)
rcutorture_urcu_mb_SOURCES = urcutorture.c
rcutorture_urcu_mb_CFLAGS = -DRCU_MB $(AM_CFLAGS)
-rcutorture_urcu_mb_LDADD = $(URCU_MB_LIB) $(WFQUEUE_LIB)
+rcutorture_urcu_mb_LDADD = $(URCU_MB_LIB)
rcutorture_qsbr_SOURCES = urcutorture.c
-rcutorture_qsbr_CFLAGS = -DRCU_QSBR $(AM_CFLAGS)
-rcutorture_qsbr_LDADD = $(URCU_QSBR_LIB) $(WFQUEUE_LIB)
+rcutorture_qsbr_CFLAGS = -DTORTURE_QSBR -DRCU_QSBR $(AM_CFLAGS)
+rcutorture_qsbr_LDADD = $(URCU_QSBR_LIB)
rcutorture_urcu_signal_SOURCES = urcutorture.c
rcutorture_urcu_signal_CFLAGS = -DRCU_SIGNAL $(AM_CFLAGS)
-rcutorture_urcu_signal_LDADD = $(URCU_SIGNAL_LIB) $(WFQUEUE_LIB)
+rcutorture_urcu_signal_LDADD = $(URCU_SIGNAL_LIB)
rcutorture_urcu_bp_SOURCES = urcutorture.c
rcutorture_urcu_bp_CFLAGS = -DRCU_BP $(AM_CFLAGS)
-rcutorture_urcu_bp_LDADD = $(URCU_BP_LIB) $(WFQUEUE_LIB)
+rcutorture_urcu_bp_LDADD = $(URCU_BP_LIB)
test_mutex_SOURCES = test_mutex.c $(URCU)
test_urcu_bp_dynamic_link_SOURCES = test_urcu_bp.c $(URCU_BP)
test_urcu_bp_dynamic_link_CFLAGS = -DDYNAMIC_LINK_TEST $(AM_CFLAGS)
-test_urcu_lfq_SOURCES = test_urcu_lfq.c $(URCU_DEFER)
-test_urcu_lfq_dynlink_SOURCES = test_urcu_lfq.c $(URCU_DEFER)
+test_urcu_lfq_SOURCES = test_urcu_lfq.c $(URCU) $(URCU_CDS_LIB)
+test_urcu_lfq_dynlink_SOURCES = test_urcu_lfq.c $(URCU)
test_urcu_lfq_dynlink_CFLAGS = -DDYNAMIC_LINK_TEST $(AM_CFLAGS)
-test_urcu_lfq_dynlink_LDADD = $(RCULFQUEUE_LIB)
+test_urcu_lfq_dynlink_LDADD = $(URCU_CDS_LIB)
-test_urcu_wfq_SOURCES = test_urcu_wfq.c
+test_urcu_wfq_SOURCES = test_urcu_wfq.c $(URCU_CDS_COMMON_LIB) $(COMPAT)
test_urcu_wfq_dynlink_SOURCES = test_urcu_wfq.c
test_urcu_wfq_dynlink_CFLAGS = -DDYNAMIC_LINK_TEST $(AM_CFLAGS)
-test_urcu_wfq_dynlink_LDADD = $(WFQUEUE_LIB)
+test_urcu_wfq_dynlink_LDADD = $(URCU_CDS_COMMON_LIB)
-test_urcu_lfs_SOURCES = test_urcu_lfs.c $(URCU_DEFER)
+test_urcu_lfs_SOURCES = test_urcu_lfs.c $(URCU_CDS_LIB) $(URCU_DEFER)
test_urcu_lfs_dynlink_SOURCES = test_urcu_lfs.c $(URCU_DEFER)
test_urcu_lfs_dynlink_CFLAGS = -DDYNAMIC_LINK_TEST $(AM_CFLAGS)
-test_urcu_lfs_dynlink_LDADD = $(RCULFSTACK_LIB)
+test_urcu_lfs_dynlink_LDADD = $(URCU_CDS_LIB)
-test_urcu_wfs_SOURCES = test_urcu_wfs.c
+test_urcu_wfs_SOURCES = test_urcu_wfs.c $(URCU_CDS_COMMON_LIB) $(COMPAT)
test_urcu_wfs_dynlink_SOURCES = test_urcu_wfs.c
test_urcu_wfs_dynlink_CFLAGS = -DDYNAMIC_LINK_TEST $(AM_CFLAGS)
-test_urcu_wfs_dynlink_LDADD = $(WFSTACK_LIB)
+test_urcu_wfs_dynlink_LDADD = $(URCU_CDS_COMMON_LIB)
test_urcu_rbtree_SOURCES = test_urcu_rbtree.c $(URCU_RBTREE)
--- /dev/null
+
+#ifndef _INCLUDE_API_H
+#define _INCLUDE_API_H
+
+#include "../config.h"
+
+/*
+ * common.h: Common Linux kernel-isms.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; but version 2 of the License only due
+ * to code included from the Linux kernel.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2006 Paul E. McKenney, IBM.
+ *
+ * Much code taken from the Linux kernel. For such code, the option
+ * to redistribute under later versions of GPL might not be available.
+ */
+
+#include <urcu/compiler.h>
+#include <urcu/arch.h>
+
+/*
+ * Machine parameters.
+ */
+
+#define ____cacheline_internodealigned_in_smp \
+ __attribute__((__aligned__(CAA_CACHE_LINE_SIZE)))
+
+/*
+ * api_pthreads.h: API mapping to pthreads environment.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version. However, please note that much
+ * of the code in this file derives from the Linux kernel, and that such
+ * code may not be available except under GPLv2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2006 Paul E. McKenney, IBM.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <limits.h>
+#include <sys/types.h>
+#define __USE_GNU
+#include <pthread.h>
+#include <sched.h>
+#include <sys/param.h>
+/* #include "atomic.h" */
+
+/*
+ * Exclusive locking primitives.
+ */
+
+typedef pthread_mutex_t spinlock_t;
+
+#define DEFINE_SPINLOCK(lock) spinlock_t lock = PTHREAD_MUTEX_INITIALIZER;
+#define __SPIN_LOCK_UNLOCKED(lockp) PTHREAD_MUTEX_INITIALIZER
+
+static void spin_lock_init(spinlock_t *sp)
+{
+ if (pthread_mutex_init(sp, NULL) != 0) {
+ perror("spin_lock_init:pthread_mutex_init");
+ exit(-1);
+ }
+}
+
+static void spin_lock(spinlock_t *sp)
+{
+ if (pthread_mutex_lock(sp) != 0) {
+ perror("spin_lock:pthread_mutex_lock");
+ exit(-1);
+ }
+}
+
+static void spin_unlock(spinlock_t *sp)
+{
+ if (pthread_mutex_unlock(sp) != 0) {
+ perror("spin_unlock:pthread_mutex_unlock");
+ exit(-1);
+ }
+}
+
+#define spin_lock_irqsave(l, f) do { f = 1; spin_lock(l); } while (0)
+#define spin_unlock_irqrestore(l, f) do { f = 0; spin_unlock(l); } while (0)
+
+/*
+ * Thread creation/destruction primitives.
+ */
+
+typedef pthread_t thread_id_t;
+
+#define NR_THREADS 128
+
+#define __THREAD_ID_MAP_EMPTY ((thread_id_t) 0)
+#define __THREAD_ID_MAP_WAITING ((thread_id_t) 1)
+thread_id_t __thread_id_map[NR_THREADS];
+spinlock_t __thread_id_map_mutex;
+
+#define for_each_thread(t) \
+ for (t = 0; t < NR_THREADS; t++)
+
+#define for_each_running_thread(t) \
+ for (t = 0; t < NR_THREADS; t++) \
+ if ((__thread_id_map[t] != __THREAD_ID_MAP_EMPTY) && \
+ (__thread_id_map[t] != __THREAD_ID_MAP_WAITING))
+
+#define for_each_tid(t, tid) \
+ for (t = 0; t < NR_THREADS; t++) \
+ if ((((tid) = __thread_id_map[t]) != __THREAD_ID_MAP_EMPTY) && \
+ ((tid) != __THREAD_ID_MAP_WAITING))
+
+pthread_key_t thread_id_key;
+
+/*
+ * Map the calling thread's pthread id to its small-integer slot in
+ * __thread_id_map[], caching the result in thread_id_key TLS (stored as
+ * index + 1 so the value is never NULL).  Aborts the process if the
+ * calling thread was never registered via create_thread()/smp_init().
+ */
+static int __smp_thread_id(void)
+{
+	int i;
+	thread_id_t tid = pthread_self();
+
+	/* Fast path: lockless scan; entries are only written under the mutex. */
+	for (i = 0; i < NR_THREADS; i++) {
+		if (__thread_id_map[i] == tid) {
+			long v = i + 1;	/* must be non-NULL. */
+
+			if (pthread_setspecific(thread_id_key, (void *)v) != 0) {
+				perror("pthread_setspecific");
+				exit(-1);
+			}
+			return i;
+		}
+	}
+	/*
+	 * Slow path: retry under the mutex in case the registering thread
+	 * was racing with us.  NOTE: the original code lacked braces here,
+	 * so "return i;" ran unconditionally on the first iteration.
+	 */
+	spin_lock(&__thread_id_map_mutex);
+	for (i = 0; i < NR_THREADS; i++) {
+		if (__thread_id_map[i] == tid) {
+			spin_unlock(&__thread_id_map_mutex);
+			return i;
+		}
+	}
+	spin_unlock(&__thread_id_map_mutex);
+	fprintf(stderr, "smp_thread_id: Rogue thread, id: %d(%#x)\n",
+		(int)tid, (int)tid);
+	exit(-1);
+}
+
+static int smp_thread_id(void)
+{
+ void *id;
+
+ id = pthread_getspecific(thread_id_key);
+ if (id == NULL)
+ return __smp_thread_id();
+ return (long)(id - 1);
+}
+
+static thread_id_t create_thread(void *(*func)(void *), void *arg)
+{
+ thread_id_t tid;
+ int i;
+
+ spin_lock(&__thread_id_map_mutex);
+ for (i = 0; i < NR_THREADS; i++) {
+ if (__thread_id_map[i] == __THREAD_ID_MAP_EMPTY)
+ break;
+ }
+ if (i >= NR_THREADS) {
+ spin_unlock(&__thread_id_map_mutex);
+ fprintf(stderr, "Thread limit of %d exceeded!\n", NR_THREADS);
+ exit(-1);
+ }
+ __thread_id_map[i] = __THREAD_ID_MAP_WAITING;
+ spin_unlock(&__thread_id_map_mutex);
+ if (pthread_create(&tid, NULL, func, arg) != 0) {
+ perror("create_thread:pthread_create");
+ exit(-1);
+ }
+ __thread_id_map[i] = tid;
+ return tid;
+}
+
+static void *wait_thread(thread_id_t tid)
+{
+ int i;
+ void *vp;
+
+ for (i = 0; i < NR_THREADS; i++) {
+ if (__thread_id_map[i] == tid)
+ break;
+ }
+ if (i >= NR_THREADS){
+ fprintf(stderr, "wait_thread: bad tid = %d(%#x)\n",
+ (int)tid, (int)tid);
+ exit(-1);
+ }
+ if (pthread_join(tid, &vp) != 0) {
+ perror("wait_thread:pthread_join");
+ exit(-1);
+ }
+ __thread_id_map[i] = __THREAD_ID_MAP_EMPTY;
+ return vp;
+}
+
+static void wait_all_threads(void)
+{
+ int i;
+ thread_id_t tid;
+
+ for (i = 1; i < NR_THREADS; i++) {
+ tid = __thread_id_map[i];
+ if (tid != __THREAD_ID_MAP_EMPTY &&
+ tid != __THREAD_ID_MAP_WAITING)
+ (void)wait_thread(tid);
+ }
+}
+
+#ifndef HAVE_CPU_SET_T
+typedef unsigned long cpu_set_t;
+# define CPU_ZERO(cpuset) do { *(cpuset) = 0; } while(0)
+# define CPU_SET(cpu, cpuset) do { *(cpuset) |= (1UL << (cpu)); } while(0)
+#endif
+
+static void run_on(int cpu)
+{
+#if HAVE_SCHED_SETAFFINITY
+ cpu_set_t mask;
+
+ CPU_ZERO(&mask);
+ CPU_SET(cpu, &mask);
+#if SCHED_SETAFFINITY_ARGS == 2
+ sched_setaffinity(0, &mask);
+#else
+ sched_setaffinity(0, sizeof(mask), &mask);
+#endif
+#endif /* HAVE_SCHED_SETAFFINITY */
+}
+
+/*
+ * timekeeping -- very crude -- should use MONOTONIC...
+ */
+
+long long get_microseconds(void)
+{
+ struct timeval tv;
+
+ if (gettimeofday(&tv, NULL) != 0)
+ abort();
+ return ((long long)tv.tv_sec) * 1000000LL + (long long)tv.tv_usec;
+}
+
+/*
+ * Per-thread variables.
+ */
+
+#define DEFINE_PER_THREAD(type, name) \
+ struct { \
+ __typeof__(type) v \
+ __attribute__((__aligned__(CAA_CACHE_LINE_SIZE))); \
+ } __per_thread_##name[NR_THREADS];
+#define DECLARE_PER_THREAD(type, name) extern DEFINE_PER_THREAD(type, name)
+
+#define per_thread(name, thread) __per_thread_##name[thread].v
+#define __get_thread_var(name) per_thread(name, smp_thread_id())
+
+#define init_per_thread(name, v) \
+ do { \
+ int __i_p_t_i; \
+ for (__i_p_t_i = 0; __i_p_t_i < NR_THREADS; __i_p_t_i++) \
+ per_thread(name, __i_p_t_i) = v; \
+ } while (0)
+
+DEFINE_PER_THREAD(int, smp_processor_id);
+
+/*
+ * Bug checks.
+ */
+
+#define BUG_ON(c) do { if (!(c)) abort(); } while (0)
+
+/*
+ * Initialization -- Must be called before calling any primitives.
+ */
+
+static void smp_init(void)
+{
+ int i;
+
+ spin_lock_init(&__thread_id_map_mutex);
+ __thread_id_map[0] = pthread_self();
+ for (i = 1; i < NR_THREADS; i++)
+ __thread_id_map[i] = __THREAD_ID_MAP_EMPTY;
+ init_per_thread(smp_processor_id, 0);
+ if (pthread_key_create(&thread_id_key, NULL) != 0) {
+ perror("pthread_key_create");
+ exit(-1);
+ }
+}
+
+#endif
+++ /dev/null
-
-#ifndef _INCLUDE_API_H
-#define _INCLUDE_API_H
-
-#include "../config.h"
-
-/*
- * common.h: Common Linux kernel-isms.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; but version 2 of the License only due
- * to code included from the Linux kernel.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * Copyright (c) 2006 Paul E. McKenney, IBM.
- *
- * Much code taken from the Linux kernel. For such code, the option
- * to redistribute under later versions of GPL might not be available.
- */
-
-#ifndef __always_inline
-#define __always_inline inline
-#endif
-
-#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
-#define BUILD_BUG_ON_ZERO(e) (sizeof(char[1 - 2 * !!(e)]) - 1)
-
-#ifdef __ASSEMBLY__
-# define stringify_in_c(...) __VA_ARGS__
-# define ASM_CONST(x) x
-#else
-/* This version of stringify will deal with commas... */
-# define __stringify_in_c(...) #__VA_ARGS__
-# define stringify_in_c(...) __stringify_in_c(__VA_ARGS__) " "
-# define __ASM_CONST(x) x##UL
-# define ASM_CONST(x) __ASM_CONST(x)
-#endif
-
-
-/*
- * arch-i386.h: Expose x86 atomic instructions. 80486 and better only.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, but version 2 only due to inclusion
- * of Linux-kernel code.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * Copyright (c) 2006 Paul E. McKenney, IBM.
- *
- * Much code taken from the Linux kernel. For such code, the option
- * to redistribute under later versions of GPL might not be available.
- */
-
-/*
- * Machine parameters.
- */
-
-/* #define CAA_CACHE_LINE_SIZE 64 */
-#define ____cacheline_internodealigned_in_smp \
- __attribute__((__aligned__(1 << 6)))
-
-#define LOCK_PREFIX "lock ; "
-
-#if 0 /* duplicate with arch_atomic.h */
-/*
- * Atomic data structure, initialization, and access.
- */
-
-typedef struct { volatile int counter; } atomic_t;
-
-#define ATOMIC_INIT(i) { (i) }
-
-#define atomic_read(v) ((v)->counter)
-#define atomic_set(v, i) (((v)->counter) = (i))
-
-/*
- * Atomic operations.
- */
-
-/**
- * atomic_add - add integer to atomic variable
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v.
- */
-
-static __inline__ void atomic_add(int i, atomic_t *v)
-{
- (void)__sync_fetch_and_add(&v->counter, i);
-}
-
-/**
- * atomic_sub - subtract the atomic variable
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v.
- */
-static __inline__ void atomic_sub(int i, atomic_t *v)
-{
- (void)__sync_fetch_and_add(&v->counter, -i);
-}
-
-/**
- * atomic_sub_and_test - subtract value from variable and test result
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v and returns
- * true if the result is zero, or false for all
- * other cases.
- */
-static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
-{
- return __sync_add_and_fetch(&v->counter, -i) == 0;
-}
-
-/**
- * atomic_inc - increment atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1.
- */
-static __inline__ void atomic_inc(atomic_t *v)
-{
- (void)__sync_fetch_and_add(&v->counter, 1);
-}
-
-/**
- * atomic_dec - decrement atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically decrements @v by 1.
- */
-static __inline__ void atomic_dec(atomic_t *v)
-{
- (void)__sync_fetch_and_add(&v->counter, -1);
-}
-
-/**
- * atomic_dec_and_test - decrement and test
- * @v: pointer of type atomic_t
- *
- * Atomically decrements @v by 1 and
- * returns true if the result is 0, or false for all other
- * cases.
- */
-static __inline__ int atomic_dec_and_test(atomic_t *v)
-{
- return __sync_add_and_fetch(&v->counter, -1) == 0;
-}
-
-/**
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-static __inline__ int atomic_inc_and_test(atomic_t *v)
-{
- return __sync_add_and_fetch(&v->counter, 1) == 0;
-}
-
-/**
- * atomic_add_negative - add and test if negative
- * @v: pointer of type atomic_t
- * @i: integer value to add
- *
- * Atomically adds @i to @v and returns true
- * if the result is negative, or false when
- * result is greater than or equal to zero.
- */
-static __inline__ int atomic_add_negative(int i, atomic_t *v)
-{
- return __sync_add_and_fetch(&v->counter, i) < 0;
-}
-
-/**
- * atomic_add_return - add and return
- * @v: pointer of type atomic_t
- * @i: integer value to add
- *
- * Atomically adds @i to @v and returns @i + @v
- */
-static __inline__ int atomic_add_return(int i, atomic_t *v)
-{
- return __sync_add_and_fetch(&v->counter, i);
-}
-
-static __inline__ int atomic_sub_return(int i, atomic_t *v)
-{
- return atomic_add_return(-i,v);
-}
-
-static inline unsigned int
-cmpxchg(volatile long *ptr, long oldval, long newval)
-{
- return __sync_val_compare_and_swap(ptr, oldval, newval);
-}
-
-#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-
-/**
- * atomic_add_unless - add unless the number is a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as it was not @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
- */
-#define atomic_add_unless(v, a, u) \
-({ \
- int c, old; \
- c = atomic_read(v); \
- for (;;) { \
- if (unlikely(c == (u))) \
- break; \
- old = atomic_cmpxchg((v), c, c + (a)); \
- if (likely(old == c)) \
- break; \
- c = old; \
- } \
- c != (u); \
-})
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
-
-#define atomic_inc_return(v) (atomic_add_return(1,v))
-#define atomic_dec_return(v) (atomic_sub_return(1,v))
-
-/* Atomic operations are already serializing on x86 */
-#define smp_mb__before_atomic_dec() cmm_barrier()
-#define smp_mb__after_atomic_dec() cmm_barrier()
-#define smp_mb__before_atomic_inc() cmm_barrier()
-#define smp_mb__after_atomic_inc() cmm_barrier()
-
-#endif //0 /* duplicate with arch_atomic.h */
-
-/*
- * api_pthreads.h: API mapping to pthreads environment.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version. However, please note that much
- * of the code in this file derives from the Linux kernel, and that such
- * code may not be available except under GPLv2.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * Copyright (c) 2006 Paul E. McKenney, IBM.
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <errno.h>
-#include <limits.h>
-#include <sys/types.h>
-#define __USE_GNU
-#include <pthread.h>
-#include <sched.h>
-#include <sys/param.h>
-/* #include "atomic.h" */
-
-/*
- * Default machine parameters.
- */
-
-#ifndef CAA_CACHE_LINE_SIZE
-/* #define CAA_CACHE_LINE_SIZE 128 */
-#endif /* #ifndef CAA_CACHE_LINE_SIZE */
-
-/*
- * Exclusive locking primitives.
- */
-
-typedef pthread_mutex_t spinlock_t;
-
-#define DEFINE_SPINLOCK(lock) spinlock_t lock = PTHREAD_MUTEX_INITIALIZER;
-#define __SPIN_LOCK_UNLOCKED(lockp) PTHREAD_MUTEX_INITIALIZER
-
-static void spin_lock_init(spinlock_t *sp)
-{
- if (pthread_mutex_init(sp, NULL) != 0) {
- perror("spin_lock_init:pthread_mutex_init");
- exit(-1);
- }
-}
-
-static void spin_lock(spinlock_t *sp)
-{
- if (pthread_mutex_lock(sp) != 0) {
- perror("spin_lock:pthread_mutex_lock");
- exit(-1);
- }
-}
-
-static void spin_unlock(spinlock_t *sp)
-{
- if (pthread_mutex_unlock(sp) != 0) {
- perror("spin_unlock:pthread_mutex_unlock");
- exit(-1);
- }
-}
-
-#define spin_lock_irqsave(l, f) do { f = 1; spin_lock(l); } while (0)
-#define spin_unlock_irqrestore(l, f) do { f = 0; spin_unlock(l); } while (0)
-
-/*
- * Thread creation/destruction primitives.
- */
-
-typedef pthread_t thread_id_t;
-
-#define NR_THREADS 128
-
-#define __THREAD_ID_MAP_EMPTY 0
-#define __THREAD_ID_MAP_WAITING 1
-thread_id_t __thread_id_map[NR_THREADS];
-spinlock_t __thread_id_map_mutex;
-
-#define for_each_thread(t) \
- for (t = 0; t < NR_THREADS; t++)
-
-#define for_each_running_thread(t) \
- for (t = 0; t < NR_THREADS; t++) \
- if ((__thread_id_map[t] != __THREAD_ID_MAP_EMPTY) && \
- (__thread_id_map[t] != __THREAD_ID_MAP_WAITING))
-
-pthread_key_t thread_id_key;
-
-static int __smp_thread_id(void)
-{
- int i;
- thread_id_t tid = pthread_self();
-
- for (i = 0; i < NR_THREADS; i++) {
- if (__thread_id_map[i] == tid) {
- long v = i + 1; /* must be non-NULL. */
-
- if (pthread_setspecific(thread_id_key, (void *)v) != 0) {
- perror("pthread_setspecific");
- exit(-1);
- }
- return i;
- }
- }
- spin_lock(&__thread_id_map_mutex);
- for (i = 0; i < NR_THREADS; i++) {
- if (__thread_id_map[i] == tid)
- spin_unlock(&__thread_id_map_mutex);
- return i;
- }
- spin_unlock(&__thread_id_map_mutex);
- fprintf(stderr, "smp_thread_id: Rogue thread, id: %d(%#x)\n",
- (int)tid, (int)tid);
- exit(-1);
-}
-
-static int smp_thread_id(void)
-{
- void *id;
-
- id = pthread_getspecific(thread_id_key);
- if (id == NULL)
- return __smp_thread_id();
- return (long)(id - 1);
-}
-
-static thread_id_t create_thread(void *(*func)(void *), void *arg)
-{
- thread_id_t tid;
- int i;
-
- spin_lock(&__thread_id_map_mutex);
- for (i = 0; i < NR_THREADS; i++) {
- if (__thread_id_map[i] == __THREAD_ID_MAP_EMPTY)
- break;
- }
- if (i >= NR_THREADS) {
- spin_unlock(&__thread_id_map_mutex);
- fprintf(stderr, "Thread limit of %d exceeded!\n", NR_THREADS);
- exit(-1);
- }
- __thread_id_map[i] = __THREAD_ID_MAP_WAITING;
- spin_unlock(&__thread_id_map_mutex);
- if (pthread_create(&tid, NULL, func, arg) != 0) {
- perror("create_thread:pthread_create");
- exit(-1);
- }
- __thread_id_map[i] = tid;
- return tid;
-}
-
-static void *wait_thread(thread_id_t tid)
-{
- int i;
- void *vp;
-
- for (i = 0; i < NR_THREADS; i++) {
- if (__thread_id_map[i] == tid)
- break;
- }
- if (i >= NR_THREADS){
- fprintf(stderr, "wait_thread: bad tid = %d(%#x)\n",
- (int)tid, (int)tid);
- exit(-1);
- }
- if (pthread_join(tid, &vp) != 0) {
- perror("wait_thread:pthread_join");
- exit(-1);
- }
- __thread_id_map[i] = __THREAD_ID_MAP_EMPTY;
- return vp;
-}
-
-static void wait_all_threads(void)
-{
- int i;
- thread_id_t tid;
-
- for (i = 1; i < NR_THREADS; i++) {
- tid = __thread_id_map[i];
- if (tid != __THREAD_ID_MAP_EMPTY &&
- tid != __THREAD_ID_MAP_WAITING)
- (void)wait_thread(tid);
- }
-}
-
-#ifndef HAVE_CPU_SET_T
-typedef unsigned long cpu_set_t;
-# define CPU_ZERO(cpuset) do { *(cpuset) = 0; } while(0)
-# define CPU_SET(cpu, cpuset) do { *(cpuset) |= (1UL << (cpu)); } while(0)
-#endif
-
-static void run_on(int cpu)
-{
-#if HAVE_SCHED_SETAFFINITY
- cpu_set_t mask;
-
- CPU_ZERO(&mask);
- CPU_SET(cpu, &mask);
-#if SCHED_SETAFFINITY_ARGS == 2
- sched_setaffinity(0, &mask);
-#else
- sched_setaffinity(0, sizeof(mask), &mask);
-#endif
-#endif /* HAVE_SCHED_SETAFFINITY */
-}
-
-/*
- * timekeeping -- very crude -- should use MONOTONIC...
- */
-
-long long get_microseconds(void)
-{
- struct timeval tv;
-
- if (gettimeofday(&tv, NULL) != 0)
- abort();
- return ((long long)tv.tv_sec) * 1000000LL + (long long)tv.tv_usec;
-}
-
-/*
- * Per-thread variables.
- */
-
-#define DEFINE_PER_THREAD(type, name) \
- struct { \
- __typeof__(type) v \
- __attribute__((__aligned__(CAA_CACHE_LINE_SIZE))); \
- } __per_thread_##name[NR_THREADS];
-#define DECLARE_PER_THREAD(type, name) extern DEFINE_PER_THREAD(type, name)
-
-#define per_thread(name, thread) __per_thread_##name[thread].v
-#define __get_thread_var(name) per_thread(name, smp_thread_id())
-
-#define init_per_thread(name, v) \
- do { \
- int __i_p_t_i; \
- for (__i_p_t_i = 0; __i_p_t_i < NR_THREADS; __i_p_t_i++) \
- per_thread(name, __i_p_t_i) = v; \
- } while (0)
-
-/*
- * CPU traversal primitives.
- */
-
-#ifndef NR_CPUS
-#define NR_CPUS 16
-#endif /* #ifndef NR_CPUS */
-
-#define for_each_possible_cpu(cpu) \
- for (cpu = 0; cpu < NR_CPUS; cpu++)
-#define for_each_online_cpu(cpu) \
- for (cpu = 0; cpu < NR_CPUS; cpu++)
-
-/*
- * Per-CPU variables.
- */
-
-#define DEFINE_PER_CPU(type, name) \
- struct { \
- __typeof__(type) v \
- __attribute__((__aligned__(CAA_CACHE_LINE_SIZE))); \
- } __per_cpu_##name[NR_CPUS]
-#define DECLARE_PER_CPU(type, name) extern DEFINE_PER_CPU(type, name)
-
-DEFINE_PER_THREAD(int, smp_processor_id);
-
-#define per_cpu(name, thread) __per_cpu_##name[thread].v
-#define __get_cpu_var(name) per_cpu(name, smp_processor_id())
-
-#define init_per_cpu(name, v) \
- do { \
- int __i_p_c_i; \
- for (__i_p_c_i = 0; __i_p_c_i < NR_CPUS; __i_p_c_i++) \
- per_cpu(name, __i_p_c_i) = v; \
- } while (0)
-
-/*
- * CPU state checking (crowbarred).
- */
-
-#define idle_cpu(cpu) 0
-#define in_softirq() 1
-#define hardirq_count() 0
-#define PREEMPT_SHIFT 0
-#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
-#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
-#define PREEMPT_BITS 8
-#define SOFTIRQ_BITS 8
-
-/*
- * CPU hotplug.
- */
-
-struct notifier_block {
- int (*notifier_call)(struct notifier_block *, unsigned long, void *);
- struct notifier_block *next;
- int priority;
-};
-
-#define CPU_ONLINE 0x0002 /* CPU (unsigned)v is up */
-#define CPU_UP_PREPARE 0x0003 /* CPU (unsigned)v coming up */
-#define CPU_UP_CANCELED 0x0004 /* CPU (unsigned)v NOT coming up */
-#define CPU_DOWN_PREPARE 0x0005 /* CPU (unsigned)v going down */
-#define CPU_DOWN_FAILED 0x0006 /* CPU (unsigned)v NOT going down */
-#define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */
-#define CPU_DYING 0x0008 /* CPU (unsigned)v not running any task,
- * not handling interrupts, soon dead */
-#define CPU_POST_DEAD 0x0009 /* CPU (unsigned)v dead, cpu_hotplug
- * lock is dropped */
-
-/* Used for CPU hotplug events occuring while tasks are frozen due to a suspend
- * operation in progress
- */
-#define CPU_TASKS_FROZEN 0x0010
-
-#define CPU_ONLINE_FROZEN (CPU_ONLINE | CPU_TASKS_FROZEN)
-#define CPU_UP_PREPARE_FROZEN (CPU_UP_PREPARE | CPU_TASKS_FROZEN)
-#define CPU_UP_CANCELED_FROZEN (CPU_UP_CANCELED | CPU_TASKS_FROZEN)
-#define CPU_DOWN_PREPARE_FROZEN (CPU_DOWN_PREPARE | CPU_TASKS_FROZEN)
-#define CPU_DOWN_FAILED_FROZEN (CPU_DOWN_FAILED | CPU_TASKS_FROZEN)
-#define CPU_DEAD_FROZEN (CPU_DEAD | CPU_TASKS_FROZEN)
-#define CPU_DYING_FROZEN (CPU_DYING | CPU_TASKS_FROZEN)
-
-/* Hibernation and suspend events */
-#define PM_HIBERNATION_PREPARE 0x0001 /* Going to hibernate */
-#define PM_POST_HIBERNATION 0x0002 /* Hibernation finished */
-#define PM_SUSPEND_PREPARE 0x0003 /* Going to suspend the system */
-#define PM_POST_SUSPEND 0x0004 /* Suspend finished */
-#define PM_RESTORE_PREPARE 0x0005 /* Going to restore a saved image */
-#define PM_POST_RESTORE 0x0006 /* Restore failed */
-
-#define NOTIFY_DONE 0x0000 /* Don't care */
-#define NOTIFY_OK 0x0001 /* Suits me */
-#define NOTIFY_STOP_MASK 0x8000 /* Don't call further */
-#define NOTIFY_BAD (NOTIFY_STOP_MASK|0x0002)
- /* Bad/Veto action */
-/*
- * Clean way to return from the notifier and stop further calls.
- */
-#define NOTIFY_STOP (NOTIFY_OK|NOTIFY_STOP_MASK)
-
-/*
- * Bug checks.
- */
-
-#define BUG_ON(c) do { if (!(c)) abort(); } while (0)
-
-/*
- * Initialization -- Must be called before calling any primitives.
- */
-
-static void smp_init(void)
-{
- int i;
-
- spin_lock_init(&__thread_id_map_mutex);
- __thread_id_map[0] = pthread_self();
- for (i = 1; i < NR_THREADS; i++)
- __thread_id_map[i] = __THREAD_ID_MAP_EMPTY;
- init_per_thread(smp_processor_id, 0);
- if (pthread_key_create(&thread_id_key, NULL) != 0) {
- perror("pthread_key_create");
- exit(-1);
- }
-}
-
-/* Taken from the Linux kernel source tree, so GPLv2-only!!! */
-
-#ifndef _LINUX_LIST_H
-#define _LINUX_LIST_H
-
-#define LIST_POISON1 ((void *) 0x00100100)
-#define LIST_POISON2 ((void *) 0x00200200)
-
-#if 0
-/*
- * Simple doubly linked list implementation.
- *
- * Some of the internal functions ("__xxx") are useful when
- * manipulating whole lists rather than single entries, as
- * sometimes we already know the next/prev entries and we can
- * generate better code by using them directly rather than
- * using the generic single-entry routines.
- */
-
-struct cds_list_head {
- struct cds_list_head *next, *prev;
-};
-
-#define CDS_LIST_HEAD_INIT(name) { &(name), &(name) }
-
-#define CDS_LIST_HEAD(name) \
- struct cds_list_head name = CDS_LIST_HEAD_INIT(name)
-
-static inline void CDS_INIT_LIST_HEAD(struct cds_list_head *list)
-{
- list->next = list;
- list->prev = list;
-}
-
-/*
- * Insert a new entry between two known consecutive entries.
- *
- * This is only for internal list manipulation where we know
- * the prev/next entries already!
- */
-#ifndef CONFIG_DEBUG_LIST
-static inline void __cds_list_add(struct cds_list_head *new,
- struct cds_list_head *prev,
- struct cds_list_head *next)
-{
- next->prev = new;
- new->next = next;
- new->prev = prev;
- prev->next = new;
-}
-#else
-extern void __cds_list_add(struct cds_list_head *new,
- struct cds_list_head *prev,
- struct cds_list_head *next);
-#endif
-
-/**
- * cds_list_add - add a new entry
- * @new: new entry to be added
- * @head: list head to add it after
- *
- * Insert a new entry after the specified head.
- * This is good for implementing stacks.
- */
-static inline void cds_list_add(struct cds_list_head *new, struct cds_list_head *head)
-{
- __cds_list_add(new, head, head->next);
-}
-
-
-/**
- * cds_list_add_tail - add a new entry
- * @new: new entry to be added
- * @head: list head to add it before
- *
- * Insert a new entry before the specified head.
- * This is useful for implementing queues.
- */
-static inline void cds_list_add_tail(struct cds_list_head *new, struct cds_list_head *head)
-{
- __cds_list_add(new, head->prev, head);
-}
-
-/*
- * Delete a list entry by making the prev/next entries
- * point to each other.
- *
- * This is only for internal list manipulation where we know
- * the prev/next entries already!
- */
-static inline void __cds_list_del(struct cds_list_head * prev, struct cds_list_head * next)
-{
- next->prev = prev;
- prev->next = next;
-}
-
-/**
- * cds_list_del - deletes entry from list.
- * @entry: the element to delete from the list.
- * Note: cds_list_empty() on entry does not return true after this, the entry is
- * in an undefined state.
- */
-#ifndef CONFIG_DEBUG_LIST
-static inline void cds_list_del(struct cds_list_head *entry)
-{
- __cds_list_del(entry->prev, entry->next);
- entry->next = LIST_POISON1;
- entry->prev = LIST_POISON2;
-}
-#else
-extern void cds_list_del(struct cds_list_head *entry);
-#endif
-
-/**
- * cds_list_replace - replace old entry by new one
- * @old : the element to be replaced
- * @new : the new element to insert
- *
- * If @old was empty, it will be overwritten.
- */
-static inline void cds_list_replace(struct cds_list_head *old,
- struct cds_list_head *new)
-{
- new->next = old->next;
- new->next->prev = new;
- new->prev = old->prev;
- new->prev->next = new;
-}
-
-static inline void cds_list_replace_init(struct cds_list_head *old,
- struct cds_list_head *new)
-{
- cds_list_replace(old, new);
- CDS_INIT_LIST_HEAD(old);
-}
-
-/**
- * cds_list_del_init - deletes entry from list and reinitialize it.
- * @entry: the element to delete from the list.
- */
-static inline void cds_list_del_init(struct cds_list_head *entry)
-{
- __cds_list_del(entry->prev, entry->next);
- CDS_INIT_LIST_HEAD(entry);
-}
-
-/**
- * cds_list_move - delete from one list and add as another's head
- * @list: the entry to move
- * @head: the head that will precede our entry
- */
-static inline void cds_list_move(struct cds_list_head *list, struct cds_list_head *head)
-{
- __cds_list_del(list->prev, list->next);
- cds_list_add(list, head);
-}
-
-/**
- * cds_list_move_tail - delete from one list and add as another's tail
- * @list: the entry to move
- * @head: the head that will follow our entry
- */
-static inline void cds_list_move_tail(struct cds_list_head *list,
- struct cds_list_head *head)
-{
- __cds_list_del(list->prev, list->next);
- cds_list_add_tail(list, head);
-}
-
-/**
- * list_is_last - tests whether @list is the last entry in list @head
- * @list: the entry to test
- * @head: the head of the list
- */
-static inline int list_is_last(const struct cds_list_head *list,
- const struct cds_list_head *head)
-{
- return list->next == head;
-}
-
-/**
- * cds_list_empty - tests whether a list is empty
- * @head: the list to test.
- */
-static inline int cds_list_empty(const struct cds_list_head *head)
-{
- return head->next == head;
-}
-
-/**
- * cds_list_empty_careful - tests whether a list is empty and not being modified
- * @head: the list to test
- *
- * Description:
- * tests whether a list is empty _and_ checks that no other CPU might be
- * in the process of modifying either member (next or prev)
- *
- * NOTE: using cds_list_empty_careful() without synchronization
- * can only be safe if the only activity that can happen
- * to the list entry is cds_list_del_init(). Eg. it cannot be used
- * if another CPU could re-list_add() it.
- */
-static inline int cds_list_empty_careful(const struct cds_list_head *head)
-{
- struct cds_list_head *next = head->next;
- return (next == head) && (next == head->prev);
-}
-
-/**
- * list_is_singular - tests whether a list has just one entry.
- * @head: the list to test.
- */
-static inline int list_is_singular(const struct cds_list_head *head)
-{
- return !list_empty(head) && (head->next == head->prev);
-}
-
-static inline void __list_cut_position(struct cds_list_head *list,
- struct cds_list_head *head, struct cds_list_head *entry)
-{
- struct cds_list_head *new_first = entry->next;
- list->next = head->next;
- list->next->prev = list;
- list->prev = entry;
- entry->next = list;
- head->next = new_first;
- new_first->prev = head;
-}
-
-/**
- * list_cut_position - cut a list into two
- * @list: a new list to add all removed entries
- * @head: a list with entries
- * @entry: an entry within head, could be the head itself
- * and if so we won't cut the list
- *
- * This helper moves the initial part of @head, up to and
- * including @entry, from @head to @list. You should
- * pass on @entry an element you know is on @head. @list
- * should be an empty list or a list you do not care about
- * losing its data.
- *
- */
-static inline void list_cut_position(struct cds_list_head *list,
- struct cds_list_head *head, struct cds_list_head *entry)
-{
- if (cds_list_empty(head))
- return;
- if (list_is_singular(head) &&
- (head->next != entry && head != entry))
- return;
- if (entry == head)
- CDS_INIT_LIST_HEAD(list);
- else
- __list_cut_position(list, head, entry);
-}
-
-static inline void __cds_list_splice(const struct cds_list_head *list,
- struct cds_list_head *prev,
- struct cds_list_head *next)
-{
- struct cds_list_head *first = list->next;
- struct cds_list_head *last = list->prev;
-
- first->prev = prev;
- prev->next = first;
-
- last->next = next;
- next->prev = last;
-}
-
-/**
- * cds_list_splice - join two lists, this is designed for stacks
- * @list: the new list to add.
- * @head: the place to add it in the first list.
- */
-static inline void cds_list_splice(const struct cds_list_head *list,
- struct cds_list_head *head)
-{
- if (!cds_list_empty(list))
- __cds_list_splice(list, head, head->next);
-}
-
-/**
- * cds_list_splice_tail - join two lists, each list being a queue
- * @list: the new list to add.
- * @head: the place to add it in the first list.
- */
-static inline void cds_list_splice_tail(struct cds_list_head *list,
- struct cds_list_head *head)
-{
- if (!cds_list_empty(list))
- __cds_list_splice(list, head->prev, head);
-}
-
-/**
- * cds_list_splice_init - join two lists and reinitialise the emptied list.
- * @list: the new list to add.
- * @head: the place to add it in the first list.
- *
- * The list at @list is reinitialised
- */
-static inline void cds_list_splice_init(struct cds_list_head *list,
- struct cds_list_head *head)
-{
- if (!cds_list_empty(list)) {
- __cds_list_splice(list, head, head->next);
- CDS_INIT_LIST_HEAD(list);
- }
-}
-
-/**
- * cds_list_splice_tail_init - join two lists and reinitialise the emptied list
- * @list: the new list to add.
- * @head: the place to add it in the first list.
- *
- * Each of the lists is a queue.
- * The list at @list is reinitialised
- */
-static inline void cds_list_splice_tail_init(struct cds_list_head *list,
- struct cds_list_head *head)
-{
- if (!cds_list_empty(list)) {
- __cds_list_splice(list, head->prev, head);
- CDS_INIT_LIST_HEAD(list);
- }
-}
-
-/**
- * cds_list_entry - get the struct for this entry
- * @ptr: the &struct cds_list_head pointer.
- * @type: the type of the struct this is embedded in.
- * @member: the name of the list_struct within the struct.
- */
-#define cds_list_entry(ptr, type, member) \
- caa_container_of(ptr, type, member)
-
-/**
- * list_first_entry - get the first element from a list
- * @ptr: the list head to take the element from.
- * @type: the type of the struct this is embedded in.
- * @member: the name of the list_struct within the struct.
- *
- * Note, that list is expected to be not empty.
- */
-#define list_first_entry(ptr, type, member) \
- cds_list_entry((ptr)->next, type, member)
-
-/**
- * cds_list_for_each - iterate over a list
- * @pos: the &struct cds_list_head to use as a loop cursor.
- * @head: the head for your list.
- */
-#define cds_list_for_each(pos, head) \
- for (pos = (head)->next; prefetch(pos->next), pos != (head); \
- pos = pos->next)
-
-/**
- * __cds_list_for_each - iterate over a list
- * @pos: the &struct cds_list_head to use as a loop cursor.
- * @head: the head for your list.
- *
- * This variant differs from cds_list_for_each() in that it's the
- * simplest possible list iteration code, no prefetching is done.
- * Use this for code that knows the list to be very short (empty
- * or 1 entry) most of the time.
- */
-#define __cds_list_for_each(pos, head) \
- for (pos = (head)->next; pos != (head); pos = pos->next)
-
-/**
- * cds_list_for_each_prev - iterate over a list backwards
- * @pos: the &struct cds_list_head to use as a loop cursor.
- * @head: the head for your list.
- */
-#define cds_list_for_each_prev(pos, head) \
- for (pos = (head)->prev; prefetch(pos->prev), pos != (head); \
- pos = pos->prev)
-
-/**
- * cds_list_for_each_safe - iterate over a list safe against removal of list entry
- * @pos: the &struct cds_list_head to use as a loop cursor.
- * @n: another &struct cds_list_head to use as temporary storage
- * @head: the head for your list.
- */
-#define cds_list_for_each_safe(pos, n, head) \
- for (pos = (head)->next, n = pos->next; pos != (head); \
- pos = n, n = pos->next)
-
-/**
- * cds_list_for_each_prev_safe - iterate over a list backwards safe against removal of list entry
- * @pos: the &struct cds_list_head to use as a loop cursor.
- * @n: another &struct cds_list_head to use as temporary storage
- * @head: the head for your list.
- */
-#define cds_list_for_each_prev_safe(pos, n, head) \
- for (pos = (head)->prev, n = pos->prev; \
- prefetch(pos->prev), pos != (head); \
- pos = n, n = pos->prev)
-
-/**
- * cds_list_for_each_entry - iterate over list of given type
- * @pos: the type * to use as a loop cursor.
- * @head: the head for your list.
- * @member: the name of the list_struct within the struct.
- */
-#define cds_list_for_each_entry(pos, head, member) \
- for (pos = cds_list_entry((head)->next, typeof(*pos), member); \
- prefetch(pos->member.next), &pos->member != (head); \
- pos = cds_list_entry(pos->member.next, typeof(*pos), member))
-
-/**
- * cds_list_for_each_entry_reverse - iterate backwards over list of given type.
- * @pos: the type * to use as a loop cursor.
- * @head: the head for your list.
- * @member: the name of the list_struct within the struct.
- */
-#define cds_list_for_each_entry_reverse(pos, head, member) \
- for (pos = cds_list_entry((head)->prev, typeof(*pos), member); \
- prefetch(pos->member.prev), &pos->member != (head); \
- pos = cds_list_entry(pos->member.prev, typeof(*pos), member))
-
-/**
- * list_prepare_entry - prepare a pos entry for use in cds_list_for_each_entry_continue()
- * @pos: the type * to use as a start point
- * @head: the head of the list
- * @member: the name of the list_struct within the struct.
- *
- * Prepares a pos entry for use as a start point in cds_list_for_each_entry_continue().
- */
-#define list_prepare_entry(pos, head, member) \
- ((pos) ? : cds_list_entry(head, typeof(*pos), member))
-
-/**
- * cds_list_for_each_entry_continue - continue iteration over list of given type
- * @pos: the type * to use as a loop cursor.
- * @head: the head for your list.
- * @member: the name of the list_struct within the struct.
- *
- * Continue to iterate over list of given type, continuing after
- * the current position.
- */
-#define cds_list_for_each_entry_continue(pos, head, member) \
- for (pos = cds_list_entry(pos->member.next, typeof(*pos), member); \
- prefetch(pos->member.next), &pos->member != (head); \
- pos = cds_list_entry(pos->member.next, typeof(*pos), member))
-
-/**
- * cds_list_for_each_entry_continue_reverse - iterate backwards from the given point
- * @pos: the type * to use as a loop cursor.
- * @head: the head for your list.
- * @member: the name of the list_struct within the struct.
- *
- * Start to iterate over list of given type backwards, continuing after
- * the current position.
- */
-#define cds_list_for_each_entry_continue_reverse(pos, head, member) \
- for (pos = cds_list_entry(pos->member.prev, typeof(*pos), member); \
- prefetch(pos->member.prev), &pos->member != (head); \
- pos = cds_list_entry(pos->member.prev, typeof(*pos), member))
-
-/**
- * cds_list_for_each_entry_from - iterate over list of given type from the current point
- * @pos: the type * to use as a loop cursor.
- * @head: the head for your list.
- * @member: the name of the list_struct within the struct.
- *
- * Iterate over list of given type, continuing from current position.
- */
-#define cds_list_for_each_entry_from(pos, head, member) \
- for (; prefetch(pos->member.next), &pos->member != (head); \
- pos = cds_list_entry(pos->member.next, typeof(*pos), member))
-
-/**
- * cds_list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
- * @pos: the type * to use as a loop cursor.
- * @n: another type * to use as temporary storage
- * @head: the head for your list.
- * @member: the name of the list_struct within the struct.
- */
-#define cds_list_for_each_entry_safe(pos, n, head, member) \
- for (pos = cds_list_entry((head)->next, typeof(*pos), member), \
- n = cds_list_entry(pos->member.next, typeof(*pos), member); \
- &pos->member != (head); \
- pos = n, n = cds_list_entry(n->member.next, typeof(*n), member))
-
-/**
- * cds_list_for_each_entry_safe_continue
- * @pos: the type * to use as a loop cursor.
- * @n: another type * to use as temporary storage
- * @head: the head for your list.
- * @member: the name of the list_struct within the struct.
- *
- * Iterate over list of given type, continuing after current point,
- * safe against removal of list entry.
- */
-#define cds_list_for_each_entry_safe_continue(pos, n, head, member) \
- for (pos = cds_list_entry(pos->member.next, typeof(*pos), member), \
- n = cds_list_entry(pos->member.next, typeof(*pos), member); \
- &pos->member != (head); \
- pos = n, n = cds_list_entry(n->member.next, typeof(*n), member))
-
-/**
- * cds_list_for_each_entry_safe_from
- * @pos: the type * to use as a loop cursor.
- * @n: another type * to use as temporary storage
- * @head: the head for your list.
- * @member: the name of the list_struct within the struct.
- *
- * Iterate over list of given type from current point, safe against
- * removal of list entry.
- */
-#define cds_list_for_each_entry_safe_from(pos, n, head, member) \
- for (n = cds_list_entry(pos->member.next, typeof(*pos), member); \
- &pos->member != (head); \
- pos = n, n = cds_list_entry(n->member.next, typeof(*n), member))
-
-/**
- * cds_list_for_each_entry_safe_reverse
- * @pos: the type * to use as a loop cursor.
- * @n: another type * to use as temporary storage
- * @head: the head for your list.
- * @member: the name of the list_struct within the struct.
- *
- * Iterate backwards over list of given type, safe against removal
- * of list entry.
- */
-#define cds_list_for_each_entry_safe_reverse(pos, n, head, member) \
- for (pos = cds_list_entry((head)->prev, typeof(*pos), member), \
- n = cds_list_entry(pos->member.prev, typeof(*pos), member); \
- &pos->member != (head); \
- pos = n, n = cds_list_entry(n->member.prev, typeof(*n), member))
-
-#endif //0
-
-/*
- * Double linked lists with a single pointer list head.
- * Mostly useful for hash tables where the two pointer list head is
- * too wasteful.
- * You lose the ability to access the tail in O(1).
- */
-
-struct cds_hlist_head {
- struct cds_hlist_node *first;
-};
-
-struct cds_hlist_node {
- struct cds_hlist_node *next, **pprev;
-};
-
-#define HLIST_HEAD_INIT { .first = NULL }
-#define HLIST_HEAD(name) struct cds_hlist_head name = { .first = NULL }
-#define CDS_INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
-static inline void INIT_HLIST_NODE(struct cds_hlist_node *h)
-{
- h->next = NULL;
- h->pprev = NULL;
-}
-
-static inline int hlist_unhashed(const struct cds_hlist_node *h)
-{
- return !h->pprev;
-}
-
-static inline int hlist_empty(const struct cds_hlist_head *h)
-{
- return !h->first;
-}
-
-static inline void __cds_hlist_del(struct cds_hlist_node *n)
-{
- struct cds_hlist_node *next = n->next;
- struct cds_hlist_node **pprev = n->pprev;
- *pprev = next;
- if (next)
- next->pprev = pprev;
-}
-
-static inline void cds_hlist_del(struct cds_hlist_node *n)
-{
- __cds_hlist_del(n);
- n->next = LIST_POISON1;
- n->pprev = LIST_POISON2;
-}
-
-static inline void cds_hlist_del_init(struct cds_hlist_node *n)
-{
- if (!hlist_unhashed(n)) {
- __cds_hlist_del(n);
- INIT_HLIST_NODE(n);
- }
-}
-
-static inline void cds_hlist_add_head(struct cds_hlist_node *n, struct cds_hlist_head *h)
-{
- struct cds_hlist_node *first = h->first;
- n->next = first;
- if (first)
- first->pprev = &n->next;
- h->first = n;
- n->pprev = &h->first;
-}
-
-/* next must be != NULL */
-static inline void hlist_add_before(struct cds_hlist_node *n,
- struct cds_hlist_node *next)
-{
- n->pprev = next->pprev;
- n->next = next;
- next->pprev = &n->next;
- *(n->pprev) = n;
-}
-
-static inline void hlist_add_after(struct cds_hlist_node *n,
- struct cds_hlist_node *next)
-{
- next->next = n->next;
- n->next = next;
- next->pprev = &n->next;
-
- if(next->next)
- next->next->pprev = &next->next;
-}
-
-/*
- * Move a list from one list head to another. Fixup the pprev
- * reference of the first entry if it exists.
- */
-static inline void hlist_move_list(struct cds_hlist_head *old,
- struct cds_hlist_head *new)
-{
- new->first = old->first;
- if (new->first)
- new->first->pprev = &new->first;
- old->first = NULL;
-}
-
-#define cds_hlist_entry(ptr, type, member) caa_container_of(ptr,type,member)
-
-#define cds_hlist_for_each(pos, head) \
- for (pos = (head)->first; pos && ({ prefetch(pos->next); 1; }); \
- pos = pos->next)
-
-#define cds_hlist_for_each_safe(pos, n, head) \
- for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
- pos = n)
-
-/**
- * cds_hlist_for_each_entry - iterate over list of given type
- * @tpos: the type * to use as a loop cursor.
- * @pos: the &struct cds_hlist_node to use as a loop cursor.
- * @head: the head for your list.
- * @member: the name of the cds_hlist_node within the struct.
- */
-#define cds_hlist_for_each_entry(tpos, pos, head, member) \
- for (pos = (head)->first; \
- pos && ({ prefetch(pos->next); 1;}) && \
- ({ tpos = cds_hlist_entry(pos, typeof(*tpos), member); 1;}); \
- pos = pos->next)
-
-/**
- * cds_hlist_for_each_entry_continue - iterate over a hlist continuing after current point
- * @tpos: the type * to use as a loop cursor.
- * @pos: the &struct cds_hlist_node to use as a loop cursor.
- * @member: the name of the cds_hlist_node within the struct.
- */
-#define cds_hlist_for_each_entry_continue(tpos, pos, member) \
- for (pos = (pos)->next; \
- pos && ({ prefetch(pos->next); 1;}) && \
- ({ tpos = cds_hlist_entry(pos, typeof(*tpos), member); 1;}); \
- pos = pos->next)
-
-/**
- * cds_hlist_for_each_entry_from - iterate over a hlist continuing from current point
- * @tpos: the type * to use as a loop cursor.
- * @pos: the &struct cds_hlist_node to use as a loop cursor.
- * @member: the name of the cds_hlist_node within the struct.
- */
-#define cds_hlist_for_each_entry_from(tpos, pos, member) \
- for (; pos && ({ prefetch(pos->next); 1;}) && \
- ({ tpos = cds_hlist_entry(pos, typeof(*tpos), member); 1;}); \
- pos = pos->next)
-
-/**
- * cds_hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
- * @tpos: the type * to use as a loop cursor.
- * @pos: the &struct cds_hlist_node to use as a loop cursor.
- * @n: another &struct cds_hlist_node to use as temporary storage
- * @head: the head for your list.
- * @member: the name of the cds_hlist_node within the struct.
- */
-#define cds_hlist_for_each_entry_safe(tpos, pos, n, head, member) \
- for (pos = (head)->first; \
- pos && ({ n = pos->next; 1; }) && \
- ({ tpos = cds_hlist_entry(pos, typeof(*tpos), member); 1;}); \
- pos = n)
-
-#endif
-
-#endif
+++ /dev/null
-/* MECHANICALLY GENERATED, DO NOT EDIT!!! */
-
-#ifndef _INCLUDE_API_H
-#define _INCLUDE_API_H
-
-/*
- * common.h: Common Linux kernel-isms.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; but version 2 of the License only due
- * to code included from the Linux kernel.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * Copyright (c) 2006 Paul E. McKenney, IBM.
- *
- * Much code taken from the Linux kernel. For such code, the option
- * to redistribute under later versions of GPL might not be available.
- */
-
-#include <urcu/arch.h>
-
-#ifndef __always_inline
-#define __always_inline inline
-#endif
-
-#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
-#define BUILD_BUG_ON_ZERO(e) (sizeof(char[1 - 2 * !!(e)]) - 1)
-
-#ifdef __ASSEMBLY__
-# define stringify_in_c(...) __VA_ARGS__
-# define ASM_CONST(x) x
-#else
-/* This version of stringify will deal with commas... */
-# define __stringify_in_c(...) #__VA_ARGS__
-# define stringify_in_c(...) __stringify_in_c(__VA_ARGS__) " "
-# define __ASM_CONST(x) x##UL
-# define ASM_CONST(x) __ASM_CONST(x)
-#endif
-
-
-/*
- * arch-ppc64.h: Expose PowerPC atomic instructions.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; but version 2 of the License only due
- * to code included from the Linux kernel.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * Copyright (c) 2006 Paul E. McKenney, IBM.
- *
- * Much code taken from the Linux kernel. For such code, the option
- * to redistribute under later versions of GPL might not be available.
- */
-
-/*
- * Machine parameters.
- */
-
-#define CONFIG_PPC64
-
-/*#define CAA_CACHE_LINE_SIZE 128 */
-#define ____cacheline_internodealigned_in_smp \
- __attribute__((__aligned__(1 << 7)))
-
-#if 0 /* duplicate with arch_atomic.h */
-
-/*
- * Atomic data structure, initialization, and access.
- */
-
-typedef struct { volatile int counter; } atomic_t;
-
-#define ATOMIC_INIT(i) { (i) }
-
-#define atomic_read(v) ((v)->counter)
-#define atomic_set(v, i) (((v)->counter) = (i))
-
-/*
- * Atomic operations.
- */
-
-#define LWSYNC lwsync
-#define PPC405_ERR77(ra,rb)
-#ifdef CONFIG_SMP
-# define LWSYNC_ON_SMP stringify_in_c(LWSYNC) "\n"
-# define ISYNC_ON_SMP "\n\tisync\n"
-#else
-# define LWSYNC_ON_SMP
-# define ISYNC_ON_SMP
-#endif
-
-/*
- * Atomic exchange
- *
- * Changes the memory location '*ptr' to be val and returns
- * the previous value stored there.
- */
-static __always_inline unsigned long
-__xchg_u32(volatile void *p, unsigned long val)
-{
- unsigned long prev;
-
- __asm__ __volatile__(
- LWSYNC_ON_SMP
-"1: lwarx %0,0,%2 \n"
- PPC405_ERR77(0,%2)
-" stwcx. %3,0,%2 \n\
- bne- 1b"
- ISYNC_ON_SMP
- : "=&r" (prev), "+m" (*(volatile unsigned int *)p)
- : "r" (p), "r" (val)
- : "cc", "memory");
-
- return prev;
-}
-
-/*
- * Atomic exchange
- *
- * Changes the memory location '*ptr' to be val and returns
- * the previous value stored there.
- */
-static __always_inline unsigned long
-__xchg_u32_local(volatile void *p, unsigned long val)
-{
- unsigned long prev;
-
- __asm__ __volatile__(
-"1: lwarx %0,0,%2 \n"
- PPC405_ERR77(0,%2)
-" stwcx. %3,0,%2 \n\
- bne- 1b"
- : "=&r" (prev), "+m" (*(volatile unsigned int *)p)
- : "r" (p), "r" (val)
- : "cc", "memory");
-
- return prev;
-}
-
-#ifdef CONFIG_PPC64
-static __always_inline unsigned long
-__xchg_u64(volatile void *p, unsigned long val)
-{
- unsigned long prev;
-
- __asm__ __volatile__(
- LWSYNC_ON_SMP
-"1: ldarx %0,0,%2 \n"
- PPC405_ERR77(0,%2)
-" stdcx. %3,0,%2 \n\
- bne- 1b"
- ISYNC_ON_SMP
- : "=&r" (prev), "+m" (*(volatile unsigned long *)p)
- : "r" (p), "r" (val)
- : "cc", "memory");
-
- return prev;
-}
-
-static __always_inline unsigned long
-__xchg_u64_local(volatile void *p, unsigned long val)
-{
- unsigned long prev;
-
- __asm__ __volatile__(
-"1: ldarx %0,0,%2 \n"
- PPC405_ERR77(0,%2)
-" stdcx. %3,0,%2 \n\
- bne- 1b"
- : "=&r" (prev), "+m" (*(volatile unsigned long *)p)
- : "r" (p), "r" (val)
- : "cc", "memory");
-
- return prev;
-}
-#endif
-
-/*
- * This function doesn't exist, so you'll get a linker error
- * if something tries to do an invalid xchg().
- */
-extern void __xchg_called_with_bad_pointer(void);
-
-static __always_inline unsigned long
-__xchg(volatile void *ptr, unsigned long x, unsigned int size)
-{
- switch (size) {
- case 4:
- return __xchg_u32(ptr, x);
-#ifdef CONFIG_PPC64
- case 8:
- return __xchg_u64(ptr, x);
-#endif
- }
- __xchg_called_with_bad_pointer();
- return x;
-}
-
-static __always_inline unsigned long
-__xchg_local(volatile void *ptr, unsigned long x, unsigned int size)
-{
- switch (size) {
- case 4:
- return __xchg_u32_local(ptr, x);
-#ifdef CONFIG_PPC64
- case 8:
- return __xchg_u64_local(ptr, x);
-#endif
- }
- __xchg_called_with_bad_pointer();
- return x;
-}
-#define xchg(ptr,x) \
- ({ \
- __typeof__(*(ptr)) _x_ = (x); \
- (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
- })
-
-#define xchg_local(ptr,x) \
- ({ \
- __typeof__(*(ptr)) _x_ = (x); \
- (__typeof__(*(ptr))) __xchg_local((ptr), \
- (unsigned long)_x_, sizeof(*(ptr))); \
- })
-
-/*
- * Compare and exchange - if *p == old, set it to new,
- * and return the old value of *p.
- */
-#define __HAVE_ARCH_CMPXCHG 1
-
-static __always_inline unsigned long
-__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
-{
- unsigned int prev;
-
- __asm__ __volatile__ (
- LWSYNC_ON_SMP
-"1: lwarx %0,0,%2 # __cmpxchg_u32\n\
- cmpw 0,%0,%3\n\
- bne- 2f\n"
- PPC405_ERR77(0,%2)
-" stwcx. %4,0,%2\n\
- bne- 1b"
- ISYNC_ON_SMP
- "\n\
-2:"
- : "=&r" (prev), "+m" (*p)
- : "r" (p), "r" (old), "r" (new)
- : "cc", "memory");
-
- return prev;
-}
-
-static __always_inline unsigned long
-__cmpxchg_u32_local(volatile unsigned int *p, unsigned long old,
- unsigned long new)
-{
- unsigned int prev;
-
- __asm__ __volatile__ (
-"1: lwarx %0,0,%2 # __cmpxchg_u32\n\
- cmpw 0,%0,%3\n\
- bne- 2f\n"
- PPC405_ERR77(0,%2)
-" stwcx. %4,0,%2\n\
- bne- 1b"
- "\n\
-2:"
- : "=&r" (prev), "+m" (*p)
- : "r" (p), "r" (old), "r" (new)
- : "cc", "memory");
-
- return prev;
-}
-
-#ifdef CONFIG_PPC64
-static __always_inline unsigned long
-__cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
-{
- unsigned long prev;
-
- __asm__ __volatile__ (
- LWSYNC_ON_SMP
-"1: ldarx %0,0,%2 # __cmpxchg_u64\n\
- cmpd 0,%0,%3\n\
- bne- 2f\n\
- stdcx. %4,0,%2\n\
- bne- 1b"
- ISYNC_ON_SMP
- "\n\
-2:"
- : "=&r" (prev), "+m" (*p)
- : "r" (p), "r" (old), "r" (new)
- : "cc", "memory");
-
- return prev;
-}
-
-static __always_inline unsigned long
-__cmpxchg_u64_local(volatile unsigned long *p, unsigned long old,
- unsigned long new)
-{
- unsigned long prev;
-
- __asm__ __volatile__ (
-"1: ldarx %0,0,%2 # __cmpxchg_u64\n\
- cmpd 0,%0,%3\n\
- bne- 2f\n\
- stdcx. %4,0,%2\n\
- bne- 1b"
- "\n\
-2:"
- : "=&r" (prev), "+m" (*p)
- : "r" (p), "r" (old), "r" (new)
- : "cc", "memory");
-
- return prev;
-}
-#endif
-
-/* This function doesn't exist, so you'll get a linker error
- if something tries to do an invalid cmpxchg(). */
-extern void __cmpxchg_called_with_bad_pointer(void);
-
-static __always_inline unsigned long
-__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
- unsigned int size)
-{
- switch (size) {
- case 4:
- return __cmpxchg_u32(ptr, old, new);
-#ifdef CONFIG_PPC64
- case 8:
- return __cmpxchg_u64(ptr, old, new);
-#endif
- }
- __cmpxchg_called_with_bad_pointer();
- return old;
-}
-
-static __always_inline unsigned long
-__cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
- unsigned int size)
-{
- switch (size) {
- case 4:
- return __cmpxchg_u32_local(ptr, old, new);
-#ifdef CONFIG_PPC64
- case 8:
- return __cmpxchg_u64_local(ptr, old, new);
-#endif
- }
- __cmpxchg_called_with_bad_pointer();
- return old;
-}
-
-#define cmpxchg(ptr, o, n) \
- ({ \
- __typeof__(*(ptr)) _o_ = (o); \
- __typeof__(*(ptr)) _n_ = (n); \
- (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
- (unsigned long)_n_, sizeof(*(ptr))); \
- })
-
-
-#define cmpxchg_local(ptr, o, n) \
- ({ \
- __typeof__(*(ptr)) _o_ = (o); \
- __typeof__(*(ptr)) _n_ = (n); \
- (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_, \
- (unsigned long)_n_, sizeof(*(ptr))); \
- })
-
-#ifdef CONFIG_PPC64
-/*
- * We handle most unaligned accesses in hardware. On the other hand
- * unaligned DMA can be very expensive on some ppc64 IO chips (it does
- * powers of 2 writes until it reaches sufficient alignment).
- *
- * Based on this we disable the IP header alignment in network drivers.
- * We also modify NET_SKB_PAD to be a cacheline in size, thus maintaining
- * cacheline alignment of buffers.
- */
-#define NET_IP_ALIGN 0
-#define NET_SKB_PAD L1_CACHE_BYTES
-
-#define cmpxchg64(ptr, o, n) \
- ({ \
- BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg((ptr), (o), (n)); \
- })
-#define cmpxchg64_local(ptr, o, n) \
- ({ \
- BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg_local((ptr), (o), (n)); \
- })
-#endif
-
-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-
-/**
- * atomic_add - add integer to atomic variable
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @a to @v.
- */
-static __inline__ void atomic_add(int a, atomic_t *v)
-{
- int t;
-
- __asm__ __volatile__(
- "1: lwarx %0,0,%3 # atomic_add\n\
- add %0,%2,%0 \n\
- stwcx. %0,0,%3 \n\
- bne- 1b"
- : "=&r" (t), "+m" (v->counter)
- : "r" (a), "r" (&v->counter)
- : "cc");
-}
-
-/**
- * atomic_sub - subtract the atomic variable
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @a from @v.
- */
-static __inline__ void atomic_sub(int a, atomic_t *v)
-{
- int t;
-
- __asm__ __volatile__(
- "1: lwarx %0,0,%3 # atomic_sub \n\
- subf %0,%2,%0 \n\
- stwcx. %0,0,%3 \n\
- bne- 1b"
- : "=&r" (t), "+m" (v->counter)
- : "r" (a), "r" (&v->counter)
- : "cc");
-}
-
-static __inline__ atomic_sub_return(int a, atomic_t *v)
-{
- int t;
-
- __asm__ __volatile__(
- "lwsync\n\
- 1: lwarx %0,0,%2 # atomic_sub_return\n\
- subf %0,%1,%0\n\
- stwcx. %0,0,%2 \n\
- bne- 1b \n\
- isync"
- : "=&r" (t)
- : "r" (a), "r" (&v->counter)
- : "cc", "memory");
-
- return t;
-}
-
-/**
- * atomic_sub_and_test - subtract value from variable and test result
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v and returns
- * true if the result is zero, or false for all
- * other cases.
- */
-static __inline__ int atomic_sub_and_test(int a, atomic_t *v)
-{
- return atomic_sub_return(a, v) == 0;
-}
-
-/**
- * atomic_inc - increment atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1.
- */
-static __inline__ void atomic_inc(atomic_t *v)
-{
- atomic_add(1, v);
-}
-
-/**
- * atomic_dec - decrement atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically decrements @v by 1.
- */
-static __inline__ void atomic_dec(atomic_t *v)
-{
- atomic_sub(1, v);
-}
-
-/**
- * atomic_dec_and_test - decrement and test
- * @v: pointer of type atomic_t
- *
- * Atomically decrements @v by 1 and
- * returns true if the result is 0, or false for all other
- * cases.
- */
-static __inline__ int atomic_dec_and_test(atomic_t *v)
-{
- return atomic_sub_and_test(1, v);
-}
-
-/**
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-static __inline__ int atomic_inc_and_test(atomic_t *v)
-{
- return atomic_inc_return(v);
-}
-
-/**
- * atomic_add_return - add and return
- * @v: pointer of type atomic_t
- * @i: integer value to add
- *
- * Atomically adds @i to @v and returns @i + @v
- */
-static __inline__ int atomic_add_return(int a, atomic_t *v)
-{
- int t;
-
- __asm__ __volatile__(
- "lwsync \n\
- 1: lwarx %0,0,%2 # atomic_add_return \n\
- add %0,%1,%0 \n\
- stwcx. %0,0,%2 \n\
- bne- 1b \n\
- isync"
- : "=&r" (t)
- : "r" (a), "r" (&v->counter)
- : "cc", "memory");
-
- return t;
-}
-
-/**
- * atomic_add_negative - add and test if negative
- * @v: pointer of type atomic_t
- * @i: integer value to add
- *
- * Atomically adds @i to @v and returns true
- * if the result is negative, or false when
- * result is greater than or equal to zero.
- */
-static __inline__ int atomic_add_negative(int a, atomic_t *v)
-{
- return atomic_add_return(a, v) < 0;
-}
-
-/**
- * atomic_add_unless - add unless the number is a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as it was not @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
- */
-static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
-{
- int t;
-
- __asm__ __volatile__(
- "lwsync \n\
- 1: lwarx %0,0,%1 # atomic_add_unless\n\
- cmpd 0,%0,%3 \n\
- beq- 2f \n\
- add %0,%2,%0 \n\
- stwcx. %0,0,%1 \n\
- bne- 1b \n\
- isync \n\
- subf %0,%2,%0 \n\
- 2:"
- : "=&r" (t)
- : "r" (&v->counter), "r" (a), "r" (u)
- : "cc", "memory");
-
- return t != u;
-}
-
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
-
-#define atomic_inc_return(v) (atomic_add_return(1,v))
-#define atomic_dec_return(v) (atomic_sub_return(1,v))
-
-/* Atomic operations are already serializing on x86 */
-#define smp_mb__before_atomic_dec() cmm_smp_mb()
-#define smp_mb__after_atomic_dec() cmm_smp_mb()
-#define smp_mb__before_atomic_inc() cmm_smp_mb()
-#define smp_mb__after_atomic_inc() cmm_smp_mb()
-
-#endif //0 /* duplicate with arch_atomic.h */
-
-/*
- * api_pthreads.h: API mapping to pthreads environment.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version. However, please note that much
- * of the code in this file derives from the Linux kernel, and that such
- * code may not be available except under GPLv2.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * Copyright (c) 2006 Paul E. McKenney, IBM.
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <errno.h>
-#include <limits.h>
-#include <sys/types.h>
-#define __USE_GNU
-#include <pthread.h>
-#include <sched.h>
-#include <sys/param.h>
-/* #include "atomic.h" */
-
-/*
- * Default machine parameters.
- */
-
-#ifndef CAA_CACHE_LINE_SIZE
-/* #define CAA_CACHE_LINE_SIZE 128 */
-#endif /* #ifndef CAA_CACHE_LINE_SIZE */
-
-/*
- * Exclusive locking primitives.
- */
-
-typedef pthread_mutex_t spinlock_t;
-
-#define DEFINE_SPINLOCK(lock) spinlock_t lock = PTHREAD_MUTEX_INITIALIZER;
-#define __SPIN_LOCK_UNLOCKED(lockp) PTHREAD_MUTEX_INITIALIZER
-
-static void spin_lock_init(spinlock_t *sp)
-{
- if (pthread_mutex_init(sp, NULL) != 0) {
- perror("spin_lock_init:pthread_mutex_init");
- exit(-1);
- }
-}
-
-static void spin_lock(spinlock_t *sp)
-{
- if (pthread_mutex_lock(sp) != 0) {
- perror("spin_lock:pthread_mutex_lock");
- exit(-1);
- }
-}
-
-static void spin_unlock(spinlock_t *sp)
-{
- if (pthread_mutex_unlock(sp) != 0) {
- perror("spin_unlock:pthread_mutex_unlock");
- exit(-1);
- }
-}
-
-#define spin_lock_irqsave(l, f) do { f = 1; spin_lock(l); } while (0)
-#define spin_unlock_irqrestore(l, f) do { f = 0; spin_unlock(l); } while (0)
-
-/*
- * Thread creation/destruction primitives.
- */
-
-typedef pthread_t thread_id_t;
-
-#define NR_THREADS 128
-
-#define __THREAD_ID_MAP_EMPTY 0
-#define __THREAD_ID_MAP_WAITING 1
-thread_id_t __thread_id_map[NR_THREADS];
-spinlock_t __thread_id_map_mutex;
-
-#define for_each_thread(t) \
- for (t = 0; t < NR_THREADS; t++)
-
-#define for_each_running_thread(t) \
- for (t = 0; t < NR_THREADS; t++) \
- if ((__thread_id_map[t] != __THREAD_ID_MAP_EMPTY) && \
- (__thread_id_map[t] != __THREAD_ID_MAP_WAITING))
-
-#define for_each_tid(t, tid) \
- for (t = 0; t < NR_THREADS; t++) \
- if ((((tid) = __thread_id_map[t]) != __THREAD_ID_MAP_EMPTY) && \
- ((tid) != __THREAD_ID_MAP_WAITING))
-
-pthread_key_t thread_id_key;
-
-static int __smp_thread_id(void)
-{
- int i;
- thread_id_t tid = pthread_self();
-
- for (i = 0; i < NR_THREADS; i++) {
- if (__thread_id_map[i] == tid) {
- long v = i + 1; /* must be non-NULL. */
-
- if (pthread_setspecific(thread_id_key, (void *)v) != 0) {
- perror("pthread_setspecific");
- exit(-1);
- }
- return i;
- }
- }
- spin_lock(&__thread_id_map_mutex);
- for (i = 0; i < NR_THREADS; i++) {
- if (__thread_id_map[i] == tid)
- spin_unlock(&__thread_id_map_mutex);
- return i;
- }
- spin_unlock(&__thread_id_map_mutex);
- fprintf(stderr, "smp_thread_id: Rogue thread, id: %d(%#x)\n",
- (int)tid, (int)tid);
- exit(-1);
-}
-
-static int smp_thread_id(void)
-{
- void *id;
-
- id = pthread_getspecific(thread_id_key);
- if (id == NULL)
- return __smp_thread_id();
- return (long)(id - 1);
-}
-
-static thread_id_t create_thread(void *(*func)(void *), void *arg)
-{
- thread_id_t tid;
- int i;
-
- spin_lock(&__thread_id_map_mutex);
- for (i = 0; i < NR_THREADS; i++) {
- if (__thread_id_map[i] == __THREAD_ID_MAP_EMPTY)
- break;
- }
- if (i >= NR_THREADS) {
- spin_unlock(&__thread_id_map_mutex);
- fprintf(stderr, "Thread limit of %d exceeded!\n", NR_THREADS);
- exit(-1);
- }
- __thread_id_map[i] = __THREAD_ID_MAP_WAITING;
- spin_unlock(&__thread_id_map_mutex);
- if (pthread_create(&tid, NULL, func, arg) != 0) {
- perror("create_thread:pthread_create");
- exit(-1);
- }
- __thread_id_map[i] = tid;
- return tid;
-}
-
-static void *wait_thread(thread_id_t tid)
-{
- int i;
- void *vp;
-
- for (i = 0; i < NR_THREADS; i++) {
- if (__thread_id_map[i] == tid)
- break;
- }
- if (i >= NR_THREADS){
- fprintf(stderr, "wait_thread: bad tid = %d(%#x)\n",
- (int)tid, (int)tid);
- exit(-1);
- }
- if (pthread_join(tid, &vp) != 0) {
- perror("wait_thread:pthread_join");
- exit(-1);
- }
- __thread_id_map[i] = __THREAD_ID_MAP_EMPTY;
- return vp;
-}
-
-static void wait_all_threads(void)
-{
- int i;
- thread_id_t tid;
-
- for (i = 1; i < NR_THREADS; i++) {
- tid = __thread_id_map[i];
- if (tid != __THREAD_ID_MAP_EMPTY &&
- tid != __THREAD_ID_MAP_WAITING)
- (void)wait_thread(tid);
- }
-}
-
-static void run_on(int cpu)
-{
- cpu_set_t mask;
-
- CPU_ZERO(&mask);
- CPU_SET(cpu, &mask);
- sched_setaffinity(0, sizeof(mask), &mask);
-}
-
-/*
- * timekeeping -- very crude -- should use MONOTONIC...
- */
-
-long long get_microseconds(void)
-{
- struct timeval tv;
-
- if (gettimeofday(&tv, NULL) != 0)
- abort();
- return ((long long)tv.tv_sec) * 1000000LL + (long long)tv.tv_usec;
-}
-
-/*
- * Per-thread variables.
- */
-
-#define DEFINE_PER_THREAD(type, name) \
- struct { \
- __typeof__(type) v \
- __attribute__((__aligned__(CAA_CACHE_LINE_SIZE))); \
- } __per_thread_##name[NR_THREADS];
-#define DECLARE_PER_THREAD(type, name) extern DEFINE_PER_THREAD(type, name)
-
-#define per_thread(name, thread) __per_thread_##name[thread].v
-#define __get_thread_var(name) per_thread(name, smp_thread_id())
-
-#define init_per_thread(name, v) \
- do { \
- int __i_p_t_i; \
- for (__i_p_t_i = 0; __i_p_t_i < NR_THREADS; __i_p_t_i++) \
- per_thread(name, __i_p_t_i) = v; \
- } while (0)
-
-/*
- * CPU traversal primitives.
- */
-
-#ifndef NR_CPUS
-#define NR_CPUS 16
-#endif /* #ifndef NR_CPUS */
-
-#define for_each_possible_cpu(cpu) \
- for (cpu = 0; cpu < NR_CPUS; cpu++)
-#define for_each_online_cpu(cpu) \
- for (cpu = 0; cpu < NR_CPUS; cpu++)
-
-/*
- * Per-CPU variables.
- */
-
-#define DEFINE_PER_CPU(type, name) \
- struct { \
- __typeof__(type) v \
- __attribute__((__aligned__(CAA_CACHE_LINE_SIZE))); \
- } __per_cpu_##name[NR_CPUS]
-#define DECLARE_PER_CPU(type, name) extern DEFINE_PER_CPU(type, name)
-
-DEFINE_PER_THREAD(int, smp_processor_id);
-
-#define per_cpu(name, thread) __per_cpu_##name[thread].v
-#define __get_cpu_var(name) per_cpu(name, smp_processor_id())
-
-#define init_per_cpu(name, v) \
- do { \
- int __i_p_c_i; \
- for (__i_p_c_i = 0; __i_p_c_i < NR_CPUS; __i_p_c_i++) \
- per_cpu(name, __i_p_c_i) = v; \
- } while (0)
-
-/*
- * CPU state checking (crowbarred).
- */
-
-#define idle_cpu(cpu) 0
-#define in_softirq() 1
-#define hardirq_count() 0
-#define PREEMPT_SHIFT 0
-#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
-#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
-#define PREEMPT_BITS 8
-#define SOFTIRQ_BITS 8
-
-/*
- * CPU hotplug.
- */
-
-struct notifier_block {
- int (*notifier_call)(struct notifier_block *, unsigned long, void *);
- struct notifier_block *next;
- int priority;
-};
-
-#define CPU_ONLINE 0x0002 /* CPU (unsigned)v is up */
-#define CPU_UP_PREPARE 0x0003 /* CPU (unsigned)v coming up */
-#define CPU_UP_CANCELED 0x0004 /* CPU (unsigned)v NOT coming up */
-#define CPU_DOWN_PREPARE 0x0005 /* CPU (unsigned)v going down */
-#define CPU_DOWN_FAILED 0x0006 /* CPU (unsigned)v NOT going down */
-#define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */
-#define CPU_DYING 0x0008 /* CPU (unsigned)v not running any task,
- * not handling interrupts, soon dead */
-#define CPU_POST_DEAD 0x0009 /* CPU (unsigned)v dead, cpu_hotplug
- * lock is dropped */
-
-/* Used for CPU hotplug events occuring while tasks are frozen due to a suspend
- * operation in progress
- */
-#define CPU_TASKS_FROZEN 0x0010
-
-#define CPU_ONLINE_FROZEN (CPU_ONLINE | CPU_TASKS_FROZEN)
-#define CPU_UP_PREPARE_FROZEN (CPU_UP_PREPARE | CPU_TASKS_FROZEN)
-#define CPU_UP_CANCELED_FROZEN (CPU_UP_CANCELED | CPU_TASKS_FROZEN)
-#define CPU_DOWN_PREPARE_FROZEN (CPU_DOWN_PREPARE | CPU_TASKS_FROZEN)
-#define CPU_DOWN_FAILED_FROZEN (CPU_DOWN_FAILED | CPU_TASKS_FROZEN)
-#define CPU_DEAD_FROZEN (CPU_DEAD | CPU_TASKS_FROZEN)
-#define CPU_DYING_FROZEN (CPU_DYING | CPU_TASKS_FROZEN)
-
-/* Hibernation and suspend events */
-#define PM_HIBERNATION_PREPARE 0x0001 /* Going to hibernate */
-#define PM_POST_HIBERNATION 0x0002 /* Hibernation finished */
-#define PM_SUSPEND_PREPARE 0x0003 /* Going to suspend the system */
-#define PM_POST_SUSPEND 0x0004 /* Suspend finished */
-#define PM_RESTORE_PREPARE 0x0005 /* Going to restore a saved image */
-#define PM_POST_RESTORE 0x0006 /* Restore failed */
-
-#define NOTIFY_DONE 0x0000 /* Don't care */
-#define NOTIFY_OK 0x0001 /* Suits me */
-#define NOTIFY_STOP_MASK 0x8000 /* Don't call further */
-#define NOTIFY_BAD (NOTIFY_STOP_MASK|0x0002)
- /* Bad/Veto action */
-/*
- * Clean way to return from the notifier and stop further calls.
- */
-#define NOTIFY_STOP (NOTIFY_OK|NOTIFY_STOP_MASK)
-
-/*
- * Bug checks.
- */
-
-#define BUG_ON(c) do { if (!(c)) abort(); } while (0)
-
-/*
- * Initialization -- Must be called before calling any primitives.
- */
-
-static void smp_init(void)
-{
- int i;
-
- spin_lock_init(&__thread_id_map_mutex);
- __thread_id_map[0] = pthread_self();
- for (i = 1; i < NR_THREADS; i++)
- __thread_id_map[i] = __THREAD_ID_MAP_EMPTY;
- init_per_thread(smp_processor_id, 0);
- if (pthread_key_create(&thread_id_key, NULL) != 0) {
- perror("pthread_key_create");
- exit(-1);
- }
-}
-
-/* Taken from the Linux kernel source tree, so GPLv2-only!!! */
-
-#ifndef _LINUX_LIST_H
-#define _LINUX_LIST_H
-
-#define LIST_POISON1 ((void *) 0x00100100)
-#define LIST_POISON2 ((void *) 0x00200200)
-
-#if 0
-
-/*
- * Simple doubly linked list implementation.
- *
- * Some of the internal functions ("__xxx") are useful when
- * manipulating whole lists rather than single entries, as
- * sometimes we already know the next/prev entries and we can
- * generate better code by using them directly rather than
- * using the generic single-entry routines.
- */
-
-struct cds_list_head {
- struct cds_list_head *next, *prev;
-};
-
-#define CDS_LIST_HEAD_INIT(name) { &(name), &(name) }
-
-#define CDS_LIST_HEAD(name) \
- struct cds_list_head name = CDS_LIST_HEAD_INIT(name)
-
-static inline void CDS_INIT_LIST_HEAD(struct cds_list_head *list)
-{
- list->next = list;
- list->prev = list;
-}
-
-/*
- * Insert a new entry between two known consecutive entries.
- *
- * This is only for internal list manipulation where we know
- * the prev/next entries already!
- */
-#ifndef CONFIG_DEBUG_LIST
-static inline void __cds_list_add(struct cds_list_head *new,
- struct cds_list_head *prev,
- struct cds_list_head *next)
-{
- next->prev = new;
- new->next = next;
- new->prev = prev;
- prev->next = new;
-}
-#else
-extern void __cds_list_add(struct cds_list_head *new,
- struct cds_list_head *prev,
- struct cds_list_head *next);
-#endif
-
-/**
- * cds_list_add - add a new entry
- * @new: new entry to be added
- * @head: list head to add it after
- *
- * Insert a new entry after the specified head.
- * This is good for implementing stacks.
- */
-static inline void cds_list_add(struct cds_list_head *new, struct cds_list_head *head)
-{
- __cds_list_add(new, head, head->next);
-}
-
-
-/**
- * cds_list_add_tail - add a new entry
- * @new: new entry to be added
- * @head: list head to add it before
- *
- * Insert a new entry before the specified head.
- * This is useful for implementing queues.
- */
-static inline void cds_list_add_tail(struct cds_list_head *new, struct cds_list_head *head)
-{
- __cds_list_add(new, head->prev, head);
-}
-
-/*
- * Delete a list entry by making the prev/next entries
- * point to each other.
- *
- * This is only for internal list manipulation where we know
- * the prev/next entries already!
- */
-static inline void __cds_list_del(struct cds_list_head * prev, struct cds_list_head * next)
-{
- next->prev = prev;
- prev->next = next;
-}
-
-/**
- * cds_list_del - deletes entry from list.
- * @entry: the element to delete from the list.
- * Note: cds_list_empty() on entry does not return true after this, the entry is
- * in an undefined state.
- */
-#ifndef CONFIG_DEBUG_LIST
-static inline void cds_list_del(struct cds_list_head *entry)
-{
- __cds_list_del(entry->prev, entry->next);
- entry->next = LIST_POISON1;
- entry->prev = LIST_POISON2;
-}
-#else
-extern void cds_list_del(struct cds_list_head *entry);
-#endif
-
-/**
- * cds_list_replace - replace old entry by new one
- * @old : the element to be replaced
- * @new : the new element to insert
- *
- * If @old was empty, it will be overwritten.
- */
-static inline void cds_list_replace(struct cds_list_head *old,
- struct cds_list_head *new)
-{
- new->next = old->next;
- new->next->prev = new;
- new->prev = old->prev;
- new->prev->next = new;
-}
-
-static inline void cds_list_replace_init(struct cds_list_head *old,
- struct cds_list_head *new)
-{
- cds_list_replace(old, new);
- CDS_INIT_LIST_HEAD(old);
-}
-
-/**
- * cds_list_del_init - deletes entry from list and reinitialize it.
- * @entry: the element to delete from the list.
- */
-static inline void cds_list_del_init(struct cds_list_head *entry)
-{
- __cds_list_del(entry->prev, entry->next);
- CDS_INIT_LIST_HEAD(entry);
-}
-
-/**
- * cds_list_move - delete from one list and add as another's head
- * @list: the entry to move
- * @head: the head that will precede our entry
- */
-static inline void cds_list_move(struct cds_list_head *list, struct cds_list_head *head)
-{
- __cds_list_del(list->prev, list->next);
- cds_list_add(list, head);
-}
-
-/**
- * cds_list_move_tail - delete from one list and add as another's tail
- * @list: the entry to move
- * @head: the head that will follow our entry
- */
-static inline void cds_list_move_tail(struct cds_list_head *list,
- struct cds_list_head *head)
-{
- __cds_list_del(list->prev, list->next);
- cds_list_add_tail(list, head);
-}
-
-/**
- * list_is_last - tests whether @list is the last entry in list @head
- * @list: the entry to test
- * @head: the head of the list
- */
-static inline int list_is_last(const struct cds_list_head *list,
- const struct cds_list_head *head)
-{
- return list->next == head;
-}
-
-/**
- * cds_list_empty - tests whether a list is empty
- * @head: the list to test.
- */
-static inline int cds_list_empty(const struct cds_list_head *head)
-{
- return head->next == head;
-}
-
-/**
- * cds_list_empty_careful - tests whether a list is empty and not being modified
- * @head: the list to test
- *
- * Description:
- * tests whether a list is empty _and_ checks that no other CPU might be
- * in the process of modifying either member (next or prev)
- *
- * NOTE: using cds_list_empty_careful() without synchronization
- * can only be safe if the only activity that can happen
- * to the list entry is cds_list_del_init(). Eg. it cannot be used
- * if another CPU could re-list_add() it.
- */
-static inline int cds_list_empty_careful(const struct cds_list_head *head)
-{
- struct cds_list_head *next = head->next;
- return (next == head) && (next == head->prev);
-}
-
-/**
- * list_is_singular - tests whether a list has just one entry.
- * @head: the list to test.
- */
-static inline int list_is_singular(const struct cds_list_head *head)
-{
- return !list_empty(head) && (head->next == head->prev);
-}
-
-static inline void __list_cut_position(struct cds_list_head *list,
- struct cds_list_head *head, struct cds_list_head *entry)
-{
- struct cds_list_head *new_first = entry->next;
- list->next = head->next;
- list->next->prev = list;
- list->prev = entry;
- entry->next = list;
- head->next = new_first;
- new_first->prev = head;
-}
-
-/**
- * list_cut_position - cut a list into two
- * @list: a new list to add all removed entries
- * @head: a list with entries
- * @entry: an entry within head, could be the head itself
- * and if so we won't cut the list
- *
- * This helper moves the initial part of @head, up to and
- * including @entry, from @head to @list. You should
- * pass on @entry an element you know is on @head. @list
- * should be an empty list or a list you do not care about
- * losing its data.
- *
- */
-static inline void list_cut_position(struct cds_list_head *list,
- struct cds_list_head *head, struct cds_list_head *entry)
-{
- if (cds_list_empty(head))
- return;
- if (list_is_singular(head) &&
- (head->next != entry && head != entry))
- return;
- if (entry == head)
- CDS_INIT_LIST_HEAD(list);
- else
- __list_cut_position(list, head, entry);
-}
-
-static inline void __cds_list_splice(const struct cds_list_head *list,
- struct cds_list_head *prev,
- struct cds_list_head *next)
-{
- struct cds_list_head *first = list->next;
- struct cds_list_head *last = list->prev;
-
- first->prev = prev;
- prev->next = first;
-
- last->next = next;
- next->prev = last;
-}
-
-/**
- * cds_list_splice - join two lists, this is designed for stacks
- * @list: the new list to add.
- * @head: the place to add it in the first list.
- */
-static inline void cds_list_splice(const struct cds_list_head *list,
- struct cds_list_head *head)
-{
- if (!cds_list_empty(list))
- __cds_list_splice(list, head, head->next);
-}
-
-/**
- * cds_list_splice_tail - join two lists, each list being a queue
- * @list: the new list to add.
- * @head: the place to add it in the first list.
- */
-static inline void cds_list_splice_tail(struct cds_list_head *list,
- struct cds_list_head *head)
-{
- if (!cds_list_empty(list))
- __cds_list_splice(list, head->prev, head);
-}
-
-/**
- * cds_list_splice_init - join two lists and reinitialise the emptied list.
- * @list: the new list to add.
- * @head: the place to add it in the first list.
- *
- * The list at @list is reinitialised
- */
-static inline void cds_list_splice_init(struct cds_list_head *list,
- struct cds_list_head *head)
-{
- if (!cds_list_empty(list)) {
- __cds_list_splice(list, head, head->next);
- CDS_INIT_LIST_HEAD(list);
- }
-}
-
-/**
- * cds_list_splice_tail_init - join two lists and reinitialise the emptied list
- * @list: the new list to add.
- * @head: the place to add it in the first list.
- *
- * Each of the lists is a queue.
- * The list at @list is reinitialised
- */
-static inline void cds_list_splice_tail_init(struct cds_list_head *list,
- struct cds_list_head *head)
-{
- if (!cds_list_empty(list)) {
- __cds_list_splice(list, head->prev, head);
- CDS_INIT_LIST_HEAD(list);
- }
-}
-
-/**
- * cds_list_entry - get the struct for this entry
- * @ptr: the &struct cds_list_head pointer.
- * @type: the type of the struct this is embedded in.
- * @member: the name of the list_struct within the struct.
- */
-#define cds_list_entry(ptr, type, member) \
- caa_container_of(ptr, type, member)
-
-/**
- * list_first_entry - get the first element from a list
- * @ptr: the list head to take the element from.
- * @type: the type of the struct this is embedded in.
- * @member: the name of the list_struct within the struct.
- *
- * Note, that list is expected to be not empty.
- */
-#define list_first_entry(ptr, type, member) \
- cds_list_entry((ptr)->next, type, member)
-
-/**
- * cds_list_for_each - iterate over a list
- * @pos: the &struct cds_list_head to use as a loop cursor.
- * @head: the head for your list.
- */
-#define cds_list_for_each(pos, head) \
- for (pos = (head)->next; prefetch(pos->next), pos != (head); \
- pos = pos->next)
-
-/**
- * __cds_list_for_each - iterate over a list
- * @pos: the &struct cds_list_head to use as a loop cursor.
- * @head: the head for your list.
- *
- * This variant differs from cds_list_for_each() in that it's the
- * simplest possible list iteration code, no prefetching is done.
- * Use this for code that knows the list to be very short (empty
- * or 1 entry) most of the time.
- */
-#define __cds_list_for_each(pos, head) \
- for (pos = (head)->next; pos != (head); pos = pos->next)
-
-/**
- * cds_list_for_each_prev - iterate over a list backwards
- * @pos: the &struct cds_list_head to use as a loop cursor.
- * @head: the head for your list.
- */
-#define cds_list_for_each_prev(pos, head) \
- for (pos = (head)->prev; prefetch(pos->prev), pos != (head); \
- pos = pos->prev)
-
-/**
- * cds_list_for_each_safe - iterate over a list safe against removal of list entry
- * @pos: the &struct cds_list_head to use as a loop cursor.
- * @n: another &struct cds_list_head to use as temporary storage
- * @head: the head for your list.
- */
-#define cds_list_for_each_safe(pos, n, head) \
- for (pos = (head)->next, n = pos->next; pos != (head); \
- pos = n, n = pos->next)
-
-/**
- * cds_list_for_each_prev_safe - iterate over a list backwards safe against removal of list entry
- * @pos: the &struct cds_list_head to use as a loop cursor.
- * @n: another &struct cds_list_head to use as temporary storage
- * @head: the head for your list.
- */
-#define cds_list_for_each_prev_safe(pos, n, head) \
- for (pos = (head)->prev, n = pos->prev; \
- prefetch(pos->prev), pos != (head); \
- pos = n, n = pos->prev)
-
-/**
- * cds_list_for_each_entry - iterate over list of given type
- * @pos: the type * to use as a loop cursor.
- * @head: the head for your list.
- * @member: the name of the list_struct within the struct.
- */
-#define cds_list_for_each_entry(pos, head, member) \
- for (pos = cds_list_entry((head)->next, typeof(*pos), member); \
- prefetch(pos->member.next), &pos->member != (head); \
- pos = cds_list_entry(pos->member.next, typeof(*pos), member))
-
-/**
- * cds_list_for_each_entry_reverse - iterate backwards over list of given type.
- * @pos: the type * to use as a loop cursor.
- * @head: the head for your list.
- * @member: the name of the list_struct within the struct.
- */
-#define cds_list_for_each_entry_reverse(pos, head, member) \
- for (pos = cds_list_entry((head)->prev, typeof(*pos), member); \
- prefetch(pos->member.prev), &pos->member != (head); \
- pos = cds_list_entry(pos->member.prev, typeof(*pos), member))
-
-/**
- * list_prepare_entry - prepare a pos entry for use in cds_list_for_each_entry_continue()
- * @pos: the type * to use as a start point
- * @head: the head of the list
- * @member: the name of the list_struct within the struct.
- *
- * Prepares a pos entry for use as a start point in cds_list_for_each_entry_continue().
- */
-#define list_prepare_entry(pos, head, member) \
- ((pos) ? : cds_list_entry(head, typeof(*pos), member))
-
-/**
- * cds_list_for_each_entry_continue - continue iteration over list of given type
- * @pos: the type * to use as a loop cursor.
- * @head: the head for your list.
- * @member: the name of the list_struct within the struct.
- *
- * Continue to iterate over list of given type, continuing after
- * the current position.
- */
-#define cds_list_for_each_entry_continue(pos, head, member) \
- for (pos = cds_list_entry(pos->member.next, typeof(*pos), member); \
- prefetch(pos->member.next), &pos->member != (head); \
- pos = cds_list_entry(pos->member.next, typeof(*pos), member))
-
-/**
- * cds_list_for_each_entry_continue_reverse - iterate backwards from the given point
- * @pos: the type * to use as a loop cursor.
- * @head: the head for your list.
- * @member: the name of the list_struct within the struct.
- *
- * Start to iterate over list of given type backwards, continuing after
- * the current position.
- */
-#define cds_list_for_each_entry_continue_reverse(pos, head, member) \
- for (pos = cds_list_entry(pos->member.prev, typeof(*pos), member); \
- prefetch(pos->member.prev), &pos->member != (head); \
- pos = cds_list_entry(pos->member.prev, typeof(*pos), member))
-
-/**
- * cds_list_for_each_entry_from - iterate over list of given type from the current point
- * @pos: the type * to use as a loop cursor.
- * @head: the head for your list.
- * @member: the name of the list_struct within the struct.
- *
- * Iterate over list of given type, continuing from current position.
- */
-#define cds_list_for_each_entry_from(pos, head, member) \
- for (; prefetch(pos->member.next), &pos->member != (head); \
- pos = cds_list_entry(pos->member.next, typeof(*pos), member))
-
-/**
- * cds_list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
- * @pos: the type * to use as a loop cursor.
- * @n: another type * to use as temporary storage
- * @head: the head for your list.
- * @member: the name of the list_struct within the struct.
- */
-#define cds_list_for_each_entry_safe(pos, n, head, member) \
- for (pos = cds_list_entry((head)->next, typeof(*pos), member), \
- n = cds_list_entry(pos->member.next, typeof(*pos), member); \
- &pos->member != (head); \
- pos = n, n = cds_list_entry(n->member.next, typeof(*n), member))
-
-/**
- * cds_list_for_each_entry_safe_continue
- * @pos: the type * to use as a loop cursor.
- * @n: another type * to use as temporary storage
- * @head: the head for your list.
- * @member: the name of the list_struct within the struct.
- *
- * Iterate over list of given type, continuing after current point,
- * safe against removal of list entry.
- */
-#define cds_list_for_each_entry_safe_continue(pos, n, head, member) \
- for (pos = cds_list_entry(pos->member.next, typeof(*pos), member), \
- n = cds_list_entry(pos->member.next, typeof(*pos), member); \
- &pos->member != (head); \
- pos = n, n = cds_list_entry(n->member.next, typeof(*n), member))
-
-/**
- * cds_list_for_each_entry_safe_from
- * @pos: the type * to use as a loop cursor.
- * @n: another type * to use as temporary storage
- * @head: the head for your list.
- * @member: the name of the list_struct within the struct.
- *
- * Iterate over list of given type from current point, safe against
- * removal of list entry.
- */
-#define cds_list_for_each_entry_safe_from(pos, n, head, member) \
- for (n = cds_list_entry(pos->member.next, typeof(*pos), member); \
- &pos->member != (head); \
- pos = n, n = cds_list_entry(n->member.next, typeof(*n), member))
-
-/**
- * cds_list_for_each_entry_safe_reverse
- * @pos: the type * to use as a loop cursor.
- * @n: another type * to use as temporary storage
- * @head: the head for your list.
- * @member: the name of the list_struct within the struct.
- *
- * Iterate backwards over list of given type, safe against removal
- * of list entry.
- */
-#define cds_list_for_each_entry_safe_reverse(pos, n, head, member) \
- for (pos = cds_list_entry((head)->prev, typeof(*pos), member), \
- n = cds_list_entry(pos->member.prev, typeof(*pos), member); \
- &pos->member != (head); \
- pos = n, n = cds_list_entry(n->member.prev, typeof(*n), member))
-
-#endif //0
-
-/*
- * Double linked lists with a single pointer list head.
- * Mostly useful for hash tables where the two pointer list head is
- * too wasteful.
- * You lose the ability to access the tail in O(1).
- */
-
-struct cds_hlist_head {
- struct cds_hlist_node *first;
-};
-
-struct cds_hlist_node {
- struct cds_hlist_node *next, **pprev;
-};
-
-#define HLIST_HEAD_INIT { .first = NULL }
-#define HLIST_HEAD(name) struct cds_hlist_head name = { .first = NULL }
-#define CDS_INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
-static inline void INIT_HLIST_NODE(struct cds_hlist_node *h)
-{
- h->next = NULL;
- h->pprev = NULL;
-}
-
-static inline int hlist_unhashed(const struct cds_hlist_node *h)
-{
- return !h->pprev;
-}
-
-static inline int hlist_empty(const struct cds_hlist_head *h)
-{
- return !h->first;
-}
-
-static inline void __cds_hlist_del(struct cds_hlist_node *n)
-{
- struct cds_hlist_node *next = n->next;
- struct cds_hlist_node **pprev = n->pprev;
- *pprev = next;
- if (next)
- next->pprev = pprev;
-}
-
-static inline void cds_hlist_del(struct cds_hlist_node *n)
-{
- __cds_hlist_del(n);
- n->next = LIST_POISON1;
- n->pprev = LIST_POISON2;
-}
-
-static inline void cds_hlist_del_init(struct cds_hlist_node *n)
-{
- if (!hlist_unhashed(n)) {
- __cds_hlist_del(n);
- INIT_HLIST_NODE(n);
- }
-}
-
-static inline void cds_hlist_add_head(struct cds_hlist_node *n, struct cds_hlist_head *h)
-{
- struct cds_hlist_node *first = h->first;
- n->next = first;
- if (first)
- first->pprev = &n->next;
- h->first = n;
- n->pprev = &h->first;
-}
-
-/* next must be != NULL */
-static inline void hlist_add_before(struct cds_hlist_node *n,
- struct cds_hlist_node *next)
-{
- n->pprev = next->pprev;
- n->next = next;
- next->pprev = &n->next;
- *(n->pprev) = n;
-}
-
-static inline void hlist_add_after(struct cds_hlist_node *n,
- struct cds_hlist_node *next)
-{
- next->next = n->next;
- n->next = next;
- next->pprev = &n->next;
-
- if(next->next)
- next->next->pprev = &next->next;
-}
-
-/*
- * Move a list from one list head to another. Fixup the pprev
- * reference of the first entry if it exists.
- */
-static inline void hlist_move_list(struct cds_hlist_head *old,
- struct cds_hlist_head *new)
-{
- new->first = old->first;
- if (new->first)
- new->first->pprev = &new->first;
- old->first = NULL;
-}
-
-#define cds_hlist_entry(ptr, type, member) caa_container_of(ptr,type,member)
-
-#define cds_hlist_for_each(pos, head) \
- for (pos = (head)->first; pos && ({ prefetch(pos->next); 1; }); \
- pos = pos->next)
-
-#define cds_hlist_for_each_safe(pos, n, head) \
- for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
- pos = n)
-
-/**
- * cds_hlist_for_each_entry - iterate over list of given type
- * @tpos: the type * to use as a loop cursor.
- * @pos: the &struct cds_hlist_node to use as a loop cursor.
- * @head: the head for your list.
- * @member: the name of the cds_hlist_node within the struct.
- */
-#define cds_hlist_for_each_entry(tpos, pos, head, member) \
- for (pos = (head)->first; \
- pos && ({ prefetch(pos->next); 1;}) && \
- ({ tpos = cds_hlist_entry(pos, typeof(*tpos), member); 1;}); \
- pos = pos->next)
-
-/**
- * cds_hlist_for_each_entry_continue - iterate over a hlist continuing after current point
- * @tpos: the type * to use as a loop cursor.
- * @pos: the &struct cds_hlist_node to use as a loop cursor.
- * @member: the name of the cds_hlist_node within the struct.
- */
-#define cds_hlist_for_each_entry_continue(tpos, pos, member) \
- for (pos = (pos)->next; \
- pos && ({ prefetch(pos->next); 1;}) && \
- ({ tpos = cds_hlist_entry(pos, typeof(*tpos), member); 1;}); \
- pos = pos->next)
-
-/**
- * cds_hlist_for_each_entry_from - iterate over a hlist continuing from current point
- * @tpos: the type * to use as a loop cursor.
- * @pos: the &struct cds_hlist_node to use as a loop cursor.
- * @member: the name of the cds_hlist_node within the struct.
- */
-#define cds_hlist_for_each_entry_from(tpos, pos, member) \
- for (; pos && ({ prefetch(pos->next); 1;}) && \
- ({ tpos = cds_hlist_entry(pos, typeof(*tpos), member); 1;}); \
- pos = pos->next)
-
-/**
- * cds_hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
- * @tpos: the type * to use as a loop cursor.
- * @pos: the &struct cds_hlist_node to use as a loop cursor.
- * @n: another &struct cds_hlist_node to use as temporary storage
- * @head: the head for your list.
- * @member: the name of the cds_hlist_node within the struct.
- */
-#define cds_hlist_for_each_entry_safe(tpos, pos, n, head, member) \
- for (pos = (head)->first; \
- pos && ({ n = pos->next; 1; }) && \
- ({ tpos = cds_hlist_entry(pos, typeof(*tpos), member); 1;}); \
- pos = n)
-
-#endif
-
-#endif
+++ /dev/null
-/* MECHANICALLY GENERATED, DO NOT EDIT!!! */
-
-#ifndef _INCLUDE_API_H
-#define _INCLUDE_API_H
-
-#include "../config.h"
-
-/*
- * common.h: Common Linux kernel-isms.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; but version 2 of the License only due
- * to code included from the Linux kernel.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * Copyright (c) 2006 Paul E. McKenney, IBM.
- *
- * Much code taken from the Linux kernel. For such code, the option
- * to redistribute under later versions of GPL might not be available.
- */
-
-#include <urcu/arch.h>
-
-#ifndef __always_inline
-#define __always_inline inline
-#endif
-
-#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
-#define BUILD_BUG_ON_ZERO(e) (sizeof(char[1 - 2 * !!(e)]) - 1)
-
-#ifdef __ASSEMBLY__
-# define stringify_in_c(...) __VA_ARGS__
-# define ASM_CONST(x) x
-#else
-/* This version of stringify will deal with commas... */
-# define __stringify_in_c(...) #__VA_ARGS__
-# define stringify_in_c(...) __stringify_in_c(__VA_ARGS__) " "
-# define __ASM_CONST(x) x##UL
-# define ASM_CONST(x) __ASM_CONST(x)
-#endif
-
-
-/*
- * arch-i386.h: Expose x86 atomic instructions. 80486 and better only.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, but version 2 only due to inclusion
- * of Linux-kernel code.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * Copyright (c) 2006 Paul E. McKenney, IBM.
- *
- * Much code taken from the Linux kernel. For such code, the option
- * to redistribute under later versions of GPL might not be available.
- */
-
-/*
- * Machine parameters.
- */
-
-/* #define CAA_CACHE_LINE_SIZE 64 */
-#define ____cacheline_internodealigned_in_smp \
- __attribute__((__aligned__(1 << 6)))
-
-#define LOCK_PREFIX "lock ; "
-
-#if 0 /* duplicate with arch_atomic.h */
-
-/*
- * Atomic data structure, initialization, and access.
- */
-
-typedef struct { volatile int counter; } atomic_t;
-
-#define ATOMIC_INIT(i) { (i) }
-
-#define atomic_read(v) ((v)->counter)
-#define atomic_set(v, i) (((v)->counter) = (i))
-
-/*
- * Atomic operations.
- */
-
-/**
- * atomic_add - add integer to atomic variable
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v.
- */
-static __inline__ void atomic_add(int i, atomic_t *v)
-{
- __asm__ __volatile__(
- LOCK_PREFIX "addl %1,%0"
- :"+m" (v->counter)
- :"ir" (i));
-}
-
-/**
- * atomic_sub - subtract the atomic variable
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v.
- */
-static __inline__ void atomic_sub(int i, atomic_t *v)
-{
- __asm__ __volatile__(
- LOCK_PREFIX "subl %1,%0"
- :"+m" (v->counter)
- :"ir" (i));
-}
-
-/**
- * atomic_sub_and_test - subtract value from variable and test result
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v and returns
- * true if the result is zero, or false for all
- * other cases.
- */
-static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
-{
- unsigned char c;
-
- __asm__ __volatile__(
- LOCK_PREFIX "subl %2,%0; sete %1"
- :"+m" (v->counter), "=qm" (c)
- :"ir" (i) : "memory");
- return c;
-}
-
-/**
- * atomic_inc - increment atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1.
- */
-static __inline__ void atomic_inc(atomic_t *v)
-{
- __asm__ __volatile__(
- LOCK_PREFIX "incl %0"
- :"+m" (v->counter));
-}
-
-/**
- * atomic_dec - decrement atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically decrements @v by 1.
- */
-static __inline__ void atomic_dec(atomic_t *v)
-{
- __asm__ __volatile__(
- LOCK_PREFIX "decl %0"
- :"+m" (v->counter));
-}
-
-/**
- * atomic_dec_and_test - decrement and test
- * @v: pointer of type atomic_t
- *
- * Atomically decrements @v by 1 and
- * returns true if the result is 0, or false for all other
- * cases.
- */
-static __inline__ int atomic_dec_and_test(atomic_t *v)
-{
- unsigned char c;
-
- __asm__ __volatile__(
- LOCK_PREFIX "decl %0; sete %1"
- :"+m" (v->counter), "=qm" (c)
- : : "memory");
- return c != 0;
-}
-
-/**
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-static __inline__ int atomic_inc_and_test(atomic_t *v)
-{
- unsigned char c;
-
- __asm__ __volatile__(
- LOCK_PREFIX "incl %0; sete %1"
- :"+m" (v->counter), "=qm" (c)
- : : "memory");
- return c != 0;
-}
-
-/**
- * atomic_add_negative - add and test if negative
- * @v: pointer of type atomic_t
- * @i: integer value to add
- *
- * Atomically adds @i to @v and returns true
- * if the result is negative, or false when
- * result is greater than or equal to zero.
- */
-static __inline__ int atomic_add_negative(int i, atomic_t *v)
-{
- unsigned char c;
-
- __asm__ __volatile__(
- LOCK_PREFIX "addl %2,%0; sets %1"
- :"+m" (v->counter), "=qm" (c)
- :"ir" (i) : "memory");
- return c;
-}
-
-/**
- * atomic_add_return - add and return
- * @v: pointer of type atomic_t
- * @i: integer value to add
- *
- * Atomically adds @i to @v and returns @i + @v
- */
-static __inline__ int atomic_add_return(int i, atomic_t *v)
-{
- int __i;
-
- __i = i;
- __asm__ __volatile__(
- LOCK_PREFIX "xaddl %0, %1;"
- :"=r"(i)
- :"m"(v->counter), "0"(i));
- return i + __i;
-}
-
-static __inline__ int atomic_sub_return(int i, atomic_t *v)
-{
- return atomic_add_return(-i,v);
-}
-
-static inline unsigned int
-cmpxchg(volatile long *ptr, long oldval, long newval)
-{
- unsigned long retval;
-
- asm("# cmpxchg\n"
- "lock; cmpxchgl %4,(%2)\n"
- "# end atomic_cmpxchg4"
- : "=a" (retval), "=m" (*ptr)
- : "r" (ptr), "0" (oldval), "r" (newval), "m" (*ptr)
- : "cc");
- return (retval);
-}
-
-#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-
-/**
- * atomic_add_unless - add unless the number is a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as it was not @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
- */
-#define atomic_add_unless(v, a, u) \
-({ \
- int c, old; \
- c = atomic_read(v); \
- for (;;) { \
- if (unlikely(c == (u))) \
- break; \
- old = atomic_cmpxchg((v), c, c + (a)); \
- if (likely(old == c)) \
- break; \
- c = old; \
- } \
- c != (u); \
-})
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
-
-#define atomic_inc_return(v) (atomic_add_return(1,v))
-#define atomic_dec_return(v) (atomic_sub_return(1,v))
-
-/* These are x86-specific, used by some header files */
-#define atomic_clear_mask(mask, addr) \
-__asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \
-: : "r" (~(mask)),"m" (*addr) : "memory")
-
-#define atomic_set_mask(mask, addr) \
-__asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \
-: : "r" (mask),"m" (*(addr)) : "memory")
-
-/* Atomic operations are already serializing on x86 */
-#define smp_mb__before_atomic_dec() cmm_barrier()
-#define smp_mb__after_atomic_dec() cmm_barrier()
-#define smp_mb__before_atomic_inc() cmm_barrier()
-#define smp_mb__after_atomic_inc() cmm_barrier()
-
-#endif //0
-
-/*
- * api_pthreads.h: API mapping to pthreads environment.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version. However, please note that much
- * of the code in this file derives from the Linux kernel, and that such
- * code may not be available except under GPLv2.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * Copyright (c) 2006 Paul E. McKenney, IBM.
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <errno.h>
-#include <limits.h>
-#include <sys/types.h>
-#define __USE_GNU
-#include <pthread.h>
-#include <sched.h>
-#include <sys/param.h>
-/* #include "atomic.h" */
-
-/*
- * Default machine parameters.
- */
-
-#ifndef CAA_CACHE_LINE_SIZE
-/* #define CAA_CACHE_LINE_SIZE 128 */
-#endif /* #ifndef CAA_CACHE_LINE_SIZE */
-
-/*
- * Exclusive locking primitives.
- */
-
-typedef pthread_mutex_t spinlock_t;
-
-#define DEFINE_SPINLOCK(lock) spinlock_t lock = PTHREAD_MUTEX_INITIALIZER;
-#define __SPIN_LOCK_UNLOCKED(lockp) PTHREAD_MUTEX_INITIALIZER
-
-static void spin_lock_init(spinlock_t *sp)
-{
- if (pthread_mutex_init(sp, NULL) != 0) {
- perror("spin_lock_init:pthread_mutex_init");
- exit(-1);
- }
-}
-
-static void spin_lock(spinlock_t *sp)
-{
- if (pthread_mutex_lock(sp) != 0) {
- perror("spin_lock:pthread_mutex_lock");
- exit(-1);
- }
-}
-
-static void spin_unlock(spinlock_t *sp)
-{
- if (pthread_mutex_unlock(sp) != 0) {
- perror("spin_unlock:pthread_mutex_unlock");
- exit(-1);
- }
-}
-
-#define spin_lock_irqsave(l, f) do { f = 1; spin_lock(l); } while (0)
-#define spin_unlock_irqrestore(l, f) do { f = 0; spin_unlock(l); } while (0)
-
-/*
- * Thread creation/destruction primitives.
- */
-
-typedef pthread_t thread_id_t;
-
-#define NR_THREADS 128
-
-#define __THREAD_ID_MAP_EMPTY 0
-#define __THREAD_ID_MAP_WAITING 1
-thread_id_t __thread_id_map[NR_THREADS];
-spinlock_t __thread_id_map_mutex;
-
-#define for_each_thread(t) \
- for (t = 0; t < NR_THREADS; t++)
-
-#define for_each_running_thread(t) \
- for (t = 0; t < NR_THREADS; t++) \
- if ((__thread_id_map[t] != __THREAD_ID_MAP_EMPTY) && \
- (__thread_id_map[t] != __THREAD_ID_MAP_WAITING))
-
-pthread_key_t thread_id_key;
-
-static int __smp_thread_id(void)
-{
- int i;
- thread_id_t tid = pthread_self();
-
- for (i = 0; i < NR_THREADS; i++) {
- if (__thread_id_map[i] == tid) {
- long v = i + 1; /* must be non-NULL. */
-
- if (pthread_setspecific(thread_id_key, (void *)v) != 0) {
- perror("pthread_setspecific");
- exit(-1);
- }
- return i;
- }
- }
- spin_lock(&__thread_id_map_mutex);
- for (i = 0; i < NR_THREADS; i++) {
- if (__thread_id_map[i] == tid)
- spin_unlock(&__thread_id_map_mutex);
- return i;
- }
- spin_unlock(&__thread_id_map_mutex);
- fprintf(stderr, "smp_thread_id: Rogue thread, id: %d(%#x)\n",
- (int)tid, (int)tid);
- exit(-1);
-}
-
-static int smp_thread_id(void)
-{
- void *id;
-
- id = pthread_getspecific(thread_id_key);
- if (id == NULL)
- return __smp_thread_id();
- return (long)(id - 1);
-}
-
-static thread_id_t create_thread(void *(*func)(void *), void *arg)
-{
- thread_id_t tid;
- int i;
-
- spin_lock(&__thread_id_map_mutex);
- for (i = 0; i < NR_THREADS; i++) {
- if (__thread_id_map[i] == __THREAD_ID_MAP_EMPTY)
- break;
- }
- if (i >= NR_THREADS) {
- spin_unlock(&__thread_id_map_mutex);
- fprintf(stderr, "Thread limit of %d exceeded!\n", NR_THREADS);
- exit(-1);
- }
- __thread_id_map[i] = __THREAD_ID_MAP_WAITING;
- spin_unlock(&__thread_id_map_mutex);
- if (pthread_create(&tid, NULL, func, arg) != 0) {
- perror("create_thread:pthread_create");
- exit(-1);
- }
- __thread_id_map[i] = tid;
- return tid;
-}
-
-static void *wait_thread(thread_id_t tid)
-{
- int i;
- void *vp;
-
- for (i = 0; i < NR_THREADS; i++) {
- if (__thread_id_map[i] == tid)
- break;
- }
- if (i >= NR_THREADS){
- fprintf(stderr, "wait_thread: bad tid = %d(%#x)\n",
- (int)tid, (int)tid);
- exit(-1);
- }
- if (pthread_join(tid, &vp) != 0) {
- perror("wait_thread:pthread_join");
- exit(-1);
- }
- __thread_id_map[i] = __THREAD_ID_MAP_EMPTY;
- return vp;
-}
-
-static void wait_all_threads(void)
-{
- int i;
- thread_id_t tid;
-
- for (i = 1; i < NR_THREADS; i++) {
- tid = __thread_id_map[i];
- if (tid != __THREAD_ID_MAP_EMPTY &&
- tid != __THREAD_ID_MAP_WAITING)
- (void)wait_thread(tid);
- }
-}
-
-#ifndef HAVE_CPU_SET_T
-typedef unsigned long cpu_set_t;
-# define CPU_ZERO(cpuset) do { *(cpuset) = 0; } while(0)
-# define CPU_SET(cpu, cpuset) do { *(cpuset) |= (1UL << (cpu)); } while(0)
-#endif
-
-static void run_on(int cpu)
-{
-#if HAVE_SCHED_SETAFFINITY
- cpu_set_t mask;
-
- CPU_ZERO(&mask);
- CPU_SET(cpu, &mask);
-#if SCHED_SETAFFINITY_ARGS == 2
- sched_setaffinity(0, &mask);
-#else
- sched_setaffinity(0, sizeof(mask), &mask);
-#endif
-#endif /* HAVE_SCHED_SETAFFINITY */
-}
-
-/*
- * timekeeping -- very crude -- should use MONOTONIC...
- */
-
-long long get_microseconds(void)
-{
- struct timeval tv;
-
- if (gettimeofday(&tv, NULL) != 0)
- abort();
- return ((long long)tv.tv_sec) * 1000000LL + (long long)tv.tv_usec;
-}
-
-/*
- * Per-thread variables.
- */
-
-#define DEFINE_PER_THREAD(type, name) \
- struct { \
- __typeof__(type) v \
- __attribute__((__aligned__(CAA_CACHE_LINE_SIZE))); \
- } __per_thread_##name[NR_THREADS];
-#define DECLARE_PER_THREAD(type, name) extern DEFINE_PER_THREAD(type, name)
-
-#define per_thread(name, thread) __per_thread_##name[thread].v
-#define __get_thread_var(name) per_thread(name, smp_thread_id())
-
-#define init_per_thread(name, v) \
- do { \
- int __i_p_t_i; \
- for (__i_p_t_i = 0; __i_p_t_i < NR_THREADS; __i_p_t_i++) \
- per_thread(name, __i_p_t_i) = v; \
- } while (0)
-
-/*
- * CPU traversal primitives.
- */
-
-#ifndef NR_CPUS
-#define NR_CPUS 16
-#endif /* #ifndef NR_CPUS */
-
-#define for_each_possible_cpu(cpu) \
- for (cpu = 0; cpu < NR_CPUS; cpu++)
-#define for_each_online_cpu(cpu) \
- for (cpu = 0; cpu < NR_CPUS; cpu++)
-
-/*
- * Per-CPU variables.
- */
-
-#define DEFINE_PER_CPU(type, name) \
- struct { \
- __typeof__(type) v \
- __attribute__((__aligned__(CAA_CACHE_LINE_SIZE))); \
- } __per_cpu_##name[NR_CPUS]
-#define DECLARE_PER_CPU(type, name) extern DEFINE_PER_CPU(type, name)
-
-DEFINE_PER_THREAD(int, smp_processor_id);
-
-#define per_cpu(name, thread) __per_cpu_##name[thread].v
-#define __get_cpu_var(name) per_cpu(name, smp_processor_id())
-
-#define init_per_cpu(name, v) \
- do { \
- int __i_p_c_i; \
- for (__i_p_c_i = 0; __i_p_c_i < NR_CPUS; __i_p_c_i++) \
- per_cpu(name, __i_p_c_i) = v; \
- } while (0)
-
-/*
- * CPU state checking (crowbarred).
- */
-
-#define idle_cpu(cpu) 0
-#define in_softirq() 1
-#define hardirq_count() 0
-#define PREEMPT_SHIFT 0
-#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
-#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
-#define PREEMPT_BITS 8
-#define SOFTIRQ_BITS 8
-
-/*
- * CPU hotplug.
- */
-
-struct notifier_block {
- int (*notifier_call)(struct notifier_block *, unsigned long, void *);
- struct notifier_block *next;
- int priority;
-};
-
-#define CPU_ONLINE 0x0002 /* CPU (unsigned)v is up */
-#define CPU_UP_PREPARE 0x0003 /* CPU (unsigned)v coming up */
-#define CPU_UP_CANCELED 0x0004 /* CPU (unsigned)v NOT coming up */
-#define CPU_DOWN_PREPARE 0x0005 /* CPU (unsigned)v going down */
-#define CPU_DOWN_FAILED 0x0006 /* CPU (unsigned)v NOT going down */
-#define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */
-#define CPU_DYING 0x0008 /* CPU (unsigned)v not running any task,
- * not handling interrupts, soon dead */
-#define CPU_POST_DEAD 0x0009 /* CPU (unsigned)v dead, cpu_hotplug
- * lock is dropped */
-
-/* Used for CPU hotplug events occuring while tasks are frozen due to a suspend
- * operation in progress
- */
-#define CPU_TASKS_FROZEN 0x0010
-
-#define CPU_ONLINE_FROZEN (CPU_ONLINE | CPU_TASKS_FROZEN)
-#define CPU_UP_PREPARE_FROZEN (CPU_UP_PREPARE | CPU_TASKS_FROZEN)
-#define CPU_UP_CANCELED_FROZEN (CPU_UP_CANCELED | CPU_TASKS_FROZEN)
-#define CPU_DOWN_PREPARE_FROZEN (CPU_DOWN_PREPARE | CPU_TASKS_FROZEN)
-#define CPU_DOWN_FAILED_FROZEN (CPU_DOWN_FAILED | CPU_TASKS_FROZEN)
-#define CPU_DEAD_FROZEN (CPU_DEAD | CPU_TASKS_FROZEN)
-#define CPU_DYING_FROZEN (CPU_DYING | CPU_TASKS_FROZEN)
-
-/* Hibernation and suspend events */
-#define PM_HIBERNATION_PREPARE 0x0001 /* Going to hibernate */
-#define PM_POST_HIBERNATION 0x0002 /* Hibernation finished */
-#define PM_SUSPEND_PREPARE 0x0003 /* Going to suspend the system */
-#define PM_POST_SUSPEND 0x0004 /* Suspend finished */
-#define PM_RESTORE_PREPARE 0x0005 /* Going to restore a saved image */
-#define PM_POST_RESTORE 0x0006 /* Restore failed */
-
-#define NOTIFY_DONE 0x0000 /* Don't care */
-#define NOTIFY_OK 0x0001 /* Suits me */
-#define NOTIFY_STOP_MASK 0x8000 /* Don't call further */
-#define NOTIFY_BAD (NOTIFY_STOP_MASK|0x0002)
- /* Bad/Veto action */
-/*
- * Clean way to return from the notifier and stop further calls.
- */
-#define NOTIFY_STOP (NOTIFY_OK|NOTIFY_STOP_MASK)
-
-/*
- * Bug checks.
- */
-
-#define BUG_ON(c) do { if (!(c)) abort(); } while (0)
-
-/*
- * Initialization -- Must be called before calling any primitives.
- */
-
-static void smp_init(void)
-{
- int i;
-
- spin_lock_init(&__thread_id_map_mutex);
- __thread_id_map[0] = pthread_self();
- for (i = 1; i < NR_THREADS; i++)
- __thread_id_map[i] = __THREAD_ID_MAP_EMPTY;
- init_per_thread(smp_processor_id, 0);
- if (pthread_key_create(&thread_id_key, NULL) != 0) {
- perror("pthread_key_create");
- exit(-1);
- }
-}
-
-/* Taken from the Linux kernel source tree, so GPLv2-only!!! */
-
-#ifndef _LINUX_LIST_H
-#define _LINUX_LIST_H
-
-#define LIST_POISON1 ((void *) 0x00100100)
-#define LIST_POISON2 ((void *) 0x00200200)
-
-#if 0
-
-/*
- * Simple doubly linked list implementation.
- *
- * Some of the internal functions ("__xxx") are useful when
- * manipulating whole lists rather than single entries, as
- * sometimes we already know the next/prev entries and we can
- * generate better code by using them directly rather than
- * using the generic single-entry routines.
- */
-
-struct cds_list_head {
- struct cds_list_head *next, *prev;
-};
-
-#define CDS_LIST_HEAD_INIT(name) { &(name), &(name) }
-
-#define CDS_LIST_HEAD(name) \
- struct cds_list_head name = CDS_LIST_HEAD_INIT(name)
-
-static inline void CDS_INIT_LIST_HEAD(struct cds_list_head *list)
-{
- list->next = list;
- list->prev = list;
-}
-
-/*
- * Insert a new entry between two known consecutive entries.
- *
- * This is only for internal list manipulation where we know
- * the prev/next entries already!
- */
-#ifndef CONFIG_DEBUG_LIST
-static inline void __cds_list_add(struct cds_list_head *new,
- struct cds_list_head *prev,
- struct cds_list_head *next)
-{
- next->prev = new;
- new->next = next;
- new->prev = prev;
- prev->next = new;
-}
-#else
-extern void __cds_list_add(struct cds_list_head *new,
- struct cds_list_head *prev,
- struct cds_list_head *next);
-#endif
-
-/**
- * cds_list_add - add a new entry
- * @new: new entry to be added
- * @head: list head to add it after
- *
- * Insert a new entry after the specified head.
- * This is good for implementing stacks.
- */
-static inline void cds_list_add(struct cds_list_head *new, struct cds_list_head *head)
-{
- __cds_list_add(new, head, head->next);
-}
-
-
-/**
- * cds_list_add_tail - add a new entry
- * @new: new entry to be added
- * @head: list head to add it before
- *
- * Insert a new entry before the specified head.
- * This is useful for implementing queues.
- */
-static inline void cds_list_add_tail(struct cds_list_head *new, struct cds_list_head *head)
-{
- __cds_list_add(new, head->prev, head);
-}
-
-/*
- * Delete a list entry by making the prev/next entries
- * point to each other.
- *
- * This is only for internal list manipulation where we know
- * the prev/next entries already!
- */
-static inline void __cds_list_del(struct cds_list_head * prev, struct cds_list_head * next)
-{
- next->prev = prev;
- prev->next = next;
-}
-
-/**
- * cds_list_del - deletes entry from list.
- * @entry: the element to delete from the list.
- * Note: cds_list_empty() on entry does not return true after this, the entry is
- * in an undefined state.
- */
-#ifndef CONFIG_DEBUG_LIST
-static inline void cds_list_del(struct cds_list_head *entry)
-{
- __cds_list_del(entry->prev, entry->next);
- entry->next = LIST_POISON1;
- entry->prev = LIST_POISON2;
-}
-#else
-extern void cds_list_del(struct cds_list_head *entry);
-#endif
-
-/**
- * cds_list_replace - replace old entry by new one
- * @old : the element to be replaced
- * @new : the new element to insert
- *
- * If @old was empty, it will be overwritten.
- */
-static inline void cds_list_replace(struct cds_list_head *old,
- struct cds_list_head *new)
-{
- new->next = old->next;
- new->next->prev = new;
- new->prev = old->prev;
- new->prev->next = new;
-}
-
-static inline void cds_list_replace_init(struct cds_list_head *old,
- struct cds_list_head *new)
-{
- cds_list_replace(old, new);
- CDS_INIT_LIST_HEAD(old);
-}
-
-/**
- * cds_list_del_init - deletes entry from list and reinitialize it.
- * @entry: the element to delete from the list.
- */
-static inline void cds_list_del_init(struct cds_list_head *entry)
-{
- __cds_list_del(entry->prev, entry->next);
- CDS_INIT_LIST_HEAD(entry);
-}
-
-/**
- * cds_list_move - delete from one list and add as another's head
- * @list: the entry to move
- * @head: the head that will precede our entry
- */
-static inline void cds_list_move(struct cds_list_head *list, struct cds_list_head *head)
-{
- __cds_list_del(list->prev, list->next);
- cds_list_add(list, head);
-}
-
-/**
- * cds_list_move_tail - delete from one list and add as another's tail
- * @list: the entry to move
- * @head: the head that will follow our entry
- */
-static inline void cds_list_move_tail(struct cds_list_head *list,
- struct cds_list_head *head)
-{
- __cds_list_del(list->prev, list->next);
- cds_list_add_tail(list, head);
-}
-
-/**
- * list_is_last - tests whether @list is the last entry in list @head
- * @list: the entry to test
- * @head: the head of the list
- */
-static inline int list_is_last(const struct cds_list_head *list,
- const struct cds_list_head *head)
-{
- return list->next == head;
-}
-
-/**
- * cds_list_empty - tests whether a list is empty
- * @head: the list to test.
- */
-static inline int cds_list_empty(const struct cds_list_head *head)
-{
- return head->next == head;
-}
-
-/**
- * cds_list_empty_careful - tests whether a list is empty and not being modified
- * @head: the list to test
- *
- * Description:
- * tests whether a list is empty _and_ checks that no other CPU might be
- * in the process of modifying either member (next or prev)
- *
- * NOTE: using cds_list_empty_careful() without synchronization
- * can only be safe if the only activity that can happen
- * to the list entry is cds_list_del_init(). Eg. it cannot be used
- * if another CPU could re-list_add() it.
- */
-static inline int cds_list_empty_careful(const struct cds_list_head *head)
-{
- struct cds_list_head *next = head->next;
- return (next == head) && (next == head->prev);
-}
-
-/**
- * list_is_singular - tests whether a list has just one entry.
- * @head: the list to test.
- */
-static inline int list_is_singular(const struct cds_list_head *head)
-{
- return !list_empty(head) && (head->next == head->prev);
-}
-
-static inline void __list_cut_position(struct cds_list_head *list,
- struct cds_list_head *head, struct cds_list_head *entry)
-{
- struct cds_list_head *new_first = entry->next;
- list->next = head->next;
- list->next->prev = list;
- list->prev = entry;
- entry->next = list;
- head->next = new_first;
- new_first->prev = head;
-}
-
-/**
- * list_cut_position - cut a list into two
- * @list: a new list to add all removed entries
- * @head: a list with entries
- * @entry: an entry within head, could be the head itself
- * and if so we won't cut the list
- *
- * This helper moves the initial part of @head, up to and
- * including @entry, from @head to @list. You should
- * pass on @entry an element you know is on @head. @list
- * should be an empty list or a list you do not care about
- * losing its data.
- *
- */
-static inline void list_cut_position(struct cds_list_head *list,
- struct cds_list_head *head, struct cds_list_head *entry)
-{
- if (cds_list_empty(head))
- return;
- if (list_is_singular(head) &&
- (head->next != entry && head != entry))
- return;
- if (entry == head)
- CDS_INIT_LIST_HEAD(list);
- else
- __list_cut_position(list, head, entry);
-}
-
-static inline void __cds_list_splice(const struct cds_list_head *list,
- struct cds_list_head *prev,
- struct cds_list_head *next)
-{
- struct cds_list_head *first = list->next;
- struct cds_list_head *last = list->prev;
-
- first->prev = prev;
- prev->next = first;
-
- last->next = next;
- next->prev = last;
-}
-
-/**
- * cds_list_splice - join two lists, this is designed for stacks
- * @list: the new list to add.
- * @head: the place to add it in the first list.
- */
-static inline void cds_list_splice(const struct cds_list_head *list,
- struct cds_list_head *head)
-{
- if (!cds_list_empty(list))
- __cds_list_splice(list, head, head->next);
-}
-
-/**
- * cds_list_splice_tail - join two lists, each list being a queue
- * @list: the new list to add.
- * @head: the place to add it in the first list.
- */
-static inline void cds_list_splice_tail(struct cds_list_head *list,
- struct cds_list_head *head)
-{
- if (!cds_list_empty(list))
- __cds_list_splice(list, head->prev, head);
-}
-
-/**
- * cds_list_splice_init - join two lists and reinitialise the emptied list.
- * @list: the new list to add.
- * @head: the place to add it in the first list.
- *
- * The list at @list is reinitialised
- */
-static inline void cds_list_splice_init(struct cds_list_head *list,
- struct cds_list_head *head)
-{
- if (!cds_list_empty(list)) {
- __cds_list_splice(list, head, head->next);
- CDS_INIT_LIST_HEAD(list);
- }
-}
-
-/**
- * cds_list_splice_tail_init - join two lists and reinitialise the emptied list
- * @list: the new list to add.
- * @head: the place to add it in the first list.
- *
- * Each of the lists is a queue.
- * The list at @list is reinitialised
- */
-static inline void cds_list_splice_tail_init(struct cds_list_head *list,
- struct cds_list_head *head)
-{
- if (!cds_list_empty(list)) {
- __cds_list_splice(list, head->prev, head);
- CDS_INIT_LIST_HEAD(list);
- }
-}
-
-/**
- * cds_list_entry - get the struct for this entry
- * @ptr: the &struct cds_list_head pointer.
- * @type: the type of the struct this is embedded in.
- * @member: the name of the list_struct within the struct.
- */
-#define cds_list_entry(ptr, type, member) \
- caa_container_of(ptr, type, member)
-
-/**
- * list_first_entry - get the first element from a list
- * @ptr: the list head to take the element from.
- * @type: the type of the struct this is embedded in.
- * @member: the name of the list_struct within the struct.
- *
- * Note, that list is expected to be not empty.
- */
-#define list_first_entry(ptr, type, member) \
- cds_list_entry((ptr)->next, type, member)
-
-/**
- * cds_list_for_each - iterate over a list
- * @pos: the &struct cds_list_head to use as a loop cursor.
- * @head: the head for your list.
- */
-#define cds_list_for_each(pos, head) \
- for (pos = (head)->next; prefetch(pos->next), pos != (head); \
- pos = pos->next)
-
-/**
- * __cds_list_for_each - iterate over a list
- * @pos: the &struct cds_list_head to use as a loop cursor.
- * @head: the head for your list.
- *
- * This variant differs from cds_list_for_each() in that it's the
- * simplest possible list iteration code, no prefetching is done.
- * Use this for code that knows the list to be very short (empty
- * or 1 entry) most of the time.
- */
-#define __cds_list_for_each(pos, head) \
- for (pos = (head)->next; pos != (head); pos = pos->next)
-
-/**
- * cds_list_for_each_prev - iterate over a list backwards
- * @pos: the &struct cds_list_head to use as a loop cursor.
- * @head: the head for your list.
- */
-#define cds_list_for_each_prev(pos, head) \
- for (pos = (head)->prev; prefetch(pos->prev), pos != (head); \
- pos = pos->prev)
-
-/**
- * cds_list_for_each_safe - iterate over a list safe against removal of list entry
- * @pos: the &struct cds_list_head to use as a loop cursor.
- * @n: another &struct cds_list_head to use as temporary storage
- * @head: the head for your list.
- */
-#define cds_list_for_each_safe(pos, n, head) \
- for (pos = (head)->next, n = pos->next; pos != (head); \
- pos = n, n = pos->next)
-
-/**
- * cds_list_for_each_prev_safe - iterate over a list backwards safe against removal of list entry
- * @pos: the &struct cds_list_head to use as a loop cursor.
- * @n: another &struct cds_list_head to use as temporary storage
- * @head: the head for your list.
- */
-#define cds_list_for_each_prev_safe(pos, n, head) \
- for (pos = (head)->prev, n = pos->prev; \
- prefetch(pos->prev), pos != (head); \
- pos = n, n = pos->prev)
-
-/**
- * cds_list_for_each_entry - iterate over list of given type
- * @pos: the type * to use as a loop cursor.
- * @head: the head for your list.
- * @member: the name of the list_struct within the struct.
- */
-#define cds_list_for_each_entry(pos, head, member) \
- for (pos = cds_list_entry((head)->next, typeof(*pos), member); \
- prefetch(pos->member.next), &pos->member != (head); \
- pos = cds_list_entry(pos->member.next, typeof(*pos), member))
-
-/**
- * cds_list_for_each_entry_reverse - iterate backwards over list of given type.
- * @pos: the type * to use as a loop cursor.
- * @head: the head for your list.
- * @member: the name of the list_struct within the struct.
- */
-#define cds_list_for_each_entry_reverse(pos, head, member) \
- for (pos = cds_list_entry((head)->prev, typeof(*pos), member); \
- prefetch(pos->member.prev), &pos->member != (head); \
- pos = cds_list_entry(pos->member.prev, typeof(*pos), member))
-
-/**
- * list_prepare_entry - prepare a pos entry for use in cds_list_for_each_entry_continue()
- * @pos: the type * to use as a start point
- * @head: the head of the list
- * @member: the name of the list_struct within the struct.
- *
- * Prepares a pos entry for use as a start point in cds_list_for_each_entry_continue().
- */
-#define list_prepare_entry(pos, head, member) \
- ((pos) ? : cds_list_entry(head, typeof(*pos), member))
-
-/**
- * cds_list_for_each_entry_continue - continue iteration over list of given type
- * @pos: the type * to use as a loop cursor.
- * @head: the head for your list.
- * @member: the name of the list_struct within the struct.
- *
- * Continue to iterate over list of given type, continuing after
- * the current position.
- */
-#define cds_list_for_each_entry_continue(pos, head, member) \
- for (pos = cds_list_entry(pos->member.next, typeof(*pos), member); \
- prefetch(pos->member.next), &pos->member != (head); \
- pos = cds_list_entry(pos->member.next, typeof(*pos), member))
-
-/**
- * cds_list_for_each_entry_continue_reverse - iterate backwards from the given point
- * @pos: the type * to use as a loop cursor.
- * @head: the head for your list.
- * @member: the name of the list_struct within the struct.
- *
- * Start to iterate over list of given type backwards, continuing after
- * the current position.
- */
-#define cds_list_for_each_entry_continue_reverse(pos, head, member) \
- for (pos = cds_list_entry(pos->member.prev, typeof(*pos), member); \
- prefetch(pos->member.prev), &pos->member != (head); \
- pos = cds_list_entry(pos->member.prev, typeof(*pos), member))
-
-/**
- * cds_list_for_each_entry_from - iterate over list of given type from the current point
- * @pos: the type * to use as a loop cursor.
- * @head: the head for your list.
- * @member: the name of the list_struct within the struct.
- *
- * Iterate over list of given type, continuing from current position.
- */
-#define cds_list_for_each_entry_from(pos, head, member) \
- for (; prefetch(pos->member.next), &pos->member != (head); \
- pos = cds_list_entry(pos->member.next, typeof(*pos), member))
-
-/**
- * cds_list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
- * @pos: the type * to use as a loop cursor.
- * @n: another type * to use as temporary storage
- * @head: the head for your list.
- * @member: the name of the list_struct within the struct.
- */
-#define cds_list_for_each_entry_safe(pos, n, head, member) \
- for (pos = cds_list_entry((head)->next, typeof(*pos), member), \
- n = cds_list_entry(pos->member.next, typeof(*pos), member); \
- &pos->member != (head); \
- pos = n, n = cds_list_entry(n->member.next, typeof(*n), member))
-
-/**
- * cds_list_for_each_entry_safe_continue
- * @pos: the type * to use as a loop cursor.
- * @n: another type * to use as temporary storage
- * @head: the head for your list.
- * @member: the name of the list_struct within the struct.
- *
- * Iterate over list of given type, continuing after current point,
- * safe against removal of list entry.
- */
-#define cds_list_for_each_entry_safe_continue(pos, n, head, member) \
- for (pos = cds_list_entry(pos->member.next, typeof(*pos), member), \
- n = cds_list_entry(pos->member.next, typeof(*pos), member); \
- &pos->member != (head); \
- pos = n, n = cds_list_entry(n->member.next, typeof(*n), member))
-
-/**
- * cds_list_for_each_entry_safe_from
- * @pos: the type * to use as a loop cursor.
- * @n: another type * to use as temporary storage
- * @head: the head for your list.
- * @member: the name of the list_struct within the struct.
- *
- * Iterate over list of given type from current point, safe against
- * removal of list entry.
- */
-#define cds_list_for_each_entry_safe_from(pos, n, head, member) \
- for (n = cds_list_entry(pos->member.next, typeof(*pos), member); \
- &pos->member != (head); \
- pos = n, n = cds_list_entry(n->member.next, typeof(*n), member))
-
-/**
- * cds_list_for_each_entry_safe_reverse
- * @pos: the type * to use as a loop cursor.
- * @n: another type * to use as temporary storage
- * @head: the head for your list.
- * @member: the name of the list_struct within the struct.
- *
- * Iterate backwards over list of given type, safe against removal
- * of list entry.
- */
-#define cds_list_for_each_entry_safe_reverse(pos, n, head, member) \
- for (pos = cds_list_entry((head)->prev, typeof(*pos), member), \
- n = cds_list_entry(pos->member.prev, typeof(*pos), member); \
- &pos->member != (head); \
- pos = n, n = cds_list_entry(n->member.prev, typeof(*n), member))
-
-#endif //0
-
-/*
- * Double linked lists with a single pointer list head.
- * Mostly useful for hash tables where the two pointer list head is
- * too wasteful.
- * You lose the ability to access the tail in O(1).
- */
-
-struct cds_hlist_head {
- struct cds_hlist_node *first;
-};
-
-struct cds_hlist_node {
- struct cds_hlist_node *next, **pprev;
-};
-
-#define HLIST_HEAD_INIT { .first = NULL }
-#define HLIST_HEAD(name) struct cds_hlist_head name = { .first = NULL }
-#define CDS_INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
-static inline void INIT_HLIST_NODE(struct cds_hlist_node *h)
-{
- h->next = NULL;
- h->pprev = NULL;
-}
-
-static inline int hlist_unhashed(const struct cds_hlist_node *h)
-{
- return !h->pprev;
-}
-
-static inline int hlist_empty(const struct cds_hlist_head *h)
-{
- return !h->first;
-}
-
-static inline void __cds_hlist_del(struct cds_hlist_node *n)
-{
- struct cds_hlist_node *next = n->next;
- struct cds_hlist_node **pprev = n->pprev;
- *pprev = next;
- if (next)
- next->pprev = pprev;
-}
-
-static inline void cds_hlist_del(struct cds_hlist_node *n)
-{
- __cds_hlist_del(n);
- n->next = LIST_POISON1;
- n->pprev = LIST_POISON2;
-}
-
-static inline void cds_hlist_del_init(struct cds_hlist_node *n)
-{
- if (!hlist_unhashed(n)) {
- __cds_hlist_del(n);
- INIT_HLIST_NODE(n);
- }
-}
-
-static inline void cds_hlist_add_head(struct cds_hlist_node *n, struct cds_hlist_head *h)
-{
- struct cds_hlist_node *first = h->first;
- n->next = first;
- if (first)
- first->pprev = &n->next;
- h->first = n;
- n->pprev = &h->first;
-}
-
-/* next must be != NULL */
-static inline void hlist_add_before(struct cds_hlist_node *n,
- struct cds_hlist_node *next)
-{
- n->pprev = next->pprev;
- n->next = next;
- next->pprev = &n->next;
- *(n->pprev) = n;
-}
-
-static inline void hlist_add_after(struct cds_hlist_node *n,
- struct cds_hlist_node *next)
-{
- next->next = n->next;
- n->next = next;
- next->pprev = &n->next;
-
- if(next->next)
- next->next->pprev = &next->next;
-}
-
-/*
- * Move a list from one list head to another. Fixup the pprev
- * reference of the first entry if it exists.
- */
-static inline void hlist_move_list(struct cds_hlist_head *old,
- struct cds_hlist_head *new)
-{
- new->first = old->first;
- if (new->first)
- new->first->pprev = &new->first;
- old->first = NULL;
-}
-
-#define cds_hlist_entry(ptr, type, member) caa_container_of(ptr,type,member)
-
-#define cds_hlist_for_each(pos, head) \
- for (pos = (head)->first; pos && ({ prefetch(pos->next); 1; }); \
- pos = pos->next)
-
-#define cds_hlist_for_each_safe(pos, n, head) \
- for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
- pos = n)
-
-/**
- * cds_hlist_for_each_entry - iterate over list of given type
- * @tpos: the type * to use as a loop cursor.
- * @pos: the &struct cds_hlist_node to use as a loop cursor.
- * @head: the head for your list.
- * @member: the name of the cds_hlist_node within the struct.
- */
-#define cds_hlist_for_each_entry(tpos, pos, head, member) \
- for (pos = (head)->first; \
- pos && ({ prefetch(pos->next); 1;}) && \
- ({ tpos = cds_hlist_entry(pos, typeof(*tpos), member); 1;}); \
- pos = pos->next)
-
-/**
- * cds_hlist_for_each_entry_continue - iterate over a hlist continuing after current point
- * @tpos: the type * to use as a loop cursor.
- * @pos: the &struct cds_hlist_node to use as a loop cursor.
- * @member: the name of the cds_hlist_node within the struct.
- */
-#define cds_hlist_for_each_entry_continue(tpos, pos, member) \
- for (pos = (pos)->next; \
- pos && ({ prefetch(pos->next); 1;}) && \
- ({ tpos = cds_hlist_entry(pos, typeof(*tpos), member); 1;}); \
- pos = pos->next)
-
-/**
- * cds_hlist_for_each_entry_from - iterate over a hlist continuing from current point
- * @tpos: the type * to use as a loop cursor.
- * @pos: the &struct cds_hlist_node to use as a loop cursor.
- * @member: the name of the cds_hlist_node within the struct.
- */
-#define cds_hlist_for_each_entry_from(tpos, pos, member) \
- for (; pos && ({ prefetch(pos->next); 1;}) && \
- ({ tpos = cds_hlist_entry(pos, typeof(*tpos), member); 1;}); \
- pos = pos->next)
-
-/**
- * cds_hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
- * @tpos: the type * to use as a loop cursor.
- * @pos: the &struct cds_hlist_node to use as a loop cursor.
- * @n: another &struct cds_hlist_node to use as temporary storage
- * @head: the head for your list.
- * @member: the name of the cds_hlist_node within the struct.
- */
-#define cds_hlist_for_each_entry_safe(tpos, pos, n, head, member) \
- for (pos = (head)->first; \
- pos && ({ n = pos->next; 1; }) && \
- ({ tpos = cds_hlist_entry(pos, typeof(*tpos), member); 1;}); \
- pos = n)
-
-#endif
-
-#endif
rcu_register_thread();
run_on(me);
uatomic_inc(&nthreadsrunning);
+ put_thread_offline();
while (goflag == GOFLAG_INIT)
poll(NULL, 0, 1);
- mark_rcu_quiescent_state();
+ put_thread_online();
while (goflag == GOFLAG_RUN) {
for (i = 0; i < RCU_READ_RUN; i++) {
rcu_read_lock();
int pc;
rcu_register_thread();
+ put_thread_offline();
while (goflag == GOFLAG_INIT)
poll(NULL, 0, 1);
- mark_rcu_quiescent_state();
+ put_thread_online();
while (goflag == GOFLAG_RUN) {
rcu_read_lock();
p = rcu_dereference(rcu_stress_current);
time2 = caa_get_cycles();
printf("CPU clock cycles per loop: %g\n", (time2 - time1) /
(double)NR_LOOPS);
+ return 0;
}
#include <unistd.h>
#include <stdio.h>
#include <assert.h>
-#include <sys/syscall.h>
#include <sched.h>
#include <errno.h>
#include <urcu/arch.h>
+#ifdef __linux__
+#include <syscall.h>
+#endif
+
#if defined(_syscall0)
_syscall0(pid_t, gettid)
#elif defined(__NR_gettid)
#include <unistd.h>
#include <stdio.h>
#include <assert.h>
-#include <sys/syscall.h>
#include <sched.h>
#include <errno.h>
#include <urcu/arch.h>
+#ifdef __linux__
+#include <syscall.h>
+#endif
+
/* hardcoded number of CPUs */
#define NR_CPUS 16384
#include <unistd.h>
#include <stdio.h>
#include <assert.h>
-#include <sys/syscall.h>
#include <sched.h>
#include <errno.h>
#include <urcu/arch.h>
+#ifdef __linux__
+#include <syscall.h>
+#endif
+
/* hardcoded number of CPUs */
#define NR_CPUS 16384
#include <unistd.h>
#include <stdio.h>
#include <assert.h>
-#include <sys/syscall.h>
#include <pthread.h>
#include <errno.h>
#include <urcu/arch.h>
+#ifdef __linux__
+#include <syscall.h>
+#endif
+
#if defined(_syscall0)
_syscall0(pid_t, gettid)
#elif defined(__NR_gettid)
#include <unistd.h>
#include <stdio.h>
#include <assert.h>
-#include <sys/syscall.h>
#include <sched.h>
#include <errno.h>
#include <urcu/arch.h>
+#ifdef __linux__
+#include <syscall.h>
+#endif
+
/* hardcoded number of CPUs */
#define NR_CPUS 16384
/*
* malloc/free are reusing memory areas too quickly, which does not let us
* test races appropriately. Use a large circular array for allocations.
- * ARRAY_SIZE is larger than nr_writers, which insures we never run over our tail.
+ * ARRAY_SIZE is larger than nr_writers, and we keep the mutex across
+ * both alloc and free, which ensures we never run over our tail.
*/
#define ARRAY_SIZE (1048576 * nr_writers)
#define ARRAY_POISON 0xDEADBEEF
struct test_array *ret;
int index;
- rcu_copy_mutex_lock();
index = array_index % ARRAY_SIZE;
assert(test_array[index].a == ARRAY_POISON ||
test_array[index].a == 0);
array_index++;
if (array_index == ARRAY_SIZE)
array_index = 0;
- rcu_copy_mutex_unlock();
return ret;
}
{
if (!ptr)
return;
- rcu_copy_mutex_lock();
ptr->a = ARRAY_POISON;
- rcu_copy_mutex_unlock();
}
void *thr_reader(void *_count)
cmm_smp_mb();
for (;;) {
+ rcu_copy_mutex_lock();
new = test_array_alloc();
new->a = 8;
old = rcu_xchg_pointer(&test_rcu_pointer, new);
if (old)
old->a = 0;
test_array_free(old);
+ rcu_copy_mutex_unlock();
nr_writes++;
if (unlikely(!test_duration_write()))
break;
#include <unistd.h>
#include <stdio.h>
#include <assert.h>
-#include <sys/syscall.h>
#include <sched.h>
#include <errno.h>
#include <urcu/arch.h>
+#ifdef __linux__
+#include <syscall.h>
+#endif
+
/* hardcoded number of CPUs */
#define NR_CPUS 16384
#include <unistd.h>
#include <stdio.h>
#include <assert.h>
-#include <sys/syscall.h>
#include <errno.h>
#include <urcu/arch.h>
+#ifdef __linux__
+#include <syscall.h>
+#endif
+
#if defined(_syscall0)
_syscall0(pid_t, gettid)
#elif defined(__NR_gettid)
#include <unistd.h>
#include <stdio.h>
#include <assert.h>
-#include <sys/syscall.h>
#include <sched.h>
#include <errno.h>
#include <urcu/arch.h>
+#ifdef __linux__
+#include <syscall.h>
+#endif
+
/* hardcoded number of CPUs */
#define NR_CPUS 16384
#include <unistd.h>
#include <stdio.h>
#include <assert.h>
-#include <sys/syscall.h>
#include <pthread.h>
#include <errno.h>
#include <urcu/arch.h>
+#ifdef __linux__
+#include <syscall.h>
+#endif
+
#if defined(_syscall0)
_syscall0(pid_t, gettid)
#elif defined(__NR_gettid)
#include <stdio.h>
#include <assert.h>
-#include <urcu/uatomic_arch.h>
+#include <urcu/uatomic.h>
struct testvals {
unsigned char c;
#include <unistd.h>
#include <stdio.h>
#include <assert.h>
-#include <sys/syscall.h>
#include <sched.h>
#include <errno.h>
#include <urcu/arch.h>
+#ifdef __linux__
+#include <syscall.h>
+#endif
+
/* hardcoded number of CPUs */
#define NR_CPUS 16384
/*
* malloc/free are reusing memory areas too quickly, which does not let us
* test races appropriately. Use a large circular array for allocations.
- * ARRAY_SIZE is larger than nr_writers, which insures we never run over our tail.
+ * ARRAY_SIZE is larger than nr_writers, and we keep the mutex across
+ * both alloc and free, which ensures we never run over our tail.
*/
#define ARRAY_SIZE (1048576 * nr_writers)
#define ARRAY_POISON 0xDEADBEEF
struct test_array *ret;
int index;
- rcu_copy_mutex_lock();
index = array_index % ARRAY_SIZE;
assert(test_array[index].a == ARRAY_POISON ||
test_array[index].a == 0);
array_index++;
if (array_index == ARRAY_SIZE)
array_index = 0;
- rcu_copy_mutex_unlock();
return ret;
}
{
if (!ptr)
return;
- rcu_copy_mutex_lock();
ptr->a = ARRAY_POISON;
- rcu_copy_mutex_unlock();
}
void *thr_reader(void *_count)
cmm_smp_mb();
for (;;) {
+ rcu_copy_mutex_lock();
new = test_array_alloc();
new->a = 8;
old = rcu_xchg_pointer(&test_rcu_pointer, new);
if (old)
old->a = 0;
test_array_free(old);
+ rcu_copy_mutex_unlock();
nr_writes++;
if (unlikely(!test_duration_write()))
break;
#include <unistd.h>
#include <stdio.h>
#include <assert.h>
-#include <sys/syscall.h>
#include <sched.h>
#include <errno.h>
#include <urcu/arch.h>
+#ifdef __linux__
+#include <syscall.h>
+#endif
+
/* hardcoded number of CPUs */
#define NR_CPUS 16384
/*
* malloc/free are reusing memory areas too quickly, which does not let us
* test races appropriately. Use a large circular array for allocations.
- * ARRAY_SIZE is larger than nr_writers, which insures we never run over our tail.
+ * ARRAY_SIZE is larger than nr_writers, and we keep the mutex across
+ * both alloc and free, which ensures we never run over our tail.
*/
#define ARRAY_SIZE (1048576 * nr_writers)
#define ARRAY_POISON 0xDEADBEEF
struct test_array *ret;
int index;
- rcu_copy_mutex_lock();
index = array_index % ARRAY_SIZE;
assert(test_array[index].a == ARRAY_POISON ||
test_array[index].a == 0);
array_index++;
if (array_index == ARRAY_SIZE)
array_index = 0;
- rcu_copy_mutex_unlock();
return ret;
}
{
if (!ptr)
return;
- rcu_copy_mutex_lock();
ptr->a = ARRAY_POISON;
- rcu_copy_mutex_unlock();
}
void *thr_reader(void *_count)
cmm_smp_mb();
for (;;) {
+ rcu_copy_mutex_lock();
new = test_array_alloc();
new->a = 8;
- rcu_copy_mutex_lock();
old = test_rcu_pointer;
rcu_assign_pointer(test_rcu_pointer, new);
if (unlikely(wduration))
loop_sleep(wduration);
- rcu_copy_mutex_unlock();
synchronize_rcu();
if (old)
old->a = 0;
test_array_free(old);
+ rcu_copy_mutex_unlock();
nr_writes++;
if (unlikely(!test_duration_write()))
break;
#include <unistd.h>
#include <stdio.h>
#include <assert.h>
-#include <sys/syscall.h>
#include <sched.h>
#include <errno.h>
#include <urcu/arch.h>
+#ifdef __linux__
+#include <syscall.h>
+#endif
+
/* hardcoded number of CPUs */
#define NR_CPUS 16384
/*
* malloc/free are reusing memory areas too quickly, which does not let us
* test races appropriately. Use a large circular array for allocations.
- * ARRAY_SIZE is larger than nr_writers, which insures we never run over our tail.
+ * ARRAY_SIZE is larger than nr_writers, and we keep the mutex across
+ * both alloc and free, which ensures we never run over our tail.
*/
#define ARRAY_SIZE (1048576 * nr_writers)
#define ARRAY_POISON 0xDEADBEEF
struct test_array *ret;
int index;
- rcu_copy_mutex_lock();
index = array_index % ARRAY_SIZE;
assert(test_array[index].a == ARRAY_POISON ||
test_array[index].a == 0);
array_index++;
if (array_index == ARRAY_SIZE)
array_index = 0;
- rcu_copy_mutex_unlock();
return ret;
}
{
if (!ptr)
return;
- rcu_copy_mutex_lock();
ptr->a = ARRAY_POISON;
- rcu_copy_mutex_unlock();
}
void *thr_reader(void *_count)
cmm_smp_mb();
for (;;) {
+ rcu_copy_mutex_lock();
new = test_array_alloc();
new->a = 8;
old = rcu_xchg_pointer(&test_rcu_pointer, new);
if (old)
old->a = 0;
test_array_free(old);
+ rcu_copy_mutex_unlock();
nr_writes++;
if (unlikely(!test_duration_write()))
break;
#include <unistd.h>
#include <stdio.h>
#include <assert.h>
-#include <sys/syscall.h>
#include <sched.h>
#include <errno.h>
#include <urcu/arch.h>
+#ifdef __linux__
+#include <syscall.h>
+#endif
+
/* hardcoded number of CPUs */
#define NR_CPUS 16384
#include <unistd.h>
#include <stdio.h>
#include <assert.h>
-#include <sys/syscall.h>
#include <sched.h>
#include <errno.h>
#include <urcu/arch.h>
+#ifdef __linux__
+#include <syscall.h>
+#endif
+
/* hardcoded number of CPUs */
#define NR_CPUS 16384
#include <unistd.h>
#include <stdio.h>
#include <assert.h>
-#include <sys/syscall.h>
#include <sched.h>
#include <errno.h>
#include <urcu/arch.h>
+#ifdef __linux__
+#include <syscall.h>
+#endif
+
/* hardcoded number of CPUs */
#define NR_CPUS 16384
#define _LGPL_SOURCE
#endif
#include <urcu.h>
-#include <urcu/rculfqueue.h>
+#include <urcu/cds.h>
#include <urcu-defer.h>
static volatile int test_go, test_stop;
}
-static void rcu_release_node(struct urcu_ref *ref)
-{
- struct cds_lfq_node_rcu *node = caa_container_of(ref, struct cds_lfq_node_rcu, ref);
- defer_rcu(free, node);
- //synchronize_rcu();
- //free(node);
-}
-
void *thr_dequeuer(void *_count)
{
unsigned long long *count = _count;
cmm_smp_mb();
for (;;) {
- struct cds_lfq_node_rcu *node = cds_lfq_dequeue_rcu(&q,
- rcu_release_node);
+ struct cds_lfq_node_rcu *node;
+ node = cds_lfq_dequeue_rcu(&q);
if (node) {
- urcu_ref_put(&node->ref, rcu_release_node);
+ defer_rcu(free, node);
nr_successful_dequeues++;
}
rcu_unregister_thread();
rcu_defer_unregister_thread();
-
printf_verbose("dequeuer thread_end, thread id : %lx, tid %lu, "
"dequeues %llu, successful_dequeues %llu\n",
pthread_self(), (unsigned long)gettid(), nr_dequeues,
return ((void*)2);
}
-static void release_node(struct urcu_ref *ref)
-{
- struct cds_lfq_node_rcu *node = caa_container_of(ref, struct cds_lfq_node_rcu, ref);
- free(node);
-}
-
void test_end(struct cds_lfq_queue_rcu *q, unsigned long long *nr_dequeues)
{
struct cds_lfq_node_rcu *node;
do {
- node = cds_lfq_dequeue_rcu(q, release_node);
+ node = cds_lfq_dequeue_rcu(q);
if (node) {
- urcu_ref_put(&node->ref, release_node);
+ free(node); /* no more concurrent access */
(*nr_dequeues)++;
}
} while (node);
}
test_end(&q, &end_dequeues);
+ err = cds_lfq_destroy_rcu(&q);
+ assert(!err);
printf_verbose("total number of enqueues : %llu, dequeues %llu\n",
tot_enqueues, tot_dequeues);
#include <unistd.h>
#include <stdio.h>
#include <assert.h>
-#include <sys/syscall.h>
#include <sched.h>
#include <errno.h>
#include <urcu/arch.h>
+#ifdef __linux__
+#include <syscall.h>
+#endif
+
/* hardcoded number of CPUs */
#define NR_CPUS 16384
#define _LGPL_SOURCE
#endif
#include <urcu.h>
-#include <urcu/rculfstack.h>
+#include <urcu/cds.h>
#include <urcu-defer.h>
static volatile int test_go, test_stop;
if (!node)
goto fail;
cds_lfs_node_init_rcu(node);
+ /* No rcu read-side is needed for push */
cds_lfs_push_rcu(&s, node);
nr_successful_enqueues++;
cmm_smp_mb();
for (;;) {
- struct cds_lfs_node_rcu *node = cds_lfs_pop_rcu(&s);
+ struct cds_lfs_node_rcu *node;
+ node = cds_lfs_pop_rcu(&s);
if (node) {
defer_rcu(free, node);
nr_successful_dequeues++;
}
-
nr_dequeues++;
if (unlikely(!test_duration_dequeue()))
break;
#include <unistd.h>
#include <stdio.h>
#include <assert.h>
-#include <sys/syscall.h>
#include <errno.h>
#include <urcu/arch.h>
+#ifdef __linux__
+#include <syscall.h>
+#endif
+
#if defined(_syscall0)
_syscall0(pid_t, gettid)
#elif defined(__NR_gettid)
#include <unistd.h>
#include <stdio.h>
#include <assert.h>
-#include <sys/syscall.h>
#include <sched.h>
#include <errno.h>
#include <urcu/arch.h>
+#ifdef __linux__
+#include <syscall.h>
+#endif
+
/* hardcoded number of CPUs */
#define NR_CPUS 16384
#include <unistd.h>
#include <stdio.h>
#include <assert.h>
-#include <sys/syscall.h>
#include <sched.h>
#include <errno.h>
#include <urcu/arch.h>
+#ifdef __linux__
+#include <syscall.h>
+#endif
+
/* hardcoded number of CPUs */
#define NR_CPUS 16384
#include <urcu-bp.h>
#endif
-#include <urcu/uatomic_arch.h>
+#include <urcu/uatomic.h>
#include <urcu/rculist.h>
#include "rcutorture.h"
+++ /dev/null
-#ifndef _URCU_BP_MAP_H
-#define _URCU_BP_MAP_H
-
-/*
- * urcu-map.h
- *
- * Userspace RCU header -- name mapping to allow multiple flavors to be
- * used in the same executable.
- *
- * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
- *
- * LGPL-compatible code should include this header with :
- *
- * #define _LGPL_SOURCE
- * #include <urcu.h>
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- *
- * IBM's contributions to this file may be relicensed under LGPLv2 or later.
- */
-
-/* Mapping macros to allow multiple flavors in a single binary. */
-
-#define rcu_read_lock rcu_read_lock_bp
-#define _rcu_read_lock _rcu_read_lock_bp
-#define rcu_read_unlock rcu_read_unlock_bp
-#define _rcu_read_unlock _rcu_read_unlock_bp
-#define rcu_register_thread rcu_register_thread_bp
-#define rcu_unregister_thread rcu_unregister_thread_bp
-#define rcu_init rcu_init_bp
-#define rcu_exit rcu_exit_bp
-#define synchronize_rcu synchronize_rcu_bp
-#define rcu_reader rcu_reader_bp
-#define rcu_gp_ctr rcu_gp_ctr_bp
-
-#define get_cpu_call_rcu_data get_cpu_call_rcu_data_bp
-#define get_call_rcu_thread get_call_rcu_thread_bp
-#define create_call_rcu_data create_call_rcu_data_bp
-#define set_cpu_call_rcu_data set_cpu_call_rcu_data_bp
-#define get_default_call_rcu_data get_default_call_rcu_data_bp
-#define get_call_rcu_data get_call_rcu_data_bp
-#define get_thread_call_rcu_data get_thread_call_rcu_data_bp
-#define set_thread_call_rcu_data set_thread_call_rcu_data_bp
-#define create_all_cpu_call_rcu_data create_all_cpu_call_rcu_data_bp
-#define free_all_cpu_call_rcu_data free_all_cpu_call_rcu_data_bp
-#define call_rcu call_rcu_bp
-
-#define defer_rcu defer_rcu_bp
-#define rcu_defer_register_thread rcu_defer_register_thread_bp
-#define rcu_defer_unregister_thread rcu_defer_unregister_thread_bp
-#define rcu_defer_barrier rcu_defer_barrier_bp
-#define rcu_defer_barrier_thread rcu_defer_barrier_thread_bp
-
-#endif /* _URCU_BP_MAP_H */
+++ /dev/null
-#ifndef _URCU_BP_STATIC_H
-#define _URCU_BP_STATIC_H
-
-/*
- * urcu-bp-static.h
- *
- * Userspace RCU header.
- *
- * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See urcu.h for linking
- * dynamically with the userspace rcu library.
- *
- * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- *
- * IBM's contributions to this file may be relicensed under LGPLv2 or later.
- */
-
-#include <stdlib.h>
-#include <pthread.h>
-#include <syscall.h>
-#include <unistd.h>
-
-#include <urcu/compiler.h>
-#include <urcu/arch.h>
-#include <urcu/system.h>
-#include <urcu/uatomic_arch.h>
-#include <urcu/list.h>
-
-/*
- * This code section can only be included in LGPL 2.1 compatible source code.
- * See below for the function call wrappers which can be used in code meant to
- * be only linked with the Userspace RCU library. This comes with a small
- * performance degradation on the read-side due to the added function calls.
- * This is required to permit relinking with newer versions of the library.
- */
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/*
- * Active attempts to check for reader Q.S. before calling sleep().
- */
-#define RCU_QS_ACTIVE_ATTEMPTS 100
-
-#ifdef DEBUG_RCU
-#define rcu_assert(args...) assert(args)
-#else
-#define rcu_assert(args...)
-#endif
-
-#ifdef DEBUG_YIELD
-#include <sched.h>
-#include <time.h>
-#include <pthread.h>
-#include <unistd.h>
-
-#define YIELD_READ (1 << 0)
-#define YIELD_WRITE (1 << 1)
-
-/*
- * Updates without RCU_MB are much slower. Account this in
- * the delay.
- */
-/* maximum sleep delay, in us */
-#define MAX_SLEEP 50
-
-extern unsigned int yield_active;
-extern unsigned int __thread rand_yield;
-
-static inline void debug_yield_read(void)
-{
- if (yield_active & YIELD_READ)
- if (rand_r(&rand_yield) & 0x1)
- usleep(rand_r(&rand_yield) % MAX_SLEEP);
-}
-
-static inline void debug_yield_write(void)
-{
- if (yield_active & YIELD_WRITE)
- if (rand_r(&rand_yield) & 0x1)
- usleep(rand_r(&rand_yield) % MAX_SLEEP);
-}
-
-static inline void debug_yield_init(void)
-{
- rand_yield = time(NULL) ^ pthread_self();
-}
-#else
-static inline void debug_yield_read(void)
-{
-}
-
-static inline void debug_yield_write(void)
-{
-}
-
-static inline void debug_yield_init(void)
-{
-
-}
-#endif
-
-/*
- * The trick here is that RCU_GP_CTR_PHASE must be a multiple of 8 so we can use a
- * full 8-bits, 16-bits or 32-bits bitmask for the lower order bits.
- */
-#define RCU_GP_COUNT (1UL << 0)
-/* Use the amount of bits equal to half of the architecture long size */
-#define RCU_GP_CTR_PHASE (1UL << (sizeof(long) << 2))
-#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_PHASE - 1)
-
-/*
- * Used internally by _rcu_read_lock.
- */
-extern void rcu_bp_register(void);
-
-/*
- * Global quiescent period counter with low-order bits unused.
- * Using a int rather than a char to eliminate false register dependencies
- * causing stalls on some architectures.
- */
-extern long rcu_gp_ctr;
-
-struct rcu_reader {
- /* Data used by both reader and synchronize_rcu() */
- long ctr;
- /* Data used for registry */
- struct cds_list_head node __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
- pthread_t tid;
- int alloc; /* registry entry allocated */
-};
-
-/*
- * Bulletproof version keeps a pointer to a registry not part of the TLS.
- * Adds a pointer dereference on the read-side, but won't require to unregister
- * the reader thread.
- */
-extern struct rcu_reader __thread *rcu_reader;
-
-static inline int rcu_old_gp_ongoing(long *value)
-{
- long v;
-
- if (value == NULL)
- return 0;
- /*
- * Make sure both tests below are done on the same version of *value
- * to insure consistency.
- */
- v = CMM_LOAD_SHARED(*value);
- return (v & RCU_GP_CTR_NEST_MASK) &&
- ((v ^ rcu_gp_ctr) & RCU_GP_CTR_PHASE);
-}
-
-static inline void _rcu_read_lock(void)
-{
- long tmp;
-
- /* Check if registered */
- if (unlikely(!rcu_reader))
- rcu_bp_register();
-
- cmm_barrier(); /* Ensure the compiler does not reorder us with mutex */
- tmp = rcu_reader->ctr;
- /*
- * rcu_gp_ctr is
- * RCU_GP_COUNT | (~RCU_GP_CTR_PHASE or RCU_GP_CTR_PHASE)
- */
- if (likely(!(tmp & RCU_GP_CTR_NEST_MASK))) {
- _CMM_STORE_SHARED(rcu_reader->ctr, _CMM_LOAD_SHARED(rcu_gp_ctr));
- /*
- * Set active readers count for outermost nesting level before
- * accessing the pointer.
- */
- cmm_smp_mb();
- } else {
- _CMM_STORE_SHARED(rcu_reader->ctr, tmp + RCU_GP_COUNT);
- }
-}
-
-static inline void _rcu_read_unlock(void)
-{
- /*
- * Finish using rcu before decrementing the pointer.
- */
- cmm_smp_mb();
- _CMM_STORE_SHARED(rcu_reader->ctr, rcu_reader->ctr - RCU_GP_COUNT);
- cmm_barrier(); /* Ensure the compiler does not reorder us with mutex */
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _URCU_BP_STATIC_H */
#include <unistd.h>
#include <sys/mman.h>
-#include "urcu-bp-map.h"
+#include "urcu/map/urcu-bp.h"
-#include "urcu-bp-static.h"
+#include "urcu/static/urcu-bp.h"
/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#include "urcu-bp.h"
+#ifndef MAP_ANONYMOUS
+#define MAP_ANONYMOUS MAP_ANON
+#endif
+
+#ifndef __linux__
+
+#define MREMAP_MAYMOVE 1
+#define MREMAP_FIXED 2
+
+/*
+ * mremap wrapper for non-Linux systems. Maps a RW, anonymous private mapping.
+ * This is not generic.
+*/
+void *mremap(void *old_address, size_t old_size, size_t new_size, int flags)
+{
+ void *new_address;
+
+ assert(flags & MREMAP_MAYMOVE);
+ assert(!(flags & MREMAP_FIXED));
+ new_address = mmap(old_address, new_size,
+ PROT_READ | PROT_WRITE,
+ MAP_ANONYMOUS | MAP_PRIVATE,
+ -1, 0);
+ if (new_address == MAP_FAILED)
+ return MAP_FAILED;
+ if (old_address) {
+ memcpy(new_address, old_address, old_size);
+ munmap(old_address, old_size);
+ }
+ return new_address;
+}
+#endif
+
/* Sleep delay in us */
#define RCU_SLEEP_DELAY 1000
#define ARENA_INIT_ALLOC 16
+/*
+ * Active attempts to check for reader Q.S. before calling sleep().
+ */
+#define RCU_QS_ACTIVE_ATTEMPTS 100
+
void __attribute__((destructor)) rcu_bp_exit(void);
static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
if (new_arena == arena->p)
return;
- memcpy(new_arena, arena->p, arena->len);
bzero(new_arena + arena->len, len - arena->len);
arena->p = new_arena;
}
if (registry_arena.len
< registry_arena.used + sizeof(struct rcu_reader))
 resize_arena(&registry_arena,
- max(registry_arena.len << 1, ARENA_INIT_ALLOC));
+ caa_max(registry_arena.len << 1, ARENA_INIT_ALLOC));
/*
* Find a free spot.
*/
#include <pthread.h>
/*
- * See urcu-pointer.h and urcu-pointer-static.h for pointer publication headers.
+ * See urcu-pointer.h and urcu/static/urcu-pointer.h for pointer
+ * publication headers.
*/
#include <urcu-pointer.h>
extern "C" {
#endif
-#include "urcu-bp-map.h"
+#include <urcu/map/urcu-bp.h>
/*
* Important !
#ifdef _LGPL_SOURCE
-#include <urcu-bp-static.h>
+#include <urcu/static/urcu-bp.h>
/*
* Mappings for static use of the userspace RCU library.
/*
* library wrappers to be used by non-LGPL compatible source code.
- * See LGPL-only urcu-pointer-static.h for documentation.
+ * See LGPL-only urcu/static/urcu-pointer.h for documentation.
*/
extern void rcu_read_lock(void);
}
#endif
-#include "urcu-call-rcu.h"
-#include "urcu-defer.h"
+#include <urcu-call-rcu.h>
+#include <urcu-defer.h>
#endif /* _URCU_BP_H */
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
+#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <poll.h>
#include <sys/time.h>
-#include <syscall.h>
#include <unistd.h>
#include <sched.h>
#include "urcu-call-rcu.h"
#include "urcu-pointer.h"
#include "urcu/list.h"
-#include "urcu/urcu-futex.h"
+#include "urcu/futex.h"
/* Data structure that identifies a call_rcu thread. */
struct call_rcu_data {
struct cds_wfq_queue cbs;
unsigned long flags;
- int futex;
+ int32_t futex;
unsigned long qlen; /* maintained for debugging. */
pthread_t tid;
int cpu_affinity;
static struct call_rcu_data **per_cpu_call_rcu_data;
static long maxcpus;
-static void call_rcu_wait(struct call_rcu_data *crdp)
-{
- /* Read call_rcu list before read futex */
- cmm_smp_mb();
- if (uatomic_read(&crdp->futex) == -1)
- futex_async(&crdp->futex, FUTEX_WAIT, -1,
- NULL, NULL, 0);
-}
-
-static void call_rcu_wake_up(struct call_rcu_data *crdp)
-{
- /* Write to call_rcu list before reading/writing futex */
- cmm_smp_mb();
- if (unlikely(uatomic_read(&crdp->futex) == -1)) {
- uatomic_set(&crdp->futex, 0);
- futex_async(&crdp->futex, FUTEX_WAKE, 1,
- NULL, NULL, 0);
- }
-}
-
/* Allocate the array if it has not already been allocated. */
static void alloc_cpu_call_rcu_data(void)
#else /* #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */
-static const struct call_rcu_data **per_cpu_call_rcu_data = NULL;
+/*
+ * per_cpu_call_rcu_data should be constant, but some functions below, used both
+ * for cases where cpu number is available and not available, assume it is not
+ * constant.
+ */
+static struct call_rcu_data **per_cpu_call_rcu_data = NULL;
static const long maxcpus = -1;
static void alloc_cpu_call_rcu_data(void)
}
#endif
+static void call_rcu_wait(struct call_rcu_data *crdp)
+{
+ /* Read call_rcu list before read futex */
+ cmm_smp_mb();
+ if (uatomic_read(&crdp->futex) == -1)
+ futex_async(&crdp->futex, FUTEX_WAIT, -1,
+ NULL, NULL, 0);
+}
+
+static void call_rcu_wake_up(struct call_rcu_data *crdp)
+{
+ /* Write to call_rcu list before reading/writing futex */
+ cmm_smp_mb();
+ if (unlikely(uatomic_read(&crdp->futex) == -1)) {
+ uatomic_set(&crdp->futex, 0);
+ futex_async(&crdp->futex, FUTEX_WAKE, 1,
+ NULL, NULL, 0);
+ }
+}
+
/* This is the code run by each call_rcu thread. */
static void *call_rcu_thread(void *arg)
#include <errno.h>
#include <poll.h>
#include <sys/time.h>
-#include <syscall.h>
#include <unistd.h>
+#include <stdint.h>
-#include "urcu/urcu-futex.h"
+#include "urcu/futex.h"
#include <urcu/compiler.h>
#include <urcu/arch.h>
-#include <urcu/uatomic_arch.h>
+#include <urcu/uatomic.h>
#include <urcu/list.h>
#include <urcu/system.h>
static pthread_mutex_t rcu_defer_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t defer_thread_mutex = PTHREAD_MUTEX_INITIALIZER;
-static int defer_thread_futex;
+static int32_t defer_thread_futex;
/*
* Written to only by each individual deferer. Read by both the deferer and
+++ /dev/null
-#ifndef _URCU_MAP_H
-#define _URCU_MAP_H
-
-/*
- * urcu-map.h
- *
- * Userspace RCU header -- name mapping to allow multiple flavors to be
- * used in the same executable.
- *
- * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
- *
- * LGPL-compatible code should include this header with :
- *
- * #define _LGPL_SOURCE
- * #include <urcu.h>
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- *
- * IBM's contributions to this file may be relicensed under LGPLv2 or later.
- */
-
-/* Mapping macros to allow multiple flavors in a single binary. */
-
-#if !defined(RCU_MEMBARRIER) && !defined(RCU_SIGNAL) && !defined(RCU_MB)
-#define RCU_MB
-#endif
-
-#ifdef RCU_MEMBARRIER
-
-#define rcu_read_lock rcu_read_lock_memb
-#define _rcu_read_lock _rcu_read_lock_memb
-#define rcu_read_unlock rcu_read_unlock_memb
-#define _rcu_read_unlock _rcu_read_unlock_memb
-#define rcu_register_thread rcu_register_thread_memb
-#define rcu_unregister_thread rcu_unregister_thread_memb
-#define rcu_init rcu_init_memb
-#define rcu_exit rcu_exit_memb
-#define synchronize_rcu synchronize_rcu_memb
-#define rcu_reader rcu_reader_memb
-#define rcu_gp_ctr rcu_gp_ctr_memb
-
-#define get_cpu_call_rcu_data get_cpu_call_rcu_data_memb
-#define get_call_rcu_thread get_call_rcu_thread_memb
-#define create_call_rcu_data create_call_rcu_data_memb
-#define set_cpu_call_rcu_data set_cpu_call_rcu_data_memb
-#define get_default_call_rcu_data get_default_call_rcu_data_memb
-#define get_call_rcu_data get_call_rcu_data_memb
-#define get_thread_call_rcu_data get_thread_call_rcu_data_memb
-#define set_thread_call_rcu_data set_thread_call_rcu_data_memb
-#define create_all_cpu_call_rcu_data create_all_cpu_call_rcu_data_memb
-#define free_all_cpu_call_rcu_data free_all_cpu_call_rcu_data_memb
-#define call_rcu call_rcu_memb
-
-#define defer_rcu defer_rcu_memb
-#define rcu_defer_register_thread rcu_defer_register_thread_memb
-#define rcu_defer_unregister_thread rcu_defer_unregister_thread_memb
-#define rcu_defer_barrier rcu_defer_barrier_memb
-#define rcu_defer_barrier_thread rcu_defer_barrier_thread_memb
-
-#elif defined(RCU_SIGNAL)
-
-#define rcu_read_lock rcu_read_lock_sig
-#define _rcu_read_lock _rcu_read_lock_sig
-#define rcu_read_unlock rcu_read_unlock_sig
-#define _rcu_read_unlock _rcu_read_unlock_sig
-#define rcu_register_thread rcu_register_thread_sig
-#define rcu_unregister_thread rcu_unregister_thread_sig
-#define rcu_init rcu_init_sig
-#define rcu_exit rcu_exit_sig
-#define synchronize_rcu synchronize_rcu_sig
-#define rcu_reader rcu_reader_sig
-#define rcu_gp_ctr rcu_gp_ctr_sig
-
-#define get_cpu_call_rcu_data get_cpu_call_rcu_data_sig
-#define get_call_rcu_thread get_call_rcu_thread_sig
-#define create_call_rcu_data create_call_rcu_data_sig
-#define set_cpu_call_rcu_data set_cpu_call_rcu_data_sig
-#define get_default_call_rcu_data get_default_call_rcu_data_sig
-#define get_call_rcu_data get_call_rcu_data_sig
-#define get_thread_call_rcu_data get_thread_call_rcu_data_sig
-#define set_thread_call_rcu_data set_thread_call_rcu_data_sig
-#define create_all_cpu_call_rcu_data create_all_cpu_call_rcu_data_sig
-#define free_all_cpu_call_rcu_data free_all_cpu_call_rcu_data_sig
-#define call_rcu call_rcu_sig
-
-#define defer_rcu defer_rcu_sig
-#define rcu_defer_register_thread rcu_defer_register_thread_sig
-#define rcu_defer_unregister_thread rcu_defer_unregister_thread_sig
-#define rcu_defer_barrier rcu_defer_barrier_sig
-#define rcu_defer_barrier_thread rcu_defer_barrier_thread_sig
-
-#elif defined(RCU_MB)
-
-#define rcu_read_lock rcu_read_lock_mb
-#define _rcu_read_lock _rcu_read_lock_mb
-#define rcu_read_unlock rcu_read_unlock_mb
-#define _rcu_read_unlock _rcu_read_unlock_mb
-#define rcu_register_thread rcu_register_thread_mb
-#define rcu_unregister_thread rcu_unregister_thread_mb
-#define rcu_init rcu_init_mb
-#define rcu_exit rcu_exit_mb
-#define synchronize_rcu synchronize_rcu_mb
-#define rcu_reader rcu_reader_mb
-#define rcu_gp_ctr rcu_gp_ctr_mb
-
-#define get_cpu_call_rcu_data get_cpu_call_rcu_data_mb
-#define get_call_rcu_thread get_call_rcu_thread_mb
-#define create_call_rcu_data create_call_rcu_data_mb
-#define set_cpu_call_rcu_data set_cpu_call_rcu_data_mb
-#define get_default_call_rcu_data get_default_call_rcu_data_mb
-#define get_call_rcu_data get_call_rcu_data_mb
-#define get_thread_call_rcu_data get_thread_call_rcu_data_mb
-#define set_thread_call_rcu_data set_thread_call_rcu_data_mb
-#define create_all_cpu_call_rcu_data create_all_cpu_call_rcu_data_mb
-#define free_all_cpu_call_rcu_data free_all_cpu_call_rcu_data_mb
-#define call_rcu call_rcu_mb
-
-#define defer_rcu defer_rcu_mb
-#define rcu_defer_register_thread rcu_defer_register_thread_mb
-#define rcu_defer_unregister_thread rcu_defer_unregister_thread_mb
-#define rcu_defer_barrier rcu_defer_barrier_mb
-#define rcu_defer_barrier_thread rcu_defer_barrier_thread_mb
-
-#else
-
-#error "Undefined selection"
-
-#endif
-
-#endif /* _URCU_MAP_H */
+++ /dev/null
-#ifndef _URCU_POINTER_STATIC_H
-#define _URCU_POINTER_STATIC_H
-
-/*
- * urcu-pointer-static.h
- *
- * Userspace RCU header. Operations on pointers.
- *
- * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See urcu-pointer.h for
- * linking dynamically with the userspace rcu library.
- *
- * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- *
- * IBM's contributions to this file may be relicensed under LGPLv2 or later.
- */
-
-#include <urcu/compiler.h>
-#include <urcu/arch.h>
-#include <urcu/system.h>
-#include <urcu/uatomic_arch.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/**
- * _rcu_dereference - reads (copy) a RCU-protected pointer to a local variable
- * into a RCU read-side critical section. The pointer can later be safely
- * dereferenced within the critical section.
- *
- * This ensures that the pointer copy is invariant thorough the whole critical
- * section.
- *
- * Inserts memory barriers on architectures that require them (currently only
- * Alpha) and documents which pointers are protected by RCU.
- *
- * The compiler memory barrier in CMM_LOAD_SHARED() ensures that value-speculative
- * optimizations (e.g. VSS: Value Speculation Scheduling) does not perform the
- * data read before the pointer read by speculating the value of the pointer.
- * Correct ordering is ensured because the pointer is read as a volatile access.
- * This acts as a global side-effect operation, which forbids reordering of
- * dependent memory operations. Note that such concern about dependency-breaking
- * optimizations will eventually be taken care of by the "memory_order_consume"
- * addition to forthcoming C++ standard.
- *
- * Should match rcu_assign_pointer() or rcu_xchg_pointer().
- */
-
-#define _rcu_dereference(p) ({ \
- typeof(p) _________p1 = CMM_LOAD_SHARED(p); \
- cmm_smp_read_barrier_depends(); \
- (_________p1); \
- })
-
-/**
- * _rcu_cmpxchg_pointer - same as rcu_assign_pointer, but tests if the pointer
- * is as expected by "old". If succeeds, returns the previous pointer to the
- * data structure, which can be safely freed after waiting for a quiescent state
- * using synchronize_rcu(). If fails (unexpected value), returns old (which
- * should not be freed !).
- */
-
-#define _rcu_cmpxchg_pointer(p, old, _new) \
- ({ \
- typeof(*p) _________pold = (old); \
- typeof(*p) _________pnew = (_new); \
- if (!__builtin_constant_p(_new) || \
- ((_new) != NULL)) \
- cmm_wmb(); \
- uatomic_cmpxchg(p, _________pold, _________pnew); \
- })
-
-/**
- * _rcu_xchg_pointer - same as rcu_assign_pointer, but returns the previous
- * pointer to the data structure, which can be safely freed after waiting for a
- * quiescent state using synchronize_rcu().
- */
-
-#define _rcu_xchg_pointer(p, v) \
- ({ \
- typeof(*p) _________pv = (v); \
- if (!__builtin_constant_p(v) || \
- ((v) != NULL)) \
- cmm_wmb(); \
- uatomic_xchg(p, _________pv); \
- })
-
-
-#define _rcu_set_pointer(p, v) \
- ({ \
- typeof(*p) _________pv = (v); \
- if (!__builtin_constant_p(v) || \
- ((v) != NULL)) \
- cmm_wmb(); \
- uatomic_set(p, _________pv); \
- })
-
-/**
- * _rcu_assign_pointer - assign (publicize) a pointer to a new data structure
- * meant to be read by RCU read-side critical sections. Returns the assigned
- * value.
- *
- * Documents which pointers will be dereferenced by RCU read-side critical
- * sections and adds the required memory barriers on architectures requiring
- * them. It also makes sure the compiler does not reorder code initializing the
- * data structure before its publication.
- *
- * Should match rcu_dereference_pointer().
- */
-
-#define _rcu_assign_pointer(p, v) _rcu_set_pointer(&(p), v)
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _URCU_POINTER_STATIC_H */
* IBM's contributions to this file may be relicensed under LGPLv2 or later.
*/
-#include <urcu/uatomic_arch.h>
+#include <urcu/uatomic.h>
-#include "urcu-pointer-static.h"
+#include "urcu/static/urcu-pointer.h"
/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#include "urcu-pointer.h"
#include <urcu/compiler.h>
#include <urcu/arch.h>
-#include <urcu/uatomic_arch.h>
+#include <urcu/uatomic.h>
#ifdef __cplusplus
extern "C" {
#ifdef _LGPL_SOURCE
-#include <urcu-pointer-static.h>
+#include <urcu/static/urcu-pointer.h>
/*
* rcu_dereference(ptr)
+++ /dev/null
-#ifndef _URCU_QSBR_MAP_H
-#define _URCU_QSBR_MAP_H
-
-/*
- * urcu-map.h
- *
- * Userspace RCU header -- name mapping to allow multiple flavors to be
- * used in the same executable.
- *
- * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
- *
- * LGPL-compatible code should include this header with :
- *
- * #define _LGPL_SOURCE
- * #include <urcu.h>
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- *
- * IBM's contributions to this file may be relicensed under LGPLv2 or later.
- */
-
-/* Mapping macros to allow multiple flavors in a single binary. */
-
-#define rcu_read_lock rcu_read_lock_qsbr
-#define _rcu_read_lock _rcu_read_lock_qsbr
-#define rcu_read_unlock rcu_read_unlock_qsbr
-#define _rcu_read_unlock _rcu_read_unlock_qsbr
-#define rcu_quiescent_state rcu_quiescent_state_qsbr
-#define _rcu_quiescent_state _rcu_quiescent_state_qsbr
-#define rcu_thread_offline rcu_thread_offline_qsbr
-#define rcu_thread_online rcu_thread_online_qsbr
-#define rcu_register_thread rcu_register_thread_qsbr
-#define rcu_unregister_thread rcu_unregister_thread_qsbr
-#define rcu_exit rcu_exit_qsbr
-#define synchronize_rcu synchronize_rcu_qsbr
-#define rcu_reader rcu_reader_qsbr
-#define rcu_gp_ctr rcu_gp_ctr_qsbr
-
-#define get_cpu_call_rcu_data get_cpu_call_rcu_data_qsbr
-#define get_call_rcu_thread get_call_rcu_thread_qsbr
-#define create_call_rcu_data create_call_rcu_data_qsbr
-#define set_cpu_call_rcu_data set_cpu_call_rcu_data_qsbr
-#define get_default_call_rcu_data get_default_call_rcu_data_qsbr
-#define get_call_rcu_data get_call_rcu_data_qsbr
-#define get_thread_call_rcu_data get_thread_call_rcu_data_qsbr
-#define set_thread_call_rcu_data set_thread_call_rcu_data_qsbr
-#define create_all_cpu_call_rcu_data create_all_cpu_call_rcu_data_qsbr
-#define call_rcu call_rcu_qsbr
-
-#define defer_rcu defer_rcu_qsbr
-#define rcu_defer_register_thread rcu_defer_register_thread_qsbr
-#define rcu_defer_unregister_thread rcu_defer_unregister_thread_qsbr
-#define rcu_defer_barrier rcu_defer_barrier_qsbr
-#define rcu_defer_barrier_thread rcu_defer_barrier_thread_qsbr
-
-#endif /* _URCU_QSBR_MAP_H */
+++ /dev/null
-#ifndef _URCU_QSBR_STATIC_H
-#define _URCU_QSBR_STATIC_H
-
-/*
- * urcu-qsbr-static.h
- *
- * Userspace RCU QSBR header.
- *
- * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See urcu-qsbr.h for linking
- * dynamically with the userspace rcu QSBR library.
- *
- * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- *
- * IBM's contributions to this file may be relicensed under LGPLv2 or later.
- */
-
-#include <stdlib.h>
-#include <pthread.h>
-#include <assert.h>
-#include <limits.h>
-#include <syscall.h>
-#include <unistd.h>
-
-#include <urcu/compiler.h>
-#include <urcu/arch.h>
-#include <urcu/system.h>
-#include <urcu/uatomic_arch.h>
-#include <urcu/list.h>
-#include <urcu/urcu-futex.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/*
- * This code section can only be included in LGPL 2.1 compatible source code.
- * See below for the function call wrappers which can be used in code meant to
- * be only linked with the Userspace RCU library. This comes with a small
- * performance degradation on the read-side due to the added function calls.
- * This is required to permit relinking with newer versions of the library.
- */
-
-/*
- * If a reader is really non-cooperative and refuses to commit its
- * rcu_reader.ctr count to memory (there is no barrier in the reader
- * per-se), kick it after a few loops waiting for it.
- */
-#define KICK_READER_LOOPS 10000
-
-/*
- * Active attempts to check for reader Q.S. before calling futex().
- */
-#define RCU_QS_ACTIVE_ATTEMPTS 100
-
-#ifdef DEBUG_RCU
-#define rcu_assert(args...) assert(args)
-#else
-#define rcu_assert(args...)
-#endif
-
-#ifdef DEBUG_YIELD
-#include <sched.h>
-#include <time.h>
-#include <pthread.h>
-#include <unistd.h>
-
-#define YIELD_READ (1 << 0)
-#define YIELD_WRITE (1 << 1)
-
-/* maximum sleep delay, in us */
-#define MAX_SLEEP 50
-
-extern unsigned int yield_active;
-extern unsigned int __thread rand_yield;
-
-static inline void debug_yield_read(void)
-{
- if (yield_active & YIELD_READ)
- if (rand_r(&rand_yield) & 0x1)
- usleep(rand_r(&rand_yield) % MAX_SLEEP);
-}
-
-static inline void debug_yield_write(void)
-{
- if (yield_active & YIELD_WRITE)
- if (rand_r(&rand_yield) & 0x1)
- usleep(rand_r(&rand_yield) % MAX_SLEEP);
-}
-
-static inline void debug_yield_init(void)
-{
- rand_yield = time(NULL) ^ pthread_self();
-}
-#else
-static inline void debug_yield_read(void)
-{
-}
-
-static inline void debug_yield_write(void)
-{
-}
-
-static inline void debug_yield_init(void)
-{
-
-}
-#endif
-
-#define RCU_GP_ONLINE (1UL << 0)
-#define RCU_GP_CTR (1UL << 1)
-
-/*
- * Global quiescent period counter with low-order bits unused.
- * Using a int rather than a char to eliminate false register dependencies
- * causing stalls on some architectures.
- */
-extern unsigned long rcu_gp_ctr;
-
-struct rcu_reader {
- /* Data used by both reader and synchronize_rcu() */
- unsigned long ctr;
- /* Data used for registry */
- struct cds_list_head node __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
- pthread_t tid;
-};
-
-extern struct rcu_reader __thread rcu_reader;
-
-extern int gp_futex;
-
-/*
- * Wake-up waiting synchronize_rcu(). Called from many concurrent threads.
- */
-static inline void wake_up_gp(void)
-{
- if (unlikely(uatomic_read(&gp_futex) == -1)) {
- uatomic_set(&gp_futex, 0);
- futex_noasync(&gp_futex, FUTEX_WAKE, 1,
- NULL, NULL, 0);
- }
-}
-
-static inline int rcu_gp_ongoing(unsigned long *ctr)
-{
- unsigned long v;
-
- v = CMM_LOAD_SHARED(*ctr);
- return v && (v != rcu_gp_ctr);
-}
-
-static inline void _rcu_read_lock(void)
-{
- rcu_assert(rcu_reader.ctr);
-}
-
-static inline void _rcu_read_unlock(void)
-{
-}
-
-static inline void _rcu_quiescent_state(void)
-{
- cmm_smp_mb();
- _CMM_STORE_SHARED(rcu_reader.ctr, _CMM_LOAD_SHARED(rcu_gp_ctr));
- cmm_smp_mb(); /* write rcu_reader.ctr before read futex */
- wake_up_gp();
- cmm_smp_mb();
-}
-
-static inline void _rcu_thread_offline(void)
-{
- cmm_smp_mb();
- CMM_STORE_SHARED(rcu_reader.ctr, 0);
- cmm_smp_mb(); /* write rcu_reader.ctr before read futex */
- wake_up_gp();
- cmm_barrier(); /* Ensure the compiler does not reorder us with mutex */
-}
-
-static inline void _rcu_thread_online(void)
-{
- cmm_barrier(); /* Ensure the compiler does not reorder us with mutex */
- _CMM_STORE_SHARED(rcu_reader.ctr, CMM_LOAD_SHARED(rcu_gp_ctr));
- cmm_smp_mb();
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _URCU_QSBR_STATIC_H */
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
+#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <poll.h>
-#include "urcu-qsbr-map.h"
+#include "urcu/map/urcu-qsbr.h"
#define BUILD_QSBR_LIB
-#include "urcu-qsbr-static.h"
+#include "urcu/static/urcu-qsbr.h"
/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#include "urcu-qsbr.h"
static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
-int gp_futex;
+int32_t gp_futex;
/*
* Global grace period counter.
*/
unsigned long rcu_gp_ctr = RCU_GP_ONLINE;
+/*
+ * Active attempts to check for reader Q.S. before calling futex().
+ */
+#define RCU_QS_ACTIVE_ATTEMPTS 100
+
/*
* Written to only by each individual reader. Read by both the reader and the
* writers.
*/
for (;;) {
wait_loops++;
- if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
- uatomic_dec(&gp_futex);
+ if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
+ uatomic_set(&gp_futex, -1);
+ /*
+ * Write futex before write waiting (the other side
+ * reads them in the opposite order).
+ */
+ cmm_smp_wmb();
+ cds_list_for_each_entry(index, &registry, node) {
+ _CMM_STORE_SHARED(index->waiting, 1);
+ }
/* Write futex before read reader_gp */
cmm_smp_mb();
}
-
cds_list_for_each_entry_safe(index, tmp, &registry, node) {
if (!rcu_gp_ongoing(&index->ctr))
cds_list_move(&index->node, &qsreaders);
}
if (cds_list_empty(&registry)) {
- if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
+ if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
/* Read reader_gp before write futex */
cmm_smp_mb();
uatomic_set(&gp_futex, 0);
}
break;
} else {
- if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
+ if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
wait_gp();
} else {
#ifndef HAS_INCOHERENT_CACHES
#include <pthread.h>
/*
- * See urcu-pointer.h and urcu-pointer-static.h for pointer publication headers.
+ * See urcu-pointer.h and urcu/static/urcu-pointer.h for pointer
+ * publication headers.
*/
#include <urcu-pointer.h>
extern "C" {
#endif
-#include "urcu-qsbr-map.h"
+#include <urcu/map/urcu-qsbr.h>
/*
* Important !
#ifdef _LGPL_SOURCE
-#include <urcu-qsbr-static.h>
+#include <urcu/static/urcu-qsbr.h>
/*
* Mappings for static use of the userspace RCU library.
}
#endif
-#include "urcu-call-rcu.h"
-#include "urcu-defer.h"
+#include <urcu-call-rcu.h>
+#include <urcu-defer.h>
#endif /* _URCU_QSBR_H */
+++ /dev/null
-#ifndef _URCU_STATIC_H
-#define _URCU_STATIC_H
-
-/*
- * urcu-static.h
- *
- * Userspace RCU header.
- *
- * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See urcu.h for linking
- * dynamically with the userspace rcu library.
- *
- * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- *
- * IBM's contributions to this file may be relicensed under LGPLv2 or later.
- */
-
-#include <stdlib.h>
-#include <pthread.h>
-#include <syscall.h>
-#include <unistd.h>
-
-#include <urcu/compiler.h>
-#include <urcu/arch.h>
-#include <urcu/system.h>
-#include <urcu/uatomic_arch.h>
-#include <urcu/list.h>
-#include <urcu/urcu-futex.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* Default is RCU_MEMBARRIER */
-#if !defined(RCU_MEMBARRIER) && !defined(RCU_MB) && !defined(RCU_SIGNAL)
-#define RCU_MEMBARRIER
-#endif
-
-#ifdef RCU_MEMBARRIER
-#include <unistd.h>
-#include <sys/syscall.h>
-
-/* If the headers do not support SYS_membarrier, statically use RCU_MB */
-#ifdef SYS_membarrier
-# define MEMBARRIER_EXPEDITED (1 << 0)
-# define MEMBARRIER_DELAYED (1 << 1)
-# define MEMBARRIER_QUERY (1 << 16)
-# define membarrier(...) syscall(SYS_membarrier, __VA_ARGS__)
-#else
-# undef RCU_MEMBARRIER
-# define RCU_MB
-#endif
-#endif
-
-/*
- * This code section can only be included in LGPL 2.1 compatible source code.
- * See below for the function call wrappers which can be used in code meant to
- * be only linked with the Userspace RCU library. This comes with a small
- * performance degradation on the read-side due to the added function calls.
- * This is required to permit relinking with newer versions of the library.
- */
-
-/*
- * The signal number used by the RCU library can be overridden with
- * -DSIGRCU= when compiling the library.
- * Provide backward compatibility for liburcu 0.3.x SIGURCU.
- */
-#ifdef SIGURCU
-#define SIGRCU SIGURCU
-#endif
-
-#ifndef SIGRCU
-#define SIGRCU SIGUSR1
-#endif
-
-/*
- * If a reader is really non-cooperative and refuses to commit its
- * rcu_active_readers count to memory (there is no barrier in the reader
- * per-se), kick it after a few loops waiting for it.
- */
-#define KICK_READER_LOOPS 10000
-
-/*
- * Active attempts to check for reader Q.S. before calling futex().
- */
-#define RCU_QS_ACTIVE_ATTEMPTS 100
-
-#ifdef DEBUG_RCU
-#define rcu_assert(args...) assert(args)
-#else
-#define rcu_assert(args...)
-#endif
-
-#ifdef DEBUG_YIELD
-#include <sched.h>
-#include <time.h>
-#include <pthread.h>
-#include <unistd.h>
-
-#define YIELD_READ (1 << 0)
-#define YIELD_WRITE (1 << 1)
-
-/*
- * Updates with RCU_SIGNAL are much slower. Account this in the delay.
- */
-#ifdef RCU_SIGNAL
-/* maximum sleep delay, in us */
-#define MAX_SLEEP 30000
-#else
-#define MAX_SLEEP 50
-#endif
-
-extern unsigned int yield_active;
-extern unsigned int __thread rand_yield;
-
-static inline void debug_yield_read(void)
-{
- if (yield_active & YIELD_READ)
- if (rand_r(&rand_yield) & 0x1)
- usleep(rand_r(&rand_yield) % MAX_SLEEP);
-}
-
-static inline void debug_yield_write(void)
-{
- if (yield_active & YIELD_WRITE)
- if (rand_r(&rand_yield) & 0x1)
- usleep(rand_r(&rand_yield) % MAX_SLEEP);
-}
-
-static inline void debug_yield_init(void)
-{
- rand_yield = time(NULL) ^ pthread_self();
-}
-#else
-static inline void debug_yield_read(void)
-{
-}
-
-static inline void debug_yield_write(void)
-{
-}
-
-static inline void debug_yield_init(void)
-{
-
-}
-#endif
-
-/*
- * RCU memory barrier broadcast group. Currently, only broadcast to all process
- * threads is supported (group 0).
- *
- * Slave barriers are only guaranteed to be ordered wrt master barriers.
- *
- * The pair ordering is detailed as (O: ordered, X: not ordered) :
- * slave master
- * slave X O
- * master O O
- */
-
-#define MB_GROUP_ALL 0
-#define RCU_MB_GROUP MB_GROUP_ALL
-
-#ifdef RCU_MEMBARRIER
-extern int has_sys_membarrier;
-
-static inline void smp_mb_slave(int group)
-{
- if (likely(has_sys_membarrier))
- cmm_barrier();
- else
- cmm_smp_mb();
-}
-#endif
-
-#ifdef RCU_MB
-static inline void smp_mb_slave(int group)
-{
- cmm_smp_mb();
-}
-#endif
-
-#ifdef RCU_SIGNAL
-static inline void smp_mb_slave(int group)
-{
- cmm_barrier();
-}
-#endif
-
-/*
- * The trick here is that RCU_GP_CTR_PHASE must be a multiple of 8 so we can use
- * a full 8-bits, 16-bits or 32-bits bitmask for the lower order bits.
- */
-#define RCU_GP_COUNT (1UL << 0)
-/* Use the amount of bits equal to half of the architecture long size */
-#define RCU_GP_CTR_PHASE (1UL << (sizeof(unsigned long) << 2))
-#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_PHASE - 1)
-
-/*
- * Global quiescent period counter with low-order bits unused.
- * Using a int rather than a char to eliminate false register dependencies
- * causing stalls on some architectures.
- */
-extern unsigned long rcu_gp_ctr;
-
-struct rcu_reader {
- /* Data used by both reader and synchronize_rcu() */
- unsigned long ctr;
- char need_mb;
- /* Data used for registry */
- struct cds_list_head node __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
- pthread_t tid;
-};
-
-extern struct rcu_reader __thread rcu_reader;
-
-extern int gp_futex;
-
-/*
- * Wake-up waiting synchronize_rcu(). Called from many concurrent threads.
- */
-static inline void wake_up_gp(void)
-{
- if (unlikely(uatomic_read(&gp_futex) == -1)) {
- uatomic_set(&gp_futex, 0);
- futex_async(&gp_futex, FUTEX_WAKE, 1,
- NULL, NULL, 0);
- }
-}
-
-static inline int rcu_gp_ongoing(unsigned long *ctr)
-{
- unsigned long v;
-
- /*
- * Make sure both tests below are done on the same version of *value
- * to insure consistency.
- */
- v = CMM_LOAD_SHARED(*ctr);
- return (v & RCU_GP_CTR_NEST_MASK) &&
- ((v ^ rcu_gp_ctr) & RCU_GP_CTR_PHASE);
-}
-
-static inline void _rcu_read_lock(void)
-{
- unsigned long tmp;
-
- cmm_barrier(); /* Ensure the compiler does not reorder us with mutex */
- tmp = rcu_reader.ctr;
- /*
- * rcu_gp_ctr is
- * RCU_GP_COUNT | (~RCU_GP_CTR_PHASE or RCU_GP_CTR_PHASE)
- */
- if (likely(!(tmp & RCU_GP_CTR_NEST_MASK))) {
- _CMM_STORE_SHARED(rcu_reader.ctr, _CMM_LOAD_SHARED(rcu_gp_ctr));
- /*
- * Set active readers count for outermost nesting level before
- * accessing the pointer. See smp_mb_master().
- */
- smp_mb_slave(RCU_MB_GROUP);
- } else {
- _CMM_STORE_SHARED(rcu_reader.ctr, tmp + RCU_GP_COUNT);
- }
-}
-
-static inline void _rcu_read_unlock(void)
-{
- unsigned long tmp;
-
- tmp = rcu_reader.ctr;
- /*
- * Finish using rcu before decrementing the pointer.
- * See smp_mb_master().
- */
- if (likely((tmp & RCU_GP_CTR_NEST_MASK) == RCU_GP_COUNT)) {
- smp_mb_slave(RCU_MB_GROUP);
- _CMM_STORE_SHARED(rcu_reader.ctr, rcu_reader.ctr - RCU_GP_COUNT);
- /* write rcu_reader.ctr before read futex */
- smp_mb_slave(RCU_MB_GROUP);
- wake_up_gp();
- } else {
- _CMM_STORE_SHARED(rcu_reader.ctr, rcu_reader.ctr - RCU_GP_COUNT);
- }
- cmm_barrier(); /* Ensure the compiler does not reorder us with mutex */
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _URCU_STATIC_H */
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
+#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <poll.h>
-#include "urcu-map.h"
+#include "urcu/map/urcu.h"
-#include "urcu-static.h"
+#include "urcu/static/urcu.h"
/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#include "urcu.h"
+/*
+ * If a reader is really non-cooperative and refuses to commit its
+ * rcu_active_readers count to memory (there is no barrier in the reader
+ * per-se), kick it after a few loops waiting for it.
+ */
+#define KICK_READER_LOOPS 10000
+
+/*
+ * Active attempts to check for reader Q.S. before calling futex().
+ */
+#define RCU_QS_ACTIVE_ATTEMPTS 100
+
#ifdef RCU_MEMBARRIER
static int init_done;
int has_sys_membarrier;
static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
-int gp_futex;
+int32_t gp_futex;
/*
* Global grace period counter.
#include <pthread.h>
/*
- * See urcu-pointer.h and urcu-pointer-static.h for pointer publication headers.
+ * See urcu-pointer.h and urcu/static/urcu-pointer.h for pointer
+ * publication headers.
*/
#include <urcu-pointer.h>
extern "C" {
#endif
-#include "urcu-map.h"
+#include <urcu/map/urcu.h>
/*
* Important !
#ifdef _LGPL_SOURCE
-#include <urcu-static.h>
+#include <urcu/static/urcu.h>
/*
* Mappings for static use of the userspace RCU library.
/*
* library wrappers to be used by non-LGPL compatible source code.
- * See LGPL-only urcu-pointer-static.h for documentation.
+ * See LGPL-only urcu/static/urcu-pointer.h for documentation.
*/
extern void rcu_read_lock(void);
}
#endif
-#include "urcu-call-rcu.h"
-#include "urcu-defer.h"
+#include <urcu-call-rcu.h>
+#include <urcu-defer.h>
#endif /* _URCU_H */
--- /dev/null
+#ifndef _URCU_ARCH_ALPHA_H
+#define _URCU_ARCH_ALPHA_H
+
+/*
+ * arch_alpha.h: trivial definitions for the Alpha architecture.
+ *
+ * Copyright (c) 2010 Paolo Bonzini <pbonzini@redhat.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <urcu/compiler.h>
+#include <urcu/config.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define cmm_mb() asm volatile("mb":::"memory")
+#define cmm_wmb() asm volatile("wmb":::"memory")
+#define cmm_read_barrier_depends() asm volatile("mb":::"memory")
+
+typedef unsigned long long cycles_t;
+
+static inline cycles_t caa_get_cycles (void)
+{
+ return 0; /* not supported */
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#include <urcu/arch/generic.h>
+
+#endif /* _URCU_ARCH_ALPHA_H */
--- /dev/null
+#ifndef _URCU_ARCH_ARM_H
+#define _URCU_ARCH_ARM_H
+
+/*
+ * arch_arm.h: trivial definitions for the ARM architecture.
+ *
+ * Copyright (c) 2010 Paul E. McKenney, IBM Corporation.
+ * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <urcu/compiler.h>
+#include <urcu/config.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef CONFIG_RCU_ARM_HAVE_DMB
+#define cmm_mb() asm volatile("dmb":::"memory")
+#define cmm_rmb() asm volatile("dmb":::"memory")
+#define cmm_wmb() asm volatile("dmb":::"memory")
+#endif /* CONFIG_RCU_ARM_HAVE_DMB */
+
+#include <stdlib.h>
+#include <sys/time.h>
+
+typedef unsigned long long cycles_t;
+
+static inline cycles_t caa_get_cycles (void)
+{
+ cycles_t thetime;
+ struct timeval tv;
+
+ if (gettimeofday(&tv, NULL) != 0)
+ return 0;
+ thetime = ((cycles_t)tv.tv_sec) * 1000000ULL + ((cycles_t)tv.tv_usec);
+ return (cycles_t)thetime;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#include <urcu/arch/generic.h>
+
+#endif /* _URCU_ARCH_ARM_H */
--- /dev/null
+#ifndef _URCU_ARCH_GCC_H
+#define _URCU_ARCH_GCC_H
+
+/*
+ * arch_gcc.h: trivial definitions for architectures using gcc __sync_
+ *
+ * Copyright (c) 2010 Paul E. McKenney, IBM Corporation.
+ * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <urcu/compiler.h>
+#include <urcu/config.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdlib.h>
+#include <sys/time.h>
+
+typedef unsigned long long cycles_t;
+
+static inline cycles_t caa_get_cycles (void)
+{
+ cycles_t thetime;
+ struct timeval tv;
+
+ if (gettimeofday(&tv, NULL) != 0)
+ return 0;
+ thetime = ((cycles_t)tv.tv_sec) * 1000000ULL + ((cycles_t)tv.tv_usec);
+ return (cycles_t)thetime;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#include <urcu/arch/generic.h>
+
+#endif /* _URCU_ARCH_GCC_H */
--- /dev/null
+#ifndef _URCU_ARCH_GENERIC_H
+#define _URCU_ARCH_GENERIC_H
+
+/*
+ * arch_generic.h: common definitions for multiple architectures.
+ *
+ * Copyright (c) 2010 Paolo Bonzini <pbonzini@redhat.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <urcu/compiler.h>
+#include <urcu/config.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef CAA_CACHE_LINE_SIZE
+#define CAA_CACHE_LINE_SIZE 64
+#endif
+
+#if !defined(cmm_mc) && !defined(cmm_rmc) && !defined(cmm_wmc)
+#define CONFIG_HAVE_MEM_COHERENCY
+/*
+ * Architectures with cache coherency must _not_ define cmm_mc/cmm_rmc/cmm_wmc.
+ *
+ * For them, cmm_mc/cmm_rmc/cmm_wmc are implemented with a simple compiler barrier;
+ * in addition, we provide defaults for cmm_mb (using GCC builtins) as well as
+ * cmm_rmb and cmm_wmb (defaulting to cmm_mb).
+ */
+
+#ifndef cmm_mb
+#define cmm_mb() __sync_synchronize()
+#endif
+
+#ifndef cmm_rmb
+#define cmm_rmb() cmm_mb()
+#endif
+
+#ifndef cmm_wmb
+#define cmm_wmb() cmm_mb()
+#endif
+
+#define cmm_mc() cmm_barrier()
+#define cmm_rmc() cmm_barrier()
+#define cmm_wmc() cmm_barrier()
+#else
+/*
+ * Architectures without cache coherency need something like the following:
+ *
+ * #define cmm_mc() arch_cache_flush()
+ * #define cmm_rmc() arch_cache_flush_read()
+ * #define cmm_wmc() arch_cache_flush_write()
+ *
+ * Of these, only cmm_mc is mandatory. cmm_rmc and cmm_wmc default to cmm_mc.
+ * cmm_mb/cmm_rmb/cmm_wmb use these definitions by default:
+ *
+ * #define cmm_mb() cmm_mc()
+ * #define cmm_rmb() cmm_rmc()
+ * #define cmm_wmb() cmm_wmc()
+ */
+
+#ifndef cmm_mb
+#define cmm_mb() cmm_mc()
+#endif
+
+#ifndef cmm_rmb
+#define cmm_rmb() cmm_rmc()
+#endif
+
+#ifndef cmm_wmb
+#define cmm_wmb() cmm_wmc()
+#endif
+
+#ifndef cmm_rmc
+#define cmm_rmc() cmm_mc()
+#endif
+
+#ifndef cmm_wmc
+#define cmm_wmc() cmm_mc()
+#endif
+#endif
+
+/* Nop everywhere except on alpha. */
+#ifndef cmm_read_barrier_depends
+#define cmm_read_barrier_depends()
+#endif
+
+#ifdef CONFIG_RCU_SMP
+#define cmm_smp_mb() cmm_mb()
+#define cmm_smp_rmb() cmm_rmb()
+#define cmm_smp_wmb() cmm_wmb()
+#define cmm_smp_mc() cmm_mc()
+#define cmm_smp_rmc() cmm_rmc()
+#define cmm_smp_wmc() cmm_wmc()
+#define cmm_smp_read_barrier_depends() cmm_read_barrier_depends()
+#else
+#define cmm_smp_mb() cmm_barrier()
+#define cmm_smp_rmb() cmm_barrier()
+#define cmm_smp_wmb() cmm_barrier()
+#define cmm_smp_mc() cmm_barrier()
+#define cmm_smp_rmc() cmm_barrier()
+#define cmm_smp_wmc() cmm_barrier()
+#define cmm_smp_read_barrier_depends()
+#endif
+
+#ifndef caa_cpu_relax
+#define caa_cpu_relax() cmm_barrier()
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _URCU_ARCH_GENERIC_H */
--- /dev/null
+#ifndef _URCU_ARCH_PPC_H
+#define _URCU_ARCH_PPC_H
+
+/*
+ * arch_ppc.h: trivial definitions for the powerpc architecture.
+ *
+ * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
+ * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <urcu/compiler.h>
+#include <urcu/config.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Include size of POWER5+ L3 cache lines: 256 bytes */
+#define CAA_CACHE_LINE_SIZE 256
+
+#define cmm_mb() asm volatile("sync":::"memory")
+
+#define mftbl() \
+ ({ \
+ unsigned long rval; \
+ asm volatile("mftbl %0" : "=r" (rval)); \
+ rval; \
+ })
+
+#define mftbu() \
+ ({ \
+ unsigned long rval; \
+ asm volatile("mftbu %0" : "=r" (rval)); \
+ rval; \
+ })
+
+typedef unsigned long long cycles_t;
+
+static inline cycles_t caa_get_cycles (void)
+{
+ long h, l;
+
+ for (;;) {
+ h = mftbu();
+ cmm_barrier();
+ l = mftbl();
+ cmm_barrier();
+ if (mftbu() == h)
+ return (((cycles_t) h) << 32) + l;
+ }
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#include <urcu/arch/generic.h>
+
+#endif /* _URCU_ARCH_PPC_H */
--- /dev/null
+#ifndef _URCU_ARCH_S390_H
+#define _URCU_ARCH_S390_H
+
+/*
+ * Trivial definitions for the S390 architecture based on information from the
+ * Principles of Operation "CPU Serialization" (5-91), "BRANCH ON CONDITION"
+ * (7-25) and "STORE CLOCK" (7-169).
+ *
+ * Copyright (c) 2009 Novell, Inc.
+ * Author: Jan Blunck <jblunck@suse.de>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <urcu/compiler.h>
+#include <urcu/config.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define CAA_CACHE_LINE_SIZE 128
+
+#define cmm_mb() __asm__ __volatile__("bcr 15,0" : : : "memory")
+
+typedef unsigned long long cycles_t;
+
+static inline cycles_t caa_get_cycles (void)
+{
+ cycles_t cycles;
+
+ __asm__ __volatile__("stck %0" : "=m" (cycles) : : "cc", "memory" );
+
+ return cycles;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#include <urcu/arch/generic.h>
+
+#endif /* _URCU_ARCH_S390_H */
--- /dev/null
+#ifndef _URCU_ARCH_SPARC64_H
+#define _URCU_ARCH_SPARC64_H
+
+/*
+ * arch_sparc64.h: trivial definitions for the Sparc64 architecture.
+ *
+ * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
+ * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <urcu/compiler.h>
+#include <urcu/config.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define CAA_CACHE_LINE_SIZE 256
+
+/*
+ * Inspired from the Linux kernel. Workaround Spitfire bug #51.
+ */
+#define membar_safe(type) \
+__asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
+ "membar " type "\n" \
+ "1:\n" \
+ : : : "memory")
+
+#define cmm_mb() membar_safe("#LoadLoad | #LoadStore | #StoreStore | #StoreLoad")
+#define cmm_rmb() membar_safe("#LoadLoad")
+#define cmm_wmb() membar_safe("#StoreStore")
+
+typedef unsigned long long cycles_t;
+
+static inline cycles_t caa_get_cycles (void)
+{
+ return 0; /* unimplemented */
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#include <urcu/arch/generic.h>
+
+#endif /* _URCU_ARCH_SPARC64_H */
--- /dev/null
+#ifndef _URCU_ARCH_UNKNOWN_H
+#define _URCU_ARCH_UNKNOWN_H
+
+/*
+ * arch_unknown.h: #error to prevent build on unknown architectures.
+ *
+ * Copyright (c) 2010 Paul E. McKenney, IBM Corporation.
+ * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/* See configure.ac for the list of recognized architectures. */
+#error "Cannot build: unrecognized architecture detected."
+
+#endif /* _URCU_ARCH_UNKNOWN_H */
--- /dev/null
+#ifndef _URCU_ARCH_X86_H
+#define _URCU_ARCH_X86_H
+
+/*
+ * arch_x86.h: trivial definitions for the x86 architecture.
+ *
+ * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
+ * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <urcu/compiler.h>
+#include <urcu/config.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define CAA_CACHE_LINE_SIZE 128
+
+#ifdef CONFIG_RCU_HAVE_FENCE
+#define cmm_mb() asm volatile("mfence":::"memory")
+#define cmm_rmb() asm volatile("lfence":::"memory")
+#define cmm_wmb() asm volatile("sfence"::: "memory")
+#else
+/*
+ * Some non-Intel clones support out of order store. cmm_wmb() ceases to be a
+ * nop for these.
+ */
+#define cmm_mb() asm volatile("lock; addl $0,0(%%esp)":::"memory")
+#define cmm_rmb() asm volatile("lock; addl $0,0(%%esp)":::"memory")
+#define cmm_wmb() asm volatile("lock; addl $0,0(%%esp)"::: "memory")
+#endif
+
+#define caa_cpu_relax() asm volatile("rep; nop" : : : "memory");
+
+#define rdtscll(val) \
+ do { \
+ unsigned int __a, __d; \
+ asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \
+ (val) = ((unsigned long long)__a) \
+ | (((unsigned long long)__d) << 32); \
+ } while(0)
+
+typedef unsigned long long cycles_t;
+
+static inline cycles_t caa_get_cycles(void)
+{
+ cycles_t ret = 0;
+
+ rdtscll(ret);
+ return ret;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#include <urcu/arch/generic.h>
+
+#endif /* _URCU_ARCH_X86_H */
+++ /dev/null
-#ifndef _URCU_ARCH_ALPHA_H
-#define _URCU_ARCH_ALPHA_H
-
-/*
- * arch_alpha.h: trivial definitions for the Alpha architecture.
- *
- * Copyright (c) 2010 Paolo Bonzini <pbonzini@redhat.com>
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include <urcu/compiler.h>
-#include <urcu/config.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#define cmm_mb() asm volatile("mb":::"memory")
-#define cmm_wmb() asm volatile("wmb":::"memory")
-#define cmm_read_barrier_depends() asm volatile("mb":::"memory")
-
-typedef unsigned long long cycles_t;
-
-static inline cycles_t caa_get_cycles (void)
-{
- return 0; /* not supported */
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#include <urcu/arch_generic.h>
-
-#endif /* _URCU_ARCH_ALPHA_H */
+++ /dev/null
-#ifndef _URCU_ARCH_ARM_H
-#define _URCU_ARCH_ARM_H
-
-/*
- * arch_arm.h: trivial definitions for the ARM architecture.
- *
- * Copyright (c) 2010 Paul E. McKenney, IBM Corporation.
- * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include <urcu/compiler.h>
-#include <urcu/config.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#ifdef CONFIG_RCU_ARM_HAVE_DMB
-#define cmm_mb() asm volatile("dmb":::"memory")
-#define cmm_rmb() asm volatile("dmb":::"memory")
-#define cmm_wmb() asm volatile("dmb":::"memory")
-#endif /* CONFIG_RCU_ARM_HAVE_DMB */
-
-#include <stdlib.h>
-#include <sys/time.h>
-
-typedef unsigned long long cycles_t;
-
-static inline cycles_t caa_get_cycles (void)
-{
- cycles_t thetime;
- struct timeval tv;
-
- if (gettimeofday(&tv, NULL) != 0)
- return 0;
- thetime = ((cycles_t)tv.tv_sec) * 1000000ULL + ((cycles_t)tv.tv_usec);
- return (cycles_t)thetime;
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#include <urcu/arch_generic.h>
-
-#endif /* _URCU_ARCH_ARM_H */
+++ /dev/null
-#ifndef _URCU_ARCH_GCC_H
-#define _URCU_ARCH_GCC_H
-
-/*
- * arch_gcc.h: trivial definitions for architectures using gcc __sync_
- *
- * Copyright (c) 2010 Paul E. McKenney, IBM Corporation.
- * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include <urcu/compiler.h>
-#include <urcu/config.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <stdlib.h>
-#include <sys/time.h>
-
-typedef unsigned long long cycles_t;
-
-static inline cycles_t caa_get_cycles (void)
-{
- cycles_t thetime;
- struct timeval tv;
-
- if (gettimeofday(&tv, NULL) != 0)
- return 0;
- thetime = ((cycles_t)tv.tv_sec) * 1000000ULL + ((cycles_t)tv.tv_usec);
- return (cycles_t)thetime;
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#include <urcu/arch_generic.h>
-
-#endif /* _URCU_ARCH_GCC_H */
+++ /dev/null
-#ifndef _URCU_ARCH_GENERIC_H
-#define _URCU_ARCH_GENERIC_H
-
-/*
- * arch_generic.h: common definitions for multiple architectures.
- *
- * Copyright (c) 2010 Paolo Bonzini <pbonzini@redhat.com>
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include <urcu/compiler.h>
-#include <urcu/config.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#ifndef CAA_CACHE_LINE_SIZE
-#define CAA_CACHE_LINE_SIZE 64
-#endif
-
-#if !defined(cmm_mc) && !defined(cmm_rmc) && !defined(cmm_wmc)
-#define CONFIG_HAVE_MEM_COHERENCY
-/*
- * Architectures with cache coherency must _not_ define cmm_mc/cmm_rmc/cmm_wmc.
- *
- * For them, cmm_mc/cmm_rmc/cmm_wmc are implemented with a * simple compiler barrier;
- * in addition, we provide defaults for cmm_mb (using GCC builtins) as well as
- * cmm_rmb and cmm_wmb (defaulting to cmm_mb).
- */
-
-#ifndef cmm_mb
-#define cmm_mb() __sync_synchronize()
-#endif
-
-#ifndef cmm_rmb
-#define cmm_rmb() cmm_mb()
-#endif
-
-#ifndef cmm_wmb
-#define cmm_wmb() cmm_mb()
-#endif
-
-#define cmm_mc() cmm_barrier()
-#define cmm_rmc() cmm_barrier()
-#define cmm_wmc() cmm_barrier()
-#else
-/*
- * Architectures without cache coherency need something like the following:
- *
- * #define cmm_mc() arch_cache_flush()
- * #define cmm_rmc() arch_cache_flush_read()
- * #define cmm_wmc() arch_cache_flush_write()
- *
- * Of these, only cmm_mc is mandatory. cmm_rmc and cmm_wmc default to cmm_mc.
- * cmm_mb/cmm_rmb/cmm_wmb use these definitions by default:
- *
- * #define cmm_mb() cmm_mc()
- * #define cmm_rmb() cmm_rmc()
- * #define cmm_wmb() cmm_wmc()
- */
-
-#ifndef cmm_mb
-#define cmm_mb() cmm_mc()
-#endif
-
-#ifndef cmm_rmb
-#define cmm_rmb() cmm_rmc()
-#endif
-
-#ifndef cmm_wmb
-#define cmm_wmb() cmm_wmc()
-#endif
-
-#ifndef cmm_rmc
-#define cmm_rmc() cmm_mc()
-#endif
-
-#ifndef cmm_wmc
-#define cmm_wmc() cmm_mc()
-#endif
-#endif
-
-/* Nop everywhere except on alpha. */
-#ifndef cmm_read_barrier_depends
-#define cmm_read_barrier_depends()
-#endif
-
-#ifdef CONFIG_RCU_SMP
-#define cmm_smp_mb() cmm_mb()
-#define cmm_smp_rmb() cmm_rmb()
-#define cmm_smp_wmb() cmm_wmb()
-#define cmm_smp_mc() cmm_mc()
-#define cmm_smp_rmc() cmm_rmc()
-#define cmm_smp_wmc() cmm_wmc()
-#define cmm_smp_read_barrier_depends() cmm_read_barrier_depends()
-#else
-#define cmm_smp_mb() cmm_barrier()
-#define cmm_smp_rmb() cmm_barrier()
-#define cmm_smp_wmb() cmm_barrier()
-#define cmm_smp_mc() cmm_barrier()
-#define cmm_smp_rmc() cmm_barrier()
-#define cmm_smp_wmc() cmm_barrier()
-#define cmm_smp_read_barrier_depends()
-#endif
-
-#ifndef caa_cpu_relax
-#define caa_cpu_relax() cmm_barrier()
-#endif
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _URCU_ARCH_GENERIC_H */
+++ /dev/null
-#ifndef _URCU_ARCH_PPC_H
-#define _URCU_ARCH_PPC_H
-
-/*
- * arch_ppc.h: trivial definitions for the powerpc architecture.
- *
- * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
- * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include <urcu/compiler.h>
-#include <urcu/config.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* Include size of POWER5+ L3 cache lines: 256 bytes */
-#define CAA_CACHE_LINE_SIZE 256
-
-#define cmm_mb() asm volatile("sync":::"memory")
-
-#define mftbl() \
- ({ \
- unsigned long rval; \
- asm volatile("mftbl %0" : "=r" (rval)); \
- rval; \
- })
-
-#define mftbu() \
- ({ \
- unsigned long rval; \
- asm volatile("mftbu %0" : "=r" (rval)); \
- rval; \
- })
-
-typedef unsigned long long cycles_t;
-
-static inline cycles_t caa_get_cycles (void)
-{
- long h, l;
-
- for (;;) {
- h = mftbu();
- cmm_barrier();
- l = mftbl();
- cmm_barrier();
- if (mftbu() == h)
- return (((cycles_t) h) << 32) + l;
- }
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#include <urcu/arch_generic.h>
-
-#endif /* _URCU_ARCH_PPC_H */
+++ /dev/null
-#ifndef _URCU_ARCH_S390_H
-#define _URCU_ARCH_S390_H
-
-/*
- * Trivial definitions for the S390 architecture based on information from the
- * Principles of Operation "CPU Serialization" (5-91), "BRANCH ON CONDITION"
- * (7-25) and "STORE CLOCK" (7-169).
- *
- * Copyright (c) 2009 Novell, Inc.
- * Author: Jan Blunck <jblunck@suse.de>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#include <urcu/compiler.h>
-#include <urcu/config.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#define CAA_CACHE_LINE_SIZE 128
-
-#define cmm_mb() __asm__ __volatile__("bcr 15,0" : : : "memory")
-
-typedef unsigned long long cycles_t;
-
-static inline cycles_t caa_get_cycles (void)
-{
- cycles_t cycles;
-
- __asm__ __volatile__("stck %0" : "=m" (cycles) : : "cc", "memory" );
-
- return cycles;
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#include <urcu/arch_generic.h>
-
-#endif /* _URCU_ARCH_S390_H */
+++ /dev/null
-#ifndef _URCU_ARCH_SPARC64_H
-#define _URCU_ARCH_SPARC64_H
-
-/*
- * arch_sparc64.h: trivial definitions for the Sparc64 architecture.
- *
- * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
- * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include <urcu/compiler.h>
-#include <urcu/config.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#define CAA_CACHE_LINE_SIZE 256
-
-/*
- * Inspired from the Linux kernel. Workaround Spitfire bug #51.
- */
-#define membar_safe(type) \
-__asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
- "membar " type "\n" \
- "1:\n" \
- : : : "memory")
-
-#define cmm_mb() membar_safe("#LoadLoad | #LoadStore | #StoreStore | #StoreLoad")
-#define cmm_rmb() membar_safe("#LoadLoad")
-#define cmm_wmb() membar_safe("#StoreStore")
-
-typedef unsigned long long cycles_t;
-
-static inline cycles_t caa_get_cycles (void)
-{
- return 0; /* unimplemented */
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#include <urcu/arch_generic.h>
-
-#endif /* _URCU_ARCH_SPARC64_H */
+++ /dev/null
-#ifndef _URCU_ARCH_UNKNOWN_H
-#define _URCU_ARCH_UNKNOWN_H
-
-/*
- * arch_unknown.h: #error to prevent build on unknown architectures.
- *
- * Copyright (c) 2010 Paul E. McKenney, IBM Corporation.
- * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-/* See configure.ac for the list of recognized architectures. */
-#error "Cannot build: unrecognized architecture detected."
-
-#endif /* _URCU_ARCH_UNKNOWN_H */
+++ /dev/null
-#ifndef _URCU_ARCH_X86_H
-#define _URCU_ARCH_X86_H
-
-/*
- * arch_x86.h: trivial definitions for the x86 architecture.
- *
- * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
- * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include <urcu/compiler.h>
-#include <urcu/config.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#define CAA_CACHE_LINE_SIZE 128
-
-#ifdef CONFIG_RCU_HAVE_FENCE
-#define cmm_mb() asm volatile("mfence":::"memory")
-#define cmm_rmb() asm volatile("lfence":::"memory")
-#define cmm_wmb() asm volatile("sfence"::: "memory")
-#else
-/*
- * Some non-Intel clones support out of order store. cmm_wmb() ceases to be a
- * nop for these.
- */
-#define cmm_mb() asm volatile("lock; addl $0,0(%%esp)":::"memory")
-#define cmm_rmb() asm volatile("lock; addl $0,0(%%esp)":::"memory")
-#define cmm_wmb() asm volatile("lock; addl $0,0(%%esp)"::: "memory")
-#endif
-
-#define caa_cpu_relax() asm volatile("rep; nop" : : : "memory");
-
-#define rdtscll(val) \
- do { \
- unsigned int __a, __d; \
- asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \
- (val) = ((unsigned long long)__a) \
- | (((unsigned long long)__d) << 32); \
- } while(0)
-
-typedef unsigned long long cycles_t;
-
-static inline cycles_t caa_get_cycles(void)
-{
- cycles_t ret = 0;
-
- rdtscll(ret);
- return ret;
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#include <urcu/arch_generic.h>
-
-#endif /* _URCU_ARCH_X86_H */
--- /dev/null
+#ifndef _URCU_CDS_H
+#define _URCU_CDS_H
+
+/*
+ * urcu/cds.h
+ *
+ * Userspace RCU library - Concurrent Data Structures
+ *
+ * Copyright 2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <urcu/hlist.h>
+#include <urcu/list.h>
+#include <urcu/rcuhlist.h>
+#include <urcu/rculist.h>
+#include <urcu/rculfqueue.h>
+#include <urcu/rculfstack.h>
+#include <urcu/wfqueue.h>
+#include <urcu/wfstack.h>
+
+#endif /* _URCU_CDS_H */
*/
#define CMM_ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
-#ifndef max
-#define max(a,b) ((a)>(b)?(a):(b))
+#ifndef caa_max
+#define caa_max(a,b) ((a)>(b)?(a):(b))
#endif
-#ifndef min
-#define min(a,b) ((a)<(b)?(a):(b))
+#ifndef caa_min
+#define caa_min(a,b) ((a)<(b)?(a):(b))
#endif
#if defined(__SIZEOF_LONG__)
(type *)((char *)__ptr - offsetof(type, member)); \
})
+#define CAA_BUILD_BUG_ON_ZERO(cond) (sizeof(struct { int:-!!(cond); }))
+#define CAA_BUILD_BUG_ON(cond) ((void)CAA_BUILD_BUG_ON_ZERO(cond))
+
#endif /* _URCU_COMPILER_H */
--- /dev/null
+#ifndef _URCU_FUTEX_H
+#define _URCU_FUTEX_H
+
+/*
+ * urcu-futex.h
+ *
+ * Userspace RCU - sys_futex/compat_futex header.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <urcu/config.h>
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define FUTEX_WAIT 0
+#define FUTEX_WAKE 1
+
+/*
+ * sys_futex compatibility header.
+ * Use *only* *either of* futex_noasync OR futex_async on a given address.
+ *
+ * futex_noasync cannot be executed in signal handlers, but ensures that
+ * it will be put in a wait queue even in compatibility mode.
+ *
+ * futex_async is signal-handler safe for the wakeup. It uses polling
+ * on the wait-side in compatibility mode.
+ */
+
+#ifdef CONFIG_RCU_HAVE_FUTEX
+#include <sys/syscall.h>
+#define futex(...) syscall(__NR_futex, __VA_ARGS__)
+#define futex_noasync(uaddr, op, val, timeout, uaddr2, val3) \
+ futex(uaddr, op, val, timeout, uaddr2, val3)
+#define futex_async(uaddr, op, val, timeout, uaddr2, val3) \
+ futex(uaddr, op, val, timeout, uaddr2, val3)
+#else
+extern int compat_futex_noasync(int32_t *uaddr, int op, int32_t val,
+ const struct timespec *timeout, int32_t *uaddr2, int32_t val3);
+#define futex_noasync(uaddr, op, val, timeout, uaddr2, val3) \
+ compat_futex_noasync(uaddr, op, val, timeout, uaddr2, val3)
+extern int compat_futex_async(int32_t *uaddr, int op, int32_t val,
+ const struct timespec *timeout, int32_t *uaddr2, int32_t val3);
+#define futex_async(uaddr, op, val, timeout, uaddr2, val3) \
+ compat_futex_async(uaddr, op, val, timeout, uaddr2, val3)
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _URCU_FUTEX_H */
--- /dev/null
+#ifndef _URCU_BP_MAP_H
+#define _URCU_BP_MAP_H
+
+/*
+ * urcu-map.h
+ *
+ * Userspace RCU header -- name mapping to allow multiple flavors to be
+ * used in the same executable.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
+ *
+ * LGPL-compatible code should include this header with :
+ *
+ * #define _LGPL_SOURCE
+ * #include <urcu.h>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * IBM's contributions to this file may be relicensed under LGPLv2 or later.
+ */
+
+/* Mapping macros to allow multiple flavors in a single binary. */
+
+#define rcu_read_lock rcu_read_lock_bp
+#define _rcu_read_lock _rcu_read_lock_bp
+#define rcu_read_unlock rcu_read_unlock_bp
+#define _rcu_read_unlock _rcu_read_unlock_bp
+#define rcu_register_thread rcu_register_thread_bp
+#define rcu_unregister_thread rcu_unregister_thread_bp
+#define rcu_init rcu_init_bp
+#define rcu_exit rcu_exit_bp
+#define synchronize_rcu synchronize_rcu_bp
+#define rcu_reader rcu_reader_bp
+#define rcu_gp_ctr rcu_gp_ctr_bp
+
+#define get_cpu_call_rcu_data get_cpu_call_rcu_data_bp
+#define get_call_rcu_thread get_call_rcu_thread_bp
+#define create_call_rcu_data create_call_rcu_data_bp
+#define set_cpu_call_rcu_data set_cpu_call_rcu_data_bp
+#define get_default_call_rcu_data get_default_call_rcu_data_bp
+#define get_call_rcu_data get_call_rcu_data_bp
+#define get_thread_call_rcu_data get_thread_call_rcu_data_bp
+#define set_thread_call_rcu_data set_thread_call_rcu_data_bp
+#define create_all_cpu_call_rcu_data create_all_cpu_call_rcu_data_bp
+#define free_all_cpu_call_rcu_data free_all_cpu_call_rcu_data_bp
+#define call_rcu call_rcu_bp
+
+#define defer_rcu defer_rcu_bp
+#define rcu_defer_register_thread rcu_defer_register_thread_bp
+#define rcu_defer_unregister_thread rcu_defer_unregister_thread_bp
+#define rcu_defer_barrier rcu_defer_barrier_bp
+#define rcu_defer_barrier_thread rcu_defer_barrier_thread_bp
+
+/* Concurrent Data Structures */
+#define cds_lfq_node_init_rcu cds_lfq_node_init_rcu_bp
+#define cds_lfq_init_rcu cds_lfq_init_rcu_bp
+#define cds_lfq_destroy_rcu cds_lfq_destroy_rcu_bp
+#define cds_lfq_enqueue_rcu cds_lfq_enqueue_rcu_bp
+#define cds_lfq_dequeue_rcu cds_lfq_dequeue_rcu_bp
+
+#define cds_lfs_node_init_rcu cds_lfs_node_init_rcu_bp
+#define cds_lfs_init_rcu cds_lfs_init_rcu_bp
+#define cds_lfs_push_rcu cds_lfs_push_rcu_bp
+#define cds_lfs_pop_rcu cds_lfs_pop_rcu_bp
+
+#endif /* _URCU_BP_MAP_H */
--- /dev/null
+#ifndef _URCU_QSBR_MAP_H
+#define _URCU_QSBR_MAP_H
+
+/*
+ * urcu-map.h
+ *
+ * Userspace RCU header -- name mapping to allow multiple flavors to be
+ * used in the same executable.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
+ *
+ * LGPL-compatible code should include this header with :
+ *
+ * #define _LGPL_SOURCE
+ * #include <urcu.h>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * IBM's contributions to this file may be relicensed under LGPLv2 or later.
+ */
+
+/* Mapping macros to allow multiple flavors in a single binary. */
+
+#define rcu_read_lock rcu_read_lock_qsbr
+#define _rcu_read_lock _rcu_read_lock_qsbr
+#define rcu_read_unlock rcu_read_unlock_qsbr
+#define _rcu_read_unlock _rcu_read_unlock_qsbr
+#define rcu_quiescent_state rcu_quiescent_state_qsbr
+#define _rcu_quiescent_state _rcu_quiescent_state_qsbr
+#define rcu_thread_offline rcu_thread_offline_qsbr
+#define rcu_thread_online rcu_thread_online_qsbr
+#define rcu_register_thread rcu_register_thread_qsbr
+#define rcu_unregister_thread rcu_unregister_thread_qsbr
+#define rcu_exit rcu_exit_qsbr
+#define synchronize_rcu synchronize_rcu_qsbr
+#define rcu_reader rcu_reader_qsbr
+#define rcu_gp_ctr rcu_gp_ctr_qsbr
+
+#define get_cpu_call_rcu_data get_cpu_call_rcu_data_qsbr
+#define get_call_rcu_thread get_call_rcu_thread_qsbr
+#define create_call_rcu_data create_call_rcu_data_qsbr
+#define set_cpu_call_rcu_data set_cpu_call_rcu_data_qsbr
+#define get_default_call_rcu_data get_default_call_rcu_data_qsbr
+#define get_call_rcu_data get_call_rcu_data_qsbr
+#define get_thread_call_rcu_data get_thread_call_rcu_data_qsbr
+#define set_thread_call_rcu_data set_thread_call_rcu_data_qsbr
+#define create_all_cpu_call_rcu_data create_all_cpu_call_rcu_data_qsbr
+#define call_rcu call_rcu_qsbr
+
+#define defer_rcu defer_rcu_qsbr
+#define rcu_defer_register_thread rcu_defer_register_thread_qsbr
+#define rcu_defer_unregister_thread rcu_defer_unregister_thread_qsbr
+#define rcu_defer_barrier rcu_defer_barrier_qsbr
+#define rcu_defer_barrier_thread rcu_defer_barrier_thread_qsbr
+
+/* Concurrent Data Structures */
+#define cds_lfq_node_init_rcu cds_lfq_node_init_rcu_qsbr
+#define cds_lfq_init_rcu cds_lfq_init_rcu_qsbr
+#define cds_lfq_destroy_rcu cds_lfq_destroy_rcu_qsbr
+#define cds_lfq_enqueue_rcu cds_lfq_enqueue_rcu_qsbr
+#define cds_lfq_dequeue_rcu cds_lfq_dequeue_rcu_qsbr
+
+#define cds_lfs_node_init_rcu cds_lfs_node_init_rcu_qsbr
+#define cds_lfs_init_rcu cds_lfs_init_rcu_qsbr
+#define cds_lfs_push_rcu cds_lfs_push_rcu_qsbr
+#define cds_lfs_pop_rcu cds_lfs_pop_rcu_qsbr
+
+#endif /* _URCU_QSBR_MAP_H */
--- /dev/null
+#ifndef _URCU_MAP_H
+#define _URCU_MAP_H
+
+/*
+ * urcu-map.h
+ *
+ * Userspace RCU header -- name mapping to allow multiple flavors to be
+ * used in the same executable.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
+ *
+ * LGPL-compatible code should include this header with :
+ *
+ * #define _LGPL_SOURCE
+ * #include <urcu.h>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * IBM's contributions to this file may be relicensed under LGPLv2 or later.
+ */
+
+/* Mapping macros to allow multiple flavors in a single binary. */
+
+#if !defined(RCU_MEMBARRIER) && !defined(RCU_SIGNAL) && !defined(RCU_MB)
+#define RCU_MEMBARRIER
+#endif
+
+/*
+ * RCU_MEMBARRIER is only possibly available on Linux. Fallback to
+ * RCU_MB
+ * otherwise.
+ */
+#if !defined(__linux__) && defined(RCU_MEMBARRIER)
+#undef RCU_MEMBARRIER
+#define RCU_MB
+#endif
+
+#ifdef RCU_MEMBARRIER
+#include <syscall.h>
+
+/* If the headers do not support SYS_membarrier, statically use RCU_MB */
+#ifdef SYS_membarrier
+# define MEMBARRIER_EXPEDITED (1 << 0)
+# define MEMBARRIER_DELAYED (1 << 1)
+# define MEMBARRIER_QUERY (1 << 16)
+# define membarrier(...) syscall(SYS_membarrier, __VA_ARGS__)
+#else
+# undef RCU_MEMBARRIER
+# define RCU_MB
+#endif
+#endif
+
+#ifdef RCU_MEMBARRIER
+
+#define rcu_read_lock rcu_read_lock_memb
+#define _rcu_read_lock _rcu_read_lock_memb
+#define rcu_read_unlock rcu_read_unlock_memb
+#define _rcu_read_unlock _rcu_read_unlock_memb
+#define rcu_register_thread rcu_register_thread_memb
+#define rcu_unregister_thread rcu_unregister_thread_memb
+#define rcu_init rcu_init_memb
+#define rcu_exit rcu_exit_memb
+#define synchronize_rcu synchronize_rcu_memb
+#define rcu_reader rcu_reader_memb
+#define rcu_gp_ctr rcu_gp_ctr_memb
+
+#define get_cpu_call_rcu_data get_cpu_call_rcu_data_memb
+#define get_call_rcu_thread get_call_rcu_thread_memb
+#define create_call_rcu_data create_call_rcu_data_memb
+#define set_cpu_call_rcu_data set_cpu_call_rcu_data_memb
+#define get_default_call_rcu_data get_default_call_rcu_data_memb
+#define get_call_rcu_data get_call_rcu_data_memb
+#define get_thread_call_rcu_data get_thread_call_rcu_data_memb
+#define set_thread_call_rcu_data set_thread_call_rcu_data_memb
+#define create_all_cpu_call_rcu_data create_all_cpu_call_rcu_data_memb
+#define free_all_cpu_call_rcu_data free_all_cpu_call_rcu_data_memb
+#define call_rcu call_rcu_memb
+
+#define defer_rcu defer_rcu_memb
+#define rcu_defer_register_thread rcu_defer_register_thread_memb
+#define rcu_defer_unregister_thread rcu_defer_unregister_thread_memb
+#define rcu_defer_barrier rcu_defer_barrier_memb
+#define rcu_defer_barrier_thread rcu_defer_barrier_thread_memb
+
+/* Concurrent Data Structures */
+#define cds_lfq_node_init_rcu cds_lfq_node_init_rcu_memb
+#define cds_lfq_init_rcu cds_lfq_init_rcu_memb
+#define cds_lfq_destroy_rcu cds_lfq_destroy_rcu_memb
+#define cds_lfq_enqueue_rcu cds_lfq_enqueue_rcu_memb
+#define cds_lfq_dequeue_rcu cds_lfq_dequeue_rcu_memb
+
+#define cds_lfs_node_init_rcu cds_lfs_node_init_rcu_memb
+#define cds_lfs_init_rcu cds_lfs_init_rcu_memb
+#define cds_lfs_push_rcu cds_lfs_push_rcu_memb
+#define cds_lfs_pop_rcu cds_lfs_pop_rcu_memb
+
+#elif defined(RCU_SIGNAL)
+
+#define rcu_read_lock rcu_read_lock_sig
+#define _rcu_read_lock _rcu_read_lock_sig
+#define rcu_read_unlock rcu_read_unlock_sig
+#define _rcu_read_unlock _rcu_read_unlock_sig
+#define rcu_register_thread rcu_register_thread_sig
+#define rcu_unregister_thread rcu_unregister_thread_sig
+#define rcu_init rcu_init_sig
+#define rcu_exit rcu_exit_sig
+#define synchronize_rcu synchronize_rcu_sig
+#define rcu_reader rcu_reader_sig
+#define rcu_gp_ctr rcu_gp_ctr_sig
+
+#define get_cpu_call_rcu_data get_cpu_call_rcu_data_sig
+#define get_call_rcu_thread get_call_rcu_thread_sig
+#define create_call_rcu_data create_call_rcu_data_sig
+#define set_cpu_call_rcu_data set_cpu_call_rcu_data_sig
+#define get_default_call_rcu_data get_default_call_rcu_data_sig
+#define get_call_rcu_data get_call_rcu_data_sig
+#define get_thread_call_rcu_data get_thread_call_rcu_data_sig
+#define set_thread_call_rcu_data set_thread_call_rcu_data_sig
+#define create_all_cpu_call_rcu_data create_all_cpu_call_rcu_data_sig
+#define free_all_cpu_call_rcu_data free_all_cpu_call_rcu_data_sig
+#define call_rcu call_rcu_sig
+
+#define defer_rcu defer_rcu_sig
+#define rcu_defer_register_thread rcu_defer_register_thread_sig
+#define rcu_defer_unregister_thread rcu_defer_unregister_thread_sig
+#define rcu_defer_barrier rcu_defer_barrier_sig
+#define rcu_defer_barrier_thread rcu_defer_barrier_thread_sig
+
+/* Concurrent Data Structures */
+#define cds_lfq_node_init_rcu cds_lfq_node_init_rcu_sig
+#define cds_lfq_init_rcu cds_lfq_init_rcu_sig
+#define cds_lfq_destroy_rcu cds_lfq_destroy_rcu_sig
+#define cds_lfq_enqueue_rcu cds_lfq_enqueue_rcu_sig
+#define cds_lfq_dequeue_rcu cds_lfq_dequeue_rcu_sig
+
+#define cds_lfs_node_init_rcu cds_lfs_node_init_rcu_sig
+#define cds_lfs_init_rcu cds_lfs_init_rcu_sig
+#define cds_lfs_push_rcu cds_lfs_push_rcu_sig
+#define cds_lfs_pop_rcu cds_lfs_pop_rcu_sig
+
+#elif defined(RCU_MB)
+
+#define rcu_read_lock rcu_read_lock_mb
+#define _rcu_read_lock _rcu_read_lock_mb
+#define rcu_read_unlock rcu_read_unlock_mb
+#define _rcu_read_unlock _rcu_read_unlock_mb
+#define rcu_register_thread rcu_register_thread_mb
+#define rcu_unregister_thread rcu_unregister_thread_mb
+#define rcu_init rcu_init_mb
+#define rcu_exit rcu_exit_mb
+#define synchronize_rcu synchronize_rcu_mb
+#define rcu_reader rcu_reader_mb
+#define rcu_gp_ctr rcu_gp_ctr_mb
+
+#define get_cpu_call_rcu_data get_cpu_call_rcu_data_mb
+#define get_call_rcu_thread get_call_rcu_thread_mb
+#define create_call_rcu_data create_call_rcu_data_mb
+#define set_cpu_call_rcu_data set_cpu_call_rcu_data_mb
+#define get_default_call_rcu_data get_default_call_rcu_data_mb
+#define get_call_rcu_data get_call_rcu_data_mb
+#define get_thread_call_rcu_data get_thread_call_rcu_data_mb
+#define set_thread_call_rcu_data set_thread_call_rcu_data_mb
+#define create_all_cpu_call_rcu_data create_all_cpu_call_rcu_data_mb
+#define free_all_cpu_call_rcu_data free_all_cpu_call_rcu_data_mb
+#define call_rcu call_rcu_mb
+
+#define defer_rcu defer_rcu_mb
+#define rcu_defer_register_thread rcu_defer_register_thread_mb
+#define rcu_defer_unregister_thread rcu_defer_unregister_thread_mb
+#define rcu_defer_barrier rcu_defer_barrier_mb
+#define rcu_defer_barrier_thread rcu_defer_barrier_thread_mb
+
+/* Concurrent Data Structures */
+#define cds_lfq_node_init_rcu cds_lfq_node_init_rcu_mb
+#define cds_lfq_init_rcu cds_lfq_init_rcu_mb
+#define cds_lfq_destroy_rcu cds_lfq_destroy_rcu_mb
+#define cds_lfq_enqueue_rcu cds_lfq_enqueue_rcu_mb
+#define cds_lfq_dequeue_rcu cds_lfq_dequeue_rcu_mb
+
+#define cds_lfs_node_init_rcu cds_lfs_node_init_rcu_mb
+#define cds_lfs_init_rcu cds_lfs_init_rcu_mb
+#define cds_lfs_push_rcu cds_lfs_push_rcu_mb
+#define cds_lfs_pop_rcu cds_lfs_pop_rcu_mb
+
+#else
+
+#error "Undefined selection"
+
+#endif
+
+#endif /* _URCU_MAP_H */
+++ /dev/null
-#ifndef _URCU_RCULFQUEUE_STATIC_H
-#define _URCU_RCULFQUEUE_STATIC_H
-
-/*
- * rculfqueue-static.h
- *
- * Userspace RCU library - Lock-Free RCU Queue
- *
- * Copyright 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See rculfqueue.h for linking
- * dynamically with the userspace rcu library.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include <urcu/urcu_ref.h>
-#include <urcu/uatomic_arch.h>
-#include <assert.h>
-/* A urcu implementation header should be already included. */
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/*
- * Lock-free RCU queue using reference counting. Enqueue and dequeue operations
- * hold a RCU read lock to deal with cmpxchg ABA problem. This implementation
- * keeps a dummy head node to ensure we can always update the queue locklessly.
- * Given that this is a queue, the dummy head node must always advance as we
- * dequeue entries. Therefore, we keep a reference count on each entry we are
- * dequeueing, so they can be kept as dummy head node until the next dequeue, at
- * which point their reference count will be decremented.
- */
-
-#define URCU_LFQ_PERMANENT_REF 128
-
-void _cds_lfq_node_init_rcu(struct cds_lfq_node_rcu *node)
-{
- node->next = NULL;
- urcu_ref_init(&node->ref);
-}
-
-void _cds_lfq_init_rcu(struct cds_lfq_queue_rcu *q)
-{
- _cds_lfq_node_init_rcu(&q->init);
- /* Make sure the initial node is never freed. */
- urcu_ref_set(&q->init.ref, URCU_LFQ_PERMANENT_REF);
- q->head = q->tail = &q->init;
-}
-
-void _cds_lfq_enqueue_rcu(struct cds_lfq_queue_rcu *q, struct cds_lfq_node_rcu *node)
-{
- urcu_ref_get(&node->ref);
-
- /*
- * uatomic_cmpxchg() implicit memory barrier orders earlier stores to
- * node before publication.
- */
-
- for (;;) {
- struct cds_lfq_node_rcu *tail, *next;
-
- rcu_read_lock();
- tail = rcu_dereference(q->tail);
- /*
- * Typically expect tail->next to be NULL.
- */
- next = uatomic_cmpxchg(&tail->next, NULL, node);
- if (next == NULL) {
- /*
- * Tail was at the end of queue, we successfully
- * appended to it.
- * Now move tail (another enqueue might beat
- * us to it, that's fine).
- */
- (void) uatomic_cmpxchg(&q->tail, tail, node);
- rcu_read_unlock();
- return;
- } else {
- /*
- * Failure to append to current tail. Help moving tail
- * further and retry.
- */
- (void) uatomic_cmpxchg(&q->tail, tail, next);
- rcu_read_unlock();
- continue;
- }
- }
-}
-
-/*
- * The entry returned by dequeue must be taken care of by doing a urcu_ref_put,
- * which calls the release primitive when the reference count drops to zero. A
- * grace period must be waited after execution of the release callback before
- * performing the actual memory reclamation or modifying the cds_lfq_node_rcu
- * structure.
- * In other words, the entry lfq node returned by dequeue must not be
- * modified/re-used/freed until the reference count reaches zero and a grace
- * period has elapsed (after the refcount reached 0).
- */
-struct cds_lfq_node_rcu *
-_cds_lfq_dequeue_rcu(struct cds_lfq_queue_rcu *q, void (*release)(struct urcu_ref *))
-{
- for (;;) {
- struct cds_lfq_node_rcu *head, *next;
-
- rcu_read_lock();
- head = rcu_dereference(q->head);
- next = rcu_dereference(head->next);
- if (next) {
- if (uatomic_cmpxchg(&q->head, head, next) == head) {
- rcu_read_unlock();
- urcu_ref_put(&head->ref, release);
- return next;
- } else {
- /* Concurrently pushed, retry */
- rcu_read_unlock();
- continue;
- }
- } else {
- /* Empty */
- rcu_read_unlock();
- return NULL;
- }
- }
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _URCU_RCULFQUEUE_STATIC_H */
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
-#include <urcu/urcu_ref.h>
#include <assert.h>
+#include <urcu-call-rcu.h>
#ifdef __cplusplus
extern "C" {
#endif
-/*
- * Lock-free RCU queue using reference counting. Enqueue and dequeue operations
- * hold a RCU read lock to deal with cmpxchg ABA problem. This implementation
- * keeps a dummy head node to ensure we can always update the queue locklessly.
- * Given that this is a queue, the dummy head node must always advance as we
- * dequeue entries. Therefore, we keep a reference count on each entry we are
- * dequeueing, so they can be kept as dummy head node until the next dequeue, at
- * which point their reference count will be decremented.
- */
+struct cds_lfq_queue_rcu;
struct cds_lfq_node_rcu {
struct cds_lfq_node_rcu *next;
- struct urcu_ref ref;
+ int dummy;
};
struct cds_lfq_queue_rcu {
struct cds_lfq_node_rcu *head, *tail;
- struct cds_lfq_node_rcu init; /* Dummy initialization node */
};
#ifdef _LGPL_SOURCE
-#include <urcu/rculfqueue-static.h>
+#include <urcu/static/rculfqueue.h>
+
+#define cds_lfq_node_init_rcu_qsbr _cds_lfq_node_init_rcu
+#define cds_lfq_init_rcu_qsbr _cds_lfq_init_rcu
+#define cds_lfq_destroy_rcu_qsbr _cds_lfq_destroy_rcu
+#define cds_lfq_enqueue_rcu_qsbr _cds_lfq_enqueue_rcu
+#define cds_lfq_dequeue_rcu_qsbr _cds_lfq_dequeue_rcu
+
+#define cds_lfq_node_init_rcu_bp _cds_lfq_node_init_rcu
+#define cds_lfq_init_rcu_bp _cds_lfq_init_rcu
+#define cds_lfq_destroy_rcu_bp _cds_lfq_destroy_rcu
+#define cds_lfq_enqueue_rcu_bp _cds_lfq_enqueue_rcu
+#define cds_lfq_dequeue_rcu_bp _cds_lfq_dequeue_rcu
-#define cds_lfq_node_init_rcu _cds_lfq_node_init_rcu
-#define cds_lfq_init_rcu _cds_lfq_init_rcu
-#define cds_lfq_enqueue_rcu _cds_lfq_enqueue_rcu
-#define cds_lfq_dequeue_rcu _cds_lfq_dequeue_rcu
+#define cds_lfq_node_init_rcu_memb _cds_lfq_node_init_rcu
+#define cds_lfq_init_rcu_memb _cds_lfq_init_rcu
+#define cds_lfq_destroy_rcu_memb _cds_lfq_destroy_rcu
+#define cds_lfq_enqueue_rcu_memb _cds_lfq_enqueue_rcu
+#define cds_lfq_dequeue_rcu_memb _cds_lfq_dequeue_rcu
+
+#define cds_lfq_node_init_rcu_mb _cds_lfq_node_init_rcu
+#define cds_lfq_init_rcu_mb _cds_lfq_init_rcu
+#define cds_lfq_destroy_rcu_mb _cds_lfq_destroy_rcu
+#define cds_lfq_enqueue_rcu_mb _cds_lfq_enqueue_rcu
+#define cds_lfq_dequeue_rcu_mb _cds_lfq_dequeue_rcu
+
+#define cds_lfq_node_init_rcu_sig _cds_lfq_node_init_rcu
+#define cds_lfq_init_rcu_sig _cds_lfq_init_rcu
+#define cds_lfq_destroy_rcu_sig _cds_lfq_destroy_rcu
+#define cds_lfq_enqueue_rcu_sig _cds_lfq_enqueue_rcu
+#define cds_lfq_dequeue_rcu_sig _cds_lfq_dequeue_rcu
#else /* !_LGPL_SOURCE */
extern void cds_lfq_node_init_rcu(struct cds_lfq_node_rcu *node);
extern void cds_lfq_init_rcu(struct cds_lfq_queue_rcu *q);
-extern void cds_lfq_enqueue_rcu(struct cds_lfq_queue_rcu *q, struct cds_lfq_node_rcu *node);
+/*
+ * The queue should be emptied before calling destroy.
+ *
+ * Return 0 on success, -EPERM if queue is not empty.
+ */
+extern int cds_lfq_destroy_rcu(struct cds_lfq_queue_rcu *q);
+
+/*
+ * Acts as a RCU reader.
+ */
+extern void cds_lfq_enqueue_rcu(struct cds_lfq_queue_rcu *q,
+ struct cds_lfq_node_rcu *node);
/*
- * The entry returned by dequeue must be taken care of by doing a urcu_ref_put,
- * which calls the release primitive when the reference count drops to zero. A
- * grace period must be waited after execution of the release callback before
- * performing the actual memory reclamation or modifying the cds_lfq_node_rcu
- * structure.
- * In other words, the entry lfq node returned by dequeue must not be
- * modified/re-used/freed until the reference count reaches zero and a grace
- * period has elapsed (after the refcount reached 0).
+ * Acts as a RCU reader.
+ *
+ * The caller must wait for a grace period to pass before freeing the returned
+ * node or modifying the cds_lfq_node_rcu structure.
+ * Returns NULL if queue is empty.
*/
-extern struct cds_lfq_node_rcu *
-cds_lfq_dequeue_rcu(struct cds_lfq_queue_rcu *q, void (*release)(struct urcu_ref *));
+extern
+struct cds_lfq_node_rcu *cds_lfq_dequeue_rcu(struct cds_lfq_queue_rcu *q);
#endif /* !_LGPL_SOURCE */
+++ /dev/null
-#ifndef _URCU_RCULFSTACK_STATIC_H
-#define _URCU_RCULFSTACK_STATIC_H
-
-/*
- * rculfstack-static.h
- *
- * Userspace RCU library - Lock-Free RCU Stack
- *
- * Copyright 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See rculfstack.h for linking
- * dynamically with the userspace rcu library.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include <urcu/uatomic_arch.h>
-/* A urcu implementation header should be already included. */
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-void _cds_lfs_node_init_rcu(struct cds_lfs_node_rcu *node)
-{
-}
-
-void _cds_lfs_init_rcu(struct cds_lfs_stack_rcu *s)
-{
- s->head = NULL;
-}
-
-void _cds_lfs_push_rcu(struct cds_lfs_stack_rcu *s, struct cds_lfs_node_rcu *node)
-{
- struct cds_lfs_node_rcu *head = NULL;
-
- for (;;) {
- struct cds_lfs_node_rcu *old_head = head;
-
- node->next = head;
- /*
- * uatomic_cmpxchg() implicit memory barrier orders earlier
- * stores to node before publication.
- */
- head = uatomic_cmpxchg(&s->head, old_head, node);
- if (old_head == head)
- break;
- }
-}
-
-/*
- * The caller must wait for a grace period to pass before freeing the returned
- * node or modifying the cds_lfs_node_rcu structure.
- * Returns NULL if stack is empty.
- */
-struct cds_lfs_node_rcu *
-_cds_lfs_pop_rcu(struct cds_lfs_stack_rcu *s)
-{
- for (;;) {
- struct cds_lfs_node_rcu *head;
-
- rcu_read_lock();
- head = rcu_dereference(s->head);
- if (head) {
- struct cds_lfs_node_rcu *next = rcu_dereference(head->next);
-
- if (uatomic_cmpxchg(&s->head, head, next) == head) {
- rcu_read_unlock();
- return head;
- } else {
- /* Concurrent modification. Retry. */
- rcu_read_unlock();
- continue;
- }
- } else {
- /* Empty stack */
- rcu_read_unlock();
- return NULL;
- }
- }
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _URCU_RCULFSTACK_STATIC_H */
#ifdef _LGPL_SOURCE
-#include <urcu/rculfstack-static.h>
+#include <urcu/static/rculfstack.h>
-#define cds_lfs_node_init_rcu _cds_lfs_node_init_rcu
-#define cds_lfs_init_rcu _cds_lfs_init_rcu
-#define cds_lfs_push_rcu _cds_lfs_push_rcu
-#define cds_lfs_pop_rcu _cds_lfs_pop_rcu
+#define cds_lfs_node_init_rcu_qsbr _cds_lfs_node_init_rcu
+#define cds_lfs_init_rcu_qsbr _cds_lfs_init_rcu
+#define cds_lfs_push_rcu_qsbr _cds_lfs_push_rcu
+#define cds_lfs_pop_rcu_qsbr _cds_lfs_pop_rcu
+
+#define cds_lfs_node_init_rcu_bp _cds_lfs_node_init_rcu
+#define cds_lfs_init_rcu_bp _cds_lfs_init_rcu
+#define cds_lfs_push_rcu_bp _cds_lfs_push_rcu
+#define cds_lfs_pop_rcu_bp _cds_lfs_pop_rcu
+
+#define cds_lfs_node_init_rcu_memb _cds_lfs_node_init_rcu
+#define cds_lfs_init_rcu_memb _cds_lfs_init_rcu
+#define cds_lfs_push_rcu_memb _cds_lfs_push_rcu
+#define cds_lfs_pop_rcu_memb _cds_lfs_pop_rcu
+
+#define cds_lfs_node_init_rcu_mb _cds_lfs_node_init_rcu
+#define cds_lfs_init_rcu_mb _cds_lfs_init_rcu
+#define cds_lfs_push_rcu_mb _cds_lfs_push_rcu
+#define cds_lfs_pop_rcu_mb _cds_lfs_pop_rcu
+
+#define cds_lfs_node_init_rcu_sig _cds_lfs_node_init_rcu
+#define cds_lfs_init_rcu_sig _cds_lfs_init_rcu
+#define cds_lfs_push_rcu_sig _cds_lfs_push_rcu
+#define cds_lfs_pop_rcu_sig _cds_lfs_pop_rcu
#else /* !_LGPL_SOURCE */
extern void cds_lfs_push_rcu(struct cds_lfs_stack_rcu *s, struct cds_lfs_node_rcu *node);
/*
+ * Acts as a RCU reader.
+ *
* The caller must wait for a grace period to pass before freeing the returned
* node or modifying the cds_lfs_node_rcu structure.
* Returns NULL if stack is empty.
--- /dev/null
+#ifndef _URCU_REF_H
+#define _URCU_REF_H
+
+/*
+ * Userspace RCU - Reference counting
+ *
+ * Copyright (C) 2009 Novell Inc.
+ * Copyright (C) 2010 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * Author: Jan Blunck <jblunck@suse.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU Lesser General Public License version 2.1 as
+ * published by the Free Software Foundation.
+ */
+
+#include <assert.h>
+#include <urcu/uatomic.h>
+
+struct urcu_ref {
+ long refcount; /* ATOMIC */
+};
+
+static inline void urcu_ref_set(struct urcu_ref *ref, long val)
+{
+ uatomic_set(&ref->refcount, val);
+}
+
+static inline void urcu_ref_init(struct urcu_ref *ref)
+{
+ urcu_ref_set(ref, 1);
+}
+
+static inline void urcu_ref_get(struct urcu_ref *ref)
+{
+ uatomic_add(&ref->refcount, 1);
+}
+
+static inline void urcu_ref_put(struct urcu_ref *ref,
+ void (*release)(struct urcu_ref *))
+{
+ long res = uatomic_sub_return(&ref->refcount, 1);
+ assert (res >= 0);
+ if (res == 0)
+ release(ref);
+}
+
+#endif /* _URCU_REF_H */
--- /dev/null
+#ifndef _URCU_RCULFQUEUE_STATIC_H
+#define _URCU_RCULFQUEUE_STATIC_H
+
+/*
+ * rculfqueue-static.h
+ *
+ * Userspace RCU library - Lock-Free RCU Queue
+ *
+ * Copyright 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See rculfqueue.h for linking
+ * dynamically with the userspace rcu library.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <urcu-call-rcu.h>
+#include <urcu/uatomic.h>
+#include <assert.h>
+#include <errno.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct cds_lfq_node_rcu_dummy {
+ struct cds_lfq_node_rcu parent;
+ struct rcu_head head;
+ struct cds_lfq_queue_rcu *q;
+};
+
+/*
+ * Lock-free RCU queue. Enqueue and dequeue operations hold a RCU read
+ * lock to deal with cmpxchg ABA problem. This queue is *not* circular:
+ * head points to the oldest node, tail points to the newest node.
+ * A dummy node is kept to ensure enqueue and dequeue can always proceed
+ * concurrently. Keeping a separate head and tail helps with large
+ * queues: enqueue and dequeue can proceed concurrently without
+ * wrestling for exclusive access to the same variables.
+ *
+ * Dequeue retries if it detects that it would be dequeueing the last node
+ * (it means a dummy node dequeue-requeue is in progress). This ensures
+ * that there is always at least one node in the queue.
+ *
+ * In the dequeue operation, we internally reallocate the dummy node
+ * upon dequeue/requeue and use call_rcu to free the old one after a
+ * grace period.
+ */
+
+static inline
+struct cds_lfq_node_rcu *make_dummy(struct cds_lfq_queue_rcu *q,
+ struct cds_lfq_node_rcu *next)
+{
+ struct cds_lfq_node_rcu_dummy *dummy;
+
+ dummy = malloc(sizeof(struct cds_lfq_node_rcu_dummy));
+ assert(dummy);
+ dummy->parent.next = next;
+ dummy->parent.dummy = 1;
+ dummy->q = q;
+ return &dummy->parent;
+}
+
+static inline
+void free_dummy_cb(struct rcu_head *head)
+{
+ struct cds_lfq_node_rcu_dummy *dummy =
+ caa_container_of(head, struct cds_lfq_node_rcu_dummy, head);
+ free(dummy);
+}
+
+static inline
+void rcu_free_dummy(struct cds_lfq_node_rcu *node)
+{
+ struct cds_lfq_node_rcu_dummy *dummy;
+
+ assert(node->dummy);
+ dummy = caa_container_of(node, struct cds_lfq_node_rcu_dummy, parent);
+ call_rcu(&dummy->head, free_dummy_cb);
+}
+
+static inline
+void free_dummy(struct cds_lfq_node_rcu *node)
+{
+ struct cds_lfq_node_rcu_dummy *dummy;
+
+ assert(node->dummy);
+ dummy = caa_container_of(node, struct cds_lfq_node_rcu_dummy, parent);
+ free(dummy);
+}
+
+static inline
+void _cds_lfq_node_init_rcu(struct cds_lfq_node_rcu *node)
+{
+ node->next = NULL;
+ node->dummy = 0;
+}
+
+static inline
+void _cds_lfq_init_rcu(struct cds_lfq_queue_rcu *q)
+{
+ q->tail = make_dummy(q, NULL);
+ q->head = q->tail;
+}
+
+/*
+ * The queue should be emptied before calling destroy.
+ *
+ * Return 0 on success, -EPERM if queue is not empty.
+ */
+static inline
+int _cds_lfq_destroy_rcu(struct cds_lfq_queue_rcu *q)
+{
+ struct cds_lfq_node_rcu *head;
+
+ head = rcu_dereference(q->head);
+ if (!(head->dummy && head->next == NULL))
+ return -EPERM; /* not empty */
+ free_dummy(head);
+ return 0;
+}
+
+/*
+ * Acts as a RCU reader.
+ */
+static inline
+void _cds_lfq_enqueue_rcu(struct cds_lfq_queue_rcu *q,
+ struct cds_lfq_node_rcu *node)
+{
+ /*
+ * uatomic_cmpxchg() implicit memory barrier orders earlier stores to
+ * node before publication.
+ */
+
+ for (;;) {
+ struct cds_lfq_node_rcu *tail, *next;
+
+ rcu_read_lock();
+ tail = rcu_dereference(q->tail);
+ next = uatomic_cmpxchg(&tail->next, NULL, node);
+ if (next == NULL) {
+ /*
+ * Tail was at the end of queue, we successfully
+ * appended to it. Now move tail (another
+ * enqueue might beat us to it, that's fine).
+ */
+ (void) uatomic_cmpxchg(&q->tail, tail, node);
+ rcu_read_unlock();
+ return;
+ } else {
+ /*
+ * Failure to append to current tail.
+ * Help moving tail further and retry.
+ */
+ (void) uatomic_cmpxchg(&q->tail, tail, next);
+ rcu_read_unlock();
+ continue;
+ }
+ }
+}
+
+static inline
+void enqueue_dummy(struct cds_lfq_queue_rcu *q)
+{
+ struct cds_lfq_node_rcu *node;
+
+ /* We need to reallocate to protect from ABA. */
+ node = make_dummy(q, NULL);
+ _cds_lfq_enqueue_rcu(q, node);
+}
+
+/*
+ * Acts as a RCU reader.
+ *
+ * The caller must wait for a grace period to pass before freeing the returned
+ * node or modifying the cds_lfq_node_rcu structure.
+ * Returns NULL if queue is empty.
+ */
+static inline
+struct cds_lfq_node_rcu *_cds_lfq_dequeue_rcu(struct cds_lfq_queue_rcu *q)
+{
+ for (;;) {
+ struct cds_lfq_node_rcu *head, *next;
+
+ rcu_read_lock();
+ head = rcu_dereference(q->head);
+ next = rcu_dereference(head->next);
+ if (head->dummy && next == NULL) {
+ rcu_read_unlock();
+ return NULL; /* empty */
+ }
+ /*
+ * We never, ever allow dequeue to get to a state where
+ * the queue is empty (we need at least one node in the
+ * queue). This is ensured by checking if the head next
+ * is NULL, which means we need to enqueue a dummy node
+ * before we can hope to dequeue anything.
+ */
+ if (!next) {
+ enqueue_dummy(q);
+ next = rcu_dereference(head->next);
+ }
+ if (uatomic_cmpxchg(&q->head, head, next) != head) {
+ rcu_read_unlock();
+ continue; /* Concurrently pushed. */
+ }
+ if (head->dummy) {
+ /* Free dummy after grace period. */
+ rcu_free_dummy(head);
+ rcu_read_unlock();
+ continue; /* try again */
+ }
+ rcu_read_unlock();
+ return head;
+ }
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _URCU_RCULFQUEUE_STATIC_H */
--- /dev/null
+#ifndef _URCU_RCULFSTACK_STATIC_H
+#define _URCU_RCULFSTACK_STATIC_H
+
+/*
+ * rculfstack-static.h
+ *
+ * Userspace RCU library - Lock-Free RCU Stack
+ *
+ * Copyright 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See rculfstack.h for linking
+ * dynamically with the userspace rcu library.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <urcu/uatomic.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+static inline
+void _cds_lfs_node_init_rcu(struct cds_lfs_node_rcu *node)
+{
+}
+
+static inline
+void _cds_lfs_init_rcu(struct cds_lfs_stack_rcu *s)
+{
+ s->head = NULL;
+}
+
+static inline
+void _cds_lfs_push_rcu(struct cds_lfs_stack_rcu *s, struct cds_lfs_node_rcu *node)
+{
+ struct cds_lfs_node_rcu *head = NULL;
+
+ for (;;) {
+ struct cds_lfs_node_rcu *old_head = head;
+
+ node->next = head;
+ /*
+ * uatomic_cmpxchg() implicit memory barrier orders earlier
+ * stores to node before publication.
+ */
+ head = uatomic_cmpxchg(&s->head, old_head, node);
+ if (old_head == head)
+ break;
+ }
+}
+
+/*
+ * Acts as a RCU reader.
+ *
+ * The caller must wait for a grace period to pass before freeing the returned
+ * node or modifying the cds_lfs_node_rcu structure.
+ * Returns NULL if stack is empty.
+ */
+static inline
+struct cds_lfs_node_rcu *
+_cds_lfs_pop_rcu(struct cds_lfs_stack_rcu *s)
+{
+ for (;;) {
+ struct cds_lfs_node_rcu *head;
+
+ rcu_read_lock();
+ head = rcu_dereference(s->head);
+ if (head) {
+ struct cds_lfs_node_rcu *next = rcu_dereference(head->next);
+
+ if (uatomic_cmpxchg(&s->head, head, next) == head) {
+ rcu_read_unlock();
+ return head;
+ } else {
+ /* Concurrent modification. Retry. */
+ rcu_read_unlock();
+ continue;
+ }
+ } else {
+ /* Empty stack */
+ rcu_read_unlock();
+ return NULL;
+ }
+ }
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _URCU_RCULFSTACK_STATIC_H */
--- /dev/null
+#ifndef _URCU_BP_STATIC_H
+#define _URCU_BP_STATIC_H
+
+/*
+ * urcu-bp-static.h
+ *
+ * Userspace RCU header.
+ *
+ * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See urcu.h for linking
+ * dynamically with the userspace rcu library.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * IBM's contributions to this file may be relicensed under LGPLv2 or later.
+ */
+
+#include <stdlib.h>
+#include <pthread.h>
+#include <unistd.h>
+
+#include <urcu/compiler.h>
+#include <urcu/arch.h>
+#include <urcu/system.h>
+#include <urcu/uatomic.h>
+#include <urcu/list.h>
+
+/*
+ * This code section can only be included in LGPL 2.1 compatible source code.
+ * See below for the function call wrappers which can be used in code meant to
+ * be only linked with the Userspace RCU library. This comes with a small
+ * performance degradation on the read-side due to the added function calls.
+ * This is required to permit relinking with newer versions of the library.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef DEBUG_RCU
+#define rcu_assert(args...) assert(args)
+#else
+#define rcu_assert(args...)
+#endif
+
+#ifdef DEBUG_YIELD
+#include <sched.h>
+#include <time.h>
+#include <pthread.h>
+#include <unistd.h>
+
+#define YIELD_READ (1 << 0)
+#define YIELD_WRITE (1 << 1)
+
+/*
+ * Updates without RCU_MB are much slower. Account this in
+ * the delay.
+ */
+/* maximum sleep delay, in us */
+#define MAX_SLEEP 50
+
+extern unsigned int yield_active;
+extern unsigned int __thread rand_yield;
+
+static inline void debug_yield_read(void)
+{
+ if (yield_active & YIELD_READ)
+ if (rand_r(&rand_yield) & 0x1)
+ usleep(rand_r(&rand_yield) % MAX_SLEEP);
+}
+
+static inline void debug_yield_write(void)
+{
+ if (yield_active & YIELD_WRITE)
+ if (rand_r(&rand_yield) & 0x1)
+ usleep(rand_r(&rand_yield) % MAX_SLEEP);
+}
+
+static inline void debug_yield_init(void)
+{
+ rand_yield = time(NULL) ^ pthread_self();
+}
+#else
+static inline void debug_yield_read(void)
+{
+}
+
+static inline void debug_yield_write(void)
+{
+}
+
+static inline void debug_yield_init(void)
+{
+
+}
+#endif
+
+/*
+ * The trick here is that RCU_GP_CTR_PHASE must be a multiple of 8 so we can use a
+ * full 8-bits, 16-bits or 32-bits bitmask for the lower order bits.
+ */
+#define RCU_GP_COUNT (1UL << 0)
+/* Use the amount of bits equal to half of the architecture long size */
+#define RCU_GP_CTR_PHASE (1UL << (sizeof(long) << 2))
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_PHASE - 1)
+
+/*
+ * Used internally by _rcu_read_lock.
+ */
+extern void rcu_bp_register(void);
+
+/*
+ * Global quiescent period counter with low-order bits unused.
+ * Using an int rather than a char to eliminate false register dependencies
+ * causing stalls on some architectures.
+ */
+extern long rcu_gp_ctr;
+
+struct rcu_reader {
+ /* Data used by both reader and synchronize_rcu() */
+ long ctr;
+ /* Data used for registry */
+ struct cds_list_head node __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
+ pthread_t tid;
+ int alloc; /* registry entry allocated */
+};
+
+/*
+ * Bulletproof version keeps a pointer to a registry not part of the TLS.
+ * Adds a pointer dereference on the read-side, but won't require to unregister
+ * the reader thread.
+ */
+extern struct rcu_reader __thread *rcu_reader;
+
+static inline int rcu_old_gp_ongoing(long *value)
+{
+ long v;
+
+ if (value == NULL)
+ return 0;
+ /*
+ * Make sure both tests below are done on the same version of *value
+ * to ensure consistency.
+ */
+ v = CMM_LOAD_SHARED(*value);
+ return (v & RCU_GP_CTR_NEST_MASK) &&
+ ((v ^ rcu_gp_ctr) & RCU_GP_CTR_PHASE);
+}
+
+static inline void _rcu_read_lock(void)
+{
+ long tmp;
+
+ /* Check if registered */
+ if (unlikely(!rcu_reader))
+ rcu_bp_register();
+
+ cmm_barrier(); /* Ensure the compiler does not reorder us with mutex */
+ tmp = rcu_reader->ctr;
+ /*
+ * rcu_gp_ctr is
+ * RCU_GP_COUNT | (~RCU_GP_CTR_PHASE or RCU_GP_CTR_PHASE)
+ */
+ if (likely(!(tmp & RCU_GP_CTR_NEST_MASK))) {
+ _CMM_STORE_SHARED(rcu_reader->ctr, _CMM_LOAD_SHARED(rcu_gp_ctr));
+ /*
+ * Set active readers count for outermost nesting level before
+ * accessing the pointer.
+ */
+ cmm_smp_mb();
+ } else {
+ _CMM_STORE_SHARED(rcu_reader->ctr, tmp + RCU_GP_COUNT);
+ }
+}
+
+static inline void _rcu_read_unlock(void)
+{
+ /*
+ * Finish using rcu before decrementing the pointer.
+ */
+ cmm_smp_mb();
+ _CMM_STORE_SHARED(rcu_reader->ctr, rcu_reader->ctr - RCU_GP_COUNT);
+ cmm_barrier(); /* Ensure the compiler does not reorder us with mutex */
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _URCU_BP_STATIC_H */
--- /dev/null
+#ifndef _URCU_POINTER_STATIC_H
+#define _URCU_POINTER_STATIC_H
+
+/*
+ * urcu-pointer-static.h
+ *
+ * Userspace RCU header. Operations on pointers.
+ *
+ * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See urcu-pointer.h for
+ * linking dynamically with the userspace rcu library.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * IBM's contributions to this file may be relicensed under LGPLv2 or later.
+ */
+
+#include <urcu/compiler.h>
+#include <urcu/arch.h>
+#include <urcu/system.h>
+#include <urcu/uatomic.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * _rcu_dereference - reads (copy) a RCU-protected pointer to a local variable
+ * into a RCU read-side critical section. The pointer can later be safely
+ * dereferenced within the critical section.
+ *
+ * This ensures that the pointer copy is invariant throughout the whole critical
+ * section.
+ *
+ * Inserts memory barriers on architectures that require them (currently only
+ * Alpha) and documents which pointers are protected by RCU.
+ *
+ * The compiler memory barrier in CMM_LOAD_SHARED() ensures that value-speculative
+ * optimizations (e.g. VSS: Value Speculation Scheduling) do not perform the
+ * data read before the pointer read by speculating the value of the pointer.
+ * Correct ordering is ensured because the pointer is read as a volatile access.
+ * This acts as a global side-effect operation, which forbids reordering of
+ * dependent memory operations. Note that such concern about dependency-breaking
+ * optimizations will eventually be taken care of by the "memory_order_consume"
+ * addition to forthcoming C++ standard.
+ *
+ * Should match rcu_assign_pointer() or rcu_xchg_pointer().
+ */
+
+#define _rcu_dereference(p) ({ \
+ typeof(p) _________p1 = CMM_LOAD_SHARED(p); \
+ cmm_smp_read_barrier_depends(); \
+ (_________p1); \
+ })
+
+/**
+ * _rcu_cmpxchg_pointer - same as rcu_assign_pointer, but tests if the pointer
+ * is as expected by "old". If succeeds, returns the previous pointer to the
+ * data structure, which can be safely freed after waiting for a quiescent state
+ * using synchronize_rcu(). If fails (unexpected value), returns old (which
+ * should not be freed !).
+ */
+
+#define _rcu_cmpxchg_pointer(p, old, _new) \
+ ({ \
+ typeof(*p) _________pold = (old); \
+ typeof(*p) _________pnew = (_new); \
+ if (!__builtin_constant_p(_new) || \
+ ((_new) != NULL)) \
+ cmm_wmb(); \
+ uatomic_cmpxchg(p, _________pold, _________pnew); \
+ })
+
+/**
+ * _rcu_xchg_pointer - same as rcu_assign_pointer, but returns the previous
+ * pointer to the data structure, which can be safely freed after waiting for a
+ * quiescent state using synchronize_rcu().
+ */
+
+#define _rcu_xchg_pointer(p, v) \
+ ({ \
+ typeof(*p) _________pv = (v); \
+ if (!__builtin_constant_p(v) || \
+ ((v) != NULL)) \
+ cmm_wmb(); \
+ uatomic_xchg(p, _________pv); \
+ })
+
+
+#define _rcu_set_pointer(p, v) \
+ ({ \
+ typeof(*p) _________pv = (v); \
+ if (!__builtin_constant_p(v) || \
+ ((v) != NULL)) \
+ cmm_wmb(); \
+ uatomic_set(p, _________pv); \
+ })
+
+/**
+ * _rcu_assign_pointer - assign (publicize) a pointer to a new data structure
+ * meant to be read by RCU read-side critical sections. Returns the assigned
+ * value.
+ *
+ * Documents which pointers will be dereferenced by RCU read-side critical
+ * sections and adds the required memory barriers on architectures requiring
+ * them. It also makes sure the compiler does not reorder code initializing the
+ * data structure before its publication.
+ *
+ * Should match rcu_dereference().
+ */
+
+#define _rcu_assign_pointer(p, v) _rcu_set_pointer(&(p), v)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _URCU_POINTER_STATIC_H */
--- /dev/null
+#ifndef _URCU_QSBR_STATIC_H
+#define _URCU_QSBR_STATIC_H
+
+/*
+ * urcu-qsbr-static.h
+ *
+ * Userspace RCU QSBR header.
+ *
+ * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See urcu-qsbr.h for linking
+ * dynamically with the userspace rcu QSBR library.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * IBM's contributions to this file may be relicensed under LGPLv2 or later.
+ */
+
+#include <stdlib.h>
+#include <pthread.h>
+#include <assert.h>
+#include <limits.h>
+#include <unistd.h>
+#include <stdint.h>
+
+#include <urcu/compiler.h>
+#include <urcu/arch.h>
+#include <urcu/system.h>
+#include <urcu/uatomic.h>
+#include <urcu/list.h>
+#include <urcu/futex.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * This code section can only be included in LGPL 2.1 compatible source code.
+ * See below for the function call wrappers which can be used in code meant to
+ * be only linked with the Userspace RCU library. This comes with a small
+ * performance degradation on the read-side due to the added function calls.
+ * This is required to permit relinking with newer versions of the library.
+ */
+
+#ifdef DEBUG_RCU
+#define rcu_assert(args...) assert(args)
+#else
+#define rcu_assert(args...)
+#endif
+
+#ifdef DEBUG_YIELD
+#include <sched.h>
+#include <time.h>
+#include <pthread.h>
+#include <unistd.h>
+
+#define YIELD_READ (1 << 0)
+#define YIELD_WRITE (1 << 1)
+
+/* maximum sleep delay, in us */
+#define MAX_SLEEP 50
+
+extern unsigned int yield_active;
+extern unsigned int __thread rand_yield;
+
+static inline void debug_yield_read(void)
+{
+ if (yield_active & YIELD_READ)
+ if (rand_r(&rand_yield) & 0x1)
+ usleep(rand_r(&rand_yield) % MAX_SLEEP);
+}
+
+static inline void debug_yield_write(void)
+{
+ if (yield_active & YIELD_WRITE)
+ if (rand_r(&rand_yield) & 0x1)
+ usleep(rand_r(&rand_yield) % MAX_SLEEP);
+}
+
+static inline void debug_yield_init(void)
+{
+ rand_yield = time(NULL) ^ pthread_self();
+}
+#else
+static inline void debug_yield_read(void)
+{
+}
+
+static inline void debug_yield_write(void)
+{
+}
+
+static inline void debug_yield_init(void)
+{
+
+}
+#endif
+
+#define RCU_GP_ONLINE (1UL << 0)
+#define RCU_GP_CTR (1UL << 1)
+
+/*
+ * Global quiescent period counter with low-order bits unused.
+ * Using an int rather than a char to eliminate false register dependencies
+ * causing stalls on some architectures.
+ */
+extern unsigned long rcu_gp_ctr;
+
+struct rcu_reader {
+ /* Data used by both reader and synchronize_rcu() */
+ unsigned long ctr;
+ /* Data used for registry */
+ struct cds_list_head node __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
+ int waiting;
+ pthread_t tid;
+};
+
+extern struct rcu_reader __thread rcu_reader;
+
+extern int32_t gp_futex;
+
+/*
+ * Wake-up waiting synchronize_rcu(). Called from many concurrent threads.
+ */
+static inline void wake_up_gp(void)
+{
+ if (unlikely(_CMM_LOAD_SHARED(rcu_reader.waiting))) {
+ _CMM_STORE_SHARED(rcu_reader.waiting, 0);
+ cmm_smp_mb();
+ if (uatomic_read(&gp_futex) != -1)
+ return;
+ uatomic_set(&gp_futex, 0);
+ futex_noasync(&gp_futex, FUTEX_WAKE, 1,
+ NULL, NULL, 0);
+ }
+}
+
+static inline int rcu_gp_ongoing(unsigned long *ctr)
+{
+ unsigned long v;
+
+ v = CMM_LOAD_SHARED(*ctr);
+ return v && (v != rcu_gp_ctr);
+}
+
+static inline void _rcu_read_lock(void)
+{
+ rcu_assert(rcu_reader.ctr);
+}
+
+static inline void _rcu_read_unlock(void)
+{
+}
+
+static inline void _rcu_quiescent_state(void)
+{
+ cmm_smp_mb();
+ _CMM_STORE_SHARED(rcu_reader.ctr, _CMM_LOAD_SHARED(rcu_gp_ctr));
+ cmm_smp_mb(); /* write rcu_reader.ctr before read futex */
+ wake_up_gp();
+ cmm_smp_mb();
+}
+
+static inline void _rcu_thread_offline(void)
+{
+ cmm_smp_mb();
+ CMM_STORE_SHARED(rcu_reader.ctr, 0);
+ cmm_smp_mb(); /* write rcu_reader.ctr before read futex */
+ wake_up_gp();
+ cmm_barrier(); /* Ensure the compiler does not reorder us with mutex */
+}
+
+static inline void _rcu_thread_online(void)
+{
+ cmm_barrier(); /* Ensure the compiler does not reorder us with mutex */
+ _CMM_STORE_SHARED(rcu_reader.ctr, CMM_LOAD_SHARED(rcu_gp_ctr));
+ cmm_smp_mb();
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _URCU_QSBR_STATIC_H */
--- /dev/null
+#ifndef _URCU_STATIC_H
+#define _URCU_STATIC_H
+
+/*
+ * urcu-static.h
+ *
+ * Userspace RCU header.
+ *
+ * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See urcu.h for linking
+ * dynamically with the userspace rcu library.
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * IBM's contributions to this file may be relicensed under LGPLv2 or later.
+ */
+
+#include <stdlib.h>
+#include <pthread.h>
+#include <unistd.h>
+#include <stdint.h>
+
+#include <urcu/compiler.h>
+#include <urcu/arch.h>
+#include <urcu/system.h>
+#include <urcu/uatomic.h>
+#include <urcu/list.h>
+#include <urcu/futex.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Default is RCU_MEMBARRIER */
+#if !defined(RCU_MEMBARRIER) && !defined(RCU_MB) && !defined(RCU_SIGNAL)
+#define RCU_MEMBARRIER
+#endif
+
+/*
+ * RCU_MEMBARRIER is only possibly available on Linux. Fallback to RCU_MB
+ * otherwise.
+ */
+#if !defined(__linux__) && defined(RCU_MEMBARRIER)
+#undef RCU_MEMBARRIER
+#define RCU_MB
+#endif
+
+#ifdef RCU_MEMBARRIER
+#include <syscall.h>
+
+/* If the headers do not support SYS_membarrier, statically use RCU_MB */
+#ifdef SYS_membarrier
+# define MEMBARRIER_EXPEDITED (1 << 0)
+# define MEMBARRIER_DELAYED (1 << 1)
+# define MEMBARRIER_QUERY (1 << 16)
+# define membarrier(...) syscall(SYS_membarrier, __VA_ARGS__)
+#else
+# undef RCU_MEMBARRIER
+# define RCU_MB
+#endif
+#endif
+
+/*
+ * This code section can only be included in LGPL 2.1 compatible source code.
+ * See below for the function call wrappers which can be used in code meant to
+ * be only linked with the Userspace RCU library. This comes with a small
+ * performance degradation on the read-side due to the added function calls.
+ * This is required to permit relinking with newer versions of the library.
+ */
+
+/*
+ * The signal number used by the RCU library can be overridden with
+ * -DSIGRCU= when compiling the library.
+ * Provide backward compatibility for liburcu 0.3.x SIGURCU.
+ */
+#ifdef SIGURCU
+#define SIGRCU SIGURCU
+#endif
+
+#ifndef SIGRCU
+#define SIGRCU SIGUSR1
+#endif
+
+#ifdef DEBUG_RCU
+#define rcu_assert(args...) assert(args)
+#else
+#define rcu_assert(args...)
+#endif
+
+#ifdef DEBUG_YIELD
+#include <sched.h>
+#include <time.h>
+#include <pthread.h>
+#include <unistd.h>
+
+#define YIELD_READ (1 << 0)
+#define YIELD_WRITE (1 << 1)
+
+/*
+ * Updates with RCU_SIGNAL are much slower. Account this in the delay.
+ */
+#ifdef RCU_SIGNAL
+/* maximum sleep delay, in us */
+#define MAX_SLEEP 30000
+#else
+#define MAX_SLEEP 50
+#endif
+
+extern unsigned int yield_active;
+extern unsigned int __thread rand_yield;
+
+static inline void debug_yield_read(void)
+{
+ if (yield_active & YIELD_READ)
+ if (rand_r(&rand_yield) & 0x1)
+ usleep(rand_r(&rand_yield) % MAX_SLEEP);
+}
+
+static inline void debug_yield_write(void)
+{
+ if (yield_active & YIELD_WRITE)
+ if (rand_r(&rand_yield) & 0x1)
+ usleep(rand_r(&rand_yield) % MAX_SLEEP);
+}
+
+static inline void debug_yield_init(void)
+{
+ rand_yield = time(NULL) ^ (unsigned long) pthread_self();
+}
+#else
+static inline void debug_yield_read(void)
+{
+}
+
+static inline void debug_yield_write(void)
+{
+}
+
+static inline void debug_yield_init(void)
+{
+
+}
+#endif
+
+/*
+ * RCU memory barrier broadcast group. Currently, only broadcast to all process
+ * threads is supported (group 0).
+ *
+ * Slave barriers are only guaranteed to be ordered wrt master barriers.
+ *
+ * The pair ordering is detailed as (O: ordered, X: not ordered) :
+ * slave master
+ * slave X O
+ * master O O
+ */
+
+#define MB_GROUP_ALL 0
+#define RCU_MB_GROUP MB_GROUP_ALL
+
+#ifdef RCU_MEMBARRIER
+extern int has_sys_membarrier;
+
+static inline void smp_mb_slave(int group)
+{
+ if (likely(has_sys_membarrier))
+ cmm_barrier();
+ else
+ cmm_smp_mb();
+}
+#endif
+
+#ifdef RCU_MB
+static inline void smp_mb_slave(int group)
+{
+ cmm_smp_mb();
+}
+#endif
+
+#ifdef RCU_SIGNAL
+static inline void smp_mb_slave(int group)
+{
+ cmm_barrier();
+}
+#endif
+
+/*
+ * The trick here is that RCU_GP_CTR_PHASE must be a multiple of 8 so we can use
+ * a full 8-bits, 16-bits or 32-bits bitmask for the lower order bits.
+ */
+#define RCU_GP_COUNT (1UL << 0)
+/* Use the amount of bits equal to half of the architecture long size */
+#define RCU_GP_CTR_PHASE (1UL << (sizeof(unsigned long) << 2))
+#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_PHASE - 1)
+
+/*
+ * Global quiescent period counter with low-order bits unused.
+ * Using an int rather than a char to eliminate false register dependencies
+ * causing stalls on some architectures.
+ */
+extern unsigned long rcu_gp_ctr;
+
+struct rcu_reader {
+ /* Data used by both reader and synchronize_rcu() */
+ unsigned long ctr;
+ char need_mb;
+ /* Data used for registry */
+ struct cds_list_head node __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
+ pthread_t tid;
+};
+
+extern struct rcu_reader __thread rcu_reader;
+
+extern int32_t gp_futex;
+
+/*
+ * Wake-up waiting synchronize_rcu(). Called from many concurrent threads.
+ */
+static inline void wake_up_gp(void)
+{
+ if (unlikely(uatomic_read(&gp_futex) == -1)) {
+ uatomic_set(&gp_futex, 0);
+ futex_async(&gp_futex, FUTEX_WAKE, 1,
+ NULL, NULL, 0);
+ }
+}
+
+static inline int rcu_gp_ongoing(unsigned long *ctr)
+{
+ unsigned long v;
+
+ /*
+ * Make sure both tests below are done on the same version of *value
+ * to ensure consistency.
+ */
+ v = CMM_LOAD_SHARED(*ctr);
+ return (v & RCU_GP_CTR_NEST_MASK) &&
+ ((v ^ rcu_gp_ctr) & RCU_GP_CTR_PHASE);
+}
+
+static inline void _rcu_read_lock(void)
+{
+	unsigned long tmp;
+
+	cmm_barrier();	/* Ensure the compiler does not reorder us with mutex */
+	tmp = rcu_reader.ctr;
+	/*
+	 * rcu_gp_ctr is
+	 *   RCU_GP_COUNT | (~RCU_GP_CTR_PHASE or RCU_GP_CTR_PHASE)
+	 */
+	if (likely(!(tmp & RCU_GP_CTR_NEST_MASK))) {
+		/* Outermost nesting: snapshot global counter (incl. phase). */
+		_CMM_STORE_SHARED(rcu_reader.ctr, _CMM_LOAD_SHARED(rcu_gp_ctr));
+		/*
+		 * Set active readers count for outermost nesting level before
+		 * accessing the pointer. See smp_mb_master().
+		 */
+		smp_mb_slave(RCU_MB_GROUP);
+	} else {
+		/* Nested read-side C.S.: bump nesting count, no barrier. */
+		_CMM_STORE_SHARED(rcu_reader.ctr, tmp + RCU_GP_COUNT);
+	}
+}
+
+static inline void _rcu_read_unlock(void)
+{
+	unsigned long tmp;
+
+	tmp = rcu_reader.ctr;
+	/*
+	 * Finish using rcu before decrementing the pointer.
+	 * See smp_mb_master().
+	 */
+	if (likely((tmp & RCU_GP_CTR_NEST_MASK) == RCU_GP_COUNT)) {
+		/* Leaving outermost nesting level. */
+		smp_mb_slave(RCU_MB_GROUP);
+		_CMM_STORE_SHARED(rcu_reader.ctr, rcu_reader.ctr - RCU_GP_COUNT);
+		/* write rcu_reader.ctr before read futex */
+		smp_mb_slave(RCU_MB_GROUP);
+		/* Wake a synchronize_rcu() possibly waiting on this reader. */
+		wake_up_gp();
+	} else {
+		/* Nested: only decrement the nesting count. */
+		_CMM_STORE_SHARED(rcu_reader.ctr, rcu_reader.ctr - RCU_GP_COUNT);
+	}
+	cmm_barrier();	/* Ensure the compiler does not reorder us with mutex */
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _URCU_STATIC_H */
--- /dev/null
+#ifndef _URCU_WFQUEUE_STATIC_H
+#define _URCU_WFQUEUE_STATIC_H
+
+/*
+ * wfqueue-static.h
+ *
+ * Userspace RCU library - Queue with Wait-Free Enqueue/Blocking Dequeue
+ *
+ * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See wfqueue.h for linking
+ * dynamically with the userspace rcu library.
+ *
+ * Copyright 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <pthread.h>
+#include <assert.h>
+#include <poll.h>
+#include <urcu/compiler.h>
+#include <urcu/uatomic.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Queue with wait-free enqueue/blocking dequeue.
+ * This implementation adds a dummy head node when the queue is empty to ensure
+ * we can always update the queue locklessly.
+ *
+ * Inspired from half-wait-free/half-blocking queue implementation done by
+ * Paul E. McKenney.
+ */
+
+#define WFQ_ADAPT_ATTEMPTS 10 /* Retry if being set */
+#define WFQ_WAIT 10 /* Wait 10 ms if being set */
+
+static inline void _cds_wfq_node_init(struct cds_wfq_node *node)
+{
+	/* NULL next marks a node not yet linked to a successor. */
+	node->next = NULL;
+}
+
+static inline void _cds_wfq_init(struct cds_wfq_queue *q)
+{
+	int ret;
+
+	/* Empty queue contains only the dummy node. */
+	_cds_wfq_node_init(&q->dummy);
+	/* Set queue head and tail */
+	q->head = &q->dummy;
+	q->tail = &q->dummy.next;
+	/* Lock serializes blocking dequeuers; enqueue stays lock-free. */
+	ret = pthread_mutex_init(&q->lock, NULL);
+	assert(!ret);
+}
+
+/* Wait-free enqueue: a single xchg on the tail publishes the node. */
+static inline void _cds_wfq_enqueue(struct cds_wfq_queue *q,
+		    struct cds_wfq_node *node)
+{
+	struct cds_wfq_node **old_tail;
+
+	/*
+	 * uatomic_xchg() implicit memory barrier orders earlier stores to data
+	 * structure containing node and setting node->next to NULL before
+	 * publication.
+	 */
+	old_tail = uatomic_xchg(&q->tail, &node->next);
+	/*
+	 * At this point, dequeuers see a NULL old_tail->next, which indicates
+	 * that the queue is being appended to. The following store will append
+	 * "node" to the queue from a dequeuer perspective.
+	 */
+	CMM_STORE_SHARED(*old_tail, node);
+}
+
+/*
+ * It is valid to reuse and free a dequeued node immediately.
+ *
+ * No need to go on a waitqueue here, as there is no possible state in which the
+ * list could cause dequeue to busy-loop needlessly while waiting for another
+ * thread to be scheduled. The queue appears empty until tail->next is set by
+ * enqueue.
+ */
+static inline struct cds_wfq_node *
+___cds_wfq_dequeue_blocking(struct cds_wfq_queue *q)
+{
+	struct cds_wfq_node *node, *next;
+	int attempt = 0;
+
+	/*
+	 * Queue is empty if it only contains the dummy node.
+	 */
+	if (q->head == &q->dummy && CMM_LOAD_SHARED(q->tail) == &q->dummy.next)
+		return NULL;
+	node = q->head;
+
+	/*
+	 * Adaptive busy-looping waiting for enqueuer to complete enqueue.
+	 */
+	while ((next = CMM_LOAD_SHARED(node->next)) == NULL) {
+		if (++attempt >= WFQ_ADAPT_ATTEMPTS) {
+			poll(NULL, 0, WFQ_WAIT);	/* Wait for 10ms */
+			attempt = 0;
+		} else
+			caa_cpu_relax();
+	}
+	/*
+	 * Move queue head forward.
+	 */
+	q->head = next;
+	/*
+	 * Requeue dummy node if we just dequeued it, then retry the
+	 * dequeue once so the caller gets a real node (or NULL).
+	 */
+	if (node == &q->dummy) {
+		_cds_wfq_node_init(node);
+		_cds_wfq_enqueue(q, node);
+		return ___cds_wfq_dequeue_blocking(q);
+	}
+	return node;
+}
+
+/* Locked wrapper: holds q->lock to serialize concurrent dequeuers. */
+static inline struct cds_wfq_node *
+_cds_wfq_dequeue_blocking(struct cds_wfq_queue *q)
+{
+	struct cds_wfq_node *retnode;
+	int ret;
+
+	ret = pthread_mutex_lock(&q->lock);
+	assert(!ret);
+	retnode = ___cds_wfq_dequeue_blocking(q);
+	ret = pthread_mutex_unlock(&q->lock);
+	assert(!ret);
+	return retnode;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _URCU_WFQUEUE_STATIC_H */
--- /dev/null
+#ifndef _URCU_WFSTACK_STATIC_H
+#define _URCU_WFSTACK_STATIC_H
+
+/*
+ * wfstack-static.h
+ *
+ * Userspace RCU library - Stack with Wait-Free push, Blocking pop.
+ *
+ * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See wfstack.h for linking
+ * dynamically with the userspace rcu library.
+ *
+ * Copyright 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <pthread.h>
+#include <assert.h>
+#include <poll.h>
+#include <urcu/compiler.h>
+#include <urcu/uatomic.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define CDS_WF_STACK_END ((void *)0x1UL)
+#define CDS_WFS_ADAPT_ATTEMPTS 10 /* Retry if being set */
+#define CDS_WFS_WAIT 10 /* Wait 10 ms if being set */
+
+static inline
+void _cds_wfs_node_init(struct cds_wfs_node *node)
+{
+	/* NULL next marks a node whose push is not yet complete. */
+	node->next = NULL;
+}
+
+static inline
+void _cds_wfs_init(struct cds_wfs_stack *s)
+{
+	int ret;
+
+	/* CDS_WF_STACK_END is the empty-stack sentinel. */
+	s->head = CDS_WF_STACK_END;
+	/* Lock serializes blocking poppers; push stays lock-free. */
+	ret = pthread_mutex_init(&s->lock, NULL);
+	assert(!ret);
+}
+
+/* Wait-free push: a single xchg on the head publishes the node. */
+static inline
+void _cds_wfs_push(struct cds_wfs_stack *s, struct cds_wfs_node *node)
+{
+	struct cds_wfs_node *old_head;
+
+	assert(node->next == NULL);
+	/*
+	 * uatomic_xchg() implicit memory barrier orders earlier stores to node
+	 * (setting it to NULL) before publication.
+	 */
+	old_head = uatomic_xchg(&s->head, node);
+	/*
+	 * At this point, dequeuers see a NULL node->next, they should busy-wait
+	 * until node->next is set to old_head.
+	 */
+	CMM_STORE_SHARED(node->next, old_head);
+}
+
+/*
+ * Returns NULL if stack is empty.
+ */
+static inline
+struct cds_wfs_node *
+___cds_wfs_pop_blocking(struct cds_wfs_stack *s)
+{
+	struct cds_wfs_node *head, *next;
+	int attempt = 0;
+
+retry:
+	head = CMM_LOAD_SHARED(s->head);
+	if (head == CDS_WF_STACK_END)
+		return NULL;
+	/*
+	 * Adaptive busy-looping waiting for push to complete.
+	 */
+	while ((next = CMM_LOAD_SHARED(head->next)) == NULL) {
+		if (++attempt >= CDS_WFS_ADAPT_ATTEMPTS) {
+			poll(NULL, 0, CDS_WFS_WAIT);	/* Wait for 10ms */
+			attempt = 0;
+		} else
+			caa_cpu_relax();
+	}
+	/* cmpxchg unlinks head; on concurrent modification, retry. */
+	if (uatomic_cmpxchg(&s->head, head, next) == head)
+		return head;
+	else
+		goto retry;		/* Concurrent modification. Retry. */
+}
+
+/* Locked wrapper: holds s->lock to serialize concurrent poppers. */
+static inline
+struct cds_wfs_node *
+_cds_wfs_pop_blocking(struct cds_wfs_stack *s)
+{
+	struct cds_wfs_node *retnode;
+	int ret;
+
+	ret = pthread_mutex_lock(&s->lock);
+	assert(!ret);
+	retnode = ___cds_wfs_pop_blocking(s);
+	ret = pthread_mutex_unlock(&s->lock);
+	assert(!ret);
+	return retnode;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _URCU_WFSTACK_STATIC_H */
--- /dev/null
+#ifndef _URCU_UATOMIC_ARCH_ALPHA_H
+#define _URCU_UATOMIC_ARCH_ALPHA_H
+
+/*
+ * Atomic exchange operations for the Alpha architecture. Let GCC do it.
+ *
+ * Copyright (c) 2010 Paolo Bonzini <pbonzini@redhat.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <urcu/compiler.h>
+#include <urcu/system.h>
+#include <urcu/uatomic/generic.h>
+
+#endif /* _URCU_UATOMIC_ARCH_ALPHA_H */
--- /dev/null
+#ifndef _URCU_ARCH_UATOMIC_ARM_H
+#define _URCU_ARCH_UATOMIC_ARM_H
+
+/*
+ * Atomics for ARM. This approach is usable on kernels back to 2.6.15.
+ *
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
+ * Copyright (c) 2009 Mathieu Desnoyers
+ * Copyright (c) 2010 Paul E. McKenney, IBM Corporation
+ * (Adapted from uatomic_arch_ppc.h)
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ * Code inspired from libuatomic_ops-1.2, inherited in part from the
+ * Boehm-Demers-Weiser conservative garbage collector.
+ */
+
+#include <urcu/compiler.h>
+#include <urcu/system.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* xchg */
+#define uatomic_xchg(addr, v) __sync_lock_test_and_set(addr, v)
+
+#ifdef __cplusplus
+}
+#endif
+
+#include <urcu/uatomic/generic.h>
+
+#endif /* _URCU_ARCH_UATOMIC_ARM_H */
--- /dev/null
+#ifndef _URCU_ARCH_UATOMIC_GCC_H
+#define _URCU_ARCH_UATOMIC_GCC_H
+
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
+ * Copyright (c) 2009 Mathieu Desnoyers
+ * Copyright (c) 2010 Paul E. McKenney, IBM Corporation
+ * (Adapted from uatomic_arch_ppc.h)
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ * Code inspired from libuatomic_ops-1.2, inherited in part from the
+ * Boehm-Demers-Weiser conservative garbage collector.
+ */
+
+#include <urcu/compiler.h>
+#include <urcu/system.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * If your platform doesn't have a full set of atomics, you will need
+ * a separate uatomic_arch_*.h file for your architecture. Otherwise,
+ * just rely on the definitions in uatomic/generic.h.
+ */
+#define UATOMIC_HAS_ATOMIC_BYTE
+#define UATOMIC_HAS_ATOMIC_SHORT
+
+#ifdef __cplusplus
+}
+#endif
+
+#include <urcu/uatomic/generic.h>
+
+#endif /* _URCU_ARCH_UATOMIC_GCC_H */
--- /dev/null
+#ifndef _URCU_UATOMIC_GENERIC_H
+#define _URCU_UATOMIC_GENERIC_H
+
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
+ * Copyright (c) 2009 Mathieu Desnoyers
+ * Copyright (c) 2010 Paolo Bonzini
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ * Code inspired from libuatomic_ops-1.2, inherited in part from the
+ * Boehm-Demers-Weiser conservative garbage collector.
+ */
+
+#include <urcu/compiler.h>
+#include <urcu/system.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef uatomic_set
+#define uatomic_set(addr, v) CMM_STORE_SHARED(*(addr), (v))
+#endif
+
+#ifndef uatomic_read
+#define uatomic_read(addr) CMM_LOAD_SHARED(*(addr))
+#endif
+
+#if !defined __OPTIMIZE__ || defined UATOMIC_NO_LINK_ERROR
+/* Trap at runtime when an unsupported operand size reaches uatomic ops. */
+static inline __attribute__((always_inline))
+void _uatomic_link_error()
+{
+#ifdef ILLEGAL_INSTR
+	/* generate an illegal instruction. Cannot catch this with linker tricks
+	 * when optimizations are disabled. */
+	__asm__ __volatile__(ILLEGAL_INSTR);
+#else
+	__builtin_trap ();
+#endif
+}
+
+#else /* #if !defined __OPTIMIZE__ || defined UATOMIC_NO_LINK_ERROR */
+extern void _uatomic_link_error ();
+#endif /* #else #if !defined __OPTIMIZE__ || defined UATOMIC_NO_LINK_ERROR */
+
+/* cmpxchg */
+
+#ifndef uatomic_cmpxchg
+/*
+ * Compare-and-swap *addr from old to _new, dispatching on operand size
+ * to the matching GCC __sync builtin. Returns the prior value of *addr.
+ */
+static inline __attribute__((always_inline))
+unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
+			      unsigned long _new, int len)
+{
+	switch (len) {
+#ifdef UATOMIC_HAS_ATOMIC_BYTE
+	case 1:
+		return __sync_val_compare_and_swap_1(addr, old, _new);
+#endif
+#ifdef UATOMIC_HAS_ATOMIC_SHORT
+	case 2:
+		return __sync_val_compare_and_swap_2(addr, old, _new);
+#endif
+	case 4:
+		return __sync_val_compare_and_swap_4(addr, old, _new);
+#if (CAA_BITS_PER_LONG == 64)
+	case 8:
+		return __sync_val_compare_and_swap_8(addr, old, _new);
+#endif
+	}
+	_uatomic_link_error();
+	return 0;
+}
+
+
+#define uatomic_cmpxchg(addr, old, _new)				    \
+	((__typeof__(*(addr))) _uatomic_cmpxchg((addr), (unsigned long)(old),\
+						(unsigned long)(_new),	    \
+						sizeof(*(addr))))
+
+
+/* uatomic_and */
+
+#ifndef uatomic_and
+/* Atomic AND of val into *addr, using the GCC __sync builtins. */
+static inline __attribute__((always_inline))
+void _uatomic_and(void *addr, unsigned long val,
+		  int len)
+{
+	switch (len) {
+#ifdef UATOMIC_HAS_ATOMIC_BYTE
+	case 1:
+		__sync_and_and_fetch_1(addr, val);
+		return;
+#endif
+#ifdef UATOMIC_HAS_ATOMIC_SHORT
+	case 2:
+		__sync_and_and_fetch_2(addr, val);
+		return;
+#endif
+	case 4:
+		__sync_and_and_fetch_4(addr, val);
+		return;
+#if (CAA_BITS_PER_LONG == 64)
+	case 8:
+		__sync_and_and_fetch_8(addr, val);
+		return;
+#endif
+	}
+	_uatomic_link_error();
+}
+
+#define uatomic_and(addr, v)			\
+	(_uatomic_and((addr),			\
+		(unsigned long)(v),		\
+		sizeof(*(addr))))
+#endif
+
+/* uatomic_or */
+
+#ifndef uatomic_or
+/* Atomic OR of val into *addr, using the GCC __sync builtins. */
+static inline __attribute__((always_inline))
+void _uatomic_or(void *addr, unsigned long val,
+		 int len)
+{
+	switch (len) {
+#ifdef UATOMIC_HAS_ATOMIC_BYTE
+	case 1:
+		__sync_or_and_fetch_1(addr, val);
+		return;
+#endif
+#ifdef UATOMIC_HAS_ATOMIC_SHORT
+	case 2:
+		__sync_or_and_fetch_2(addr, val);
+		return;
+#endif
+	case 4:
+		__sync_or_and_fetch_4(addr, val);
+		return;
+#if (CAA_BITS_PER_LONG == 64)
+	case 8:
+		__sync_or_and_fetch_8(addr, val);
+		return;
+#endif
+	}
+	_uatomic_link_error();
+	return;
+}
+
+#define uatomic_or(addr, v)			\
+	(_uatomic_or((addr),			\
+		(unsigned long)(v),		\
+		sizeof(*(addr))))
+#endif
+
+/* uatomic_add_return */
+
+#ifndef uatomic_add_return
+/* Atomically add val to *addr; returns the new value (add-and-fetch). */
+static inline __attribute__((always_inline))
+unsigned long _uatomic_add_return(void *addr, unsigned long val,
+				 int len)
+{
+	switch (len) {
+#ifdef UATOMIC_HAS_ATOMIC_BYTE
+	case 1:
+		return __sync_add_and_fetch_1(addr, val);
+#endif
+#ifdef UATOMIC_HAS_ATOMIC_SHORT
+	case 2:
+		return __sync_add_and_fetch_2(addr, val);
+#endif
+	case 4:
+		return __sync_add_and_fetch_4(addr, val);
+#if (CAA_BITS_PER_LONG == 64)
+	case 8:
+		return __sync_add_and_fetch_8(addr, val);
+#endif
+	}
+	_uatomic_link_error();
+	return 0;
+}
+
+
+#define uatomic_add_return(addr, v)					    \
+	((__typeof__(*(addr))) _uatomic_add_return((addr),		    \
+						  (unsigned long)(v),	    \
+						  sizeof(*(addr))))
+#endif /* #ifndef uatomic_add_return */
+
+#ifndef uatomic_xchg
+/* xchg */
+
+/*
+ * Atomically exchange *addr with val; returns the prior value.
+ * Built as a bool-CAS retry loop on the GCC __sync builtins.
+ */
+static inline __attribute__((always_inline))
+unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
+{
+	switch (len) {
+#ifdef UATOMIC_HAS_ATOMIC_BYTE
+	case 1:
+	{
+		unsigned char old;
+
+		do {
+			old = uatomic_read((unsigned char *)addr);
+		} while (!__sync_bool_compare_and_swap_1(addr, old, val));
+
+		return old;
+	}
+#endif
+#ifdef UATOMIC_HAS_ATOMIC_SHORT
+	case 2:
+	{
+		unsigned short old;
+
+		do {
+			old = uatomic_read((unsigned short *)addr);
+		} while (!__sync_bool_compare_and_swap_2(addr, old, val));
+
+		return old;
+	}
+#endif
+	case 4:
+	{
+		unsigned int old;
+
+		do {
+			old = uatomic_read((unsigned int *)addr);
+		} while (!__sync_bool_compare_and_swap_4(addr, old, val));
+
+		return old;
+	}
+#if (CAA_BITS_PER_LONG == 64)
+	case 8:
+	{
+		unsigned long old;
+
+		do {
+			old = uatomic_read((unsigned long *)addr);
+		} while (!__sync_bool_compare_and_swap_8(addr, old, val));
+
+		return old;
+	}
+#endif
+	}
+	_uatomic_link_error();
+	return 0;
+}
+
+#define uatomic_xchg(addr, v)						    \
+	((__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \
+						sizeof(*(addr))))
+#endif /* #ifndef uatomic_xchg */
+
+#else /* #ifndef uatomic_cmpxchg */
+
+#ifndef uatomic_and
+/* uatomic_and */
+
+/*
+ * Atomic AND of val into *addr, implemented as a cmpxchg retry loop for
+ * architectures whose only native atomic primitive is cmpxchg.
+ */
+static inline __attribute__((always_inline))
+void _uatomic_and(void *addr, unsigned long val, int len)
+{
+	switch (len) {
+#ifdef UATOMIC_HAS_ATOMIC_BYTE
+	case 1:
+	{
+		unsigned char old, oldt;
+
+		oldt = uatomic_read((unsigned char *)addr);
+		do {
+			old = oldt;
+			oldt = _uatomic_cmpxchg(addr, old, old & val, 1);
+		} while (oldt != old);
+
+		return;
+	}
+#endif
+#ifdef UATOMIC_HAS_ATOMIC_SHORT
+	case 2:
+	{
+		unsigned short old, oldt;
+
+		oldt = uatomic_read((unsigned short *)addr);
+		do {
+			old = oldt;
+			oldt = _uatomic_cmpxchg(addr, old, old & val, 2);
+		} while (oldt != old);
+
+		/*
+		 * Bugfix: return was missing here, so the 2-byte case fell
+		 * through into the 4-byte case and performed a second,
+		 * 4-byte-wide AND, corrupting the bytes adjacent to the
+		 * short. Matches the 2-byte case of _uatomic_or.
+		 */
+		return;
+	}
+#endif
+	case 4:
+	{
+		unsigned int old, oldt;
+
+		oldt = uatomic_read((unsigned int *)addr);
+		do {
+			old = oldt;
+			oldt = _uatomic_cmpxchg(addr, old, old & val, 4);
+		} while (oldt != old);
+
+		return;
+	}
+#if (CAA_BITS_PER_LONG == 64)
+	case 8:
+	{
+		unsigned long old, oldt;
+
+		oldt = uatomic_read((unsigned long *)addr);
+		do {
+			old = oldt;
+			oldt = _uatomic_cmpxchg(addr, old, old & val, 8);
+		} while (oldt != old);
+
+		return;
+	}
+#endif
+	}
+	_uatomic_link_error();
+}
+
+#define uatomic_and(addr, v)		\
+	(_uatomic_and((addr),		\
+		(unsigned long)(v),	\
+		sizeof(*(addr))))
+#endif /* #ifndef uatomic_and */
+
+#ifndef uatomic_or
+/* uatomic_or */
+
+/*
+ * Atomic OR of val into *addr, implemented as a cmpxchg retry loop for
+ * architectures whose only native atomic primitive is cmpxchg.
+ */
+static inline __attribute__((always_inline))
+void _uatomic_or(void *addr, unsigned long val, int len)
+{
+	switch (len) {
+#ifdef UATOMIC_HAS_ATOMIC_BYTE
+	case 1:
+	{
+		unsigned char old, oldt;
+
+		oldt = uatomic_read((unsigned char *)addr);
+		do {
+			old = oldt;
+			oldt = _uatomic_cmpxchg(addr, old, old | val, 1);
+		} while (oldt != old);
+
+		return;
+	}
+#endif
+#ifdef UATOMIC_HAS_ATOMIC_SHORT
+	case 2:
+	{
+		unsigned short old, oldt;
+
+		oldt = uatomic_read((unsigned short *)addr);
+		do {
+			old = oldt;
+			oldt = _uatomic_cmpxchg(addr, old, old | val, 2);
+		} while (oldt != old);
+
+		return;
+	}
+#endif
+	case 4:
+	{
+		unsigned int old, oldt;
+
+		oldt = uatomic_read((unsigned int *)addr);
+		do {
+			old = oldt;
+			oldt = _uatomic_cmpxchg(addr, old, old | val, 4);
+		} while (oldt != old);
+
+		return;
+	}
+#if (CAA_BITS_PER_LONG == 64)
+	case 8:
+	{
+		unsigned long old, oldt;
+
+		oldt = uatomic_read((unsigned long *)addr);
+		do {
+			old = oldt;
+			oldt = _uatomic_cmpxchg(addr, old, old | val, 8);
+		} while (oldt != old);
+
+		return;
+	}
+#endif
+	}
+	_uatomic_link_error();
+}
+
+#define uatomic_or(addr, v)		\
+	(_uatomic_or((addr),		\
+		(unsigned long)(v),\
+		sizeof(*(addr))))
+#endif /* #ifndef uatomic_or */
+
+#ifndef uatomic_add_return
+/* uatomic_add_return */
+
+/*
+ * Atomically add val to *addr via a cmpxchg retry loop; returns the
+ * new value (add-and-fetch semantics).
+ */
+static inline __attribute__((always_inline))
+unsigned long _uatomic_add_return(void *addr, unsigned long val, int len)
+{
+	switch (len) {
+#ifdef UATOMIC_HAS_ATOMIC_BYTE
+	case 1:
+	{
+		unsigned char old, oldt;
+
+		oldt = uatomic_read((unsigned char *)addr);
+		do {
+			old = oldt;
+			oldt = uatomic_cmpxchg((unsigned char *)addr,
+						old, old + val);
+		} while (oldt != old);
+
+		return old + val;
+	}
+#endif
+#ifdef UATOMIC_HAS_ATOMIC_SHORT
+	case 2:
+	{
+		unsigned short old, oldt;
+
+		oldt = uatomic_read((unsigned short *)addr);
+		do {
+			old = oldt;
+			oldt = uatomic_cmpxchg((unsigned short *)addr,
+						old, old + val);
+		} while (oldt != old);
+
+		return old + val;
+	}
+#endif
+	case 4:
+	{
+		unsigned int old, oldt;
+
+		oldt = uatomic_read((unsigned int *)addr);
+		do {
+			old = oldt;
+			oldt = uatomic_cmpxchg((unsigned int *)addr,
+						old, old + val);
+		} while (oldt != old);
+
+		return old + val;
+	}
+#if (CAA_BITS_PER_LONG == 64)
+	case 8:
+	{
+		unsigned long old, oldt;
+
+		oldt = uatomic_read((unsigned long *)addr);
+		do {
+			old = oldt;
+			oldt = uatomic_cmpxchg((unsigned long *)addr,
+						old, old + val);
+		} while (oldt != old);
+
+		return old + val;
+	}
+#endif
+	}
+	_uatomic_link_error();
+	return 0;
+}
+
+#define uatomic_add_return(addr, v)					    \
+	((__typeof__(*(addr))) _uatomic_add_return((addr),		    \
+						  (unsigned long)(v),	    \
+						  sizeof(*(addr))))
+#endif /* #ifndef uatomic_add_return */
+
+#ifndef uatomic_xchg
+/* xchg */
+
+/*
+ * Atomically exchange *addr with val via a cmpxchg retry loop; returns
+ * the prior value.
+ */
+static inline __attribute__((always_inline))
+unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
+{
+	switch (len) {
+#ifdef UATOMIC_HAS_ATOMIC_BYTE
+	case 1:
+	{
+		unsigned char old, oldt;
+
+		oldt = uatomic_read((unsigned char *)addr);
+		do {
+			old = oldt;
+			oldt = uatomic_cmpxchg((unsigned char *)addr,
+						old, val);
+		} while (oldt != old);
+
+		return old;
+	}
+#endif
+#ifdef UATOMIC_HAS_ATOMIC_SHORT
+	case 2:
+	{
+		unsigned short old, oldt;
+
+		oldt = uatomic_read((unsigned short *)addr);
+		do {
+			old = oldt;
+			oldt = uatomic_cmpxchg((unsigned short *)addr,
+						old, val);
+		} while (oldt != old);
+
+		return old;
+	}
+#endif
+	case 4:
+	{
+		unsigned int old, oldt;
+
+		oldt = uatomic_read((unsigned int *)addr);
+		do {
+			old = oldt;
+			oldt = uatomic_cmpxchg((unsigned int *)addr,
+						old, val);
+		} while (oldt != old);
+
+		return old;
+	}
+#if (CAA_BITS_PER_LONG == 64)
+	case 8:
+	{
+		unsigned long old, oldt;
+
+		oldt = uatomic_read((unsigned long *)addr);
+		do {
+			old = oldt;
+			oldt = uatomic_cmpxchg((unsigned long *)addr,
+						old, val);
+		} while (oldt != old);
+
+		return old;
+	}
+#endif
+	}
+	_uatomic_link_error();
+	return 0;
+}
+
+#define uatomic_xchg(addr, v)						    \
+	((__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \
+						sizeof(*(addr))))
+#endif /* #ifndef uatomic_xchg */
+
+#endif /* #else #ifndef uatomic_cmpxchg */
+
+/* uatomic_sub_return, uatomic_add, uatomic_sub, uatomic_inc, uatomic_dec */
+
+#ifndef uatomic_add
+#define uatomic_add(addr, v) (void)uatomic_add_return((addr), (v))
+#endif
+
+#define uatomic_sub_return(addr, v) uatomic_add_return((addr), -(v))
+#define uatomic_sub(addr, v) uatomic_add((addr), -(v))
+
+#ifndef uatomic_inc
+#define uatomic_inc(addr) uatomic_add((addr), 1)
+#endif
+
+#ifndef uatomic_dec
+#define uatomic_dec(addr) uatomic_add((addr), -1)
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _URCU_UATOMIC_GENERIC_H */
--- /dev/null
+#ifndef _URCU_ARCH_UATOMIC_PPC_H
+#define _URCU_ARCH_UATOMIC_PPC_H
+
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
+ * Copyright (c) 2009 Mathieu Desnoyers
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ * Code inspired from libuatomic_ops-1.2, inherited in part from the
+ * Boehm-Demers-Weiser conservative garbage collector.
+ */
+
+#include <urcu/compiler.h>
+#include <urcu/system.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef __NO_LWSYNC__
+#define LWSYNC_OPCODE "sync\n"
+#else
+#define LWSYNC_OPCODE "lwsync\n"
+#endif
+
+#define ILLEGAL_INSTR ".long 0xd00d00"
+
+/*
+ * Using a isync as second barrier for exchange to provide acquire semantic.
+ * According to uatomic_ops/sysdeps/gcc/powerpc.h, the documentation is "fairly
+ * explicit that this also has acquire semantics."
+ * Derived from AO_compare_and_swap(), but removed the comparison.
+ */
+
+/* xchg */
+
+/*
+ * PowerPC atomic exchange: lwarx/stwcx. (ldarx/stdcx. for 64-bit)
+ * reservation loop, lwsync before and isync after for acquire semantics.
+ */
+static inline __attribute__((always_inline))
+unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
+{
+	switch (len) {
+	case 4:
+	{
+		unsigned int result;
+
+		__asm__ __volatile__(
+			LWSYNC_OPCODE
+		"1:\t"	"lwarx %0,0,%1\n"	/* load and reserve */
+			"stwcx. %2,0,%1\n"	/* store conditional */
+			"bne- 1b\n"	 	/* retry if lost reservation */
+			"isync\n"
+				: "=&r"(result)
+				: "r"(addr), "r"(val)
+				: "memory", "cc");
+
+		return result;
+	}
+#if (CAA_BITS_PER_LONG == 64)
+	case 8:
+	{
+		unsigned long result;
+
+		__asm__ __volatile__(
+			LWSYNC_OPCODE
+		"1:\t"	"ldarx %0,0,%1\n"	/* load and reserve */
+			"stdcx. %2,0,%1\n"	/* store conditional */
+			"bne- 1b\n"	 	/* retry if lost reservation */
+			"isync\n"
+				: "=&r"(result)
+				: "r"(addr), "r"(val)
+				: "memory", "cc");
+
+		return result;
+	}
+#endif
+	}
+	/* generate an illegal instruction. Cannot catch this with linker tricks
+	 * when optimizations are disabled. */
+	__asm__ __volatile__(ILLEGAL_INSTR);
+	return 0;
+}
+
+#define uatomic_xchg(addr, v)						    \
+	((__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \
+						sizeof(*(addr))))
+/* cmpxchg */
+
+/*
+ * PowerPC compare-and-swap: reservation loop that bails out to label 2
+ * when the loaded value does not match old. Returns the prior value.
+ */
+static inline __attribute__((always_inline))
+unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
+			      unsigned long _new, int len)
+{
+	switch (len) {
+	case 4:
+	{
+		unsigned int old_val;
+
+		__asm__ __volatile__(
+			LWSYNC_OPCODE
+		"1:\t"	"lwarx %0,0,%1\n"	/* load and reserve */
+			"cmpw %0,%3\n"		/* if load is not equal to */
+			"bne 2f\n"		/* old, fail */
+			"stwcx. %2,0,%1\n"	/* else store conditional */
+			"bne- 1b\n"	 	/* retry if lost reservation */
+			"isync\n"
+		"2:\n"
+				: "=&r"(old_val)
+				: "r"(addr), "r"((unsigned int)_new),
+				  "r"((unsigned int)old)
+				: "memory", "cc");
+
+		return old_val;
+	}
+#if (CAA_BITS_PER_LONG == 64)
+	case 8:
+	{
+		unsigned long old_val;
+
+		__asm__ __volatile__(
+			LWSYNC_OPCODE
+		"1:\t"	"ldarx %0,0,%1\n"	/* load and reserve */
+			"cmpd %0,%3\n"		/* if load is not equal to */
+			"bne 2f\n"		/* old, fail */
+			"stdcx. %2,0,%1\n"	/* else store conditional */
+			"bne- 1b\n"	 	/* retry if lost reservation */
+			"isync\n"
+		"2:\n"
+				: "=&r"(old_val)
+				: "r"(addr), "r"((unsigned long)_new),
+				  "r"((unsigned long)old)
+				: "memory", "cc");
+
+		return old_val;
+	}
+#endif
+	}
+	/* generate an illegal instruction. Cannot catch this with linker tricks
+	 * when optimizations are disabled. */
+	__asm__ __volatile__(ILLEGAL_INSTR);
+	return 0;
+}
+
+
+#define uatomic_cmpxchg(addr, old, _new)				    \
+	((__typeof__(*(addr))) _uatomic_cmpxchg((addr), (unsigned long)(old),\
+						(unsigned long)(_new),	    \
+						sizeof(*(addr))))
+
+/* uatomic_add_return */
+
+/*
+ * PowerPC atomic add-and-fetch: reservation loop adding val in-register
+ * before the conditional store. Returns the new value.
+ */
+static inline __attribute__((always_inline))
+unsigned long _uatomic_add_return(void *addr, unsigned long val,
+				 int len)
+{
+	switch (len) {
+	case 4:
+	{
+		unsigned int result;
+
+		__asm__ __volatile__(
+			LWSYNC_OPCODE
+		"1:\t"	"lwarx %0,0,%1\n"	/* load and reserve */
+			"add %0,%2,%0\n"	/* add val to value loaded */
+			"stwcx. %0,0,%1\n"	/* store conditional */
+			"bne- 1b\n"	 	/* retry if lost reservation */
+			"isync\n"
+				: "=&r"(result)
+				: "r"(addr), "r"(val)
+				: "memory", "cc");
+
+		return result;
+	}
+#if (CAA_BITS_PER_LONG == 64)
+	case 8:
+	{
+		unsigned long result;
+
+		__asm__ __volatile__(
+			LWSYNC_OPCODE
+		"1:\t"	"ldarx %0,0,%1\n"	/* load and reserve */
+			"add %0,%2,%0\n"	/* add val to value loaded */
+			"stdcx. %0,0,%1\n"	/* store conditional */
+			"bne- 1b\n"	 	/* retry if lost reservation */
+			"isync\n"
+				: "=&r"(result)
+				: "r"(addr), "r"(val)
+				: "memory", "cc");
+
+		return result;
+	}
+#endif
+	}
+	/* generate an illegal instruction. Cannot catch this with linker tricks
+	 * when optimizations are disabled. */
+	__asm__ __volatile__(ILLEGAL_INSTR);
+	return 0;
+}
+
+
+#define uatomic_add_return(addr, v)					    \
+	((__typeof__(*(addr))) _uatomic_add_return((addr),		    \
+						  (unsigned long)(v),	    \
+						  sizeof(*(addr))))
+
+#ifdef __cplusplus
+}
+#endif
+
+#include <urcu/uatomic/generic.h>
+
+#endif /* _URCU_ARCH_UATOMIC_PPC_H */
--- /dev/null
+#ifndef _URCU_UATOMIC_ARCH_S390_H
+#define _URCU_UATOMIC_ARCH_S390_H
+
+/*
+ * Atomic exchange operations for the S390 architecture. Based on information
+ * taken from the Principles of Operation Appendix A "Conditional Swapping
+ * Instructions (CS, CDS)".
+ *
+ * Copyright (c) 2009 Novell, Inc.
+ * Author: Jan Blunck <jblunck@suse.de>
+ * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <urcu/compiler.h>
+#include <urcu/system.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
+#define COMPILER_HAVE_SHORT_MEM_OPERAND
+#endif
+
+/*
+ * MEMOP assembler operand rules:
+ * - op refer to MEMOP_IN operand
+ * - MEMOP_IN can expand to more than a single operand. Use it at the end of
+ * operand list only.
+ */
+
+#ifdef COMPILER_HAVE_SHORT_MEM_OPERAND
+
+#define MEMOP_OUT(addr) "=Q" (*(addr))
+#define MEMOP_IN(addr) "Q" (*(addr))
+#define MEMOP_REF(op) #op /* op refers to the MEMOP_IN operand */
+
+#else /* !COMPILER_HAVE_SHORT_MEM_OPERAND */
+
+#define MEMOP_OUT(addr) "=m" (*(addr))
+#define MEMOP_IN(addr) "a" (addr), "m" (*(addr))
+#define MEMOP_REF(op) "0(" #op ")" /* op refers to the MEMOP_IN operand */
+
+#endif /* !COMPILER_HAVE_SHORT_MEM_OPERAND */
+
+struct __uatomic_dummy {
+ unsigned long v[10];
+};
+#define __hp(x) ((struct __uatomic_dummy *)(x))
+
+/* xchg */
+
+static inline __attribute__((always_inline))
+unsigned long _uatomic_exchange(volatile void *addr, unsigned long val, int len)
+{
+ switch (len) {
+ case 4:
+ {
+ unsigned int old_val;
+
+ __asm__ __volatile__(
+ "0: cs %0,%2," MEMOP_REF(%3) "\n"
+ " brc 4,0b\n"
+ : "=&r" (old_val), MEMOP_OUT (__hp(addr))
+ : "r" (val), MEMOP_IN (__hp(addr))
+ : "memory", "cc");
+ return old_val;
+ }
+#if (CAA_BITS_PER_LONG == 64)
+ case 8:
+ {
+ unsigned long old_val;
+
+ __asm__ __volatile__(
+ "0: csg %0,%2," MEMOP_REF(%3) "\n"
+ " brc 4,0b\n"
+ : "=&r" (old_val), MEMOP_OUT (__hp(addr))
+ : "r" (val), MEMOP_IN (__hp(addr))
+ : "memory", "cc");
+ return old_val;
+ }
+#endif
+ default:
+ __asm__ __volatile__(".long 0xd00d00");
+ }
+
+ return 0;
+}
+
+#define uatomic_xchg(addr, v) \
+ (__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \
+ sizeof(*(addr)))
+
+/* cmpxchg */
+
+static inline __attribute__((always_inline))
+unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
+ unsigned long _new, int len)
+{
+ switch (len) {
+ case 4:
+ {
+ unsigned int old_val = (unsigned int)old;
+
+ __asm__ __volatile__(
+ " cs %0,%2," MEMOP_REF(%3) "\n"
+ : "+r" (old_val), MEMOP_OUT (__hp(addr))
+ : "r" (_new), MEMOP_IN (__hp(addr))
+ : "memory", "cc");
+ return old_val;
+ }
+#if (CAA_BITS_PER_LONG == 64)
+ case 8:
+ {
+ __asm__ __volatile__(
+ " csg %0,%2," MEMOP_REF(%3) "\n"
+ : "+r" (old), MEMOP_OUT (__hp(addr))
+ : "r" (_new), MEMOP_IN (__hp(addr))
+ : "memory", "cc");
+ return old;
+ }
+#endif
+ default:
+ __asm__ __volatile__(".long 0xd00d00");
+ }
+
+ return 0;
+}
+
+#define uatomic_cmpxchg(addr, old, _new) \
+ (__typeof__(*(addr))) _uatomic_cmpxchg((addr), \
+ (unsigned long)(old), \
+ (unsigned long)(_new), \
+ sizeof(*(addr)))
+
+#ifdef __cplusplus
+}
+#endif
+
+#include <urcu/uatomic/generic.h>
+
+#endif /* _URCU_UATOMIC_ARCH_S390_H */
--- /dev/null
+#ifndef _URCU_ARCH_UATOMIC_SPARC64_H
+#define _URCU_ARCH_UATOMIC_SPARC64_H
+
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved.
+ * Copyright (c) 2009 Mathieu Desnoyers
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ * Code inspired from libuatomic_ops-1.2, inherited in part from the
+ * Boehm-Demers-Weiser conservative garbage collector.
+ */
+
+#include <urcu/compiler.h>
+#include <urcu/system.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* cmpxchg */
+
+static inline __attribute__((always_inline))
+unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
+ unsigned long _new, int len)
+{
+ switch (len) {
+ case 4:
+ {
+ __asm__ __volatile__ (
+ "membar #StoreLoad | #LoadLoad\n\t"
+ "cas [%1],%2,%0\n\t"
+ "membar #StoreLoad | #StoreStore\n\t"
+ : "+&r" (_new)
+ : "r" (addr), "r" (old)
+ : "memory");
+
+ return _new;
+ }
+#if (CAA_BITS_PER_LONG == 64)
+ case 8:
+ {
+ __asm__ __volatile__ (
+ "membar #StoreLoad | #LoadLoad\n\t"
+ "casx [%1],%2,%0\n\t"
+ "membar #StoreLoad | #StoreStore\n\t"
+ : "+&r" (_new)
+ : "r" (addr), "r" (old)
+ : "memory");
+
+ return _new;
+ }
+#endif
+ }
+ __builtin_trap();
+ return 0;
+}
+
+
+#define uatomic_cmpxchg(addr, old, _new) \
+ ((__typeof__(*(addr))) _uatomic_cmpxchg((addr), (unsigned long)(old),\
+ (unsigned long)(_new), \
+ sizeof(*(addr))))
+
+#ifdef __cplusplus
+}
+#endif
+
+#include <urcu/uatomic/generic.h>
+
+#endif /* _URCU_ARCH_UATOMIC_SPARC64_H */
--- /dev/null
+#ifndef _URCU_ARCH_UATOMIC_UNKNOWN_H
+#define _URCU_ARCH_UATOMIC_UNKNOWN_H
+
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
+ * Copyright (c) 2009 Mathieu Desnoyers
+ * Copyright (c) 2010 Paul E. McKenney, IBM Corporation
+ * (Adapted from uatomic_arch_ppc.h)
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ */
+
+/* See configure.ac for the list of recognized architectures. */
+#error "Cannot build: unrecognized architecture detected."
+
+#endif /* _URCU_ARCH_UATOMIC_UNKNOWN_H */
--- /dev/null
+#ifndef _URCU_ARCH_UATOMIC_X86_H
+#define _URCU_ARCH_UATOMIC_X86_H
+
+/*
+ * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
+ * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
+ * Copyright (c) 2009 Mathieu Desnoyers
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
+ * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program
+ * for any purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ * Code inspired from libuatomic_ops-1.2, inherited in part from the
+ * Boehm-Demers-Weiser conservative garbage collector.
+ */
+
+#include <urcu/compiler.h>
+#include <urcu/system.h>
+
+#define UATOMIC_HAS_ATOMIC_BYTE
+#define UATOMIC_HAS_ATOMIC_SHORT
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Derived from AO_compare_and_swap() and AO_test_and_set_full().
+ */
+
+struct __uatomic_dummy {
+ unsigned long v[10];
+};
+#define __hp(x) ((struct __uatomic_dummy *)(x))
+
+#define _uatomic_set(addr, v) CMM_STORE_SHARED(*(addr), (v))
+
+/* cmpxchg */
+
+static inline __attribute__((always_inline))
+unsigned long __uatomic_cmpxchg(void *addr, unsigned long old,
+ unsigned long _new, int len)
+{
+ switch (len) {
+ case 1:
+ {
+ unsigned char result = old;
+
+ __asm__ __volatile__(
+ "lock; cmpxchgb %2, %1"
+ : "+a"(result), "+m"(*__hp(addr))
+ : "q"((unsigned char)_new)
+ : "memory");
+ return result;
+ }
+ case 2:
+ {
+ unsigned short result = old;
+
+ __asm__ __volatile__(
+ "lock; cmpxchgw %2, %1"
+ : "+a"(result), "+m"(*__hp(addr))
+ : "r"((unsigned short)_new)
+ : "memory");
+ return result;
+ }
+ case 4:
+ {
+ unsigned int result = old;
+
+ __asm__ __volatile__(
+ "lock; cmpxchgl %2, %1"
+ : "+a"(result), "+m"(*__hp(addr))
+ : "r"((unsigned int)_new)
+ : "memory");
+ return result;
+ }
+#if (CAA_BITS_PER_LONG == 64)
+ case 8:
+ {
+ unsigned long result = old;
+
+ __asm__ __volatile__(
+ "lock; cmpxchgq %2, %1"
+ : "+a"(result), "+m"(*__hp(addr))
+ : "r"((unsigned long)_new)
+ : "memory");
+ return result;
+ }
+#endif
+ }
+ /* generate an illegal instruction. Cannot catch this with linker tricks
+ * when optimizations are disabled. */
+ __asm__ __volatile__("ud2");
+ return 0;
+}
+
+#define _uatomic_cmpxchg(addr, old, _new) \
+ ((__typeof__(*(addr))) __uatomic_cmpxchg((addr), (unsigned long)(old),\
+ (unsigned long)(_new), \
+ sizeof(*(addr))))
+
+/* xchg */
+
+static inline __attribute__((always_inline))
+unsigned long __uatomic_exchange(void *addr, unsigned long val, int len)
+{
+ /* Note: the "xchg" instruction does not need a "lock" prefix. */
+ switch (len) {
+ case 1:
+ {
+ unsigned char result;
+ __asm__ __volatile__(
+ "xchgb %0, %1"
+ : "=q"(result), "+m"(*__hp(addr))
+ : "0" ((unsigned char)val)
+ : "memory");
+ return result;
+ }
+ case 2:
+ {
+ unsigned short result;
+ __asm__ __volatile__(
+ "xchgw %0, %1"
+ : "=r"(result), "+m"(*__hp(addr))
+ : "0" ((unsigned short)val)
+ : "memory");
+ return result;
+ }
+ case 4:
+ {
+ unsigned int result;
+ __asm__ __volatile__(
+ "xchgl %0, %1"
+ : "=r"(result), "+m"(*__hp(addr))
+ : "0" ((unsigned int)val)
+ : "memory");
+ return result;
+ }
+#if (CAA_BITS_PER_LONG == 64)
+ case 8:
+ {
+ unsigned long result;
+ __asm__ __volatile__(
+ "xchgq %0, %1"
+ : "=r"(result), "+m"(*__hp(addr))
+ : "0" ((unsigned long)val)
+ : "memory");
+ return result;
+ }
+#endif
+ }
+ /* generate an illegal instruction. Cannot catch this with linker tricks
+ * when optimizations are disabled. */
+ __asm__ __volatile__("ud2");
+ return 0;
+}
+
+#define _uatomic_xchg(addr, v) \
+ ((__typeof__(*(addr))) __uatomic_exchange((addr), (unsigned long)(v), \
+ sizeof(*(addr))))
+
+/* uatomic_add_return */
+
+static inline __attribute__((always_inline))
+unsigned long __uatomic_add_return(void *addr, unsigned long val,
+ int len)
+{
+ switch (len) {
+ case 1:
+ {
+ unsigned char result = val;
+
+ __asm__ __volatile__(
+ "lock; xaddb %1, %0"
+ : "+m"(*__hp(addr)), "+q" (result)
+ :
+ : "memory");
+ return result + (unsigned char)val;
+ }
+ case 2:
+ {
+ unsigned short result = val;
+
+ __asm__ __volatile__(
+ "lock; xaddw %1, %0"
+ : "+m"(*__hp(addr)), "+r" (result)
+ :
+ : "memory");
+ return result + (unsigned short)val;
+ }
+ case 4:
+ {
+ unsigned int result = val;
+
+ __asm__ __volatile__(
+ "lock; xaddl %1, %0"
+ : "+m"(*__hp(addr)), "+r" (result)
+ :
+ : "memory");
+ return result + (unsigned int)val;
+ }
+#if (CAA_BITS_PER_LONG == 64)
+ case 8:
+ {
+ unsigned long result = val;
+
+ __asm__ __volatile__(
+ "lock; xaddq %1, %0"
+ : "+m"(*__hp(addr)), "+r" (result)
+ :
+ : "memory");
+ return result + (unsigned long)val;
+ }
+#endif
+ }
+ /* generate an illegal instruction. Cannot catch this with linker tricks
+ * when optimizations are disabled. */
+ __asm__ __volatile__("ud2");
+ return 0;
+}
+
+#define _uatomic_add_return(addr, v) \
+ ((__typeof__(*(addr))) __uatomic_add_return((addr), \
+ (unsigned long)(v), \
+ sizeof(*(addr))))
+
+/* uatomic_and */
+
+static inline __attribute__((always_inline))
+void __uatomic_and(void *addr, unsigned long val, int len)
+{
+ switch (len) {
+ case 1:
+ {
+ __asm__ __volatile__(
+ "lock; andb %1, %0"
+ : "=m"(*__hp(addr))
+ : "iq" ((unsigned char)val)
+ : "memory");
+ return;
+ }
+ case 2:
+ {
+ __asm__ __volatile__(
+ "lock; andw %1, %0"
+ : "=m"(*__hp(addr))
+ : "ir" ((unsigned short)val)
+ : "memory");
+ return;
+ }
+ case 4:
+ {
+ __asm__ __volatile__(
+ "lock; andl %1, %0"
+ : "=m"(*__hp(addr))
+ : "ir" ((unsigned int)val)
+ : "memory");
+ return;
+ }
+#if (CAA_BITS_PER_LONG == 64)
+ case 8:
+ {
+ __asm__ __volatile__(
+ "lock; andq %1, %0"
+ : "=m"(*__hp(addr))
+ : "er" ((unsigned long)val)
+ : "memory");
+ return;
+ }
+#endif
+ }
+ /* generate an illegal instruction. Cannot catch this with linker tricks
+ * when optimizations are disabled. */
+ __asm__ __volatile__("ud2");
+ return;
+}
+
+#define _uatomic_and(addr, v) \
+ (__uatomic_and((addr), (unsigned long)(v), sizeof(*(addr))))
+
+/* uatomic_or */
+
+static inline __attribute__((always_inline))
+void __uatomic_or(void *addr, unsigned long val, int len)
+{
+ switch (len) {
+ case 1:
+ {
+ __asm__ __volatile__(
+ "lock; orb %1, %0"
+ : "=m"(*__hp(addr))
+ : "iq" ((unsigned char)val)
+ : "memory");
+ return;
+ }
+ case 2:
+ {
+ __asm__ __volatile__(
+ "lock; orw %1, %0"
+ : "=m"(*__hp(addr))
+ : "ir" ((unsigned short)val)
+ : "memory");
+ return;
+ }
+ case 4:
+ {
+ __asm__ __volatile__(
+ "lock; orl %1, %0"
+ : "=m"(*__hp(addr))
+ : "ir" ((unsigned int)val)
+ : "memory");
+ return;
+ }
+#if (CAA_BITS_PER_LONG == 64)
+ case 8:
+ {
+ __asm__ __volatile__(
+ "lock; orq %1, %0"
+ : "=m"(*__hp(addr))
+ : "er" ((unsigned long)val)
+ : "memory");
+ return;
+ }
+#endif
+ }
+ /* generate an illegal instruction. Cannot catch this with linker tricks
+ * when optimizations are disabled. */
+ __asm__ __volatile__("ud2");
+ return;
+}
+
+#define _uatomic_or(addr, v) \
+ (__uatomic_or((addr), (unsigned long)(v), sizeof(*(addr))))
+
+/* uatomic_add */
+
+static inline __attribute__((always_inline))
+void __uatomic_add(void *addr, unsigned long val, int len)
+{
+ switch (len) {
+ case 1:
+ {
+ __asm__ __volatile__(
+ "lock; addb %1, %0"
+ : "=m"(*__hp(addr))
+ : "iq" ((unsigned char)val)
+ : "memory");
+ return;
+ }
+ case 2:
+ {
+ __asm__ __volatile__(
+ "lock; addw %1, %0"
+ : "=m"(*__hp(addr))
+ : "ir" ((unsigned short)val)
+ : "memory");
+ return;
+ }
+ case 4:
+ {
+ __asm__ __volatile__(
+ "lock; addl %1, %0"
+ : "=m"(*__hp(addr))
+ : "ir" ((unsigned int)val)
+ : "memory");
+ return;
+ }
+#if (CAA_BITS_PER_LONG == 64)
+ case 8:
+ {
+ __asm__ __volatile__(
+ "lock; addq %1, %0"
+ : "=m"(*__hp(addr))
+ : "er" ((unsigned long)val)
+ : "memory");
+ return;
+ }
+#endif
+ }
+ /* generate an illegal instruction. Cannot catch this with linker tricks
+ * when optimizations are disabled. */
+ __asm__ __volatile__("ud2");
+ return;
+}
+
+#define _uatomic_add(addr, v) \
+ (__uatomic_add((addr), (unsigned long)(v), sizeof(*(addr))))
+
+
+/* uatomic_inc */
+
+static inline __attribute__((always_inline))
+void __uatomic_inc(void *addr, int len)
+{
+ switch (len) {
+ case 1:
+ {
+ __asm__ __volatile__(
+ "lock; incb %0"
+ : "=m"(*__hp(addr))
+ :
+ : "memory");
+ return;
+ }
+ case 2:
+ {
+ __asm__ __volatile__(
+ "lock; incw %0"
+ : "=m"(*__hp(addr))
+ :
+ : "memory");
+ return;
+ }
+ case 4:
+ {
+ __asm__ __volatile__(
+ "lock; incl %0"
+ : "=m"(*__hp(addr))
+ :
+ : "memory");
+ return;
+ }
+#if (CAA_BITS_PER_LONG == 64)
+ case 8:
+ {
+ __asm__ __volatile__(
+ "lock; incq %0"
+ : "=m"(*__hp(addr))
+ :
+ : "memory");
+ return;
+ }
+#endif
+ }
+ /* generate an illegal instruction. Cannot catch this with linker tricks
+ * when optimizations are disabled. */
+ __asm__ __volatile__("ud2");
+ return;
+}
+
+#define _uatomic_inc(addr) (__uatomic_inc((addr), sizeof(*(addr))))
+
+/* uatomic_dec */
+
+static inline __attribute__((always_inline))
+void __uatomic_dec(void *addr, int len)
+{
+ switch (len) {
+ case 1:
+ {
+ __asm__ __volatile__(
+ "lock; decb %0"
+ : "=m"(*__hp(addr))
+ :
+ : "memory");
+ return;
+ }
+ case 2:
+ {
+ __asm__ __volatile__(
+ "lock; decw %0"
+ : "=m"(*__hp(addr))
+ :
+ : "memory");
+ return;
+ }
+ case 4:
+ {
+ __asm__ __volatile__(
+ "lock; decl %0"
+ : "=m"(*__hp(addr))
+ :
+ : "memory");
+ return;
+ }
+#if (CAA_BITS_PER_LONG == 64)
+ case 8:
+ {
+ __asm__ __volatile__(
+ "lock; decq %0"
+ : "=m"(*__hp(addr))
+ :
+ : "memory");
+ return;
+ }
+#endif
+ }
+ /* generate an illegal instruction. Cannot catch this with linker tricks
+ * when optimizations are disabled. */
+ __asm__ __volatile__("ud2");
+ return;
+}
+
+#define _uatomic_dec(addr) (__uatomic_dec((addr), sizeof(*(addr))))
+
+#if ((CAA_BITS_PER_LONG != 64) && defined(CONFIG_RCU_COMPAT_ARCH))
+extern int __rcu_cas_avail;
+extern int __rcu_cas_init(void);
+
+#define UATOMIC_COMPAT(insn) \
+ ((likely(__rcu_cas_avail > 0)) \
+ ? (_uatomic_##insn) \
+ : ((unlikely(__rcu_cas_avail < 0) \
+ ? ((__rcu_cas_init() > 0) \
+ ? (_uatomic_##insn) \
+ : (compat_uatomic_##insn)) \
+ : (compat_uatomic_##insn))))
+
+extern unsigned long _compat_uatomic_set(void *addr,
+ unsigned long _new, int len);
+#define compat_uatomic_set(addr, _new) \
+ ((__typeof__(*(addr))) _compat_uatomic_set((addr), \
+ (unsigned long)(_new), \
+ sizeof(*(addr))))
+
+
+extern unsigned long _compat_uatomic_xchg(void *addr,
+ unsigned long _new, int len);
+#define compat_uatomic_xchg(addr, _new) \
+ ((__typeof__(*(addr))) _compat_uatomic_xchg((addr), \
+ (unsigned long)(_new), \
+ sizeof(*(addr))))
+
+extern unsigned long _compat_uatomic_cmpxchg(void *addr, unsigned long old,
+ unsigned long _new, int len);
+#define compat_uatomic_cmpxchg(addr, old, _new) \
+ ((__typeof__(*(addr))) _compat_uatomic_cmpxchg((addr), \
+ (unsigned long)(old), \
+ (unsigned long)(_new), \
+ sizeof(*(addr))))
+
+extern void _compat_uatomic_and(void *addr, unsigned long _new, int len);
+#define compat_uatomic_and(addr, v) \
+ (_compat_uatomic_and((addr), \
+ (unsigned long)(v), \
+ sizeof(*(addr))))
+
+extern void _compat_uatomic_or(void *addr, unsigned long _new, int len);
+#define compat_uatomic_or(addr, v) \
+ (_compat_uatomic_or((addr), \
+ (unsigned long)(v), \
+ sizeof(*(addr))))
+
+extern unsigned long _compat_uatomic_add_return(void *addr,
+ unsigned long _new, int len);
+#define compat_uatomic_add_return(addr, v) \
+ ((__typeof__(*(addr))) _compat_uatomic_add_return((addr), \
+ (unsigned long)(v), \
+ sizeof(*(addr))))
+
+#define compat_uatomic_add(addr, v) \
+ ((void)compat_uatomic_add_return((addr), (v)))
+#define compat_uatomic_inc(addr) \
+ (compat_uatomic_add((addr), 1))
+#define compat_uatomic_dec(addr) \
+ (compat_uatomic_add((addr), -1))
+
+#else
+#define UATOMIC_COMPAT(insn) (_uatomic_##insn)
+#endif
+
+/* Read is atomic even in compat mode */
+#define uatomic_set(addr, v) \
+ UATOMIC_COMPAT(set(addr, v))
+
+#define uatomic_cmpxchg(addr, old, _new) \
+ UATOMIC_COMPAT(cmpxchg(addr, old, _new))
+#define uatomic_xchg(addr, v) \
+ UATOMIC_COMPAT(xchg(addr, v))
+#define uatomic_and(addr, v) \
+ UATOMIC_COMPAT(and(addr, v))
+#define uatomic_or(addr, v) \
+ UATOMIC_COMPAT(or(addr, v))
+#define uatomic_add_return(addr, v) \
+ UATOMIC_COMPAT(add_return(addr, v))
+
+#define uatomic_add(addr, v) UATOMIC_COMPAT(add(addr, v))
+#define uatomic_inc(addr) UATOMIC_COMPAT(inc(addr))
+#define uatomic_dec(addr) UATOMIC_COMPAT(dec(addr))
+
+#ifdef __cplusplus
+}
+#endif
+
+#include <urcu/uatomic/generic.h>
+
+#endif /* _URCU_ARCH_UATOMIC_X86_H */
--- /dev/null
+#warning "urcu/uatomic_arch.h is deprecated. Please include urcu/uatomic.h instead."
+#include <urcu/uatomic.h>
+++ /dev/null
-#ifndef _URCU_UATOMIC_ARCH_ALPHA_H
-#define _URCU_UATOMIC_ARCH_ALPHA_H
-
-/*
- * Atomic exchange operations for the Alpha architecture. Let GCC do it.
- *
- * Copyright (c) 2010 Paolo Bonzini <pbonzini@redhat.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#include <urcu/compiler.h>
-#include <urcu/system.h>
-#include <urcu/uatomic_generic.h>
-
-#endif /* _URCU_UATOMIC_ARCH_ALPHA_H */
+++ /dev/null
-#ifndef _URCU_ARCH_UATOMIC_ARM_H
-#define _URCU_ARCH_UATOMIC_ARM_H
-
-/*
- * Atomics for ARM. This approach is usable on kernels back to 2.6.15.
- *
- * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
- * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
- * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
- * Copyright (c) 2009 Mathieu Desnoyers
- * Copyright (c) 2010 Paul E. McKenney, IBM Corporation
- * (Adapted from uatomic_arch_ppc.h)
- *
- * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
- * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
- *
- * Permission is hereby granted to use or copy this program
- * for any purpose, provided the above notices are retained on all copies.
- * Permission to modify the code and to distribute modified code is granted,
- * provided the above notices are retained, and a notice that the code was
- * modified is included with the above copyright notice.
- *
- * Code inspired from libuatomic_ops-1.2, inherited in part from the
- * Boehm-Demers-Weiser conservative garbage collector.
- */
-
-#include <urcu/compiler.h>
-#include <urcu/system.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* xchg */
-#define uatomic_xchg(addr, v) __sync_lock_test_and_set(addr, v)
-
-/* cmpxchg */
-#define uatomic_cmpxchg(addr, old, _new) \
- __sync_val_compare_and_swap(addr, old, _new)
-
-/* uatomic_add_return */
-#define uatomic_add_return(addr, v) __sync_add_and_fetch(addr, v)
-
-#ifdef __cplusplus
-}
-#endif
-
-#include <urcu/uatomic_generic.h>
-
-#endif /* _URCU_ARCH_UATOMIC_ARM_H */
+++ /dev/null
-#ifndef _URCU_ARCH_UATOMIC_GCC_H
-#define _URCU_ARCH_UATOMIC_GCC_H
-
-/*
- * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
- * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
- * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
- * Copyright (c) 2009 Mathieu Desnoyers
- * Copyright (c) 2010 Paul E. McKenney, IBM Corporation
- * (Adapted from uatomic_arch_ppc.h)
- *
- * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
- * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
- *
- * Permission is hereby granted to use or copy this program
- * for any purpose, provided the above notices are retained on all copies.
- * Permission to modify the code and to distribute modified code is granted,
- * provided the above notices are retained, and a notice that the code was
- * modified is included with the above copyright notice.
- *
- * Code inspired from libuatomic_ops-1.2, inherited in part from the
- * Boehm-Demers-Weiser conservative garbage collector.
- */
-
-#include <urcu/compiler.h>
-#include <urcu/system.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/*
- * If your platform doesn't have a full set of atomics, you will need
- * a separate uatomic_arch_*.h file for your architecture. Otherwise,
- * just rely on the definitions in uatomic_generic.h.
- */
-#define UATOMIC_HAS_ATOMIC_BYTE
-#define UATOMIC_HAS_ATOMIC_SHORT
-
-#ifdef __cplusplus
-}
-#endif
-
-#include <urcu/uatomic_generic.h>
-
-#endif /* _URCU_ARCH_UATOMIC_GCC_H */
+++ /dev/null
-#ifndef _URCU_ARCH_UATOMIC_PPC_H
-#define _URCU_ARCH_UATOMIC_PPC_H
-
-/*
- * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
- * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
- * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
- * Copyright (c) 2009 Mathieu Desnoyers
- *
- * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
- * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
- *
- * Permission is hereby granted to use or copy this program
- * for any purpose, provided the above notices are retained on all copies.
- * Permission to modify the code and to distribute modified code is granted,
- * provided the above notices are retained, and a notice that the code was
- * modified is included with the above copyright notice.
- *
- * Code inspired from libuatomic_ops-1.2, inherited in part from the
- * Boehm-Demers-Weiser conservative garbage collector.
- */
-
-#include <urcu/compiler.h>
-#include <urcu/system.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#ifdef __NO_LWSYNC__
-#define LWSYNC_OPCODE "sync\n"
-#else
-#define LWSYNC_OPCODE "lwsync\n"
-#endif
-
-#define ILLEGAL_INSTR ".long 0xd00d00"
-
-/*
- * Using a isync as second barrier for exchange to provide acquire semantic.
- * According to uatomic_ops/sysdeps/gcc/powerpc.h, the documentation is "fairly
- * explicit that this also has acquire semantics."
- * Derived from AO_compare_and_swap(), but removed the comparison.
- */
-
-/* xchg */
-
-static inline __attribute__((always_inline))
-unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
-{
- switch (len) {
- case 4:
- {
- unsigned int result;
-
- __asm__ __volatile__(
- LWSYNC_OPCODE
- "1:\t" "lwarx %0,0,%1\n" /* load and reserve */
- "stwcx. %2,0,%1\n" /* else store conditional */
- "bne- 1b\n" /* retry if lost reservation */
- "isync\n"
- : "=&r"(result)
- : "r"(addr), "r"(val)
- : "memory", "cc");
-
- return result;
- }
-#if (CAA_BITS_PER_LONG == 64)
- case 8:
- {
- unsigned long result;
-
- __asm__ __volatile__(
- LWSYNC_OPCODE
- "1:\t" "ldarx %0,0,%1\n" /* load and reserve */
- "stdcx. %2,0,%1\n" /* else store conditional */
- "bne- 1b\n" /* retry if lost reservation */
- "isync\n"
- : "=&r"(result)
- : "r"(addr), "r"(val)
- : "memory", "cc");
-
- return result;
- }
-#endif
- }
- /* generate an illegal instruction. Cannot catch this with linker tricks
- * when optimizations are disabled. */
- __asm__ __volatile__(ILLEGAL_INSTR);
- return 0;
-}
-
-#define uatomic_xchg(addr, v) \
- ((__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \
- sizeof(*(addr))))
-/* cmpxchg */
-
-static inline __attribute__((always_inline))
-unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
- unsigned long _new, int len)
-{
- switch (len) {
- case 4:
- {
- unsigned int old_val;
-
- __asm__ __volatile__(
- LWSYNC_OPCODE
- "1:\t" "lwarx %0,0,%1\n" /* load and reserve */
- "cmpw %0,%3\n" /* if load is not equal to */
- "bne 2f\n" /* old, fail */
- "stwcx. %2,0,%1\n" /* else store conditional */
- "bne- 1b\n" /* retry if lost reservation */
- "isync\n"
- "2:\n"
- : "=&r"(old_val)
- : "r"(addr), "r"((unsigned int)_new),
- "r"((unsigned int)old)
- : "memory", "cc");
-
- return old_val;
- }
-#if (CAA_BITS_PER_LONG == 64)
- case 8:
- {
- unsigned long old_val;
-
- __asm__ __volatile__(
- LWSYNC_OPCODE
- "1:\t" "ldarx %0,0,%1\n" /* load and reserve */
- "cmpd %0,%3\n" /* if load is not equal to */
- "bne 2f\n" /* old, fail */
- "stdcx. %2,0,%1\n" /* else store conditional */
- "bne- 1b\n" /* retry if lost reservation */
- "isync\n"
- "2:\n"
- : "=&r"(old_val)
- : "r"(addr), "r"((unsigned long)_new),
- "r"((unsigned long)old)
- : "memory", "cc");
-
- return old_val;
- }
-#endif
- }
- /* generate an illegal instruction. Cannot catch this with linker tricks
- * when optimizations are disabled. */
- __asm__ __volatile__(ILLEGAL_INSTR);
- return 0;
-}
-
-
-#define uatomic_cmpxchg(addr, old, _new) \
- ((__typeof__(*(addr))) _uatomic_cmpxchg((addr), (unsigned long)(old),\
- (unsigned long)(_new), \
- sizeof(*(addr))))
-
-/* uatomic_add_return */
-
-static inline __attribute__((always_inline))
-unsigned long _uatomic_add_return(void *addr, unsigned long val,
- int len)
-{
- switch (len) {
- case 4:
- {
- unsigned int result;
-
- __asm__ __volatile__(
- LWSYNC_OPCODE
- "1:\t" "lwarx %0,0,%1\n" /* load and reserve */
- "add %0,%2,%0\n" /* add val to value loaded */
- "stwcx. %0,0,%1\n" /* store conditional */
- "bne- 1b\n" /* retry if lost reservation */
- "isync\n"
- : "=&r"(result)
- : "r"(addr), "r"(val)
- : "memory", "cc");
-
- return result;
- }
-#if (CAA_BITS_PER_LONG == 64)
- case 8:
- {
- unsigned long result;
-
- __asm__ __volatile__(
- LWSYNC_OPCODE
- "1:\t" "ldarx %0,0,%1\n" /* load and reserve */
- "add %0,%2,%0\n" /* add val to value loaded */
- "stdcx. %0,0,%1\n" /* store conditional */
- "bne- 1b\n" /* retry if lost reservation */
- "isync\n"
- : "=&r"(result)
- : "r"(addr), "r"(val)
- : "memory", "cc");
-
- return result;
- }
-#endif
- }
- /* generate an illegal instruction. Cannot catch this with linker tricks
- * when optimizations are disabled. */
- __asm__ __volatile__(ILLEGAL_INSTR);
- return 0;
-}
-
-
-#define uatomic_add_return(addr, v) \
- ((__typeof__(*(addr))) _uatomic_add_return((addr), \
- (unsigned long)(v), \
- sizeof(*(addr))))
-
-#ifdef __cplusplus
-}
-#endif
-
-#include <urcu/uatomic_generic.h>
-
-#endif /* _URCU_ARCH_UATOMIC_PPC_H */
+++ /dev/null
-#ifndef _URCU_UATOMIC_ARCH_S390_H
-#define _URCU_UATOMIC_ARCH_S390_H
-
-/*
- * Atomic exchange operations for the S390 architecture. Based on information
- * taken from the Principles of Operation Appendix A "Conditional Swapping
- * Instructions (CS, CDS)".
- *
- * Copyright (c) 2009 Novell, Inc.
- * Author: Jan Blunck <jblunck@suse.de>
- * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#include <urcu/compiler.h>
-#include <urcu/system.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
-#define COMPILER_HAVE_SHORT_MEM_OPERAND
-#endif
-
-/*
- * MEMOP assembler operand rules:
- * - op refer to MEMOP_IN operand
- * - MEMOP_IN can expand to more than a single operand. Use it at the end of
- * operand list only.
- */
-
-#ifdef COMPILER_HAVE_SHORT_MEM_OPERAND
-
-#define MEMOP_OUT(addr) "=Q" (*(addr))
-#define MEMOP_IN(addr) "Q" (*(addr))
-#define MEMOP_REF(op) #op /* op refer to MEMOP_IN operand */
-
-#else /* !COMPILER_HAVE_SHORT_MEM_OPERAND */
-
-#define MEMOP_OUT(addr) "=m" (*(addr))
-#define MEMOP_IN(addr) "a" (addr), "m" (*(addr))
-#define MEMOP_REF(op) "0(" #op ")" /* op refer to MEMOP_IN operand */
-
-#endif /* !COMPILER_HAVE_SHORT_MEM_OPERAND */
-
-struct __uatomic_dummy {
- unsigned long v[10];
-};
-#define __hp(x) ((struct __uatomic_dummy *)(x))
-
-/* xchg */
-
-static inline __attribute__((always_inline))
-unsigned long _uatomic_exchange(volatile void *addr, unsigned long val, int len)
-{
- switch (len) {
- case 4:
- {
- unsigned int old_val;
-
- __asm__ __volatile__(
- "0: cs %0,%2," MEMOP_REF(%3) "\n"
- " brc 4,0b\n"
- : "=&r" (old_val), MEMOP_OUT (__hp(addr))
- : "r" (val), MEMOP_IN (__hp(addr))
- : "memory", "cc");
- return old_val;
- }
-#if (CAA_BITS_PER_LONG == 64)
- case 8:
- {
- unsigned long old_val;
-
- __asm__ __volatile__(
- "0: csg %0,%2," MEMOP_REF(%3) "\n"
- " brc 4,0b\n"
- : "=&r" (old_val), MEMOP_OUT (__hp(addr))
- : "r" (val), MEMOP_IN (__hp(addr))
- : "memory", "cc");
- return old_val;
- }
-#endif
- default:
- __asm__ __volatile__(".long 0xd00d00");
- }
-
- return 0;
-}
-
-#define uatomic_xchg(addr, v) \
- (__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \
- sizeof(*(addr)))
-
-/* cmpxchg */
-
-static inline __attribute__((always_inline))
-unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
- unsigned long _new, int len)
-{
- switch (len) {
- case 4:
- {
- unsigned int old_val = (unsigned int)old;
-
- __asm__ __volatile__(
- " cs %0,%2," MEMOP_REF(%3) "\n"
- : "+r" (old_val), MEMOP_OUT (__hp(addr))
- : "r" (_new), MEMOP_IN (__hp(addr))
- : "memory", "cc");
- return old_val;
- }
-#if (CAA_BITS_PER_LONG == 64)
- case 8:
- {
- __asm__ __volatile__(
- " csg %0,%2," MEMOP_REF(%3) "\n"
- : "+r" (old), MEMOP_OUT (__hp(addr))
- : "r" (_new), MEMOP_IN (__hp(addr))
- : "memory", "cc");
- return old;
- }
-#endif
- default:
- __asm__ __volatile__(".long 0xd00d00");
- }
-
- return 0;
-}
-
-#define uatomic_cmpxchg(addr, old, _new) \
- (__typeof__(*(addr))) _uatomic_cmpxchg((addr), \
- (unsigned long)(old), \
- (unsigned long)(_new), \
- sizeof(*(addr)))
-
-#ifdef __cplusplus
-}
-#endif
-
-#include <urcu/uatomic_generic.h>
-
-#endif /* _URCU_UATOMIC_ARCH_S390_H */
+++ /dev/null
-#ifndef _URCU_ARCH_UATOMIC_SPARC64_H
-#define _URCU_ARCH_UATOMIC_SPARC64_H
-
-/*
- * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
- * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
- * Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved.
- * Copyright (c) 2009 Mathieu Desnoyers
- *
- * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
- * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
- *
- * Permission is hereby granted to use or copy this program
- * for any purpose, provided the above notices are retained on all copies.
- * Permission to modify the code and to distribute modified code is granted,
- * provided the above notices are retained, and a notice that the code was
- * modified is included with the above copyright notice.
- *
- * Code inspired from libuatomic_ops-1.2, inherited in part from the
- * Boehm-Demers-Weiser conservative garbage collector.
- */
-
-#include <urcu/compiler.h>
-#include <urcu/system.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* cmpxchg */
-
-static inline __attribute__((always_inline))
-unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
- unsigned long _new, int len)
-{
- switch (len) {
- case 4:
- {
- __asm__ __volatile__ (
- "membar #StoreLoad | #LoadLoad\n\t"
- "cas [%1],%2,%0\n\t"
- "membar #StoreLoad | #StoreStore\n\t"
- : "+&r" (_new)
- : "r" (addr), "r" (old)
- : "memory");
-
- return _new;
- }
-#if (CAA_BITS_PER_LONG == 64)
- case 8:
- {
- __asm__ __volatile__ (
- "membar #StoreLoad | #LoadLoad\n\t"
- "casx [%1],%2,%0\n\t"
- "membar #StoreLoad | #StoreStore\n\t"
- : "+&r" (_new)
- : "r" (addr), "r" (old)
- : "memory");
-
- return _new;
- }
-#endif
- }
- __builtin_trap();
- return 0;
-}
-
-
-#define uatomic_cmpxchg(addr, old, _new) \
- ((__typeof__(*(addr))) _uatomic_cmpxchg((addr), (unsigned long)(old),\
- (unsigned long)(_new), \
- sizeof(*(addr))))
-
-#ifdef __cplusplus
-}
-#endif
-
-#include <urcu/uatomic_generic.h>
-
-#endif /* _URCU_ARCH_UATOMIC_PPC_H */
+++ /dev/null
-#ifndef _URCU_ARCH_UATOMIC_UNKNOWN_H
-#define _URCU_ARCH_UATOMIC_UNKNOWN_H
-
-/*
- * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
- * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
- * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
- * Copyright (c) 2009 Mathieu Desnoyers
- * Copyright (c) 2010 Paul E. McKenney, IBM Corporation
- * (Adapted from uatomic_arch_ppc.h)
- *
- * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
- * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
- *
- * Permission is hereby granted to use or copy this program
- * for any purpose, provided the above notices are retained on all copies.
- * Permission to modify the code and to distribute modified code is granted,
- * provided the above notices are retained, and a notice that the code was
- * modified is included with the above copyright notice.
- */
-
-/* See configure.ac for the list of recognized architectures. */
-#error "Cannot build: unrecognized architecture detected."
-
-#endif /* _URCU_ARCH_UATOMIC_UNKNOWN_H */
+++ /dev/null
-#ifndef _URCU_ARCH_UATOMIC_X86_H
-#define _URCU_ARCH_UATOMIC_X86_H
-
-/*
- * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
- * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
- * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
- * Copyright (c) 2009 Mathieu Desnoyers
- *
- * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
- * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
- *
- * Permission is hereby granted to use or copy this program
- * for any purpose, provided the above notices are retained on all copies.
- * Permission to modify the code and to distribute modified code is granted,
- * provided the above notices are retained, and a notice that the code was
- * modified is included with the above copyright notice.
- *
- * Code inspired from libuatomic_ops-1.2, inherited in part from the
- * Boehm-Demers-Weiser conservative garbage collector.
- */
-
-#include <urcu/compiler.h>
-#include <urcu/system.h>
-
-#define UATOMIC_HAS_ATOMIC_BYTE
-#define UATOMIC_HAS_ATOMIC_SHORT
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/*
- * Derived from AO_compare_and_swap() and AO_test_and_set_full().
- */
-
-struct __uatomic_dummy {
- unsigned long v[10];
-};
-#define __hp(x) ((struct __uatomic_dummy *)(x))
-
-#define _uatomic_set(addr, v) CMM_STORE_SHARED(*(addr), (v))
-
-/* cmpxchg */
-
-static inline __attribute__((always_inline))
-unsigned long __uatomic_cmpxchg(void *addr, unsigned long old,
- unsigned long _new, int len)
-{
- switch (len) {
- case 1:
- {
- unsigned char result = old;
-
- __asm__ __volatile__(
- "lock; cmpxchgb %2, %1"
- : "+a"(result), "+m"(*__hp(addr))
- : "q"((unsigned char)_new)
- : "memory");
- return result;
- }
- case 2:
- {
- unsigned short result = old;
-
- __asm__ __volatile__(
- "lock; cmpxchgw %2, %1"
- : "+a"(result), "+m"(*__hp(addr))
- : "r"((unsigned short)_new)
- : "memory");
- return result;
- }
- case 4:
- {
- unsigned int result = old;
-
- __asm__ __volatile__(
- "lock; cmpxchgl %2, %1"
- : "+a"(result), "+m"(*__hp(addr))
- : "r"((unsigned int)_new)
- : "memory");
- return result;
- }
-#if (CAA_BITS_PER_LONG == 64)
- case 8:
- {
- unsigned long result = old;
-
- __asm__ __volatile__(
- "lock; cmpxchgq %2, %1"
- : "+a"(result), "+m"(*__hp(addr))
- : "r"((unsigned long)_new)
- : "memory");
- return result;
- }
-#endif
- }
- /* generate an illegal instruction. Cannot catch this with linker tricks
- * when optimizations are disabled. */
- __asm__ __volatile__("ud2");
- return 0;
-}
-
-#define _uatomic_cmpxchg(addr, old, _new) \
- ((__typeof__(*(addr))) __uatomic_cmpxchg((addr), (unsigned long)(old),\
- (unsigned long)(_new), \
- sizeof(*(addr))))
-
-/* xchg */
-
-static inline __attribute__((always_inline))
-unsigned long __uatomic_exchange(void *addr, unsigned long val, int len)
-{
- /* Note: the "xchg" instruction does not need a "lock" prefix. */
- switch (len) {
- case 1:
- {
- unsigned char result;
- __asm__ __volatile__(
- "xchgb %0, %1"
- : "=q"(result), "+m"(*__hp(addr))
- : "0" ((unsigned char)val)
- : "memory");
- return result;
- }
- case 2:
- {
- unsigned short result;
- __asm__ __volatile__(
- "xchgw %0, %1"
- : "=r"(result), "+m"(*__hp(addr))
- : "0" ((unsigned short)val)
- : "memory");
- return result;
- }
- case 4:
- {
- unsigned int result;
- __asm__ __volatile__(
- "xchgl %0, %1"
- : "=r"(result), "+m"(*__hp(addr))
- : "0" ((unsigned int)val)
- : "memory");
- return result;
- }
-#if (CAA_BITS_PER_LONG == 64)
- case 8:
- {
- unsigned long result;
- __asm__ __volatile__(
- "xchgq %0, %1"
- : "=r"(result), "+m"(*__hp(addr))
- : "0" ((unsigned long)val)
- : "memory");
- return result;
- }
-#endif
- }
- /* generate an illegal instruction. Cannot catch this with linker tricks
- * when optimizations are disabled. */
- __asm__ __volatile__("ud2");
- return 0;
-}
-
-#define _uatomic_xchg(addr, v) \
- ((__typeof__(*(addr))) __uatomic_exchange((addr), (unsigned long)(v), \
- sizeof(*(addr))))
-
-/* uatomic_add_return */
-
-static inline __attribute__((always_inline))
-unsigned long __uatomic_add_return(void *addr, unsigned long val,
- int len)
-{
- switch (len) {
- case 1:
- {
- unsigned char result = val;
-
- __asm__ __volatile__(
- "lock; xaddb %1, %0"
- : "+m"(*__hp(addr)), "+q" (result)
- :
- : "memory");
- return result + (unsigned char)val;
- }
- case 2:
- {
- unsigned short result = val;
-
- __asm__ __volatile__(
- "lock; xaddw %1, %0"
- : "+m"(*__hp(addr)), "+r" (result)
- :
- : "memory");
- return result + (unsigned short)val;
- }
- case 4:
- {
- unsigned int result = val;
-
- __asm__ __volatile__(
- "lock; xaddl %1, %0"
- : "+m"(*__hp(addr)), "+r" (result)
- :
- : "memory");
- return result + (unsigned int)val;
- }
-#if (CAA_BITS_PER_LONG == 64)
- case 8:
- {
- unsigned long result = val;
-
- __asm__ __volatile__(
- "lock; xaddq %1, %0"
- : "+m"(*__hp(addr)), "+r" (result)
- :
- : "memory");
- return result + (unsigned long)val;
- }
-#endif
- }
- /* generate an illegal instruction. Cannot catch this with linker tricks
- * when optimizations are disabled. */
- __asm__ __volatile__("ud2");
- return 0;
-}
-
-#define _uatomic_add_return(addr, v) \
- ((__typeof__(*(addr))) __uatomic_add_return((addr), \
- (unsigned long)(v), \
- sizeof(*(addr))))
-
-/* uatomic_and */
-
-static inline __attribute__((always_inline))
-void __uatomic_and(void *addr, unsigned long val, int len)
-{
- switch (len) {
- case 1:
- {
- __asm__ __volatile__(
- "lock; andb %1, %0"
- : "=m"(*__hp(addr))
- : "iq" ((unsigned char)val)
- : "memory");
- return;
- }
- case 2:
- {
- __asm__ __volatile__(
- "lock; andw %1, %0"
- : "=m"(*__hp(addr))
- : "ir" ((unsigned short)val)
- : "memory");
- return;
- }
- case 4:
- {
- __asm__ __volatile__(
- "lock; andl %1, %0"
- : "=m"(*__hp(addr))
- : "ir" ((unsigned int)val)
- : "memory");
- return;
- }
-#if (CAA_BITS_PER_LONG == 64)
- case 8:
- {
- __asm__ __volatile__(
- "lock; andq %1, %0"
- : "=m"(*__hp(addr))
- : "er" ((unsigned long)val)
- : "memory");
- return;
- }
-#endif
- }
- /* generate an illegal instruction. Cannot catch this with linker tricks
- * when optimizations are disabled. */
- __asm__ __volatile__("ud2");
- return;
-}
-
-#define _uatomic_and(addr, v) \
- (__uatomic_and((addr), (unsigned long)(v), sizeof(*(addr))))
-
-/* uatomic_or */
-
-static inline __attribute__((always_inline))
-void __uatomic_or(void *addr, unsigned long val, int len)
-{
- switch (len) {
- case 1:
- {
- __asm__ __volatile__(
- "lock; orb %1, %0"
- : "=m"(*__hp(addr))
- : "iq" ((unsigned char)val)
- : "memory");
- return;
- }
- case 2:
- {
- __asm__ __volatile__(
- "lock; orw %1, %0"
- : "=m"(*__hp(addr))
- : "ir" ((unsigned short)val)
- : "memory");
- return;
- }
- case 4:
- {
- __asm__ __volatile__(
- "lock; orl %1, %0"
- : "=m"(*__hp(addr))
- : "ir" ((unsigned int)val)
- : "memory");
- return;
- }
-#if (CAA_BITS_PER_LONG == 64)
- case 8:
- {
- __asm__ __volatile__(
- "lock; orq %1, %0"
- : "=m"(*__hp(addr))
- : "er" ((unsigned long)val)
- : "memory");
- return;
- }
-#endif
- }
- /* generate an illegal instruction. Cannot catch this with linker tricks
- * when optimizations are disabled. */
- __asm__ __volatile__("ud2");
- return;
-}
-
-#define _uatomic_or(addr, v) \
- (__uatomic_or((addr), (unsigned long)(v), sizeof(*(addr))))
-
-/* uatomic_add */
-
-static inline __attribute__((always_inline))
-void __uatomic_add(void *addr, unsigned long val, int len)
-{
- switch (len) {
- case 1:
- {
- __asm__ __volatile__(
- "lock; addb %1, %0"
- : "=m"(*__hp(addr))
- : "iq" ((unsigned char)val)
- : "memory");
- return;
- }
- case 2:
- {
- __asm__ __volatile__(
- "lock; addw %1, %0"
- : "=m"(*__hp(addr))
- : "ir" ((unsigned short)val)
- : "memory");
- return;
- }
- case 4:
- {
- __asm__ __volatile__(
- "lock; addl %1, %0"
- : "=m"(*__hp(addr))
- : "ir" ((unsigned int)val)
- : "memory");
- return;
- }
-#if (CAA_BITS_PER_LONG == 64)
- case 8:
- {
- __asm__ __volatile__(
- "lock; addq %1, %0"
- : "=m"(*__hp(addr))
- : "er" ((unsigned long)val)
- : "memory");
- return;
- }
-#endif
- }
- /* generate an illegal instruction. Cannot catch this with linker tricks
- * when optimizations are disabled. */
- __asm__ __volatile__("ud2");
- return;
-}
-
-#define _uatomic_add(addr, v) \
- (__uatomic_add((addr), (unsigned long)(v), sizeof(*(addr))))
-
-
-/* uatomic_inc */
-
-static inline __attribute__((always_inline))
-void __uatomic_inc(void *addr, int len)
-{
- switch (len) {
- case 1:
- {
- __asm__ __volatile__(
- "lock; incb %0"
- : "=m"(*__hp(addr))
- :
- : "memory");
- return;
- }
- case 2:
- {
- __asm__ __volatile__(
- "lock; incw %0"
- : "=m"(*__hp(addr))
- :
- : "memory");
- return;
- }
- case 4:
- {
- __asm__ __volatile__(
- "lock; incl %0"
- : "=m"(*__hp(addr))
- :
- : "memory");
- return;
- }
-#if (CAA_BITS_PER_LONG == 64)
- case 8:
- {
- __asm__ __volatile__(
- "lock; incq %0"
- : "=m"(*__hp(addr))
- :
- : "memory");
- return;
- }
-#endif
- }
- /* generate an illegal instruction. Cannot catch this with linker tricks
- * when optimizations are disabled. */
- __asm__ __volatile__("ud2");
- return;
-}
-
-#define _uatomic_inc(addr) (__uatomic_inc((addr), sizeof(*(addr))))
-
-/* uatomic_dec */
-
-static inline __attribute__((always_inline))
-void __uatomic_dec(void *addr, int len)
-{
- switch (len) {
- case 1:
- {
- __asm__ __volatile__(
- "lock; decb %0"
- : "=m"(*__hp(addr))
- :
- : "memory");
- return;
- }
- case 2:
- {
- __asm__ __volatile__(
- "lock; decw %0"
- : "=m"(*__hp(addr))
- :
- : "memory");
- return;
- }
- case 4:
- {
- __asm__ __volatile__(
- "lock; decl %0"
- : "=m"(*__hp(addr))
- :
- : "memory");
- return;
- }
-#if (CAA_BITS_PER_LONG == 64)
- case 8:
- {
- __asm__ __volatile__(
- "lock; decq %0"
- : "=m"(*__hp(addr))
- :
- : "memory");
- return;
- }
-#endif
- }
- /* generate an illegal instruction. Cannot catch this with linker tricks
- * when optimizations are disabled. */
- __asm__ __volatile__("ud2");
- return;
-}
-
-#define _uatomic_dec(addr) (__uatomic_dec((addr), sizeof(*(addr))))
-
-#if ((CAA_BITS_PER_LONG != 64) && defined(CONFIG_RCU_COMPAT_ARCH))
-extern int __rcu_cas_avail;
-extern int __rcu_cas_init(void);
-
-#define UATOMIC_COMPAT(insn) \
- ((likely(__rcu_cas_avail > 0)) \
- ? (_uatomic_##insn) \
- : ((unlikely(__rcu_cas_avail < 0) \
- ? ((__rcu_cas_init() > 0) \
- ? (_uatomic_##insn) \
- : (compat_uatomic_##insn)) \
- : (compat_uatomic_##insn))))
-
-extern unsigned long _compat_uatomic_set(void *addr,
- unsigned long _new, int len);
-#define compat_uatomic_set(addr, _new) \
- ((__typeof__(*(addr))) _compat_uatomic_set((addr), \
- (unsigned long)(_new), \
- sizeof(*(addr))))
-
-
-extern unsigned long _compat_uatomic_xchg(void *addr,
- unsigned long _new, int len);
-#define compat_uatomic_xchg(addr, _new) \
- ((__typeof__(*(addr))) _compat_uatomic_xchg((addr), \
- (unsigned long)(_new), \
- sizeof(*(addr))))
-
-extern unsigned long _compat_uatomic_cmpxchg(void *addr, unsigned long old,
- unsigned long _new, int len);
-#define compat_uatomic_cmpxchg(addr, old, _new) \
- ((__typeof__(*(addr))) _compat_uatomic_cmpxchg((addr), \
- (unsigned long)(old), \
- (unsigned long)(_new), \
- sizeof(*(addr))))
-
-extern unsigned long _compat_uatomic_and(void *addr,
- unsigned long _new, int len);
-#define compat_uatomic_and(addr, v) \
- ((__typeof__(*(addr))) _compat_uatomic_and((addr), \
- (unsigned long)(v), \
- sizeof(*(addr))))
-
-extern unsigned long _compat_uatomic_or(void *addr,
- unsigned long _new, int len);
-#define compat_uatomic_or(addr, v) \
- ((__typeof__(*(addr))) _compat_uatomic_or((addr), \
- (unsigned long)(v), \
- sizeof(*(addr))))
-
-extern unsigned long _compat_uatomic_add_return(void *addr,
- unsigned long _new, int len);
-#define compat_uatomic_add_return(addr, v) \
- ((__typeof__(*(addr))) _compat_uatomic_add_return((addr), \
- (unsigned long)(v), \
- sizeof(*(addr))))
-
-#define compat_uatomic_add(addr, v) \
- ((void)compat_uatomic_add_return((addr), (v)))
-#define compat_uatomic_inc(addr) \
- (compat_uatomic_add((addr), 1))
-#define compat_uatomic_dec(addr) \
- (compat_uatomic_add((addr), -1))
-
-#else
-#define UATOMIC_COMPAT(insn) (_uatomic_##insn)
-#endif
-
-/* Read is atomic even in compat mode */
-#define uatomic_set(addr, v) \
- UATOMIC_COMPAT(set(addr, v))
-
-#define uatomic_cmpxchg(addr, old, _new) \
- UATOMIC_COMPAT(cmpxchg(addr, old, _new))
-#define uatomic_xchg(addr, v) \
- UATOMIC_COMPAT(xchg(addr, v))
-#define uatomic_and(addr, v) \
- UATOMIC_COMPAT(and(addr, v))
-#define uatomic_or(addr, v) \
- UATOMIC_COMPAT(or(addr, v))
-#define uatomic_add_return(addr, v) \
- UATOMIC_COMPAT(add_return(addr, v))
-
-#define uatomic_add(addr, v) UATOMIC_COMPAT(add(addr, v))
-#define uatomic_inc(addr) UATOMIC_COMPAT(inc(addr))
-#define uatomic_dec(addr) UATOMIC_COMPAT(dec(addr))
-
-#ifdef __cplusplus
-}
-#endif
-
-#include <urcu/uatomic_generic.h>
-
-#endif /* _URCU_ARCH_UATOMIC_X86_H */
+++ /dev/null
-#ifndef _URCU_UATOMIC_GENERIC_H
-#define _URCU_UATOMIC_GENERIC_H
-
-/*
- * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
- * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
- * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
- * Copyright (c) 2009 Mathieu Desnoyers
- * Copyright (c) 2010 Paolo Bonzini
- *
- * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
- * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
- *
- * Permission is hereby granted to use or copy this program
- * for any purpose, provided the above notices are retained on all copies.
- * Permission to modify the code and to distribute modified code is granted,
- * provided the above notices are retained, and a notice that the code was
- * modified is included with the above copyright notice.
- *
- * Code inspired from libuatomic_ops-1.2, inherited in part from the
- * Boehm-Demers-Weiser conservative garbage collector.
- */
-
-#include <urcu/compiler.h>
-#include <urcu/system.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#ifndef uatomic_set
-#define uatomic_set(addr, v) CMM_STORE_SHARED(*(addr), (v))
-#endif
-
-#ifndef uatomic_read
-#define uatomic_read(addr) CMM_LOAD_SHARED(*(addr))
-#endif
-
-#if !defined __OPTIMIZE__ || defined UATOMIC_NO_LINK_ERROR
-static inline __attribute__((always_inline))
-void _uatomic_link_error()
-{
-#ifdef ILLEGAL_INSTR
- /* generate an illegal instruction. Cannot catch this with linker tricks
- * when optimizations are disabled. */
- __asm__ __volatile__(ILLEGAL_INSTR);
-#else
- __builtin_trap ();
-#endif
-}
-
-#else /* #if !defined __OPTIMIZE__ || defined UATOMIC_NO_LINK_ERROR */
-extern void _uatomic_link_error ();
-#endif /* #else #if !defined __OPTIMIZE__ || defined UATOMIC_NO_LINK_ERROR */
-
-/* cmpxchg */
-
-#ifndef uatomic_cmpxchg
-static inline __attribute__((always_inline))
-unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
- unsigned long _new, int len)
-{
- switch (len) {
-#ifdef UATOMIC_HAS_ATOMIC_BYTE
- case 1:
- return __sync_val_compare_and_swap_1(addr, old, _new);
-#endif
-#ifdef UATOMIC_HAS_ATOMIC_SHORT
- case 2:
- return __sync_val_compare_and_swap_2(addr, old, _new);
-#endif
- case 4:
- return __sync_val_compare_and_swap_4(addr, old, _new);
-#if (CAA_BITS_PER_LONG == 64)
- case 8:
- return __sync_val_compare_and_swap_8(addr, old, _new);
-#endif
- }
- _uatomic_link_error();
- return 0;
-}
-
-
-#define uatomic_cmpxchg(addr, old, _new) \
- ((__typeof__(*(addr))) _uatomic_cmpxchg((addr), (unsigned long)(old),\
- (unsigned long)(_new), \
- sizeof(*(addr))))
-
-
-/* uatomic_and */
-
-#ifndef uatomic_and
-static inline __attribute__((always_inline))
-void _uatomic_and(void *addr, unsigned long val,
- int len)
-{
- switch (len) {
-#ifdef UATOMIC_HAS_ATOMIC_BYTE
- case 1:
- __sync_and_and_fetch_1(addr, val);
-#endif
-#ifdef UATOMIC_HAS_ATOMIC_SHORT
- case 2:
- __sync_and_and_fetch_2(addr, val);
-#endif
- case 4:
- __sync_and_and_fetch_4(addr, val);
-#if (CAA_BITS_PER_LONG == 64)
- case 8:
- __sync_and_and_fetch_8(addr, val);
-#endif
- }
- _uatomic_link_error();
- return 0;
-}
-
-#define uatomic_and(addr, v) \
- (_uatomic_and((addr), \
- (unsigned long)(v), \
- sizeof(*(addr))))
-#endif
-
-/* uatomic_or */
-
-#ifndef uatomic_or
-static inline __attribute__((always_inline))
-void _uatomic_or(void *addr, unsigned long val,
- int len)
-{
- switch (len) {
-#ifdef UATOMIC_HAS_ATOMIC_BYTE
- case 1:
- __sync_or_and_fetch_1(addr, val);
-#endif
-#ifdef UATOMIC_HAS_ATOMIC_SHORT
- case 2:
- __sync_or_and_fetch_2(addr, val);
-#endif
- case 4:
- __sync_or_and_fetch_4(addr, val);
-#if (CAA_BITS_PER_LONG == 64)
- case 8:
- __sync_or_and_fetch_8(addr, val);
-#endif
- }
- _uatomic_link_error();
- return 0;
-}
-
-#define uatomic_or(addr, v) \
- (_uatomic_or((addr), \
- (unsigned long)(v), \
- sizeof(*(addr))))
-#endif
-
-/* uatomic_add_return */
-
-#ifndef uatomic_add_return
-static inline __attribute__((always_inline))
-unsigned long _uatomic_add_return(void *addr, unsigned long val,
- int len)
-{
- switch (len) {
-#ifdef UATOMIC_HAS_ATOMIC_BYTE
- case 1:
- return __sync_add_and_fetch_1(addr, val);
-#endif
-#ifdef UATOMIC_HAS_ATOMIC_SHORT
- case 2:
- return __sync_add_and_fetch_2(addr, val);
-#endif
- case 4:
- return __sync_add_and_fetch_4(addr, val);
-#if (CAA_BITS_PER_LONG == 64)
- case 8:
- return __sync_add_and_fetch_8(addr, val);
-#endif
- }
- _uatomic_link_error();
- return 0;
-}
-
-
-#define uatomic_add_return(addr, v) \
- ((__typeof__(*(addr))) _uatomic_add_return((addr), \
- (unsigned long)(v), \
- sizeof(*(addr))))
-#endif /* #ifndef uatomic_add_return */
-
-#ifndef uatomic_xchg
-/* xchg */
-
-static inline __attribute__((always_inline))
-unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
-{
- switch (len) {
-#ifdef UATOMIC_HAS_ATOMIC_BYTE
- case 1:
- {
- unsigned char old;
-
- do {
- old = uatomic_read((unsigned char *)addr);
- } while (!__sync_bool_compare_and_swap_1(addr, old, val));
-
- return old;
- }
-#endif
-#ifdef UATOMIC_HAS_ATOMIC_SHORT
- case 2:
- {
- unsigned short old;
-
- do {
- old = uatomic_read((unsigned short *)addr);
- } while (!__sync_bool_compare_and_swap_2(addr, old, val));
-
- return old;
- }
-#endif
- case 4:
- {
- unsigned int old;
-
- do {
- old = uatomic_read((unsigned int *)addr);
- } while (!__sync_bool_compare_and_swap_4(addr, old, val));
-
- return old;
- }
-#if (CAA_BITS_PER_LONG == 64)
- case 8:
- {
- unsigned long old;
-
- do {
- old = uatomic_read((unsigned long *)addr);
- } while (!__sync_bool_compare_and_swap_8(addr, old, val));
-
- return old;
- }
-#endif
- }
- _uatomic_link_error();
- return 0;
-}
-
-#define uatomic_xchg(addr, v) \
- ((__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \
- sizeof(*(addr))))
-#endif /* #ifndef uatomic_xchg */
-
-#else /* #ifndef uatomic_cmpxchg */
-
-#ifndef uatomic_and
-/* uatomic_and */
-
-static inline __attribute__((always_inline))
-void _uatomic_and(void *addr, unsigned long val, int len)
-{
- switch (len) {
-#ifdef UATOMIC_HAS_ATOMIC_BYTE
- case 1:
- {
- unsigned char old, oldt;
-
- oldt = uatomic_read((unsigned char *)addr);
- do {
- old = oldt;
- oldt = _uatomic_cmpxchg(addr, old, old & val, 1);
- } while (oldt != old);
- }
-#endif
-#ifdef UATOMIC_HAS_ATOMIC_SHORT
- case 2:
- {
- unsigned short old, oldt;
-
- oldt = uatomic_read((unsigned short *)addr);
- do {
- old = oldt;
- oldt = _uatomic_cmpxchg(addr, old, old & val, 2);
- } while (oldt != old);
- }
-#endif
- case 4:
- {
- unsigned int old, oldt;
-
- oldt = uatomic_read((unsigned int *)addr);
- do {
- old = oldt;
- oldt = _uatomic_cmpxchg(addr, old, old & val, 4);
- } while (oldt != old);
- }
-#if (CAA_BITS_PER_LONG == 64)
- case 8:
- {
- unsigned long old, oldt;
-
- oldt = uatomic_read((unsigned long *)addr);
- do {
- old = oldt;
- oldt = _uatomic_cmpxchg(addr, old, old & val, 8);
- } while (oldt != old);
- }
-#endif
- }
- _uatomic_link_error();
- return 0;
-}
-
-#define uatomic_and(addr, v) \
- (uatomic_and((addr), \
- (unsigned long)(v), \
- sizeof(*(addr))))
-#endif /* #ifndef uatomic_and */
-
-#ifndef uatomic_or
-/* uatomic_or */
-
-static inline __attribute__((always_inline))
-void _uatomic_or(void *addr, unsigned long val, int len)
-{
- switch (len) {
-#ifdef UATOMIC_HAS_ATOMIC_BYTE
- case 1:
- {
- unsigned char old, oldt;
-
- oldt = uatomic_read((unsigned char *)addr);
- do {
- old = oldt;
- oldt = _uatomic_cmpxchg(addr, old, old | val, 1);
- } while (oldt != old);
- }
-#endif
-#ifdef UATOMIC_HAS_ATOMIC_SHORT
- case 2:
- {
- unsigned short old, oldt;
-
- oldt = uatomic_read((unsigned short *)addr);
- do {
- old = oldt;
- oldt = _uatomic_cmpxchg(addr, old, old | val, 2);
- } while (oldt != old);
- }
-#endif
- case 4:
- {
- unsigned int old, oldt;
-
- oldt = uatomic_read((unsigned int *)addr);
- do {
- old = oldt;
- oldt = _uatomic_cmpxchg(addr, old, old | val, 4);
- } while (oldt != old);
- }
-#if (CAA_BITS_PER_LONG == 64)
- case 8:
- {
- unsigned long old, oldt;
-
- oldt = uatomic_read((unsigned long *)addr);
- do {
- old = oldt;
- oldt = _uatomic_cmpxchg(addr, old, old | val, 8);
- } while (oldt != old);
- }
-#endif
- }
- _uatomic_link_error();
- return 0;
-}
-
-#define uatomic_or(addr, v) \
- (uatomic_or((addr), \
- (unsigned long)(v), \
- sizeof(*(addr))))
-#endif /* #ifndef uatomic_or */
-
-#ifndef uatomic_add_return
-/* uatomic_add_return */
-
-static inline __attribute__((always_inline))
-unsigned long _uatomic_add_return(void *addr, unsigned long val, int len)
-{
- switch (len) {
-#ifdef UATOMIC_HAS_ATOMIC_BYTE
- case 1:
- {
- unsigned char old, oldt;
-
- oldt = uatomic_read((unsigned char *)addr);
- do {
- old = oldt;
- oldt = uatomic_cmpxchg((unsigned char *)addr,
- old, old + val);
- } while (oldt != old);
-
- return old + val;
- }
-#endif
-#ifdef UATOMIC_HAS_ATOMIC_SHORT
- case 2:
- {
- unsigned short old, oldt;
-
- oldt = uatomic_read((unsigned short *)addr);
- do {
- old = oldt;
- oldt = uatomic_cmpxchg((unsigned short *)addr,
- old, old + val);
- } while (oldt != old);
-
- return old + val;
- }
-#endif
- case 4:
- {
- unsigned int old, oldt;
-
- oldt = uatomic_read((unsigned int *)addr);
- do {
- old = oldt;
- oldt = uatomic_cmpxchg((unsigned int *)addr,
- old, old + val);
- } while (oldt != old);
-
- return old + val;
- }
-#if (CAA_BITS_PER_LONG == 64)
- case 8:
- {
- unsigned long old, oldt;
-
- oldt = uatomic_read((unsigned long *)addr);
- do {
- old = oldt;
- oldt = uatomic_cmpxchg((unsigned long *)addr,
- old, old + val);
- } while (oldt != old);
-
- return old + val;
- }
-#endif
- }
- _uatomic_link_error();
- return 0;
-}
-
-#define uatomic_add_return(addr, v) \
- ((__typeof__(*(addr))) _uatomic_add_return((addr), \
- (unsigned long)(v), \
- sizeof(*(addr))))
-#endif /* #ifndef uatomic_add_return */
-
-#ifndef uatomic_xchg
-/* xchg */
-
-static inline __attribute__((always_inline))
-unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
-{
- switch (len) {
-#ifdef UATOMIC_HAS_ATOMIC_BYTE
- case 1:
- {
- unsigned char old, oldt;
-
- oldt = uatomic_read((unsigned char *)addr);
- do {
- old = oldt;
- oldt = uatomic_cmpxchg((unsigned char *)addr,
- old, val);
- } while (oldt != old);
-
- return old;
- }
-#endif
-#ifdef UATOMIC_HAS_ATOMIC_SHORT
- case 2:
- {
- unsigned short old, oldt;
-
- oldt = uatomic_read((unsigned short *)addr);
- do {
- old = oldt;
- oldt = uatomic_cmpxchg((unsigned short *)addr,
- old, val);
- } while (oldt != old);
-
- return old;
- }
-#endif
- case 4:
- {
- unsigned int old, oldt;
-
- oldt = uatomic_read((unsigned int *)addr);
- do {
- old = oldt;
- oldt = uatomic_cmpxchg((unsigned int *)addr,
- old, val);
- } while (oldt != old);
-
- return old;
- }
-#if (CAA_BITS_PER_LONG == 64)
- case 8:
- {
- unsigned long old, oldt;
-
- oldt = uatomic_read((unsigned long *)addr);
- do {
- old = oldt;
- oldt = uatomic_cmpxchg((unsigned long *)addr,
- old, val);
- } while (oldt != old);
-
- return old;
- }
-#endif
- }
- _uatomic_link_error();
- return 0;
-}
-
-#define uatomic_xchg(addr, v) \
- ((__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \
- sizeof(*(addr))))
-#endif /* #ifndef uatomic_xchg */
-
-#endif /* #else #ifndef uatomic_cmpxchg */
-
-/* uatomic_sub_return, uatomic_add, uatomic_sub, uatomic_inc, uatomic_dec */
-
-#ifndef uatomic_add
-#define uatomic_add(addr, v) (void)uatomic_add_return((addr), (v))
-#endif
-
-#define uatomic_sub_return(addr, v) uatomic_add_return((addr), -(v))
-#define uatomic_sub(addr, v) uatomic_add((addr), -(v))
-
-#ifndef uatomic_inc
-#define uatomic_inc(addr) uatomic_add((addr), 1)
-#endif
-
-#ifndef uatomic_dec
-#define uatomic_dec(addr) uatomic_add((addr), -1)
-#endif
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _URCU_UATOMIC_GENERIC_H */
-#ifndef _URCU_FUTEX_H
-#define _URCU_FUTEX_H
-
-/*
- * urcu-futex.h
- *
- * Userspace RCU - sys_futex/compat_futex header.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include <urcu/config.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-
-/*
- * sys_futex compatibility header.
- * Use *only* *either of* futex_noasync OR futex_async on a given address.
- *
- * futex_noasync cannot be executed in signal handlers, but ensures that
- * it will be put in a wait queue even in compatibility mode.
- *
- * futex_async is signal-handler safe for the wakeup. It uses polling
- * on the wait-side in compatibility mode.
- */
-
-#ifdef CONFIG_RCU_HAVE_FUTEX
-#include <sys/syscall.h>
-#define futex(...) syscall(__NR_futex, __VA_ARGS__)
-#define futex_noasync(uaddr, op, val, timeout, uaddr2, val3) \
- futex(uaddr, op, val, timeout, uaddr2, val3)
-#define futex_async(uaddr, op, val, timeout, uaddr2, val3) \
- futex(uaddr, op, val, timeout, uaddr2, val3)
-#else
-extern int compat_futex_noasync(int *uaddr, int op, int val,
- const struct timespec *timeout, int *uaddr2, int val3);
-#define futex_noasync(uaddr, op, val, timeout, uaddr2, val3) \
- compat_futex_noasync(uaddr, op, val, timeout, uaddr2, val3)
-extern int compat_futex_async(int *uaddr, int op, int val,
- const struct timespec *timeout, int *uaddr2, int val3);
-#define futex_async(uaddr, op, val, timeout, uaddr2, val3) \
- compat_futex_async(uaddr, op, val, timeout, uaddr2, val3)
-#endif
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _URCU_FUTEX_H */
+#warning "urcu/urcu-futex.h is deprecated. Please include urcu/futex.h instead."
+#include <urcu/futex.h>
-#ifndef _URCU_REF_H
-#define _URCU_REF_H
-
-/*
- * Userspace RCU - Reference counting
- *
- * Copyright (C) 2009 Novell Inc.
- * Copyright (C) 2010 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * Author: Jan Blunck <jblunck@suse.de>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU Lesser General Public License version 2.1 as
- * published by the Free Software Foundation.
- */
-
-#include <assert.h>
-#include <urcu/uatomic_arch.h>
-
-struct urcu_ref {
- long refcount; /* ATOMIC */
-};
-
-static inline void urcu_ref_set(struct urcu_ref *ref, long val)
-{
- uatomic_set(&ref->refcount, val);
-}
-
-static inline void urcu_ref_init(struct urcu_ref *ref)
-{
- urcu_ref_set(ref, 1);
-}
-
-static inline void urcu_ref_get(struct urcu_ref *ref)
-{
- uatomic_add(&ref->refcount, 1);
-}
-
-static inline void urcu_ref_put(struct urcu_ref *ref,
- void (*release)(struct urcu_ref *))
-{
- long res = uatomic_sub_return(&ref->refcount, 1);
- assert (res >= 0);
- if (res == 0)
- release(ref);
-}
-
-#endif /* _URCU_REF_H */
+#warning "urcu/urcu_ref.h is deprecated. Please include urcu/ref.h instead."
+#include <urcu/ref.h>
+++ /dev/null
-#ifndef _URCU_WFQUEUE_STATIC_H
-#define _URCU_WFQUEUE_STATIC_H
-
-/*
- * wfqueue-static.h
- *
- * Userspace RCU library - Queue with Wait-Free Enqueue/Blocking Dequeue
- *
- * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See wfqueue.h for linking
- * dynamically with the userspace rcu library.
- *
- * Copyright 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include <pthread.h>
-#include <assert.h>
-#include <poll.h>
-#include <urcu/compiler.h>
-#include <urcu/uatomic_arch.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/*
- * Queue with wait-free enqueue/blocking dequeue.
- * This implementation adds a dummy head node when the queue is empty to ensure
- * we can always update the queue locklessly.
- *
- * Inspired from half-wait-free/half-blocking queue implementation done by
- * Paul E. McKenney.
- */
-
-#define WFQ_ADAPT_ATTEMPTS 10 /* Retry if being set */
-#define WFQ_WAIT 10 /* Wait 10 ms if being set */
-
-static inline void _cds_wfq_node_init(struct cds_wfq_node *node)
-{
- node->next = NULL;
-}
-
-static inline void _cds_wfq_init(struct cds_wfq_queue *q)
-{
- int ret;
-
- _cds_wfq_node_init(&q->dummy);
- /* Set queue head and tail */
- q->head = &q->dummy;
- q->tail = &q->dummy.next;
- ret = pthread_mutex_init(&q->lock, NULL);
- assert(!ret);
-}
-
-static inline void _cds_wfq_enqueue(struct cds_wfq_queue *q,
- struct cds_wfq_node *node)
-{
- struct cds_wfq_node **old_tail;
-
- /*
- * uatomic_xchg() implicit memory barrier orders earlier stores to data
- * structure containing node and setting node->next to NULL before
- * publication.
- */
- old_tail = uatomic_xchg(&q->tail, node);
- /*
- * At this point, dequeuers see a NULL old_tail->next, which indicates
- * that the queue is being appended to. The following store will append
- * "node" to the queue from a dequeuer perspective.
- */
- CMM_STORE_SHARED(*old_tail, node);
-}
-
-/*
- * It is valid to reuse and free a dequeued node immediately.
- *
- * No need to go on a waitqueue here, as there is no possible state in which the
- * list could cause dequeue to busy-loop needlessly while waiting for another
- * thread to be scheduled. The queue appears empty until tail->next is set by
- * enqueue.
- */
-static inline struct cds_wfq_node *
-___cds_wfq_dequeue_blocking(struct cds_wfq_queue *q)
-{
- struct cds_wfq_node *node, *next;
- int attempt = 0;
-
- /*
- * Queue is empty if it only contains the dummy node.
- */
- if (q->head == &q->dummy && CMM_LOAD_SHARED(q->tail) == &q->dummy.next)
- return NULL;
- node = q->head;
-
- /*
- * Adaptative busy-looping waiting for enqueuer to complete enqueue.
- */
- while ((next = CMM_LOAD_SHARED(node->next)) == NULL) {
- if (++attempt >= WFQ_ADAPT_ATTEMPTS) {
- poll(NULL, 0, WFQ_WAIT); /* Wait for 10ms */
- attempt = 0;
- } else
- caa_cpu_relax();
- }
- /*
- * Move queue head forward.
- */
- q->head = next;
- /*
- * Requeue dummy node if we just dequeued it.
- */
- if (node == &q->dummy) {
- _cds_wfq_node_init(node);
- _cds_wfq_enqueue(q, node);
- return ___cds_wfq_dequeue_blocking(q);
- }
- return node;
-}
-
-static inline struct cds_wfq_node *
-_cds_wfq_dequeue_blocking(struct cds_wfq_queue *q)
-{
- struct cds_wfq_node *retnode;
- int ret;
-
- ret = pthread_mutex_lock(&q->lock);
- assert(!ret);
- retnode = ___cds_wfq_dequeue_blocking(q);
- ret = pthread_mutex_unlock(&q->lock);
- assert(!ret);
- return retnode;
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _URCU_WFQUEUE_STATIC_H */
#ifdef _LGPL_SOURCE
-#include <urcu/wfqueue-static.h>
+#include <urcu/static/wfqueue.h>
#define cds_wfq_node_init _cds_wfq_node_init
#define cds_wfq_init _cds_wfq_init
+++ /dev/null
-#ifndef _URCU_WFSTACK_STATIC_H
-#define _URCU_WFSTACK_STATIC_H
-
-/*
- * wfstack-static.h
- *
- * Userspace RCU library - Stack with Wait-Free push, Blocking pop.
- *
- * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See wfstack.h for linking
- * dynamically with the userspace rcu library.
- *
- * Copyright 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include <pthread.h>
-#include <assert.h>
-#include <poll.h>
-#include <urcu/compiler.h>
-#include <urcu/uatomic_arch.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#define CDS_WF_STACK_END ((void *)0x1UL)
-#define CDS_WFS_ADAPT_ATTEMPTS 10 /* Retry if being set */
-#define CDS_WFS_WAIT 10 /* Wait 10 ms if being set */
-
-void _cds_wfs_node_init(struct cds_wfs_node *node)
-{
- node->next = NULL;
-}
-
-void _cds_wfs_init(struct cds_wfs_stack *s)
-{
- int ret;
-
- s->head = CDS_WF_STACK_END;
- ret = pthread_mutex_init(&s->lock, NULL);
- assert(!ret);
-}
-
-void _cds_wfs_push(struct cds_wfs_stack *s, struct cds_wfs_node *node)
-{
- struct cds_wfs_node *old_head;
-
- assert(node->next == NULL);
- /*
- * uatomic_xchg() implicit memory barrier orders earlier stores to node
- * (setting it to NULL) before publication.
- */
- old_head = uatomic_xchg(&s->head, node);
- /*
- * At this point, dequeuers see a NULL node->next, they should busy-wait
- * until node->next is set to old_head.
- */
- CMM_STORE_SHARED(node->next, old_head);
-}
-
-/*
- * Returns NULL if stack is empty.
- */
-struct cds_wfs_node *
-___cds_wfs_pop_blocking(struct cds_wfs_stack *s)
-{
- struct cds_wfs_node *head, *next;
- int attempt = 0;
-
-retry:
- head = CMM_LOAD_SHARED(s->head);
- if (head == CDS_WF_STACK_END)
- return NULL;
- /*
- * Adaptative busy-looping waiting for push to complete.
- */
- while ((next = CMM_LOAD_SHARED(head->next)) == NULL) {
- if (++attempt >= CDS_WFS_ADAPT_ATTEMPTS) {
- poll(NULL, 0, CDS_WFS_WAIT); /* Wait for 10ms */
- attempt = 0;
- } else
- caa_cpu_relax();
- }
- if (uatomic_cmpxchg(&s->head, head, next) == head)
- return head;
- else
- goto retry; /* Concurrent modification. Retry. */
-}
-
-struct cds_wfs_node *
-_cds_wfs_pop_blocking(struct cds_wfs_stack *s)
-{
- struct cds_wfs_node *retnode;
- int ret;
-
- ret = pthread_mutex_lock(&s->lock);
- assert(!ret);
- retnode = ___cds_wfs_pop_blocking(s);
- ret = pthread_mutex_unlock(&s->lock);
- assert(!ret);
- return retnode;
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _URCU_WFSTACK_STATIC_H */
#ifdef _LGPL_SOURCE
-#include <urcu/wfstack-static.h>
+#include <urcu/static/wfstack.h>
#define cds_wfs_node_init _cds_wfs_node_init
#define cds_wfs_init _cds_wfs_init
/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#include "urcu/wfqueue.h"
-#include "urcu/wfqueue-static.h"
+#include "urcu/static/wfqueue.h"
/*
* library wrappers to be used by non-LGPL compatible source code.
/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#include "urcu/wfstack.h"
-#include "urcu/wfstack-static.h"
+#include "urcu/static/wfstack.h"
/*
* library wrappers to be used by non-LGPL compatible source code.