From: Mathieu Desnoyers
Date: Sat, 3 Sep 2011 14:48:44 +0000 (-0400)
Subject: Merge branch 'master' into urcu/ht
X-Git-Tag: v0.7.0~43^2~191
X-Git-Url: http://git.liburcu.org/?p=urcu.git;a=commitdiff_plain;h=92cfe223501f564d3bd726a6c580702daeef4198;hp=76412f2410948c653536a4e34c468814c4cb2544

Merge branch 'master' into urcu/ht

Conflicts:
	Makefile.am

Signed-off-by: Mathieu Desnoyers
---

diff --git a/ChangeLog b/ChangeLog
index 6782b61..e355ee8 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,8 @@
+2011-07-21 Userspace RCU 0.6.4
+	* uatomic: Fix ARM build errors in uatomic.
+	* urcu tests: hold mutex across use of custom allocator.
+	* Portability fixes to support FreeBSD 8.2.
+
 2011-06-27 Userspace RCU 0.6.3
 	* uatomic: Fix i386 compatibility build errors in uatomic.

diff --git a/Makefile.am b/Makefile.am
index 2326049..ff7f1bb 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -34,28 +34,36 @@ if COMPAT_FUTEX
 COMPAT+=compat_futex.c
 endif
 
-lib_LTLIBRARIES = liburcu-cds.la liburcu.la liburcu-qsbr.la \
-	liburcu-mb.la liburcu-signal.la liburcu-bp.la
+lib_LTLIBRARIES = liburcu-common.la \
+	liburcu.la liburcu-qsbr.la \
+	liburcu-mb.la liburcu-signal.la liburcu-bp.la \
+	liburcu-cds.la
 
-liburcu_cds_la_SOURCES = wfqueue.c wfstack.c rculfqueue.c rculfstack.c \
-	rculfhash.c $(COMPAT)
+#
+# liburcu-common contains wait-free queues (needed by call_rcu) as well
+# as futex fallbacks.
+#
+liburcu_common_la_SOURCES = wfqueue.c wfstack.c $(COMPAT)
 
 liburcu_la_SOURCES = urcu.c urcu-pointer.c $(COMPAT)
-liburcu_la_LIBADD = liburcu-cds.la
+liburcu_la_LIBADD = liburcu-common.la
 
 liburcu_qsbr_la_SOURCES = urcu-qsbr.c urcu-pointer.c $(COMPAT)
-liburcu_qsbr_la_LIBADD = liburcu-cds.la
+liburcu_qsbr_la_LIBADD = liburcu-common.la
 
 liburcu_mb_la_SOURCES = urcu.c urcu-pointer.c $(COMPAT)
 liburcu_mb_la_CFLAGS = -DRCU_MB
-liburcu_mb_la_LIBADD = liburcu-cds.la
+liburcu_mb_la_LIBADD = liburcu-common.la
 
 liburcu_signal_la_SOURCES = urcu.c urcu-pointer.c $(COMPAT)
 liburcu_signal_la_CFLAGS = -DRCU_SIGNAL
-liburcu_signal_la_LIBADD = liburcu-cds.la
+liburcu_signal_la_LIBADD = liburcu-common.la
 
 liburcu_bp_la_SOURCES = urcu-bp.c urcu-pointer.c $(COMPAT)
-liburcu_bp_la_LIBADD = liburcu-cds.la
+liburcu_bp_la_LIBADD = liburcu-common.la
+
+liburcu_cds_la_SOURCES = rculfqueue.c rculfstack.c rculfhash.c $(COMPAT)
+liburcu_cds_la_LIBADD = liburcu-common.la
 
 pkgconfigdir = $(libdir)/pkgconfig
 pkgconfig_DATA = liburcu-cds.pc liburcu.pc liburcu-bp.pc liburcu-qsbr.pc \
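The new liburcu-common library introduced above groups the wait-free queue/stack and the futex compatibility code that call_rcu builds on, while liburcu-cds keeps the RCU lock-free data structures and links against it. As a rough illustration only (not part of this commit, and assuming the cds_wfq_* API from <urcu/wfqueue.h> as shipped in the 0.6/0.7 era), an application using the wait-free queue from liburcu-common could look like this, linked with -lurcu-common:

/* Hypothetical sketch: producer/consumer on the liburcu-common wait-free queue. */
#include <stdio.h>
#include <stdlib.h>
#include <urcu/wfqueue.h>
#include <urcu/compiler.h>	/* caa_container_of() */

struct job {
	int id;
	struct cds_wfq_node node;	/* embedded wait-free queue node */
};

int main(void)
{
	struct cds_wfq_queue q;
	struct cds_wfq_node *qnode;
	struct job *j;

	cds_wfq_init(&q);

	j = malloc(sizeof(*j));
	if (!j)
		return 1;
	j->id = 42;
	cds_wfq_node_init(&j->node);
	cds_wfq_enqueue(&q, &j->node);		/* enqueue side is wait-free */

	qnode = cds_wfq_dequeue_blocking(&q);	/* dequeue side takes an internal mutex */
	j = caa_container_of(qnode, struct job, node);
	printf("dequeued job %d\n", j->id);
	free(j);
	return 0;
}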
diff --git a/configure.ac b/configure.ac
index e4105c3..c4bd10a 100644
--- a/configure.ac
+++ b/configure.ac
@@ -2,7 +2,7 @@
 # Process this file with autoconf to produce a configure script.
 
-AC_INIT([userspace-rcu], [0.6.3], [mathieu dot desnoyers at efficios dot com])
+AC_INIT([userspace-rcu], [0.6.4], [mathieu dot desnoyers at efficios dot com])
 
 # Following the numbering scheme proposed by libtool for the library version
 # http://www.gnu.org/software/libtool/manual/html_node/Updating-version-info.html
@@ -83,11 +83,6 @@ fi
 UATOMICSRC=urcu/uatomic/$ARCHTYPE.h
 ARCHSRC=urcu/arch/$ARCHTYPE.h
 
-if test "x$ARCHTYPE" != xx86 -a "x$ARCHTYPE" != xppc; then
-	APISRC=tests/api_gcc.h
-else
-	APISRC=tests/api_$ARCHTYPE.h
-fi
 if test "$ARCHTYPE" == "armv7l"; then
 	CFLAGS="-mcpu=cortex-a9 -mtune=cortex-a9 -O"
 fi
@@ -241,7 +236,6 @@ CFLAGS=$saved_CFLAGS
 AC_CONFIG_LINKS([
 	urcu/arch.h:$ARCHSRC
 	urcu/uatomic.h:$UATOMICSRC
-	tests/api.h:$APISRC
 ])
 AC_CONFIG_FILES([
 	Makefile

diff --git a/rculfqueue.c b/rculfqueue.c
index 0daee5d..38eddcf 100644
--- a/rculfqueue.c
+++ b/rculfqueue.c
@@ -20,13 +20,10 @@
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  */
 
-#define _LGPL_SOURCE
-/* Use the urcu symbols to select the appropriate rcu flavor at link time */
-#include "urcu.h"
-
-#undef _LGPL_SOURCE
 /* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
+#undef _LGPL_SOURCE
 #include "urcu/rculfqueue.h"
+#define _LGPL_SOURCE
 #include "urcu/static/rculfqueue.h"
 
 /*
@@ -39,9 +36,15 @@ void cds_lfq_node_init_rcu(struct cds_lfq_node_rcu *node)
 }
 
 void cds_lfq_init_rcu(struct cds_lfq_queue_rcu *q,
-		      void (*release)(struct urcu_ref *ref))
+		      void queue_call_rcu(struct rcu_head *head,
+				void (*func)(struct rcu_head *head)))
+{
+	_cds_lfq_init_rcu(q, queue_call_rcu);
+}
+
+int cds_lfq_destroy_rcu(struct cds_lfq_queue_rcu *q)
 {
-	_cds_lfq_init_rcu(q, release);
+	return _cds_lfq_destroy_rcu(q);
 }
 
 void cds_lfq_enqueue_rcu(struct cds_lfq_queue_rcu *q, struct cds_lfq_node_rcu *node)

diff --git a/rculfstack.c b/rculfstack.c
index 4a3041d..574ea87 100644
--- a/rculfstack.c
+++ b/rculfstack.c
@@ -20,13 +20,10 @@
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  */
 
-/* Use the urcu symbols to select the appropriate rcu flavor at link time */
-#define _LGPL_SOURCE
-#include "urcu.h"
-
-#undef _LGPL_SOURCE
 /* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
+#undef _LGPL_SOURCE
 #include "urcu/rculfstack.h"
+#define _LGPL_SOURCE
 #include "urcu/static/rculfstack.h"
 
 /*
@@ -44,9 +41,10 @@ void cds_lfs_init_rcu(struct cds_lfs_stack_rcu *s)
 	_cds_lfs_init_rcu(s);
 }
 
-void cds_lfs_push_rcu(struct cds_lfs_stack_rcu *s, struct cds_lfs_node_rcu *node)
+int cds_lfs_push_rcu(struct cds_lfs_stack_rcu *s,
+		struct cds_lfs_node_rcu *node)
 {
-	_cds_lfs_push_rcu(s, node);
+	return _cds_lfs_push_rcu(s, node);
 }
 
 struct cds_lfs_node_rcu *cds_lfs_pop_rcu(struct cds_lfs_stack_rcu *s)
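The rculfqueue.c change above reworks the RCU lock-free queue so it is initialized with the flavor's call_rcu() function instead of a urcu_ref release callback, and it adds cds_lfq_destroy_rcu(). The sketch below is illustrative only, not part of this commit; it assumes the default (memb) flavor, that <urcu.h> exposes call_rcu() and the read-side API, and the era's convention that enqueue/dequeue run inside an RCU read-side critical section while freeing a dequeued node is deferred through call_rcu():

/* Hypothetical sketch: RCU lock-free queue with call_rcu-driven reclaim. */
#include <stdlib.h>
#include <urcu.h>		/* call_rcu(), rcu_read_lock(), thread registration */
#include <urcu/rculfqueue.h>
#include <urcu/compiler.h>	/* caa_container_of() */

struct item {
	int value;
	struct cds_lfq_node_rcu qnode;
	struct rcu_head rcu;
};

static void free_item(struct rcu_head *head)
{
	free(caa_container_of(head, struct item, rcu));
}

static void lfq_example(void)
{
	struct cds_lfq_queue_rcu q;
	struct cds_lfq_node_rcu *qnode;
	struct item *it;

	rcu_register_thread();
	cds_lfq_init_rcu(&q, call_rcu);	/* queue now drives reclaim through call_rcu() */

	it = malloc(sizeof(*it));
	if (!it)
		goto out;
	it->value = 1;
	cds_lfq_node_init_rcu(&it->qnode);
	rcu_read_lock();
	cds_lfq_enqueue_rcu(&q, &it->qnode);
	rcu_read_unlock();

	rcu_read_lock();
	qnode = cds_lfq_dequeue_rcu(&q);
	rcu_read_unlock();
	if (qnode) {
		it = caa_container_of(qnode, struct item, qnode);
		call_rcu(&it->rcu, free_item);	/* free only after a grace period */
	}

	(void)cds_lfq_destroy_rcu(&q);	/* returns nonzero if the queue is not empty */
out:
	rcu_unregister_thread();
}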
diff --git a/tests/Makefile.am b/tests/Makefile.am
index 3917cd8..d32088b 100644
--- a/tests/Makefile.am
+++ b/tests/Makefile.am
@@ -37,6 +37,7 @@ URCU_SIGNAL=$(top_srcdir)/urcu.c $(top_srcdir)/urcu-pointer.c $(top_srcdir)/wfqu
 URCU_BP=$(top_srcdir)/urcu-bp.c $(top_srcdir)/urcu-pointer.c $(top_srcdir)/wfqueue.c $(COMPAT)
 URCU_DEFER=$(top_srcdir)/urcu.c $(top_srcdir)/urcu-pointer.c $(top_srcdir)/wfqueue.c $(COMPAT)
 
+URCU_COMMON_LIB=$(top_builddir)/liburcu-common.la
 URCU_LIB=$(top_builddir)/liburcu.la
 URCU_QSBR_LIB=$(top_builddir)/liburcu-qsbr.la
 URCU_MB_LIB=$(top_builddir)/liburcu-mb.la
@@ -44,8 +45,7 @@ URCU_SIGNAL_LIB=$(top_builddir)/liburcu-signal.la
 URCU_BP_LIB=$(top_builddir)/liburcu-bp.la
 URCU_CDS_LIB=$(top_builddir)/liburcu-cds.la
 
-EXTRA_DIST = $(top_srcdir)/tests/api_*.h
-
+EXTRA_DIST = $(top_srcdir)/tests/api.h runall.sh
 
 test_urcu_SOURCES = test_urcu.c $(URCU)
 
@@ -92,23 +92,23 @@ test_perthreadlock_SOURCES = test_perthreadlock.c $(URCU_SIGNAL)
 rcutorture_urcu_SOURCES = urcutorture.c
 rcutorture_urcu_CFLAGS = -DRCU_MEMBARRIER $(AM_CFLAGS)
-rcutorture_urcu_LDADD = $(URCU) $(URCU_CDS_LIB)
+rcutorture_urcu_LDADD = $(URCU)
 
 rcutorture_urcu_mb_SOURCES = urcutorture.c
 rcutorture_urcu_mb_CFLAGS = -DRCU_MB $(AM_CFLAGS)
-rcutorture_urcu_mb_LDADD = $(URCU_MB_LIB) $(URCU_CDS_LIB)
+rcutorture_urcu_mb_LDADD = $(URCU_MB_LIB)
 
 rcutorture_qsbr_SOURCES = urcutorture.c
-rcutorture_qsbr_CFLAGS = -DRCU_QSBR $(AM_CFLAGS)
-rcutorture_qsbr_LDADD = $(URCU_QSBR_LIB) $(URCU_CDS_LIB)
+rcutorture_qsbr_CFLAGS = -DTORTURE_QSBR -DRCU_QSBR $(AM_CFLAGS)
+rcutorture_qsbr_LDADD = $(URCU_QSBR_LIB)
 
 rcutorture_urcu_signal_SOURCES = urcutorture.c
 rcutorture_urcu_signal_CFLAGS = -DRCU_SIGNAL $(AM_CFLAGS)
-rcutorture_urcu_signal_LDADD = $(URCU_SIGNAL_LIB) $(URCU_CDS_LIB)
+rcutorture_urcu_signal_LDADD = $(URCU_SIGNAL_LIB)
 
 rcutorture_urcu_bp_SOURCES = urcutorture.c
 rcutorture_urcu_bp_CFLAGS = -DRCU_BP $(AM_CFLAGS)
-rcutorture_urcu_bp_LDADD = $(URCU_BP_LIB) $(URCU_CDS_LIB)
+rcutorture_urcu_bp_LDADD = $(URCU_BP_LIB)
 
 test_mutex_SOURCES = test_mutex.c $(URCU)
 
@@ -155,25 +155,25 @@ test_urcu_bp_SOURCES = test_urcu_bp.c $(URCU_BP)
 test_urcu_bp_dynamic_link_SOURCES = test_urcu_bp.c $(URCU_BP)
 test_urcu_bp_dynamic_link_CFLAGS = -DDYNAMIC_LINK_TEST $(AM_CFLAGS)
 
-test_urcu_lfq_SOURCES = test_urcu_lfq.c $(URCU)
+test_urcu_lfq_SOURCES = test_urcu_lfq.c $(URCU) $(URCU_CDS_LIB)
 test_urcu_lfq_dynlink_SOURCES = test_urcu_lfq.c $(URCU)
 test_urcu_lfq_dynlink_CFLAGS = -DDYNAMIC_LINK_TEST $(AM_CFLAGS)
 test_urcu_lfq_dynlink_LDADD = $(URCU_CDS_LIB)
 
-test_urcu_wfq_SOURCES = test_urcu_wfq.c $(COMPAT)
+test_urcu_wfq_SOURCES = test_urcu_wfq.c $(URCU_COMMON_LIB)
 test_urcu_wfq_dynlink_SOURCES = test_urcu_wfq.c
 test_urcu_wfq_dynlink_CFLAGS = -DDYNAMIC_LINK_TEST $(AM_CFLAGS)
-test_urcu_wfq_dynlink_LDADD = $(URCU_CDS_LIB)
+test_urcu_wfq_dynlink_LDADD = $(URCU_COMMON_LIB)
 
-test_urcu_lfs_SOURCES = test_urcu_lfs.c $(URCU_DEFER)
+test_urcu_lfs_SOURCES = test_urcu_lfs.c $(URCU_CDS_LIB) $(URCU_DEFER)
 test_urcu_lfs_dynlink_SOURCES = test_urcu_lfs.c $(URCU_DEFER)
 test_urcu_lfs_dynlink_CFLAGS = -DDYNAMIC_LINK_TEST $(AM_CFLAGS)
 test_urcu_lfs_dynlink_LDADD = $(URCU_CDS_LIB)
 
-test_urcu_wfs_SOURCES = test_urcu_wfs.c $(COMPAT)
+test_urcu_wfs_SOURCES = test_urcu_wfs.c $(URCU_COMMON_LIB)
 test_urcu_wfs_dynlink_SOURCES = test_urcu_wfs.c
 test_urcu_wfs_dynlink_CFLAGS = -DDYNAMIC_LINK_TEST $(AM_CFLAGS)
-test_urcu_wfs_dynlink_LDADD = $(URCU_CDS_LIB)
+test_urcu_wfs_dynlink_LDADD = $(URCU_COMMON_LIB)
 
 test_urcu_hash_SOURCES = test_urcu_hash.c $(COMPAT)
 test_urcu_hash_CFLAGS = -DRCU_MEMBARRIER $(AM_CFLAGS)

diff --git a/tests/api.h b/tests/api.h
new file mode 100644
index 0000000..c5d716f
--- /dev/null
+++ b/tests/api.h
@@ -0,0 +1,317 @@
+
+#ifndef _INCLUDE_API_H
+#define _INCLUDE_API_H
+
+#include "../config.h"
+
+/*
+ * common.h: Common Linux kernel-isms.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; but version 2 of the License only due
+ * to code included from the Linux kernel.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright (c) 2006 Paul E. McKenney, IBM. + * + * Much code taken from the Linux kernel. For such code, the option + * to redistribute under later versions of GPL might not be available. + */ + +#include +#include + +/* + * Machine parameters. + */ + +#define ____cacheline_internodealigned_in_smp \ + __attribute__((__aligned__(CAA_CACHE_LINE_SIZE))) + +/* + * api_pthreads.h: API mapping to pthreads environment. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. However, please note that much + * of the code in this file derives from the Linux kernel, and that such + * code may not be available except under GPLv2. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright (c) 2006 Paul E. McKenney, IBM. + */ + +#include +#include +#include +#include +#include +#define __USE_GNU +#include +#include +#include +/* #include "atomic.h" */ + +/* + * Exclusive locking primitives. + */ + +typedef pthread_mutex_t spinlock_t; + +#define DEFINE_SPINLOCK(lock) spinlock_t lock = PTHREAD_MUTEX_INITIALIZER; +#define __SPIN_LOCK_UNLOCKED(lockp) PTHREAD_MUTEX_INITIALIZER + +static void spin_lock_init(spinlock_t *sp) +{ + if (pthread_mutex_init(sp, NULL) != 0) { + perror("spin_lock_init:pthread_mutex_init"); + exit(-1); + } +} + +static void spin_lock(spinlock_t *sp) +{ + if (pthread_mutex_lock(sp) != 0) { + perror("spin_lock:pthread_mutex_lock"); + exit(-1); + } +} + +static void spin_unlock(spinlock_t *sp) +{ + if (pthread_mutex_unlock(sp) != 0) { + perror("spin_unlock:pthread_mutex_unlock"); + exit(-1); + } +} + +#define spin_lock_irqsave(l, f) do { f = 1; spin_lock(l); } while (0) +#define spin_unlock_irqrestore(l, f) do { f = 0; spin_unlock(l); } while (0) + +/* + * Thread creation/destruction primitives. + */ + +typedef pthread_t thread_id_t; + +#define NR_THREADS 128 + +#define __THREAD_ID_MAP_EMPTY ((thread_id_t) 0) +#define __THREAD_ID_MAP_WAITING ((thread_id_t) 1) +thread_id_t __thread_id_map[NR_THREADS]; +spinlock_t __thread_id_map_mutex; + +#define for_each_thread(t) \ + for (t = 0; t < NR_THREADS; t++) + +#define for_each_running_thread(t) \ + for (t = 0; t < NR_THREADS; t++) \ + if ((__thread_id_map[t] != __THREAD_ID_MAP_EMPTY) && \ + (__thread_id_map[t] != __THREAD_ID_MAP_WAITING)) + +#define for_each_tid(t, tid) \ + for (t = 0; t < NR_THREADS; t++) \ + if ((((tid) = __thread_id_map[t]) != __THREAD_ID_MAP_EMPTY) && \ + ((tid) != __THREAD_ID_MAP_WAITING)) + +pthread_key_t thread_id_key; + +static int __smp_thread_id(void) +{ + int i; + thread_id_t tid = pthread_self(); + + for (i = 0; i < NR_THREADS; i++) { + if (__thread_id_map[i] == tid) { + long v = i + 1; /* must be non-NULL. 
*/ + + if (pthread_setspecific(thread_id_key, (void *)v) != 0) { + perror("pthread_setspecific"); + exit(-1); + } + return i; + } + } + spin_lock(&__thread_id_map_mutex); + for (i = 0; i < NR_THREADS; i++) { + if (__thread_id_map[i] == tid) + spin_unlock(&__thread_id_map_mutex); + return i; + } + spin_unlock(&__thread_id_map_mutex); + fprintf(stderr, "smp_thread_id: Rogue thread, id: %d(%#x)\n", + (int)tid, (int)tid); + exit(-1); +} + +static int smp_thread_id(void) +{ + void *id; + + id = pthread_getspecific(thread_id_key); + if (id == NULL) + return __smp_thread_id(); + return (long)(id - 1); +} + +static thread_id_t create_thread(void *(*func)(void *), void *arg) +{ + thread_id_t tid; + int i; + + spin_lock(&__thread_id_map_mutex); + for (i = 0; i < NR_THREADS; i++) { + if (__thread_id_map[i] == __THREAD_ID_MAP_EMPTY) + break; + } + if (i >= NR_THREADS) { + spin_unlock(&__thread_id_map_mutex); + fprintf(stderr, "Thread limit of %d exceeded!\n", NR_THREADS); + exit(-1); + } + __thread_id_map[i] = __THREAD_ID_MAP_WAITING; + spin_unlock(&__thread_id_map_mutex); + if (pthread_create(&tid, NULL, func, arg) != 0) { + perror("create_thread:pthread_create"); + exit(-1); + } + __thread_id_map[i] = tid; + return tid; +} + +static void *wait_thread(thread_id_t tid) +{ + int i; + void *vp; + + for (i = 0; i < NR_THREADS; i++) { + if (__thread_id_map[i] == tid) + break; + } + if (i >= NR_THREADS){ + fprintf(stderr, "wait_thread: bad tid = %d(%#x)\n", + (int)tid, (int)tid); + exit(-1); + } + if (pthread_join(tid, &vp) != 0) { + perror("wait_thread:pthread_join"); + exit(-1); + } + __thread_id_map[i] = __THREAD_ID_MAP_EMPTY; + return vp; +} + +static void wait_all_threads(void) +{ + int i; + thread_id_t tid; + + for (i = 1; i < NR_THREADS; i++) { + tid = __thread_id_map[i]; + if (tid != __THREAD_ID_MAP_EMPTY && + tid != __THREAD_ID_MAP_WAITING) + (void)wait_thread(tid); + } +} + +#ifndef HAVE_CPU_SET_T +typedef unsigned long cpu_set_t; +# define CPU_ZERO(cpuset) do { *(cpuset) = 0; } while(0) +# define CPU_SET(cpu, cpuset) do { *(cpuset) |= (1UL << (cpu)); } while(0) +#endif + +static void run_on(int cpu) +{ +#if HAVE_SCHED_SETAFFINITY + cpu_set_t mask; + + CPU_ZERO(&mask); + CPU_SET(cpu, &mask); +#if SCHED_SETAFFINITY_ARGS == 2 + sched_setaffinity(0, &mask); +#else + sched_setaffinity(0, sizeof(mask), &mask); +#endif +#endif /* HAVE_SCHED_SETAFFINITY */ +} + +/* + * timekeeping -- very crude -- should use MONOTONIC... + */ + +long long get_microseconds(void) +{ + struct timeval tv; + + if (gettimeofday(&tv, NULL) != 0) + abort(); + return ((long long)tv.tv_sec) * 1000000LL + (long long)tv.tv_usec; +} + +/* + * Per-thread variables. + */ + +#define DEFINE_PER_THREAD(type, name) \ + struct { \ + __typeof__(type) v \ + __attribute__((__aligned__(CAA_CACHE_LINE_SIZE))); \ + } __per_thread_##name[NR_THREADS]; +#define DECLARE_PER_THREAD(type, name) extern DEFINE_PER_THREAD(type, name) + +#define per_thread(name, thread) __per_thread_##name[thread].v +#define __get_thread_var(name) per_thread(name, smp_thread_id()) + +#define init_per_thread(name, v) \ + do { \ + int __i_p_t_i; \ + for (__i_p_t_i = 0; __i_p_t_i < NR_THREADS; __i_p_t_i++) \ + per_thread(name, __i_p_t_i) = v; \ + } while (0) + +DEFINE_PER_THREAD(int, smp_processor_id); + +/* + * Bug checks. + */ + +#define BUG_ON(c) do { if (!(c)) abort(); } while (0) + +/* + * Initialization -- Must be called before calling any primitives. 
+ */ + +static void smp_init(void) +{ + int i; + + spin_lock_init(&__thread_id_map_mutex); + __thread_id_map[0] = pthread_self(); + for (i = 1; i < NR_THREADS; i++) + __thread_id_map[i] = __THREAD_ID_MAP_EMPTY; + init_per_thread(smp_processor_id, 0); + if (pthread_key_create(&thread_id_key, NULL) != 0) { + perror("pthread_key_create"); + exit(-1); + } +} + +#endif diff --git a/tests/api_gcc.h b/tests/api_gcc.h deleted file mode 100644 index 2db1ef2..0000000 --- a/tests/api_gcc.h +++ /dev/null @@ -1,1339 +0,0 @@ - -#ifndef _INCLUDE_API_H -#define _INCLUDE_API_H - -#include "../config.h" - -/* - * common.h: Common Linux kernel-isms. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; but version 2 of the License only due - * to code included from the Linux kernel. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. - * - * Copyright (c) 2006 Paul E. McKenney, IBM. - * - * Much code taken from the Linux kernel. For such code, the option - * to redistribute under later versions of GPL might not be available. - */ - -#ifndef __always_inline -#define __always_inline inline -#endif - -#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) -#define BUILD_BUG_ON_ZERO(e) (sizeof(char[1 - 2 * !!(e)]) - 1) - -#ifdef __ASSEMBLY__ -# define stringify_in_c(...) __VA_ARGS__ -# define ASM_CONST(x) x -#else -/* This version of stringify will deal with commas... */ -# define __stringify_in_c(...) #__VA_ARGS__ -# define stringify_in_c(...) __stringify_in_c(__VA_ARGS__) " " -# define __ASM_CONST(x) x##UL -# define ASM_CONST(x) __ASM_CONST(x) -#endif - - -/* - * arch-i386.h: Expose x86 atomic instructions. 80486 and better only. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, but version 2 only due to inclusion - * of Linux-kernel code. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. - * - * Copyright (c) 2006 Paul E. McKenney, IBM. - * - * Much code taken from the Linux kernel. For such code, the option - * to redistribute under later versions of GPL might not be available. - */ - -/* - * Machine parameters. - */ - -/* #define CAA_CACHE_LINE_SIZE 64 */ -#define ____cacheline_internodealigned_in_smp \ - __attribute__((__aligned__(1 << 6))) - -#define LOCK_PREFIX "lock ; " - -#if 0 /* duplicate with arch_atomic.h */ -/* - * Atomic data structure, initialization, and access. 
- */ - -typedef struct { volatile int counter; } atomic_t; - -#define ATOMIC_INIT(i) { (i) } - -#define atomic_read(v) ((v)->counter) -#define atomic_set(v, i) (((v)->counter) = (i)) - -/* - * Atomic operations. - */ - -/** - * atomic_add - add integer to atomic variable - * @i: integer value to add - * @v: pointer of type atomic_t - * - * Atomically adds @i to @v. - */ - -static __inline__ void atomic_add(int i, atomic_t *v) -{ - (void)__sync_fetch_and_add(&v->counter, i); -} - -/** - * atomic_sub - subtract the atomic variable - * @i: integer value to subtract - * @v: pointer of type atomic_t - * - * Atomically subtracts @i from @v. - */ -static __inline__ void atomic_sub(int i, atomic_t *v) -{ - (void)__sync_fetch_and_add(&v->counter, -i); -} - -/** - * atomic_sub_and_test - subtract value from variable and test result - * @i: integer value to subtract - * @v: pointer of type atomic_t - * - * Atomically subtracts @i from @v and returns - * true if the result is zero, or false for all - * other cases. - */ -static __inline__ int atomic_sub_and_test(int i, atomic_t *v) -{ - return __sync_add_and_fetch(&v->counter, -i) == 0; -} - -/** - * atomic_inc - increment atomic variable - * @v: pointer of type atomic_t - * - * Atomically increments @v by 1. - */ -static __inline__ void atomic_inc(atomic_t *v) -{ - (void)__sync_fetch_and_add(&v->counter, 1); -} - -/** - * atomic_dec - decrement atomic variable - * @v: pointer of type atomic_t - * - * Atomically decrements @v by 1. - */ -static __inline__ void atomic_dec(atomic_t *v) -{ - (void)__sync_fetch_and_add(&v->counter, -1); -} - -/** - * atomic_dec_and_test - decrement and test - * @v: pointer of type atomic_t - * - * Atomically decrements @v by 1 and - * returns true if the result is 0, or false for all other - * cases. - */ -static __inline__ int atomic_dec_and_test(atomic_t *v) -{ - return __sync_add_and_fetch(&v->counter, -1) == 0; -} - -/** - * atomic_inc_and_test - increment and test - * @v: pointer of type atomic_t - * - * Atomically increments @v by 1 - * and returns true if the result is zero, or false for all - * other cases. - */ -static __inline__ int atomic_inc_and_test(atomic_t *v) -{ - return __sync_add_and_fetch(&v->counter, 1) == 0; -} - -/** - * atomic_add_negative - add and test if negative - * @v: pointer of type atomic_t - * @i: integer value to add - * - * Atomically adds @i to @v and returns true - * if the result is negative, or false when - * result is greater than or equal to zero. - */ -static __inline__ int atomic_add_negative(int i, atomic_t *v) -{ - return __sync_add_and_fetch(&v->counter, i) < 0; -} - -/** - * atomic_add_return - add and return - * @v: pointer of type atomic_t - * @i: integer value to add - * - * Atomically adds @i to @v and returns @i + @v - */ -static __inline__ int atomic_add_return(int i, atomic_t *v) -{ - return __sync_add_and_fetch(&v->counter, i); -} - -static __inline__ int atomic_sub_return(int i, atomic_t *v) -{ - return atomic_add_return(-i,v); -} - -static inline unsigned int -cmpxchg(volatile long *ptr, long oldval, long newval) -{ - return __sync_val_compare_and_swap(ptr, oldval, newval); -} - -#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new)) -#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) - -/** - * atomic_add_unless - add unless the number is a given value - * @v: pointer of type atomic_t - * @a: the amount to add to v... - * @u: ...unless v is equal to u. - * - * Atomically adds @a to @v, so long as it was not @u. 
- * Returns non-zero if @v was not @u, and zero otherwise. - */ -#define atomic_add_unless(v, a, u) \ -({ \ - int c, old; \ - c = atomic_read(v); \ - for (;;) { \ - if (unlikely(c == (u))) \ - break; \ - old = atomic_cmpxchg((v), c, c + (a)); \ - if (likely(old == c)) \ - break; \ - c = old; \ - } \ - c != (u); \ -}) -#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) - -#define atomic_inc_return(v) (atomic_add_return(1,v)) -#define atomic_dec_return(v) (atomic_sub_return(1,v)) - -/* Atomic operations are already serializing on x86 */ -#define smp_mb__before_atomic_dec() cmm_barrier() -#define smp_mb__after_atomic_dec() cmm_barrier() -#define smp_mb__before_atomic_inc() cmm_barrier() -#define smp_mb__after_atomic_inc() cmm_barrier() - -#endif //0 /* duplicate with arch_atomic.h */ - -/* - * api_pthreads.h: API mapping to pthreads environment. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. However, please note that much - * of the code in this file derives from the Linux kernel, and that such - * code may not be available except under GPLv2. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. - * - * Copyright (c) 2006 Paul E. McKenney, IBM. - */ - -#include -#include -#include -#include -#include -#define __USE_GNU -#include -#include -#include -/* #include "atomic.h" */ - -/* - * Default machine parameters. - */ - -#ifndef CAA_CACHE_LINE_SIZE -/* #define CAA_CACHE_LINE_SIZE 128 */ -#endif /* #ifndef CAA_CACHE_LINE_SIZE */ - -/* - * Exclusive locking primitives. - */ - -typedef pthread_mutex_t spinlock_t; - -#define DEFINE_SPINLOCK(lock) spinlock_t lock = PTHREAD_MUTEX_INITIALIZER; -#define __SPIN_LOCK_UNLOCKED(lockp) PTHREAD_MUTEX_INITIALIZER - -static void spin_lock_init(spinlock_t *sp) -{ - if (pthread_mutex_init(sp, NULL) != 0) { - perror("spin_lock_init:pthread_mutex_init"); - exit(-1); - } -} - -static void spin_lock(spinlock_t *sp) -{ - if (pthread_mutex_lock(sp) != 0) { - perror("spin_lock:pthread_mutex_lock"); - exit(-1); - } -} - -static void spin_unlock(spinlock_t *sp) -{ - if (pthread_mutex_unlock(sp) != 0) { - perror("spin_unlock:pthread_mutex_unlock"); - exit(-1); - } -} - -#define spin_lock_irqsave(l, f) do { f = 1; spin_lock(l); } while (0) -#define spin_unlock_irqrestore(l, f) do { f = 0; spin_unlock(l); } while (0) - -/* - * Thread creation/destruction primitives. 
- */ - -typedef pthread_t thread_id_t; - -#define NR_THREADS 128 - -#define __THREAD_ID_MAP_EMPTY 0 -#define __THREAD_ID_MAP_WAITING 1 -thread_id_t __thread_id_map[NR_THREADS]; -spinlock_t __thread_id_map_mutex; - -#define for_each_thread(t) \ - for (t = 0; t < NR_THREADS; t++) - -#define for_each_running_thread(t) \ - for (t = 0; t < NR_THREADS; t++) \ - if ((__thread_id_map[t] != __THREAD_ID_MAP_EMPTY) && \ - (__thread_id_map[t] != __THREAD_ID_MAP_WAITING)) - -pthread_key_t thread_id_key; - -static int __smp_thread_id(void) -{ - int i; - thread_id_t tid = pthread_self(); - - for (i = 0; i < NR_THREADS; i++) { - if (__thread_id_map[i] == tid) { - long v = i + 1; /* must be non-NULL. */ - - if (pthread_setspecific(thread_id_key, (void *)v) != 0) { - perror("pthread_setspecific"); - exit(-1); - } - return i; - } - } - spin_lock(&__thread_id_map_mutex); - for (i = 0; i < NR_THREADS; i++) { - if (__thread_id_map[i] == tid) - spin_unlock(&__thread_id_map_mutex); - return i; - } - spin_unlock(&__thread_id_map_mutex); - fprintf(stderr, "smp_thread_id: Rogue thread, id: %d(%#x)\n", - (int)tid, (int)tid); - exit(-1); -} - -static int smp_thread_id(void) -{ - void *id; - - id = pthread_getspecific(thread_id_key); - if (id == NULL) - return __smp_thread_id(); - return (long)(id - 1); -} - -static thread_id_t create_thread(void *(*func)(void *), void *arg) -{ - thread_id_t tid; - int i; - - spin_lock(&__thread_id_map_mutex); - for (i = 0; i < NR_THREADS; i++) { - if (__thread_id_map[i] == __THREAD_ID_MAP_EMPTY) - break; - } - if (i >= NR_THREADS) { - spin_unlock(&__thread_id_map_mutex); - fprintf(stderr, "Thread limit of %d exceeded!\n", NR_THREADS); - exit(-1); - } - __thread_id_map[i] = __THREAD_ID_MAP_WAITING; - spin_unlock(&__thread_id_map_mutex); - if (pthread_create(&tid, NULL, func, arg) != 0) { - perror("create_thread:pthread_create"); - exit(-1); - } - __thread_id_map[i] = tid; - return tid; -} - -static void *wait_thread(thread_id_t tid) -{ - int i; - void *vp; - - for (i = 0; i < NR_THREADS; i++) { - if (__thread_id_map[i] == tid) - break; - } - if (i >= NR_THREADS){ - fprintf(stderr, "wait_thread: bad tid = %d(%#x)\n", - (int)tid, (int)tid); - exit(-1); - } - if (pthread_join(tid, &vp) != 0) { - perror("wait_thread:pthread_join"); - exit(-1); - } - __thread_id_map[i] = __THREAD_ID_MAP_EMPTY; - return vp; -} - -static void wait_all_threads(void) -{ - int i; - thread_id_t tid; - - for (i = 1; i < NR_THREADS; i++) { - tid = __thread_id_map[i]; - if (tid != __THREAD_ID_MAP_EMPTY && - tid != __THREAD_ID_MAP_WAITING) - (void)wait_thread(tid); - } -} - -#ifndef HAVE_CPU_SET_T -typedef unsigned long cpu_set_t; -# define CPU_ZERO(cpuset) do { *(cpuset) = 0; } while(0) -# define CPU_SET(cpu, cpuset) do { *(cpuset) |= (1UL << (cpu)); } while(0) -#endif - -static void run_on(int cpu) -{ -#if HAVE_SCHED_SETAFFINITY - cpu_set_t mask; - - CPU_ZERO(&mask); - CPU_SET(cpu, &mask); -#if SCHED_SETAFFINITY_ARGS == 2 - sched_setaffinity(0, &mask); -#else - sched_setaffinity(0, sizeof(mask), &mask); -#endif -#endif /* HAVE_SCHED_SETAFFINITY */ -} - -/* - * timekeeping -- very crude -- should use MONOTONIC... - */ - -long long get_microseconds(void) -{ - struct timeval tv; - - if (gettimeofday(&tv, NULL) != 0) - abort(); - return ((long long)tv.tv_sec) * 1000000LL + (long long)tv.tv_usec; -} - -/* - * Per-thread variables. 
- */ - -#define DEFINE_PER_THREAD(type, name) \ - struct { \ - __typeof__(type) v \ - __attribute__((__aligned__(CAA_CACHE_LINE_SIZE))); \ - } __per_thread_##name[NR_THREADS]; -#define DECLARE_PER_THREAD(type, name) extern DEFINE_PER_THREAD(type, name) - -#define per_thread(name, thread) __per_thread_##name[thread].v -#define __get_thread_var(name) per_thread(name, smp_thread_id()) - -#define init_per_thread(name, v) \ - do { \ - int __i_p_t_i; \ - for (__i_p_t_i = 0; __i_p_t_i < NR_THREADS; __i_p_t_i++) \ - per_thread(name, __i_p_t_i) = v; \ - } while (0) - -/* - * CPU traversal primitives. - */ - -#ifndef NR_CPUS -#define NR_CPUS 16 -#endif /* #ifndef NR_CPUS */ - -#define for_each_possible_cpu(cpu) \ - for (cpu = 0; cpu < NR_CPUS; cpu++) -#define for_each_online_cpu(cpu) \ - for (cpu = 0; cpu < NR_CPUS; cpu++) - -/* - * Per-CPU variables. - */ - -#define DEFINE_PER_CPU(type, name) \ - struct { \ - __typeof__(type) v \ - __attribute__((__aligned__(CAA_CACHE_LINE_SIZE))); \ - } __per_cpu_##name[NR_CPUS] -#define DECLARE_PER_CPU(type, name) extern DEFINE_PER_CPU(type, name) - -DEFINE_PER_THREAD(int, smp_processor_id); - -#define per_cpu(name, thread) __per_cpu_##name[thread].v -#define __get_cpu_var(name) per_cpu(name, smp_processor_id()) - -#define init_per_cpu(name, v) \ - do { \ - int __i_p_c_i; \ - for (__i_p_c_i = 0; __i_p_c_i < NR_CPUS; __i_p_c_i++) \ - per_cpu(name, __i_p_c_i) = v; \ - } while (0) - -/* - * CPU state checking (crowbarred). - */ - -#define idle_cpu(cpu) 0 -#define in_softirq() 1 -#define hardirq_count() 0 -#define PREEMPT_SHIFT 0 -#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS) -#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS) -#define PREEMPT_BITS 8 -#define SOFTIRQ_BITS 8 - -/* - * CPU hotplug. - */ - -struct notifier_block { - int (*notifier_call)(struct notifier_block *, unsigned long, void *); - struct notifier_block *next; - int priority; -}; - -#define CPU_ONLINE 0x0002 /* CPU (unsigned)v is up */ -#define CPU_UP_PREPARE 0x0003 /* CPU (unsigned)v coming up */ -#define CPU_UP_CANCELED 0x0004 /* CPU (unsigned)v NOT coming up */ -#define CPU_DOWN_PREPARE 0x0005 /* CPU (unsigned)v going down */ -#define CPU_DOWN_FAILED 0x0006 /* CPU (unsigned)v NOT going down */ -#define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */ -#define CPU_DYING 0x0008 /* CPU (unsigned)v not running any task, - * not handling interrupts, soon dead */ -#define CPU_POST_DEAD 0x0009 /* CPU (unsigned)v dead, cpu_hotplug - * lock is dropped */ - -/* Used for CPU hotplug events occuring while tasks are frozen due to a suspend - * operation in progress - */ -#define CPU_TASKS_FROZEN 0x0010 - -#define CPU_ONLINE_FROZEN (CPU_ONLINE | CPU_TASKS_FROZEN) -#define CPU_UP_PREPARE_FROZEN (CPU_UP_PREPARE | CPU_TASKS_FROZEN) -#define CPU_UP_CANCELED_FROZEN (CPU_UP_CANCELED | CPU_TASKS_FROZEN) -#define CPU_DOWN_PREPARE_FROZEN (CPU_DOWN_PREPARE | CPU_TASKS_FROZEN) -#define CPU_DOWN_FAILED_FROZEN (CPU_DOWN_FAILED | CPU_TASKS_FROZEN) -#define CPU_DEAD_FROZEN (CPU_DEAD | CPU_TASKS_FROZEN) -#define CPU_DYING_FROZEN (CPU_DYING | CPU_TASKS_FROZEN) - -/* Hibernation and suspend events */ -#define PM_HIBERNATION_PREPARE 0x0001 /* Going to hibernate */ -#define PM_POST_HIBERNATION 0x0002 /* Hibernation finished */ -#define PM_SUSPEND_PREPARE 0x0003 /* Going to suspend the system */ -#define PM_POST_SUSPEND 0x0004 /* Suspend finished */ -#define PM_RESTORE_PREPARE 0x0005 /* Going to restore a saved image */ -#define PM_POST_RESTORE 0x0006 /* Restore failed */ - -#define NOTIFY_DONE 0x0000 /* Don't care */ -#define 
NOTIFY_OK 0x0001 /* Suits me */ -#define NOTIFY_STOP_MASK 0x8000 /* Don't call further */ -#define NOTIFY_BAD (NOTIFY_STOP_MASK|0x0002) - /* Bad/Veto action */ -/* - * Clean way to return from the notifier and stop further calls. - */ -#define NOTIFY_STOP (NOTIFY_OK|NOTIFY_STOP_MASK) - -/* - * Bug checks. - */ - -#define BUG_ON(c) do { if (!(c)) abort(); } while (0) - -/* - * Initialization -- Must be called before calling any primitives. - */ - -static void smp_init(void) -{ - int i; - - spin_lock_init(&__thread_id_map_mutex); - __thread_id_map[0] = pthread_self(); - for (i = 1; i < NR_THREADS; i++) - __thread_id_map[i] = __THREAD_ID_MAP_EMPTY; - init_per_thread(smp_processor_id, 0); - if (pthread_key_create(&thread_id_key, NULL) != 0) { - perror("pthread_key_create"); - exit(-1); - } -} - -/* Taken from the Linux kernel source tree, so GPLv2-only!!! */ - -#ifndef _LINUX_LIST_H -#define _LINUX_LIST_H - -#define LIST_POISON1 ((void *) 0x00100100) -#define LIST_POISON2 ((void *) 0x00200200) - -#if 0 -/* - * Simple doubly linked list implementation. - * - * Some of the internal functions ("__xxx") are useful when - * manipulating whole lists rather than single entries, as - * sometimes we already know the next/prev entries and we can - * generate better code by using them directly rather than - * using the generic single-entry routines. - */ - -struct cds_list_head { - struct cds_list_head *next, *prev; -}; - -#define CDS_LIST_HEAD_INIT(name) { &(name), &(name) } - -#define CDS_LIST_HEAD(name) \ - struct cds_list_head name = CDS_LIST_HEAD_INIT(name) - -static inline void CDS_INIT_LIST_HEAD(struct cds_list_head *list) -{ - list->next = list; - list->prev = list; -} - -/* - * Insert a new entry between two known consecutive entries. - * - * This is only for internal list manipulation where we know - * the prev/next entries already! - */ -#ifndef CONFIG_DEBUG_LIST -static inline void __cds_list_add(struct cds_list_head *new, - struct cds_list_head *prev, - struct cds_list_head *next) -{ - next->prev = new; - new->next = next; - new->prev = prev; - prev->next = new; -} -#else -extern void __cds_list_add(struct cds_list_head *new, - struct cds_list_head *prev, - struct cds_list_head *next); -#endif - -/** - * cds_list_add - add a new entry - * @new: new entry to be added - * @head: list head to add it after - * - * Insert a new entry after the specified head. - * This is good for implementing stacks. - */ -static inline void cds_list_add(struct cds_list_head *new, struct cds_list_head *head) -{ - __cds_list_add(new, head, head->next); -} - - -/** - * cds_list_add_tail - add a new entry - * @new: new entry to be added - * @head: list head to add it before - * - * Insert a new entry before the specified head. - * This is useful for implementing queues. - */ -static inline void cds_list_add_tail(struct cds_list_head *new, struct cds_list_head *head) -{ - __cds_list_add(new, head->prev, head); -} - -/* - * Delete a list entry by making the prev/next entries - * point to each other. - * - * This is only for internal list manipulation where we know - * the prev/next entries already! - */ -static inline void __cds_list_del(struct cds_list_head * prev, struct cds_list_head * next) -{ - next->prev = prev; - prev->next = next; -} - -/** - * cds_list_del - deletes entry from list. - * @entry: the element to delete from the list. - * Note: cds_list_empty() on entry does not return true after this, the entry is - * in an undefined state. 
- */ -#ifndef CONFIG_DEBUG_LIST -static inline void cds_list_del(struct cds_list_head *entry) -{ - __cds_list_del(entry->prev, entry->next); - entry->next = LIST_POISON1; - entry->prev = LIST_POISON2; -} -#else -extern void cds_list_del(struct cds_list_head *entry); -#endif - -/** - * cds_list_replace - replace old entry by new one - * @old : the element to be replaced - * @new : the new element to insert - * - * If @old was empty, it will be overwritten. - */ -static inline void cds_list_replace(struct cds_list_head *old, - struct cds_list_head *new) -{ - new->next = old->next; - new->next->prev = new; - new->prev = old->prev; - new->prev->next = new; -} - -static inline void cds_list_replace_init(struct cds_list_head *old, - struct cds_list_head *new) -{ - cds_list_replace(old, new); - CDS_INIT_LIST_HEAD(old); -} - -/** - * cds_list_del_init - deletes entry from list and reinitialize it. - * @entry: the element to delete from the list. - */ -static inline void cds_list_del_init(struct cds_list_head *entry) -{ - __cds_list_del(entry->prev, entry->next); - CDS_INIT_LIST_HEAD(entry); -} - -/** - * cds_list_move - delete from one list and add as another's head - * @list: the entry to move - * @head: the head that will precede our entry - */ -static inline void cds_list_move(struct cds_list_head *list, struct cds_list_head *head) -{ - __cds_list_del(list->prev, list->next); - cds_list_add(list, head); -} - -/** - * cds_list_move_tail - delete from one list and add as another's tail - * @list: the entry to move - * @head: the head that will follow our entry - */ -static inline void cds_list_move_tail(struct cds_list_head *list, - struct cds_list_head *head) -{ - __cds_list_del(list->prev, list->next); - cds_list_add_tail(list, head); -} - -/** - * list_is_last - tests whether @list is the last entry in list @head - * @list: the entry to test - * @head: the head of the list - */ -static inline int list_is_last(const struct cds_list_head *list, - const struct cds_list_head *head) -{ - return list->next == head; -} - -/** - * cds_list_empty - tests whether a list is empty - * @head: the list to test. - */ -static inline int cds_list_empty(const struct cds_list_head *head) -{ - return head->next == head; -} - -/** - * cds_list_empty_careful - tests whether a list is empty and not being modified - * @head: the list to test - * - * Description: - * tests whether a list is empty _and_ checks that no other CPU might be - * in the process of modifying either member (next or prev) - * - * NOTE: using cds_list_empty_careful() without synchronization - * can only be safe if the only activity that can happen - * to the list entry is cds_list_del_init(). Eg. it cannot be used - * if another CPU could re-list_add() it. - */ -static inline int cds_list_empty_careful(const struct cds_list_head *head) -{ - struct cds_list_head *next = head->next; - return (next == head) && (next == head->prev); -} - -/** - * list_is_singular - tests whether a list has just one entry. - * @head: the list to test. 
- */ -static inline int list_is_singular(const struct cds_list_head *head) -{ - return !list_empty(head) && (head->next == head->prev); -} - -static inline void __list_cut_position(struct cds_list_head *list, - struct cds_list_head *head, struct cds_list_head *entry) -{ - struct cds_list_head *new_first = entry->next; - list->next = head->next; - list->next->prev = list; - list->prev = entry; - entry->next = list; - head->next = new_first; - new_first->prev = head; -} - -/** - * list_cut_position - cut a list into two - * @list: a new list to add all removed entries - * @head: a list with entries - * @entry: an entry within head, could be the head itself - * and if so we won't cut the list - * - * This helper moves the initial part of @head, up to and - * including @entry, from @head to @list. You should - * pass on @entry an element you know is on @head. @list - * should be an empty list or a list you do not care about - * losing its data. - * - */ -static inline void list_cut_position(struct cds_list_head *list, - struct cds_list_head *head, struct cds_list_head *entry) -{ - if (cds_list_empty(head)) - return; - if (list_is_singular(head) && - (head->next != entry && head != entry)) - return; - if (entry == head) - CDS_INIT_LIST_HEAD(list); - else - __list_cut_position(list, head, entry); -} - -static inline void __cds_list_splice(const struct cds_list_head *list, - struct cds_list_head *prev, - struct cds_list_head *next) -{ - struct cds_list_head *first = list->next; - struct cds_list_head *last = list->prev; - - first->prev = prev; - prev->next = first; - - last->next = next; - next->prev = last; -} - -/** - * cds_list_splice - join two lists, this is designed for stacks - * @list: the new list to add. - * @head: the place to add it in the first list. - */ -static inline void cds_list_splice(const struct cds_list_head *list, - struct cds_list_head *head) -{ - if (!cds_list_empty(list)) - __cds_list_splice(list, head, head->next); -} - -/** - * cds_list_splice_tail - join two lists, each list being a queue - * @list: the new list to add. - * @head: the place to add it in the first list. - */ -static inline void cds_list_splice_tail(struct cds_list_head *list, - struct cds_list_head *head) -{ - if (!cds_list_empty(list)) - __cds_list_splice(list, head->prev, head); -} - -/** - * cds_list_splice_init - join two lists and reinitialise the emptied list. - * @list: the new list to add. - * @head: the place to add it in the first list. - * - * The list at @list is reinitialised - */ -static inline void cds_list_splice_init(struct cds_list_head *list, - struct cds_list_head *head) -{ - if (!cds_list_empty(list)) { - __cds_list_splice(list, head, head->next); - CDS_INIT_LIST_HEAD(list); - } -} - -/** - * cds_list_splice_tail_init - join two lists and reinitialise the emptied list - * @list: the new list to add. - * @head: the place to add it in the first list. - * - * Each of the lists is a queue. - * The list at @list is reinitialised - */ -static inline void cds_list_splice_tail_init(struct cds_list_head *list, - struct cds_list_head *head) -{ - if (!cds_list_empty(list)) { - __cds_list_splice(list, head->prev, head); - CDS_INIT_LIST_HEAD(list); - } -} - -/** - * cds_list_entry - get the struct for this entry - * @ptr: the &struct cds_list_head pointer. - * @type: the type of the struct this is embedded in. - * @member: the name of the list_struct within the struct. 
- */ -#define cds_list_entry(ptr, type, member) \ - caa_container_of(ptr, type, member) - -/** - * list_first_entry - get the first element from a list - * @ptr: the list head to take the element from. - * @type: the type of the struct this is embedded in. - * @member: the name of the list_struct within the struct. - * - * Note, that list is expected to be not empty. - */ -#define list_first_entry(ptr, type, member) \ - cds_list_entry((ptr)->next, type, member) - -/** - * cds_list_for_each - iterate over a list - * @pos: the &struct cds_list_head to use as a loop cursor. - * @head: the head for your list. - */ -#define cds_list_for_each(pos, head) \ - for (pos = (head)->next; prefetch(pos->next), pos != (head); \ - pos = pos->next) - -/** - * __cds_list_for_each - iterate over a list - * @pos: the &struct cds_list_head to use as a loop cursor. - * @head: the head for your list. - * - * This variant differs from cds_list_for_each() in that it's the - * simplest possible list iteration code, no prefetching is done. - * Use this for code that knows the list to be very short (empty - * or 1 entry) most of the time. - */ -#define __cds_list_for_each(pos, head) \ - for (pos = (head)->next; pos != (head); pos = pos->next) - -/** - * cds_list_for_each_prev - iterate over a list backwards - * @pos: the &struct cds_list_head to use as a loop cursor. - * @head: the head for your list. - */ -#define cds_list_for_each_prev(pos, head) \ - for (pos = (head)->prev; prefetch(pos->prev), pos != (head); \ - pos = pos->prev) - -/** - * cds_list_for_each_safe - iterate over a list safe against removal of list entry - * @pos: the &struct cds_list_head to use as a loop cursor. - * @n: another &struct cds_list_head to use as temporary storage - * @head: the head for your list. - */ -#define cds_list_for_each_safe(pos, n, head) \ - for (pos = (head)->next, n = pos->next; pos != (head); \ - pos = n, n = pos->next) - -/** - * cds_list_for_each_prev_safe - iterate over a list backwards safe against removal of list entry - * @pos: the &struct cds_list_head to use as a loop cursor. - * @n: another &struct cds_list_head to use as temporary storage - * @head: the head for your list. - */ -#define cds_list_for_each_prev_safe(pos, n, head) \ - for (pos = (head)->prev, n = pos->prev; \ - prefetch(pos->prev), pos != (head); \ - pos = n, n = pos->prev) - -/** - * cds_list_for_each_entry - iterate over list of given type - * @pos: the type * to use as a loop cursor. - * @head: the head for your list. - * @member: the name of the list_struct within the struct. - */ -#define cds_list_for_each_entry(pos, head, member) \ - for (pos = cds_list_entry((head)->next, typeof(*pos), member); \ - prefetch(pos->member.next), &pos->member != (head); \ - pos = cds_list_entry(pos->member.next, typeof(*pos), member)) - -/** - * cds_list_for_each_entry_reverse - iterate backwards over list of given type. - * @pos: the type * to use as a loop cursor. - * @head: the head for your list. - * @member: the name of the list_struct within the struct. - */ -#define cds_list_for_each_entry_reverse(pos, head, member) \ - for (pos = cds_list_entry((head)->prev, typeof(*pos), member); \ - prefetch(pos->member.prev), &pos->member != (head); \ - pos = cds_list_entry(pos->member.prev, typeof(*pos), member)) - -/** - * list_prepare_entry - prepare a pos entry for use in cds_list_for_each_entry_continue() - * @pos: the type * to use as a start point - * @head: the head of the list - * @member: the name of the list_struct within the struct. 
- * - * Prepares a pos entry for use as a start point in cds_list_for_each_entry_continue(). - */ -#define list_prepare_entry(pos, head, member) \ - ((pos) ? : cds_list_entry(head, typeof(*pos), member)) - -/** - * cds_list_for_each_entry_continue - continue iteration over list of given type - * @pos: the type * to use as a loop cursor. - * @head: the head for your list. - * @member: the name of the list_struct within the struct. - * - * Continue to iterate over list of given type, continuing after - * the current position. - */ -#define cds_list_for_each_entry_continue(pos, head, member) \ - for (pos = cds_list_entry(pos->member.next, typeof(*pos), member); \ - prefetch(pos->member.next), &pos->member != (head); \ - pos = cds_list_entry(pos->member.next, typeof(*pos), member)) - -/** - * cds_list_for_each_entry_continue_reverse - iterate backwards from the given point - * @pos: the type * to use as a loop cursor. - * @head: the head for your list. - * @member: the name of the list_struct within the struct. - * - * Start to iterate over list of given type backwards, continuing after - * the current position. - */ -#define cds_list_for_each_entry_continue_reverse(pos, head, member) \ - for (pos = cds_list_entry(pos->member.prev, typeof(*pos), member); \ - prefetch(pos->member.prev), &pos->member != (head); \ - pos = cds_list_entry(pos->member.prev, typeof(*pos), member)) - -/** - * cds_list_for_each_entry_from - iterate over list of given type from the current point - * @pos: the type * to use as a loop cursor. - * @head: the head for your list. - * @member: the name of the list_struct within the struct. - * - * Iterate over list of given type, continuing from current position. - */ -#define cds_list_for_each_entry_from(pos, head, member) \ - for (; prefetch(pos->member.next), &pos->member != (head); \ - pos = cds_list_entry(pos->member.next, typeof(*pos), member)) - -/** - * cds_list_for_each_entry_safe - iterate over list of given type safe against removal of list entry - * @pos: the type * to use as a loop cursor. - * @n: another type * to use as temporary storage - * @head: the head for your list. - * @member: the name of the list_struct within the struct. - */ -#define cds_list_for_each_entry_safe(pos, n, head, member) \ - for (pos = cds_list_entry((head)->next, typeof(*pos), member), \ - n = cds_list_entry(pos->member.next, typeof(*pos), member); \ - &pos->member != (head); \ - pos = n, n = cds_list_entry(n->member.next, typeof(*n), member)) - -/** - * cds_list_for_each_entry_safe_continue - * @pos: the type * to use as a loop cursor. - * @n: another type * to use as temporary storage - * @head: the head for your list. - * @member: the name of the list_struct within the struct. - * - * Iterate over list of given type, continuing after current point, - * safe against removal of list entry. - */ -#define cds_list_for_each_entry_safe_continue(pos, n, head, member) \ - for (pos = cds_list_entry(pos->member.next, typeof(*pos), member), \ - n = cds_list_entry(pos->member.next, typeof(*pos), member); \ - &pos->member != (head); \ - pos = n, n = cds_list_entry(n->member.next, typeof(*n), member)) - -/** - * cds_list_for_each_entry_safe_from - * @pos: the type * to use as a loop cursor. - * @n: another type * to use as temporary storage - * @head: the head for your list. - * @member: the name of the list_struct within the struct. - * - * Iterate over list of given type from current point, safe against - * removal of list entry. 
- */ -#define cds_list_for_each_entry_safe_from(pos, n, head, member) \ - for (n = cds_list_entry(pos->member.next, typeof(*pos), member); \ - &pos->member != (head); \ - pos = n, n = cds_list_entry(n->member.next, typeof(*n), member)) - -/** - * cds_list_for_each_entry_safe_reverse - * @pos: the type * to use as a loop cursor. - * @n: another type * to use as temporary storage - * @head: the head for your list. - * @member: the name of the list_struct within the struct. - * - * Iterate backwards over list of given type, safe against removal - * of list entry. - */ -#define cds_list_for_each_entry_safe_reverse(pos, n, head, member) \ - for (pos = cds_list_entry((head)->prev, typeof(*pos), member), \ - n = cds_list_entry(pos->member.prev, typeof(*pos), member); \ - &pos->member != (head); \ - pos = n, n = cds_list_entry(n->member.prev, typeof(*n), member)) - -#endif //0 - -/* - * Double linked lists with a single pointer list head. - * Mostly useful for hash tables where the two pointer list head is - * too wasteful. - * You lose the ability to access the tail in O(1). - */ - -struct cds_hlist_head { - struct cds_hlist_node *first; -}; - -struct cds_hlist_node { - struct cds_hlist_node *next, **pprev; -}; - -#define HLIST_HEAD_INIT { .first = NULL } -#define HLIST_HEAD(name) struct cds_hlist_head name = { .first = NULL } -#define CDS_INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL) -static inline void INIT_HLIST_NODE(struct cds_hlist_node *h) -{ - h->next = NULL; - h->pprev = NULL; -} - -static inline int hlist_unhashed(const struct cds_hlist_node *h) -{ - return !h->pprev; -} - -static inline int hlist_empty(const struct cds_hlist_head *h) -{ - return !h->first; -} - -static inline void __cds_hlist_del(struct cds_hlist_node *n) -{ - struct cds_hlist_node *next = n->next; - struct cds_hlist_node **pprev = n->pprev; - *pprev = next; - if (next) - next->pprev = pprev; -} - -static inline void cds_hlist_del(struct cds_hlist_node *n) -{ - __cds_hlist_del(n); - n->next = LIST_POISON1; - n->pprev = LIST_POISON2; -} - -static inline void cds_hlist_del_init(struct cds_hlist_node *n) -{ - if (!hlist_unhashed(n)) { - __cds_hlist_del(n); - INIT_HLIST_NODE(n); - } -} - -static inline void cds_hlist_add_head(struct cds_hlist_node *n, struct cds_hlist_head *h) -{ - struct cds_hlist_node *first = h->first; - n->next = first; - if (first) - first->pprev = &n->next; - h->first = n; - n->pprev = &h->first; -} - -/* next must be != NULL */ -static inline void hlist_add_before(struct cds_hlist_node *n, - struct cds_hlist_node *next) -{ - n->pprev = next->pprev; - n->next = next; - next->pprev = &n->next; - *(n->pprev) = n; -} - -static inline void hlist_add_after(struct cds_hlist_node *n, - struct cds_hlist_node *next) -{ - next->next = n->next; - n->next = next; - next->pprev = &n->next; - - if(next->next) - next->next->pprev = &next->next; -} - -/* - * Move a list from one list head to another. Fixup the pprev - * reference of the first entry if it exists. 
- */ -static inline void hlist_move_list(struct cds_hlist_head *old, - struct cds_hlist_head *new) -{ - new->first = old->first; - if (new->first) - new->first->pprev = &new->first; - old->first = NULL; -} - -#define cds_hlist_entry(ptr, type, member) caa_container_of(ptr,type,member) - -#define cds_hlist_for_each(pos, head) \ - for (pos = (head)->first; pos && ({ prefetch(pos->next); 1; }); \ - pos = pos->next) - -#define cds_hlist_for_each_safe(pos, n, head) \ - for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \ - pos = n) - -/** - * cds_hlist_for_each_entry - iterate over list of given type - * @tpos: the type * to use as a loop cursor. - * @pos: the &struct cds_hlist_node to use as a loop cursor. - * @head: the head for your list. - * @member: the name of the cds_hlist_node within the struct. - */ -#define cds_hlist_for_each_entry(tpos, pos, head, member) \ - for (pos = (head)->first; \ - pos && ({ prefetch(pos->next); 1;}) && \ - ({ tpos = cds_hlist_entry(pos, typeof(*tpos), member); 1;}); \ - pos = pos->next) - -/** - * cds_hlist_for_each_entry_continue - iterate over a hlist continuing after current point - * @tpos: the type * to use as a loop cursor. - * @pos: the &struct cds_hlist_node to use as a loop cursor. - * @member: the name of the cds_hlist_node within the struct. - */ -#define cds_hlist_for_each_entry_continue(tpos, pos, member) \ - for (pos = (pos)->next; \ - pos && ({ prefetch(pos->next); 1;}) && \ - ({ tpos = cds_hlist_entry(pos, typeof(*tpos), member); 1;}); \ - pos = pos->next) - -/** - * cds_hlist_for_each_entry_from - iterate over a hlist continuing from current point - * @tpos: the type * to use as a loop cursor. - * @pos: the &struct cds_hlist_node to use as a loop cursor. - * @member: the name of the cds_hlist_node within the struct. - */ -#define cds_hlist_for_each_entry_from(tpos, pos, member) \ - for (; pos && ({ prefetch(pos->next); 1;}) && \ - ({ tpos = cds_hlist_entry(pos, typeof(*tpos), member); 1;}); \ - pos = pos->next) - -/** - * cds_hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry - * @tpos: the type * to use as a loop cursor. - * @pos: the &struct cds_hlist_node to use as a loop cursor. - * @n: another &struct cds_hlist_node to use as temporary storage - * @head: the head for your list. - * @member: the name of the cds_hlist_node within the struct. - */ -#define cds_hlist_for_each_entry_safe(tpos, pos, n, head, member) \ - for (pos = (head)->first; \ - pos && ({ n = pos->next; 1; }) && \ - ({ tpos = cds_hlist_entry(pos, typeof(*tpos), member); 1;}); \ - pos = n) - -#endif - -#endif diff --git a/tests/api_ppc.h b/tests/api_ppc.h deleted file mode 100644 index 038c9cb..0000000 --- a/tests/api_ppc.h +++ /dev/null @@ -1,1698 +0,0 @@ -/* MECHANICALLY GENERATED, DO NOT EDIT!!! */ - -#ifndef _INCLUDE_API_H -#define _INCLUDE_API_H - -/* - * common.h: Common Linux kernel-isms. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; but version 2 of the License only due - * to code included from the Linux kernel. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. - * - * Copyright (c) 2006 Paul E. McKenney, IBM. - * - * Much code taken from the Linux kernel. For such code, the option - * to redistribute under later versions of GPL might not be available. - */ - -#include - -#ifndef __always_inline -#define __always_inline inline -#endif - -#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) -#define BUILD_BUG_ON_ZERO(e) (sizeof(char[1 - 2 * !!(e)]) - 1) - -#ifdef __ASSEMBLY__ -# define stringify_in_c(...) __VA_ARGS__ -# define ASM_CONST(x) x -#else -/* This version of stringify will deal with commas... */ -# define __stringify_in_c(...) #__VA_ARGS__ -# define stringify_in_c(...) __stringify_in_c(__VA_ARGS__) " " -# define __ASM_CONST(x) x##UL -# define ASM_CONST(x) __ASM_CONST(x) -#endif - - -/* - * arch-ppc64.h: Expose PowerPC atomic instructions. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; but version 2 of the License only due - * to code included from the Linux kernel. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. - * - * Copyright (c) 2006 Paul E. McKenney, IBM. - * - * Much code taken from the Linux kernel. For such code, the option - * to redistribute under later versions of GPL might not be available. - */ - -/* - * Machine parameters. - */ - -#define CONFIG_PPC64 - -/*#define CAA_CACHE_LINE_SIZE 128 */ -#define ____cacheline_internodealigned_in_smp \ - __attribute__((__aligned__(1 << 7))) - -#if 0 /* duplicate with arch_atomic.h */ - -/* - * Atomic data structure, initialization, and access. - */ - -typedef struct { volatile int counter; } atomic_t; - -#define ATOMIC_INIT(i) { (i) } - -#define atomic_read(v) ((v)->counter) -#define atomic_set(v, i) (((v)->counter) = (i)) - -/* - * Atomic operations. - */ - -#define LWSYNC lwsync -#define PPC405_ERR77(ra,rb) -#ifdef CONFIG_SMP -# define LWSYNC_ON_SMP stringify_in_c(LWSYNC) "\n" -# define ISYNC_ON_SMP "\n\tisync\n" -#else -# define LWSYNC_ON_SMP -# define ISYNC_ON_SMP -#endif - -/* - * Atomic exchange - * - * Changes the memory location '*ptr' to be val and returns - * the previous value stored there. - */ -static __always_inline unsigned long -__xchg_u32(volatile void *p, unsigned long val) -{ - unsigned long prev; - - __asm__ __volatile__( - LWSYNC_ON_SMP -"1: lwarx %0,0,%2 \n" - PPC405_ERR77(0,%2) -" stwcx. %3,0,%2 \n\ - bne- 1b" - ISYNC_ON_SMP - : "=&r" (prev), "+m" (*(volatile unsigned int *)p) - : "r" (p), "r" (val) - : "cc", "memory"); - - return prev; -} - -/* - * Atomic exchange - * - * Changes the memory location '*ptr' to be val and returns - * the previous value stored there. - */ -static __always_inline unsigned long -__xchg_u32_local(volatile void *p, unsigned long val) -{ - unsigned long prev; - - __asm__ __volatile__( -"1: lwarx %0,0,%2 \n" - PPC405_ERR77(0,%2) -" stwcx. 
%3,0,%2 \n\ - bne- 1b" - : "=&r" (prev), "+m" (*(volatile unsigned int *)p) - : "r" (p), "r" (val) - : "cc", "memory"); - - return prev; -} - -#ifdef CONFIG_PPC64 -static __always_inline unsigned long -__xchg_u64(volatile void *p, unsigned long val) -{ - unsigned long prev; - - __asm__ __volatile__( - LWSYNC_ON_SMP -"1: ldarx %0,0,%2 \n" - PPC405_ERR77(0,%2) -" stdcx. %3,0,%2 \n\ - bne- 1b" - ISYNC_ON_SMP - : "=&r" (prev), "+m" (*(volatile unsigned long *)p) - : "r" (p), "r" (val) - : "cc", "memory"); - - return prev; -} - -static __always_inline unsigned long -__xchg_u64_local(volatile void *p, unsigned long val) -{ - unsigned long prev; - - __asm__ __volatile__( -"1: ldarx %0,0,%2 \n" - PPC405_ERR77(0,%2) -" stdcx. %3,0,%2 \n\ - bne- 1b" - : "=&r" (prev), "+m" (*(volatile unsigned long *)p) - : "r" (p), "r" (val) - : "cc", "memory"); - - return prev; -} -#endif - -/* - * This function doesn't exist, so you'll get a linker error - * if something tries to do an invalid xchg(). - */ -extern void __xchg_called_with_bad_pointer(void); - -static __always_inline unsigned long -__xchg(volatile void *ptr, unsigned long x, unsigned int size) -{ - switch (size) { - case 4: - return __xchg_u32(ptr, x); -#ifdef CONFIG_PPC64 - case 8: - return __xchg_u64(ptr, x); -#endif - } - __xchg_called_with_bad_pointer(); - return x; -} - -static __always_inline unsigned long -__xchg_local(volatile void *ptr, unsigned long x, unsigned int size) -{ - switch (size) { - case 4: - return __xchg_u32_local(ptr, x); -#ifdef CONFIG_PPC64 - case 8: - return __xchg_u64_local(ptr, x); -#endif - } - __xchg_called_with_bad_pointer(); - return x; -} -#define xchg(ptr,x) \ - ({ \ - __typeof__(*(ptr)) _x_ = (x); \ - (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \ - }) - -#define xchg_local(ptr,x) \ - ({ \ - __typeof__(*(ptr)) _x_ = (x); \ - (__typeof__(*(ptr))) __xchg_local((ptr), \ - (unsigned long)_x_, sizeof(*(ptr))); \ - }) - -/* - * Compare and exchange - if *p == old, set it to new, - * and return the old value of *p. - */ -#define __HAVE_ARCH_CMPXCHG 1 - -static __always_inline unsigned long -__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new) -{ - unsigned int prev; - - __asm__ __volatile__ ( - LWSYNC_ON_SMP -"1: lwarx %0,0,%2 # __cmpxchg_u32\n\ - cmpw 0,%0,%3\n\ - bne- 2f\n" - PPC405_ERR77(0,%2) -" stwcx. %4,0,%2\n\ - bne- 1b" - ISYNC_ON_SMP - "\n\ -2:" - : "=&r" (prev), "+m" (*p) - : "r" (p), "r" (old), "r" (new) - : "cc", "memory"); - - return prev; -} - -static __always_inline unsigned long -__cmpxchg_u32_local(volatile unsigned int *p, unsigned long old, - unsigned long new) -{ - unsigned int prev; - - __asm__ __volatile__ ( -"1: lwarx %0,0,%2 # __cmpxchg_u32\n\ - cmpw 0,%0,%3\n\ - bne- 2f\n" - PPC405_ERR77(0,%2) -" stwcx. %4,0,%2\n\ - bne- 1b" - "\n\ -2:" - : "=&r" (prev), "+m" (*p) - : "r" (p), "r" (old), "r" (new) - : "cc", "memory"); - - return prev; -} - -#ifdef CONFIG_PPC64 -static __always_inline unsigned long -__cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new) -{ - unsigned long prev; - - __asm__ __volatile__ ( - LWSYNC_ON_SMP -"1: ldarx %0,0,%2 # __cmpxchg_u64\n\ - cmpd 0,%0,%3\n\ - bne- 2f\n\ - stdcx. 
%4,0,%2\n\ - bne- 1b" - ISYNC_ON_SMP - "\n\ -2:" - : "=&r" (prev), "+m" (*p) - : "r" (p), "r" (old), "r" (new) - : "cc", "memory"); - - return prev; -} - -static __always_inline unsigned long -__cmpxchg_u64_local(volatile unsigned long *p, unsigned long old, - unsigned long new) -{ - unsigned long prev; - - __asm__ __volatile__ ( -"1: ldarx %0,0,%2 # __cmpxchg_u64\n\ - cmpd 0,%0,%3\n\ - bne- 2f\n\ - stdcx. %4,0,%2\n\ - bne- 1b" - "\n\ -2:" - : "=&r" (prev), "+m" (*p) - : "r" (p), "r" (old), "r" (new) - : "cc", "memory"); - - return prev; -} -#endif - -/* This function doesn't exist, so you'll get a linker error - if something tries to do an invalid cmpxchg(). */ -extern void __cmpxchg_called_with_bad_pointer(void); - -static __always_inline unsigned long -__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, - unsigned int size) -{ - switch (size) { - case 4: - return __cmpxchg_u32(ptr, old, new); -#ifdef CONFIG_PPC64 - case 8: - return __cmpxchg_u64(ptr, old, new); -#endif - } - __cmpxchg_called_with_bad_pointer(); - return old; -} - -static __always_inline unsigned long -__cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new, - unsigned int size) -{ - switch (size) { - case 4: - return __cmpxchg_u32_local(ptr, old, new); -#ifdef CONFIG_PPC64 - case 8: - return __cmpxchg_u64_local(ptr, old, new); -#endif - } - __cmpxchg_called_with_bad_pointer(); - return old; -} - -#define cmpxchg(ptr, o, n) \ - ({ \ - __typeof__(*(ptr)) _o_ = (o); \ - __typeof__(*(ptr)) _n_ = (n); \ - (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \ - (unsigned long)_n_, sizeof(*(ptr))); \ - }) - - -#define cmpxchg_local(ptr, o, n) \ - ({ \ - __typeof__(*(ptr)) _o_ = (o); \ - __typeof__(*(ptr)) _n_ = (n); \ - (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_, \ - (unsigned long)_n_, sizeof(*(ptr))); \ - }) - -#ifdef CONFIG_PPC64 -/* - * We handle most unaligned accesses in hardware. On the other hand - * unaligned DMA can be very expensive on some ppc64 IO chips (it does - * powers of 2 writes until it reaches sufficient alignment). - * - * Based on this we disable the IP header alignment in network drivers. - * We also modify NET_SKB_PAD to be a cacheline in size, thus maintaining - * cacheline alignment of buffers. - */ -#define NET_IP_ALIGN 0 -#define NET_SKB_PAD L1_CACHE_BYTES - -#define cmpxchg64(ptr, o, n) \ - ({ \ - BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ - cmpxchg((ptr), (o), (n)); \ - }) -#define cmpxchg64_local(ptr, o, n) \ - ({ \ - BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ - cmpxchg_local((ptr), (o), (n)); \ - }) -#endif - -#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n))) -#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) - -/** - * atomic_add - add integer to atomic variable - * @i: integer value to add - * @v: pointer of type atomic_t - * - * Atomically adds @a to @v. - */ -static __inline__ void atomic_add(int a, atomic_t *v) -{ - int t; - - __asm__ __volatile__( - "1: lwarx %0,0,%3 # atomic_add\n\ - add %0,%2,%0 \n\ - stwcx. %0,0,%3 \n\ - bne- 1b" - : "=&r" (t), "+m" (v->counter) - : "r" (a), "r" (&v->counter) - : "cc"); -} - -/** - * atomic_sub - subtract the atomic variable - * @i: integer value to subtract - * @v: pointer of type atomic_t - * - * Atomically subtracts @a from @v. - */ -static __inline__ void atomic_sub(int a, atomic_t *v) -{ - int t; - - __asm__ __volatile__( - "1: lwarx %0,0,%3 # atomic_sub \n\ - subf %0,%2,%0 \n\ - stwcx. 
%0,0,%3 \n\ - bne- 1b" - : "=&r" (t), "+m" (v->counter) - : "r" (a), "r" (&v->counter) - : "cc"); -} - -static __inline__ atomic_sub_return(int a, atomic_t *v) -{ - int t; - - __asm__ __volatile__( - "lwsync\n\ - 1: lwarx %0,0,%2 # atomic_sub_return\n\ - subf %0,%1,%0\n\ - stwcx. %0,0,%2 \n\ - bne- 1b \n\ - isync" - : "=&r" (t) - : "r" (a), "r" (&v->counter) - : "cc", "memory"); - - return t; -} - -/** - * atomic_sub_and_test - subtract value from variable and test result - * @i: integer value to subtract - * @v: pointer of type atomic_t - * - * Atomically subtracts @i from @v and returns - * true if the result is zero, or false for all - * other cases. - */ -static __inline__ int atomic_sub_and_test(int a, atomic_t *v) -{ - return atomic_sub_return(a, v) == 0; -} - -/** - * atomic_inc - increment atomic variable - * @v: pointer of type atomic_t - * - * Atomically increments @v by 1. - */ -static __inline__ void atomic_inc(atomic_t *v) -{ - atomic_add(1, v); -} - -/** - * atomic_dec - decrement atomic variable - * @v: pointer of type atomic_t - * - * Atomically decrements @v by 1. - */ -static __inline__ void atomic_dec(atomic_t *v) -{ - atomic_sub(1, v); -} - -/** - * atomic_dec_and_test - decrement and test - * @v: pointer of type atomic_t - * - * Atomically decrements @v by 1 and - * returns true if the result is 0, or false for all other - * cases. - */ -static __inline__ int atomic_dec_and_test(atomic_t *v) -{ - return atomic_sub_and_test(1, v); -} - -/** - * atomic_inc_and_test - increment and test - * @v: pointer of type atomic_t - * - * Atomically increments @v by 1 - * and returns true if the result is zero, or false for all - * other cases. - */ -static __inline__ int atomic_inc_and_test(atomic_t *v) -{ - return atomic_inc_return(v); -} - -/** - * atomic_add_return - add and return - * @v: pointer of type atomic_t - * @i: integer value to add - * - * Atomically adds @i to @v and returns @i + @v - */ -static __inline__ int atomic_add_return(int a, atomic_t *v) -{ - int t; - - __asm__ __volatile__( - "lwsync \n\ - 1: lwarx %0,0,%2 # atomic_add_return \n\ - add %0,%1,%0 \n\ - stwcx. %0,0,%2 \n\ - bne- 1b \n\ - isync" - : "=&r" (t) - : "r" (a), "r" (&v->counter) - : "cc", "memory"); - - return t; -} - -/** - * atomic_add_negative - add and test if negative - * @v: pointer of type atomic_t - * @i: integer value to add - * - * Atomically adds @i to @v and returns true - * if the result is negative, or false when - * result is greater than or equal to zero. - */ -static __inline__ int atomic_add_negative(int a, atomic_t *v) -{ - return atomic_add_return(a, v) < 0; -} - -/** - * atomic_add_unless - add unless the number is a given value - * @v: pointer of type atomic_t - * @a: the amount to add to v... - * @u: ...unless v is equal to u. - * - * Atomically adds @a to @v, so long as it was not @u. - * Returns non-zero if @v was not @u, and zero otherwise. - */ -static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) -{ - int t; - - __asm__ __volatile__( - "lwsync \n\ - 1: lwarx %0,0,%1 # atomic_add_unless\n\ - cmpd 0,%0,%3 \n\ - beq- 2f \n\ - add %0,%2,%0 \n\ - stwcx. 
%0,0,%1 \n\ - bne- 1b \n\ - isync \n\ - subf %0,%2,%0 \n\ - 2:" - : "=&r" (t) - : "r" (&v->counter), "r" (a), "r" (u) - : "cc", "memory"); - - return t != u; -} - -#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) - -#define atomic_inc_return(v) (atomic_add_return(1,v)) -#define atomic_dec_return(v) (atomic_sub_return(1,v)) - -/* Atomic operations are already serializing on x86 */ -#define smp_mb__before_atomic_dec() cmm_smp_mb() -#define smp_mb__after_atomic_dec() cmm_smp_mb() -#define smp_mb__before_atomic_inc() cmm_smp_mb() -#define smp_mb__after_atomic_inc() cmm_smp_mb() - -#endif //0 /* duplicate with arch_atomic.h */ - -/* - * api_pthreads.h: API mapping to pthreads environment. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. However, please note that much - * of the code in this file derives from the Linux kernel, and that such - * code may not be available except under GPLv2. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. - * - * Copyright (c) 2006 Paul E. McKenney, IBM. - */ - -#include -#include -#include -#include -#include -#define __USE_GNU -#include -#include -#include -/* #include "atomic.h" */ - -/* - * Default machine parameters. - */ - -#ifndef CAA_CACHE_LINE_SIZE -/* #define CAA_CACHE_LINE_SIZE 128 */ -#endif /* #ifndef CAA_CACHE_LINE_SIZE */ - -/* - * Exclusive locking primitives. - */ - -typedef pthread_mutex_t spinlock_t; - -#define DEFINE_SPINLOCK(lock) spinlock_t lock = PTHREAD_MUTEX_INITIALIZER; -#define __SPIN_LOCK_UNLOCKED(lockp) PTHREAD_MUTEX_INITIALIZER - -static void spin_lock_init(spinlock_t *sp) -{ - if (pthread_mutex_init(sp, NULL) != 0) { - perror("spin_lock_init:pthread_mutex_init"); - exit(-1); - } -} - -static void spin_lock(spinlock_t *sp) -{ - if (pthread_mutex_lock(sp) != 0) { - perror("spin_lock:pthread_mutex_lock"); - exit(-1); - } -} - -static void spin_unlock(spinlock_t *sp) -{ - if (pthread_mutex_unlock(sp) != 0) { - perror("spin_unlock:pthread_mutex_unlock"); - exit(-1); - } -} - -#define spin_lock_irqsave(l, f) do { f = 1; spin_lock(l); } while (0) -#define spin_unlock_irqrestore(l, f) do { f = 0; spin_unlock(l); } while (0) - -/* - * Thread creation/destruction primitives. 
- */ - -typedef pthread_t thread_id_t; - -#define NR_THREADS 128 - -#define __THREAD_ID_MAP_EMPTY 0 -#define __THREAD_ID_MAP_WAITING 1 -thread_id_t __thread_id_map[NR_THREADS]; -spinlock_t __thread_id_map_mutex; - -#define for_each_thread(t) \ - for (t = 0; t < NR_THREADS; t++) - -#define for_each_running_thread(t) \ - for (t = 0; t < NR_THREADS; t++) \ - if ((__thread_id_map[t] != __THREAD_ID_MAP_EMPTY) && \ - (__thread_id_map[t] != __THREAD_ID_MAP_WAITING)) - -#define for_each_tid(t, tid) \ - for (t = 0; t < NR_THREADS; t++) \ - if ((((tid) = __thread_id_map[t]) != __THREAD_ID_MAP_EMPTY) && \ - ((tid) != __THREAD_ID_MAP_WAITING)) - -pthread_key_t thread_id_key; - -static int __smp_thread_id(void) -{ - int i; - thread_id_t tid = pthread_self(); - - for (i = 0; i < NR_THREADS; i++) { - if (__thread_id_map[i] == tid) { - long v = i + 1; /* must be non-NULL. */ - - if (pthread_setspecific(thread_id_key, (void *)v) != 0) { - perror("pthread_setspecific"); - exit(-1); - } - return i; - } - } - spin_lock(&__thread_id_map_mutex); - for (i = 0; i < NR_THREADS; i++) { - if (__thread_id_map[i] == tid) - spin_unlock(&__thread_id_map_mutex); - return i; - } - spin_unlock(&__thread_id_map_mutex); - fprintf(stderr, "smp_thread_id: Rogue thread, id: %d(%#x)\n", - (int)tid, (int)tid); - exit(-1); -} - -static int smp_thread_id(void) -{ - void *id; - - id = pthread_getspecific(thread_id_key); - if (id == NULL) - return __smp_thread_id(); - return (long)(id - 1); -} - -static thread_id_t create_thread(void *(*func)(void *), void *arg) -{ - thread_id_t tid; - int i; - - spin_lock(&__thread_id_map_mutex); - for (i = 0; i < NR_THREADS; i++) { - if (__thread_id_map[i] == __THREAD_ID_MAP_EMPTY) - break; - } - if (i >= NR_THREADS) { - spin_unlock(&__thread_id_map_mutex); - fprintf(stderr, "Thread limit of %d exceeded!\n", NR_THREADS); - exit(-1); - } - __thread_id_map[i] = __THREAD_ID_MAP_WAITING; - spin_unlock(&__thread_id_map_mutex); - if (pthread_create(&tid, NULL, func, arg) != 0) { - perror("create_thread:pthread_create"); - exit(-1); - } - __thread_id_map[i] = tid; - return tid; -} - -static void *wait_thread(thread_id_t tid) -{ - int i; - void *vp; - - for (i = 0; i < NR_THREADS; i++) { - if (__thread_id_map[i] == tid) - break; - } - if (i >= NR_THREADS){ - fprintf(stderr, "wait_thread: bad tid = %d(%#x)\n", - (int)tid, (int)tid); - exit(-1); - } - if (pthread_join(tid, &vp) != 0) { - perror("wait_thread:pthread_join"); - exit(-1); - } - __thread_id_map[i] = __THREAD_ID_MAP_EMPTY; - return vp; -} - -static void wait_all_threads(void) -{ - int i; - thread_id_t tid; - - for (i = 1; i < NR_THREADS; i++) { - tid = __thread_id_map[i]; - if (tid != __THREAD_ID_MAP_EMPTY && - tid != __THREAD_ID_MAP_WAITING) - (void)wait_thread(tid); - } -} - -static void run_on(int cpu) -{ - cpu_set_t mask; - - CPU_ZERO(&mask); - CPU_SET(cpu, &mask); - sched_setaffinity(0, sizeof(mask), &mask); -} - -/* - * timekeeping -- very crude -- should use MONOTONIC... - */ - -long long get_microseconds(void) -{ - struct timeval tv; - - if (gettimeofday(&tv, NULL) != 0) - abort(); - return ((long long)tv.tv_sec) * 1000000LL + (long long)tv.tv_usec; -} - -/* - * Per-thread variables. 
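A short sketch of how the thread wrappers above are driven in the tests: smp_init() (defined further down in this header) seeds the thread-ID map, create_thread() claims a slot and starts a pthread, smp_thread_id() resolves the caller back to its small integer slot, and wait_thread()/wait_all_threads() reap workers. The worker body and its argument are invented for illustration:

#include <stdio.h>

static void *worker(void *arg)
{
	/* smp_thread_id() maps this pthread to its __thread_id_map slot */
	printf("worker %d started with arg %p\n", smp_thread_id(), arg);
	return NULL;
}

static void thread_demo(void)
{
	thread_id_t t1, t2;

	smp_init();			/* must run before any other primitive */
	t1 = create_thread(worker, (void *)1);
	t2 = create_thread(worker, (void *)2);
	(void)wait_thread(t1);		/* join one worker explicitly... */
	(void)t2;
	wait_all_threads();		/* ...or reap every live slot at once */
}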
- */ - -#define DEFINE_PER_THREAD(type, name) \ - struct { \ - __typeof__(type) v \ - __attribute__((__aligned__(CAA_CACHE_LINE_SIZE))); \ - } __per_thread_##name[NR_THREADS]; -#define DECLARE_PER_THREAD(type, name) extern DEFINE_PER_THREAD(type, name) - -#define per_thread(name, thread) __per_thread_##name[thread].v -#define __get_thread_var(name) per_thread(name, smp_thread_id()) - -#define init_per_thread(name, v) \ - do { \ - int __i_p_t_i; \ - for (__i_p_t_i = 0; __i_p_t_i < NR_THREADS; __i_p_t_i++) \ - per_thread(name, __i_p_t_i) = v; \ - } while (0) - -/* - * CPU traversal primitives. - */ - -#ifndef NR_CPUS -#define NR_CPUS 16 -#endif /* #ifndef NR_CPUS */ - -#define for_each_possible_cpu(cpu) \ - for (cpu = 0; cpu < NR_CPUS; cpu++) -#define for_each_online_cpu(cpu) \ - for (cpu = 0; cpu < NR_CPUS; cpu++) - -/* - * Per-CPU variables. - */ - -#define DEFINE_PER_CPU(type, name) \ - struct { \ - __typeof__(type) v \ - __attribute__((__aligned__(CAA_CACHE_LINE_SIZE))); \ - } __per_cpu_##name[NR_CPUS] -#define DECLARE_PER_CPU(type, name) extern DEFINE_PER_CPU(type, name) - -DEFINE_PER_THREAD(int, smp_processor_id); - -#define per_cpu(name, thread) __per_cpu_##name[thread].v -#define __get_cpu_var(name) per_cpu(name, smp_processor_id()) - -#define init_per_cpu(name, v) \ - do { \ - int __i_p_c_i; \ - for (__i_p_c_i = 0; __i_p_c_i < NR_CPUS; __i_p_c_i++) \ - per_cpu(name, __i_p_c_i) = v; \ - } while (0) - -/* - * CPU state checking (crowbarred). - */ - -#define idle_cpu(cpu) 0 -#define in_softirq() 1 -#define hardirq_count() 0 -#define PREEMPT_SHIFT 0 -#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS) -#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS) -#define PREEMPT_BITS 8 -#define SOFTIRQ_BITS 8 - -/* - * CPU hotplug. - */ - -struct notifier_block { - int (*notifier_call)(struct notifier_block *, unsigned long, void *); - struct notifier_block *next; - int priority; -}; - -#define CPU_ONLINE 0x0002 /* CPU (unsigned)v is up */ -#define CPU_UP_PREPARE 0x0003 /* CPU (unsigned)v coming up */ -#define CPU_UP_CANCELED 0x0004 /* CPU (unsigned)v NOT coming up */ -#define CPU_DOWN_PREPARE 0x0005 /* CPU (unsigned)v going down */ -#define CPU_DOWN_FAILED 0x0006 /* CPU (unsigned)v NOT going down */ -#define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */ -#define CPU_DYING 0x0008 /* CPU (unsigned)v not running any task, - * not handling interrupts, soon dead */ -#define CPU_POST_DEAD 0x0009 /* CPU (unsigned)v dead, cpu_hotplug - * lock is dropped */ - -/* Used for CPU hotplug events occuring while tasks are frozen due to a suspend - * operation in progress - */ -#define CPU_TASKS_FROZEN 0x0010 - -#define CPU_ONLINE_FROZEN (CPU_ONLINE | CPU_TASKS_FROZEN) -#define CPU_UP_PREPARE_FROZEN (CPU_UP_PREPARE | CPU_TASKS_FROZEN) -#define CPU_UP_CANCELED_FROZEN (CPU_UP_CANCELED | CPU_TASKS_FROZEN) -#define CPU_DOWN_PREPARE_FROZEN (CPU_DOWN_PREPARE | CPU_TASKS_FROZEN) -#define CPU_DOWN_FAILED_FROZEN (CPU_DOWN_FAILED | CPU_TASKS_FROZEN) -#define CPU_DEAD_FROZEN (CPU_DEAD | CPU_TASKS_FROZEN) -#define CPU_DYING_FROZEN (CPU_DYING | CPU_TASKS_FROZEN) - -/* Hibernation and suspend events */ -#define PM_HIBERNATION_PREPARE 0x0001 /* Going to hibernate */ -#define PM_POST_HIBERNATION 0x0002 /* Hibernation finished */ -#define PM_SUSPEND_PREPARE 0x0003 /* Going to suspend the system */ -#define PM_POST_SUSPEND 0x0004 /* Suspend finished */ -#define PM_RESTORE_PREPARE 0x0005 /* Going to restore a saved image */ -#define PM_POST_RESTORE 0x0006 /* Restore failed */ - -#define NOTIFY_DONE 0x0000 /* Don't care */ -#define 
NOTIFY_OK 0x0001 /* Suits me */ -#define NOTIFY_STOP_MASK 0x8000 /* Don't call further */ -#define NOTIFY_BAD (NOTIFY_STOP_MASK|0x0002) - /* Bad/Veto action */ -/* - * Clean way to return from the notifier and stop further calls. - */ -#define NOTIFY_STOP (NOTIFY_OK|NOTIFY_STOP_MASK) - -/* - * Bug checks. - */ - -#define BUG_ON(c) do { if (!(c)) abort(); } while (0) - -/* - * Initialization -- Must be called before calling any primitives. - */ - -static void smp_init(void) -{ - int i; - - spin_lock_init(&__thread_id_map_mutex); - __thread_id_map[0] = pthread_self(); - for (i = 1; i < NR_THREADS; i++) - __thread_id_map[i] = __THREAD_ID_MAP_EMPTY; - init_per_thread(smp_processor_id, 0); - if (pthread_key_create(&thread_id_key, NULL) != 0) { - perror("pthread_key_create"); - exit(-1); - } -} - -/* Taken from the Linux kernel source tree, so GPLv2-only!!! */ - -#ifndef _LINUX_LIST_H -#define _LINUX_LIST_H - -#define LIST_POISON1 ((void *) 0x00100100) -#define LIST_POISON2 ((void *) 0x00200200) - -#if 0 - -/* - * Simple doubly linked list implementation. - * - * Some of the internal functions ("__xxx") are useful when - * manipulating whole lists rather than single entries, as - * sometimes we already know the next/prev entries and we can - * generate better code by using them directly rather than - * using the generic single-entry routines. - */ - -struct cds_list_head { - struct cds_list_head *next, *prev; -}; - -#define CDS_LIST_HEAD_INIT(name) { &(name), &(name) } - -#define CDS_LIST_HEAD(name) \ - struct cds_list_head name = CDS_LIST_HEAD_INIT(name) - -static inline void CDS_INIT_LIST_HEAD(struct cds_list_head *list) -{ - list->next = list; - list->prev = list; -} - -/* - * Insert a new entry between two known consecutive entries. - * - * This is only for internal list manipulation where we know - * the prev/next entries already! - */ -#ifndef CONFIG_DEBUG_LIST -static inline void __cds_list_add(struct cds_list_head *new, - struct cds_list_head *prev, - struct cds_list_head *next) -{ - next->prev = new; - new->next = next; - new->prev = prev; - prev->next = new; -} -#else -extern void __cds_list_add(struct cds_list_head *new, - struct cds_list_head *prev, - struct cds_list_head *next); -#endif - -/** - * cds_list_add - add a new entry - * @new: new entry to be added - * @head: list head to add it after - * - * Insert a new entry after the specified head. - * This is good for implementing stacks. - */ -static inline void cds_list_add(struct cds_list_head *new, struct cds_list_head *head) -{ - __cds_list_add(new, head, head->next); -} - - -/** - * cds_list_add_tail - add a new entry - * @new: new entry to be added - * @head: list head to add it before - * - * Insert a new entry before the specified head. - * This is useful for implementing queues. - */ -static inline void cds_list_add_tail(struct cds_list_head *new, struct cds_list_head *head) -{ - __cds_list_add(new, head->prev, head); -} - -/* - * Delete a list entry by making the prev/next entries - * point to each other. - * - * This is only for internal list manipulation where we know - * the prev/next entries already! - */ -static inline void __cds_list_del(struct cds_list_head * prev, struct cds_list_head * next) -{ - next->prev = prev; - prev->next = next; -} - -/** - * cds_list_del - deletes entry from list. - * @entry: the element to delete from the list. - * Note: cds_list_empty() on entry does not return true after this, the entry is - * in an undefined state. 
- */ -#ifndef CONFIG_DEBUG_LIST -static inline void cds_list_del(struct cds_list_head *entry) -{ - __cds_list_del(entry->prev, entry->next); - entry->next = LIST_POISON1; - entry->prev = LIST_POISON2; -} -#else -extern void cds_list_del(struct cds_list_head *entry); -#endif - -/** - * cds_list_replace - replace old entry by new one - * @old : the element to be replaced - * @new : the new element to insert - * - * If @old was empty, it will be overwritten. - */ -static inline void cds_list_replace(struct cds_list_head *old, - struct cds_list_head *new) -{ - new->next = old->next; - new->next->prev = new; - new->prev = old->prev; - new->prev->next = new; -} - -static inline void cds_list_replace_init(struct cds_list_head *old, - struct cds_list_head *new) -{ - cds_list_replace(old, new); - CDS_INIT_LIST_HEAD(old); -} - -/** - * cds_list_del_init - deletes entry from list and reinitialize it. - * @entry: the element to delete from the list. - */ -static inline void cds_list_del_init(struct cds_list_head *entry) -{ - __cds_list_del(entry->prev, entry->next); - CDS_INIT_LIST_HEAD(entry); -} - -/** - * cds_list_move - delete from one list and add as another's head - * @list: the entry to move - * @head: the head that will precede our entry - */ -static inline void cds_list_move(struct cds_list_head *list, struct cds_list_head *head) -{ - __cds_list_del(list->prev, list->next); - cds_list_add(list, head); -} - -/** - * cds_list_move_tail - delete from one list and add as another's tail - * @list: the entry to move - * @head: the head that will follow our entry - */ -static inline void cds_list_move_tail(struct cds_list_head *list, - struct cds_list_head *head) -{ - __cds_list_del(list->prev, list->next); - cds_list_add_tail(list, head); -} - -/** - * list_is_last - tests whether @list is the last entry in list @head - * @list: the entry to test - * @head: the head of the list - */ -static inline int list_is_last(const struct cds_list_head *list, - const struct cds_list_head *head) -{ - return list->next == head; -} - -/** - * cds_list_empty - tests whether a list is empty - * @head: the list to test. - */ -static inline int cds_list_empty(const struct cds_list_head *head) -{ - return head->next == head; -} - -/** - * cds_list_empty_careful - tests whether a list is empty and not being modified - * @head: the list to test - * - * Description: - * tests whether a list is empty _and_ checks that no other CPU might be - * in the process of modifying either member (next or prev) - * - * NOTE: using cds_list_empty_careful() without synchronization - * can only be safe if the only activity that can happen - * to the list entry is cds_list_del_init(). Eg. it cannot be used - * if another CPU could re-list_add() it. - */ -static inline int cds_list_empty_careful(const struct cds_list_head *head) -{ - struct cds_list_head *next = head->next; - return (next == head) && (next == head->prev); -} - -/** - * list_is_singular - tests whether a list has just one entry. - * @head: the list to test. 
- */ -static inline int list_is_singular(const struct cds_list_head *head) -{ - return !list_empty(head) && (head->next == head->prev); -} - -static inline void __list_cut_position(struct cds_list_head *list, - struct cds_list_head *head, struct cds_list_head *entry) -{ - struct cds_list_head *new_first = entry->next; - list->next = head->next; - list->next->prev = list; - list->prev = entry; - entry->next = list; - head->next = new_first; - new_first->prev = head; -} - -/** - * list_cut_position - cut a list into two - * @list: a new list to add all removed entries - * @head: a list with entries - * @entry: an entry within head, could be the head itself - * and if so we won't cut the list - * - * This helper moves the initial part of @head, up to and - * including @entry, from @head to @list. You should - * pass on @entry an element you know is on @head. @list - * should be an empty list or a list you do not care about - * losing its data. - * - */ -static inline void list_cut_position(struct cds_list_head *list, - struct cds_list_head *head, struct cds_list_head *entry) -{ - if (cds_list_empty(head)) - return; - if (list_is_singular(head) && - (head->next != entry && head != entry)) - return; - if (entry == head) - CDS_INIT_LIST_HEAD(list); - else - __list_cut_position(list, head, entry); -} - -static inline void __cds_list_splice(const struct cds_list_head *list, - struct cds_list_head *prev, - struct cds_list_head *next) -{ - struct cds_list_head *first = list->next; - struct cds_list_head *last = list->prev; - - first->prev = prev; - prev->next = first; - - last->next = next; - next->prev = last; -} - -/** - * cds_list_splice - join two lists, this is designed for stacks - * @list: the new list to add. - * @head: the place to add it in the first list. - */ -static inline void cds_list_splice(const struct cds_list_head *list, - struct cds_list_head *head) -{ - if (!cds_list_empty(list)) - __cds_list_splice(list, head, head->next); -} - -/** - * cds_list_splice_tail - join two lists, each list being a queue - * @list: the new list to add. - * @head: the place to add it in the first list. - */ -static inline void cds_list_splice_tail(struct cds_list_head *list, - struct cds_list_head *head) -{ - if (!cds_list_empty(list)) - __cds_list_splice(list, head->prev, head); -} - -/** - * cds_list_splice_init - join two lists and reinitialise the emptied list. - * @list: the new list to add. - * @head: the place to add it in the first list. - * - * The list at @list is reinitialised - */ -static inline void cds_list_splice_init(struct cds_list_head *list, - struct cds_list_head *head) -{ - if (!cds_list_empty(list)) { - __cds_list_splice(list, head, head->next); - CDS_INIT_LIST_HEAD(list); - } -} - -/** - * cds_list_splice_tail_init - join two lists and reinitialise the emptied list - * @list: the new list to add. - * @head: the place to add it in the first list. - * - * Each of the lists is a queue. - * The list at @list is reinitialised - */ -static inline void cds_list_splice_tail_init(struct cds_list_head *list, - struct cds_list_head *head) -{ - if (!cds_list_empty(list)) { - __cds_list_splice(list, head->prev, head); - CDS_INIT_LIST_HEAD(list); - } -} - -/** - * cds_list_entry - get the struct for this entry - * @ptr: the &struct cds_list_head pointer. - * @type: the type of the struct this is embedded in. - * @member: the name of the list_struct within the struct. 
- */ -#define cds_list_entry(ptr, type, member) \ - caa_container_of(ptr, type, member) - -/** - * list_first_entry - get the first element from a list - * @ptr: the list head to take the element from. - * @type: the type of the struct this is embedded in. - * @member: the name of the list_struct within the struct. - * - * Note, that list is expected to be not empty. - */ -#define list_first_entry(ptr, type, member) \ - cds_list_entry((ptr)->next, type, member) - -/** - * cds_list_for_each - iterate over a list - * @pos: the &struct cds_list_head to use as a loop cursor. - * @head: the head for your list. - */ -#define cds_list_for_each(pos, head) \ - for (pos = (head)->next; prefetch(pos->next), pos != (head); \ - pos = pos->next) - -/** - * __cds_list_for_each - iterate over a list - * @pos: the &struct cds_list_head to use as a loop cursor. - * @head: the head for your list. - * - * This variant differs from cds_list_for_each() in that it's the - * simplest possible list iteration code, no prefetching is done. - * Use this for code that knows the list to be very short (empty - * or 1 entry) most of the time. - */ -#define __cds_list_for_each(pos, head) \ - for (pos = (head)->next; pos != (head); pos = pos->next) - -/** - * cds_list_for_each_prev - iterate over a list backwards - * @pos: the &struct cds_list_head to use as a loop cursor. - * @head: the head for your list. - */ -#define cds_list_for_each_prev(pos, head) \ - for (pos = (head)->prev; prefetch(pos->prev), pos != (head); \ - pos = pos->prev) - -/** - * cds_list_for_each_safe - iterate over a list safe against removal of list entry - * @pos: the &struct cds_list_head to use as a loop cursor. - * @n: another &struct cds_list_head to use as temporary storage - * @head: the head for your list. - */ -#define cds_list_for_each_safe(pos, n, head) \ - for (pos = (head)->next, n = pos->next; pos != (head); \ - pos = n, n = pos->next) - -/** - * cds_list_for_each_prev_safe - iterate over a list backwards safe against removal of list entry - * @pos: the &struct cds_list_head to use as a loop cursor. - * @n: another &struct cds_list_head to use as temporary storage - * @head: the head for your list. - */ -#define cds_list_for_each_prev_safe(pos, n, head) \ - for (pos = (head)->prev, n = pos->prev; \ - prefetch(pos->prev), pos != (head); \ - pos = n, n = pos->prev) - -/** - * cds_list_for_each_entry - iterate over list of given type - * @pos: the type * to use as a loop cursor. - * @head: the head for your list. - * @member: the name of the list_struct within the struct. - */ -#define cds_list_for_each_entry(pos, head, member) \ - for (pos = cds_list_entry((head)->next, typeof(*pos), member); \ - prefetch(pos->member.next), &pos->member != (head); \ - pos = cds_list_entry(pos->member.next, typeof(*pos), member)) - -/** - * cds_list_for_each_entry_reverse - iterate backwards over list of given type. - * @pos: the type * to use as a loop cursor. - * @head: the head for your list. - * @member: the name of the list_struct within the struct. - */ -#define cds_list_for_each_entry_reverse(pos, head, member) \ - for (pos = cds_list_entry((head)->prev, typeof(*pos), member); \ - prefetch(pos->member.prev), &pos->member != (head); \ - pos = cds_list_entry(pos->member.prev, typeof(*pos), member)) - -/** - * list_prepare_entry - prepare a pos entry for use in cds_list_for_each_entry_continue() - * @pos: the type * to use as a start point - * @head: the head of the list - * @member: the name of the list_struct within the struct. 
- * - * Prepares a pos entry for use as a start point in cds_list_for_each_entry_continue(). - */ -#define list_prepare_entry(pos, head, member) \ - ((pos) ? : cds_list_entry(head, typeof(*pos), member)) - -/** - * cds_list_for_each_entry_continue - continue iteration over list of given type - * @pos: the type * to use as a loop cursor. - * @head: the head for your list. - * @member: the name of the list_struct within the struct. - * - * Continue to iterate over list of given type, continuing after - * the current position. - */ -#define cds_list_for_each_entry_continue(pos, head, member) \ - for (pos = cds_list_entry(pos->member.next, typeof(*pos), member); \ - prefetch(pos->member.next), &pos->member != (head); \ - pos = cds_list_entry(pos->member.next, typeof(*pos), member)) - -/** - * cds_list_for_each_entry_continue_reverse - iterate backwards from the given point - * @pos: the type * to use as a loop cursor. - * @head: the head for your list. - * @member: the name of the list_struct within the struct. - * - * Start to iterate over list of given type backwards, continuing after - * the current position. - */ -#define cds_list_for_each_entry_continue_reverse(pos, head, member) \ - for (pos = cds_list_entry(pos->member.prev, typeof(*pos), member); \ - prefetch(pos->member.prev), &pos->member != (head); \ - pos = cds_list_entry(pos->member.prev, typeof(*pos), member)) - -/** - * cds_list_for_each_entry_from - iterate over list of given type from the current point - * @pos: the type * to use as a loop cursor. - * @head: the head for your list. - * @member: the name of the list_struct within the struct. - * - * Iterate over list of given type, continuing from current position. - */ -#define cds_list_for_each_entry_from(pos, head, member) \ - for (; prefetch(pos->member.next), &pos->member != (head); \ - pos = cds_list_entry(pos->member.next, typeof(*pos), member)) - -/** - * cds_list_for_each_entry_safe - iterate over list of given type safe against removal of list entry - * @pos: the type * to use as a loop cursor. - * @n: another type * to use as temporary storage - * @head: the head for your list. - * @member: the name of the list_struct within the struct. - */ -#define cds_list_for_each_entry_safe(pos, n, head, member) \ - for (pos = cds_list_entry((head)->next, typeof(*pos), member), \ - n = cds_list_entry(pos->member.next, typeof(*pos), member); \ - &pos->member != (head); \ - pos = n, n = cds_list_entry(n->member.next, typeof(*n), member)) - -/** - * cds_list_for_each_entry_safe_continue - * @pos: the type * to use as a loop cursor. - * @n: another type * to use as temporary storage - * @head: the head for your list. - * @member: the name of the list_struct within the struct. - * - * Iterate over list of given type, continuing after current point, - * safe against removal of list entry. - */ -#define cds_list_for_each_entry_safe_continue(pos, n, head, member) \ - for (pos = cds_list_entry(pos->member.next, typeof(*pos), member), \ - n = cds_list_entry(pos->member.next, typeof(*pos), member); \ - &pos->member != (head); \ - pos = n, n = cds_list_entry(n->member.next, typeof(*n), member)) - -/** - * cds_list_for_each_entry_safe_from - * @pos: the type * to use as a loop cursor. - * @n: another type * to use as temporary storage - * @head: the head for your list. - * @member: the name of the list_struct within the struct. - * - * Iterate over list of given type from current point, safe against - * removal of list entry. 
- */ -#define cds_list_for_each_entry_safe_from(pos, n, head, member) \ - for (n = cds_list_entry(pos->member.next, typeof(*pos), member); \ - &pos->member != (head); \ - pos = n, n = cds_list_entry(n->member.next, typeof(*n), member)) - -/** - * cds_list_for_each_entry_safe_reverse - * @pos: the type * to use as a loop cursor. - * @n: another type * to use as temporary storage - * @head: the head for your list. - * @member: the name of the list_struct within the struct. - * - * Iterate backwards over list of given type, safe against removal - * of list entry. - */ -#define cds_list_for_each_entry_safe_reverse(pos, n, head, member) \ - for (pos = cds_list_entry((head)->prev, typeof(*pos), member), \ - n = cds_list_entry(pos->member.prev, typeof(*pos), member); \ - &pos->member != (head); \ - pos = n, n = cds_list_entry(n->member.prev, typeof(*n), member)) - -#endif //0 - -/* - * Double linked lists with a single pointer list head. - * Mostly useful for hash tables where the two pointer list head is - * too wasteful. - * You lose the ability to access the tail in O(1). - */ - -struct cds_hlist_head { - struct cds_hlist_node *first; -}; - -struct cds_hlist_node { - struct cds_hlist_node *next, **pprev; -}; - -#define HLIST_HEAD_INIT { .first = NULL } -#define HLIST_HEAD(name) struct cds_hlist_head name = { .first = NULL } -#define CDS_INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL) -static inline void INIT_HLIST_NODE(struct cds_hlist_node *h) -{ - h->next = NULL; - h->pprev = NULL; -} - -static inline int hlist_unhashed(const struct cds_hlist_node *h) -{ - return !h->pprev; -} - -static inline int hlist_empty(const struct cds_hlist_head *h) -{ - return !h->first; -} - -static inline void __cds_hlist_del(struct cds_hlist_node *n) -{ - struct cds_hlist_node *next = n->next; - struct cds_hlist_node **pprev = n->pprev; - *pprev = next; - if (next) - next->pprev = pprev; -} - -static inline void cds_hlist_del(struct cds_hlist_node *n) -{ - __cds_hlist_del(n); - n->next = LIST_POISON1; - n->pprev = LIST_POISON2; -} - -static inline void cds_hlist_del_init(struct cds_hlist_node *n) -{ - if (!hlist_unhashed(n)) { - __cds_hlist_del(n); - INIT_HLIST_NODE(n); - } -} - -static inline void cds_hlist_add_head(struct cds_hlist_node *n, struct cds_hlist_head *h) -{ - struct cds_hlist_node *first = h->first; - n->next = first; - if (first) - first->pprev = &n->next; - h->first = n; - n->pprev = &h->first; -} - -/* next must be != NULL */ -static inline void hlist_add_before(struct cds_hlist_node *n, - struct cds_hlist_node *next) -{ - n->pprev = next->pprev; - n->next = next; - next->pprev = &n->next; - *(n->pprev) = n; -} - -static inline void hlist_add_after(struct cds_hlist_node *n, - struct cds_hlist_node *next) -{ - next->next = n->next; - n->next = next; - next->pprev = &n->next; - - if(next->next) - next->next->pprev = &next->next; -} - -/* - * Move a list from one list head to another. Fixup the pprev - * reference of the first entry if it exists. 
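For reference, a small sketch of the doubly-linked cds_list API above: a queue is built with cds_list_add_tail(), walked with the no-prefetch __cds_list_for_each() variant (so nothing beyond the definitions shown is needed), and drained with the _safe iterator. struct job and its contents are invented for illustration:

#include <stdio.h>

struct job {
	int id;
	struct cds_list_head list;	/* embedded queue linkage */
};

static void queue_demo(void)
{
	CDS_LIST_HEAD(queue);		/* empty circular list head */
	struct job a = { .id = 1 }, b = { .id = 2 };
	struct cds_list_head *pos, *n;

	cds_list_add_tail(&a.list, &queue);	/* FIFO order: a then b */
	cds_list_add_tail(&b.list, &queue);

	__cds_list_for_each(pos, &queue)
		printf("job %d\n", cds_list_entry(pos, struct job, list)->id);

	/* removal during traversal needs the _safe form */
	cds_list_for_each_safe(pos, n, &queue)
		cds_list_del_init(pos);
}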
- */ -static inline void hlist_move_list(struct cds_hlist_head *old, - struct cds_hlist_head *new) -{ - new->first = old->first; - if (new->first) - new->first->pprev = &new->first; - old->first = NULL; -} - -#define cds_hlist_entry(ptr, type, member) caa_container_of(ptr,type,member) - -#define cds_hlist_for_each(pos, head) \ - for (pos = (head)->first; pos && ({ prefetch(pos->next); 1; }); \ - pos = pos->next) - -#define cds_hlist_for_each_safe(pos, n, head) \ - for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \ - pos = n) - -/** - * cds_hlist_for_each_entry - iterate over list of given type - * @tpos: the type * to use as a loop cursor. - * @pos: the &struct cds_hlist_node to use as a loop cursor. - * @head: the head for your list. - * @member: the name of the cds_hlist_node within the struct. - */ -#define cds_hlist_for_each_entry(tpos, pos, head, member) \ - for (pos = (head)->first; \ - pos && ({ prefetch(pos->next); 1;}) && \ - ({ tpos = cds_hlist_entry(pos, typeof(*tpos), member); 1;}); \ - pos = pos->next) - -/** - * cds_hlist_for_each_entry_continue - iterate over a hlist continuing after current point - * @tpos: the type * to use as a loop cursor. - * @pos: the &struct cds_hlist_node to use as a loop cursor. - * @member: the name of the cds_hlist_node within the struct. - */ -#define cds_hlist_for_each_entry_continue(tpos, pos, member) \ - for (pos = (pos)->next; \ - pos && ({ prefetch(pos->next); 1;}) && \ - ({ tpos = cds_hlist_entry(pos, typeof(*tpos), member); 1;}); \ - pos = pos->next) - -/** - * cds_hlist_for_each_entry_from - iterate over a hlist continuing from current point - * @tpos: the type * to use as a loop cursor. - * @pos: the &struct cds_hlist_node to use as a loop cursor. - * @member: the name of the cds_hlist_node within the struct. - */ -#define cds_hlist_for_each_entry_from(tpos, pos, member) \ - for (; pos && ({ prefetch(pos->next); 1;}) && \ - ({ tpos = cds_hlist_entry(pos, typeof(*tpos), member); 1;}); \ - pos = pos->next) - -/** - * cds_hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry - * @tpos: the type * to use as a loop cursor. - * @pos: the &struct cds_hlist_node to use as a loop cursor. - * @n: another &struct cds_hlist_node to use as temporary storage - * @head: the head for your list. - * @member: the name of the cds_hlist_node within the struct. - */ -#define cds_hlist_for_each_entry_safe(tpos, pos, n, head, member) \ - for (pos = (head)->first; \ - pos && ({ n = pos->next; 1; }) && \ - ({ tpos = cds_hlist_entry(pos, typeof(*tpos), member); 1;}); \ - pos = n) - -#endif - -#endif diff --git a/tests/api_x86.h b/tests/api_x86.h deleted file mode 100644 index 527221c..0000000 --- a/tests/api_x86.h +++ /dev/null @@ -1,1401 +0,0 @@ -/* MECHANICALLY GENERATED, DO NOT EDIT!!! */ - -#ifndef _INCLUDE_API_H -#define _INCLUDE_API_H - -#include "../config.h" - -/* - * common.h: Common Linux kernel-isms. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; but version 2 of the License only due - * to code included from the Linux kernel. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. - * - * Copyright (c) 2006 Paul E. McKenney, IBM. - * - * Much code taken from the Linux kernel. For such code, the option - * to redistribute under later versions of GPL might not be available. - */ - -#include - -#ifndef __always_inline -#define __always_inline inline -#endif - -#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) -#define BUILD_BUG_ON_ZERO(e) (sizeof(char[1 - 2 * !!(e)]) - 1) - -#ifdef __ASSEMBLY__ -# define stringify_in_c(...) __VA_ARGS__ -# define ASM_CONST(x) x -#else -/* This version of stringify will deal with commas... */ -# define __stringify_in_c(...) #__VA_ARGS__ -# define stringify_in_c(...) __stringify_in_c(__VA_ARGS__) " " -# define __ASM_CONST(x) x##UL -# define ASM_CONST(x) __ASM_CONST(x) -#endif - - -/* - * arch-i386.h: Expose x86 atomic instructions. 80486 and better only. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, but version 2 only due to inclusion - * of Linux-kernel code. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. - * - * Copyright (c) 2006 Paul E. McKenney, IBM. - * - * Much code taken from the Linux kernel. For such code, the option - * to redistribute under later versions of GPL might not be available. - */ - -/* - * Machine parameters. - */ - -/* #define CAA_CACHE_LINE_SIZE 64 */ -#define ____cacheline_internodealigned_in_smp \ - __attribute__((__aligned__(1 << 6))) - -#define LOCK_PREFIX "lock ; " - -#if 0 /* duplicate with arch_atomic.h */ - -/* - * Atomic data structure, initialization, and access. - */ - -typedef struct { volatile int counter; } atomic_t; - -#define ATOMIC_INIT(i) { (i) } - -#define atomic_read(v) ((v)->counter) -#define atomic_set(v, i) (((v)->counter) = (i)) - -/* - * Atomic operations. - */ - -/** - * atomic_add - add integer to atomic variable - * @i: integer value to add - * @v: pointer of type atomic_t - * - * Atomically adds @i to @v. - */ -static __inline__ void atomic_add(int i, atomic_t *v) -{ - __asm__ __volatile__( - LOCK_PREFIX "addl %1,%0" - :"+m" (v->counter) - :"ir" (i)); -} - -/** - * atomic_sub - subtract the atomic variable - * @i: integer value to subtract - * @v: pointer of type atomic_t - * - * Atomically subtracts @i from @v. - */ -static __inline__ void atomic_sub(int i, atomic_t *v) -{ - __asm__ __volatile__( - LOCK_PREFIX "subl %1,%0" - :"+m" (v->counter) - :"ir" (i)); -} - -/** - * atomic_sub_and_test - subtract value from variable and test result - * @i: integer value to subtract - * @v: pointer of type atomic_t - * - * Atomically subtracts @i from @v and returns - * true if the result is zero, or false for all - * other cases. 
- */ -static __inline__ int atomic_sub_and_test(int i, atomic_t *v) -{ - unsigned char c; - - __asm__ __volatile__( - LOCK_PREFIX "subl %2,%0; sete %1" - :"+m" (v->counter), "=qm" (c) - :"ir" (i) : "memory"); - return c; -} - -/** - * atomic_inc - increment atomic variable - * @v: pointer of type atomic_t - * - * Atomically increments @v by 1. - */ -static __inline__ void atomic_inc(atomic_t *v) -{ - __asm__ __volatile__( - LOCK_PREFIX "incl %0" - :"+m" (v->counter)); -} - -/** - * atomic_dec - decrement atomic variable - * @v: pointer of type atomic_t - * - * Atomically decrements @v by 1. - */ -static __inline__ void atomic_dec(atomic_t *v) -{ - __asm__ __volatile__( - LOCK_PREFIX "decl %0" - :"+m" (v->counter)); -} - -/** - * atomic_dec_and_test - decrement and test - * @v: pointer of type atomic_t - * - * Atomically decrements @v by 1 and - * returns true if the result is 0, or false for all other - * cases. - */ -static __inline__ int atomic_dec_and_test(atomic_t *v) -{ - unsigned char c; - - __asm__ __volatile__( - LOCK_PREFIX "decl %0; sete %1" - :"+m" (v->counter), "=qm" (c) - : : "memory"); - return c != 0; -} - -/** - * atomic_inc_and_test - increment and test - * @v: pointer of type atomic_t - * - * Atomically increments @v by 1 - * and returns true if the result is zero, or false for all - * other cases. - */ -static __inline__ int atomic_inc_and_test(atomic_t *v) -{ - unsigned char c; - - __asm__ __volatile__( - LOCK_PREFIX "incl %0; sete %1" - :"+m" (v->counter), "=qm" (c) - : : "memory"); - return c != 0; -} - -/** - * atomic_add_negative - add and test if negative - * @v: pointer of type atomic_t - * @i: integer value to add - * - * Atomically adds @i to @v and returns true - * if the result is negative, or false when - * result is greater than or equal to zero. - */ -static __inline__ int atomic_add_negative(int i, atomic_t *v) -{ - unsigned char c; - - __asm__ __volatile__( - LOCK_PREFIX "addl %2,%0; sets %1" - :"+m" (v->counter), "=qm" (c) - :"ir" (i) : "memory"); - return c; -} - -/** - * atomic_add_return - add and return - * @v: pointer of type atomic_t - * @i: integer value to add - * - * Atomically adds @i to @v and returns @i + @v - */ -static __inline__ int atomic_add_return(int i, atomic_t *v) -{ - int __i; - - __i = i; - __asm__ __volatile__( - LOCK_PREFIX "xaddl %0, %1;" - :"=r"(i) - :"m"(v->counter), "0"(i)); - return i + __i; -} - -static __inline__ int atomic_sub_return(int i, atomic_t *v) -{ - return atomic_add_return(-i,v); -} - -static inline unsigned int -cmpxchg(volatile long *ptr, long oldval, long newval) -{ - unsigned long retval; - - asm("# cmpxchg\n" - "lock; cmpxchgl %4,(%2)\n" - "# end atomic_cmpxchg4" - : "=a" (retval), "=m" (*ptr) - : "r" (ptr), "0" (oldval), "r" (newval), "m" (*ptr) - : "cc"); - return (retval); -} - -#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new)) -#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) - -/** - * atomic_add_unless - add unless the number is a given value - * @v: pointer of type atomic_t - * @a: the amount to add to v... - * @u: ...unless v is equal to u. - * - * Atomically adds @a to @v, so long as it was not @u. - * Returns non-zero if @v was not @u, and zero otherwise. 
- */ -#define atomic_add_unless(v, a, u) \ -({ \ - int c, old; \ - c = atomic_read(v); \ - for (;;) { \ - if (unlikely(c == (u))) \ - break; \ - old = atomic_cmpxchg((v), c, c + (a)); \ - if (likely(old == c)) \ - break; \ - c = old; \ - } \ - c != (u); \ -}) -#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) - -#define atomic_inc_return(v) (atomic_add_return(1,v)) -#define atomic_dec_return(v) (atomic_sub_return(1,v)) - -/* These are x86-specific, used by some header files */ -#define atomic_clear_mask(mask, addr) \ -__asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \ -: : "r" (~(mask)),"m" (*addr) : "memory") - -#define atomic_set_mask(mask, addr) \ -__asm__ __volatile__(LOCK_PREFIX "orl %0,%1" \ -: : "r" (mask),"m" (*(addr)) : "memory") - -/* Atomic operations are already serializing on x86 */ -#define smp_mb__before_atomic_dec() cmm_barrier() -#define smp_mb__after_atomic_dec() cmm_barrier() -#define smp_mb__before_atomic_inc() cmm_barrier() -#define smp_mb__after_atomic_inc() cmm_barrier() - -#endif //0 - -/* - * api_pthreads.h: API mapping to pthreads environment. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. However, please note that much - * of the code in this file derives from the Linux kernel, and that such - * code may not be available except under GPLv2. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. - * - * Copyright (c) 2006 Paul E. McKenney, IBM. - */ - -#include -#include -#include -#include -#include -#define __USE_GNU -#include -#include -#include -/* #include "atomic.h" */ - -/* - * Default machine parameters. - */ - -#ifndef CAA_CACHE_LINE_SIZE -/* #define CAA_CACHE_LINE_SIZE 128 */ -#endif /* #ifndef CAA_CACHE_LINE_SIZE */ - -/* - * Exclusive locking primitives. - */ - -typedef pthread_mutex_t spinlock_t; - -#define DEFINE_SPINLOCK(lock) spinlock_t lock = PTHREAD_MUTEX_INITIALIZER; -#define __SPIN_LOCK_UNLOCKED(lockp) PTHREAD_MUTEX_INITIALIZER - -static void spin_lock_init(spinlock_t *sp) -{ - if (pthread_mutex_init(sp, NULL) != 0) { - perror("spin_lock_init:pthread_mutex_init"); - exit(-1); - } -} - -static void spin_lock(spinlock_t *sp) -{ - if (pthread_mutex_lock(sp) != 0) { - perror("spin_lock:pthread_mutex_lock"); - exit(-1); - } -} - -static void spin_unlock(spinlock_t *sp) -{ - if (pthread_mutex_unlock(sp) != 0) { - perror("spin_unlock:pthread_mutex_unlock"); - exit(-1); - } -} - -#define spin_lock_irqsave(l, f) do { f = 1; spin_lock(l); } while (0) -#define spin_unlock_irqrestore(l, f) do { f = 0; spin_unlock(l); } while (0) - -/* - * Thread creation/destruction primitives. 
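The atomic_add_unless() macro above is the usual compare-and-swap retry loop: re-read the current value, bail out on the forbidden value, otherwise attempt the CAS and loop on failure. The same idiom can be written portably against liburcu's uatomic layer; a sketch, assuming uatomic_read() and uatomic_cmpxchg() from urcu/uatomic.h:

#include <urcu/uatomic.h>

/*
 * Add "a" to *v unless *v equals "u"; returns non-zero when the add
 * actually happened. Same retry idiom as atomic_add_unless() above,
 * expressed with the portable uatomic helpers instead of inline asm.
 */
static int add_unless(unsigned long *v, unsigned long a, unsigned long u)
{
	unsigned long old, c = uatomic_read(v);

	for (;;) {
		if (c == u)
			return 0;		/* forbidden value: give up */
		old = uatomic_cmpxchg(v, c, c + a);
		if (old == c)
			return 1;		/* CAS won: add committed */
		c = old;			/* lost the race: retry */
	}
}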
- */ - -typedef pthread_t thread_id_t; - -#define NR_THREADS 128 - -#define __THREAD_ID_MAP_EMPTY ((thread_id_t) 0) -#define __THREAD_ID_MAP_WAITING ((thread_id_t) 1) -thread_id_t __thread_id_map[NR_THREADS]; -spinlock_t __thread_id_map_mutex; - -#define for_each_thread(t) \ - for (t = 0; t < NR_THREADS; t++) - -#define for_each_running_thread(t) \ - for (t = 0; t < NR_THREADS; t++) \ - if ((__thread_id_map[t] != __THREAD_ID_MAP_EMPTY) && \ - (__thread_id_map[t] != __THREAD_ID_MAP_WAITING)) - -pthread_key_t thread_id_key; - -static int __smp_thread_id(void) -{ - int i; - thread_id_t tid = pthread_self(); - - for (i = 0; i < NR_THREADS; i++) { - if (__thread_id_map[i] == tid) { - long v = i + 1; /* must be non-NULL. */ - - if (pthread_setspecific(thread_id_key, (void *)v) != 0) { - perror("pthread_setspecific"); - exit(-1); - } - return i; - } - } - spin_lock(&__thread_id_map_mutex); - for (i = 0; i < NR_THREADS; i++) { - if (__thread_id_map[i] == tid) - spin_unlock(&__thread_id_map_mutex); - return i; - } - spin_unlock(&__thread_id_map_mutex); - fprintf(stderr, "smp_thread_id: Rogue thread, id: %d(%#x)\n", - (int)tid, (int)tid); - exit(-1); -} - -static int smp_thread_id(void) -{ - void *id; - - id = pthread_getspecific(thread_id_key); - if (id == NULL) - return __smp_thread_id(); - return (long)(id - 1); -} - -static thread_id_t create_thread(void *(*func)(void *), void *arg) -{ - thread_id_t tid; - int i; - - spin_lock(&__thread_id_map_mutex); - for (i = 0; i < NR_THREADS; i++) { - if (__thread_id_map[i] == __THREAD_ID_MAP_EMPTY) - break; - } - if (i >= NR_THREADS) { - spin_unlock(&__thread_id_map_mutex); - fprintf(stderr, "Thread limit of %d exceeded!\n", NR_THREADS); - exit(-1); - } - __thread_id_map[i] = __THREAD_ID_MAP_WAITING; - spin_unlock(&__thread_id_map_mutex); - if (pthread_create(&tid, NULL, func, arg) != 0) { - perror("create_thread:pthread_create"); - exit(-1); - } - __thread_id_map[i] = tid; - return tid; -} - -static void *wait_thread(thread_id_t tid) -{ - int i; - void *vp; - - for (i = 0; i < NR_THREADS; i++) { - if (__thread_id_map[i] == tid) - break; - } - if (i >= NR_THREADS){ - fprintf(stderr, "wait_thread: bad tid = %d(%#x)\n", - (int)tid, (int)tid); - exit(-1); - } - if (pthread_join(tid, &vp) != 0) { - perror("wait_thread:pthread_join"); - exit(-1); - } - __thread_id_map[i] = __THREAD_ID_MAP_EMPTY; - return vp; -} - -static void wait_all_threads(void) -{ - int i; - thread_id_t tid; - - for (i = 1; i < NR_THREADS; i++) { - tid = __thread_id_map[i]; - if (tid != __THREAD_ID_MAP_EMPTY && - tid != __THREAD_ID_MAP_WAITING) - (void)wait_thread(tid); - } -} - -#ifndef HAVE_CPU_SET_T -typedef unsigned long cpu_set_t; -# define CPU_ZERO(cpuset) do { *(cpuset) = 0; } while(0) -# define CPU_SET(cpu, cpuset) do { *(cpuset) |= (1UL << (cpu)); } while(0) -#endif - -static void run_on(int cpu) -{ -#if HAVE_SCHED_SETAFFINITY - cpu_set_t mask; - - CPU_ZERO(&mask); - CPU_SET(cpu, &mask); -#if SCHED_SETAFFINITY_ARGS == 2 - sched_setaffinity(0, &mask); -#else - sched_setaffinity(0, sizeof(mask), &mask); -#endif -#endif /* HAVE_SCHED_SETAFFINITY */ -} - -/* - * timekeeping -- very crude -- should use MONOTONIC... - */ - -long long get_microseconds(void) -{ - struct timeval tv; - - if (gettimeofday(&tv, NULL) != 0) - abort(); - return ((long long)tv.tv_sec) * 1000000LL + (long long)tv.tv_usec; -} - -/* - * Per-thread variables. 
- */ - -#define DEFINE_PER_THREAD(type, name) \ - struct { \ - __typeof__(type) v \ - __attribute__((__aligned__(CAA_CACHE_LINE_SIZE))); \ - } __per_thread_##name[NR_THREADS]; -#define DECLARE_PER_THREAD(type, name) extern DEFINE_PER_THREAD(type, name) - -#define per_thread(name, thread) __per_thread_##name[thread].v -#define __get_thread_var(name) per_thread(name, smp_thread_id()) - -#define init_per_thread(name, v) \ - do { \ - int __i_p_t_i; \ - for (__i_p_t_i = 0; __i_p_t_i < NR_THREADS; __i_p_t_i++) \ - per_thread(name, __i_p_t_i) = v; \ - } while (0) - -/* - * CPU traversal primitives. - */ - -#ifndef NR_CPUS -#define NR_CPUS 16 -#endif /* #ifndef NR_CPUS */ - -#define for_each_possible_cpu(cpu) \ - for (cpu = 0; cpu < NR_CPUS; cpu++) -#define for_each_online_cpu(cpu) \ - for (cpu = 0; cpu < NR_CPUS; cpu++) - -/* - * Per-CPU variables. - */ - -#define DEFINE_PER_CPU(type, name) \ - struct { \ - __typeof__(type) v \ - __attribute__((__aligned__(CAA_CACHE_LINE_SIZE))); \ - } __per_cpu_##name[NR_CPUS] -#define DECLARE_PER_CPU(type, name) extern DEFINE_PER_CPU(type, name) - -DEFINE_PER_THREAD(int, smp_processor_id); - -#define per_cpu(name, thread) __per_cpu_##name[thread].v -#define __get_cpu_var(name) per_cpu(name, smp_processor_id()) - -#define init_per_cpu(name, v) \ - do { \ - int __i_p_c_i; \ - for (__i_p_c_i = 0; __i_p_c_i < NR_CPUS; __i_p_c_i++) \ - per_cpu(name, __i_p_c_i) = v; \ - } while (0) - -/* - * CPU state checking (crowbarred). - */ - -#define idle_cpu(cpu) 0 -#define in_softirq() 1 -#define hardirq_count() 0 -#define PREEMPT_SHIFT 0 -#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS) -#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS) -#define PREEMPT_BITS 8 -#define SOFTIRQ_BITS 8 - -/* - * CPU hotplug. - */ - -struct notifier_block { - int (*notifier_call)(struct notifier_block *, unsigned long, void *); - struct notifier_block *next; - int priority; -}; - -#define CPU_ONLINE 0x0002 /* CPU (unsigned)v is up */ -#define CPU_UP_PREPARE 0x0003 /* CPU (unsigned)v coming up */ -#define CPU_UP_CANCELED 0x0004 /* CPU (unsigned)v NOT coming up */ -#define CPU_DOWN_PREPARE 0x0005 /* CPU (unsigned)v going down */ -#define CPU_DOWN_FAILED 0x0006 /* CPU (unsigned)v NOT going down */ -#define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */ -#define CPU_DYING 0x0008 /* CPU (unsigned)v not running any task, - * not handling interrupts, soon dead */ -#define CPU_POST_DEAD 0x0009 /* CPU (unsigned)v dead, cpu_hotplug - * lock is dropped */ - -/* Used for CPU hotplug events occuring while tasks are frozen due to a suspend - * operation in progress - */ -#define CPU_TASKS_FROZEN 0x0010 - -#define CPU_ONLINE_FROZEN (CPU_ONLINE | CPU_TASKS_FROZEN) -#define CPU_UP_PREPARE_FROZEN (CPU_UP_PREPARE | CPU_TASKS_FROZEN) -#define CPU_UP_CANCELED_FROZEN (CPU_UP_CANCELED | CPU_TASKS_FROZEN) -#define CPU_DOWN_PREPARE_FROZEN (CPU_DOWN_PREPARE | CPU_TASKS_FROZEN) -#define CPU_DOWN_FAILED_FROZEN (CPU_DOWN_FAILED | CPU_TASKS_FROZEN) -#define CPU_DEAD_FROZEN (CPU_DEAD | CPU_TASKS_FROZEN) -#define CPU_DYING_FROZEN (CPU_DYING | CPU_TASKS_FROZEN) - -/* Hibernation and suspend events */ -#define PM_HIBERNATION_PREPARE 0x0001 /* Going to hibernate */ -#define PM_POST_HIBERNATION 0x0002 /* Hibernation finished */ -#define PM_SUSPEND_PREPARE 0x0003 /* Going to suspend the system */ -#define PM_POST_SUSPEND 0x0004 /* Suspend finished */ -#define PM_RESTORE_PREPARE 0x0005 /* Going to restore a saved image */ -#define PM_POST_RESTORE 0x0006 /* Restore failed */ - -#define NOTIFY_DONE 0x0000 /* Don't care */ -#define 
NOTIFY_OK 0x0001 /* Suits me */ -#define NOTIFY_STOP_MASK 0x8000 /* Don't call further */ -#define NOTIFY_BAD (NOTIFY_STOP_MASK|0x0002) - /* Bad/Veto action */ -/* - * Clean way to return from the notifier and stop further calls. - */ -#define NOTIFY_STOP (NOTIFY_OK|NOTIFY_STOP_MASK) - -/* - * Bug checks. - */ - -#define BUG_ON(c) do { if (!(c)) abort(); } while (0) - -/* - * Initialization -- Must be called before calling any primitives. - */ - -static void smp_init(void) -{ - int i; - - spin_lock_init(&__thread_id_map_mutex); - __thread_id_map[0] = pthread_self(); - for (i = 1; i < NR_THREADS; i++) - __thread_id_map[i] = __THREAD_ID_MAP_EMPTY; - init_per_thread(smp_processor_id, 0); - if (pthread_key_create(&thread_id_key, NULL) != 0) { - perror("pthread_key_create"); - exit(-1); - } -} - -/* Taken from the Linux kernel source tree, so GPLv2-only!!! */ - -#ifndef _LINUX_LIST_H -#define _LINUX_LIST_H - -#define LIST_POISON1 ((void *) 0x00100100) -#define LIST_POISON2 ((void *) 0x00200200) - -#if 0 - -/* - * Simple doubly linked list implementation. - * - * Some of the internal functions ("__xxx") are useful when - * manipulating whole lists rather than single entries, as - * sometimes we already know the next/prev entries and we can - * generate better code by using them directly rather than - * using the generic single-entry routines. - */ - -struct cds_list_head { - struct cds_list_head *next, *prev; -}; - -#define CDS_LIST_HEAD_INIT(name) { &(name), &(name) } - -#define CDS_LIST_HEAD(name) \ - struct cds_list_head name = CDS_LIST_HEAD_INIT(name) - -static inline void CDS_INIT_LIST_HEAD(struct cds_list_head *list) -{ - list->next = list; - list->prev = list; -} - -/* - * Insert a new entry between two known consecutive entries. - * - * This is only for internal list manipulation where we know - * the prev/next entries already! - */ -#ifndef CONFIG_DEBUG_LIST -static inline void __cds_list_add(struct cds_list_head *new, - struct cds_list_head *prev, - struct cds_list_head *next) -{ - next->prev = new; - new->next = next; - new->prev = prev; - prev->next = new; -} -#else -extern void __cds_list_add(struct cds_list_head *new, - struct cds_list_head *prev, - struct cds_list_head *next); -#endif - -/** - * cds_list_add - add a new entry - * @new: new entry to be added - * @head: list head to add it after - * - * Insert a new entry after the specified head. - * This is good for implementing stacks. - */ -static inline void cds_list_add(struct cds_list_head *new, struct cds_list_head *head) -{ - __cds_list_add(new, head, head->next); -} - - -/** - * cds_list_add_tail - add a new entry - * @new: new entry to be added - * @head: list head to add it before - * - * Insert a new entry before the specified head. - * This is useful for implementing queues. - */ -static inline void cds_list_add_tail(struct cds_list_head *new, struct cds_list_head *head) -{ - __cds_list_add(new, head->prev, head); -} - -/* - * Delete a list entry by making the prev/next entries - * point to each other. - * - * This is only for internal list manipulation where we know - * the prev/next entries already! - */ -static inline void __cds_list_del(struct cds_list_head * prev, struct cds_list_head * next) -{ - next->prev = prev; - prev->next = next; -} - -/** - * cds_list_del - deletes entry from list. - * @entry: the element to delete from the list. - * Note: cds_list_empty() on entry does not return true after this, the entry is - * in an undefined state. 
- */ -#ifndef CONFIG_DEBUG_LIST -static inline void cds_list_del(struct cds_list_head *entry) -{ - __cds_list_del(entry->prev, entry->next); - entry->next = LIST_POISON1; - entry->prev = LIST_POISON2; -} -#else -extern void cds_list_del(struct cds_list_head *entry); -#endif - -/** - * cds_list_replace - replace old entry by new one - * @old : the element to be replaced - * @new : the new element to insert - * - * If @old was empty, it will be overwritten. - */ -static inline void cds_list_replace(struct cds_list_head *old, - struct cds_list_head *new) -{ - new->next = old->next; - new->next->prev = new; - new->prev = old->prev; - new->prev->next = new; -} - -static inline void cds_list_replace_init(struct cds_list_head *old, - struct cds_list_head *new) -{ - cds_list_replace(old, new); - CDS_INIT_LIST_HEAD(old); -} - -/** - * cds_list_del_init - deletes entry from list and reinitialize it. - * @entry: the element to delete from the list. - */ -static inline void cds_list_del_init(struct cds_list_head *entry) -{ - __cds_list_del(entry->prev, entry->next); - CDS_INIT_LIST_HEAD(entry); -} - -/** - * cds_list_move - delete from one list and add as another's head - * @list: the entry to move - * @head: the head that will precede our entry - */ -static inline void cds_list_move(struct cds_list_head *list, struct cds_list_head *head) -{ - __cds_list_del(list->prev, list->next); - cds_list_add(list, head); -} - -/** - * cds_list_move_tail - delete from one list and add as another's tail - * @list: the entry to move - * @head: the head that will follow our entry - */ -static inline void cds_list_move_tail(struct cds_list_head *list, - struct cds_list_head *head) -{ - __cds_list_del(list->prev, list->next); - cds_list_add_tail(list, head); -} - -/** - * list_is_last - tests whether @list is the last entry in list @head - * @list: the entry to test - * @head: the head of the list - */ -static inline int list_is_last(const struct cds_list_head *list, - const struct cds_list_head *head) -{ - return list->next == head; -} - -/** - * cds_list_empty - tests whether a list is empty - * @head: the list to test. - */ -static inline int cds_list_empty(const struct cds_list_head *head) -{ - return head->next == head; -} - -/** - * cds_list_empty_careful - tests whether a list is empty and not being modified - * @head: the list to test - * - * Description: - * tests whether a list is empty _and_ checks that no other CPU might be - * in the process of modifying either member (next or prev) - * - * NOTE: using cds_list_empty_careful() without synchronization - * can only be safe if the only activity that can happen - * to the list entry is cds_list_del_init(). Eg. it cannot be used - * if another CPU could re-list_add() it. - */ -static inline int cds_list_empty_careful(const struct cds_list_head *head) -{ - struct cds_list_head *next = head->next; - return (next == head) && (next == head->prev); -} - -/** - * list_is_singular - tests whether a list has just one entry. - * @head: the list to test. 
- */ -static inline int list_is_singular(const struct cds_list_head *head) -{ - return !list_empty(head) && (head->next == head->prev); -} - -static inline void __list_cut_position(struct cds_list_head *list, - struct cds_list_head *head, struct cds_list_head *entry) -{ - struct cds_list_head *new_first = entry->next; - list->next = head->next; - list->next->prev = list; - list->prev = entry; - entry->next = list; - head->next = new_first; - new_first->prev = head; -} - -/** - * list_cut_position - cut a list into two - * @list: a new list to add all removed entries - * @head: a list with entries - * @entry: an entry within head, could be the head itself - * and if so we won't cut the list - * - * This helper moves the initial part of @head, up to and - * including @entry, from @head to @list. You should - * pass on @entry an element you know is on @head. @list - * should be an empty list or a list you do not care about - * losing its data. - * - */ -static inline void list_cut_position(struct cds_list_head *list, - struct cds_list_head *head, struct cds_list_head *entry) -{ - if (cds_list_empty(head)) - return; - if (list_is_singular(head) && - (head->next != entry && head != entry)) - return; - if (entry == head) - CDS_INIT_LIST_HEAD(list); - else - __list_cut_position(list, head, entry); -} - -static inline void __cds_list_splice(const struct cds_list_head *list, - struct cds_list_head *prev, - struct cds_list_head *next) -{ - struct cds_list_head *first = list->next; - struct cds_list_head *last = list->prev; - - first->prev = prev; - prev->next = first; - - last->next = next; - next->prev = last; -} - -/** - * cds_list_splice - join two lists, this is designed for stacks - * @list: the new list to add. - * @head: the place to add it in the first list. - */ -static inline void cds_list_splice(const struct cds_list_head *list, - struct cds_list_head *head) -{ - if (!cds_list_empty(list)) - __cds_list_splice(list, head, head->next); -} - -/** - * cds_list_splice_tail - join two lists, each list being a queue - * @list: the new list to add. - * @head: the place to add it in the first list. - */ -static inline void cds_list_splice_tail(struct cds_list_head *list, - struct cds_list_head *head) -{ - if (!cds_list_empty(list)) - __cds_list_splice(list, head->prev, head); -} - -/** - * cds_list_splice_init - join two lists and reinitialise the emptied list. - * @list: the new list to add. - * @head: the place to add it in the first list. - * - * The list at @list is reinitialised - */ -static inline void cds_list_splice_init(struct cds_list_head *list, - struct cds_list_head *head) -{ - if (!cds_list_empty(list)) { - __cds_list_splice(list, head, head->next); - CDS_INIT_LIST_HEAD(list); - } -} - -/** - * cds_list_splice_tail_init - join two lists and reinitialise the emptied list - * @list: the new list to add. - * @head: the place to add it in the first list. - * - * Each of the lists is a queue. - * The list at @list is reinitialised - */ -static inline void cds_list_splice_tail_init(struct cds_list_head *list, - struct cds_list_head *head) -{ - if (!cds_list_empty(list)) { - __cds_list_splice(list, head->prev, head); - CDS_INIT_LIST_HEAD(list); - } -} - -/** - * cds_list_entry - get the struct for this entry - * @ptr: the &struct cds_list_head pointer. - * @type: the type of the struct this is embedded in. - * @member: the name of the list_struct within the struct. 
- */ -#define cds_list_entry(ptr, type, member) \ - caa_container_of(ptr, type, member) - -/** - * list_first_entry - get the first element from a list - * @ptr: the list head to take the element from. - * @type: the type of the struct this is embedded in. - * @member: the name of the list_struct within the struct. - * - * Note, that list is expected to be not empty. - */ -#define list_first_entry(ptr, type, member) \ - cds_list_entry((ptr)->next, type, member) - -/** - * cds_list_for_each - iterate over a list - * @pos: the &struct cds_list_head to use as a loop cursor. - * @head: the head for your list. - */ -#define cds_list_for_each(pos, head) \ - for (pos = (head)->next; prefetch(pos->next), pos != (head); \ - pos = pos->next) - -/** - * __cds_list_for_each - iterate over a list - * @pos: the &struct cds_list_head to use as a loop cursor. - * @head: the head for your list. - * - * This variant differs from cds_list_for_each() in that it's the - * simplest possible list iteration code, no prefetching is done. - * Use this for code that knows the list to be very short (empty - * or 1 entry) most of the time. - */ -#define __cds_list_for_each(pos, head) \ - for (pos = (head)->next; pos != (head); pos = pos->next) - -/** - * cds_list_for_each_prev - iterate over a list backwards - * @pos: the &struct cds_list_head to use as a loop cursor. - * @head: the head for your list. - */ -#define cds_list_for_each_prev(pos, head) \ - for (pos = (head)->prev; prefetch(pos->prev), pos != (head); \ - pos = pos->prev) - -/** - * cds_list_for_each_safe - iterate over a list safe against removal of list entry - * @pos: the &struct cds_list_head to use as a loop cursor. - * @n: another &struct cds_list_head to use as temporary storage - * @head: the head for your list. - */ -#define cds_list_for_each_safe(pos, n, head) \ - for (pos = (head)->next, n = pos->next; pos != (head); \ - pos = n, n = pos->next) - -/** - * cds_list_for_each_prev_safe - iterate over a list backwards safe against removal of list entry - * @pos: the &struct cds_list_head to use as a loop cursor. - * @n: another &struct cds_list_head to use as temporary storage - * @head: the head for your list. - */ -#define cds_list_for_each_prev_safe(pos, n, head) \ - for (pos = (head)->prev, n = pos->prev; \ - prefetch(pos->prev), pos != (head); \ - pos = n, n = pos->prev) - -/** - * cds_list_for_each_entry - iterate over list of given type - * @pos: the type * to use as a loop cursor. - * @head: the head for your list. - * @member: the name of the list_struct within the struct. - */ -#define cds_list_for_each_entry(pos, head, member) \ - for (pos = cds_list_entry((head)->next, typeof(*pos), member); \ - prefetch(pos->member.next), &pos->member != (head); \ - pos = cds_list_entry(pos->member.next, typeof(*pos), member)) - -/** - * cds_list_for_each_entry_reverse - iterate backwards over list of given type. - * @pos: the type * to use as a loop cursor. - * @head: the head for your list. - * @member: the name of the list_struct within the struct. - */ -#define cds_list_for_each_entry_reverse(pos, head, member) \ - for (pos = cds_list_entry((head)->prev, typeof(*pos), member); \ - prefetch(pos->member.prev), &pos->member != (head); \ - pos = cds_list_entry(pos->member.prev, typeof(*pos), member)) - -/** - * list_prepare_entry - prepare a pos entry for use in cds_list_for_each_entry_continue() - * @pos: the type * to use as a start point - * @head: the head of the list - * @member: the name of the list_struct within the struct. 
- * - * Prepares a pos entry for use as a start point in cds_list_for_each_entry_continue(). - */ -#define list_prepare_entry(pos, head, member) \ - ((pos) ? : cds_list_entry(head, typeof(*pos), member)) - -/** - * cds_list_for_each_entry_continue - continue iteration over list of given type - * @pos: the type * to use as a loop cursor. - * @head: the head for your list. - * @member: the name of the list_struct within the struct. - * - * Continue to iterate over list of given type, continuing after - * the current position. - */ -#define cds_list_for_each_entry_continue(pos, head, member) \ - for (pos = cds_list_entry(pos->member.next, typeof(*pos), member); \ - prefetch(pos->member.next), &pos->member != (head); \ - pos = cds_list_entry(pos->member.next, typeof(*pos), member)) - -/** - * cds_list_for_each_entry_continue_reverse - iterate backwards from the given point - * @pos: the type * to use as a loop cursor. - * @head: the head for your list. - * @member: the name of the list_struct within the struct. - * - * Start to iterate over list of given type backwards, continuing after - * the current position. - */ -#define cds_list_for_each_entry_continue_reverse(pos, head, member) \ - for (pos = cds_list_entry(pos->member.prev, typeof(*pos), member); \ - prefetch(pos->member.prev), &pos->member != (head); \ - pos = cds_list_entry(pos->member.prev, typeof(*pos), member)) - -/** - * cds_list_for_each_entry_from - iterate over list of given type from the current point - * @pos: the type * to use as a loop cursor. - * @head: the head for your list. - * @member: the name of the list_struct within the struct. - * - * Iterate over list of given type, continuing from current position. - */ -#define cds_list_for_each_entry_from(pos, head, member) \ - for (; prefetch(pos->member.next), &pos->member != (head); \ - pos = cds_list_entry(pos->member.next, typeof(*pos), member)) - -/** - * cds_list_for_each_entry_safe - iterate over list of given type safe against removal of list entry - * @pos: the type * to use as a loop cursor. - * @n: another type * to use as temporary storage - * @head: the head for your list. - * @member: the name of the list_struct within the struct. - */ -#define cds_list_for_each_entry_safe(pos, n, head, member) \ - for (pos = cds_list_entry((head)->next, typeof(*pos), member), \ - n = cds_list_entry(pos->member.next, typeof(*pos), member); \ - &pos->member != (head); \ - pos = n, n = cds_list_entry(n->member.next, typeof(*n), member)) - -/** - * cds_list_for_each_entry_safe_continue - * @pos: the type * to use as a loop cursor. - * @n: another type * to use as temporary storage - * @head: the head for your list. - * @member: the name of the list_struct within the struct. - * - * Iterate over list of given type, continuing after current point, - * safe against removal of list entry. - */ -#define cds_list_for_each_entry_safe_continue(pos, n, head, member) \ - for (pos = cds_list_entry(pos->member.next, typeof(*pos), member), \ - n = cds_list_entry(pos->member.next, typeof(*pos), member); \ - &pos->member != (head); \ - pos = n, n = cds_list_entry(n->member.next, typeof(*n), member)) - -/** - * cds_list_for_each_entry_safe_from - * @pos: the type * to use as a loop cursor. - * @n: another type * to use as temporary storage - * @head: the head for your list. - * @member: the name of the list_struct within the struct. - * - * Iterate over list of given type from current point, safe against - * removal of list entry. 
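Editor's note: the *_safe iterators documented above keep a second cursor so the current entry can be unlinked without breaking the walk. A minimal sketch of that pattern against the public urcu/list.h API, assuming an illustrative struct item embedding a cds_list_head:

#include <stdlib.h>
#include <urcu/list.h>

struct item {
        int key;
        struct cds_list_head node;      /* linkage embedded in the object */
};

static void drop_matching(struct cds_list_head *head, int key)
{
        struct item *pos, *tmp;

        /* 'tmp' caches the next entry before 'pos' is unlinked and freed. */
        cds_list_for_each_entry_safe(pos, tmp, head, node) {
                if (pos->key == key) {
                        cds_list_del(&pos->node);
                        free(pos);
                }
        }
}
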
- */ -#define cds_list_for_each_entry_safe_from(pos, n, head, member) \ - for (n = cds_list_entry(pos->member.next, typeof(*pos), member); \ - &pos->member != (head); \ - pos = n, n = cds_list_entry(n->member.next, typeof(*n), member)) - -/** - * cds_list_for_each_entry_safe_reverse - * @pos: the type * to use as a loop cursor. - * @n: another type * to use as temporary storage - * @head: the head for your list. - * @member: the name of the list_struct within the struct. - * - * Iterate backwards over list of given type, safe against removal - * of list entry. - */ -#define cds_list_for_each_entry_safe_reverse(pos, n, head, member) \ - for (pos = cds_list_entry((head)->prev, typeof(*pos), member), \ - n = cds_list_entry(pos->member.prev, typeof(*pos), member); \ - &pos->member != (head); \ - pos = n, n = cds_list_entry(n->member.prev, typeof(*n), member)) - -#endif //0 - -/* - * Double linked lists with a single pointer list head. - * Mostly useful for hash tables where the two pointer list head is - * too wasteful. - * You lose the ability to access the tail in O(1). - */ - -struct cds_hlist_head { - struct cds_hlist_node *first; -}; - -struct cds_hlist_node { - struct cds_hlist_node *next, **pprev; -}; - -#define HLIST_HEAD_INIT { .first = NULL } -#define HLIST_HEAD(name) struct cds_hlist_head name = { .first = NULL } -#define CDS_INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL) -static inline void INIT_HLIST_NODE(struct cds_hlist_node *h) -{ - h->next = NULL; - h->pprev = NULL; -} - -static inline int hlist_unhashed(const struct cds_hlist_node *h) -{ - return !h->pprev; -} - -static inline int hlist_empty(const struct cds_hlist_head *h) -{ - return !h->first; -} - -static inline void __cds_hlist_del(struct cds_hlist_node *n) -{ - struct cds_hlist_node *next = n->next; - struct cds_hlist_node **pprev = n->pprev; - *pprev = next; - if (next) - next->pprev = pprev; -} - -static inline void cds_hlist_del(struct cds_hlist_node *n) -{ - __cds_hlist_del(n); - n->next = LIST_POISON1; - n->pprev = LIST_POISON2; -} - -static inline void cds_hlist_del_init(struct cds_hlist_node *n) -{ - if (!hlist_unhashed(n)) { - __cds_hlist_del(n); - INIT_HLIST_NODE(n); - } -} - -static inline void cds_hlist_add_head(struct cds_hlist_node *n, struct cds_hlist_head *h) -{ - struct cds_hlist_node *first = h->first; - n->next = first; - if (first) - first->pprev = &n->next; - h->first = n; - n->pprev = &h->first; -} - -/* next must be != NULL */ -static inline void hlist_add_before(struct cds_hlist_node *n, - struct cds_hlist_node *next) -{ - n->pprev = next->pprev; - n->next = next; - next->pprev = &n->next; - *(n->pprev) = n; -} - -static inline void hlist_add_after(struct cds_hlist_node *n, - struct cds_hlist_node *next) -{ - next->next = n->next; - n->next = next; - next->pprev = &n->next; - - if(next->next) - next->next->pprev = &next->next; -} - -/* - * Move a list from one list head to another. Fixup the pprev - * reference of the first entry if it exists. 
- */ -static inline void hlist_move_list(struct cds_hlist_head *old, - struct cds_hlist_head *new) -{ - new->first = old->first; - if (new->first) - new->first->pprev = &new->first; - old->first = NULL; -} - -#define cds_hlist_entry(ptr, type, member) caa_container_of(ptr,type,member) - -#define cds_hlist_for_each(pos, head) \ - for (pos = (head)->first; pos && ({ prefetch(pos->next); 1; }); \ - pos = pos->next) - -#define cds_hlist_for_each_safe(pos, n, head) \ - for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \ - pos = n) - -/** - * cds_hlist_for_each_entry - iterate over list of given type - * @tpos: the type * to use as a loop cursor. - * @pos: the &struct cds_hlist_node to use as a loop cursor. - * @head: the head for your list. - * @member: the name of the cds_hlist_node within the struct. - */ -#define cds_hlist_for_each_entry(tpos, pos, head, member) \ - for (pos = (head)->first; \ - pos && ({ prefetch(pos->next); 1;}) && \ - ({ tpos = cds_hlist_entry(pos, typeof(*tpos), member); 1;}); \ - pos = pos->next) - -/** - * cds_hlist_for_each_entry_continue - iterate over a hlist continuing after current point - * @tpos: the type * to use as a loop cursor. - * @pos: the &struct cds_hlist_node to use as a loop cursor. - * @member: the name of the cds_hlist_node within the struct. - */ -#define cds_hlist_for_each_entry_continue(tpos, pos, member) \ - for (pos = (pos)->next; \ - pos && ({ prefetch(pos->next); 1;}) && \ - ({ tpos = cds_hlist_entry(pos, typeof(*tpos), member); 1;}); \ - pos = pos->next) - -/** - * cds_hlist_for_each_entry_from - iterate over a hlist continuing from current point - * @tpos: the type * to use as a loop cursor. - * @pos: the &struct cds_hlist_node to use as a loop cursor. - * @member: the name of the cds_hlist_node within the struct. - */ -#define cds_hlist_for_each_entry_from(tpos, pos, member) \ - for (; pos && ({ prefetch(pos->next); 1;}) && \ - ({ tpos = cds_hlist_entry(pos, typeof(*tpos), member); 1;}); \ - pos = pos->next) - -/** - * cds_hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry - * @tpos: the type * to use as a loop cursor. - * @pos: the &struct cds_hlist_node to use as a loop cursor. - * @n: another &struct cds_hlist_node to use as temporary storage - * @head: the head for your list. - * @member: the name of the cds_hlist_node within the struct. 
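Editor's note: the hlist variant above trades O(1) tail access for a single-pointer head, which is what makes it attractive for hash-table buckets. A small sketch of that use, assuming the non-RCU helpers from urcu/hlist.h and an illustrative struct entry:

#include <stddef.h>
#include <urcu/hlist.h>

struct entry {
        unsigned int key;
        struct cds_hlist_node hnode;    /* bucket linkage */
};

#define NR_BUCKETS 64
static struct cds_hlist_head buckets[NR_BUCKETS];       /* zero-filled heads are empty */

static void bucket_add(struct entry *e)
{
        cds_hlist_add_head(&e->hnode, &buckets[e->key % NR_BUCKETS]);
}

static struct entry *bucket_lookup(unsigned int key)
{
        struct cds_hlist_node *pos;
        struct entry *e;

        cds_hlist_for_each_entry(e, pos, &buckets[key % NR_BUCKETS], hnode) {
                if (e->key == key)
                        return e;
        }
        return NULL;
}
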
- */ -#define cds_hlist_for_each_entry_safe(tpos, pos, n, head, member) \ - for (pos = (head)->first; \ - pos && ({ n = pos->next; 1; }) && \ - ({ tpos = cds_hlist_entry(pos, typeof(*tpos), member); 1;}); \ - pos = n) - -#endif - -#endif diff --git a/tests/rcutorture.h b/tests/rcutorture.h index 5ba3d2b..4c6f9da 100644 --- a/tests/rcutorture.h +++ b/tests/rcutorture.h @@ -127,9 +127,10 @@ void *rcu_read_perf_test(void *arg) rcu_register_thread(); run_on(me); uatomic_inc(&nthreadsrunning); + put_thread_offline(); while (goflag == GOFLAG_INIT) poll(NULL, 0, 1); - mark_rcu_quiescent_state(); + put_thread_online(); while (goflag == GOFLAG_RUN) { for (i = 0; i < RCU_READ_RUN; i++) { rcu_read_lock(); @@ -286,9 +287,10 @@ void *rcu_read_stress_test(void *arg) int pc; rcu_register_thread(); + put_thread_offline(); while (goflag == GOFLAG_INIT) poll(NULL, 0, 1); - mark_rcu_quiescent_state(); + put_thread_online(); while (goflag == GOFLAG_RUN) { rcu_read_lock(); p = rcu_dereference(rcu_stress_current); diff --git a/tests/test_urcu_lfq.c b/tests/test_urcu_lfq.c index cb50586..b61a7d4 100644 --- a/tests/test_urcu_lfq.c +++ b/tests/test_urcu_lfq.c @@ -66,6 +66,7 @@ static inline pid_t gettid(void) #endif #include #include +#include static volatile int test_go, test_stop; @@ -204,20 +205,6 @@ fail: } -static void rcu_free_node(struct rcu_head *head) -{ - struct cds_lfq_node_rcu *node = - caa_container_of(head, struct cds_lfq_node_rcu, rcu_head); - free(node); -} - -static void ref_release_node(struct urcu_ref *ref) -{ - struct cds_lfq_node_rcu *node = - caa_container_of(ref, struct cds_lfq_node_rcu, ref); - call_rcu(&node->rcu_head, rcu_free_node); -} - void *thr_dequeuer(void *_count) { unsigned long long *count = _count; @@ -228,6 +215,11 @@ void *thr_dequeuer(void *_count) set_affinity(); + ret = rcu_defer_register_thread(); + if (ret) { + printf("Error in rcu_defer_register_thread\n"); + exit(-1); + } rcu_register_thread(); while (!test_go) @@ -243,7 +235,7 @@ void *thr_dequeuer(void *_count) rcu_read_unlock(); if (node) { - urcu_ref_put(&node->ref, ref_release_node); + defer_rcu(free, node); nr_successful_dequeues++; } @@ -255,6 +247,7 @@ void *thr_dequeuer(void *_count) } rcu_unregister_thread(); + rcu_defer_unregister_thread(); printf_verbose("dequeuer thread_end, thread id : %lx, tid %lu, " "dequeues %llu, successful_dequeues %llu\n", pthread_self(), (unsigned long)gettid(), nr_dequeues, @@ -264,12 +257,6 @@ void *thr_dequeuer(void *_count) return ((void*)2); } -static void release_node(struct urcu_ref *ref) -{ - struct cds_lfq_node_rcu *node = caa_container_of(ref, struct cds_lfq_node_rcu, ref); - free(node); -} - void test_end(struct cds_lfq_queue_rcu *q, unsigned long long *nr_dequeues) { struct cds_lfq_node_rcu *node; @@ -279,7 +266,7 @@ void test_end(struct cds_lfq_queue_rcu *q, unsigned long long *nr_dequeues) node = cds_lfq_dequeue_rcu(q); rcu_read_unlock(); if (node) { - urcu_ref_put(&node->ref, release_node); + free(node); /* no more concurrent access */ (*nr_dequeues)++; } } while (node); @@ -376,7 +363,7 @@ int main(int argc, char **argv) tid_dequeuer = malloc(sizeof(*tid_dequeuer) * nr_dequeuers); count_enqueuer = malloc(2 * sizeof(*count_enqueuer) * nr_enqueuers); count_dequeuer = malloc(2 * sizeof(*count_dequeuer) * nr_dequeuers); - cds_lfq_init_rcu(&q, ref_release_node); + cds_lfq_init_rcu(&q, call_rcu); next_aff = 0; @@ -421,6 +408,8 @@ int main(int argc, char **argv) } test_end(&q, &end_dequeues); + err = cds_lfq_destroy_rcu(&q); + assert(!err); printf_verbose("total number of 
enqueues : %llu, dequeues %llu\n", tot_enqueues, tot_dequeues); diff --git a/urcu-bp.c b/urcu-bp.c index 39a6cd0..2ae3408 100644 --- a/urcu-bp.c +++ b/urcu-bp.c @@ -24,6 +24,7 @@ */ #define _GNU_SOURCE +#define _LGPL_SOURCE #include #include #include @@ -35,11 +36,14 @@ #include #include +#include "urcu/wfqueue.h" #include "urcu/map/urcu-bp.h" - #include "urcu/static/urcu-bp.h" + /* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */ +#undef _LGPL_SOURCE #include "urcu-bp.h" +#define _LGPL_SOURCE #ifndef MAP_ANONYMOUS #define MAP_ANONYMOUS MAP_ANON @@ -78,6 +82,11 @@ void *mremap(void *old_address, size_t old_size, size_t new_size, int flags) #define RCU_SLEEP_DELAY 1000 #define ARENA_INIT_ALLOC 16 +/* + * Active attempts to check for reader Q.S. before calling sleep(). + */ +#define RCU_QS_ACTIVE_ATTEMPTS 100 + void __attribute__((destructor)) rcu_bp_exit(void); static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER; @@ -295,7 +304,7 @@ static void add_thread(void) if (registry_arena.len < registry_arena.used + sizeof(struct rcu_reader)) resize_arena(®istry_arena, - max(registry_arena.len << 1, ARENA_INIT_ALLOC)); + caa_max(registry_arena.len << 1, ARENA_INIT_ALLOC)); /* * Find a free spot. */ @@ -364,9 +373,10 @@ end: assert(!ret); } -void rcu_bp_exit() +void rcu_bp_exit(void) { - munmap(registry_arena.p, registry_arena.len); + if (registry_arena.p) + munmap(registry_arena.p, registry_arena.len); } /* diff --git a/urcu-qsbr.c b/urcu-qsbr.c index 87cf41d..5e43484 100644 --- a/urcu-qsbr.c +++ b/urcu-qsbr.c @@ -24,6 +24,7 @@ */ #define _GNU_SOURCE +#define _LGPL_SOURCE #include #include #include @@ -34,12 +35,15 @@ #include #include +#include "urcu/wfqueue.h" #include "urcu/map/urcu-qsbr.h" - #define BUILD_QSBR_LIB #include "urcu/static/urcu-qsbr.h" + /* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */ +#undef _LGPL_SOURCE #include "urcu-qsbr.h" +#define _LGPL_SOURCE void __attribute__((destructor)) rcu_exit(void); @@ -52,6 +56,11 @@ int32_t gp_futex; */ unsigned long rcu_gp_ctr = RCU_GP_ONLINE; +/* + * Active attempts to check for reader Q.S. before calling futex(). + */ +#define RCU_QS_ACTIVE_ATTEMPTS 100 + /* * Written to only by each individual reader. Read by both the reader and the * writers. @@ -145,26 +154,33 @@ static void update_counter_and_wait(void) */ for (;;) { wait_loops++; - if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) { - uatomic_dec(&gp_futex); + if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) { + uatomic_set(&gp_futex, -1); + /* + * Write futex before write waiting (the other side + * reads them in the opposite order). 
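Editor's note: the tests/rcutorture.h hunks above replace mark_rcu_quiescent_state() with put_thread_offline()/put_thread_online() around the startup polling loop, so QSBR readers no longer stall grace periods while they wait for the test to start. A minimal sketch of the same discipline with the public urcu-qsbr API; the stop flag and sleep interval are illustrative:

#include <poll.h>
#include <urcu-qsbr.h>

static volatile int stop;

static void *qsbr_reader(void *arg)
{
        (void) arg;

        rcu_register_thread();
        while (!stop) {
                rcu_read_lock();
                /* ... dereference RCU-protected data here ... */
                rcu_read_unlock();
                rcu_quiescent_state();  /* periodically report a quiescent state */

                rcu_thread_offline();   /* about to block: stop participating */
                poll(NULL, 0, 10);
                rcu_thread_online();
        }
        rcu_unregister_thread();
        return NULL;
}
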
+ */ + cmm_smp_wmb(); + cds_list_for_each_entry(index, ®istry, node) { + _CMM_STORE_SHARED(index->waiting, 1); + } /* Write futex before read reader_gp */ cmm_smp_mb(); } - cds_list_for_each_entry_safe(index, tmp, ®istry, node) { if (!rcu_gp_ongoing(&index->ctr)) cds_list_move(&index->node, &qsreaders); } if (cds_list_empty(®istry)) { - if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) { + if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) { /* Read reader_gp before write futex */ cmm_smp_mb(); uatomic_set(&gp_futex, 0); } break; } else { - if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) { + if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) { wait_gp(); } else { #ifndef HAS_INCOHERENT_CACHES diff --git a/urcu.c b/urcu.c index 2339bc6..20bbf36 100644 --- a/urcu.c +++ b/urcu.c @@ -25,6 +25,7 @@ #define _BSD_SOURCE #define _GNU_SOURCE +#define _LGPL_SOURCE #include #include #include @@ -35,11 +36,26 @@ #include #include +#include "urcu/wfqueue.h" #include "urcu/map/urcu.h" - #include "urcu/static/urcu.h" + /* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */ +#undef _LGPL_SOURCE #include "urcu.h" +#define _LGPL_SOURCE + +/* + * If a reader is really non-cooperative and refuses to commit its + * rcu_active_readers count to memory (there is no barrier in the reader + * per-se), kick it after a few loops waiting for it. + */ +#define KICK_READER_LOOPS 10000 + +/* + * Active attempts to check for reader Q.S. before calling futex(). + */ +#define RCU_QS_ACTIVE_ATTEMPTS 100 #ifdef RCU_MEMBARRIER static int init_done; diff --git a/urcu/compiler.h b/urcu/compiler.h index 64d12d3..4bced2a 100644 --- a/urcu/compiler.h +++ b/urcu/compiler.h @@ -39,12 +39,12 @@ */ #define CMM_ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) -#ifndef max -#define max(a,b) ((a)>(b)?(a):(b)) +#ifndef caa_max +#define caa_max(a,b) ((a)>(b)?(a):(b)) #endif -#ifndef min -#define min(a,b) ((a)<(b)?(a):(b)) +#ifndef caa_min +#define caa_min(a,b) ((a)<(b)?(a):(b)) #endif #if defined(__SIZEOF_LONG__) @@ -55,10 +55,28 @@ #define CAA_BITS_PER_LONG 32 #endif -#define caa_container_of(ptr, type, member) \ +/* + * caa_container_of - Get the address of an object containing a field. + * + * @ptr: pointer to the field. + * @type: type of the object. + * @member: name of the field within the object. + */ +#define caa_container_of(ptr, type, member) \ ({ \ - const typeof(((type *)NULL)->member) * __ptr = (ptr); \ + const typeof(((type *) NULL)->member) * __ptr = (ptr); \ (type *)((char *)__ptr - offsetof(type, member)); \ }) +#define CAA_BUILD_BUG_ON_ZERO(cond) (sizeof(struct { int:-!!(cond); })) +#define CAA_BUILD_BUG_ON(cond) ((void)BUILD_BUG_ON_ZERO(cond)) + +/* + * __rcu is an annotation that documents RCU pointer accesses that need + * to be protected by a read-side critical section. Eventually, a static + * checker will be able to use this annotation to detect incorrect RCU + * usage. + */ +#define __rcu + #endif /* _URCU_COMPILER_H */ diff --git a/urcu/list.h b/urcu/list.h index ab7a470..7d2a9a1 100644 --- a/urcu/list.h +++ b/urcu/list.h @@ -1,21 +1,26 @@ -/* Copyright (C) 2002 Free Software Foundation, Inc. - This file is part of the GNU C Library. - Contributed by Ulrich Drepper , 2002. - - The GNU C Library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. 
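Editor's note: the urcu/compiler.h hunk above documents caa_container_of() and renames the max/min helpers to caa_max/caa_min so they no longer collide with identically named macros in user code. For reference, a short caa_container_of() example, assuming an arbitrary structure embedding a list head:

#include <stddef.h>
#include <urcu/compiler.h>
#include <urcu/list.h>

struct buffer {
        size_t len;
        struct cds_list_head node;
};

/* Recover the enclosing struct buffer from a pointer to its 'node' member. */
static struct buffer *buffer_of_node(struct cds_list_head *pos)
{
        return caa_container_of(pos, struct buffer, node);
}
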
- - The GNU C Library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with the GNU C Library; if not, write to the Free - Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA - 02111-1307 USA. */ +/* + * Copyright (C) 2002 Free Software Foundation, Inc. + * (originally part of the GNU C Library) + * Contributed by Ulrich Drepper , 2002. + * + * Copyright (C) 2009 Pierre-Marc Fournier + * Conversion to RCU list. + * Copyright (C) 2010 Mathieu Desnoyers + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ #ifndef _CDS_LIST_H #define _CDS_LIST_H 1 diff --git a/urcu/map/urcu.h b/urcu/map/urcu.h index b55e304..3f436a7 100644 --- a/urcu/map/urcu.h +++ b/urcu/map/urcu.h @@ -35,9 +35,34 @@ /* Mapping macros to allow multiple flavors in a single binary. */ #if !defined(RCU_MEMBARRIER) && !defined(RCU_SIGNAL) && !defined(RCU_MB) +#define RCU_MEMBARRIER +#endif + +/* + * RCU_MEMBARRIER is only possibly available on Linux. Fallback to + * RCU_MB + * otherwise. + */ +#if !defined(__linux__) && defined(RCU_MEMBARRIER) +#undef RCU_MEMBARRIER #define RCU_MB #endif +#ifdef RCU_MEMBARRIER +#include + +/* If the headers do not support SYS_membarrier, statically use RCU_MB */ +#ifdef SYS_membarrier +# define MEMBARRIER_EXPEDITED (1 << 0) +# define MEMBARRIER_DELAYED (1 << 1) +# define MEMBARRIER_QUERY (1 << 16) +# define membarrier(...) syscall(SYS_membarrier, __VA_ARGS__) +#else +# undef RCU_MEMBARRIER +# define RCU_MB +#endif +#endif + #ifdef RCU_MEMBARRIER #define rcu_read_lock rcu_read_lock_memb diff --git a/urcu/rcuhlist.h b/urcu/rcuhlist.h index ef024ab..83b1363 100644 --- a/urcu/rcuhlist.h +++ b/urcu/rcuhlist.h @@ -1,25 +1,26 @@ -/* Copyright (C) 2002 Free Software Foundation, Inc. - This file is part of the GNU C Library. - Contributed by Ulrich Drepper , 2002. - - Copyright (C) 2009 Pierre-Marc Fournier - Conversion to RCU list. - Copyright (C) 2010 Mathieu Desnoyers - - The GNU C Library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - The GNU C Library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. 
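Editor's note: the urcu/map/urcu.h hunk above keeps RCU_MEMBARRIER only when the kernel headers define SYS_membarrier and otherwise falls back to RCU_MB; when kept, it wraps the syscall behind a membarrier() macro. A hedged sketch of a runtime presence check built on those definitions; the function name is illustrative, and the sketch only probes whether the syscall number is wired up, not which flags the kernel supports:

#include <errno.h>
#include <unistd.h>
#include <syscall.h>

#ifdef SYS_membarrier
#define MEMBARRIER_QUERY        (1 << 16)
#define membarrier(...)         syscall(SYS_membarrier, __VA_ARGS__)

static int membarrier_syscall_present(void)
{
        /* A kernel without the syscall fails with ENOSYS; any other result,
         * including an error about unsupported flags, proves it exists. */
        if (membarrier(MEMBARRIER_QUERY) >= 0)
                return 1;
        return errno != ENOSYS;
}
#endif
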
- - You should have received a copy of the GNU Lesser General Public - License along with the GNU C Library; if not, write to the Free - Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA - 02111-1307 USA. */ +/* + * Copyright (C) 2002 Free Software Foundation, Inc. + * (originally part of the GNU C Library) + * Contributed by Ulrich Drepper , 2002. + * + * Copyright (C) 2009 Pierre-Marc Fournier + * Conversion to RCU list. + * Copyright (C) 2010 Mathieu Desnoyers + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ #ifndef _URCU_RCUHLIST_H #define _URCU_RCUHLIST_H diff --git a/urcu/rculfqueue.h b/urcu/rculfqueue.h index fbef6f9..598fa50 100644 --- a/urcu/rculfqueue.h +++ b/urcu/rculfqueue.h @@ -23,36 +23,24 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ -#include #include +#include #ifdef __cplusplus extern "C" { #endif -/* - * Lock-free RCU queue using reference counting. Enqueue and dequeue operations - * hold a RCU read lock to deal with cmpxchg ABA problem. This implementation - * keeps a dummy head node to ensure we can always update the queue locklessly. - * Given that this is a queue, the dummy head node must always advance as we - * dequeue entries. Therefore, we keep a reference count on each entry we are - * dequeueing, so they can be kept as dummy head node until the next dequeue, at - * which point their reference count will be decremented. - */ - struct cds_lfq_queue_rcu; struct cds_lfq_node_rcu { struct cds_lfq_node_rcu *next; - struct urcu_ref ref; - struct cds_lfq_queue_rcu *queue; - struct rcu_head rcu_head; + int dummy; }; struct cds_lfq_queue_rcu { struct cds_lfq_node_rcu *head, *tail; - struct cds_lfq_node_rcu init; /* Dummy initialization node */ - void (*release)(struct urcu_ref *ref); + void (*queue_call_rcu)(struct rcu_head *head, + void (*func)(struct rcu_head *head)); }; #ifdef _LGPL_SOURCE @@ -61,6 +49,7 @@ struct cds_lfq_queue_rcu { #define cds_lfq_node_init_rcu _cds_lfq_node_init_rcu #define cds_lfq_init_rcu _cds_lfq_init_rcu +#define cds_lfq_destroy_rcu _cds_lfq_destroy_rcu #define cds_lfq_enqueue_rcu _cds_lfq_enqueue_rcu #define cds_lfq_dequeue_rcu _cds_lfq_dequeue_rcu @@ -68,7 +57,14 @@ struct cds_lfq_queue_rcu { extern void cds_lfq_node_init_rcu(struct cds_lfq_node_rcu *node); extern void cds_lfq_init_rcu(struct cds_lfq_queue_rcu *q, - void (*release)(struct urcu_ref *ref)); + void queue_call_rcu(struct rcu_head *head, + void (*func)(struct rcu_head *head))); +/* + * The queue should be emptied before calling destroy. + * + * Return 0 on success, -EPERM if queue is not empty. + */ +extern int cds_lfq_destroy_rcu(struct cds_lfq_queue_rcu *q); /* * Should be called under rcu read lock critical section. 
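Editor's note: the reworked queue API above drops the reference-counted release callback. The queue now takes the flavor's call_rcu at init time, which it needs to reclaim its internal dummy nodes, and the caller is responsible for deferring reclamation of dequeued nodes (the updated tests/test_urcu_lfq.c uses defer_rcu(); the sketch below uses call_rcu() with an rcu_head embedded in the caller's node). A minimal usage sketch, assuming the default (memb) flavor's urcu.h exposes call_rcu() and that struct my_node is illustrative:

#include <stdlib.h>
#include <assert.h>
#include <urcu.h>               /* rcu_read_lock(), call_rcu(), ... */
#include <urcu/compiler.h>
#include <urcu/rculfqueue.h>

struct my_node {
        int value;
        struct cds_lfq_node_rcu qnode;
        struct rcu_head rcu_head;
};

static void free_my_node(struct rcu_head *head)
{
        free(caa_container_of(head, struct my_node, rcu_head));
}

static void example(void)
{
        struct cds_lfq_queue_rcu q;
        struct cds_lfq_node_rcu *snode;
        struct my_node *node;

        /* Calling thread is assumed to be registered with rcu_register_thread(). */
        cds_lfq_init_rcu(&q, call_rcu);         /* queue uses call_rcu for its dummy nodes */

        node = malloc(sizeof(*node));
        assert(node);
        cds_lfq_node_init_rcu(&node->qnode);
        node->value = 42;

        rcu_read_lock();
        cds_lfq_enqueue_rcu(&q, &node->qnode);
        rcu_read_unlock();

        rcu_read_lock();
        snode = cds_lfq_dequeue_rcu(&q);
        rcu_read_unlock();
        if (snode) {
                node = caa_container_of(snode, struct my_node, qnode);
                /* Defer freeing until a grace period has elapsed. */
                call_rcu(&node->rcu_head, free_my_node);
        }

        assert(!cds_lfq_destroy_rcu(&q));       /* only valid once the queue is empty */
}
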
@@ -79,12 +75,9 @@ extern void cds_lfq_enqueue_rcu(struct cds_lfq_queue_rcu *q, /* * Should be called under rcu read lock critical section. * - * The entry returned by dequeue must be taken care of by doing a - * sequence of urcu_ref_put which release handler should do a call_rcu. - * - * In other words, the entry lfq node returned by dequeue must not be - * modified/re-used/freed until the reference count reaches zero and a grace - * period has elapsed (after the refcount reached 0). + * The caller must wait for a grace period to pass before freeing the returned + * node or modifying the cds_lfq_node_rcu structure. + * Returns NULL if queue is empty. */ extern struct cds_lfq_node_rcu *cds_lfq_dequeue_rcu(struct cds_lfq_queue_rcu *q); diff --git a/urcu/rculfstack.h b/urcu/rculfstack.h index 75a580b..93c0bc9 100644 --- a/urcu/rculfstack.h +++ b/urcu/rculfstack.h @@ -39,16 +39,17 @@ struct cds_lfs_stack_rcu { #include -#define cds_lfs_node_init_rcu _cds_lfs_node_init_rcu -#define cds_lfs_init_rcu _cds_lfs_init_rcu -#define cds_lfs_push_rcu _cds_lfs_push_rcu -#define cds_lfs_pop_rcu _cds_lfs_pop_rcu +#define cds_lfs_node_init_rcu _cds_lfs_node_init_rcu +#define cds_lfs_init_rcu _cds_lfs_init_rcu +#define cds_lfs_push_rcu _cds_lfs_push_rcu +#define cds_lfs_pop_rcu _cds_lfs_pop_rcu #else /* !_LGPL_SOURCE */ extern void cds_lfs_node_init_rcu(struct cds_lfs_node_rcu *node); extern void cds_lfs_init_rcu(struct cds_lfs_stack_rcu *s); -extern void cds_lfs_push_rcu(struct cds_lfs_stack_rcu *s, struct cds_lfs_node_rcu *node); +extern int cds_lfs_push_rcu(struct cds_lfs_stack_rcu *s, + struct cds_lfs_node_rcu *node); /* * Should be called under rcu read lock critical section. diff --git a/urcu/rculist.h b/urcu/rculist.h index 621831c..575e1fb 100644 --- a/urcu/rculist.h +++ b/urcu/rculist.h @@ -1,25 +1,26 @@ -/* Copyright (C) 2002 Free Software Foundation, Inc. - This file is part of the GNU C Library. - Contributed by Ulrich Drepper , 2002. - - Copyright (C) 2009 Pierre-Marc Fournier - Conversion to RCU list. - Copyright (C) 2010 Mathieu Desnoyers - - The GNU C Library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - The GNU C Library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with the GNU C Library; if not, write to the Free - Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA - 02111-1307 USA. */ +/* + * Copyright (C) 2002 Free Software Foundation, Inc. + * (originally part of the GNU C Library) + * Contributed by Ulrich Drepper , 2002. + * + * Copyright (C) 2009 Pierre-Marc Fournier + * Conversion to RCU list. + * Copyright (C) 2010 Mathieu Desnoyers + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ #ifndef _URCU_RCULIST_H #define _URCU_RCULIST_H diff --git a/urcu/static/rculfqueue.h b/urcu/static/rculfqueue.h index b627e45..af73c6f 100644 --- a/urcu/static/rculfqueue.h +++ b/urcu/static/rculfqueue.h @@ -26,52 +26,123 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ -#include +#include #include +#include #include -/* A urcu implementation header should be already included. */ +#include #ifdef __cplusplus extern "C" { #endif +struct cds_lfq_node_rcu_dummy { + struct cds_lfq_node_rcu parent; + struct rcu_head head; + struct cds_lfq_queue_rcu *q; +}; + /* - * Lock-free RCU queue using reference counting. Enqueue and dequeue operations - * hold a RCU read lock to deal with cmpxchg ABA problem. This implementation - * keeps a dummy head node to ensure we can always update the queue locklessly. - * Given that this is a queue, the dummy head node must always advance as we - * dequeue entries. Therefore, we keep a reference count on each entry we are - * dequeueing, so they can be kept as dummy head node until the next dequeue, at - * which point their reference count will be decremented. + * Lock-free RCU queue. Enqueue and dequeue operations hold a RCU read + * lock to deal with cmpxchg ABA problem. This queue is *not* circular: + * head points to the oldest node, tail points to the newest node. + * A dummy node is kept to ensure enqueue and dequeue can always proceed + * concurrently. Keeping a separate head and tail helps with large + * queues: enqueue and dequeue can proceed concurrently without + * wrestling for exclusive access to the same variables. + * + * Dequeue retry if it detects that it would be dequeueing the last node + * (it means a dummy node dequeue-requeue is in progress). This ensures + * that there is always at least one node in the queue. + * + * In the dequeue operation, we internally reallocate the dummy node + * upon dequeue/requeue and use call_rcu to free the old one after a + * grace period. 
*/ -#define URCU_LFQ_PERMANENT_REF 128 +static inline +struct cds_lfq_node_rcu *make_dummy(struct cds_lfq_queue_rcu *q, + struct cds_lfq_node_rcu *next) +{ + struct cds_lfq_node_rcu_dummy *dummy; + + dummy = malloc(sizeof(struct cds_lfq_node_rcu_dummy)); + assert(dummy); + dummy->parent.next = next; + dummy->parent.dummy = 1; + dummy->q = q; + return &dummy->parent; +} + +static inline +void free_dummy_cb(struct rcu_head *head) +{ + struct cds_lfq_node_rcu_dummy *dummy = + caa_container_of(head, struct cds_lfq_node_rcu_dummy, head); + free(dummy); +} + +static inline +void rcu_free_dummy(struct cds_lfq_node_rcu *node) +{ + struct cds_lfq_node_rcu_dummy *dummy; + assert(node->dummy); + dummy = caa_container_of(node, struct cds_lfq_node_rcu_dummy, parent); + dummy->q->queue_call_rcu(&dummy->head, free_dummy_cb); +} + +static inline +void free_dummy(struct cds_lfq_node_rcu *node) +{ + struct cds_lfq_node_rcu_dummy *dummy; + + assert(node->dummy); + dummy = caa_container_of(node, struct cds_lfq_node_rcu_dummy, parent); + free(dummy); +} + +static inline void _cds_lfq_node_init_rcu(struct cds_lfq_node_rcu *node) { node->next = NULL; - urcu_ref_init(&node->ref); + node->dummy = 0; } +static inline void _cds_lfq_init_rcu(struct cds_lfq_queue_rcu *q, - void (*release)(struct urcu_ref *ref)) + void queue_call_rcu(struct rcu_head *head, + void (*func)(struct rcu_head *head))) +{ + q->tail = make_dummy(q, NULL); + q->head = q->tail; + q->queue_call_rcu = queue_call_rcu; +} + +/* + * The queue should be emptied before calling destroy. + * + * Return 0 on success, -EPERM if queue is not empty. + */ +static inline +int _cds_lfq_destroy_rcu(struct cds_lfq_queue_rcu *q) { - _cds_lfq_node_init_rcu(&q->init); - /* Make sure the initial node is never freed. */ - urcu_ref_set(&q->init.ref, URCU_LFQ_PERMANENT_REF); - q->head = q->tail = &q->init; - q->release = release; + struct cds_lfq_node_rcu *head; + + head = rcu_dereference(q->head); + if (!(head->dummy && head->next == NULL)) + return -EPERM; /* not empty */ + free_dummy(head); + return 0; } /* * Should be called under rcu read lock critical section. */ +static inline void _cds_lfq_enqueue_rcu(struct cds_lfq_queue_rcu *q, struct cds_lfq_node_rcu *node) { - urcu_ref_get(&node->ref); - node->queue = q; - /* * uatomic_cmpxchg() implicit memory barrier orders earlier stores to * node before publication. @@ -81,23 +152,19 @@ void _cds_lfq_enqueue_rcu(struct cds_lfq_queue_rcu *q, struct cds_lfq_node_rcu *tail, *next; tail = rcu_dereference(q->tail); - /* - * Typically expect tail->next to be NULL. - */ next = uatomic_cmpxchg(&tail->next, NULL, node); if (next == NULL) { /* * Tail was at the end of queue, we successfully - * appended to it. - * Now move tail (another enqueue might beat - * us to it, that's fine). + * appended to it. Now move tail (another + * enqueue might beat us to it, that's fine). */ (void) uatomic_cmpxchg(&q->tail, tail, node); return; } else { /* - * Failure to append to current tail. Help moving tail - * further and retry. + * Failure to append to current tail. + * Help moving tail further and retry. */ (void) uatomic_cmpxchg(&q->tail, tail, next); continue; @@ -105,16 +172,24 @@ void _cds_lfq_enqueue_rcu(struct cds_lfq_queue_rcu *q, } } +static inline +void enqueue_dummy(struct cds_lfq_queue_rcu *q) +{ + struct cds_lfq_node_rcu *node; + + /* We need to reallocate to protect from ABA. */ + node = make_dummy(q, NULL); + _cds_lfq_enqueue_rcu(q, node); +} + /* * Should be called under rcu read lock critical section. 
* - * The entry returned by dequeue must be taken care of by doing a - * sequence of urcu_ref_put which release handler should do a call_rcu. - * - * In other words, the entry lfq node returned by dequeue must not be - * modified/re-used/freed until the reference count reaches zero and a grace - * period has elapsed. + * The caller must wait for a grace period to pass before freeing the returned + * node or modifying the cds_lfq_node_rcu structure. + * Returns NULL if queue is empty. */ +static inline struct cds_lfq_node_rcu *_cds_lfq_dequeue_rcu(struct cds_lfq_queue_rcu *q) { for (;;) { @@ -122,18 +197,27 @@ struct cds_lfq_node_rcu *_cds_lfq_dequeue_rcu(struct cds_lfq_queue_rcu *q) head = rcu_dereference(q->head); next = rcu_dereference(head->next); - if (next) { - if (uatomic_cmpxchg(&q->head, head, next) == head) { - urcu_ref_put(&head->ref, q->release); - return next; - } else { - /* Concurrently pushed, retry */ - continue; - } - } else { - /* Empty */ - return NULL; + if (head->dummy && next == NULL) + return NULL; /* empty */ + /* + * We never, ever allow dequeue to get to a state where + * the queue is empty (we need at least one node in the + * queue). This is ensured by checking if the head next + * is NULL, which means we need to enqueue a dummy node + * before we can hope dequeuing anything. + */ + if (!next) { + enqueue_dummy(q); + next = rcu_dereference(head->next); + } + if (uatomic_cmpxchg(&q->head, head, next) != head) + continue; /* Concurrently pushed. */ + if (head->dummy) { + /* Free dummy after grace period. */ + rcu_free_dummy(head); + continue; /* try again */ } + return head; } } diff --git a/urcu/static/rculfstack.h b/urcu/static/rculfstack.h index 3f48b7e..3473cce 100644 --- a/urcu/static/rculfstack.h +++ b/urcu/static/rculfstack.h @@ -27,7 +27,7 @@ */ #include -/* A urcu implementation header should be already included. */ +#include #ifdef __cplusplus extern "C" { @@ -44,8 +44,34 @@ void _cds_lfs_init_rcu(struct cds_lfs_stack_rcu *s) s->head = NULL; } +/* + * Lock-free stack push is not subject to ABA problem, so no need to + * take the RCU read-side lock. Even if "head" changes between two + * uatomic_cmpxchg() invocations here (being popped, and then pushed + * again by one or more concurrent threads), the second + * uatomic_cmpxchg() invocation only cares about pushing a new entry at + * the head of the stack, ensuring consistency by making sure the new + * node->next is the same pointer value as the value replaced as head. + * It does not care about the content of the actual next node, so it can + * very well be reallocated between the two uatomic_cmpxchg(). + * + * We take the approach of expecting the stack to be usually empty, so + * we first try an initial uatomic_cmpxchg() on a NULL old_head, and + * retry if the old head was non-NULL (the value read by the first + * uatomic_cmpxchg() is used as old head for the following loop). The + * upside of this scheme is to minimize the amount of cacheline traffic, + * always performing an exclusive cacheline access, rather than doing + * non-exclusive followed by exclusive cacheline access (which would be + * required if we first read the old head value). This design decision + * might be revisited after more throrough benchmarking on various + * platforms. + * + * Returns 0 if the stack was empty prior to adding the node. + * Returns non-zero otherwise. 
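Editor's note: the comment above explains the empty-first retry strategy and why the push side needs no RCU read lock. To make the control flow easy to compile and step through in isolation, here is a standalone sketch of the same loop using GCC's __sync_val_compare_and_swap() in place of uatomic_cmpxchg() (both act as full barriers); the names are illustrative and the return convention mirrors _cds_lfs_push_rcu():

#include <stddef.h>

struct node {
        struct node *next;
};

struct stack {
        struct node *head;
};

/* Returns 0 if the stack was empty before the push, nonzero otherwise. */
static int stack_push(struct stack *s, struct node *node)
{
        struct node *head = NULL;

        for (;;) {
                struct node *old_head = head;

                /* Link before publishing; the CAS below is a full barrier. */
                node->next = head;
                head = __sync_val_compare_and_swap(&s->head, old_head, node);
                if (old_head == head)
                        break;  /* head matched our expectation: push done */
                /* Otherwise retry, reusing the head value the CAS just read. */
        }
        return head != NULL;
}
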
+ */ static inline -void _cds_lfs_push_rcu(struct cds_lfs_stack_rcu *s, struct cds_lfs_node_rcu *node) +int _cds_lfs_push_rcu(struct cds_lfs_stack_rcu *s, + struct cds_lfs_node_rcu *node) { struct cds_lfs_node_rcu *head = NULL; @@ -61,6 +87,7 @@ void _cds_lfs_push_rcu(struct cds_lfs_stack_rcu *s, struct cds_lfs_node_rcu *nod if (old_head == head) break; } + return (int) !!((unsigned long) head); } /* diff --git a/urcu/static/urcu-bp.h b/urcu/static/urcu-bp.h index 64c32ea..832ba0f 100644 --- a/urcu/static/urcu-bp.h +++ b/urcu/static/urcu-bp.h @@ -51,11 +51,6 @@ extern "C" { #endif -/* - * Active attempts to check for reader Q.S. before calling sleep(). - */ -#define RCU_QS_ACTIVE_ATTEMPTS 100 - #ifdef DEBUG_RCU #define rcu_assert(args...) assert(args) #else diff --git a/urcu/static/urcu-qsbr.h b/urcu/static/urcu-qsbr.h index c46a7be..489abb0 100644 --- a/urcu/static/urcu-qsbr.h +++ b/urcu/static/urcu-qsbr.h @@ -55,18 +55,6 @@ extern "C" { * This is required to permit relinking with newer versions of the library. */ -/* - * If a reader is really non-cooperative and refuses to commit its - * rcu_reader.ctr count to memory (there is no barrier in the reader - * per-se), kick it after a few loops waiting for it. - */ -#define KICK_READER_LOOPS 10000 - -/* - * Active attempts to check for reader Q.S. before calling futex(). - */ -#define RCU_QS_ACTIVE_ATTEMPTS 100 - #ifdef DEBUG_RCU #define rcu_assert(args...) assert(args) #else @@ -136,6 +124,7 @@ struct rcu_reader { unsigned long ctr; /* Data used for registry */ struct cds_list_head node __attribute__((aligned(CAA_CACHE_LINE_SIZE))); + int waiting; pthread_t tid; }; @@ -148,7 +137,11 @@ extern int32_t gp_futex; */ static inline void wake_up_gp(void) { - if (unlikely(uatomic_read(&gp_futex) == -1)) { + if (unlikely(_CMM_LOAD_SHARED(rcu_reader.waiting))) { + _CMM_STORE_SHARED(rcu_reader.waiting, 0); + cmm_smp_mb(); + if (uatomic_read(&gp_futex) != -1) + return; uatomic_set(&gp_futex, 0); futex_noasync(&gp_futex, FUTEX_WAKE, 1, NULL, NULL, 0); diff --git a/urcu/static/urcu.h b/urcu/static/urcu.h index 0295a3e..b993375 100644 --- a/urcu/static/urcu.h +++ b/urcu/static/urcu.h @@ -95,18 +95,6 @@ extern "C" { #define SIGRCU SIGUSR1 #endif -/* - * If a reader is really non-cooperative and refuses to commit its - * rcu_active_readers count to memory (there is no barrier in the reader - * per-se), kick it after a few loops waiting for it. - */ -#define KICK_READER_LOOPS 10000 - -/* - * Active attempts to check for reader Q.S. before calling futex(). - */ -#define RCU_QS_ACTIVE_ATTEMPTS 100 - #ifdef DEBUG_RCU #define rcu_assert(args...) assert(args) #else diff --git a/urcu/static/wfqueue.h b/urcu/static/wfqueue.h index 77828ca..19314f5 100644 --- a/urcu/static/wfqueue.h +++ b/urcu/static/wfqueue.h @@ -75,7 +75,7 @@ static inline void _cds_wfq_enqueue(struct cds_wfq_queue *q, * structure containing node and setting node->next to NULL before * publication. */ - old_tail = uatomic_xchg(&q->tail, node); + old_tail = uatomic_xchg(&q->tail, &node->next); /* * At this point, dequeuers see a NULL old_tail->next, which indicates * that the queue is being appended to. The following store will append diff --git a/urcu/static/wfstack.h b/urcu/static/wfstack.h index 79ed3f7..cb68a59 100644 --- a/urcu/static/wfstack.h +++ b/urcu/static/wfstack.h @@ -56,8 +56,11 @@ void _cds_wfs_init(struct cds_wfs_stack *s) assert(!ret); } +/* + * Returns 0 if stack was empty, 1 otherwise. 
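Editor's note: cds_wfs_push() (and _cds_wfs_push() below) now reports whether the stack was empty before the push. One plausible use, which is an assumption of this sketch rather than something stated by the patch, is to wake a sleeping consumer only on the empty-to-non-empty transition; the consumer must still re-check the stack under the mutex before sleeping:

#include <pthread.h>
#include <urcu/wfstack.h>

static struct cds_wfs_stack work_stack; /* initialized elsewhere with cds_wfs_init() */
static pthread_mutex_t wake_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wake_cond = PTHREAD_COND_INITIALIZER;

static void submit_work(struct cds_wfs_node *node)
{
        cds_wfs_node_init(node);
        if (!cds_wfs_push(&work_stack, node)) {
                /* Stack was empty: a consumer may be asleep, wake it up. */
                pthread_mutex_lock(&wake_mutex);
                pthread_cond_signal(&wake_cond);
                pthread_mutex_unlock(&wake_mutex);
        }
}
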
+ */ static inline -void _cds_wfs_push(struct cds_wfs_stack *s, struct cds_wfs_node *node) +int _cds_wfs_push(struct cds_wfs_stack *s, struct cds_wfs_node *node) { struct cds_wfs_node *old_head; @@ -72,6 +75,7 @@ void _cds_wfs_push(struct cds_wfs_stack *s, struct cds_wfs_node *node) * until node->next is set to old_head. */ CMM_STORE_SHARED(node->next, old_head); + return (old_head != CDS_WF_STACK_END); } /* diff --git a/urcu/uatomic/generic.h b/urcu/uatomic/generic.h index 82b7c8c..bf7cc6a 100644 --- a/urcu/uatomic/generic.h +++ b/urcu/uatomic/generic.h @@ -98,20 +98,23 @@ void _uatomic_and(void *addr, unsigned long val, #ifdef UATOMIC_HAS_ATOMIC_BYTE case 1: __sync_and_and_fetch_1(addr, val); + return; #endif #ifdef UATOMIC_HAS_ATOMIC_SHORT case 2: __sync_and_and_fetch_2(addr, val); + return; #endif case 4: __sync_and_and_fetch_4(addr, val); + return; #if (CAA_BITS_PER_LONG == 64) case 8: __sync_and_and_fetch_8(addr, val); + return; #endif } _uatomic_link_error(); - return 0; } #define uatomic_and(addr, v) \ @@ -131,20 +134,24 @@ void _uatomic_or(void *addr, unsigned long val, #ifdef UATOMIC_HAS_ATOMIC_BYTE case 1: __sync_or_and_fetch_1(addr, val); + return; #endif #ifdef UATOMIC_HAS_ATOMIC_SHORT case 2: __sync_or_and_fetch_2(addr, val); + return; #endif case 4: __sync_or_and_fetch_4(addr, val); + return; #if (CAA_BITS_PER_LONG == 64) case 8: __sync_or_and_fetch_8(addr, val); + return; #endif } _uatomic_link_error(); - return 0; + return; } #define uatomic_or(addr, v) \ diff --git a/urcu/wfstack.h b/urcu/wfstack.h index 354646d..db2ee0c 100644 --- a/urcu/wfstack.h +++ b/urcu/wfstack.h @@ -54,7 +54,7 @@ struct cds_wfs_stack { extern void cds_wfs_node_init(struct cds_wfs_node *node); extern void cds_wfs_init(struct cds_wfs_stack *s); -extern void cds_wfs_push(struct cds_wfs_stack *s, struct cds_wfs_node *node); +extern int cds_wfs_push(struct cds_wfs_stack *s, struct cds_wfs_node *node); /* __cds_wfs_pop_blocking: caller ensures mutual exclusion between pops */ extern struct cds_wfs_node *__cds_wfs_pop_blocking(struct cds_wfs_stack *s); extern struct cds_wfs_node *cds_wfs_pop_blocking(struct cds_wfs_stack *s); diff --git a/wfstack.c b/wfstack.c index d999a5b..e9799e6 100644 --- a/wfstack.c +++ b/wfstack.c @@ -38,9 +38,9 @@ void cds_wfs_init(struct cds_wfs_stack *s) _cds_wfs_init(s); } -void cds_wfs_push(struct cds_wfs_stack *s, struct cds_wfs_node *node) +int cds_wfs_push(struct cds_wfs_stack *s, struct cds_wfs_node *node) { - _cds_wfs_push(s, node); + return _cds_wfs_push(s, node); } struct cds_wfs_node *__cds_wfs_pop_blocking(struct cds_wfs_stack *s)