#include <stdlib.h>
#include <pthread.h>
+#include <syscall.h>
+#include <unistd.h>
-#include <compiler.h>
-#include <arch.h>
+#include <urcu/compiler.h>
+#include <urcu/arch.h>
+#include <urcu/list.h>
/*
 * Identify a shared load. A smp_rmc() or smp_mc() should come before the load.
 */
#define _LOAD_SHARED(p) ACCESS_ONCE(p)

/*
 * Load data from shared memory, performing a cache flush if required.
 */
#define LOAD_SHARED(p) \
 ({ \
 smp_rmc(); \
 _LOAD_SHARED(p); \
 })
+#define futex(...) syscall(__NR_futex, __VA_ARGS__)
+#define FUTEX_WAIT 0
+#define FUTEX_WAKE 1
+
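+/*
+ * futex(2) semantics relied upon here (background note, not part of the
+ * patched logic): FUTEX_WAIT puts the caller to sleep only while *uaddr
+ * still equals the expected value, and FUTEX_WAKE wakes up to "val"
+ * sleepers, e.g.:
+ *
+ *   futex(&gp_futex, FUTEX_WAIT, -1, NULL, NULL, 0);   sleep while == -1
+ *   futex(&gp_futex, FUTEX_WAKE, 1, NULL, NULL, 0);    wake one sleeper
+ */
+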
/*
* This code section can only be included in LGPL 2.1 compatible source code.
* See below for the function call wrappers which can be used in code meant to
* be only linked with the Userspace RCU library.
*/
#define KICK_READER_LOOPS 10000
+/*
+ * Number of active polling attempts checking for reader quiescent state
+ * before the grace-period waiter falls back to a futex() wait.
+ */
+#define RCU_QS_ACTIVE_ATTEMPTS 100
+
#ifdef DEBUG_RCU
#define rcu_assert(args...) assert(args)
#else
#define rcu_assert(args...)
#endif
*/
extern long urcu_gp_ctr;
-extern long __thread urcu_active_readers;
+struct urcu_reader {
+ /* Data used by both reader and synchronize_rcu() */
+ long ctr;
+ char need_mb;
+ /* Data used for registry */
+ struct list_head head __attribute__((aligned(CACHE_LINE_SIZE)));
+ pthread_t tid;
+};
+
+extern struct urcu_reader __thread urcu_reader;
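+
+/*
+ * One urcu_reader instance lives in thread-local storage per reader thread;
+ * its "head" member is intended to link into a global registry list
+ * (urcu/list.h) that the grace-period code walks. Registry management
+ * itself lives outside this header.
+ */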
+
+extern int gp_futex;
+
+/*
+ * Wake up the waiting synchronize_rcu(). May be called from many concurrent
+ * reader threads.
+ */
+static inline void wake_up_gp(void)
+{
+ if (unlikely(uatomic_read(&gp_futex) == -1)) {
+ uatomic_set(&gp_futex, 0);
+ futex(&gp_futex, FUTEX_WAKE, 1,
+ NULL, NULL, 0);
+ }
+}
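+
+/*
+ * Waiter side, for context (illustrative sketch only; the real loop lives in
+ * urcu.c): after up to RCU_QS_ACTIVE_ATTEMPTS active polls of the reader
+ * counters, the grace-period waiter publishes gp_futex = -1, re-checks, and
+ * sleeps. reader_still_active() below is a hypothetical stand-in for the
+ * registry re-check:
+ *
+ *   uatomic_dec(&gp_futex);            0 -> -1
+ *   smp_mb();                          store futex before re-check
+ *   if (reader_still_active())
+ *           futex(&gp_futex, FUTEX_WAIT, -1, NULL, NULL, 0);
+ *   else
+ *           uatomic_set(&gp_futex, 0);
+ */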
static inline int rcu_old_gp_ongoing(long *value)
{
static inline void _rcu_read_lock(void)
{
long tmp;
- tmp = urcu_active_readers;
+ tmp = urcu_reader.ctr;
/* urcu_gp_ctr = RCU_GP_COUNT | (~RCU_GP_CTR_BIT or RCU_GP_CTR_BIT) */
if (likely(!(tmp & RCU_GP_CTR_NEST_MASK))) {
- _STORE_SHARED(urcu_active_readers, _LOAD_SHARED(urcu_gp_ctr));
+ _STORE_SHARED(urcu_reader.ctr, _LOAD_SHARED(urcu_gp_ctr));
/*
* Set active readers count for outermost nesting level before
* accessing the pointer. See force_mb_all_threads().
*/
reader_barrier();
} else {
- _STORE_SHARED(urcu_active_readers, tmp + RCU_GP_COUNT);
+ _STORE_SHARED(urcu_reader.ctr, tmp + RCU_GP_COUNT);
}
}
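+/*
+ * Illustrative outcome of _rcu_read_lock() (sketch): on the outermost call
+ * the nest count is zero, so ctr is (re)loaded from urcu_gp_ctr, publishing
+ * both the current phase bit and a count of one; nested calls merely add
+ * RCU_GP_COUNT:
+ *
+ *   rcu_read_lock();      ctr = urcu_gp_ctr      (phase | count == 1)
+ *   rcu_read_lock();      ctr += RCU_GP_COUNT    (count == 2, same phase)
+ */
+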
static inline void _rcu_read_unlock(void)
{
- reader_barrier();
+ long tmp;
+
+ tmp = urcu_reader.ctr;
/*
- * Finish using rcu before decrementing the pointer.
+ * Finish using rcu before decrementing the reader nesting count.
* See force_mb_all_threads().
- * Formally only needed for outermost nesting level, but leave barrier
- * in place for nested unlocks to remove a branch from the common case
- * (no nesting).
*/
- _STORE_SHARED(urcu_active_readers, urcu_active_readers - RCU_GP_COUNT);
+ if (likely((tmp & RCU_GP_CTR_NEST_MASK) == RCU_GP_COUNT)) {
+ reader_barrier();
+ _STORE_SHARED(urcu_reader.ctr, urcu_reader.ctr - RCU_GP_COUNT);
+ /* write urcu_reader.ctr before read futex */
+ reader_barrier();
+ wake_up_gp();
+ } else {
+ _STORE_SHARED(urcu_reader.ctr, urcu_reader.ctr - RCU_GP_COUNT);
+ }
}
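+
+/*
+ * Unlock counterpart (sketch): only the outermost unlock needs the barriers,
+ * because only it can end the critical section and only it must order the
+ * ctr store against the gp_futex read in wake_up_gp():
+ *
+ *   rcu_read_unlock();    ctr -= RCU_GP_COUNT    (nested: plain decrement)
+ *   rcu_read_unlock();    ctr -= RCU_GP_COUNT, then wake_up_gp()
+ */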
/**
 * _rcu_assign_pointer - publish a pointer for RCU read-side critical
 * sections, adding the required write barrier.
 */
#define _rcu_assign_pointer(p, v) \
 ({ \
 if (!__builtin_constant_p(v) || \
 ((v) != NULL)) \
 wmb(); \
 STORE_SHARED(p, v); \
 })
+/**
+ * _rcu_cmpxchg_pointer - same as rcu_assign_pointer, but only performs the
+ * assignment when the pointer currently equals "old". On success, returns
+ * the previous pointer to the data structure, which can be safely freed
+ * after waiting for a quiescent state using synchronize_rcu(). On failure
+ * (unexpected current value), returns that current pointer, which must not
+ * be freed.
+ */
+
+#define _rcu_cmpxchg_pointer(p, old, _new) \
+ ({ \
+ if (!__builtin_constant_p(_new) || \
+ ((_new) != NULL)) \
+ wmb(); \
+ uatomic_cmpxchg(p, old, _new); \
+ })
+
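+/*
+ * Illustrative use (variable names hypothetical): swap in a new node only if
+ * the published pointer has not changed since it was read, then reclaim the
+ * old node after a grace period:
+ *
+ *   struct node *prev = _rcu_cmpxchg_pointer(&head, old_node, new_node);
+ *   if (prev == old_node) {
+ *           synchronize_rcu();
+ *           free(old_node);
+ *   }
+ */
+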
/**
* _rcu_xchg_pointer - same as rcu_assign_pointer, but returns the previous
* pointer to the data structure, which can be safely freed after waiting for a
* quiescent state using synchronize_rcu().
*/
#define _rcu_xchg_pointer(p, v) \
 ({ \
if (!__builtin_constant_p(v) || \
((v) != NULL)) \
wmb(); \
- xchg(p, v); \
+ uatomic_xchg(p, v); \
})
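+
+/*
+ * Illustrative use (names hypothetical): unconditionally publish a new
+ * pointer and reclaim the previous one after a grace period:
+ *
+ *   old = _rcu_xchg_pointer(&head, new_node);
+ *   synchronize_rcu();
+ *   free(old);
+ */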
/*