#include <pthread.h>
#include <assert.h>
#include <limits.h>
-#include <syscall.h>
-#include <unistd.h>
+#include <sched.h>
+#include <sys/time.h>
#include <compiler.h>
#include <arch.h>
(_________p1); \
})
-#define futex(...) syscall(__NR_futex, __VA_ARGS__)
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-
/*
* This code section can only be included in LGPL 2.1 compatible source code.
* See below for the function call wrappers which can be used in code meant to
/*
* If a reader is really non-cooperative and refuses to commit its
- * rcu_reader_qs_gp count to memory (there is no barrier in the reader
+ * urcu_reader_status.qs_gp count to memory (there is no barrier in the reader
* per-se), kick it after a few loops waiting for it.
*/
#define KICK_READER_LOOPS 10000
/*
- * Active attempts to check for reader Q.S. before calling futex().
+ * Active attempts to check for reader Q.S. before calling sched_yield().
*/
#define RCU_QS_ACTIVE_ATTEMPTS 100
}
#define RCU_GP_ONLINE (1UL << 0)
-#define RCU_GP_CTR (1UL << 1)
+#define RCU_GP_ONGOING (1UL << 1)
+#define RCU_GP_CTR (1UL << 2)
/*
* Global quiescent period counter with low-order bits unused.
*/
extern unsigned long urcu_gp_ctr;
-extern unsigned long __thread rcu_reader_qs_gp;
+struct urcu_reader_status {
+ unsigned long qs_gp;
+ unsigned long gp_waiting;
+ unsigned long qs_time_delta_usec;
+ struct timeval qs_time_last;
+};
-extern int gp_futex;
-
-/*
- * Wake-up waiting synchronize_rcu(). Called from many concurrent threads.
- */
-static inline void wake_up_gp(void)
-{
- if (unlikely(atomic_read(&gp_futex) == -1)) {
- atomic_set(&gp_futex, 0);
- futex(&gp_futex, FUTEX_WAKE, 1,
- NULL, NULL, 0);
- }
-}
+extern struct urcu_reader_status __thread urcu_reader_status;
#if (BITS_PER_LONG < 64)
static inline int rcu_gp_ongoing(unsigned long *value)
static inline void _rcu_read_lock(void)
{
- rcu_assert(rcu_reader_qs_gp);
+ rcu_assert(urcu_reader_status.qs_gp);
}
static inline void _rcu_read_unlock(void)
static inline void _rcu_quiescent_state(void)
{
- smp_mb();
- _STORE_SHARED(rcu_reader_qs_gp, _LOAD_SHARED(urcu_gp_ctr));
- smp_mb(); /* write rcu_reader_qs_gp before read futex */
- wake_up_gp();
+ long gp_ctr;
+ struct timeval current_time, delta_time;
+
+ smp_mb();
+	gettimeofday(&current_time, NULL);
+	timersub(&current_time, &urcu_reader_status.qs_time_last, &delta_time);
+ if (delta_time.tv_sec >= 1)
+ _STORE_SHARED(urcu_reader_status.qs_time_delta_usec, 1000000);
+ else
+ _STORE_SHARED(urcu_reader_status.qs_time_delta_usec,
+ (unsigned long)delta_time.tv_usec);
+ urcu_reader_status.qs_time_last = current_time;
+ /*
+ * volatile accesses can be reordered by the compiler when put in the
+ * same expression.
+ */
+ if (unlikely((gp_ctr = LOAD_SHARED(urcu_gp_ctr)) & RCU_GP_ONGOING) &&
+ unlikely(urcu_reader_status.gp_waiting)) {
+ _STORE_SHARED(urcu_reader_status.qs_gp, gp_ctr);
+ sched_yield();
+ } else {
+ _STORE_SHARED(urcu_reader_status.qs_gp, gp_ctr);
+ }
smp_mb();
}
static inline void _rcu_thread_offline(void)
{
smp_mb();
- STORE_SHARED(rcu_reader_qs_gp, 0);
- smp_mb(); /* write rcu_reader_qs_gp before read futex */
- wake_up_gp();
+ STORE_SHARED(urcu_reader_status.qs_gp, 0);
+ if (unlikely(LOAD_SHARED(urcu_gp_ctr) & RCU_GP_ONGOING) &&
+ unlikely(urcu_reader_status.gp_waiting)) {
+ sched_yield();
+ }
}
static inline void _rcu_thread_online(void)
{
- _STORE_SHARED(rcu_reader_qs_gp, LOAD_SHARED(urcu_gp_ctr));
+ struct timeval current_time, delta_time;
+
+	gettimeofday(&current_time, NULL);
+	timersub(&current_time, &urcu_reader_status.qs_time_last, &delta_time);
+ if (delta_time.tv_sec >= 1)
+ _STORE_SHARED(urcu_reader_status.qs_time_delta_usec, 1000000);
+ else
+ _STORE_SHARED(urcu_reader_status.qs_time_delta_usec,
+ (unsigned long)delta_time.tv_usec);
+ urcu_reader_status.qs_time_last = current_time;
+ _STORE_SHARED(urcu_reader_status.qs_time_delta_usec, 0);
+ _STORE_SHARED(urcu_reader_status.qs_gp, LOAD_SHARED(urcu_gp_ctr));
smp_mb();
}