Remove parameter from rcu_read_lock()
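
Replace the per-call parity variable with a global grace-period counter,
urcu_gp_ctr, and a per-thread urcu_active_readers word.  The low byte
(RCU_GP_CTR_NEST_MASK) holds the read-side nesting count and bit 8
(RCU_GP_CTR_BIT) records the grace-period phase snapshotted by the
outermost rcu_read_lock(), so readers no longer have to pass a parity
variable around.  The patch also adds DEBUG_YIELD helpers that, when
built with -DDEBUG_YIELD and with the corresponding bits set in
yield_active, randomly call sched_yield() at instrumented points to
widen race windows during testing.

A minimal reader-side sketch against the new, parameterless API follows;
struct mydata, rcu_data and read_value() are hypothetical names, and
reader-thread registration with the library is assumed to have happened
elsewhere:

#include "urcu.h"

struct mydata {
        int a;
};

/* Hypothetical RCU-protected pointer, updated via urcu_publish_content(). */
static struct mydata *rcu_data;

static int read_value(void)
{
        struct mydata *p;
        int v = -1;

        rcu_read_lock();        /* no parity argument needed anymore */
        p = rcu_data;           /* fetch and use only inside the critical section */
        if (p)
                v = p->a;
        rcu_read_unlock();
        return v;
}

Read-side critical sections may still nest: inner rcu_read_lock() calls
only add RCU_GP_COUNT to the low byte, and rcu_read_unlock() subtracts
it again, so only the outermost lock snapshots urcu_gp_ctr.
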
diff --git a/urcu.h b/urcu.h
index b6b5c7b039e359225d947afb91a29a2f25ba6a3c..01a4c6857d7e6fd31668dec57615be03e94ccc91 100644
--- a/urcu.h
+++ b/urcu.h
@@ -17,6 +17,8 @@
  * Distributed under GPLv2
  */
 
+#include <stdlib.h>
+
 /* The "volatile" is due to gcc bugs */
 #define barrier() __asm__ __volatile__("": : :"memory")
 
@@ -66,38 +68,105 @@ static inline void atomic_inc(int *v)
 
 #define SIGURCU SIGUSR1
 
-/* Global quiescent period parity */
-extern int urcu_qparity;
+#ifdef DEBUG_YIELD
+#include <sched.h>
+
+#define YIELD_READ     (1 << 0)
+#define YIELD_WRITE    (1 << 1)
+
+extern unsigned int yield_active;
+extern unsigned int __thread rand_yield;
+
+static inline void debug_yield_read(void)
+{
+       if (yield_active & YIELD_READ)
+               if (rand_r(&rand_yield) & 0x1)
+                       sched_yield();
+}
 
-extern int __thread urcu_active_readers[2];
+static inline void debug_yield_write(void)
+{
+       if (yield_active & YIELD_WRITE)
+               if (rand_r(&rand_yield) & 0x1)
+                       sched_yield();
+}
 
-static inline int get_urcu_qparity(void)
+static inline void debug_yield_init(void)
+{
+       rand_yield = time(NULL) ^ pthread_self();
+}
+#else
+static inline void debug_yield_read(void)
 {
-       return urcu_qparity;
 }
 
+static inline void debug_yield_write(void)
+{
+}
+
+static inline void debug_yield_init(void)
+{
+
+}
+#endif
+
 /*
- * urcu_parity should be declared on the caller's stack.
+ * Limiting the nesting level to 256 to keep instructions small in the read
+ * fast-path.
  */
-static inline void rcu_read_lock(int *urcu_parity)
+#define RCU_GP_COUNT           (1U << 0)
+#define RCU_GP_CTR_BIT         (1U << 8)
+#define RCU_GP_CTR_NEST_MASK   (RCU_GP_CTR_BIT - 1)
+
+/* Global quiescent period counter with low-order bits unused. */
+extern int urcu_gp_ctr;
+
+extern int __thread urcu_active_readers;
+
+static inline int rcu_old_gp_ongoing(int *value)
+{
+       int v;
+
+       if (value == NULL)
+               return 0;
+       debug_yield_write();
+       v = ACCESS_ONCE(*value);
+       debug_yield_write();
+       return (v & RCU_GP_CTR_NEST_MASK) &&
+                ((v ^ ACCESS_ONCE(urcu_gp_ctr)) & RCU_GP_CTR_BIT);
+}
+
+static inline void rcu_read_lock(void)
 {
-       *urcu_parity = get_urcu_qparity();
-       urcu_active_readers[*urcu_parity]++;
+       int tmp;
+
+       debug_yield_read();
+       tmp = urcu_active_readers;
+       debug_yield_read();
+       if (!(tmp & RCU_GP_CTR_NEST_MASK))
+               urcu_active_readers = urcu_gp_ctr + RCU_GP_COUNT;
+       else
+               urcu_active_readers = tmp + RCU_GP_COUNT;
+       debug_yield_read();
        /*
         * Increment active readers count before accessing the pointer.
         * See force_mb_all_threads().
         */
        barrier();
+       debug_yield_read();
 }
 
-static inline void rcu_read_unlock(int *urcu_parity)
+static inline void rcu_read_unlock(void)
 {
+       debug_yield_read();
        barrier();
+       debug_yield_read();
        /*
         * Finish using rcu before decrementing the pointer.
         * See force_mb_all_threads().
         */
-       urcu_active_readers[*urcu_parity]--;
+       urcu_active_readers -= RCU_GP_COUNT;
+       debug_yield_read();
 }
 
 extern void *urcu_publish_content(void **ptr, void *new);
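
On the update side, rcu_old_gp_ongoing() reports whether a given reader
is still inside a read-side critical section that started before the
current grace-period phase: its nesting bits must be non-zero and its
snapshot of RCU_GP_CTR_BIT must differ from the current urcu_gp_ctr.
The sketch below shows how a writer could wait for such readers to
drain; it is only an illustration, since the real update-side code lives
in urcu.c (not part of this patch), and MAX_READERS, reader_ctrs,
num_readers and wait_for_old_readers() are hypothetical names.  Locking
around the phase flip and the force_mb_all_threads() barriers mentioned
in the header comments are omitted:

#include "urcu.h"

#define MAX_READERS 128         /* hypothetical registry size */

/* Hypothetical registry: one pointer per reader's urcu_active_readers. */
static int *reader_ctrs[MAX_READERS];
static int num_readers;

static void wait_for_old_readers(void)
{
        int i;

        /* Start a new grace-period phase (writer lock assumed held). */
        urcu_gp_ctr ^= RCU_GP_CTR_BIT;

        for (i = 0; i < num_readers; i++) {
                /*
                 * Non-zero while reader i is nested in a critical section
                 * whose RCU_GP_CTR_BIT snapshot differs from urcu_gp_ctr.
                 */
                while (rcu_old_gp_ongoing(reader_ctrs[i]))
                        barrier();
        }
}

Because rcu_old_gp_ongoing() ignores readers whose nesting count is
zero, a thread that is not currently in a read-side critical section
never delays the writer.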