Add randomness to yield debug test
diff --git a/urcu.h b/urcu.h
index bee7715270794c3e83cede4acb1741a98d361ac6..c77b26f958e34b8521f1b8363d83b6492cb46b0f 100644 (file)
--- a/urcu.h
+++ b/urcu.h
@@ -1,6 +1,22 @@
 #ifndef _URCU_H
 #define _URCU_H
 
+/*
+ * urcu.h
+ *
+ * Userspace RCU header
+ *
+ * Copyright February 2009 - Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
+ *
+ * Credit to Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+ * for inspiration coming from the Linux kernel RCU and rcu-preempt.
+ *
+ * The barrier, mb, rmb, wmb, atomic_inc, smp_read_barrier_depends, ACCESS_ONCE
+ * and rcu_dereference primitives come from the Linux kernel.
+ *
+ * Distributed under GPLv2
+ */
+
 /* The "volatile" is due to gcc bugs */
 #define barrier() __asm__ __volatile__("": : :"memory")
 
@@ -9,9 +25,6 @@
 #define rmb()   asm volatile("lfence":::"memory")
 #define wmb()   asm volatile("sfence" ::: "memory")
 
-
-
-/* x86 32 */
 static inline void atomic_inc(int *v)
 {
        asm volatile("lock; incl %0"
@@ -53,6 +66,48 @@ static inline void atomic_inc(int *v)
 
 #define SIGURCU SIGUSR1
 
+#ifdef DEBUG_YIELD
+#include <sched.h>
+
+#define YIELD_READ     (1 << 0)
+#define YIELD_WRITE    (1 << 1)
+
+extern unsigned int yield_active;
+extern unsigned int __thread rand_yield;
+
+static inline void debug_yield_read(void)
+{
+       if (yield_active & YIELD_READ)
+               if (rand_r(&rand_yield) & 0x1)
+                       sched_yield();
+}
+
+static inline void debug_yield_write(void)
+{
+       if (yield_active & YIELD_WRITE)
+               if (rand_r(&rand_yield) & 0x1)
+                       sched_yield();
+}
+
+static inline void debug_yield_init(void)
+{
+       rand_yield = time(NULL) ^ pthread_self();
+}
+#else
+static inline void debug_yield_read(void)
+{
+}
+
+static inline void debug_yield_write(void)
+{
+}
+
+static inline void debug_yield_init(void)
+{
+
+}
+#endif
+
 /* Global quiescent period parity */
 extern int urcu_qparity;
 
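
The yield helpers added above only fire when the code is built with -DDEBUG_YIELD and the matching bit is set in yield_active; each thread seeds its private rand_r() state through debug_yield_init(), which relies on rand_r(), time() and pthread_self(), so <stdlib.h>, <time.h> and <pthread.h> must be visible wherever the header is included with DEBUG_YIELD defined. Below is a minimal sketch of how a stress test might arm the random yields, assuming yield_active and rand_yield are defined in urcu.c as the extern declarations suggest; the reader_thread() name and iteration count are made up for illustration.

/* Build everything with -DDEBUG_YIELD so the yields are compiled in. */
#include <stdlib.h>   /* rand_r() used by the debug_yield_*() inlines */
#include <time.h>     /* time() used by debug_yield_init() */
#include <pthread.h>
#include "urcu.h"

static void *reader_thread(void *arg)
{
	int parity, i;

	(void)arg;
	urcu_register_thread();
	debug_yield_init();                /* seed this thread's rand_yield */
	for (i = 0; i < 1000; i++) {
		rcu_read_lock(&parity);    /* may sched_yield() at random points */
		/* dereference and read the RCU-protected pointer here */
		rcu_read_unlock(&parity);
	}
	urcu_unregister_thread();
	return NULL;
}

int main(void)
{
	pthread_t tid;

	yield_active = YIELD_READ | YIELD_WRITE;   /* allow yields on both paths */
	pthread_create(&tid, NULL, reader_thread, NULL);
	pthread_join(tid, NULL);
	return 0;
}

The point of the random sched_yield() calls interleaved through the read-side steps is to widen the windows between those steps so that ordering bugs have a better chance of surfacing under the test.
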
@@ -64,39 +119,42 @@ static inline int get_urcu_qparity(void)
 }
 
 /*
- * returns urcu_parity.
+ * urcu_parity should be declared on the caller's stack.
  */
-static inline int rcu_read_lock(void)
+static inline void rcu_read_lock(int *urcu_parity)
 {
-       int urcu_parity = get_urcu_qparity();
-       urcu_active_readers[urcu_parity]++;
+       debug_yield_read();
+       *urcu_parity = get_urcu_qparity();
+       debug_yield_read();
+       urcu_active_readers[*urcu_parity]++;
+       debug_yield_read();
        /*
         * Increment active readers count before accessing the pointer.
         * See force_mb_all_threads().
         */
        barrier();
-       return urcu_parity;
+       debug_yield_read();
 }
 
-static inline void rcu_read_unlock(int urcu_parity)
+static inline void rcu_read_unlock(int *urcu_parity)
 {
+       debug_yield_read();
        barrier();
+       debug_yield_read();
        /*
         * Finish using rcu before decrementing the pointer.
         * See force_mb_all_threads().
         */
-       urcu_active_readers[urcu_parity]--;
+       urcu_active_readers[*urcu_parity]--;
+       debug_yield_read();
 }
 
-extern void rcu_write_lock(void);
-extern void rcu_write_unlock(void);
-
 extern void *urcu_publish_content(void **ptr, void *new);
 
 /*
  * Reader thread registration.
  */
 extern void urcu_register_thread(void);
-extern void urcu_register_thread(void);
+extern void urcu_unregister_thread(void);
 
 #endif /* _URCU_H */
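
Since rcu_read_lock() now fills in a parity word on the caller's stack instead of returning it, read-side code changes from "qparity = rcu_read_lock();" to passing the address of a local variable. Below is a hedged sketch of the new calling convention on both sides, assuming the rcu_dereference() primitive mentioned in the header comment is available and that urcu_publish_content() returns the old pointer once it is safe to reclaim; struct foo and shared_ptr are illustrative names only.

#include <stdlib.h>
#include "urcu.h"

struct foo { int value; };
static struct foo *shared_ptr;           /* hypothetical RCU-protected pointer */

static int read_value(void)
{
	struct foo *p;
	int qparity;                     /* parity now lives on the reader's stack */
	int ret;

	rcu_read_lock(&qparity);         /* was: qparity = rcu_read_lock(); */
	p = rcu_dereference(shared_ptr);
	ret = p ? p->value : -1;
	rcu_read_unlock(&qparity);       /* was: rcu_read_unlock(qparity); */
	return ret;
}

static void update_value(int value)
{
	struct foo *new, *old;

	new = malloc(sizeof(*new));
	new->value = value;
	/*
	 * Publish the new node; the returned old pointer is assumed to be
	 * unreachable by readers at that point (based on the
	 * urcu_publish_content() declaration above), so it can be freed.
	 */
	old = urcu_publish_content((void **)&shared_ptr, new);
	free(old);
}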