Enhance test cases
diff --git a/urcu.h b/urcu.h
index 9e9fea2b96fafa5c4688a1b1bf696284587df25f..03764ab33dbe241b643b72ef9cbfd6757662fa3b 100644
--- a/urcu.h
+++ b/urcu.h
@@ -121,10 +121,19 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
 #include <sched.h>
 #include <time.h>
 #include <pthread.h>
+#include <unistd.h>
 
 #define YIELD_READ	(1 << 0)
 #define YIELD_WRITE	(1 << 1)
 
+/* Updates without DEBUG_FULL_MB are much slower. Account this in the delay */
+#ifdef DEBUG_FULL_MB
+/* maximum sleep delay, in us */
+#define MAX_SLEEP 50
+#else
+#define MAX_SLEEP 30000
+#endif
+
 extern unsigned int yield_active;
 extern unsigned int __thread rand_yield;
 
@@ -132,14 +141,14 @@ static inline void debug_yield_read(void)
 {
 	if (yield_active & YIELD_READ)
 		if (rand_r(&rand_yield) & 0x1)
-			sched_yield();
+			usleep(rand_r(&rand_yield) % MAX_SLEEP);
 }
 
 static inline void debug_yield_write(void)
 {
 	if (yield_active & YIELD_WRITE)
 		if (rand_r(&rand_yield) & 0x1)
-			sched_yield();
+			usleep(rand_r(&rand_yield) % MAX_SLEEP);
 }
 
 static inline void debug_yield_init(void)
@@ -161,6 +170,18 @@ static inline void debug_yield_init(void)
 }
 #endif
 
+#ifdef DEBUG_FULL_MB
+static inline void read_barrier()
+{
+	mb();
+}
+#else
+static inline void read_barrier()
+{
+	barrier();
+}
+#endif
+
 /*
  * The trick here is that RCU_GP_CTR_BIT must be a multiple of 8 so we can use a
  * full 8-bits, 16-bits or 32-bits bitmask for the lower order bits.
@@ -199,6 +220,7 @@ static inline void rcu_read_lock(void)
 	debug_yield_read();
 	tmp = urcu_active_readers;
 	debug_yield_read();
+	/* urcu_gp_ctr = RCU_GP_COUNT | (~RCU_GP_CTR_BIT or RCU_GP_CTR_BIT) */
 	if (likely(!(tmp & RCU_GP_CTR_NEST_MASK)))
 		urcu_active_readers = urcu_gp_ctr;
 	else
@@ -208,14 +230,14 @@ static inline void rcu_read_lock(void)
 	 * Increment active readers count before accessing the pointer.
 	 * See force_mb_all_threads().
	 */
-	barrier();
+	read_barrier();
 	debug_yield_read();
 }
 
 static inline void rcu_read_unlock(void)
 {
 	debug_yield_read();
-	barrier();
+	read_barrier();
 	debug_yield_read();
 	/*
 	 * Finish using rcu before decrementing the pointer.
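
The switch from sched_yield() to usleep() makes the injected race windows tunable in microseconds rather than dependent on scheduler behavior. Below is a minimal, self-contained C sketch of that delay-injection scheme, assuming only POSIX APIs; the seed value and the main() driver are illustrative and are not part of urcu.h.

	/*
	 * Sketch of the debug delay injection above. Build with
	 * -DDEBUG_FULL_MB to get the short delays used when updates
	 * are fast; without it, delays scale up to match slow updates.
	 */
	#include <stdio.h>
	#include <stdlib.h>
	#include <unistd.h>

	#ifdef DEBUG_FULL_MB
	#define MAX_SLEEP 50		/* maximum sleep delay, in us */
	#else
	#define MAX_SLEEP 30000		/* updates are much slower here */
	#endif

	#define YIELD_READ	(1 << 0)
	#define YIELD_WRITE	(1 << 1)

	static unsigned int yield_active = YIELD_READ | YIELD_WRITE;
	static __thread unsigned int rand_yield = 42;	/* per-thread seed (illustrative) */

	static inline void debug_yield_read(void)
	{
		/* With probability 1/2, sleep a random time up to MAX_SLEEP us. */
		if (yield_active & YIELD_READ)
			if (rand_r(&rand_yield) & 0x1)
				usleep(rand_r(&rand_yield) % MAX_SLEEP);
	}

	int main(void)
	{
		debug_yield_read();	/* may sleep up to MAX_SLEEP us */
		printf("max injected delay: %d us\n", MAX_SLEEP);
		return 0;
	}

The two MAX_SLEEP values follow the comment in the diff: without DEBUG_FULL_MB each update is far more expensive, so the injected delays are scaled up to keep races plausible relative to update duration.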
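
The read_barrier() indirection lets the same read-side code compile either with a real hardware fence (DEBUG_FULL_MB) or with only a compiler barrier, in which case ordering is supplied out of line (see force_mb_all_threads() in the hunk above). A sketch of how that selection might look on x86 follows; the mfence-based mb() and the barrier() definition are assumptions for the sake of a self-contained example, since urcu.h defines both elsewhere in the header.

	/*
	 * Sketch only: mb() and barrier() here are assumed x86/GCC
	 * definitions, not copied from urcu.h.
	 */
	#define barrier()	__asm__ __volatile__("" : : : "memory")	/* compiler-only */
	#define mb()		__asm__ __volatile__("mfence" : : : "memory")	/* hardware fence */

	#ifdef DEBUG_FULL_MB
	/* Pay for a hardware fence at every read-side boundary. */
	static inline void read_barrier(void)
	{
		mb();
	}
	#else
	/* Compiler barrier only; writers enforce ordering remotely. */
	static inline void read_barrier(void)
	{
		barrier();
	}
	#endif
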