projects
/
urcu.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
Add read/write counts to test
[urcu.git]
/
urcu.h
diff --git
a/urcu.h
b/urcu.h
index 92b31df603d157ffa942534af1d5731d912c2342..3eca5ea3c44ccc7135635229c41141d5050df1e2 100644
(file)
--- a/
urcu.h
+++ b/
urcu.h
@@
-130,6
+130,13
@@
static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
#define SIGURCU SIGUSR1
#define SIGURCU SIGUSR1
+/*
+ * If a reader is really non-cooperative and refuses to commit its
+ * urcu_active_readers count to memory (there is no barrier in the reader
+ * per se), kick it after a few loops waiting for it.
+ */
+#define KICK_READER_LOOPS 10000
+
#ifdef DEBUG_YIELD
#include <sched.h>
#include <time.h>
#ifdef DEBUG_YIELD
#include <sched.h>
#include <time.h>
@@
-219,13
+226,11
@@
static inline int rcu_old_gp_ongoing(long *value)
if (value == NULL)
return 0;
if (value == NULL)
return 0;
- debug_yield_write();
/*
* Make sure both tests below are done on the same version of *value
 * to ensure consistency.
*/
v = ACCESS_ONCE(*value);
/*
* Make sure both tests below are done on the same version of *value
 * to ensure consistency.
*/
v = ACCESS_ONCE(*value);
- debug_yield_write();
return (v & RCU_GP_CTR_NEST_MASK) &&
((v ^ urcu_gp_ctr) & RCU_GP_CTR_BIT);
}
return (v & RCU_GP_CTR_NEST_MASK) &&
((v ^ urcu_gp_ctr) & RCU_GP_CTR_BIT);
}
@@
-234,34
+239,29
@@
static inline void rcu_read_lock(void)
{
long tmp;
{
long tmp;
- debug_yield_read();
tmp = urcu_active_readers;
tmp = urcu_active_readers;
- debug_yield_read();
/* urcu_gp_ctr = RCU_GP_COUNT | (~RCU_GP_CTR_BIT or RCU_GP_CTR_BIT) */
/* urcu_gp_ctr = RCU_GP_COUNT | (~RCU_GP_CTR_BIT or RCU_GP_CTR_BIT) */
+ /* The data dependency "read urcu_gp_ctr, write urcu_active_readers",
+ * serializes those two memory operations. */
if (likely(!(tmp & RCU_GP_CTR_NEST_MASK)))
urcu_active_readers = urcu_gp_ctr;
else
urcu_active_readers = tmp + RCU_GP_COUNT;
if (likely(!(tmp & RCU_GP_CTR_NEST_MASK)))
urcu_active_readers = urcu_gp_ctr;
else
urcu_active_readers = tmp + RCU_GP_COUNT;
- debug_yield_read();
/*
* Increment active readers count before accessing the pointer.
* See force_mb_all_threads().
*/
read_barrier();
/*
* Increment active readers count before accessing the pointer.
* See force_mb_all_threads().
*/
read_barrier();
- debug_yield_read();
}
static inline void rcu_read_unlock(void)
{
}
static inline void rcu_read_unlock(void)
{
- debug_yield_read();
read_barrier();
read_barrier();
- debug_yield_read();
/*
* Finish using rcu before decrementing the pointer.
* See force_mb_all_threads().
*/
urcu_active_readers -= RCU_GP_COUNT;
/*
* Finish using rcu before decrementing the pointer.
* See force_mb_all_threads().
*/
urcu_active_readers -= RCU_GP_COUNT;
- debug_yield_read();
}
/**
}
/**
@@
-302,7
+302,6
@@
extern void synchronize_rcu(void);
#define urcu_publish_content(p, v) \
({ \
void *oldptr; \
#define urcu_publish_content(p, v) \
({ \
void *oldptr; \
- debug_yield_write(); \
oldptr = rcu_xchg_pointer(p, v); \
synchronize_rcu(); \
oldptr; \
oldptr = rcu_xchg_pointer(p, v); \
synchronize_rcu(); \
oldptr; \
This page took
0.025406 seconds
and
4
git commands to generate.