Add Promela model
diff --git a/urcu.h b/urcu.h
index 277b7d2f45ae565610e4db9ee89457174fafa667..c4a7992b4b7b166d527411d4ae5108cac174fc95 100644
--- a/urcu.h
+++ b/urcu.h
@@ -121,10 +121,19 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
 #include <sched.h>
 #include <time.h>
 #include <pthread.h>
+#include <unistd.h>
 
 #define YIELD_READ (1 << 0)
 #define YIELD_WRITE (1 << 1)
 
+/* Updates without DEBUG_FULL_MB are much slower. Account this in the delay */
+#ifdef DEBUG_FULL_MB
+/* maximum sleep delay, in us */
+#define MAX_SLEEP 50
+#else
+#define MAX_SLEEP 30000
+#endif
+
 extern unsigned int yield_active;
 extern unsigned int __thread rand_yield;
 
@@ -132,14 +141,14 @@ static inline void debug_yield_read(void)
 {
 	if (yield_active & YIELD_READ)
 		if (rand_r(&rand_yield) & 0x1)
-			sched_yield();
+			usleep(rand_r(&rand_yield) % MAX_SLEEP);
 }
 
 static inline void debug_yield_write(void)
 {
 	if (yield_active & YIELD_WRITE)
 		if (rand_r(&rand_yield) & 0x1)
-			sched_yield();
+			usleep(rand_r(&rand_yield) % MAX_SLEEP);
 }
 
 static inline void debug_yield_init(void)
@@ -161,6 +170,18 @@ static inline void debug_yield_init(void)
 }
 #endif
 
+#ifdef DEBUG_FULL_MB
+static inline void read_barrier()
+{
+	mb();
+}
+#else
+static inline void read_barrier()
+{
+	barrier();
+}
+#endif
+
 /*
  * The trick here is that RCU_GP_CTR_BIT must be a multiple of 8 so we can use a
  * full 8-bits, 16-bits or 32-bits bitmask for the lower order bits.
@@ -186,10 +207,14 @@ static inline int rcu_old_gp_ongoing(long *value)
 	if (value == NULL)
 		return 0;
 	debug_yield_write();
+	/*
+	 * Make sure both tests below are done on the same version of *value
+	 * to insure consistency.
+	 */
 	v = ACCESS_ONCE(*value);
 	debug_yield_write();
 	return (v & RCU_GP_CTR_NEST_MASK) &&
-		((v ^ ACCESS_ONCE(urcu_gp_ctr)) & RCU_GP_CTR_BIT);
+		((v ^ urcu_gp_ctr) & RCU_GP_CTR_BIT);
 }
 
 static inline void rcu_read_lock(void)
@@ -209,14 +234,14 @@ static inline void rcu_read_lock(void)
 	 * Increment active readers count before accessing the pointer.
 	 * See force_mb_all_threads().
 	 */
-	barrier();
+	read_barrier();
 	debug_yield_read();
 }
 
 static inline void rcu_read_unlock(void)
 {
 	debug_yield_read();
-	barrier();
+	read_barrier();
 	debug_yield_read();
 	/*
 	 * Finish using rcu before decrementing the pointer.
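Note on the debug_yield_* change: replacing sched_yield() with usleep(rand_r(&rand_yield) % MAX_SLEEP) turns the debug yield points into a randomized delay injector, which widens race windows far more aggressively than a bare yield to the scheduler. A minimal standalone sketch of how these hooks might be driven from a test follows; yield_active, rand_yield, YIELD_READ and MAX_SLEEP mirror the patched urcu.h, while reader_loop() and the thread count are hypothetical stand-ins for a real read-side test body.

/*
 * Standalone sketch (not part of the patch): driving the randomized
 * delay injection from a multithreaded test.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define YIELD_READ	(1 << 0)
#define MAX_SLEEP	50		/* us, the DEBUG_FULL_MB value */

static unsigned int yield_active;
static __thread unsigned int rand_yield;

static inline void debug_yield_read(void)
{
	if (yield_active & YIELD_READ)
		if (rand_r(&rand_yield) & 0x1)
			/* sleep up to MAX_SLEEP us to widen race windows */
			usleep(rand_r(&rand_yield) % MAX_SLEEP);
}

static void *reader_loop(void *arg)
{
	int i;

	rand_yield = (unsigned int)(unsigned long)arg;	/* per-thread seed */
	for (i = 0; i < 1000; i++)
		debug_yield_read();	/* stands in for a read-side critical section */
	return NULL;
}

int main(void)
{
	pthread_t tid[4];
	unsigned long i;

	yield_active |= YIELD_READ;	/* enable injection for readers */
	for (i = 0; i < 4; i++)
		pthread_create(&tid[i], NULL, reader_loop, (void *)(i + 1));
	for (i = 0; i < 4; i++)
		pthread_join(tid[i], NULL);
	printf("done\n");
	return 0;
}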
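The new read_barrier() wrapper makes the read-side ordering cost configurable: with DEBUG_FULL_MB the read side pays a full memory barrier on rcu_read_lock()/rcu_read_unlock(), otherwise only a compiler barrier, leaving the writer to force barriers remotely via force_mb_all_threads(). That remote forcing is why the first hunk notes that updates without DEBUG_FULL_MB are much slower and scales MAX_SLEEP from 50 us up to 30000 us. The underlying primitives are defined elsewhere in urcu.h; on x86-64 they would plausibly look like the following sketch (an assumption for illustration, not taken from this diff):

/* Compiler-only barrier: forbids reordering by the compiler,
 * but emits no instruction. (assumed x86-64 definitions) */
#define barrier()	__asm__ __volatile__("" : : : "memory")

/* Full memory barrier: orders loads and stores in the CPU
 * as well as in the compiler. */
#define mb()		__asm__ __volatile__("mfence" : : : "memory")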
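The comment added to rcu_old_gp_ongoing() is worth spelling out: *value is loaded exactly once through ACCESS_ONCE(), and both the nesting-count test and the grace-period phase-bit test are applied to that single snapshot v. Without the volatile access, the compiler would be free to reload *value between the two tests, and they could observe different versions of the counter. (urcu_gp_ctr, by contrast, appears only once in the expression, so its ACCESS_ONCE() wrapper is dropped in the same hunk.) A Linux-kernel-style definition of ACCESS_ONCE, assumed here since it is not shown in this diff, is:

/* Force a single volatile access to x: the compiler must load it
 * exactly once and may not re-read or tear it. */
#define ACCESS_ONCE(x)	(*(volatile __typeof__(x) *)&(x))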