summary |
shortlog |
log |
commit | commitdiff |
tree
raw |
patch |
inline | side by side (from parent 1:
df3c6c5)
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
_STORE_SHARED(rcu_reader.ctr, _LOAD_SHARED(rcu_gp_ctr));
/*
* Set active readers count for outermost nesting level before
_STORE_SHARED(rcu_reader.ctr, _LOAD_SHARED(rcu_gp_ctr));
/*
* Set active readers count for outermost nesting level before
- * accessing the pointer. See force_mb_all_threads().
+ * accessing the pointer. See force_mb_all_readers().
*/
reader_barrier();
} else {
*/
reader_barrier();
} else {
tmp = rcu_reader.ctr;
/*
* Finish using rcu before decrementing the pointer.
tmp = rcu_reader.ctr;
/*
* Finish using rcu before decrementing the pointer.
- * See force_mb_all_threads().
+ * See force_mb_all_readers().
*/
if (likely((tmp & RCU_GP_CTR_NEST_MASK) == RCU_GP_COUNT)) {
reader_barrier();
*/
if (likely((tmp & RCU_GP_CTR_NEST_MASK) == RCU_GP_COUNT)) {
reader_barrier();
-static void force_mb_all_threads(void)
+static void force_mb_all_readers(void)
-static void force_mb_all_threads(void)
+static void force_mb_all_readers(void)
{
struct rcu_reader *index;
{
struct rcu_reader *index;
static void wait_gp(void)
{
/* Read reader_gp before read futex */
static void wait_gp(void)
{
/* Read reader_gp before read futex */
- force_mb_all_threads();
+ force_mb_all_readers();
if (uatomic_read(&gp_futex) == -1)
futex_async(&gp_futex, FUTEX_WAIT, -1,
NULL, NULL, 0);
if (uatomic_read(&gp_futex) == -1)
futex_async(&gp_futex, FUTEX_WAIT, -1,
NULL, NULL, 0);
if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
uatomic_dec(&gp_futex);
/* Write futex before read reader_gp */
if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
uatomic_dec(&gp_futex);
/* Write futex before read reader_gp */
- force_mb_all_threads();
+ force_mb_all_readers();
}
list_for_each_entry_safe(index, tmp, &registry, head) {
}
list_for_each_entry_safe(index, tmp, &registry, head) {
if (list_empty(&registry)) {
if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
/* Read reader_gp before write futex */
if (list_empty(&registry)) {
if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
/* Read reader_gp before write futex */
- force_mb_all_threads();
+ force_mb_all_readers();
uatomic_set(&gp_futex, 0);
}
break;
uatomic_set(&gp_futex, 0);
}
break;
if (list_empty(&registry)) {
if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
/* Read reader_gp before write futex */
if (list_empty(&registry)) {
if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
/* Read reader_gp before write futex */
- force_mb_all_threads();
+ force_mb_all_readers();
uatomic_set(&gp_futex, 0);
}
break;
uatomic_set(&gp_futex, 0);
}
break;
wait_gp();
break; /* only escape switch */
case KICK_READER_LOOPS:
wait_gp();
break; /* only escape switch */
case KICK_READER_LOOPS:
- force_mb_all_threads();
+ force_mb_all_readers();
wait_loops = 0;
break; /* only escape switch */
default:
wait_loops = 0;
break; /* only escape switch */
default:
* where new ptr points to. Must be done within internal_rcu_lock
* because it iterates on reader threads.*/
/* Write new ptr before changing the qparity */
* where new ptr points to. Must be done within internal_rcu_lock
* because it iterates on reader threads.*/
/* Write new ptr before changing the qparity */
- force_mb_all_threads();
+ force_mb_all_readers();
switch_next_rcu_qparity(); /* 0 -> 1 */
switch_next_rcu_qparity(); /* 0 -> 1 */
/* Finish waiting for reader threads before letting the old ptr being
* freed. Must be done within internal_rcu_lock because it iterates on
* reader threads. */
/* Finish waiting for reader threads before letting the old ptr being
* freed. Must be done within internal_rcu_lock because it iterates on
* reader threads. */
- force_mb_all_threads();
+ force_mb_all_readers();