summary |
shortlog |
log |
commit | commitdiff |
tree
raw |
patch |
inline | side by side (from parent 1: d185448)
The state of a wait node must be accessed atomically. In addition, the
busy-wait loop that repeatedly loads the state until the teardown flag is
seen must follow CMM_ACQUIRE semantics, while the store that sets the
teardown flag must follow CMM_RELEASE semantics.
Change-Id: I9cd9cf4cd9ab2081551d7f33c0b1c23c3cf3942f
Co-authored-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Signed-off-by: Olivier Dion <odion@efficios.com>
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
urcu_die(errno);
}
/* Allow teardown of struct urcu_wait memory. */
urcu_die(errno);
}
/* Allow teardown of struct urcu_wait memory. */
- uatomic_or(&wait->state, URCU_WAIT_TEARDOWN);
+ uatomic_or_mo(&wait->state, URCU_WAIT_TEARDOWN, CMM_RELEASE);
break;
caa_cpu_relax();
}
break;
caa_cpu_relax();
}
- while (!(uatomic_read(&wait->state) & URCU_WAIT_TEARDOWN))
+ while (!(uatomic_load(&wait->state, CMM_ACQUIRE) & URCU_WAIT_TEARDOWN))
poll(NULL, 0, 10);
urcu_posix_assert(uatomic_read(&wait->state) & URCU_WAIT_TEARDOWN);
}
poll(NULL, 0, 10);
urcu_posix_assert(uatomic_read(&wait->state) & URCU_WAIT_TEARDOWN);
}
caa_container_of(iter, struct urcu_wait_node, node);
/* Don't wake already running threads */
caa_container_of(iter, struct urcu_wait_node, node);
/* Don't wake already running threads */
- if (wait_node->state & URCU_WAIT_RUNNING)
+ if (uatomic_load(&wait_node->state, CMM_RELAXED) & URCU_WAIT_RUNNING)
continue;
urcu_adaptative_wake_up(wait_node);
}
continue;
urcu_adaptative_wake_up(wait_node);
}