X-Git-Url: https://git.liburcu.org/?a=blobdiff_plain;f=src%2Furcu-wait.h;h=47ac5eb35884f437ca299e48cf656e2c301557f7;hb=HEAD;hp=68a42dac22c67c8df9e14608a52c05b3b109e0f7;hpb=542fb0dc9860863b1a869f9ad526da699b2179ff;p=urcu.git

diff --git a/src/urcu-wait.h b/src/urcu-wait.h
index 68a42da..d77282b 100644
--- a/src/urcu-wait.h
+++ b/src/urcu-wait.h
@@ -113,16 +113,15 @@ void urcu_wait_node_init(struct urcu_wait_node *node,
 static inline
 void urcu_adaptative_wake_up(struct urcu_wait_node *wait)
 {
-	cmm_smp_mb();
 	urcu_posix_assert(uatomic_read(&wait->state) == URCU_WAIT_WAITING);
-	uatomic_set(&wait->state, URCU_WAIT_WAKEUP);
+	uatomic_store(&wait->state, URCU_WAIT_WAKEUP, CMM_RELEASE);
 	if (!(uatomic_read(&wait->state) & URCU_WAIT_RUNNING)) {
 		if (futex_noasync(&wait->state, FUTEX_WAKE, 1,
 				NULL, NULL, 0) < 0)
 			urcu_die(errno);
 	}
 	/* Allow teardown of struct urcu_wait memory. */
-	uatomic_or(&wait->state, URCU_WAIT_TEARDOWN);
+	uatomic_or_mo(&wait->state, URCU_WAIT_TEARDOWN, CMM_RELEASE);
 }
 
 /*
@@ -137,11 +136,11 @@ void urcu_adaptative_busy_wait(struct urcu_wait_node *wait)
 	/* Load and test condition before read state */
 	cmm_smp_rmb();
 	for (i = 0; i < URCU_WAIT_ATTEMPTS; i++) {
-		if (uatomic_read(&wait->state) != URCU_WAIT_WAITING)
+		if (uatomic_load(&wait->state, CMM_ACQUIRE) != URCU_WAIT_WAITING)
 			goto skip_futex_wait;
 		caa_cpu_relax();
 	}
-	while (uatomic_read(&wait->state) == URCU_WAIT_WAITING) {
+	while (uatomic_load(&wait->state, CMM_ACQUIRE) == URCU_WAIT_WAITING) {
 		if (!futex_noasync(&wait->state, FUTEX_WAIT, URCU_WAIT_WAITING, NULL, NULL, 0)) {
 			/*
 			 * Prior queued wakeups queued by unrelated code
@@ -176,11 +175,11 @@ skip_futex_wait:
 	 * memory allocated for struct urcu_wait.
 	 */
 	for (i = 0; i < URCU_WAIT_ATTEMPTS; i++) {
-		if (uatomic_read(&wait->state) & URCU_WAIT_TEARDOWN)
+		if (uatomic_load(&wait->state, CMM_RELAXED) & URCU_WAIT_TEARDOWN)
 			break;
 		caa_cpu_relax();
 	}
-	while (!(uatomic_read(&wait->state) & URCU_WAIT_TEARDOWN))
+	while (!(uatomic_load(&wait->state, CMM_ACQUIRE) & URCU_WAIT_TEARDOWN))
 		poll(NULL, 0, 10);
 	urcu_posix_assert(uatomic_read(&wait->state) & URCU_WAIT_TEARDOWN);
 }
@@ -196,7 +195,7 @@ void urcu_wake_all_waiters(struct urcu_waiters *waiters)
 			caa_container_of(iter, struct urcu_wait_node, node);
 
 		/* Don't wake already running threads */
-		if (wait_node->state & URCU_WAIT_RUNNING)
+		if (uatomic_load(&wait_node->state, CMM_RELAXED) & URCU_WAIT_RUNNING)
 			continue;
 		urcu_adaptative_wake_up(wait_node);
 	}
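
Note (not part of the patch): a minimal, hypothetical usage sketch of the wait/wake-up pair touched by this diff. It assumes the internal header src/urcu-wait.h is on the include path and that urcu_wait_node_init() takes the initial wait state as its second argument, as the first hunk header suggests; the thread names and the `condition` variable are made up for illustration. What it shows is the pairing introduced by the patch: the waker's CMM_RELEASE store of URCU_WAIT_WAKEUP makes data written before the wake-up visible to the waiter, whose state loads in urcu_adaptative_busy_wait() now use CMM_ACQUIRE.

#include <pthread.h>
#include <stdio.h>

#include "urcu-wait.h"	/* internal liburcu header (src/urcu-wait.h) */

static struct urcu_wait_node wait_node;
static int condition;	/* data published by the waker before waking up */

static void *waiter_thread(void *arg)
{
	(void) arg;
	/*
	 * Spins up to URCU_WAIT_ATTEMPTS times, then sleeps on the futex
	 * until the state leaves URCU_WAIT_WAITING, and finally waits for
	 * URCU_WAIT_TEARDOWN so the waker is done touching wait_node.
	 */
	urcu_adaptative_busy_wait(&wait_node);
	printf("condition = %d\n", condition);	/* expected: 1 */
	return NULL;
}

static void *waker_thread(void *arg)
{
	(void) arg;
	condition = 1;	/* ordered before the CMM_RELEASE store of URCU_WAIT_WAKEUP */
	urcu_adaptative_wake_up(&wait_node);
	return NULL;
}

int main(void)
{
	pthread_t waiter, waker;

	/* Start in the WAITING state that urcu_adaptative_wake_up() asserts. */
	urcu_wait_node_init(&wait_node, URCU_WAIT_WAITING);

	if (pthread_create(&waiter, NULL, waiter_thread, NULL) ||
			pthread_create(&waker, NULL, waker_thread, NULL))
		return 1;
	pthread_join(waiter, NULL);
	pthread_join(waker, NULL);
	return 0;
}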