/*
 * NOTE(review): this fragment appears twice back-to-back — likely an
 * extraction artifact; the enclosing function header is not visible in
 * this chunk. De-duplicate against the upstream file before merging.
 *
 * Wake-up protocol: atomically clear the futex word, then wake at most
 * one waiter parked on it. Clearing before FUTEX_WAKE ensures a waiter
 * that races with the wake re-reads the word and does not sleep on a
 * stale value. Presumably the caller only reaches this path after
 * observing the futex in the "waiter parked" state — TODO confirm
 * against the (unseen) surrounding condition.
 */
uatomic_set(&defer_thread_futex, 0);
futex_noasync(&defer_thread_futex, FUTEX_WAKE, 1,
NULL, NULL, 0);
uatomic_set(&defer_thread_futex, 0);
futex_noasync(&defer_thread_futex, FUTEX_WAKE, 1,
NULL, NULL, 0);
/*
 * NOTE(review): garbled fragment — the loop header and several interior
 * lines are duplicated, and the loop body is cut off before its closing
 * brace. Restore from the upstream file rather than editing in place.
 *
 * Intent (as far as the visible code shows): walk the ring buffer from
 * tail up to the previously-sampled head, loading each slot with
 * CMM_LOAD_SHARED after a read barrier so the head read is ordered
 * before the q[] reads. Slots holding a DQ_FCT_MARK sentinel presumably
 * precede an explicit function-pointer entry recorded in
 * last_fct_out — TODO confirm; the dispatch logic is not visible here.
 */
for (i = queue->tail; i != head;) {
cmm_smp_rmb(); /* read head before q[]. */
p = CMM_LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
for (i = queue->tail; i != head;) {
cmm_smp_rmb(); /* read head before q[]. */
p = CMM_LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
p = CMM_LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
/* Cache the decoded callback so later MARK-less entries reuse it. */
queue->last_fct_out = p;
p = CMM_LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
p = CMM_LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
queue->last_fct_out = p;
p = CMM_LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
 * If the queue is full, or has reached the threshold, empty the queue
 * ourselves. Worst case: must allow 2 supplementary entries for the fct
 * pointer.
 */
 * If the queue is full, or has reached the threshold, empty the queue
 * ourselves. Worst case: must allow 2 supplementary entries for the fct
 * pointer.
 */
/*
 * NOTE(review): duplicated fragment (extraction artifact); enclosing
 * function not visible. Invariant checks around a self-drain:
 * unsigned head/tail arithmetic wraps, so (head - tail) is the live
 * entry count and must never exceed the queue capacity. After
 * rcu_defer_barrier_thread() drains our own queue, tail must have
 * caught up to the head we sampled — i.e. the queue is empty again.
 */
assert(head - tail <= DEFER_QUEUE_SIZE);
rcu_defer_barrier_thread();
assert(head - CMM_LOAD_SHARED(defer_queue.tail) == 0);
assert(head - tail <= DEFER_QUEUE_SIZE);
rcu_defer_barrier_thread();
assert(head - CMM_LOAD_SHARED(defer_queue.tail) == 0);
/*
 * NOTE(review): truncated fragment — the stores on the lines ending in
 * a bare comma are missing their second argument and closing paren
 * (presumably the callback pointer, per the DQ_FCT_MARK encoding);
 * restore from the upstream file. Visible intent: publish a
 * DQ_FCT_MARK sentinel slot followed by the next slot into the ring
 * buffer, advancing head with the mask for wraparound.
 */
_CMM_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
DQ_FCT_MARK);
_CMM_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
_CMM_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
DQ_FCT_MARK);
_CMM_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],