* Assumes that (void *)-2L is not used often. Used to encode non-aligned
* functions and non-aligned data using extra space.
* We encode the (void *)-2L fct as: -2L, fct, data.
- * We encode the (void *)-2L data as: -2L, fct, data.
+ * We encode the (void *)-2L data as either:
+ * fct | DQ_FCT_BIT, data (if fct is aligned), or
+ * -2L, fct, data (if fct is not aligned).
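+ * Example (hypothetical 4-byte-aligned fct == 0x1000): if data happens
+ * to equal DQ_FCT_MARK itself, we queue 0x1001 (fct | DQ_FCT_BIT)
+ * followed by the data; the decoder tests DQ_FCT_BIT first, so the
+ * data word is consumed verbatim and never compared against the mark.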
* Here, DQ_FCT_MARK == ~DQ_FCT_BIT. Required for the test order.
*/
#define DQ_FCT_BIT (1 << 0)
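
/*
 * For readability, a sketch of the companion helpers this excerpt uses
 * (elided above; exact definitions assumed, consistent with their use
 * in the code below):
 */
#define DQ_IS_FCT_BIT(x)	((unsigned long)(x) & DQ_FCT_BIT)
#define DQ_SET_FCT_BIT(x)	\
	(x = (void *)((unsigned long)(x) | DQ_FCT_BIT))
#define DQ_CLEAR_FCT_BIT(x)	\
	(x = (void *)((unsigned long)(x) & ~DQ_FCT_BIT))
#define DQ_FCT_MARK		((void *)(~DQ_FCT_BIT))	/* == (void *)-2L */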
static void wake_up_defer(void)
{
- if (unlikely(uatomic_read(&defer_thread_futex) == -1)) {
+ if (caa_unlikely(uatomic_read(&defer_thread_futex) == -1)) {
uatomic_set(&defer_thread_futex, 0);
futex_noasync(&defer_thread_futex, FUTEX_WAKE, 1,
NULL, NULL, 0);
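/*
 * A futex value of -1 means the defer thread is sleeping (or about to
 * sleep) on it. Resetting it to 0 before FUTEX_WAKE also unblocks a
 * waiter that has not yet called futex_wait(): its compare against -1
 * will fail and it will not go to sleep.
 */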
for (i = queue->tail; i != head;) {
cmm_smp_rmb(); /* read head before q[]. */
p = CMM_LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
- if (unlikely(DQ_IS_FCT_BIT(p))) {
+ if (caa_unlikely(DQ_IS_FCT_BIT(p))) {
DQ_CLEAR_FCT_BIT(p);
queue->last_fct_out = p;
p = CMM_LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
- } else if (unlikely(p == DQ_FCT_MARK)) {
+ } else if (caa_unlikely(p == DQ_FCT_MARK)) {
p = CMM_LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
queue->last_fct_out = p;
p = CMM_LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
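/*
 * Decode note: since DQ_FCT_MARK == ~DQ_FCT_BIT, the marker never has
 * DQ_FCT_BIT set, so testing DQ_IS_FCT_BIT() before the DQ_FCT_MARK
 * comparison can never misclassify a marker; this is the test-order
 * requirement noted at the top. The (elided) remainder of the loop
 * invokes queue->last_fct_out on the decoded data pointer p.
 */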
head = defer_queue.head;
num_items = head - defer_queue.tail;
- if (unlikely(!num_items))
+ if (caa_unlikely(!num_items))
return;
synchronize_rcu();
rcu_defer_barrier_queue(&defer_queue, head);
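/*
 * Note the ordering: head is sampled before synchronize_rcu(), so every
 * callback executed by rcu_defer_barrier_queue() was enqueued before the
 * grace period began; entries enqueued concurrently wait for the next
 * barrier.
 */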
index->last_head = CMM_LOAD_SHARED(index->head);
num_items += index->last_head - index->tail;
}
- if (likely(!num_items)) {
+ if (caa_likely(!num_items)) {
/*
* We skip the grace period because there are no queued
 * callbacks to execute.
 */

/*
 * If the queue is full or has reached the threshold, empty it ourselves.
 * Worst case: the marker encoding needs 2 supplementary entries
 * (DQ_FCT_MARK + fct) in addition to the data pointer.
 */
- if (unlikely(head - tail >= DEFER_QUEUE_SIZE - 2)) {
+ if (caa_unlikely(head - tail >= DEFER_QUEUE_SIZE - 2)) {
assert(head - tail <= DEFER_QUEUE_SIZE);
rcu_defer_barrier_thread();
assert(head - CMM_LOAD_SHARED(defer_queue.tail) == 0);
}
- if (unlikely(defer_queue.last_fct_in != fct)) {
+ /*
+ * Encode:
+ * if the function is not changed and the data is aligned and it is
+ * not the marker:
+ * store the data
+ * otherwise if the function is aligned and it is not the marker:
+ * store the function with DQ_FCT_BIT
+ * store the data
+ * otherwise:
+ * store the marker (DQ_FCT_MARK)
+ * store the function
+ * store the data
+ *
+ * Decode: see the comments before 'struct defer_queue'
+ * or the code in rcu_defer_barrier_queue().
+ */
+ if (caa_unlikely(defer_queue.last_fct_in != fct
+ || DQ_IS_FCT_BIT(p)
+ || p == DQ_FCT_MARK)) {
defer_queue.last_fct_in = fct;
- if (unlikely(DQ_IS_FCT_BIT(fct) || fct == DQ_FCT_MARK)) {
- /*
- * If the function to encode is not aligned or the
- * marker, write DQ_FCT_MARK followed by the function
- * pointer.
- */
+ if (caa_unlikely(DQ_IS_FCT_BIT(fct) || fct == DQ_FCT_MARK)) {
_CMM_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
DQ_FCT_MARK);
_CMM_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
fct);
} else {
DQ_SET_FCT_BIT(fct);
_CMM_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
fct);
}
- } else {
- if (unlikely(DQ_IS_FCT_BIT(p) || p == DQ_FCT_MARK)) {
- /*
- * If the data to encode is not aligned or the marker,
- * write DQ_FCT_MARK followed by the function pointer.
- */
- _CMM_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
- DQ_FCT_MARK);
- _CMM_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK],
- fct);
- }
}
_CMM_STORE_SHARED(defer_queue.q[head++ & DEFER_QUEUE_MASK], p);
cmm_smp_wmb(); /* Publish new pointer before head */
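
/*
 * Caller-side sketch (hypothetical 'struct node' and helper; assumes the
 * public defer_rcu() API wrapping the enqueue path above): the callback
 * runs after a grace period, so the updater never blocks in
 * synchronize_rcu() on the fast path.
 */
static void free_cb(void *p)
{
	free(p);	/* runs once no reader can still hold p */
}

static void node_replace(struct node **slot, struct node *new)
{
	struct node *old;

	old = rcu_xchg_pointer(slot, new);
	if (old)
		defer_rcu(free_cb, old);
}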