ht_compare_fct compare_fct;
unsigned long hash_seed;
pthread_mutex_t resize_mutex; /* resize mutex: add/del mutex */
+ unsigned int in_progress_resize;
void (*ht_call_rcu)(struct rcu_head *head,
void (*func)(struct rcu_head *head));
};
return v;
}
+/*
+ * Remove all logically deleted nodes from a bucket up to a certain node key.
+ */
static
-void _ht_add(struct rcu_ht *ht, struct rcu_table *t, struct rcu_ht_node *node)
+void _ht_gc_bucket(struct rcu_ht_node *dummy, struct rcu_ht_node *node)
{
- struct rcu_ht_node *iter_prev, *iter, *iter_prev_next, *next;
+ struct rcu_ht_node *iter_prev, *iter, *next;
- if (!t->size)
- return;
+ for (;;) {
+ iter_prev = dummy;
+ /* We can always skip the dummy node initially */
+ iter = rcu_dereference(iter_prev->p.next);
+ assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
+ for (;;) {
+ /* End of bucket chain: nothing left to collect. */
+ if (unlikely(!iter))
+ return;
+ /* Past node's position in reverse-hash order: done. */
+ if (clear_flag(iter)->p.reverse_hash > node->p.reverse_hash)
+ return;
+ next = rcu_dereference(clear_flag(iter)->p.next);
+ /* iter carries the removed flag on its next pointer: unlink it. */
+ if (is_removed(next))
+ break;
+ iter_prev = iter;
+ iter = next;
+ }
+ assert(!is_removed(iter));
+ /*
+ * Unlink iter. On cmpxchg failure a concurrent updater changed
+ * the chain first; restart the scan from the dummy head.
+ */
+ (void) uatomic_cmpxchg(&iter_prev->p.next, iter, clear_flag(next));
+ }
+}
+
+/*
+ * Add node into its bucket chain, ordered by bit-reversed hash.
+ * If unique is non-zero and a live (non-dummy, non-removed) node with an
+ * equal key is found, that existing node is returned and node is not
+ * inserted; otherwise node itself is returned.
+ */
+static
+struct rcu_ht_node *_ht_add(struct rcu_ht *ht, struct rcu_table *t,
+ struct rcu_ht_node *node, int unique)
+{
+ struct rcu_ht_node *iter_prev, *dummy, *iter, *next;
+ unsigned long hash;
+
+ if (!t->size) {
+ assert(node->p.dummy);
+ return node; /* Initial first add (head) */
+ }
+ hash = bit_reverse_ulong(node->p.reverse_hash);
 for (;;) {
 uint32_t chain_len = 0;
 /*
 * iter_prev points to the non-removed node prior to the
 * insert location.
- * iter iterates until it finds the next non-removed
- * node.
 */
- iter_prev = rcu_dereference(t->tbl[node->hash & (t->size - 1)]);
+ iter_prev = rcu_dereference(t->tbl[hash & (t->size - 1)]);
 /* We can always skip the dummy node initially */
- iter_prev_next = next = rcu_dereference(iter_prev->next);
- assert(iter_prev);
- assert(iter_prev->reverse_hash <= node->reverse_hash);
+ iter = rcu_dereference(iter_prev->p.next);
+ assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
 for (;;) {
- iter = next;
- if (unlikely(!clear_flag(iter)))
- break;
- next = rcu_dereference(clear_flag(iter)->next);
- if (unlikely(is_removed(next)))
- continue;
- if (clear_flag(iter)->reverse_hash > node->reverse_hash)
- break;
+ if (unlikely(!iter))
+ goto insert;
+ if (clear_flag(iter)->p.reverse_hash > node->p.reverse_hash)
+ goto insert;
+ next = rcu_dereference(clear_flag(iter)->p.next);
+ if (is_removed(next))
+ goto gc_node;
+ /* Unique mode: refuse duplicate keys held by live nodes. */
+ if (unique
+ && !clear_flag(iter)->p.dummy
+ && !ht->compare_fct(node->key, node->key_len,
+ clear_flag(iter)->key,
+ clear_flag(iter)->key_len))
+ return clear_flag(iter);
 /* Only account for identical reverse hash once */
- if (iter_prev->reverse_hash != clear_flag(iter)->reverse_hash)
+ if (iter_prev->p.reverse_hash != clear_flag(iter)->p.reverse_hash)
 check_resize(ht, t, ++chain_len);
 iter_prev = clear_flag(iter);
- iter_prev_next = next;
+ iter = next;
 }
- assert(node != iter);
+ insert:
+ assert(node != clear_flag(iter));
 assert(!is_removed(iter_prev));
 assert(iter_prev != node);
- node->next = iter;
- if (uatomic_cmpxchg(&iter_prev->next, iter_prev_next,
- node) != iter_prev_next)
- continue;
+ node->p.next = iter;
+ /* Link node in; on concurrent chain change, rescan from bucket head. */
+ if (uatomic_cmpxchg(&iter_prev->p.next, iter,
+ node) != iter)
+ continue; /* retry */
 else
- break;
+ goto gc_end;
+ gc_node:
+ /* Help unlink a logically removed node, then rescan the chain. */
+ assert(!is_removed(iter));
+ (void) uatomic_cmpxchg(&iter_prev->p.next, iter, clear_flag(next));
+ /* retry */
 }
+gc_end:
+ /* Garbage collect logically removed nodes in the bucket */
+ dummy = rcu_dereference(t->tbl[hash & (t->size - 1)]);
+ _ht_gc_bucket(dummy, node);
+ return node;
 }
static
int _ht_remove(struct rcu_ht *ht, struct rcu_table *t, struct rcu_ht_node *node)
{
- struct rcu_ht_node *iter_prev, *iter, *iter_prev_next, *next, *old;
- unsigned long chain_len;
- int found;
+ struct rcu_ht_node *dummy, *next, *old;
 int flagged = 0;
+ unsigned long hash;
-retry:
- chain_len = 0;
- found = 0;
- /*
- * iter_prev points to the non-removed node prior to the remove
- * location.
- * node is the node to remove.
- */
- iter_prev = rcu_dereference(t->tbl[node->hash & (t->size - 1)]);
- /* We can always skip the dummy node initially */
- iter_prev_next = next = rcu_dereference(iter_prev->next);
- assert(iter_prev);
- assert(iter_prev->reverse_hash <= node->reverse_hash);
- for (;;) {
- iter = next;
- if (unlikely(!clear_flag(iter)))
- break;
- next = rcu_dereference(clear_flag(iter)->next);
- if (iter == node) {
- found = 1;
- break;
- }
- if (unlikely(is_removed(next)))
- continue;
- if (clear_flag(iter)->reverse_hash > node->reverse_hash)
- break;
- iter_prev = clear_flag(iter);
- iter_prev_next = next;
- }
- if (!found)
- goto end;
- if (!flagged) {
+ /* logically delete the node */
+ old = rcu_dereference(node->p.next);
+ do {
+ next = old;
 if (is_removed(next))
 goto end;
- /* set deletion flag */
- if ((old = uatomic_cmpxchg(&iter->next, next,
- flag_removed(next))) != next) {
- if (old == flag_removed(next))
- goto end;
- else
- goto retry;
- }
- flagged = 1;
- }
+ assert(!node->p.dummy);
+ /*
+ * Publish the removed flag on node's next pointer; retry while
+ * a concurrent updater keeps changing next under us.
+ */
+ old = uatomic_cmpxchg(&node->p.next, next,
+ flag_removed(next));
+ } while (old != next);
+
+ /* We performed the (logical) deletion. */
+ flagged = 1;
+
 /*
- * Remove the element from the list.
- * - Retry if there has been a concurrent add before us.
- * - Retry if the prev node has been deleted (its next removed
- * flag would be set).
- * - There cannot be a concurrent delete for our position, because
- * we won the deletion flag cmpxchg.
- * - If there is a concurrent add or remove after us while our
- * removed flag is set, it will skip us and link directly after
- * the prior non-removed node before us. In this case, the
- * retry will not find the node in the list anymore.
+ * Ensure that the node is not visible to readers anymore: lookup for
+ * the node, and remove it (along with any other logically removed node)
+ * if found.
 */
- if (uatomic_cmpxchg(&iter_prev->next, iter_prev_next,
- clear_flag(next)) != iter_prev_next)
- goto retry;
+ hash = bit_reverse_ulong(node->p.reverse_hash);
+ dummy = rcu_dereference(t->tbl[hash & (t->size - 1)]);
+ _ht_gc_bucket(dummy, node);
end:
 /*
 * Only the flagging action indicated that we (and no other)
 * removed the node from the hash.
 */
- if (flagged)
+ if (flagged) {
+ assert(is_removed(rcu_dereference(node->p.next)));
 return 0;
- else
+ } else
 return -ENOENT;
 }
/* Update table size when power of two */
if (i != 0 && !(i & (i - 1)))
t->size = i;
- t->tbl[i] = calloc(1, sizeof(struct rcu_ht_node));
- t->tbl[i]->dummy = 1;
- t->tbl[i]->hash = i;
- t->tbl[i]->reverse_hash = bit_reverse_ulong(i);
- _ht_add(ht, t, t->tbl[i]);
+ t->tbl[i] = calloc(1, sizeof(struct _rcu_ht_node));
+ t->tbl[i]->p.dummy = 1;
+ t->tbl[i]->p.reverse_hash = bit_reverse_ulong(i);
+ (void) _ht_add(ht, t, t->tbl[i], 0);
}
t->resize_target = t->size = end;
t->resize_initiated = 0;
ht->compare_fct = compare_fct;
ht->hash_seed = hash_seed;
ht->ht_call_rcu = ht_call_rcu;
+ ht->in_progress_resize = 0;
/* this mutex should not nest in read-side C.S. */
pthread_mutex_init(&ht->resize_mutex, NULL);
ht->t = calloc(1, sizeof(struct rcu_table)
for (;;) {
if (unlikely(!node))
break;
- if (unlikely(node->reverse_hash > reverse_hash)) {
+ if (unlikely(node->p.reverse_hash > reverse_hash)) {
node = NULL;
break;
}
- if (!ht->compare_fct(node->key, node->key_len, key, key_len)) {
- if (unlikely(is_removed(rcu_dereference(node->next))))
- node = NULL;
- break;
+ if (likely(!is_removed(rcu_dereference(node->p.next)))
+ && !node->p.dummy
+ && likely(!ht->compare_fct(node->key, node->key_len, key, key_len))) {
+ break;
}
- node = clear_flag(rcu_dereference(node->next));
+ node = clear_flag(rcu_dereference(node->p.next));
}
+ assert(!node || !node->p.dummy);
return node;
}
void ht_add(struct rcu_ht *ht, struct rcu_ht_node *node)
{
 struct rcu_table *t;
+ unsigned long hash;
- node->hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
- node->reverse_hash = bit_reverse_ulong((unsigned long) node->hash);
+ hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
+ /* Only the bit-reversed hash is kept on the node (chain sort key). */
+ node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);
 t = rcu_dereference(ht->t);
- _ht_add(ht, t, node);
+ /* Non-unique add: duplicates allowed, return value is always node. */
+ (void) _ht_add(ht, t, node, 0);
+}
+
+/*
+ * Add node only if no live node with an equal key is present.
+ * Returns node on successful insertion, or the pre-existing node holding
+ * the equal key (in which case node was not inserted).
+ */
+struct rcu_ht_node *ht_add_unique(struct rcu_ht *ht, struct rcu_ht_node *node)
+{
+ struct rcu_table *t;
+ unsigned long hash;
+
+ hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
+ node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);
+
+ t = rcu_dereference(ht->t);
+ return _ht_add(ht, t, node, 1);
 }
int ht_remove(struct rcu_ht *ht, struct rcu_ht_node *node)
/* Check that the table is empty */
node = t->tbl[0];
do {
- if (!node->dummy)
+ if (!node->p.dummy)
return -EPERM;
- node = node->next;
+ node = node->p.next;
+ assert(!is_removed(node));
} while (node);
/* Internal sanity check: all nodes left should be dummy */
for (i = 0; i < t->size; i++) {
- assert(t->tbl[i]->dummy);
+ assert(t->tbl[i]->p.dummy);
free(t->tbl[i]);
}
return 0;
{
int ret;
+ /* Wait for in-flight resize operations to complete */
+ while (uatomic_read(&ht->in_progress_resize))
+ poll(NULL, 0, 100); /* wait for 100ms */
ret = ht_delete_dummy(ht);
if (ret)
return ret;
return ret;
}
+/*
+ * Tally the table's nodes: *count receives the number of live (non-dummy,
+ * non-removed) nodes, *removed the number of logically removed nodes still
+ * linked in the chains. Dummy (bucket head) nodes are counted in neither.
+ */
+void ht_count_nodes(struct rcu_ht *ht,
+ unsigned long *count,
+ unsigned long *removed)
+{
+ struct rcu_table *t;
+ struct rcu_ht_node *node, *next;
+
+ *count = 0;
+ *removed = 0;
+
+ t = rcu_dereference(ht->t);
+ /* Walk the whole table starting at the first bucket head. */
+ node = rcu_dereference(t->tbl[0]);
+ do {
+ next = rcu_dereference(node->p.next);
+ if (is_removed(next)) {
+ /* Dummy nodes are never logically removed. */
+ assert(!node->p.dummy);
+ (*removed)++;
+ } else if (!node->p.dummy)
+ (*count)++;
+ node = clear_flag(next);
+ } while (node);
+}
+
static
void ht_free_table_cb(struct rcu_head *head)
{
_do_ht_resize(ht);
pthread_mutex_unlock(&ht->resize_mutex);
free(work);
+ cmm_smp_mb(); /* finish resize before decrement */
+ uatomic_dec(&ht->in_progress_resize);
}
static
target_size = resize_target_update(t, growth);
if (!CMM_LOAD_SHARED(t->resize_initiated) && t->size < target_size) {
+ uatomic_inc(&ht->in_progress_resize);
+ cmm_smp_mb(); /* increment resize count before calling it */
work = malloc(sizeof(*work));
work->ht = ht;
ht->ht_call_rcu(&work->head, do_resize_cb);