From 9357c41599e239897db0cc18e1fbaecd1065ebc0 Mon Sep 17 00:00:00 2001
From: Mathieu Desnoyers
Date: Thu, 22 Sep 2011 05:03:36 -0400
Subject: [PATCH] rculfhash: factor out add_replace and replace

Signed-off-by: Mathieu Desnoyers
---
 rculfhash.c            | 132 ++++++++++++++++++++++++++++++-----------
 tests/test_urcu_hash.c |  11 ++--
 urcu/rculfhash.h       |  57 ++++++++++++++----
 3 files changed, 147 insertions(+), 53 deletions(-)

diff --git a/rculfhash.c b/rculfhash.c
index 8ed9c10..ae72292 100644
--- a/rculfhash.c
+++ b/rculfhash.c
@@ -768,6 +768,80 @@ void _cds_lfht_gc_bucket(struct cds_lfht_node *dummy, struct cds_lfht_node *node
 	return;
 }
 
+static
+int _cds_lfht_replace(struct cds_lfht *ht, unsigned long size,
+		struct cds_lfht_node *old_node,
+		struct cds_lfht_node *ret_next,
+		struct cds_lfht_node *new_node)
+{
+	struct cds_lfht_node *dummy, *old_next;
+	struct _cds_lfht_node *lookup;
+	int flagged = 0;
+	unsigned long hash, index, order;
+
+	if (!old_node)	/* Return -ENOENT if asked to replace NULL node */
+		goto end;
+
+	assert(!is_removed(old_node));
+	assert(!is_dummy(old_node));
+	assert(!is_removed(new_node));
+	assert(!is_dummy(new_node));
+	assert(new_node != old_node);
+	do {
+		/* Insert after node to be replaced */
+		old_next = ret_next;
+		if (is_removed(old_next)) {
+			/*
+			 * Too late, the old node has been removed under us
+			 * between lookup and replace. Fail.
+			 */
+			goto end;
+		}
+		assert(!is_dummy(old_next));
+		assert(new_node != clear_flag(old_next));
+		new_node->p.next = clear_flag(old_next);
+		/*
+		 * Here is the whole trick for lock-free replace: we add
+		 * the replacement node _after_ the node we want to
+		 * replace by atomically setting its next pointer at the
+		 * same time we set its removal flag. Given that
+		 * the lookups/get next use an iterator aware of the
+		 * next pointer, they will either skip the old node due
+		 * to the removal flag and see the new node, or use
+		 * the old node, but will not see the new one.
+		 */
+		ret_next = uatomic_cmpxchg(&old_node->p.next,
+				old_next, flag_removed(new_node));
+	} while (ret_next != old_next);
+
+	/* We performed the replacement. */
+	flagged = 1;
+
+	/*
+	 * Ensure that the old node is not visible to readers anymore:
+	 * lookup for the node, and remove it (along with any other
+	 * logically removed node) if found.
+	 */
+	hash = bit_reverse_ulong(old_node->p.reverse_hash);
+	assert(size > 0);
+	index = hash & (size - 1);
+	order = get_count_order_ulong(index + 1);
+	lookup = &ht->t.tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))];
+	dummy = (struct cds_lfht_node *) lookup;
+	_cds_lfht_gc_bucket(dummy, new_node);
+end:
+	/*
+	 * Only the flagging action indicates that we (and no other)
+	 * replaced the node in the hash table.
+	 */
+	if (flagged) {
+		assert(is_removed(rcu_dereference(old_node->p.next)));
+		return 0;
+	} else {
+		return -ENOENT;
+	}
+}
+
 static
 struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht,
 		unsigned long size,
@@ -852,36 +926,13 @@ struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht,
 		}
 
 replace:
-		/* Insert after node to be replaced */
-		iter_prev = clear_flag(iter);
-		iter = next;
-		assert(node != clear_flag(iter));
-		assert(!is_removed(iter_prev));
-		assert(!is_removed(iter));
-		assert(iter_prev != node);
-		assert(!dummy);
-		node->p.next = clear_flag(iter);
-		if (is_dummy(iter))
-			new_node = flag_dummy(node);
-		else
-			new_node = node;
-		/*
-		 * Here is the whole trick for lock-free replace: we add
-		 * the replacement node _after_ the node we want to
-		 * replace by atomically setting its next pointer at the
-		 * same time we set its removal flag. Given that
-		 * the lookups/get next use an iterator aware of the
-		 * next pointer, they will either skip the old node due
-		 * to the removal flag and see the new node, or use
-		 * the old node, but will not see the new one.
-		 */
-		new_node = flag_removed(new_node);
-		if (uatomic_cmpxchg(&iter_prev->p.next,
-				iter, new_node) != iter) {
-			continue;	/* retry */
+
+		if (!_cds_lfht_replace(ht, size, clear_flag(iter), next,
+				node)) {
+			return_node = clear_flag(iter);
+			goto end;	/* gc already done */
 		} else {
-			return_node = iter_prev;
-			goto gc_end;
+			continue;	/* retry */
 		}
 
 gc_node:
@@ -900,6 +951,7 @@ gc_end:
 	lookup = &ht->t.tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))];
 	dummy_node = (struct cds_lfht_node *) lookup;
 	_cds_lfht_gc_bucket(dummy_node, node);
+end:
 	return return_node;
 }
 
@@ -913,6 +965,9 @@ int _cds_lfht_del(struct cds_lfht *ht, unsigned long size,
 	int flagged = 0;
 	unsigned long hash, index, order;
 
+	if (!node)	/* Return -ENOENT if asked to delete NULL node */
+		goto end;
+
 	/* logically delete the node */
 	assert(!is_dummy(node));
 	assert(!is_removed(node));
@@ -954,8 +1009,9 @@ end:
 	if (flagged) {
 		assert(is_removed(rcu_dereference(node->p.next)));
 		return 0;
-	} else
+	} else {
 		return -ENOENT;
+	}
 }
 
 static
@@ -1364,7 +1420,7 @@ struct cds_lfht_node *cds_lfht_add_unique(struct cds_lfht *ht,
 	return ret;
 }
 
-struct cds_lfht_node *cds_lfht_replace(struct cds_lfht *ht,
+struct cds_lfht_node *cds_lfht_add_replace(struct cds_lfht *ht,
 		struct cds_lfht_node *node)
 {
 	unsigned long hash, size;
@@ -1380,13 +1436,23 @@ struct cds_lfht_node *cds_lfht_replace(struct cds_lfht *ht,
 	return ret;
 }
 
-int cds_lfht_del(struct cds_lfht *ht, struct cds_lfht_node *node)
+int cds_lfht_replace(struct cds_lfht *ht, struct cds_lfht_iter *old_iter,
+		struct cds_lfht_node *new_node)
+{
+	unsigned long size;
+
+	size = rcu_dereference(ht->t.size);
+	return _cds_lfht_replace(ht, size, old_iter->node, old_iter->next,
+			new_node);
+}
+
+int cds_lfht_del(struct cds_lfht *ht, struct cds_lfht_iter *iter)
 {
 	unsigned long size;
 	int ret;
 
 	size = rcu_dereference(ht->t.size);
-	ret = _cds_lfht_del(ht, size, node, 0);
+	ret = _cds_lfht_del(ht, size, iter->node, 0);
 	if (!ret)
 		ht_count_del(ht, size);
 	return ret;
diff --git a/tests/test_urcu_hash.c b/tests/test_urcu_hash.c
index a6139c1..c796b6f 100644
--- a/tests/test_urcu_hash.c
+++ b/tests/test_urcu_hash.c
@@ -523,7 +523,7 @@ void *thr_writer(void *_count)
 			ret_node = cds_lfht_add_unique(test_ht, node);
 		} else {
 			if (add_replace)
-				ret_node = cds_lfht_replace(test_ht, node);
+				ret_node = cds_lfht_add_replace(test_ht, node);
 			else
 				cds_lfht_add(test_ht, node);
 		}
@@ -545,13 +545,10 @@ void *thr_writer(void *_count)
 			cds_lfht_lookup(test_ht,
 				(void *)(((unsigned long) rand_r(&rand_lookup) % write_pool_size) + write_pool_offset),
 				sizeof(void *), &iter);
-			node = cds_lfht_iter_get_node(&iter);
-			if (node)
-				ret = cds_lfht_del(test_ht, node);
-			else
-				ret = -ENOENT;
+			ret = cds_lfht_del(test_ht, &iter);
 			rcu_read_unlock();
 			if (ret == 0) {
+				node = cds_lfht_iter_get_node(&iter);
 				call_rcu(&node->head, free_node_cb);
 				nr_del++;
 			} else
@@ -615,7 +612,7 @@ static int populate_hash(void)
 			ret_node = cds_lfht_add_unique(test_ht, node);
 		} else {
 			if (add_replace)
-				ret_node = cds_lfht_replace(test_ht, node);
+				ret_node = cds_lfht_add_replace(test_ht, node);
 			else
 				cds_lfht_add(test_ht, node);
 		}
diff --git a/urcu/rculfhash.h b/urcu/rculfhash.h
index 4a897fa..860ef3d 100644
--- a/urcu/rculfhash.h
+++ b/urcu/rculfhash.h
@@ -212,13 +212,13 @@ void cds_lfht_add(struct cds_lfht *ht, struct cds_lfht_node *node);
  * The semantic of this function is that if only this function is used
  * to add keys into the table, no duplicated keys should ever be
  * observable in the table. The same guarantee apply for combination of
- * add_unique and replace (see below).
+ * add_unique and add_replace (see below).
  */
 struct cds_lfht_node *cds_lfht_add_unique(struct cds_lfht *ht,
 		struct cds_lfht_node *node);
 
 /*
- * cds_lfht_replace - replace a node within hash table.
+ * cds_lfht_add_replace - replace or add a node within hash table.
  *
  * Return the node replaced upon success. If no node matching the key
  * was present, return NULL, which also means the operation succeeded.
@@ -228,29 +228,60 @@ struct cds_lfht_node *cds_lfht_add_unique(struct cds_lfht *ht,
  * freeing the memory reserved for the returned node.
  *
  * The semantic of replacement vs lookups is the following: if lookups
- * are performed between a key insertion and its removal, we guarantee
- * that the lookups will always find the key if it is replaced
- * concurrently with the lookups.
+ * are performed between a key unique insertion and its removal, we
+ * guarantee that the lookups and get next will always find exactly one
+ * instance of the key if it is replaced concurrently with the lookups.
  *
  * Providing this semantic allows us to ensure that replacement-only
  * schemes will never generate duplicated keys. It also allows us to
- * guarantee that a combination of replacement and add_unique updates
+ * guarantee that a combination of add_replace and add_unique updates
  * will never generate duplicated keys.
  */
-struct cds_lfht_node *cds_lfht_replace(struct cds_lfht *ht,
+struct cds_lfht_node *cds_lfht_add_replace(struct cds_lfht *ht,
 		struct cds_lfht_node *node);
 
 /*
- * cds_lfht_del - remove node from hash table.
+ * cds_lfht_replace - replace a node pointed to by iter within hash table.
  *
- * Return 0 if the node is successfully removed.
- * Node can be looked up with cds_lfht_lookup. RCU read-side lock must
- * be held between lookup and removal.
+ * Return 0 if replacement is successful, negative value otherwise.
+ * Replacing a NULL old node or an already removed node will fail with a
+ * negative value.
+ * Old node can be looked up with cds_lfht_lookup and cds_lfht_next.
+ * RCU read-side lock must be held between lookup and replacement.
+ * Call with rcu_read_lock held.
+ * After successful replacement, a grace period must be waited for before
+ * freeing the memory reserved for the old node (which can be accessed
+ * with cds_lfht_iter_get_node).
+ *
+ * The semantic of replacement vs lookups is the following: if lookups
+ * are performed between a key unique insertion and its removal, we
+ * guarantee that the lookups and get next will always find exactly one
+ * instance of the key if it is replaced concurrently with the lookups.
+ *
+ * Providing this semantic allows us to ensure that replacement-only
+ * schemes will never generate duplicated keys. It also allows us to
+ * guarantee that a combination of add_replace and add_unique updates
+ * will never generate duplicated keys.
+ */
+int cds_lfht_replace(struct cds_lfht *ht, struct cds_lfht_iter *old_iter,
+		struct cds_lfht_node *new_node);
+
+/*
+ * cds_lfht_del - remove node pointed to by iterator from hash table.
+ *
+ * Return 0 if the node is successfully removed, negative value
+ * otherwise.
+ * Deleting a NULL node or an already removed node will fail with a
+ * negative value.
+ * Node can be looked up with cds_lfht_lookup and cds_lfht_next,
+ * and accessed with cds_lfht_iter_get_node.
+ * RCU read-side lock must be held between lookup and removal.
  * Call with rcu_read_lock held.
  * After successful removal, a grace period must be waited for before
- * freeing the memory reserved for node.
+ * freeing the memory reserved for the old node (which can be accessed with
+ * cds_lfht_iter_get_node).
  */
-int cds_lfht_del(struct cds_lfht *ht, struct cds_lfht_node *node);
+int cds_lfht_del(struct cds_lfht *ht, struct cds_lfht_iter *iter);
 
 /*
  * cds_lfht_resize - Force a hash table resize
-- 
2.34.1
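
Usage note (not part of the patch): the sketch below shows how the reworked
calls fit together after this change, mirroring the updated paths in
tests/test_urcu_hash.c above. The helper names update_key/remove_key and the
free_node_cb callback are illustrative only; cds_lfht_add_replace() keys off
the new node itself and returns the node it displaced, while cds_lfht_del()
now takes the iterator filled by a prior cds_lfht_lookup()/cds_lfht_next(),
so the caller fetches the removed node through cds_lfht_iter_get_node()
before handing it to call_rcu(). It also assumes struct cds_lfht_node still
embeds its struct rcu_head "head" member, as the test program relies on.

#include <stddef.h>
#include <urcu.h>		/* rcu_read_lock()/rcu_read_unlock() */
#include <urcu-call-rcu.h>	/* call_rcu(); may already come via urcu.h */
#include <urcu/rculfhash.h>	/* cds_lfht_*() API touched by this patch */

extern void free_node_cb(struct rcu_head *head);	/* caller-provided, as in the test */

/*
 * Insert new_node (already initialized with its key, e.g. with
 * cds_lfht_node_init() as done in test_urcu_hash.c), displacing any
 * node that currently matches that key.
 */
static void update_key(struct cds_lfht *ht, struct cds_lfht_node *new_node)
{
	struct cds_lfht_node *old_node;

	rcu_read_lock();
	/* Returns the displaced node, or NULL if the key was not present. */
	old_node = cds_lfht_add_replace(ht, new_node);
	rcu_read_unlock();
	if (old_node)
		call_rcu(&old_node->head, free_node_cb);	/* free after grace period */
}

/* Remove the node matching key, using the new iterator-based del API. */
static int remove_key(struct cds_lfht *ht, void *key, size_t key_len)
{
	struct cds_lfht_iter iter;
	struct cds_lfht_node *node;
	int ret;

	rcu_read_lock();
	cds_lfht_lookup(ht, key, key_len, &iter);
	ret = cds_lfht_del(ht, &iter);	/* negative value if absent or already removed */
	rcu_read_unlock();
	if (!ret) {
		node = cds_lfht_iter_get_node(&iter);
		call_rcu(&node->head, free_node_cb);
	}
	return ret;
}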