rculfhash: add ht_add_unique
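
Add ht_add_unique(), an insertion variant that fails with -EEXIST when
a node with the same key is already present, instead of inserting a
duplicate. _ht_add() gains a "unique" argument and a return value, and
compares keys against each non-dummy, non-removed node it traverses
before choosing the insert point.

Related changes in the same patch:

- Replace BUCKET_SIZE_RESIZE_THRESHOLD with CHAIN_LEN_TARGET and
  CHAIN_LEN_RESIZE_THRESHOLD, and count a chain link only once per
  distinct reverse hash when deciding whether to request a resize.
- Track iter_prev_next in both _ht_add() and _ht_remove() so the
  final cmpxchg retries whenever the predecessor's next pointer has
  changed, including when the predecessor itself was flagged removed.
- In _ht_remove(), derive the return value solely from the flagging
  cmpxchg: only the thread that set the removed flag reports success.
- Add a resize_initiated flag to struct rcu_table so that at most one
  lazy resize work item is queued per table.
- Annotate the rare ht_lookup() exit branches with unlikely().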
diff --git a/rculfhash.c b/rculfhash.c
index 497a9eacdebd764771d1e6e2e88a95c019f83724..a5e4d9feed5a0d3ed7ec074727cacb33f9641d23 100644
--- a/rculfhash.c
+++ b/rculfhash.c
@@ -46,7 +46,8 @@
 #define dbg_printf(args...)
 #endif
 
-#define BUCKET_SIZE_RESIZE_THRESHOLD   4
+#define CHAIN_LEN_TARGET               1
+#define CHAIN_LEN_RESIZE_THRESHOLD     2
 
 #ifndef max
 #define max(a, b)      ((a) > (b) ? (a) : (b))
@@ -55,6 +56,7 @@
 struct rcu_table {
        unsigned long size;     /* always a power of 2 */
        unsigned long resize_target;
+       int resize_initiated;
        struct rcu_head head;
        struct rcu_ht_node *tbl[0];
 };
@@ -165,8 +167,9 @@ static
 void check_resize(struct rcu_ht *ht, struct rcu_table *t,
                  uint32_t chain_len)
 {
-       if (chain_len >= BUCKET_SIZE_RESIZE_THRESHOLD)
-               ht_resize_lazy(ht, t, log2_u32(chain_len));
+       if (chain_len >= CHAIN_LEN_RESIZE_THRESHOLD)
+               ht_resize_lazy(ht, t,
+                       log2_u32(chain_len - CHAIN_LEN_TARGET - 1));
 }
 
 static
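
For illustration, a standalone sketch of how the new constants map a
chain length onto the growth order passed to ht_resize_lazy() (the
order is presumably applied as a left shift of the table size when the
resize target is updated). The log2_u32() below is a hypothetical
stand-in for the file's helper, assumed to compute floor(log2(x)) and
to return 0 for an input of 0:

    #include <stdint.h>
    #include <stdio.h>

    #define CHAIN_LEN_TARGET               1
    #define CHAIN_LEN_RESIZE_THRESHOLD     2

    /* Hypothetical stand-in for rculfhash.c's log2_u32(). */
    static int log2_u32(uint32_t x)
    {
            int l = 0;

            while (x >>= 1)
                    l++;
            return l;
    }

    int main(void)
    {
            uint32_t chain_len;

            /* Chains below the threshold never request a resize. */
            for (chain_len = CHAIN_LEN_RESIZE_THRESHOLD; chain_len <= 16;
                 chain_len++)
                    printf("chain_len=%2u -> growth order %d\n",
                            (unsigned) chain_len,
                            log2_u32(chain_len - CHAIN_LEN_TARGET - 1));
            return 0;
    }
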
@@ -202,96 +205,137 @@ unsigned long _uatomic_max(unsigned long *ptr, unsigned long v)
 }
 
 static
-void _ht_add(struct rcu_ht *ht, struct rcu_table *t, struct rcu_ht_node *node)
+int _ht_add(struct rcu_ht *ht, struct rcu_table *t, struct rcu_ht_node *node,
+            int unique)
 {
-       struct rcu_ht_node *iter_prev = NULL, *iter = NULL;
+       struct rcu_ht_node *iter_prev, *iter, *iter_prev_next, *next;
 
        if (!t->size)
-               return;
+               return 0;
        for (;;) {
                uint32_t chain_len = 0;
 
+               /*
+                * iter_prev points to the non-removed node prior to the
+                * insert location.
+                * iter iterates until it finds the next non-removed
+                * node.
+                */
                iter_prev = rcu_dereference(t->tbl[node->hash & (t->size - 1)]);
+               /* We can always skip the dummy node initially */
+               iter_prev_next = next = rcu_dereference(iter_prev->next);
                assert(iter_prev);
                assert(iter_prev->reverse_hash <= node->reverse_hash);
                for (;;) {
-                       iter = clear_flag(rcu_dereference(iter_prev->next));
-                       if (unlikely(!iter))
+                       iter = next;
+                       if (unlikely(!clear_flag(iter)))
                                break;
-                       if (iter->reverse_hash < node->reverse_hash)
+                       next = rcu_dereference(clear_flag(iter)->next);
+                       if (unlikely(is_removed(next)))
+                               continue;
+                       if (unique
+                           && !clear_flag(iter)->dummy
+                           && !ht->compare_fct(node->key, node->key_len,
+                                       clear_flag(iter)->key,
+                                       clear_flag(iter)->key_len))
+                               return -EEXIST;
+                       if (clear_flag(iter)->reverse_hash > node->reverse_hash)
                                break;
-                       iter_prev = iter;
-                       check_resize(ht, t, ++chain_len);
+                       /* Only account for identical reverse hash once */
+                       if (iter_prev->reverse_hash != clear_flag(iter)->reverse_hash)
+                               check_resize(ht, t, ++chain_len);
+                       iter_prev = clear_flag(iter);
+                       iter_prev_next = next;
                }
-               /* add in iter_prev->next */
-               if (is_removed(iter))
-                       continue;
                assert(node != iter);
-               node->next = iter;
+               assert(!is_removed(iter_prev));
                assert(iter_prev != node);
-               if (uatomic_cmpxchg(&iter_prev->next, iter, node) != iter)
+               node->next = iter;
+               if (uatomic_cmpxchg(&iter_prev->next, iter_prev_next,
+                                   node) != iter_prev_next)
                        continue;
-               break;
+               else
+                       break;
        }
+       return 0;
 }
 
 static
 int _ht_remove(struct rcu_ht *ht, struct rcu_table *t, struct rcu_ht_node *node)
 {
-       struct rcu_ht_node *iter_prev, *iter, *next, *old;
+       struct rcu_ht_node *iter_prev, *iter, *iter_prev_next, *next, *old;
        unsigned long chain_len;
-       int found, ret = 0;
+       int found;
        int flagged = 0;
 
 retry:
        chain_len = 0;
        found = 0;
+       /*
+        * iter_prev points to the non-removed node prior to the remove
+        * location.
+        * node is the node to remove.
+        */
        iter_prev = rcu_dereference(t->tbl[node->hash & (t->size - 1)]);
+       /* We can always skip the dummy node initially */
+       iter_prev_next = next = rcu_dereference(iter_prev->next);
        assert(iter_prev);
        assert(iter_prev->reverse_hash <= node->reverse_hash);
        for (;;) {
-               iter = clear_flag(rcu_dereference(iter_prev->next));
-               if (unlikely(!iter))
-                       break;
-               if (iter->reverse_hash < node->reverse_hash)
+               iter = next;
+               if (unlikely(!clear_flag(iter)))
                        break;
+               next = rcu_dereference(clear_flag(iter)->next);
                if (iter == node) {
                        found = 1;
                        break;
                }
-               iter_prev = iter;
+               if (unlikely(is_removed(next)))
+                       continue;
+               if (clear_flag(iter)->reverse_hash > node->reverse_hash)
+                       break;
+               iter_prev = clear_flag(iter);
+               iter_prev_next = next;
        } 
-       if (!found) {
-               ret = -ENOENT;
+       if (!found)
                goto end;
-       }
-       next = rcu_dereference(iter->next);
        if (!flagged) {
-               if (is_removed(next)) {
-                       ret = -ENOENT;
+               if (is_removed(next))
                        goto end;
-               }
                /* set deletion flag */
                if ((old = uatomic_cmpxchg(&iter->next, next,
                                           flag_removed(next))) != next) {
-                       if (old == flag_removed(next)) {
-                               ret = -ENOENT;
+                       if (old == flag_removed(next))
                                goto end;
-                       } else {
+                       else
                                goto retry;
-                       }
                }
                flagged = 1;
        }
        /*
-        * Remove the element from the list. Retry if there has been a
-        * concurrent add (there cannot be a concurrent delete, because
-        * we won the deletion flag cmpxchg).
+        * Remove the element from the list.
+        * - Retry if there has been a concurrent add before us.
+        * - Retry if the prev node has been deleted (its next removed
+        *   flag would be set).
+        * - There cannot be a concurrent delete for our position, because
+        *   we won the deletion flag cmpxchg.
+        * - If there is a concurrent add or remove after us while our
+        *   removed flag is set, it will skip us and link directly after
+        *   the prior non-removed node before us. In this case, the
+        *   retry will not find the node in the list anymore.
         */
-       if (uatomic_cmpxchg(&iter_prev->next, iter, clear_flag(next)) != iter)
+       if (uatomic_cmpxchg(&iter_prev->next, iter_prev_next,
+                           clear_flag(next)) != iter_prev_next)
                goto retry;
 end:
-       return ret;
+       /*
+        * Only the flagging action indicated that we (and no other)
+        * removed the node from the hash.
+        */
+       if (flagged)
+               return 0;
+       else
+               return -ENOENT;
 }
 
 static
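
Both functions above depend on pointer-tagging helpers that this hunk
does not show. A sketch of their assumed shape (the real definitions
live elsewhere in rculfhash.c): the low bit of a node's next pointer
marks that node as logically removed, so is_removed() is tested on a
node's next field, and clear_flag() must be applied before
dereferencing a possibly tagged pointer:

    #define REMOVED_FLAG    (1UL << 0)

    /* Tag a next pointer to mark its node as logically removed. */
    static inline
    struct rcu_ht_node *flag_removed(struct rcu_ht_node *node)
    {
            return (struct rcu_ht_node *) ((unsigned long) node | REMOVED_FLAG);
    }

    /* Strip the tag before dereferencing. */
    static inline
    struct rcu_ht_node *clear_flag(struct rcu_ht_node *node)
    {
            return (struct rcu_ht_node *) ((unsigned long) node & ~REMOVED_FLAG);
    }

    /* Non-zero if the pointer carries the removed tag. */
    static inline
    int is_removed(struct rcu_ht_node *node)
    {
            return ((unsigned long) node) & REMOVED_FLAG;
    }
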
@@ -309,9 +353,10 @@ void init_table(struct rcu_ht *ht, struct rcu_table *t,
                t->tbl[i]->dummy = 1;
                t->tbl[i]->hash = i;
                t->tbl[i]->reverse_hash = bit_reverse_ulong(i);
-               _ht_add(ht, t, t->tbl[i]);
+               (void) _ht_add(ht, t, t->tbl[i], 0);
        }
        t->resize_target = t->size = end;
+       t->resize_initiated = 0;
 }
 
 struct rcu_ht *ht_new(ht_hash_fct hash_fct,
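
init_table() keys each dummy node with the bit-reversed bucket index,
which is what keeps the single ordered list valid across table
doublings (split ordering): when the table grows, each new dummy's
reversed key falls between two existing keys, so chains split in place
without reordering. A standalone illustration over 8-bit values, where
bit_reverse_u8() is a hypothetical stand-in for bit_reverse_ulong():

    #include <stdio.h>

    /* Hypothetical 8-bit stand-in for bit_reverse_ulong(). */
    static unsigned int bit_reverse_u8(unsigned int v)
    {
            unsigned int r = 0;
            int i;

            for (i = 0; i < 8; i++)
                    r |= ((v >> i) & 1) << (7 - i);
            return r;
    }

    int main(void)
    {
            unsigned int i;

            /*
             * Buckets 4..7, added when a 4-bucket table doubles, each
             * land between two existing reversed keys: 0x00, 0x20,
             * 0x40, 0x60, 0x80, 0xa0, 0xc0, 0xe0 for buckets
             * 0, 4, 2, 6, 1, 5, 3, 7.
             */
            for (i = 0; i < 8; i++)
                    printf("bucket %u -> reverse_hash 0x%02x\n",
                            i, bit_reverse_u8(i));
            return 0;
    }
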
@@ -353,12 +398,12 @@ struct rcu_ht_node *ht_lookup(struct rcu_ht *ht, void *key, size_t key_len)
        for (;;) {
                if (unlikely(!node))
                        break;
-               if (node->reverse_hash > reverse_hash) {
+               if (unlikely(node->reverse_hash > reverse_hash)) {
                        node = NULL;
                        break;
                }
                if (!ht->compare_fct(node->key, node->key_len, key, key_len)) {
-                       if (is_removed(rcu_dereference(node->next)))
+                       if (unlikely(is_removed(rcu_dereference(node->next))))
                                node = NULL;
                        break;
                }
@@ -375,7 +420,18 @@ void ht_add(struct rcu_ht *ht, struct rcu_ht_node *node)
        node->reverse_hash = bit_reverse_ulong((unsigned long) node->hash);
 
        t = rcu_dereference(ht->t);
-       _ht_add(ht, t, node);
+       (void) _ht_add(ht, t, node, 0);
+}
+
+int ht_add_unique(struct rcu_ht *ht, struct rcu_ht_node *node)
+{
+       struct rcu_table *t;
+
+       node->hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
+       node->reverse_hash = bit_reverse_ulong((unsigned long) node->hash);
+
+       t = rcu_dereference(ht->t);
+       return _ht_add(ht, t, node, 1);
 }
 
 int ht_remove(struct rcu_ht *ht, struct rcu_ht_node *node)
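
A caller-side sketch of the new API. The enclosing struct, the header
names, and the surrounding RCU read-side critical section are
assumptions for illustration; the patch itself only defines
ht_add_unique():

    #include <errno.h>
    #include <urcu.h>               /* rcu_read_lock/unlock */
    #include "rculfhash.h"          /* assumed header name */

    struct mydata {
            struct rcu_ht_node node;        /* embedded hash table node */
            int payload;
    };

    /* Insert d only if no node with an equal key is already present. */
    static int insert_once(struct rcu_ht *ht, struct mydata *d,
                           void *key, size_t key_len)
    {
            int ret;

            d->node.key = key;
            d->node.key_len = key_len;

            rcu_read_lock();        /* traversal uses rcu_dereference() */
            ret = ht_add_unique(ht, &d->node);
            rcu_read_unlock();

            return ret;             /* 0 on success, -EEXIST on duplicate */
    }

Note that ht_add() keeps its void return: duplicate-tolerant insertion
cannot fail, so _ht_add()'s return value is explicitly cast away there.
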
@@ -474,6 +530,7 @@ void ht_resize(struct rcu_ht *ht, int growth)
 
        target_size = resize_target_update(t, growth);
        if (t->size < target_size) {
+               CMM_STORE_SHARED(t->resize_initiated, 1);
                pthread_mutex_lock(&ht->resize_mutex);
                _do_ht_resize(ht);
                pthread_mutex_unlock(&ht->resize_mutex);
@@ -500,9 +557,10 @@ void ht_resize_lazy(struct rcu_ht *ht, struct rcu_table *t, int growth)
        unsigned long target_size;
 
        target_size = resize_target_update(t, growth);
-       if (t->size < target_size) {
+       if (!CMM_LOAD_SHARED(t->resize_initiated) && t->size < target_size) {
                work = malloc(sizeof(*work));
                work->ht = ht;
                ht->ht_call_rcu(&work->head, do_resize_cb);
+               CMM_STORE_SHARED(t->resize_initiated, 1);
        }
 }
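
Why resize_initiated: every lazy trigger allocates a work item and
queues it through ht_call_rcu(), so without the flag each insertion
that crosses the threshold before the deferred resize runs would queue
another redundant work item. The new table installed by the resize
starts with resize_initiated = 0 (see init_table() above), re-arming
the mechanism. A sketch of the assumed worker side, not shown in this
patch (caa_container_of() and the rcu_resize_work field names are
assumptions):

    static
    void do_resize_cb(struct rcu_head *head)
    {
            struct rcu_resize_work *work =
                    caa_container_of(head, struct rcu_resize_work, head);
            struct rcu_ht *ht = work->ht;

            /* Serialize concurrent resizes, then free the work item. */
            pthread_mutex_lock(&ht->resize_mutex);
            _do_ht_resize(ht);
            pthread_mutex_unlock(&ht->resize_mutex);
            free(work);
    }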