rculfhash: add unique
diff --git a/rculfhash.c b/rculfhash.c
index 8058931b386cf9d75f64173d7ff0e3333d486032..a5e4d9feed5a0d3ed7ec074727cacb33f9641d23 100644
--- a/rculfhash.c
+++ b/rculfhash.c
 #include <stdio.h>
 #include <pthread.h>
 
-#define BUCKET_SIZE_RESIZE_THRESHOLD   32
-#define MAX_NR_BUCKETS                 1048576         /* 1M buckets */
+#define DEBUG          /* Test */
+
+#ifdef DEBUG
+#define dbg_printf(args...)     printf(args)
+#else
+#define dbg_printf(args...)
+#endif
+
+#define CHAIN_LEN_TARGET               1
+#define CHAIN_LEN_RESIZE_THRESHOLD     2
 
 #ifndef max
 #define max(a, b)      ((a) > (b) ? (a) : (b))
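
[Editor's note] The dbg_printf() wrapper added above is the GNU-style named-variadic-macro pattern: with DEBUG defined the call forwards to printf(), and with DEBUG undefined the macro expands to nothing, so the format string, arguments, and any side effects vanish at preprocessing time. Note that the commit leaves DEBUG defined (marked /* Test */), so the tracing added later in this diff is compiled in. A minimal standalone sketch of the pattern:

    #include <stdio.h>

    #define DEBUG          /* comment out to compile the calls away */

    #ifdef DEBUG
    #define dbg_printf(args...)    printf(args)
    #else
    #define dbg_printf(args...)
    #endif

    int main(void)
    {
            /* Prints only when DEBUG is defined; otherwise the entire
             * call, arguments included, disappears before compilation. */
            dbg_printf("resize from %lu to %lu buckets\n", 4UL, 8UL);
            return 0;
    }
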
@@ -47,6 +55,8 @@
 
 struct rcu_table {
        unsigned long size;     /* always a power of 2 */
+       unsigned long resize_target;
+       int resize_initiated;
        struct rcu_head head;
        struct rcu_ht_node *tbl[0];
 };
@@ -54,9 +64,9 @@ struct rcu_table {
 struct rcu_ht {
        struct rcu_table *t;            /* shared */
        ht_hash_fct hash_fct;
-       void *hashseed;
+       ht_compare_fct compare_fct;
+       unsigned long hash_seed;
        pthread_mutex_t resize_mutex;   /* resize mutex: add/del mutex */
-       unsigned long target_size;
        void (*ht_call_rcu)(struct rcu_head *head,
                      void (*func)(struct rcu_head *head));
 };
@@ -66,23 +76,12 @@ struct rcu_resize_work {
        struct rcu_ht *ht;
 };
 
-static
-void ht_resize_lazy(struct rcu_ht *ht, struct rcu_table *t, int growth);
-
-static
-void check_resize(struct rcu_ht *ht, struct rcu_table *t,
-                 unsigned long chain_len)
-{
-       //printf("check resize chain len %lu\n", chain_len);
-       if (chain_len >= BUCKET_SIZE_RESIZE_THRESHOLD)
-               ht_resize_lazy(ht, t, chain_len / BUCKET_SIZE_RESIZE_THRESHOLD);
-}
-
 /*
  * Algorithm to reverse bits in a word by lookup table, extended to
  * 64-bit words.
- * ref.
+ * Source:
  * http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
+ * Originally from Public Domain.
  */
 
 static const uint8_t BitReverseTable256[256] = 
@@ -134,6 +133,45 @@ unsigned long bit_reverse_ulong(unsigned long v)
 #endif
 }
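
[Editor's note] The bit reversal is what makes the table resizable without moving nodes: entries sit in one linked list sorted by bit-reversed hash, in the style of split-ordered lists, so when the bucket count doubles, each new bucket's dummy node lands between two existing ones and every chain splits in place. A toy illustration; reverse8 is a hypothetical stand-in for the table-driven word-width code above:

    #include <stdio.h>
    #include <stdint.h>

    /* Hypothetical 8-bit reversal, enough to show the ordering; the real
     * code uses BitReverseTable256 extended to the full word size. */
    static uint8_t reverse8(uint8_t v)
    {
            uint8_t r = 0;
            int i;

            for (i = 0; i < 8; i++)
                    r |= ((v >> i) & 1) << (7 - i);
            return r;
    }

    int main(void)
    {
            unsigned int i;

            /* Sorted by reversed index, buckets 0..7 appear in the order
             * 0,4,2,6,1,5,3,7: growing from 4 to 8 buckets inserts each
             * new dummy between two existing ones, moving nothing. */
            for (i = 0; i < 8; i++)
                    printf("bucket %u -> reversed 0x%02x\n",
                           i, reverse8((uint8_t)i));
            return 0;
    }
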
 
+/*
+ * Algorithm to find the log2 of a 32-bit unsigned integer.
+ * source: http://graphics.stanford.edu/~seander/bithacks.html#IntegerLogLookup
+ * Originally from Public Domain.
+ */
+static const char LogTable256[256] = 
+{
+#define LT(n) n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n
+       -1, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
+       LT(4), LT(5), LT(5), LT(6), LT(6), LT(6), LT(6),
+       LT(7), LT(7), LT(7), LT(7), LT(7), LT(7), LT(7), LT(7)
+};
+
+uint32_t log2_u32(uint32_t v)
+{
+       uint32_t t, tt;
+
+       if ((tt = (v >> 16)))
+               return (t = (tt >> 8))
+                               ? 24 + LogTable256[t]
+                               : 16 + LogTable256[tt];
+       else
+               return (t = (v >> 8))
+                               ? 8 + LogTable256[t]
+                               : LogTable256[v];
+}
+
+static
+void ht_resize_lazy(struct rcu_ht *ht, struct rcu_table *t, int growth);
+
+static
+void check_resize(struct rcu_ht *ht, struct rcu_table *t,
+                 uint32_t chain_len)
+{
+       if (chain_len >= CHAIN_LEN_RESIZE_THRESHOLD)
+               ht_resize_lazy(ht, t,
+                       log2_u32(chain_len - CHAIN_LEN_TARGET - 1));
+}
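
[Editor's note] Worked numbers for the new trigger, with CHAIN_LEN_TARGET = 1 and CHAIN_LEN_RESIZE_THRESHOLD = 2: a chain of length 4 requests growth order log2_u32(4 - 1 - 1) = 1 (double the table), length 6 gives order 2, length 10 gives order 3. One caveat worth noting: chain_len == 2 meets the >= threshold yet feeds log2_u32(0), which the lookup table maps to -1, so the meaningful growth requests start at chain_len 3. A standalone sketch reproducing the computation:

    #include <stdio.h>
    #include <stdint.h>

    #define CHAIN_LEN_TARGET               1
    #define CHAIN_LEN_RESIZE_THRESHOLD     2

    static const char LogTable256[256] = {
    #define LT(n) n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n
            -1, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
            LT(4), LT(5), LT(5), LT(6), LT(6), LT(6), LT(6),
            LT(7), LT(7), LT(7), LT(7), LT(7), LT(7), LT(7), LT(7)
    };

    static uint32_t log2_u32(uint32_t v)
    {
            uint32_t t, tt;

            if ((tt = (v >> 16)))
                    return (t = (tt >> 8)) ? 24 + LogTable256[t]
                                           : 16 + LogTable256[tt];
            else
                    return (t = (v >> 8)) ? 8 + LogTable256[t]
                                          : LogTable256[v];
    }

    int main(void)
    {
            uint32_t chain_len;

            /* Growth order per chain length: 3 -> 0 (no-op),
             * 4 -> 1 (double), 6 -> 2 (quadruple), 10 -> 3. */
            for (chain_len = CHAIN_LEN_RESIZE_THRESHOLD + 1;
                 chain_len <= 10; chain_len++)
                    printf("chain_len %u -> growth order %u\n", chain_len,
                           log2_u32(chain_len - CHAIN_LEN_TARGET - 1));
            return 0;
    }
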
+
 static
 struct rcu_ht_node *clear_flag(struct rcu_ht_node *node)
 {
@@ -153,7 +191,7 @@ struct rcu_ht_node *flag_removed(struct rcu_ht_node *node)
 }
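
[Editor's note] clear_flag(), flag_removed(), and is_removed() (their bodies are mostly elided from this diff) rely on the standard low-bit pointer-tagging trick: node addresses are at least word-aligned, so bit 0 of a next pointer is free to carry the "logically removed" mark. A sketch of the shape these helpers take; the _sketch suffix marks them as illustrations, not the file's definitions:

    #include <stdint.h>

    #define REMOVED_FLAG   0x1UL

    /* Bit 0 of an aligned pointer is always zero, so it can carry the
     * logical-removal mark without widening the node. */
    static inline void *flag_removed_sketch(void *p)
    {
            return (void *)((uintptr_t)p | REMOVED_FLAG);
    }

    static inline void *clear_flag_sketch(void *p)
    {
            return (void *)((uintptr_t)p & ~REMOVED_FLAG);
    }

    static inline int is_removed_sketch(void *p)
    {
            return (int)((uintptr_t)p & REMOVED_FLAG);
    }
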
 
 static
-void _uatomic_max(unsigned long *ptr, unsigned long v)
+unsigned long _uatomic_max(unsigned long *ptr, unsigned long v)
 {
        unsigned long old1, old2;
 
@@ -161,103 +199,143 @@ void _uatomic_max(unsigned long *ptr, unsigned long v)
        do {
                old2 = old1;
                if (old2 >= v)
-                       break;
+                       return old2;
        } while ((old1 = uatomic_cmpxchg(ptr, old2, v)) != old2);
+       return v;
 }
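
[Editor's note] _uatomic_max() changes from void to returning the resulting maximum, which lets resize_target_update() below hand its caller the final target size after any race is resolved. For readers unfamiliar with the urcu uatomic_cmpxchg() API, here is a hedged equivalent using C11 atomics (the original operates on a plain unsigned long updated only through uatomic primitives):

    #include <stdatomic.h>

    /* Atomically raise *ptr to at least v; return the resulting maximum. */
    static unsigned long atomic_max_c11(_Atomic unsigned long *ptr,
                                        unsigned long v)
    {
            unsigned long old = atomic_load(ptr);

            do {
                    if (old >= v)
                            return old;     /* already large enough */
                    /* on CAS failure, old is reloaded with the current value */
            } while (!atomic_compare_exchange_weak(ptr, &old, v));
            return v;
    }
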
 
 static
-void _ht_add(struct rcu_ht *ht, struct rcu_table *t, struct rcu_ht_node *node)
+int _ht_add(struct rcu_ht *ht, struct rcu_table *t, struct rcu_ht_node *node,
+            int unique)
 {
-       struct rcu_ht_node *iter_prev = NULL, *iter = NULL;
+       struct rcu_ht_node *iter_prev, *iter, *iter_prev_next, *next;
 
        if (!t->size)
-               return;
+               return 0;
        for (;;) {
-               unsigned long chain_len = 0;
-
+               uint32_t chain_len = 0;
+
+               /*
+                * iter_prev points to the non-removed node prior to the
+                * insert location.
+                * iter iterates until it finds the next non-removed
+                * node.
+                */
                iter_prev = rcu_dereference(t->tbl[node->hash & (t->size - 1)]);
-               //printf("iter prev %p hash %lu bucket %lu\n", iter_prev,
-               //      node->hash, node->hash & (t->size - 1));
+               /* We can always skip the dummy node initially */
+               iter_prev_next = next = rcu_dereference(iter_prev->next);
                assert(iter_prev);
                assert(iter_prev->reverse_hash <= node->reverse_hash);
                for (;;) {
-                       iter = clear_flag(rcu_dereference(iter_prev->next));
-                       if (unlikely(!iter))
+                       iter = next;
+                       if (unlikely(!clear_flag(iter)))
                                break;
-                       if (iter->reverse_hash < node->reverse_hash)
+                       next = rcu_dereference(clear_flag(iter)->next);
+                       if (unlikely(is_removed(next)))
+                               continue;
+                       if (unique
+                           && !clear_flag(iter)->dummy
+                           && !ht->compare_fct(node->key, node->key_len,
+                                       clear_flag(iter)->key,
+                                       clear_flag(iter)->key_len))
+                               return -EEXIST;
+                       if (clear_flag(iter)->reverse_hash > node->reverse_hash)
                                break;
-                       iter_prev = iter;
-                       check_resize(ht, t, ++chain_len);
+                       /* Only account for identical reverse hash once */
+                       if (iter_prev->reverse_hash != clear_flag(iter)->reverse_hash)
+                               check_resize(ht, t, ++chain_len);
+                       iter_prev = clear_flag(iter);
+                       iter_prev_next = next;
                }
-               /* add in iter_prev->next */
-               if (is_removed(iter))
-                       continue;
                assert(node != iter);
-               node->next = iter;
+               assert(!is_removed(iter_prev));
                assert(iter_prev != node);
-               if (uatomic_cmpxchg(&iter_prev->next, iter, node) != iter)
+               node->next = iter;
+               if (uatomic_cmpxchg(&iter_prev->next, iter_prev_next,
+                                   node) != iter_prev_next)
                        continue;
-               break;
+               else
+                       break;
        }
+       return 0;
 }
 
 static
 int _ht_remove(struct rcu_ht *ht, struct rcu_table *t, struct rcu_ht_node *node)
 {
-       struct rcu_ht_node *iter_prev, *iter, *next, *old;
+       struct rcu_ht_node *iter_prev, *iter, *iter_prev_next, *next, *old;
        unsigned long chain_len;
-       int found, ret = 0;
+       int found;
        int flagged = 0;
 
 retry:
        chain_len = 0;
        found = 0;
+       /*
+        * iter_prev points to the non-removed node prior to the remove
+        * location.
+        * node is the node to remove.
+        */
        iter_prev = rcu_dereference(t->tbl[node->hash & (t->size - 1)]);
+       /* We can always skip the dummy node initially */
+       iter_prev_next = next = rcu_dereference(iter_prev->next);
        assert(iter_prev);
        assert(iter_prev->reverse_hash <= node->reverse_hash);
        for (;;) {
-               iter = clear_flag(rcu_dereference(iter_prev->next));
-               if (unlikely(!iter))
-                       break;
-               if (iter->reverse_hash < node->reverse_hash)
+               iter = next;
+               if (unlikely(!clear_flag(iter)))
                        break;
+               next = rcu_dereference(clear_flag(iter)->next);
                if (iter == node) {
                        found = 1;
                        break;
                }
-               iter_prev = iter;
+               if (unlikely(is_removed(next)))
+                       continue;
+               if (clear_flag(iter)->reverse_hash > node->reverse_hash)
+                       break;
+               iter_prev = clear_flag(iter);
+               iter_prev_next = next;
        } 
-       if (!found) {
-               ret = -ENOENT;
+       if (!found)
                goto end;
-       }
-       next = rcu_dereference(iter->next);
        if (!flagged) {
-               if (is_removed(next)) {
-                       ret = -ENOENT;
+               if (is_removed(next))
                        goto end;
-               }
                /* set deletion flag */
                if ((old = uatomic_cmpxchg(&iter->next, next,
                                           flag_removed(next))) != next) {
-                       if (old == flag_removed(next)) {
-                               ret = -ENOENT;
+                       if (old == flag_removed(next))
                                goto end;
-                       } else {
+                       else
                                goto retry;
-                       }
                }
                flagged = 1;
        }
        /*
-        * Remove the element from the list. Retry if there has been a
-        * concurrent add (there cannot be a concurrent delete, because
-        * we won the deletion flag cmpxchg).
+        * Remove the element from the list.
+        * - Retry if there has been a concurrent add before us.
+        * - Retry if the prev node has been deleted (its next removed
+        *   flag would be set).
+        * - There cannot be a concurrent delete for our position, because
+        *   we won the deletion flag cmpxchg.
+        * - If there is a concurrent add or remove after us while our
+        *   removed flag is set, it will skip us and link directly after
+        *   the prior non-removed node before us. In this case, the
+        *   retry will not find the node in the list anymore.
         */
-       if (uatomic_cmpxchg(&iter_prev->next, iter, clear_flag(next)) != iter)
+       if (uatomic_cmpxchg(&iter_prev->next, iter_prev_next,
+                           clear_flag(next)) != iter_prev_next)
                goto retry;
 end:
-       return ret;
+       /*
+        * Only the flagging action indicated that we (and no other)
+        * removed the node from the hash.
+        */
+       if (flagged)
+               return 0;
+       else
+               return -ENOENT;
 }
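
[Editor's note] The comment block above carries the key argument: winning the flag cmpxchg makes this thread the one and only logical remover, after which the physical unlink can be retried safely because concurrent operations link past flagged nodes. A condensed, hypothetical rendering of the two phases; GCC __sync builtins stand in for uatomic_cmpxchg, and the real function additionally tracks iter_prev_next and re-traverses the chain on failure:

    #include <stdint.h>

    struct lf_node {
            struct lf_node *next;   /* bit 0 carries the removed flag */
    };

    #define FLAG(p)         ((struct lf_node *)((uintptr_t)(p) | 1UL))
    #define UNFLAG(p)       ((struct lf_node *)((uintptr_t)(p) & ~1UL))

    /* Returns 0 when this caller performed the logical removal,
     * -1 when it lost a race and must re-traverse. */
    static int two_phase_remove(struct lf_node *prev, struct lf_node *node)
    {
            struct lf_node *next = node->next;

            /* Phase 1: logical removal. At most one thread wins this CAS,
             * so double-removal is ruled out by construction. */
            if (!__sync_bool_compare_and_swap(&node->next, next, FLAG(next)))
                    return -1;

            /* Phase 2: physical unlink. On failure the file above retries
             * the traversal; if another thread already linked past the
             * flagged node, the retry simply no longer finds it. */
            (void) __sync_bool_compare_and_swap(&prev->next, node,
                                                UNFLAG(next));
            return 0;
    }
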
 
 static
@@ -275,13 +353,15 @@ void init_table(struct rcu_ht *ht, struct rcu_table *t,
                t->tbl[i]->dummy = 1;
                t->tbl[i]->hash = i;
                t->tbl[i]->reverse_hash = bit_reverse_ulong(i);
-               _ht_add(ht, t, t->tbl[i]);
+               (void) _ht_add(ht, t, t->tbl[i], 0);
        }
-       t->size = end;
+       t->resize_target = t->size = end;
+       t->resize_initiated = 0;
 }
 
 struct rcu_ht *ht_new(ht_hash_fct hash_fct,
-                     void *hashseed,
+                     ht_compare_fct compare_fct,
+                     unsigned long hash_seed,
                      unsigned long init_size,
                      void (*ht_call_rcu)(struct rcu_head *head,
                                void (*func)(struct rcu_head *head)))
@@ -290,7 +370,8 @@ struct rcu_ht *ht_new(ht_hash_fct hash_fct,
 
        ht = calloc(1, sizeof(struct rcu_ht));
        ht->hash_fct = hash_fct;
-       ht->hashseed = hashseed;
+       ht->compare_fct = compare_fct;
+       ht->hash_seed = hash_seed;
        ht->ht_call_rcu = ht_call_rcu;
        /* this mutex should not nest in read-side C.S. */
        pthread_mutex_init(&ht->resize_mutex, NULL);
@@ -300,17 +381,16 @@ struct rcu_ht *ht_new(ht_hash_fct hash_fct,
        pthread_mutex_lock(&ht->resize_mutex);
        init_table(ht, ht->t, 0, max(init_size, 1));
        pthread_mutex_unlock(&ht->resize_mutex);
-       ht->target_size = ht->t->size;
        return ht;
 }
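
[Editor's note] With the hashseed pointer gone, the key travels with every call: the hash callback receives (key, key_len, seed) and the compare callback receives both keys with their lengths, returning 0 on a match, memcmp-style, as the !ht->compare_fct(...) tests below imply. A hypothetical setup sketch: demo_hash and demo_compare are stand-ins, their return types are guesses, and the exact ht_hash_fct/ht_compare_fct typedefs live in the header, not in this diff:

    #include <string.h>

    /* Hypothetical callbacks shaped after how this file invokes them. */
    static unsigned long demo_hash(void *key, size_t key_len,
                                   unsigned long seed)
    {
            unsigned long h = seed;
            size_t i;

            for (i = 0; i < key_len; i++)   /* toy mixing, not a real hash */
                    h = (h ^ ((unsigned char *)key)[i]) * 0x9e3779b9UL;
            return h;
    }

    static unsigned long demo_compare(void *key1, size_t key1_len,
                                      void *key2, size_t key2_len)
    {
            if (key1_len != key2_len)
                    return 1;               /* different: non-zero */
            return !!memcmp(key1, key2, key1_len);
    }

    /* struct rcu_ht *ht = ht_new(demo_hash, demo_compare, 0x42UL, 1024,
     *                            call_rcu);   -- call_rcu from liburcu */
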
 
-struct rcu_ht_node *ht_lookup(struct rcu_ht *ht, void *key)
+struct rcu_ht_node *ht_lookup(struct rcu_ht *ht, void *key, size_t key_len)
 {
        struct rcu_table *t;
        struct rcu_ht_node *node;
        unsigned long hash, reverse_hash;
 
-       hash = ht->hash_fct(ht->hashseed, key);
+       hash = ht->hash_fct(key, key_len, ht->hash_seed);
        reverse_hash = bit_reverse_ulong(hash);
 
        t = rcu_dereference(ht->t);
@@ -318,12 +398,12 @@ struct rcu_ht_node *ht_lookup(struct rcu_ht *ht, void *key)
        for (;;) {
                if (unlikely(!node))
                        break;
-               if (node->reverse_hash > reverse_hash) {
+               if (unlikely(node->reverse_hash > reverse_hash)) {
                        node = NULL;
                        break;
                }
-               if (node->key == key) {
-                       if (is_removed(rcu_dereference(node->next)))
+               if (!ht->compare_fct(node->key, node->key_len, key, key_len)) {
+                       if (unlikely(is_removed(rcu_dereference(node->next))))
                                node = NULL;
                        break;
                }
@@ -336,11 +416,22 @@ void ht_add(struct rcu_ht *ht, struct rcu_ht_node *node)
 {
        struct rcu_table *t;
 
-       node->hash = ht->hash_fct(ht->hashseed, node->key);
+       node->hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
+       node->reverse_hash = bit_reverse_ulong((unsigned long) node->hash);
+
+       t = rcu_dereference(ht->t);
+       (void) _ht_add(ht, t, node, 0);
+}
+
+int ht_add_unique(struct rcu_ht *ht, struct rcu_ht_node *node)
+{
+       struct rcu_table *t;
+
+       node->hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
        node->reverse_hash = bit_reverse_ulong((unsigned long) node->hash);
 
        t = rcu_dereference(ht->t);
-       _ht_add(ht, t, node);
+       return _ht_add(ht, t, node, 1);
 }
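
[Editor's note] ht_add() keeps its void return and tolerates duplicate keys, while the new ht_add_unique() surfaces the -EEXIST from _ht_add(). A hedged usage sketch; it assumes the declarations from the accompanying header and that, as the rcu_dereference() calls in this file suggest, the caller holds an RCU read-side critical section:

    #include <errno.h>
    #include <urcu.h>
    #include "rculfhash.h"  /* assumed header declaring these APIs */

    static void demo_insert(struct rcu_ht *ht, struct rcu_ht_node *node)
    {
            int ret;

            rcu_read_lock();
            ret = ht_add_unique(ht, node);  /* node->key/key_len already set */
            rcu_read_unlock();
            if (ret == -EEXIST) {
                    /* An equal key was already present; node was not
                     * linked, so the caller still owns it. */
            }
    }
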
 
 int ht_remove(struct rcu_ht *ht, struct rcu_ht_node *node)
@@ -405,12 +496,12 @@ void _do_ht_resize(struct rcu_ht *ht)
        unsigned long new_size, old_size;
        struct rcu_table *new_t, *old_t;
 
-       //return; //TEST
-
        old_t = ht->t;
        old_size = old_t->size;
 
-       new_size = CMM_LOAD_SHARED(ht->target_size);
+       new_size = CMM_LOAD_SHARED(old_t->resize_target);
+       dbg_printf("rculfhash: resize from %lu to %lu buckets\n",
+                  old_size, new_size);
        if (old_size == new_size)
                return;
        new_t = malloc(sizeof(struct rcu_table)
@@ -419,31 +510,31 @@ void _do_ht_resize(struct rcu_ht *ht)
        memcpy(&new_t->tbl, &old_t->tbl,
               old_size * sizeof(struct rcu_ht_node *));
        init_table(ht, new_t, old_size, new_size - old_size);
-       new_t->size = new_size;
        /* Changing table and size atomically wrt lookups */
        rcu_assign_pointer(ht->t, new_t);
        ht->ht_call_rcu(&old_t->head, ht_free_table_cb);
 }
 
 static
-void resize_target_update(struct rcu_ht *ht, struct rcu_table *t,
-                         int growth_order)
+unsigned long resize_target_update(struct rcu_table *t,
+                                  int growth_order)
 {
-       unsigned long new_size = t->size << growth_order;
-
-       if (new_size > MAX_NR_BUCKETS)
-               new_size = MAX_NR_BUCKETS;
-       //printf("resize update prevtarget %lu current %lu order %d\n",
-       //      ht->target_size, t->size, growth_order);
-       _uatomic_max(&ht->target_size, new_size);
+       return _uatomic_max(&t->resize_target,
+                           t->size << growth_order);
 }
 
 void ht_resize(struct rcu_ht *ht, int growth)
 {
-       resize_target_update(ht, rcu_dereference(ht->t), growth);
-       pthread_mutex_lock(&ht->resize_mutex);
-       _do_ht_resize(ht);
-       pthread_mutex_unlock(&ht->resize_mutex);
+       struct rcu_table *t = rcu_dereference(ht->t);
+       unsigned long target_size;
+
+       target_size = resize_target_update(t, growth);
+       if (t->size < target_size) {
+               CMM_STORE_SHARED(t->resize_initiated, 1);
+               pthread_mutex_lock(&ht->resize_mutex);
+               _do_ht_resize(ht);
+               pthread_mutex_unlock(&ht->resize_mutex);
+       }
 }
 
 static
@@ -463,9 +554,13 @@ static
 void ht_resize_lazy(struct rcu_ht *ht, struct rcu_table *t, int growth)
 {
        struct rcu_resize_work *work;
+       unsigned long target_size;
 
-       work = malloc(sizeof(*work));
-       work->ht = ht;
-       resize_target_update(ht, t, growth);
-       ht->ht_call_rcu(&work->head, do_resize_cb);
+       target_size = resize_target_update(t, growth);
+       if (!CMM_LOAD_SHARED(t->resize_initiated) && t->size < target_size) {
+               work = malloc(sizeof(*work));
+               work->ht = ht;
+               ht->ht_call_rcu(&work->head, do_resize_cb);
+               CMM_STORE_SHARED(t->resize_initiated, 1);
+       }
 }