Update urcu-ht
index e598e4533e01657c0e13f5b8158dd613a986b399..b6a13018d160ccd8b605bb16482437db8e362037 100644 (file)
--- a/urcu-ht.c
+++ b/urcu-ht.c
@@ -5,17 +5,28 @@
 
 #define _LGPL_SOURCE
 #include <stdlib.h>
+#include <errno.h>
+#include <assert.h>
+#include <stdio.h>
+
 #include <urcu.h>
+#include <urcu-defer.h>
 #include <arch.h>
 #include <arch_atomic.h>
-#include <assert.h>
 #include <compiler.h>
-#include <urcu-defer.h>
-#include <errno.h>
-#include <urcu-ht.h>
 #include <urcu/jhash.h>
 #include <stdio.h>
 #include <pthread.h>
+#include <urcu-ht.h>
+
+/*
+ * Maximum number of hash table buckets: 32K on 64-bit (256KB of bucket
+ * pointers). Should take about 512KB max if we assume 1 node per 4 buckets.
+ */
+#define MAX_HT_BUCKETS ((256 << 10) / sizeof(void *))
+
+/* node flags */
+#define        NODE_STOLEN     (1 << 0)
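
For concreteness, here is the bucket arithmetic on an LP64 target (8-byte pointers), as a standalone sanity check of the constants above; the ~32-byte node size is an assumption based on the struct below:

#include <stdio.h>

#define MAX_HT_BUCKETS ((256 << 10) / sizeof(void *))

int main(void)
{
	/* (256 << 10) == 262144; on LP64 that is 262144 / 8 == 32768
	 * buckets, i.e. 256KB of bucket pointers. With 1 node per 4
	 * buckets and ~32-byte nodes, nodes add roughly another 256KB. */
	printf("max buckets: %zu\n", MAX_HT_BUCKETS);
	printf("bucket array: %zu bytes\n", MAX_HT_BUCKETS * sizeof(void *));
	return 0;
}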
 
 struct rcu_ht_node;
 
@@ -23,6 +34,7 @@ struct rcu_ht_node {
        struct rcu_ht_node *next;
        void *key;
        void *data;
+       unsigned int flags;
 };
 
 struct rcu_ht {
@@ -45,12 +57,12 @@ struct rcu_ht *ht_new(ht_hash_fct hash_fct, void (*free_fct)(void *data),
        ht = calloc(1, sizeof(struct rcu_ht));
        ht->hash_fct = hash_fct;
        ht->free_fct = free_fct;
-       ht->size = init_size;
+       ht->size = init_size;   /* shared */
        ht->keylen = keylen;
        ht->hashseed = hashseed;
        /* this mutex should not nest in read-side C.S. */
        pthread_mutex_init(&ht->resize_mutex, NULL);
-       ht->resize_ongoing = 0;
+       ht->resize_ongoing = 0; /* shared */
        ht->tbl = calloc(init_size, sizeof(struct rcu_ht_node *));
        return ht;
 }
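
A hypothetical setup call, for illustration. Only hash_fct and free_fct are visible in the hunk header, so the order of the remaining parameters (init_size, keylen, hashseed) is inferred from the assignments above, and the ht_hash_fct prototype is assumed from its call sites:

#include <stdlib.h>
#include <urcu-ht.h>

/* Toy hash for illustration only; a real one would build on urcu/jhash.h. */
static unsigned long my_hash(void *key, int keylen, int hashseed)
{
	return ((unsigned long)key >> 4) ^ (unsigned long)hashseed;
}

static struct rcu_ht *setup_ht(void)
{
	/* 16 initial buckets, fixed-size keys, arbitrary seed. */
	return ht_new(my_hash, free, 16, sizeof(unsigned long), 42);
}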
@@ -96,6 +108,7 @@ int ht_add(struct rcu_ht *ht, void *key, void *data)
        new_head = calloc(1, sizeof(struct rcu_ht_node));
        new_head->key = key;
        new_head->data = data;
+       new_head->flags = 0;
        /* here comes the fun and tricky part.
         * Add at the beginning with a cmpxchg.
         * Hold a read lock between the moment the first element is read
@@ -107,7 +120,7 @@ int ht_add(struct rcu_ht *ht, void *key, void *data)
 retry:
        rcu_read_lock();
 
-       if (unlikely(ht->resize_ongoing)) {
+       if (unlikely(LOAD_SHARED(ht->resize_ongoing))) {
                rcu_read_unlock();
                /*
                 * Wait for resize to complete before continuing.
@@ -119,7 +132,8 @@ retry:
                goto retry;
        }
 
-       hash = ht->hash_fct(key, ht->keylen, ht->hashseed) % ht->size;
+       hash = ht->hash_fct(key, ht->keylen, ht->hashseed)
+               % LOAD_SHARED(ht->size);
 
        old_head = node = rcu_dereference(ht->tbl[hash]);
        for (;;) {
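
Both ht_add() and ht_steal() now open with the same guard. The full retry pattern looks like the sketch below; the mutex lock/unlock pair falls outside the hunks shown, so that part is an assumption, but it matches the "wait for resize to complete" comments and the fact that the resizer holds resize_mutex for the whole operation:

retry:
	rcu_read_lock();
	if (unlikely(LOAD_SHARED(ht->resize_ongoing))) {
		rcu_read_unlock();
		/* Blocking on resize_mutex waits for the resize to finish;
		 * it must not be taken inside a read-side C.S. */
		pthread_mutex_lock(&ht->resize_mutex);
		pthread_mutex_unlock(&ht->resize_mutex);
		goto retry;
	}
	hash = ht->hash_fct(key, ht->keylen, ht->hashseed)
		% LOAD_SHARED(ht->size);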
@@ -148,8 +162,14 @@ restart:
 /*
  * Restart until we successfully remove the entry, or no entry is left
  * ((void *)(unsigned long)-ENOENT).
- * Deal with concurrent stealers by verifying that there are no element
- * in the list still pointing to the element stolen. (del_node)
+ * Deal with concurrent stealers by doing an extra verification pass to check
+ * that no element in the list still points to the stolen element.
+ * This can happen if two concurrent steals of consecutive objects are
+ * executed: a pointer to the object being stolen could have been saved by
+ * the concurrent stealer of the previous object.
+ * Also, given that in this precise scenario another stealer may also want to
+ * delete the doubly-referenced object, use a "stolen" flag to let only one
+ * stealer delete the object.
  */
 void *ht_steal(struct rcu_ht *ht, void *key)
 {
@@ -161,7 +181,7 @@ void *ht_steal(struct rcu_ht *ht, void *key)
 retry:
        rcu_read_lock();
 
-       if (unlikely(ht->resize_ongoing)) {
+       if (unlikely(LOAD_SHARED(ht->resize_ongoing))) {
                rcu_read_unlock();
                /*
                 * Wait for resize to complete before continuing.
@@ -173,7 +193,8 @@ retry:
                goto retry;
        }
 
-       hash = ht->hash_fct(key, ht->keylen, ht->hashseed) % ht->size;
+       hash = ht->hash_fct(key, ht->keylen, ht->hashseed)
+               % LOAD_SHARED(ht->size);
 
        prev = &ht->tbl[hash];
        node = rcu_dereference(*prev);
@@ -182,7 +203,6 @@ retry:
                        if (del_node) {
                                goto end;
                        } else {
-                               data = (void *)(unsigned long)-ENOENT;
                                goto error;
                        }
                }
@@ -192,8 +212,19 @@ retry:
                prev = &node->next;
                node = rcu_dereference(*prev);
        }
+
+       if (!del_node) {
+               /*
+                * Another concurrent thread stole it? If so, let it deal with
+                * this. Assume NODE_STOLEN is the only flag. If this changes,
+                * read flags before cmpxchg.
+                */
+               if (cmpxchg(&node->flags, 0, NODE_STOLEN) != 0)
+                       goto error;
+       }
+
        /* Found it! The pointer to the object is in "prev". */
-       if (rcu_cmpxchg_pointer(prev, node, node->next) != node)
+       if (rcu_cmpxchg_pointer(prev, node, node->next) == node)
                del_node = node;
        goto restart;
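
The flags cmpxchg is a claim-once idiom: of all the stealers racing on a doubly-referenced node, exactly one reads 0 and installs NODE_STOLEN, and only that one reclaims. A standalone model using the GCC builtin, which, like the arch_atomic.h cmpxchg, returns the value previously in memory:

#include <stdio.h>

#define NODE_STOLEN	(1 << 0)

int main(void)
{
	unsigned int flags = 0;

	/* First claim: memory held 0, NODE_STOLEN is installed, and the
	 * old value 0 is returned; this stealer wins. */
	if (__sync_val_compare_and_swap(&flags, 0, NODE_STOLEN) == 0)
		printf("won: unlink the node and reclaim via call_rcu\n");

	/* Second claim: memory already holds NODE_STOLEN, so the cmpxchg
	 * fails; this stealer takes the error path. */
	if (__sync_val_compare_and_swap(&flags, 0, NODE_STOLEN) != 0)
		printf("lost: another stealer owns reclamation\n");
	return 0;
}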
 
@@ -204,11 +235,12 @@ end:
         */
        rcu_read_unlock();
 
-       data = node->data;
-       call_rcu(free, node);
+       data = del_node->data;
+       call_rcu(free, del_node);
        return data;
 
 error:
+       data = (void *)(unsigned long)-ENOENT;
        rcu_read_unlock();
        return data;
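
On the caller side, the -ENOENT convention documented above means a (hypothetical) user of ht_steal() checks the returned pointer like this:

#include <errno.h>
#include <urcu-ht.h>

static int steal_and_consume(struct rcu_ht *ht, void *key, void **datap)
{
	void *data = ht_steal(ht, key);

	if (data == (void *)(unsigned long)-ENOENT)
		return -ENOENT;		/* no matching entry left */
	/* The node itself is reclaimed through call_rcu(); only the data
	 * pointer is handed back to the caller. */
	*datap = data;
	return 0;
}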
 
@@ -319,9 +351,10 @@ static void ht_resize_grow(struct rcu_ht *ht)
 
        old_size = ht->size;
 
-       if (old_size == 1)
+       if (old_size == MAX_HT_BUCKETS)
                return;
 
+       old_tbl = ht->tbl;
        new_size = old_size << 1;
        new_tbl = calloc(new_size, sizeof(struct rcu_ht_node *));
 
@@ -332,23 +365,22 @@ static void ht_resize_grow(struct rcu_ht *ht)
                 * if it's in the table.
                 * Copy each node. (just the node, not ->data)
                 */
-               node = ht->tbl[i];
+               node = old_tbl[i];
                while (node) {
                        hash = ht->hash_fct(node->key, ht->keylen, ht->hashseed)
                                            % new_size;
                        new_node = malloc(sizeof(struct rcu_ht_node));
                        new_node->key = node->key;
                        new_node->data = node->data;
+                       new_node->flags = 0;    /* malloc() does not zero it */
-                       new_node->next = new_tbl[i];    /* add to head */
-                       new_tbl[i] = new_node;
+                       new_node->next = new_tbl[hash]; /* add to head */
+                       new_tbl[hash] = new_node;
                        node = node->next;
                }
        }
 
-       old_tbl = ht->tbl;
        ht->tbl = new_tbl;
        smp_wmb();      /* write links and table before changing size */
-       ht->size = new_size;
+       STORE_SHARED(ht->size, new_size);
 
        /* Ensure all concurrent lookups use new size and table */
        synchronize_rcu();
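
The switch from new_tbl[i] to new_tbl[hash] matters because a node does not, in general, keep its bucket index when the table doubles. A quick numeric check, with the literal hash value standing in for the output of ht->hash_fct:

#include <stdio.h>

int main(void)
{
	unsigned long h = 6, old_size = 4, new_size = old_size << 1;

	/* Under the old size this key lived in bucket 6 % 4 == 2; lookups
	 * after the resize probe bucket 6 % 8 == 6, so re-adding it at
	 * index 2 (the old bug) would have made it unreachable. */
	printf("old bucket %lu, correct new bucket %lu\n",
	       h % old_size, h % new_size);
	return 0;
}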
@@ -376,17 +408,17 @@ static void ht_resize_shrink(struct rcu_ht *ht)
        new_size = ht->size >> 1;
 
        for (i = 0; i < new_size; i++) {
-               /* Link end with first entry of 2*i */
+               /* Link end with first entry of i + new_size */
                prev = &ht->tbl[i];
                node = *prev;
                while (node) {
                        prev = &node->next;
                        node = *prev;
                }
-               *prev = ht->tbl[i << 1];
+               *prev = ht->tbl[i + new_size];
        }
        smp_wmb();      /* write links before changing size */
-       ht->size = new_size;
+       STORE_SHARED(ht->size, new_size);
 
        /* Ensure all concurrent lookups use new size */
        synchronize_rcu();
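
The shrink-side fix rests on a small modular identity: when old_size == 2 * new_size, old buckets i and i + new_size both fold onto new bucket i, so each chain i must be spliced to chain i + new_size (not to chain i << 1, as before). A standalone check of the identity:

#include <assert.h>

int main(void)
{
	unsigned long h, new_size = 4, old_size = 2 * new_size;

	/* h % new_size == (h % old_size) % new_size for every h, so the
	 * two old buckets sharing a residue mod new_size are exactly
	 * i and i + new_size. */
	for (h = 0; h < 4096; h++)
		assert(h % new_size == (h % old_size) % new_size);
	return 0;
}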
@@ -405,7 +437,7 @@ void ht_resize(struct rcu_ht *ht, int growth)
 
        ret = pthread_mutex_lock(&ht->resize_mutex);
        assert(!ret);
-       ht->resize_ongoing = 1;
+       STORE_SHARED(ht->resize_ongoing, 1);
        synchronize_rcu();
        /* All add/remove are waiting on the mutex. */
        if (growth > 0)
@@ -413,7 +445,7 @@ void ht_resize(struct rcu_ht *ht, int growth)
        else if (growth < 0)
                ht_resize_shrink(ht);
        smp_mb();
-       ht->resize_ongoing = 0;
+       STORE_SHARED(ht->resize_ongoing, 0);
        ret = pthread_mutex_unlock(&ht->resize_mutex);
        assert(!ret);
 }
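
From the updater's side the protocol is: publish resize_ongoing, let synchronize_rcu() flush out readers that sampled the old value, resize with exclusive access, then clear the flag. Callers only choose a direction; a hypothetical wrapper:

#include <urcu-ht.h>

/* The sign of "growth" selects the direction: > 0 doubles the bucket
 * count, < 0 halves it, and 0 leaves the size unchanged. */
static void rebalance(struct rcu_ht *ht, int overloaded)
{
	ht_resize(ht, overloaded ? 1 : -1);
}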