#define _LGPL_SOURCE
#include <stdlib.h>
+#include <errno.h>
+#include <assert.h>
+#include <stdio.h>
+
#include <urcu.h>
+#include <urcu-defer.h>
#include <arch.h>
#include <arch_atomic.h>
-#include <assert.h>
#include <compiler.h>
-#include <urcu-defer.h>
-#include <errno.h>
-#include <urcu-ht.h>
#include <urcu/jhash.h>
#include <stdio.h>
#include <pthread.h>
+#include <urcu-ht.h>
+
+/*
+ * Maximum number of hash table buckets: 32K on 64-bit
+ * ((256 << 10) bytes of bucket table / sizeof(void *)).
+ * Should take about 512KB max if we assume 1 node per 4 buckets.
+#define MAX_HT_BUCKETS ((256 << 10) / sizeof(void *))
+
+/* node flags */
+#define NODE_STOLEN (1 << 0)
struct rcu_ht_node;
struct rcu_ht_node *next;
void *key;
void *data;
+ unsigned int flags;
};
struct rcu_ht {
ht = calloc(1, sizeof(struct rcu_ht));
ht->hash_fct = hash_fct;
ht->free_fct = free_fct;
- ht->size = init_size;
+ ht->size = init_size; /* shared */
ht->keylen = keylen;
ht->hashseed = hashseed;
/* this mutex should not nest in read-side C.S. */
pthread_mutex_init(&ht->resize_mutex, NULL);
- ht->resize_ongoing = 0;
+ ht->resize_ongoing = 0; /* shared */
ht->tbl = calloc(init_size, sizeof(struct rcu_ht_node *));
return ht;
}
new_head = calloc(1, sizeof(struct rcu_ht_node));
new_head->key = key;
new_head->data = data;
+ new_head->flags = 0;
/* here comes the fun and tricky part.
* Add at the beginning with a cmpxchg.
* Hold a read lock between the moment the first element is read
retry:
rcu_read_lock();
- if (unlikely(ht->resize_ongoing)) {
+ if (unlikely(LOAD_SHARED(ht->resize_ongoing))) {
rcu_read_unlock();
/*
* Wait for resize to complete before continuing.
goto retry;
}
- hash = ht->hash_fct(key, ht->keylen, ht->hashseed) % ht->size;
+ hash = ht->hash_fct(key, ht->keylen, ht->hashseed)
+ % LOAD_SHARED(ht->size);
old_head = node = rcu_dereference(ht->tbl[hash]);
for (;;) {
/*
* Restart until we successfully remove the entry, or no entry is left
* ((void *)(unsigned long)-ENOENT).
- * Deal with concurrent stealers by verifying that there are no element
- * in the list still pointing to the element stolen. (del_node)
+ * Deal with concurrent stealers by doing an extra verification pass to check
+ * that no elements in the list are still pointing to the stolen element.
+ * This could happen if two concurrent steals of consecutive objects are
+ * executed: a pointer to an object being stolen could have been saved by the
+ * concurrent stealer of the previous object.
+ * Also, given that in this precise scenario another stealer may also want to
+ * delete the doubly-referenced object, use a "stolen" flag to let only one
+ * stealer delete the object.
*/
void *ht_steal(struct rcu_ht *ht, void *key)
{
retry:
rcu_read_lock();
- if (unlikely(ht->resize_ongoing)) {
+ if (unlikely(LOAD_SHARED(ht->resize_ongoing))) {
rcu_read_unlock();
/*
* Wait for resize to complete before continuing.
goto retry;
}
- hash = ht->hash_fct(key, ht->keylen, ht->hashseed) % ht->size;
+ hash = ht->hash_fct(key, ht->keylen, ht->hashseed)
+ % LOAD_SHARED(ht->size);
prev = &ht->tbl[hash];
node = rcu_dereference(*prev);
if (del_node) {
goto end;
} else {
- data = (void *)(unsigned long)-ENOENT;
goto error;
}
}
prev = &node->next;
node = rcu_dereference(*prev);
}
+
+ if (!del_node) {
+		/*
+		 * Did another concurrent thread steal it? If so, let that
+		 * thread deal with this node. Assume NODE_STOLEN is the only
+		 * flag; if this changes, read the flags before the cmpxchg.
+		 */
+ if (cmpxchg(&node->flags, 0, NODE_STOLEN) != 0)
+ goto error;
+ }
+
/* Found it ! pointer to object is in "prev" */
- if (rcu_cmpxchg_pointer(prev, node, node->next) != node)
+ if (rcu_cmpxchg_pointer(prev, node, node->next) == node)
del_node = node;
goto restart;
*/
rcu_read_unlock();
- data = node->data;
- call_rcu(free, node);
+ data = del_node->data;
+ call_rcu(free, del_node);
return data;
error:
+ data = (void *)(unsigned long)-ENOENT;
rcu_read_unlock();
return data;
old_size = ht->size;
- if (old_size == 1)
+ if (old_size == MAX_HT_BUCKETS)
return;
+ old_tbl = ht->tbl;
new_size = old_size << 1;
new_tbl = calloc(new_size, sizeof(struct rcu_ht_node *));
* if it's in the table.
* Copy each node. (just the node, not ->data)
*/
- node = ht->tbl[i];
+ node = old_tbl[i];
while (node) {
hash = ht->hash_fct(node->key, ht->keylen, ht->hashseed)
% new_size;
new_node = malloc(sizeof(struct rcu_ht_node));
new_node->key = node->key;
new_node->data = node->data;
- new_node->next = new_tbl[i]; /* add to head */
- new_tbl[i] = new_node;
+ new_node->next = new_tbl[hash]; /* add to head */
+ new_tbl[hash] = new_node;
node = node->next;
}
}
- old_tbl = ht->tbl;
ht->tbl = new_tbl;
smp_wmb(); /* write links and table before changing size */
- ht->size = new_size;
+ STORE_SHARED(ht->size, new_size);
/* Ensure all concurrent lookups use new size and table */
synchronize_rcu();
new_size = ht->size >> 1;
for (i = 0; i < new_size; i++) {
- /* Link end with first entry of 2*i */
+ /* Link end with first entry of i + new_size */
prev = &ht->tbl[i];
node = *prev;
while (node) {
prev = &node->next;
node = *prev;
}
- *prev = ht->tbl[i << 1];
+ *prev = ht->tbl[i + new_size];
}
smp_wmb(); /* write links before changing size */
- ht->size = new_size;
+ STORE_SHARED(ht->size, new_size);
/* Ensure all concurrent lookups use new size */
synchronize_rcu();
ret = pthread_mutex_lock(&ht->resize_mutex);
assert(!ret);
- ht->resize_ongoing = 1;
+ STORE_SHARED(ht->resize_ongoing, 1);
synchronize_rcu();
/* All add/remove are waiting on the mutex. */
if (growth > 0)
else if (growth < 0)
ht_resize_shrink(ht);
smp_mb();
- ht->resize_ongoing = 0;
+ STORE_SHARED(ht->resize_ongoing, 0);
ret = pthread_mutex_unlock(&ht->resize_mutex);
assert(!ret);
}