#define CHAIN_LEN_RESIZE_THRESHOLD 3
/*
- * Define the minimum table size. Protects against hash table resize overload
- * when too many entries are added quickly before the resize can complete.
- * This is especially the case if the table could be shrinked to a size of 1.
- * TODO: we might want to make the add/remove operations help the resize to
- * add or remove dummy nodes when a resize is ongoing to ensure upper-bound on
- * chain length.
+ * Define the minimum table size.
*/
-#define MIN_TABLE_SIZE 128
+#define MIN_TABLE_SIZE 1
#if (CAA_BITS_PER_LONG == 32)
#define MAX_TABLE_ORDER 32
#define DUMMY_FLAG (1UL << 1)
#define FLAGS_MASK ((1UL << 2) - 1)
+/* Value of the end pointer. Should not interact with flags. */
+#define END_VALUE NULL
+
struct ht_items_count {
unsigned long add, remove;
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
cds_lfht_compare_fct compare_fct;
unsigned long hash_seed;
int flags;
+ /*
+ * We need to put the work threads offline (QSBR) when taking this
+ * mutex, because we use synchronize_rcu within this mutex critical
+ * section, which waits on read-side critical sections, and could
+ * therefore cause grace-period deadlock if we hold off RCU G.P.
+ * completion.
+ */
pthread_mutex_t resize_mutex; /* resize mutex: add/del mutex */
unsigned int in_progress_resize, in_progress_destroy;
void (*cds_lfht_call_rcu)(struct rcu_head *head,
void (*cds_lfht_synchronize_rcu)(void);
void (*cds_lfht_rcu_read_lock)(void);
void (*cds_lfht_rcu_read_unlock)(void);
+ void (*cds_lfht_rcu_thread_offline)(void);
+ void (*cds_lfht_rcu_thread_online)(void);
unsigned long count; /* global approximate item count */
struct ht_items_count *percpu_count; /* per-cpu item count */
};
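The resize_mutex comment above implies a fixed locking discipline: put the worker thread offline (QSBR) before taking the mutex, perform the resize (which may call synchronize_rcu), release the mutex, then come back online. A minimal sketch of that pattern, mirroring the call sites added later in this patch (the helper name resize_locked is illustrative only, not part of the patch):

static
void resize_locked(struct cds_lfht *ht)
{
	ht->cds_lfht_rcu_thread_offline();
	pthread_mutex_lock(&ht->resize_mutex);
	_do_cds_lfht_resize(ht);	/* may invoke synchronize_rcu() */
	pthread_mutex_unlock(&ht->resize_mutex);
	ht->cds_lfht_rcu_thread_online();
}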
struct cds_lfht *ht;
};
+static
+struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht,
+ unsigned long size,
+ struct cds_lfht_node *node,
+ int unique, int dummy);
+
/*
* Algorithm to reverse bits in a word by lookup table, extended to
* 64-bit words.
{
return (struct cds_lfht_node *) (((unsigned long) node) | DUMMY_FLAG);
}
-
+
+static
+struct cds_lfht_node *get_end(void)
+{
+ return (struct cds_lfht_node *) END_VALUE;
+}
+
+static
+int is_end(struct cds_lfht_node *node)
+{
+ return clear_flag(node) == (struct cds_lfht_node *) END_VALUE;
+}
+
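With END_VALUE in place, chains are terminated by the get_end() sentinel rather than a plain NULL, so traversals test is_end() at each step, as the hunks below change them to do. A minimal sketch of the resulting walk (the function and the visit step are illustrative only; the real traversals are in _cds_lfht_add, _cds_lfht_gc_bucket and cds_lfht_lookup):

static
void walk_chain(struct cds_lfht_node *iter)
{
	for (;;) {
		if (unlikely(is_end(iter)))
			break;		/* hit the get_end() sentinel */
		/* ... visit clear_flag(iter) here ... */
		iter = rcu_dereference(clear_flag(iter)->p.next);
	}
}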
static
unsigned long _uatomic_max(unsigned long *ptr, unsigned long v)
{
*/
assert(dummy != node);
for (;;) {
- if (unlikely(!clear_flag(iter)))
+ if (unlikely(is_end(iter)))
return;
if (likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash))
return;
new_next = clear_flag(next);
(void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
}
+ return;
}
static
assert(!is_removed(node));
if (!size) {
assert(dummy);
- node->p.next = flag_dummy(NULL);
+ node->p.next = flag_dummy(get_end());
return node; /* Initial first add (head) */
}
hash = bit_reverse_ulong(node->p.reverse_hash);
iter = rcu_dereference(iter_prev->p.next);
assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
for (;;) {
- /* TODO: check if removed */
- if (unlikely(!clear_flag(iter)))
+ if (unlikely(is_end(iter)))
goto insert;
- /* TODO: check if removed */
if (likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash))
goto insert;
next = rcu_dereference(clear_flag(iter)->p.next);
return -ENOENT;
}
+/*
+ * Holding RCU read lock to protect _cds_lfht_add against memory
+ * reclaim that could be performed by other call_rcu worker threads (ABA
+ * problem).
+ *
+ * TODO: when we reach a certain length, we can split this population phase over
+ * many worker threads, based on the number of CPUs available in the system.
+ * This should therefore keep the expand from lagging behind too many
+ * concurrent insertion threads, by relying on the scheduler's ability to
+ * schedule dummy node population fairly with insertions.
+ */
static
-void init_table_hash(struct cds_lfht *ht, unsigned long i,
- unsigned long len)
+void init_table_populate(struct cds_lfht *ht, unsigned long i, unsigned long len)
{
unsigned long j;
+ ht->cds_lfht_rcu_thread_online();
+ ht->cds_lfht_rcu_read_lock();
for (j = 0; j < len; j++) {
struct cds_lfht_node *new_node =
(struct cds_lfht_node *) &ht->t.tbl[i]->nodes[j];
- dbg_printf("init hash entry: i %lu j %lu hash %lu\n",
+ dbg_printf("init populate: i %lu j %lu hash %lu\n",
i, j, !i ? 0 : (1UL << (i - 1)) + j);
new_node->p.reverse_hash =
bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j);
- if (CMM_LOAD_SHARED(ht->in_progress_destroy))
- break;
- }
-}
-
-static
-void init_table_link(struct cds_lfht *ht, unsigned long i, unsigned long len)
-{
- unsigned long j;
-
- ht->cds_lfht_rcu_read_lock();
- for (j = 0; j < len; j++) {
- struct cds_lfht_node *new_node =
- (struct cds_lfht_node *) &ht->t.tbl[i]->nodes[j];
-
- dbg_printf("init link: i %lu j %lu hash %lu\n",
- i, j, !i ? 0 : (1UL << (i - 1)) + j);
(void) _cds_lfht_add(ht, !i ? 0 : (1UL << (i - 1)),
new_node, 0, 1);
if (CMM_LOAD_SHARED(ht->in_progress_destroy))
break;
}
ht->cds_lfht_rcu_read_unlock();
+ ht->cds_lfht_rcu_thread_offline();
}
-/*
- * Holding RCU read lock to protect _cds_lfht_add against memory
- * reclaim that could be performed by other call_rcu worker threads (ABA
- * problem).
- */
static
void init_table(struct cds_lfht *ht,
unsigned long first_order, unsigned long len_order)
len = !i ? 1 : 1UL << (i - 1);
dbg_printf("init order %lu len: %lu\n", i, len);
+
+ /* Stop expand if the resize target changes under us */
+ if (CMM_LOAD_SHARED(ht->t.resize_target) < (!i ? 1 : (1UL << i)))
+ break;
+
ht->t.tbl[i] = calloc(1, sizeof(struct rcu_level)
+ (len * sizeof(struct _cds_lfht_node)));
- /* Set all dummy nodes reverse hash values for a level */
- init_table_hash(ht, i, len);
-
/*
- * Link all dummy nodes into the table. Concurrent
- * add/remove are helping us.
+ * Set all dummy nodes' reverse hash values for a level and
+ * link all dummy nodes into the table.
*/
- init_table_link(ht, i, len);
+ init_table_populate(ht, i, len);
/*
- * Update table size (after init for now, because no
- * concurrent updater help (TODO)).
+ * Update table size.
*/
cmm_smp_wmb(); /* populate data before RCU size */
CMM_STORE_SHARED(ht->t.size, !i ? 1 : (1UL << i));
+
dbg_printf("init new size: %lu\n", !i ? 1 : (1UL << i));
if (CMM_LOAD_SHARED(ht->in_progress_destroy))
break;
}
}
+/*
+ * Holding RCU read lock to protect _cds_lfht_remove against memory
+ * reclaim that could be performed by other call_rcu worker threads (ABA
+ * problem).
+ * For a single level, we logically remove and garbage collect each node.
+ *
+ * As a design choice, we perform logical removal and garbage collection on a
+ * node-per-node basis to simplify this algorithm. We also assume that keeping
+ * good cache locality of the operation outweighs the possible performance gain
+ * that could be achieved by batching garbage collection for multiple levels;
+ * however, this would have to be justified by benchmarks.
+ *
+ * Concurrent removal and add operations are helping us perform garbage
+ * collection of logically removed nodes. We guarantee that all logically
+ * removed nodes have been garbage-collected (unlinked) before call_rcu is
+ * invoked to free a whole level of dummy nodes (after a grace period).
+ *
+ * Logical removal and garbage collection can therefore be done in batch or on a
+ * node-per-node basis, as long as the guarantee above holds.
+ *
+ * TODO: when we reach a certain length, we can split this removal over many
+ * worker threads, based on the number of CPUs available in the system. This
+ * should keep the resize process from lagging behind too many concurrent
+ * updater threads actively inserting into the hash table.
+ */
static
void remove_table(struct cds_lfht *ht, unsigned long i, unsigned long len)
{
unsigned long j;
+ ht->cds_lfht_rcu_thread_online();
ht->cds_lfht_rcu_read_lock();
for (j = 0; j < len; j++) {
struct cds_lfht_node *fini_node =
break;
}
ht->cds_lfht_rcu_read_unlock();
+ ht->cds_lfht_rcu_thread_offline();
}
-/*
- * Holding RCU read lock to protect _cds_lfht_remove against memory
- * reclaim that could be performed by other call_rcu worker threads (ABA
- * problem).
- */
static
void fini_table(struct cds_lfht *ht,
unsigned long first_order, unsigned long len_order)
first_order, first_order + len_order);
end_order = first_order + len_order;
assert(first_order > 0);
- assert(ht->t.size == (1UL << (first_order - 1)));
for (i = end_order - 1; i >= first_order; i--) {
unsigned long len;
len = !i ? 1 : 1UL << (i - 1);
dbg_printf("fini order %lu len: %lu\n", i, len);
+ /* Stop shrink if the resize target changes under us */
+ if (CMM_LOAD_SHARED(ht->t.resize_target) > (1UL << (i - 1)))
+ break;
+
+ cmm_smp_wmb(); /* populate data before RCU size */
+ CMM_STORE_SHARED(ht->t.size, 1UL << (i - 1));
+
+ /*
+ * We need to wait for all add operations to reach Q.S. (and
+ * thus use the new table for lookups) before we can start
+ * releasing the old dummy nodes. Otherwise their lookup will
+ * return a logically removed node as insert position.
+ */
+ ht->cds_lfht_synchronize_rcu();
+
/*
* Set "removed" flag in dummy nodes about to be removed.
* Unlink all now-logically-removed dummy node pointers.
}
}
-struct cds_lfht *cds_lfht_new(cds_lfht_hash_fct hash_fct,
+struct cds_lfht *_cds_lfht_new(cds_lfht_hash_fct hash_fct,
cds_lfht_compare_fct compare_fct,
unsigned long hash_seed,
unsigned long init_size,
void (*func)(struct rcu_head *head)),
void (*cds_lfht_synchronize_rcu)(void),
void (*cds_lfht_rcu_read_lock)(void),
- void (*cds_lfht_rcu_read_unlock)(void))
+ void (*cds_lfht_rcu_read_unlock)(void),
+ void (*cds_lfht_rcu_thread_offline)(void),
+ void (*cds_lfht_rcu_thread_online)(void))
{
struct cds_lfht *ht;
unsigned long order;
ht->cds_lfht_synchronize_rcu = cds_lfht_synchronize_rcu;
ht->cds_lfht_rcu_read_lock = cds_lfht_rcu_read_lock;
ht->cds_lfht_rcu_read_unlock = cds_lfht_rcu_read_unlock;
+ ht->cds_lfht_rcu_thread_offline = cds_lfht_rcu_thread_offline;
+ ht->cds_lfht_rcu_thread_online = cds_lfht_rcu_thread_online;
ht->percpu_count = alloc_per_cpu_items_count();
/* this mutex should not nest in read-side C.S. */
pthread_mutex_init(&ht->resize_mutex, NULL);
order = get_count_order_ulong(max(init_size, MIN_TABLE_SIZE)) + 1;
ht->flags = flags;
+ ht->cds_lfht_rcu_thread_offline();
pthread_mutex_lock(&ht->resize_mutex);
+ ht->t.resize_target = 1UL << (order - 1);
init_table(ht, 0, order);
pthread_mutex_unlock(&ht->resize_mutex);
+ ht->cds_lfht_rcu_thread_online();
return ht;
}
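Since the constructor is renamed to _cds_lfht_new() and now also receives the thread offline/online callbacks, the public entry point is expected to forward the RCU flavor's primitives. A hedged sketch of such a wrapper, assuming the urcu-qsbr flavor symbols (call_rcu, synchronize_rcu, rcu_read_lock, rcu_read_unlock, rcu_thread_offline, rcu_thread_online) and assuming the elided parameters (flags, call_rcu) keep their existing order in the declaration:

static inline
struct cds_lfht *cds_lfht_new(cds_lfht_hash_fct hash_fct,
		cds_lfht_compare_fct compare_fct,
		unsigned long hash_seed,
		unsigned long init_size,
		int flags)
{
	return _cds_lfht_new(hash_fct, compare_fct, hash_seed,
			init_size, flags,
			call_rcu, synchronize_rcu,
			rcu_read_lock, rcu_read_unlock,
			rcu_thread_offline, rcu_thread_online);
}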
struct cds_lfht_node *cds_lfht_lookup(struct cds_lfht *ht, void *key, size_t key_len)
{
- struct cds_lfht_node *node, *next;
+ struct cds_lfht_node *node, *next, *dummy_node;
struct _cds_lfht_node *lookup;
unsigned long hash, reverse_hash, index, order, size;
lookup = &ht->t.tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1))) - 1)];
dbg_printf("lookup hash %lu index %lu order %lu aridx %lu\n",
hash, index, order, index & (!order ? 0 : ((1UL << (order - 1)) - 1)));
- node = (struct cds_lfht_node *) lookup;
+ dummy_node = (struct cds_lfht_node *) lookup;
+ /* We can always skip the dummy node initially */
+ node = rcu_dereference(dummy_node->p.next);
+ node = clear_flag(node);
for (;;) {
- if (unlikely(!node))
+ if (unlikely(is_end(node))) {
+ node = NULL;
break;
+ }
if (unlikely(node->p.reverse_hash > reverse_hash)) {
node = NULL;
break;
node = clear_flag(next);
for (;;) {
- if (unlikely(!node))
+ if (unlikely(is_end(node))) {
+ node = NULL;
break;
+ }
if (unlikely(node->p.reverse_hash > reverse_hash)) {
node = NULL;
break;
size = rcu_dereference(ht->t.size);
ret = _cds_lfht_add(ht, size, node, 1, 0);
- if (ret != node)
+ if (ret == node)
ht_count_add(ht, size);
return ret;
}
if (!is_dummy(node))
return -EPERM;
assert(!is_removed(node));
- } while (clear_flag(node));
+ } while (!is_end(node));
/*
* size accessed without rcu_dereference because hash table is
* being destroyed.
else
(nr_dummy)++;
node = clear_flag(next);
- } while (node);
+ } while (!is_end(node));
dbg_printf("number of dummy nodes: %lu\n", nr_dummy);
}
old_size, old_order, new_size, new_order);
assert(new_size < old_size);
- cmm_smp_wmb(); /* populate data before RCU size */
- CMM_STORE_SHARED(ht->t.size, new_size);
-
- /*
- * We need to wait for all add operations to reach Q.S. (and
- * thus use the new table for lookups) before we can start
- * releasing the old dummy nodes. Otherwise their lookup will
- * return a logically removed node as insert position.
- */
- ht->cds_lfht_synchronize_rcu();
-
/* Remove and unlink all dummy nodes to remove. */
fini_table(ht, new_order, old_order - new_order);
}
ht->t.resize_initiated = 0;
/* write resize_initiated before read resize_target */
cmm_smp_mb();
- } while (new_size != CMM_LOAD_SHARED(ht->t.resize_target));
+ } while (ht->t.size != CMM_LOAD_SHARED(ht->t.resize_target));
}
static
{
resize_target_update_count(ht, new_size);
CMM_STORE_SHARED(ht->t.resize_initiated, 1);
+ ht->cds_lfht_rcu_thread_offline();
pthread_mutex_lock(&ht->resize_mutex);
_do_cds_lfht_resize(ht);
pthread_mutex_unlock(&ht->resize_mutex);
+ ht->cds_lfht_rcu_thread_online();
}
static
caa_container_of(head, struct rcu_resize_work, head);
struct cds_lfht *ht = work->ht;
+ ht->cds_lfht_rcu_thread_offline();
pthread_mutex_lock(&ht->resize_mutex);
_do_cds_lfht_resize(ht);
pthread_mutex_unlock(&ht->resize_mutex);
+ ht->cds_lfht_rcu_thread_online();
poison_free(work);
cmm_smp_mb(); /* finish resize before decrement */
uatomic_dec(&ht->in_progress_resize);