diff --git a/rculfhash.c b/rculfhash.c index cd60f5f..1288c78 100644 --- a/rculfhash.c +++ b/rculfhash.c @@ -120,11 +120,11 @@ * order bits reverse * 0 0 000 000 * | - * 1 | 1 001 100 <- - * | | | - * 2 | | 2 010 010 | - * | | | 3 011 110 <- | - * | | | | | | + * 1 | 1 001 100 <- <- + * | | | | + * 2 | | 2 010 010 | | + * | | | 3 011 110 | <- | + * | | | | | | | * 3 -> | | | 4 100 001 | | * -> | | 5 101 101 | * -> | 6 110 011 @@ -166,6 +166,21 @@ #define CHAIN_LEN_TARGET 1 #define CHAIN_LEN_RESIZE_THRESHOLD 3 +/* + * Define the minimum table size. + */ +#define MIN_TABLE_SIZE 128 + +#if (CAA_BITS_PER_LONG == 32) +#define MAX_TABLE_ORDER 32 +#else +#define MAX_TABLE_ORDER 64 +#endif + +#ifndef min +#define min(a, b) ((a) < (b) ? (a) : (b)) +#endif + #ifndef max #define max(a, b) ((a) > (b) ? (a) : (b)) #endif @@ -189,23 +204,34 @@ struct rcu_level { }; struct rcu_table { - unsigned long size; /* always a power of 2 */ + unsigned long size; /* always a power of 2, shared (RCU) */ unsigned long resize_target; int resize_initiated; - struct rcu_head head; - struct rcu_level *tbl[0]; + struct rcu_level *tbl[MAX_TABLE_ORDER]; }; struct cds_lfht { - struct rcu_table *t; /* shared */ + struct rcu_table t; cds_lfht_hash_fct hash_fct; cds_lfht_compare_fct compare_fct; unsigned long hash_seed; + int flags; + /* + * We need to put the work threads offline (QSBR) when taking this + * mutex, because we use synchronize_rcu within this mutex critical + * section, which waits on read-side critical sections, and could + * therefore cause grace-period deadlock if we hold off RCU G.P. + * completion. + */ pthread_mutex_t resize_mutex; /* resize mutex: add/del mutex */ unsigned int in_progress_resize, in_progress_destroy; void (*cds_lfht_call_rcu)(struct rcu_head *head, void (*func)(struct rcu_head *head)); void (*cds_lfht_synchronize_rcu)(void); + void (*cds_lfht_rcu_read_lock)(void); + void (*cds_lfht_rcu_read_unlock)(void); + void (*cds_lfht_rcu_thread_offline)(void); + void (*cds_lfht_rcu_thread_online)(void); unsigned long count; /* global approximate item count */ struct ht_items_count *percpu_count; /* per-cpu item count */ }; @@ -215,6 +241,12 @@ struct rcu_resize_work { struct cds_lfht *ht; }; +static +struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht, + unsigned long size, + struct cds_lfht_node *node, + int unique, int dummy); + /* * Algorithm to reverse bits in a word by lookup table, extended to * 64-bit words.
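
The fixed-size tbl[MAX_TABLE_ORDER] array introduced above relies on a property of split-ordered tables: bucket index i always lives in level order(i+1), so level 0 holds bucket 0 and each level k >= 1 holds the 2^(k-1) buckets [2^(k-1), 2^k). Growing the table therefore only appends levels; existing dummy nodes are never relocated. Below is a minimal standalone sketch of the index math behind the recurring `index & ((1UL << (order - 1)) - 1)` expressions in this patch; helper names are illustrative, not from the file:

#include <stdio.h>

/*
 * Smallest order such that (1UL << order) >= v; models what
 * get_count_order_ulong() computes in rculfhash.c.
 */
static unsigned long sketch_count_order(unsigned long v)
{
	unsigned long order = 0;

	while ((1UL << order) < v)
		order++;
	return order;
}

/* Map a flat bucket index to its (level, offset) slot in tbl[]. */
static void sketch_index_to_slot(unsigned long index,
		unsigned long *order, unsigned long *offset)
{
	*order = sketch_count_order(index + 1);
	*offset = !*order ? 0 : (index & ((1UL << (*order - 1)) - 1));
}

int main(void)
{
	unsigned long index, order, offset;

	/* Buckets 0..7 land in tbl[0]..tbl[3], matching the diagram above. */
	for (index = 0; index < 8; index++) {
		sketch_index_to_slot(index, &order, &offset);
		printf("bucket %lu -> tbl[%lu]->nodes[%lu]\n",
			index, order, offset);
	}
	return 0;
}
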
@@ -407,8 +439,18 @@ int get_count_order_ulong(unsigned long x) return order; } +#ifdef POISON_FREE +#define poison_free(ptr) \ + do { \ + memset(ptr, 0x42, sizeof(*(ptr))); \ + free(ptr); \ + } while (0) +#else +#define poison_free(ptr) free(ptr) +#endif + static -void cds_lfht_resize_lazy(struct cds_lfht *ht, struct rcu_table *t, int growth); +void cds_lfht_resize_lazy(struct cds_lfht *ht, unsigned long size, int growth); /* * If the sched_getcpu() and sysconf(_SC_NPROCESSORS_CONF) calls are @@ -419,7 +461,7 @@ void cds_lfht_resize_lazy(struct cds_lfht *ht, struct rcu_table *t, int growth); #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) static -void cds_lfht_resize_lazy_count(struct cds_lfht *ht, struct rcu_table *t, +void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size, unsigned long count); static long nr_cpus_mask = -1; @@ -457,7 +499,7 @@ struct ht_items_count *alloc_per_cpu_items_count(void) static void free_per_cpu_items_count(struct ht_items_count *count) { - free(count); + poison_free(count); } static @@ -474,7 +516,7 @@ int ht_get_cpu(void) } static -void ht_count_add(struct cds_lfht *ht, struct rcu_table *t) +void ht_count_add(struct cds_lfht *ht, unsigned long size) { unsigned long percpu_count; int cpu; @@ -493,18 +535,17 @@ void ht_count_add(struct cds_lfht *ht, struct rcu_table *t) 1UL << COUNT_COMMIT_ORDER); /* If power of 2 */ if (!(count & (count - 1))) { - if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) - < t->size) + if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) < size) return; dbg_printf("add set global %lu\n", count); - cds_lfht_resize_lazy_count(ht, t, + cds_lfht_resize_lazy_count(ht, size, count >> (CHAIN_LEN_TARGET - 1)); } } } static -void ht_count_remove(struct cds_lfht *ht, struct rcu_table *t) +void ht_count_remove(struct cds_lfht *ht, unsigned long size) { unsigned long percpu_count; int cpu; @@ -523,11 +564,10 @@ void ht_count_remove(struct cds_lfht *ht, struct rcu_table *t) -(1UL << COUNT_COMMIT_ORDER)); /* If power of 2 */ if (!(count & (count - 1))) { - if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) - >= t->size) + if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) >= size) return; dbg_printf("remove set global %lu\n", count); - cds_lfht_resize_lazy_count(ht, t, + cds_lfht_resize_lazy_count(ht, size, count >> (CHAIN_LEN_TARGET - 1)); } } @@ -549,12 +589,12 @@ void free_per_cpu_items_count(struct ht_items_count *count) } static -void ht_count_add(struct cds_lfht *ht, struct rcu_table *t) +void ht_count_add(struct cds_lfht *ht, unsigned long size) { } static -void ht_count_remove(struct cds_lfht *ht, struct rcu_table *t) +void ht_count_remove(struct cds_lfht *ht, unsigned long size) { } @@ -562,11 +602,12 @@ void ht_count_remove(struct cds_lfht *ht, struct rcu_table *t) static -void check_resize(struct cds_lfht *ht, struct rcu_table *t, - uint32_t chain_len) +void check_resize(struct cds_lfht *ht, unsigned long size, uint32_t chain_len) { unsigned long count; + if (!(ht->flags & CDS_LFHT_AUTO_RESIZE)) + return; count = uatomic_read(&ht->count); /* * Use bucket-local length for small table expand and for @@ -578,7 +619,7 @@ void check_resize(struct cds_lfht *ht, struct rcu_table *t, dbg_printf("WARNING: large chain length: %u.\n", chain_len); if (chain_len >= CHAIN_LEN_RESIZE_THRESHOLD) - cds_lfht_resize_lazy(ht, t, + cds_lfht_resize_lazy(ht, size, get_count_order_u32(chain_len - (CHAIN_LEN_TARGET - 1))); } @@ -626,40 +667,52 @@ unsigned long _uatomic_max(unsigned long *ptr, unsigned long v) return v; } -static -void cds_lfht_free_table_cb(struct 
rcu_head *head) -{ - struct rcu_table *t = - caa_container_of(head, struct rcu_table, head); - free(t); -} - static void cds_lfht_free_level(struct rcu_head *head) { struct rcu_level *l = caa_container_of(head, struct rcu_level, head); - free(l); + poison_free(l); } /* * Remove all logically deleted nodes from a bucket up to a certain node key. */ static -void _cds_lfht_gc_bucket(struct cds_lfht_node *dummy, struct cds_lfht_node *node) +int _cds_lfht_gc_bucket(struct cds_lfht_node *dummy, struct cds_lfht_node *node) { struct cds_lfht_node *iter_prev, *iter, *next, *new_next; + assert(!is_dummy(dummy)); + assert(!is_removed(dummy)); + assert(!is_dummy(node)); + assert(!is_removed(node)); for (;;) { iter_prev = dummy; /* We can always skip the dummy node initially */ iter = rcu_dereference(iter_prev->p.next); + if (unlikely(iter == NULL)) { + /* + * We are executing concurrently with a hash table + * expand, so we see a dummy node with NULL next value. + * Help expand by linking this node into the list and + * retry. + */ + return 1; + } assert(iter_prev->p.reverse_hash <= node->p.reverse_hash); + /* + * We should never be called with dummy (start of chain) + * and logically removed node (end of path compression + * marker) being the actual same node. This would be a + * bug in the algorithm implementation. + */ + assert(dummy != node); for (;;) { if (unlikely(!clear_flag(iter))) - return; + return 0; if (likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash)) - return; + return 0; next = rcu_dereference(clear_flag(iter)->p.next); if (likely(is_removed(next))) break; @@ -673,18 +726,23 @@ void _cds_lfht_gc_bucket(struct cds_lfht_node *dummy, struct cds_lfht_node *node new_next = clear_flag(next); (void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next); } + return 0; } static -struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht, struct rcu_table *t, - struct cds_lfht_node *node, int unique, int dummy) +struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht, + unsigned long size, + struct cds_lfht_node *node, + int unique, int dummy) { struct cds_lfht_node *iter_prev, *iter, *next, *new_node, *new_next, *dummy_node; struct _cds_lfht_node *lookup; unsigned long hash, index, order; - if (!t->size) { + assert(!is_dummy(node)); + assert(!is_removed(node)); + if (!size) { assert(dummy); node->p.next = flag_dummy(NULL); return node; /* Initial first add (head) */ @@ -697,14 +755,31 @@ struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht, struct rcu_table *t, * iter_prev points to the non-removed node prior to the * insert location. */ - index = hash & (t->size - 1); + index = hash & (size - 1); order = get_count_order_ulong(index + 1); - lookup = &t->tbl[order]->nodes[index & ((1UL << (order - 1)) - 1)]; + lookup = &ht->t.tbl[order]->nodes[index & ((!order ? 0 : (1UL << (order - 1))) - 1)]; iter_prev = (struct cds_lfht_node *) lookup; /* We can always skip the dummy node initially */ iter = rcu_dereference(iter_prev->p.next); + if (unlikely(iter == NULL)) { + /* + * We are executing concurrently with a hash table + * expand, so we see a dummy node with NULL next value. + * Help expand by linking this node into the list and + * retry. + */ + (void) _cds_lfht_add(ht, size >> 1, iter_prev, 0, 1); + continue; /* retry */ + } assert(iter_prev->p.reverse_hash <= node->p.reverse_hash); for (;;) { + /* + * When adding a dummy node, we allow concurrent + * add/removal to help. If we find the dummy node in + * place, skip its insertion. 
+ */ + if (unlikely(dummy && clear_flag(iter) == node)) + return node; if (unlikely(!clear_flag(iter))) goto insert; if (likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash)) @@ -721,13 +796,14 @@ struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht, struct rcu_table *t, /* Only account for identical reverse hash once */ if (iter_prev->p.reverse_hash != clear_flag(iter)->p.reverse_hash && !is_dummy(next)) - check_resize(ht, t, ++chain_len); + check_resize(ht, size, ++chain_len); iter_prev = clear_flag(iter); iter = next; } insert: assert(node != clear_flag(iter)); assert(!is_removed(iter_prev)); + assert(!is_removed(iter)); assert(iter_prev != node); if (!dummy) node->p.next = clear_flag(iter); @@ -753,17 +829,22 @@ struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht, struct rcu_table *t, } gc_end: /* Garbage collect logically removed nodes in the bucket */ - index = hash & (t->size - 1); + index = hash & (size - 1); order = get_count_order_ulong(index + 1); - lookup = &t->tbl[order]->nodes[index & ((1UL << (order - 1)) - 1)]; + lookup = &ht->t.tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))]; dummy_node = (struct cds_lfht_node *) lookup; - _cds_lfht_gc_bucket(dummy_node, node); + if (_cds_lfht_gc_bucket(dummy_node, node)) { + /* Help expand */ + (void) _cds_lfht_add(ht, size >> 1, dummy_node, 0, 1); + goto gc_end; /* retry */ + } return node; } static -int _cds_lfht_remove(struct cds_lfht *ht, struct rcu_table *t, - struct cds_lfht_node *node, int dummy_removal) +int _cds_lfht_remove(struct cds_lfht *ht, unsigned long size, + struct cds_lfht_node *node, + int dummy_removal) { struct cds_lfht_node *dummy, *next, *old; struct _cds_lfht_node *lookup; @@ -771,6 +852,8 @@ int _cds_lfht_remove(struct cds_lfht *ht, struct rcu_table *t, unsigned long hash, index, order; /* logically delete the node */ + assert(!is_dummy(node)); + assert(!is_removed(node)); old = rcu_dereference(node->p.next); do { next = old; @@ -787,29 +870,23 @@ int _cds_lfht_remove(struct cds_lfht *ht, struct rcu_table *t, /* We performed the (logical) deletion. */ flagged = 1; - if (dummy_removal) - node = clear_flag(node); - /* * Ensure that the node is not visible to readers anymore: lookup for * the node, and remove it (along with any other logically removed node) * if found. */ +gc_retry: hash = bit_reverse_ulong(node->p.reverse_hash); - /* - * When removing a dummy node, we need to consider the lower - * order table, so we don't end up looking up the dummy nodes we - * are currently removing. - */ - - if (dummy_removal) - index = hash & ((t->size >> 1) - 1); - else - index = hash & (t->size - 1); + assert(size > 0); + index = hash & (size - 1); order = get_count_order_ulong(index + 1); - lookup = &t->tbl[order]->nodes[index & ((1UL << (order - 1)) - 1)]; + lookup = &ht->t.tbl[order]->nodes[index & (!order ? 
0 : ((1UL << (order - 1)) - 1))]; dummy = (struct cds_lfht_node *) lookup; - _cds_lfht_gc_bucket(dummy, node); + if (_cds_lfht_gc_bucket(dummy, node)) { + /* Help expand */ + (void) _cds_lfht_add(ht, size >> 1, dummy, 0, 1); + goto gc_retry; /* retry */ + } end: /* * Only the flagging action indicated that we (and no other) @@ -823,7 +900,53 @@ end: } static -void init_table(struct cds_lfht *ht, struct rcu_table *t, +void init_table_hash(struct cds_lfht *ht, unsigned long i, + unsigned long len) +{ + unsigned long j; + + for (j = 0; j < len; j++) { + struct cds_lfht_node *new_node = + (struct cds_lfht_node *) &ht->t.tbl[i]->nodes[j]; + + dbg_printf("init hash entry: i %lu j %lu hash %lu\n", + i, j, !i ? 0 : (1UL << (i - 1)) + j); + new_node->p.reverse_hash = + bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j); + if (CMM_LOAD_SHARED(ht->in_progress_destroy)) + break; + } +} + +/* + * Holding RCU read lock to protect _cds_lfht_add against memory + * reclaim that could be performed by other call_rcu worker threads (ABA + * problem). + */ +static +void init_table_link(struct cds_lfht *ht, unsigned long i, unsigned long len) +{ + unsigned long j; + + ht->cds_lfht_rcu_thread_online(); + ht->cds_lfht_rcu_read_lock(); + for (j = 0; j < len; j++) { + struct cds_lfht_node *new_node = + (struct cds_lfht_node *) &ht->t.tbl[i]->nodes[j]; + + dbg_printf("init link: i %lu j %lu hash %lu\n", + i, j, !i ? 0 : (1UL << (i - 1)) + j); + (void) _cds_lfht_add(ht, !i ? 0 : (1UL << (i - 1)), + new_node, 0, 1); + if (CMM_LOAD_SHARED(ht->in_progress_destroy)) + break; + } + ht->cds_lfht_rcu_read_unlock(); + ht->cds_lfht_rcu_thread_offline(); +} + +static +void init_table(struct cds_lfht *ht, unsigned long first_order, unsigned long len_order) { unsigned long i, end_order; @@ -831,38 +954,89 @@ void init_table(struct cds_lfht *ht, struct rcu_table *t, dbg_printf("init table: first_order %lu end_order %lu\n", first_order, first_order + len_order); end_order = first_order + len_order; - t->size = !first_order ? 0 : (1UL << (first_order - 1)); for (i = first_order; i < end_order; i++) { - unsigned long j, len; + unsigned long len; len = !i ? 1 : 1UL << (i - 1); dbg_printf("init order %lu len: %lu\n", i, len); - t->tbl[i] = calloc(1, sizeof(struct rcu_level) + + /* Stop expand if the resize target changes under us */ + if (CMM_LOAD_SHARED(ht->t.resize_target) < (!i ? 1 : (1UL << i))) + break; + + ht->t.tbl[i] = calloc(1, sizeof(struct rcu_level) + (len * sizeof(struct _cds_lfht_node))); - for (j = 0; j < len; j++) { - struct cds_lfht_node *new_node = - (struct cds_lfht_node *) &t->tbl[i]->nodes[j]; - - dbg_printf("init entry: i %lu j %lu hash %lu\n", - i, j, !i ? 0 : (1UL << (i - 1)) + j); - new_node->p.reverse_hash = - bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j); - (void) _cds_lfht_add(ht, t, new_node, 0, 1); - if (CMM_LOAD_SHARED(ht->in_progress_destroy)) - break; - } - /* Update table size */ - t->size = !i ? 1 : (1UL << i); - dbg_printf("init new size: %lu\n", t->size); + + /* Set all dummy nodes reverse hash values for a level */ + init_table_hash(ht, i, len); + + /* + * Update table size. At this point, concurrent add/remove see + * dummy nodes with correctly initialized reverse hash value, + * but with NULL next pointers. If they do, they can help us + * link the dummy nodes into the list and retry. + */ + cmm_smp_wmb(); /* populate data before RCU size */ + CMM_STORE_SHARED(ht->t.size, !i ? 1 : (1UL << i)); + + /* + * Link all dummy nodes into the table. Concurrent + * add/remove are helping us. 
+ */ + init_table_link(ht, i, len); + + dbg_printf("init new size: %lu\n", !i ? 1 : (1UL << i)); if (CMM_LOAD_SHARED(ht->in_progress_destroy)) break; } - t->resize_target = t->size; - t->resize_initiated = 0; +} + +/* + * Holding RCU read lock to protect _cds_lfht_remove against memory + * reclaim that could be performed by other call_rcu worker threads (ABA + * problem). + * For a single level, we logically remove and garbage collect each node. + * + * As a design choice, we perform logical removal and garbage collection on a + * node-per-node basis to simplify this algorithm. We also assume keeping good + * cache locality of the operation would outweigh the possible performance gain + * that could be achieved by batching garbage collection for multiple levels. + * However, this would have to be justified by benchmarks. + * + * Concurrent removal and add operations are helping us perform garbage + * collection of logically removed nodes. We guarantee that all logically + * removed nodes have been garbage-collected (unlinked) before call_rcu is + * invoked to free a whole level of dummy nodes (after a grace period). + * + * Logical removal and garbage collection can therefore be done in batch or on a + * node-per-node basis, as long as the guarantee above holds. + */ +static +void remove_table(struct cds_lfht *ht, unsigned long i, unsigned long len) +{ + unsigned long j; + + ht->cds_lfht_rcu_thread_online(); + ht->cds_lfht_rcu_read_lock(); + for (j = 0; j < len; j++) { + struct cds_lfht_node *fini_node = + (struct cds_lfht_node *) &ht->t.tbl[i]->nodes[j]; + + dbg_printf("remove entry: i %lu j %lu hash %lu\n", + i, j, !i ? 0 : (1UL << (i - 1)) + j); + fini_node->p.reverse_hash = + bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j); + (void) _cds_lfht_remove(ht, !i ? 0 : (1UL << (i - 1)), + fini_node, 1); + if (CMM_LOAD_SHARED(ht->in_progress_destroy)) + break; + } + ht->cds_lfht_rcu_read_unlock(); + ht->cds_lfht_rcu_thread_offline(); } static -void fini_table(struct cds_lfht *ht, struct rcu_table *t, +void fini_table(struct cds_lfht *ht, unsigned long first_order, unsigned long len_order) { long i, end_order; @@ -871,43 +1045,55 @@ void fini_table(struct cds_lfht *ht, struct rcu_table *t, first_order, first_order + len_order); end_order = first_order + len_order; assert(first_order > 0); - assert(t->size == (1UL << (end_order - 1))); for (i = end_order - 1; i >= first_order; i--) { - unsigned long j, len; + unsigned long len; len = !i ? 1 : 1UL << (i - 1); dbg_printf("fini order %lu len: %lu\n", i, len); - /* Unlink */ - for (j = 0; j < len; j++) { - struct cds_lfht_node *new_node = - (struct cds_lfht_node *) &t->tbl[i]->nodes[j]; - - dbg_printf("fini entry: i %lu j %lu hash %lu\n", - i, j, !i ? 0 : (1UL << (i - 1)) + j); - new_node->p.reverse_hash = - bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j); - (void) _cds_lfht_remove(ht, t, new_node, 1); - if (CMM_LOAD_SHARED(ht->in_progress_destroy)) - break; - } - ht->cds_lfht_call_rcu(&t->tbl[i]->head, cds_lfht_free_level); - /* Update table size */ - t->size = (i == 1) ? 0 : 1UL << (i - 2); - dbg_printf("fini new size: %lu\n", t->size); + + /* Stop shrink if the resize target changes under us */ + if (CMM_LOAD_SHARED(ht->t.resize_target) > (1UL << (i - 1))) + break; + + cmm_smp_wmb(); /* populate data before RCU size */ + CMM_STORE_SHARED(ht->t.size, 1UL << (i - 1)); + + /* + * We need to wait for all add operations to reach Q.S. (and + * thus use the new table for lookups) before we can start + * releasing the old dummy nodes.
Otherwise their lookup will + * return a logically removed node as the insert position. + */ + ht->cds_lfht_synchronize_rcu(); + + /* + * Set "removed" flag in dummy nodes about to be removed. + * Unlink all now-logically-removed dummy node pointers. + * Concurrent add/remove operations are helping us do + * the GC. + */ + remove_table(ht, i, len); + + ht->cds_lfht_call_rcu(&ht->t.tbl[i]->head, cds_lfht_free_level); + + dbg_printf("fini new size: %lu\n", 1UL << (i - 1)); if (CMM_LOAD_SHARED(ht->in_progress_destroy)) break; } - t->resize_target = t->size; - t->resize_initiated = 0; } struct cds_lfht *cds_lfht_new(cds_lfht_hash_fct hash_fct, cds_lfht_compare_fct compare_fct, unsigned long hash_seed, unsigned long init_size, + int flags, void (*cds_lfht_call_rcu)(struct rcu_head *head, void (*func)(struct rcu_head *head)), - void (*cds_lfht_synchronize_rcu)(void)) + void (*cds_lfht_synchronize_rcu)(void), + void (*cds_lfht_rcu_read_lock)(void), + void (*cds_lfht_rcu_read_unlock)(void), + void (*cds_lfht_rcu_thread_offline)(void), + void (*cds_lfht_rcu_thread_online)(void)) { struct cds_lfht *ht; unsigned long order; @@ -921,36 +1107,39 @@ struct cds_lfht *cds_lfht_new(cds_lfht_hash_fct hash_fct, ht->hash_seed = hash_seed; ht->cds_lfht_call_rcu = cds_lfht_call_rcu; ht->cds_lfht_synchronize_rcu = cds_lfht_synchronize_rcu; - ht->in_progress_resize = 0; + ht->cds_lfht_rcu_read_lock = cds_lfht_rcu_read_lock; + ht->cds_lfht_rcu_read_unlock = cds_lfht_rcu_read_unlock; + ht->cds_lfht_rcu_thread_offline = cds_lfht_rcu_thread_offline; + ht->cds_lfht_rcu_thread_online = cds_lfht_rcu_thread_online; ht->percpu_count = alloc_per_cpu_items_count(); /* this mutex should not nest in read-side C.S. */ pthread_mutex_init(&ht->resize_mutex, NULL); - order = get_count_order_ulong(max(init_size, 1)) + 1; - ht->t = calloc(1, sizeof(struct cds_lfht) - + (order * sizeof(struct rcu_level *))); - ht->t->size = 0; + order = get_count_order_ulong(max(init_size, MIN_TABLE_SIZE)) + 1; + ht->flags = flags; + ht->cds_lfht_rcu_thread_offline(); pthread_mutex_lock(&ht->resize_mutex); - init_table(ht, ht->t, 0, order); + ht->t.resize_target = 1UL << (order - 1); + init_table(ht, 0, order); pthread_mutex_unlock(&ht->resize_mutex); + ht->cds_lfht_rcu_thread_online(); return ht; } struct cds_lfht_node *cds_lfht_lookup(struct cds_lfht *ht, void *key, size_t key_len) { - struct rcu_table *t; struct cds_lfht_node *node, *next; struct _cds_lfht_node *lookup; - unsigned long hash, reverse_hash, index, order; + unsigned long hash, reverse_hash, index, order, size; hash = ht->hash_fct(key, key_len, ht->hash_seed); reverse_hash = bit_reverse_ulong(hash); - t = rcu_dereference(ht->t); - index = hash & (t->size - 1); + size = rcu_dereference(ht->t.size); + index = hash & (size - 1); order = get_count_order_ulong(index + 1); - lookup = &t->tbl[order]->nodes[index & ((1UL << (order - 1)) - 1)]; + lookup = &ht->t.tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1))) - 1)]; dbg_printf("lookup hash %lu index %lu order %lu aridx %lu\n", - hash, index, order, index & ((1UL << (order - 1)) - 1)); + hash, index, order, index & (!order ?
0 : ((1UL << (order - 1)) - 1))); node = (struct cds_lfht_node *) lookup; for (;;) { if (unlikely(!node)) @@ -1006,57 +1195,53 @@ struct cds_lfht_node *cds_lfht_next(struct cds_lfht *ht, void cds_lfht_add(struct cds_lfht *ht, struct cds_lfht_node *node) { - struct rcu_table *t; - unsigned long hash; + unsigned long hash, size; hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed); node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash); - t = rcu_dereference(ht->t); - (void) _cds_lfht_add(ht, t, node, 0, 0); - ht_count_add(ht, t); + size = rcu_dereference(ht->t.size); + (void) _cds_lfht_add(ht, size, node, 0, 0); + ht_count_add(ht, size); } struct cds_lfht_node *cds_lfht_add_unique(struct cds_lfht *ht, struct cds_lfht_node *node) { - struct rcu_table *t; - unsigned long hash; + unsigned long hash, size; struct cds_lfht_node *ret; hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed); node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash); - t = rcu_dereference(ht->t); - ret = _cds_lfht_add(ht, t, node, 1, 0); + size = rcu_dereference(ht->t.size); + ret = _cds_lfht_add(ht, size, node, 1, 0); if (ret != node) - ht_count_add(ht, t); + ht_count_add(ht, size); return ret; } int cds_lfht_remove(struct cds_lfht *ht, struct cds_lfht_node *node) { - struct rcu_table *t; + unsigned long size; int ret; - t = rcu_dereference(ht->t); - ret = _cds_lfht_remove(ht, t, node, 0); + size = rcu_dereference(ht->t.size); + ret = _cds_lfht_remove(ht, size, node, 0); if (!ret) - ht_count_remove(ht, t); + ht_count_remove(ht, size); return ret; } static int cds_lfht_delete_dummy(struct cds_lfht *ht) { - struct rcu_table *t; struct cds_lfht_node *node; struct _cds_lfht_node *lookup; - unsigned long order, i; + unsigned long order, i, size; - t = ht->t; /* Check that the table is empty */ - lookup = &t->tbl[0]->nodes[0]; + lookup = &ht->t.tbl[0]->nodes[0]; node = (struct cds_lfht_node *) lookup; do { node = clear_flag(node)->p.next; @@ -1064,18 +1249,23 @@ int cds_lfht_delete_dummy(struct cds_lfht *ht) return -EPERM; assert(!is_removed(node)); } while (clear_flag(node)); + /* + * size accessed without rcu_dereference because hash table is + * being destroyed. + */ + size = ht->t.size; /* Internal sanity check: all nodes left should be dummy */ - for (order = 0; order < get_count_order_ulong(t->size) + 1; order++) { + for (order = 0; order < get_count_order_ulong(size) + 1; order++) { unsigned long len; len = !order ? 
1 : 1UL << (order - 1); for (i = 0; i < len; i++) { dbg_printf("delete order %lu i %lu hash %lu\n", order, i, - bit_reverse_ulong(t->tbl[order]->nodes[i].reverse_hash)); - assert(is_dummy(t->tbl[order]->nodes[i].next)); + bit_reverse_ulong(ht->t.tbl[order]->nodes[i].reverse_hash)); + assert(is_dummy(ht->t.tbl[order]->nodes[i].next)); } - free(t->tbl[order]); + poison_free(ht->t.tbl[order]); } return 0; } @@ -1095,9 +1285,8 @@ int cds_lfht_destroy(struct cds_lfht *ht) ret = cds_lfht_delete_dummy(ht); if (ret) return ret; - free(ht->t); free_per_cpu_items_count(ht->percpu_count); - free(ht); + poison_free(ht); return ret; } @@ -1105,7 +1294,6 @@ void cds_lfht_count_nodes(struct cds_lfht *ht, unsigned long *count, unsigned long *removed) { - struct rcu_table *t; struct cds_lfht_node *node, *next; struct _cds_lfht_node *lookup; unsigned long nr_dummy = 0; @@ -1113,9 +1301,8 @@ void cds_lfht_count_nodes(struct cds_lfht *ht, *count = 0; *removed = 0; - t = rcu_dereference(ht->t); /* Count non-dummy nodes in the table */ - lookup = &t->tbl[0]->nodes[0]; + lookup = &ht->t.tbl[0]->nodes[0]; node = (struct cds_lfht_node *) lookup; do { next = rcu_dereference(node->p.next); @@ -1133,62 +1320,35 @@ void cds_lfht_count_nodes(struct cds_lfht *ht, /* called with resize mutex held */ static -void _do_cds_lfht_grow(struct cds_lfht *ht, struct rcu_table *old_t, +void _do_cds_lfht_grow(struct cds_lfht *ht, unsigned long old_size, unsigned long new_size) { unsigned long old_order, new_order; - struct rcu_table *new_t; old_order = get_count_order_ulong(old_size) + 1; new_order = get_count_order_ulong(new_size) + 1; printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n", old_size, old_order, new_size, new_order); - new_t = malloc(sizeof(struct cds_lfht) - + (new_order * sizeof(struct rcu_level *))); assert(new_size > old_size); - memcpy(&new_t->tbl, &old_t->tbl, - old_order * sizeof(struct rcu_level *)); - init_table(ht, new_t, old_order, new_order - old_order); - /* Changing table and size atomically wrt lookups */ - rcu_assign_pointer(ht->t, new_t); - ht->cds_lfht_call_rcu(&old_t->head, cds_lfht_free_table_cb); + init_table(ht, old_order, new_order - old_order); } /* called with resize mutex held */ static -void _do_cds_lfht_shrink(struct cds_lfht *ht, struct rcu_table *old_t, +void _do_cds_lfht_shrink(struct cds_lfht *ht, unsigned long old_size, unsigned long new_size) { unsigned long old_order, new_order; - struct rcu_table *new_t; - new_size = max(new_size, 1); + new_size = max(new_size, MIN_TABLE_SIZE); old_order = get_count_order_ulong(old_size) + 1; new_order = get_count_order_ulong(new_size) + 1; printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n", old_size, old_order, new_size, new_order); - new_t = malloc(sizeof(struct cds_lfht) - + (new_order * sizeof(struct rcu_level *))); assert(new_size < old_size); - memcpy(&new_t->tbl, &old_t->tbl, - new_order * sizeof(struct rcu_level *)); - new_t->size = !new_order ? 1 : (1UL << (new_order - 1)); - new_t->resize_target = new_t->size; - new_t->resize_initiated = 0; - /* Changing table and size atomically wrt lookups */ - rcu_assign_pointer(ht->t, new_t); - - /* - * We need to wait for all reader threads to reach Q.S. (and - * thus use the new table for lookups) before we can start - * releasing the old dummy nodes. - */ - ht->cds_lfht_synchronize_rcu(); - - /* Unlink and remove all now-unused dummy node pointers. 
*/ - fini_table(ht, old_t, new_order, old_order - new_order); - ht->cds_lfht_call_rcu(&old_t->head, cds_lfht_free_table_cb); + /* Remove and unlink all dummy nodes to remove. */ + fini_table(ht, new_order, old_order - new_order); } @@ -1197,45 +1357,49 @@ static void _do_cds_lfht_resize(struct cds_lfht *ht) { unsigned long new_size, old_size; - struct rcu_table *old_t; - - old_t = ht->t; - old_size = old_t->size; - new_size = CMM_LOAD_SHARED(old_t->resize_target); - if (old_size < new_size) - _do_cds_lfht_grow(ht, old_t, old_size, new_size); - else if (old_size > new_size) - _do_cds_lfht_shrink(ht, old_t, old_size, new_size); - else - CMM_STORE_SHARED(old_t->resize_initiated, 0); + + /* + * Resize table, re-do if the target size has changed under us. + */ + do { + ht->t.resize_initiated = 1; + old_size = ht->t.size; + new_size = CMM_LOAD_SHARED(ht->t.resize_target); + if (old_size < new_size) + _do_cds_lfht_grow(ht, old_size, new_size); + else if (old_size > new_size) + _do_cds_lfht_shrink(ht, old_size, new_size); + ht->t.resize_initiated = 0; + /* write resize_initiated before read resize_target */ + cmm_smp_mb(); + } while (ht->t.size != CMM_LOAD_SHARED(ht->t.resize_target)); } static -unsigned long resize_target_update(struct rcu_table *t, +unsigned long resize_target_update(struct cds_lfht *ht, unsigned long size, int growth_order) { - return _uatomic_max(&t->resize_target, - t->size << growth_order); + return _uatomic_max(&ht->t.resize_target, + size << growth_order); } static -unsigned long resize_target_update_count(struct rcu_table *t, - unsigned long count) +void resize_target_update_count(struct cds_lfht *ht, + unsigned long count) { - count = max(count, 1); - return uatomic_set(&t->resize_target, count); + count = max(count, MIN_TABLE_SIZE); + uatomic_set(&ht->t.resize_target, count); } void cds_lfht_resize(struct cds_lfht *ht, unsigned long new_size) { - struct rcu_table *t = rcu_dereference(ht->t); - unsigned long target_size; - - target_size = resize_target_update_count(t, new_size); - CMM_STORE_SHARED(t->resize_initiated, 1); + resize_target_update_count(ht, new_size); + CMM_STORE_SHARED(ht->t.resize_initiated, 1); + ht->cds_lfht_rcu_thread_offline(); pthread_mutex_lock(&ht->resize_mutex); _do_cds_lfht_resize(ht); pthread_mutex_unlock(&ht->resize_mutex); + ht->cds_lfht_rcu_thread_online(); } static @@ -1245,48 +1409,55 @@ void do_resize_cb(struct rcu_head *head) caa_container_of(head, struct rcu_resize_work, head); struct cds_lfht *ht = work->ht; + ht->cds_lfht_rcu_thread_offline(); pthread_mutex_lock(&ht->resize_mutex); _do_cds_lfht_resize(ht); pthread_mutex_unlock(&ht->resize_mutex); - free(work); + ht->cds_lfht_rcu_thread_online(); + poison_free(work); cmm_smp_mb(); /* finish resize before decrement */ uatomic_dec(&ht->in_progress_resize); } static -void cds_lfht_resize_lazy(struct cds_lfht *ht, struct rcu_table *t, int growth) +void cds_lfht_resize_lazy(struct cds_lfht *ht, unsigned long size, int growth) { struct rcu_resize_work *work; unsigned long target_size; - target_size = resize_target_update(t, growth); - if (!CMM_LOAD_SHARED(t->resize_initiated) && t->size < target_size) { + target_size = resize_target_update(ht, size, growth); + /* Store resize_target before read resize_initiated */ + cmm_smp_mb(); + if (!CMM_LOAD_SHARED(ht->t.resize_initiated) && size < target_size) { uatomic_inc(&ht->in_progress_resize); cmm_smp_mb(); /* increment resize count before calling it */ work = malloc(sizeof(*work)); work->ht = ht; ht->cds_lfht_call_rcu(&work->head, do_resize_cb); - 
CMM_STORE_SHARED(t->resize_initiated, 1); + CMM_STORE_SHARED(ht->t.resize_initiated, 1); } } #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) static -void cds_lfht_resize_lazy_count(struct cds_lfht *ht, struct rcu_table *t, +void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size, unsigned long count) { struct rcu_resize_work *work; - unsigned long target_size; - target_size = resize_target_update_count(t, count); - if (!CMM_LOAD_SHARED(t->resize_initiated)) { + if (!(ht->flags & CDS_LFHT_AUTO_RESIZE)) + return; + resize_target_update_count(ht, count); + /* Store resize_target before read resize_initiated */ + cmm_smp_mb(); + if (!CMM_LOAD_SHARED(ht->t.resize_initiated)) { uatomic_inc(&ht->in_progress_resize); cmm_smp_mb(); /* increment resize count before calling it */ work = malloc(sizeof(*work)); work->ht = ht; ht->cds_lfht_call_rcu(&work->head, do_resize_cb); - CMM_STORE_SHARED(t->resize_initiated, 1); + CMM_STORE_SHARED(ht->t.resize_initiated, 1); } }
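
Taken together, resize_target and resize_initiated form a small handshake: requesters raise the target monotonically (_uatomic_max() or uatomic_set()), issue a full barrier, and queue a do_resize_cb() work item only if no resize is already marked initiated, while the worker re-checks the target after every pass so a target raised mid-resize is never lost. Below is a self-contained sketch of that handshake under those assumptions (shrinking elided for brevity), using GCC __atomic builtins in place of the uatomic/CMM macros; all names are illustrative, not from the patch:

#include <pthread.h>

static unsigned long resize_target = 128;	/* stands in for ht->t.resize_target */
static int resize_initiated;
static unsigned long table_size = 128;		/* stands in for ht->t.size */
static pthread_mutex_t resize_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Raise the target monotonically, like _uatomic_max() in this file. */
static void target_max(unsigned long v)
{
	unsigned long old = __atomic_load_n(&resize_target, __ATOMIC_RELAXED);

	while (old < v
		&& !__atomic_compare_exchange_n(&resize_target, &old, v, 0,
				__ATOMIC_RELAXED, __ATOMIC_RELAXED))
		;	/* CAS failure reloads old; retry while still smaller */
}

/* Requester side, shaped like cds_lfht_resize_lazy(). */
static void request_resize(unsigned long new_target)
{
	target_max(new_target);
	/* Store resize_target before reading resize_initiated. */
	__atomic_thread_fence(__ATOMIC_SEQ_CST);
	if (!__atomic_load_n(&resize_initiated, __ATOMIC_RELAXED)) {
		/* ...queue the resize worker through call_rcu here... */
		__atomic_store_n(&resize_initiated, 1, __ATOMIC_RELAXED);
	}
}

/* Worker side, shaped like _do_cds_lfht_resize(): redo if the target moved. */
static void do_resize(void)
{
	pthread_mutex_lock(&resize_mutex);
	do {
		__atomic_store_n(&resize_initiated, 1, __ATOMIC_RELAXED);
		while (table_size
			< __atomic_load_n(&resize_target, __ATOMIC_RELAXED))
			table_size <<= 1;	/* stands in for init_table() */
		__atomic_store_n(&resize_initiated, 0, __ATOMIC_RELAXED);
		/* Write resize_initiated before re-reading resize_target. */
		__atomic_thread_fence(__ATOMIC_SEQ_CST);
	} while (table_size
		!= __atomic_load_n(&resize_target, __ATOMIC_RELAXED));
	pthread_mutex_unlock(&resize_mutex);
}

The two fences pair up so that a raise cannot fall through the cracks: either the requester observes resize_initiated == 1 and the worker's post-clear re-read sees the raised target, or the requester observes 0 and queues the work itself.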