X-Git-Url: https://git.liburcu.org/?p=urcu.git;a=blobdiff_plain;f=rculfhash.c;h=7e8165021f65116a1ed6e4386f102c1c7d5a9907;hp=da37e97d38554c6def88699590be83439385e82f;hb=04db56f85e2cd2abb05e4ef14990e99a1058d0df;hpb=b5d6b20fb518bce4a39c256ac89dcb376a5f971a

diff --git a/rculfhash.c b/rculfhash.c
index da37e97..7e81650 100644
--- a/rculfhash.c
+++ b/rculfhash.c
@@ -222,15 +222,37 @@
 /* Value of the end pointer. Should not interact with flags. */
 #define END_VALUE NULL
 
+/*
+ * ht_items_count: Split-counters counting the number of node additions
+ * and removals in the table. Only used if the CDS_LFHT_ACCOUNTING flag
+ * is set at hash table creation.
+ *
+ * These are free-running counters, never reset to zero. They count the
+ * number of add/remove operations, and trigger an update of the global
+ * counter every (1 << COUNT_COMMIT_ORDER) operations. The power-of-2
+ * trigger keeps the test correct across 32-bit or 64-bit overflow.
+ */
 struct ht_items_count {
 unsigned long add, del;
 } __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
 
+/*
+ * rcu_level: Contains the dummy node table for one order-index level.
+ * The size of each dummy node table is half the number of hashes
+ * covered by that order (except for order 0). The minimum allocation
+ * size parameter allows combining the dummy node arrays of the
+ * lowermost levels to improve cache locality for small index orders.
+ */
 struct rcu_level {
 /* Note: manually update allocation length when adding a field */
-struct _cds_lfht_node nodes[0];
+struct cds_lfht_node nodes[0];
 };
 
+/*
+ * rcu_table: Contains the size and desired new size if a resize
+ * operation is in progress, as well as the statically-sized array of
+ * rcu_level pointers.
+ */
 struct rcu_table {
 unsigned long size; /* always a power of 2, shared (RCU) */
 unsigned long resize_target;
@@ -238,13 +260,15 @@ struct rcu_table {
 struct rcu_level *tbl[MAX_TABLE_ORDER];
 };
 
+/*
+ * cds_lfht: Top-level data structure representing a lock-free hash
+ * table. Defined in the implementation file so that it remains an
+ * opaque cookie to users.
+ */
 struct cds_lfht {
 struct rcu_table t;
-cds_lfht_hash_fct hash_fct;
-cds_lfht_compare_fct compare_fct;
 unsigned long min_alloc_order;
 unsigned long min_alloc_size;
-unsigned long hash_seed;
 int flags;
 /*
 * We need to put the work threads offline (QSBR) when taking this
@@ -269,11 +293,20 @@ struct cds_lfht {
 struct ht_items_count *split_count; /* split item count */
 };
 
+/*
+ * rcu_resize_work: Contains arguments passed to the RCU worker thread
+ * responsible for performing lazy resize.
+ */
 struct rcu_resize_work {
 struct rcu_head head;
 struct cds_lfht *ht;
 };
 
+/*
+ * partition_resize_work: Contains arguments passed to each worker
+ * thread executing the hash table resize on the partition of the hash
+ * table assigned to that thread.
+ */
 struct partition_resize_work {
 pthread_t thread_id;
 struct cds_lfht *ht;
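
The split counters described in the ht_items_count comment scale because each updater touches only its own cache-line-aligned slot; the shared global counter is updated once per (1 << COUNT_COMMIT_ORDER) local operations. The following minimal sketch restates the idea outside the urcu code base — the demo_* names and the use of C11 atomics are illustrative assumptions, not part of this patch:

    #include <stdatomic.h>

    #define DEMO_COUNT_COMMIT_ORDER 10
    #define DEMO_NR_SLOTS           16      /* must be a power of 2 */

    struct demo_split_counter {
        _Atomic unsigned long add;          /* free-running, never reset */
    };

    static struct demo_split_counter demo_slots[DEMO_NR_SLOTS];
    static _Atomic long demo_global_count;

    static void demo_count_add(unsigned long hash)
    {
        unsigned long c;

        /* Each updater touches only its own slot, avoiding contention. */
        c = atomic_fetch_add(&demo_slots[hash & (DEMO_NR_SLOTS - 1)].add, 1) + 1;
        /*
         * Commit to the global counter once every 2^ORDER operations.
         * Because the trigger is a power of 2, the low-bits test keeps
         * working when the free-running counter wraps around.
         */
        if (!(c & ((1UL << DEMO_COUNT_COMMIT_ORDER) - 1)))
            atomic_fetch_add(&demo_global_count,
                    1L << DEMO_COUNT_COMMIT_ORDER);
    }

In the real table, ht_count_add()/ht_count_del() below implement exactly this pattern, selecting the slot by sched_getcpu() when it is available.
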
@@ -284,6 +317,8 @@ struct partition_resize_work {
 
 static
 void _cds_lfht_add(struct cds_lfht *ht,
+ cds_lfht_match_fct match,
+ void *key,
 unsigned long size,
 struct cds_lfht_node *node,
 struct cds_lfht_iter *unique_ret,
@@ -454,7 +489,7 @@ unsigned int fls_u32(uint32_t x)
 unsigned int fls_ulong(unsigned long x)
 {
-#if (CAA_BITS_PER_lONG == 32)
+#if (CAA_BITS_PER_LONG == 32)
 return fls_u32(x);
 #else
 return fls_u64(x);
@@ -498,7 +533,7 @@ int get_count_order_ulong(unsigned long x)
 #endif
 
 static
-void cds_lfht_resize_lazy(struct cds_lfht *ht, unsigned long size, int growth);
+void cds_lfht_resize_lazy_grow(struct cds_lfht *ht, unsigned long size, int growth);
 
 static
 void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size,
@@ -568,7 +603,7 @@ int ht_get_split_count_index(unsigned long hash)
 
 assert(split_count_mask >= 0);
 cpu = sched_getcpu();
- if (unlikely(cpu < 0))
+ if (caa_unlikely(cpu < 0))
 return hash & split_count_mask;
 else
 return cpu & split_count_mask;
@@ -587,11 +622,11 @@ void ht_count_add(struct cds_lfht *ht, unsigned long size, unsigned long hash)
 unsigned long split_count;
 int index;
 
- if (unlikely(!ht->split_count))
+ if (caa_unlikely(!ht->split_count))
 return;
 index = ht_get_split_count_index(hash);
 split_count = uatomic_add_return(&ht->split_count[index].add, 1);
- if (unlikely(!(split_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) {
+ if (caa_unlikely(!(split_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) {
 long count;
 
 dbg_printf("add split count %lu\n", split_count);
@@ -614,11 +649,11 @@ void ht_count_del(struct cds_lfht *ht, unsigned long size, unsigned long hash)
 unsigned long split_count;
 int index;
 
- if (unlikely(!ht->split_count))
+ if (caa_unlikely(!ht->split_count))
 return;
 index = ht_get_split_count_index(hash);
 split_count = uatomic_add_return(&ht->split_count[index].del, 1);
- if (unlikely(!(split_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) {
+ if (caa_unlikely(!(split_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) {
 long count;
 
 dbg_printf("del split count %lu\n", split_count);
@@ -659,7 +694,7 @@ void check_resize(struct cds_lfht *ht, unsigned long size, uint32_t chain_len)
 dbg_printf("WARNING: large chain length: %u.\n",
 chain_len);
 if (chain_len >= CHAIN_LEN_RESIZE_THRESHOLD)
- cds_lfht_resize_lazy(ht, size,
+ cds_lfht_resize_lazy_grow(ht, size,
 get_count_order_u32(chain_len - (CHAIN_LEN_TARGET - 1)));
 }
 
@@ -706,7 +741,8 @@ int is_end(struct cds_lfht_node *node)
 }
 
 static
-unsigned long _uatomic_max(unsigned long *ptr, unsigned long v)
+unsigned long _uatomic_xchg_monotonic_increase(unsigned long *ptr,
+ unsigned long v)
 {
 unsigned long old1, old2;
 
@@ -716,11 +752,11 @@ unsigned long _uatomic_max(unsigned long *ptr, unsigned long v)
 if (old2 >= v)
 return old2;
 } while ((old1 = uatomic_cmpxchg(ptr, old2, v)) != old2);
- return v;
+ return old2;
 }
 
 static
-struct _cds_lfht_node *lookup_bucket(struct cds_lfht *ht, unsigned long size,
+struct cds_lfht_node *lookup_bucket(struct cds_lfht *ht, unsigned long size,
 unsigned long hash)
 {
 unsigned long index, order;
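
The rename of _uatomic_max() to _uatomic_xchg_monotonic_increase() above comes with a behavior fix: the function now returns the value observed in *ptr before the update (old2) instead of v, so a caller can tell whether another thread had already raised the value past its request. A self-contained restatement, with GCC's __sync_val_compare_and_swap standing in for uatomic_cmpxchg (an assumption made to keep the sketch compilable on its own):

    static unsigned long
    monotonic_increase(unsigned long *ptr, unsigned long v)
    {
        unsigned long old1, old2;

        old1 = *(volatile unsigned long *) ptr;
        do {
            old2 = old1;
            if (old2 >= v)
                return old2;    /* someone already stored >= v */
        } while ((old1 = __sync_val_compare_and_swap(ptr, old2, v)) != old2);
        /*
         * Return the prior value (old2, not v): the caller can then
         * detect whether the target had already been raised to v by
         * another thread, e.g. to skip queuing a redundant resize.
         */
        return old2;
    }

resize_target_grow(), later in this patch, depends on exactly that return value to skip launching a resize whose target has already been reached.
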
@@ -759,9 +795,9 @@ void _cds_lfht_gc_bucket(struct cds_lfht_node *dummy, struct cds_lfht_node *node
 for (;;) {
 iter_prev = dummy;
 /* We can always skip the dummy node initially */
- iter = rcu_dereference(iter_prev->p.next);
+ iter = rcu_dereference(iter_prev->next);
 assert(!is_removed(iter));
- assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
+ assert(iter_prev->reverse_hash <= node->reverse_hash);
 /*
 * We should never be called with dummy (start of chain)
 * and logically removed node (end of path compression
@@ -770,12 +806,12 @@ void _cds_lfht_gc_bucket(struct cds_lfht_node *dummy, struct cds_lfht_node *node
 */
 assert(dummy != node);
 for (;;) {
- if (unlikely(is_end(iter)))
+ if (caa_unlikely(is_end(iter)))
 return;
- if (likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash))
+ if (caa_likely(clear_flag(iter)->reverse_hash > node->reverse_hash))
 return;
- next = rcu_dereference(clear_flag(iter)->p.next);
- if (likely(is_removed(next)))
+ next = rcu_dereference(clear_flag(iter)->next);
+ if (caa_likely(is_removed(next)))
 break;
 iter_prev = clear_flag(iter);
 iter = next;
@@ -785,7 +821,7 @@ void _cds_lfht_gc_bucket(struct cds_lfht_node *dummy, struct cds_lfht_node *node
 new_next = flag_dummy(clear_flag(next));
 else
 new_next = clear_flag(next);
- (void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
+ (void) uatomic_cmpxchg(&iter_prev->next, iter, new_next);
 }
 return;
 }
@@ -796,8 +832,7 @@ int _cds_lfht_replace(struct cds_lfht *ht, unsigned long size,
 struct cds_lfht_node *old_next,
 struct cds_lfht_node *new_node)
 {
- struct cds_lfht_node *dummy, *ret_next;
- struct _cds_lfht_node *lookup;
+ struct cds_lfht_node *bucket, *ret_next;
 
 if (!old_node) /* Return -ENOENT if asked to replace NULL node */
 return -ENOENT;
@@ -818,7 +853,7 @@ int _cds_lfht_replace(struct cds_lfht *ht, unsigned long size,
 }
 assert(!is_dummy(old_next));
 assert(new_node != clear_flag(old_next));
- new_node->p.next = clear_flag(old_next);
+ new_node->next = clear_flag(old_next);
 /*
 * Here is the whole trick for lock-free replace: we add
 * the replacement node _after_ the node we want to
@@ -829,7 +864,7 @@ int _cds_lfht_replace(struct cds_lfht *ht, unsigned long size,
 * to the removal flag and see the new node, or use
 * the old node, but will not see the new one.
 */
- ret_next = uatomic_cmpxchg(&old_node->p.next,
+ ret_next = uatomic_cmpxchg(&old_node->next,
 old_next, flag_removed(new_node));
 if (ret_next == old_next)
 break; /* We performed the replacement. */
@@ -841,11 +876,10 @@ int _cds_lfht_replace(struct cds_lfht *ht, unsigned long size,
 * lookup for the node, and remove it (along with any other
 * logically removed node) if found.
 */
- lookup = lookup_bucket(ht, size, bit_reverse_ulong(old_node->p.reverse_hash));
- dummy = (struct cds_lfht_node *) lookup;
- _cds_lfht_gc_bucket(dummy, new_node);
+ bucket = lookup_bucket(ht, size, bit_reverse_ulong(old_node->reverse_hash));
+ _cds_lfht_gc_bucket(bucket, new_node);
 
- assert(is_removed(rcu_dereference(old_node->p.next)));
+ assert(is_removed(rcu_dereference(old_node->next)));
 return 0;
 }
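
The "whole trick for lock-free replace" in the hunk above relies on next-pointer tagging: nodes are aligned, so the low-order bits of a next pointer are free to carry per-link state, and a single uatomic_cmpxchg on old_node->next can publish the replacement node and the removal mark at once. The helpers below sketch the technique in the spirit of the flag_removed()/clear_flag()/is_removed() calls used here; the exact bit assignments are assumptions:

    #include <stdint.h>

    struct cds_lfht_node;               /* opaque here; only pointers used */

    #define REMOVED_FLAG    (1UL << 0)  /* assumed bit assignment */
    #define DUMMY_FLAG      (1UL << 1)  /* assumed bit assignment */
    #define FLAGS_MASK      (REMOVED_FLAG | DUMMY_FLAG)

    static inline
    struct cds_lfht_node *flag_removed(struct cds_lfht_node *node)
    {
        return (struct cds_lfht_node *) ((uintptr_t) node | REMOVED_FLAG);
    }

    static inline
    struct cds_lfht_node *clear_flag(struct cds_lfht_node *node)
    {
        return (struct cds_lfht_node *) ((uintptr_t) node & ~FLAGS_MASK);
    }

    static inline
    int is_removed(struct cds_lfht_node *node)
    {
        return !!((uintptr_t) node & REMOVED_FLAG);
    }

Traversals always strip the flags with clear_flag() before dereferencing, which is why the lookup paths can run concurrently with a replace or removal.
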
@@ -855,6 +889,8 @@ int _cds_lfht_replace(struct cds_lfht *ht, unsigned long size,
 */
 static
 void _cds_lfht_add(struct cds_lfht *ht,
+ cds_lfht_match_fct match,
+ void *key,
 unsigned long size,
 struct cds_lfht_node *node,
 struct cds_lfht_iter *unique_ret,
@@ -862,11 +898,11 @@ void _cds_lfht_add(struct cds_lfht *ht,
 {
 struct cds_lfht_node *iter_prev, *iter, *next, *new_node, *new_next,
 *return_node;
- struct _cds_lfht_node *lookup;
+ struct cds_lfht_node *bucket;
 
 assert(!is_dummy(node));
 assert(!is_removed(node));
- lookup = lookup_bucket(ht, size, bit_reverse_ulong(node->p.reverse_hash));
+ bucket = lookup_bucket(ht, size, bit_reverse_ulong(node->reverse_hash));
 for (;;) {
 uint32_t chain_len = 0;
 
@@ -874,28 +910,28 @@ void _cds_lfht_add(struct cds_lfht *ht,
 * iter_prev points to the non-removed node prior to the
 * insert location.
 */
- iter_prev = (struct cds_lfht_node *) lookup;
+ iter_prev = bucket;
 /* We can always skip the dummy node initially */
- iter = rcu_dereference(iter_prev->p.next);
- assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
+ iter = rcu_dereference(iter_prev->next);
+ assert(iter_prev->reverse_hash <= node->reverse_hash);
 for (;;) {
- if (unlikely(is_end(iter)))
+ if (caa_unlikely(is_end(iter)))
 goto insert;
- if (likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash))
+ if (caa_likely(clear_flag(iter)->reverse_hash > node->reverse_hash))
 goto insert;
 /* dummy node is the first node of the identical-hash-value chain */
- if (dummy && clear_flag(iter)->p.reverse_hash == node->p.reverse_hash)
+ if (dummy && clear_flag(iter)->reverse_hash == node->reverse_hash)
 goto insert;
- next = rcu_dereference(clear_flag(iter)->p.next);
- if (unlikely(is_removed(next)))
+ next = rcu_dereference(clear_flag(iter)->next);
+ if (caa_unlikely(is_removed(next)))
 goto gc_node;
 /* uniquely add */
 if (unique_ret
 && !is_dummy(next)
- && clear_flag(iter)->p.reverse_hash == node->p.reverse_hash) {
+ && clear_flag(iter)->reverse_hash == node->reverse_hash) {
 struct cds_lfht_iter d_iter = { .node = node, .next = iter, };
 
 /*
@@ -907,7 +943,7 @@ void _cds_lfht_add(struct cds_lfht *ht,
 * (including observe one node by one node
 * by forward iterations)
 */
- cds_lfht_next_duplicate(ht, &d_iter);
+ cds_lfht_next_duplicate(ht, match, key, &d_iter);
 if (!d_iter.node)
 goto insert;
 
@@ -916,7 +952,7 @@ void _cds_lfht_add(struct cds_lfht *ht,
 }
 
 /* Only account for identical reverse hash once */
- if (iter_prev->p.reverse_hash != clear_flag(iter)->p.reverse_hash
+ if (iter_prev->reverse_hash != clear_flag(iter)->reverse_hash
 && !is_dummy(next))
 check_resize(ht, size, ++chain_len);
 iter_prev = clear_flag(iter);
@@ -929,14 +965,14 @@ void _cds_lfht_add(struct cds_lfht *ht,
 assert(!is_removed(iter));
 assert(iter_prev != node);
 if (!dummy)
- node->p.next = clear_flag(iter);
+ node->next = clear_flag(iter);
 else
- node->p.next = flag_dummy(clear_flag(iter));
+ node->next = flag_dummy(clear_flag(iter));
 if (is_dummy(iter))
 new_node = flag_dummy(node);
 else
 new_node = node;
- if (uatomic_cmpxchg(&iter_prev->p.next, iter,
+ if (uatomic_cmpxchg(&iter_prev->next, iter,
 new_node) != iter) {
 continue; /* retry */
 } else {
@@ -950,7 +986,7 @@ void _cds_lfht_add(struct cds_lfht *ht,
 new_next = flag_dummy(clear_flag(next));
 else
 new_next = clear_flag(next);
- (void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
+ (void) uatomic_cmpxchg(&iter_prev->next, iter, new_next);
 /* retry */
 }
 end:
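
Every comparison in _cds_lfht_add() is on reverse_hash, the bit-reversed hash. Sorting the single linked list by bit-reversed keys makes the table split-ordered: when the table doubles from size to 2 * size, the nodes that belong to bucket b + size already form a contiguous run inside bucket b's chain, so a resize only splices in a new dummy node and never relocates existing nodes. An illustrative (unoptimized) equivalent of bit_reverse_ulong():

    #include <limits.h>     /* CHAR_BIT */

    static unsigned long demo_bit_reverse_ulong(unsigned long v)
    {
        unsigned long r = 0;
        unsigned int i;

        for (i = 0; i < sizeof(v) * CHAR_BIT; i++) {
            r = (r << 1) | (v & 1);     /* emit low-order bits first */
            v >>= 1;
        }
        return r;
    }

For instance, when a 4-bucket table doubles, bucket 1's chain (hashes with low bits 01) is already ordered so that hashes whose third-lowest bit is 0 (future bucket 1) precede those whose third-lowest bit is 1 (future bucket 5); the resize merely inserts the dummy node for bucket 5 at that split point.
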
@@ -965,8 +1001,7 @@ int _cds_lfht_del(struct cds_lfht *ht, unsigned long size,
 struct cds_lfht_node *node,
 int dummy_removal)
 {
- struct cds_lfht_node *dummy, *next, *old;
- struct _cds_lfht_node *lookup;
+ struct cds_lfht_node *bucket, *next, *old;
 
 if (!node) /* Return -ENOENT if asked to delete NULL node */
 return -ENOENT;
@@ -974,19 +1009,19 @@ int _cds_lfht_del(struct cds_lfht *ht, unsigned long size,
 /* logically delete the node */
 assert(!is_dummy(node));
 assert(!is_removed(node));
- old = rcu_dereference(node->p.next);
+ old = rcu_dereference(node->next);
 do {
 struct cds_lfht_node *new_next;
 
 next = old;
- if (unlikely(is_removed(next)))
+ if (caa_unlikely(is_removed(next)))
 return -ENOENT;
 if (dummy_removal)
 assert(is_dummy(next));
 else
 assert(!is_dummy(next));
 new_next = flag_removed(next);
- old = uatomic_cmpxchg(&node->p.next, next, new_next);
+ old = uatomic_cmpxchg(&node->next, next, new_next);
 } while (old != next);
 
 /* We performed the (logical) deletion. */
@@ -995,11 +1030,10 @@ int _cds_lfht_del(struct cds_lfht *ht, unsigned long size,
 * the node, and remove it (along with any other logically removed node)
 * if found.
 */
- lookup = lookup_bucket(ht, size, bit_reverse_ulong(node->p.reverse_hash));
- dummy = (struct cds_lfht_node *) lookup;
- _cds_lfht_gc_bucket(dummy, node);
+ bucket = lookup_bucket(ht, size, bit_reverse_ulong(node->reverse_hash));
+ _cds_lfht_gc_bucket(bucket, node);
 
- assert(is_removed(rcu_dereference(node->p.next)));
+ assert(is_removed(rcu_dereference(node->next)));
 return 0;
 }
@@ -1076,14 +1110,13 @@ void init_table_populate_partition(struct cds_lfht *ht, unsigned long i,
 
 assert(i > ht->min_alloc_order);
 ht->cds_lfht_rcu_read_lock();
 for (j = start; j < start + len; j++) {
- struct cds_lfht_node *new_node =
- (struct cds_lfht_node *) &ht->t.tbl[i]->nodes[j];
+ struct cds_lfht_node *new_node = &ht->t.tbl[i]->nodes[j];
 
 dbg_printf("init populate: i %lu j %lu hash %lu\n",
 i, j, (1UL << (i - 1)) + j);
- new_node->p.reverse_hash =
+ new_node->reverse_hash =
 bit_reverse_ulong((1UL << (i - 1)) + j);
- _cds_lfht_add(ht, 1UL << (i - 1),
+ _cds_lfht_add(ht, NULL, NULL, 1UL << (i - 1),
 new_node, NULL, 1);
 }
 ht->cds_lfht_rcu_read_unlock();
@@ -1122,7 +1155,7 @@ void init_table(struct cds_lfht *ht,
 
 if (CMM_LOAD_SHARED(ht->t.resize_target) < (1UL << i))
 break;
 
- ht->t.tbl[i] = calloc(1, len * sizeof(struct _cds_lfht_node));
+ ht->t.tbl[i] = calloc(1, len * sizeof(struct cds_lfht_node));
 assert(ht->t.tbl[i]);
 
 /*
@@ -1177,12 +1210,11 @@ void remove_table_partition(struct cds_lfht *ht, unsigned long i,
 
 assert(i > ht->min_alloc_order);
 ht->cds_lfht_rcu_read_lock();
 for (j = start; j < start + len; j++) {
- struct cds_lfht_node *fini_node =
- (struct cds_lfht_node *) &ht->t.tbl[i]->nodes[j];
+ struct cds_lfht_node *fini_node = &ht->t.tbl[i]->nodes[j];
 
 dbg_printf("remove entry: i %lu j %lu hash %lu\n",
 i, j, (1UL << (i - 1)) + j);
- fini_node->p.reverse_hash =
+ fini_node->reverse_hash =
 bit_reverse_ulong((1UL << (i - 1)) + j);
 (void) _cds_lfht_del(ht, 1UL << (i - 1), fini_node, 1);
 }
@@ -1260,10 +1292,10 @@ void fini_table(struct cds_lfht *ht,
 static
 void cds_lfht_create_dummy(struct cds_lfht *ht, unsigned long size)
 {
- struct _cds_lfht_node *prev, *node;
+ struct cds_lfht_node *prev, *node;
 unsigned long order, len, i, j;
 
- ht->t.tbl[0] = calloc(1, ht->min_alloc_size * sizeof(struct _cds_lfht_node));
+ ht->t.tbl[0] = calloc(1, ht->min_alloc_size * sizeof(struct cds_lfht_node));
 assert(ht->t.tbl[0]);
 
 dbg_printf("create dummy: order %lu index %lu hash %lu\n", 0, 0, 0);
@@ -1275,7 +1307,7 @@ void cds_lfht_create_dummy(struct cds_lfht *ht, unsigned long size)
 if (order <= ht->min_alloc_order) {
 ht->t.tbl[order] = (struct rcu_level *) (ht->t.tbl[0]->nodes + len);
 } else {
- ht->t.tbl[order] = calloc(1, len * sizeof(struct _cds_lfht_node));
+ ht->t.tbl[order] = calloc(1, len * sizeof(struct cds_lfht_node));
 assert(ht->t.tbl[order]);
 }
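
The per-order tables that init_table() and cds_lfht_create_dummy() populate are resolved in two steps, mirroring lookup_bucket() earlier in this patch: indexes below min_alloc_size live in the combined tbl[0] allocation, and any other index i lives in order fls(i), at offset i - 2^(order - 1). Restated as a helper against the structures defined above (the demo_* names are hypothetical):

    /* 1-based index of the highest set bit (0 for x == 0). */
    static unsigned int demo_fls_ulong(unsigned long x)
    {
        unsigned int r = 0;

        while (x) {
            x >>= 1;
            r++;
        }
        return r;
    }

    /* For a table of 'size' buckets, find the dummy node for 'hash'. */
    static struct cds_lfht_node *demo_bucket(struct cds_lfht *ht,
            unsigned long size, unsigned long hash)
    {
        unsigned long index = hash & (size - 1);
        unsigned long order;

        if (index < ht->min_alloc_size)     /* combined lowermost orders */
            return &ht->t.tbl[0]->nodes[index];
        order = demo_fls_ulong(index);
        return &ht->t.tbl[order]->nodes[index - (1UL << (order - 1))];
    }

For example, index 5 (binary 101) has order 3, so it resolves to nodes[1] of tbl[3], the level allocated with len = 1UL << (3 - 1) = 4 entries covering indexes 4 through 7.
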
@@ -1295,15 +1327,12 @@ void cds_lfht_create_dummy(struct cds_lfht *ht, unsigned long size)
 node->next = prev->next;
 assert(is_dummy(node->next));
 node->reverse_hash = bit_reverse_ulong(j + len);
- prev->next = flag_dummy((struct cds_lfht_node *)node);
+ prev->next = flag_dummy(node);
 }
 }
 }
 
-struct cds_lfht *_cds_lfht_new(cds_lfht_hash_fct hash_fct,
- cds_lfht_compare_fct compare_fct,
- unsigned long hash_seed,
- unsigned long init_size,
+struct cds_lfht *_cds_lfht_new(unsigned long init_size,
 unsigned long min_alloc_size,
 int flags,
 void (*cds_lfht_call_rcu)(struct rcu_head *head,
@@ -1331,9 +1360,6 @@ struct cds_lfht *_cds_lfht_new(cds_lfht_hash_fct hash_fct,
 ht = calloc(1, sizeof(struct cds_lfht));
 assert(ht);
 ht->flags = flags;
- ht->hash_fct = hash_fct;
- ht->compare_fct = compare_fct;
- ht->hash_seed = hash_seed;
 ht->cds_lfht_call_rcu = cds_lfht_call_rcu;
 ht->cds_lfht_synchronize_rcu = cds_lfht_synchronize_rcu;
 ht->cds_lfht_rcu_read_lock = cds_lfht_rcu_read_lock;
@@ -1348,84 +1374,79 @@ struct cds_lfht *_cds_lfht_new(cds_lfht_hash_fct hash_fct,
 pthread_mutex_init(&ht->resize_mutex, NULL);
 order = get_count_order_ulong(init_size);
 ht->t.resize_target = 1UL << order;
- cds_lfht_create_dummy(ht, 1UL << order);
- ht->t.size = 1UL << order;
 ht->min_alloc_size = min_alloc_size;
 ht->min_alloc_order = get_count_order_ulong(min_alloc_size);
+ cds_lfht_create_dummy(ht, 1UL << order);
+ ht->t.size = 1UL << order;
 return ht;
 }
 
-void cds_lfht_lookup(struct cds_lfht *ht, void *key, size_t key_len,
- struct cds_lfht_iter *iter)
+void cds_lfht_lookup(struct cds_lfht *ht, cds_lfht_match_fct match,
+ unsigned long hash, void *key, struct cds_lfht_iter *iter)
 {
- struct cds_lfht_node *node, *next, *dummy_node;
- struct _cds_lfht_node *lookup;
- unsigned long hash, reverse_hash, size;
+ struct cds_lfht_node *node, *next, *bucket;
+ unsigned long reverse_hash, size;
 
- hash = ht->hash_fct(key, key_len, ht->hash_seed);
 reverse_hash = bit_reverse_ulong(hash);
 
 size = rcu_dereference(ht->t.size);
- lookup = lookup_bucket(ht, size, hash);
- dummy_node = (struct cds_lfht_node *) lookup;
+ bucket = lookup_bucket(ht, size, hash);
 /* We can always skip the dummy node initially */
- node = rcu_dereference(dummy_node->p.next);
+ node = rcu_dereference(bucket->next);
 node = clear_flag(node);
 for (;;) {
- if (unlikely(is_end(node))) {
+ if (caa_unlikely(is_end(node))) {
 node = next = NULL;
 break;
 }
- if (unlikely(node->p.reverse_hash > reverse_hash)) {
+ if (caa_unlikely(node->reverse_hash > reverse_hash)) {
 node = next = NULL;
 break;
 }
- next = rcu_dereference(node->p.next);
- if (likely(!is_removed(next))
+ next = rcu_dereference(node->next);
+ assert(node == clear_flag(node));
+ if (caa_likely(!is_removed(next))
 && !is_dummy(next)
- && clear_flag(node)->p.reverse_hash == reverse_hash
- && likely(!ht->compare_fct(node->key, node->key_len, key, key_len))) {
+ && node->reverse_hash == reverse_hash
+ && caa_likely(match(node, key))) {
 break;
 }
 node = clear_flag(next);
 }
- assert(!node || !is_dummy(rcu_dereference(node->p.next)));
+ assert(!node || !is_dummy(rcu_dereference(node->next)));
 iter->node = node;
 iter->next = next;
 }
 
-void cds_lfht_next_duplicate(struct cds_lfht *ht, struct cds_lfht_iter *iter)
+void cds_lfht_next_duplicate(struct cds_lfht *ht, cds_lfht_match_fct match,
+ void *key, struct cds_lfht_iter *iter)
 {
 struct cds_lfht_node *node, *next;
 unsigned long reverse_hash;
- void *key;
- size_t key_len;
 
 node = iter->node;
- reverse_hash = node->p.reverse_hash;
- key = node->key;
- key_len = node->key_len;
+ reverse_hash = node->reverse_hash;
 next = iter->next;
 node = clear_flag(next);
 
 for (;;) {
- if (unlikely(is_end(node))) {
+ if (caa_unlikely(is_end(node))) {
 node = next = NULL;
 break;
 }
- if (unlikely(node->p.reverse_hash > reverse_hash)) {
+ if (caa_unlikely(node->reverse_hash > reverse_hash)) {
 node = next = NULL;
 break;
 }
- next = rcu_dereference(node->p.next);
- if (likely(!is_removed(next))
+ next = rcu_dereference(node->next);
+ if (caa_likely(!is_removed(next))
 && !is_dummy(next)
- && likely(!ht->compare_fct(node->key, node->key_len, key, key_len))) {
+ && caa_likely(match(node, key))) {
 break;
 }
 node = clear_flag(next);
 }
- assert(!node || !is_dummy(rcu_dereference(node->p.next)));
+ assert(!node || !is_dummy(rcu_dereference(node->next)));
 iter->node = node;
 iter->next = next;
 }
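
With hash_fct, compare_fct and hash_seed removed from struct cds_lfht, hashing and matching become the caller's job: the key is hashed before each call, and a cds_lfht_match_fct plus the key pointer are passed to every lookup. The sketch below shows client code against the new signatures; struct mynode, my_hash_int() and the exact match-callback prototype are assumptions for illustration, and the caller is expected to hold rcu_read_lock():

    struct mynode {
        int key;
        struct cds_lfht_node node;      /* chaining node, embedded */
    };

    /*
     * Match callback: nonzero means the node carries 'key'. The exact
     * cds_lfht_match_fct prototype is assumed from its use in this patch.
     */
    static int match_int(struct cds_lfht_node *ht_node, const void *key)
    {
        struct mynode *n = caa_container_of(ht_node, struct mynode, node);

        return n->key == *(const int *) key;
    }

    /* Caller must hold rcu_read_lock() across use of the returned node. */
    static struct mynode *lookup_int(struct cds_lfht *ht, int key)
    {
        struct cds_lfht_iter iter;
        struct cds_lfht_node *ht_node;

        /* my_hash_int() is a hypothetical caller-supplied hash function. */
        cds_lfht_lookup(ht, match_int, my_hash_int(key), &key, &iter);
        ht_node = cds_lfht_iter_get_node(&iter);
        return ht_node ? caa_container_of(ht_node, struct mynode, node)
                       : NULL;
    }

Insertion follows the same pattern: cds_lfht_add_unique(ht, match_int, &n->key, my_hash_int(n->key), &n->node) returns the pre-existing node rather than &n->node when the key was already present.
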
@@ -1436,25 +1457,25 @@ void cds_lfht_next(struct cds_lfht *ht, struct cds_lfht_iter *iter)
 node = clear_flag(iter->next);
 for (;;) {
- if (unlikely(is_end(node))) {
+ if (caa_unlikely(is_end(node))) {
 node = next = NULL;
 break;
 }
- next = rcu_dereference(node->p.next);
- if (likely(!is_removed(next))
+ next = rcu_dereference(node->next);
+ if (caa_likely(!is_removed(next))
 && !is_dummy(next)) {
 break;
 }
 node = clear_flag(next);
 }
- assert(!node || !is_dummy(rcu_dereference(node->p.next)));
+ assert(!node || !is_dummy(rcu_dereference(node->next)));
 iter->node = node;
 iter->next = next;
 }
 
 void cds_lfht_first(struct cds_lfht *ht, struct cds_lfht_iter *iter)
 {
- struct _cds_lfht_node *lookup;
+ struct cds_lfht_node *lookup;
 
 /*
 * Get next after first dummy node. The first dummy node is the
@@ -1465,46 +1486,47 @@ void cds_lfht_first(struct cds_lfht *ht, struct cds_lfht_iter *iter)
 cds_lfht_next(ht, iter);
 }
 
-void cds_lfht_add(struct cds_lfht *ht, struct cds_lfht_node *node)
+void cds_lfht_add(struct cds_lfht *ht, unsigned long hash,
+ struct cds_lfht_node *node)
 {
- unsigned long hash, size;
-
- hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
- node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);
+ unsigned long size;
 
+ node->reverse_hash = bit_reverse_ulong((unsigned long) hash);
 size = rcu_dereference(ht->t.size);
- _cds_lfht_add(ht, size, node, NULL, 0);
+ _cds_lfht_add(ht, NULL, NULL, size, node, NULL, 0);
 ht_count_add(ht, size, hash);
 }
 
 struct cds_lfht_node *cds_lfht_add_unique(struct cds_lfht *ht,
+ cds_lfht_match_fct match,
+ void *key,
+ unsigned long hash,
 struct cds_lfht_node *node)
 {
- unsigned long hash, size;
+ unsigned long size;
 struct cds_lfht_iter iter;
 
- hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
- node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);
-
+ node->reverse_hash = bit_reverse_ulong((unsigned long) hash);
 size = rcu_dereference(ht->t.size);
- _cds_lfht_add(ht, size, node, &iter, 0);
+ _cds_lfht_add(ht, match, key, size, node, &iter, 0);
 if (iter.node == node)
 ht_count_add(ht, size, hash);
 return iter.node;
 }
 
 struct cds_lfht_node *cds_lfht_add_replace(struct cds_lfht *ht,
+ cds_lfht_match_fct match,
+ void *key,
+ unsigned long hash,
 struct cds_lfht_node *node)
 {
- unsigned long hash, size;
+ unsigned long size;
 struct cds_lfht_iter iter;
 
- hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
- node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);
-
+ node->reverse_hash = bit_reverse_ulong((unsigned long) hash);
 size = rcu_dereference(ht->t.size);
 for (;;) {
- _cds_lfht_add(ht, size, node, &iter, 0);
+ _cds_lfht_add(ht, match, key, size, node, &iter, 0);
 if (iter.node == node) {
 ht_count_add(ht, size, hash);
 return NULL;
@@ -1533,7 +1555,7 @@ int cds_lfht_del(struct cds_lfht *ht, struct cds_lfht_iter *iter)
 size = rcu_dereference(ht->t.size);
 ret = _cds_lfht_del(ht, size, iter->node, 0);
 if (!ret) {
- hash = bit_reverse_ulong(iter->node->p.reverse_hash);
+ hash = bit_reverse_ulong(iter->node->reverse_hash);
 ht_count_del(ht, size, hash);
 }
 return ret;
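
Because the plain cds_lfht_add() above permits duplicate keys and keeps equal-hash nodes adjacent in the chain, visiting all duplicates is a lookup followed by repeated cds_lfht_next_duplicate() calls until the iterator yields no node. Continuing the hypothetical mynode example, inside an RCU read-side critical section:

    /* Visit every node whose key equals 'key' (same helpers as above). */
    static void visit_duplicates(struct cds_lfht *ht, int key)
    {
        struct cds_lfht_iter iter;
        struct cds_lfht_node *ht_node;

        cds_lfht_lookup(ht, match_int, my_hash_int(key), &key, &iter);
        while ((ht_node = cds_lfht_iter_get_node(&iter)) != NULL) {
            struct mynode *n = caa_container_of(ht_node,
                        struct mynode, node);

            process_node(n);    /* hypothetical per-node work */
            cds_lfht_next_duplicate(ht, match_int, &key, &iter);
        }
    }

cds_lfht_first()/cds_lfht_next() walk the entire table the same way, needing no match function since they only filter out dummy and logically removed nodes.
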
@@ -1543,14 +1565,12 @@ static
 int cds_lfht_delete_dummy(struct cds_lfht *ht)
 {
 struct cds_lfht_node *node;
- struct _cds_lfht_node *lookup;
 unsigned long order, i, size;
 
 /* Check that the table is empty */
- lookup = &ht->t.tbl[0]->nodes[0];
- node = (struct cds_lfht_node *) lookup;
+ node = &ht->t.tbl[0]->nodes[0];
 do {
- node = clear_flag(node)->p.next;
+ node = clear_flag(node)->next;
 if (!is_dummy(node))
 return -EPERM;
 assert(!is_removed(node));
@@ -1611,7 +1631,6 @@ void cds_lfht_count_nodes(struct cds_lfht *ht,
 long *approx_after)
 {
 struct cds_lfht_node *node, *next;
- struct _cds_lfht_node *lookup;
 unsigned long nr_dummy = 0;
 
 *approx_before = 0;
@@ -1628,10 +1647,9 @@ void cds_lfht_count_nodes(struct cds_lfht *ht,
 *removed = 0;
 
 /* Count non-dummy nodes in the table */
- lookup = &ht->t.tbl[0]->nodes[0];
- node = (struct cds_lfht_node *) lookup;
+ node = &ht->t.tbl[0]->nodes[0];
 do {
- next = rcu_dereference(node->p.next);
+ next = rcu_dereference(node->next);
 if (is_removed(next)) {
 if (!is_dummy(next))
 (*removed)++;
@@ -1716,11 +1734,9 @@ void _do_cds_lfht_resize(struct cds_lfht *ht)
 }
 
 static
-unsigned long resize_target_update(struct cds_lfht *ht, unsigned long size,
- int growth_order)
+unsigned long resize_target_grow(struct cds_lfht *ht, unsigned long new_size)
 {
- return _uatomic_max(&ht->t.resize_target,
- size << growth_order);
+ return _uatomic_xchg_monotonic_increase(&ht->t.resize_target, new_size);
 }
 
 static
@@ -1760,15 +1776,13 @@ void do_resize_cb(struct rcu_head *head)
 }
 
 static
-void cds_lfht_resize_lazy(struct cds_lfht *ht, unsigned long size, int growth)
+void __cds_lfht_resize_lazy_launch(struct cds_lfht *ht)
 {
 struct rcu_resize_work *work;
- unsigned long target_size;
 
- target_size = resize_target_update(ht, size, growth);
 /* Store resize_target before read resize_initiated */
 cmm_smp_mb();
- if (!CMM_LOAD_SHARED(ht->t.resize_initiated) && size < target_size) {
+ if (!CMM_LOAD_SHARED(ht->t.resize_initiated)) {
 uatomic_inc(&ht->in_progress_resize);
 cmm_smp_mb(); /* increment resize count before load destroy */
 if (CMM_LOAD_SHARED(ht->in_progress_destroy)) {
@@ -1782,27 +1796,47 @@ void cds_lfht_resize_lazy(struct cds_lfht *ht, unsigned long size, int growth)
 }
 }
 
+static
+void cds_lfht_resize_lazy_grow(struct cds_lfht *ht, unsigned long size, int growth)
+{
+ unsigned long target_size = size << growth;
+
+ if (resize_target_grow(ht, target_size) >= target_size)
+ return;
+
+ __cds_lfht_resize_lazy_launch(ht);
+}
+
+/*
+ * We favor grow operations over shrink. A shrink operation never occurs
+ * if a grow operation is queued for lazy execution. A grow operation
+ * cancels any pending shrink request.
+ */
 static
 void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size,
 unsigned long count)
 {
- struct rcu_resize_work *work;
-
 if (!(ht->flags & CDS_LFHT_AUTO_RESIZE))
 return;
- resize_target_update_count(ht, count);
- /* Store resize_target before read resize_initiated */
- cmm_smp_mb();
- if (!CMM_LOAD_SHARED(ht->t.resize_initiated)) {
- uatomic_inc(&ht->in_progress_resize);
- cmm_smp_mb(); /* increment resize count before load destroy */
- if (CMM_LOAD_SHARED(ht->in_progress_destroy)) {
- uatomic_dec(&ht->in_progress_resize);
+ count = max(count, ht->min_alloc_size);
+ if (count == size)
+ return; /* Already the right size, no resize needed */
+ if (count > size) { /* lazy grow */
+ if (resize_target_grow(ht, count) >= count)
 return;
+ } else { /* lazy shrink */
+ for (;;) {
+ unsigned long s;
+
+ s = uatomic_cmpxchg(&ht->t.resize_target, size, count);
+ if (s == size)
+ break; /* shrink target installed; launch resize */
+ if (s > size)
+ return; /* a grow is (or was just) in progress */
+ if (s <= count)
+ return; /* another thread already shrank at least this far */
+ size = s;
 }
- work = malloc(sizeof(*work));
- work->ht = ht;
- ht->cds_lfht_call_rcu(&work->head, do_resize_cb);
- CMM_STORE_SHARED(ht->t.resize_initiated, 1);
 }
+ __cds_lfht_resize_lazy_launch(ht);
 }
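
The shrink branch just above enforces the grow-over-shrink policy with its cmpxchg loop: a shrink may only install a smaller target while the resize target still equals the current size (or a smaller intermediate value it re-reads), and observing any larger target means a grow is in flight and wins. Restated standalone, with __sync_val_compare_and_swap standing in for uatomic_cmpxchg and a demo_ name (both assumptions):

    static int demo_try_set_shrink_target(unsigned long *target,
            unsigned long size, unsigned long count)
    {
        for (;;) {
            unsigned long s;

            s = __sync_val_compare_and_swap(target, size, count);
            if (s == size)
                return 1;   /* installed our shrink target: launch */
            if (s > size)
                return 0;   /* a grow is pending: grow wins */
            if (s <= count)
                return 0;   /* someone already shrank at least this far */
            size = s;       /* smaller target observed: retry against it */
        }
    }

The grow side needs no loop: resize_target_grow() performs a monotonic increase, and its returned previous value already tells the caller whether a resize launch is still required.
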