X-Git-Url: https://git.liburcu.org/?p=urcu.git;a=blobdiff_plain;f=rculfhash.c;h=0bcbb1e832a5ab41c4d4357c0dafa4ff87832db7;hp=8ed9c10b2386c710c2791ed8db3565d4ea13bce5;hb=4f6e90b73cb103d7d49b8b4838c5f48c3d15b02e;hpb=96ad111233eea0c9fb81cfb79f559f22f38b1d0e

diff --git a/rculfhash.c b/rculfhash.c
index 8ed9c10..0bcbb1e 100644
--- a/rculfhash.c
+++ b/rculfhash.c
@@ -91,12 +91,32 @@
  * the "dummy node" tables.
  * - There is one dummy node table per hash index order. The size of
  *   each dummy node table is half the number of hashes contained in
- *   this order.
- * - call_rcu is used to garbage-collect the old order table.
+ *   this order (except for order 0).
+ * - synchronize_rcu is used to garbage-collect the old dummy node table.
  * - The per-order dummy node tables contain a compact version of the
  *   hash table nodes. These tables are invariant after they are
  *   populated into the hash table.
- *
+ *
+ * Dummy node tables:
+ *
+ * hash table	hash table	the last	all dummy node tables
+ * order	size		dummy node	0   1   2   3   4   5   6(index)
+ *				table size
+ * 0		1		1		1
+ * 1		2		1		1   1
+ * 2		4		2		1   1   2
+ * 3		8		4		1   1   2   4
+ * 4		16		8		1   1   2   4   8
+ * 5		32		16		1   1   2   4   8  16
+ * 6		64		32		1   1   2   4   8  16  32
+ *
+ * When growing/shrinking, we only focus on the last dummy node table
+ * whose size is (!order ? 1 : (1 << (order - 1))).
+ *
+ * Example for growing/shrinking:
+ * grow hash table from order 5 to 6: init the index=6 dummy node table
+ * shrink hash table from order 6 to 5: fini the index=6 dummy node table
+ *
  * A bit of ascii art explanation:
  *
  * Order index is off-by-one compared to the actual power of 2 because
@@ -119,12 +139,9 @@
  *
  * order	bits	reverse
  * 0	0  000	000
- *	|
- * 1	|	1  001	100	<-	<-
- *	|	|	|	|
- * 2	|	|	2  010	010	|	|
+ * 1	|	1  001	100	<-
+ * 2	|	|	2  010	010	<-	|
  *	|	|	|	3  011	110	|  <-	|
- *	|	|	|	|	|	|	|
  * 3	->	|	|	|	4  100	001	|	|
  *		->	|	|	5  101	101	|
  *			->	|	6  110	011
@@ -144,7 +161,6 @@
 #include 
 #include 
 #include 
-#include 
 #include 
 #include 
 #include 
@@ -210,7 +226,7 @@ struct ht_items_count {
 } __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
 
 struct rcu_level {
-	struct rcu_head head;
+	/* Note: manually update allocation length when adding a field */
 	struct _cds_lfht_node nodes[0];
 };
 
@@ -256,24 +272,19 @@ struct rcu_resize_work {
 };
 
 struct partition_resize_work {
-	struct rcu_head head;
+	pthread_t thread_id;
 	struct cds_lfht *ht;
 	unsigned long i, start, len;
 	void (*fct)(struct cds_lfht *ht, unsigned long i,
 		unsigned long start, unsigned long len);
 };
 
-enum add_mode {
-	ADD_DEFAULT = 0,
-	ADD_UNIQUE = 1,
-	ADD_REPLACE = 2,
-};
-
 static
-struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht,
-		unsigned long size,
-		struct cds_lfht_node *node,
-		enum add_mode mode, int dummy);
+void _cds_lfht_add(struct cds_lfht *ht,
+		unsigned long size,
+		struct cds_lfht_node *node,
+		struct cds_lfht_iter *unique_ret,
+		int dummy);
 
 /*
  * Algorithm to reverse bits in a word by lookup table, extended to
@@ -447,24 +458,28 @@ unsigned int fls_ulong(unsigned long x)
 #endif
 }
 
+/*
+ * Return the minimum order for which x <= (1UL << order).
+ * Return -1 if x is 0.
+ */
 int get_count_order_u32(uint32_t x)
 {
-	int order;
+	if (!x)
+		return -1;
 
-	order = fls_u32(x) - 1;
-	if (x & (x - 1))
-		order++;
-	return order;
+	return fls_u32(x - 1);
 }
 
+/*
+ * Return the minimum order for which x <= (1UL << order).
+ * Return -1 if x is 0.
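+ *
+ * For example, the values this definition implies:
+ * get_count_order_ulong(1) returns 0, get_count_order_ulong(4)
+ * returns 2, and get_count_order_ulong(5) returns 3, since
+ * fls_ulong(x - 1) is the smallest order with x <= (1UL << order).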
+ */ int get_count_order_ulong(unsigned long x) { - int order; + if (!x) + return -1; - order = fls_ulong(x) - 1; - if (x & (x - 1)) - order++; - return order; + return fls_ulong(x - 1); } #ifdef POISON_FREE @@ -609,7 +624,7 @@ void ht_count_del(struct cds_lfht *ht, unsigned long size) #else /* #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */ -static const long nr_cpus_mask = -1; +static const long nr_cpus_mask = -2; static struct ht_items_count *alloc_per_cpu_items_count(void) @@ -714,11 +729,24 @@ unsigned long _uatomic_max(unsigned long *ptr, unsigned long v) } static -void cds_lfht_free_level(struct rcu_head *head) +struct _cds_lfht_node *lookup_bucket(struct cds_lfht *ht, unsigned long size, + unsigned long hash) { - struct rcu_level *l = - caa_container_of(head, struct rcu_level, head); - poison_free(l); + unsigned long index, order; + + assert(size > 0); + index = hash & (size - 1); + /* + * equivalent to get_count_order_ulong(index + 1), but optimizes + * away the non-existing 0 special-case for + * get_count_order_ulong. + */ + order = fls_ulong(index); + + dbg_printf("lookup hash %lu index %lu order %lu aridx %lu\n", + hash, index, order, index & (!order ? 0 : ((1UL << (order - 1)) - 1))); + + return &ht->t.tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))]; } /* @@ -737,6 +765,7 @@ void _cds_lfht_gc_bucket(struct cds_lfht_node *dummy, struct cds_lfht_node *node iter_prev = dummy; /* We can always skip the dummy node initially */ iter = rcu_dereference(iter_prev->p.next); + assert(!is_removed(iter)); assert(iter_prev->p.reverse_hash <= node->p.reverse_hash); /* * We should never be called with dummy (start of chain) @@ -761,32 +790,88 @@ void _cds_lfht_gc_bucket(struct cds_lfht_node *dummy, struct cds_lfht_node *node new_next = flag_dummy(clear_flag(next)); else new_next = clear_flag(next); - if (is_removed(iter)) - new_next = flag_removed(new_next); (void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next); } return; } static -struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht, - unsigned long size, - struct cds_lfht_node *node, - enum add_mode mode, int dummy) +int _cds_lfht_replace(struct cds_lfht *ht, unsigned long size, + struct cds_lfht_node *old_node, + struct cds_lfht_node *old_next, + struct cds_lfht_node *new_node) +{ + struct cds_lfht_node *dummy, *ret_next; + struct _cds_lfht_node *lookup; + + if (!old_node) /* Return -ENOENT if asked to replace NULL node */ + return -ENOENT; + + assert(!is_removed(old_node)); + assert(!is_dummy(old_node)); + assert(!is_removed(new_node)); + assert(!is_dummy(new_node)); + assert(new_node != old_node); + for (;;) { + /* Insert after node to be replaced */ + if (is_removed(old_next)) { + /* + * Too late, the old node has been removed under us + * between lookup and replace. Fail. + */ + return -ENOENT; + } + assert(!is_dummy(old_next)); + assert(new_node != clear_flag(old_next)); + new_node->p.next = clear_flag(old_next); + /* + * Here is the whole trick for lock-free replace: we add + * the replacement node _after_ the node we want to + * replace by atomically setting its next pointer at the + * same time we set its removal flag. Given that + * the lookups/get next use an iterator aware of the + * next pointer, they will either skip the old node due + * to the removal flag and see the new node, or use + * the old node, but will not see the new one. 
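+	 *
+	 * A concrete sketch of that argument: with chain
+	 * A -> old -> B and old_next == B, we first set
+	 * new_node->p.next = B, then cmpxchg old_node->p.next
+	 * from B to REMOVED|new_node. A traversal reading
+	 * old_node->p.next after the cmpxchg sees the removal
+	 * flag and skips to new_node; one that read it before
+	 * sees B and still uses old_node. Readers therefore
+	 * observe exactly one of the two nodes as live.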
+ */ + ret_next = uatomic_cmpxchg(&old_node->p.next, + old_next, flag_removed(new_node)); + if (ret_next == old_next) + break; /* We performed the replacement. */ + old_next = ret_next; + } + + /* + * Ensure that the old node is not visible to readers anymore: + * lookup for the node, and remove it (along with any other + * logically removed node) if found. + */ + lookup = lookup_bucket(ht, size, bit_reverse_ulong(old_node->p.reverse_hash)); + dummy = (struct cds_lfht_node *) lookup; + _cds_lfht_gc_bucket(dummy, new_node); + + assert(is_removed(rcu_dereference(old_node->p.next))); + return 0; +} + +/* + * A non-NULL unique_ret pointer uses the "add unique" (or uniquify) add + * mode. A NULL unique_ret allows creation of duplicate keys. + */ +static +void _cds_lfht_add(struct cds_lfht *ht, + unsigned long size, + struct cds_lfht_node *node, + struct cds_lfht_iter *unique_ret, + int dummy) { struct cds_lfht_node *iter_prev, *iter, *next, *new_node, *new_next, - *dummy_node, *return_node; + *return_node; struct _cds_lfht_node *lookup; - unsigned long hash, index, order; assert(!is_dummy(node)); assert(!is_removed(node)); - if (!size) { - assert(dummy); - node->p.next = flag_dummy(get_end()); - return node; /* Initial first add (head) */ - } - hash = bit_reverse_ulong(node->p.reverse_hash); + lookup = lookup_bucket(ht, size, bit_reverse_ulong(node->p.reverse_hash)); for (;;) { uint32_t chain_len = 0; @@ -794,9 +879,6 @@ struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht, * iter_prev points to the non-removed node prior to the * insert location. */ - index = hash & (size - 1); - order = get_count_order_ulong(index + 1); - lookup = &ht->t.tbl[order]->nodes[index & ((!order ? 0 : (1UL << (order - 1))) - 1)]; iter_prev = (struct cds_lfht_node *) lookup; /* We can always skip the dummy node initially */ iter = rcu_dereference(iter_prev->p.next); @@ -806,19 +888,38 @@ struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht, goto insert; if (likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash)) goto insert; + + /* dummy node is the first node of the identical-hash-value chain */ + if (dummy && clear_flag(iter)->p.reverse_hash == node->p.reverse_hash) + goto insert; + next = rcu_dereference(clear_flag(iter)->p.next); if (unlikely(is_removed(next))) goto gc_node; - if ((mode == ADD_UNIQUE || mode == ADD_REPLACE) + + /* uniquely add */ + if (unique_ret && !is_dummy(next) - && !ht->compare_fct(node->key, node->key_len, - clear_flag(iter)->key, - clear_flag(iter)->key_len)) { - if (mode == ADD_UNIQUE) - return clear_flag(iter); - else /* mode == ADD_REPLACE */ - goto replace; + && clear_flag(iter)->p.reverse_hash == node->p.reverse_hash) { + struct cds_lfht_iter d_iter = { .node = node, .next = iter, }; + + /* + * uniquely adding inserts the node as the first + * node of the identical-hash-value node chain. 
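+			 * (e.g. when dummy -> B -> C already holds
+			 * two nodes with this hash value, the new
+			 * node is attempted at the head, before B,
+			 * and cds_lfht_next_duplicate() below scans
+			 * B and C for an existing key match before
+			 * the insert is allowed.)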
+			 *
+			 * This semantic ensures that no duplicated keys
+			 * should ever be observable in the table
+			 * (including when observing nodes one by one
+			 * during forward iteration)
+			 */
+			cds_lfht_next_duplicate(ht, &d_iter);
+			if (!d_iter.node)
+				goto insert;
+
+			*unique_ret = d_iter;
+			return;
 		}
+
 		/* Only account for identical reverse hash once */
 		if (iter_prev->p.reverse_hash != clear_flag(iter)->p.reverse_hash
 				&& !is_dummy(next))
@@ -844,44 +945,8 @@ struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht,
 				new_node) != iter) {
 			continue;	/* retry */
 		} else {
-			if (mode == ADD_REPLACE)
-				return_node = NULL;
-			else /* ADD_DEFAULT and ADD_UNIQUE */
-				return_node = node;
-			goto gc_end;
-		}
-
-	replace:
-		/* Insert after node to be replaced */
-		iter_prev = clear_flag(iter);
-		iter = next;
-		assert(node != clear_flag(iter));
-		assert(!is_removed(iter_prev));
-		assert(!is_removed(iter));
-		assert(iter_prev != node);
-		assert(!dummy);
-		node->p.next = clear_flag(iter);
-		if (is_dummy(iter))
-			new_node = flag_dummy(node);
-		else
-			new_node = node;
-		/*
-		 * Here is the whole trick for lock-free replace: we add
-		 * the replacement node _after_ the node we want to
-		 * replace by atomically setting its next pointer at the
-		 * same time we set its removal flag. Given that
-		 * the lookups/get next use an iterator aware of the
-		 * next pointer, they will either skip the old node due
-		 * to the removal flag and see the new node, or use
-		 * the old node, but will not see the new one.
-		 */
-		new_node = flag_removed(new_node);
-		if (uatomic_cmpxchg(&iter_prev->p.next,
-				iter, new_node) != iter) {
-			continue;	/* retry */
-		} else {
-			return_node = iter_prev;
-			goto gc_end;
+			return_node = node;
+			goto end;
 		}
 
 	gc_node:
@@ -893,14 +958,11 @@ struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht,
 		(void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
 		/* retry */
 	}
-gc_end:
-	/* Garbage collect logically removed nodes in the bucket */
-	index = hash & (size - 1);
-	order = get_count_order_ulong(index + 1);
-	lookup = &ht->t.tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))];
-	dummy_node = (struct cds_lfht_node *) lookup;
-	_cds_lfht_gc_bucket(dummy_node, node);
-	return return_node;
+end:
+	if (unique_ret) {
+		unique_ret->node = return_node;
+		/* unique_ret->next left unset, never used. */
+	}
 }
 
 static
@@ -910,8 +972,9 @@ int _cds_lfht_del(struct cds_lfht *ht, unsigned long size,
 {
 	struct cds_lfht_node *dummy, *next, *old;
 	struct _cds_lfht_node *lookup;
-	int flagged = 0;
-	unsigned long hash, index, order;
+
+	if (!node)	/* Return -ENOENT if asked to delete NULL node */
+		return -ENOENT;
 
 	/* logically delete the node */
 	assert(!is_dummy(node));
@@ -922,7 +985,7 @@ int _cds_lfht_del(struct cds_lfht *ht, unsigned long size,
 
 		next = old;
 		if (unlikely(is_removed(next)))
-			goto end;
+			return -ENOENT;
 		if (dummy_removal)
 			assert(is_dummy(next));
 		else
@@ -930,32 +993,19 @@ int _cds_lfht_del(struct cds_lfht *ht, unsigned long size,
 		new_next = flag_removed(next);
 		old = uatomic_cmpxchg(&node->p.next, next, new_next);
 	} while (old != next);
-	/* We performed the (logical) deletion. */
-	flagged = 1;
 
 	/*
 	 * Ensure that the node is not visible to readers anymore: lookup for
 	 * the node, and remove it (along with any other logically removed node)
 	 * if found.
 	 */
-	hash = bit_reverse_ulong(node->p.reverse_hash);
-	assert(size > 0);
-	index = hash & (size - 1);
-	order = get_count_order_ulong(index + 1);
-	lookup = &ht->t.tbl[order]->nodes[index & (!order ? 
0 : ((1UL << (order - 1)) - 1))]; + lookup = lookup_bucket(ht, size, bit_reverse_ulong(node->p.reverse_hash)); dummy = (struct cds_lfht_node *) lookup; _cds_lfht_gc_bucket(dummy, node); -end: - /* - * Only the flagging action indicated that we (and no other) - * removed the node from the hash. - */ - if (flagged) { - assert(is_removed(rcu_dereference(node->p.next))); - return 0; - } else - return -ENOENT; + + assert(is_removed(rcu_dereference(node->p.next))); + return 0; } static @@ -979,18 +1029,20 @@ void partition_resize_helper(struct cds_lfht *ht, unsigned long i, struct partition_resize_work *work; int thread, ret; unsigned long nr_threads; - pthread_t *thread_id; /* * Note: nr_cpus_mask + 1 is always power of 2. * We spawn just the number of threads we need to satisfy the minimum * partition size, up to the number of CPUs in the system. */ - nr_threads = min(nr_cpus_mask + 1, - len >> MIN_PARTITION_PER_THREAD_ORDER); + if (nr_cpus_mask > 0) { + nr_threads = min(nr_cpus_mask + 1, + len >> MIN_PARTITION_PER_THREAD_ORDER); + } else { + nr_threads = 1; + } partition_len = len >> get_count_order_ulong(nr_threads); work = calloc(nr_threads, sizeof(*work)); - thread_id = calloc(nr_threads, sizeof(*thread_id)); assert(work); for (thread = 0; thread < nr_threads; thread++) { work[thread].ht = ht; @@ -998,16 +1050,15 @@ void partition_resize_helper(struct cds_lfht *ht, unsigned long i, work[thread].len = partition_len; work[thread].start = thread * partition_len; work[thread].fct = fct; - ret = pthread_create(&thread_id[thread], ht->resize_attr, + ret = pthread_create(&(work[thread].thread_id), ht->resize_attr, partition_resize_thread, &work[thread]); assert(!ret); } for (thread = 0; thread < nr_threads; thread++) { - ret = pthread_join(thread_id[thread], NULL); + ret = pthread_join(work[thread].thread_id, NULL); assert(!ret); } free(work); - free(thread_id); } /* @@ -1027,19 +1078,18 @@ void init_table_populate_partition(struct cds_lfht *ht, unsigned long i, { unsigned long j; + assert(i > 0); ht->cds_lfht_rcu_read_lock(); for (j = start; j < start + len; j++) { struct cds_lfht_node *new_node = (struct cds_lfht_node *) &ht->t.tbl[i]->nodes[j]; dbg_printf("init populate: i %lu j %lu hash %lu\n", - i, j, !i ? 0 : (1UL << (i - 1)) + j); + i, j, (1UL << (i - 1)) + j); new_node->p.reverse_hash = - bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j); - (void) _cds_lfht_add(ht, !i ? 0 : (1UL << (i - 1)), - new_node, ADD_DEFAULT, 1); - if (CMM_LOAD_SHARED(ht->in_progress_destroy)) - break; + bit_reverse_ulong((1UL << (i - 1)) + j); + _cds_lfht_add(ht, 1UL << (i - 1), + new_node, NULL, 1); } ht->cds_lfht_rcu_read_unlock(); } @@ -1060,25 +1110,24 @@ void init_table_populate(struct cds_lfht *ht, unsigned long i, static void init_table(struct cds_lfht *ht, - unsigned long first_order, unsigned long len_order) + unsigned long first_order, unsigned long last_order) { - unsigned long i, end_order; + unsigned long i; - dbg_printf("init table: first_order %lu end_order %lu\n", - first_order, first_order + len_order); - end_order = first_order + len_order; - for (i = first_order; i < end_order; i++) { + dbg_printf("init table: first_order %lu last_order %lu\n", + first_order, last_order); + assert(first_order > 0); + for (i = first_order; i <= last_order; i++) { unsigned long len; - len = !i ? 1 : 1UL << (i - 1); + len = 1UL << (i - 1); dbg_printf("init order %lu len: %lu\n", i, len); /* Stop expand if the resize target changes under us */ - if (CMM_LOAD_SHARED(ht->t.resize_target) < (!i ? 
1 : (1UL << i))) + if (CMM_LOAD_SHARED(ht->t.resize_target) < (1UL << i)) break; - ht->t.tbl[i] = calloc(1, sizeof(struct rcu_level) - + (len * sizeof(struct _cds_lfht_node))); + ht->t.tbl[i] = calloc(1, len * sizeof(struct _cds_lfht_node)); assert(ht->t.tbl[i]); /* @@ -1091,9 +1140,9 @@ void init_table(struct cds_lfht *ht, * Update table size. */ cmm_smp_wmb(); /* populate data before RCU size */ - CMM_STORE_SHARED(ht->t.size, !i ? 1 : (1UL << i)); + CMM_STORE_SHARED(ht->t.size, 1UL << i); - dbg_printf("init new size: %lu\n", !i ? 1 : (1UL << i)); + dbg_printf("init new size: %lu\n", 1UL << i); if (CMM_LOAD_SHARED(ht->in_progress_destroy)) break; } @@ -1130,19 +1179,17 @@ void remove_table_partition(struct cds_lfht *ht, unsigned long i, { unsigned long j; + assert(i > 0); ht->cds_lfht_rcu_read_lock(); for (j = start; j < start + len; j++) { struct cds_lfht_node *fini_node = (struct cds_lfht_node *) &ht->t.tbl[i]->nodes[j]; dbg_printf("remove entry: i %lu j %lu hash %lu\n", - i, j, !i ? 0 : (1UL << (i - 1)) + j); + i, j, (1UL << (i - 1)) + j); fini_node->p.reverse_hash = - bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j); - (void) _cds_lfht_del(ht, !i ? 0 : (1UL << (i - 1)), - fini_node, 1); - if (CMM_LOAD_SHARED(ht->in_progress_destroy)) - break; + bit_reverse_ulong((1UL << (i - 1)) + j); + (void) _cds_lfht_del(ht, 1UL << (i - 1), fini_node, 1); } ht->cds_lfht_rcu_read_unlock(); } @@ -1163,18 +1210,18 @@ void remove_table(struct cds_lfht *ht, unsigned long i, unsigned long len) static void fini_table(struct cds_lfht *ht, - unsigned long first_order, unsigned long len_order) + unsigned long first_order, unsigned long last_order) { - long i, end_order; + long i; + void *free_by_rcu = NULL; - dbg_printf("fini table: first_order %lu end_order %lu\n", - first_order, first_order + len_order); - end_order = first_order + len_order; + dbg_printf("fini table: first_order %lu last_order %lu\n", + first_order, last_order); assert(first_order > 0); - for (i = end_order - 1; i >= first_order; i--) { + for (i = last_order; i >= first_order; i--) { unsigned long len; - len = !i ? 1 : 1UL << (i - 1); + len = 1UL << (i - 1); dbg_printf("fini order %lu len: %lu\n", i, len); /* Stop shrink if the resize target changes under us */ @@ -1191,6 +1238,8 @@ void fini_table(struct cds_lfht *ht, * return a logically removed node as insert position. */ ht->cds_lfht_synchronize_rcu(); + if (free_by_rcu) + free(free_by_rcu); /* * Set "removed" flag in dummy nodes about to be removed. 
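The free_by_rcu additions above replace the previous call_rcu-based reclaim of dummy node tables with explicit grace periods: the table unlinked at order i is freed only after the synchronize_rcu() of the following loop iteration, or after the final synchronize_rcu() added in the next hunk. A minimal self-contained sketch of this batched deferred-free pattern (the tbl[] array and shrink_orders() are hypothetical stand-ins for ht->t.tbl and fini_table(); unlinking is elided):

	#include <stdlib.h>
	#include <urcu.h>		/* synchronize_rcu() */

	#define MAX_ORDER	64
	static void *tbl[MAX_ORDER];	/* stand-in for ht->t.tbl[] */

	static
	void shrink_orders(long first, long last)
	{
		void *free_by_rcu = NULL;
		long i;

		for (i = last; i >= first; i--) {
			/* Wait for readers to quit the table unlinked last round. */
			synchronize_rcu();
			free(free_by_rcu);	/* free(NULL) is a no-op */
			/* ... unlink the dummy nodes of tbl[i] here ... */
			free_by_rcu = tbl[i];	/* defer to the next grace period */
			tbl[i] = NULL;
		}
		if (free_by_rcu) {
			synchronize_rcu();	/* cover the last unlinked table */
			free(free_by_rcu);
		}
	}
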
@@ -1200,12 +1249,56 @@ void fini_table(struct cds_lfht *ht, */ remove_table(ht, i, len); - ht->cds_lfht_call_rcu(&ht->t.tbl[i]->head, cds_lfht_free_level); + free_by_rcu = ht->t.tbl[i]; dbg_printf("fini new size: %lu\n", 1UL << i); if (CMM_LOAD_SHARED(ht->in_progress_destroy)) break; } + + if (free_by_rcu) { + ht->cds_lfht_synchronize_rcu(); + free(free_by_rcu); + } +} + +static +void cds_lfht_create_dummy(struct cds_lfht *ht, unsigned long size) +{ + struct _cds_lfht_node *prev, *node; + unsigned long order, len, i, j; + + ht->t.tbl[0] = calloc(1, sizeof(struct _cds_lfht_node)); + assert(ht->t.tbl[0]); + + dbg_printf("create dummy: order %lu index %lu hash %lu\n", 0, 0, 0); + ht->t.tbl[0]->nodes[0].next = flag_dummy(get_end()); + ht->t.tbl[0]->nodes[0].reverse_hash = 0; + + for (order = 1; order < get_count_order_ulong(size) + 1; order++) { + len = 1UL << (order - 1); + ht->t.tbl[order] = calloc(1, len * sizeof(struct _cds_lfht_node)); + assert(ht->t.tbl[order]); + + i = 0; + prev = ht->t.tbl[i]->nodes; + for (j = 0; j < len; j++) { + if (j & (j - 1)) { /* Between power of 2 */ + prev++; + } else if (j) { /* At each power of 2 */ + i++; + prev = ht->t.tbl[i]->nodes; + } + + node = &ht->t.tbl[order]->nodes[j]; + dbg_printf("create dummy: order %lu index %lu hash %lu\n", + order, j, j + len); + node->next = prev->next; + assert(is_dummy(node->next)); + node->reverse_hash = bit_reverse_ulong(j + len); + prev->next = flag_dummy((struct cds_lfht_node *)node); + } + } } struct cds_lfht *_cds_lfht_new(cds_lfht_hash_fct hash_fct, @@ -1247,14 +1340,11 @@ struct cds_lfht *_cds_lfht_new(cds_lfht_hash_fct hash_fct, ht->percpu_count = alloc_per_cpu_items_count(); /* this mutex should not nest in read-side C.S. */ pthread_mutex_init(&ht->resize_mutex, NULL); - order = get_count_order_ulong(max(init_size, MIN_TABLE_SIZE)) + 1; ht->flags = flags; - ht->cds_lfht_rcu_thread_offline(); - pthread_mutex_lock(&ht->resize_mutex); - ht->t.resize_target = 1UL << (order - 1); - init_table(ht, 0, order); - pthread_mutex_unlock(&ht->resize_mutex); - ht->cds_lfht_rcu_thread_online(); + order = get_count_order_ulong(max(init_size, MIN_TABLE_SIZE)); + ht->t.resize_target = 1UL << order; + cds_lfht_create_dummy(ht, 1UL << order); + ht->t.size = 1UL << order; return ht; } @@ -1263,17 +1353,13 @@ void cds_lfht_lookup(struct cds_lfht *ht, void *key, size_t key_len, { struct cds_lfht_node *node, *next, *dummy_node; struct _cds_lfht_node *lookup; - unsigned long hash, reverse_hash, index, order, size; + unsigned long hash, reverse_hash, size; hash = ht->hash_fct(key, key_len, ht->hash_seed); reverse_hash = bit_reverse_ulong(hash); size = rcu_dereference(ht->t.size); - index = hash & (size - 1); - order = get_count_order_ulong(index + 1); - lookup = &ht->t.tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1))) - 1)]; - dbg_printf("lookup hash %lu index %lu order %lu aridx %lu\n", - hash, index, order, index & (!order ? 
0 : ((1UL << (order - 1)) - 1))); + lookup = lookup_bucket(ht, size, hash); dummy_node = (struct cds_lfht_node *) lookup; /* We can always skip the dummy node initially */ node = rcu_dereference(dummy_node->p.next); @@ -1290,6 +1376,7 @@ void cds_lfht_lookup(struct cds_lfht *ht, void *key, size_t key_len, next = rcu_dereference(node->p.next); if (likely(!is_removed(next)) && !is_dummy(next) + && clear_flag(node)->p.reverse_hash == reverse_hash && likely(!ht->compare_fct(node->key, node->key_len, key, key_len))) { break; } @@ -1300,7 +1387,7 @@ void cds_lfht_lookup(struct cds_lfht *ht, void *key, size_t key_len, iter->next = next; } -void cds_lfht_next(struct cds_lfht *ht, struct cds_lfht_iter *iter) +void cds_lfht_next_duplicate(struct cds_lfht *ht, struct cds_lfht_iter *iter) { struct cds_lfht_node *node, *next; unsigned long reverse_hash; @@ -1336,6 +1423,41 @@ void cds_lfht_next(struct cds_lfht *ht, struct cds_lfht_iter *iter) iter->next = next; } +void cds_lfht_next(struct cds_lfht *ht, struct cds_lfht_iter *iter) +{ + struct cds_lfht_node *node, *next; + + node = clear_flag(iter->next); + for (;;) { + if (unlikely(is_end(node))) { + node = next = NULL; + break; + } + next = rcu_dereference(node->p.next); + if (likely(!is_removed(next)) + && !is_dummy(next)) { + break; + } + node = clear_flag(next); + } + assert(!node || !is_dummy(rcu_dereference(node->p.next))); + iter->node = node; + iter->next = next; +} + +void cds_lfht_first(struct cds_lfht *ht, struct cds_lfht_iter *iter) +{ + struct _cds_lfht_node *lookup; + + /* + * Get next after first dummy node. The first dummy node is the + * first node of the linked list. + */ + lookup = &ht->t.tbl[0]->nodes[0]; + iter->next = lookup->next; + cds_lfht_next(ht, iter); +} + void cds_lfht_add(struct cds_lfht *ht, struct cds_lfht_node *node) { unsigned long hash, size; @@ -1344,7 +1466,7 @@ void cds_lfht_add(struct cds_lfht *ht, struct cds_lfht_node *node) node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash); size = rcu_dereference(ht->t.size); - (void) _cds_lfht_add(ht, size, node, ADD_DEFAULT, 0); + _cds_lfht_add(ht, size, node, NULL, 0); ht_count_add(ht, size); } @@ -1352,41 +1474,57 @@ struct cds_lfht_node *cds_lfht_add_unique(struct cds_lfht *ht, struct cds_lfht_node *node) { unsigned long hash, size; - struct cds_lfht_node *ret; + struct cds_lfht_iter iter; hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed); node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash); size = rcu_dereference(ht->t.size); - ret = _cds_lfht_add(ht, size, node, ADD_UNIQUE, 0); - if (ret == node) + _cds_lfht_add(ht, size, node, &iter, 0); + if (iter.node == node) ht_count_add(ht, size); - return ret; + return iter.node; } -struct cds_lfht_node *cds_lfht_replace(struct cds_lfht *ht, +struct cds_lfht_node *cds_lfht_add_replace(struct cds_lfht *ht, struct cds_lfht_node *node) { unsigned long hash, size; - struct cds_lfht_node *ret; + struct cds_lfht_iter iter; hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed); node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash); size = rcu_dereference(ht->t.size); - ret = _cds_lfht_add(ht, size, node, ADD_REPLACE, 0); - if (ret == NULL) - ht_count_add(ht, size); - return ret; + for (;;) { + _cds_lfht_add(ht, size, node, &iter, 0); + if (iter.node == node) { + ht_count_add(ht, size); + return NULL; + } + + if (!_cds_lfht_replace(ht, size, iter.node, iter.next, node)) + return iter.node; + } } -int cds_lfht_del(struct cds_lfht *ht, struct cds_lfht_node *node) +int 
cds_lfht_replace(struct cds_lfht *ht, struct cds_lfht_iter *old_iter, + struct cds_lfht_node *new_node) +{ + unsigned long size; + + size = rcu_dereference(ht->t.size); + return _cds_lfht_replace(ht, size, old_iter->node, old_iter->next, + new_node); +} + +int cds_lfht_del(struct cds_lfht *ht, struct cds_lfht_iter *iter) { unsigned long size; int ret; size = rcu_dereference(ht->t.size); - ret = _cds_lfht_del(ht, size, node, 0); + ret = _cds_lfht_del(ht, size, iter->node, 0); if (!ret) ht_count_del(ht, size); return ret; @@ -1438,7 +1576,8 @@ int cds_lfht_destroy(struct cds_lfht *ht, pthread_attr_t **attr) int ret; /* Wait for in-flight resize operations to complete */ - CMM_STORE_SHARED(ht->in_progress_destroy, 1); + _CMM_STORE_SHARED(ht->in_progress_destroy, 1); + cmm_smp_mb(); /* Store destroy before load resize */ while (uatomic_read(&ht->in_progress_resize)) poll(NULL, 0, 100); /* wait for 100ms */ ret = cds_lfht_delete_dummy(ht); @@ -1509,12 +1648,12 @@ void _do_cds_lfht_grow(struct cds_lfht *ht, { unsigned long old_order, new_order; - old_order = get_count_order_ulong(old_size) + 1; - new_order = get_count_order_ulong(new_size) + 1; - printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n", - old_size, old_order, new_size, new_order); + old_order = get_count_order_ulong(old_size); + new_order = get_count_order_ulong(new_size); + dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n", + old_size, old_order, new_size, new_order); assert(new_size > old_size); - init_table(ht, old_order, new_order - old_order); + init_table(ht, old_order + 1, new_order); } /* called with resize mutex held */ @@ -1525,14 +1664,14 @@ void _do_cds_lfht_shrink(struct cds_lfht *ht, unsigned long old_order, new_order; new_size = max(new_size, MIN_TABLE_SIZE); - old_order = get_count_order_ulong(old_size) + 1; - new_order = get_count_order_ulong(new_size) + 1; - printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n", - old_size, old_order, new_size, new_order); + old_order = get_count_order_ulong(old_size); + new_order = get_count_order_ulong(new_size); + dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n", + old_size, old_order, new_size, new_order); assert(new_size < old_size); /* Remove and unlink all dummy nodes to remove. */ - fini_table(ht, new_order, old_order - new_order); + fini_table(ht, new_order + 1, old_order); } @@ -1546,6 +1685,9 @@ void _do_cds_lfht_resize(struct cds_lfht *ht) * Resize table, re-do if the target size has changed under us. 
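+	 * (The target can legitimately move under us:
+	 * cds_lfht_resize_lazy() and cds_lfht_resize_lazy_count()
+	 * update t.resize_target without holding the resize mutex,
+	 * so it is re-read on each iteration of the loop below.)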
*/ do { + assert(uatomic_read(&ht->in_progress_resize)); + if (CMM_LOAD_SHARED(ht->in_progress_destroy)) + break; ht->t.resize_initiated = 1; old_size = ht->t.size; new_size = CMM_LOAD_SHARED(ht->t.resize_target); @@ -1614,7 +1756,11 @@ void cds_lfht_resize_lazy(struct cds_lfht *ht, unsigned long size, int growth) cmm_smp_mb(); if (!CMM_LOAD_SHARED(ht->t.resize_initiated) && size < target_size) { uatomic_inc(&ht->in_progress_resize); - cmm_smp_mb(); /* increment resize count before calling it */ + cmm_smp_mb(); /* increment resize count before load destroy */ + if (CMM_LOAD_SHARED(ht->in_progress_destroy)) { + uatomic_dec(&ht->in_progress_resize); + return; + } work = malloc(sizeof(*work)); work->ht = ht; ht->cds_lfht_call_rcu(&work->head, do_resize_cb); @@ -1637,7 +1783,11 @@ void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size, cmm_smp_mb(); if (!CMM_LOAD_SHARED(ht->t.resize_initiated)) { uatomic_inc(&ht->in_progress_resize); - cmm_smp_mb(); /* increment resize count before calling it */ + cmm_smp_mb(); /* increment resize count before load destroy */ + if (CMM_LOAD_SHARED(ht->in_progress_destroy)) { + uatomic_dec(&ht->in_progress_resize); + return; + } work = malloc(sizeof(*work)); work->ht = ht; ht->cds_lfht_call_rcu(&work->head, do_resize_cb);
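
Taken together, these changes make removal and replacement iterator-based: callers first position a cds_lfht_iter with cds_lfht_lookup(), then hand that iterator to cds_lfht_del() or cds_lfht_replace(). A usage sketch of the new deletion convention (the include path is an assumption, since this diff only shows rculfhash.c; reclaiming the deleted node's memory after a grace period remains the caller's job):

	#include <urcu.h>
	#include "rculfhash.h"	/* assumed header for the cds_lfht API */

	/* Look up (key, key_len) and delete the matching node through
	 * the iterator-based API introduced by this commit. */
	static
	int delete_key(struct cds_lfht *ht, void *key, size_t key_len)
	{
		struct cds_lfht_iter iter;
		int ret;

		rcu_read_lock();
		cds_lfht_lookup(ht, key, key_len, &iter);
		ret = cds_lfht_del(ht, &iter);	/* -ENOENT if key was absent */
		rcu_read_unlock();
		return ret;
	}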