diff --git a/rculfhash.c b/rculfhash.c
index 09840f2..1487980 100644
--- a/rculfhash.c
+++ b/rculfhash.c
@@ -177,6 +177,12 @@
 #define MAX_TABLE_ORDER		64
 #endif
 
+/*
+ * Minimum number of dummy nodes to touch per thread to parallelize grow/shrink.
+ */
+#define MIN_PARTITION_PER_THREAD_ORDER	12
+#define MIN_PARTITION_PER_THREAD	(1UL << MIN_PARTITION_PER_THREAD_ORDER)
+
 #ifndef min
 #define min(a, b)	((a) < (b) ? (a) : (b))
 #endif
@@ -187,6 +193,8 @@
 
 /*
  * The removed flag needs to be updated atomically with the pointer.
+ * It indicates that no node must attach to the node scheduled for
+ * removal, and that node garbage collection must be performed.
  * The dummy flag does not require to be updated atomically with the
  * pointer, but it is added as a pointer low bit flag to save space.
  */
@@ -198,7 +206,7 @@
 #define END_VALUE	NULL
 
 struct ht_items_count {
-        unsigned long add, remove;
+        unsigned long add, del;
 } __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
 
 struct rcu_level {
@@ -235,6 +243,9 @@ struct cds_lfht {
         void (*cds_lfht_rcu_read_unlock)(void);
         void (*cds_lfht_rcu_thread_offline)(void);
         void (*cds_lfht_rcu_thread_online)(void);
+        void (*cds_lfht_rcu_register_thread)(void);
+        void (*cds_lfht_rcu_unregister_thread)(void);
+        pthread_attr_t *resize_attr;	/* Resize threads attributes */
         unsigned long count;		/* global approximate item count */
         struct ht_items_count *percpu_count;	/* per-cpu item count */
 };
@@ -244,11 +255,25 @@ struct rcu_resize_work {
         struct cds_lfht *ht;
 };
 
+struct partition_resize_work {
+        struct rcu_head head;
+        struct cds_lfht *ht;
+        unsigned long i, start, len;
+        void (*fct)(struct cds_lfht *ht, unsigned long i,
+                    unsigned long start, unsigned long len);
+};
+
+enum add_mode {
+        ADD_DEFAULT = 0,
+        ADD_UNIQUE = 1,
+        ADD_REPLACE = 2,
+};
+
 static
 struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht,
                                 unsigned long size,
                                 struct cds_lfht_node *node,
-                                int unique, int dummy);
+                                enum add_mode mode, int dummy);
 
 /*
  * Algorithm to reverse bits in a word by lookup table, extended to
@@ -548,7 +573,7 @@ void ht_count_add(struct cds_lfht *ht, unsigned long size)
 }
 
 static
-void ht_count_remove(struct cds_lfht *ht, unsigned long size)
+void ht_count_del(struct cds_lfht *ht, unsigned long size)
 {
         unsigned long percpu_count;
         int cpu;
@@ -558,18 +583,18 @@ void ht_count_remove(struct cds_lfht *ht, unsigned long size)
         cpu = ht_get_cpu();
         if (unlikely(cpu < 0))
                 return;
-        percpu_count = uatomic_add_return(&ht->percpu_count[cpu].remove, -1);
+        percpu_count = uatomic_add_return(&ht->percpu_count[cpu].del, -1);
         if (unlikely(!(percpu_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) {
                 unsigned long count;
 
-                dbg_printf("remove percpu %lu\n", percpu_count);
+                dbg_printf("del percpu %lu\n", percpu_count);
                 count = uatomic_add_return(&ht->count,
                                            -(1UL << COUNT_COMMIT_ORDER));
                 /* If power of 2 */
                 if (!(count & (count - 1))) {
                         if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) >= size)
                                 return;
-                        dbg_printf("remove set global %lu\n", count);
+                        dbg_printf("del set global %lu\n", count);
                         cds_lfht_resize_lazy_count(ht, size,
                                 count >> (CHAIN_LEN_TARGET - 1));
                 }
@@ -597,7 +622,7 @@ void ht_count_add(struct cds_lfht *ht, unsigned long size)
 }
 
 static
-void ht_count_remove(struct cds_lfht *ht, unsigned long size)
+void ht_count_del(struct cds_lfht *ht, unsigned long size)
 {
 }
 
@@ -730,6 +755,8 @@ void _cds_lfht_gc_bucket(struct cds_lfht_node *dummy, struct cds_lfht_node *node
                         new_next = flag_dummy(clear_flag(next));
                 else
                         new_next = clear_flag(next);
+                if (is_removed(iter))
+                        new_next = flag_removed(new_next);
                 (void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
         }
         return;
@@ -739,10 +766,10 @@ static
 struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht,
                                 unsigned long size,
                                 struct cds_lfht_node *node,
-                                int unique, int dummy)
+                                enum add_mode mode, int dummy)
 {
         struct cds_lfht_node *iter_prev, *iter, *next, *new_node, *new_next,
-                        *dummy_node;
+                        *dummy_node, *return_node;
         struct _cds_lfht_node *lookup;
         unsigned long hash, index, order;
 
@@ -776,12 +803,16 @@ struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht,
                         next = rcu_dereference(clear_flag(iter)->p.next);
                         if (unlikely(is_removed(next)))
                                 goto gc_node;
-                        if (unique
+                        if ((mode == ADD_UNIQUE || mode == ADD_REPLACE)
                             && !is_dummy(next)
                             && !ht->compare_fct(node->key, node->key_len,
                                                 clear_flag(iter)->key,
-                                                clear_flag(iter)->key_len))
-                                return clear_flag(iter);
+                                                clear_flag(iter)->key_len)) {
+                                if (mode == ADD_UNIQUE)
+                                        return clear_flag(iter);
+                                else /* mode == ADD_REPLACE */
+                                        goto replace;
+                        }
                         /* Only account for identical reverse hash once */
                         if (iter_prev->p.reverse_hash != clear_flag(iter)->p.reverse_hash
                             && !is_dummy(next))
@@ -789,6 +820,7 @@ struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht,
                         iter_prev = clear_flag(iter);
                         iter = next;
                 }
 
+        insert:
                 assert(node != clear_flag(iter));
                 assert(!is_removed(iter_prev));
@@ -803,17 +835,55 @@ struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht,
                 else
                         new_node = node;
                 if (uatomic_cmpxchg(&iter_prev->p.next, iter,
-                                new_node) != iter)
+                                new_node) != iter) {
                         continue;	/* retry */
-                else
+                } else {
+                        if (mode == ADD_REPLACE)
+                                return_node = NULL;
+                        else /* ADD_DEFAULT and ADD_UNIQUE */
+                                return_node = node;
                         goto gc_end;
+                }
+
+        replace:
+                /* Insert after node to be replaced */
+                iter_prev = clear_flag(iter);
+                iter = next;
+                assert(node != clear_flag(iter));
+                assert(!is_removed(iter_prev));
+                assert(!is_removed(iter));
+                assert(iter_prev != node);
+                assert(!dummy);
+                node->p.next = clear_flag(iter);
+                if (is_dummy(iter))
+                        new_node = flag_dummy(node);
+                else
+                        new_node = node;
+                /*
+                 * Here is the whole trick for lock-free replace: we add
+                 * the replacement node _after_ the node we want to
+                 * replace by atomically setting its next pointer at the
+                 * same time we set its removal flag. Given that
+                 * the lookups/get next use an iterator aware of the
+                 * next pointer, they will either skip the old node due
+                 * to the removal flag and see the new node, or use
+                 * the old node, but will not see the new one.
+                 */
+                new_node = flag_removed(new_node);
+                if (uatomic_cmpxchg(&iter_prev->p.next,
+                                iter, new_node) != iter) {
+                        continue;	/* retry */
+                } else {
+                        return_node = iter_prev;
+                        goto gc_end;
+                }
+
         gc_node:
                 assert(!is_removed(iter));
                 if (is_dummy(iter))
                         new_next = flag_dummy(clear_flag(next));
                 else
                         new_next = clear_flag(next);
-                assert(new_next != NULL);
                 (void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
                 /* retry */
         }
@@ -824,11 +894,11 @@ gc_end:
         lookup = &ht->t.tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))];
         dummy_node = (struct cds_lfht_node *) lookup;
         _cds_lfht_gc_bucket(dummy_node, node);
-        return node;
+        return return_node;
 }
 
 static
-int _cds_lfht_remove(struct cds_lfht *ht, unsigned long size,
+int _cds_lfht_del(struct cds_lfht *ht, unsigned long size,
                 struct cds_lfht_node *node,
                 int dummy_removal)
 {
@@ -842,6 +912,8 @@ int _cds_lfht_remove(struct cds_lfht *ht, unsigned long size,
         assert(!is_removed(node));
         old = rcu_dereference(node->p.next);
         do {
+                struct cds_lfht_node *new_next;
+
                 next = old;
                 if (unlikely(is_removed(next)))
                         goto end;
@@ -849,9 +921,8 @@ int _cds_lfht_remove(struct cds_lfht *ht, unsigned long size,
                         assert(is_dummy(next));
                 else
                         assert(!is_dummy(next));
-                assert(next != NULL);
-                old = uatomic_cmpxchg(&node->p.next, next,
-                                flag_removed(next));
+                new_next = flag_removed(next);
+                old = uatomic_cmpxchg(&node->p.next, next, new_next);
         } while (old != next);
 
         /* We performed the (logical) deletion. */
@@ -881,19 +952,77 @@ end:
         return -ENOENT;
 }
 
+static
+void *partition_resize_thread(void *arg)
+{
+        struct partition_resize_work *work = arg;
+
+        work->ht->cds_lfht_rcu_register_thread();
+        work->fct(work->ht, work->i, work->start, work->len);
+        work->ht->cds_lfht_rcu_unregister_thread();
+        return NULL;
+}
+
+static
+void partition_resize_helper(struct cds_lfht *ht, unsigned long i,
+                unsigned long len,
+                void (*fct)(struct cds_lfht *ht, unsigned long i,
+                        unsigned long start, unsigned long len))
+{
+        unsigned long partition_len;
+        struct partition_resize_work *work;
+        int thread, ret;
+        unsigned long nr_threads;
+        pthread_t *thread_id;
+
+        /*
+         * Note: nr_cpus_mask + 1 is always power of 2.
+         * We spawn just the number of threads we need to satisfy the minimum
+         * partition size, up to the number of CPUs in the system.
+         */
+        nr_threads = min(nr_cpus_mask + 1,
+                         len >> MIN_PARTITION_PER_THREAD_ORDER);
+        partition_len = len >> get_count_order_ulong(nr_threads);
+        work = calloc(nr_threads, sizeof(*work));
+        thread_id = calloc(nr_threads, sizeof(*thread_id));
+        assert(work);
+        for (thread = 0; thread < nr_threads; thread++) {
+                work[thread].ht = ht;
+                work[thread].i = i;
+                work[thread].len = partition_len;
+                work[thread].start = thread * partition_len;
+                work[thread].fct = fct;
+                ret = pthread_create(&thread_id[thread], ht->resize_attr,
+                        partition_resize_thread, &work[thread]);
+                assert(!ret);
+        }
+        for (thread = 0; thread < nr_threads; thread++) {
+                ret = pthread_join(thread_id[thread], NULL);
+                assert(!ret);
+        }
+        free(work);
+        free(thread_id);
+}
+
 /*
  * Holding RCU read lock to protect _cds_lfht_add against memory
  * reclaim that could be performed by other call_rcu worker threads (ABA
  * problem).
+ *
+ * When we reach a certain length, we can split this population phase over
+ * many worker threads, based on the number of CPUs available in the system.
+ * This should therefore take care of not having the expand lagging behind too
+ * many concurrent insertion threads by using the scheduler's ability to
+ * schedule dummy node population fairly with insertions.
  */
 static
-void init_table_populate(struct cds_lfht *ht, unsigned long i, unsigned long len)
+void init_table_populate_partition(struct cds_lfht *ht, unsigned long i,
+                                   unsigned long start, unsigned long len)
 {
         unsigned long j;
 
-        ht->cds_lfht_rcu_thread_online();
         ht->cds_lfht_rcu_read_lock();
-        for (j = 0; j < len; j++) {
+        for (j = start; j < start + len; j++) {
                 struct cds_lfht_node *new_node =
                         (struct cds_lfht_node *) &ht->t.tbl[i]->nodes[j];
 
@@ -902,12 +1031,25 @@ void init_table_populate(struct cds_lfht *ht, unsigned long i, unsigned long len
                 new_node->p.reverse_hash =
                                 bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j);
                 (void) _cds_lfht_add(ht, !i ? 0 : (1UL << (i - 1)),
-                                new_node, 0, 1);
+                                new_node, ADD_DEFAULT, 1);
                 if (CMM_LOAD_SHARED(ht->in_progress_destroy))
                         break;
         }
         ht->cds_lfht_rcu_read_unlock();
-        ht->cds_lfht_rcu_thread_offline();
+}
+
+static
+void init_table_populate(struct cds_lfht *ht, unsigned long i,
+                         unsigned long len)
+{
+        assert(nr_cpus_mask != -1);
+        if (nr_cpus_mask < 0 || len < 2 * MIN_PARTITION_PER_THREAD) {
+                ht->cds_lfht_rcu_thread_online();
+                init_table_populate_partition(ht, i, 0, len);
+                ht->cds_lfht_rcu_thread_offline();
+                return;
+        }
+        partition_resize_helper(ht, i, len, init_table_populate_partition);
 }
 
 static
@@ -931,6 +1073,7 @@ void init_table(struct cds_lfht *ht,
 
                 ht->t.tbl[i] = calloc(1, sizeof(struct rcu_level)
                                 + (len * sizeof(struct _cds_lfht_node)));
+                assert(ht->t.tbl[i]);
 
                 /*
                  * Set all dummy nodes reverse hash values for a level and
@@ -969,15 +1112,20 @@
  *
  * Logical removal and garbage collection can therefore be done in batch or on a
  * node-per-node basis, as long as the guarantee above holds.
+ *
+ * When we reach a certain length, we can split this removal over many worker
+ * threads, based on the number of CPUs available in the system. This should
+ * take care of not letting resize process lag behind too many concurrent
+ * updater threads actively inserting into the hash table.
  */
 static
-void remove_table(struct cds_lfht *ht, unsigned long i, unsigned long len)
+void remove_table_partition(struct cds_lfht *ht, unsigned long i,
+                            unsigned long start, unsigned long len)
 {
         unsigned long j;
 
-        ht->cds_lfht_rcu_thread_online();
         ht->cds_lfht_rcu_read_lock();
-        for (j = 0; j < len; j++) {
+        for (j = start; j < start + len; j++) {
                 struct cds_lfht_node *fini_node =
                         (struct cds_lfht_node *) &ht->t.tbl[i]->nodes[j];
 
@@ -985,13 +1133,26 @@ void remove_table(struct cds_lfht *ht, unsigned long i, unsigned long len)
                           i, j, !i ? 0 : (1UL << (i - 1)) + j);
                 fini_node->p.reverse_hash =
                         bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j);
-                (void) _cds_lfht_remove(ht, !i ? 0 : (1UL << (i - 1)),
+                (void) _cds_lfht_del(ht, !i ? 0 : (1UL << (i - 1)),
                                 fini_node, 1);
                 if (CMM_LOAD_SHARED(ht->in_progress_destroy))
                         break;
         }
         ht->cds_lfht_rcu_read_unlock();
-        ht->cds_lfht_rcu_thread_offline();
+}
+
+static
+void remove_table(struct cds_lfht *ht, unsigned long i, unsigned long len)
+{
+
+        assert(nr_cpus_mask != -1);
+        if (nr_cpus_mask < 0 || len < 2 * MIN_PARTITION_PER_THREAD) {
+                ht->cds_lfht_rcu_thread_online();
+                remove_table_partition(ht, i, 0, len);
+                ht->cds_lfht_rcu_thread_offline();
+                return;
+        }
+        partition_resize_helper(ht, i, len, remove_table_partition);
 }
 
 static
@@ -1041,7 +1202,7 @@ void fini_table(struct cds_lfht *ht,
         }
 }
 
-struct cds_lfht *cds_lfht_new(cds_lfht_hash_fct hash_fct,
+struct cds_lfht *_cds_lfht_new(cds_lfht_hash_fct hash_fct,
                         cds_lfht_compare_fct compare_fct,
                         unsigned long hash_seed,
                         unsigned long init_size,
@@ -1052,7 +1213,10 @@ struct cds_lfht *cds_lfht_new(cds_lfht_hash_fct hash_fct,
                         void (*cds_lfht_rcu_read_lock)(void),
                         void (*cds_lfht_rcu_read_unlock)(void),
                         void (*cds_lfht_rcu_thread_offline)(void),
-                        void (*cds_lfht_rcu_thread_online)(void))
+                        void (*cds_lfht_rcu_thread_online)(void),
+                        void (*cds_lfht_rcu_register_thread)(void),
+                        void (*cds_lfht_rcu_unregister_thread)(void),
+                        pthread_attr_t *attr)
 {
         struct cds_lfht *ht;
         unsigned long order;
@@ -1061,6 +1225,7 @@ struct cds_lfht *cds_lfht_new(cds_lfht_hash_fct hash_fct,
         if (init_size && (init_size & (init_size - 1)))
                 return NULL;
         ht = calloc(1, sizeof(struct cds_lfht));
+        assert(ht);
         ht->hash_fct = hash_fct;
         ht->compare_fct = compare_fct;
         ht->hash_seed = hash_seed;
@@ -1070,6 +1235,9 @@ struct cds_lfht *cds_lfht_new(cds_lfht_hash_fct hash_fct,
         ht->cds_lfht_rcu_read_unlock = cds_lfht_rcu_read_unlock;
         ht->cds_lfht_rcu_thread_offline = cds_lfht_rcu_thread_offline;
         ht->cds_lfht_rcu_thread_online = cds_lfht_rcu_thread_online;
+        ht->cds_lfht_rcu_register_thread = cds_lfht_rcu_register_thread;
+        ht->cds_lfht_rcu_unregister_thread = cds_lfht_rcu_unregister_thread;
+        ht->resize_attr = attr;
         ht->percpu_count = alloc_per_cpu_items_count();
         /* this mutex should not nest in read-side C.S. */
         pthread_mutex_init(&ht->resize_mutex, NULL);
@@ -1084,7 +1252,8 @@ struct cds_lfht *cds_lfht_new(cds_lfht_hash_fct hash_fct,
         return ht;
 }
 
-struct cds_lfht_node *cds_lfht_lookup(struct cds_lfht *ht, void *key, size_t key_len)
+void cds_lfht_lookup(struct cds_lfht *ht, void *key, size_t key_len,
+                struct cds_lfht_iter *iter)
 {
         struct cds_lfht_node *node, *next, *dummy_node;
         struct _cds_lfht_node *lookup;
@@ -1121,21 +1290,22 @@ struct cds_lfht_node *cds_lfht_lookup(struct cds_lfht *ht, void *key, size_t key
                 node = clear_flag(next);
         }
         assert(!node || !is_dummy(rcu_dereference(node->p.next)));
-        return node;
+        iter->node = node;
+        iter->next = next;
 }
 
-struct cds_lfht_node *cds_lfht_next(struct cds_lfht *ht,
-                                struct cds_lfht_node *node)
+void cds_lfht_next(struct cds_lfht *ht, struct cds_lfht_iter *iter)
 {
-        struct cds_lfht_node *next;
+        struct cds_lfht_node *node, *next;
         unsigned long reverse_hash;
         void *key;
         size_t key_len;
 
+        node = iter->node;
         reverse_hash = node->p.reverse_hash;
         key = node->key;
         key_len = node->key_len;
-        next = rcu_dereference(node->p.next);
+        next = iter->next;
         node = clear_flag(next);
 
         for (;;) {
@@ -1156,7 +1326,8 @@ struct cds_lfht_node *cds_lfht_next(struct cds_lfht *ht,
                 node = clear_flag(next);
         }
         assert(!node || !is_dummy(rcu_dereference(node->p.next)));
-        return node;
+        iter->node = node;
+        iter->next = next;
 }
 
 void cds_lfht_add(struct cds_lfht *ht, struct cds_lfht_node *node)
@@ -1167,12 +1338,12 @@ void cds_lfht_add(struct cds_lfht *ht, struct cds_lfht_node *node)
         node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);
 
         size = rcu_dereference(ht->t.size);
-        (void) _cds_lfht_add(ht, size, node, 0, 0);
+        (void) _cds_lfht_add(ht, size, node, ADD_DEFAULT, 0);
         ht_count_add(ht, size);
 }
 
 struct cds_lfht_node *cds_lfht_add_unique(struct cds_lfht *ht,
-                                struct cds_lfht_node *node)
+                                        struct cds_lfht_node *node)
 {
         unsigned long hash, size;
         struct cds_lfht_node *ret;
@@ -1181,21 +1352,37 @@ struct cds_lfht_node *cds_lfht_add_unique(struct cds_lfht *ht,
         node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);
 
         size = rcu_dereference(ht->t.size);
-        ret = _cds_lfht_add(ht, size, node, 1, 0);
+        ret = _cds_lfht_add(ht, size, node, ADD_UNIQUE, 0);
         if (ret == node)
                 ht_count_add(ht, size);
         return ret;
 }
 
-int cds_lfht_remove(struct cds_lfht *ht, struct cds_lfht_node *node)
+struct cds_lfht_node *cds_lfht_replace(struct cds_lfht *ht,
+                                struct cds_lfht_node *node)
+{
+        unsigned long hash, size;
+        struct cds_lfht_node *ret;
+
+        hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
+        node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);
+
+        size = rcu_dereference(ht->t.size);
+        ret = _cds_lfht_add(ht, size, node, ADD_REPLACE, 0);
+        if (ret == NULL)
+                ht_count_add(ht, size);
+        return ret;
+}
+
+int cds_lfht_del(struct cds_lfht *ht, struct cds_lfht_node *node)
 {
         unsigned long size;
         int ret;
 
         size = rcu_dereference(ht->t.size);
-        ret = _cds_lfht_remove(ht, size, node, 0);
+        ret = _cds_lfht_del(ht, size, node, 0);
         if (!ret)
-                ht_count_remove(ht, size);
+                ht_count_del(ht, size);
         return ret;
 }
 
@@ -1240,7 +1427,7 @@ int cds_lfht_delete_dummy(struct cds_lfht *ht)
 * Should only be called when no more concurrent readers nor writers can
 * possibly access the table.
 */
-int cds_lfht_destroy(struct cds_lfht *ht)
+int cds_lfht_destroy(struct cds_lfht *ht, pthread_attr_t **attr)
 {
         int ret;
 
@@ -1252,6 +1439,8 @@ int cds_lfht_destroy(struct cds_lfht *ht)
         if (ret)
                 return ret;
         free_per_cpu_items_count(ht->percpu_count);
+        if (attr)
+                *attr = ht->resize_attr;
         poison_free(ht);
         return ret;
 }
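
The replace path in _cds_lfht_add hinges on one detail: a single CAS on the old node's next pointer simultaneously publishes the replacement and logically deletes the old node. Below is a minimal standalone sketch of just that step. The simplified struct node is hypothetical; REMOVED_FLAG and flag_removed() mirror the low-bit flag scheme rculfhash.c already uses (exact macro values assumed), and uatomic_cmpxchg() is the same urcu primitive the patch calls.

/*
 * Sketch only, not part of the patch: core of the lock-free replace.
 */
#include <stdint.h>
#include <urcu/uatomic.h>

#define REMOVED_FLAG	(1UL << 0)	/* assumed to match rculfhash.c */

struct node {
	struct node *next;
};

static struct node *flag_removed(struct node *p)
{
	return (struct node *)((uintptr_t) p | REMOVED_FLAG);
}

/*
 * old_node is the node being replaced, new_node its replacement,
 * successor the unflagged value currently in old_node->next. After a
 * successful CAS, old_node->next == REMOVED_FLAG | new_node: a
 * traversal either skips old_node (removed flag set) and reaches
 * new_node, or still returns old_node; no reader sees both as live.
 */
static int replace_once(struct node *old_node, struct node *new_node,
		struct node *successor)
{
	new_node->next = successor;	/* replacement inherits successor */
	return uatomic_cmpxchg(&old_node->next, successor,
			flag_removed(new_node)) == successor;
}

On CAS failure the caller re-reads the chain and retries, exactly as the continue; /* retry */ in the hunk above does.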
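partition_resize_helper() sizes its thread pool as nr_threads = min(nr_cpus_mask + 1, len >> MIN_PARTITION_PER_THREAD_ORDER), then hands each thread a power-of-two slice. A worked sketch of that arithmetic follows; the 8-CPU machine and the 2^20-dummy-node level are assumed example inputs, and count_order() stands in for the file's get_count_order_ulong().

/*
 * Worked sketch (standalone): thread-count math of partition_resize_helper().
 * The constants and min() are copied from the patch; inputs are examples.
 */
#include <stdio.h>

#define MIN_PARTITION_PER_THREAD_ORDER	12
#define min(a, b)	((a) < (b) ? (a) : (b))

/* ceil(log2(v)); nr_threads is always a power of 2 here */
static unsigned long count_order(unsigned long v)
{
	unsigned long order = 0;

	while ((1UL << order) < v)
		order++;
	return order;
}

int main(void)
{
	unsigned long nr_cpus_mask = 7;	/* e.g. 8 CPUs */
	unsigned long len = 1UL << 20;	/* dummy nodes in this level */
	unsigned long nr_threads, partition_len;

	nr_threads = min(nr_cpus_mask + 1,
			 len >> MIN_PARTITION_PER_THREAD_ORDER);
	partition_len = len >> count_order(nr_threads);
	/* prints: 8 threads, 131072 dummy nodes each */
	printf("%lu threads, %lu dummy nodes each\n",
		nr_threads, partition_len);
	return 0;
}

The 2 * MIN_PARTITION_PER_THREAD guard in init_table_populate() and remove_table() keeps smaller levels on the calling thread, so workers are only spawned when each can be handed at least the minimum partition.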
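Since cds_lfht_lookup() and cds_lfht_next() now fill a caller-supplied cds_lfht_iter instead of returning a node, call sites change shape. A minimal usage sketch under stated assumptions: the mynode wrapper and visit_duplicates() are hypothetical, header paths are assumed, and the iterator's node field is read directly, as this patch defines it.

/*
 * Usage sketch (assumptions noted above): walk every node matching a
 * key with the iterator-based API introduced by this patch.
 */
#include <urcu.h>
#include <urcu/compiler.h>	/* caa_container_of(); path assumed */
#include <urcu/rculfhash.h>	/* header path assumed */

struct mynode {
	int value;
	struct cds_lfht_node node;	/* chained into the hash table */
};

static void visit_duplicates(struct cds_lfht *ht, void *key, size_t key_len)
{
	struct cds_lfht_iter iter;

	rcu_read_lock();
	cds_lfht_lookup(ht, key, key_len, &iter);
	while (iter.node) {	/* NULL when the duplicate chain ends */
		struct mynode *n =
			caa_container_of(iter.node, struct mynode, node);
		/* ... use n->value under the RCU read-side lock ... */
		cds_lfht_next(ht, &iter);	/* next node with same key */
	}
	rcu_read_unlock();
}

Keeping iter->next alongside iter->node is what lets cds_lfht_next() resume the traversal without re-reading node->p.next, which in turn is what makes the replace trick above invisible to readers.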