X-Git-Url: http://git.liburcu.org/?p=userspace-rcu.git;a=blobdiff_plain;f=rculfhash.c;h=8081dc731af4b25bf6bb0c505b6d83852a962678;hp=e9cf0622f53576ef8ad8c08eda082c37f1151b86;hb=7a3e2ed26a6be1deac2cfde1d062526497d4ae9f;hpb=71bb3aca70894bd78ec75f47c0c464261ad1dc86

diff --git a/rculfhash.c b/rculfhash.c
index e9cf062..8081dc7 100644
--- a/rculfhash.c
+++ b/rculfhash.c
@@ -561,6 +561,7 @@ void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size,
 
 static long nr_cpus_mask = -1;
 static long split_count_mask = -1;
+static int split_count_order = -1;
 
 #if defined(HAVE_SYSCONF)
 static void ht_init_nr_cpus_mask(void)
@@ -597,6 +598,8 @@ void alloc_split_items_count(struct cds_lfht *ht)
 			split_count_mask = DEFAULT_SPLIT_COUNT_MASK;
 		else
 			split_count_mask = nr_cpus_mask;
+		split_count_order =
+			cds_lfht_get_count_order_ulong(split_count_mask + 1);
 	}
 
 	assert(split_count_mask >= 0);
@@ -712,14 +715,39 @@ void check_resize(struct cds_lfht *ht, unsigned long size, uint32_t chain_len)
 	 * Use bucket-local length for small table expand and for
 	 * environments lacking per-cpu data support.
 	 */
-	if (count >= (1UL << COUNT_COMMIT_ORDER))
+	if (count >= (1UL << (COUNT_COMMIT_ORDER + split_count_order)))
 		return;
 	if (chain_len > 100)
 		dbg_printf("WARNING: large chain length: %u.\n",
 			   chain_len);
-	if (chain_len >= CHAIN_LEN_RESIZE_THRESHOLD)
-		cds_lfht_resize_lazy_grow(ht, size,
-			cds_lfht_get_count_order_u32(chain_len - (CHAIN_LEN_TARGET - 1)));
+	if (chain_len >= CHAIN_LEN_RESIZE_THRESHOLD) {
+		int growth;
+
+		/*
+		 * Ideal growth calculated based on chain length.
+		 */
+		growth = cds_lfht_get_count_order_u32(chain_len
+				- (CHAIN_LEN_TARGET - 1));
+		if ((ht->flags & CDS_LFHT_ACCOUNTING)
+				&& (size << growth)
+					>= (1UL << (COUNT_COMMIT_ORDER
+						+ split_count_order))) {
+			/*
+			 * If ideal growth expands the hash table size
+			 * beyond the "small hash table" sizes, use the
+			 * maximum small hash table size to attempt
+			 * expanding the hash table. This only applies
+			 * when node accounting is available, otherwise
+			 * the chain length is used to expand the hash
+			 * table in every case.
+			 */
+			growth = COUNT_COMMIT_ORDER + split_count_order
+				- cds_lfht_get_count_order_ulong(size);
+			if (growth <= 0)
+				return;
+		}
+		cds_lfht_resize_lazy_grow(ht, size, growth);
+	}
 }
 
 static
@@ -832,13 +860,16 @@ void _cds_lfht_gc_bucket(struct cds_lfht_node *bucket, struct cds_lfht_node *nod
 
 	assert(!is_bucket(bucket));
 	assert(!is_removed(bucket));
+	assert(!is_removal_owner(bucket));
 	assert(!is_bucket(node));
 	assert(!is_removed(node));
+	assert(!is_removal_owner(node));
 	for (;;) {
 		iter_prev = bucket;
 		/* We can always skip the bucket node initially */
 		iter = rcu_dereference(iter_prev->next);
 		assert(!is_removed(iter));
+		assert(!is_removal_owner(iter));
 		assert(iter_prev->reverse_hash <= node->reverse_hash);
 		/*
 		 * We should never be called with bucket (start of chain)
@@ -859,6 +890,7 @@ void _cds_lfht_gc_bucket(struct cds_lfht_node *bucket, struct cds_lfht_node *nod
 			iter = next;
 		}
 		assert(!is_removed(iter));
+		assert(!is_removal_owner(iter));
 		if (is_bucket(iter))
 			new_next = flag_bucket(clear_flag(next));
 		else
@@ -879,8 +911,10 @@ int _cds_lfht_replace(struct cds_lfht *ht, unsigned long size,
 		return -ENOENT;
 
 	assert(!is_removed(old_node));
+	assert(!is_removal_owner(old_node));
 	assert(!is_bucket(old_node));
 	assert(!is_removed(new_node));
+	assert(!is_removal_owner(new_node));
 	assert(!is_bucket(new_node));
 	assert(new_node != old_node);
 	for (;;) {
@@ -955,6 +989,7 @@ void _cds_lfht_add(struct cds_lfht *ht,
 
 	assert(!is_bucket(node));
 	assert(!is_removed(node));
+	assert(!is_removal_owner(node));
 	bucket = lookup_bucket(ht, size, hash);
 	for (;;) {
 		uint32_t chain_len = 0;
@@ -1015,7 +1050,9 @@ void _cds_lfht_add(struct cds_lfht *ht,
 	insert:
 		assert(node != clear_flag(iter));
 		assert(!is_removed(iter_prev));
+		assert(!is_removal_owner(iter_prev));
 		assert(!is_removed(iter));
+		assert(!is_removal_owner(iter));
 		assert(iter_prev != node);
 		if (!bucket_flag)
 			node->next = clear_flag(iter);
@@ -1035,6 +1072,7 @@ void _cds_lfht_add(struct cds_lfht *ht,
 
 	gc_node:
 		assert(!is_removed(iter));
+		assert(!is_removal_owner(iter));
 		if (is_bucket(iter))
 			new_next = flag_bucket(clear_flag(next));
 		else
@@ -1073,6 +1111,11 @@ int _cds_lfht_del(struct cds_lfht *ht, unsigned long size,
 	if (caa_unlikely(is_removed(next)))
 		return -ENOENT;
 	assert(!is_bucket(next));
+	/*
+	 * The del operation semantics guarantee a full memory barrier
+	 * before the uatomic_or atomic commit of the deletion flag.
+	 */
+	cmm_smp_mb__before_uatomic_or();
 	/*
 	 * We set the REMOVED_FLAG unconditionally. Note that there may
 	 * be more than one concurrent thread setting this flag.
@@ -1127,11 +1170,15 @@ void partition_resize_helper(struct cds_lfht *ht, unsigned long i,
 		void (*fct)(struct cds_lfht *ht, unsigned long i,
 			unsigned long start, unsigned long len))
 {
-	unsigned long partition_len;
+	unsigned long partition_len, start = 0;
 	struct partition_resize_work *work;
 	int thread, ret;
 	unsigned long nr_threads;
 
+	assert(nr_cpus_mask != -1);
+	if (nr_cpus_mask < 0 || len < 2 * MIN_PARTITION_PER_THREAD)
+		goto fallback;
+
 	/*
 	 * Note: nr_cpus_mask + 1 is always power of 2.
 	 * We spawn just the number of threads we need to satisfy the minimum
@@ -1145,7 +1192,10 @@ void partition_resize_helper(struct cds_lfht *ht, unsigned long i,
 	}
 	partition_len = len >> cds_lfht_get_count_order_ulong(nr_threads);
 	work = calloc(nr_threads, sizeof(*work));
-	assert(work);
+	if (!work) {
+		dbg_printf("error allocating for resize, single-threading\n");
+		goto fallback;
+	}
 	for (thread = 0; thread < nr_threads; thread++) {
 		work[thread].ht = ht;
 		work[thread].i = i;
@@ -1154,6 +1204,17 @@ void partition_resize_helper(struct cds_lfht *ht, unsigned long i,
 		work[thread].fct = fct;
 		ret = pthread_create(&(work[thread].thread_id), ht->resize_attr,
 			partition_resize_thread, &work[thread]);
+		if (ret == EAGAIN) {
+			/*
+			 * Out of resources: wait and join the threads
+			 * we've created, then handle leftovers.
+			 */
+			dbg_printf("error spawning for resize, single-threading\n");
+			start = work[thread].start;
+			len -= start;
+			nr_threads = thread;
+			break;
+		}
 		assert(!ret);
 	}
 	for (thread = 0; thread < nr_threads; thread++) {
@@ -1161,6 +1222,18 @@ void partition_resize_helper(struct cds_lfht *ht, unsigned long i,
 		ret = pthread_join(work[thread].thread_id, NULL);
 		assert(!ret);
 	}
 	free(work);
+
+	/*
+	 * A pthread_create failure above will either leave us with no
+	 * threads to join, or with a non-zero start offset. Fall back
+	 * to single-threaded processing of the leftovers.
+	 */
+	if (start == 0 && nr_threads > 0)
+		return;
+fallback:
+	ht->flavor->thread_online();
+	fct(ht, i, start, len);
+	ht->flavor->thread_offline();
 }
 
 /*
@@ -1198,13 +1271,6 @@
 static
 void init_table_populate(struct cds_lfht *ht, unsigned long i,
 		unsigned long len)
 {
-	assert(nr_cpus_mask != -1);
-	if (nr_cpus_mask < 0 || len < 2 * MIN_PARTITION_PER_THREAD) {
-		ht->flavor->thread_online();
-		init_table_populate_partition(ht, i, 0, len);
-		ht->flavor->thread_offline();
-		return;
-	}
 	partition_resize_helper(ht, i, len, init_table_populate_partition);
 }
 
@@ -1297,14 +1363,6 @@ void remove_table_partition(struct cds_lfht *ht, unsigned long i,
 static
 void remove_table(struct cds_lfht *ht, unsigned long i,
 		unsigned long len)
 {
-
-	assert(nr_cpus_mask != -1);
-	if (nr_cpus_mask < 0 || len < 2 * MIN_PARTITION_PER_THREAD) {
-		ht->flavor->thread_online();
-		remove_table_partition(ht, i, 0, len);
-		ht->flavor->thread_offline();
-		return;
-	}
 	partition_resize_helper(ht, i, len, remove_table_partition);
 }
 
@@ -1692,6 +1750,7 @@ int cds_lfht_delete_bucket(struct cds_lfht *ht)
 		if (!is_bucket(node))
 			return -EPERM;
 		assert(!is_removed(node));
+		assert(!is_removal_owner(node));
 	} while (!is_end(node));
 	/*
	 * size accessed without rcu_dereference because hash table is
 	 * being destroyed.
 	 */
@@ -1723,8 +1782,10 @@ int cds_lfht_destroy(struct cds_lfht *ht, pthread_attr_t **attr)
 	/* Wait for in-flight resize operations to complete */
 	_CMM_STORE_SHARED(ht->in_progress_destroy, 1);
 	cmm_smp_mb();	/* Store destroy before load resize */
+	ht->flavor->thread_offline();
 	while (uatomic_read(&ht->in_progress_resize))
 		poll(NULL, 0, 100);	/* wait for 100ms */
+	ht->flavor->thread_online();
 	ret = cds_lfht_delete_bucket(ht);
 	if (ret)
 		return ret;
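
Some notes on the hunks above, with small standalone sketches that are not part of the patch.

The check_resize() hunk caps the chain-length-driven growth so that, when node accounting is enabled, the table never expands past the maximum "small table" size of 1UL << (COUNT_COMMIT_ORDER + split_count_order). The sketch below mirrors that arithmetic so it can be exercised in isolation. It assumes CDS_LFHT_ACCOUNTING is set; get_count_order_ulong() is a simplified stand-in for cds_lfht_get_count_order_ulong(), and the constant values are illustrative rather than authoritative (rculfhash.c of this era defines COUNT_COMMIT_ORDER as 10, CHAIN_LEN_TARGET as 1, CHAIN_LEN_RESIZE_THRESHOLD as 3).

#include <stdio.h>

/* Illustrative values; the real definitions live in rculfhash.c. */
#define COUNT_COMMIT_ORDER		10
#define CHAIN_LEN_TARGET		1
#define CHAIN_LEN_RESIZE_THRESHOLD	3

/* Minimum order such that x <= (1UL << order). */
static int get_count_order_ulong(unsigned long x)
{
	int order = 0;

	while ((1UL << order) < x)
		order++;
	return order;
}

/*
 * Mirror of the capped-growth computation in check_resize(): derive
 * the ideal growth order from the chain length, then clamp it so that
 * size << growth never exceeds the maximum "small table" size.
 */
static int capped_growth(unsigned long size, unsigned int chain_len,
		int split_count_order)
{
	int growth;

	if (chain_len < CHAIN_LEN_RESIZE_THRESHOLD)
		return 0;	/* no resize needed */
	growth = get_count_order_ulong(chain_len - (CHAIN_LEN_TARGET - 1));
	if ((size << growth)
			>= (1UL << (COUNT_COMMIT_ORDER + split_count_order))) {
		growth = COUNT_COMMIT_ORDER + split_count_order
			- get_count_order_ulong(size);
		if (growth <= 0)
			return 0;	/* already at the maximum small size */
	}
	return growth;
}

int main(void)
{
	/* E.g. 4 CPUs -> split_count_order == 2, cap = 4096 buckets. */
	printf("%d\n", capped_growth(256, 5, 2));	/* 3: ideal growth fits */
	printf("%d\n", capped_growth(2048, 9, 2));	/* 1: clamped to reach 4096 */
	printf("%d\n", capped_growth(4096, 9, 2));	/* 0: already at the cap */
	return 0;
}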
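The new cmm_smp_mb__before_uatomic_or() call in _cds_lfht_del() documents that all of the deleting thread's prior accesses must be ordered before other threads can observe the removal flag. A rough C11 rendering of that ordering requirement follows; it is a sketch, not the urcu implementation, and struct node, REMOVED_FLAG and mark_removed() are invented for illustration.

#include <stdatomic.h>
#include <stdint.h>

#define REMOVED_FLAG	(1UL << 0)	/* illustrative flag bit */

struct node {
	_Atomic uintptr_t next;		/* low bits carry flag state */
	int payload;
};

/*
 * Full memory barrier before the atomic OR that publishes the removal
 * flag, so that everything the deleting thread did beforehand is
 * visible to any thread that sees REMOVED_FLAG set.
 */
static void mark_removed(struct node *node)
{
	atomic_thread_fence(memory_order_seq_cst);	/* full barrier */
	atomic_fetch_or_explicit(&node->next, REMOVED_FLAG,
			memory_order_relaxed);
}

int main(void)
{
	struct node n = { .next = 0, .payload = 42 };

	mark_removed(&n);
	return !(atomic_load(&n.next) & REMOVED_FLAG);
}

The point of the split __before_uatomic_or() form, as I understand it, is that on architectures whose atomic OR already implies a full barrier (e.g. x86 lock-prefixed instructions), the explicit fence can compile away instead of being paid twice.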
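Finally, the partition_resize_helper() hunks replace hard assertions on calloc() and pthread_create() failures with a graceful single-threaded fallback, and fold the former early-out paths of init_table_populate() and remove_table() into the helper itself. Stripped of the hash-table specifics, the control flow has the shape sketched below; struct worker_arg, process() and parallel_process() are made-up names, and len is assumed divisible by nr_threads for brevity. Build with -lpthread.

#include <assert.h>
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct worker_arg {	/* hypothetical stand-in for partition_resize_work */
	pthread_t tid;
	unsigned long start, len;
};

static void process(unsigned long start, unsigned long len)
{
	printf("processing [%lu, %lu)\n", start, start + len);
}

static void *worker(void *arg)
{
	struct worker_arg *w = arg;

	process(w->start, w->len);
	return NULL;
}

/*
 * Split [0, len) across nr_threads workers. If pthread_create()
 * reports EAGAIN (out of resources), stop spawning, join only the
 * workers already created, and process the remaining range in the
 * calling thread, the same shape as the helper's fallback path.
 */
static void parallel_process(unsigned long len, unsigned long nr_threads)
{
	unsigned long partition_len = len / nr_threads, start = 0;
	struct worker_arg *work;
	unsigned long thread;
	int ret;

	work = calloc(nr_threads, sizeof(*work));
	if (!work)
		goto fallback;	/* no memory: whole range, single thread */
	for (thread = 0; thread < nr_threads; thread++) {
		work[thread].start = thread * partition_len;
		work[thread].len = partition_len;
		ret = pthread_create(&work[thread].tid, NULL,
				worker, &work[thread]);
		if (ret == EAGAIN) {
			/* Leftovers begin where the failed thread would have. */
			start = work[thread].start;
			len -= start;
			nr_threads = thread;	/* join only spawned threads */
			break;
		}
		assert(!ret);
	}
	for (thread = 0; thread < nr_threads; thread++) {
		ret = pthread_join(work[thread].tid, NULL);
		assert(!ret);
	}
	free(work);
	if (start == 0 && nr_threads > 0)
		return;		/* every partition was handled by a worker */
fallback:
	process(start, len);	/* leftovers (or whole range) in the caller */
}

int main(void)
{
	parallel_process(1024, 4);
	return 0;
}

Centralizing the fallback in one place is what lets the two callers shed their duplicated nr_cpus_mask/MIN_PARTITION_PER_THREAD checks in the later hunks.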