X-Git-Url: https://git.liburcu.org/?p=urcu.git;a=blobdiff_plain;f=src%2Frculfhash.c;h=e0c5860c7b6b91e448b68b250c5c0a58afd49825;hp=0bd138486d8d74a21edc46f8b9c74e0873b1a925;hb=99ab1528d4df0c35bd209063c42123d1bc2f81ea;hpb=d0ec0ed2fcb5d67a28587dcb778606e64f5b7b83 diff --git a/src/rculfhash.c b/src/rculfhash.c index 0bd1384..e0c5860 100644 --- a/src/rculfhash.c +++ b/src/rculfhash.c @@ -266,9 +266,9 @@ #include #include "compat-getcpu.h" -#include -#include -#include +#include +#include +#include #include #include #include @@ -380,6 +380,27 @@ static int cds_lfht_workqueue_atfork_nesting; static void cds_lfht_init_worker(const struct rcu_flavor_struct *flavor); static void cds_lfht_fini_worker(const struct rcu_flavor_struct *flavor); +#ifdef CONFIG_CDS_LFHT_ITER_DEBUG + +static +void cds_lfht_iter_debug_set_ht(struct cds_lfht *ht, struct cds_lfht_iter *iter) +{ + iter->lfht = ht; +} + +#define cds_lfht_iter_debug_assert(...) assert(__VA_ARGS__) + +#else + +static +void cds_lfht_iter_debug_set_ht(struct cds_lfht *ht, struct cds_lfht_iter *iter) +{ +} + +#define cds_lfht_iter_debug_assert(...) + +#endif + /* * Algorithm to reverse bits in a word by lookup table, extended to * 64-bit words. @@ -585,6 +606,37 @@ static void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size, unsigned long count); +static void mutex_lock(pthread_mutex_t *mutex) +{ + int ret; + +#ifndef DISTRUST_SIGNALS_EXTREME + ret = pthread_mutex_lock(mutex); + if (ret) + urcu_die(ret); +#else /* #ifndef DISTRUST_SIGNALS_EXTREME */ + while ((ret = pthread_mutex_trylock(mutex)) != 0) { + if (ret != EBUSY && ret != EINTR) + urcu_die(ret); + if (CMM_LOAD_SHARED(URCU_TLS(rcu_reader).need_mb)) { + cmm_smp_mb(); + _CMM_STORE_SHARED(URCU_TLS(rcu_reader).need_mb, 0); + cmm_smp_mb(); + } + (void) poll(NULL, 0, 10); + } +#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */ +} + +static void mutex_unlock(pthread_mutex_t *mutex) +{ + int ret; + + ret = pthread_mutex_unlock(mutex); + if (ret) + urcu_die(ret); +} + static long nr_cpus_mask = -1; static long split_count_mask = -1; static int split_count_order = -1; @@ -1037,7 +1089,13 @@ void _cds_lfht_add(struct cds_lfht *ht, if (unique_ret && !is_bucket(next) && clear_flag(iter)->reverse_hash == node->reverse_hash) { - struct cds_lfht_iter d_iter = { .node = node, .next = iter, }; + struct cds_lfht_iter d_iter = { + .node = node, + .next = iter, +#ifdef CONFIG_CDS_LFHT_ITER_DEBUG + .lfht = ht, +#endif + }; /* * uniquely adding inserts the node as the first @@ -1485,6 +1543,32 @@ void cds_lfht_create_bucket(struct cds_lfht *ht, unsigned long size) } } +#if (CAA_BITS_PER_LONG > 32) +/* + * For 64-bit architectures, with max number of buckets small enough not to + * use the entire 64-bit memory mapping space (and allowing a fair number of + * hash table instances), use the mmap allocator, which is faster. Otherwise, + * fallback to the order allocator. + */ +static +const struct cds_lfht_mm_type *get_mm_type(unsigned long max_nr_buckets) +{ + if (max_nr_buckets && max_nr_buckets <= (1ULL << 32)) + return &cds_lfht_mm_mmap; + else + return &cds_lfht_mm_order; +} +#else +/* + * For 32-bit architectures, use the order allocator. 
+ */ +static +const struct cds_lfht_mm_type *get_mm_type(unsigned long max_nr_buckets) +{ + return &cds_lfht_mm_order; +} +#endif + struct cds_lfht *_cds_lfht_new(unsigned long init_size, unsigned long min_nr_alloc_buckets, unsigned long max_nr_buckets, @@ -1507,26 +1591,8 @@ struct cds_lfht *_cds_lfht_new(unsigned long init_size, /* * Memory management plugin default. */ - if (!mm) { - if (CAA_BITS_PER_LONG > 32 - && max_nr_buckets - && max_nr_buckets <= (1ULL << 32)) { - /* - * For 64-bit architectures, with max number of - * buckets small enough not to use the entire - * 64-bit memory mapping space (and allowing a - * fair number of hash table instances), use the - * mmap allocator, which is faster than the - * order allocator. - */ - mm = &cds_lfht_mm_mmap; - } else { - /* - * The fallback is to use the order allocator. - */ - mm = &cds_lfht_mm_order; - } - } + if (!mm) + mm = get_mm_type(max_nr_buckets); /* max_nr_buckets == 0 for order based mm means infinite */ if (mm == &cds_lfht_mm_order && !max_nr_buckets) @@ -1569,6 +1635,8 @@ void cds_lfht_lookup(struct cds_lfht *ht, unsigned long hash, struct cds_lfht_node *node, *next, *bucket; unsigned long reverse_hash, size; + cds_lfht_iter_debug_set_ht(ht, iter); + reverse_hash = bit_reverse_ulong(hash); size = rcu_dereference(ht->size); @@ -1606,6 +1674,7 @@ void cds_lfht_next_duplicate(struct cds_lfht *ht, cds_lfht_match_fct match, struct cds_lfht_node *node, *next; unsigned long reverse_hash; + cds_lfht_iter_debug_assert(ht == iter->lfht); node = iter->node; reverse_hash = node->reverse_hash; next = iter->next; @@ -1637,6 +1706,7 @@ void cds_lfht_next(struct cds_lfht *ht, struct cds_lfht_iter *iter) { struct cds_lfht_node *node, *next; + cds_lfht_iter_debug_assert(ht == iter->lfht); node = clear_flag(iter->next); for (;;) { if (caa_unlikely(is_end(node))) { @@ -1657,6 +1727,7 @@ void cds_lfht_next(struct cds_lfht *ht, struct cds_lfht_iter *iter) void cds_lfht_first(struct cds_lfht *ht, struct cds_lfht_iter *iter) { + cds_lfht_iter_debug_set_ht(ht, iter); /* * Get next after first bucket node. The first bucket node is the * first node of the linked list. 
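
The hunks above thread the new iterator-debugging state through the read-side API: cds_lfht_lookup() and cds_lfht_first() stamp the iterator with its owning table via cds_lfht_iter_debug_set_ht(), and cds_lfht_next() / cds_lfht_next_duplicate() check that stamp with cds_lfht_iter_debug_assert(), so a build with CONFIG_CDS_LFHT_ITER_DEBUG defined can catch an iterator initialized on one table being advanced on another. Below is a minimal caller-side sketch of the lookup pattern these hooks instrument; struct entry, match(), hash_key() and the key value are hypothetical names invented for illustration, teardown is omitted, and the program assumes the public API from <urcu.h> and <urcu/rculfhash.h> (typically linked with -lurcu -lurcu-cds).

#include <stdio.h>
#include <stdlib.h>
#include <urcu.h>		/* RCU flavor: thread registration, read-side lock */
#include <urcu/compiler.h>	/* caa_container_of() */
#include <urcu/rculfhash.h>	/* lock-free resizable hash table */

/* Hypothetical value type embedding the hash table node. */
struct entry {
	int key;
	struct cds_lfht_node node;
};

/* Match callback passed to cds_lfht_lookup()/cds_lfht_next_duplicate(). */
static int match(struct cds_lfht_node *node, const void *key)
{
	const struct entry *e = caa_container_of(node, struct entry, node);

	return e->key == *(const int *) key;
}

/* Toy hash for the sketch; a real user would pick a proper hash function. */
static unsigned long hash_key(int key)
{
	return (unsigned long) key * 2654435761UL;
}

int main(void)
{
	struct cds_lfht *ht;
	struct cds_lfht_iter iter;
	struct cds_lfht_node *found;
	struct entry *e;
	int key = 42;

	rcu_register_thread();
	ht = cds_lfht_new(1, 1, 0,
			CDS_LFHT_AUTO_RESIZE | CDS_LFHT_ACCOUNTING, NULL);
	if (!ht)
		abort();

	e = malloc(sizeof(*e));
	e->key = key;
	cds_lfht_node_init(&e->node);

	rcu_read_lock();
	cds_lfht_add(ht, hash_key(key), &e->node);

	/*
	 * cds_lfht_lookup() initializes iter (and, in an iter-debug build,
	 * records ht in it); the next_duplicate() calls below must be given
	 * the same ht, which the debug assertion verifies.
	 */
	cds_lfht_lookup(ht, hash_key(key), match, &key, &iter);
	while ((found = cds_lfht_iter_get_node(&iter)) != NULL) {
		struct entry *f = caa_container_of(found, struct entry, node);

		printf("found key %d\n", f->key);
		cds_lfht_next_duplicate(ht, match, &key, &iter);
	}
	rcu_read_unlock();

	rcu_unregister_thread();
	return 0;	/* node removal, cds_lfht_destroy() and free() omitted */
}

With iterator debugging disabled (the default), cds_lfht_iter_debug_set_ht() compiles to an empty function and cds_lfht_iter_debug_assert() expands to nothing, so the instrumentation costs nothing in normal builds. The remaining hunks switch the resize path over to the mutex wrappers moved earlier in the patch and drop their old definitions:
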
@@ -1947,9 +2018,9 @@ void cds_lfht_resize(struct cds_lfht *ht, unsigned long new_size) { resize_target_update_count(ht, new_size); CMM_STORE_SHARED(ht->resize_initiated, 1); - pthread_mutex_lock(&ht->resize_mutex); + mutex_lock(&ht->resize_mutex); _do_cds_lfht_resize(ht); - pthread_mutex_unlock(&ht->resize_mutex); + mutex_unlock(&ht->resize_mutex); } static @@ -1960,9 +2031,9 @@ void do_resize_cb(struct urcu_work *work) struct cds_lfht *ht = resize_work->ht; ht->flavor->register_thread(); - pthread_mutex_lock(&ht->resize_mutex); + mutex_lock(&ht->resize_mutex); _do_cds_lfht_resize(ht); - pthread_mutex_unlock(&ht->resize_mutex); + mutex_unlock(&ht->resize_mutex); ht->flavor->unregister_thread(); poison_free(work); } @@ -2037,37 +2108,6 @@ void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size, __cds_lfht_resize_lazy_launch(ht); } -static void mutex_lock(pthread_mutex_t *mutex) -{ - int ret; - -#ifndef DISTRUST_SIGNALS_EXTREME - ret = pthread_mutex_lock(mutex); - if (ret) - urcu_die(ret); -#else /* #ifndef DISTRUST_SIGNALS_EXTREME */ - while ((ret = pthread_mutex_trylock(mutex)) != 0) { - if (ret != EBUSY && ret != EINTR) - urcu_die(ret); - if (CMM_LOAD_SHARED(URCU_TLS(rcu_reader).need_mb)) { - cmm_smp_mb(); - _CMM_STORE_SHARED(URCU_TLS(rcu_reader).need_mb, 0); - cmm_smp_mb(); - } - (void) poll(NULL, 0, 10); - } -#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */ -} - -static void mutex_unlock(pthread_mutex_t *mutex) -{ - int ret; - - ret = pthread_mutex_unlock(mutex); - if (ret) - urcu_die(ret); -} - static void cds_lfht_before_fork(void *priv) { if (cds_lfht_workqueue_atfork_nesting++)
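
Taken together, the final hunks move the error-checking mutex_lock()/mutex_unlock() wrappers ahead of their first users and route both the synchronous cds_lfht_resize() path and the deferred do_resize_cb() worker through them, so explicit and lazy resizes serialize on ht->resize_mutex and die loudly on pthread errors. Below is a small caller-side sketch of how an application might drive an explicit resize alongside automatic resizing; make_table() and the sizes chosen are invented for illustration, and the calls assume the public rculfhash API.

#include <urcu.h>
#include <urcu/rculfhash.h>

/*
 * Sketch: create an auto-resizing table and request an explicit resize.
 * cds_lfht_resize() is synchronous: it takes ht->resize_mutex internally
 * and returns once the resize is done, so call it from a registered RCU
 * thread and not from within an RCU read-side critical section.
 */
static struct cds_lfht *make_table(void)
{
	struct cds_lfht *ht;

	ht = cds_lfht_new(64,	/* initial number of buckets (power of two) */
			64,	/* minimum number of allocated buckets */
			0,	/* maximum number of buckets: 0 = unbounded */
			CDS_LFHT_AUTO_RESIZE | CDS_LFHT_ACCOUNTING,
			NULL);	/* default attributes for resize worker threads */
	if (!ht)
		return NULL;

	/* Pre-size for an expected load spike; lazy auto-resize still applies. */
	cds_lfht_resize(ht, 4096);
	return ht;
}

The 0 passed as max_nr_buckets means "no limit", which (per the get_mm_type() helper added earlier in this patch) selects the order-based bucket allocator; bounding the table to at most 2^32 buckets on a 64-bit build would let it use the faster mmap-based allocator instead.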