X-Git-Url: https://git.liburcu.org/?p=urcu.git;a=blobdiff_plain;f=rculfhash.c;h=668f93672a17899e7fc27e9cd049e1ccf9350d74;hp=1f6ee71f39ddb05242d87a6b5c744144c7021c98;hb=4ddbb355f0c6bb38e4dab863949db7a4ef459d37;hpb=ef6e6171a5046d56c3f4a32abcf3436546977118 diff --git a/rculfhash.c b/rculfhash.c index 1f6ee71..668f936 100644 --- a/rculfhash.c +++ b/rculfhash.c @@ -53,7 +53,7 @@ * operation. * - The resize operation for larger tables (and available through an * API) allows both expanding and shrinking the hash table. - * - Per-CPU Split-counters are used to keep track of the number of + * - Split-counters are used to keep track of the number of * nodes within the hash table for automatic resize triggering. * - Resize operation initiated by long chain detection is executed by a * call_rcu thread, which keeps lock-freedom of add and remove. @@ -173,12 +173,13 @@ #endif /* - * Per-CPU split-counters lazily update the global counter each 1024 + * Split-counters lazily update the global counter each 1024 * addition/removal. It automatically keeps track of resize required. * We use the bucket length as indicator for need to expand for small * tables and machines lacking per-cpu data suppport. */ #define COUNT_COMMIT_ORDER 10 +#define DEFAULT_SPLIT_COUNT_MASK 0xFUL #define CHAIN_LEN_TARGET 1 #define CHAIN_LEN_RESIZE_THRESHOLD 3 @@ -265,7 +266,7 @@ struct cds_lfht { void (*cds_lfht_rcu_unregister_thread)(void); pthread_attr_t *resize_attr; /* Resize threads attributes */ long count; /* global approximate item count */ - struct ht_items_count *percpu_count; /* per-cpu item count */ + struct ht_items_count *split_count; /* split item count */ }; struct rcu_resize_work { @@ -497,85 +498,95 @@ int get_count_order_ulong(unsigned long x) static void cds_lfht_resize_lazy(struct cds_lfht *ht, unsigned long size, int growth); -/* - * If the sched_getcpu() and sysconf(_SC_NPROCESSORS_CONF) calls are - * available, then we support hash table item accounting. - * In the unfortunate event the number of CPUs reported would be - * inaccurate, we use modulo arithmetic on the number of CPUs we got. - */ -#if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) - static void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size, unsigned long count); static long nr_cpus_mask = -1; +static long split_count_mask = -1; + +#if defined(HAVE_SYSCONF) +static void ht_init_nr_cpus_mask(void) +{ + long maxcpus; + + maxcpus = sysconf(_SC_NPROCESSORS_CONF); + if (maxcpus <= 0) { + nr_cpus_mask = -2; + return; + } + /* + * round up number of CPUs to next power of two, so we + * can use & for modulo. + */ + maxcpus = 1UL << get_count_order_ulong(maxcpus); + nr_cpus_mask = maxcpus - 1; +} +#else /* #if defined(HAVE_SYSCONF) */ +static void ht_init_nr_cpus_mask(void) +{ + nr_cpus_mask = -2; +} +#endif /* #else #if defined(HAVE_SYSCONF) */ static -struct ht_items_count *alloc_per_cpu_items_count(void) +struct ht_items_count *alloc_split_items_count(void) { struct ht_items_count *count; - switch (nr_cpus_mask) { - case -2: - return NULL; - case -1: - { - long maxcpus; - - maxcpus = sysconf(_SC_NPROCESSORS_CONF); - if (maxcpus <= 0) { - nr_cpus_mask = -2; - return NULL; - } - /* - * round up number of CPUs to next power of two, so we - * can use & for modulo. 
- */ - maxcpus = 1UL << get_count_order_ulong(maxcpus); - nr_cpus_mask = maxcpus - 1; - } - /* Fall-through */ - default: - return calloc(nr_cpus_mask + 1, sizeof(*count)); + if (nr_cpus_mask == -1) { + ht_init_nr_cpus_mask(); + if (nr_cpus_mask < 0) + split_count_mask = DEFAULT_SPLIT_COUNT_MASK; + else + split_count_mask = nr_cpus_mask; } + + assert(split_count_mask >= 0); + return calloc(split_count_mask + 1, sizeof(*count)); } static -void free_per_cpu_items_count(struct ht_items_count *count) +void free_split_items_count(struct ht_items_count *count) { poison_free(count); } +#if defined(HAVE_SCHED_GETCPU) static -int ht_get_cpu(void) +int ht_get_split_count_index(unsigned long hash) { int cpu; - assert(nr_cpus_mask >= 0); + assert(split_count_mask >= 0); cpu = sched_getcpu(); if (unlikely(cpu < 0)) - return cpu; + return hash & split_count_mask; else - return cpu & nr_cpus_mask; + return cpu & split_count_mask; } +#else /* #if defined(HAVE_SCHED_GETCPU) */ +static +int ht_get_split_count_index(unsigned long hash) +{ + return hash & split_count_mask; +} +#endif /* #else #if defined(HAVE_SCHED_GETCPU) */ static -void ht_count_add(struct cds_lfht *ht, unsigned long size) +void ht_count_add(struct cds_lfht *ht, unsigned long size, unsigned long hash) { - unsigned long percpu_count; - int cpu; + unsigned long split_count; + int index; - if (unlikely(!ht->percpu_count)) + if (unlikely(!ht->split_count)) return; - cpu = ht_get_cpu(); - if (unlikely(cpu < 0)) - return; - percpu_count = uatomic_add_return(&ht->percpu_count[cpu].add, 1); - if (unlikely(!(percpu_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) { + index = ht_get_split_count_index(hash); + split_count = uatomic_add_return(&ht->split_count[index].add, 1); + if (unlikely(!(split_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) { long count; - dbg_printf("add percpu %lu\n", percpu_count); + dbg_printf("add split count %lu\n", split_count); count = uatomic_add_return(&ht->count, 1UL << COUNT_COMMIT_ORDER); /* If power of 2 */ @@ -590,21 +601,19 @@ void ht_count_add(struct cds_lfht *ht, unsigned long size) } static -void ht_count_del(struct cds_lfht *ht, unsigned long size) +void ht_count_del(struct cds_lfht *ht, unsigned long size, unsigned long hash) { - unsigned long percpu_count; - int cpu; + unsigned long split_count; + int index; - if (unlikely(!ht->percpu_count)) + if (unlikely(!ht->split_count)) return; - cpu = ht_get_cpu(); - if (unlikely(cpu < 0)) - return; - percpu_count = uatomic_add_return(&ht->percpu_count[cpu].del, 1); - if (unlikely(!(percpu_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) { + index = ht_get_split_count_index(hash); + split_count = uatomic_add_return(&ht->split_count[index].del, 1); + if (unlikely(!(split_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) { long count; - dbg_printf("del percpu %lu\n", percpu_count); + dbg_printf("del split count %lu\n", split_count); count = uatomic_add_return(&ht->count, -(1UL << COUNT_COMMIT_ORDER)); /* If power of 2 */ @@ -616,7 +625,7 @@ void ht_count_del(struct cds_lfht *ht, unsigned long size) * Don't shrink table if the number of nodes is below a * certain threshold. 
*/ - if (count < (1UL << COUNT_COMMIT_ORDER) * (nr_cpus_mask + 1)) + if (count < (1UL << COUNT_COMMIT_ORDER) * (split_count_mask + 1)) return; cds_lfht_resize_lazy_count(ht, size, count >> (CHAIN_LEN_TARGET - 1)); @@ -624,34 +633,6 @@ void ht_count_del(struct cds_lfht *ht, unsigned long size) } } -#else /* #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */ - -static const long nr_cpus_mask = -2; - -static -struct ht_items_count *alloc_per_cpu_items_count(void) -{ - return NULL; -} - -static -void free_per_cpu_items_count(struct ht_items_count *count) -{ -} - -static -void ht_count_add(struct cds_lfht *ht, unsigned long size) -{ -} - -static -void ht_count_del(struct cds_lfht *ht, unsigned long size) -{ -} - -#endif /* #else #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */ - - static void check_resize(struct cds_lfht *ht, unsigned long size, uint32_t chain_len) { @@ -1284,7 +1265,7 @@ void cds_lfht_create_dummy(struct cds_lfht *ht, unsigned long size) for (order = 1; order < get_count_order_ulong(size) + 1; order++) { len = 1UL << (order - 1); if (order <= ht->min_alloc_order) { - ht->t.tbl[order] = (void *)(ht->t.tbl[0]->nodes + len); + ht->t.tbl[order] = (struct rcu_level *) (ht->t.tbl[0]->nodes + len); } else { ht->t.tbl[order] = calloc(1, len * sizeof(struct _cds_lfht_node)); assert(ht->t.tbl[order]); @@ -1353,7 +1334,7 @@ struct cds_lfht *_cds_lfht_new(cds_lfht_hash_fct hash_fct, ht->cds_lfht_rcu_register_thread = cds_lfht_rcu_register_thread; ht->cds_lfht_rcu_unregister_thread = cds_lfht_rcu_unregister_thread; ht->resize_attr = attr; - ht->percpu_count = alloc_per_cpu_items_count(); + ht->split_count = alloc_split_items_count(); /* this mutex should not nest in read-side C.S. */ pthread_mutex_init(&ht->resize_mutex, NULL); ht->flags = flags; @@ -1485,7 +1466,7 @@ void cds_lfht_add(struct cds_lfht *ht, struct cds_lfht_node *node) size = rcu_dereference(ht->t.size); _cds_lfht_add(ht, size, node, NULL, 0); - ht_count_add(ht, size); + ht_count_add(ht, size, hash); } struct cds_lfht_node *cds_lfht_add_unique(struct cds_lfht *ht, @@ -1500,7 +1481,7 @@ struct cds_lfht_node *cds_lfht_add_unique(struct cds_lfht *ht, size = rcu_dereference(ht->t.size); _cds_lfht_add(ht, size, node, &iter, 0); if (iter.node == node) - ht_count_add(ht, size); + ht_count_add(ht, size, hash); return iter.node; } @@ -1517,7 +1498,7 @@ struct cds_lfht_node *cds_lfht_add_replace(struct cds_lfht *ht, for (;;) { _cds_lfht_add(ht, size, node, &iter, 0); if (iter.node == node) { - ht_count_add(ht, size); + ht_count_add(ht, size, hash); return NULL; } @@ -1538,13 +1519,15 @@ int cds_lfht_replace(struct cds_lfht *ht, struct cds_lfht_iter *old_iter, int cds_lfht_del(struct cds_lfht *ht, struct cds_lfht_iter *iter) { - unsigned long size; + unsigned long size, hash; int ret; size = rcu_dereference(ht->t.size); ret = _cds_lfht_del(ht, size, iter->node, 0); - if (!ret) - ht_count_del(ht, size); + if (!ret) { + hash = bit_reverse_ulong(iter->node->p.reverse_hash); + ht_count_del(ht, size, hash); + } return ret; } @@ -1606,7 +1589,7 @@ int cds_lfht_destroy(struct cds_lfht *ht, pthread_attr_t **attr) ret = cds_lfht_delete_dummy(ht); if (ret) return ret; - free_per_cpu_items_count(ht->percpu_count); + free_split_items_count(ht->split_count); if (attr) *attr = ht->resize_attr; poison_free(ht); @@ -1624,12 +1607,12 @@ void cds_lfht_count_nodes(struct cds_lfht *ht, unsigned long nr_dummy = 0; *approx_before = 0; - if (nr_cpus_mask >= 0) { + if (split_count_mask >= 0) { int i; - for (i = 0; i < nr_cpus_mask + 1; i++) 
{ - *approx_before += uatomic_read(&ht->percpu_count[i].add); - *approx_before -= uatomic_read(&ht->percpu_count[i].del); + for (i = 0; i < split_count_mask + 1; i++) { + *approx_before += uatomic_read(&ht->split_count[i].add); + *approx_before -= uatomic_read(&ht->split_count[i].del); } } @@ -1654,12 +1637,12 @@ void cds_lfht_count_nodes(struct cds_lfht *ht, } while (!is_end(node)); dbg_printf("number of dummy nodes: %lu\n", nr_dummy); *approx_after = 0; - if (nr_cpus_mask >= 0) { + if (split_count_mask >= 0) { int i; - for (i = 0; i < nr_cpus_mask + 1; i++) { - *approx_after += uatomic_read(&ht->percpu_count[i].add); - *approx_after -= uatomic_read(&ht->percpu_count[i].del); + for (i = 0; i < split_count_mask + 1; i++) { + *approx_after += uatomic_read(&ht->split_count[i].add); + *approx_after -= uatomic_read(&ht->split_count[i].del); } } } @@ -1791,8 +1774,6 @@ void cds_lfht_resize_lazy(struct cds_lfht *ht, unsigned long size, int growth) } } -#if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) - static void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size, unsigned long count) @@ -1817,5 +1798,3 @@ void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size, CMM_STORE_SHARED(ht->t.resize_initiated, 1); } } - -#endif
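
In outline, the accounting scheme this patch converges on looks as follows. The sketch below is a minimal standalone approximation, not liburcu code: the names (split_count_sketch.c, init_split_count_mask(), split_count_index(), count_add(), count_del(), count_approximate(), global_count) are illustrative, GCC/Clang __atomic builtins stand in for liburcu's uatomic_* wrappers, glibc's sched_getcpu() and sysconf() are called directly instead of being guarded by HAVE_SCHED_GETCPU/HAVE_SYSCONF, and the call_rcu-driven resize plumbing is left out. It shows the three parts the diff touches: rounding the CPU count up to a power of two so '&' works as modulo, picking a counter slot by current CPU with a fall-back on the node's hash, and folding every 1024th (2^COUNT_COMMIT_ORDER) local update into the lazily maintained global count.

/* split_count_sketch.c: standalone illustration of the split-counter scheme;
 * not liburcu code. GCC __atomic builtins stand in for uatomic_*. */
#define _GNU_SOURCE
#include <sched.h>              /* sched_getcpu() (glibc) */
#include <unistd.h>             /* sysconf() */
#include <stdio.h>
#include <stdlib.h>

#define COUNT_COMMIT_ORDER        10    /* fold into global count every 1024 ops */
#define DEFAULT_SPLIT_COUNT_MASK  0xFUL /* 16 slots when the CPU count is unknown */

struct ht_items_count {
        unsigned long add, del;
};

static long split_count_mask = -1;
static struct ht_items_count *split_count;
static long global_count;       /* lazily maintained approximate item count */

/* Round the CPU count up to a power of two so '&' can serve as modulo. */
static void init_split_count_mask(void)
{
        long maxcpus = sysconf(_SC_NPROCESSORS_CONF);
        long order = 0;

        if (maxcpus <= 0) {
                split_count_mask = DEFAULT_SPLIT_COUNT_MASK;
                return;
        }
        while ((1L << order) < maxcpus)
                order++;
        split_count_mask = (1L << order) - 1;
}

/* Prefer the current CPU as slot index (cheap, cache-friendly); fall back
 * to the node's hash so updates still spread out when sched_getcpu() fails. */
static int split_count_index(unsigned long hash)
{
        int cpu = sched_getcpu();

        if (cpu < 0)
                return hash & split_count_mask;
        return cpu & split_count_mask;
}

/* Count one insertion; every 1024th local increment is committed globally. */
static void count_add(unsigned long hash)
{
        int index = split_count_index(hash);
        unsigned long v;

        v = __atomic_add_fetch(&split_count[index].add, 1, __ATOMIC_RELAXED);
        if (!(v & ((1UL << COUNT_COMMIT_ORDER) - 1)))
                __atomic_add_fetch(&global_count, 1L << COUNT_COMMIT_ORDER,
                                   __ATOMIC_RELAXED);
}

/* Count one removal; mirrors count_add(). The real ht_count_del() would also
 * skip shrink consideration below 1024 * (split_count_mask + 1) nodes. */
static void count_del(unsigned long hash)
{
        int index = split_count_index(hash);
        unsigned long v;

        v = __atomic_add_fetch(&split_count[index].del, 1, __ATOMIC_RELAXED);
        if (!(v & ((1UL << COUNT_COMMIT_ORDER) - 1)))
                __atomic_add_fetch(&global_count, -(1L << COUNT_COMMIT_ORDER),
                                   __ATOMIC_RELAXED);
}

/* Approximate item count: per-slot additions minus removals, as in the
 * approx_before/approx_after sums of cds_lfht_count_nodes(). */
static long count_approximate(void)
{
        long approx = 0;

        for (long i = 0; i <= split_count_mask; i++) {
                approx += __atomic_load_n(&split_count[i].add, __ATOMIC_RELAXED);
                approx -= __atomic_load_n(&split_count[i].del, __ATOMIC_RELAXED);
        }
        return approx;
}

int main(void)
{
        init_split_count_mask();
        split_count = calloc(split_count_mask + 1, sizeof(*split_count));

        for (unsigned long i = 0; i < 100000; i++)
                count_add(i * 2654435761UL);    /* stand-in for a node hash */
        for (unsigned long i = 0; i < 40000; i++)
                count_del(i * 2654435761UL);
        printf("approx count: %ld\n", count_approximate());
        free(split_count);
        return 0;
}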
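
The practical effect of the hash fall-back is that item accounting no longer disappears where sched_getcpu() is unusable: before this change, a failed sched_getcpu() returned early without counting, and builds lacking HAVE_SCHED_GETCPU or HAVE_SYSCONF compiled empty stubs, leaving automatic resize to rely on bucket chain length alone. After it, every build keeps split counts, with DEFAULT_SPLIT_COUNT_MASK (0xFUL) providing 16 slots when the CPU count cannot be determined, and the 2^COUNT_COMMIT_ORDER batching keeps contention on the shared ht->count field to roughly one atomic update per 1024 operations per slot.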