diff --git a/rculfhash.c b/rculfhash.c
index b67acc8..e94b9b2 100644
--- a/rculfhash.c
+++ b/rculfhash.c
@@ -53,7 +53,7 @@
  * operation.
  * - The resize operation for larger tables (and available through an
  *   API) allows both expanding and shrinking the hash table.
- * - Per-CPU Split-counters are used to keep track of the number of
+ * - Split-counters are used to keep track of the number of
  *   nodes within the hash table for automatic resize triggering.
  * - Resize operation initiated by long chain detection is executed by a
  *   call_rcu thread, which keeps lock-freedom of add and remove.
@@ -173,7 +173,7 @@
 #endif
 
 /*
- * Per-CPU split-counters lazily update the global counter each 1024
+ * Split-counters lazily update the global counter each 1024
  * addition/removal. It automatically keeps track of resize required.
  * We use the bucket length as indicator for need to expand for small
  * tables and machines lacking per-cpu data support.
@@ -241,6 +241,8 @@ struct cds_lfht {
 	struct rcu_table t;
 	cds_lfht_hash_fct hash_fct;
 	cds_lfht_compare_fct compare_fct;
+	unsigned long min_alloc_order;
+	unsigned long min_alloc_size;
 	unsigned long hash_seed;
 	int flags;
 	/*
@@ -263,7 +265,7 @@ struct cds_lfht {
 	void (*cds_lfht_rcu_unregister_thread)(void);
 	pthread_attr_t *resize_attr;	/* Resize threads attributes */
 	long count;			/* global approximate item count */
-	struct ht_items_count *percpu_count;	/* per-cpu item count */
+	struct ht_items_count *split_count;	/* split item count */
 };
 
 struct rcu_resize_work {
@@ -508,72 +510,76 @@ void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size,
 				unsigned long count);
 
 static long nr_cpus_mask = -1;
+static long split_count_mask = -1;
+
+static void ht_init_nr_cpus_mask(void)
+{
+	long maxcpus;
+
+	maxcpus = sysconf(_SC_NPROCESSORS_CONF);
+	if (maxcpus <= 0) {
+		nr_cpus_mask = -2;
+		return;
+	}
+	/*
+	 * round up number of CPUs to next power of two, so we
+	 * can use & for modulo.
+	 */
+	maxcpus = 1UL << get_count_order_ulong(maxcpus);
+	nr_cpus_mask = maxcpus - 1;
+}
 
 static
-struct ht_items_count *alloc_per_cpu_items_count(void)
+struct ht_items_count *alloc_split_items_count(void)
 {
 	struct ht_items_count *count;
 
-	switch (nr_cpus_mask) {
-	case -2:
-		return NULL;
-	case -1:
-	{
-		long maxcpus;
-
-		maxcpus = sysconf(_SC_NPROCESSORS_CONF);
-		if (maxcpus <= 0) {
-			nr_cpus_mask = -2;
-			return NULL;
-		}
-		/*
-		 * round up number of CPUs to next power of two, so we
-		 * can use & for modulo.
-		 */
-		maxcpus = 1UL << get_count_order_ulong(maxcpus);
-		nr_cpus_mask = maxcpus - 1;
-	}
-		/* Fall-through */
-	default:
-		return calloc(nr_cpus_mask + 1, sizeof(*count));
+	if (nr_cpus_mask == -1) {
+		ht_init_nr_cpus_mask();
+		split_count_mask = nr_cpus_mask;
 	}
+
+	if (split_count_mask < 0)
+		return NULL;
+	else
+		return calloc(split_count_mask + 1, sizeof(*count));
 }
 
 static
-void free_per_cpu_items_count(struct ht_items_count *count)
+void free_split_items_count(struct ht_items_count *count)
 {
 	poison_free(count);
 }
 
 static
-int ht_get_cpu(void)
+int ht_get_split_count_index(void)
 {
 	int cpu;
 
-	assert(nr_cpus_mask >= 0);
+	assert(split_count_mask >= 0);
 	cpu = sched_getcpu();
 	if (unlikely(cpu < 0))
 		return cpu;
 	else
-		return cpu & nr_cpus_mask;
+		return cpu & split_count_mask;
 }
 
 static
 void ht_count_add(struct cds_lfht *ht, unsigned long size)
 {
-	unsigned long percpu_count;
-	int cpu;
+	unsigned long split_count;
+	int index;
 
-	if (unlikely(!ht->percpu_count))
+	if (unlikely(!ht->split_count))
 		return;
-	cpu = ht_get_cpu();
-	if (unlikely(cpu < 0))
+	index = ht_get_split_count_index();
+	if (unlikely(index < 0))
 		return;
-	percpu_count = uatomic_add_return(&ht->percpu_count[cpu].add, 1);
-	if (unlikely(!(percpu_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) {
+	split_count = uatomic_add_return(&ht->split_count[index].add, 1);
+	if (unlikely(!(split_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) {
 		long count;
 
-		dbg_printf("add percpu %lu\n", percpu_count);
+		dbg_printf("add split count %lu\n", split_count);
 		count = uatomic_add_return(&ht->count,
 				1UL << COUNT_COMMIT_ORDER);
 		/* If power of 2 */
@@ -590,19 +596,19 @@ void ht_count_add(struct cds_lfht *ht, unsigned long size)
 static
 void ht_count_del(struct cds_lfht *ht, unsigned long size)
 {
-	unsigned long percpu_count;
-	int cpu;
+	unsigned long split_count;
+	int index;
 
-	if (unlikely(!ht->percpu_count))
+	if (unlikely(!ht->split_count))
 		return;
-	cpu = ht_get_cpu();
-	if (unlikely(cpu < 0))
+	index = ht_get_split_count_index();
+	if (unlikely(index < 0))
 		return;
-	percpu_count = uatomic_add_return(&ht->percpu_count[cpu].del, 1);
-	if (unlikely(!(percpu_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) {
+	split_count = uatomic_add_return(&ht->split_count[index].del, 1);
+	if (unlikely(!(split_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) {
 		long count;
 
-		dbg_printf("del percpu %lu\n", percpu_count);
+		dbg_printf("del split count %lu\n", split_count);
 		count = uatomic_add_return(&ht->count,
 				-(1UL << COUNT_COMMIT_ORDER));
 		/* If power of 2 */
@@ -614,7 +620,7 @@ void ht_count_del(struct cds_lfht *ht, unsigned long size)
 	 * Don't shrink table if the number of nodes is below a
 	 * certain threshold.
 	 */
-	if (count < (1UL << COUNT_COMMIT_ORDER) * (nr_cpus_mask + 1))
+	if (count < (1UL << COUNT_COMMIT_ORDER) * (split_count_mask + 1))
 		return;
 	cds_lfht_resize_lazy_count(ht, size,
 		count >> (CHAIN_LEN_TARGET - 1));
@@ -625,15 +631,16 @@ void ht_count_del(struct cds_lfht *ht, unsigned long size)
 #else /* #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */
 
 static const long nr_cpus_mask = -2;
+static const long split_count_mask = -2;
 
 static
-struct ht_items_count *alloc_per_cpu_items_count(void)
+struct ht_items_count *alloc_split_items_count(void)
 {
 	return NULL;
 }
 
 static
-void free_per_cpu_items_count(struct ht_items_count *count)
+void free_split_items_count(struct ht_items_count *count)
 {
 }
 
@@ -736,17 +743,21 @@ struct _cds_lfht_node *lookup_bucket(struct cds_lfht *ht, unsigned long size,
 
 	assert(size > 0);
 	index = hash & (size - 1);
+
+	if (index < ht->min_alloc_size) {
+		dbg_printf("lookup hash %lu index %lu order 0 aridx 0\n",
+			   hash, index);
+		return &ht->t.tbl[0]->nodes[index];
+	}
 	/*
 	 * equivalent to get_count_order_ulong(index + 1), but optimizes
 	 * away the non-existing 0 special-case for
 	 * get_count_order_ulong.
 	 */
 	order = fls_ulong(index);
 	dbg_printf("lookup hash %lu index %lu order %lu aridx %lu\n",
-		   hash, index, order, index & (!order ? 0 : ((1UL << (order - 1)) - 1)));
-
-	return &ht->t.tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))];
+		   hash, index, order, index & ((1UL << (order - 1)) - 1));
+	return &ht->t.tbl[order]->nodes[index & ((1UL << (order - 1)) - 1)];
 }
 
 /*
@@ -871,12 +882,6 @@ void _cds_lfht_add(struct cds_lfht *ht,
 	assert(!is_dummy(node));
 	assert(!is_removed(node));
-	if (!size) {
-		assert(dummy);
-		assert(!unique_ret);
-		node->p.next = flag_dummy(get_end());
-		return;		/* Initial first add (head) */
-	}
 	lookup = lookup_bucket(ht, size, bit_reverse_ulong(node->p.reverse_hash));
 	for (;;) {
 		uint32_t chain_len = 0;
@@ -1084,16 +1089,17 @@ void init_table_populate_partition(struct cds_lfht *ht, unsigned long i,
 {
 	unsigned long j;
 
+	assert(i > ht->min_alloc_order);
 	ht->cds_lfht_rcu_read_lock();
 	for (j = start; j < start + len; j++) {
 		struct cds_lfht_node *new_node =
 			(struct cds_lfht_node *) &ht->t.tbl[i]->nodes[j];
 
 		dbg_printf("init populate: i %lu j %lu hash %lu\n",
-			   i, j, !i ? 0 : (1UL << (i - 1)) + j);
+			   i, j, (1UL << (i - 1)) + j);
 		new_node->p.reverse_hash =
-			bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j);
-		_cds_lfht_add(ht, !i ? 0 : (1UL << (i - 1)),
+			bit_reverse_ulong((1UL << (i - 1)) + j);
+		_cds_lfht_add(ht, 1UL << (i - 1),
 				new_node, NULL, 1);
 	}
 	ht->cds_lfht_rcu_read_unlock();
@@ -1121,14 +1127,15 @@ void init_table(struct cds_lfht *ht,
 
 	dbg_printf("init table: first_order %lu last_order %lu\n",
 		   first_order, last_order);
+	assert(first_order > ht->min_alloc_order);
 	for (i = first_order; i <= last_order; i++) {
 		unsigned long len;
 
-		len = !i ? 1 : 1UL << (i - 1);
+		len = 1UL << (i - 1);
 		dbg_printf("init order %lu len: %lu\n", i, len);
 
 		/* Stop expand if the resize target changes under us */
-		if (CMM_LOAD_SHARED(ht->t.resize_target) < (!i ? 1 : (1UL << i)))
+		if (CMM_LOAD_SHARED(ht->t.resize_target) < (1UL << i))
 			break;
 
 		ht->t.tbl[i] = calloc(1, len * sizeof(struct _cds_lfht_node));
@@ -1144,9 +1151,9 @@ void init_table(struct cds_lfht *ht,
 		 * Update table size.
 		 */
 		cmm_smp_wmb();	/* populate data before RCU size */
-		CMM_STORE_SHARED(ht->t.size, !i ? 1 : (1UL << i));
+		CMM_STORE_SHARED(ht->t.size, 1UL << i);
 
-		dbg_printf("init new size: %lu\n", !i ? 1 : (1UL << i));
+		dbg_printf("init new size: %lu\n", 1UL << i);
 		if (CMM_LOAD_SHARED(ht->in_progress_destroy))
 			break;
 	}
@@ -1183,17 +1190,17 @@ void remove_table_partition(struct cds_lfht *ht, unsigned long i,
 {
 	unsigned long j;
 
+	assert(i > ht->min_alloc_order);
 	ht->cds_lfht_rcu_read_lock();
 	for (j = start; j < start + len; j++) {
 		struct cds_lfht_node *fini_node =
 			(struct cds_lfht_node *) &ht->t.tbl[i]->nodes[j];
 
 		dbg_printf("remove entry: i %lu j %lu hash %lu\n",
-			   i, j, !i ? 0 : (1UL << (i - 1)) + j);
+			   i, j, (1UL << (i - 1)) + j);
 		fini_node->p.reverse_hash =
-			bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j);
-		(void) _cds_lfht_del(ht, !i ? 0 : (1UL << (i - 1)),
-				fini_node, 1);
+			bit_reverse_ulong((1UL << (i - 1)) + j);
+		(void) _cds_lfht_del(ht, 1UL << (i - 1), fini_node, 1);
 	}
 	ht->cds_lfht_rcu_read_unlock();
 }
@@ -1221,11 +1228,11 @@ void fini_table(struct cds_lfht *ht,
 
 	dbg_printf("fini table: first_order %lu last_order %lu\n",
 		   first_order, last_order);
-	assert(first_order > 0);
+	assert(first_order > ht->min_alloc_order);
 	for (i = last_order; i >= first_order; i--) {
 		unsigned long len;
 
-		len = !i ? 1 : 1UL << (i - 1);
+		len = 1UL << (i - 1);
 		dbg_printf("fini order %lu len: %lu\n", i, len);
 
 		/* Stop shrink if the resize target changes under us */
@@ -1266,10 +1273,54 @@ void fini_table(struct cds_lfht *ht,
 	}
 }
 
+static
+void cds_lfht_create_dummy(struct cds_lfht *ht, unsigned long size)
+{
+	struct _cds_lfht_node *prev, *node;
+	unsigned long order, len, i, j;
+
+	ht->t.tbl[0] = calloc(1, ht->min_alloc_size * sizeof(struct _cds_lfht_node));
+	assert(ht->t.tbl[0]);
+
+	dbg_printf("create dummy: order %lu index %lu hash %lu\n", 0, 0, 0);
+	ht->t.tbl[0]->nodes[0].next = flag_dummy(get_end());
+	ht->t.tbl[0]->nodes[0].reverse_hash = 0;
+
+	for (order = 1; order < get_count_order_ulong(size) + 1; order++) {
+		len = 1UL << (order - 1);
+		if (order <= ht->min_alloc_order) {
+			ht->t.tbl[order] = (struct rcu_level *) (ht->t.tbl[0]->nodes + len);
+		} else {
+			ht->t.tbl[order] = calloc(1, len * sizeof(struct _cds_lfht_node));
+			assert(ht->t.tbl[order]);
+		}
+
+		i = 0;
+		prev = ht->t.tbl[i]->nodes;
+		for (j = 0; j < len; j++) {
+			if (j & (j - 1)) {	/* Between power of 2 */
+				prev++;
+			} else if (j) {		/* At each power of 2 */
+				i++;
+				prev = ht->t.tbl[i]->nodes;
+			}
+
+			node = &ht->t.tbl[order]->nodes[j];
+			dbg_printf("create dummy: order %lu index %lu hash %lu\n",
+				   order, j, j + len);
+			node->next = prev->next;
+			assert(is_dummy(node->next));
+			node->reverse_hash = bit_reverse_ulong(j + len);
+			prev->next = flag_dummy((struct cds_lfht_node *)node);
+		}
+	}
+}
+
 struct cds_lfht *_cds_lfht_new(cds_lfht_hash_fct hash_fct,
 			cds_lfht_compare_fct compare_fct,
 			unsigned long hash_seed,
 			unsigned long init_size,
+			unsigned long min_alloc_size,
 			int flags,
 			void (*cds_lfht_call_rcu)(struct rcu_head *head,
 					void (*func)(struct rcu_head *head)),
@@ -1285,9 +1336,14 @@ struct cds_lfht *_cds_lfht_new(cds_lfht_hash_fct hash_fct,
 	struct cds_lfht *ht;
 	unsigned long order;
 
+	/* min_alloc_size must be power of two */
+	if (!min_alloc_size || (min_alloc_size & (min_alloc_size - 1)))
+		return NULL;
 	/* init_size must be power of two */
-	if (init_size && (init_size & (init_size - 1)))
+	if (!init_size || (init_size & (init_size - 1)))
 		return NULL;
+	min_alloc_size = max(min_alloc_size, MIN_TABLE_SIZE);
+	init_size = max(init_size, min_alloc_size);
 	ht = calloc(1, sizeof(struct cds_lfht));
 	assert(ht);
 	ht->hash_fct = hash_fct;
@@ -1302,17 +1358,16 @@ struct cds_lfht *_cds_lfht_new(cds_lfht_hash_fct hash_fct,
 	ht->cds_lfht_rcu_register_thread = cds_lfht_rcu_register_thread;
 	ht->cds_lfht_rcu_unregister_thread = cds_lfht_rcu_unregister_thread;
 	ht->resize_attr = attr;
-	ht->percpu_count = alloc_per_cpu_items_count();
+	ht->split_count = alloc_split_items_count();
 	/* this mutex should not nest in read-side C.S. */
 	pthread_mutex_init(&ht->resize_mutex, NULL);
-	order = get_count_order_ulong(max(init_size, MIN_TABLE_SIZE));
 	ht->flags = flags;
-	ht->cds_lfht_rcu_thread_offline();
-	pthread_mutex_lock(&ht->resize_mutex);
+	order = get_count_order_ulong(init_size);
 	ht->t.resize_target = 1UL << order;
-	init_table(ht, 0, order);
-	pthread_mutex_unlock(&ht->resize_mutex);
-	ht->cds_lfht_rcu_thread_online();
+	ht->min_alloc_size = min_alloc_size;
+	ht->min_alloc_order = get_count_order_ulong(min_alloc_size);
+	cds_lfht_create_dummy(ht, 1UL << order);
+	ht->t.size = 1UL << order;
 	return ht;
 }
 
@@ -1530,7 +1585,12 @@ int cds_lfht_delete_dummy(struct cds_lfht *ht)
 				bit_reverse_ulong(ht->t.tbl[order]->nodes[i].reverse_hash));
 			assert(is_dummy(ht->t.tbl[order]->nodes[i].next));
 		}
-		poison_free(ht->t.tbl[order]);
+
+		if (order == ht->min_alloc_order)
+			poison_free(ht->t.tbl[0]);
+		else if (order > ht->min_alloc_order)
+			poison_free(ht->t.tbl[order]);
+		/* Nothing to delete for order < ht->min_alloc_order */
 	}
 	return 0;
 }
@@ -1551,7 +1611,7 @@ int cds_lfht_destroy(struct cds_lfht *ht, pthread_attr_t **attr)
 	ret = cds_lfht_delete_dummy(ht);
 	if (ret)
 		return ret;
-	free_per_cpu_items_count(ht->percpu_count);
+	free_split_items_count(ht->split_count);
 	if (attr)
 		*attr = ht->resize_attr;
 	poison_free(ht);
@@ -1569,12 +1629,12 @@ void cds_lfht_count_nodes(struct cds_lfht *ht,
 	unsigned long nr_dummy = 0;
 
 	*approx_before = 0;
-	if (nr_cpus_mask >= 0) {
+	if (split_count_mask >= 0) {
 		int i;
 
-		for (i = 0; i < nr_cpus_mask + 1; i++) {
-			*approx_before += uatomic_read(&ht->percpu_count[i].add);
-			*approx_before -= uatomic_read(&ht->percpu_count[i].del);
+		for (i = 0; i < split_count_mask + 1; i++) {
+			*approx_before += uatomic_read(&ht->split_count[i].add);
+			*approx_before -= uatomic_read(&ht->split_count[i].del);
 		}
 	}
 
@@ -1599,12 +1659,12 @@ void cds_lfht_count_nodes(struct cds_lfht *ht,
 	} while (!is_end(node));
 	dbg_printf("number of dummy nodes: %lu\n", nr_dummy);
 	*approx_after = 0;
-	if (nr_cpus_mask >= 0) {
+	if (split_count_mask >= 0) {
 		int i;
 
-		for (i = 0; i < nr_cpus_mask + 1; i++) {
-			*approx_after += uatomic_read(&ht->percpu_count[i].add);
-			*approx_after -= uatomic_read(&ht->percpu_count[i].del);
+		for (i = 0; i < split_count_mask + 1; i++) {
+			*approx_after += uatomic_read(&ht->split_count[i].add);
+			*approx_after -= uatomic_read(&ht->split_count[i].del);
 		}
 	}
 }
@@ -1631,7 +1691,7 @@ void _do_cds_lfht_shrink(struct cds_lfht *ht,
 {
 	unsigned long old_order, new_order;
 
-	new_size = max(new_size, MIN_TABLE_SIZE);
+	new_size = max(new_size, ht->min_alloc_size);
 	old_order = get_count_order_ulong(old_size);
 	new_order = get_count_order_ulong(new_size);
 	dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
@@ -1681,7 +1741,7 @@ static
 void resize_target_update_count(struct cds_lfht *ht,
 				unsigned long count)
 {
-	count = max(count, MIN_TABLE_SIZE);
+	count = max(count, ht->min_alloc_size);
 	uatomic_set(&ht->t.resize_target, count);
 }
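
As an illustration of the two-level bucket layout this patch introduces, here is a
small standalone C sketch of the index arithmetic in lookup_bucket() after the
change. It is not part of the patch: MIN_ALLOC_SIZE and the main() driver are
hypothetical stand-ins for ht->min_alloc_size and the real callers, and the local
fls_ulong() mirrors the helper rculfhash.c already uses. Every bucket index below
min_alloc_size resolves into the flat t.tbl[0] array; a larger index lives in
t.tbl[order], where order is the position of the index's most-significant bit and
aridx clears that bit.

#include <stdio.h>

#define MIN_ALLOC_SIZE	8UL	/* hypothetical stand-in for ht->min_alloc_size */

/* Position of the most-significant bit set, 1-based; valid for x != 0. */
static unsigned long fls_ulong(unsigned long x)
{
	unsigned long r = 1;

	while (x >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned long index;

	for (index = 0; index < 4 * MIN_ALLOC_SIZE; index++) {
		if (index < MIN_ALLOC_SIZE) {
			/* Flat region: bucket is t.tbl[0]->nodes[index]. */
			printf("index %2lu -> order 0 aridx %lu\n",
				index, index);
		} else {
			/* Bucket is t.tbl[order]->nodes[aridx]. */
			unsigned long order = fls_ulong(index);
			unsigned long aridx = index & ((1UL << (order - 1)) - 1);

			printf("index %2lu -> order %lu aridx %lu\n",
				index, order, aridx);
		}
	}
	return 0;
}

With min_alloc_size = 8, indexes 0-7 stay in t.tbl[0], while index 11 (binary 1011)
maps to t.tbl[4]->nodes[3]. This is also why cds_lfht_create_dummy() can point
t.tbl[1] through t.tbl[min_alloc_order] into the single t.tbl[0] allocation: those
levels together alias exactly the first min_alloc_size bucket slots.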