X-Git-Url: http://git.liburcu.org/?a=blobdiff_plain;f=src%2Frculfhash.c;h=10f5b8ed58a89c687421b72e513e218492299a0c;hb=refs%2Fheads%2Fmaster;hp=307ba7d1d847175da6d7fa5aef592ab54919f5bf;hpb=601922a81d884e16ff404cee7534ede56fb87d0a;p=urcu.git

diff --git a/src/rculfhash.c b/src/rculfhash.c
index 307ba7d..10f5b8e 100644
--- a/src/rculfhash.c
+++ b/src/rculfhash.c
@@ -249,6 +249,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include "compat-getcpu.h"
 #include 
@@ -259,7 +260,6 @@
 #include 
 #include 
 #include 
-#include 
 #include 
 #include 
 #include 
@@ -348,6 +348,11 @@ struct partition_resize_work {
 			    unsigned long start, unsigned long len);
 };
 
+enum nr_cpus_mask_state {
+	NR_CPUS_MASK_INIT_FAILED = -2,
+	NR_CPUS_MASK_UNINITIALIZED = -1,
+};
+
 static struct urcu_workqueue *cds_lfht_workqueue;
 
 /*
@@ -564,6 +569,50 @@ unsigned int cds_lfht_fls_ulong(unsigned long x)
 #endif
 }
 
+static void *cds_lfht_malloc(void *state __attribute__((unused)),
+		size_t size)
+{
+	return malloc(size);
+}
+
+static void *cds_lfht_calloc(void *state __attribute__((unused)),
+		size_t nmemb, size_t size)
+{
+	return calloc(nmemb, size);
+}
+
+static void *cds_lfht_realloc(void *state __attribute__((unused)),
+		void *ptr, size_t size)
+{
+	return realloc(ptr, size);
+}
+
+static void *cds_lfht_aligned_alloc(void *state __attribute__((unused)),
+		size_t alignment, size_t size)
+{
+	void *ptr;
+
+	if (posix_memalign(&ptr, alignment, size))
+		return NULL;
+	return ptr;
+}
+
+static void cds_lfht_free(void *state __attribute__((unused)), void *ptr)
+{
+	free(ptr);
+}
+
+
+/* Default memory allocator */
+static struct cds_lfht_alloc cds_lfht_default_alloc = {
+	.malloc = cds_lfht_malloc,
+	.calloc = cds_lfht_calloc,
+	.realloc = cds_lfht_realloc,
+	.aligned_alloc = cds_lfht_aligned_alloc,
+	.free = cds_lfht_free,
+	.state = NULL,
+};
+
 /*
  * Return the minimum order for which x <= (1UL << order).
  * Return -1 if x is 0.
@@ -625,7 +674,7 @@ static void mutex_unlock(pthread_mutex_t *mutex)
 		urcu_die(ret);
 }
 
-static long nr_cpus_mask = -1;
+static long nr_cpus_mask = NR_CPUS_MASK_UNINITIALIZED;
 static long split_count_mask = -1;
 static int split_count_order = -1;
 
@@ -635,7 +684,7 @@ static void ht_init_nr_cpus_mask(void)
 
 	maxcpus = get_possible_cpus_array_len();
 	if (maxcpus <= 0) {
-		nr_cpus_mask = -2;
+		nr_cpus_mask = NR_CPUS_MASK_INIT_FAILED;
 		return;
 	}
 	/*
@@ -649,7 +698,7 @@ static void ht_init_nr_cpus_mask(void)
 static
 void alloc_split_items_count(struct cds_lfht *ht)
 {
-	if (nr_cpus_mask == -1) {
+	if (nr_cpus_mask == NR_CPUS_MASK_UNINITIALIZED) {
 		ht_init_nr_cpus_mask();
 		if (nr_cpus_mask < 0)
 			split_count_mask = DEFAULT_SPLIT_COUNT_MASK;
@@ -662,7 +711,7 @@ void alloc_split_items_count(struct cds_lfht *ht)
 	urcu_posix_assert(split_count_mask >= 0);
 
 	if (ht->flags & CDS_LFHT_ACCOUNTING) {
-		ht->split_count = calloc(split_count_mask + 1,
+		ht->split_count = ht->alloc->calloc(ht->alloc->state, split_count_mask + 1,
 					sizeof(struct ht_items_count));
 		urcu_posix_assert(ht->split_count);
 	} else {
@@ -673,7 +722,7 @@ void alloc_split_items_count(struct cds_lfht *ht)
 static
 void free_split_items_count(struct cds_lfht *ht)
 {
-	poison_free(ht->split_count);
+	poison_free(ht->alloc, ht->split_count);
 }
 
 static
@@ -1154,7 +1203,7 @@ int _cds_lfht_del(struct cds_lfht *ht, unsigned long size,
 		struct cds_lfht_node *node)
 {
 	struct cds_lfht_node *bucket, *next;
-	struct cds_lfht_node **node_next;
+	uintptr_t *node_next;
 
 	if (!node)	/* Return -ENOENT if asked to delete NULL node */
 		return -ENOENT;
@@ -1186,7 +1235,7 @@ int _cds_lfht_del(struct cds_lfht *ht, unsigned long size,
 	 * NOTE: The node_next variable is present to avoid breaking
 	 * strict-aliasing rules.
 	 */
-	node_next = &node->next;
+	node_next = (uintptr_t*)&node->next;
 	uatomic_or_mo(node_next, REMOVED_FLAG, CMM_RELEASE);
 
 	/* We performed the (logical) deletion. */
@@ -1242,7 +1291,7 @@ void partition_resize_helper(struct cds_lfht *ht, unsigned long i,
 	unsigned long thread, nr_threads;
 	sigset_t newmask, oldmask;
 
-	urcu_posix_assert(nr_cpus_mask != -1);
+	urcu_posix_assert(nr_cpus_mask != NR_CPUS_MASK_UNINITIALIZED);
 	if (nr_cpus_mask < 0 || len < 2 * MIN_PARTITION_PER_THREAD)
 		goto fallback;
 
@@ -1258,7 +1307,7 @@ void partition_resize_helper(struct cds_lfht *ht, unsigned long i,
 		nr_threads = 1;
 	}
 	partition_len = len >> cds_lfht_get_count_order_ulong(nr_threads);
-	work = calloc(nr_threads, sizeof(*work));
+	work = ht->alloc->calloc(ht->alloc->state, nr_threads, sizeof(*work));
 	if (!work) {
 		dbg_printf("error allocating for resize, single-threading\n");
 		goto fallback;
 	}
@@ -1299,7 +1348,7 @@ void partition_resize_helper(struct cds_lfht *ht, unsigned long i,
 		ret = pthread_join(work[thread].thread_id, NULL);
 		urcu_posix_assert(!ret);
 	}
-	free(work);
+	ht->alloc->free(ht->alloc->state, work);
 
 	/*
 	 * A pthread_create failure above will either lead in us having
@@ -1427,7 +1476,7 @@ void remove_table_partition(struct cds_lfht *ht, unsigned long i,
 	for (j = size + start; j < size + start + len; j++) {
 		struct cds_lfht_node *fini_bucket = bucket_at(ht, j);
 		struct cds_lfht_node *parent_bucket = bucket_at(ht, j - size);
-		struct cds_lfht_node **fini_bucket_next;
+		uintptr_t *fini_bucket_next;
 
 		urcu_posix_assert(j >= size && j < (size << 1));
 		dbg_printf("remove entry: order %lu index %lu hash %lu\n",
@@ -1437,7 +1486,7 @@ void remove_table_partition(struct cds_lfht *ht, unsigned long i,
 		 * NOTE: The fini_bucket_next variable is present to
 		 * avoid breaking strict-aliasing rules.
 		 */
-		fini_bucket_next = &fini_bucket->next;
+		fini_bucket_next = (uintptr_t*)&fini_bucket->next;
 		uatomic_or(fini_bucket_next, REMOVED_FLAG);
 		_cds_lfht_gc_bucket(parent_bucket, fini_bucket);
 	}
@@ -1592,12 +1641,13 @@ void cds_lfht_node_init_deleted(struct cds_lfht_node *node)
 	node->next = flag_removed(NULL);
 }
 
-struct cds_lfht *_cds_lfht_new(unsigned long init_size,
+struct cds_lfht *_cds_lfht_new_with_alloc(unsigned long init_size,
 			unsigned long min_nr_alloc_buckets,
 			unsigned long max_nr_buckets,
 			int flags,
 			const struct cds_lfht_mm_type *mm,
 			const struct rcu_flavor_struct *flavor,
+			const struct cds_lfht_alloc *alloc,
 			pthread_attr_t *attr)
 {
 	struct cds_lfht *ht;
@@ -1633,7 +1683,8 @@ struct cds_lfht *_cds_lfht_new(unsigned long init_size,
 	max_nr_buckets = max(max_nr_buckets, min_nr_alloc_buckets);
 	init_size = min(init_size, max_nr_buckets);
 
-	ht = mm->alloc_cds_lfht(min_nr_alloc_buckets, max_nr_buckets);
+	ht = mm->alloc_cds_lfht(min_nr_alloc_buckets, max_nr_buckets, alloc ? : &cds_lfht_default_alloc);
+
 	urcu_posix_assert(ht);
 	urcu_posix_assert(ht->mm == mm);
 	urcu_posix_assert(ht->bucket_at == mm->bucket_at);
@@ -1653,6 +1704,19 @@ struct cds_lfht *_cds_lfht_new(unsigned long init_size,
 	return ht;
 }
 
+struct cds_lfht *_cds_lfht_new(unsigned long init_size,
+			unsigned long min_nr_alloc_buckets,
+			unsigned long max_nr_buckets,
+			int flags,
+			const struct cds_lfht_mm_type *mm,
+			const struct rcu_flavor_struct *flavor,
+			pthread_attr_t *attr)
+{
+	return _cds_lfht_new_with_alloc(init_size,
+			min_nr_alloc_buckets, max_nr_buckets,
+			flags, mm, flavor, NULL, attr);
+}
+
 void cds_lfht_lookup(struct cds_lfht *ht, unsigned long hash,
 		cds_lfht_match_fct match, const void *key,
 		struct cds_lfht_iter *iter)
@@ -1941,7 +2005,7 @@ void do_auto_resize_destroy_cb(struct urcu_work *work)
 	if (ret)
 		urcu_die(ret);
 	ht->flavor->unregister_thread();
-	poison_free(ht);
+	poison_free(ht->alloc, ht);
 }
 
 /*
@@ -1985,7 +2049,7 @@ int cds_lfht_destroy(struct cds_lfht *ht, pthread_attr_t **attr)
 	ret = pthread_mutex_destroy(&ht->resize_mutex);
 	if (ret)
 		ret = -EBUSY;
-	poison_free(ht);
+	poison_free(ht->alloc, ht);
 	return ret;
 }
 
@@ -2140,7 +2204,7 @@ void do_resize_cb(struct urcu_work *work)
 	_do_cds_lfht_resize(ht);
 	mutex_unlock(&ht->resize_mutex);
 	ht->flavor->unregister_thread();
-	poison_free(work);
+	poison_free(ht->alloc, work);
}
 
 static
@@ -2156,7 +2220,7 @@ void __cds_lfht_resize_lazy_launch(struct cds_lfht *ht)
 	if (uatomic_load(&ht->in_progress_destroy, CMM_RELAXED)) {
 		return;
 	}
-	work = malloc(sizeof(*work));
+	work = ht->alloc->malloc(ht->alloc->state, sizeof(*work));
 	if (work == NULL) {
 		dbg_printf("error allocating resize work, bailing out\n");
 		return;
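
Usage sketch (not part of the patch): the hunks above replace every direct malloc/calloc/free call in rculfhash.c with calls through a per-table allocator vtable (ht->alloc), falling back to cds_lfht_default_alloc when the new alloc argument of _cds_lfht_new_with_alloc() is NULL. The sketch below shows how a caller could plug its own allocator into that interface. It is illustrative only: the counting_* functions and counting_alloc_state are hypothetical names invented here, the callback signatures and field names are inferred from the cds_lfht_default_alloc initializer and the ht->alloc-> call sites in this diff, and it assumes the matching header change declares struct cds_lfht_alloc in <urcu/rculfhash.h>.

	#include <stdlib.h>
	#include <urcu/rculfhash.h>	/* assumed to declare struct cds_lfht_alloc */

	/* Hypothetical allocator state: counts allocations and frees. */
	struct counting_alloc_state {
		unsigned long nr_allocs;	/* plain counters: not thread-safe, a real
						 * allocator would use atomics */
		unsigned long nr_frees;
	};

	static void *counting_malloc(void *state, size_t size)
	{
		struct counting_alloc_state *s = state;

		s->nr_allocs++;
		return malloc(size);
	}

	static void *counting_calloc(void *state, size_t nmemb, size_t size)
	{
		struct counting_alloc_state *s = state;

		s->nr_allocs++;
		return calloc(nmemb, size);
	}

	static void *counting_realloc(void *state __attribute__((unused)),
			void *ptr, size_t size)
	{
		return realloc(ptr, size);
	}

	static void *counting_aligned_alloc(void *state, size_t alignment, size_t size)
	{
		struct counting_alloc_state *s = state;
		void *ptr;

		if (posix_memalign(&ptr, alignment, size))
			return NULL;
		s->nr_allocs++;
		return ptr;
	}

	static void counting_free(void *state, void *ptr)
	{
		struct counting_alloc_state *s = state;

		s->nr_frees++;
		free(ptr);
	}

	static struct counting_alloc_state counting_state;

	static const struct cds_lfht_alloc counting_alloc = {
		.malloc		= counting_malloc,
		.calloc		= counting_calloc,
		.realloc	= counting_realloc,
		.aligned_alloc	= counting_aligned_alloc,
		.free		= counting_free,
		.state		= &counting_state,
	};

Passing &counting_alloc as the alloc argument of _cds_lfht_new_with_alloc() would route the split counters, resize work items and the struct cds_lfht itself through these callbacks; passing NULL keeps the stock behaviour, as the alloc ? : &cds_lfht_default_alloc fallback above shows.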