Cleanup: remove leftover manual pthread detection
[urcu.git] / rculfhash.c
index 6e12db1013f6c58d70ee3119a67a068fadc3d80e..d7d107f3788f3a70cec57be5380ae1ff1bbff173 100644 (file)
  * To discuss these guarantees, we first define a "read" operation as any
  * of the basic cds_lfht_lookup, cds_lfht_next_duplicate,
  * cds_lfht_first, cds_lfht_next operations, as well as
- * cds_lfht_add_unique (failure). 
+ * cds_lfht_add_unique (failure).
  *
  * We define "read traversal" operation as any of the following
  * group of operations
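
(Editor's illustration, not part of the patch: a minimal sketch of such a
read traversal, one lookup plus its duplicate iteration inside a single
RCU read-side critical section. The match function and key are
hypothetical caller-provided values.)

#include <urcu.h>		/* rcu_read_lock/unlock (flavor-dependent) */
#include <urcu/rculfhash.h>	/* cds_lfht API */

static void read_traversal_sketch(struct cds_lfht *ht, unsigned long hash,
		cds_lfht_match_fct match, const void *key)
{
	struct cds_lfht_iter iter;
	struct cds_lfht_node *node;

	rcu_read_lock();
	cds_lfht_lookup(ht, hash, match, key, &iter);
	while ((node = cds_lfht_iter_get_node(&iter)) != NULL) {
		/* use node: the guarantees under discussion apply here */
		cds_lfht_next_duplicate(ht, match, key, &iter);
	}
	rcu_read_unlock();
}
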
  * shrink hash table from order 6 to 5: fini the index=6 bucket node table
  *
  * A bit of ascii art explanation:
- * 
+ *
  * The order index is off by one compared to the actual power of 2
  * because we use index 0 to deal with the 0 special-case.
- * 
+ *
  * This shows the nodes for a small table ordered by reversed bits:
- * 
+ *
  *    bits   reverse
  * 0  000        000
  * 4  100        001
  * 5  101        101
  * 3  011        110
  * 7  111        111
- * 
- * This shows the nodes in order of non-reversed bits, linked by 
+ *
+ * This shows the nodes in order of non-reversed bits, linked by
  * reversed-bit order.
- * 
+ *
  * order              bits       reverse
  * 0               0  000        000
  * 1               |  1  001        100             <-
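
(Editor's illustration, not part of the patch: this standalone program
prints indices 0..7 alongside their 3-bit reversals; sorting on the
reversed column reproduces the bucket ordering in the tables above.)

#include <stdio.h>

/* Naive 3-bit reversal, for illustration only; the real code uses
 * the table-driven bit_reverse_u8() and friends below. */
static unsigned int reverse3(unsigned int v)
{
	return ((v & 1) << 2) | (v & 2) | ((v >> 2) & 1);
}

int main(void)
{
	unsigned int i, r;

	for (i = 0; i < 8; i++) {
		r = reverse3(i);
		printf("%u  %u%u%u  ->  %u%u%u\n", i,
			(i >> 2) & 1, (i >> 1) & 1, i & 1,
			(r >> 2) & 1, (r >> 1) & 1, r & 1);
	}
	return 0;
}
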
 #include <stdint.h>
 #include <string.h>
 #include <sched.h>
+#include <unistd.h>
 
 #include "config.h"
-#include <urcu.h>
+#include "compat-getcpu.h"
+#include <urcu-pointer.h>
 #include <urcu-call-rcu.h>
 #include <urcu-flavor.h>
 #include <urcu/arch.h>
@@ -364,7 +366,7 @@ struct partition_resize_work {
  * Originally from Public Domain.
  */
 
-static const uint8_t BitReverseTable256[256] = 
+static const uint8_t BitReverseTable256[256] =
 {
 #define R2(n) (n),   (n) + 2*64,     (n) + 1*64,     (n) + 3*64
 #define R4(n) R2(n), R2((n) + 2*16), R2((n) + 1*16), R2((n) + 3*16)
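
(The initializer continues past this hunk in the usual Bit Twiddling
Hacks fashion. As an editor's sketch, a loop-based reference reversal
against which the table can be sanity-checked:)

#include <stdint.h>

/* Reference reversal: for every v, BitReverseTable256[v] should
 * equal bit_reverse_u8_ref(v). Sketch only, not part of the patch. */
static uint8_t bit_reverse_u8_ref(uint8_t v)
{
	uint8_t r = 0;
	int i;

	for (i = 0; i < 8; i++)
		r |= (uint8_t) (((v >> i) & 1) << (7 - i));
	return r;
}
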
@@ -385,21 +387,21 @@ uint8_t bit_reverse_u8(uint8_t v)
 static
 uint32_t bit_reverse_u32(uint32_t v)
 {
-       return ((uint32_t) bit_reverse_u8(v) << 24) | 
-               ((uint32_t) bit_reverse_u8(v >> 8) << 16) | 
-               ((uint32_t) bit_reverse_u8(v >> 16) << 8) | 
+       return ((uint32_t) bit_reverse_u8(v) << 24) |
+               ((uint32_t) bit_reverse_u8(v >> 8) << 16) |
+               ((uint32_t) bit_reverse_u8(v >> 16) << 8) |
                ((uint32_t) bit_reverse_u8(v >> 24));
 }
 #else
 static
 uint64_t bit_reverse_u64(uint64_t v)
 {
-       return ((uint64_t) bit_reverse_u8(v) << 56) | 
-               ((uint64_t) bit_reverse_u8(v >> 8)  << 48) | 
+       return ((uint64_t) bit_reverse_u8(v) << 56) |
+               ((uint64_t) bit_reverse_u8(v >> 8)  << 48) |
                ((uint64_t) bit_reverse_u8(v >> 16) << 40) |
                ((uint64_t) bit_reverse_u8(v >> 24) << 32) |
-               ((uint64_t) bit_reverse_u8(v >> 32) << 24) | 
-               ((uint64_t) bit_reverse_u8(v >> 40) << 16) | 
+               ((uint64_t) bit_reverse_u8(v >> 32) << 24) |
+               ((uint64_t) bit_reverse_u8(v >> 40) << 16) |
                ((uint64_t) bit_reverse_u8(v >> 48) << 8) |
                ((uint64_t) bit_reverse_u8(v >> 56));
 }
@@ -426,7 +428,7 @@ unsigned int fls_u32(uint32_t x)
 {
        int r;
 
-       asm("bsrl %1,%0\n\t"
+       __asm__ ("bsrl %1,%0\n\t"
            "jnz 1f\n\t"
            "movl $-1,%0\n\t"
            "1:\n\t"
@@ -442,7 +444,7 @@ unsigned int fls_u64(uint64_t x)
 {
        long r;
 
-       asm("bsrq %1,%0\n\t"
+       __asm__ ("bsrq %1,%0\n\t"
            "jnz 1f\n\t"
            "movq $-1,%0\n\t"
            "1:\n\t"
@@ -563,6 +565,7 @@ void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size,
 
 static long nr_cpus_mask = -1;
 static long split_count_mask = -1;
+static int split_count_order = -1;
 
 #if defined(HAVE_SYSCONF)
 static void ht_init_nr_cpus_mask(void)
@@ -597,6 +600,8 @@ void alloc_split_items_count(struct cds_lfht *ht)
                        split_count_mask = DEFAULT_SPLIT_COUNT_MASK;
                else
                        split_count_mask = nr_cpus_mask;
+               split_count_order =
+                       cds_lfht_get_count_order_ulong(split_count_mask + 1);
        }
 
        assert(split_count_mask >= 0);
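
(Worked example for the new split_count_order: with 8 possible CPUs,
nr_cpus_mask is 7, so split_count_mask + 1 is 8 and split_count_order
becomes 3. An editor's sketch of the expected count-order semantics;
the real helper lives in the urcu headers:)

/* Smallest order such that (1UL << order) >= x; e.g. 8 -> 3. */
static int count_order_ulong_sketch(unsigned long x)
{
	int order = 0;

	while ((1UL << order) < x)
		order++;
	return order;
}
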
@@ -616,26 +621,18 @@ void free_split_items_count(struct cds_lfht *ht)
        poison_free(ht->split_count);
 }
 
-#if defined(HAVE_SCHED_GETCPU)
 static
 int ht_get_split_count_index(unsigned long hash)
 {
        int cpu;
 
        assert(split_count_mask >= 0);
-       cpu = sched_getcpu();
+       cpu = urcu_sched_getcpu();
        if (caa_unlikely(cpu < 0))
                return hash & split_count_mask;
        else
                return cpu & split_count_mask;
 }
-#else /* #if defined(HAVE_SCHED_GETCPU) */
-static
-int ht_get_split_count_index(unsigned long hash)
-{
-       return hash & split_count_mask;
-}
-#endif /* #else #if defined(HAVE_SCHED_GETCPU) */
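
(This hunk is the commit's point: the hand-rolled HAVE_SCHED_GETCPU
branches disappear in favor of a single urcu_sched_getcpu() call from
the new compat-getcpu.h header. A hypothetical sketch of such a shim,
purely to show the shape; the real header may differ:)

/* Hypothetical compat-getcpu.h: return the current CPU number, or a
 * negative value when the platform cannot say, so that callers fall
 * back to hashing, as ht_get_split_count_index() does above. */
#ifdef HAVE_SCHED_GETCPU
#include <sched.h>
static inline int urcu_sched_getcpu(void)
{
	return sched_getcpu();
}
#else
static inline int urcu_sched_getcpu(void)
{
	return -1;
}
#endif
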
 
 static
 void ht_count_add(struct cds_lfht *ht, unsigned long size, unsigned long hash)
@@ -713,14 +710,39 @@ void check_resize(struct cds_lfht *ht, unsigned long size, uint32_t chain_len)
         * Use bucket-local length for small table expand and for
         * environments lacking per-cpu data support.
         */
-       if (count >= (1UL << COUNT_COMMIT_ORDER))
+       if (count >= (1UL << (COUNT_COMMIT_ORDER + split_count_order)))
                return;
        if (chain_len > 100)
                dbg_printf("WARNING: large chain length: %u.\n",
                           chain_len);
-       if (chain_len >= CHAIN_LEN_RESIZE_THRESHOLD)
-               cds_lfht_resize_lazy_grow(ht, size,
-                       cds_lfht_get_count_order_u32(chain_len - (CHAIN_LEN_TARGET - 1)));
+       if (chain_len >= CHAIN_LEN_RESIZE_THRESHOLD) {
+               int growth;
+
+               /*
+                * Ideal growth calculated based on chain length.
+                */
+               growth = cds_lfht_get_count_order_u32(chain_len
+                               - (CHAIN_LEN_TARGET - 1));
+               if ((ht->flags & CDS_LFHT_ACCOUNTING)
+                               && (size << growth)
+                                       >= (1UL << (COUNT_COMMIT_ORDER
+                                               + split_count_order))) {
+                       /*
+                        * If ideal growth expands the hash table size
+                        * beyond the "small hash table" sizes, use the
+                        * maximum small hash table size to attempt
+                        * expanding the hash table. This only applies
+                        * when node accounting is available, otherwise
+                        * the chain length is used to expand the hash
+                        * table in every case.
+                        */
+                       growth = COUNT_COMMIT_ORDER + split_count_order
+                               - cds_lfht_get_count_order_ulong(size);
+                       if (growth <= 0)
+                               return;
+               }
+               cds_lfht_resize_lazy_grow(ht, size, growth);
+       }
 }
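
(Worked numbers for the new cap, assuming COUNT_COMMIT_ORDER is 10 and
split_count_order is 3: the "small table" limit is order 13, and an
ideal growth that overshoots it is clamped to land exactly on order 13.
An editor's sketch of the clamping arithmetic:)

/* Assumed constants: COUNT_COMMIT_ORDER = 10, split_count_order = 3.
 * clamp_growth_sketch(11, 4) returns 2: grow from order 11 to the
 * small-table cap of order 13, not to order 15. */
static int clamp_growth_sketch(int size_order, int growth)
{
	const int cap_order = 10 + 3;

	if (size_order + growth >= cap_order)
		growth = cap_order - size_order;
	return growth;	/* <= 0: already at or past the cap, skip resize */
}
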
 
 static
@@ -1143,11 +1165,15 @@ void partition_resize_helper(struct cds_lfht *ht, unsigned long i,
                void (*fct)(struct cds_lfht *ht, unsigned long i,
                        unsigned long start, unsigned long len))
 {
-       unsigned long partition_len;
+       unsigned long partition_len, start = 0;
        struct partition_resize_work *work;
        int thread, ret;
        unsigned long nr_threads;
 
+       assert(nr_cpus_mask != -1);
+       if (nr_cpus_mask < 0 || len < 2 * MIN_PARTITION_PER_THREAD)
+               goto fallback;
+
        /*
         * Note: nr_cpus_mask + 1 is always power of 2.
         * We spawn just the number of threads we need to satisfy the minimum
@@ -1161,7 +1187,10 @@ void partition_resize_helper(struct cds_lfht *ht, unsigned long i,
        }
        partition_len = len >> cds_lfht_get_count_order_ulong(nr_threads);
        work = calloc(nr_threads, sizeof(*work));
-       assert(work);
+       if (!work) {
+               dbg_printf("error allocating for resize, single-threading\n");
+               goto fallback;
+       }
        for (thread = 0; thread < nr_threads; thread++) {
                work[thread].ht = ht;
                work[thread].i = i;
@@ -1170,6 +1199,17 @@ void partition_resize_helper(struct cds_lfht *ht, unsigned long i,
                work[thread].fct = fct;
                ret = pthread_create(&(work[thread].thread_id), ht->resize_attr,
                        partition_resize_thread, &work[thread]);
+               if (ret == EAGAIN) {
+                       /*
+                        * Out of resources: wait and join the threads
+                        * we've created, then handle leftovers.
+                        */
+                       dbg_printf("error spawning for resize, single-threading\n");
+                       start = work[thread].start;
+                       len -= start;
+                       nr_threads = thread;
+                       break;
+               }
                assert(!ret);
        }
        for (thread = 0; thread < nr_threads; thread++) {
@@ -1177,6 +1217,18 @@ void partition_resize_helper(struct cds_lfht *ht, unsigned long i,
                assert(!ret);
        }
        free(work);
+
+       /*
+        * A pthread_create failure above will either leave us with no
+        * threads to join or with processing starting at a non-zero
+        * offset; fall back to single-threaded handling of the leftovers.
+        */
+       if (start == 0 && nr_threads > 0)
+               return;
+fallback:
+       ht->flavor->thread_online();
+       fct(ht, i, start, len);
+       ht->flavor->thread_offline();
 }
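
(The pattern introduced here, spawning as many workers as resources
allow and finishing the tail inline when pthread_create fails,
generalizes beyond hash table resize. An editor's standalone sketch of
the same shape, with a hypothetical work function:)

#include <pthread.h>
#include <stdlib.h>

struct range_work { unsigned long start, len; pthread_t tid; };

static void do_range(unsigned long start, unsigned long len)
{
	/* hypothetical partition worker */
}

static void *range_thread(void *arg)
{
	struct range_work *w = arg;

	do_range(w->start, w->len);
	return NULL;
}

/* Partition [0, len) across nr_threads (assumed >= 1), falling back
 * to inline processing of the leftover range when thread creation
 * fails (e.g. EAGAIN: out of resources). */
static void parallel_do(unsigned long len, unsigned long nr_threads)
{
	unsigned long part = len / nr_threads, start = 0, t, spawned = 0;
	struct range_work *w = calloc(nr_threads, sizeof(*w));

	if (!w)
		goto fallback;
	for (t = 0; t < nr_threads; t++) {
		w[t].start = t * part;
		w[t].len = (t == nr_threads - 1) ? len - t * part : part;
		if (pthread_create(&w[t].tid, NULL, range_thread, &w[t])) {
			start = w[t].start;	/* leftovers begin here */
			break;
		}
		spawned++;
	}
	for (t = 0; t < spawned; t++)
		pthread_join(w[t].tid, NULL);
	free(w);
	if (spawned == nr_threads)
		return;
fallback:
	do_range(start, len - start);	/* single-threaded leftovers */
}
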
 
 /*
@@ -1214,13 +1266,6 @@ static
 void init_table_populate(struct cds_lfht *ht, unsigned long i,
                         unsigned long len)
 {
-       assert(nr_cpus_mask != -1);
-       if (nr_cpus_mask < 0 || len < 2 * MIN_PARTITION_PER_THREAD) {
-               ht->flavor->thread_online();
-               init_table_populate_partition(ht, i, 0, len);
-               ht->flavor->thread_offline();
-               return;
-       }
        partition_resize_helper(ht, i, len, init_table_populate_partition);
 }
 
@@ -1313,14 +1358,6 @@ void remove_table_partition(struct cds_lfht *ht, unsigned long i,
 static
 void remove_table(struct cds_lfht *ht, unsigned long i, unsigned long len)
 {
-
-       assert(nr_cpus_mask != -1);
-       if (nr_cpus_mask < 0 || len < 2 * MIN_PARTITION_PER_THREAD) {
-               ht->flavor->thread_online();
-               remove_table_partition(ht, i, 0, len);
-               ht->flavor->thread_offline();
-               return;
-       }
        partition_resize_helper(ht, i, len, remove_table_partition);
 }
 
@@ -1343,7 +1380,7 @@ void fini_table(struct cds_lfht *ht,
                unsigned long len;
 
                len = 1UL << (i - 1);
-               dbg_printf("fini order %lu len: %lu\n", i, len);
+               dbg_printf("fini order %ld len: %lu\n", i, len);
 
                /* Stop shrink if the resize target changes under us */
                if (CMM_LOAD_SHARED(ht->resize_target) > (1UL << (i - 1)))
@@ -1745,6 +1782,13 @@ int cds_lfht_destroy(struct cds_lfht *ht, pthread_attr_t **attr)
        was_online = ht->flavor->read_ongoing();
        if (was_online)
                ht->flavor->thread_offline();
+       /* Calling with RCU read-side held is an error. */
+       if (ht->flavor->read_ongoing()) {
+               ret = -EINVAL;
+               if (was_online)
+                       ht->flavor->thread_online();
+               goto end;
+       }
        while (uatomic_read(&ht->in_progress_resize))
                poll(NULL, 0, 100);     /* wait for 100ms */
        if (was_online)
@@ -1755,7 +1799,11 @@ int cds_lfht_destroy(struct cds_lfht *ht, pthread_attr_t **attr)
        free_split_items_count(ht);
        if (attr)
                *attr = ht->resize_attr;
+       ret = pthread_mutex_destroy(&ht->resize_mutex);
+       if (ret)
+               ret = -EBUSY;
        poison_free(ht);
+end:
        return ret;
 }
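
(With this change, cds_lfht_destroy reports -EINVAL instead of hanging
when invoked from within a read-side critical section, and surfaces a
pthread_mutex_destroy failure as -EBUSY. Correct usage is unchanged; an
editor's sketch:)

#include <pthread.h>
#include <stdio.h>
#include <urcu/rculfhash.h>

/* Destroy must be called with no RCU read-side lock held and no
 * concurrent updates in flight on the table. */
static int teardown_table(struct cds_lfht *ht)
{
	pthread_attr_t *attr = NULL;
	int ret;

	ret = cds_lfht_destroy(ht, &attr);
	if (ret)
		fprintf(stderr, "cds_lfht_destroy: %d\n", ret);
	return ret;
}
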
 
@@ -1886,14 +1934,26 @@ void cds_lfht_resize(struct cds_lfht *ht, unsigned long new_size)
 {
        int was_online;
 
-       resize_target_update_count(ht, new_size);
-       CMM_STORE_SHARED(ht->resize_initiated, 1);
        was_online = ht->flavor->read_ongoing();
        if (was_online)
                ht->flavor->thread_offline();
+       /* Calling with RCU read-side held is an error. */
+       if (ht->flavor->read_ongoing()) {
+               static int print_once;
+
+               if (!CMM_LOAD_SHARED(print_once))
+                       fprintf(stderr, "[error] rculfhash: cds_lfht_resize "
+                               "called with RCU read-side lock held.\n");
+               CMM_STORE_SHARED(print_once, 1);
+               assert(0);
+               goto end;
+       }
+       resize_target_update_count(ht, new_size);
+       CMM_STORE_SHARED(ht->resize_initiated, 1);
        pthread_mutex_lock(&ht->resize_mutex);
        _do_cds_lfht_resize(ht);
        pthread_mutex_unlock(&ht->resize_mutex);
+end:
        if (was_online)
                ht->flavor->thread_online();
 }
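
(The print_once guard above is a common once-only diagnostic idiom:
racy by design, so it may print a handful of times under contention,
but it never floods stderr and takes no lock on the fast path. In
isolation, an editor's sketch using the same CMM_* accessors:)

#include <stdio.h>
#include <urcu/system.h>	/* CMM_LOAD_SHARED / CMM_STORE_SHARED */

static void warn_once(const char *msg)
{
	static int printed;

	if (!CMM_LOAD_SHARED(printed))
		fprintf(stderr, "[error] %s\n", msg);
	CMM_STORE_SHARED(printed, 1);
}
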