#ifndef HAS_FLS_U32
/*
 * Find-last-set for 32-bit values: return the 1-based position of the
 * most significant set bit of x (fls_u32(1) == 1, fls_u32(0x80000000) == 32),
 * or 0 when x is 0.
 */
static __attribute__((unused))
unsigned int fls_u32(uint32_t x)
{
	unsigned int shift, pos = 32;

	if (x == 0)
		return 0;
	/*
	 * Binary search for the top bit: whenever the upper `shift` bits
	 * of the current window are all clear, shift the value up and
	 * discount those bits from the result.
	 */
	for (shift = 16; shift != 0; shift >>= 1) {
		uint32_t upper_mask = (~(uint32_t) 0) << (32 - shift);

		if (!(x & upper_mask)) {
			x <<= shift;
			pos -= shift;
		}
	}
	return pos;
}
#endif
+
/*
 * fls for unsigned long: dispatch at compile time to the 32-bit or
 * 64-bit implementation depending on the target's word size.
 */
unsigned int cds_lfht_fls_ulong(unsigned long x)
{
#if (CAA_BITS_PER_LONG == 32)
	return fls_u32(x);
#else
	return fls_u64(x);
#endif
}
+
/*
 * Return the minimum order for which x <= (1UL << order).
 * Return -1 if x is 0.
 */
int cds_lfht_get_count_order_u32(uint32_t x)
{
	if (x == 0)
		return -1;
	/* fls of (x - 1) yields ceil(log2(x)), i.e. x rounded up to a power of two. */
	return (int) fls_u32(x - 1);
}
+
/*
 * Return the minimum order for which x <= (1UL << order).
 * Return -1 if x is 0.
 */
int cds_lfht_get_count_order_ulong(unsigned long x)
{
	if (x == 0)
		return -1;
	/* fls of (x - 1) yields ceil(log2(x)), i.e. x rounded up to a power of two. */
	return (int) cds_lfht_fls_ulong(x - 1);
}
+
+static
+void cds_lfht_resize_lazy_grow(struct cds_lfht *ht, unsigned long size, int growth);
+
+static
+void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size,
+ unsigned long count);
+
+static long nr_cpus_mask = -1;
+static long split_count_mask = -1;
+
#if defined(HAVE_SYSCONF)
/*
 * Initialize nr_cpus_mask from the number of configured CPUs, rounded
 * up to the next power of two minus one so it can be used as a bit
 * mask. Sets nr_cpus_mask to -2 when the CPU count cannot be queried.
 */
static void ht_init_nr_cpus_mask(void)
{
	long maxcpus;

	maxcpus = sysconf(_SC_NPROCESSORS_CONF);
	if (maxcpus <= 0) {
		nr_cpus_mask = -2;
		return;
	}
	/*
	 * round up number of CPUs to next power of two, so we
	 * can use & for modulo.
	 */
	maxcpus = 1UL << cds_lfht_get_count_order_ulong(maxcpus);
	nr_cpus_mask = maxcpus - 1;
}
#else /* #if defined(HAVE_SYSCONF) */
/* No sysconf(): the CPU count is unknown, flag it with -2. */
static void ht_init_nr_cpus_mask(void)
{
	nr_cpus_mask = -2;
}
#endif /* #else #if defined(HAVE_SYSCONF) */
+
+static
+void alloc_split_items_count(struct cds_lfht *ht)
+{
+ struct ht_items_count *count;
+
+ if (nr_cpus_mask == -1) {
+ ht_init_nr_cpus_mask();
+ if (nr_cpus_mask < 0)
+ split_count_mask = DEFAULT_SPLIT_COUNT_MASK;
+ else
+ split_count_mask = nr_cpus_mask;
+ }
+
+ assert(split_count_mask >= 0);
+
+ if (ht->flags & CDS_LFHT_ACCOUNTING) {
+ ht->split_count = calloc(split_count_mask + 1, sizeof(*count));
+ assert(ht->split_count);
+ } else {
+ ht->split_count = NULL;
+ }
+}
+
/* Release the split counter array allocated by alloc_split_items_count(). */
static
void free_split_items_count(struct cds_lfht *ht)
{
	poison_free(ht->split_count);
}
+
+#if defined(HAVE_SCHED_GETCPU)
+static
+int ht_get_split_count_index(unsigned long hash)
+{
+ int cpu;
+
+ assert(split_count_mask >= 0);
+ cpu = sched_getcpu();
+ if (caa_unlikely(cpu < 0))
+ return hash & split_count_mask;
+ else
+ return cpu & split_count_mask;
+}
+#else /* #if defined(HAVE_SCHED_GETCPU) */
+static
+int ht_get_split_count_index(unsigned long hash)
+{
+ return hash & split_count_mask;
+}
+#endif /* #else #if defined(HAVE_SCHED_GETCPU) */
+
/*
 * Account one node addition. Increments the per-slot "add" counter for
 * this hash; every (1UL << COUNT_COMMIT_ORDER) increments on a slot,
 * the batch is folded into the global ht->count. When the resulting
 * global count is a power of two and exceeds the chain-length resize
 * threshold relative to the current size, schedule a lazy grow.
 */
static
void ht_count_add(struct cds_lfht *ht, unsigned long size, unsigned long hash)
{
	unsigned long split_count;
	int index;
	long count;

	/* No split counters: accounting is disabled for this table. */
	if (caa_unlikely(!ht->split_count))
		return;
	index = ht_get_split_count_index(hash);
	split_count = uatomic_add_return(&ht->split_count[index].add, 1);
	if (caa_likely(split_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))
		return;
	/* Only if number of add multiple of 1UL << COUNT_COMMIT_ORDER */

	dbg_printf("add split count %lu\n", split_count);
	count = uatomic_add_return(&ht->count,
			1UL << COUNT_COMMIT_ORDER);
	/* count & (count - 1) is non-zero unless count is a power of 2 */
	if (caa_likely(count & (count - 1)))
		return;
	/* Only if global count is power of 2 */

	if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) < size)
		return;
	dbg_printf("add set global %ld\n", count);
	cds_lfht_resize_lazy_count(ht, size,
			count >> (CHAIN_LEN_TARGET - 1));
}
+
/*
 * Account one node removal: mirror image of ht_count_add(). Per-slot
 * "del" counters are batched into the global ht->count (note the
 * unsigned negation: the global count is decremented by wrapping).
 * When the global count crosses below the resize threshold, schedule a
 * lazy shrink -- unless the table is already near its minimum
 * occupancy (see threshold check below).
 */
static
void ht_count_del(struct cds_lfht *ht, unsigned long size, unsigned long hash)
{
	unsigned long split_count;
	int index;
	long count;

	/* No split counters: accounting is disabled for this table. */
	if (caa_unlikely(!ht->split_count))
		return;
	index = ht_get_split_count_index(hash);
	split_count = uatomic_add_return(&ht->split_count[index].del, 1);
	if (caa_likely(split_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))
		return;
	/* Only if number of deletes multiple of 1UL << COUNT_COMMIT_ORDER */

	dbg_printf("del split count %lu\n", split_count);
	count = uatomic_add_return(&ht->count,
			-(1UL << COUNT_COMMIT_ORDER));
	/* count & (count - 1) is non-zero unless count is a power of 2 */
	if (caa_likely(count & (count - 1)))
		return;
	/* Only if global count is power of 2 */

	if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) >= size)
		return;
	dbg_printf("del set global %ld\n", count);
	/*
	 * Don't shrink table if the number of nodes is below a
	 * certain threshold.
	 */
	if (count < (1UL << COUNT_COMMIT_ORDER) * (split_count_mask + 1))
		return;
	cds_lfht_resize_lazy_count(ht, size,
			count >> (CHAIN_LEN_TARGET - 1));
}
+
+static
+void check_resize(struct cds_lfht *ht, unsigned long size, uint32_t chain_len)
+{
+ unsigned long count;
+
+ if (!(ht->flags & CDS_LFHT_AUTO_RESIZE))
+ return;
+ count = uatomic_read(&ht->count);
+ /*
+ * Use bucket-local length for small table expand and for
+ * environments lacking per-cpu data support.
+ */
+ if (count >= (1UL << COUNT_COMMIT_ORDER))
+ return;
+ if (chain_len > 100)
+ dbg_printf("WARNING: large chain length: %u.\n",
+ chain_len);
+ if (chain_len >= CHAIN_LEN_RESIZE_THRESHOLD)
+ cds_lfht_resize_lazy_grow(ht, size,
+ cds_lfht_get_count_order_u32(chain_len - (CHAIN_LEN_TARGET - 1)));
+}
+
/* Strip the metadata flag bits from a next-pointer word, yielding the real node pointer. */
static
struct cds_lfht_node *clear_flag(struct cds_lfht_node *node)
{
	return (struct cds_lfht_node *) (((unsigned long) node) & ~FLAGS_MASK);
}
+
/* Nonzero if the REMOVED flag bit is set in the pointer word. */
static
int is_removed(struct cds_lfht_node *node)
{
	return ((unsigned long) node) & REMOVED_FLAG;
}
+
/* Return the pointer word with the REMOVED flag bit set. */
static
struct cds_lfht_node *flag_removed(struct cds_lfht_node *node)
{
	return (struct cds_lfht_node *) (((unsigned long) node) | REMOVED_FLAG);
}
+
/* Nonzero if the BUCKET flag bit is set in the pointer word. */
static
int is_bucket(struct cds_lfht_node *node)
{
	return ((unsigned long) node) & BUCKET_FLAG;
}
+
/* Return the pointer word with the BUCKET flag bit set. */
static
struct cds_lfht_node *flag_bucket(struct cds_lfht_node *node)
{
	return (struct cds_lfht_node *) (((unsigned long) node) | BUCKET_FLAG);
}
+
/* Nonzero if the REMOVAL_OWNER flag bit is set in the pointer word. */
static
int is_removal_owner(struct cds_lfht_node *node)
{
	return ((unsigned long) node) & REMOVAL_OWNER_FLAG;
}
+
/* Return the pointer word with the REMOVAL_OWNER flag bit set. */
static
struct cds_lfht_node *flag_removal_owner(struct cds_lfht_node *node)
{
	return (struct cds_lfht_node *) (((unsigned long) node) | REMOVAL_OWNER_FLAG);
}
+
/* Return the END_VALUE sentinel cast to a node pointer. */
static
struct cds_lfht_node *get_end(void)
{
	return (struct cds_lfht_node *) END_VALUE;
}
+
/* Nonzero if node, once its flag bits are stripped, is the END_VALUE sentinel. */
static
int is_end(struct cds_lfht_node *node)
{
	return clear_flag(node) == (struct cds_lfht_node *) END_VALUE;
}
+
/*
 * Atomically store v into *ptr only if it increases the stored value,
 * using a cmpxchg retry loop. Returns the previous value: if that
 * value is already >= v, *ptr is left untouched.
 */
static
unsigned long _uatomic_xchg_monotonic_increase(unsigned long *ptr,
		unsigned long v)
{
	unsigned long old1, old2;

	old1 = uatomic_read(ptr);
	do {
		old2 = old1;
		/* Current value already at least v: nothing to do. */
		if (old2 >= v)
			return old2;
		/* Retry with the fresh value if another thread raced us. */
	} while ((old1 = uatomic_cmpxchg(ptr, old2, v)) != old2);
	return old2;
}
+
+static
+void cds_lfht_alloc_bucket_table(struct cds_lfht *ht, unsigned long order)
+{
+ return ht->mm->alloc_bucket_table(ht, order);
+}
+
+/*
+ * cds_lfht_free_bucket_table() should be called with decreasing order.
+ * When cds_lfht_free_bucket_table(0) is called, it means the whole
+ * lfht is destroyed.
+ */
+static
+void cds_lfht_free_bucket_table(struct cds_lfht *ht, unsigned long order)
+{
+ return ht->mm->free_bucket_table(ht, order);
+}
+
/* Fetch the bucket node at the given index via the table's indexing callback. */
static inline
struct cds_lfht_node *bucket_at(struct cds_lfht *ht, unsigned long index)
{
	return ht->bucket_at(ht, index);
}
+
+static inline
+struct cds_lfht_node *lookup_bucket(struct cds_lfht *ht, unsigned long size,
+ unsigned long hash)
+{
+ assert(size > 0);
+ return bucket_at(ht, hash & (size - 1));