#include <stdint.h>
#include <string.h>
+#include "config.h"
#include <urcu.h>
#include <urcu-call-rcu.h>
#include <urcu/arch.h>
#define dbg_printf(fmt, args...)
#endif
-#define CHAIN_LEN_TARGET 4
-#define CHAIN_LEN_RESIZE_THRESHOLD 8
+/*
+ * Per-CPU split-counters lazily update the global counter every 1024
+ * additions/removals. The global count is then used to detect when a
+ * resize is required. We fall back on the bucket chain length as the
+ * expand indicator for small tables and on machines lacking per-CPU
+ * data support.
+ */
+#define COUNT_COMMIT_ORDER 10
+#define CHAIN_LEN_TARGET 1
+#define CHAIN_LEN_RESIZE_THRESHOLD 3
#ifndef max
#define max(a, b) ((a) > (b) ? (a) : (b))
#define DUMMY_FLAG (1UL << 1)
#define FLAGS_MASK ((1UL << 2) - 1)
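+/*
+ * Per-CPU counter pair, cache-line aligned to avoid false sharing
+ * between CPUs updating their own counters.
+ */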
+struct ht_items_count {
+ unsigned long add, remove;
+} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
+
struct rcu_table {
 unsigned long size; /* always a power of 2 */
 unsigned long resize_target;
};
struct cds_lfht {
 unsigned int in_progress_resize, in_progress_destroy;
 void (*cds_lfht_call_rcu)(struct rcu_head *head,
 void (*func)(struct rcu_head *head));
+ unsigned long count; /* global approximate item count */
+ struct ht_items_count *percpu_count; /* per-cpu item count */
};
struct rcu_resize_work {
static
void cds_lfht_resize_lazy(struct cds_lfht *ht, struct rcu_table *t, int growth);
+/*
+ * If the sched_getcpu() and sysconf(_SC_NPROCESSORS_CONF) calls are
+ * available, then we support hash table item accounting.
+ * In the unfortunate event that the number of CPUs reported turns out
+ * to be inaccurate, we apply modulo arithmetic on the number of CPUs
+ * we got.
+ */
+#if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF)
+
+static
+void cds_lfht_resize_lazy_count(struct cds_lfht *ht, struct rcu_table *t,
+ unsigned long count);
+
+static long nr_cpus_mask = -1;
+
+static
+struct ht_items_count *alloc_per_cpu_items_count(void)
+{
+ struct ht_items_count *count;
+
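+ /*
+ * nr_cpus_mask state: -1 means the CPU count has not been probed
+ * yet, -2 means probing failed, and >= 0 is the power-of-two mask
+ * applied to sched_getcpu() return values.
+ */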
+ switch (nr_cpus_mask) {
+ case -2:
+ return NULL;
+ case -1:
+ {
+ long maxcpus;
+
+ maxcpus = sysconf(_SC_NPROCESSORS_CONF);
+ if (maxcpus <= 0) {
+ nr_cpus_mask = -2;
+ return NULL;
+ }
+ /*
+ * round up number of CPUs to next power of two, so we
+ * can use & for modulo.
+ */
+ maxcpus = 1UL << get_count_order_ulong(maxcpus);
+ nr_cpus_mask = maxcpus - 1;
+ }
+ /* Fall-through */
+ default:
+ return calloc(nr_cpus_mask + 1, sizeof(*count));
+ }
+}
+
+static
+void free_per_cpu_items_count(struct ht_items_count *count)
+{
+ free(count);
+}
+
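+/*
+ * Return the calling CPU's counter index, folded onto the allocated
+ * per-CPU counter range, or a negative value if sched_getcpu() fails.
+ */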
+static
+int ht_get_cpu(void)
+{
+ int cpu;
+
+ assert(nr_cpus_mask >= 0);
+ cpu = sched_getcpu();
+ if (unlikely(cpu < 0))
+ return cpu;
+ else
+ return cpu & nr_cpus_mask;
+}
+
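+/*
+ * Account one addition on the local CPU. Every 2^COUNT_COMMIT_ORDER
+ * (1024) local additions, fold the delta into the global count and,
+ * when that count reaches a power of two, check whether the table
+ * needs to be expanded.
+ */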
+static
+void ht_count_add(struct cds_lfht *ht, struct rcu_table *t)
+{
+ unsigned long percpu_count;
+ int cpu;
+
+ if (unlikely(!ht->percpu_count))
+ return;
+ cpu = ht_get_cpu();
+ if (unlikely(cpu < 0))
+ return;
+ percpu_count = uatomic_add_return(&ht->percpu_count[cpu].add, 1);
+ if (unlikely(!(percpu_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) {
+ unsigned long count;
+
+ dbg_printf("add percpu %lu\n", percpu_count);
+ count = uatomic_add_return(&ht->count,
+ 1UL << COUNT_COMMIT_ORDER);
+ /* Check for a needed expand only when count reaches a power of 2 */
+ if (!(count & (count - 1))) {
+ if ((count >> CHAIN_LEN_RESIZE_THRESHOLD)
+ < t->size)
+ return;
+ dbg_printf("add set global %lu\n", count);
+ cds_lfht_resize_lazy_count(ht, t,
+ count >> (CHAIN_LEN_TARGET - 1));
+ }
+ }
+}
+
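+/*
+ * Account one removal on the local CPU. Every 2^COUNT_COMMIT_ORDER
+ * (1024) local removals, fold the delta into the global count and,
+ * when that count reaches a power of two, re-evaluate the resize
+ * target.
+ */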
+static
+void ht_count_remove(struct cds_lfht *ht, struct rcu_table *t)
+{
+ unsigned long percpu_count;
+ int cpu;
+
+ if (unlikely(!ht->percpu_count))
+ return;
+ cpu = ht_get_cpu();
+ if (unlikely(cpu < 0))
+ return;
+ percpu_count = uatomic_add_return(&ht->percpu_count[cpu].remove, -1);
+ if (unlikely(!(percpu_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) {
+ unsigned long count;
+
+ dbg_printf("remove percpu %lu\n", percpu_count);
+ count = uatomic_add_return(&ht->count,
+ -(1UL << COUNT_COMMIT_ORDER));
+ /* Re-evaluate the resize target only when count reaches a power of 2 */
+ if (!(count & (count - 1))) {
+ if ((count >> CHAIN_LEN_RESIZE_THRESHOLD)
+ >= t->size)
+ return;
+ dbg_printf("remove set global %lu\n", count);
+ cds_lfht_resize_lazy_count(ht, t,
+ count >> (CHAIN_LEN_TARGET - 1));
+ }
+ }
+}
+
+#else /* #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */
+
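+/*
+ * Without sched_getcpu()/sysconf(), item accounting is disabled: the
+ * stubs below compile to no-ops and resize decisions rely solely on
+ * the bucket chain length heuristic in check_resize().
+ */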
+static const long nr_cpus_mask = -1;
+
+static
+struct ht_items_count *alloc_per_cpu_items_count(void)
+{
+ return NULL;
+}
+
+static
+void free_per_cpu_items_count(struct ht_items_count *count)
+{
+}
+
+static
+void ht_count_add(struct cds_lfht *ht, struct rcu_table *t)
+{
+}
+
+static
+void ht_count_remove(struct cds_lfht *ht, struct rcu_table *t)
+{
+}
+
+#endif /* #else #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */
+
+
static
void check_resize(struct cds_lfht *ht, struct rcu_table *t,
uint32_t chain_len)
{
+ unsigned long count;
+
+ count = uatomic_read(&ht->count);
+ /*
+ * Use the bucket-local chain length to drive expansion only while the
+ * table is still small, and on environments lacking per-CPU data
+ * support; beyond that point, the split counters take over.
+ */
+ if (count >= (1UL << COUNT_COMMIT_ORDER))
+ return;
if (chain_len > 100)
dbg_printf("WARNING: large chain length: %u.\n",
chain_len);
ht->hash_seed = hash_seed;
ht->cds_lfht_call_rcu = cds_lfht_call_rcu;
ht->in_progress_resize = 0;
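+ /* percpu_count stays NULL when per-CPU accounting is unavailable. */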
+ ht->percpu_count = alloc_per_cpu_items_count();
/* this mutex should not nest in read-side C.S. */
pthread_mutex_init(&ht->resize_mutex, NULL);
order = get_count_order_ulong(max(init_size, 1)) + 1;
t = rcu_dereference(ht->t);
(void) _cds_lfht_add(ht, t, node, 0, 0);
+ ht_count_add(ht, t);
}
struct cds_lfht_node *cds_lfht_add_unique(struct cds_lfht *ht,
{
struct rcu_table *t;
unsigned long hash;
+ struct cds_lfht_node *ret;
hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);
t = rcu_dereference(ht->t);
- return _cds_lfht_add(ht, t, node, 1, 0);
+ ret = _cds_lfht_add(ht, t, node, 1, 0);
+ if (ret == node)
+ ht_count_add(ht, t);
+ return ret;
}
int cds_lfht_remove(struct cds_lfht *ht, struct cds_lfht_node *node)
{
struct rcu_table *t;
+ int ret;
t = rcu_dereference(ht->t);
- return _cds_lfht_remove(ht, t, node);
+ ret = _cds_lfht_remove(ht, t, node);
+ if (!ret)
+ ht_count_remove(ht, t);
+ return ret;
}
static
if (ret)
return ret;
free(ht->t);
+ free_per_cpu_items_count(ht->percpu_count);
free(ht);
return ret;
}
if (old_size == new_size)
return;
new_order = get_count_order_ulong(new_size) + 1;
- dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
+ printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
old_size, old_order, new_size, new_order);
new_t = malloc(sizeof(struct cds_lfht)
+ (new_order * sizeof(struct _cds_lfht_node *)));
CMM_STORE_SHARED(t->resize_initiated, 1);
}
}
+
+#if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF)
+
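+/*
+ * Monotonically raise the resize target to at least 'count' and
+ * return the resulting target size.
+ */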
+static
+unsigned long resize_target_update_count(struct rcu_table *t,
+ unsigned long count)
+{
+ return _uatomic_max(&t->resize_target, count);
+}
+
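+/*
+ * Queue a lazy resize towards 'count' items: update the target and,
+ * if no resize is already in flight, schedule do_resize_cb() to run
+ * as an RCU callback.
+ */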
+static
+void cds_lfht_resize_lazy_count(struct cds_lfht *ht, struct rcu_table *t,
+ unsigned long count)
+{
+ struct rcu_resize_work *work;
+ unsigned long target_size;
+
+ target_size = resize_target_update_count(t, count);
+ if (!CMM_LOAD_SHARED(t->resize_initiated) && t->size < target_size) {
+ uatomic_inc(&ht->in_progress_resize);
+ cmm_smp_mb(); /* increment resize count before calling it */
+ work = malloc(sizeof(*work));
+ work->ht = ht;
+ ht->cds_lfht_call_rcu(&work->head, do_resize_cb);
+ CMM_STORE_SHARED(t->resize_initiated, 1);
+ }
+}
+
+#endif