#define dbg_printf(args...)
#endif
-#define CHAIN_LEN_TARGET 1
-#define CHAIN_LEN_RESIZE_THRESHOLD 2
+#define CHAIN_LEN_TARGET 4
+#define CHAIN_LEN_RESIZE_THRESHOLD 8
#ifndef max
#define max(a, b) ((a) > (b) ? (a) : (b))
ht_compare_fct compare_fct;
unsigned long hash_seed;
pthread_mutex_t resize_mutex; /* resize mutex: add/del mutex */
- unsigned int in_progress_resize;
+ unsigned int in_progress_resize, in_progress_destroy;
void (*ht_call_rcu)(struct rcu_head *head,
void (*func)(struct rcu_head *head));
};
chain_len);
if (chain_len >= CHAIN_LEN_RESIZE_THRESHOLD)
ht_resize_lazy(ht, t,
- get_count_order_u32(chain_len - CHAIN_LEN_TARGET + 1));
+ get_count_order_u32(chain_len - (CHAIN_LEN_TARGET - 1)));
}
static
new_node->p.reverse_hash =
bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j);
(void) _ht_add(ht, t, new_node, 0, 1);
+ if (CMM_LOAD_SHARED(ht->in_progress_destroy))
+ break;
}
/* Update table size */
t->size = !i ? 1 : (1UL << i);
dbg_printf("rculfhash: init new size: %lu\n", t->size);
+ if (CMM_LOAD_SHARED(ht->in_progress_destroy))
+ break;
}
t->resize_target = t->size;
t->resize_initiated = 0;
int ret;
/* Wait for in-flight resize operations to complete */
+ CMM_STORE_SHARED(ht->in_progress_destroy, 1);
while (uatomic_read(&ht->in_progress_resize))
poll(NULL, 0, 100); /* wait for 100ms */
ret = ht_delete_dummy(ht);
if (old_size == new_size)
return;
new_order = get_count_order_ulong(new_size) + 1;
-	dbg_printf("rculfhash: resize from %lu (order %lu) to %lu (order %lu) buckets\n",
-		old_size, old_order, new_size, new_order);
+	dbg_printf("rculfhash: resize from %lu (order %lu) to %lu (order %lu) buckets\n",
+		old_size, old_order, new_size, new_order);
new_t = malloc(sizeof(struct rcu_table)
+ (new_order * sizeof(struct _rcu_ht_node *)));
assert(new_size > old_size);