#include <stdint.h>
#include <errno.h>
#include <limits.h>
+#include <string.h>
#include <urcu/rcuja.h>
#include <urcu/compiler.h>
#include <urcu/arch.h>
#include "rcuja-internal.h"
#include "bitfield.h"
+#ifndef abs_int
+#define abs_int(a) ((int) (a) > 0 ? (int) (a) : -((int) (a)))
+#endif
+
enum cds_ja_type_class {
RCU_JA_LINEAR = 0, /* Type A */
/* 32-bit: 1 to 25 children, 8 to 128 bytes */
{ .type_class = RCU_JA_POOL, .min_child = 45, .max_child = ja_type_6_max_child, .max_linear_child = ja_type_6_max_linear_child, .order = 9, .nr_pool_order = ja_type_6_nr_pool_order, .pool_size_order = 7, },
/*
- * TODO: Upon node removal below min_child, if child pool is
- * filled beyond capacity, we need to roll back to pigeon.
+ * Upon node removal below min_child, if child pool is filled
+ * beyond capacity, we roll back to pigeon.
*/
{ .type_class = RCU_JA_PIGEON, .min_child = 89, .max_child = ja_type_7_max_child, .order = 10, },
{ .type_class = RCU_JA_POOL, .min_child = 51, .max_child = ja_type_6_max_child, .max_linear_child = ja_type_6_max_linear_child, .order = 10, .nr_pool_order = ja_type_6_nr_pool_order, .pool_size_order = 8, },
/*
- * TODO: Upon node removal below min_child, if child pool is
- * filled beyond capacity, we need to roll back to pigeon.
+ * Upon node removal below min_child, if child pool is filled
+ * beyond capacity, we roll back to pigeon.
*/
{ .type_class = RCU_JA_PIGEON, .min_child = 101, .max_child = ja_type_7_max_child, .order = 11, },
};
enum ja_recompact {
- JA_RECOMPACT,
- JA_RECOMPACT_ADD,
+ JA_RECOMPACT_ADD_SAME,
+ JA_RECOMPACT_ADD_NEXT,
JA_RECOMPACT_DEL,
};
+static
+unsigned long node_fallback_count_distribution[JA_ENTRY_PER_NODE];
+static
+unsigned long nr_nodes_allocated, nr_nodes_freed;
+
+static
+struct cds_ja_inode *_ja_node_mask_ptr(struct cds_ja_inode_flag *node)
+{
+ return (struct cds_ja_inode *) (((unsigned long) node) & JA_PTR_MASK);
+}
+
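+/*
+ * Node pointers are tagged: the low JA_TYPE_BITS of a
+ * cds_ja_inode_flag pointer encode the node type, and pool nodes
+ * additionally encode their bit selector(s) in the JA_POOL_1D_MASK /
+ * JA_POOL_2D_MASK bits. Size-aligned allocation (see
+ * alloc_cds_ja_node) guarantees those bits are zero in the untagged
+ * pointer.
+ */
+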
+unsigned long ja_node_type(struct cds_ja_inode_flag *node)
+{
+ unsigned long type;
+
+ if (_ja_node_mask_ptr(node) == NULL) {
+ return NODE_INDEX_NULL;
+ }
+ type = (unsigned int) ((unsigned long) node & JA_TYPE_MASK);
+ assert(type < (1UL << JA_TYPE_BITS));
+ return type;
+}
+
+struct cds_ja_inode *ja_node_ptr(struct cds_ja_inode_flag *node)
+{
+ unsigned long type_index = ja_node_type(node);
+ const struct cds_ja_type *type;
+
+ type = &ja_types[type_index];
+ switch (type->type_class) {
+ case RCU_JA_LINEAR:
+ case RCU_JA_PIGEON: /* fall-through */
+ case RCU_JA_NULL: /* fall-through */
+ default: /* fall-through */
+ return _ja_node_mask_ptr(node);
+ case RCU_JA_POOL:
+ switch (type->nr_pool_order) {
+ case 1:
+ return (struct cds_ja_inode *) (((unsigned long) node) & ~(JA_POOL_1D_MASK | JA_TYPE_MASK));
+ case 2:
+ return (struct cds_ja_inode *) (((unsigned long) node) & ~(JA_POOL_2D_MASK | JA_POOL_1D_MASK | JA_TYPE_MASK));
+ default:
+ assert(0);
+ }
+ }
+ return NULL; /* not reached; keeps the non-void return path well-defined */
+}
+
struct cds_ja_inode *alloc_cds_ja_node(const struct cds_ja_type *ja_type)
{
- return calloc(1U << ja_type->order, sizeof(char));
+ size_t len = 1U << ja_type->order;
+ void *p;
+ int ret;
+
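+ /*
+ * Align the node on its own size: the resulting zero low-order
+ * bits of the pointer are used to store the type and
+ * pool-selection tags.
+ */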
+ ret = posix_memalign(&p, len, len);
+ if (ret || !p) {
+ return NULL;
+ }
+ memset(p, 0, len);
+ uatomic_inc(&nr_nodes_allocated);
+ return p;
}
void free_cds_ja_node(struct cds_ja_inode *node)
{
free(node);
+ uatomic_inc(&nr_nodes_freed);
}
#define __JA_ALIGN_MASK(v, mask) (((v) + (mask)) & ~(mask))
struct cds_ja_inode_flag *ja_linear_node_get_nth(const struct cds_ja_type *type,
struct cds_ja_inode *node,
struct cds_ja_inode_flag ***child_node_flag_ptr,
+ struct cds_ja_inode_flag **child_node_flag_v,
+ struct cds_ja_inode_flag ***node_flag_ptr,
uint8_t n)
{
uint8_t nr_child;
if (CMM_LOAD_SHARED(values[i]) == n)
break;
}
- if (i >= nr_child)
+ if (i >= nr_child) {
+ if (caa_unlikely(node_flag_ptr))
+ *node_flag_ptr = NULL;
return NULL;
+ }
pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
ptr = rcu_dereference(pointers[i]);
if (caa_unlikely(child_node_flag_ptr) && ptr)
*child_node_flag_ptr = &pointers[i];
+ if (caa_unlikely(child_node_flag_v) && ptr)
+ *child_node_flag_v = ptr;
+ if (caa_unlikely(node_flag_ptr))
+ *node_flag_ptr = &pointers[i];
return ptr;
}
static
struct cds_ja_inode_flag *ja_pool_node_get_nth(const struct cds_ja_type *type,
struct cds_ja_inode *node,
+ struct cds_ja_inode_flag *node_flag,
struct cds_ja_inode_flag ***child_node_flag_ptr,
+ struct cds_ja_inode_flag **child_node_flag_v,
+ struct cds_ja_inode_flag ***node_flag_ptr,
uint8_t n)
{
struct cds_ja_inode *linear;
assert(type->type_class == RCU_JA_POOL);
- /*
- * TODO: currently, we select the pool by highest bits. We
- * should support various encodings.
- */
- linear = (struct cds_ja_inode *)
- &node->u.data[((unsigned long) n >> (CHAR_BIT - type->nr_pool_order)) << type->pool_size_order];
- return ja_linear_node_get_nth(type, linear, child_node_flag_ptr, n);
+
+ switch (type->nr_pool_order) {
+ case 1:
+ {
+ unsigned long bitsel, index;
+
+ bitsel = ja_node_pool_1d_bitsel(node_flag);
+ assert(bitsel < CHAR_BIT);
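+ /*
+ * The selected bit of n picks one of the two linear pools.
+ * For example (illustrative), with bitsel == 3 and n == 0x2a
+ * (binary 00101010), bit 3 of n is set, so index == 1 and the
+ * second pool is used.
+ */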
+ index = ((unsigned long) n >> bitsel) & 0x1;
+ linear = (struct cds_ja_inode *) &node->u.data[index << type->pool_size_order];
+ break;
+ }
+ case 2:
+ {
+ unsigned long bitsel[2], index[2], rindex;
+
+ ja_node_pool_2d_bitsel(node_flag, bitsel);
+ assert(bitsel[0] < CHAR_BIT);
+ assert(bitsel[1] < CHAR_BIT);
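+ /*
+ * Combine the two selected bits of n into a 2-bit pool index
+ * (bitsel[0] gives the high bit), selecting one of four
+ * linear pools.
+ */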
+ index[0] = ((unsigned long) n >> bitsel[0]) & 0x1;
+ index[0] <<= 1;
+ index[1] = ((unsigned long) n >> bitsel[1]) & 0x1;
+ rindex = index[0] | index[1];
+ linear = (struct cds_ja_inode *) &node->u.data[rindex << type->pool_size_order];
+ break;
+ }
+ default:
+ linear = NULL;
+ assert(0);
+ }
+ return ja_linear_node_get_nth(type, linear, child_node_flag_ptr,
+ child_node_flag_v, node_flag_ptr, n);
}
static
struct cds_ja_inode_flag *ja_pigeon_node_get_nth(const struct cds_ja_type *type,
struct cds_ja_inode *node,
struct cds_ja_inode_flag ***child_node_flag_ptr,
+ struct cds_ja_inode_flag **child_node_flag_v,
+ struct cds_ja_inode_flag ***node_flag_ptr,
uint8_t n)
{
struct cds_ja_inode_flag **child_node_flag;
+ struct cds_ja_inode_flag *child_node_flag_read;
assert(type->type_class == RCU_JA_PIGEON);
child_node_flag = &((struct cds_ja_inode_flag **) node->u.data)[n];
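+ /*
+ * Read the slot once: the checks below and the return value all
+ * use the same snapshot, so a concurrent update cannot make them
+ * disagree.
+ */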
+ child_node_flag_read = rcu_dereference(*child_node_flag);
dbg_printf("ja_pigeon_node_get_nth child_node_flag_ptr %p\n",
child_node_flag);
- if (caa_unlikely(child_node_flag_ptr) && *child_node_flag)
+ if (caa_unlikely(child_node_flag_ptr) && child_node_flag_read)
*child_node_flag_ptr = child_node_flag;
- return rcu_dereference(*child_node_flag);
+ if (caa_unlikely(child_node_flag_v) && child_node_flag_read)
+ *child_node_flag_v = child_node_flag_read;
+ if (caa_unlikely(node_flag_ptr))
+ *node_flag_ptr = child_node_flag;
+ return child_node_flag_read;
}
static
struct cds_ja_inode *node,
uint8_t i)
{
- return ja_pigeon_node_get_nth(type, node, NULL, i);
+ return ja_pigeon_node_get_nth(type, node, NULL, NULL, NULL, i);
}
/*
* node_flag is already rcu_dereference'd.
*/
static
-struct cds_ja_inode_flag * ja_node_get_nth(struct cds_ja_inode_flag *node_flag,
+struct cds_ja_inode_flag *ja_node_get_nth(struct cds_ja_inode_flag *node_flag,
struct cds_ja_inode_flag ***child_node_flag_ptr,
+ struct cds_ja_inode_flag **child_node_flag,
+ struct cds_ja_inode_flag ***node_flag_ptr,
uint8_t n)
{
unsigned int type_index;
switch (type->type_class) {
case RCU_JA_LINEAR:
return ja_linear_node_get_nth(type, node,
- child_node_flag_ptr, n);
+ child_node_flag_ptr, child_node_flag,
+ node_flag_ptr, n);
case RCU_JA_POOL:
- return ja_pool_node_get_nth(type, node,
- child_node_flag_ptr, n);
+ return ja_pool_node_get_nth(type, node, node_flag,
+ child_node_flag_ptr, child_node_flag,
+ node_flag_ptr, n);
case RCU_JA_PIGEON:
return ja_pigeon_node_get_nth(type, node,
- child_node_flag_ptr, n);
+ child_node_flag_ptr, child_node_flag,
+ node_flag_ptr, n);
default:
assert(0);
return (void *) -1UL;
static
int ja_pool_node_set_nth(const struct cds_ja_type *type,
struct cds_ja_inode *node,
+ struct cds_ja_inode_flag *node_flag,
struct cds_ja_shadow_node *shadow_node,
uint8_t n,
struct cds_ja_inode_flag *child_node_flag)
struct cds_ja_inode *linear;
assert(type->type_class == RCU_JA_POOL);
- linear = (struct cds_ja_inode *)
- &node->u.data[((unsigned long) n >> (CHAR_BIT - type->nr_pool_order)) << type->pool_size_order];
+
+ switch (type->nr_pool_order) {
+ case 1:
+ {
+ unsigned long bitsel, index;
+
+ bitsel = ja_node_pool_1d_bitsel(node_flag);
+ assert(bitsel < CHAR_BIT);
+ index = ((unsigned long) n >> bitsel) & 0x1;
+ linear = (struct cds_ja_inode *) &node->u.data[index << type->pool_size_order];
+ break;
+ }
+ case 2:
+ {
+ unsigned long bitsel[2], index[2], rindex;
+
+ ja_node_pool_2d_bitsel(node_flag, bitsel);
+ assert(bitsel[0] < CHAR_BIT);
+ assert(bitsel[1] < CHAR_BIT);
+ index[0] = ((unsigned long) n >> bitsel[0]) & 0x1;
+ index[0] <<= 1;
+ index[1] = ((unsigned long) n >> bitsel[1]) & 0x1;
+ rindex = index[0] | index[1];
+ linear = (struct cds_ja_inode *) &node->u.data[rindex << type->pool_size_order];
+ break;
+ }
+ default:
+ linear = NULL;
+ assert(0);
+ }
+
return ja_linear_node_set_nth(type, linear, shadow_node,
n, child_node_flag);
}
static
int _ja_node_set_nth(const struct cds_ja_type *type,
struct cds_ja_inode *node,
+ struct cds_ja_inode_flag *node_flag,
struct cds_ja_shadow_node *shadow_node,
uint8_t n,
struct cds_ja_inode_flag *child_node_flag)
return ja_linear_node_set_nth(type, node, shadow_node, n,
child_node_flag);
case RCU_JA_POOL:
- return ja_pool_node_set_nth(type, node, shadow_node, n,
+ return ja_pool_node_set_nth(type, node, node_flag, shadow_node, n,
child_node_flag);
case RCU_JA_PIGEON:
return ja_pigeon_node_set_nth(type, node, shadow_node, n,
assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
nr_child_ptr = &node->u.data[0];
- dbg_printf("linear clear ptr: nr_child_ptr %p\n", nr_child_ptr);
nr_child = *nr_child_ptr;
assert(nr_child <= type->max_linear_child);
if (shadow_node->fallback_removal_count) {
shadow_node->fallback_removal_count--;
} else {
- if (shadow_node->nr_child <= type->min_child) {
+ if (type->type_class == RCU_JA_LINEAR
+ && shadow_node->nr_child <= type->min_child) {
/* We need to try recompacting the node */
return -EFBIG;
}
}
+ dbg_printf("linear clear ptr: nr_child_ptr %p\n", nr_child_ptr);
assert(*node_flag_ptr != NULL);
rcu_assign_pointer(*node_flag_ptr, NULL);
/*
(unsigned int) CMM_LOAD_SHARED(*nr_child_ptr),
(unsigned int) shadow_node->nr_child,
node, shadow_node);
-
return 0;
}
static
int ja_pool_node_clear_ptr(const struct cds_ja_type *type,
struct cds_ja_inode *node,
+ struct cds_ja_inode_flag *node_flag,
struct cds_ja_shadow_node *shadow_node,
struct cds_ja_inode_flag **node_flag_ptr,
uint8_t n)
struct cds_ja_inode *linear;
assert(type->type_class == RCU_JA_POOL);
- linear = (struct cds_ja_inode *)
- &node->u.data[((unsigned long) n >> (CHAR_BIT - type->nr_pool_order)) << type->pool_size_order];
+
+ if (shadow_node->fallback_removal_count) {
+ shadow_node->fallback_removal_count--;
+ } else {
+ /* We should try recompacting the node */
+ if (shadow_node->nr_child <= type->min_child)
+ return -EFBIG;
+ }
+
+ switch (type->nr_pool_order) {
+ case 1:
+ {
+ unsigned long bitsel, index;
+
+ bitsel = ja_node_pool_1d_bitsel(node_flag);
+ assert(bitsel < CHAR_BIT);
+ index = ((unsigned long) n >> bitsel) & 0x1;
+ linear = (struct cds_ja_inode *) &node->u.data[index << type->pool_size_order];
+ break;
+ }
+ case 2:
+ {
+ unsigned long bitsel[2], index[2], rindex;
+
+ ja_node_pool_2d_bitsel(node_flag, bitsel);
+ assert(bitsel[0] < CHAR_BIT);
+ assert(bitsel[1] < CHAR_BIT);
+ index[0] = ((unsigned long) n >> bitsel[0]) & 0x1;
+ index[0] <<= 1;
+ index[1] = ((unsigned long) n >> bitsel[1]) & 0x1;
+ rindex = index[0] | index[1];
+ linear = (struct cds_ja_inode *) &node->u.data[rindex << type->pool_size_order];
+ break;
+ }
+ default:
+ linear = NULL;
+ assert(0);
+ }
+
return ja_linear_node_clear_ptr(type, linear, shadow_node, node_flag_ptr);
}
struct cds_ja_inode_flag **node_flag_ptr)
{
assert(type->type_class == RCU_JA_PIGEON);
+
+ if (shadow_node->fallback_removal_count) {
+ shadow_node->fallback_removal_count--;
+ } else {
+ /* We should try recompacting the node */
+ if (shadow_node->nr_child <= type->min_child)
+ return -EFBIG;
+ }
dbg_printf("ja_pigeon_node_clear_ptr: clearing ptr: %p\n", *node_flag_ptr);
rcu_assign_pointer(*node_flag_ptr, NULL);
shadow_node->nr_child--;
static
int _ja_node_clear_ptr(const struct cds_ja_type *type,
struct cds_ja_inode *node,
+ struct cds_ja_inode_flag *node_flag,
struct cds_ja_shadow_node *shadow_node,
struct cds_ja_inode_flag **node_flag_ptr,
uint8_t n)
case RCU_JA_LINEAR:
return ja_linear_node_clear_ptr(type, node, shadow_node, node_flag_ptr);
case RCU_JA_POOL:
- return ja_pool_node_clear_ptr(type, node, shadow_node, node_flag_ptr, n);
+ return ja_pool_node_clear_ptr(type, node, node_flag, shadow_node, node_flag_ptr, n);
case RCU_JA_PIGEON:
return ja_pigeon_node_clear_ptr(type, node, shadow_node, node_flag_ptr);
case RCU_JA_NULL:
return 0;
}
+/*
+ * Calculate the bit distribution. Returns the bit (0 to 7) that splits
+ * the distribution into two sub-distributions containing as equal a
+ * number of elements as possible.
+ */
+static
+unsigned int ja_node_sum_distribution_1d(enum ja_recompact mode,
+ struct cds_ja *ja,
+ unsigned int type_index,
+ const struct cds_ja_type *type,
+ struct cds_ja_inode *node,
+ struct cds_ja_shadow_node *shadow_node,
+ uint8_t n,
+ struct cds_ja_inode_flag *child_node_flag,
+ struct cds_ja_inode_flag **nullify_node_flag_ptr)
+{
+ uint8_t nr_one[JA_BITS_PER_BYTE];
+ unsigned int bitsel = 0, bit_i, overall_best_distance = UINT_MAX;
+ unsigned int distrib_nr_child = 0;
+
+ memset(nr_one, 0, sizeof(nr_one));
+
+ switch (type->type_class) {
+ case RCU_JA_LINEAR:
+ {
+ uint8_t nr_child =
+ ja_linear_node_get_nr_child(type, node);
+ unsigned int i;
+
+ for (i = 0; i < nr_child; i++) {
+ struct cds_ja_inode_flag *iter;
+ uint8_t v;
+
+ ja_linear_node_get_ith_pos(type, node, i, &v, &iter);
+ if (!iter)
+ continue;
+ if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
+ continue;
+ for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
+ if (v & (1U << bit_i))
+ nr_one[bit_i]++;
+ }
+ distrib_nr_child++;
+ }
+ break;
+ }
+ case RCU_JA_POOL:
+ {
+ unsigned int pool_nr;
+
+ for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) {
+ struct cds_ja_inode *pool =
+ ja_pool_node_get_ith_pool(type,
+ node, pool_nr);
+ uint8_t nr_child =
+ ja_linear_node_get_nr_child(type, pool);
+ unsigned int j;
+
+ for (j = 0; j < nr_child; j++) {
+ struct cds_ja_inode_flag *iter;
+ uint8_t v;
+
+ ja_linear_node_get_ith_pos(type, pool,
+ j, &v, &iter);
+ if (!iter)
+ continue;
+ if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
+ continue;
+ for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
+ if (v & (1U << bit_i))
+ nr_one[bit_i]++;
+ }
+ distrib_nr_child++;
+ }
+ }
+ break;
+ }
+ case RCU_JA_PIGEON:
+ {
+ unsigned int i;
+
+ assert(mode == JA_RECOMPACT_DEL);
+ /*
+ * Pigeon slots are indexed by key digit, so scan every slot
+ * rather than only the first nr_child ones.
+ */
+ for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
+ struct cds_ja_inode_flag *iter;
+
+ iter = ja_pigeon_node_get_ith_pos(type, node, i);
+ if (!iter)
+ continue;
+ if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
+ continue;
+ for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
+ if (i & (1U << bit_i))
+ nr_one[bit_i]++;
+ }
+ distrib_nr_child++;
+ }
+ break;
+ }
+ case RCU_JA_NULL:
+ assert(mode == JA_RECOMPACT_ADD_NEXT);
+ break;
+ default:
+ assert(0);
+ break;
+ }
+
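+ /* Take the child being added at index n into account. */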
+ if (mode == JA_RECOMPACT_ADD_NEXT || mode == JA_RECOMPACT_ADD_SAME) {
+ for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
+ if (n & (1U << bit_i))
+ nr_one[bit_i]++;
+ }
+ distrib_nr_child++;
+ }
+
+ /*
+ * The best bit selector is the one for which the number of ones is
+ * closest to half of the number of children in the distribution. We
+ * compute the distance using twice the sub-distribution sizes to
+ * eliminate truncation error. E.g., with 10 children of which 4
+ * have bit i set, the distance for bit i is abs(2*4 - 10) = 2.
+ */
+ for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
+ unsigned int distance_to_best;
+
+ distance_to_best = abs_int((nr_one[bit_i] << 1U) - distrib_nr_child);
+ if (distance_to_best < overall_best_distance) {
+ overall_best_distance = distance_to_best;
+ bitsel = bit_i;
+ }
+ }
+ dbg_printf("1 dimension pool bit selection: (%u)\n", bitsel);
+ return bitsel;
+}
+
+/*
+ * Calculate the bit distribution in two dimensions. Returns the two
+ * bits (each 0 to 7) that split the distribution into four
+ * sub-distributions containing as equal a number of elements as
+ * possible.
+ */
+static
+void ja_node_sum_distribution_2d(enum ja_recompact mode,
+ struct cds_ja *ja,
+ unsigned int type_index,
+ const struct cds_ja_type *type,
+ struct cds_ja_inode *node,
+ struct cds_ja_shadow_node *shadow_node,
+ uint8_t n,
+ struct cds_ja_inode_flag *child_node_flag,
+ struct cds_ja_inode_flag **nullify_node_flag_ptr,
+ unsigned int *_bitsel)
+{
+ uint8_t nr_2d_11[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE],
+ nr_2d_10[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE],
+ nr_2d_01[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE],
+ nr_2d_00[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE];
+ unsigned int bitsel[2] = { 0, 1 };
+ unsigned int bit_i, bit_j;
+ int overall_best_distance = INT_MAX;
+ unsigned int distrib_nr_child = 0;
+
+ memset(nr_2d_11, 0, sizeof(nr_2d_11));
+ memset(nr_2d_10, 0, sizeof(nr_2d_10));
+ memset(nr_2d_01, 0, sizeof(nr_2d_01));
+ memset(nr_2d_00, 0, sizeof(nr_2d_00));
+
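+ /*
+ * nr_2d_ab[i][j] counts the children whose 8-bit slot value has
+ * bit i equal to a and bit j equal to b, for each candidate bit
+ * pair (i > j).
+ */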
+ switch (type->type_class) {
+ case RCU_JA_LINEAR:
+ {
+ uint8_t nr_child =
+ ja_linear_node_get_nr_child(type, node);
+ unsigned int i;
+
+ for (i = 0; i < nr_child; i++) {
+ struct cds_ja_inode_flag *iter;
+ uint8_t v;
+
+ ja_linear_node_get_ith_pos(type, node, i, &v, &iter);
+ if (!iter)
+ continue;
+ if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
+ continue;
+ for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
+ for (bit_j = 0; bit_j < bit_i; bit_j++) {
+ if ((v & (1U << bit_i)) && (v & (1U << bit_j))) {
+ nr_2d_11[bit_i][bit_j]++;
+ }
+ if ((v & (1U << bit_i)) && !(v & (1U << bit_j))) {
+ nr_2d_10[bit_i][bit_j]++;
+ }
+ if (!(v & (1U << bit_i)) && (v & (1U << bit_j))) {
+ nr_2d_01[bit_i][bit_j]++;
+ }
+ if (!(v & (1U << bit_i)) && !(v & (1U << bit_j))) {
+ nr_2d_00[bit_i][bit_j]++;
+ }
+ }
+ }
+ distrib_nr_child++;
+ }
+ break;
+ }
+ case RCU_JA_POOL:
+ {
+ unsigned int pool_nr;
+
+ for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) {
+ struct cds_ja_inode *pool =
+ ja_pool_node_get_ith_pool(type,
+ node, pool_nr);
+ uint8_t nr_child =
+ ja_linear_node_get_nr_child(type, pool);
+ unsigned int j;
+
+ for (j = 0; j < nr_child; j++) {
+ struct cds_ja_inode_flag *iter;
+ uint8_t v;
+
+ ja_linear_node_get_ith_pos(type, pool,
+ j, &v, &iter);
+ if (!iter)
+ continue;
+ if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
+ continue;
+ for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
+ for (bit_j = 0; bit_j < bit_i; bit_j++) {
+ if ((v & (1U << bit_i)) && (v & (1U << bit_j))) {
+ nr_2d_11[bit_i][bit_j]++;
+ }
+ if ((v & (1U << bit_i)) && !(v & (1U << bit_j))) {
+ nr_2d_10[bit_i][bit_j]++;
+ }
+ if (!(v & (1U << bit_i)) && (v & (1U << bit_j))) {
+ nr_2d_01[bit_i][bit_j]++;
+ }
+ if (!(v & (1U << bit_i)) && !(v & (1U << bit_j))) {
+ nr_2d_00[bit_i][bit_j]++;
+ }
+ }
+ }
+ distrib_nr_child++;
+ }
+ }
+ break;
+ }
+ case RCU_JA_PIGEON:
+ {
+ unsigned int i;
+
+ assert(mode == JA_RECOMPACT_DEL);
+ /*
+ * Pigeon slots are indexed by key digit, so scan every slot
+ * rather than only the first nr_child ones.
+ */
+ for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
+ struct cds_ja_inode_flag *iter;
+
+ iter = ja_pigeon_node_get_ith_pos(type, node, i);
+ if (!iter)
+ continue;
+ if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
+ continue;
+ for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
+ for (bit_j = 0; bit_j < bit_i; bit_j++) {
+ if ((i & (1U << bit_i)) && (i & (1U << bit_j))) {
+ nr_2d_11[bit_i][bit_j]++;
+ }
+ if ((i & (1U << bit_i)) && !(i & (1U << bit_j))) {
+ nr_2d_10[bit_i][bit_j]++;
+ }
+ if (!(i & (1U << bit_i)) && (i & (1U << bit_j))) {
+ nr_2d_01[bit_i][bit_j]++;
+ }
+ if (!(i & (1U << bit_i)) && !(i & (1U << bit_j))) {
+ nr_2d_00[bit_i][bit_j]++;
+ }
+ }
+ }
+ distrib_nr_child++;
+ }
+ break;
+ }
+ case RCU_JA_NULL:
+ assert(mode == JA_RECOMPACT_ADD_NEXT);
+ break;
+ default:
+ assert(0);
+ break;
+ }
+
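+ /* Take the child being added at index n into account. */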
+ if (mode == JA_RECOMPACT_ADD_NEXT || mode == JA_RECOMPACT_ADD_SAME) {
+ for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
+ for (bit_j = 0; bit_j < bit_i; bit_j++) {
+ if ((n & (1U << bit_i)) && (n & (1U << bit_j))) {
+ nr_2d_11[bit_i][bit_j]++;
+ }
+ if ((n & (1U << bit_i)) && !(n & (1U << bit_j))) {
+ nr_2d_10[bit_i][bit_j]++;
+ }
+ if (!(n & (1U << bit_i)) && (n & (1U << bit_j))) {
+ nr_2d_01[bit_i][bit_j]++;
+ }
+ if (!(n & (1U << bit_i)) && !(n & (1U << bit_j))) {
+ nr_2d_00[bit_i][bit_j]++;
+ }
+ }
+ }
+ distrib_nr_child++;
+ }
+
+ /*
+ * The best bit selector is that for which the number of nodes
+ * in each sub-class is closest to one-fourth of the number of
+ * children in the distribution. We calculate the distance using
+ * 4 times the size of the sub-distribution to eliminate
+ * truncation error.
+ */
+ for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
+ for (bit_j = 0; bit_j < bit_i; bit_j++) {
+ int distance_to_best[4];
+
+ distance_to_best[0] = (nr_2d_11[bit_i][bit_j] << 2U) - distrib_nr_child;
+ distance_to_best[1] = (nr_2d_10[bit_i][bit_j] << 2U) - distrib_nr_child;
+ distance_to_best[2] = (nr_2d_01[bit_i][bit_j] << 2U) - distrib_nr_child;
+ distance_to_best[3] = (nr_2d_00[bit_i][bit_j] << 2U) - distrib_nr_child;
+
+ /* Keep the worst (largest positive) of the four distances */
+ if (distance_to_best[1] > 0 && distance_to_best[1] > distance_to_best[0])
+ distance_to_best[0] = distance_to_best[1];
+ if (distance_to_best[2] > 0 && distance_to_best[2] > distance_to_best[0])
+ distance_to_best[0] = distance_to_best[2];
+ if (distance_to_best[3] > 0 && distance_to_best[3] > distance_to_best[0])
+ distance_to_best[0] = distance_to_best[3];
+
+ /*
+ * If our worst distance is better than the overall
+ * best, we become the new best candidate.
+ */
+ if (distance_to_best[0] < overall_best_distance) {
+ overall_best_distance = distance_to_best[0];
+ bitsel[0] = bit_i;
+ bitsel[1] = bit_j;
+ }
+ }
+ }
+
+ dbg_printf("2 dimensions pool bit selection: (%u,%u)\n", bitsel[0], bitsel[1]);
+
+ /* Return our bit selection */
+ _bitsel[0] = bitsel[0];
+ _bitsel[1] = bitsel[1];
+}
+
/*
* ja_node_recompact_add: recompact a node, adding a new child.
- * TODO: for pool type, take selection bit(s) into account.
* Return 0 on success, -EAGAIN if need to retry, or other negative
* error value otherwise.
*/
old_node_flag = *old_node_flag_ptr;
switch (mode) {
- case JA_RECOMPACT:
- new_type_index = old_type_index;
+ case JA_RECOMPACT_ADD_SAME:
+ if (old_type->type_class == RCU_JA_POOL) {
+ /*
+ * For pool type, try redistributing
+ * into a different distribution of same
+ * size if we have not reached limits.
+ */
+ if (shadow_node->nr_child + 1 > old_type->max_child) {
+ new_type_index = old_type_index + 1;
+ } else if (shadow_node->nr_child + 1 < old_type->min_child) {
+ new_type_index = old_type_index - 1;
+ } else {
+ new_type_index = old_type_index;
+ }
+ } else {
+ new_type_index = old_type_index;
+ }
break;
- case JA_RECOMPACT_ADD:
+ case JA_RECOMPACT_ADD_NEXT:
if (!shadow_node || old_type_index == NODE_INDEX_NULL) {
new_type_index = 0;
} else {
- new_type_index = old_type_index + 1;
+ if (old_type->type_class == RCU_JA_POOL) {
+ /*
+ * For pool type, try redistributing
+ * into a different distribution of same
+ * size if we have not reached limits.
+ */
+ if (shadow_node->nr_child + 1 > old_type->max_child) {
+ new_type_index = old_type_index + 1;
+ } else {
+ new_type_index = old_type_index;
+ }
+ } else {
+ new_type_index = old_type_index + 1;
+ }
}
break;
case JA_RECOMPACT_DEL:
if (old_type_index == 0) {
new_type_index = NODE_INDEX_NULL;
} else {
- new_type_index = old_type_index - 1;
+ if (old_type->type_class == RCU_JA_POOL) {
+ /*
+ * For pool type, try redistributing
+ * into a different distribution of same
+ * size if we have not reached limits.
+ */
+ if (shadow_node->nr_child - 1 < old_type->min_child) {
+ new_type_index = old_type_index - 1;
+ } else {
+ new_type_index = old_type_index;
+ }
+ } else {
+ new_type_index = old_type_index - 1;
+ }
}
break;
default:
new_node = alloc_cds_ja_node(new_type);
if (!new_node)
return -ENOMEM;
- new_node_flag = ja_node_flag(new_node, new_type_index);
+
+ if (new_type->type_class == RCU_JA_POOL) {
+ switch (new_type->nr_pool_order) {
+ case 1:
+ {
+ unsigned int node_distrib_bitsel;
+
+ node_distrib_bitsel =
+ ja_node_sum_distribution_1d(mode, ja,
+ old_type_index, old_type,
+ old_node, shadow_node,
+ n, child_node_flag,
+ nullify_node_flag_ptr);
+ assert(!((unsigned long) new_node & JA_POOL_1D_MASK));
+ new_node_flag = ja_node_flag_pool_1d(new_node,
+ new_type_index, node_distrib_bitsel);
+ break;
+ }
+ case 2:
+ {
+ unsigned int node_distrib_bitsel[2];
+
+ ja_node_sum_distribution_2d(mode, ja,
+ old_type_index, old_type,
+ old_node, shadow_node,
+ n, child_node_flag,
+ nullify_node_flag_ptr,
+ node_distrib_bitsel);
+ assert(!((unsigned long) new_node & JA_POOL_1D_MASK));
+ assert(!((unsigned long) new_node & JA_POOL_2D_MASK));
+ new_node_flag = ja_node_flag_pool_2d(new_node,
+ new_type_index, node_distrib_bitsel);
+ break;
+ }
+ default:
+ assert(0);
+ }
+ } else {
+ new_node_flag = ja_node_flag(new_node, new_type_index);
+ }
+
dbg_printf("Recompact inherit lock from %p\n", shadow_node);
new_shadow_node = rcuja_shadow_set(ja->ht, new_node_flag, shadow_node, ja);
if (!new_shadow_node) {
new_node_flag = NULL;
}
- assert(mode != JA_RECOMPACT_ADD || old_type->type_class != RCU_JA_PIGEON);
+ assert(mode != JA_RECOMPACT_ADD_NEXT || old_type->type_class != RCU_JA_PIGEON);
if (new_type_index == NODE_INDEX_NULL)
goto skip_copy;
continue;
if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
continue;
- ret = _ja_node_set_nth(new_type, new_node,
+ ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
new_shadow_node,
v, iter);
if (new_type->type_class == RCU_JA_POOL && ret) {
continue;
if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
continue;
- ret = _ja_node_set_nth(new_type, new_node,
+ ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
new_shadow_node,
v, iter);
if (new_type->type_class == RCU_JA_POOL
break;
}
case RCU_JA_NULL:
- assert(mode == JA_RECOMPACT_ADD);
+ assert(mode == JA_RECOMPACT_ADD_NEXT);
break;
case RCU_JA_PIGEON:
{
continue;
if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
continue;
- ret = _ja_node_set_nth(new_type, new_node,
+ ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
new_shadow_node,
i, iter);
if (new_type->type_class == RCU_JA_POOL && ret) {
}
skip_copy:
- if (mode == JA_RECOMPACT_ADD) {
+ if (mode == JA_RECOMPACT_ADD_NEXT || mode == JA_RECOMPACT_ADD_SAME) {
/* add node */
- ret = _ja_node_set_nth(new_type, new_node,
+ ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
new_shadow_node,
n, child_node_flag);
+ if (new_type->type_class == RCU_JA_POOL && ret) {
+ goto fallback_toosmall;
+ }
assert(!ret);
}
+
+ if (fallback) {
+ dbg_printf("Using fallback for %u children, node type index: %u, mode %s\n",
+ new_shadow_node->nr_child, old_type_index, mode == JA_RECOMPACT_ADD_NEXT ? "add_next" :
+ (mode == JA_RECOMPACT_DEL ? "del" : "add_same"));
+ uatomic_inc(&node_fallback_count_distribution[new_shadow_node->nr_child]);
+ }
+
/* Return pointer to new recompacted node through old_node_flag_ptr */
*old_node_flag_ptr = new_node_flag;
if (old_node) {
RCUJA_SHADOW_CLEAR_FREE_NODE);
assert(!ret);
- /* Choose fallback type: pigeon */
+ switch (mode) {
+ case JA_RECOMPACT_ADD_SAME:
+ /*
+ * JA_RECOMPACT_ADD_SAME is only triggered if a linear
+ * node within a pool has unused entries. It should
+ * therefore _never_ be too small.
+ */
+ assert(0);
+
+ /* Fall-through */
+ case JA_RECOMPACT_ADD_NEXT:
+ {
+ const struct cds_ja_type *next_type;
+
+ /*
+ * Recompaction attempt on add failed. Should only
+ * happen if target node type is pool. Caused by
+ * hard-to-split distribution. Recompact using the next
+ * distribution size.
+ */
+ assert(new_type->type_class == RCU_JA_POOL);
+ next_type = &ja_types[new_type_index + 1];
+ /*
+ * Try going to the next pool size if our population
+ * fits within its range. This is not flagged as a
+ * fallback.
+ */
+ if (shadow_node->nr_child + 1 >= next_type->min_child
+ && shadow_node->nr_child + 1 <= next_type->max_child) {
+ new_type_index++;
+ goto retry;
+ } else {
+ new_type_index++;
+ dbg_printf("Add fallback to type %d\n", new_type_index);
+ uatomic_inc(&ja->nr_fallback);
+ fallback = 1;
+ goto retry;
+ }
+ break;
+ }
+ case JA_RECOMPACT_DEL:
+ /*
+ * Recompaction attempt on delete failed. Should only
+ * happen if target node type is pool. This is caused by
+ * a hard-to-split distribution. Recompact on same node
+ * size, but flag current node as "fallback" to ensure
+ * we don't attempt recompaction before some activity
+ * has reshuffled our node.
+ */
+ assert(new_type->type_class == RCU_JA_POOL);
+ new_type_index = old_type_index;
+ dbg_printf("Delete fallback keeping type %d\n", new_type_index);
+ uatomic_inc(&ja->nr_fallback);
+ fallback = 1;
+ goto retry;
+ default:
+ assert(0);
+ return -EINVAL;
+ }
+
+ /*
+ * Last resort fallback: pigeon.
+ */
new_type_index = (1UL << JA_TYPE_BITS) - 1;
dbg_printf("Fallback to type %d\n", new_type_index);
uatomic_inc(&ja->nr_fallback);
node = ja_node_ptr(*node_flag);
type_index = ja_node_type(*node_flag);
type = &ja_types[type_index];
- ret = _ja_node_set_nth(type, node, shadow_node,
+ ret = _ja_node_set_nth(type, node, *node_flag, shadow_node,
n, child_node_flag);
switch (ret) {
case -ENOSPC:
- /* Not enough space in node, need to recompact. */
- ret = ja_node_recompact(JA_RECOMPACT_ADD, ja, type_index, type, node,
+ /* Not enough space in node, need to recompact to next type. */
+ ret = ja_node_recompact(JA_RECOMPACT_ADD_NEXT, ja, type_index, type, node,
shadow_node, node_flag, n, child_node_flag, NULL);
break;
case -ERANGE:
/* Node needs to be recompacted. */
- ret = ja_node_recompact(JA_RECOMPACT, ja, type_index, type, node,
+ ret = ja_node_recompact(JA_RECOMPACT_ADD_SAME, ja, type_index, type, node,
shadow_node, node_flag, n, child_node_flag, NULL);
break;
}
node = ja_node_ptr(*parent_node_flag_ptr);
type_index = ja_node_type(*parent_node_flag_ptr);
type = &ja_types[type_index];
- ret = _ja_node_clear_ptr(type, node, shadow_node, node_flag_ptr, n);
+ ret = _ja_node_clear_ptr(type, node, *parent_node_flag_ptr, shadow_node, node_flag_ptr, n);
if (ret == -EFBIG) {
- /* Should to try recompaction. */
+ /* Should try recompaction. */
ret = ja_node_recompact(JA_RECOMPACT_DEL, ja, type_index, type, node,
shadow_node, parent_node_flag_ptr, n, NULL,
node_flag_ptr);
uint8_t iter_key;
iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1)));
- node_flag = ja_node_get_nth(node_flag, NULL,
+ node_flag = ja_node_get_nth(node_flag, NULL, NULL, NULL,
iter_key);
dbg_printf("cds_ja_lookup iter key lookup %u finds node_flag %p\n",
(unsigned int) iter_key, node_flag);
*/
static
int ja_attach_node(struct cds_ja *ja,
+ struct cds_ja_inode_flag **attach_node_flag_ptr,
+ struct cds_ja_inode_flag *attach_node_flag,
struct cds_ja_inode_flag **node_flag_ptr,
struct cds_ja_inode_flag *node_flag,
struct cds_ja_inode_flag *parent_node_flag,
}
}
- if (*node_flag_ptr != NULL) {
+ if (node_flag_ptr && ja_node_ptr(*node_flag_ptr)) {
+ /*
+ * Target node has been updated between RCU lookup and
+ * lock acquisition. We need to re-try lookup and
+ * attach.
+ */
+ ret = -EAGAIN;
+ goto unlock_parent;
+ }
+
+ if (attach_node_flag_ptr && ja_node_ptr(*attach_node_flag_ptr) !=
+ ja_node_ptr(attach_node_flag)) {
/*
- * Attach point is non-NULL: it has been updated between
- * RCU lookup and lock acquisition. We need to re-try
- * lookup and attach.
+ * Target node has been updated between RCU lookup and
+ * lock acquisition. We need to re-try lookup and
+ * attach.
*/
ret = -EAGAIN;
goto unlock_parent;
/* Publish new branch */
dbg_printf("Publish branch %p, replacing %p\n",
- iter_node_flag, *node_flag_ptr);
- rcu_assign_pointer(*node_flag_ptr, iter_node_flag);
+ iter_node_flag, *attach_node_flag_ptr);
+ rcu_assign_pointer(*attach_node_flag_ptr, iter_node_flag);
/* Success */
ret = 0;
static
int ja_chain_node(struct cds_ja *ja,
struct cds_ja_inode_flag *parent_node_flag,
+ struct cds_ja_inode_flag **node_flag_ptr,
+ struct cds_ja_inode_flag *node_flag,
struct cds_hlist_head *head,
struct cds_ja_node *node)
{
struct cds_ja_shadow_node *shadow_node;
+ int ret = 0;
shadow_node = rcuja_shadow_lookup_lock(ja->ht, parent_node_flag);
if (!shadow_node) {
- dbg_printf("AGAIN3\n");
return -EAGAIN;
}
+ if (ja_node_ptr(*node_flag_ptr) != ja_node_ptr(node_flag)) {
+ ret = -EAGAIN;
+ goto end;
+ }
cds_hlist_add_head_rcu(&node->list, head);
+end:
rcuja_shadow_unlock(shadow_node);
- return 0;
+ return ret;
}
int cds_ja_add(struct cds_ja *ja, uint64_t key,
struct cds_ja_node *new_node)
{
unsigned int tree_depth, i;
- struct cds_ja_inode_flag **node_flag_ptr; /* in parent */
+ struct cds_ja_inode_flag **attach_node_flag_ptr,
+ **node_flag_ptr;
struct cds_ja_inode_flag *node_flag,
*parent_node_flag,
- *parent2_node_flag;
+ *parent2_node_flag,
+ *attach_node_flag;
int ret;
if (caa_unlikely(key > ja->key_max)) {
parent2_node_flag = NULL;
parent_node_flag =
(struct cds_ja_inode_flag *) &ja->root; /* Use root ptr address as key for mutex */
+ attach_node_flag_ptr = &ja->root;
+ attach_node_flag = rcu_dereference(ja->root);
node_flag_ptr = &ja->root;
node_flag = rcu_dereference(ja->root);
for (i = 1; i < tree_depth; i++) {
uint8_t iter_key;
- dbg_printf("cds_ja_add iter node_flag_ptr %p node_flag %p\n",
- *node_flag_ptr, node_flag);
+ dbg_printf("cds_ja_add iter attach_node_flag_ptr %p node_flag_ptr %p node_flag %p\n",
+ attach_node_flag_ptr, node_flag_ptr, node_flag);
if (!ja_node_ptr(node_flag)) {
- ret = ja_attach_node(ja, node_flag_ptr,
- parent_node_flag, parent2_node_flag,
+ ret = ja_attach_node(ja, attach_node_flag_ptr,
+ attach_node_flag,
+ node_flag_ptr,
+ node_flag,
+ parent_node_flag,
+ parent2_node_flag,
key, i, new_node);
if (ret == -EAGAIN || ret == -EEXIST)
goto retry;
parent2_node_flag = parent_node_flag;
parent_node_flag = node_flag;
node_flag = ja_node_get_nth(node_flag,
+ &attach_node_flag_ptr,
+ &attach_node_flag,
&node_flag_ptr,
iter_key);
- dbg_printf("cds_ja_add iter key lookup %u finds node_flag %p node_flag_ptr %p\n",
- (unsigned int) iter_key, node_flag, *node_flag_ptr);
+ dbg_printf("cds_ja_add iter key lookup %u finds node_flag %p attach_node_flag_ptr %p node_flag_ptr %p\n",
+ (unsigned int) iter_key, node_flag,
+ attach_node_flag_ptr,
+ node_flag_ptr);
}
/*
* level, or chain it if key is already present.
*/
if (!ja_node_ptr(node_flag)) {
- dbg_printf("cds_ja_add last node_flag_ptr %p node_flag %p\n",
- *node_flag_ptr, node_flag);
- ret = ja_attach_node(ja, node_flag_ptr, parent_node_flag,
+ dbg_printf("cds_ja_add attach_node_flag_ptr %p node_flag_ptr %p node_flag %p\n",
+ attach_node_flag_ptr, node_flag_ptr, node_flag);
+ ret = ja_attach_node(ja, attach_node_flag_ptr,
+ attach_node_flag,
+ node_flag_ptr, node_flag, parent_node_flag,
parent2_node_flag, key, i, new_node);
} else {
ret = ja_chain_node(ja,
parent_node_flag,
- (struct cds_hlist_head *) node_flag_ptr,
+ node_flag_ptr,
+ node_flag,
+ (struct cds_hlist_head *) attach_node_flag_ptr,
new_node);
}
if (ret == -EAGAIN || ret == -EEXIST)
ret = -EAGAIN;
goto end;
}
- assert(shadow_node->nr_child > 0);
shadow_nodes[nr_shadow++] = shadow_node;
- if (shadow_node->nr_child == 1)
+
+ /*
+ * Check if node has been removed between RCU
+ * lookup and lock acquisition.
+ */
+ assert(snapshot_ptr[i + 1]);
+ if (ja_node_ptr(*snapshot_ptr[i + 1])
+ != ja_node_ptr(snapshot[i + 1])) {
+ ret = -ENOENT;
+ goto end;
+ }
+
+ assert(shadow_node->nr_child > 0);
+ if (shadow_node->nr_child == 1 && i > 1)
nr_clear++;
nr_branch++;
if (shadow_node->nr_child > 1 || i == 1) {
goto end;
}
shadow_nodes[nr_shadow++] = shadow_node;
+
+ /*
+ * Check if node has been removed between RCU
+ * lookup and lock acquisition.
+ */
+ assert(snapshot_ptr[i]);
+ if (ja_node_ptr(*snapshot_ptr[i])
+ != ja_node_ptr(snapshot[i])) {
+ ret = -ENOENT;
+ goto end;
+ }
+
node_flag_ptr = snapshot_ptr[i + 1];
n = snapshot_n[i + 1];
parent_node_flag_ptr = snapshot_ptr[i];
parent_node_flag = snapshot[i];
+
if (i > 1) {
/*
* Lock parent's parent, in case we need
goto end;
}
shadow_nodes[nr_shadow++] = shadow_node;
+
+ /*
+ * Check if node has been removed between RCU
+ * lookup and lock acquisition.
+ */
+ assert(snapshot_ptr[i - 1]);
+ if (ja_node_ptr(*snapshot_ptr[i - 1])
+ != ja_node_ptr(snapshot[i - 1])) {
+ ret = -ENOENT;
+ goto end;
+ }
}
+
break;
}
}
- /*
- * Check if node has been removed between RCU lookup and lock
- * acquisition.
- */
- if (!*node_flag_ptr) {
- ret = -ENOENT;
- goto end;
- }
-
/*
* At this point, we want to delete all nodes that are about to
* be removed from shadow_nodes (except the last one, which is
* either the root or the parent of the upmost node with 1
- * child). OK to as to free lock here, because RCU read lock is
- * held, and free only performed in call_rcu.
+ * child). OK to free lock here, because RCU read lock is held,
+ * and free only performed in call_rcu.
*/
for (i = 0; i < nr_clear; i++) {
static
int ja_unchain_node(struct cds_ja *ja,
struct cds_ja_inode_flag *parent_node_flag,
- struct cds_hlist_head *head,
+ struct cds_ja_inode_flag **node_flag_ptr,
+ struct cds_ja_inode_flag *node_flag,
struct cds_ja_node *node)
{
struct cds_ja_shadow_node *shadow_node;
struct cds_hlist_node *hlist_node;
- int ret = 0, count = 0;
+ struct cds_hlist_head hlist_head;
+ int ret = 0, count = 0, found = 0;
shadow_node = rcuja_shadow_lookup_lock(ja->ht, parent_node_flag);
if (!shadow_node)
return -EAGAIN;
+ if (ja_node_ptr(*node_flag_ptr) != ja_node_ptr(node_flag)) {
+ ret = -EAGAIN;
+ goto end;
+ }
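+ /*
+ * The last-level slot holds the head of the hlist of duplicates
+ * for this key: rebuild an hlist head from its current value.
+ */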
+ hlist_head.next = (struct cds_hlist_node *) ja_node_ptr(node_flag);
/*
* Retry if another thread removed all but one of the duplicates
- * since check (that was performed without lock).
+ * since our check (which was performed without the lock).
+ * Ensure that the node we are about to remove is still in the
+ * list (while holding the lock).
*/
- cds_hlist_for_each_rcu(hlist_node, head, list) {
+ cds_hlist_for_each_rcu(hlist_node, &hlist_head) {
+ if (count == 0) {
+ /*
+ * FIXME: currently a work-around: point the first
+ * element's prev at the slot holding the list, so that
+ * deleting the first element also updates
+ * *node_flag_ptr.
+ */
+ hlist_node->prev = (struct cds_hlist_node *) node_flag_ptr;
+ }
count++;
+ if (hlist_node == &node->list)
+ found++;
}
-
- if (count == 1) {
+ assert(found <= 1);
+ if (!found || count == 1) {
ret = -EAGAIN;
goto end;
}
cds_hlist_del_rcu(&node->list);
+ /*
+ * Validate that we indeed removed the node from linked list.
+ */
+ assert(ja_node_ptr(*node_flag_ptr) != (struct cds_ja_inode *) node);
end:
rcuja_shadow_unlock(shadow_node);
return ret;
struct cds_ja_inode_flag **snapshot_ptr[JA_MAX_DEPTH];
uint8_t snapshot_n[JA_MAX_DEPTH];
struct cds_ja_inode_flag *node_flag;
- struct cds_ja_inode_flag **prev_node_flag_ptr;
+ struct cds_ja_inode_flag **prev_node_flag_ptr,
+ **node_flag_ptr;
int nr_snapshot;
int ret;
snapshot[nr_snapshot++] = (struct cds_ja_inode_flag *) &ja->root;
node_flag = rcu_dereference(ja->root);
prev_node_flag_ptr = &ja->root;
+ node_flag_ptr = &ja->root;
/* Iterate on all internal levels */
for (i = 1; i < tree_depth; i++) {
snapshot[nr_snapshot++] = node_flag;
node_flag = ja_node_get_nth(node_flag,
&prev_node_flag_ptr,
+ NULL,
+ &node_flag_ptr,
iter_key);
dbg_printf("cds_ja_del iter key lookup %u finds node_flag %p, prev_node_flag_ptr %p\n",
(unsigned int) iter_key, node_flag,
prev_node_flag_ptr);
}
-
/*
* We reached bottom of tree, try to find the node we are trying
* to remove. Fail if we cannot find it.
snapshot_n, nr_snapshot, key, node);
} else {
ret = ja_unchain_node(ja, snapshot[nr_snapshot - 1],
- &hlist_head, match);
+ node_flag_ptr, node_flag, match);
}
}
/*
}
}
+static
+void print_debug_fallback_distribution(void)
+{
+ int i;
+
+ fprintf(stderr, "Fallback node distribution:\n");
+ for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
+ if (!node_fallback_count_distribution[i])
+ continue;
+ fprintf(stderr, " %3u: %4lu\n",
+ i, node_fallback_count_distribution[i]);
+ }
+}
+
/*
* There should be no more concurrent add to the judy array while it is
* being destroyed (ensured by the caller).
fprintf(stderr,
"[warning] RCU Judy Array used %lu fallback node(s)\n",
uatomic_read(&ja->nr_fallback));
+ fprintf(stderr, "Nodes allocated: %lu, Nodes freed: %lu. Fallback ratio: %g\n",
+ uatomic_read(&nr_nodes_allocated),
+ uatomic_read(&nr_nodes_freed),
+ (double) uatomic_read(&ja->nr_fallback) / (double) uatomic_read(&nr_nodes_allocated));
+ print_debug_fallback_distribution();
free(ja);
return 0;
}