X-Git-Url: http://git.liburcu.org/?a=blobdiff_plain;f=rcuja%2Frcuja.c;h=b0d33221ae6c730ed756453b655dba5564fe38b7;hb=9be99d4ad3a507adf0fb01ecb361e8efff3f130f;hp=db5072a2133f469672c2010df73e2485e09f2a57;hpb=1216b3d282b4b3cb4a515dee30997fd2dc078776;p=userspace-rcu.git

diff --git a/rcuja/rcuja.c b/rcuja/rcuja.c
index db5072a..b0d3322 100644
--- a/rcuja/rcuja.c
+++ b/rcuja/rcuja.c
@@ -24,6 +24,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -35,6 +36,10 @@
 #include "rcuja-internal.h"
 #include "bitfield.h"
 
+#ifndef abs_int
+#define abs_int(a)	((int) (a) > 0 ? (int) (a) : -((int) (a)))
+#endif
+
 enum cds_ja_type_class {
 	RCU_JA_LINEAR = 0,	/* Type A */
 			/* 32-bit: 1 to 25 children, 8 to 128 bytes */
@@ -120,8 +125,8 @@ const struct cds_ja_type ja_types[] = {
 	{ .type_class = RCU_JA_POOL, .min_child = 45, .max_child = ja_type_6_max_child, .max_linear_child = ja_type_6_max_linear_child, .order = 9, .nr_pool_order = ja_type_6_nr_pool_order, .pool_size_order = 7, },
 
 	/*
-	 * TODO: Upon node removal below min_child, if child pool is
-	 * filled beyond capacity, we need to roll back to pigeon.
+	 * Upon node removal below min_child, if child pool is filled
+	 * beyond capacity, we roll back to pigeon.
 	 */
 	{ .type_class = RCU_JA_PIGEON, .min_child = 89, .max_child = ja_type_7_max_child, .order = 10, },
 
@@ -168,8 +173,8 @@ const struct cds_ja_type ja_types[] = {
 	{ .type_class = RCU_JA_POOL, .min_child = 51, .max_child = ja_type_6_max_child, .max_linear_child = ja_type_6_max_linear_child, .order = 10, .nr_pool_order = ja_type_6_nr_pool_order, .pool_size_order = 8, },
 
 	/*
-	 * TODO: Upon node removal below min_child, if child pool is
-	 * filled beyond capacity, we need to roll back to pigeon.
+	 * Upon node removal below min_child, if child pool is filled
+	 * beyond capacity, we roll back to pigeon.
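+	 *
+	 * (Editor's note, not part of the original commit: a pool type
+	 * splits its children among small linear sub-nodes by selected
+	 * key bits, so a skewed key distribution can overflow one
+	 * sub-node even when the total child count fits the pool; the
+	 * pigeon type below is the safe fallback in that case.)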
*/ { .type_class = RCU_JA_PIGEON, .min_child = 101, .max_child = ja_type_7_max_child, .order = 11, }, @@ -234,19 +239,78 @@ struct cds_ja_inode { }; enum ja_recompact { - JA_RECOMPACT, - JA_RECOMPACT_ADD, + JA_RECOMPACT_ADD_SAME, + JA_RECOMPACT_ADD_NEXT, JA_RECOMPACT_DEL, }; +static +unsigned long node_fallback_count_distribution[JA_ENTRY_PER_NODE]; +static +unsigned long nr_nodes_allocated, nr_nodes_freed; + +static +struct cds_ja_inode *_ja_node_mask_ptr(struct cds_ja_inode_flag *node) +{ + return (struct cds_ja_inode *) (((unsigned long) node) & JA_PTR_MASK); +} + +unsigned long ja_node_type(struct cds_ja_inode_flag *node) +{ + unsigned long type; + + if (_ja_node_mask_ptr(node) == NULL) { + return NODE_INDEX_NULL; + } + type = (unsigned int) ((unsigned long) node & JA_TYPE_MASK); + assert(type < (1UL << JA_TYPE_BITS)); + return type; +} + +struct cds_ja_inode *ja_node_ptr(struct cds_ja_inode_flag *node) +{ + unsigned long type_index = ja_node_type(node); + const struct cds_ja_type *type; + + type = &ja_types[type_index]; + switch (type->type_class) { + case RCU_JA_LINEAR: + case RCU_JA_PIGEON: /* fall-through */ + case RCU_JA_NULL: /* fall-through */ + default: /* fall-through */ + return _ja_node_mask_ptr(node); + case RCU_JA_POOL: + switch (type->nr_pool_order) { + case 1: + return (struct cds_ja_inode *) (((unsigned long) node) & ~(JA_POOL_1D_MASK | JA_TYPE_MASK)); + case 2: + return (struct cds_ja_inode *) (((unsigned long) node) & ~(JA_POOL_2D_MASK | JA_POOL_1D_MASK | JA_TYPE_MASK)); + default: + assert(0); + } + } +} + struct cds_ja_inode *alloc_cds_ja_node(const struct cds_ja_type *ja_type) { - return calloc(1U << ja_type->order, sizeof(char)); + size_t len = 1U << ja_type->order; + void *p; + int ret; + + ret = posix_memalign(&p, len, len); + if (ret || !p) { + return NULL; + } + memset(p, 0, len); + uatomic_inc(&nr_nodes_allocated); + return p; } void free_cds_ja_node(struct cds_ja_inode *node) { free(node); + if (node) + uatomic_inc(&nr_nodes_freed); } #define __JA_ALIGN_MASK(v, mask) (((v) + (mask)) & ~(mask)) @@ -276,7 +340,7 @@ uint8_t ja_linear_node_get_nr_child(const struct cds_ja_type *type, static struct cds_ja_inode_flag *ja_linear_node_get_nth(const struct cds_ja_type *type, struct cds_ja_inode *node, - struct cds_ja_inode_flag ***child_node_flag_ptr, + struct cds_ja_inode_flag ***node_flag_ptr, uint8_t n) { uint8_t nr_child; @@ -297,12 +361,15 @@ struct cds_ja_inode_flag *ja_linear_node_get_nth(const struct cds_ja_type *type, if (CMM_LOAD_SHARED(values[i]) == n) break; } - if (i >= nr_child) + if (i >= nr_child) { + if (caa_unlikely(node_flag_ptr)) + *node_flag_ptr = NULL; return NULL; + } pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]); ptr = rcu_dereference(pointers[i]); - if (caa_unlikely(child_node_flag_ptr) && ptr) - *child_node_flag_ptr = &pointers[i]; + if (caa_unlikely(node_flag_ptr)) + *node_flag_ptr = &pointers[i]; return ptr; } @@ -328,19 +395,44 @@ void ja_linear_node_get_ith_pos(const struct cds_ja_type *type, static struct cds_ja_inode_flag *ja_pool_node_get_nth(const struct cds_ja_type *type, struct cds_ja_inode *node, - struct cds_ja_inode_flag ***child_node_flag_ptr, + struct cds_ja_inode_flag *node_flag, + struct cds_ja_inode_flag ***node_flag_ptr, uint8_t n) { struct cds_ja_inode *linear; assert(type->type_class == RCU_JA_POOL); - /* - * TODO: currently, we select the pool by highest bits. We - * should support various encodings. 
- */ - linear = (struct cds_ja_inode *) - &node->u.data[((unsigned long) n >> (CHAR_BIT - type->nr_pool_order)) << type->pool_size_order]; - return ja_linear_node_get_nth(type, linear, child_node_flag_ptr, n); + + switch (type->nr_pool_order) { + case 1: + { + unsigned long bitsel, index; + + bitsel = ja_node_pool_1d_bitsel(node_flag); + assert(bitsel < CHAR_BIT); + index = ((unsigned long) n >> bitsel) & 0x1; + linear = (struct cds_ja_inode *) &node->u.data[index << type->pool_size_order]; + break; + } + case 2: + { + unsigned long bitsel[2], index[2], rindex; + + ja_node_pool_2d_bitsel(node_flag, bitsel); + assert(bitsel[0] < CHAR_BIT); + assert(bitsel[1] < CHAR_BIT); + index[0] = ((unsigned long) n >> bitsel[0]) & 0x1; + index[0] <<= 1; + index[1] = ((unsigned long) n >> bitsel[1]) & 0x1; + rindex = index[0] | index[1]; + linear = (struct cds_ja_inode *) &node->u.data[rindex << type->pool_size_order]; + break; + } + default: + linear = NULL; + assert(0); + } + return ja_linear_node_get_nth(type, linear, node_flag_ptr, n); } static @@ -356,18 +448,20 @@ struct cds_ja_inode *ja_pool_node_get_ith_pool(const struct cds_ja_type *type, static struct cds_ja_inode_flag *ja_pigeon_node_get_nth(const struct cds_ja_type *type, struct cds_ja_inode *node, - struct cds_ja_inode_flag ***child_node_flag_ptr, + struct cds_ja_inode_flag ***node_flag_ptr, uint8_t n) { - struct cds_ja_inode_flag **child_node_flag; + struct cds_ja_inode_flag **child_node_flag_ptr; + struct cds_ja_inode_flag *child_node_flag; assert(type->type_class == RCU_JA_PIGEON); - child_node_flag = &((struct cds_ja_inode_flag **) node->u.data)[n]; + child_node_flag_ptr = &((struct cds_ja_inode_flag **) node->u.data)[n]; + child_node_flag = rcu_dereference(*child_node_flag_ptr); dbg_printf("ja_pigeon_node_get_nth child_node_flag_ptr %p\n", - child_node_flag); - if (caa_unlikely(child_node_flag_ptr) && *child_node_flag) - *child_node_flag_ptr = child_node_flag; - return rcu_dereference(*child_node_flag); + child_node_flag_ptr); + if (caa_unlikely(node_flag_ptr)) + *node_flag_ptr = child_node_flag_ptr; + return child_node_flag; } static @@ -383,8 +477,8 @@ struct cds_ja_inode_flag *ja_pigeon_node_get_ith_pos(const struct cds_ja_type *t * node_flag is already rcu_dereference'd. 
*/ static -struct cds_ja_inode_flag * ja_node_get_nth(struct cds_ja_inode_flag *node_flag, - struct cds_ja_inode_flag ***child_node_flag_ptr, +struct cds_ja_inode_flag *ja_node_get_nth(struct cds_ja_inode_flag *node_flag, + struct cds_ja_inode_flag ***node_flag_ptr, uint8_t n) { unsigned int type_index; @@ -399,13 +493,13 @@ struct cds_ja_inode_flag * ja_node_get_nth(struct cds_ja_inode_flag *node_flag, switch (type->type_class) { case RCU_JA_LINEAR: return ja_linear_node_get_nth(type, node, - child_node_flag_ptr, n); + node_flag_ptr, n); case RCU_JA_POOL: - return ja_pool_node_get_nth(type, node, - child_node_flag_ptr, n); + return ja_pool_node_get_nth(type, node, node_flag, + node_flag_ptr, n); case RCU_JA_PIGEON: return ja_pigeon_node_get_nth(type, node, - child_node_flag_ptr, n); + node_flag_ptr, n); default: assert(0); return (void *) -1UL; @@ -427,7 +521,8 @@ int ja_linear_node_set_nth(const struct cds_ja_type *type, assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL); nr_child_ptr = &node->u.data[0]; - dbg_printf("linear set nth: nr_child_ptr %p\n", nr_child_ptr); + dbg_printf("linear set nth: n %u, nr_child_ptr %p\n", + (unsigned int) n, nr_child_ptr); nr_child = *nr_child_ptr; assert(nr_child <= type->max_linear_child); @@ -473,6 +568,7 @@ int ja_linear_node_set_nth(const struct cds_ja_type *type, static int ja_pool_node_set_nth(const struct cds_ja_type *type, struct cds_ja_inode *node, + struct cds_ja_inode_flag *node_flag, struct cds_ja_shadow_node *shadow_node, uint8_t n, struct cds_ja_inode_flag *child_node_flag) @@ -480,8 +576,37 @@ int ja_pool_node_set_nth(const struct cds_ja_type *type, struct cds_ja_inode *linear; assert(type->type_class == RCU_JA_POOL); - linear = (struct cds_ja_inode *) - &node->u.data[((unsigned long) n >> (CHAR_BIT - type->nr_pool_order)) << type->pool_size_order]; + + switch (type->nr_pool_order) { + case 1: + { + unsigned long bitsel, index; + + bitsel = ja_node_pool_1d_bitsel(node_flag); + assert(bitsel < CHAR_BIT); + index = ((unsigned long) n >> bitsel) & 0x1; + linear = (struct cds_ja_inode *) &node->u.data[index << type->pool_size_order]; + break; + } + case 2: + { + unsigned long bitsel[2], index[2], rindex; + + ja_node_pool_2d_bitsel(node_flag, bitsel); + assert(bitsel[0] < CHAR_BIT); + assert(bitsel[1] < CHAR_BIT); + index[0] = ((unsigned long) n >> bitsel[0]) & 0x1; + index[0] <<= 1; + index[1] = ((unsigned long) n >> bitsel[1]) & 0x1; + rindex = index[0] | index[1]; + linear = (struct cds_ja_inode *) &node->u.data[rindex << type->pool_size_order]; + break; + } + default: + linear = NULL; + assert(0); + } + return ja_linear_node_set_nth(type, linear, shadow_node, n, child_node_flag); } @@ -511,6 +636,7 @@ int ja_pigeon_node_set_nth(const struct cds_ja_type *type, static int _ja_node_set_nth(const struct cds_ja_type *type, struct cds_ja_inode *node, + struct cds_ja_inode_flag *node_flag, struct cds_ja_shadow_node *shadow_node, uint8_t n, struct cds_ja_inode_flag *child_node_flag) @@ -520,7 +646,7 @@ int _ja_node_set_nth(const struct cds_ja_type *type, return ja_linear_node_set_nth(type, node, shadow_node, n, child_node_flag); case RCU_JA_POOL: - return ja_pool_node_set_nth(type, node, shadow_node, n, + return ja_pool_node_set_nth(type, node, node_flag, shadow_node, n, child_node_flag); case RCU_JA_PIGEON: return ja_pigeon_node_set_nth(type, node, shadow_node, n, @@ -547,18 +673,17 @@ int ja_linear_node_clear_ptr(const struct cds_ja_type *type, assert(type->type_class == RCU_JA_LINEAR || type->type_class == 
			RCU_JA_POOL);
 	nr_child_ptr = &node->u.data[0];
-	dbg_printf("linear clear ptr: nr_child_ptr %p\n", nr_child_ptr);
 	nr_child = *nr_child_ptr;
 	assert(nr_child <= type->max_linear_child);
 
-	if (shadow_node->fallback_removal_count) {
-		shadow_node->fallback_removal_count--;
-	} else {
+	if (type->type_class == RCU_JA_LINEAR) {
+		assert(!shadow_node->fallback_removal_count);
 		if (shadow_node->nr_child <= type->min_child) {
 			/* We need to try recompacting the node */
 			return -EFBIG;
 		}
 	}
+	dbg_printf("linear clear ptr: nr_child_ptr %p\n", nr_child_ptr);
 	assert(*node_flag_ptr != NULL);
 	rcu_assign_pointer(*node_flag_ptr, NULL);
 	/*
@@ -573,13 +698,13 @@ int ja_linear_node_clear_ptr(const struct cds_ja_type *type,
 		(unsigned int) CMM_LOAD_SHARED(*nr_child_ptr),
 		(unsigned int) shadow_node->nr_child,
 		node, shadow_node);
-
 	return 0;
 }
 
 static
 int ja_pool_node_clear_ptr(const struct cds_ja_type *type,
 		struct cds_ja_inode *node,
+		struct cds_ja_inode_flag *node_flag,
 		struct cds_ja_shadow_node *shadow_node,
 		struct cds_ja_inode_flag **node_flag_ptr,
 		uint8_t n)
@@ -587,8 +712,45 @@ int ja_pool_node_clear_ptr(const struct cds_ja_type *type,
 	struct cds_ja_inode *linear;
 
 	assert(type->type_class == RCU_JA_POOL);
-	linear = (struct cds_ja_inode *)
-		&node->u.data[((unsigned long) n >> (CHAR_BIT - type->nr_pool_order)) << type->pool_size_order];
+
+	if (shadow_node->fallback_removal_count) {
+		shadow_node->fallback_removal_count--;
+	} else {
+		/* We should try recompacting the node */
+		if (shadow_node->nr_child <= type->min_child)
+			return -EFBIG;
+	}
+
+	switch (type->nr_pool_order) {
+	case 1:
+	{
+		unsigned long bitsel, index;
+
+		bitsel = ja_node_pool_1d_bitsel(node_flag);
+		assert(bitsel < CHAR_BIT);
+		index = ((unsigned long) n >> bitsel) & 0x1;
+		linear = (struct cds_ja_inode *) &node->u.data[index << type->pool_size_order];
+		break;
+	}
+	case 2:
+	{
+		unsigned long bitsel[2], index[2], rindex;
+
+		ja_node_pool_2d_bitsel(node_flag, bitsel);
+		assert(bitsel[0] < CHAR_BIT);
+		assert(bitsel[1] < CHAR_BIT);
+		index[0] = ((unsigned long) n >> bitsel[0]) & 0x1;
+		index[0] <<= 1;
+		index[1] = ((unsigned long) n >> bitsel[1]) & 0x1;
+		rindex = index[0] | index[1];
+		linear = (struct cds_ja_inode *) &node->u.data[rindex << type->pool_size_order];
+		break;
+	}
+	default:
+		linear = NULL;
+		assert(0);
+	}
+
 	return ja_linear_node_clear_ptr(type, linear, shadow_node, node_flag_ptr);
 }
 
@@ -599,6 +761,14 @@ int ja_pigeon_node_clear_ptr(const struct cds_ja_type *type,
 		struct cds_ja_inode_flag **node_flag_ptr)
 {
 	assert(type->type_class == RCU_JA_PIGEON);
+
+	if (shadow_node->fallback_removal_count) {
+		shadow_node->fallback_removal_count--;
+	} else {
+		/* We should try recompacting the node */
+		if (shadow_node->nr_child <= type->min_child)
+			return -EFBIG;
+	}
 	dbg_printf("ja_pigeon_node_clear_ptr: clearing ptr: %p\n", *node_flag_ptr);
 	rcu_assign_pointer(*node_flag_ptr, NULL);
 	shadow_node->nr_child--;
@@ -612,6 +782,7 @@ int ja_pigeon_node_clear_ptr(const struct cds_ja_type *type,
 static
 int _ja_node_clear_ptr(const struct cds_ja_type *type,
 		struct cds_ja_inode *node,
+		struct cds_ja_inode_flag *node_flag,
 		struct cds_ja_shadow_node *shadow_node,
 		struct cds_ja_inode_flag **node_flag_ptr,
 		uint8_t n)
@@ -620,7 +791,7 @@ int _ja_node_clear_ptr(const struct cds_ja_type *type,
 	case RCU_JA_LINEAR:
 		return ja_linear_node_clear_ptr(type, node, shadow_node, node_flag_ptr);
 	case RCU_JA_POOL:
-		return ja_pool_node_clear_ptr(type, node, shadow_node, node_flag_ptr, n);
+		return ja_pool_node_clear_ptr(type, node, node_flag,
				shadow_node, node_flag_ptr, n);
 	case RCU_JA_PIGEON:
 		return ja_pigeon_node_clear_ptr(type, node, shadow_node, node_flag_ptr);
 	case RCU_JA_NULL:
@@ -633,9 +804,376 @@ int _ja_node_clear_ptr(const struct cds_ja_type *type,
 	return 0;
 }
 
+/*
+ * Calculate bit distribution. Returns the bit (0 to 7) that best splits
+ * the distribution into two sub-distributions containing as close to the
+ * same number of elements as possible.
+ */
+static
+unsigned int ja_node_sum_distribution_1d(enum ja_recompact mode,
+		struct cds_ja *ja,
+		unsigned int type_index,
+		const struct cds_ja_type *type,
+		struct cds_ja_inode *node,
+		struct cds_ja_shadow_node *shadow_node,
+		uint8_t n,
+		struct cds_ja_inode_flag *child_node_flag,
+		struct cds_ja_inode_flag **nullify_node_flag_ptr)
+{
+	uint8_t nr_one[JA_BITS_PER_BYTE];
+	unsigned int bitsel = 0, bit_i, overall_best_distance = UINT_MAX;
+	unsigned int distrib_nr_child = 0;
+
+	memset(nr_one, 0, sizeof(nr_one));
+
+	switch (type->type_class) {
+	case RCU_JA_LINEAR:
+	{
+		uint8_t nr_child =
+			ja_linear_node_get_nr_child(type, node);
+		unsigned int i;
+
+		for (i = 0; i < nr_child; i++) {
+			struct cds_ja_inode_flag *iter;
+			uint8_t v;
+
+			ja_linear_node_get_ith_pos(type, node, i, &v, &iter);
+			if (!iter)
+				continue;
+			if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
+				continue;
+			for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
+				if (v & (1U << bit_i))
+					nr_one[bit_i]++;
+			}
+			distrib_nr_child++;
+		}
+		break;
+	}
+	case RCU_JA_POOL:
+	{
+		unsigned int pool_nr;
+
+		for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) {
+			struct cds_ja_inode *pool =
+				ja_pool_node_get_ith_pool(type,
+					node, pool_nr);
+			uint8_t nr_child =
+				ja_linear_node_get_nr_child(type, pool);
+			unsigned int j;
+
+			for (j = 0; j < nr_child; j++) {
+				struct cds_ja_inode_flag *iter;
+				uint8_t v;
+
+				ja_linear_node_get_ith_pos(type, pool,
+					j, &v, &iter);
+				if (!iter)
+					continue;
+				if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
+					continue;
+				for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
+					if (v & (1U << bit_i))
+						nr_one[bit_i]++;
+				}
+				distrib_nr_child++;
+			}
+		}
+		break;
+	}
+	case RCU_JA_PIGEON:
+	{
+		unsigned int i;
+
+		assert(mode == JA_RECOMPACT_DEL);
+		for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
+			struct cds_ja_inode_flag *iter;
+
+			iter = ja_pigeon_node_get_ith_pos(type, node, i);
+			if (!iter)
+				continue;
+			if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
+				continue;
+			for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
+				if (i & (1U << bit_i))
+					nr_one[bit_i]++;
+			}
+			distrib_nr_child++;
+		}
+		break;
+	}
+	case RCU_JA_NULL:
+		assert(mode == JA_RECOMPACT_ADD_NEXT);
+		break;
+	default:
+		assert(0);
+		break;
+	}
+
+	if (mode == JA_RECOMPACT_ADD_NEXT || mode == JA_RECOMPACT_ADD_SAME) {
+		for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
+			if (n & (1U << bit_i))
+				nr_one[bit_i]++;
+		}
+		distrib_nr_child++;
+	}
+
+	/*
+	 * The best bit selector is that for which the number of ones is
+	 * closest to half of the number of children in the
+	 * distribution. We calculate the distance using the double of
+	 * the sub-distribution sizes to eliminate truncation error.
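+	 *
+	 * (Editorial worked example, not part of the original commit:
+	 * with distrib_nr_child = 10 and nr_one[i] = 4 for some bit i,
+	 * the distance is abs_int((4 << 1) - 10) = 2; a bit with
+	 * exactly 5 ones yields distance 0, a perfect halving.)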
+ */ + for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) { + unsigned int distance_to_best; + + distance_to_best = abs_int((nr_one[bit_i] << 1U) - distrib_nr_child); + if (distance_to_best < overall_best_distance) { + overall_best_distance = distance_to_best; + bitsel = bit_i; + } + } + dbg_printf("1 dimension pool bit selection: (%u)\n", bitsel); + return bitsel; +} + +/* + * Calculate bit distribution in two dimensions. Returns the two bits + * (each 0 to 7) that splits the distribution in four sub-distributions + * containing as much elements one compared to the other. + */ +static +void ja_node_sum_distribution_2d(enum ja_recompact mode, + struct cds_ja *ja, + unsigned int type_index, + const struct cds_ja_type *type, + struct cds_ja_inode *node, + struct cds_ja_shadow_node *shadow_node, + uint8_t n, + struct cds_ja_inode_flag *child_node_flag, + struct cds_ja_inode_flag **nullify_node_flag_ptr, + unsigned int *_bitsel) +{ + uint8_t nr_2d_11[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE], + nr_2d_10[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE], + nr_2d_01[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE], + nr_2d_00[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE]; + unsigned int bitsel[2] = { 0, 1 }; + unsigned int bit_i, bit_j; + int overall_best_distance = INT_MAX; + unsigned int distrib_nr_child = 0; + + memset(nr_2d_11, 0, sizeof(nr_2d_11)); + memset(nr_2d_10, 0, sizeof(nr_2d_10)); + memset(nr_2d_01, 0, sizeof(nr_2d_01)); + memset(nr_2d_00, 0, sizeof(nr_2d_00)); + + switch (type->type_class) { + case RCU_JA_LINEAR: + { + uint8_t nr_child = + ja_linear_node_get_nr_child(type, node); + unsigned int i; + + for (i = 0; i < nr_child; i++) { + struct cds_ja_inode_flag *iter; + uint8_t v; + + ja_linear_node_get_ith_pos(type, node, i, &v, &iter); + if (!iter) + continue; + if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter) + continue; + for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) { + for (bit_j = 0; bit_j < bit_i; bit_j++) { + if ((v & (1U << bit_i)) && (v & (1U << bit_j))) { + nr_2d_11[bit_i][bit_j]++; + } + if ((v & (1U << bit_i)) && !(v & (1U << bit_j))) { + nr_2d_10[bit_i][bit_j]++; + } + if (!(v & (1U << bit_i)) && (v & (1U << bit_j))) { + nr_2d_01[bit_i][bit_j]++; + } + if (!(v & (1U << bit_i)) && !(v & (1U << bit_j))) { + nr_2d_00[bit_i][bit_j]++; + } + } + } + distrib_nr_child++; + } + break; + } + case RCU_JA_POOL: + { + unsigned int pool_nr; + + for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) { + struct cds_ja_inode *pool = + ja_pool_node_get_ith_pool(type, + node, pool_nr); + uint8_t nr_child = + ja_linear_node_get_nr_child(type, pool); + unsigned int j; + + for (j = 0; j < nr_child; j++) { + struct cds_ja_inode_flag *iter; + uint8_t v; + + ja_linear_node_get_ith_pos(type, pool, + j, &v, &iter); + if (!iter) + continue; + if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter) + continue; + for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) { + for (bit_j = 0; bit_j < bit_i; bit_j++) { + if ((v & (1U << bit_i)) && (v & (1U << bit_j))) { + nr_2d_11[bit_i][bit_j]++; + } + if ((v & (1U << bit_i)) && !(v & (1U << bit_j))) { + nr_2d_10[bit_i][bit_j]++; + } + if (!(v & (1U << bit_i)) && (v & (1U << bit_j))) { + nr_2d_01[bit_i][bit_j]++; + } + if (!(v & (1U << bit_i)) && !(v & (1U << bit_j))) { + nr_2d_00[bit_i][bit_j]++; + } + } + } + distrib_nr_child++; + } + } + break; + } + case RCU_JA_PIGEON: + { + unsigned int i; + + assert(mode == JA_RECOMPACT_DEL); + for (i = 0; i < JA_ENTRY_PER_NODE; i++) { + struct cds_ja_inode_flag *iter; + + iter = ja_pigeon_node_get_ith_pos(type, node, 
				i);
+			if (!iter)
+				continue;
+			if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
+				continue;
+			for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
+				for (bit_j = 0; bit_j < bit_i; bit_j++) {
+					if ((i & (1U << bit_i)) && (i & (1U << bit_j))) {
+						nr_2d_11[bit_i][bit_j]++;
+					}
+					if ((i & (1U << bit_i)) && !(i & (1U << bit_j))) {
+						nr_2d_10[bit_i][bit_j]++;
+					}
+					if (!(i & (1U << bit_i)) && (i & (1U << bit_j))) {
+						nr_2d_01[bit_i][bit_j]++;
+					}
+					if (!(i & (1U << bit_i)) && !(i & (1U << bit_j))) {
+						nr_2d_00[bit_i][bit_j]++;
+					}
+				}
+			}
+			distrib_nr_child++;
+		}
+		break;
+	}
+	case RCU_JA_NULL:
+		assert(mode == JA_RECOMPACT_ADD_NEXT);
+		break;
+	default:
+		assert(0);
+		break;
+	}
+
+	if (mode == JA_RECOMPACT_ADD_NEXT || mode == JA_RECOMPACT_ADD_SAME) {
+		for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
+			for (bit_j = 0; bit_j < bit_i; bit_j++) {
+				if ((n & (1U << bit_i)) && (n & (1U << bit_j))) {
+					nr_2d_11[bit_i][bit_j]++;
+				}
+				if ((n & (1U << bit_i)) && !(n & (1U << bit_j))) {
+					nr_2d_10[bit_i][bit_j]++;
+				}
+				if (!(n & (1U << bit_i)) && (n & (1U << bit_j))) {
+					nr_2d_01[bit_i][bit_j]++;
+				}
+				if (!(n & (1U << bit_i)) && !(n & (1U << bit_j))) {
+					nr_2d_00[bit_i][bit_j]++;
+				}
+			}
+		}
+		distrib_nr_child++;
+	}
+
+	/*
+	 * The best bit selector is that for which the number of nodes
+	 * in each sub-class is closest to one-fourth of the number of
+	 * children in the distribution. We calculate the distance using
+	 * 4 times the size of the sub-distribution to eliminate
+	 * truncation error.
+	 */
+	for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
+		for (bit_j = 0; bit_j < bit_i; bit_j++) {
+			int distance_to_best[4];
+
+			distance_to_best[0] = (nr_2d_11[bit_i][bit_j] << 2U) - distrib_nr_child;
+			distance_to_best[1] = (nr_2d_10[bit_i][bit_j] << 2U) - distrib_nr_child;
+			distance_to_best[2] = (nr_2d_01[bit_i][bit_j] << 2U) - distrib_nr_child;
+			distance_to_best[3] = (nr_2d_00[bit_i][bit_j] << 2U) - distrib_nr_child;
+
+			/* Consider worst distance above best */
+			if (distance_to_best[1] > 0 && distance_to_best[1] > distance_to_best[0])
+				distance_to_best[0] = distance_to_best[1];
+			if (distance_to_best[2] > 0 && distance_to_best[2] > distance_to_best[0])
+				distance_to_best[0] = distance_to_best[2];
+			if (distance_to_best[3] > 0 && distance_to_best[3] > distance_to_best[0])
+				distance_to_best[0] = distance_to_best[3];
+
+			/*
+			 * If our worst distance is better than the overall
+			 * best, we become the new best candidate.
+			 */
+			if (distance_to_best[0] < overall_best_distance) {
+				overall_best_distance = distance_to_best[0];
+				bitsel[0] = bit_i;
+				bitsel[1] = bit_j;
+			}
+		}
+	}
+
+	dbg_printf("2 dimensions pool bit selection: (%u,%u)\n", bitsel[0], bitsel[1]);
+
+	/* Return our bit selection */
+	_bitsel[0] = bitsel[0];
+	_bitsel[1] = bitsel[1];
+}
+
+static
+unsigned int find_nearest_type_index(unsigned int type_index,
+		unsigned int nr_nodes)
+{
+	const struct cds_ja_type *type;
+
+	assert(type_index != NODE_INDEX_NULL);
+	if (nr_nodes == 0)
+		return NODE_INDEX_NULL;
+	for (;;) {
+		type = &ja_types[type_index];
+		if (nr_nodes < type->min_child)
+			type_index--;
+		else if (nr_nodes > type->max_child)
+			type_index++;
+		else
+			break;
+	}
+	return type_index;
+}
+
 /*
- * ja_node_recompact_add: recompact a node, adding a new child.
- * TODO: for pool type, take selection bit(s) into account.
+ * ja_node_recompact: recompact a node, adding or removing a child.
  * Return 0 on success, -EAGAIN if need to retry, or other negative
  * error value otherwise.
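+ *
+ * (Editor's note, not part of the original commit: ADD_SAME recompacts
+ * when the current type should still have room (-ERANGE) and, since it
+ * garbage-collects NULL linear entries, may even land on a smaller
+ * type; ADD_NEXT grows to the nearest type fitting nr_child + 1; DEL
+ * shrinks to the nearest type fitting nr_child - 1.)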
*/ @@ -648,7 +1186,8 @@ int ja_node_recompact(enum ja_recompact mode, struct cds_ja_shadow_node *shadow_node, struct cds_ja_inode_flag **old_node_flag_ptr, uint8_t n, struct cds_ja_inode_flag *child_node_flag, - struct cds_ja_inode_flag **nullify_node_flag_ptr) + struct cds_ja_inode_flag **nullify_node_flag_ptr, + int level) { unsigned int new_type_index; struct cds_ja_inode *new_node; @@ -660,23 +1199,35 @@ int ja_node_recompact(enum ja_recompact mode, old_node_flag = *old_node_flag_ptr; + /* + * Need to find nearest type index even for ADD_SAME, because + * this recompaction, when applied to linear nodes, will garbage + * collect dummy (NULL) entries, and can therefore cause a few + * linear representations to be skipped. + */ switch (mode) { - case JA_RECOMPACT: - new_type_index = old_type_index; + case JA_RECOMPACT_ADD_SAME: + new_type_index = find_nearest_type_index(old_type_index, + shadow_node->nr_child + 1); + dbg_printf("Recompact for node with %u children\n", + shadow_node->nr_child + 1); break; - case JA_RECOMPACT_ADD: + case JA_RECOMPACT_ADD_NEXT: if (!shadow_node || old_type_index == NODE_INDEX_NULL) { new_type_index = 0; + dbg_printf("Recompact for NULL\n"); } else { - new_type_index = old_type_index + 1; + new_type_index = find_nearest_type_index(old_type_index, + shadow_node->nr_child + 1); + dbg_printf("Recompact for node with %u children\n", + shadow_node->nr_child + 1); } break; case JA_RECOMPACT_DEL: - if (old_type_index == 0) { - new_type_index = NODE_INDEX_NULL; - } else { - new_type_index = old_type_index - 1; - } + new_type_index = find_nearest_type_index(old_type_index, + shadow_node->nr_child - 1); + dbg_printf("Recompact for node with %u children\n", + shadow_node->nr_child - 1); break; default: assert(0); @@ -690,11 +1241,51 @@ retry: /* for fallback */ new_node = alloc_cds_ja_node(new_type); if (!new_node) return -ENOMEM; - new_node_flag = ja_node_flag(new_node, new_type_index); + + if (new_type->type_class == RCU_JA_POOL) { + switch (new_type->nr_pool_order) { + case 1: + { + unsigned int node_distrib_bitsel; + + node_distrib_bitsel = + ja_node_sum_distribution_1d(mode, ja, + old_type_index, old_type, + old_node, shadow_node, + n, child_node_flag, + nullify_node_flag_ptr); + assert(!((unsigned long) new_node & JA_POOL_1D_MASK)); + new_node_flag = ja_node_flag_pool_1d(new_node, + new_type_index, node_distrib_bitsel); + break; + } + case 2: + { + unsigned int node_distrib_bitsel[2]; + + ja_node_sum_distribution_2d(mode, ja, + old_type_index, old_type, + old_node, shadow_node, + n, child_node_flag, + nullify_node_flag_ptr, + node_distrib_bitsel); + assert(!((unsigned long) new_node & JA_POOL_1D_MASK)); + assert(!((unsigned long) new_node & JA_POOL_2D_MASK)); + new_node_flag = ja_node_flag_pool_2d(new_node, + new_type_index, node_distrib_bitsel); + break; + } + default: + assert(0); + } + } else { + new_node_flag = ja_node_flag(new_node, new_type_index); + } + dbg_printf("Recompact inherit lock from %p\n", shadow_node); - new_shadow_node = rcuja_shadow_set(ja->ht, new_node_flag, shadow_node, ja); + new_shadow_node = rcuja_shadow_set(ja->ht, new_node_flag, shadow_node, ja, level); if (!new_shadow_node) { - free(new_node); + free_cds_ja_node(new_node); return -ENOMEM; } if (fallback) @@ -705,7 +1296,7 @@ retry: /* for fallback */ new_node_flag = NULL; } - assert(mode != JA_RECOMPACT_ADD || old_type->type_class != RCU_JA_PIGEON); + assert(mode != JA_RECOMPACT_ADD_NEXT || old_type->type_class != RCU_JA_PIGEON); if (new_type_index == NODE_INDEX_NULL) goto skip_copy; @@ 
-726,7 +1317,7 @@ retry: /* for fallback */ continue; if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter) continue; - ret = _ja_node_set_nth(new_type, new_node, + ret = _ja_node_set_nth(new_type, new_node, new_node_flag, new_shadow_node, v, iter); if (new_type->type_class == RCU_JA_POOL && ret) { @@ -758,7 +1349,7 @@ retry: /* for fallback */ continue; if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter) continue; - ret = _ja_node_set_nth(new_type, new_node, + ret = _ja_node_set_nth(new_type, new_node, new_node_flag, new_shadow_node, v, iter); if (new_type->type_class == RCU_JA_POOL @@ -771,16 +1362,14 @@ retry: /* for fallback */ break; } case RCU_JA_NULL: - assert(mode == JA_RECOMPACT_ADD); + assert(mode == JA_RECOMPACT_ADD_NEXT); break; case RCU_JA_PIGEON: { - uint8_t nr_child; unsigned int i; assert(mode == JA_RECOMPACT_DEL); - nr_child = shadow_node->nr_child; - for (i = 0; i < nr_child; i++) { + for (i = 0; i < JA_ENTRY_PER_NODE; i++) { struct cds_ja_inode_flag *iter; iter = ja_pigeon_node_get_ith_pos(old_type, old_node, i); @@ -788,7 +1377,7 @@ retry: /* for fallback */ continue; if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter) continue; - ret = _ja_node_set_nth(new_type, new_node, + ret = _ja_node_set_nth(new_type, new_node, new_node_flag, new_shadow_node, i, iter); if (new_type->type_class == RCU_JA_POOL && ret) { @@ -805,13 +1394,24 @@ retry: /* for fallback */ } skip_copy: - if (mode == JA_RECOMPACT_ADD) { + if (mode == JA_RECOMPACT_ADD_NEXT || mode == JA_RECOMPACT_ADD_SAME) { /* add node */ - ret = _ja_node_set_nth(new_type, new_node, + ret = _ja_node_set_nth(new_type, new_node, new_node_flag, new_shadow_node, n, child_node_flag); + if (new_type->type_class == RCU_JA_POOL && ret) { + goto fallback_toosmall; + } assert(!ret); } + + if (fallback) { + dbg_printf("Using fallback for %u children, node type index: %u, mode %s\n", + new_shadow_node->nr_child, old_type_index, mode == JA_RECOMPACT_ADD_NEXT ? "add_next" : + (mode == JA_RECOMPACT_DEL ? "del" : "add_same")); + uatomic_inc(&node_fallback_count_distribution[new_shadow_node->nr_child]); + } + /* Return pointer to new recompacted node through old_node_flag_ptr */ *old_node_flag_ptr = new_node_flag; if (old_node) { @@ -824,7 +1424,7 @@ skip_copy: * This synchronizes removal with re-add of that node. */ if (new_type_index == NODE_INDEX_NULL) - flags = RCUJA_SHADOW_CLEAR_FREE_LOCK; + flags |= RCUJA_SHADOW_CLEAR_FREE_LOCK; ret = rcuja_shadow_clear(ja->ht, old_node_flag, shadow_node, flags); assert(!ret); @@ -841,7 +1441,69 @@ fallback_toosmall: RCUJA_SHADOW_CLEAR_FREE_NODE); assert(!ret); - /* Choose fallback type: pigeon */ + switch (mode) { + case JA_RECOMPACT_ADD_SAME: + /* + * JA_RECOMPACT_ADD_SAME is only triggered if a linear + * node within a pool has unused entries. It should + * therefore _never_ be too small. + */ + assert(0); + + /* Fall-through */ + case JA_RECOMPACT_ADD_NEXT: + { + const struct cds_ja_type *next_type; + + /* + * Recompaction attempt on add failed. Should only + * happen if target node type is pool. Caused by + * hard-to-split distribution. Recompact using the next + * distribution size. + */ + assert(new_type->type_class == RCU_JA_POOL); + next_type = &ja_types[new_type_index + 1]; + /* + * Try going to the next pool size if our population + * fits within its range. This is not flagged as a + * fallback. 
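+		 *
+		 * (Editor's note, not part of the original commit: when
+		 * nr_child + 1 falls outside the next type's
+		 * [min_child, max_child] range, we still move up one
+		 * type, but mark the node as a fallback and account it
+		 * in ja->nr_fallback.)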
+ */ + if (shadow_node->nr_child + 1 >= next_type->min_child + && shadow_node->nr_child + 1 <= next_type->max_child) { + new_type_index++; + goto retry; + } else { + new_type_index++; + dbg_printf("Add fallback to type %d\n", new_type_index); + uatomic_inc(&ja->nr_fallback); + fallback = 1; + goto retry; + } + break; + } + case JA_RECOMPACT_DEL: + /* + * Recompaction attempt on delete failed. Should only + * happen if target node type is pool. This is caused by + * a hard-to-split distribution. Recompact on same node + * size, but flag current node as "fallback" to ensure + * we don't attempt recompaction before some activity + * has reshuffled our node. + */ + assert(new_type->type_class == RCU_JA_POOL); + new_type_index = old_type_index; + dbg_printf("Delete fallback keeping type %d\n", new_type_index); + uatomic_inc(&ja->nr_fallback); + fallback = 1; + goto retry; + default: + assert(0); + return -EINVAL; + } + + /* + * Last resort fallback: pigeon. + */ new_type_index = (1UL << JA_TYPE_BITS) - 1; dbg_printf("Fallback to type %d\n", new_type_index); uatomic_inc(&ja->nr_fallback); @@ -857,7 +1519,8 @@ static int ja_node_set_nth(struct cds_ja *ja, struct cds_ja_inode_flag **node_flag, uint8_t n, struct cds_ja_inode_flag *child_node_flag, - struct cds_ja_shadow_node *shadow_node) + struct cds_ja_shadow_node *shadow_node, + int level) { int ret; unsigned int type_index; @@ -870,18 +1533,18 @@ int ja_node_set_nth(struct cds_ja *ja, node = ja_node_ptr(*node_flag); type_index = ja_node_type(*node_flag); type = &ja_types[type_index]; - ret = _ja_node_set_nth(type, node, shadow_node, + ret = _ja_node_set_nth(type, node, *node_flag, shadow_node, n, child_node_flag); switch (ret) { case -ENOSPC: - /* Not enough space in node, need to recompact. */ - ret = ja_node_recompact(JA_RECOMPACT_ADD, ja, type_index, type, node, - shadow_node, node_flag, n, child_node_flag, NULL); + /* Not enough space in node, need to recompact to next type. */ + ret = ja_node_recompact(JA_RECOMPACT_ADD_NEXT, ja, type_index, type, node, + shadow_node, node_flag, n, child_node_flag, NULL, level); break; case -ERANGE: /* Node needs to be recompacted. */ - ret = ja_node_recompact(JA_RECOMPACT, ja, type_index, type, node, - shadow_node, node_flag, n, child_node_flag, NULL); + ret = ja_node_recompact(JA_RECOMPACT_ADD_SAME, ja, type_index, type, node, + shadow_node, node_flag, n, child_node_flag, NULL, level); break; } return ret; @@ -896,7 +1559,7 @@ int ja_node_clear_ptr(struct cds_ja *ja, struct cds_ja_inode_flag **node_flag_ptr, /* Pointer to location to nullify */ struct cds_ja_inode_flag **parent_node_flag_ptr, /* Address of parent ptr in its parent */ struct cds_ja_shadow_node *shadow_node, /* of parent */ - uint8_t n) + uint8_t n, int level) { int ret; unsigned int type_index; @@ -909,12 +1572,12 @@ int ja_node_clear_ptr(struct cds_ja *ja, node = ja_node_ptr(*parent_node_flag_ptr); type_index = ja_node_type(*parent_node_flag_ptr); type = &ja_types[type_index]; - ret = _ja_node_clear_ptr(type, node, shadow_node, node_flag_ptr, n); + ret = _ja_node_clear_ptr(type, node, *parent_node_flag_ptr, shadow_node, node_flag_ptr, n); if (ret == -EFBIG) { - /* Should to try recompaction. */ + /* Should try recompaction. 
*/ ret = ja_node_recompact(JA_RECOMPACT_DEL, ja, type_index, type, node, shadow_node, parent_node_flag_ptr, n, NULL, - node_flag_ptr); + node_flag_ptr, level); } return ret; } @@ -938,8 +1601,7 @@ struct cds_hlist_head cds_ja_lookup(struct cds_ja *ja, uint64_t key) uint8_t iter_key; iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1))); - node_flag = ja_node_get_nth(node_flag, NULL, - iter_key); + node_flag = ja_node_get_nth(node_flag, NULL, iter_key); dbg_printf("cds_ja_lookup iter key lookup %u finds node_flag %p\n", (unsigned int) iter_key, node_flag); if (!ja_node_ptr(node_flag)) @@ -965,84 +1627,138 @@ struct cds_hlist_head cds_ja_lookup(struct cds_ja *ja, uint64_t key) */ static int ja_attach_node(struct cds_ja *ja, - struct cds_ja_inode_flag **node_flag_ptr, - struct cds_ja_inode_flag *node_flag, - struct cds_ja_inode_flag *parent_node_flag, + struct cds_ja_inode_flag **attach_node_flag_ptr, + struct cds_ja_inode_flag *attach_node_flag, + struct cds_ja_inode_flag *parent_attach_node_flag, + struct cds_ja_inode_flag **old_node_flag_ptr, + struct cds_ja_inode_flag *old_node_flag, uint64_t key, unsigned int level, struct cds_ja_node *child_node) { struct cds_ja_shadow_node *shadow_node = NULL, *parent_shadow_node = NULL; - struct cds_ja_inode *node = ja_node_ptr(node_flag); - struct cds_ja_inode *parent_node = ja_node_ptr(parent_node_flag); struct cds_hlist_head head; struct cds_ja_inode_flag *iter_node_flag, *iter_dest_node_flag; int ret, i; struct cds_ja_inode_flag *created_nodes[JA_MAX_DEPTH]; int nr_created_nodes = 0; - dbg_printf("Attach node at level %u (node %p, node_flag %p)\n", - level, node, node_flag); + dbg_printf("Attach node at level %u (old_node_flag %p, attach_node_flag_ptr %p attach_node_flag %p, parent_attach_node_flag %p)\n", + level, old_node_flag, attach_node_flag_ptr, attach_node_flag, parent_attach_node_flag); - assert(node); - shadow_node = rcuja_shadow_lookup_lock(ja->ht, node_flag); - if (!shadow_node) { - ret = -EAGAIN; - goto end; + assert(!old_node_flag); + if (attach_node_flag) { + shadow_node = rcuja_shadow_lookup_lock(ja->ht, attach_node_flag); + if (!shadow_node) { + ret = -EAGAIN; + goto end; + } } - if (parent_node) { + if (parent_attach_node_flag) { parent_shadow_node = rcuja_shadow_lookup_lock(ja->ht, - parent_node_flag); + parent_attach_node_flag); if (!parent_shadow_node) { ret = -EAGAIN; goto unlock_shadow; } } + if (old_node_flag_ptr && ja_node_ptr(*old_node_flag_ptr)) { + /* + * Target node has been updated between RCU lookup and + * lock acquisition. We need to re-try lookup and + * attach. + */ + ret = -EAGAIN; + goto unlock_parent; + } + + /* + * Perform a lookup query to handle the case where + * old_node_flag_ptr is NULL. We cannot use it to check if the + * node has been populated between RCU lookup and mutex + * acquisition. + */ + if (!old_node_flag_ptr) { + uint8_t iter_key; + struct cds_ja_inode_flag *lookup_node_flag; + struct cds_ja_inode_flag **lookup_node_flag_ptr; + + iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (ja->tree_depth - level))); + lookup_node_flag = ja_node_get_nth(attach_node_flag, + &lookup_node_flag_ptr, + iter_key); + if (lookup_node_flag) { + ret = -EEXIST; + goto unlock_parent; + } + } + + if (attach_node_flag_ptr && ja_node_ptr(*attach_node_flag_ptr) != + ja_node_ptr(attach_node_flag)) { + /* + * Target node has been updated between RCU lookup and + * lock acquisition. We need to re-try lookup and + * attach. 
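+	 *
+	 * (Editor's note, not part of the original commit: this is the
+	 * usual validate-after-lock pattern; the lookup ran under RCU
+	 * only, so each pointer snapshot is re-checked once the shadow
+	 * node locks are held, and any mismatch restarts from the root
+	 * via -EAGAIN.)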
+ */ + ret = -EAGAIN; + goto unlock_parent; + } + /* Create new branch, starting from bottom */ CDS_INIT_HLIST_HEAD(&head); cds_hlist_add_head_rcu(&child_node->list, &head); iter_node_flag = (struct cds_ja_inode_flag *) head.next; - for (i = ja->tree_depth; i > (int) level; i--) { + for (i = ja->tree_depth - 1; i >= (int) level; i--) { uint8_t iter_key; - iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (ja->tree_depth - i))); + iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (ja->tree_depth - i - 1))); dbg_printf("branch creation level %d, key %u\n", - i - 1, (unsigned int) iter_key); + i, (unsigned int) iter_key); iter_dest_node_flag = NULL; ret = ja_node_set_nth(ja, &iter_dest_node_flag, iter_key, iter_node_flag, - NULL); - if (ret) + NULL, i); + if (ret) { + dbg_printf("branch creation error %d\n", ret); goto check_error; + } created_nodes[nr_created_nodes++] = iter_dest_node_flag; iter_node_flag = iter_dest_node_flag; } + assert(level > 0); - if (level > 1) { + /* Publish branch */ + if (level == 1) { + /* + * Attaching to root node. + */ + rcu_assign_pointer(ja->root, iter_node_flag); + } else { uint8_t iter_key; iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (ja->tree_depth - level))); + dbg_printf("publish branch at level %d, key %u\n", + level - 1, (unsigned int) iter_key); /* We need to use set_nth on the previous level. */ - iter_dest_node_flag = node_flag; + iter_dest_node_flag = attach_node_flag; ret = ja_node_set_nth(ja, &iter_dest_node_flag, iter_key, iter_node_flag, - shadow_node); - if (ret) + shadow_node, level - 1); + if (ret) { + dbg_printf("branch publish error %d\n", ret); goto check_error; - created_nodes[nr_created_nodes++] = iter_dest_node_flag; - iter_node_flag = iter_dest_node_flag; + } + /* + * Attach branch + */ + rcu_assign_pointer(*attach_node_flag_ptr, iter_dest_node_flag); } - /* Publish new branch */ - dbg_printf("Publish branch %p, replacing %p\n", - iter_node_flag, *node_flag_ptr); - rcu_assign_pointer(*node_flag_ptr, iter_node_flag); - /* Success */ ret = 0; @@ -1062,6 +1778,7 @@ check_error: assert(!tmpret); } } +unlock_parent: if (parent_shadow_node) rcuja_shadow_unlock(parent_shadow_node); unlock_shadow: @@ -1080,31 +1797,46 @@ end: static int ja_chain_node(struct cds_ja *ja, struct cds_ja_inode_flag *parent_node_flag, - struct cds_hlist_head *head, + struct cds_ja_inode_flag **node_flag_ptr, + struct cds_ja_inode_flag *node_flag, struct cds_ja_node *node) { struct cds_ja_shadow_node *shadow_node; + int ret = 0; shadow_node = rcuja_shadow_lookup_lock(ja->ht, parent_node_flag); - if (!shadow_node) + if (!shadow_node) { return -EAGAIN; - cds_hlist_add_head_rcu(&node->list, head); + } + if (ja_node_ptr(*node_flag_ptr) != ja_node_ptr(node_flag)) { + ret = -EAGAIN; + goto end; + } + cds_hlist_add_head_rcu(&node->list, (struct cds_hlist_head *) node_flag_ptr); +end: rcuja_shadow_unlock(shadow_node); - return 0; + return ret; } -int cds_ja_add(struct cds_ja *ja, uint64_t key, - struct cds_ja_node *new_node) +static +int _cds_ja_add(struct cds_ja *ja, uint64_t key, + struct cds_ja_node *new_node, + struct cds_ja_node **unique_node_ret) { unsigned int tree_depth, i; - struct cds_ja_inode_flag **node_flag_ptr; /* in parent */ - struct cds_ja_inode_flag *node_flag, + struct cds_ja_inode_flag *attach_node_flag, *parent_node_flag, - *parent2_node_flag; + *parent2_node_flag, + *node_flag, + *parent_attach_node_flag; + struct cds_ja_inode_flag **attach_node_flag_ptr, + **parent_node_flag_ptr, + **node_flag_ptr; int ret; - if (caa_unlikely(key > ja->key_max)) + 
if (caa_unlikely(key > ja->key_max)) { return -EINVAL; + } tree_depth = ja->tree_depth; retry: @@ -1113,55 +1845,90 @@ retry: parent2_node_flag = NULL; parent_node_flag = (struct cds_ja_inode_flag *) &ja->root; /* Use root ptr address as key for mutex */ - node_flag_ptr = &ja->root; + parent_node_flag_ptr = NULL; node_flag = rcu_dereference(ja->root); + node_flag_ptr = &ja->root; /* Iterate on all internal levels */ for (i = 1; i < tree_depth; i++) { uint8_t iter_key; - dbg_printf("cds_ja_add iter node_flag_ptr %p node_flag %p\n", - *node_flag_ptr, node_flag); - if (!ja_node_ptr(node_flag)) { - ret = ja_attach_node(ja, node_flag_ptr, - parent_node_flag, parent2_node_flag, - key, i, new_node); - if (ret == -EAGAIN || ret == -EEXIST) - goto retry; - else - goto end; - } + if (!ja_node_ptr(node_flag)) + break; + dbg_printf("cds_ja_add iter parent2_node_flag %p parent_node_flag %p node_flag_ptr %p node_flag %p\n", + parent2_node_flag, parent_node_flag, node_flag_ptr, node_flag); iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1))); parent2_node_flag = parent_node_flag; parent_node_flag = node_flag; + parent_node_flag_ptr = node_flag_ptr; node_flag = ja_node_get_nth(node_flag, &node_flag_ptr, iter_key); - dbg_printf("cds_ja_add iter key lookup %u finds node_flag %p node_flag_ptr %p\n", - (unsigned int) iter_key, node_flag, *node_flag_ptr); } /* - * We reached bottom of tree, simply add node to last internal - * level, or chain it if key is already present. + * We reached either bottom of tree or internal NULL node, + * simply add node to last internal level, or chain it if key is + * already present. */ if (!ja_node_ptr(node_flag)) { - dbg_printf("cds_ja_add last node_flag_ptr %p node_flag %p\n", - *node_flag_ptr, node_flag); - ret = ja_attach_node(ja, node_flag_ptr, parent_node_flag, - parent2_node_flag, key, i, new_node); + dbg_printf("cds_ja_add NULL parent2_node_flag %p parent_node_flag %p node_flag_ptr %p node_flag %p\n", + parent2_node_flag, parent_node_flag, node_flag_ptr, node_flag); + + attach_node_flag = parent_node_flag; + attach_node_flag_ptr = parent_node_flag_ptr; + parent_attach_node_flag = parent2_node_flag; + + ret = ja_attach_node(ja, attach_node_flag_ptr, + attach_node_flag, + parent_attach_node_flag, + node_flag_ptr, + node_flag, + key, i, new_node); } else { + if (unique_node_ret) { + *unique_node_ret = (struct cds_ja_node *) ja_node_ptr(node_flag); + return -EEXIST; + } + + dbg_printf("cds_ja_add duplicate parent2_node_flag %p parent_node_flag %p node_flag_ptr %p node_flag %p\n", + parent2_node_flag, parent_node_flag, node_flag_ptr, node_flag); + + attach_node_flag = node_flag; + attach_node_flag_ptr = node_flag_ptr; + parent_attach_node_flag = parent_node_flag; + ret = ja_chain_node(ja, - parent_node_flag, - (struct cds_hlist_head *) node_flag_ptr, + parent_attach_node_flag, + attach_node_flag_ptr, + attach_node_flag, new_node); } - if (ret == -EAGAIN) + if (ret == -EAGAIN || ret == -EEXIST) goto retry; -end: + return ret; } +int cds_ja_add(struct cds_ja *ja, uint64_t key, + struct cds_ja_node *new_node) +{ + return _cds_ja_add(ja, key, new_node, NULL); +} + +struct cds_ja_node *cds_ja_add_unique(struct cds_ja *ja, uint64_t key, + struct cds_ja_node *new_node) +{ + int ret; + struct cds_ja_node *ret_node; + + ret = _cds_ja_add(ja, key, new_node, &ret_node); + if (ret == -EEXIST) + return ret_node; + else + return new_node; +} + /* * Note: there is no need to lookup the pointer address associated with * each node's nth item after taking the lock: it's 
already been done by @@ -1169,6 +1936,9 @@ end: * ensure that when a match value -> pointer is found in a node, it is * _NEVER_ changed for that node without recompaction, and recompaction * reallocates the node. + * However, when a child is removed from "linear" nodes, its pointer + * is set to NULL. We therefore check, while holding the locks, if this + * pointer is NULL, and return -ENOENT to the caller if it is the case. */ static int ja_detach_node(struct cds_ja *ja, @@ -1205,9 +1975,21 @@ int ja_detach_node(struct cds_ja *ja, ret = -EAGAIN; goto end; } - assert(shadow_node->nr_child > 0); shadow_nodes[nr_shadow++] = shadow_node; - if (shadow_node->nr_child == 1) + + /* + * Check if node has been removed between RCU + * lookup and lock acquisition. + */ + assert(snapshot_ptr[i + 1]); + if (ja_node_ptr(*snapshot_ptr[i + 1]) + != ja_node_ptr(snapshot[i + 1])) { + ret = -ENOENT; + goto end; + } + + assert(shadow_node->nr_child > 0); + if (shadow_node->nr_child == 1 && i > 1) nr_clear++; nr_branch++; if (shadow_node->nr_child > 1 || i == 1) { @@ -1219,10 +2001,23 @@ int ja_detach_node(struct cds_ja *ja, goto end; } shadow_nodes[nr_shadow++] = shadow_node; + + /* + * Check if node has been removed between RCU + * lookup and lock acquisition. + */ + assert(snapshot_ptr[i]); + if (ja_node_ptr(*snapshot_ptr[i]) + != ja_node_ptr(snapshot[i])) { + ret = -ENOENT; + goto end; + } + node_flag_ptr = snapshot_ptr[i + 1]; n = snapshot_n[i + 1]; parent_node_flag_ptr = snapshot_ptr[i]; parent_node_flag = snapshot[i]; + if (i > 1) { /* * Lock parent's parent, in case we need @@ -1235,7 +2030,19 @@ int ja_detach_node(struct cds_ja *ja, goto end; } shadow_nodes[nr_shadow++] = shadow_node; + + /* + * Check if node has been removed between RCU + * lookup and lock acquisition. + */ + assert(snapshot_ptr[i - 1]); + if (ja_node_ptr(*snapshot_ptr[i - 1]) + != ja_node_ptr(snapshot[i - 1])) { + ret = -ENOENT; + goto end; + } } + break; } } @@ -1244,8 +2051,8 @@ int ja_detach_node(struct cds_ja *ja, * At this point, we want to delete all nodes that are about to * be removed from shadow_nodes (except the last one, which is * either the root or the parent of the upmost node with 1 - * child). OK to as to free lock here, because RCU read lock is - * held, and free only performed in call_rcu. + * child). OK to free lock here, because RCU read lock is held, + * and free only performed in call_rcu. 
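+	 *
+	 * (Editor's note, not part of the original commit: the shadow
+	 * nodes cleared here only schedule reclaim through the RCU
+	 * flavor's call_rcu, so readers still traversing these nodes
+	 * under rcu_read_lock cannot observe freed memory.)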
*/ for (i = 0; i < nr_clear; i++) { @@ -1263,7 +2070,9 @@ int ja_detach_node(struct cds_ja *ja, node_flag_ptr, /* Pointer to location to nullify */ &iter_node_flag, /* Old new parent ptr in its parent */ shadow_nodes[nr_branch - 1], /* of parent */ - n); + n, nr_branch - 1); + if (ret) + goto end; dbg_printf("ja_detach_node: publish %p instead of %p\n", iter_node_flag, *parent_node_flag_ptr); @@ -1279,29 +2088,48 @@ end: static int ja_unchain_node(struct cds_ja *ja, struct cds_ja_inode_flag *parent_node_flag, - struct cds_hlist_head *head, + struct cds_ja_inode_flag **node_flag_ptr, + struct cds_ja_inode_flag *node_flag, struct cds_ja_node *node) { struct cds_ja_shadow_node *shadow_node; struct cds_hlist_node *hlist_node; - int ret = 0, count = 0; + struct cds_hlist_head hlist_head; + int ret = 0, count = 0, found = 0; shadow_node = rcuja_shadow_lookup_lock(ja->ht, parent_node_flag); if (!shadow_node) return -EAGAIN; + if (ja_node_ptr(*node_flag_ptr) != ja_node_ptr(node_flag)) { + ret = -EAGAIN; + goto end; + } + hlist_head.next = (struct cds_hlist_node *) ja_node_ptr(node_flag); /* * Retry if another thread removed all but one of duplicates - * since check (that was performed without lock). + * since check (this check was performed without lock). + * Ensure that the node we are about to remove is still in the + * list (while holding lock). */ - cds_hlist_for_each_rcu(hlist_node, head, list) { + cds_hlist_for_each_rcu(hlist_node, &hlist_head) { + if (count == 0) { + /* FIXME: currently a work-around */ + hlist_node->prev = (struct cds_hlist_node *) node_flag_ptr; + } count++; + if (hlist_node == &node->list) + found++; } - - if (count == 1) { + assert(found <= 1); + if (!found || count == 1) { ret = -EAGAIN; goto end; } cds_hlist_del_rcu(&node->list); + /* + * Validate that we indeed removed the node from linked list. + */ + assert(ja_node_ptr(*node_flag_ptr) != (struct cds_ja_inode *) node); end: rcuja_shadow_unlock(shadow_node); return ret; @@ -1318,7 +2146,8 @@ int cds_ja_del(struct cds_ja *ja, uint64_t key, struct cds_ja_inode_flag **snapshot_ptr[JA_MAX_DEPTH]; uint8_t snapshot_n[JA_MAX_DEPTH]; struct cds_ja_inode_flag *node_flag; - struct cds_ja_inode_flag **prev_node_flag_ptr; + struct cds_ja_inode_flag **prev_node_flag_ptr, + **node_flag_ptr; int nr_snapshot; int ret; @@ -1338,6 +2167,7 @@ retry: snapshot[nr_snapshot++] = (struct cds_ja_inode_flag *) &ja->root; node_flag = rcu_dereference(ja->root); prev_node_flag_ptr = &ja->root; + node_flag_ptr = &ja->root; /* Iterate on all internal levels */ for (i = 1; i < tree_depth; i++) { @@ -1353,13 +2183,14 @@ retry: snapshot_ptr[nr_snapshot] = prev_node_flag_ptr; snapshot[nr_snapshot++] = node_flag; node_flag = ja_node_get_nth(node_flag, - &prev_node_flag_ptr, + &node_flag_ptr, iter_key); + if (node_flag) + prev_node_flag_ptr = node_flag_ptr; dbg_printf("cds_ja_del iter key lookup %u finds node_flag %p, prev_node_flag_ptr %p\n", (unsigned int) iter_key, node_flag, prev_node_flag_ptr); } - /* * We reached bottom of tree, try to find the node we are trying * to remove. Fail if we cannot find it. @@ -1401,10 +2232,15 @@ retry: snapshot_n, nr_snapshot, key, node); } else { ret = ja_unchain_node(ja, snapshot[nr_snapshot - 1], - &hlist_head, match); + node_flag_ptr, node_flag, match); } } - if (ret == -EAGAIN) + /* + * Explanation of -ENOENT handling: caused by concurrent delete + * between RCU lookup and actual removal. Need to re-do the + * lookup and removal attempt. 
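+	 *
+	 * (Editor's note, not part of the original commit: -EAGAIN
+	 * covers lock or list races where the entry may still exist,
+	 * while -ENOENT from ja_detach_node means the target pointer
+	 * was already nullified; in both cases the retried lookup
+	 * decides whether the key is really gone.)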
+ */ + if (ret == -EAGAIN || ret == -ENOENT) goto retry; return ret; } @@ -1450,12 +2286,11 @@ struct cds_ja *_cds_ja_new(unsigned int key_bits, */ root_shadow_node = rcuja_shadow_set(ja->ht, (struct cds_ja_inode_flag *) &ja->root, - NULL, ja); + NULL, ja, 0); if (!root_shadow_node) { ret = -ENOMEM; goto ht_node_error; } - root_shadow_node->level = 0; return ja; @@ -1499,14 +2334,14 @@ void rcuja_free_all_children(struct cds_ja_shadow_node *shadow_node, struct cds_ja_inode_flag *iter; struct cds_hlist_head head; struct cds_ja_node *entry; - struct cds_hlist_node *pos; + struct cds_hlist_node *pos, *tmp; uint8_t v; ja_linear_node_get_ith_pos(type, node, i, &v, &iter); if (!iter) continue; head.next = (struct cds_hlist_node *) iter; - cds_hlist_for_each_entry_rcu(entry, pos, &head, list) { + cds_hlist_for_each_entry_safe(entry, pos, tmp, &head, list) { flavor->update_call_rcu(&entry->head, free_node_cb); } } @@ -1527,14 +2362,14 @@ void rcuja_free_all_children(struct cds_ja_shadow_node *shadow_node, struct cds_ja_inode_flag *iter; struct cds_hlist_head head; struct cds_ja_node *entry; - struct cds_hlist_node *pos; + struct cds_hlist_node *pos, *tmp; uint8_t v; - ja_linear_node_get_ith_pos(type, node, j, &v, &iter); + ja_linear_node_get_ith_pos(type, pool, j, &v, &iter); if (!iter) continue; head.next = (struct cds_hlist_node *) iter; - cds_hlist_for_each_entry_rcu(entry, pos, &head, list) { + cds_hlist_for_each_entry_safe(entry, pos, tmp, &head, list) { flavor->update_call_rcu(&entry->head, free_node_cb); } } @@ -1545,21 +2380,19 @@ void rcuja_free_all_children(struct cds_ja_shadow_node *shadow_node, break; case RCU_JA_PIGEON: { - uint8_t nr_child; unsigned int i; - nr_child = shadow_node->nr_child; - for (i = 0; i < nr_child; i++) { + for (i = 0; i < JA_ENTRY_PER_NODE; i++) { struct cds_ja_inode_flag *iter; struct cds_hlist_head head; struct cds_ja_node *entry; - struct cds_hlist_node *pos; + struct cds_hlist_node *pos, *tmp; iter = ja_pigeon_node_get_ith_pos(type, node, i); if (!iter) continue; head.next = (struct cds_hlist_node *) iter; - cds_hlist_for_each_entry_rcu(entry, pos, &head, list) { + cds_hlist_for_each_entry_safe(entry, pos, tmp, &head, list) { flavor->update_call_rcu(&entry->head, free_node_cb); } } @@ -1570,6 +2403,20 @@ void rcuja_free_all_children(struct cds_ja_shadow_node *shadow_node, } } +static +void print_debug_fallback_distribution(void) +{ + int i; + + fprintf(stderr, "Fallback node distribution:\n"); + for (i = 0; i < JA_ENTRY_PER_NODE; i++) { + if (!node_fallback_count_distribution[i]) + continue; + fprintf(stderr, " %3u: %4lu\n", + i, node_fallback_count_distribution[i]); + } +} + /* * There should be no more concurrent add to the judy array while it is * being destroyed (ensured by the caller). @@ -1577,18 +2424,27 @@ void rcuja_free_all_children(struct cds_ja_shadow_node *shadow_node, int cds_ja_destroy(struct cds_ja *ja, void (*free_node_cb)(struct rcu_head *head)) { + const struct rcu_flavor_struct *flavor; int ret; + flavor = cds_lfht_rcu_flavor(ja->ht); rcuja_shadow_prune(ja->ht, RCUJA_SHADOW_CLEAR_FREE_NODE | RCUJA_SHADOW_CLEAR_FREE_LOCK, free_node_cb); + flavor->thread_offline(); ret = rcuja_delete_ht(ja->ht); if (ret) return ret; + flavor->thread_online(); if (uatomic_read(&ja->nr_fallback)) fprintf(stderr, "[warning] RCU Judy Array used %lu fallback node(s)\n", uatomic_read(&ja->nr_fallback)); + fprintf(stderr, "Nodes allocated: %lu, Nodes freed: %lu. 
Fallback ratio: %g\n", + uatomic_read(&nr_nodes_allocated), + uatomic_read(&nr_nodes_freed), + (double) uatomic_read(&ja->nr_fallback) / (double) uatomic_read(&nr_nodes_allocated)); + print_debug_fallback_distribution(); free(ja); return 0; }
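Editorial appendix (not part of the commit): the sketch below is a minimal, self-contained model of the 1-D bit-selection heuristic implemented by ja_node_sum_distribution_1d in this patch, assuming 8-bit slot indices. elect_bitsel_1d and the sample keys are hypothetical names invented for illustration only.

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Count, for each of the 8 bits, how many keys have the bit set, then
 * elect the bit whose set/clear split is closest to half the
 * population. Doubled counts avoid integer truncation, mirroring
 * abs_int((nr_one[bit] << 1) - distrib_nr_child) in the patch.
 */
static unsigned int elect_bitsel_1d(const unsigned char *keys,
		unsigned int nr_keys)
{
	unsigned int nr_one[8] = { 0 };
	unsigned int i, bit, best_bit = 0, best_distance = UINT_MAX;

	for (i = 0; i < nr_keys; i++)
		for (bit = 0; bit < 8; bit++)
			if (keys[i] & (1U << bit))
				nr_one[bit]++;
	for (bit = 0; bit < 8; bit++) {
		unsigned int distance =
			abs((int) (nr_one[bit] << 1) - (int) nr_keys);

		if (distance < best_distance) {
			best_distance = distance;
			best_bit = bit;
		}
	}
	return best_bit;
}

int main(void)
{
	/* Hypothetical slot indices: bits 0 and 7 both split them 2/2. */
	unsigned char keys[] = { 0x00, 0x01, 0x80, 0x81 };

	printf("elected bit: %u\n", elect_bitsel_1d(keys, 4));	/* 0 */
	return 0;
}

For these sample keys, bits 0 and 7 both split the population evenly; the strict less-than comparison keeps the lowest such bit, exactly as the election loop in the patch does.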