/*
 * rcuja/rcuja.c
 *
 * Userspace RCU library - RCU Judy Array
 *
 * Copyright 2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define _LGPL_SOURCE
#include <stdint.h>
#include <errno.h>
#include <limits.h>
#include <string.h>
#include <urcu/rcuja.h>
#include <urcu/compiler.h>
#include <urcu/arch.h>
#include <assert.h>
#include <urcu-pointer.h>
#include <urcu/uatomic.h>

#include "rcuja-internal.h"
#include "bitfield.h"

#ifndef abs_int
#define abs_int(a)	((int) (a) > 0 ? (int) (a) : -((int) (a)))
#endif

enum cds_ja_type_class {
	RCU_JA_LINEAR = 0,	/* Type A */
			/* 32-bit: 1 to 25 children, 8 to 128 bytes */
			/* 64-bit: 1 to 28 children, 16 to 256 bytes */
	RCU_JA_POOL = 1,	/* Type B */
			/* 32-bit: 26 to 100 children, 256 to 512 bytes */
			/* 64-bit: 29 to 112 children, 512 to 1024 bytes */
	RCU_JA_PIGEON = 2,	/* Type C */
			/* 32-bit: 101 to 256 children, 1024 bytes */
			/* 64-bit: 113 to 256 children, 2048 bytes */
	/* Leaf nodes are implicit from their height in the tree */
	RCU_JA_NR_TYPES,

	RCU_JA_NULL,	/* not an encoded type, but keeps code regular */
};

struct cds_ja_type {
	enum cds_ja_type_class type_class;
	uint16_t min_child;		/* minimum number of children: 1 to 256 */
	uint16_t max_child;		/* maximum number of children: 1 to 256 */
	uint16_t max_linear_child;	/* per-pool max nr. children: 1 to 256 */
	uint16_t order;			/* node size is (1 << order), in bytes */
	uint16_t nr_pool_order;		/* number of pools is (1 << nr_pool_order) */
	uint16_t pool_size_order;	/* pool size is (1 << pool_size_order), in bytes */
};

/*
 * Iteration on the array to find the right node size for the number of
 * children stops when it reaches .max_child == 256 (this is the largest
 * possible node size, which contains 256 children).
 * The min_child overlaps with the previous max_child to provide a
 * hysteresis loop on reallocation for patterns of cyclic add/removal
 * within the same node.
 * The node type index within the following arrays is represented on 3
 * bits. It identifies the node type, min/max number of children, and
 * the size order.
 * The max_child values for RCU_JA_POOL below result from statistical
 * approximation: over a million generated populations, these max_child
 * values cover between 97% and 99% of the populations. Therefore, a
 * fallback needs to exist to cover the rare extreme population
 * unbalance cases, but it will not have a major impact on speed or
 * space consumption, since those cases are rare.
 */

d68c6810
MD
87#if (CAA_BITS_PER_LONG < 64)
88/* 32-bit pointers */
1db4943c
MD
89enum {
90 ja_type_0_max_child = 1,
91 ja_type_1_max_child = 3,
92 ja_type_2_max_child = 6,
93 ja_type_3_max_child = 12,
94 ja_type_4_max_child = 25,
95 ja_type_5_max_child = 48,
96 ja_type_6_max_child = 92,
97 ja_type_7_max_child = 256,
e1db2db5 98 ja_type_8_max_child = 0, /* NULL */
1db4943c
MD
99};
100
8e519e3c
MD
101enum {
102 ja_type_0_max_linear_child = 1,
103 ja_type_1_max_linear_child = 3,
104 ja_type_2_max_linear_child = 6,
105 ja_type_3_max_linear_child = 12,
106 ja_type_4_max_linear_child = 25,
107 ja_type_5_max_linear_child = 24,
108 ja_type_6_max_linear_child = 23,
109};
110
1db4943c
MD
111enum {
112 ja_type_5_nr_pool_order = 1,
113 ja_type_6_nr_pool_order = 2,
114};
115
d96bfb0d 116const struct cds_ja_type ja_types[] = {
8e519e3c
MD
117 { .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_0_max_child, .max_linear_child = ja_type_0_max_linear_child, .order = 3, },
118 { .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_1_max_child, .max_linear_child = ja_type_1_max_linear_child, .order = 4, },
119 { .type_class = RCU_JA_LINEAR, .min_child = 3, .max_child = ja_type_2_max_child, .max_linear_child = ja_type_2_max_linear_child, .order = 5, },
120 { .type_class = RCU_JA_LINEAR, .min_child = 4, .max_child = ja_type_3_max_child, .max_linear_child = ja_type_3_max_linear_child, .order = 6, },
121 { .type_class = RCU_JA_LINEAR, .min_child = 10, .max_child = ja_type_4_max_child, .max_linear_child = ja_type_4_max_linear_child, .order = 7, },
e5227865 122
fd800776 123 /* Pools may fill sooner than max_child */
8e519e3c
MD
124 { .type_class = RCU_JA_POOL, .min_child = 20, .max_child = ja_type_5_max_child, .max_linear_child = ja_type_5_max_linear_child, .order = 8, .nr_pool_order = ja_type_5_nr_pool_order, .pool_size_order = 7, },
125 { .type_class = RCU_JA_POOL, .min_child = 45, .max_child = ja_type_6_max_child, .max_linear_child = ja_type_6_max_linear_child, .order = 9, .nr_pool_order = ja_type_6_nr_pool_order, .pool_size_order = 7, },
3d45251f
MD
126
127 /*
b1a90ce3
MD
128 * Upon node removal below min_child, if child pool is filled
129 * beyond capacity, we roll back to pigeon.
3d45251f 130 */
58c16c03 131 { .type_class = RCU_JA_PIGEON, .min_child = 83, .max_child = ja_type_7_max_child, .order = 10, },
e1db2db5
MD
132
133 { .type_class = RCU_JA_NULL, .min_child = 0, .max_child = ja_type_8_max_child, },
d68c6810 134};
d68c6810
MD
135#else /* !(CAA_BITS_PER_LONG < 64) */
136/* 64-bit pointers */
1db4943c
MD
137enum {
138 ja_type_0_max_child = 1,
139 ja_type_1_max_child = 3,
140 ja_type_2_max_child = 7,
141 ja_type_3_max_child = 14,
142 ja_type_4_max_child = 28,
143 ja_type_5_max_child = 54,
144 ja_type_6_max_child = 104,
145 ja_type_7_max_child = 256,
e1db2db5 146 ja_type_8_max_child = 256,
1db4943c
MD
147};
148
8e519e3c
MD
149enum {
150 ja_type_0_max_linear_child = 1,
151 ja_type_1_max_linear_child = 3,
152 ja_type_2_max_linear_child = 7,
153 ja_type_3_max_linear_child = 14,
154 ja_type_4_max_linear_child = 28,
155 ja_type_5_max_linear_child = 27,
156 ja_type_6_max_linear_child = 26,
157};
158
1db4943c
MD
159enum {
160 ja_type_5_nr_pool_order = 1,
161 ja_type_6_nr_pool_order = 2,
162};
163
d96bfb0d 164const struct cds_ja_type ja_types[] = {
8e519e3c
MD
165 { .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_0_max_child, .max_linear_child = ja_type_0_max_linear_child, .order = 4, },
166 { .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_1_max_child, .max_linear_child = ja_type_1_max_linear_child, .order = 5, },
167 { .type_class = RCU_JA_LINEAR, .min_child = 3, .max_child = ja_type_2_max_child, .max_linear_child = ja_type_2_max_linear_child, .order = 6, },
168 { .type_class = RCU_JA_LINEAR, .min_child = 5, .max_child = ja_type_3_max_child, .max_linear_child = ja_type_3_max_linear_child, .order = 7, },
169 { .type_class = RCU_JA_LINEAR, .min_child = 10, .max_child = ja_type_4_max_child, .max_linear_child = ja_type_4_max_linear_child, .order = 8, },
e5227865 170
3d45251f 171 /* Pools may fill sooner than max_child. */
8e519e3c
MD
172 { .type_class = RCU_JA_POOL, .min_child = 22, .max_child = ja_type_5_max_child, .max_linear_child = ja_type_5_max_linear_child, .order = 9, .nr_pool_order = ja_type_5_nr_pool_order, .pool_size_order = 8, },
173 { .type_class = RCU_JA_POOL, .min_child = 51, .max_child = ja_type_6_max_child, .max_linear_child = ja_type_6_max_linear_child, .order = 10, .nr_pool_order = ja_type_6_nr_pool_order, .pool_size_order = 8, },
e5227865 174
3d45251f 175 /*
b1a90ce3
MD
176 * Upon node removal below min_child, if child pool is filled
177 * beyond capacity, we roll back to pigeon.
3d45251f 178 */
64457f6c 179 { .type_class = RCU_JA_PIGEON, .min_child = 95, .max_child = ja_type_7_max_child, .order = 11, },
e1db2db5
MD
180
181 { .type_class = RCU_JA_NULL, .min_child = 0, .max_child = ja_type_8_max_child, },
e5227865 182};
#endif /* !(CAA_BITS_PER_LONG < 64) */
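/*
 * Illustrative worked example (editor's note, not upstream
 * documentation): with the 64-bit ja_types[] table above, a node
 * holding 8 children selects type 3 (max_child = 14, order = 7, i.e.
 * a 128-byte node), since 8 exceeds type 2's max_child of 7. When
 * children are later removed, the node is not shrunk back to type 2
 * until its population drops below type 3's min_child of 5: the
 * overlap between a type's min_child and the previous type's
 * max_child is what provides the hysteresis described above.
 */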
e5227865 184
1db4943c
MD
185static inline __attribute__((unused))
186void static_array_size_check(void)
187{
e1db2db5 188 CAA_BUILD_BUG_ON(CAA_ARRAY_SIZE(ja_types) < JA_TYPE_MAX_NR);
1db4943c
MD
189}
190
/*
 * The cds_ja_inode contains the compressed node data needed for the
 * read-side. For linear and pool node configurations, it starts with a
 * byte counting the number of children in the node. Then, the
 * node-specific data is placed.
 * The node mutex, if any is needed to protect concurrent updates of
 * each node, is placed in a separate hash table indexed by node
 * address. For the pigeon configuration, the number of children is
 * also kept in a separate hash table, indexed by node address, because
 * it is only required for updates.
 */

ff38c745
MD
203#define DECLARE_LINEAR_NODE(index) \
204 struct { \
205 uint8_t nr_child; \
206 uint8_t child_value[ja_type_## index ##_max_linear_child]; \
b4540e8a 207 struct cds_ja_inode_flag *child_ptr[ja_type_## index ##_max_linear_child]; \
ff38c745
MD
208 }
209
210#define DECLARE_POOL_NODE(index) \
211 struct { \
212 struct { \
213 uint8_t nr_child; \
214 uint8_t child_value[ja_type_## index ##_max_linear_child]; \
b4540e8a 215 struct cds_ja_inode_flag *child_ptr[ja_type_## index ##_max_linear_child]; \
ff38c745
MD
216 } linear[1U << ja_type_## index ##_nr_pool_order]; \
217 }
1db4943c 218
b4540e8a 219struct cds_ja_inode {
1db4943c
MD
220 union {
221 /* Linear configuration */
222 DECLARE_LINEAR_NODE(0) conf_0;
223 DECLARE_LINEAR_NODE(1) conf_1;
224 DECLARE_LINEAR_NODE(2) conf_2;
225 DECLARE_LINEAR_NODE(3) conf_3;
226 DECLARE_LINEAR_NODE(4) conf_4;
227
228 /* Pool configuration */
229 DECLARE_POOL_NODE(5) conf_5;
230 DECLARE_POOL_NODE(6) conf_6;
231
232 /* Pigeon configuration */
233 struct {
b4540e8a 234 struct cds_ja_inode_flag *child[ja_type_7_max_child];
1db4943c
MD
235 } conf_7;
236 /* data aliasing nodes for computed accesses */
b4540e8a 237 uint8_t data[sizeof(struct cds_ja_inode_flag *) * ja_type_7_max_child];
1db4943c 238 } u;
e5227865
MD
239};
240
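/*
 * Layout sketch (editor's note, for illustration only): the accessors
 * below read a linear node through computed offsets into u.data rather
 * than through the DECLARE_LINEAR_NODE() structs:
 *
 *	u.data[0]			nr_child
 *	u.data[1 .. max_linear_child]	child_value[] (one byte per child)
 *	align_ptr_size(...)		child_ptr[] (one pointer per child)
 *
 * Pool nodes repeat this linear layout (1 << nr_pool_order) times,
 * each sub-pool spanning (1 << pool_size_order) bytes. Pigeon nodes
 * are simply an array of 256 child pointers indexed by the key byte.
 */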
2e313670 241enum ja_recompact {
19ddcd04
MD
242 JA_RECOMPACT_ADD_SAME,
243 JA_RECOMPACT_ADD_NEXT,
2e313670
MD
244 JA_RECOMPACT_DEL,
245};
246
b1a90ce3
MD
247static
248struct cds_ja_inode *_ja_node_mask_ptr(struct cds_ja_inode_flag *node)
249{
250 return (struct cds_ja_inode *) (((unsigned long) node) & JA_PTR_MASK);
251}
252
253unsigned long ja_node_type(struct cds_ja_inode_flag *node)
254{
255 unsigned long type;
256
257 if (_ja_node_mask_ptr(node) == NULL) {
258 return NODE_INDEX_NULL;
259 }
260 type = (unsigned int) ((unsigned long) node & JA_TYPE_MASK);
261 assert(type < (1UL << JA_TYPE_BITS));
262 return type;
263}
264
265struct cds_ja_inode *ja_node_ptr(struct cds_ja_inode_flag *node)
266{
267 unsigned long type_index = ja_node_type(node);
268 const struct cds_ja_type *type;
269
270 type = &ja_types[type_index];
271 switch (type->type_class) {
272 case RCU_JA_LINEAR:
273 case RCU_JA_PIGEON: /* fall-through */
274 case RCU_JA_NULL: /* fall-through */
275 default: /* fall-through */
276 return _ja_node_mask_ptr(node);
277 case RCU_JA_POOL:
278 switch (type->nr_pool_order) {
279 case 1:
280 return (struct cds_ja_inode *) (((unsigned long) node) & ~(JA_POOL_1D_MASK | JA_TYPE_MASK));
281 case 2:
282 return (struct cds_ja_inode *) (((unsigned long) node) & ~(JA_POOL_2D_MASK | JA_POOL_1D_MASK | JA_TYPE_MASK));
283 default:
284 assert(0);
285 }
286 }
287}
288
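/*
 * Illustration of the pointer encoding (editor's note, not upstream
 * documentation): a cds_ja_inode_flag pointer carries metadata in the
 * low bits freed by the node's natural alignment (nodes are allocated
 * with posix_memalign() on their own size, see alloc_cds_ja_node()
 * below). The low JA_TYPE_BITS encode the index into ja_types[]; pool
 * nodes additionally encode their bit selector(s) in the
 * JA_POOL_1D_MASK/JA_POOL_2D_MASK bits, which is why ja_node_ptr()
 * masks more bits for RCU_JA_POOL than for the other classes.
 */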
354981c2
MD
289static
290struct cds_ja_inode *alloc_cds_ja_node(struct cds_ja *ja,
291 const struct cds_ja_type *ja_type)
e5227865 292{
b1a90ce3
MD
293 size_t len = 1U << ja_type->order;
294 void *p;
295 int ret;
296
297 ret = posix_memalign(&p, len, len);
298 if (ret || !p) {
299 return NULL;
300 }
301 memset(p, 0, len);
354981c2 302 uatomic_inc(&ja->nr_nodes_allocated);
b1a90ce3 303 return p;
e5227865
MD
304}
305
void free_cds_ja_node(struct cds_ja *ja, struct cds_ja_inode *node)
{
	if (node)
		uatomic_inc(&ja->nr_nodes_freed);
	free(node);
}
312
d68c6810
MD
313#define __JA_ALIGN_MASK(v, mask) (((v) + (mask)) & ~(mask))
314#define JA_ALIGN(v, align) __JA_ALIGN_MASK(v, (typeof(v)) (align) - 1)
315#define __JA_FLOOR_MASK(v, mask) ((v) & ~(mask))
316#define JA_FLOOR(v, align) __JA_FLOOR_MASK(v, (typeof(v)) (align) - 1)
317
318static
1db4943c 319uint8_t *align_ptr_size(uint8_t *ptr)
d68c6810 320{
1db4943c 321 return (uint8_t *) JA_ALIGN((unsigned long) ptr, sizeof(void *));
d68c6810
MD
322}
323
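/*
 * Worked example (editor's note, assuming a 64-bit build where
 * sizeof(void *) == 8): JA_ALIGN(v, 8) rounds v up to the next
 * multiple of 8, so align_ptr_size() maps addresses 0x1001 through
 * 0x1008 to 0x1008. It is used below to locate the pointer-aligned
 * child_ptr[] array that follows the byte-sized child_value[] array of
 * a linear node.
 */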
11c5e016 324static
d96bfb0d 325uint8_t ja_linear_node_get_nr_child(const struct cds_ja_type *type,
b4540e8a 326 struct cds_ja_inode *node)
11c5e016
MD
327{
328 assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
2e313670 329 return rcu_dereference(node->u.data[0]);
11c5e016
MD
330}
331
/*
 * The order in which values and pointers are read does not matter: if
 * a value is missing, we return NULL. If a value is there, but its
 * associated pointer is still NULL, we return NULL too.
 */
d68c6810 337static
b4540e8a
MD
338struct cds_ja_inode_flag *ja_linear_node_get_nth(const struct cds_ja_type *type,
339 struct cds_ja_inode *node,
b0ca2d21 340 struct cds_ja_inode_flag ***node_flag_ptr,
8e519e3c 341 uint8_t n)
d68c6810
MD
342{
343 uint8_t nr_child;
344 uint8_t *values;
b4540e8a
MD
345 struct cds_ja_inode_flag **pointers;
346 struct cds_ja_inode_flag *ptr;
d68c6810
MD
347 unsigned int i;
348
8e519e3c 349 assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
d68c6810 350
11c5e016 351 nr_child = ja_linear_node_get_nr_child(type, node);
13a7f5a6 352 cmm_smp_rmb(); /* read nr_child before values and pointers */
8e519e3c
MD
353 assert(nr_child <= type->max_linear_child);
354 assert(type->type_class != RCU_JA_LINEAR || nr_child >= type->min_child);
d68c6810 355
1db4943c 356 values = &node->u.data[1];
d68c6810 357 for (i = 0; i < nr_child; i++) {
13a7f5a6 358 if (CMM_LOAD_SHARED(values[i]) == n)
d68c6810
MD
359 break;
360 }
b0ca2d21
MD
361 if (i >= nr_child) {
362 if (caa_unlikely(node_flag_ptr))
363 *node_flag_ptr = NULL;
d68c6810 364 return NULL;
b0ca2d21 365 }
b4540e8a 366 pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
13a7f5a6 367 ptr = rcu_dereference(pointers[i]);
b0ca2d21
MD
368 if (caa_unlikely(node_flag_ptr))
369 *node_flag_ptr = &pointers[i];
d68c6810
MD
370 return ptr;
371}
372
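/*
 * Summary of the lockless read protocol used above (editor's note,
 * added for illustration): the reader loads nr_child, issues
 * cmm_smp_rmb(), then scans child_value[] and dereferences the
 * matching child pointer. This pairs with ja_linear_node_set_nth()
 * below, which publishes the pointer and value first, issues
 * cmm_smp_wmb(), and only then increments nr_child. A reader can
 * therefore never scan a slot whose value/pointer pair has not yet
 * been fully published.
 */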
291b2543
MD
373static
374struct cds_ja_inode_flag *ja_linear_node_get_left(const struct cds_ja_type *type,
375 struct cds_ja_inode *node,
376 unsigned int n)
377{
378 uint8_t nr_child;
379 uint8_t *values;
380 struct cds_ja_inode_flag **pointers;
381 struct cds_ja_inode_flag *ptr;
	unsigned int i, match_idx = 0;	/* initialized to silence compilers; only read when match_v >= 0 */
383 int match_v = -1;
384
385 assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
386
387 nr_child = ja_linear_node_get_nr_child(type, node);
388 cmm_smp_rmb(); /* read nr_child before values and pointers */
389 assert(nr_child <= type->max_linear_child);
390 assert(type->type_class != RCU_JA_LINEAR || nr_child >= type->min_child);
391
392 values = &node->u.data[1];
393 for (i = 0; i < nr_child; i++) {
394 unsigned int v;
395
396 v = CMM_LOAD_SHARED(values[i]);
397 if (v < n && (int) v > match_v) {
398 match_v = v;
399 match_idx = i;
400 }
401 }
402 if (match_v < 0) {
403 return NULL;
404 }
405 pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
406 ptr = rcu_dereference(pointers[match_idx]);
407 return ptr;
408}
409
11c5e016 410static
5a9a87dd 411void ja_linear_node_get_ith_pos(const struct cds_ja_type *type,
b4540e8a 412 struct cds_ja_inode *node,
11c5e016
MD
413 uint8_t i,
414 uint8_t *v,
b4540e8a 415 struct cds_ja_inode_flag **iter)
11c5e016
MD
416{
417 uint8_t *values;
b4540e8a 418 struct cds_ja_inode_flag **pointers;
11c5e016
MD
419
420 assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
421 assert(i < ja_linear_node_get_nr_child(type, node));
422
423 values = &node->u.data[1];
424 *v = values[i];
b4540e8a 425 pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
11c5e016
MD
426 *iter = pointers[i];
427}
428
d68c6810 429static
b4540e8a
MD
430struct cds_ja_inode_flag *ja_pool_node_get_nth(const struct cds_ja_type *type,
431 struct cds_ja_inode *node,
b1a90ce3 432 struct cds_ja_inode_flag *node_flag,
b0ca2d21 433 struct cds_ja_inode_flag ***node_flag_ptr,
8e519e3c 434 uint8_t n)
d68c6810 435{
b4540e8a 436 struct cds_ja_inode *linear;
d68c6810 437
fd800776 438 assert(type->type_class == RCU_JA_POOL);
b1a90ce3
MD
439
440 switch (type->nr_pool_order) {
441 case 1:
442 {
443 unsigned long bitsel, index;
444
445 bitsel = ja_node_pool_1d_bitsel(node_flag);
446 assert(bitsel < CHAR_BIT);
19ddcd04 447 index = ((unsigned long) n >> bitsel) & 0x1;
b1a90ce3
MD
448 linear = (struct cds_ja_inode *) &node->u.data[index << type->pool_size_order];
449 break;
450 }
451 case 2:
452 {
19ddcd04
MD
453 unsigned long bitsel[2], index[2], rindex;
454
455 ja_node_pool_2d_bitsel(node_flag, bitsel);
456 assert(bitsel[0] < CHAR_BIT);
457 assert(bitsel[1] < CHAR_BIT);
458 index[0] = ((unsigned long) n >> bitsel[0]) & 0x1;
459 index[0] <<= 1;
460 index[1] = ((unsigned long) n >> bitsel[1]) & 0x1;
461 rindex = index[0] | index[1];
462 linear = (struct cds_ja_inode *) &node->u.data[rindex << type->pool_size_order];
b1a90ce3
MD
463 break;
464 }
465 default:
466 linear = NULL;
467 assert(0);
468 }
48cbe001 469 return ja_linear_node_get_nth(type, linear, node_flag_ptr, n);
d68c6810
MD
470}
471
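/*
 * Worked example with made-up values (editor's note): for a
 * 2-dimension pool with bitsel[] = { 5, 3 } and key byte n = 0x2a
 * (binary 00101010), bit 5 and bit 3 of n are both 1, so index[0] = 2,
 * index[1] = 1 and rindex = 3: the lookup is directed to the fourth
 * linear sub-pool, at offset 3 << pool_size_order within u.data. The
 * bit selectors themselves are chosen at recompaction time (see
 * ja_node_sum_distribution_2d() below) so that children spread as
 * evenly as possible across the sub-pools.
 */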
11c5e016 472static
b4540e8a
MD
473struct cds_ja_inode *ja_pool_node_get_ith_pool(const struct cds_ja_type *type,
474 struct cds_ja_inode *node,
11c5e016
MD
475 uint8_t i)
476{
477 assert(type->type_class == RCU_JA_POOL);
b4540e8a 478 return (struct cds_ja_inode *)
11c5e016
MD
479 &node->u.data[(unsigned int) i << type->pool_size_order];
480}
481
291b2543
MD
482static
483struct cds_ja_inode_flag *ja_pool_node_get_left(const struct cds_ja_type *type,
484 struct cds_ja_inode *node,
485 unsigned int n)
486{
487 unsigned int pool_nr;
488 int match_v = -1;
489 struct cds_ja_inode_flag *match_node_flag = NULL;
490
491 assert(type->type_class == RCU_JA_POOL);
492
493 for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) {
494 struct cds_ja_inode *pool =
495 ja_pool_node_get_ith_pool(type,
496 node, pool_nr);
497 uint8_t nr_child =
498 ja_linear_node_get_nr_child(type, pool);
499 unsigned int j;
500
501 for (j = 0; j < nr_child; j++) {
502 struct cds_ja_inode_flag *iter;
503 uint8_t v;
504
505 ja_linear_node_get_ith_pos(type, pool,
506 j, &v, &iter);
507 if (!iter)
508 continue;
509 if (v < n && (int) v > match_v) {
510 match_v = v;
511 match_node_flag = iter;
512 }
513 }
514 }
515 return match_node_flag;
516}
517
d68c6810 518static
b4540e8a
MD
519struct cds_ja_inode_flag *ja_pigeon_node_get_nth(const struct cds_ja_type *type,
520 struct cds_ja_inode *node,
b0ca2d21 521 struct cds_ja_inode_flag ***node_flag_ptr,
8e519e3c 522 uint8_t n)
d68c6810 523{
48cbe001
MD
524 struct cds_ja_inode_flag **child_node_flag_ptr;
525 struct cds_ja_inode_flag *child_node_flag;
5a9a87dd 526
d68c6810 527 assert(type->type_class == RCU_JA_PIGEON);
48cbe001
MD
528 child_node_flag_ptr = &((struct cds_ja_inode_flag **) node->u.data)[n];
529 child_node_flag = rcu_dereference(*child_node_flag_ptr);
582a6ade 530 dbg_printf("ja_pigeon_node_get_nth child_node_flag_ptr %p\n",
48cbe001 531 child_node_flag_ptr);
b0ca2d21 532 if (caa_unlikely(node_flag_ptr))
48cbe001
MD
533 *node_flag_ptr = child_node_flag_ptr;
534 return child_node_flag;
d68c6810
MD
535}
536
291b2543
MD
537static
538struct cds_ja_inode_flag *ja_pigeon_node_get_left(const struct cds_ja_type *type,
539 struct cds_ja_inode *node,
540 unsigned int n)
541{
542 struct cds_ja_inode_flag **child_node_flag_ptr;
543 struct cds_ja_inode_flag *child_node_flag;
544 int i;
545
546 assert(type->type_class == RCU_JA_PIGEON);
547
548 /* n - 1 is first value left of n */
549 for (i = n - 1; i >= 0; i--) {
550 child_node_flag_ptr = &((struct cds_ja_inode_flag **) node->u.data)[i];
551 child_node_flag = rcu_dereference(*child_node_flag_ptr);
552 if (child_node_flag) {
553 dbg_printf("ja_pigeon_node_get_left child_node_flag %p\n",
554 child_node_flag);
555 return child_node_flag;
556 }
557 }
558 return NULL;
559}
560
2e313670
MD
561static
562struct cds_ja_inode_flag *ja_pigeon_node_get_ith_pos(const struct cds_ja_type *type,
563 struct cds_ja_inode *node,
564 uint8_t i)
565{
48cbe001 566 return ja_pigeon_node_get_nth(type, node, NULL, i);
2e313670
MD
567}
568
13a7f5a6
MD
569/*
570 * ja_node_get_nth: get nth item from a node.
571 * node_flag is already rcu_dereference'd.
572 */
d68c6810 573static
b62a8d0c 574struct cds_ja_inode_flag *ja_node_get_nth(struct cds_ja_inode_flag *node_flag,
b0ca2d21 575 struct cds_ja_inode_flag ***node_flag_ptr,
8e519e3c 576 uint8_t n)
d68c6810
MD
577{
578 unsigned int type_index;
b4540e8a 579 struct cds_ja_inode *node;
d96bfb0d 580 const struct cds_ja_type *type;
d68c6810 581
d68c6810 582 node = ja_node_ptr(node_flag);
5a9a87dd 583 assert(node != NULL);
d68c6810
MD
584 type_index = ja_node_type(node_flag);
585 type = &ja_types[type_index];
586
587 switch (type->type_class) {
588 case RCU_JA_LINEAR:
5a9a87dd 589 return ja_linear_node_get_nth(type, node,
b62a8d0c 590 node_flag_ptr, n);
fd800776 591 case RCU_JA_POOL:
b1a90ce3 592 return ja_pool_node_get_nth(type, node, node_flag,
b62a8d0c 593 node_flag_ptr, n);
d68c6810 594 case RCU_JA_PIGEON:
5a9a87dd 595 return ja_pigeon_node_get_nth(type, node,
b62a8d0c 596 node_flag_ptr, n);
d68c6810
MD
597 default:
598 assert(0);
599 return (void *) -1UL;
600 }
601}
602
291b2543
MD
603static
604struct cds_ja_inode_flag *ja_node_get_left(struct cds_ja_inode_flag *node_flag,
605 unsigned int n)
606{
607 unsigned int type_index;
608 struct cds_ja_inode *node;
609 const struct cds_ja_type *type;
610
611 node = ja_node_ptr(node_flag);
612 assert(node != NULL);
613 type_index = ja_node_type(node_flag);
614 type = &ja_types[type_index];
615
616 switch (type->type_class) {
617 case RCU_JA_LINEAR:
618 return ja_linear_node_get_left(type, node, n);
619 case RCU_JA_POOL:
620 return ja_pool_node_get_left(type, node, n);
621 case RCU_JA_PIGEON:
622 return ja_pigeon_node_get_left(type, node, n);
623 default:
624 assert(0);
625 return (void *) -1UL;
626 }
627}
628
629static
630struct cds_ja_inode_flag *ja_node_get_rightmost(struct cds_ja_inode_flag *node_flag)
631{
632 return ja_node_get_left(node_flag, JA_ENTRY_PER_NODE);
633}
634
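/*
 * Clarification of the "left" helpers above (editor's note, added for
 * illustration): ja_node_get_left() returns the child whose key byte
 * is the largest value strictly below n, or NULL if there is none.
 * Passing JA_ENTRY_PER_NODE (one past the largest possible key byte)
 * therefore yields the rightmost populated child, which is what a
 * lower-equal lookup needs when it walks back down a subtree of
 * smaller keys.
 */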
8e519e3c 635static
d96bfb0d 636int ja_linear_node_set_nth(const struct cds_ja_type *type,
b4540e8a 637 struct cds_ja_inode *node,
d96bfb0d 638 struct cds_ja_shadow_node *shadow_node,
8e519e3c 639 uint8_t n,
b4540e8a 640 struct cds_ja_inode_flag *child_node_flag)
8e519e3c
MD
641{
642 uint8_t nr_child;
643 uint8_t *values, *nr_child_ptr;
b4540e8a 644 struct cds_ja_inode_flag **pointers;
2e313670 645 unsigned int i, unused = 0;
8e519e3c
MD
646
647 assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
648
649 nr_child_ptr = &node->u.data[0];
48cbe001
MD
650 dbg_printf("linear set nth: n %u, nr_child_ptr %p\n",
651 (unsigned int) n, nr_child_ptr);
8e519e3c
MD
652 nr_child = *nr_child_ptr;
653 assert(nr_child <= type->max_linear_child);
8e519e3c
MD
654
655 values = &node->u.data[1];
2e313670
MD
656 pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
657 /* Check if node value is already populated */
8e519e3c 658 for (i = 0; i < nr_child; i++) {
2e313670
MD
659 if (values[i] == n) {
660 if (pointers[i])
661 return -EEXIST;
662 else
663 break;
664 } else {
665 if (!pointers[i])
666 unused++;
667 }
8e519e3c 668 }
2e313670
MD
669 if (i == nr_child && nr_child >= type->max_linear_child) {
670 if (unused)
671 return -ERANGE; /* recompact node */
672 else
673 return -ENOSPC; /* No space left in this node type */
674 }
675
676 assert(pointers[i] == NULL);
677 rcu_assign_pointer(pointers[i], child_node_flag);
678 /* If we expanded the nr_child, increment it */
679 if (i == nr_child) {
680 CMM_STORE_SHARED(values[nr_child], n);
681 /* write pointer and value before nr_child */
682 cmm_smp_wmb();
683 CMM_STORE_SHARED(*nr_child_ptr, nr_child + 1);
8e519e3c 684 }
e1db2db5 685 shadow_node->nr_child++;
a2a7ff59
MD
686 dbg_printf("linear set nth: %u child, shadow: %u child, for node %p shadow %p\n",
687 (unsigned int) CMM_LOAD_SHARED(*nr_child_ptr),
688 (unsigned int) shadow_node->nr_child,
689 node, shadow_node);
690
8e519e3c
MD
691 return 0;
692}
693
694static
d96bfb0d 695int ja_pool_node_set_nth(const struct cds_ja_type *type,
b4540e8a 696 struct cds_ja_inode *node,
b1a90ce3 697 struct cds_ja_inode_flag *node_flag,
d96bfb0d 698 struct cds_ja_shadow_node *shadow_node,
8e519e3c 699 uint8_t n,
b4540e8a 700 struct cds_ja_inode_flag *child_node_flag)
8e519e3c 701{
b4540e8a 702 struct cds_ja_inode *linear;
8e519e3c
MD
703
704 assert(type->type_class == RCU_JA_POOL);
b1a90ce3
MD
705
706 switch (type->nr_pool_order) {
707 case 1:
708 {
709 unsigned long bitsel, index;
710
711 bitsel = ja_node_pool_1d_bitsel(node_flag);
712 assert(bitsel < CHAR_BIT);
19ddcd04 713 index = ((unsigned long) n >> bitsel) & 0x1;
b1a90ce3
MD
714 linear = (struct cds_ja_inode *) &node->u.data[index << type->pool_size_order];
715 break;
716 }
717 case 2:
718 {
19ddcd04
MD
719 unsigned long bitsel[2], index[2], rindex;
720
721 ja_node_pool_2d_bitsel(node_flag, bitsel);
722 assert(bitsel[0] < CHAR_BIT);
723 assert(bitsel[1] < CHAR_BIT);
724 index[0] = ((unsigned long) n >> bitsel[0]) & 0x1;
725 index[0] <<= 1;
726 index[1] = ((unsigned long) n >> bitsel[1]) & 0x1;
727 rindex = index[0] | index[1];
728 linear = (struct cds_ja_inode *) &node->u.data[rindex << type->pool_size_order];
b1a90ce3
MD
729 break;
730 }
731 default:
732 linear = NULL;
733 assert(0);
734 }
735
e1db2db5
MD
736 return ja_linear_node_set_nth(type, linear, shadow_node,
737 n, child_node_flag);
8e519e3c
MD
738}
739
740static
d96bfb0d 741int ja_pigeon_node_set_nth(const struct cds_ja_type *type,
b4540e8a 742 struct cds_ja_inode *node,
d96bfb0d 743 struct cds_ja_shadow_node *shadow_node,
8e519e3c 744 uint8_t n,
b4540e8a 745 struct cds_ja_inode_flag *child_node_flag)
8e519e3c 746{
b4540e8a 747 struct cds_ja_inode_flag **ptr;
8e519e3c
MD
748
749 assert(type->type_class == RCU_JA_PIGEON);
b4540e8a 750 ptr = &((struct cds_ja_inode_flag **) node->u.data)[n];
5a9a87dd 751 if (*ptr)
8e519e3c
MD
752 return -EEXIST;
753 rcu_assign_pointer(*ptr, child_node_flag);
e1db2db5 754 shadow_node->nr_child++;
8e519e3c
MD
755 return 0;
756}
757
d68c6810 758/*
7a0b2331 759 * _ja_node_set_nth: set nth item within a node. Return an error
8e519e3c 760 * (negative error value) if it is already there.
d68c6810 761 */
8e519e3c 762static
d96bfb0d 763int _ja_node_set_nth(const struct cds_ja_type *type,
b4540e8a 764 struct cds_ja_inode *node,
b1a90ce3 765 struct cds_ja_inode_flag *node_flag,
d96bfb0d 766 struct cds_ja_shadow_node *shadow_node,
e1db2db5 767 uint8_t n,
b4540e8a 768 struct cds_ja_inode_flag *child_node_flag)
8e519e3c 769{
8e519e3c
MD
770 switch (type->type_class) {
771 case RCU_JA_LINEAR:
e1db2db5 772 return ja_linear_node_set_nth(type, node, shadow_node, n,
8e519e3c
MD
773 child_node_flag);
774 case RCU_JA_POOL:
b1a90ce3 775 return ja_pool_node_set_nth(type, node, node_flag, shadow_node, n,
8e519e3c
MD
776 child_node_flag);
777 case RCU_JA_PIGEON:
e1db2db5 778 return ja_pigeon_node_set_nth(type, node, shadow_node, n,
8e519e3c 779 child_node_flag);
e1db2db5
MD
780 case RCU_JA_NULL:
781 return -ENOSPC;
8e519e3c
MD
782 default:
783 assert(0);
784 return -EINVAL;
785 }
786
787 return 0;
788}
7a0b2331 789
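/*
 * Summary of the error protocol shared by the set and clear helpers in
 * this file (editor's note, added for clarity): -EEXIST means the key
 * byte is already populated; -ENOSPC means the node type has no room
 * left and must grow to the next type (JA_RECOMPACT_ADD_NEXT); -ERANGE
 * means the node is nominally full but contains NULLed slots and
 * should be recompacted at the same size (JA_RECOMPACT_ADD_SAME);
 * -EFBIG, returned by the clear helpers, means the node dropped to
 * min_child or below and should be considered for shrinking
 * (JA_RECOMPACT_DEL).
 */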
2e313670 790static
af3cbd45 791int ja_linear_node_clear_ptr(const struct cds_ja_type *type,
2e313670
MD
792 struct cds_ja_inode *node,
793 struct cds_ja_shadow_node *shadow_node,
af3cbd45 794 struct cds_ja_inode_flag **node_flag_ptr)
2e313670
MD
795{
796 uint8_t nr_child;
af3cbd45 797 uint8_t *nr_child_ptr;
2e313670
MD
798
799 assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
800
801 nr_child_ptr = &node->u.data[0];
2e313670
MD
802 nr_child = *nr_child_ptr;
803 assert(nr_child <= type->max_linear_child);
804
48cbe001
MD
805 if (type->type_class == RCU_JA_LINEAR) {
806 assert(!shadow_node->fallback_removal_count);
807 if (shadow_node->nr_child <= type->min_child) {
2e313670
MD
808 /* We need to try recompacting the node */
809 return -EFBIG;
810 }
811 }
19ddcd04 812 dbg_printf("linear clear ptr: nr_child_ptr %p\n", nr_child_ptr);
af3cbd45
MD
813 assert(*node_flag_ptr != NULL);
814 rcu_assign_pointer(*node_flag_ptr, NULL);
2e313670
MD
815 /*
816 * Value and nr_child are never changed (would cause ABA issue).
817 * Instead, we leave the pointer to NULL and recompact the node
818 * once in a while. It is allowed to set a NULL pointer to a new
819 * value without recompaction though.
820 * Only update the shadow node accounting.
821 */
822 shadow_node->nr_child--;
af3cbd45 823 dbg_printf("linear clear ptr: %u child, shadow: %u child, for node %p shadow %p\n",
2e313670
MD
824 (unsigned int) CMM_LOAD_SHARED(*nr_child_ptr),
825 (unsigned int) shadow_node->nr_child,
826 node, shadow_node);
2e313670
MD
827 return 0;
828}
829
830static
af3cbd45 831int ja_pool_node_clear_ptr(const struct cds_ja_type *type,
2e313670 832 struct cds_ja_inode *node,
19ddcd04 833 struct cds_ja_inode_flag *node_flag,
2e313670 834 struct cds_ja_shadow_node *shadow_node,
af3cbd45 835 struct cds_ja_inode_flag **node_flag_ptr,
2e313670
MD
836 uint8_t n)
837{
838 struct cds_ja_inode *linear;
839
840 assert(type->type_class == RCU_JA_POOL);
19ddcd04
MD
841
842 if (shadow_node->fallback_removal_count) {
843 shadow_node->fallback_removal_count--;
844 } else {
845 /* We should try recompacting the node */
846 if (shadow_node->nr_child <= type->min_child)
847 return -EFBIG;
848 }
849
850 switch (type->nr_pool_order) {
851 case 1:
852 {
853 unsigned long bitsel, index;
854
855 bitsel = ja_node_pool_1d_bitsel(node_flag);
856 assert(bitsel < CHAR_BIT);
		index = ((unsigned long) n >> bitsel) & 0x1;
858 linear = (struct cds_ja_inode *) &node->u.data[index << type->pool_size_order];
859 break;
860 }
861 case 2:
862 {
863 unsigned long bitsel[2], index[2], rindex;
864
865 ja_node_pool_2d_bitsel(node_flag, bitsel);
866 assert(bitsel[0] < CHAR_BIT);
867 assert(bitsel[1] < CHAR_BIT);
868 index[0] = ((unsigned long) n >> bitsel[0]) & 0x1;
869 index[0] <<= 1;
870 index[1] = ((unsigned long) n >> bitsel[1]) & 0x1;
871 rindex = index[0] | index[1];
872 linear = (struct cds_ja_inode *) &node->u.data[rindex << type->pool_size_order];
873 break;
874 }
875 default:
876 linear = NULL;
877 assert(0);
878 }
879
af3cbd45 880 return ja_linear_node_clear_ptr(type, linear, shadow_node, node_flag_ptr);
2e313670
MD
881}
882
883static
af3cbd45 884int ja_pigeon_node_clear_ptr(const struct cds_ja_type *type,
2e313670
MD
885 struct cds_ja_inode *node,
886 struct cds_ja_shadow_node *shadow_node,
af3cbd45 887 struct cds_ja_inode_flag **node_flag_ptr)
2e313670 888{
2e313670 889 assert(type->type_class == RCU_JA_PIGEON);
19ddcd04
MD
890
891 if (shadow_node->fallback_removal_count) {
892 shadow_node->fallback_removal_count--;
893 } else {
894 /* We should try recompacting the node */
895 if (shadow_node->nr_child <= type->min_child)
896 return -EFBIG;
897 }
4d6ef45e 898 dbg_printf("ja_pigeon_node_clear_ptr: clearing ptr: %p\n", *node_flag_ptr);
af3cbd45 899 rcu_assign_pointer(*node_flag_ptr, NULL);
2e313670
MD
900 shadow_node->nr_child--;
901 return 0;
902}
903
904/*
af3cbd45 905 * _ja_node_clear_ptr: clear ptr item within a node. Return an error
2e313670
MD
906 * (negative error value) if it is not found (-ENOENT).
907 */
908static
af3cbd45 909int _ja_node_clear_ptr(const struct cds_ja_type *type,
2e313670 910 struct cds_ja_inode *node,
19ddcd04 911 struct cds_ja_inode_flag *node_flag,
2e313670 912 struct cds_ja_shadow_node *shadow_node,
af3cbd45 913 struct cds_ja_inode_flag **node_flag_ptr,
2e313670
MD
914 uint8_t n)
915{
916 switch (type->type_class) {
917 case RCU_JA_LINEAR:
af3cbd45 918 return ja_linear_node_clear_ptr(type, node, shadow_node, node_flag_ptr);
2e313670 919 case RCU_JA_POOL:
19ddcd04 920 return ja_pool_node_clear_ptr(type, node, node_flag, shadow_node, node_flag_ptr, n);
2e313670 921 case RCU_JA_PIGEON:
af3cbd45 922 return ja_pigeon_node_clear_ptr(type, node, shadow_node, node_flag_ptr);
2e313670
MD
923 case RCU_JA_NULL:
924 return -ENOENT;
925 default:
926 assert(0);
927 return -EINVAL;
928 }
929
930 return 0;
931}
932
b1a90ce3
MD
/*
 * Calculate the bit distribution. Returns the bit (0 to 7) that splits
 * the distribution into two sub-distributions containing as close to
 * the same number of elements as possible.
 */
938static
939unsigned int ja_node_sum_distribution_1d(enum ja_recompact mode,
940 struct cds_ja *ja,
941 unsigned int type_index,
942 const struct cds_ja_type *type,
943 struct cds_ja_inode *node,
944 struct cds_ja_shadow_node *shadow_node,
945 uint8_t n,
946 struct cds_ja_inode_flag *child_node_flag,
947 struct cds_ja_inode_flag **nullify_node_flag_ptr)
948{
949 uint8_t nr_one[JA_BITS_PER_BYTE];
950 unsigned int bitsel = 0, bit_i, overall_best_distance = UINT_MAX;
951 unsigned int distrib_nr_child = 0;
952
953 memset(nr_one, 0, sizeof(nr_one));
954
955 switch (type->type_class) {
956 case RCU_JA_LINEAR:
957 {
958 uint8_t nr_child =
959 ja_linear_node_get_nr_child(type, node);
960 unsigned int i;
961
962 for (i = 0; i < nr_child; i++) {
963 struct cds_ja_inode_flag *iter;
b1a90ce3
MD
964 uint8_t v;
965
966 ja_linear_node_get_ith_pos(type, node, i, &v, &iter);
967 if (!iter)
968 continue;
969 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
970 continue;
f5531dd9
MD
971 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
972 if (v & (1U << bit_i))
973 nr_one[bit_i]++;
b1a90ce3
MD
974 }
975 distrib_nr_child++;
976 }
977 break;
978 }
979 case RCU_JA_POOL:
980 {
981 unsigned int pool_nr;
982
983 for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) {
984 struct cds_ja_inode *pool =
985 ja_pool_node_get_ith_pool(type,
986 node, pool_nr);
987 uint8_t nr_child =
988 ja_linear_node_get_nr_child(type, pool);
989 unsigned int j;
990
991 for (j = 0; j < nr_child; j++) {
992 struct cds_ja_inode_flag *iter;
b1a90ce3
MD
993 uint8_t v;
994
995 ja_linear_node_get_ith_pos(type, pool,
996 j, &v, &iter);
997 if (!iter)
998 continue;
999 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
1000 continue;
f5531dd9
MD
1001 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1002 if (v & (1U << bit_i))
1003 nr_one[bit_i]++;
b1a90ce3
MD
1004 }
1005 distrib_nr_child++;
1006 }
1007 }
1008 break;
1009 }
1010 case RCU_JA_PIGEON:
1011 {
b1a90ce3
MD
1012 unsigned int i;
1013
1014 assert(mode == JA_RECOMPACT_DEL);
48cbe001 1015 for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
b1a90ce3 1016 struct cds_ja_inode_flag *iter;
b1a90ce3
MD
1017
1018 iter = ja_pigeon_node_get_ith_pos(type, node, i);
1019 if (!iter)
1020 continue;
1021 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
1022 continue;
f5531dd9
MD
1023 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1024 if (i & (1U << bit_i))
1025 nr_one[bit_i]++;
b1a90ce3
MD
1026 }
1027 distrib_nr_child++;
1028 }
1029 break;
1030 }
1031 case RCU_JA_NULL:
19ddcd04 1032 assert(mode == JA_RECOMPACT_ADD_NEXT);
b1a90ce3
MD
1033 break;
1034 default:
1035 assert(0);
1036 break;
1037 }
1038
19ddcd04 1039 if (mode == JA_RECOMPACT_ADD_NEXT || mode == JA_RECOMPACT_ADD_SAME) {
f5531dd9
MD
1040 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1041 if (n & (1U << bit_i))
1042 nr_one[bit_i]++;
b1a90ce3
MD
1043 }
1044 distrib_nr_child++;
1045 }
1046
1047 /*
1048 * The best bit selector is that for which the number of ones is
1049 * closest to half of the number of children in the
f5531dd9
MD
1050 * distribution. We calculate the distance using the double of
1051 * the sub-distribution sizes to eliminate truncation error.
b1a90ce3
MD
1052 */
1053 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1054 unsigned int distance_to_best;
1055
f5531dd9 1056 distance_to_best = abs_int((nr_one[bit_i] << 1U) - distrib_nr_child);
b1a90ce3
MD
1057 if (distance_to_best < overall_best_distance) {
1058 overall_best_distance = distance_to_best;
1059 bitsel = bit_i;
1060 }
1061 }
1062 dbg_printf("1 dimension pool bit selection: (%u)\n", bitsel);
1063 return bitsel;
1064}
1065
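/*
 * Worked example with made-up values (editor's note): for children
 * with key bytes { 0x10, 0x12, 0x50, 0x52 }, bit 1 splits them 2/2
 * (distance |2*2 - 4| = 0), bit 4 splits them 4/0 (distance 4), and
 * bit 6 also splits them 2/2. The first bit reaching the best distance
 * wins, so bit 1 is selected here.
 */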
19ddcd04
MD
1066/*
 * Calculate the bit distribution in two dimensions. Returns the two
 * bits (each 0 to 7) that split the distribution into four
 * sub-distributions containing as close to the same number of elements
 * as possible.
1070 */
1071static
1072void ja_node_sum_distribution_2d(enum ja_recompact mode,
1073 struct cds_ja *ja,
1074 unsigned int type_index,
1075 const struct cds_ja_type *type,
1076 struct cds_ja_inode *node,
1077 struct cds_ja_shadow_node *shadow_node,
1078 uint8_t n,
1079 struct cds_ja_inode_flag *child_node_flag,
1080 struct cds_ja_inode_flag **nullify_node_flag_ptr,
1081 unsigned int *_bitsel)
1082{
1083 uint8_t nr_2d_11[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE],
1084 nr_2d_10[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE],
1085 nr_2d_01[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE],
1086 nr_2d_00[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE];
1087 unsigned int bitsel[2] = { 0, 1 };
4a073c53
MD
1088 unsigned int bit_i, bit_j;
1089 int overall_best_distance = INT_MAX;
19ddcd04
MD
1090 unsigned int distrib_nr_child = 0;
1091
1092 memset(nr_2d_11, 0, sizeof(nr_2d_11));
1093 memset(nr_2d_10, 0, sizeof(nr_2d_10));
4a073c53
MD
1094 memset(nr_2d_01, 0, sizeof(nr_2d_01));
1095 memset(nr_2d_00, 0, sizeof(nr_2d_00));
19ddcd04
MD
1096
1097 switch (type->type_class) {
1098 case RCU_JA_LINEAR:
1099 {
1100 uint8_t nr_child =
1101 ja_linear_node_get_nr_child(type, node);
1102 unsigned int i;
1103
1104 for (i = 0; i < nr_child; i++) {
1105 struct cds_ja_inode_flag *iter;
1106 uint8_t v;
1107
1108 ja_linear_node_get_ith_pos(type, node, i, &v, &iter);
1109 if (!iter)
1110 continue;
1111 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
1112 continue;
1113 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1114 for (bit_j = 0; bit_j < bit_i; bit_j++) {
1115 if ((v & (1U << bit_i)) && (v & (1U << bit_j))) {
1116 nr_2d_11[bit_i][bit_j]++;
1117 }
1118 if ((v & (1U << bit_i)) && !(v & (1U << bit_j))) {
1119 nr_2d_10[bit_i][bit_j]++;
1120 }
1121 if (!(v & (1U << bit_i)) && (v & (1U << bit_j))) {
1122 nr_2d_01[bit_i][bit_j]++;
1123 }
1124 if (!(v & (1U << bit_i)) && !(v & (1U << bit_j))) {
1125 nr_2d_00[bit_i][bit_j]++;
1126 }
1127 }
1128 }
1129 distrib_nr_child++;
1130 }
1131 break;
1132 }
1133 case RCU_JA_POOL:
1134 {
1135 unsigned int pool_nr;
1136
1137 for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) {
1138 struct cds_ja_inode *pool =
1139 ja_pool_node_get_ith_pool(type,
1140 node, pool_nr);
1141 uint8_t nr_child =
1142 ja_linear_node_get_nr_child(type, pool);
1143 unsigned int j;
1144
1145 for (j = 0; j < nr_child; j++) {
1146 struct cds_ja_inode_flag *iter;
1147 uint8_t v;
1148
1149 ja_linear_node_get_ith_pos(type, pool,
1150 j, &v, &iter);
1151 if (!iter)
1152 continue;
1153 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
1154 continue;
1155 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1156 for (bit_j = 0; bit_j < bit_i; bit_j++) {
1157 if ((v & (1U << bit_i)) && (v & (1U << bit_j))) {
1158 nr_2d_11[bit_i][bit_j]++;
1159 }
1160 if ((v & (1U << bit_i)) && !(v & (1U << bit_j))) {
1161 nr_2d_10[bit_i][bit_j]++;
1162 }
1163 if (!(v & (1U << bit_i)) && (v & (1U << bit_j))) {
1164 nr_2d_01[bit_i][bit_j]++;
1165 }
1166 if (!(v & (1U << bit_i)) && !(v & (1U << bit_j))) {
1167 nr_2d_00[bit_i][bit_j]++;
1168 }
1169 }
1170 }
1171 distrib_nr_child++;
1172 }
1173 }
1174 break;
1175 }
1176 case RCU_JA_PIGEON:
1177 {
19ddcd04
MD
1178 unsigned int i;
1179
1180 assert(mode == JA_RECOMPACT_DEL);
48cbe001 1181 for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
19ddcd04
MD
1182 struct cds_ja_inode_flag *iter;
1183
1184 iter = ja_pigeon_node_get_ith_pos(type, node, i);
1185 if (!iter)
1186 continue;
1187 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
1188 continue;
1189 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1190 for (bit_j = 0; bit_j < bit_i; bit_j++) {
1191 if ((i & (1U << bit_i)) && (i & (1U << bit_j))) {
1192 nr_2d_11[bit_i][bit_j]++;
1193 }
1194 if ((i & (1U << bit_i)) && !(i & (1U << bit_j))) {
1195 nr_2d_10[bit_i][bit_j]++;
1196 }
1197 if (!(i & (1U << bit_i)) && (i & (1U << bit_j))) {
1198 nr_2d_01[bit_i][bit_j]++;
1199 }
1200 if (!(i & (1U << bit_i)) && !(i & (1U << bit_j))) {
1201 nr_2d_00[bit_i][bit_j]++;
1202 }
1203 }
1204 }
1205 distrib_nr_child++;
1206 }
1207 break;
1208 }
1209 case RCU_JA_NULL:
1210 assert(mode == JA_RECOMPACT_ADD_NEXT);
1211 break;
1212 default:
1213 assert(0);
1214 break;
1215 }
1216
1217 if (mode == JA_RECOMPACT_ADD_NEXT || mode == JA_RECOMPACT_ADD_SAME) {
1218 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1219 for (bit_j = 0; bit_j < bit_i; bit_j++) {
1220 if ((n & (1U << bit_i)) && (n & (1U << bit_j))) {
1221 nr_2d_11[bit_i][bit_j]++;
1222 }
1223 if ((n & (1U << bit_i)) && !(n & (1U << bit_j))) {
1224 nr_2d_10[bit_i][bit_j]++;
1225 }
1226 if (!(n & (1U << bit_i)) && (n & (1U << bit_j))) {
1227 nr_2d_01[bit_i][bit_j]++;
1228 }
1229 if (!(n & (1U << bit_i)) && !(n & (1U << bit_j))) {
1230 nr_2d_00[bit_i][bit_j]++;
1231 }
1232 }
1233 }
1234 distrib_nr_child++;
1235 }
1236
1237 /*
1238 * The best bit selector is that for which the number of nodes
1239 * in each sub-class is closest to one-fourth of the number of
1240 * children in the distribution. We calculate the distance using
1241 * 4 times the size of the sub-distribution to eliminate
1242 * truncation error.
1243 */
1244 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1245 for (bit_j = 0; bit_j < bit_i; bit_j++) {
4a073c53 1246 int distance_to_best[4];
19ddcd04 1247
4a073c53
MD
1248 distance_to_best[0] = (nr_2d_11[bit_i][bit_j] << 2U) - distrib_nr_child;
1249 distance_to_best[1] = (nr_2d_10[bit_i][bit_j] << 2U) - distrib_nr_child;
1250 distance_to_best[2] = (nr_2d_01[bit_i][bit_j] << 2U) - distrib_nr_child;
1251 distance_to_best[3] = (nr_2d_00[bit_i][bit_j] << 2U) - distrib_nr_child;
19ddcd04 1252
4a073c53
MD
1253 /* Consider worse distance above best */
1254 if (distance_to_best[1] > 0 && distance_to_best[1] > distance_to_best[0])
19ddcd04 1255 distance_to_best[0] = distance_to_best[1];
4a073c53 1256 if (distance_to_best[2] > 0 && distance_to_best[2] > distance_to_best[0])
19ddcd04 1257 distance_to_best[0] = distance_to_best[2];
4a073c53 1258 if (distance_to_best[3] > 0 && distance_to_best[3] > distance_to_best[0])
19ddcd04 1259 distance_to_best[0] = distance_to_best[3];
4a073c53 1260
19ddcd04
MD
1261 /*
1262 * If our worse distance is better than overall,
1263 * we become new best candidate.
1264 */
1265 if (distance_to_best[0] < overall_best_distance) {
1266 overall_best_distance = distance_to_best[0];
1267 bitsel[0] = bit_i;
1268 bitsel[1] = bit_j;
1269 }
1270 }
1271 }
1272
1273 dbg_printf("2 dimensions pool bit selection: (%u,%u)\n", bitsel[0], bitsel[1]);
1274
1275 /* Return our bit selection */
1276 _bitsel[0] = bitsel[0];
1277 _bitsel[1] = bitsel[1];
1278}
1279
48cbe001
MD
1280static
1281unsigned int find_nearest_type_index(unsigned int type_index,
1282 unsigned int nr_nodes)
1283{
1284 const struct cds_ja_type *type;
1285
1286 assert(type_index != NODE_INDEX_NULL);
1287 if (nr_nodes == 0)
1288 return NODE_INDEX_NULL;
1289 for (;;) {
1290 type = &ja_types[type_index];
1291 if (nr_nodes < type->min_child)
1292 type_index--;
1293 else if (nr_nodes > type->max_child)
1294 type_index++;
1295 else
1296 break;
1297 }
1298 return type_index;
1299}
1300
7a0b2331
MD
1301/*
 * ja_node_recompact: recompact a node, adding or removing a child.
2e313670 1303 * Return 0 on success, -EAGAIN if need to retry, or other negative
5a9a87dd 1304 * error value otherwise.
7a0b2331
MD
1305 */
1306static
2e313670
MD
1307int ja_node_recompact(enum ja_recompact mode,
1308 struct cds_ja *ja,
e1db2db5 1309 unsigned int old_type_index,
d96bfb0d 1310 const struct cds_ja_type *old_type,
b4540e8a 1311 struct cds_ja_inode *old_node,
5a9a87dd 1312 struct cds_ja_shadow_node *shadow_node,
3d8fe307 1313 struct cds_ja_inode_flag **old_node_flag_ptr, uint8_t n,
af3cbd45 1314 struct cds_ja_inode_flag *child_node_flag,
48cbe001
MD
1315 struct cds_ja_inode_flag **nullify_node_flag_ptr,
1316 int level)
7a0b2331 1317{
e1db2db5 1318 unsigned int new_type_index;
b4540e8a 1319 struct cds_ja_inode *new_node;
af3cbd45 1320 struct cds_ja_shadow_node *new_shadow_node = NULL;
d96bfb0d 1321 const struct cds_ja_type *new_type;
3d8fe307 1322 struct cds_ja_inode_flag *new_node_flag, *old_node_flag;
7a0b2331 1323 int ret;
f07b240f 1324 int fallback = 0;
7a0b2331 1325
3d8fe307
MD
1326 old_node_flag = *old_node_flag_ptr;
1327
48cbe001
MD
1328 /*
1329 * Need to find nearest type index even for ADD_SAME, because
1330 * this recompaction, when applied to linear nodes, will garbage
1331 * collect dummy (NULL) entries, and can therefore cause a few
1332 * linear representations to be skipped.
1333 */
2e313670 1334 switch (mode) {
19ddcd04 1335 case JA_RECOMPACT_ADD_SAME:
48cbe001
MD
1336 new_type_index = find_nearest_type_index(old_type_index,
1337 shadow_node->nr_child + 1);
1338 dbg_printf("Recompact for node with %u children\n",
1339 shadow_node->nr_child + 1);
2e313670 1340 break;
19ddcd04 1341 case JA_RECOMPACT_ADD_NEXT:
2e313670
MD
1342 if (!shadow_node || old_type_index == NODE_INDEX_NULL) {
1343 new_type_index = 0;
48cbe001 1344 dbg_printf("Recompact for NULL\n");
2e313670 1345 } else {
48cbe001
MD
1346 new_type_index = find_nearest_type_index(old_type_index,
1347 shadow_node->nr_child + 1);
1348 dbg_printf("Recompact for node with %u children\n",
1349 shadow_node->nr_child + 1);
2e313670
MD
1350 }
1351 break;
1352 case JA_RECOMPACT_DEL:
48cbe001
MD
1353 new_type_index = find_nearest_type_index(old_type_index,
1354 shadow_node->nr_child - 1);
1355 dbg_printf("Recompact for node with %u children\n",
1356 shadow_node->nr_child - 1);
2e313670
MD
1357 break;
1358 default:
1359 assert(0);
7a0b2331 1360 }
a2a7ff59 1361
f07b240f 1362retry: /* for fallback */
582a6ade
MD
1363 dbg_printf("Recompact from type %d to type %d\n",
1364 old_type_index, new_type_index);
7a0b2331 1365 new_type = &ja_types[new_type_index];
2e313670 1366 if (new_type_index != NODE_INDEX_NULL) {
354981c2 1367 new_node = alloc_cds_ja_node(ja, new_type);
2e313670
MD
1368 if (!new_node)
1369 return -ENOMEM;
b1a90ce3
MD
1370
1371 if (new_type->type_class == RCU_JA_POOL) {
1372 switch (new_type->nr_pool_order) {
1373 case 1:
1374 {
19ddcd04
MD
1375 unsigned int node_distrib_bitsel;
1376
b1a90ce3
MD
1377 node_distrib_bitsel =
1378 ja_node_sum_distribution_1d(mode, ja,
1379 old_type_index, old_type,
1380 old_node, shadow_node,
1381 n, child_node_flag,
1382 nullify_node_flag_ptr);
1383 assert(!((unsigned long) new_node & JA_POOL_1D_MASK));
1384 new_node_flag = ja_node_flag_pool_1d(new_node,
1385 new_type_index, node_distrib_bitsel);
1386 break;
1387 }
1388 case 2:
1389 {
19ddcd04
MD
1390 unsigned int node_distrib_bitsel[2];
1391
1392 ja_node_sum_distribution_2d(mode, ja,
1393 old_type_index, old_type,
1394 old_node, shadow_node,
1395 n, child_node_flag,
1396 nullify_node_flag_ptr,
1397 node_distrib_bitsel);
b1a90ce3
MD
1398 assert(!((unsigned long) new_node & JA_POOL_1D_MASK));
1399 assert(!((unsigned long) new_node & JA_POOL_2D_MASK));
19ddcd04
MD
1400 new_node_flag = ja_node_flag_pool_2d(new_node,
1401 new_type_index, node_distrib_bitsel);
b1a90ce3
MD
1402 break;
1403 }
1404 default:
1405 assert(0);
1406 }
1407 } else {
1408 new_node_flag = ja_node_flag(new_node, new_type_index);
1409 }
1410
2e313670 1411 dbg_printf("Recompact inherit lock from %p\n", shadow_node);
48cbe001 1412 new_shadow_node = rcuja_shadow_set(ja->ht, new_node_flag, shadow_node, ja, level);
2e313670 1413 if (!new_shadow_node) {
354981c2 1414 free_cds_ja_node(ja, new_node);
2e313670
MD
1415 return -ENOMEM;
1416 }
1417 if (fallback)
1418 new_shadow_node->fallback_removal_count =
1419 JA_FALLBACK_REMOVAL_COUNT;
1420 } else {
1421 new_node = NULL;
1422 new_node_flag = NULL;
e1db2db5 1423 }
11c5e016 1424
19ddcd04 1425 assert(mode != JA_RECOMPACT_ADD_NEXT || old_type->type_class != RCU_JA_PIGEON);
2e313670
MD
1426
1427 if (new_type_index == NODE_INDEX_NULL)
1428 goto skip_copy;
1429
11c5e016
MD
1430 switch (old_type->type_class) {
1431 case RCU_JA_LINEAR:
1432 {
1433 uint8_t nr_child =
1434 ja_linear_node_get_nr_child(old_type, old_node);
1435 unsigned int i;
1436
1437 for (i = 0; i < nr_child; i++) {
b4540e8a 1438 struct cds_ja_inode_flag *iter;
11c5e016
MD
1439 uint8_t v;
1440
1441 ja_linear_node_get_ith_pos(old_type, old_node, i, &v, &iter);
1442 if (!iter)
1443 continue;
af3cbd45 1444 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
2e313670 1445 continue;
b1a90ce3 1446 ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
f07b240f 1447 new_shadow_node,
11c5e016 1448 v, iter);
f07b240f
MD
1449 if (new_type->type_class == RCU_JA_POOL && ret) {
1450 goto fallback_toosmall;
1451 }
11c5e016
MD
1452 assert(!ret);
1453 }
1454 break;
1455 }
1456 case RCU_JA_POOL:
1457 {
1458 unsigned int pool_nr;
1459
1460 for (pool_nr = 0; pool_nr < (1U << old_type->nr_pool_order); pool_nr++) {
b4540e8a 1461 struct cds_ja_inode *pool =
11c5e016
MD
1462 ja_pool_node_get_ith_pool(old_type,
1463 old_node, pool_nr);
1464 uint8_t nr_child =
1465 ja_linear_node_get_nr_child(old_type, pool);
1466 unsigned int j;
1467
1468 for (j = 0; j < nr_child; j++) {
b4540e8a 1469 struct cds_ja_inode_flag *iter;
11c5e016
MD
1470 uint8_t v;
1471
1472 ja_linear_node_get_ith_pos(old_type, pool,
1473 j, &v, &iter);
1474 if (!iter)
1475 continue;
af3cbd45 1476 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
2e313670 1477 continue;
b1a90ce3 1478 ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
f07b240f 1479 new_shadow_node,
11c5e016 1480 v, iter);
f07b240f
MD
1481 if (new_type->type_class == RCU_JA_POOL
1482 && ret) {
1483 goto fallback_toosmall;
1484 }
11c5e016
MD
1485 assert(!ret);
1486 }
1487 }
1488 break;
7a0b2331 1489 }
a2a7ff59 1490 case RCU_JA_NULL:
19ddcd04 1491 assert(mode == JA_RECOMPACT_ADD_NEXT);
a2a7ff59 1492 break;
11c5e016 1493 case RCU_JA_PIGEON:
2e313670 1494 {
2e313670
MD
1495 unsigned int i;
1496
1497 assert(mode == JA_RECOMPACT_DEL);
48cbe001 1498 for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
2e313670
MD
1499 struct cds_ja_inode_flag *iter;
1500
1501 iter = ja_pigeon_node_get_ith_pos(old_type, old_node, i);
1502 if (!iter)
1503 continue;
af3cbd45 1504 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
2e313670 1505 continue;
b1a90ce3 1506 ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
2e313670
MD
1507 new_shadow_node,
1508 i, iter);
1509 if (new_type->type_class == RCU_JA_POOL && ret) {
1510 goto fallback_toosmall;
1511 }
1512 assert(!ret);
1513 }
1514 break;
1515 }
11c5e016
MD
1516 default:
1517 assert(0);
5a9a87dd 1518 ret = -EINVAL;
f07b240f 1519 goto end;
11c5e016 1520 }
2e313670 1521skip_copy:
11c5e016 1522
19ddcd04 1523 if (mode == JA_RECOMPACT_ADD_NEXT || mode == JA_RECOMPACT_ADD_SAME) {
2e313670 1524 /* add node */
b1a90ce3 1525 ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
2e313670
MD
1526 new_shadow_node,
1527 n, child_node_flag);
7b413155
MD
1528 if (new_type->type_class == RCU_JA_POOL && ret) {
1529 goto fallback_toosmall;
1530 }
2e313670
MD
1531 assert(!ret);
1532 }
19ddcd04
MD
1533
1534 if (fallback) {
1535 dbg_printf("Using fallback for %u children, node type index: %u, mode %s\n",
1536 new_shadow_node->nr_child, old_type_index, mode == JA_RECOMPACT_ADD_NEXT ? "add_next" :
1537 (mode == JA_RECOMPACT_DEL ? "del" : "add_same"));
354981c2 1538 uatomic_inc(&ja->node_fallback_count_distribution[new_shadow_node->nr_child]);
19ddcd04
MD
1539 }
1540
3d8fe307
MD
1541 /* Return pointer to new recompacted node through old_node_flag_ptr */
1542 *old_node_flag_ptr = new_node_flag;
a2a7ff59 1543 if (old_node) {
2e313670
MD
1544 int flags;
1545
1546 flags = RCUJA_SHADOW_CLEAR_FREE_NODE;
1547 /*
1548 * It is OK to free the lock associated with a node
1549 * going to NULL, since we are holding the parent lock.
1550 * This synchronizes removal with re-add of that node.
1551 */
1552 if (new_type_index == NODE_INDEX_NULL)
48cbe001 1553 flags |= RCUJA_SHADOW_CLEAR_FREE_LOCK;
3d8fe307 1554 ret = rcuja_shadow_clear(ja->ht, old_node_flag, shadow_node,
2e313670 1555 flags);
a2a7ff59
MD
1556 assert(!ret);
1557 }
5a9a87dd
MD
1558
1559 ret = 0;
f07b240f 1560end:
5a9a87dd 1561 return ret;
f07b240f
MD
1562
1563fallback_toosmall:
1564 /* fallback if next pool is too small */
af3cbd45 1565 assert(new_shadow_node);
3d8fe307 1566 ret = rcuja_shadow_clear(ja->ht, new_node_flag, new_shadow_node,
f07b240f
MD
1567 RCUJA_SHADOW_CLEAR_FREE_NODE);
1568 assert(!ret);
1569
19ddcd04
MD
1570 switch (mode) {
1571 case JA_RECOMPACT_ADD_SAME:
1572 /*
1573 * JA_RECOMPACT_ADD_SAME is only triggered if a linear
1574 * node within a pool has unused entries. It should
1575 * therefore _never_ be too small.
1576 */
4a073c53 1577 assert(0);
4cde8267
MD
1578
1579 /* Fall-through */
19ddcd04
MD
1580 case JA_RECOMPACT_ADD_NEXT:
1581 {
1582 const struct cds_ja_type *next_type;
1583
1584 /*
1585 * Recompaction attempt on add failed. Should only
1586 * happen if target node type is pool. Caused by
1587 * hard-to-split distribution. Recompact using the next
1588 * distribution size.
1589 */
1590 assert(new_type->type_class == RCU_JA_POOL);
1591 next_type = &ja_types[new_type_index + 1];
1592 /*
1593 * Try going to the next pool size if our population
1594 * fits within its range. This is not flagged as a
1595 * fallback.
1596 */
1597 if (shadow_node->nr_child + 1 >= next_type->min_child
1598 && shadow_node->nr_child + 1 <= next_type->max_child) {
1599 new_type_index++;
1600 goto retry;
1601 } else {
1602 new_type_index++;
1603 dbg_printf("Add fallback to type %d\n", new_type_index);
1604 uatomic_inc(&ja->nr_fallback);
1605 fallback = 1;
1606 goto retry;
1607 }
1608 break;
1609 }
1610 case JA_RECOMPACT_DEL:
1611 /*
1612 * Recompaction attempt on delete failed. Should only
1613 * happen if target node type is pool. This is caused by
1614 * a hard-to-split distribution. Recompact on same node
1615 * size, but flag current node as "fallback" to ensure
1616 * we don't attempt recompaction before some activity
1617 * has reshuffled our node.
1618 */
1619 assert(new_type->type_class == RCU_JA_POOL);
1620 new_type_index = old_type_index;
1621 dbg_printf("Delete fallback keeping type %d\n", new_type_index);
1622 uatomic_inc(&ja->nr_fallback);
1623 fallback = 1;
1624 goto retry;
1625 default:
1626 assert(0);
1627 return -EINVAL;
1628 }
1629
1630 /*
1631 * Last resort fallback: pigeon.
1632 */
f07b240f
MD
1633 new_type_index = (1UL << JA_TYPE_BITS) - 1;
1634 dbg_printf("Fallback to type %d\n", new_type_index);
1635 uatomic_inc(&ja->nr_fallback);
1636 fallback = 1;
1637 goto retry;
7a0b2331
MD
1638}
1639
5a9a87dd 1640/*
2e313670 1641 * Return 0 on success, -EAGAIN if need to retry, or other negative
5a9a87dd
MD
1642 * error value otherwise.
1643 */
7a0b2331 1644static
d96bfb0d 1645int ja_node_set_nth(struct cds_ja *ja,
b4540e8a 1646 struct cds_ja_inode_flag **node_flag, uint8_t n,
5a9a87dd 1647 struct cds_ja_inode_flag *child_node_flag,
48cbe001
MD
1648 struct cds_ja_shadow_node *shadow_node,
1649 int level)
7a0b2331
MD
1650{
1651 int ret;
e1db2db5 1652 unsigned int type_index;
d96bfb0d 1653 const struct cds_ja_type *type;
b4540e8a 1654 struct cds_ja_inode *node;
7a0b2331 1655
a2a7ff59
MD
1656 dbg_printf("ja_node_set_nth for n=%u, node %p, shadow %p\n",
1657 (unsigned int) n, ja_node_ptr(*node_flag), shadow_node);
1658
e1db2db5
MD
1659 node = ja_node_ptr(*node_flag);
1660 type_index = ja_node_type(*node_flag);
1661 type = &ja_types[type_index];
b1a90ce3 1662 ret = _ja_node_set_nth(type, node, *node_flag, shadow_node,
e1db2db5 1663 n, child_node_flag);
2e313670
MD
1664 switch (ret) {
1665 case -ENOSPC:
19ddcd04
MD
1666 /* Not enough space in node, need to recompact to next type. */
1667 ret = ja_node_recompact(JA_RECOMPACT_ADD_NEXT, ja, type_index, type, node,
48cbe001 1668 shadow_node, node_flag, n, child_node_flag, NULL, level);
2e313670
MD
1669 break;
1670 case -ERANGE:
1671 /* Node needs to be recompacted. */
19ddcd04 1672 ret = ja_node_recompact(JA_RECOMPACT_ADD_SAME, ja, type_index, type, node,
48cbe001 1673 shadow_node, node_flag, n, child_node_flag, NULL, level);
2e313670
MD
1674 break;
1675 }
1676 return ret;
1677}
1678
1679/*
1680 * Return 0 on success, -EAGAIN if need to retry, or other negative
1681 * error value otherwise.
1682 */
1683static
af3cbd45
MD
1684int ja_node_clear_ptr(struct cds_ja *ja,
1685 struct cds_ja_inode_flag **node_flag_ptr, /* Pointer to location to nullify */
1686 struct cds_ja_inode_flag **parent_node_flag_ptr, /* Address of parent ptr in its parent */
1687 struct cds_ja_shadow_node *shadow_node, /* of parent */
48cbe001 1688 uint8_t n, int level)
2e313670
MD
1689{
1690 int ret;
1691 unsigned int type_index;
1692 const struct cds_ja_type *type;
1693 struct cds_ja_inode *node;
1694
af3cbd45
MD
1695 dbg_printf("ja_node_clear_ptr for node %p, shadow %p, target ptr %p\n",
1696 ja_node_ptr(*parent_node_flag_ptr), shadow_node, node_flag_ptr);
2e313670 1697
af3cbd45
MD
1698 node = ja_node_ptr(*parent_node_flag_ptr);
1699 type_index = ja_node_type(*parent_node_flag_ptr);
2e313670 1700 type = &ja_types[type_index];
19ddcd04 1701 ret = _ja_node_clear_ptr(type, node, *parent_node_flag_ptr, shadow_node, node_flag_ptr, n);
2e313670 1702 if (ret == -EFBIG) {
19ddcd04 1703 /* Should try recompaction. */
2e313670 1704 ret = ja_node_recompact(JA_RECOMPACT_DEL, ja, type_index, type, node,
af3cbd45 1705 shadow_node, parent_node_flag_ptr, n, NULL,
48cbe001 1706 node_flag_ptr, level);
7a0b2331
MD
1707 }
1708 return ret;
1709}
be9a7474 1710
af3cbd45 1711struct cds_hlist_head cds_ja_lookup(struct cds_ja *ja, uint64_t key)
b4540e8a 1712{
41975c12
MD
1713 unsigned int tree_depth, i;
1714 struct cds_ja_inode_flag *node_flag;
af3cbd45 1715 struct cds_hlist_head head = { NULL };
41975c12
MD
1716
1717 if (caa_unlikely(key > ja->key_max))
af3cbd45 1718 return head;
41975c12 1719 tree_depth = ja->tree_depth;
5a9a87dd 1720 node_flag = rcu_dereference(ja->root);
41975c12 1721
5a9a87dd
MD
1722 /* level 0: root node */
1723 if (!ja_node_ptr(node_flag))
af3cbd45 1724 return head;
5a9a87dd
MD
1725
1726 for (i = 1; i < tree_depth; i++) {
79b41067
MD
1727 uint8_t iter_key;
1728
1729 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1)));
48cbe001 1730 node_flag = ja_node_get_nth(node_flag, NULL, iter_key);
582a6ade
MD
1731 dbg_printf("cds_ja_lookup iter key lookup %u finds node_flag %p\n",
1732 (unsigned int) iter_key, node_flag);
41975c12 1733 if (!ja_node_ptr(node_flag))
af3cbd45 1734 return head;
41975c12
MD
1735 }
1736
5a9a87dd 1737	/* Last level lookup succeeded. We got an actual match. */
af3cbd45
MD
1738 head.next = (struct cds_hlist_node *) node_flag;
1739 return head;
5a9a87dd
MD
1740}
1741
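/*
 * Usage sketch (illustrative only, not part of the library): walk all
 * duplicates stored under a given key. Must be called with the RCU
 * read-side lock held, like cds_ja_lookup() itself. The helper name
 * "example_lookup_all" is hypothetical.
 */
static __attribute__((unused))
void example_lookup_all(struct cds_ja *ja, uint64_t key)
{
	struct cds_hlist_head head;
	struct cds_hlist_node *pos;
	struct cds_ja_node *entry;

	head = cds_ja_lookup(ja, key);
	cds_hlist_for_each_entry_rcu(entry, pos, &head, list) {
		/*
		 * "entry" is one matching cds_ja_node; the caller can
		 * recover its enclosing structure with caa_container_of().
		 */
		dbg_printf("lookup example: match %p\n", entry);
	}
}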
291b2543
MD
1742struct cds_hlist_head cds_ja_lookup_lower_equal(struct cds_ja *ja, uint64_t key)
1743{
1744 int tree_depth, level;
1745 struct cds_ja_inode_flag *node_flag, *cur_node_depth[JA_MAX_DEPTH];
1746 struct cds_hlist_head head = { NULL };
1747
1748 if (caa_unlikely(key > ja->key_max || !key))
1749 return head;
1750
1751 memset(cur_node_depth, 0, sizeof(cur_node_depth));
1752 tree_depth = ja->tree_depth;
1753 node_flag = rcu_dereference(ja->root);
1754 cur_node_depth[0] = node_flag;
1755
1756 /* level 0: root node */
1757 if (!ja_node_ptr(node_flag))
1758 return head;
1759
1760 for (level = 1; level < tree_depth; level++) {
1761 uint8_t iter_key;
1762
1763 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - level - 1)));
1764 node_flag = ja_node_get_nth(node_flag, NULL, iter_key);
1765 if (!ja_node_ptr(node_flag))
1766 break;
1767 cur_node_depth[level] = node_flag;
1768		dbg_printf("cds_ja_lookup_lower_equal iter key lookup %u finds node_flag %p\n",
1769 (unsigned int) iter_key, node_flag);
1770 }
1771
1772 if (level == tree_depth) {
1773		/* Last level lookup succeeded. We got an equal match. */
1774 head.next = (struct cds_hlist_node *) node_flag;
1775 return head;
1776 }
1777
1778 /*
1779 * Find highest value left of current node.
1780 * Current node is cur_node_depth[level].
1781 * Start at current level. If we cannot find any key left of
1782 * ours, go one level up, seek highest value left of current
1783 * (recursively), and when we find one, get the rightmost child
1784 * of its rightmost child (recursively).
1785 */
1786 for (; level > 0; level--) {
1787 uint8_t iter_key;
1788
1789 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - level - 1)));
1790 node_flag = ja_node_get_left(cur_node_depth[level - 1],
1791 iter_key);
1792 /* If found left sibling, find rightmost child. */
1793 if (ja_node_ptr(node_flag))
1794 break;
1795 }
1796
1797 if (!level) {
1798 /* Reached the root and could not find a left sibling. */
1799 return head;
1800 }
1801
1802 level++;
1803 /* Find rightmost child of rightmost child (recursively). */
1804 for (; level < tree_depth; level++) {
1805 node_flag = ja_node_get_rightmost(node_flag);
1806		/* Stop descending if there is no rightmost child. */
1807 if (!ja_node_ptr(node_flag))
1808 break;
1809 }
1810
1811 if (level == tree_depth) {
1812		/* Last level lookup succeeded. We got a "lower than" match. */
1813 head.next = (struct cds_hlist_node *) node_flag;
1814 return head;
1815 }
1816
1817 /* No match */
1818 return head;
1819}
1820
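/*
 * Usage sketch (illustrative only): return the first entry whose key is
 * the largest key lower than or equal to "key", or NULL if no such key
 * exists. Must be called with the RCU read-side lock held. The helper
 * name "example_lookup_floor" is hypothetical.
 */
static __attribute__((unused))
struct cds_ja_node *example_lookup_floor(struct cds_ja *ja, uint64_t key)
{
	struct cds_hlist_head head;

	head = cds_ja_lookup_lower_equal(ja, key);
	if (!head.next)
		return NULL;	/* No key lower than or equal to "key". */
	/* First node of the duplicate list for the matching key. */
	return caa_container_of(head.next, struct cds_ja_node, list);
}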
5a9a87dd
MD
1821/*
1822 * We reached an unpopulated node. Create it and the children we need,
1823 * and then attach the entire branch to the current node. This may
1824 * trigger recompaction of the current node. Locks needed: node lock
1825 * (for add), and, possibly, parent node lock (to update pointer due to
1826 * node recompaction).
1827 *
1828 * First take node lock, check if recompaction is needed, then take
1829 * parent lock (if needed). Then we can proceed to create the new
1830 * branch. Publish the new branch, and release locks.
1831 * TODO: we currently always take the parent lock even when not needed.
1832 */
1833static
1834int ja_attach_node(struct cds_ja *ja,
b0ca2d21 1835 struct cds_ja_inode_flag **attach_node_flag_ptr,
b62a8d0c 1836 struct cds_ja_inode_flag *attach_node_flag,
48cbe001
MD
1837 struct cds_ja_inode_flag *parent_attach_node_flag,
1838 struct cds_ja_inode_flag **old_node_flag_ptr,
1839 struct cds_ja_inode_flag *old_node_flag,
5a9a87dd 1840 uint64_t key,
79b41067 1841 unsigned int level,
5a9a87dd
MD
1842 struct cds_ja_node *child_node)
1843{
1844 struct cds_ja_shadow_node *shadow_node = NULL,
af3cbd45 1845 *parent_shadow_node = NULL;
5a9a87dd
MD
1846 struct cds_hlist_head head;
1847 struct cds_ja_inode_flag *iter_node_flag, *iter_dest_node_flag;
1848 int ret, i;
a2a7ff59 1849 struct cds_ja_inode_flag *created_nodes[JA_MAX_DEPTH];
5a9a87dd
MD
1850 int nr_created_nodes = 0;
1851
48cbe001
MD
1852 dbg_printf("Attach node at level %u (old_node_flag %p, attach_node_flag_ptr %p attach_node_flag %p, parent_attach_node_flag %p)\n",
1853 level, old_node_flag, attach_node_flag_ptr, attach_node_flag, parent_attach_node_flag);
a2a7ff59 1854
48cbe001
MD
1855 assert(!old_node_flag);
1856 if (attach_node_flag) {
1857 shadow_node = rcuja_shadow_lookup_lock(ja->ht, attach_node_flag);
1858 if (!shadow_node) {
1859 ret = -EAGAIN;
1860 goto end;
1861 }
5a9a87dd 1862 }
48cbe001 1863 if (parent_attach_node_flag) {
5a9a87dd 1864 parent_shadow_node = rcuja_shadow_lookup_lock(ja->ht,
48cbe001 1865 parent_attach_node_flag);
5a9a87dd 1866 if (!parent_shadow_node) {
2e313670 1867 ret = -EAGAIN;
5a9a87dd
MD
1868 goto unlock_shadow;
1869 }
1870 }
1871
48cbe001 1872 if (old_node_flag_ptr && ja_node_ptr(*old_node_flag_ptr)) {
b306a0fe 1873 /*
c112acaa
MD
1874 * Target node has been updated between RCU lookup and
1875 * lock acquisition. We need to re-try lookup and
1876 * attach.
1877 */
1878 ret = -EAGAIN;
1879 goto unlock_parent;
1880 }
1881
9be99d4a
MD
1882 /*
1883 * Perform a lookup query to handle the case where
1884 * old_node_flag_ptr is NULL. We cannot use it to check if the
1885 * node has been populated between RCU lookup and mutex
1886 * acquisition.
1887 */
1888 if (!old_node_flag_ptr) {
1889 uint8_t iter_key;
1890 struct cds_ja_inode_flag *lookup_node_flag;
1891 struct cds_ja_inode_flag **lookup_node_flag_ptr;
1892
1893 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (ja->tree_depth - level)));
1894 lookup_node_flag = ja_node_get_nth(attach_node_flag,
1895 &lookup_node_flag_ptr,
1896 iter_key);
1897 if (lookup_node_flag) {
1898 ret = -EEXIST;
1899 goto unlock_parent;
1900 }
1901 }
1902
c112acaa 1903 if (attach_node_flag_ptr && ja_node_ptr(*attach_node_flag_ptr) !=
b62a8d0c 1904 ja_node_ptr(attach_node_flag)) {
c112acaa
MD
1905 /*
1906 * Target node has been updated between RCU lookup and
1907 * lock acquisition. We need to re-try lookup and
1908 * attach.
b306a0fe
MD
1909 */
1910 ret = -EAGAIN;
1911 goto unlock_parent;
1912 }
1913
a2a7ff59 1914 /* Create new branch, starting from bottom */
5a9a87dd
MD
1915 CDS_INIT_HLIST_HEAD(&head);
1916 cds_hlist_add_head_rcu(&child_node->list, &head);
a2a7ff59 1917 iter_node_flag = (struct cds_ja_inode_flag *) head.next;
5a9a87dd 1918
48cbe001 1919 for (i = ja->tree_depth - 1; i >= (int) level; i--) {
79b41067
MD
1920 uint8_t iter_key;
1921
48cbe001 1922 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (ja->tree_depth - i - 1)));
79b41067 1923 dbg_printf("branch creation level %d, key %u\n",
48cbe001 1924 i, (unsigned int) iter_key);
5a9a87dd
MD
1925 iter_dest_node_flag = NULL;
1926 ret = ja_node_set_nth(ja, &iter_dest_node_flag,
79b41067 1927 iter_key,
5a9a87dd 1928 iter_node_flag,
48cbe001 1929 NULL, i);
9be99d4a
MD
1930 if (ret) {
1931 dbg_printf("branch creation error %d\n", ret);
5a9a87dd 1932 goto check_error;
9be99d4a 1933 }
5a9a87dd
MD
1934 created_nodes[nr_created_nodes++] = iter_dest_node_flag;
1935 iter_node_flag = iter_dest_node_flag;
1936 }
48cbe001 1937 assert(level > 0);
5a9a87dd 1938
48cbe001
MD
1939 /* Publish branch */
1940 if (level == 1) {
1941 /*
1942 * Attaching to root node.
1943 */
1944 rcu_assign_pointer(ja->root, iter_node_flag);
1945 } else {
79b41067
MD
1946 uint8_t iter_key;
1947
1948 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (ja->tree_depth - level)));
48cbe001
MD
1949 dbg_printf("publish branch at level %d, key %u\n",
1950 level - 1, (unsigned int) iter_key);
a2a7ff59 1951 /* We need to use set_nth on the previous level. */
48cbe001 1952 iter_dest_node_flag = attach_node_flag;
a2a7ff59 1953 ret = ja_node_set_nth(ja, &iter_dest_node_flag,
79b41067 1954 iter_key,
a2a7ff59 1955 iter_node_flag,
48cbe001 1956 shadow_node, level - 1);
9be99d4a
MD
1957 if (ret) {
1958 dbg_printf("branch publish error %d\n", ret);
a2a7ff59 1959 goto check_error;
9be99d4a 1960 }
48cbe001
MD
1961 /*
1962 * Attach branch
1963 */
1964 rcu_assign_pointer(*attach_node_flag_ptr, iter_dest_node_flag);
a2a7ff59
MD
1965 }
1966
5a9a87dd
MD
1967 /* Success */
1968 ret = 0;
1969
1970check_error:
1971 if (ret) {
1972 for (i = 0; i < nr_created_nodes; i++) {
1973 int tmpret;
a2a7ff59
MD
1974 int flags;
1975
1976 flags = RCUJA_SHADOW_CLEAR_FREE_LOCK;
1977 if (i)
1978 flags |= RCUJA_SHADOW_CLEAR_FREE_NODE;
5a9a87dd 1979 tmpret = rcuja_shadow_clear(ja->ht,
3d8fe307 1980 created_nodes[i],
a2a7ff59
MD
1981 NULL,
1982 flags);
5a9a87dd
MD
1983 assert(!tmpret);
1984 }
1985 }
b306a0fe 1986unlock_parent:
5a9a87dd
MD
1987 if (parent_shadow_node)
1988 rcuja_shadow_unlock(parent_shadow_node);
1989unlock_shadow:
1990 if (shadow_node)
1991 rcuja_shadow_unlock(shadow_node);
1992end:
1993 return ret;
1994}
1995
1996/*
af3cbd45
MD
1997 * Lock the parent containing the hlist head pointer, and add node to list of
1998 * duplicates. Failure can happen if concurrent update changes the
1999 * parent before we get the lock. We return -EAGAIN in that case.
5a9a87dd
MD
2000 * Return 0 on success, negative error value on failure.
2001 */
2002static
2003int ja_chain_node(struct cds_ja *ja,
af3cbd45 2004 struct cds_ja_inode_flag *parent_node_flag,
fa112799 2005 struct cds_ja_inode_flag **node_flag_ptr,
c112acaa 2006 struct cds_ja_inode_flag *node_flag,
5a9a87dd
MD
2007 struct cds_ja_node *node)
2008{
2009 struct cds_ja_shadow_node *shadow_node;
fa112799 2010 int ret = 0;
5a9a87dd 2011
3d8fe307 2012 shadow_node = rcuja_shadow_lookup_lock(ja->ht, parent_node_flag);
b306a0fe 2013 if (!shadow_node) {
2e313670 2014 return -EAGAIN;
b306a0fe 2015 }
c112acaa 2016 if (ja_node_ptr(*node_flag_ptr) != ja_node_ptr(node_flag)) {
fa112799
MD
2017 ret = -EAGAIN;
2018 goto end;
2019 }
48cbe001 2020 cds_hlist_add_head_rcu(&node->list, (struct cds_hlist_head *) node_flag_ptr);
fa112799 2021end:
5a9a87dd 2022 rcuja_shadow_unlock(shadow_node);
fa112799 2023 return ret;
5a9a87dd
MD
2024}
2025
75d573aa
MD
2026static
2027int _cds_ja_add(struct cds_ja *ja, uint64_t key,
2028 struct cds_ja_node *new_node,
2029 struct cds_ja_node **unique_node_ret)
5a9a87dd
MD
2030{
2031 unsigned int tree_depth, i;
48cbe001 2032 struct cds_ja_inode_flag *attach_node_flag,
5a9a87dd 2033 *parent_node_flag,
b62a8d0c 2034 *parent2_node_flag,
48cbe001
MD
2035 *node_flag,
2036 *parent_attach_node_flag;
2037 struct cds_ja_inode_flag **attach_node_flag_ptr,
2038 **parent_node_flag_ptr,
2039 **node_flag_ptr;
5a9a87dd
MD
2040 int ret;
2041
b306a0fe 2042 if (caa_unlikely(key > ja->key_max)) {
5a9a87dd 2043 return -EINVAL;
b306a0fe 2044 }
5a9a87dd
MD
2045 tree_depth = ja->tree_depth;
2046
2047retry:
a2a7ff59
MD
2048 dbg_printf("cds_ja_add attempt: key %" PRIu64 ", node %p\n",
2049 key, new_node);
5a9a87dd 2050 parent2_node_flag = NULL;
b0f74e47
MD
2051 parent_node_flag =
2052 (struct cds_ja_inode_flag *) &ja->root; /* Use root ptr address as key for mutex */
48cbe001 2053 parent_node_flag_ptr = NULL;
35170a44 2054 node_flag = rcu_dereference(ja->root);
48cbe001 2055 node_flag_ptr = &ja->root;
5a9a87dd
MD
2056
2057 /* Iterate on all internal levels */
a2a7ff59 2058 for (i = 1; i < tree_depth; i++) {
79b41067
MD
2059 uint8_t iter_key;
2060
48cbe001
MD
2061 if (!ja_node_ptr(node_flag))
2062 break;
2063 dbg_printf("cds_ja_add iter parent2_node_flag %p parent_node_flag %p node_flag_ptr %p node_flag %p\n",
2064 parent2_node_flag, parent_node_flag, node_flag_ptr, node_flag);
79b41067 2065 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1)));
5a9a87dd
MD
2066 parent2_node_flag = parent_node_flag;
2067 parent_node_flag = node_flag;
48cbe001 2068 parent_node_flag_ptr = node_flag_ptr;
5a9a87dd
MD
2069 node_flag = ja_node_get_nth(node_flag,
2070 &node_flag_ptr,
79b41067 2071 iter_key);
5a9a87dd
MD
2072 }
2073
2074 /*
48cbe001
MD
2075	 * We reached either the bottom of the tree or an internal NULL
2076	 * node. Simply add the node to the last internal level, or chain
2077	 * it if the key is already present.
5a9a87dd
MD
2078 */
2079 if (!ja_node_ptr(node_flag)) {
48cbe001
MD
2080 dbg_printf("cds_ja_add NULL parent2_node_flag %p parent_node_flag %p node_flag_ptr %p node_flag %p\n",
2081 parent2_node_flag, parent_node_flag, node_flag_ptr, node_flag);
75d573aa 2082
48cbe001
MD
2083 attach_node_flag = parent_node_flag;
2084 attach_node_flag_ptr = parent_node_flag_ptr;
2085 parent_attach_node_flag = parent2_node_flag;
2086
b0ca2d21 2087 ret = ja_attach_node(ja, attach_node_flag_ptr,
b62a8d0c 2088 attach_node_flag,
48cbe001
MD
2089 parent_attach_node_flag,
2090 node_flag_ptr,
2091 node_flag,
2092 key, i, new_node);
5a9a87dd 2093 } else {
75d573aa
MD
2094 if (unique_node_ret) {
2095 *unique_node_ret = (struct cds_ja_node *) ja_node_ptr(node_flag);
2096 return -EEXIST;
2097 }
2098
48cbe001
MD
2099 dbg_printf("cds_ja_add duplicate parent2_node_flag %p parent_node_flag %p node_flag_ptr %p node_flag %p\n",
2100 parent2_node_flag, parent_node_flag, node_flag_ptr, node_flag);
75d573aa 2101
48cbe001
MD
2102 attach_node_flag = node_flag;
2103 attach_node_flag_ptr = node_flag_ptr;
2104 parent_attach_node_flag = parent_node_flag;
2105
5a9a87dd 2106 ret = ja_chain_node(ja,
48cbe001
MD
2107 parent_attach_node_flag,
2108 attach_node_flag_ptr,
2109 attach_node_flag,
5a9a87dd
MD
2110 new_node);
2111 }
b306a0fe 2112 if (ret == -EAGAIN || ret == -EEXIST)
5a9a87dd 2113 goto retry;
48cbe001 2114
5a9a87dd 2115 return ret;
b4540e8a
MD
2116}
2117
75d573aa
MD
2118int cds_ja_add(struct cds_ja *ja, uint64_t key,
2119 struct cds_ja_node *new_node)
2120{
2121 return _cds_ja_add(ja, key, new_node, NULL);
2122}
2123
2124struct cds_ja_node *cds_ja_add_unique(struct cds_ja *ja, uint64_t key,
2125 struct cds_ja_node *new_node)
2126{
2127 int ret;
2128 struct cds_ja_node *ret_node;
2129
2130 ret = _cds_ja_add(ja, key, new_node, &ret_node);
2131 if (ret == -EEXIST)
2132 return ret_node;
2133 else
2134 return new_node;
2135}
2136
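/*
 * Usage sketch (illustrative only): add-or-get semantics built on
 * cds_ja_add_unique(). "struct my_entry", its "node" member and the
 * helper name are hypothetical caller-side definitions. Must be called
 * with the RCU read-side lock held.
 */
struct my_entry {
	struct cds_ja_node node;	/* hypothetical embedded judy array node */
	uint64_t key;
};

static __attribute__((unused))
struct my_entry *example_add_or_get(struct cds_ja *ja, struct my_entry *entry)
{
	struct cds_ja_node *ret_node;

	ret_node = cds_ja_add_unique(ja, entry->key, &entry->node);
	if (ret_node != &entry->node) {
		/* Key already present: hand back the pre-existing entry. */
		return caa_container_of(ret_node, struct my_entry, node);
	}
	return entry;	/* Newly inserted. */
}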
af3cbd45
MD
2137/*
2138 * Note: there is no need to lookup the pointer address associated with
2139 * each node's nth item after taking the lock: it's already been done by
2140 * cds_ja_del while holding the rcu read-side lock, and our node rules
2141 * ensure that when a match value -> pointer is found in a node, it is
2142 * _NEVER_ changed for that node without recompaction, and recompaction
2143 * reallocates the node.
b306a0fe
MD
2144 * However, when a child is removed from "linear" nodes, its pointer
2145 * is set to NULL. We therefore check, while holding the locks, if this
2146 * pointer is NULL, and return -ENOENT to the caller if it is the case.
af3cbd45 2147 */
35170a44
MD
2148static
2149int ja_detach_node(struct cds_ja *ja,
2150 struct cds_ja_inode_flag **snapshot,
af3cbd45
MD
2151 struct cds_ja_inode_flag ***snapshot_ptr,
2152 uint8_t *snapshot_n,
35170a44
MD
2153 int nr_snapshot,
2154 uint64_t key,
2155 struct cds_ja_node *node)
2156{
af3cbd45
MD
2157 struct cds_ja_shadow_node *shadow_nodes[JA_MAX_DEPTH];
2158 struct cds_ja_inode_flag **node_flag_ptr = NULL,
2159 *parent_node_flag = NULL,
2160 **parent_node_flag_ptr = NULL;
b62a8d0c 2161 struct cds_ja_inode_flag *iter_node_flag;
4d6ef45e
MD
2162 int ret, i, nr_shadow = 0, nr_clear = 0, nr_branch = 0;
2163 uint8_t n = 0;
35170a44 2164
4d6ef45e 2165 assert(nr_snapshot == ja->tree_depth + 1);
35170a44 2166
af3cbd45
MD
2167 /*
2168 * From the last internal level node going up, get the node
2169 * lock, check if the node has only one child left. If it is the
2170 * case, we continue iterating upward. When we reach a node
2171	 * which has more than one child left, we lock the parent, and
2172 * proceed to the node deletion (removing its children too).
2173 */
4d6ef45e 2174 for (i = nr_snapshot - 2; i >= 1; i--) {
af3cbd45
MD
2175 struct cds_ja_shadow_node *shadow_node;
2176
2177 shadow_node = rcuja_shadow_lookup_lock(ja->ht,
3d8fe307 2178 snapshot[i]);
af3cbd45
MD
2179 if (!shadow_node) {
2180 ret = -EAGAIN;
2181 goto end;
2182 }
af3cbd45 2183 shadow_nodes[nr_shadow++] = shadow_node;
b62a8d0c
MD
2184
2185 /*
2186 * Check if node has been removed between RCU
2187 * lookup and lock acquisition.
2188 */
2189 assert(snapshot_ptr[i + 1]);
2190 if (ja_node_ptr(*snapshot_ptr[i + 1])
2191 != ja_node_ptr(snapshot[i + 1])) {
2192 ret = -ENOENT;
2193 goto end;
2194 }
2195
2196 assert(shadow_node->nr_child > 0);
d810c97f 2197 if (shadow_node->nr_child == 1 && i > 1)
4d6ef45e
MD
2198 nr_clear++;
2199 nr_branch++;
af3cbd45
MD
2200 if (shadow_node->nr_child > 1 || i == 1) {
2201 /* Lock parent and break */
2202 shadow_node = rcuja_shadow_lookup_lock(ja->ht,
3d8fe307 2203 snapshot[i - 1]);
af3cbd45
MD
2204 if (!shadow_node) {
2205 ret = -EAGAIN;
2206 goto end;
2207 }
2208 shadow_nodes[nr_shadow++] = shadow_node;
b62a8d0c 2209
c112acaa
MD
2210 /*
2211 * Check if node has been removed between RCU
2212 * lookup and lock acquisition.
2213 */
b62a8d0c
MD
2214 assert(snapshot_ptr[i]);
2215 if (ja_node_ptr(*snapshot_ptr[i])
2216 != ja_node_ptr(snapshot[i])) {
c112acaa
MD
2217 ret = -ENOENT;
2218 goto end;
2219 }
2220
b62a8d0c 2221 node_flag_ptr = snapshot_ptr[i + 1];
4d6ef45e
MD
2222 n = snapshot_n[i + 1];
2223 parent_node_flag_ptr = snapshot_ptr[i];
2224 parent_node_flag = snapshot[i];
c112acaa 2225
af3cbd45
MD
2226 if (i > 1) {
2227 /*
2228 * Lock parent's parent, in case we need
2229 * to recompact parent.
2230 */
2231 shadow_node = rcuja_shadow_lookup_lock(ja->ht,
3d8fe307 2232 snapshot[i - 2]);
af3cbd45
MD
2233 if (!shadow_node) {
2234 ret = -EAGAIN;
2235 goto end;
2236 }
2237 shadow_nodes[nr_shadow++] = shadow_node;
b62a8d0c
MD
2238
2239 /*
2240 * Check if node has been removed between RCU
2241 * lookup and lock acquisition.
2242 */
2243 assert(snapshot_ptr[i - 1]);
2244 if (ja_node_ptr(*snapshot_ptr[i - 1])
2245 != ja_node_ptr(snapshot[i - 1])) {
2246 ret = -ENOENT;
2247 goto end;
2248 }
af3cbd45 2249 }
b62a8d0c 2250
af3cbd45
MD
2251 break;
2252 }
2253 }
2254
2255 /*
4d6ef45e
MD
2256 * At this point, we want to delete all nodes that are about to
2257 * be removed from shadow_nodes (except the last one, which is
2258	 * either the root or the parent of the uppermost node with 1
b62a8d0c
MD
2259 * child). OK to free lock here, because RCU read lock is held,
2260 * and free only performed in call_rcu.
af3cbd45
MD
2261 */
2262
2263 for (i = 0; i < nr_clear; i++) {
2264 ret = rcuja_shadow_clear(ja->ht,
3d8fe307 2265 shadow_nodes[i]->node_flag,
af3cbd45
MD
2266 shadow_nodes[i],
2267 RCUJA_SHADOW_CLEAR_FREE_NODE
2268 | RCUJA_SHADOW_CLEAR_FREE_LOCK);
2269 assert(!ret);
2270 }
2271
2272 iter_node_flag = parent_node_flag;
2273 /* Remove from parent */
2274 ret = ja_node_clear_ptr(ja,
2275 node_flag_ptr, /* Pointer to location to nullify */
2276			&iter_node_flag,	/* Old parent ptr; receives the new one on recompaction */
4d6ef45e 2277 shadow_nodes[nr_branch - 1], /* of parent */
48cbe001 2278 n, nr_branch - 1);
b306a0fe
MD
2279 if (ret)
2280 goto end;
af3cbd45 2281
4d6ef45e
MD
2282 dbg_printf("ja_detach_node: publish %p instead of %p\n",
2283 iter_node_flag, *parent_node_flag_ptr);
af3cbd45
MD
2284 /* Update address of parent ptr in its parent */
2285 rcu_assign_pointer(*parent_node_flag_ptr, iter_node_flag);
2286
2287end:
2288 for (i = 0; i < nr_shadow; i++)
2289 rcuja_shadow_unlock(shadow_nodes[i]);
35170a44
MD
2290 return ret;
2291}
2292
af3cbd45
MD
2293static
2294int ja_unchain_node(struct cds_ja *ja,
2295 struct cds_ja_inode_flag *parent_node_flag,
fa112799 2296 struct cds_ja_inode_flag **node_flag_ptr,
013a6083 2297 struct cds_ja_inode_flag *node_flag,
af3cbd45
MD
2298 struct cds_ja_node *node)
2299{
2300 struct cds_ja_shadow_node *shadow_node;
f2758d14 2301 struct cds_hlist_node *hlist_node;
013a6083
MD
2302 struct cds_hlist_head hlist_head;
2303 int ret = 0, count = 0, found = 0;
af3cbd45 2304
3d8fe307 2305 shadow_node = rcuja_shadow_lookup_lock(ja->ht, parent_node_flag);
af3cbd45
MD
2306 if (!shadow_node)
2307 return -EAGAIN;
013a6083 2308 if (ja_node_ptr(*node_flag_ptr) != ja_node_ptr(node_flag)) {
fa112799
MD
2309 ret = -EAGAIN;
2310 goto end;
2311 }
013a6083 2312 hlist_head.next = (struct cds_hlist_node *) ja_node_ptr(node_flag);
af3cbd45
MD
2313 /*
2314	 * Retry if another thread removed all but one of the duplicates
fa112799
MD
2315	 * since our check (that check was performed without holding the lock).
013a6083
MD
2316 * Ensure that the node we are about to remove is still in the
2317 * list (while holding lock).
af3cbd45 2318 */
013a6083 2319 cds_hlist_for_each_rcu(hlist_node, &hlist_head) {
ade342cb
MD
2320 if (count == 0) {
2321 /* FIXME: currently a work-around */
2322 hlist_node->prev = (struct cds_hlist_node *) node_flag_ptr;
2323 }
f2758d14 2324 count++;
013a6083
MD
2325 if (hlist_node == &node->list)
2326 found++;
f2758d14 2327 }
013a6083
MD
2328 assert(found <= 1);
2329 if (!found || count == 1) {
af3cbd45
MD
2330 ret = -EAGAIN;
2331 goto end;
2332 }
2333 cds_hlist_del_rcu(&node->list);
ade342cb
MD
2334 /*
2335 * Validate that we indeed removed the node from linked list.
2336 */
2337 assert(ja_node_ptr(*node_flag_ptr) != (struct cds_ja_inode *) node);
af3cbd45
MD
2338end:
2339 rcuja_shadow_unlock(shadow_node);
2340 return ret;
2341}
2342
2343/*
2344 * Called with RCU read lock held.
2345 */
35170a44
MD
2346int cds_ja_del(struct cds_ja *ja, uint64_t key,
2347 struct cds_ja_node *node)
2348{
2349 unsigned int tree_depth, i;
2350 struct cds_ja_inode_flag *snapshot[JA_MAX_DEPTH];
af3cbd45
MD
2351 struct cds_ja_inode_flag **snapshot_ptr[JA_MAX_DEPTH];
2352 uint8_t snapshot_n[JA_MAX_DEPTH];
35170a44 2353 struct cds_ja_inode_flag *node_flag;
fa112799
MD
2354 struct cds_ja_inode_flag **prev_node_flag_ptr,
2355 **node_flag_ptr;
4d6ef45e 2356 int nr_snapshot;
35170a44
MD
2357 int ret;
2358
2359 if (caa_unlikely(key > ja->key_max))
2360 return -EINVAL;
2361 tree_depth = ja->tree_depth;
2362
2363retry:
4d6ef45e 2364 nr_snapshot = 0;
35170a44
MD
2365 dbg_printf("cds_ja_del attempt: key %" PRIu64 ", node %p\n",
2366 key, node);
2367
2368 /* snapshot for level 0 is only for shadow node lookup */
4d6ef45e
MD
2369 snapshot_n[0] = 0;
2370 snapshot_n[1] = 0;
af3cbd45 2371 snapshot_ptr[nr_snapshot] = NULL;
35170a44
MD
2372 snapshot[nr_snapshot++] = (struct cds_ja_inode_flag *) &ja->root;
2373 node_flag = rcu_dereference(ja->root);
af3cbd45 2374 prev_node_flag_ptr = &ja->root;
fa112799 2375 node_flag_ptr = &ja->root;
35170a44
MD
2376
2377 /* Iterate on all internal levels */
2378 for (i = 1; i < tree_depth; i++) {
2379 uint8_t iter_key;
2380
2381 dbg_printf("cds_ja_del iter node_flag %p\n",
2382 node_flag);
2383 if (!ja_node_ptr(node_flag)) {
2384 return -ENOENT;
2385 }
35170a44 2386 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1)));
4d6ef45e 2387 snapshot_n[nr_snapshot + 1] = iter_key;
af3cbd45
MD
2388 snapshot_ptr[nr_snapshot] = prev_node_flag_ptr;
2389 snapshot[nr_snapshot++] = node_flag;
35170a44 2390 node_flag = ja_node_get_nth(node_flag,
fa112799 2391 &node_flag_ptr,
35170a44 2392 iter_key);
48cbe001
MD
2393 if (node_flag)
2394 prev_node_flag_ptr = node_flag_ptr;
af3cbd45
MD
2395 dbg_printf("cds_ja_del iter key lookup %u finds node_flag %p, prev_node_flag_ptr %p\n",
2396 (unsigned int) iter_key, node_flag,
2397 prev_node_flag_ptr);
35170a44 2398 }
35170a44
MD
2399 /*
2400 * We reached bottom of tree, try to find the node we are trying
2401 * to remove. Fail if we cannot find it.
2402 */
2403 if (!ja_node_ptr(node_flag)) {
4d6ef45e
MD
2404 dbg_printf("cds_ja_del: no node found for key %" PRIu64 "\n",
2405 key);
35170a44
MD
2406 return -ENOENT;
2407 } else {
4d6ef45e 2408 struct cds_hlist_head hlist_head;
35170a44 2409 struct cds_hlist_node *hlist_node;
af3cbd45
MD
2410 struct cds_ja_node *entry, *match = NULL;
2411 int count = 0;
35170a44 2412
4d6ef45e
MD
2413 hlist_head.next =
2414 (struct cds_hlist_node *) ja_node_ptr(node_flag);
af3cbd45 2415 cds_hlist_for_each_entry_rcu(entry,
35170a44 2416 hlist_node,
4d6ef45e 2417 &hlist_head,
35170a44 2418 list) {
4d6ef45e 2419 dbg_printf("cds_ja_del: compare %p with entry %p\n", node, entry);
af3cbd45
MD
2420 if (entry == node)
2421 match = entry;
2422 count++;
35170a44 2423 }
4d6ef45e
MD
2424 if (!match) {
2425 dbg_printf("cds_ja_del: no node match for node %p key %" PRIu64 "\n", node, key);
35170a44 2426 return -ENOENT;
4d6ef45e 2427 }
af3cbd45
MD
2428 assert(count > 0);
2429 if (count == 1) {
2430 /*
4d6ef45e
MD
2431 * Removing last of duplicates. Last snapshot
2432			 * does not have a shadow node (external leaves).
af3cbd45
MD
2433 */
2434 snapshot_ptr[nr_snapshot] = prev_node_flag_ptr;
2435 snapshot[nr_snapshot++] = node_flag;
2436 ret = ja_detach_node(ja, snapshot, snapshot_ptr,
2437 snapshot_n, nr_snapshot, key, node);
2438 } else {
f2758d14 2439 ret = ja_unchain_node(ja, snapshot[nr_snapshot - 1],
013a6083 2440 node_flag_ptr, node_flag, match);
af3cbd45 2441 }
35170a44 2442 }
b306a0fe
MD
2443 /*
2444 * Explanation of -ENOENT handling: caused by concurrent delete
2445 * between RCU lookup and actual removal. Need to re-do the
2446 * lookup and removal attempt.
2447 */
2448 if (ret == -EAGAIN || ret == -ENOENT)
35170a44
MD
2449 goto retry;
2450 return ret;
2451}
2452
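/*
 * Usage sketch (illustrative only): remove an entry and defer freeing
 * of its enclosing structure until a grace period has elapsed.
 * "free_cb" and "defer_free" stand for the caller's rcu_head callback
 * and the call_rcu() function of the caller's RCU flavor; both names
 * are hypothetical. Must be called with the RCU read-side lock held.
 */
static __attribute__((unused))
int example_del(struct cds_ja *ja, uint64_t key, struct cds_ja_node *node,
		void (*free_cb)(struct rcu_head *head),
		void (*defer_free)(struct rcu_head *head,
			void (*func)(struct rcu_head *head)))
{
	int ret;

	ret = cds_ja_del(ja, key, node);
	if (!ret) {
		/* Unlinked from the array: reclaim after a grace period. */
		defer_free(&node->head, free_cb);
	}
	return ret;
}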
b4540e8a
MD
2453struct cds_ja *_cds_ja_new(unsigned int key_bits,
2454 const struct rcu_flavor_struct *flavor)
be9a7474
MD
2455{
2456 struct cds_ja *ja;
b0f74e47 2457 int ret;
f07b240f 2458 struct cds_ja_shadow_node *root_shadow_node;
be9a7474
MD
2459
2460 ja = calloc(sizeof(*ja), 1);
2461 if (!ja)
2462 goto ja_error;
b4540e8a
MD
2463
2464 switch (key_bits) {
2465 case 8:
b4540e8a 2466 case 16:
1216b3d2 2467 case 24:
b4540e8a 2468 case 32:
1216b3d2
MD
2469 case 40:
2470 case 48:
2471 case 56:
2472 ja->key_max = (1ULL << key_bits) - 1;
b4540e8a
MD
2473 break;
2474 case 64:
2475 ja->key_max = UINT64_MAX;
2476 break;
2477 default:
2478 goto check_error;
2479 }
2480
be9a7474 2481 /* ja->root is NULL */
5a9a87dd 2482 /* tree_depth 0 is for pointer to root node */
582a6ade 2483 ja->tree_depth = (key_bits >> JA_LOG2_BITS_PER_BYTE) + 1;
a2a7ff59 2484 assert(ja->tree_depth <= JA_MAX_DEPTH);
be9a7474
MD
2485 ja->ht = rcuja_create_ht(flavor);
2486 if (!ja->ht)
2487 goto ht_error;
b0f74e47
MD
2488
2489 /*
2490	 * Note: we should not free this node until the judy array is destroyed.
2491 */
f07b240f 2492 root_shadow_node = rcuja_shadow_set(ja->ht,
3d8fe307 2493 (struct cds_ja_inode_flag *) &ja->root,
48cbe001 2494 NULL, ja, 0);
f07b240f
MD
2495 if (!root_shadow_node) {
2496 ret = -ENOMEM;
b0f74e47 2497 goto ht_node_error;
f07b240f 2498 }
b0f74e47 2499
be9a7474
MD
2500 return ja;
2501
b0f74e47
MD
2502ht_node_error:
2503 ret = rcuja_delete_ht(ja->ht);
2504 assert(!ret);
be9a7474 2505ht_error:
b4540e8a 2506check_error:
be9a7474
MD
2507 free(ja);
2508ja_error:
2509 return NULL;
2510}
2511
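/*
 * Usage sketch (illustrative only): create a judy array indexed by
 * 32-bit keys for a given RCU flavor, use it, then destroy it. The
 * helper name and the "free_node_cb" callback are hypothetical;
 * free_node_cb is expected to free the structure enclosing the
 * rcu_head, as for cds_ja_destroy().
 */
static __attribute__((unused))
int example_create_destroy(const struct rcu_flavor_struct *flavor,
		void (*free_node_cb)(struct rcu_head *head))
{
	struct cds_ja *ja;

	ja = _cds_ja_new(32, flavor);	/* Accepts keys from 0 to 2^32 - 1. */
	if (!ja)
		return -ENOMEM;
	/* ... add, lookup and delete nodes here ... */
	return cds_ja_destroy(ja, free_node_cb);
}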
3d8fe307
MD
2512/*
2513 * Called from RCU read-side CS.
2514 */
2515__attribute__((visibility("protected")))
2516void rcuja_free_all_children(struct cds_ja_shadow_node *shadow_node,
2517 struct cds_ja_inode_flag *node_flag,
2518 void (*free_node_cb)(struct rcu_head *head))
2519{
2520 const struct rcu_flavor_struct *flavor;
2521 unsigned int type_index;
2522 struct cds_ja_inode *node;
2523 const struct cds_ja_type *type;
2524
2525 flavor = cds_lfht_rcu_flavor(shadow_node->ja->ht);
2526 node = ja_node_ptr(node_flag);
2527 assert(node != NULL);
2528 type_index = ja_node_type(node_flag);
2529 type = &ja_types[type_index];
2530
2531 switch (type->type_class) {
2532 case RCU_JA_LINEAR:
2533 {
2534 uint8_t nr_child =
2535 ja_linear_node_get_nr_child(type, node);
2536 unsigned int i;
2537
2538 for (i = 0; i < nr_child; i++) {
2539 struct cds_ja_inode_flag *iter;
2540 struct cds_hlist_head head;
2541 struct cds_ja_node *entry;
48cbe001 2542 struct cds_hlist_node *pos, *tmp;
3d8fe307
MD
2543 uint8_t v;
2544
2545 ja_linear_node_get_ith_pos(type, node, i, &v, &iter);
2546 if (!iter)
2547 continue;
2548 head.next = (struct cds_hlist_node *) iter;
48cbe001 2549 cds_hlist_for_each_entry_safe(entry, pos, tmp, &head, list) {
3d8fe307
MD
2550 flavor->update_call_rcu(&entry->head, free_node_cb);
2551 }
2552 }
2553 break;
2554 }
2555 case RCU_JA_POOL:
2556 {
2557 unsigned int pool_nr;
2558
2559 for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) {
2560 struct cds_ja_inode *pool =
2561 ja_pool_node_get_ith_pool(type, node, pool_nr);
2562 uint8_t nr_child =
2563 ja_linear_node_get_nr_child(type, pool);
2564 unsigned int j;
2565
2566 for (j = 0; j < nr_child; j++) {
2567 struct cds_ja_inode_flag *iter;
2568 struct cds_hlist_head head;
2569 struct cds_ja_node *entry;
48cbe001 2570 struct cds_hlist_node *pos, *tmp;
3d8fe307
MD
2571 uint8_t v;
2572
75d573aa 2573 ja_linear_node_get_ith_pos(type, pool, j, &v, &iter);
3d8fe307
MD
2574 if (!iter)
2575 continue;
2576 head.next = (struct cds_hlist_node *) iter;
48cbe001 2577 cds_hlist_for_each_entry_safe(entry, pos, tmp, &head, list) {
3d8fe307
MD
2578 flavor->update_call_rcu(&entry->head, free_node_cb);
2579 }
2580 }
2581 }
2582 break;
2583 }
2584 case RCU_JA_NULL:
2585 break;
2586 case RCU_JA_PIGEON:
2587 {
3d8fe307
MD
2588 unsigned int i;
2589
48cbe001 2590 for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
3d8fe307
MD
2591 struct cds_ja_inode_flag *iter;
2592 struct cds_hlist_head head;
2593 struct cds_ja_node *entry;
48cbe001 2594 struct cds_hlist_node *pos, *tmp;
3d8fe307
MD
2595
2596 iter = ja_pigeon_node_get_ith_pos(type, node, i);
2597 if (!iter)
2598 continue;
2599 head.next = (struct cds_hlist_node *) iter;
48cbe001 2600 cds_hlist_for_each_entry_safe(entry, pos, tmp, &head, list) {
3d8fe307
MD
2601 flavor->update_call_rcu(&entry->head, free_node_cb);
2602 }
2603 }
2604 break;
2605 }
2606 default:
2607 assert(0);
2608 }
2609}
2610
19ddcd04 2611static
354981c2 2612void print_debug_fallback_distribution(struct cds_ja *ja)
19ddcd04
MD
2613{
2614 int i;
2615
2616 fprintf(stderr, "Fallback node distribution:\n");
2617 for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
354981c2 2618 if (!ja->node_fallback_count_distribution[i])
19ddcd04
MD
2619 continue;
2620 fprintf(stderr, " %3u: %4lu\n",
354981c2 2621 i, ja->node_fallback_count_distribution[i]);
19ddcd04
MD
2622 }
2623}
2624
021c72c0 2625static
19a748d9 2626int ja_final_checks(struct cds_ja *ja)
021c72c0
MD
2627{
2628 double fallback_ratio;
2629 unsigned long na, nf, nr_fallback;
19a748d9 2630 int ret = 0;
021c72c0
MD
2631
2632 fallback_ratio = (double) uatomic_read(&ja->nr_fallback);
2633 fallback_ratio /= (double) uatomic_read(&ja->nr_nodes_allocated);
2634 nr_fallback = uatomic_read(&ja->nr_fallback);
2635 if (nr_fallback)
2636 fprintf(stderr,
2637 "[warning] RCU Judy Array used %lu fallback node(s) (ratio: %g)\n",
2638 uatomic_read(&ja->nr_fallback),
2639 fallback_ratio);
2640
2641 na = uatomic_read(&ja->nr_nodes_allocated);
2642 nf = uatomic_read(&ja->nr_nodes_freed);
19a748d9
MD
2643 dbg_printf("Nodes allocated: %lu, Nodes freed: %lu.\n", na, nf);
2644 if (nr_fallback)
2645 print_debug_fallback_distribution(ja);
2646
021c72c0
MD
2647 if (na != nf) {
2648 fprintf(stderr, "[error] Judy array leaked %ld nodes. Allocated: %lu, freed: %lu.\n",
2649 (long) na - nf, na, nf);
19a748d9 2650 ret = -1;
021c72c0 2651 }
19a748d9 2652 return ret;
021c72c0
MD
2653}
2654
be9a7474
MD
2655/*
2656 * There should be no more concurrent add to the judy array while it is
2657 * being destroyed (ensured by the caller).
2658 */
3d8fe307
MD
2659int cds_ja_destroy(struct cds_ja *ja,
2660 void (*free_node_cb)(struct rcu_head *head))
be9a7474 2661{
48cbe001 2662 const struct rcu_flavor_struct *flavor;
b4540e8a
MD
2663 int ret;
2664
48cbe001 2665 flavor = cds_lfht_rcu_flavor(ja->ht);
be9a7474 2666 rcuja_shadow_prune(ja->ht,
3d8fe307
MD
2667 RCUJA_SHADOW_CLEAR_FREE_NODE | RCUJA_SHADOW_CLEAR_FREE_LOCK,
2668 free_node_cb);
48cbe001 2669 flavor->thread_offline();
b4540e8a
MD
2670 ret = rcuja_delete_ht(ja->ht);
2671 if (ret)
2672 return ret;
f2ae7af7
MD
2673
2674 /* Wait for in-flight call_rcu free to complete. */
2675 flavor->barrier();
2676
48cbe001 2677 flavor->thread_online();
19a748d9 2678 ret = ja_final_checks(ja);
b4540e8a 2679 free(ja);
19a748d9 2680 return ret;
be9a7474 2681}