rcuja: document that destroy free_node_cb does not need to wait for Q.S.
1 /*
2 * rcuja/rcuja.c
3 *
4 * Userspace RCU library - RCU Judy Array
5 *
6 * Copyright 2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 */
22
23 #define _LGPL_SOURCE
24 #include <stdint.h>
25 #include <errno.h>
26 #include <limits.h>
27 #include <string.h>
28 #include <urcu/rcuja.h>
29 #include <urcu/compiler.h>
30 #include <urcu/arch.h>
31 #include <assert.h>
32 #include <urcu-pointer.h>
33 #include <urcu/uatomic.h>
35
36 #include "rcuja-internal.h"
37
38 #ifndef abs_int
39 #define abs_int(a) ((int) (a) > 0 ? (int) (a) : -((int) (a)))
40 #endif
41
42 enum cds_ja_type_class {
43 RCU_JA_LINEAR = 0, /* Type A */
44 /* 32-bit: 1 to 25 children, 8 to 128 bytes */
45 /* 64-bit: 1 to 28 children, 16 to 256 bytes */
46 RCU_JA_POOL = 1, /* Type B */
47 /* 32-bit: 26 to 100 children, 256 to 512 bytes */
48 /* 64-bit: 29 to 112 children, 512 to 1024 bytes */
49 RCU_JA_PIGEON = 2, /* Type C */
50 /* 32-bit: 101 to 256 children, 1024 bytes */
51 /* 64-bit: 113 to 256 children, 2048 bytes */
52 /* Leaf nodes are implicit from their height in the tree */
53 RCU_JA_NR_TYPES,
54
55 RCU_JA_NULL, /* not an encoded type, but keeps code regular */
56 };
57
58 struct cds_ja_type {
59 enum cds_ja_type_class type_class;
60 uint16_t min_child; /* minimum number of children: 1 to 256 */
61 uint16_t max_child; /* maximum number of children: 1 to 256 */
62 uint16_t max_linear_child; /* per-pool max nr. children: 1 to 256 */
63 uint16_t order; /* node size is (1 << order), in bytes */
64 uint16_t nr_pool_order; /* number of pools is (1 << nr_pool_order) */
65 uint16_t pool_size_order; /* each pool spans (1 << pool_size_order) bytes */
66 };
67
68 /*
69 * Iteration on the array to find the right node size for the number of
70 * children stops when it reaches .max_child == 256 (this is the largest
71 * possible node size, which contains 256 children).
72 * The min_child values overlap with the previous type's max_child to
73 * provide a hysteresis loop, avoiding repeated reallocation for
74 * patterns of cyclic add/removal within the same node.
75 * The node index within the following arrays is represented on 3
76 * bits. It identifies the node type, min/max number of children, and
77 * the size order.
78 * The max_child values for the RCU_JA_POOL types below result from
79 * statistical approximation: over a million generated populations, the
80 * max_child covers between 97% and 99% of them. Therefore, a
81 * fallback must exist to cover the rare cases of extreme population
82 * unbalance, but it will have no major impact on speed or space
83 * consumption, since those cases are rare.
84 */
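
/*
 * Editor's illustration (not part of the library): a minimal sketch of
 * how a type index could be chosen for a given child count from the
 * ja_types[] arrays below. The library's actual selection, including
 * the hysteresis behavior, is implemented by find_nearest_type_index()
 * further down.
 */
#if 0	/* example only */
static unsigned int example_pick_type_index(unsigned int nr_children)
{
	unsigned int i = 0;

	/* Walk node types upwards until one can hold nr_children. */
	while (nr_children > ja_types[i].max_child)
		i++;
	return i;	/* 3-bit type index */
}
#endif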
85
86 #if (CAA_BITS_PER_LONG < 64)
87 /* 32-bit pointers */
88 enum {
89 ja_type_0_max_child = 1,
90 ja_type_1_max_child = 3,
91 ja_type_2_max_child = 6,
92 ja_type_3_max_child = 12,
93 ja_type_4_max_child = 25,
94 ja_type_5_max_child = 48,
95 ja_type_6_max_child = 92,
96 ja_type_7_max_child = 256,
97 ja_type_8_max_child = 0, /* NULL */
98 };
99
100 enum {
101 ja_type_0_max_linear_child = 1,
102 ja_type_1_max_linear_child = 3,
103 ja_type_2_max_linear_child = 6,
104 ja_type_3_max_linear_child = 12,
105 ja_type_4_max_linear_child = 25,
106 ja_type_5_max_linear_child = 24,
107 ja_type_6_max_linear_child = 23,
108 };
109
110 enum {
111 ja_type_5_nr_pool_order = 1,
112 ja_type_6_nr_pool_order = 2,
113 };
114
115 const struct cds_ja_type ja_types[] = {
116 { .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_0_max_child, .max_linear_child = ja_type_0_max_linear_child, .order = 3, },
117 { .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_1_max_child, .max_linear_child = ja_type_1_max_linear_child, .order = 4, },
118 { .type_class = RCU_JA_LINEAR, .min_child = 3, .max_child = ja_type_2_max_child, .max_linear_child = ja_type_2_max_linear_child, .order = 5, },
119 { .type_class = RCU_JA_LINEAR, .min_child = 4, .max_child = ja_type_3_max_child, .max_linear_child = ja_type_3_max_linear_child, .order = 6, },
120 { .type_class = RCU_JA_LINEAR, .min_child = 10, .max_child = ja_type_4_max_child, .max_linear_child = ja_type_4_max_linear_child, .order = 7, },
121
122 /* Pools may fill sooner than max_child */
123 /* This pool is hardcoded at index 5. See ja_node_ptr(). */
124 { .type_class = RCU_JA_POOL, .min_child = 20, .max_child = ja_type_5_max_child, .max_linear_child = ja_type_5_max_linear_child, .order = 8, .nr_pool_order = ja_type_5_nr_pool_order, .pool_size_order = 7, },
125 /* This pool is hardcoded at index 6. See ja_node_ptr(). */
126 { .type_class = RCU_JA_POOL, .min_child = 45, .max_child = ja_type_6_max_child, .max_linear_child = ja_type_6_max_linear_child, .order = 9, .nr_pool_order = ja_type_6_nr_pool_order, .pool_size_order = 7, },
127
128 /*
129 * Upon node removal below min_child, if child pool is filled
130 * beyond capacity, we roll back to pigeon.
131 */
132 { .type_class = RCU_JA_PIGEON, .min_child = 83, .max_child = ja_type_7_max_child, .order = 10, },
133
134 { .type_class = RCU_JA_NULL, .min_child = 0, .max_child = ja_type_8_max_child, },
135 };
136 #else /* !(CAA_BITS_PER_LONG < 64) */
137 /* 64-bit pointers */
138 enum {
139 ja_type_0_max_child = 1,
140 ja_type_1_max_child = 3,
141 ja_type_2_max_child = 7,
142 ja_type_3_max_child = 14,
143 ja_type_4_max_child = 28,
144 ja_type_5_max_child = 54,
145 ja_type_6_max_child = 104,
146 ja_type_7_max_child = 256,
147 ja_type_8_max_child = 256,
148 };
149
150 enum {
151 ja_type_0_max_linear_child = 1,
152 ja_type_1_max_linear_child = 3,
153 ja_type_2_max_linear_child = 7,
154 ja_type_3_max_linear_child = 14,
155 ja_type_4_max_linear_child = 28,
156 ja_type_5_max_linear_child = 27,
157 ja_type_6_max_linear_child = 26,
158 };
159
160 enum {
161 ja_type_5_nr_pool_order = 1,
162 ja_type_6_nr_pool_order = 2,
163 };
164
165 const struct cds_ja_type ja_types[] = {
166 { .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_0_max_child, .max_linear_child = ja_type_0_max_linear_child, .order = 4, },
167 { .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_1_max_child, .max_linear_child = ja_type_1_max_linear_child, .order = 5, },
168 { .type_class = RCU_JA_LINEAR, .min_child = 3, .max_child = ja_type_2_max_child, .max_linear_child = ja_type_2_max_linear_child, .order = 6, },
169 { .type_class = RCU_JA_LINEAR, .min_child = 5, .max_child = ja_type_3_max_child, .max_linear_child = ja_type_3_max_linear_child, .order = 7, },
170 { .type_class = RCU_JA_LINEAR, .min_child = 10, .max_child = ja_type_4_max_child, .max_linear_child = ja_type_4_max_linear_child, .order = 8, },
171
172 /* Pools may fill sooner than max_child. */
173 /* This pool is hardcoded at index 5. See ja_node_ptr(). */
174 { .type_class = RCU_JA_POOL, .min_child = 22, .max_child = ja_type_5_max_child, .max_linear_child = ja_type_5_max_linear_child, .order = 9, .nr_pool_order = ja_type_5_nr_pool_order, .pool_size_order = 8, },
175 /* This pool is hardcoded at index 6. See ja_node_ptr(). */
176 { .type_class = RCU_JA_POOL, .min_child = 51, .max_child = ja_type_6_max_child, .max_linear_child = ja_type_6_max_linear_child, .order = 10, .nr_pool_order = ja_type_6_nr_pool_order, .pool_size_order = 8, },
177
178 /*
179 * Upon node removal below min_child, if child pool is filled
180 * beyond capacity, we roll back to pigeon.
181 */
182 { .type_class = RCU_JA_PIGEON, .min_child = 95, .max_child = ja_type_7_max_child, .order = 11, },
183
184 { .type_class = RCU_JA_NULL, .min_child = 0, .max_child = ja_type_8_max_child, },
185 };
186 #endif /* !(CAA_BITS_PER_LONG < 64) */
187
188 static inline __attribute__((unused))
189 void static_array_size_check(void)
190 {
191 CAA_BUILD_BUG_ON(CAA_ARRAY_SIZE(ja_types) < JA_TYPE_MAX_NR);
192 }
193
194 /*
195 * The cds_ja_node contains the compressed node data needed for
196 * read-side. For linear and pool node configurations, it starts with a
197 * byte counting the number of children in the node. Then, the
198 * node-specific data is placed.
199 * The node mutex, if any is needed, protecting concurrent updates of
200 * each node, is placed in a separate hash table indexed by node address.
201 * For the pigeon configuration, the number of children is also kept in
202 * a separate hash table, indexed by node address, because it is only
203 * required for updates.
204 */
205
206 #define DECLARE_LINEAR_NODE(index) \
207 struct { \
208 uint8_t nr_child; \
209 uint8_t child_value[ja_type_## index ##_max_linear_child]; \
210 struct cds_ja_inode_flag *child_ptr[ja_type_## index ##_max_linear_child]; \
211 }
212
213 #define DECLARE_POOL_NODE(index) \
214 struct { \
215 struct { \
216 uint8_t nr_child; \
217 uint8_t child_value[ja_type_## index ##_max_linear_child]; \
218 struct cds_ja_inode_flag *child_ptr[ja_type_## index ##_max_linear_child]; \
219 } linear[1U << ja_type_## index ##_nr_pool_order]; \
220 }
221
222 struct cds_ja_inode {
223 union {
224 /* Linear configuration */
225 DECLARE_LINEAR_NODE(0) conf_0;
226 DECLARE_LINEAR_NODE(1) conf_1;
227 DECLARE_LINEAR_NODE(2) conf_2;
228 DECLARE_LINEAR_NODE(3) conf_3;
229 DECLARE_LINEAR_NODE(4) conf_4;
230
231 /* Pool configuration */
232 DECLARE_POOL_NODE(5) conf_5;
233 DECLARE_POOL_NODE(6) conf_6;
234
235 /* Pigeon configuration */
236 struct {
237 struct cds_ja_inode_flag *child[ja_type_7_max_child];
238 } conf_7;
239 /* data aliasing nodes for computed accesses */
240 uint8_t data[sizeof(struct cds_ja_inode_flag *) * ja_type_7_max_child];
241 } u;
242 };
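
/*
 * Editor's illustration: the u.data[] byte array aliases the typed
 * members above for computed accesses. For linear configurations (and
 * for each pool entry of pool configurations, at offset
 * "pool index << pool_size_order"), the layout decoded by
 * ja_linear_node_get_nth() below is:
 *
 *   u.data[0]                       nr_child (uint8_t)
 *   u.data[1 .. max_linear_child]   child_value[] (one uint8_t each)
 *   <padding up to pointer alignment, see align_ptr_size()>
 *   child_ptr[0 .. max_linear_child - 1]  (struct cds_ja_inode_flag *)
 */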
243
244 enum ja_recompact {
245 JA_RECOMPACT_ADD_SAME,
246 JA_RECOMPACT_ADD_NEXT,
247 JA_RECOMPACT_DEL,
248 };
249
250 static
251 struct cds_ja_inode *_ja_node_mask_ptr(struct cds_ja_inode_flag *node)
252 {
253 return (struct cds_ja_inode *) (((unsigned long) node) & JA_PTR_MASK);
254 }
255
256 unsigned long ja_node_type(struct cds_ja_inode_flag *node)
257 {
258 unsigned long type;
259
260 if (_ja_node_mask_ptr(node) == NULL) {
261 return NODE_INDEX_NULL;
262 }
263 type = (unsigned int) ((unsigned long) node & JA_TYPE_MASK);
264 assert(type < (1UL << JA_TYPE_BITS));
265 return type;
266 }
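
/*
 * Editor's note: a node "flag" pointer carries the 3-bit node type in
 * its low-order bits, which the size-aligned allocation in
 * alloc_cds_ja_node() below guarantees to be zero. A minimal sketch of
 * the encode side, assuming the JA_TYPE_BITS definition from
 * rcuja-internal.h (the real encoding helpers live in that header):
 */
#if 0	/* example only */
static struct cds_ja_inode_flag *example_encode_flag(struct cds_ja_inode *node,
		unsigned long type)
{
	assert(type < (1UL << JA_TYPE_BITS));
	return (struct cds_ja_inode_flag *) ((unsigned long) node | type);
}
#endif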
267
268 static
269 struct cds_ja_inode *alloc_cds_ja_node(struct cds_ja *ja,
270 const struct cds_ja_type *ja_type)
271 {
272 size_t len = 1U << ja_type->order;
273 void *p;
274 int ret;
275
276 ret = posix_memalign(&p, len, len);
277 if (ret || !p) {
278 return NULL;
279 }
280 memset(p, 0, len);
281 uatomic_inc(&ja->nr_nodes_allocated);
282 return p;
283 }
284
285 void free_cds_ja_node(struct cds_ja *ja, struct cds_ja_inode *node)
286 {
287 free(node);
288 if (node)
289 uatomic_inc(&ja->nr_nodes_freed);
290 }
291
292 #define __JA_ALIGN_MASK(v, mask) (((v) + (mask)) & ~(mask))
293 #define JA_ALIGN(v, align) __JA_ALIGN_MASK(v, (typeof(v)) (align) - 1)
294 #define __JA_FLOOR_MASK(v, mask) ((v) & ~(mask))
295 #define JA_FLOOR(v, align) __JA_FLOOR_MASK(v, (typeof(v)) (align) - 1)
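
/* Editor's example: JA_ALIGN(13, 8) == 16 and JA_FLOOR(13, 8) == 8. */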
296
297 static
298 uint8_t *align_ptr_size(uint8_t *ptr)
299 {
300 return (uint8_t *) JA_ALIGN((unsigned long) ptr, sizeof(void *));
301 }
302
303 static
304 uint8_t ja_linear_node_get_nr_child(const struct cds_ja_type *type,
305 struct cds_ja_inode *node)
306 {
307 assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
308 return rcu_dereference(node->u.data[0]);
309 }
310
311 /*
312 * The order in which values and pointers are read does not matter: if
313 * a value is missing, we return NULL. If a value is there, but its
314 * associated pointer is still NULL, we return NULL too.
315 */
316 static
317 struct cds_ja_inode_flag *ja_linear_node_get_nth(const struct cds_ja_type *type,
318 struct cds_ja_inode *node,
319 struct cds_ja_inode_flag ***node_flag_ptr,
320 uint8_t n)
321 {
322 uint8_t nr_child;
323 uint8_t *values;
324 struct cds_ja_inode_flag **pointers;
325 struct cds_ja_inode_flag *ptr;
326 unsigned int i;
327
328 assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
329
330 nr_child = ja_linear_node_get_nr_child(type, node);
331 cmm_smp_rmb(); /* read nr_child before values and pointers */
332 assert(nr_child <= type->max_linear_child);
333 assert(type->type_class != RCU_JA_LINEAR || nr_child >= type->min_child);
334
335 values = &node->u.data[1];
336 for (i = 0; i < nr_child; i++) {
337 if (CMM_LOAD_SHARED(values[i]) == n)
338 break;
339 }
340 if (i >= nr_child) {
341 if (caa_unlikely(node_flag_ptr))
342 *node_flag_ptr = NULL;
343 return NULL;
344 }
345 pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
346 ptr = rcu_dereference(pointers[i]);
347 if (caa_unlikely(node_flag_ptr))
348 *node_flag_ptr = &pointers[i];
349 return ptr;
350 }
351
352 static
353 struct cds_ja_inode_flag *ja_linear_node_get_left(const struct cds_ja_type *type,
354 struct cds_ja_inode *node,
355 unsigned int n)
356 {
357 uint8_t nr_child;
358 uint8_t *values;
359 struct cds_ja_inode_flag **pointers;
360 struct cds_ja_inode_flag *ptr;
361 unsigned int i, match_idx;
362 int match_v = -1;
363
364 assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
365
366 nr_child = ja_linear_node_get_nr_child(type, node);
367 cmm_smp_rmb(); /* read nr_child before values and pointers */
368 assert(nr_child <= type->max_linear_child);
369 assert(type->type_class != RCU_JA_LINEAR || nr_child >= type->min_child);
370
371 values = &node->u.data[1];
372 for (i = 0; i < nr_child; i++) {
373 unsigned int v;
374
375 v = CMM_LOAD_SHARED(values[i]);
376 if (v < n && (int) v > match_v) {
377 match_v = v;
378 match_idx = i;
379 }
380 }
381 if (match_v < 0) {
382 return NULL;
383 }
384 pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
385 ptr = rcu_dereference(pointers[match_idx]);
386 return ptr;
387 }
388
389 static
390 void ja_linear_node_get_ith_pos(const struct cds_ja_type *type,
391 struct cds_ja_inode *node,
392 uint8_t i,
393 uint8_t *v,
394 struct cds_ja_inode_flag **iter)
395 {
396 uint8_t *values;
397 struct cds_ja_inode_flag **pointers;
398
399 assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
400 assert(i < ja_linear_node_get_nr_child(type, node));
401
402 values = &node->u.data[1];
403 *v = values[i];
404 pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
405 *iter = pointers[i];
406 }
407
408 static
409 struct cds_ja_inode_flag *ja_pool_node_get_nth(const struct cds_ja_type *type,
410 struct cds_ja_inode *node,
411 struct cds_ja_inode_flag *node_flag,
412 struct cds_ja_inode_flag ***node_flag_ptr,
413 uint8_t n)
414 {
415 struct cds_ja_inode *linear;
416
417 assert(type->type_class == RCU_JA_POOL);
418
419 switch (type->nr_pool_order) {
420 case 1:
421 {
422 unsigned long bitsel, index;
423
424 bitsel = ja_node_pool_1d_bitsel(node_flag);
425 assert(bitsel < CHAR_BIT);
426 index = ((unsigned long) n >> bitsel) & 0x1;
427 linear = (struct cds_ja_inode *) &node->u.data[index << type->pool_size_order];
428 break;
429 }
430 case 2:
431 {
432 unsigned long bitsel[2], index[2], rindex;
433
434 ja_node_pool_2d_bitsel(node_flag, bitsel);
435 assert(bitsel[0] < CHAR_BIT);
436 assert(bitsel[1] < CHAR_BIT);
437 index[0] = ((unsigned long) n >> bitsel[0]) & 0x1;
438 index[0] <<= 1;
439 index[1] = ((unsigned long) n >> bitsel[1]) & 0x1;
440 rindex = index[0] | index[1];
441 linear = (struct cds_ja_inode *) &node->u.data[rindex << type->pool_size_order];
442 break;
443 }
444 default:
445 linear = NULL;
446 assert(0);
447 }
448 return ja_linear_node_get_nth(type, linear, node_flag_ptr, n);
449 }
450
451 static
452 struct cds_ja_inode *ja_pool_node_get_ith_pool(const struct cds_ja_type *type,
453 struct cds_ja_inode *node,
454 uint8_t i)
455 {
456 assert(type->type_class == RCU_JA_POOL);
457 return (struct cds_ja_inode *)
458 &node->u.data[(unsigned int) i << type->pool_size_order];
459 }
460
461 static
462 struct cds_ja_inode_flag *ja_pool_node_get_left(const struct cds_ja_type *type,
463 struct cds_ja_inode *node,
464 unsigned int n)
465 {
466 unsigned int pool_nr;
467 int match_v = -1;
468 struct cds_ja_inode_flag *match_node_flag = NULL;
469
470 assert(type->type_class == RCU_JA_POOL);
471
472 for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) {
473 struct cds_ja_inode *pool =
474 ja_pool_node_get_ith_pool(type,
475 node, pool_nr);
476 uint8_t nr_child =
477 ja_linear_node_get_nr_child(type, pool);
478 unsigned int j;
479
480 for (j = 0; j < nr_child; j++) {
481 struct cds_ja_inode_flag *iter;
482 uint8_t v;
483
484 ja_linear_node_get_ith_pos(type, pool,
485 j, &v, &iter);
486 if (!iter)
487 continue;
488 if (v < n && (int) v > match_v) {
489 match_v = v;
490 match_node_flag = iter;
491 }
492 }
493 }
494 return match_node_flag;
495 }
496
497 static
498 struct cds_ja_inode_flag *ja_pigeon_node_get_nth(const struct cds_ja_type *type,
499 struct cds_ja_inode *node,
500 struct cds_ja_inode_flag ***node_flag_ptr,
501 uint8_t n)
502 {
503 struct cds_ja_inode_flag **child_node_flag_ptr;
504 struct cds_ja_inode_flag *child_node_flag;
505
506 assert(type->type_class == RCU_JA_PIGEON);
507 child_node_flag_ptr = &((struct cds_ja_inode_flag **) node->u.data)[n];
508 child_node_flag = rcu_dereference(*child_node_flag_ptr);
509 dbg_printf("ja_pigeon_node_get_nth child_node_flag_ptr %p\n",
510 child_node_flag_ptr);
511 if (caa_unlikely(node_flag_ptr))
512 *node_flag_ptr = child_node_flag_ptr;
513 return child_node_flag;
514 }
515
516 static
517 struct cds_ja_inode_flag *ja_pigeon_node_get_left(const struct cds_ja_type *type,
518 struct cds_ja_inode *node,
519 unsigned int n)
520 {
521 struct cds_ja_inode_flag **child_node_flag_ptr;
522 struct cds_ja_inode_flag *child_node_flag;
523 int i;
524
525 assert(type->type_class == RCU_JA_PIGEON);
526
527 /* n - 1 is first value left of n */
528 for (i = n - 1; i >= 0; i--) {
529 child_node_flag_ptr = &((struct cds_ja_inode_flag **) node->u.data)[i];
530 child_node_flag = rcu_dereference(*child_node_flag_ptr);
531 if (child_node_flag) {
532 dbg_printf("ja_pigeon_node_get_left child_node_flag %p\n",
533 child_node_flag);
534 return child_node_flag;
535 }
536 }
537 return NULL;
538 }
539
540 static
541 struct cds_ja_inode_flag *ja_pigeon_node_get_ith_pos(const struct cds_ja_type *type,
542 struct cds_ja_inode *node,
543 uint8_t i)
544 {
545 return ja_pigeon_node_get_nth(type, node, NULL, i);
546 }
547
548 /*
549 * ja_node_get_nth: get nth item from a node.
550 * node_flag is already rcu_dereference'd.
551 */
552 static
553 struct cds_ja_inode_flag *ja_node_get_nth(struct cds_ja_inode_flag *node_flag,
554 struct cds_ja_inode_flag ***node_flag_ptr,
555 uint8_t n)
556 {
557 unsigned int type_index;
558 struct cds_ja_inode *node;
559 const struct cds_ja_type *type;
560
561 node = ja_node_ptr(node_flag);
562 assert(node != NULL);
563 type_index = ja_node_type(node_flag);
564 type = &ja_types[type_index];
565
566 switch (type->type_class) {
567 case RCU_JA_LINEAR:
568 return ja_linear_node_get_nth(type, node,
569 node_flag_ptr, n);
570 case RCU_JA_POOL:
571 return ja_pool_node_get_nth(type, node, node_flag,
572 node_flag_ptr, n);
573 case RCU_JA_PIGEON:
574 return ja_pigeon_node_get_nth(type, node,
575 node_flag_ptr, n);
576 default:
577 assert(0);
578 return (void *) -1UL;
579 }
580 }
581
582 static
583 struct cds_ja_inode_flag *ja_node_get_left(struct cds_ja_inode_flag *node_flag,
584 unsigned int n)
585 {
586 unsigned int type_index;
587 struct cds_ja_inode *node;
588 const struct cds_ja_type *type;
589
590 node = ja_node_ptr(node_flag);
591 assert(node != NULL);
592 type_index = ja_node_type(node_flag);
593 type = &ja_types[type_index];
594
595 switch (type->type_class) {
596 case RCU_JA_LINEAR:
597 return ja_linear_node_get_left(type, node, n);
598 case RCU_JA_POOL:
599 return ja_pool_node_get_left(type, node, n);
600 case RCU_JA_PIGEON:
601 return ja_pigeon_node_get_left(type, node, n);
602 default:
603 assert(0);
604 return (void *) -1UL;
605 }
606 }
607
608 static
609 struct cds_ja_inode_flag *ja_node_get_rightmost(struct cds_ja_inode_flag *node_flag)
610 {
611 return ja_node_get_left(node_flag, JA_ENTRY_PER_NODE);
612 }
613
614 static
615 int ja_linear_node_set_nth(const struct cds_ja_type *type,
616 struct cds_ja_inode *node,
617 struct cds_ja_shadow_node *shadow_node,
618 uint8_t n,
619 struct cds_ja_inode_flag *child_node_flag)
620 {
621 uint8_t nr_child;
622 uint8_t *values, *nr_child_ptr;
623 struct cds_ja_inode_flag **pointers;
624 unsigned int i, unused = 0;
625
626 assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
627
628 nr_child_ptr = &node->u.data[0];
629 dbg_printf("linear set nth: n %u, nr_child_ptr %p\n",
630 (unsigned int) n, nr_child_ptr);
631 nr_child = *nr_child_ptr;
632 assert(nr_child <= type->max_linear_child);
633
634 values = &node->u.data[1];
635 pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
636 /* Check if node value is already populated */
637 for (i = 0; i < nr_child; i++) {
638 if (values[i] == n) {
639 if (pointers[i])
640 return -EEXIST;
641 else
642 break;
643 } else {
644 if (!pointers[i])
645 unused++;
646 }
647 }
648 if (i == nr_child && nr_child >= type->max_linear_child) {
649 if (unused)
650 return -ERANGE; /* recompact node */
651 else
652 return -ENOSPC; /* No space left in this node type */
653 }
654
655 assert(pointers[i] == NULL);
656 rcu_assign_pointer(pointers[i], child_node_flag);
657 /* If we appended a new entry, publish its value before incrementing nr_child */
658 if (i == nr_child) {
659 CMM_STORE_SHARED(values[nr_child], n);
660 /* write pointer and value before nr_child */
661 cmm_smp_wmb();
662 CMM_STORE_SHARED(*nr_child_ptr, nr_child + 1);
663 }
664 shadow_node->nr_child++;
665 dbg_printf("linear set nth: %u child, shadow: %u child, for node %p shadow %p\n",
666 (unsigned int) CMM_LOAD_SHARED(*nr_child_ptr),
667 (unsigned int) shadow_node->nr_child,
668 node, shadow_node);
669
670 return 0;
671 }
672
673 static
674 int ja_pool_node_set_nth(const struct cds_ja_type *type,
675 struct cds_ja_inode *node,
676 struct cds_ja_inode_flag *node_flag,
677 struct cds_ja_shadow_node *shadow_node,
678 uint8_t n,
679 struct cds_ja_inode_flag *child_node_flag)
680 {
681 struct cds_ja_inode *linear;
682
683 assert(type->type_class == RCU_JA_POOL);
684
685 switch (type->nr_pool_order) {
686 case 1:
687 {
688 unsigned long bitsel, index;
689
690 bitsel = ja_node_pool_1d_bitsel(node_flag);
691 assert(bitsel < CHAR_BIT);
692 index = ((unsigned long) n >> bitsel) & 0x1;
693 linear = (struct cds_ja_inode *) &node->u.data[index << type->pool_size_order];
694 break;
695 }
696 case 2:
697 {
698 unsigned long bitsel[2], index[2], rindex;
699
700 ja_node_pool_2d_bitsel(node_flag, bitsel);
701 assert(bitsel[0] < CHAR_BIT);
702 assert(bitsel[1] < CHAR_BIT);
703 index[0] = ((unsigned long) n >> bitsel[0]) & 0x1;
704 index[0] <<= 1;
705 index[1] = ((unsigned long) n >> bitsel[1]) & 0x1;
706 rindex = index[0] | index[1];
707 linear = (struct cds_ja_inode *) &node->u.data[rindex << type->pool_size_order];
708 break;
709 }
710 default:
711 linear = NULL;
712 assert(0);
713 }
714
715 return ja_linear_node_set_nth(type, linear, shadow_node,
716 n, child_node_flag);
717 }
718
719 static
720 int ja_pigeon_node_set_nth(const struct cds_ja_type *type,
721 struct cds_ja_inode *node,
722 struct cds_ja_shadow_node *shadow_node,
723 uint8_t n,
724 struct cds_ja_inode_flag *child_node_flag)
725 {
726 struct cds_ja_inode_flag **ptr;
727
728 assert(type->type_class == RCU_JA_PIGEON);
729 ptr = &((struct cds_ja_inode_flag **) node->u.data)[n];
730 if (*ptr)
731 return -EEXIST;
732 rcu_assign_pointer(*ptr, child_node_flag);
733 shadow_node->nr_child++;
734 return 0;
735 }
736
737 /*
738 * _ja_node_set_nth: set nth item within a node. Return an error
739 * (negative error value) if it is already there.
740 */
741 static
742 int _ja_node_set_nth(const struct cds_ja_type *type,
743 struct cds_ja_inode *node,
744 struct cds_ja_inode_flag *node_flag,
745 struct cds_ja_shadow_node *shadow_node,
746 uint8_t n,
747 struct cds_ja_inode_flag *child_node_flag)
748 {
749 switch (type->type_class) {
750 case RCU_JA_LINEAR:
751 return ja_linear_node_set_nth(type, node, shadow_node, n,
752 child_node_flag);
753 case RCU_JA_POOL:
754 return ja_pool_node_set_nth(type, node, node_flag, shadow_node, n,
755 child_node_flag);
756 case RCU_JA_PIGEON:
757 return ja_pigeon_node_set_nth(type, node, shadow_node, n,
758 child_node_flag);
759 case RCU_JA_NULL:
760 return -ENOSPC;
761 default:
762 assert(0);
763 return -EINVAL;
764 }
765
766 return 0;
767 }
768
769 static
770 int ja_linear_node_clear_ptr(const struct cds_ja_type *type,
771 struct cds_ja_inode *node,
772 struct cds_ja_shadow_node *shadow_node,
773 struct cds_ja_inode_flag **node_flag_ptr)
774 {
775 uint8_t nr_child;
776 uint8_t *nr_child_ptr;
777
778 assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
779
780 nr_child_ptr = &node->u.data[0];
781 nr_child = *nr_child_ptr;
782 assert(nr_child <= type->max_linear_child);
783
784 if (type->type_class == RCU_JA_LINEAR) {
785 assert(!shadow_node->fallback_removal_count);
786 if (shadow_node->nr_child <= type->min_child) {
787 /* We need to try recompacting the node */
788 return -EFBIG;
789 }
790 }
791 dbg_printf("linear clear ptr: nr_child_ptr %p\n", nr_child_ptr);
792 assert(*node_flag_ptr != NULL);
793 rcu_assign_pointer(*node_flag_ptr, NULL);
794 /*
795 * Value and nr_child are never changed (would cause ABA issue).
796 * Instead, we leave the pointer to NULL and recompact the node
797 * once in a while. It is allowed to set a NULL pointer to a new
798 * value without recompaction though.
799 * Only update the shadow node accounting.
800 */
801 shadow_node->nr_child--;
802 dbg_printf("linear clear ptr: %u child, shadow: %u child, for node %p shadow %p\n",
803 (unsigned int) CMM_LOAD_SHARED(*nr_child_ptr),
804 (unsigned int) shadow_node->nr_child,
805 node, shadow_node);
806 return 0;
807 }
808
809 static
810 int ja_pool_node_clear_ptr(const struct cds_ja_type *type,
811 struct cds_ja_inode *node,
812 struct cds_ja_inode_flag *node_flag,
813 struct cds_ja_shadow_node *shadow_node,
814 struct cds_ja_inode_flag **node_flag_ptr,
815 uint8_t n)
816 {
817 struct cds_ja_inode *linear;
818
819 assert(type->type_class == RCU_JA_POOL);
820
821 if (shadow_node->fallback_removal_count) {
822 shadow_node->fallback_removal_count--;
823 } else {
824 /* We should try recompacting the node */
825 if (shadow_node->nr_child <= type->min_child)
826 return -EFBIG;
827 }
828
829 switch (type->nr_pool_order) {
830 case 1:
831 {
832 unsigned long bitsel, index;
833
834 bitsel = ja_node_pool_1d_bitsel(node_flag);
835 assert(bitsel < CHAR_BIT);
836 index = ((unsigned long) n >> bitsel) & 0x1;
837 linear = (struct cds_ja_inode *) &node->u.data[index << type->pool_size_order];
838 break;
839 }
840 case 2:
841 {
842 unsigned long bitsel[2], index[2], rindex;
843
844 ja_node_pool_2d_bitsel(node_flag, bitsel);
845 assert(bitsel[0] < CHAR_BIT);
846 assert(bitsel[1] < CHAR_BIT);
847 index[0] = ((unsigned long) n >> bitsel[0]) & 0x1;
848 index[0] <<= 1;
849 index[1] = ((unsigned long) n >> bitsel[1]) & 0x1;
850 rindex = index[0] | index[1];
851 linear = (struct cds_ja_inode *) &node->u.data[rindex << type->pool_size_order];
852 break;
853 }
854 default:
855 linear = NULL;
856 assert(0);
857 }
858
859 return ja_linear_node_clear_ptr(type, linear, shadow_node, node_flag_ptr);
860 }
861
862 static
863 int ja_pigeon_node_clear_ptr(const struct cds_ja_type *type,
864 struct cds_ja_inode *node,
865 struct cds_ja_shadow_node *shadow_node,
866 struct cds_ja_inode_flag **node_flag_ptr)
867 {
868 assert(type->type_class == RCU_JA_PIGEON);
869
870 if (shadow_node->fallback_removal_count) {
871 shadow_node->fallback_removal_count--;
872 } else {
873 /* We should try recompacting the node */
874 if (shadow_node->nr_child <= type->min_child)
875 return -EFBIG;
876 }
877 dbg_printf("ja_pigeon_node_clear_ptr: clearing ptr: %p\n", *node_flag_ptr);
878 rcu_assign_pointer(*node_flag_ptr, NULL);
879 shadow_node->nr_child--;
880 return 0;
881 }
882
883 /*
884 * _ja_node_clear_ptr: clear ptr item within a node. Return an error
885 * (negative error value) if it is not found (-ENOENT).
886 */
887 static
888 int _ja_node_clear_ptr(const struct cds_ja_type *type,
889 struct cds_ja_inode *node,
890 struct cds_ja_inode_flag *node_flag,
891 struct cds_ja_shadow_node *shadow_node,
892 struct cds_ja_inode_flag **node_flag_ptr,
893 uint8_t n)
894 {
895 switch (type->type_class) {
896 case RCU_JA_LINEAR:
897 return ja_linear_node_clear_ptr(type, node, shadow_node, node_flag_ptr);
898 case RCU_JA_POOL:
899 return ja_pool_node_clear_ptr(type, node, node_flag, shadow_node, node_flag_ptr, n);
900 case RCU_JA_PIGEON:
901 return ja_pigeon_node_clear_ptr(type, node, shadow_node, node_flag_ptr);
902 case RCU_JA_NULL:
903 return -ENOENT;
904 default:
905 assert(0);
906 return -EINVAL;
907 }
908
909 return 0;
910 }
911
912 /*
913 * Calculate bit distribution. Returns the bit (0 to 7) that splits the
914 * distribution into the two sub-distributions that are closest to
915 * containing the same number of elements.
916 */
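
/*
 * Editor's worked example: for child values { 0x00, 0x01, 0x80, 0x81 },
 * bit 0 and bit 7 each count two ones out of four children, a perfect
 * 2/2 split (distance 0), while bits 1 to 6 count zero ones (a 0/4
 * split, distance 4) and are the worst candidates.
 */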
917 static
918 unsigned int ja_node_sum_distribution_1d(enum ja_recompact mode,
919 struct cds_ja *ja,
920 unsigned int type_index,
921 const struct cds_ja_type *type,
922 struct cds_ja_inode *node,
923 struct cds_ja_shadow_node *shadow_node,
924 uint8_t n,
925 struct cds_ja_inode_flag *child_node_flag,
926 struct cds_ja_inode_flag **nullify_node_flag_ptr)
927 {
928 uint8_t nr_one[JA_BITS_PER_BYTE];
929 unsigned int bitsel = 0, bit_i, overall_best_distance = UINT_MAX;
930 unsigned int distrib_nr_child = 0;
931
932 memset(nr_one, 0, sizeof(nr_one));
933
934 switch (type->type_class) {
935 case RCU_JA_LINEAR:
936 {
937 uint8_t nr_child =
938 ja_linear_node_get_nr_child(type, node);
939 unsigned int i;
940
941 for (i = 0; i < nr_child; i++) {
942 struct cds_ja_inode_flag *iter;
943 uint8_t v;
944
945 ja_linear_node_get_ith_pos(type, node, i, &v, &iter);
946 if (!iter)
947 continue;
948 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
949 continue;
950 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
951 if (v & (1U << bit_i))
952 nr_one[bit_i]++;
953 }
954 distrib_nr_child++;
955 }
956 break;
957 }
958 case RCU_JA_POOL:
959 {
960 unsigned int pool_nr;
961
962 for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) {
963 struct cds_ja_inode *pool =
964 ja_pool_node_get_ith_pool(type,
965 node, pool_nr);
966 uint8_t nr_child =
967 ja_linear_node_get_nr_child(type, pool);
968 unsigned int j;
969
970 for (j = 0; j < nr_child; j++) {
971 struct cds_ja_inode_flag *iter;
972 uint8_t v;
973
974 ja_linear_node_get_ith_pos(type, pool,
975 j, &v, &iter);
976 if (!iter)
977 continue;
978 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
979 continue;
980 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
981 if (v & (1U << bit_i))
982 nr_one[bit_i]++;
983 }
984 distrib_nr_child++;
985 }
986 }
987 break;
988 }
989 case RCU_JA_PIGEON:
990 {
991 unsigned int i;
992
993 assert(mode == JA_RECOMPACT_DEL);
994 for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
995 struct cds_ja_inode_flag *iter;
996
997 iter = ja_pigeon_node_get_ith_pos(type, node, i);
998 if (!iter)
999 continue;
1000 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
1001 continue;
1002 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1003 if (i & (1U << bit_i))
1004 nr_one[bit_i]++;
1005 }
1006 distrib_nr_child++;
1007 }
1008 break;
1009 }
1010 case RCU_JA_NULL:
1011 assert(mode == JA_RECOMPACT_ADD_NEXT);
1012 break;
1013 default:
1014 assert(0);
1015 break;
1016 }
1017
1018 if (mode == JA_RECOMPACT_ADD_NEXT || mode == JA_RECOMPACT_ADD_SAME) {
1019 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1020 if (n & (1U << bit_i))
1021 nr_one[bit_i]++;
1022 }
1023 distrib_nr_child++;
1024 }
1025
1026 /*
1027 * The best bit selector is that for which the number of ones is
1028 * closest to half of the number of children in the
1029 * distribution. We calculate the distance using the double of
1030 * the sub-distribution sizes to eliminate truncation error.
1031 */
1032 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1033 unsigned int distance_to_best;
1034
1035 distance_to_best = abs_int(((unsigned int) nr_one[bit_i] << 1U) - distrib_nr_child);
1036 if (distance_to_best < overall_best_distance) {
1037 overall_best_distance = distance_to_best;
1038 bitsel = bit_i;
1039 }
1040 }
1041 dbg_printf("1 dimension pool bit selection: (%u)\n", bitsel);
1042 return bitsel;
1043 }
1044
1045 /*
1046 * Calculate bit distribution in two dimensions. Returns the two bits
1047 * (each 0 to 7) that split the distribution into the four
1048 * sub-distributions closest to containing the same number of elements.
1049 */
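
/*
 * Editor's worked example: for child values { 0x00, 0x01, 0x80, 0x81 }
 * and the bit pair (7, 0), each of the four sub-classes (00, 01, 10, 11)
 * holds exactly one child: a perfect four-way split.
 */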
1050 static
1051 void ja_node_sum_distribution_2d(enum ja_recompact mode,
1052 struct cds_ja *ja,
1053 unsigned int type_index,
1054 const struct cds_ja_type *type,
1055 struct cds_ja_inode *node,
1056 struct cds_ja_shadow_node *shadow_node,
1057 uint8_t n,
1058 struct cds_ja_inode_flag *child_node_flag,
1059 struct cds_ja_inode_flag **nullify_node_flag_ptr,
1060 unsigned int *_bitsel)
1061 {
1062 uint8_t nr_2d_11[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE],
1063 nr_2d_10[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE],
1064 nr_2d_01[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE],
1065 nr_2d_00[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE];
1066 unsigned int bitsel[2] = { 0, 1 };
1067 unsigned int bit_i, bit_j;
1068 int overall_best_distance = INT_MAX;
1069 unsigned int distrib_nr_child = 0;
1070
1071 memset(nr_2d_11, 0, sizeof(nr_2d_11));
1072 memset(nr_2d_10, 0, sizeof(nr_2d_10));
1073 memset(nr_2d_01, 0, sizeof(nr_2d_01));
1074 memset(nr_2d_00, 0, sizeof(nr_2d_00));
1075
1076 switch (type->type_class) {
1077 case RCU_JA_LINEAR:
1078 {
1079 uint8_t nr_child =
1080 ja_linear_node_get_nr_child(type, node);
1081 unsigned int i;
1082
1083 for (i = 0; i < nr_child; i++) {
1084 struct cds_ja_inode_flag *iter;
1085 uint8_t v;
1086
1087 ja_linear_node_get_ith_pos(type, node, i, &v, &iter);
1088 if (!iter)
1089 continue;
1090 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
1091 continue;
1092 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1093 for (bit_j = 0; bit_j < bit_i; bit_j++) {
1094 if (v & (1U << bit_i)) {
1095 if (v & (1U << bit_j)) {
1096 nr_2d_11[bit_i][bit_j]++;
1097 } else {
1098 nr_2d_10[bit_i][bit_j]++;
1099 }
1100 } else {
1101 if (v & (1U << bit_j)) {
1102 nr_2d_01[bit_i][bit_j]++;
1103 } else {
1104 nr_2d_00[bit_i][bit_j]++;
1105 }
1106 }
1107 }
1108 }
1109 distrib_nr_child++;
1110 }
1111 break;
1112 }
1113 case RCU_JA_POOL:
1114 {
1115 unsigned int pool_nr;
1116
1117 for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) {
1118 struct cds_ja_inode *pool =
1119 ja_pool_node_get_ith_pool(type,
1120 node, pool_nr);
1121 uint8_t nr_child =
1122 ja_linear_node_get_nr_child(type, pool);
1123 unsigned int j;
1124
1125 for (j = 0; j < nr_child; j++) {
1126 struct cds_ja_inode_flag *iter;
1127 uint8_t v;
1128
1129 ja_linear_node_get_ith_pos(type, pool,
1130 j, &v, &iter);
1131 if (!iter)
1132 continue;
1133 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
1134 continue;
1135 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1136 for (bit_j = 0; bit_j < bit_i; bit_j++) {
1137 if (v & (1U << bit_i)) {
1138 if (v & (1U << bit_j)) {
1139 nr_2d_11[bit_i][bit_j]++;
1140 } else {
1141 nr_2d_10[bit_i][bit_j]++;
1142 }
1143 } else {
1144 if (v & (1U << bit_j)) {
1145 nr_2d_01[bit_i][bit_j]++;
1146 } else {
1147 nr_2d_00[bit_i][bit_j]++;
1148 }
1149 }
1150 }
1151 }
1152 distrib_nr_child++;
1153 }
1154 }
1155 break;
1156 }
1157 case RCU_JA_PIGEON:
1158 {
1159 unsigned int i;
1160
1161 assert(mode == JA_RECOMPACT_DEL);
1162 for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
1163 struct cds_ja_inode_flag *iter;
1164
1165 iter = ja_pigeon_node_get_ith_pos(type, node, i);
1166 if (!iter)
1167 continue;
1168 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
1169 continue;
1170 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1171 for (bit_j = 0; bit_j < bit_i; bit_j++) {
1172 if (i & (1U << bit_i)) {
1173 if (i & (1U << bit_j)) {
1174 nr_2d_11[bit_i][bit_j]++;
1175 } else {
1176 nr_2d_10[bit_i][bit_j]++;
1177 }
1178 } else {
1179 if (i & (1U << bit_j)) {
1180 nr_2d_01[bit_i][bit_j]++;
1181 } else {
1182 nr_2d_00[bit_i][bit_j]++;
1183 }
1184 }
1185 }
1186 }
1187 distrib_nr_child++;
1188 }
1189 break;
1190 }
1191 case RCU_JA_NULL:
1192 assert(mode == JA_RECOMPACT_ADD_NEXT);
1193 break;
1194 default:
1195 assert(0);
1196 break;
1197 }
1198
1199 if (mode == JA_RECOMPACT_ADD_NEXT || mode == JA_RECOMPACT_ADD_SAME) {
1200 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1201 for (bit_j = 0; bit_j < bit_i; bit_j++) {
1202 if (n & (1U << bit_i)) {
1203 if (n & (1U << bit_j)) {
1204 nr_2d_11[bit_i][bit_j]++;
1205 } else {
1206 nr_2d_10[bit_i][bit_j]++;
1207 }
1208 } else {
1209 if (n & (1U << bit_j)) {
1210 nr_2d_01[bit_i][bit_j]++;
1211 } else {
1212 nr_2d_00[bit_i][bit_j]++;
1213 }
1214 }
1215 }
1216 }
1217 distrib_nr_child++;
1218 }
1219
1220 /*
1221 * The best bit selector is that for which the number of nodes
1222 * in each sub-class is closest to one-fourth of the number of
1223 * children in the distribution. We calculate the distance using
1224 * 4 times the size of the sub-distribution to eliminate
1225 * truncation error.
1226 */
1227 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1228 for (bit_j = 0; bit_j < bit_i; bit_j++) {
1229 int distance_to_best[4];
1230
1231 distance_to_best[0] = ((unsigned int) nr_2d_11[bit_i][bit_j] << 2U) - distrib_nr_child;
1232 distance_to_best[1] = ((unsigned int) nr_2d_10[bit_i][bit_j] << 2U) - distrib_nr_child;
1233 distance_to_best[2] = ((unsigned int) nr_2d_01[bit_i][bit_j] << 2U) - distrib_nr_child;
1234 distance_to_best[3] = ((unsigned int) nr_2d_00[bit_i][bit_j] << 2U) - distrib_nr_child;
1235
1236 /* Keep the worst (largest positive) distance of the four. */
1237 if (distance_to_best[1] > 0 && distance_to_best[1] > distance_to_best[0])
1238 distance_to_best[0] = distance_to_best[1];
1239 if (distance_to_best[2] > 0 && distance_to_best[2] > distance_to_best[0])
1240 distance_to_best[0] = distance_to_best[2];
1241 if (distance_to_best[3] > 0 && distance_to_best[3] > distance_to_best[0])
1242 distance_to_best[0] = distance_to_best[3];
1243
1244 /*
1245 * If our worst distance is better than the overall best,
1246 * this bit pair becomes the new best candidate.
1247 */
1248 if (distance_to_best[0] < overall_best_distance) {
1249 overall_best_distance = distance_to_best[0];
1250 bitsel[0] = bit_i;
1251 bitsel[1] = bit_j;
1252 }
1253 }
1254 }
1255
1256 dbg_printf("2 dimensions pool bit selection: (%u,%u)\n", bitsel[0], bitsel[1]);
1257
1258 /* Return our bit selection */
1259 _bitsel[0] = bitsel[0];
1260 _bitsel[1] = bitsel[1];
1261 }
1262
1263 static
1264 unsigned int find_nearest_type_index(unsigned int type_index,
1265 unsigned int nr_nodes)
1266 {
1267 const struct cds_ja_type *type;
1268
1269 assert(type_index != NODE_INDEX_NULL);
1270 if (nr_nodes == 0)
1271 return NODE_INDEX_NULL;
1272 for (;;) {
1273 type = &ja_types[type_index];
1274 if (nr_nodes < type->min_child)
1275 type_index--;
1276 else if (nr_nodes > type->max_child)
1277 type_index++;
1278 else
1279 break;
1280 }
1281 return type_index;
1282 }
1283
1284 /*
1285 * ja_node_recompact: recompact a node, adding or removing a child depending on mode.
1286 * Return 0 on success, -EAGAIN if need to retry, or other negative
1287 * error value otherwise.
1288 */
1289 static
1290 int ja_node_recompact(enum ja_recompact mode,
1291 struct cds_ja *ja,
1292 unsigned int old_type_index,
1293 const struct cds_ja_type *old_type,
1294 struct cds_ja_inode *old_node,
1295 struct cds_ja_shadow_node *shadow_node,
1296 struct cds_ja_inode_flag **old_node_flag_ptr, uint8_t n,
1297 struct cds_ja_inode_flag *child_node_flag,
1298 struct cds_ja_inode_flag **nullify_node_flag_ptr,
1299 int level)
1300 {
1301 unsigned int new_type_index;
1302 struct cds_ja_inode *new_node;
1303 struct cds_ja_shadow_node *new_shadow_node = NULL;
1304 const struct cds_ja_type *new_type;
1305 struct cds_ja_inode_flag *new_node_flag, *old_node_flag;
1306 int ret;
1307 int fallback = 0;
1308
1309 old_node_flag = *old_node_flag_ptr;
1310
1311 /*
1312 * Need to find nearest type index even for ADD_SAME, because
1313 * this recompaction, when applied to linear nodes, will garbage
1314 * collect dummy (NULL) entries, and can therefore cause a few
1315 * linear representations to be skipped.
1316 */
1317 switch (mode) {
1318 case JA_RECOMPACT_ADD_SAME:
1319 new_type_index = find_nearest_type_index(old_type_index,
1320 shadow_node->nr_child + 1);
1321 dbg_printf("Recompact for node with %u children\n",
1322 shadow_node->nr_child + 1);
1323 break;
1324 case JA_RECOMPACT_ADD_NEXT:
1325 if (!shadow_node || old_type_index == NODE_INDEX_NULL) {
1326 new_type_index = 0;
1327 dbg_printf("Recompact for NULL\n");
1328 } else {
1329 new_type_index = find_nearest_type_index(old_type_index,
1330 shadow_node->nr_child + 1);
1331 dbg_printf("Recompact for node with %u children\n",
1332 shadow_node->nr_child + 1);
1333 }
1334 break;
1335 case JA_RECOMPACT_DEL:
1336 new_type_index = find_nearest_type_index(old_type_index,
1337 shadow_node->nr_child - 1);
1338 dbg_printf("Recompact for node with %u children\n",
1339 shadow_node->nr_child - 1);
1340 break;
1341 default:
1342 assert(0);
1343 }
1344
1345 retry: /* for fallback */
1346 dbg_printf("Recompact from type %d to type %d\n",
1347 old_type_index, new_type_index);
1348 new_type = &ja_types[new_type_index];
1349 if (new_type_index != NODE_INDEX_NULL) {
1350 new_node = alloc_cds_ja_node(ja, new_type);
1351 if (!new_node)
1352 return -ENOMEM;
1353
1354 if (new_type->type_class == RCU_JA_POOL) {
1355 switch (new_type->nr_pool_order) {
1356 case 1:
1357 {
1358 unsigned int node_distrib_bitsel;
1359
1360 node_distrib_bitsel =
1361 ja_node_sum_distribution_1d(mode, ja,
1362 old_type_index, old_type,
1363 old_node, shadow_node,
1364 n, child_node_flag,
1365 nullify_node_flag_ptr);
1366 assert(!((unsigned long) new_node & JA_POOL_1D_MASK));
1367 new_node_flag = ja_node_flag_pool_1d(new_node,
1368 new_type_index, node_distrib_bitsel);
1369 break;
1370 }
1371 case 2:
1372 {
1373 unsigned int node_distrib_bitsel[2];
1374
1375 ja_node_sum_distribution_2d(mode, ja,
1376 old_type_index, old_type,
1377 old_node, shadow_node,
1378 n, child_node_flag,
1379 nullify_node_flag_ptr,
1380 node_distrib_bitsel);
1381 assert(!((unsigned long) new_node & JA_POOL_1D_MASK));
1382 assert(!((unsigned long) new_node & JA_POOL_2D_MASK));
1383 new_node_flag = ja_node_flag_pool_2d(new_node,
1384 new_type_index, node_distrib_bitsel);
1385 break;
1386 }
1387 default:
1388 assert(0);
1389 }
1390 } else {
1391 new_node_flag = ja_node_flag(new_node, new_type_index);
1392 }
1393
1394 dbg_printf("Recompact inherit lock from %p\n", shadow_node);
1395 new_shadow_node = rcuja_shadow_set(ja->ht, new_node_flag, shadow_node, ja, level);
1396 if (!new_shadow_node) {
1397 free_cds_ja_node(ja, new_node);
1398 return -ENOMEM;
1399 }
1400 if (fallback)
1401 new_shadow_node->fallback_removal_count =
1402 JA_FALLBACK_REMOVAL_COUNT;
1403 } else {
1404 new_node = NULL;
1405 new_node_flag = NULL;
1406 }
1407
1408 assert(mode != JA_RECOMPACT_ADD_NEXT || old_type->type_class != RCU_JA_PIGEON);
1409
1410 if (new_type_index == NODE_INDEX_NULL)
1411 goto skip_copy;
1412
1413 switch (old_type->type_class) {
1414 case RCU_JA_LINEAR:
1415 {
1416 uint8_t nr_child =
1417 ja_linear_node_get_nr_child(old_type, old_node);
1418 unsigned int i;
1419
1420 for (i = 0; i < nr_child; i++) {
1421 struct cds_ja_inode_flag *iter;
1422 uint8_t v;
1423
1424 ja_linear_node_get_ith_pos(old_type, old_node, i, &v, &iter);
1425 if (!iter)
1426 continue;
1427 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
1428 continue;
1429 ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
1430 new_shadow_node,
1431 v, iter);
1432 if (new_type->type_class == RCU_JA_POOL && ret) {
1433 goto fallback_toosmall;
1434 }
1435 assert(!ret);
1436 }
1437 break;
1438 }
1439 case RCU_JA_POOL:
1440 {
1441 unsigned int pool_nr;
1442
1443 for (pool_nr = 0; pool_nr < (1U << old_type->nr_pool_order); pool_nr++) {
1444 struct cds_ja_inode *pool =
1445 ja_pool_node_get_ith_pool(old_type,
1446 old_node, pool_nr);
1447 uint8_t nr_child =
1448 ja_linear_node_get_nr_child(old_type, pool);
1449 unsigned int j;
1450
1451 for (j = 0; j < nr_child; j++) {
1452 struct cds_ja_inode_flag *iter;
1453 uint8_t v;
1454
1455 ja_linear_node_get_ith_pos(old_type, pool,
1456 j, &v, &iter);
1457 if (!iter)
1458 continue;
1459 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
1460 continue;
1461 ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
1462 new_shadow_node,
1463 v, iter);
1464 if (new_type->type_class == RCU_JA_POOL
1465 && ret) {
1466 goto fallback_toosmall;
1467 }
1468 assert(!ret);
1469 }
1470 }
1471 break;
1472 }
1473 case RCU_JA_NULL:
1474 assert(mode == JA_RECOMPACT_ADD_NEXT);
1475 break;
1476 case RCU_JA_PIGEON:
1477 {
1478 unsigned int i;
1479
1480 assert(mode == JA_RECOMPACT_DEL);
1481 for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
1482 struct cds_ja_inode_flag *iter;
1483
1484 iter = ja_pigeon_node_get_ith_pos(old_type, old_node, i);
1485 if (!iter)
1486 continue;
1487 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
1488 continue;
1489 ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
1490 new_shadow_node,
1491 i, iter);
1492 if (new_type->type_class == RCU_JA_POOL && ret) {
1493 goto fallback_toosmall;
1494 }
1495 assert(!ret);
1496 }
1497 break;
1498 }
1499 default:
1500 assert(0);
1501 ret = -EINVAL;
1502 goto end;
1503 }
1504 skip_copy:
1505
1506 if (mode == JA_RECOMPACT_ADD_NEXT || mode == JA_RECOMPACT_ADD_SAME) {
1507 /* add node */
1508 ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
1509 new_shadow_node,
1510 n, child_node_flag);
1511 if (new_type->type_class == RCU_JA_POOL && ret) {
1512 goto fallback_toosmall;
1513 }
1514 assert(!ret);
1515 }
1516
1517 if (fallback) {
1518 dbg_printf("Using fallback for %u children, node type index: %u, mode %s\n",
1519 new_shadow_node->nr_child, old_type_index, mode == JA_RECOMPACT_ADD_NEXT ? "add_next" :
1520 (mode == JA_RECOMPACT_DEL ? "del" : "add_same"));
1521 uatomic_inc(&ja->node_fallback_count_distribution[new_shadow_node->nr_child]);
1522 }
1523
1524 /* Return pointer to new recompacted node through old_node_flag_ptr */
1525 *old_node_flag_ptr = new_node_flag;
1526 if (old_node) {
1527 int flags;
1528
1529 flags = RCUJA_SHADOW_CLEAR_FREE_NODE;
1530 /*
1531 * It is OK to free the lock associated with a node
1532 * going to NULL, since we are holding the parent lock.
1533 * This synchronizes removal with re-add of that node.
1534 */
1535 if (new_type_index == NODE_INDEX_NULL)
1536 flags |= RCUJA_SHADOW_CLEAR_FREE_LOCK;
1537 ret = rcuja_shadow_clear(ja->ht, old_node_flag, shadow_node,
1538 flags);
1539 assert(!ret);
1540 }
1541
1542 ret = 0;
1543 end:
1544 return ret;
1545
1546 fallback_toosmall:
1547 /* Fallback: the chosen pool node type is too small for this distribution. */
1548 assert(new_shadow_node);
1549 ret = rcuja_shadow_clear(ja->ht, new_node_flag, new_shadow_node,
1550 RCUJA_SHADOW_CLEAR_FREE_NODE);
1551 assert(!ret);
1552
1553 switch (mode) {
1554 case JA_RECOMPACT_ADD_SAME:
1555 /*
1556 * JA_RECOMPACT_ADD_SAME is only triggered if a linear
1557 * node within a pool has unused entries. It should
1558 * therefore _never_ be too small.
1559 */
1560 assert(0);
1561
1562 /* Fall-through */
1563 case JA_RECOMPACT_ADD_NEXT:
1564 {
1565 const struct cds_ja_type *next_type;
1566
1567 /*
1568 * Recompaction attempt on add failed. Should only
1569 * happen if target node type is pool. Caused by
1570 * hard-to-split distribution. Recompact using the next
1571 * distribution size.
1572 */
1573 assert(new_type->type_class == RCU_JA_POOL);
1574 next_type = &ja_types[new_type_index + 1];
1575 /*
1576 * Try going to the next pool size if our population
1577 * fits within its range. This is not flagged as a
1578 * fallback.
1579 */
1580 if (shadow_node->nr_child + 1 >= next_type->min_child
1581 && shadow_node->nr_child + 1 <= next_type->max_child) {
1582 new_type_index++;
1583 goto retry;
1584 } else {
1585 new_type_index++;
1586 dbg_printf("Add fallback to type %d\n", new_type_index);
1587 uatomic_inc(&ja->nr_fallback);
1588 fallback = 1;
1589 goto retry;
1590 }
1591 break;
1592 }
1593 case JA_RECOMPACT_DEL:
1594 /*
1595 * Recompaction attempt on delete failed. Should only
1596 * happen if target node type is pool. This is caused by
1597 * a hard-to-split distribution. Recompact on same node
1598 * size, but flag current node as "fallback" to ensure
1599 * we don't attempt recompaction before some activity
1600 * has reshuffled our node.
1601 */
1602 assert(new_type->type_class == RCU_JA_POOL);
1603 new_type_index = old_type_index;
1604 dbg_printf("Delete fallback keeping type %d\n", new_type_index);
1605 uatomic_inc(&ja->nr_fallback);
1606 fallback = 1;
1607 goto retry;
1608 default:
1609 assert(0);
1610 return -EINVAL;
1611 }
1612
1613 /*
1614 * Last resort fallback: pigeon.
1615 */
1616 new_type_index = (1UL << JA_TYPE_BITS) - 1;
1617 dbg_printf("Fallback to type %d\n", new_type_index);
1618 uatomic_inc(&ja->nr_fallback);
1619 fallback = 1;
1620 goto retry;
1621 }
1622
1623 /*
1624 * Return 0 on success, -EAGAIN if need to retry, or other negative
1625 * error value otherwise.
1626 */
1627 static
1628 int ja_node_set_nth(struct cds_ja *ja,
1629 struct cds_ja_inode_flag **node_flag, uint8_t n,
1630 struct cds_ja_inode_flag *child_node_flag,
1631 struct cds_ja_shadow_node *shadow_node,
1632 int level)
1633 {
1634 int ret;
1635 unsigned int type_index;
1636 const struct cds_ja_type *type;
1637 struct cds_ja_inode *node;
1638
1639 dbg_printf("ja_node_set_nth for n=%u, node %p, shadow %p\n",
1640 (unsigned int) n, ja_node_ptr(*node_flag), shadow_node);
1641
1642 node = ja_node_ptr(*node_flag);
1643 type_index = ja_node_type(*node_flag);
1644 type = &ja_types[type_index];
1645 ret = _ja_node_set_nth(type, node, *node_flag, shadow_node,
1646 n, child_node_flag);
1647 switch (ret) {
1648 case -ENOSPC:
1649 /* Not enough space in node, need to recompact to next type. */
1650 ret = ja_node_recompact(JA_RECOMPACT_ADD_NEXT, ja, type_index, type, node,
1651 shadow_node, node_flag, n, child_node_flag, NULL, level);
1652 break;
1653 case -ERANGE:
1654 /* Node needs to be recompacted. */
1655 ret = ja_node_recompact(JA_RECOMPACT_ADD_SAME, ja, type_index, type, node,
1656 shadow_node, node_flag, n, child_node_flag, NULL, level);
1657 break;
1658 }
1659 return ret;
1660 }
1661
1662 /*
1663 * Return 0 on success, -EAGAIN if need to retry, or other negative
1664 * error value otherwise.
1665 */
1666 static
1667 int ja_node_clear_ptr(struct cds_ja *ja,
1668 struct cds_ja_inode_flag **node_flag_ptr, /* Pointer to location to nullify */
1669 struct cds_ja_inode_flag **parent_node_flag_ptr, /* Address of parent ptr in its parent */
1670 struct cds_ja_shadow_node *shadow_node, /* of parent */
1671 uint8_t n, int level)
1672 {
1673 int ret;
1674 unsigned int type_index;
1675 const struct cds_ja_type *type;
1676 struct cds_ja_inode *node;
1677
1678 dbg_printf("ja_node_clear_ptr for node %p, shadow %p, target ptr %p\n",
1679 ja_node_ptr(*parent_node_flag_ptr), shadow_node, node_flag_ptr);
1680
1681 node = ja_node_ptr(*parent_node_flag_ptr);
1682 type_index = ja_node_type(*parent_node_flag_ptr);
1683 type = &ja_types[type_index];
1684 ret = _ja_node_clear_ptr(type, node, *parent_node_flag_ptr, shadow_node, node_flag_ptr, n);
1685 if (ret == -EFBIG) {
1686 /* Should try recompaction. */
1687 ret = ja_node_recompact(JA_RECOMPACT_DEL, ja, type_index, type, node,
1688 shadow_node, parent_node_flag_ptr, n, NULL,
1689 node_flag_ptr, level);
1690 }
1691 return ret;
1692 }
1693
1694 struct cds_ja_node *cds_ja_lookup(struct cds_ja *ja, uint64_t key)
1695 {
1696 unsigned int tree_depth, i;
1697 struct cds_ja_inode_flag *node_flag;
1698
1699 if (caa_unlikely(key > ja->key_max))
1700 return NULL;
1701 tree_depth = ja->tree_depth;
1702 node_flag = rcu_dereference(ja->root);
1703
1704 /* level 0: root node */
1705 if (!ja_node_ptr(node_flag))
1706 return NULL;
1707
1708 for (i = 1; i < tree_depth; i++) {
1709 uint8_t iter_key;
1710
1711 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1)));
1712 node_flag = ja_node_get_nth(node_flag, NULL, iter_key);
1713 dbg_printf("cds_ja_lookup iter key lookup %u finds node_flag %p\n",
1714 (unsigned int) iter_key, node_flag);
1715 if (!ja_node_ptr(node_flag))
1716 return NULL;
1717 }
1718
1719 /* Last level lookup succeeded. We got an actual match. */
1720 return (struct cds_ja_node *) node_flag;
1721 }
1722
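/*
 * Editor's usage sketch (not part of the library), assuming an existing,
 * populated "struct cds_ja *ja" and a hypothetical "struct mytype"
 * embedding a cds_ja_node member named "ja_node": lookups must run
 * within an RCU read-side critical section, and the returned node may
 * only be dereferenced inside it.
 */
#if 0	/* example only */
static void example_lookup(struct cds_ja *ja, uint64_t key)
{
	struct cds_ja_node *node;

	rcu_read_lock();
	node = cds_ja_lookup(ja, key);
	if (node) {
		struct mytype *obj;

		obj = caa_container_of(node, struct mytype, ja_node);
		/* ... use obj within the read-side critical section ... */
	}
	rcu_read_unlock();
}
#endif
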
1723 struct cds_ja_node *cds_ja_lookup_lower_equal(struct cds_ja *ja, uint64_t key)
1724 {
1725 int tree_depth, level;
1726 struct cds_ja_inode_flag *node_flag, *cur_node_depth[JA_MAX_DEPTH];
1727
1728 if (caa_unlikely(key > ja->key_max || !key))
1729 return NULL;
1730
1731 memset(cur_node_depth, 0, sizeof(cur_node_depth));
1732 tree_depth = ja->tree_depth;
1733 node_flag = rcu_dereference(ja->root);
1734 cur_node_depth[0] = node_flag;
1735
1736 /* level 0: root node */
1737 if (!ja_node_ptr(node_flag))
1738 return NULL;
1739
1740 for (level = 1; level < tree_depth; level++) {
1741 uint8_t iter_key;
1742
1743 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - level - 1)));
1744 node_flag = ja_node_get_nth(node_flag, NULL, iter_key);
1745 if (!ja_node_ptr(node_flag))
1746 break;
1747 cur_node_depth[level] = node_flag;
1748 dbg_printf("cds_ja_lookup_lower_equal iter key lookup %u finds node_flag %p\n",
1749 (unsigned int) iter_key, node_flag);
1750 }
1751
1752 if (level == tree_depth) {
1753 /* Last level lookup succeeded. We got an equal match. */
1754 return (struct cds_ja_node *) node_flag;
1755 }
1756
1757 /*
1758 * Find highest value left of current node.
1759 * Current node is cur_node_depth[level].
1760 * Start at current level. If we cannot find any key left of
1761 * ours, go one level up, seek highest value left of current
1762 * (recursively), and when we find one, get the rightmost child
1763 * of its rightmost child (recursively).
1764 */
1765 for (; level > 0; level--) {
1766 uint8_t iter_key;
1767
1768 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - level - 1)));
1769 node_flag = ja_node_get_left(cur_node_depth[level - 1],
1770 iter_key);
1771 /* If found left sibling, find rightmost child. */
1772 if (ja_node_ptr(node_flag))
1773 break;
1774 }
1775
1776 if (!level) {
1777 /* Reached the root and could not find a left sibling. */
1778 return NULL;
1779 }
1780
1781 level++;
1782
1783 /*
1784 * From this point, we are guaranteed to be able to find a
1785 * "lower than" match. ja_attach_node() and ja_detach_node()
1786 * both guarantee that it is not possible for a lookup to reach
1787 * a dead-end.
1788 */
1789
1790 /* Find rightmost child of rightmost child (recursively). */
1791 for (; level < tree_depth; level++) {
1792 node_flag = ja_node_get_rightmost(node_flag);
1793 /* A dead-end is not expected here (see the guarantee above). */
1794 if (!ja_node_ptr(node_flag))
1795 break;
1796 }
1797
1798 assert(level == tree_depth);
1799
1800 return (struct cds_ja_node *) node_flag;
1801 }
1802
1803 /*
1804 * We reached an unpopulated node. Create it and the children we need,
1805 * and then attach the entire branch to the current node. This may
1806 * trigger recompaction of the current node. Locks needed: node lock
1807 * (for add), and, possibly, parent node lock (to update pointer due to
1808 * node recompaction).
1809 *
1810 * First take node lock, check if recompaction is needed, then take
1811 * parent lock (if needed). Then we can proceed to create the new
1812 * branch. Publish the new branch, and release locks.
1813 * TODO: we currently always take the parent lock even when not needed.
1814 *
1815  * ja_attach_node() ensures that a lookup will _never_ see a branch that
1816  * leads to a dead-end: the entire content of the new branch is
1817  * populated first, forming a self-contained cluster, and only then is
1818  * the cluster attached to the rest of the tree, thus making it visible
1819  * to lookups.
1820 */
1821 static
1822 int ja_attach_node(struct cds_ja *ja,
1823 struct cds_ja_inode_flag **attach_node_flag_ptr,
1824 struct cds_ja_inode_flag *attach_node_flag,
1825 struct cds_ja_inode_flag *parent_attach_node_flag,
1826 struct cds_ja_inode_flag **old_node_flag_ptr,
1827 struct cds_ja_inode_flag *old_node_flag,
1828 uint64_t key,
1829 unsigned int level,
1830 struct cds_ja_node *child_node)
1831 {
1832 struct cds_ja_shadow_node *shadow_node = NULL,
1833 *parent_shadow_node = NULL;
1834 struct cds_ja_inode_flag *iter_node_flag, *iter_dest_node_flag;
1835 int ret, i;
1836 struct cds_ja_inode_flag *created_nodes[JA_MAX_DEPTH];
1837 int nr_created_nodes = 0;
1838
1839 dbg_printf("Attach node at level %u (old_node_flag %p, attach_node_flag_ptr %p attach_node_flag %p, parent_attach_node_flag %p)\n",
1840 level, old_node_flag, attach_node_flag_ptr, attach_node_flag, parent_attach_node_flag);
1841
1842 assert(!old_node_flag);
1843 if (attach_node_flag) {
1844 shadow_node = rcuja_shadow_lookup_lock(ja->ht, attach_node_flag);
1845 if (!shadow_node) {
1846 ret = -EAGAIN;
1847 goto end;
1848 }
1849 }
1850 if (parent_attach_node_flag) {
1851 parent_shadow_node = rcuja_shadow_lookup_lock(ja->ht,
1852 parent_attach_node_flag);
1853 if (!parent_shadow_node) {
1854 ret = -EAGAIN;
1855 goto unlock_shadow;
1856 }
1857 }
1858
1859 if (old_node_flag_ptr && ja_node_ptr(*old_node_flag_ptr)) {
1860 /*
1861 * Target node has been updated between RCU lookup and
1862 * lock acquisition. We need to re-try lookup and
1863 * attach.
1864 */
1865 ret = -EAGAIN;
1866 goto unlock_parent;
1867 }
1868
1869 	/*
1870 	 * Handle the case where old_node_flag_ptr is NULL: we cannot
1871 	 * use it to check whether the node has been populated between
1872 	 * the RCU lookup and mutex acquisition, so perform a fresh
1873 	 * lookup query instead.
1874 	 */
1875 if (!old_node_flag_ptr) {
1876 uint8_t iter_key;
1877 struct cds_ja_inode_flag *lookup_node_flag;
1878 struct cds_ja_inode_flag **lookup_node_flag_ptr;
1879
1880 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (ja->tree_depth - level)));
1881 lookup_node_flag = ja_node_get_nth(attach_node_flag,
1882 &lookup_node_flag_ptr,
1883 iter_key);
1884 if (lookup_node_flag) {
1885 ret = -EEXIST;
1886 goto unlock_parent;
1887 }
1888 }
1889
1890 if (attach_node_flag_ptr && ja_node_ptr(*attach_node_flag_ptr) !=
1891 ja_node_ptr(attach_node_flag)) {
1892 /*
1893 * Target node has been updated between RCU lookup and
1894 * lock acquisition. We need to re-try lookup and
1895 * attach.
1896 */
1897 ret = -EAGAIN;
1898 goto unlock_parent;
1899 }
1900
1901 /* Create new branch, starting from bottom */
1902 iter_node_flag = (struct cds_ja_inode_flag *) child_node;
1903
1904 for (i = ja->tree_depth - 1; i >= (int) level; i--) {
1905 uint8_t iter_key;
1906
1907 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (ja->tree_depth - i - 1)));
1908 dbg_printf("branch creation level %d, key %u\n",
1909 i, (unsigned int) iter_key);
1910 iter_dest_node_flag = NULL;
1911 ret = ja_node_set_nth(ja, &iter_dest_node_flag,
1912 iter_key,
1913 iter_node_flag,
1914 NULL, i);
1915 if (ret) {
1916 dbg_printf("branch creation error %d\n", ret);
1917 goto check_error;
1918 }
1919 created_nodes[nr_created_nodes++] = iter_dest_node_flag;
1920 iter_node_flag = iter_dest_node_flag;
1921 }
1922 assert(level > 0);
1923
1924 /* Publish branch */
1925 if (level == 1) {
1926 /*
1927 * Attaching to root node.
1928 */
1929 rcu_assign_pointer(ja->root, iter_node_flag);
1930 } else {
1931 uint8_t iter_key;
1932
1933 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (ja->tree_depth - level)));
1934 dbg_printf("publish branch at level %d, key %u\n",
1935 level - 1, (unsigned int) iter_key);
1936 /* We need to use set_nth on the previous level. */
1937 iter_dest_node_flag = attach_node_flag;
1938 ret = ja_node_set_nth(ja, &iter_dest_node_flag,
1939 iter_key,
1940 iter_node_flag,
1941 shadow_node, level - 1);
1942 if (ret) {
1943 dbg_printf("branch publish error %d\n", ret);
1944 goto check_error;
1945 }
1946 /*
1947 * Attach branch
1948 */
1949 rcu_assign_pointer(*attach_node_flag_ptr, iter_dest_node_flag);
1950 }
1951
1952 /* Success */
1953 ret = 0;
1954
1955 check_error:
1956 if (ret) {
1957 for (i = 0; i < nr_created_nodes; i++) {
1958 int tmpret;
1959 int flags;
1960
1961 flags = RCUJA_SHADOW_CLEAR_FREE_LOCK;
1962 if (i)
1963 flags |= RCUJA_SHADOW_CLEAR_FREE_NODE;
1964 tmpret = rcuja_shadow_clear(ja->ht,
1965 created_nodes[i],
1966 NULL,
1967 flags);
1968 assert(!tmpret);
1969 }
1970 }
1971 unlock_parent:
1972 if (parent_shadow_node)
1973 rcuja_shadow_unlock(parent_shadow_node);
1974 unlock_shadow:
1975 if (shadow_node)
1976 rcuja_shadow_unlock(shadow_node);
1977 end:
1978 return ret;
1979 }
1980
1981 /*
1982 * Lock the parent containing the pointer to list of duplicates, and add
1983 * node to this list. Failure can happen if concurrent update changes
1984 * the parent before we get the lock. We return -EAGAIN in that case.
1985 * Return 0 on success, negative error value on failure.
1986 */
1987 static
1988 int ja_chain_node(struct cds_ja *ja,
1989 struct cds_ja_inode_flag *parent_node_flag,
1990 struct cds_ja_inode_flag **node_flag_ptr,
1991 struct cds_ja_inode_flag *node_flag,
1992 struct cds_ja_node *node)
1993 {
1994 struct cds_ja_shadow_node *shadow_node;
1995 int ret = 0;
1996
1997 shadow_node = rcuja_shadow_lookup_lock(ja->ht, parent_node_flag);
1998 if (!shadow_node) {
1999 return -EAGAIN;
2000 }
2001 if (ja_node_ptr(*node_flag_ptr) != ja_node_ptr(node_flag)) {
2002 ret = -EAGAIN;
2003 goto end;
2004 }
2005 /*
2006 * Add node to head of list. Safe against concurrent RCU read
2007 * traversals.
2008 */
2009 node->next = (struct cds_ja_node *) node_flag;
2010 rcu_assign_pointer(*node_flag_ptr, (struct cds_ja_inode_flag *) node);
2011 end:
2012 rcuja_shadow_unlock(shadow_node);
2013 return ret;
2014 }
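
/*
 * Resulting duplicate list after ja_chain_node() (sketch): the new
 * node is published at the head of the list with rcu_assign_pointer(),
 * so concurrent RCU readers see either the old head or the fully
 * initialized new node, never a partially linked list:
 *
 *	*node_flag_ptr --> node --> old head --> ... --> NULL
 */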
2015
2016 static
2017 int _cds_ja_add(struct cds_ja *ja, uint64_t key,
2018 struct cds_ja_node *node,
2019 struct cds_ja_node **unique_node_ret)
2020 {
2021 unsigned int tree_depth, i;
2022 struct cds_ja_inode_flag *attach_node_flag,
2023 *parent_node_flag,
2024 *parent2_node_flag,
2025 *node_flag,
2026 *parent_attach_node_flag;
2027 struct cds_ja_inode_flag **attach_node_flag_ptr,
2028 **parent_node_flag_ptr,
2029 **node_flag_ptr;
2030 int ret;
2031
2032 if (caa_unlikely(key > ja->key_max)) {
2033 return -EINVAL;
2034 }
2035 tree_depth = ja->tree_depth;
2036
2037 retry:
2038 dbg_printf("cds_ja_add attempt: key %" PRIu64 ", node %p\n",
2039 key, node);
2040 parent2_node_flag = NULL;
2041 parent_node_flag =
2042 (struct cds_ja_inode_flag *) &ja->root; /* Use root ptr address as key for mutex */
2043 parent_node_flag_ptr = NULL;
2044 node_flag = rcu_dereference(ja->root);
2045 node_flag_ptr = &ja->root;
2046
2047 /* Iterate on all internal levels */
2048 for (i = 1; i < tree_depth; i++) {
2049 uint8_t iter_key;
2050
2051 if (!ja_node_ptr(node_flag))
2052 break;
2053 dbg_printf("cds_ja_add iter parent2_node_flag %p parent_node_flag %p node_flag_ptr %p node_flag %p\n",
2054 parent2_node_flag, parent_node_flag, node_flag_ptr, node_flag);
2055 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1)));
2056 parent2_node_flag = parent_node_flag;
2057 parent_node_flag = node_flag;
2058 parent_node_flag_ptr = node_flag_ptr;
2059 node_flag = ja_node_get_nth(node_flag,
2060 &node_flag_ptr,
2061 iter_key);
2062 }
2063
2064 	/*
2065 	 * We reached either the bottom of the tree or an internal NULL
2066 	 * node. Simply add the node to the last internal level, or
2067 	 * chain it to the duplicates if the key is already present.
2068 	 */
2069 if (!ja_node_ptr(node_flag)) {
2070 dbg_printf("cds_ja_add NULL parent2_node_flag %p parent_node_flag %p node_flag_ptr %p node_flag %p\n",
2071 parent2_node_flag, parent_node_flag, node_flag_ptr, node_flag);
2072
2073 attach_node_flag = parent_node_flag;
2074 attach_node_flag_ptr = parent_node_flag_ptr;
2075 parent_attach_node_flag = parent2_node_flag;
2076
2077 ret = ja_attach_node(ja, attach_node_flag_ptr,
2078 attach_node_flag,
2079 parent_attach_node_flag,
2080 node_flag_ptr,
2081 node_flag,
2082 key, i, node);
2083 } else {
2084 if (unique_node_ret) {
2085 *unique_node_ret = (struct cds_ja_node *) ja_node_ptr(node_flag);
2086 return -EEXIST;
2087 }
2088
2089 dbg_printf("cds_ja_add duplicate parent2_node_flag %p parent_node_flag %p node_flag_ptr %p node_flag %p\n",
2090 parent2_node_flag, parent_node_flag, node_flag_ptr, node_flag);
2091
2092 attach_node_flag = node_flag;
2093 attach_node_flag_ptr = node_flag_ptr;
2094 parent_attach_node_flag = parent_node_flag;
2095
2096 ret = ja_chain_node(ja,
2097 parent_attach_node_flag,
2098 attach_node_flag_ptr,
2099 attach_node_flag,
2100 node);
2101 }
2102 if (ret == -EAGAIN || ret == -EEXIST)
2103 goto retry;
2104
2105 return ret;
2106 }
2107
2108 int cds_ja_add(struct cds_ja *ja, uint64_t key,
2109 struct cds_ja_node *node)
2110 {
2111 return _cds_ja_add(ja, key, node, NULL);
2112 }
2113
2114 struct cds_ja_node *cds_ja_add_unique(struct cds_ja *ja, uint64_t key,
2115 struct cds_ja_node *node)
2116 {
2117 int ret;
2118 struct cds_ja_node *ret_node;
2119
2120 ret = _cds_ja_add(ja, key, node, &ret_node);
2121 if (ret == -EEXIST)
2122 return ret_node;
2123 else
2124 return node;
2125 }
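
/*
 * Usage sketch for cds_ja_add_unique() (illustration only; entry is an
 * assumed application object embedding struct cds_ja_node). Like the
 * other update operations, it is expected to be called from within an
 * RCU read-side critical section:
 *
 *	struct cds_ja_node *ret_node;
 *
 *	rcu_read_lock();
 *	ret_node = cds_ja_add_unique(ja, key, &entry->ja_node);
 *	rcu_read_unlock();
 *	if (ret_node != &entry->ja_node) {
 *		// Key already present: ret_node points to the existing
 *		// node, and entry was not added.
 *	}
 */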
2126
2127 /*
2128  * Note: there is no need to look up the pointer address associated
2129  * with each node's nth item after taking the lock: it has already been
2130  * done by cds_ja_del while holding the RCU read-side lock, and our
2131  * node rules ensure that once a key-to-pointer match is found in a
2132  * node, it is _NEVER_ changed for that node without recompaction, and
2133  * recompaction reallocates the node.
2134  * However, when a child is removed from "linear" nodes, its pointer
2135  * is set to NULL. We therefore check, while holding the locks, if this
2136  * pointer is NULL, and return -ENOENT to the caller if it is the case.
2137  *
2138  * ja_detach_node() ensures that a lookup will _never_ see a branch that
2139  * leads to a dead-end: when removing a branch, it makes sure to perform
2140  * the "cut" at the highest node that has only one child, effectively
2141  * replacing it with a NULL pointer.
2142  */
2143 static
2144 int ja_detach_node(struct cds_ja *ja,
2145 struct cds_ja_inode_flag **snapshot,
2146 struct cds_ja_inode_flag ***snapshot_ptr,
2147 uint8_t *snapshot_n,
2148 int nr_snapshot,
2149 uint64_t key,
2150 struct cds_ja_node *node)
2151 {
2152 struct cds_ja_shadow_node *shadow_nodes[JA_MAX_DEPTH];
2153 struct cds_ja_inode_flag **node_flag_ptr = NULL,
2154 *parent_node_flag = NULL,
2155 **parent_node_flag_ptr = NULL;
2156 struct cds_ja_inode_flag *iter_node_flag;
2157 int ret, i, nr_shadow = 0, nr_clear = 0, nr_branch = 0;
2158 uint8_t n = 0;
2159
2160 assert(nr_snapshot == ja->tree_depth + 1);
2161
2162 	/*
2163 	 * From the last internal level node going up, get the node
2164 	 * lock, check if the node has only one child left. If so,
2165 	 * we continue iterating upward. When we reach a node
2166 	 * which has more than one child left, we lock the parent, and
2167 	 * proceed to the node deletion (removing its children too).
2168 	 */
2169 for (i = nr_snapshot - 2; i >= 1; i--) {
2170 struct cds_ja_shadow_node *shadow_node;
2171
2172 shadow_node = rcuja_shadow_lookup_lock(ja->ht,
2173 snapshot[i]);
2174 if (!shadow_node) {
2175 ret = -EAGAIN;
2176 goto end;
2177 }
2178 shadow_nodes[nr_shadow++] = shadow_node;
2179
2180 /*
2181 * Check if node has been removed between RCU
2182 * lookup and lock acquisition.
2183 */
2184 assert(snapshot_ptr[i + 1]);
2185 if (ja_node_ptr(*snapshot_ptr[i + 1])
2186 != ja_node_ptr(snapshot[i + 1])) {
2187 ret = -ENOENT;
2188 goto end;
2189 }
2190
2191 assert(shadow_node->nr_child > 0);
2192 if (shadow_node->nr_child == 1 && i > 1)
2193 nr_clear++;
2194 nr_branch++;
2195 if (shadow_node->nr_child > 1 || i == 1) {
2196 /* Lock parent and break */
2197 shadow_node = rcuja_shadow_lookup_lock(ja->ht,
2198 snapshot[i - 1]);
2199 if (!shadow_node) {
2200 ret = -EAGAIN;
2201 goto end;
2202 }
2203 shadow_nodes[nr_shadow++] = shadow_node;
2204
2205 /*
2206 * Check if node has been removed between RCU
2207 * lookup and lock acquisition.
2208 */
2209 assert(snapshot_ptr[i]);
2210 if (ja_node_ptr(*snapshot_ptr[i])
2211 != ja_node_ptr(snapshot[i])) {
2212 ret = -ENOENT;
2213 goto end;
2214 }
2215
2216 node_flag_ptr = snapshot_ptr[i + 1];
2217 n = snapshot_n[i + 1];
2218 parent_node_flag_ptr = snapshot_ptr[i];
2219 parent_node_flag = snapshot[i];
2220
2221 if (i > 1) {
2222 /*
2223 * Lock parent's parent, in case we need
2224 * to recompact parent.
2225 */
2226 shadow_node = rcuja_shadow_lookup_lock(ja->ht,
2227 snapshot[i - 2]);
2228 if (!shadow_node) {
2229 ret = -EAGAIN;
2230 goto end;
2231 }
2232 shadow_nodes[nr_shadow++] = shadow_node;
2233
2234 /*
2235 * Check if node has been removed between RCU
2236 * lookup and lock acquisition.
2237 */
2238 assert(snapshot_ptr[i - 1]);
2239 if (ja_node_ptr(*snapshot_ptr[i - 1])
2240 != ja_node_ptr(snapshot[i - 1])) {
2241 ret = -ENOENT;
2242 goto end;
2243 }
2244 }
2245
2246 break;
2247 }
2248 }
2249
2250 	/*
2251 	 * At this point, we want to delete all nodes that are about to
2252 	 * be removed from shadow_nodes (except the last one, which is
2253 	 * either the root or the parent of the uppermost node with a
2254 	 * single child). It is safe to free the lock here: the RCU read
2255 	 * lock is held, and the free is only performed from call_rcu.
2256 	 */
2257
2258 for (i = 0; i < nr_clear; i++) {
2259 ret = rcuja_shadow_clear(ja->ht,
2260 shadow_nodes[i]->node_flag,
2261 shadow_nodes[i],
2262 RCUJA_SHADOW_CLEAR_FREE_NODE
2263 | RCUJA_SHADOW_CLEAR_FREE_LOCK);
2264 assert(!ret);
2265 }
2266
2267 iter_node_flag = parent_node_flag;
2268 /* Remove from parent */
2269 ret = ja_node_clear_ptr(ja,
2270 node_flag_ptr, /* Pointer to location to nullify */
2271 		&iter_node_flag, /* In: old parent ptr; out: new parent ptr (recompaction) */
2272 		shadow_nodes[nr_branch - 1], /* Shadow node of parent */
2273 n, nr_branch - 1);
2274 if (ret)
2275 goto end;
2276
2277 dbg_printf("ja_detach_node: publish %p instead of %p\n",
2278 iter_node_flag, *parent_node_flag_ptr);
2279 /* Update address of parent ptr in its parent */
2280 rcu_assign_pointer(*parent_node_flag_ptr, iter_node_flag);
2281
2282 end:
2283 for (i = 0; i < nr_shadow; i++)
2284 rcuja_shadow_unlock(shadow_nodes[i]);
2285 return ret;
2286 }
2287
2288 static
2289 int ja_unchain_node(struct cds_ja *ja,
2290 struct cds_ja_inode_flag *parent_node_flag,
2291 struct cds_ja_inode_flag **node_flag_ptr,
2292 struct cds_ja_inode_flag *node_flag,
2293 struct cds_ja_node *node)
2294 {
2295 struct cds_ja_shadow_node *shadow_node;
2296 struct cds_ja_node *iter_node, **iter_node_ptr, **prev_node_ptr = NULL;
2297 int ret = 0, count = 0, found = 0;
2298
2299 shadow_node = rcuja_shadow_lookup_lock(ja->ht, parent_node_flag);
2300 if (!shadow_node)
2301 return -EAGAIN;
2302 if (ja_node_ptr(*node_flag_ptr) != ja_node_ptr(node_flag)) {
2303 ret = -EAGAIN;
2304 goto end;
2305 }
2306 	/*
2307 	 * Find the previous node's next pointer pointing to our node,
2308 	 * so we can update it. Retry if another thread removed all but
2309 	 * one of the duplicates since our earlier check (performed without
2310 	 * holding the lock). Ensure that the node we are about to remove
2311 	 * is still in the list (while holding the lock). No need for RCU
2312 	 * traversal here since we hold the lock on the parent.
2313 	 */
2314 iter_node_ptr = (struct cds_ja_node **) node_flag_ptr;
2315 iter_node = (struct cds_ja_node *) ja_node_ptr(node_flag);
2316 cds_ja_for_each_duplicate(iter_node) {
2317 count++;
2318 if (iter_node == node) {
2319 prev_node_ptr = iter_node_ptr;
2320 found++;
2321 }
2322 iter_node_ptr = &iter_node->next;
2323 }
2324 assert(found <= 1);
2325 if (!found || count == 1) {
2326 ret = -EAGAIN;
2327 goto end;
2328 }
2329 CMM_STORE_SHARED(*prev_node_ptr, node->next);
2330 /*
2331 * Validate that we indeed removed the node from linked list.
2332 */
2333 assert(ja_node_ptr(*node_flag_ptr) != (struct cds_ja_inode *) node);
2334 end:
2335 rcuja_shadow_unlock(shadow_node);
2336 return ret;
2337 }
2338
2339 /*
2340 * Called with RCU read lock held.
2341 */
2342 int cds_ja_del(struct cds_ja *ja, uint64_t key,
2343 struct cds_ja_node *node)
2344 {
2345 unsigned int tree_depth, i;
2346 struct cds_ja_inode_flag *snapshot[JA_MAX_DEPTH];
2347 struct cds_ja_inode_flag **snapshot_ptr[JA_MAX_DEPTH];
2348 uint8_t snapshot_n[JA_MAX_DEPTH];
2349 struct cds_ja_inode_flag *node_flag;
2350 struct cds_ja_inode_flag **prev_node_flag_ptr,
2351 **node_flag_ptr;
2352 int nr_snapshot;
2353 int ret;
2354
2355 if (caa_unlikely(key > ja->key_max))
2356 return -EINVAL;
2357 tree_depth = ja->tree_depth;
2358
2359 retry:
2360 nr_snapshot = 0;
2361 dbg_printf("cds_ja_del attempt: key %" PRIu64 ", node %p\n",
2362 key, node);
2363
2364 /* snapshot for level 0 is only for shadow node lookup */
2365 snapshot_n[0] = 0;
2366 snapshot_n[1] = 0;
2367 snapshot_ptr[nr_snapshot] = NULL;
2368 snapshot[nr_snapshot++] = (struct cds_ja_inode_flag *) &ja->root;
2369 node_flag = rcu_dereference(ja->root);
2370 prev_node_flag_ptr = &ja->root;
2371 node_flag_ptr = &ja->root;
2372
2373 /* Iterate on all internal levels */
2374 for (i = 1; i < tree_depth; i++) {
2375 uint8_t iter_key;
2376
2377 dbg_printf("cds_ja_del iter node_flag %p\n",
2378 node_flag);
2379 if (!ja_node_ptr(node_flag)) {
2380 return -ENOENT;
2381 }
2382 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1)));
2383 snapshot_n[nr_snapshot + 1] = iter_key;
2384 snapshot_ptr[nr_snapshot] = prev_node_flag_ptr;
2385 snapshot[nr_snapshot++] = node_flag;
2386 node_flag = ja_node_get_nth(node_flag,
2387 &node_flag_ptr,
2388 iter_key);
2389 if (node_flag)
2390 prev_node_flag_ptr = node_flag_ptr;
2391 dbg_printf("cds_ja_del iter key lookup %u finds node_flag %p, prev_node_flag_ptr %p\n",
2392 (unsigned int) iter_key, node_flag,
2393 prev_node_flag_ptr);
2394 }
2395 /*
2396 	 * We reached the bottom of the tree; try to find the node we
2397 	 * want to remove. Fail if we cannot find it.
2398 */
2399 if (!ja_node_ptr(node_flag)) {
2400 dbg_printf("cds_ja_del: no node found for key %" PRIu64 "\n",
2401 key);
2402 return -ENOENT;
2403 } else {
2404 struct cds_ja_node *iter_node, *match = NULL;
2405 int count = 0;
2406
2407 iter_node = (struct cds_ja_node *) ja_node_ptr(node_flag);
2408 cds_ja_for_each_duplicate_rcu(iter_node) {
2409 dbg_printf("cds_ja_del: compare %p with iter_node %p\n", node, iter_node);
2410 if (iter_node == node)
2411 match = iter_node;
2412 count++;
2413 }
2414
2415 if (!match) {
2416 dbg_printf("cds_ja_del: no node match for node %p key %" PRIu64 "\n", node, key);
2417 return -ENOENT;
2418 }
2419 assert(count > 0);
2420 if (count == 1) {
2421 /*
2422 			 * Removing the last of the duplicates. The last snapshot
2423 			 * does not have a shadow node (external leaves).
2424 */
2425 snapshot_ptr[nr_snapshot] = prev_node_flag_ptr;
2426 snapshot[nr_snapshot++] = node_flag;
2427 ret = ja_detach_node(ja, snapshot, snapshot_ptr,
2428 snapshot_n, nr_snapshot, key, node);
2429 } else {
2430 ret = ja_unchain_node(ja, snapshot[nr_snapshot - 1],
2431 node_flag_ptr, node_flag, match);
2432 }
2433 }
2434 /*
2435 	 * -ENOENT handling: it is caused by a concurrent delete between
2436 	 * the RCU lookup and the actual removal. We need to re-do the
2437 	 * lookup and removal attempt.
2438 */
2439 if (ret == -EAGAIN || ret == -ENOENT)
2440 goto retry;
2441 return ret;
2442 }
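
/*
 * Deletion sketch (illustration only): after a successful cds_ja_del(),
 * concurrent RCU readers may still hold references to the node, so its
 * memory must only be reclaimed after a grace period, e.g. via
 * call_rcu() (free_entry_cb and the embedded rcu_head are assumed
 * application code, not part of this file):
 *
 *	rcu_read_lock();
 *	ret = cds_ja_del(ja, key, &entry->ja_node);
 *	rcu_read_unlock();
 *	if (!ret)
 *		call_rcu(&entry->rcu_head, free_entry_cb);
 */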
2443
2444 struct cds_ja *_cds_ja_new(unsigned int key_bits,
2445 const struct rcu_flavor_struct *flavor)
2446 {
2447 struct cds_ja *ja;
2448 int ret;
2449 struct cds_ja_shadow_node *root_shadow_node;
2450
2451 	ja = calloc(1, sizeof(*ja));
2452 if (!ja)
2453 goto ja_error;
2454
2455 switch (key_bits) {
2456 case 8:
2457 case 16:
2458 case 24:
2459 case 32:
2460 case 40:
2461 case 48:
2462 case 56:
2463 ja->key_max = (1ULL << key_bits) - 1;
2464 break;
2465 case 64:
2466 ja->key_max = UINT64_MAX;
2467 break;
2468 default:
2469 goto check_error;
2470 }
2471
2472 /* ja->root is NULL */
2473 /* tree_depth 0 is for pointer to root node */
2474 ja->tree_depth = (key_bits >> JA_LOG2_BITS_PER_BYTE) + 1;
2475 assert(ja->tree_depth <= JA_MAX_DEPTH);
2476 ja->ht = rcuja_create_ht(flavor);
2477 if (!ja->ht)
2478 goto ht_error;
2479
2480 /*
2481 	 * Note: this node must not be freed until the Judy array is destroyed.
2482 */
2483 root_shadow_node = rcuja_shadow_set(ja->ht,
2484 (struct cds_ja_inode_flag *) &ja->root,
2485 NULL, ja, 0);
2486 if (!root_shadow_node) {
2487 ret = -ENOMEM;
2488 goto ht_node_error;
2489 }
2490
2491 return ja;
2492
2493 ht_node_error:
2494 ret = rcuja_delete_ht(ja->ht);
2495 assert(!ret);
2496 ht_error:
2497 check_error:
2498 free(ja);
2499 ja_error:
2500 return NULL;
2501 }
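
/*
 * Creation sketch (illustration only): applications normally go
 * through the public cds_ja_new() wrapper, which supplies the RCU
 * flavor of the urcu library linked in. key_bits must be a multiple
 * of 8, between 8 and 64:
 *
 *	struct cds_ja *ja;
 *
 *	ja = cds_ja_new(64);	// keys span the full uint64_t range
 *	if (!ja)
 *		return -ENOMEM;	// assumed application error handling
 */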
2502
2503 /*
2504 * Called from RCU read-side CS.
2505 */
2506 __attribute__((visibility("protected")))
2507 void rcuja_free_all_children(struct cds_ja_shadow_node *shadow_node,
2508 struct cds_ja_inode_flag *node_flag,
2509 void (*rcu_free_node)(struct cds_ja_node *node))
2510 {
2511 unsigned int type_index;
2512 struct cds_ja_inode *node;
2513 const struct cds_ja_type *type;
2514
2515 node = ja_node_ptr(node_flag);
2516 assert(node != NULL);
2517 type_index = ja_node_type(node_flag);
2518 type = &ja_types[type_index];
2519
2520 switch (type->type_class) {
2521 case RCU_JA_LINEAR:
2522 {
2523 uint8_t nr_child =
2524 ja_linear_node_get_nr_child(type, node);
2525 unsigned int i;
2526
2527 for (i = 0; i < nr_child; i++) {
2528 struct cds_ja_inode_flag *iter;
2529 struct cds_ja_node *node_iter, *n;
2530 uint8_t v;
2531
2532 ja_linear_node_get_ith_pos(type, node, i, &v, &iter);
2533 node_iter = (struct cds_ja_node *) iter;
2534 cds_ja_for_each_duplicate_safe(node_iter, n) {
2535 rcu_free_node(node_iter);
2536 }
2537 }
2538 break;
2539 }
2540 case RCU_JA_POOL:
2541 {
2542 unsigned int pool_nr;
2543
2544 for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) {
2545 struct cds_ja_inode *pool =
2546 ja_pool_node_get_ith_pool(type, node, pool_nr);
2547 uint8_t nr_child =
2548 ja_linear_node_get_nr_child(type, pool);
2549 unsigned int j;
2550
2551 for (j = 0; j < nr_child; j++) {
2552 struct cds_ja_inode_flag *iter;
2553 struct cds_ja_node *node_iter, *n;
2554 uint8_t v;
2555
2556 ja_linear_node_get_ith_pos(type, pool, j, &v, &iter);
2557 node_iter = (struct cds_ja_node *) iter;
2558 cds_ja_for_each_duplicate_safe(node_iter, n) {
2559 rcu_free_node(node_iter);
2560 }
2561 }
2562 }
2563 break;
2564 }
2565 case RCU_JA_NULL:
2566 break;
2567 case RCU_JA_PIGEON:
2568 {
2569 unsigned int i;
2570
2571 for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
2572 struct cds_ja_inode_flag *iter;
2573 struct cds_ja_node *node_iter, *n;
2574
2575 iter = ja_pigeon_node_get_ith_pos(type, node, i);
2576 node_iter = (struct cds_ja_node *) iter;
2577 cds_ja_for_each_duplicate_safe(node_iter, n) {
2578 rcu_free_node(node_iter);
2579 }
2580 }
2581 break;
2582 }
2583 default:
2584 assert(0);
2585 }
2586 }
2587
2588 static
2589 void print_debug_fallback_distribution(struct cds_ja *ja)
2590 {
2591 int i;
2592
2593 fprintf(stderr, "Fallback node distribution:\n");
2594 for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
2595 if (!ja->node_fallback_count_distribution[i])
2596 continue;
2597 		fprintf(stderr, "	%3d: %4lu\n",
2598 i, ja->node_fallback_count_distribution[i]);
2599 }
2600 }
2601
2602 static
2603 int ja_final_checks(struct cds_ja *ja)
2604 {
2605 double fallback_ratio;
2606 unsigned long na, nf, nr_fallback;
2607 int ret = 0;
2608
2609 	nr_fallback = uatomic_read(&ja->nr_fallback);
2610 	if (nr_fallback) {
2611 		fallback_ratio = (double) nr_fallback;
2612 		fallback_ratio /= (double) uatomic_read(&ja->nr_nodes_allocated);
2613 		fprintf(stderr,
2614 			"[warning] RCU Judy Array used %lu fallback node(s) (ratio: %g)\n",
2615 			nr_fallback, fallback_ratio);
2616 	}
2617
2618 na = uatomic_read(&ja->nr_nodes_allocated);
2619 nf = uatomic_read(&ja->nr_nodes_freed);
2620 dbg_printf("Nodes allocated: %lu, Nodes freed: %lu.\n", na, nf);
2621 if (nr_fallback)
2622 print_debug_fallback_distribution(ja);
2623
2624 if (na != nf) {
2625 fprintf(stderr, "[error] Judy array leaked %ld nodes. Allocated: %lu, freed: %lu.\n",
2626 			(long) (na - nf), na, nf);
2627 ret = -1;
2628 }
2629 return ret;
2630 }
2631
2632 /*
2633 * There should be no more concurrent add, delete, nor look-up performed
2634 * on the Judy array while it is being destroyed (ensured by the
2635 * caller).
2636 */
2637 int cds_ja_destroy(struct cds_ja *ja,
2638 void (*free_node_cb)(struct cds_ja_node *node))
2639 {
2640 const struct rcu_flavor_struct *flavor;
2641 int ret;
2642
2643 flavor = cds_lfht_rcu_flavor(ja->ht);
2644 rcuja_shadow_prune(ja->ht,
2645 RCUJA_SHADOW_CLEAR_FREE_NODE | RCUJA_SHADOW_CLEAR_FREE_LOCK,
2646 free_node_cb);
2647 flavor->thread_offline();
2648 ret = rcuja_delete_ht(ja->ht);
2649 if (ret)
2650 return ret;
2651
2652 /* Wait for in-flight call_rcu free to complete. */
2653 flavor->barrier();
2654
2655 flavor->thread_online();
2656 ret = ja_final_checks(ja);
2657 free(ja);
2658 return ret;
2659 }
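
/*
 * Destruction sketch (illustration only): since the caller guarantees
 * that no concurrent add, delete, nor lookup runs during destroy, and
 * cds_ja_destroy() itself waits for in-flight call_rcu frees with the
 * flavor barrier, free_node_cb does not need to wait for a grace
 * period (quiescent state) before freeing node memory (my_entry is an
 * assumed application type embedding struct cds_ja_node):
 *
 *	static void free_node_cb(struct cds_ja_node *node)
 *	{
 *		struct my_entry *entry =
 *			caa_container_of(node, struct my_entry, ja_node);
 *		free(entry);	// no grace-period wait needed here
 *	}
 *
 *	ret = cds_ja_destroy(ja, free_node_cb);
 */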