1 /*
2 * rcuja/rcuja.c
3 *
4 * Userspace RCU library - RCU Judy Array
5 *
6 * Copyright 2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 */
22
23 #define _LGPL_SOURCE
24 #include <stdint.h>
25 #include <errno.h>
26 #include <limits.h>
27 #include <string.h>
28 #include <urcu/rcuja.h>
29 #include <urcu/compiler.h>
30 #include <urcu/arch.h>
31 #include <assert.h>
32 #include <urcu-pointer.h>
33 #include <urcu/uatomic.h>
34 #include <stdint.h>
35
36 #include "rcuja-internal.h"
37 #include "bitfield.h"
38
39 #ifndef abs_int
40 #define abs_int(a) ((int) (a) > 0 ? (int) (a) : -((int) (a)))
41 #endif
42
43 enum cds_ja_type_class {
44 RCU_JA_LINEAR = 0, /* Type A */
45 /* 32-bit: 1 to 25 children, 8 to 128 bytes */
46 /* 64-bit: 1 to 28 children, 16 to 256 bytes */
47 RCU_JA_POOL = 1, /* Type B */
48 /* 32-bit: 26 to 100 children, 256 to 512 bytes */
49 /* 64-bit: 29 to 112 children, 512 to 1024 bytes */
50 RCU_JA_PIGEON = 2, /* Type C */
51 /* 32-bit: 101 to 256 children, 1024 bytes */
52 /* 64-bit: 113 to 256 children, 2048 bytes */
53 /* Leaf nodes are implicit from their height in the tree */
54 RCU_JA_NR_TYPES,
55
56 RCU_JA_NULL, /* not an encoded type, but keeps code regular */
57 };
58
59 struct cds_ja_type {
60 enum cds_ja_type_class type_class;
61 uint16_t min_child; /* minimum number of children: 1 to 256 */
62 uint16_t max_child; /* maximum number of children: 1 to 256 */
63 uint16_t max_linear_child; /* per-pool max nr. children: 1 to 256 */
64 uint16_t order; /* node size is (1 << order), in bytes */
65 uint16_t nr_pool_order; /* number of pools */
66 uint16_t pool_size_order; /* pool size */
67 };
68
69 /*
70 * Iteration on the array to find the right node size for the number of
71 * children stops when it reaches .max_child == 256 (this is the largest
72 * possible node size, which contains 256 children).
73  * The min_child overlaps with the previous max_child to provide
74  * hysteresis on reallocation for patterns of cyclic add/removal
75  * within the same node.
76  * The node index within the following arrays is represented on 3
77  * bits. It identifies the node type, min/max number of children, and
78  * the size order.
79  * The max_child values for the RCU_JA_POOL below result from
80  * statistical approximation: over a million populations, the max_child
81  * covers between 97% and 99% of the populations generated. Therefore, a
82  * fallback should exist to cover the rare extreme population unbalance
83  * cases, but it will not have a major impact on speed or space
84  * consumption, since those are rare cases.
85 */
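
/*
 * Illustrative example of the hysteresis (using the 64-bit table below,
 * and not part of the original comment): a node that grows from 3 to 4
 * children is recompacted from the 32-byte linear type (max_child = 3)
 * to the 64-byte linear type (min_child = 3, max_child = 7). If one
 * child is then removed, the node keeps its 64-byte type, because
 * shrinking is only attempted when a removal would bring the child
 * count below min_child. A workload cycling between 3 and 4 children
 * therefore stays on the same node type instead of reallocating on
 * every add/removal.
 */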
86
87 #if (CAA_BITS_PER_LONG < 64)
88 /* 32-bit pointers */
89 enum {
90 ja_type_0_max_child = 1,
91 ja_type_1_max_child = 3,
92 ja_type_2_max_child = 6,
93 ja_type_3_max_child = 12,
94 ja_type_4_max_child = 25,
95 ja_type_5_max_child = 48,
96 ja_type_6_max_child = 92,
97 ja_type_7_max_child = 256,
98 ja_type_8_max_child = 0, /* NULL */
99 };
100
101 enum {
102 ja_type_0_max_linear_child = 1,
103 ja_type_1_max_linear_child = 3,
104 ja_type_2_max_linear_child = 6,
105 ja_type_3_max_linear_child = 12,
106 ja_type_4_max_linear_child = 25,
107 ja_type_5_max_linear_child = 24,
108 ja_type_6_max_linear_child = 23,
109 };
110
111 enum {
112 ja_type_5_nr_pool_order = 1,
113 ja_type_6_nr_pool_order = 2,
114 };
115
116 const struct cds_ja_type ja_types[] = {
117 { .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_0_max_child, .max_linear_child = ja_type_0_max_linear_child, .order = 3, },
118 { .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_1_max_child, .max_linear_child = ja_type_1_max_linear_child, .order = 4, },
119 { .type_class = RCU_JA_LINEAR, .min_child = 3, .max_child = ja_type_2_max_child, .max_linear_child = ja_type_2_max_linear_child, .order = 5, },
120 { .type_class = RCU_JA_LINEAR, .min_child = 4, .max_child = ja_type_3_max_child, .max_linear_child = ja_type_3_max_linear_child, .order = 6, },
121 { .type_class = RCU_JA_LINEAR, .min_child = 10, .max_child = ja_type_4_max_child, .max_linear_child = ja_type_4_max_linear_child, .order = 7, },
122
123 /* Pools may fill sooner than max_child */
124 { .type_class = RCU_JA_POOL, .min_child = 20, .max_child = ja_type_5_max_child, .max_linear_child = ja_type_5_max_linear_child, .order = 8, .nr_pool_order = ja_type_5_nr_pool_order, .pool_size_order = 7, },
125 { .type_class = RCU_JA_POOL, .min_child = 45, .max_child = ja_type_6_max_child, .max_linear_child = ja_type_6_max_linear_child, .order = 9, .nr_pool_order = ja_type_6_nr_pool_order, .pool_size_order = 7, },
126
127 /*
128 * Upon node removal below min_child, if child pool is filled
129 * beyond capacity, we roll back to pigeon.
130 */
131 { .type_class = RCU_JA_PIGEON, .min_child = 83, .max_child = ja_type_7_max_child, .order = 10, },
132
133 { .type_class = RCU_JA_NULL, .min_child = 0, .max_child = ja_type_8_max_child, },
134 };
135 #else /* !(CAA_BITS_PER_LONG < 64) */
136 /* 64-bit pointers */
137 enum {
138 ja_type_0_max_child = 1,
139 ja_type_1_max_child = 3,
140 ja_type_2_max_child = 7,
141 ja_type_3_max_child = 14,
142 ja_type_4_max_child = 28,
143 ja_type_5_max_child = 54,
144 ja_type_6_max_child = 104,
145 ja_type_7_max_child = 256,
146 ja_type_8_max_child = 256,
147 };
148
149 enum {
150 ja_type_0_max_linear_child = 1,
151 ja_type_1_max_linear_child = 3,
152 ja_type_2_max_linear_child = 7,
153 ja_type_3_max_linear_child = 14,
154 ja_type_4_max_linear_child = 28,
155 ja_type_5_max_linear_child = 27,
156 ja_type_6_max_linear_child = 26,
157 };
158
159 enum {
160 ja_type_5_nr_pool_order = 1,
161 ja_type_6_nr_pool_order = 2,
162 };
163
164 const struct cds_ja_type ja_types[] = {
165 { .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_0_max_child, .max_linear_child = ja_type_0_max_linear_child, .order = 4, },
166 { .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_1_max_child, .max_linear_child = ja_type_1_max_linear_child, .order = 5, },
167 { .type_class = RCU_JA_LINEAR, .min_child = 3, .max_child = ja_type_2_max_child, .max_linear_child = ja_type_2_max_linear_child, .order = 6, },
168 { .type_class = RCU_JA_LINEAR, .min_child = 5, .max_child = ja_type_3_max_child, .max_linear_child = ja_type_3_max_linear_child, .order = 7, },
169 { .type_class = RCU_JA_LINEAR, .min_child = 10, .max_child = ja_type_4_max_child, .max_linear_child = ja_type_4_max_linear_child, .order = 8, },
170
171 /* Pools may fill sooner than max_child. */
172 { .type_class = RCU_JA_POOL, .min_child = 22, .max_child = ja_type_5_max_child, .max_linear_child = ja_type_5_max_linear_child, .order = 9, .nr_pool_order = ja_type_5_nr_pool_order, .pool_size_order = 8, },
173 { .type_class = RCU_JA_POOL, .min_child = 51, .max_child = ja_type_6_max_child, .max_linear_child = ja_type_6_max_linear_child, .order = 10, .nr_pool_order = ja_type_6_nr_pool_order, .pool_size_order = 8, },
174
175 /*
176 * Upon node removal below min_child, if child pool is filled
177 * beyond capacity, we roll back to pigeon.
178 */
179 { .type_class = RCU_JA_PIGEON, .min_child = 95, .max_child = ja_type_7_max_child, .order = 11, },
180
181 { .type_class = RCU_JA_NULL, .min_child = 0, .max_child = ja_type_8_max_child, },
182 };
183 #endif /* !(BITS_PER_LONG < 64) */
184
185 static inline __attribute__((unused))
186 void static_array_size_check(void)
187 {
188 CAA_BUILD_BUG_ON(CAA_ARRAY_SIZE(ja_types) < JA_TYPE_MAX_NR);
189 }
190
191 /*
192  * The cds_ja_inode contains the compressed node data needed for
193  * read-side. For linear and pool node configurations, it starts with a
194  * byte counting the number of children in the node. Then, the
195  * node-specific data is placed.
196  * The node mutex (if any is needed) protecting concurrent updates of
197  * each node is placed in a separate hash table indexed by node address.
198 * For the pigeon configuration, the number of children is also kept in
199 * a separate hash table, indexed by node address, because it is only
200 * required for updates.
201 */
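
/*
 * Illustrative layout (an example, 64-bit, second ja_types[] entry,
 * max_linear_child = 3, 32-byte node):
 *
 *   byte 0      : nr_child
 *   bytes 1..3  : child_value[3]
 *   bytes 4..7  : padding (align_ptr_size)
 *   bytes 8..31 : child_ptr[3]
 *
 * The read-side accessors below recompute these offsets from u.data[]
 * rather than going through the conf_* structures directly.
 */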
202
203 #define DECLARE_LINEAR_NODE(index) \
204 struct { \
205 uint8_t nr_child; \
206 uint8_t child_value[ja_type_## index ##_max_linear_child]; \
207 struct cds_ja_inode_flag *child_ptr[ja_type_## index ##_max_linear_child]; \
208 }
209
210 #define DECLARE_POOL_NODE(index) \
211 struct { \
212 struct { \
213 uint8_t nr_child; \
214 uint8_t child_value[ja_type_## index ##_max_linear_child]; \
215 struct cds_ja_inode_flag *child_ptr[ja_type_## index ##_max_linear_child]; \
216 } linear[1U << ja_type_## index ##_nr_pool_order]; \
217 }
218
219 struct cds_ja_inode {
220 union {
221 /* Linear configuration */
222 DECLARE_LINEAR_NODE(0) conf_0;
223 DECLARE_LINEAR_NODE(1) conf_1;
224 DECLARE_LINEAR_NODE(2) conf_2;
225 DECLARE_LINEAR_NODE(3) conf_3;
226 DECLARE_LINEAR_NODE(4) conf_4;
227
228 /* Pool configuration */
229 DECLARE_POOL_NODE(5) conf_5;
230 DECLARE_POOL_NODE(6) conf_6;
231
232 /* Pigeon configuration */
233 struct {
234 struct cds_ja_inode_flag *child[ja_type_7_max_child];
235 } conf_7;
236 /* data aliasing nodes for computed accesses */
237 uint8_t data[sizeof(struct cds_ja_inode_flag *) * ja_type_7_max_child];
238 } u;
239 };
240
241 enum ja_recompact {
242 JA_RECOMPACT_ADD_SAME,
243 JA_RECOMPACT_ADD_NEXT,
244 JA_RECOMPACT_DEL,
245 };
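
/*
 * When each recompaction mode is used (derived from ja_node_set_nth()
 * and ja_node_clear_ptr() below):
 * - JA_RECOMPACT_ADD_SAME: an add found the linear entries exhausted,
 *   but some of them are NULL holes left by prior removals (-ERANGE);
 *   recompacting garbage-collects those holes.
 * - JA_RECOMPACT_ADD_NEXT: an add found no space left at all (-ENOSPC);
 *   recompact to the next (larger) node type.
 * - JA_RECOMPACT_DEL: a removal hit a node already at or below
 *   min_child (-EFBIG); recompact to a smaller node type.
 */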
246
247 static
248 struct cds_ja_inode *_ja_node_mask_ptr(struct cds_ja_inode_flag *node)
249 {
250 return (struct cds_ja_inode *) (((unsigned long) node) & JA_PTR_MASK);
251 }
252
253 unsigned long ja_node_type(struct cds_ja_inode_flag *node)
254 {
255 unsigned long type;
256
257 if (_ja_node_mask_ptr(node) == NULL) {
258 return NODE_INDEX_NULL;
259 }
260 type = (unsigned int) ((unsigned long) node & JA_TYPE_MASK);
261 assert(type < (1UL << JA_TYPE_BITS));
262 return type;
263 }
264
265 struct cds_ja_inode *ja_node_ptr(struct cds_ja_inode_flag *node)
266 {
267 unsigned long type_index = ja_node_type(node);
268 const struct cds_ja_type *type;
269
270 type = &ja_types[type_index];
271 switch (type->type_class) {
272 case RCU_JA_LINEAR:
273 case RCU_JA_PIGEON: /* fall-through */
274 case RCU_JA_NULL: /* fall-through */
275 default: /* fall-through */
276 return _ja_node_mask_ptr(node);
277 case RCU_JA_POOL:
278 switch (type->nr_pool_order) {
279 case 1:
280 return (struct cds_ja_inode *) (((unsigned long) node) & ~(JA_POOL_1D_MASK | JA_TYPE_MASK));
281 case 2:
282 return (struct cds_ja_inode *) (((unsigned long) node) & ~(JA_POOL_2D_MASK | JA_POOL_1D_MASK | JA_TYPE_MASK));
283 default:
284 assert(0);
285 }
286 }
287 }
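
/*
 * Pointer/type encoding note: nodes are allocated with posix_memalign()
 * to their own size (see alloc_cds_ja_node), so the low-order bits of a
 * node address are zero. The ja_node_flag*() helpers (rcuja-internal.h)
 * store the type index in those low bits (JA_TYPE_MASK), and pool nodes
 * additionally carry their bit-selector(s) in JA_POOL_1D_MASK /
 * JA_POOL_2D_MASK, which is why ja_node_ptr() masks them out above.
 */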
288
289 static
290 struct cds_ja_inode *alloc_cds_ja_node(struct cds_ja *ja,
291 const struct cds_ja_type *ja_type)
292 {
293 size_t len = 1U << ja_type->order;
294 void *p;
295 int ret;
296
297 ret = posix_memalign(&p, len, len);
298 if (ret || !p) {
299 return NULL;
300 }
301 memset(p, 0, len);
302 uatomic_inc(&ja->nr_nodes_allocated);
303 return p;
304 }
305
306 void free_cds_ja_node(struct cds_ja *ja, struct cds_ja_inode *node)
307 {
308 free(node);
309 if (node)
310 uatomic_inc(&ja->nr_nodes_freed);
311 }
312
313 #define __JA_ALIGN_MASK(v, mask) (((v) + (mask)) & ~(mask))
314 #define JA_ALIGN(v, align) __JA_ALIGN_MASK(v, (typeof(v)) (align) - 1)
315 #define __JA_FLOOR_MASK(v, mask) ((v) & ~(mask))
316 #define JA_FLOOR(v, align) __JA_FLOOR_MASK(v, (typeof(v)) (align) - 1)
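
/*
 * Worked example (illustrative): JA_ALIGN(13, 8) computes
 * (13 + 7) & ~7 = 16, and JA_FLOOR(13, 8) computes 13 & ~7 = 8.
 * align_ptr_size() below uses JA_ALIGN to round a byte offset up to the
 * next pointer-sized boundary.
 */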
317
318 static
319 uint8_t *align_ptr_size(uint8_t *ptr)
320 {
321 return (uint8_t *) JA_ALIGN((unsigned long) ptr, sizeof(void *));
322 }
323
324 static
325 uint8_t ja_linear_node_get_nr_child(const struct cds_ja_type *type,
326 struct cds_ja_inode *node)
327 {
328 assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
329 return rcu_dereference(node->u.data[0]);
330 }
331
332 /*
333  * The order in which values and pointers are read does not matter: if
334  * a value is missing, we return NULL. If a value is there, but its
335  * associated pointer is still NULL, we return NULL too.
336 */
337 static
338 struct cds_ja_inode_flag *ja_linear_node_get_nth(const struct cds_ja_type *type,
339 struct cds_ja_inode *node,
340 struct cds_ja_inode_flag ***node_flag_ptr,
341 uint8_t n)
342 {
343 uint8_t nr_child;
344 uint8_t *values;
345 struct cds_ja_inode_flag **pointers;
346 struct cds_ja_inode_flag *ptr;
347 unsigned int i;
348
349 assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
350
351 nr_child = ja_linear_node_get_nr_child(type, node);
352 cmm_smp_rmb(); /* read nr_child before values and pointers */
353 assert(nr_child <= type->max_linear_child);
354 assert(type->type_class != RCU_JA_LINEAR || nr_child >= type->min_child);
355
356 values = &node->u.data[1];
357 for (i = 0; i < nr_child; i++) {
358 if (CMM_LOAD_SHARED(values[i]) == n)
359 break;
360 }
361 if (i >= nr_child) {
362 if (caa_unlikely(node_flag_ptr))
363 *node_flag_ptr = NULL;
364 return NULL;
365 }
366 pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
367 ptr = rcu_dereference(pointers[i]);
368 if (caa_unlikely(node_flag_ptr))
369 *node_flag_ptr = &pointers[i];
370 return ptr;
371 }
372
373 static
374 void ja_linear_node_get_ith_pos(const struct cds_ja_type *type,
375 struct cds_ja_inode *node,
376 uint8_t i,
377 uint8_t *v,
378 struct cds_ja_inode_flag **iter)
379 {
380 uint8_t *values;
381 struct cds_ja_inode_flag **pointers;
382
383 assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
384 assert(i < ja_linear_node_get_nr_child(type, node));
385
386 values = &node->u.data[1];
387 *v = values[i];
388 pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
389 *iter = pointers[i];
390 }
391
392 static
393 struct cds_ja_inode_flag *ja_pool_node_get_nth(const struct cds_ja_type *type,
394 struct cds_ja_inode *node,
395 struct cds_ja_inode_flag *node_flag,
396 struct cds_ja_inode_flag ***node_flag_ptr,
397 uint8_t n)
398 {
399 struct cds_ja_inode *linear;
400
401 assert(type->type_class == RCU_JA_POOL);
402
403 switch (type->nr_pool_order) {
404 case 1:
405 {
406 unsigned long bitsel, index;
407
408 bitsel = ja_node_pool_1d_bitsel(node_flag);
409 assert(bitsel < CHAR_BIT);
410 index = ((unsigned long) n >> bitsel) & 0x1;
411 linear = (struct cds_ja_inode *) &node->u.data[index << type->pool_size_order];
412 break;
413 }
414 case 2:
415 {
416 unsigned long bitsel[2], index[2], rindex;
417
418 ja_node_pool_2d_bitsel(node_flag, bitsel);
419 assert(bitsel[0] < CHAR_BIT);
420 assert(bitsel[1] < CHAR_BIT);
421 index[0] = ((unsigned long) n >> bitsel[0]) & 0x1;
422 index[0] <<= 1;
423 index[1] = ((unsigned long) n >> bitsel[1]) & 0x1;
424 rindex = index[0] | index[1];
425 linear = (struct cds_ja_inode *) &node->u.data[rindex << type->pool_size_order];
426 break;
427 }
428 default:
429 linear = NULL;
430 assert(0);
431 }
432 return ja_linear_node_get_nth(type, linear, node_flag_ptr, n);
433 }
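
/*
 * Illustrative example of pool selection: with a 1D pool whose
 * bit-selector is 3, looking up child n = 0x2a (binary 00101010)
 * computes index = (0x2a >> 3) & 0x1 = 1, so the lookup proceeds in the
 * second linear sub-node. With a 2D pool selecting bits 5 and 3, the
 * same n yields rindex = (1 << 1) | 1 = 3, i.e. the fourth sub-node.
 */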
434
435 static
436 struct cds_ja_inode *ja_pool_node_get_ith_pool(const struct cds_ja_type *type,
437 struct cds_ja_inode *node,
438 uint8_t i)
439 {
440 assert(type->type_class == RCU_JA_POOL);
441 return (struct cds_ja_inode *)
442 &node->u.data[(unsigned int) i << type->pool_size_order];
443 }
444
445 static
446 struct cds_ja_inode_flag *ja_pigeon_node_get_nth(const struct cds_ja_type *type,
447 struct cds_ja_inode *node,
448 struct cds_ja_inode_flag ***node_flag_ptr,
449 uint8_t n)
450 {
451 struct cds_ja_inode_flag **child_node_flag_ptr;
452 struct cds_ja_inode_flag *child_node_flag;
453
454 assert(type->type_class == RCU_JA_PIGEON);
455 child_node_flag_ptr = &((struct cds_ja_inode_flag **) node->u.data)[n];
456 child_node_flag = rcu_dereference(*child_node_flag_ptr);
457 dbg_printf("ja_pigeon_node_get_nth child_node_flag_ptr %p\n",
458 child_node_flag_ptr);
459 if (caa_unlikely(node_flag_ptr))
460 *node_flag_ptr = child_node_flag_ptr;
461 return child_node_flag;
462 }
463
464 static
465 struct cds_ja_inode_flag *ja_pigeon_node_get_ith_pos(const struct cds_ja_type *type,
466 struct cds_ja_inode *node,
467 uint8_t i)
468 {
469 return ja_pigeon_node_get_nth(type, node, NULL, i);
470 }
471
472 /*
473 * ja_node_get_nth: get nth item from a node.
474 * node_flag is already rcu_dereference'd.
475 */
476 static
477 struct cds_ja_inode_flag *ja_node_get_nth(struct cds_ja_inode_flag *node_flag,
478 struct cds_ja_inode_flag ***node_flag_ptr,
479 uint8_t n)
480 {
481 unsigned int type_index;
482 struct cds_ja_inode *node;
483 const struct cds_ja_type *type;
484
485 node = ja_node_ptr(node_flag);
486 assert(node != NULL);
487 type_index = ja_node_type(node_flag);
488 type = &ja_types[type_index];
489
490 switch (type->type_class) {
491 case RCU_JA_LINEAR:
492 return ja_linear_node_get_nth(type, node,
493 node_flag_ptr, n);
494 case RCU_JA_POOL:
495 return ja_pool_node_get_nth(type, node, node_flag,
496 node_flag_ptr, n);
497 case RCU_JA_PIGEON:
498 return ja_pigeon_node_get_nth(type, node,
499 node_flag_ptr, n);
500 default:
501 assert(0);
502 return (void *) -1UL;
503 }
504 }
505
506 static
507 int ja_linear_node_set_nth(const struct cds_ja_type *type,
508 struct cds_ja_inode *node,
509 struct cds_ja_shadow_node *shadow_node,
510 uint8_t n,
511 struct cds_ja_inode_flag *child_node_flag)
512 {
513 uint8_t nr_child;
514 uint8_t *values, *nr_child_ptr;
515 struct cds_ja_inode_flag **pointers;
516 unsigned int i, unused = 0;
517
518 assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
519
520 nr_child_ptr = &node->u.data[0];
521 dbg_printf("linear set nth: n %u, nr_child_ptr %p\n",
522 (unsigned int) n, nr_child_ptr);
523 nr_child = *nr_child_ptr;
524 assert(nr_child <= type->max_linear_child);
525
526 values = &node->u.data[1];
527 pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
528 /* Check if node value is already populated */
529 for (i = 0; i < nr_child; i++) {
530 if (values[i] == n) {
531 if (pointers[i])
532 return -EEXIST;
533 else
534 break;
535 } else {
536 if (!pointers[i])
537 unused++;
538 }
539 }
540 if (i == nr_child && nr_child >= type->max_linear_child) {
541 if (unused)
542 return -ERANGE; /* recompact node */
543 else
544 return -ENOSPC; /* No space left in this node type */
545 }
546
547 assert(pointers[i] == NULL);
548 rcu_assign_pointer(pointers[i], child_node_flag);
549 /* If we expanded the nr_child, increment it */
550 if (i == nr_child) {
551 CMM_STORE_SHARED(values[nr_child], n);
552 /* write pointer and value before nr_child */
553 cmm_smp_wmb();
554 CMM_STORE_SHARED(*nr_child_ptr, nr_child + 1);
555 }
556 shadow_node->nr_child++;
557 dbg_printf("linear set nth: %u child, shadow: %u child, for node %p shadow %p\n",
558 (unsigned int) CMM_LOAD_SHARED(*nr_child_ptr),
559 (unsigned int) shadow_node->nr_child,
560 node, shadow_node);
561
562 return 0;
563 }
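
/*
 * Note on ordering: the cmm_smp_wmb() above, which orders the child
 * value and pointer stores before the nr_child store, pairs with the
 * cmm_smp_rmb() in ja_linear_node_get_nth(), which reads nr_child
 * before scanning the value and pointer arrays. A reader observing the
 * incremented nr_child is therefore guaranteed to see the newly
 * published value/pointer pair.
 */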
564
565 static
566 int ja_pool_node_set_nth(const struct cds_ja_type *type,
567 struct cds_ja_inode *node,
568 struct cds_ja_inode_flag *node_flag,
569 struct cds_ja_shadow_node *shadow_node,
570 uint8_t n,
571 struct cds_ja_inode_flag *child_node_flag)
572 {
573 struct cds_ja_inode *linear;
574
575 assert(type->type_class == RCU_JA_POOL);
576
577 switch (type->nr_pool_order) {
578 case 1:
579 {
580 unsigned long bitsel, index;
581
582 bitsel = ja_node_pool_1d_bitsel(node_flag);
583 assert(bitsel < CHAR_BIT);
584 index = ((unsigned long) n >> bitsel) & 0x1;
585 linear = (struct cds_ja_inode *) &node->u.data[index << type->pool_size_order];
586 break;
587 }
588 case 2:
589 {
590 unsigned long bitsel[2], index[2], rindex;
591
592 ja_node_pool_2d_bitsel(node_flag, bitsel);
593 assert(bitsel[0] < CHAR_BIT);
594 assert(bitsel[1] < CHAR_BIT);
595 index[0] = ((unsigned long) n >> bitsel[0]) & 0x1;
596 index[0] <<= 1;
597 index[1] = ((unsigned long) n >> bitsel[1]) & 0x1;
598 rindex = index[0] | index[1];
599 linear = (struct cds_ja_inode *) &node->u.data[rindex << type->pool_size_order];
600 break;
601 }
602 default:
603 linear = NULL;
604 assert(0);
605 }
606
607 return ja_linear_node_set_nth(type, linear, shadow_node,
608 n, child_node_flag);
609 }
610
611 static
612 int ja_pigeon_node_set_nth(const struct cds_ja_type *type,
613 struct cds_ja_inode *node,
614 struct cds_ja_shadow_node *shadow_node,
615 uint8_t n,
616 struct cds_ja_inode_flag *child_node_flag)
617 {
618 struct cds_ja_inode_flag **ptr;
619
620 assert(type->type_class == RCU_JA_PIGEON);
621 ptr = &((struct cds_ja_inode_flag **) node->u.data)[n];
622 if (*ptr)
623 return -EEXIST;
624 rcu_assign_pointer(*ptr, child_node_flag);
625 shadow_node->nr_child++;
626 return 0;
627 }
628
629 /*
630 * _ja_node_set_nth: set nth item within a node. Return an error
631 * (negative error value) if it is already there.
632 */
633 static
634 int _ja_node_set_nth(const struct cds_ja_type *type,
635 struct cds_ja_inode *node,
636 struct cds_ja_inode_flag *node_flag,
637 struct cds_ja_shadow_node *shadow_node,
638 uint8_t n,
639 struct cds_ja_inode_flag *child_node_flag)
640 {
641 switch (type->type_class) {
642 case RCU_JA_LINEAR:
643 return ja_linear_node_set_nth(type, node, shadow_node, n,
644 child_node_flag);
645 case RCU_JA_POOL:
646 return ja_pool_node_set_nth(type, node, node_flag, shadow_node, n,
647 child_node_flag);
648 case RCU_JA_PIGEON:
649 return ja_pigeon_node_set_nth(type, node, shadow_node, n,
650 child_node_flag);
651 case RCU_JA_NULL:
652 return -ENOSPC;
653 default:
654 assert(0);
655 return -EINVAL;
656 }
657
658 return 0;
659 }
660
661 static
662 int ja_linear_node_clear_ptr(const struct cds_ja_type *type,
663 struct cds_ja_inode *node,
664 struct cds_ja_shadow_node *shadow_node,
665 struct cds_ja_inode_flag **node_flag_ptr)
666 {
667 uint8_t nr_child;
668 uint8_t *nr_child_ptr;
669
670 assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
671
672 nr_child_ptr = &node->u.data[0];
673 nr_child = *nr_child_ptr;
674 assert(nr_child <= type->max_linear_child);
675
676 if (type->type_class == RCU_JA_LINEAR) {
677 assert(!shadow_node->fallback_removal_count);
678 if (shadow_node->nr_child <= type->min_child) {
679 /* We need to try recompacting the node */
680 return -EFBIG;
681 }
682 }
683 dbg_printf("linear clear ptr: nr_child_ptr %p\n", nr_child_ptr);
684 assert(*node_flag_ptr != NULL);
685 rcu_assign_pointer(*node_flag_ptr, NULL);
686 /*
687 * Value and nr_child are never changed (would cause ABA issue).
688 	 * Instead, we leave the pointer set to NULL and recompact the node
689 * once in a while. It is allowed to set a NULL pointer to a new
690 * value without recompaction though.
691 * Only update the shadow node accounting.
692 */
693 shadow_node->nr_child--;
694 dbg_printf("linear clear ptr: %u child, shadow: %u child, for node %p shadow %p\n",
695 (unsigned int) CMM_LOAD_SHARED(*nr_child_ptr),
696 (unsigned int) shadow_node->nr_child,
697 node, shadow_node);
698 return 0;
699 }
700
701 static
702 int ja_pool_node_clear_ptr(const struct cds_ja_type *type,
703 struct cds_ja_inode *node,
704 struct cds_ja_inode_flag *node_flag,
705 struct cds_ja_shadow_node *shadow_node,
706 struct cds_ja_inode_flag **node_flag_ptr,
707 uint8_t n)
708 {
709 struct cds_ja_inode *linear;
710
711 assert(type->type_class == RCU_JA_POOL);
712
713 if (shadow_node->fallback_removal_count) {
714 shadow_node->fallback_removal_count--;
715 } else {
716 /* We should try recompacting the node */
717 if (shadow_node->nr_child <= type->min_child)
718 return -EFBIG;
719 }
720
721 switch (type->nr_pool_order) {
722 case 1:
723 {
724 unsigned long bitsel, index;
725
726 bitsel = ja_node_pool_1d_bitsel(node_flag);
727 assert(bitsel < CHAR_BIT);
728 index = ((unsigned long) n >> bitsel) & 0x1;
729 linear = (struct cds_ja_inode *) &node->u.data[index << type->pool_size_order];
730 break;
731 }
732 case 2:
733 {
734 unsigned long bitsel[2], index[2], rindex;
735
736 ja_node_pool_2d_bitsel(node_flag, bitsel);
737 assert(bitsel[0] < CHAR_BIT);
738 assert(bitsel[1] < CHAR_BIT);
739 index[0] = ((unsigned long) n >> bitsel[0]) & 0x1;
740 index[0] <<= 1;
741 index[1] = ((unsigned long) n >> bitsel[1]) & 0x1;
742 rindex = index[0] | index[1];
743 linear = (struct cds_ja_inode *) &node->u.data[rindex << type->pool_size_order];
744 break;
745 }
746 default:
747 linear = NULL;
748 assert(0);
749 }
750
751 return ja_linear_node_clear_ptr(type, linear, shadow_node, node_flag_ptr);
752 }
753
754 static
755 int ja_pigeon_node_clear_ptr(const struct cds_ja_type *type,
756 struct cds_ja_inode *node,
757 struct cds_ja_shadow_node *shadow_node,
758 struct cds_ja_inode_flag **node_flag_ptr)
759 {
760 assert(type->type_class == RCU_JA_PIGEON);
761
762 if (shadow_node->fallback_removal_count) {
763 shadow_node->fallback_removal_count--;
764 } else {
765 /* We should try recompacting the node */
766 if (shadow_node->nr_child <= type->min_child)
767 return -EFBIG;
768 }
769 dbg_printf("ja_pigeon_node_clear_ptr: clearing ptr: %p\n", *node_flag_ptr);
770 rcu_assign_pointer(*node_flag_ptr, NULL);
771 shadow_node->nr_child--;
772 return 0;
773 }
774
775 /*
776 * _ja_node_clear_ptr: clear ptr item within a node. Return an error
777 * (negative error value) if it is not found (-ENOENT).
778 */
779 static
780 int _ja_node_clear_ptr(const struct cds_ja_type *type,
781 struct cds_ja_inode *node,
782 struct cds_ja_inode_flag *node_flag,
783 struct cds_ja_shadow_node *shadow_node,
784 struct cds_ja_inode_flag **node_flag_ptr,
785 uint8_t n)
786 {
787 switch (type->type_class) {
788 case RCU_JA_LINEAR:
789 return ja_linear_node_clear_ptr(type, node, shadow_node, node_flag_ptr);
790 case RCU_JA_POOL:
791 return ja_pool_node_clear_ptr(type, node, node_flag, shadow_node, node_flag_ptr, n);
792 case RCU_JA_PIGEON:
793 return ja_pigeon_node_clear_ptr(type, node, shadow_node, node_flag_ptr);
794 case RCU_JA_NULL:
795 return -ENOENT;
796 default:
797 assert(0);
798 return -EINVAL;
799 }
800
801 return 0;
802 }
803
804 /*
805  * Calculate bit distribution. Returns the bit (0 to 7) that splits the
806  * distribution into two sub-distributions containing as close to the
807  * same number of elements as possible.
808 */
809 static
810 unsigned int ja_node_sum_distribution_1d(enum ja_recompact mode,
811 struct cds_ja *ja,
812 unsigned int type_index,
813 const struct cds_ja_type *type,
814 struct cds_ja_inode *node,
815 struct cds_ja_shadow_node *shadow_node,
816 uint8_t n,
817 struct cds_ja_inode_flag *child_node_flag,
818 struct cds_ja_inode_flag **nullify_node_flag_ptr)
819 {
820 uint8_t nr_one[JA_BITS_PER_BYTE];
821 unsigned int bitsel = 0, bit_i, overall_best_distance = UINT_MAX;
822 unsigned int distrib_nr_child = 0;
823
824 memset(nr_one, 0, sizeof(nr_one));
825
826 switch (type->type_class) {
827 case RCU_JA_LINEAR:
828 {
829 uint8_t nr_child =
830 ja_linear_node_get_nr_child(type, node);
831 unsigned int i;
832
833 for (i = 0; i < nr_child; i++) {
834 struct cds_ja_inode_flag *iter;
835 uint8_t v;
836
837 ja_linear_node_get_ith_pos(type, node, i, &v, &iter);
838 if (!iter)
839 continue;
840 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
841 continue;
842 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
843 if (v & (1U << bit_i))
844 nr_one[bit_i]++;
845 }
846 distrib_nr_child++;
847 }
848 break;
849 }
850 case RCU_JA_POOL:
851 {
852 unsigned int pool_nr;
853
854 for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) {
855 struct cds_ja_inode *pool =
856 ja_pool_node_get_ith_pool(type,
857 node, pool_nr);
858 uint8_t nr_child =
859 ja_linear_node_get_nr_child(type, pool);
860 unsigned int j;
861
862 for (j = 0; j < nr_child; j++) {
863 struct cds_ja_inode_flag *iter;
864 uint8_t v;
865
866 ja_linear_node_get_ith_pos(type, pool,
867 j, &v, &iter);
868 if (!iter)
869 continue;
870 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
871 continue;
872 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
873 if (v & (1U << bit_i))
874 nr_one[bit_i]++;
875 }
876 distrib_nr_child++;
877 }
878 }
879 break;
880 }
881 case RCU_JA_PIGEON:
882 {
883 unsigned int i;
884
885 assert(mode == JA_RECOMPACT_DEL);
886 for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
887 struct cds_ja_inode_flag *iter;
888
889 iter = ja_pigeon_node_get_ith_pos(type, node, i);
890 if (!iter)
891 continue;
892 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
893 continue;
894 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
895 if (i & (1U << bit_i))
896 nr_one[bit_i]++;
897 }
898 distrib_nr_child++;
899 }
900 break;
901 }
902 case RCU_JA_NULL:
903 assert(mode == JA_RECOMPACT_ADD_NEXT);
904 break;
905 default:
906 assert(0);
907 break;
908 }
909
910 if (mode == JA_RECOMPACT_ADD_NEXT || mode == JA_RECOMPACT_ADD_SAME) {
911 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
912 if (n & (1U << bit_i))
913 nr_one[bit_i]++;
914 }
915 distrib_nr_child++;
916 }
917
918 /*
919 * The best bit selector is that for which the number of ones is
920 * closest to half of the number of children in the
921 	 * distribution. We calculate the distance using twice the
922 	 * sub-distribution size to eliminate truncation error.
923 */
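/*
 * Example (illustrative): with distrib_nr_child = 5 and nr_one[bit] = 2,
 * the distance is |2*2 - 5| = 1, which is the best achievable balance
 * for an odd population.
 */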
924 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
925 unsigned int distance_to_best;
926
927 distance_to_best = abs_int((nr_one[bit_i] << 1U) - distrib_nr_child);
928 if (distance_to_best < overall_best_distance) {
929 overall_best_distance = distance_to_best;
930 bitsel = bit_i;
931 }
932 }
933 dbg_printf("1 dimension pool bit selection: (%u)\n", bitsel);
934 return bitsel;
935 }
936
937 /*
938 * Calculate bit distribution in two dimensions. Returns the two bits
939  * (each 0 to 7) that split the distribution into four sub-distributions
940  * containing as close to the same number of elements as possible.
941 */
942 static
943 void ja_node_sum_distribution_2d(enum ja_recompact mode,
944 struct cds_ja *ja,
945 unsigned int type_index,
946 const struct cds_ja_type *type,
947 struct cds_ja_inode *node,
948 struct cds_ja_shadow_node *shadow_node,
949 uint8_t n,
950 struct cds_ja_inode_flag *child_node_flag,
951 struct cds_ja_inode_flag **nullify_node_flag_ptr,
952 unsigned int *_bitsel)
953 {
954 uint8_t nr_2d_11[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE],
955 nr_2d_10[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE],
956 nr_2d_01[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE],
957 nr_2d_00[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE];
958 unsigned int bitsel[2] = { 0, 1 };
959 unsigned int bit_i, bit_j;
960 int overall_best_distance = INT_MAX;
961 unsigned int distrib_nr_child = 0;
962
963 memset(nr_2d_11, 0, sizeof(nr_2d_11));
964 memset(nr_2d_10, 0, sizeof(nr_2d_10));
965 memset(nr_2d_01, 0, sizeof(nr_2d_01));
966 memset(nr_2d_00, 0, sizeof(nr_2d_00));
967
968 switch (type->type_class) {
969 case RCU_JA_LINEAR:
970 {
971 uint8_t nr_child =
972 ja_linear_node_get_nr_child(type, node);
973 unsigned int i;
974
975 for (i = 0; i < nr_child; i++) {
976 struct cds_ja_inode_flag *iter;
977 uint8_t v;
978
979 ja_linear_node_get_ith_pos(type, node, i, &v, &iter);
980 if (!iter)
981 continue;
982 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
983 continue;
984 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
985 for (bit_j = 0; bit_j < bit_i; bit_j++) {
986 if ((v & (1U << bit_i)) && (v & (1U << bit_j))) {
987 nr_2d_11[bit_i][bit_j]++;
988 }
989 if ((v & (1U << bit_i)) && !(v & (1U << bit_j))) {
990 nr_2d_10[bit_i][bit_j]++;
991 }
992 if (!(v & (1U << bit_i)) && (v & (1U << bit_j))) {
993 nr_2d_01[bit_i][bit_j]++;
994 }
995 if (!(v & (1U << bit_i)) && !(v & (1U << bit_j))) {
996 nr_2d_00[bit_i][bit_j]++;
997 }
998 }
999 }
1000 distrib_nr_child++;
1001 }
1002 break;
1003 }
1004 case RCU_JA_POOL:
1005 {
1006 unsigned int pool_nr;
1007
1008 for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) {
1009 struct cds_ja_inode *pool =
1010 ja_pool_node_get_ith_pool(type,
1011 node, pool_nr);
1012 uint8_t nr_child =
1013 ja_linear_node_get_nr_child(type, pool);
1014 unsigned int j;
1015
1016 for (j = 0; j < nr_child; j++) {
1017 struct cds_ja_inode_flag *iter;
1018 uint8_t v;
1019
1020 ja_linear_node_get_ith_pos(type, pool,
1021 j, &v, &iter);
1022 if (!iter)
1023 continue;
1024 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
1025 continue;
1026 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1027 for (bit_j = 0; bit_j < bit_i; bit_j++) {
1028 if ((v & (1U << bit_i)) && (v & (1U << bit_j))) {
1029 nr_2d_11[bit_i][bit_j]++;
1030 }
1031 if ((v & (1U << bit_i)) && !(v & (1U << bit_j))) {
1032 nr_2d_10[bit_i][bit_j]++;
1033 }
1034 if (!(v & (1U << bit_i)) && (v & (1U << bit_j))) {
1035 nr_2d_01[bit_i][bit_j]++;
1036 }
1037 if (!(v & (1U << bit_i)) && !(v & (1U << bit_j))) {
1038 nr_2d_00[bit_i][bit_j]++;
1039 }
1040 }
1041 }
1042 distrib_nr_child++;
1043 }
1044 }
1045 break;
1046 }
1047 case RCU_JA_PIGEON:
1048 {
1049 unsigned int i;
1050
1051 assert(mode == JA_RECOMPACT_DEL);
1052 for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
1053 struct cds_ja_inode_flag *iter;
1054
1055 iter = ja_pigeon_node_get_ith_pos(type, node, i);
1056 if (!iter)
1057 continue;
1058 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
1059 continue;
1060 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1061 for (bit_j = 0; bit_j < bit_i; bit_j++) {
1062 if ((i & (1U << bit_i)) && (i & (1U << bit_j))) {
1063 nr_2d_11[bit_i][bit_j]++;
1064 }
1065 if ((i & (1U << bit_i)) && !(i & (1U << bit_j))) {
1066 nr_2d_10[bit_i][bit_j]++;
1067 }
1068 if (!(i & (1U << bit_i)) && (i & (1U << bit_j))) {
1069 nr_2d_01[bit_i][bit_j]++;
1070 }
1071 if (!(i & (1U << bit_i)) && !(i & (1U << bit_j))) {
1072 nr_2d_00[bit_i][bit_j]++;
1073 }
1074 }
1075 }
1076 distrib_nr_child++;
1077 }
1078 break;
1079 }
1080 case RCU_JA_NULL:
1081 assert(mode == JA_RECOMPACT_ADD_NEXT);
1082 break;
1083 default:
1084 assert(0);
1085 break;
1086 }
1087
1088 if (mode == JA_RECOMPACT_ADD_NEXT || mode == JA_RECOMPACT_ADD_SAME) {
1089 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1090 for (bit_j = 0; bit_j < bit_i; bit_j++) {
1091 if ((n & (1U << bit_i)) && (n & (1U << bit_j))) {
1092 nr_2d_11[bit_i][bit_j]++;
1093 }
1094 if ((n & (1U << bit_i)) && !(n & (1U << bit_j))) {
1095 nr_2d_10[bit_i][bit_j]++;
1096 }
1097 if (!(n & (1U << bit_i)) && (n & (1U << bit_j))) {
1098 nr_2d_01[bit_i][bit_j]++;
1099 }
1100 if (!(n & (1U << bit_i)) && !(n & (1U << bit_j))) {
1101 nr_2d_00[bit_i][bit_j]++;
1102 }
1103 }
1104 }
1105 distrib_nr_child++;
1106 }
1107
1108 /*
1109 * The best bit selector is that for which the number of nodes
1110 * in each sub-class is closest to one-fourth of the number of
1111 * children in the distribution. We calculate the distance using
1112 * 4 times the size of the sub-distribution to eliminate
1113 * truncation error.
1114 */
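/*
 * Example (illustrative): with distrib_nr_child = 8 and sub-class
 * counts {3, 2, 2, 1} for a candidate bit pair, the signed distances
 * are {4, 0, 0, -4}; the pair is scored by its worst positive distance,
 * here 4.
 */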
1115 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1116 for (bit_j = 0; bit_j < bit_i; bit_j++) {
1117 int distance_to_best[4];
1118
1119 distance_to_best[0] = (nr_2d_11[bit_i][bit_j] << 2U) - distrib_nr_child;
1120 distance_to_best[1] = (nr_2d_10[bit_i][bit_j] << 2U) - distrib_nr_child;
1121 distance_to_best[2] = (nr_2d_01[bit_i][bit_j] << 2U) - distrib_nr_child;
1122 distance_to_best[3] = (nr_2d_00[bit_i][bit_j] << 2U) - distrib_nr_child;
1123
1124 /* Keep the worst (largest positive) distance */
1125 if (distance_to_best[1] > 0 && distance_to_best[1] > distance_to_best[0])
1126 distance_to_best[0] = distance_to_best[1];
1127 if (distance_to_best[2] > 0 && distance_to_best[2] > distance_to_best[0])
1128 distance_to_best[0] = distance_to_best[2];
1129 if (distance_to_best[3] > 0 && distance_to_best[3] > distance_to_best[0])
1130 distance_to_best[0] = distance_to_best[3];
1131
1132 /*
1133 	 * If our worst distance is better than the overall best,
1134 	 * we become the new best candidate.
1135 */
1136 if (distance_to_best[0] < overall_best_distance) {
1137 overall_best_distance = distance_to_best[0];
1138 bitsel[0] = bit_i;
1139 bitsel[1] = bit_j;
1140 }
1141 }
1142 }
1143
1144 dbg_printf("2 dimensions pool bit selection: (%u,%u)\n", bitsel[0], bitsel[1]);
1145
1146 /* Return our bit selection */
1147 _bitsel[0] = bitsel[0];
1148 _bitsel[1] = bitsel[1];
1149 }
1150
1151 static
1152 unsigned int find_nearest_type_index(unsigned int type_index,
1153 unsigned int nr_nodes)
1154 {
1155 const struct cds_ja_type *type;
1156
1157 assert(type_index != NODE_INDEX_NULL);
1158 if (nr_nodes == 0)
1159 return NODE_INDEX_NULL;
1160 for (;;) {
1161 type = &ja_types[type_index];
1162 if (nr_nodes < type->min_child)
1163 type_index--;
1164 else if (nr_nodes > type->max_child)
1165 type_index++;
1166 else
1167 break;
1168 }
1169 return type_index;
1170 }
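
/*
 * Example (illustrative, using the 64-bit tables above): a node of type
 * index 4 (max_child = 28) that needs to hold 29 children walks up to
 * type index 5 (min_child = 22, max_child = 54) and stops there.
 */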
1171
1172 /*
1173  * ja_node_recompact: recompact a node, adding or removing a child.
1174 * Return 0 on success, -EAGAIN if need to retry, or other negative
1175 * error value otherwise.
1176 */
1177 static
1178 int ja_node_recompact(enum ja_recompact mode,
1179 struct cds_ja *ja,
1180 unsigned int old_type_index,
1181 const struct cds_ja_type *old_type,
1182 struct cds_ja_inode *old_node,
1183 struct cds_ja_shadow_node *shadow_node,
1184 struct cds_ja_inode_flag **old_node_flag_ptr, uint8_t n,
1185 struct cds_ja_inode_flag *child_node_flag,
1186 struct cds_ja_inode_flag **nullify_node_flag_ptr,
1187 int level)
1188 {
1189 unsigned int new_type_index;
1190 struct cds_ja_inode *new_node;
1191 struct cds_ja_shadow_node *new_shadow_node = NULL;
1192 const struct cds_ja_type *new_type;
1193 struct cds_ja_inode_flag *new_node_flag, *old_node_flag;
1194 int ret;
1195 int fallback = 0;
1196
1197 old_node_flag = *old_node_flag_ptr;
1198
1199 /*
1200 * Need to find nearest type index even for ADD_SAME, because
1201 * this recompaction, when applied to linear nodes, will garbage
1202 * collect dummy (NULL) entries, and can therefore cause a few
1203 * linear representations to be skipped.
1204 */
1205 switch (mode) {
1206 case JA_RECOMPACT_ADD_SAME:
1207 new_type_index = find_nearest_type_index(old_type_index,
1208 shadow_node->nr_child + 1);
1209 dbg_printf("Recompact for node with %u children\n",
1210 shadow_node->nr_child + 1);
1211 break;
1212 case JA_RECOMPACT_ADD_NEXT:
1213 if (!shadow_node || old_type_index == NODE_INDEX_NULL) {
1214 new_type_index = 0;
1215 dbg_printf("Recompact for NULL\n");
1216 } else {
1217 new_type_index = find_nearest_type_index(old_type_index,
1218 shadow_node->nr_child + 1);
1219 dbg_printf("Recompact for node with %u children\n",
1220 shadow_node->nr_child + 1);
1221 }
1222 break;
1223 case JA_RECOMPACT_DEL:
1224 new_type_index = find_nearest_type_index(old_type_index,
1225 shadow_node->nr_child - 1);
1226 dbg_printf("Recompact for node with %u children\n",
1227 shadow_node->nr_child - 1);
1228 break;
1229 default:
1230 assert(0);
1231 }
1232
1233 retry: /* for fallback */
1234 dbg_printf("Recompact from type %d to type %d\n",
1235 old_type_index, new_type_index);
1236 new_type = &ja_types[new_type_index];
1237 if (new_type_index != NODE_INDEX_NULL) {
1238 new_node = alloc_cds_ja_node(ja, new_type);
1239 if (!new_node)
1240 return -ENOMEM;
1241
1242 if (new_type->type_class == RCU_JA_POOL) {
1243 switch (new_type->nr_pool_order) {
1244 case 1:
1245 {
1246 unsigned int node_distrib_bitsel;
1247
1248 node_distrib_bitsel =
1249 ja_node_sum_distribution_1d(mode, ja,
1250 old_type_index, old_type,
1251 old_node, shadow_node,
1252 n, child_node_flag,
1253 nullify_node_flag_ptr);
1254 assert(!((unsigned long) new_node & JA_POOL_1D_MASK));
1255 new_node_flag = ja_node_flag_pool_1d(new_node,
1256 new_type_index, node_distrib_bitsel);
1257 break;
1258 }
1259 case 2:
1260 {
1261 unsigned int node_distrib_bitsel[2];
1262
1263 ja_node_sum_distribution_2d(mode, ja,
1264 old_type_index, old_type,
1265 old_node, shadow_node,
1266 n, child_node_flag,
1267 nullify_node_flag_ptr,
1268 node_distrib_bitsel);
1269 assert(!((unsigned long) new_node & JA_POOL_1D_MASK));
1270 assert(!((unsigned long) new_node & JA_POOL_2D_MASK));
1271 new_node_flag = ja_node_flag_pool_2d(new_node,
1272 new_type_index, node_distrib_bitsel);
1273 break;
1274 }
1275 default:
1276 assert(0);
1277 }
1278 } else {
1279 new_node_flag = ja_node_flag(new_node, new_type_index);
1280 }
1281
1282 dbg_printf("Recompact inherit lock from %p\n", shadow_node);
1283 new_shadow_node = rcuja_shadow_set(ja->ht, new_node_flag, shadow_node, ja, level);
1284 if (!new_shadow_node) {
1285 free_cds_ja_node(ja, new_node);
1286 return -ENOMEM;
1287 }
1288 if (fallback)
1289 new_shadow_node->fallback_removal_count =
1290 JA_FALLBACK_REMOVAL_COUNT;
1291 } else {
1292 new_node = NULL;
1293 new_node_flag = NULL;
1294 }
1295
1296 assert(mode != JA_RECOMPACT_ADD_NEXT || old_type->type_class != RCU_JA_PIGEON);
1297
1298 if (new_type_index == NODE_INDEX_NULL)
1299 goto skip_copy;
1300
1301 switch (old_type->type_class) {
1302 case RCU_JA_LINEAR:
1303 {
1304 uint8_t nr_child =
1305 ja_linear_node_get_nr_child(old_type, old_node);
1306 unsigned int i;
1307
1308 for (i = 0; i < nr_child; i++) {
1309 struct cds_ja_inode_flag *iter;
1310 uint8_t v;
1311
1312 ja_linear_node_get_ith_pos(old_type, old_node, i, &v, &iter);
1313 if (!iter)
1314 continue;
1315 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
1316 continue;
1317 ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
1318 new_shadow_node,
1319 v, iter);
1320 if (new_type->type_class == RCU_JA_POOL && ret) {
1321 goto fallback_toosmall;
1322 }
1323 assert(!ret);
1324 }
1325 break;
1326 }
1327 case RCU_JA_POOL:
1328 {
1329 unsigned int pool_nr;
1330
1331 for (pool_nr = 0; pool_nr < (1U << old_type->nr_pool_order); pool_nr++) {
1332 struct cds_ja_inode *pool =
1333 ja_pool_node_get_ith_pool(old_type,
1334 old_node, pool_nr);
1335 uint8_t nr_child =
1336 ja_linear_node_get_nr_child(old_type, pool);
1337 unsigned int j;
1338
1339 for (j = 0; j < nr_child; j++) {
1340 struct cds_ja_inode_flag *iter;
1341 uint8_t v;
1342
1343 ja_linear_node_get_ith_pos(old_type, pool,
1344 j, &v, &iter);
1345 if (!iter)
1346 continue;
1347 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
1348 continue;
1349 ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
1350 new_shadow_node,
1351 v, iter);
1352 if (new_type->type_class == RCU_JA_POOL
1353 && ret) {
1354 goto fallback_toosmall;
1355 }
1356 assert(!ret);
1357 }
1358 }
1359 break;
1360 }
1361 case RCU_JA_NULL:
1362 assert(mode == JA_RECOMPACT_ADD_NEXT);
1363 break;
1364 case RCU_JA_PIGEON:
1365 {
1366 unsigned int i;
1367
1368 assert(mode == JA_RECOMPACT_DEL);
1369 for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
1370 struct cds_ja_inode_flag *iter;
1371
1372 iter = ja_pigeon_node_get_ith_pos(old_type, old_node, i);
1373 if (!iter)
1374 continue;
1375 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
1376 continue;
1377 ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
1378 new_shadow_node,
1379 i, iter);
1380 if (new_type->type_class == RCU_JA_POOL && ret) {
1381 goto fallback_toosmall;
1382 }
1383 assert(!ret);
1384 }
1385 break;
1386 }
1387 default:
1388 assert(0);
1389 ret = -EINVAL;
1390 goto end;
1391 }
1392 skip_copy:
1393
1394 if (mode == JA_RECOMPACT_ADD_NEXT || mode == JA_RECOMPACT_ADD_SAME) {
1395 /* add node */
1396 ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
1397 new_shadow_node,
1398 n, child_node_flag);
1399 if (new_type->type_class == RCU_JA_POOL && ret) {
1400 goto fallback_toosmall;
1401 }
1402 assert(!ret);
1403 }
1404
1405 if (fallback) {
1406 dbg_printf("Using fallback for %u children, node type index: %u, mode %s\n",
1407 new_shadow_node->nr_child, old_type_index, mode == JA_RECOMPACT_ADD_NEXT ? "add_next" :
1408 (mode == JA_RECOMPACT_DEL ? "del" : "add_same"));
1409 uatomic_inc(&ja->node_fallback_count_distribution[new_shadow_node->nr_child]);
1410 }
1411
1412 /* Return pointer to new recompacted node through old_node_flag_ptr */
1413 *old_node_flag_ptr = new_node_flag;
1414 if (old_node) {
1415 int flags;
1416
1417 flags = RCUJA_SHADOW_CLEAR_FREE_NODE;
1418 /*
1419 * It is OK to free the lock associated with a node
1420 * going to NULL, since we are holding the parent lock.
1421 * This synchronizes removal with re-add of that node.
1422 */
1423 if (new_type_index == NODE_INDEX_NULL)
1424 flags |= RCUJA_SHADOW_CLEAR_FREE_LOCK;
1425 ret = rcuja_shadow_clear(ja->ht, old_node_flag, shadow_node,
1426 flags);
1427 assert(!ret);
1428 }
1429
1430 ret = 0;
1431 end:
1432 return ret;
1433
1434 fallback_toosmall:
1435 /* fallback if next pool is too small */
1436 assert(new_shadow_node);
1437 ret = rcuja_shadow_clear(ja->ht, new_node_flag, new_shadow_node,
1438 RCUJA_SHADOW_CLEAR_FREE_NODE);
1439 assert(!ret);
1440
1441 switch (mode) {
1442 case JA_RECOMPACT_ADD_SAME:
1443 /*
1444 * JA_RECOMPACT_ADD_SAME is only triggered if a linear
1445 * node within a pool has unused entries. It should
1446 * therefore _never_ be too small.
1447 */
1448 assert(0);
1449
1450 /* Fall-through */
1451 case JA_RECOMPACT_ADD_NEXT:
1452 {
1453 const struct cds_ja_type *next_type;
1454
1455 /*
1456 * Recompaction attempt on add failed. Should only
1457 * happen if target node type is pool. Caused by
1458 * hard-to-split distribution. Recompact using the next
1459 * distribution size.
1460 */
1461 assert(new_type->type_class == RCU_JA_POOL);
1462 next_type = &ja_types[new_type_index + 1];
1463 /*
1464 * Try going to the next pool size if our population
1465 * fits within its range. This is not flagged as a
1466 * fallback.
1467 */
1468 if (shadow_node->nr_child + 1 >= next_type->min_child
1469 && shadow_node->nr_child + 1 <= next_type->max_child) {
1470 new_type_index++;
1471 goto retry;
1472 } else {
1473 new_type_index++;
1474 dbg_printf("Add fallback to type %d\n", new_type_index);
1475 uatomic_inc(&ja->nr_fallback);
1476 fallback = 1;
1477 goto retry;
1478 }
1479 break;
1480 }
1481 case JA_RECOMPACT_DEL:
1482 /*
1483 * Recompaction attempt on delete failed. Should only
1484 * happen if target node type is pool. This is caused by
1485 * a hard-to-split distribution. Recompact on same node
1486 * size, but flag current node as "fallback" to ensure
1487 * we don't attempt recompaction before some activity
1488 * has reshuffled our node.
1489 */
1490 assert(new_type->type_class == RCU_JA_POOL);
1491 new_type_index = old_type_index;
1492 dbg_printf("Delete fallback keeping type %d\n", new_type_index);
1493 uatomic_inc(&ja->nr_fallback);
1494 fallback = 1;
1495 goto retry;
1496 default:
1497 assert(0);
1498 return -EINVAL;
1499 }
1500
1501 /*
1502 * Last resort fallback: pigeon.
1503 */
1504 new_type_index = (1UL << JA_TYPE_BITS) - 1;
1505 dbg_printf("Fallback to type %d\n", new_type_index);
1506 uatomic_inc(&ja->nr_fallback);
1507 fallback = 1;
1508 goto retry;
1509 }
1510
1511 /*
1512 * Return 0 on success, -EAGAIN if need to retry, or other negative
1513 * error value otherwise.
1514 */
1515 static
1516 int ja_node_set_nth(struct cds_ja *ja,
1517 struct cds_ja_inode_flag **node_flag, uint8_t n,
1518 struct cds_ja_inode_flag *child_node_flag,
1519 struct cds_ja_shadow_node *shadow_node,
1520 int level)
1521 {
1522 int ret;
1523 unsigned int type_index;
1524 const struct cds_ja_type *type;
1525 struct cds_ja_inode *node;
1526
1527 dbg_printf("ja_node_set_nth for n=%u, node %p, shadow %p\n",
1528 (unsigned int) n, ja_node_ptr(*node_flag), shadow_node);
1529
1530 node = ja_node_ptr(*node_flag);
1531 type_index = ja_node_type(*node_flag);
1532 type = &ja_types[type_index];
1533 ret = _ja_node_set_nth(type, node, *node_flag, shadow_node,
1534 n, child_node_flag);
1535 switch (ret) {
1536 case -ENOSPC:
1537 /* Not enough space in node, need to recompact to next type. */
1538 ret = ja_node_recompact(JA_RECOMPACT_ADD_NEXT, ja, type_index, type, node,
1539 shadow_node, node_flag, n, child_node_flag, NULL, level);
1540 break;
1541 case -ERANGE:
1542 /* Node needs to be recompacted. */
1543 ret = ja_node_recompact(JA_RECOMPACT_ADD_SAME, ja, type_index, type, node,
1544 shadow_node, node_flag, n, child_node_flag, NULL, level);
1545 break;
1546 }
1547 return ret;
1548 }
1549
1550 /*
1551 * Return 0 on success, -EAGAIN if need to retry, or other negative
1552 * error value otherwise.
1553 */
1554 static
1555 int ja_node_clear_ptr(struct cds_ja *ja,
1556 struct cds_ja_inode_flag **node_flag_ptr, /* Pointer to location to nullify */
1557 struct cds_ja_inode_flag **parent_node_flag_ptr, /* Address of parent ptr in its parent */
1558 struct cds_ja_shadow_node *shadow_node, /* of parent */
1559 uint8_t n, int level)
1560 {
1561 int ret;
1562 unsigned int type_index;
1563 const struct cds_ja_type *type;
1564 struct cds_ja_inode *node;
1565
1566 dbg_printf("ja_node_clear_ptr for node %p, shadow %p, target ptr %p\n",
1567 ja_node_ptr(*parent_node_flag_ptr), shadow_node, node_flag_ptr);
1568
1569 node = ja_node_ptr(*parent_node_flag_ptr);
1570 type_index = ja_node_type(*parent_node_flag_ptr);
1571 type = &ja_types[type_index];
1572 ret = _ja_node_clear_ptr(type, node, *parent_node_flag_ptr, shadow_node, node_flag_ptr, n);
1573 if (ret == -EFBIG) {
1574 /* Should try recompaction. */
1575 ret = ja_node_recompact(JA_RECOMPACT_DEL, ja, type_index, type, node,
1576 shadow_node, parent_node_flag_ptr, n, NULL,
1577 node_flag_ptr, level);
1578 }
1579 return ret;
1580 }
1581
1582 struct cds_hlist_head cds_ja_lookup(struct cds_ja *ja, uint64_t key)
1583 {
1584 unsigned int tree_depth, i;
1585 struct cds_ja_inode_flag *node_flag;
1586 struct cds_hlist_head head = { NULL };
1587
1588 if (caa_unlikely(key > ja->key_max))
1589 return head;
1590 tree_depth = ja->tree_depth;
1591 node_flag = rcu_dereference(ja->root);
1592
1593 /* level 0: root node */
1594 if (!ja_node_ptr(node_flag))
1595 return head;
1596
1597 for (i = 1; i < tree_depth; i++) {
1598 uint8_t iter_key;
1599
1600 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1)));
1601 node_flag = ja_node_get_nth(node_flag, NULL, iter_key);
1602 dbg_printf("cds_ja_lookup iter key lookup %u finds node_flag %p\n",
1603 (unsigned int) iter_key, node_flag);
1604 if (!ja_node_ptr(node_flag))
1605 return head;
1606 }
1607
1608 /* Last level lookup succeeded. We got an actual match. */
1609 head.next = (struct cds_hlist_node *) node_flag;
1610 return head;
1611 }
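
/*
 * Read-side usage sketch (illustrative only; "struct myentry" and its
 * "node" member are assumptions of this example, not part of the API):
 *
 *	struct myentry {
 *		struct cds_ja_node node;
 *		...
 *	};
 *
 *	struct cds_hlist_head head;
 *	struct cds_hlist_node *pos;
 *
 *	rcu_read_lock();
 *	head = cds_ja_lookup(ja, key);
 *	for (pos = head.next; pos; pos = rcu_dereference(pos->next)) {
 *		struct myentry *entry =
 *			caa_container_of(pos, struct myentry, node.list);
 *		... use entry ...
 *	}
 *	rcu_read_unlock();
 *
 * The returned head chains all duplicates that were added under the
 * same key.
 */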
1612
1613 /*
1614 * We reached an unpopulated node. Create it and the children we need,
1615 * and then attach the entire branch to the current node. This may
1616 * trigger recompaction of the current node. Locks needed: node lock
1617 * (for add), and, possibly, parent node lock (to update pointer due to
1618 * node recompaction).
1619 *
1620 * First take node lock, check if recompaction is needed, then take
1621 * parent lock (if needed). Then we can proceed to create the new
1622 * branch. Publish the new branch, and release locks.
1623 * TODO: we currently always take the parent lock even when not needed.
1624 */
1625 static
1626 int ja_attach_node(struct cds_ja *ja,
1627 struct cds_ja_inode_flag **attach_node_flag_ptr,
1628 struct cds_ja_inode_flag *attach_node_flag,
1629 struct cds_ja_inode_flag *parent_attach_node_flag,
1630 struct cds_ja_inode_flag **old_node_flag_ptr,
1631 struct cds_ja_inode_flag *old_node_flag,
1632 uint64_t key,
1633 unsigned int level,
1634 struct cds_ja_node *child_node)
1635 {
1636 struct cds_ja_shadow_node *shadow_node = NULL,
1637 *parent_shadow_node = NULL;
1638 struct cds_hlist_head head;
1639 struct cds_ja_inode_flag *iter_node_flag, *iter_dest_node_flag;
1640 int ret, i;
1641 struct cds_ja_inode_flag *created_nodes[JA_MAX_DEPTH];
1642 int nr_created_nodes = 0;
1643
1644 dbg_printf("Attach node at level %u (old_node_flag %p, attach_node_flag_ptr %p attach_node_flag %p, parent_attach_node_flag %p)\n",
1645 level, old_node_flag, attach_node_flag_ptr, attach_node_flag, parent_attach_node_flag);
1646
1647 assert(!old_node_flag);
1648 if (attach_node_flag) {
1649 shadow_node = rcuja_shadow_lookup_lock(ja->ht, attach_node_flag);
1650 if (!shadow_node) {
1651 ret = -EAGAIN;
1652 goto end;
1653 }
1654 }
1655 if (parent_attach_node_flag) {
1656 parent_shadow_node = rcuja_shadow_lookup_lock(ja->ht,
1657 parent_attach_node_flag);
1658 if (!parent_shadow_node) {
1659 ret = -EAGAIN;
1660 goto unlock_shadow;
1661 }
1662 }
1663
1664 if (old_node_flag_ptr && ja_node_ptr(*old_node_flag_ptr)) {
1665 /*
1666 * Target node has been updated between RCU lookup and
1667 * lock acquisition. We need to re-try lookup and
1668 * attach.
1669 */
1670 ret = -EAGAIN;
1671 goto unlock_parent;
1672 }
1673
1674 /*
1675 * Perform a lookup query to handle the case where
1676 * old_node_flag_ptr is NULL. We cannot use it to check if the
1677 * node has been populated between RCU lookup and mutex
1678 * acquisition.
1679 */
1680 if (!old_node_flag_ptr) {
1681 uint8_t iter_key;
1682 struct cds_ja_inode_flag *lookup_node_flag;
1683 struct cds_ja_inode_flag **lookup_node_flag_ptr;
1684
1685 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (ja->tree_depth - level)));
1686 lookup_node_flag = ja_node_get_nth(attach_node_flag,
1687 &lookup_node_flag_ptr,
1688 iter_key);
1689 if (lookup_node_flag) {
1690 ret = -EEXIST;
1691 goto unlock_parent;
1692 }
1693 }
1694
1695 if (attach_node_flag_ptr && ja_node_ptr(*attach_node_flag_ptr) !=
1696 ja_node_ptr(attach_node_flag)) {
1697 /*
1698 * Target node has been updated between RCU lookup and
1699 * lock acquisition. We need to re-try lookup and
1700 * attach.
1701 */
1702 ret = -EAGAIN;
1703 goto unlock_parent;
1704 }
1705
1706 /* Create new branch, starting from bottom */
1707 CDS_INIT_HLIST_HEAD(&head);
1708 cds_hlist_add_head_rcu(&child_node->list, &head);
1709 iter_node_flag = (struct cds_ja_inode_flag *) head.next;
1710
1711 for (i = ja->tree_depth - 1; i >= (int) level; i--) {
1712 uint8_t iter_key;
1713
1714 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (ja->tree_depth - i - 1)));
1715 dbg_printf("branch creation level %d, key %u\n",
1716 i, (unsigned int) iter_key);
1717 iter_dest_node_flag = NULL;
1718 ret = ja_node_set_nth(ja, &iter_dest_node_flag,
1719 iter_key,
1720 iter_node_flag,
1721 NULL, i);
1722 if (ret) {
1723 dbg_printf("branch creation error %d\n", ret);
1724 goto check_error;
1725 }
1726 created_nodes[nr_created_nodes++] = iter_dest_node_flag;
1727 iter_node_flag = iter_dest_node_flag;
1728 }
1729 assert(level > 0);
1730
1731 /* Publish branch */
1732 if (level == 1) {
1733 /*
1734 * Attaching to root node.
1735 */
1736 rcu_assign_pointer(ja->root, iter_node_flag);
1737 } else {
1738 uint8_t iter_key;
1739
1740 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (ja->tree_depth - level)));
1741 dbg_printf("publish branch at level %d, key %u\n",
1742 level - 1, (unsigned int) iter_key);
1743 /* We need to use set_nth on the previous level. */
1744 iter_dest_node_flag = attach_node_flag;
1745 ret = ja_node_set_nth(ja, &iter_dest_node_flag,
1746 iter_key,
1747 iter_node_flag,
1748 shadow_node, level - 1);
1749 if (ret) {
1750 dbg_printf("branch publish error %d\n", ret);
1751 goto check_error;
1752 }
1753 /*
1754 * Attach branch
1755 */
1756 rcu_assign_pointer(*attach_node_flag_ptr, iter_dest_node_flag);
1757 }
1758
1759 /* Success */
1760 ret = 0;
1761
1762 check_error:
1763 if (ret) {
1764 for (i = 0; i < nr_created_nodes; i++) {
1765 int tmpret;
1766 int flags;
1767
1768 flags = RCUJA_SHADOW_CLEAR_FREE_LOCK;
1769 if (i)
1770 flags |= RCUJA_SHADOW_CLEAR_FREE_NODE;
1771 tmpret = rcuja_shadow_clear(ja->ht,
1772 created_nodes[i],
1773 NULL,
1774 flags);
1775 assert(!tmpret);
1776 }
1777 }
1778 unlock_parent:
1779 if (parent_shadow_node)
1780 rcuja_shadow_unlock(parent_shadow_node);
1781 unlock_shadow:
1782 if (shadow_node)
1783 rcuja_shadow_unlock(shadow_node);
1784 end:
1785 return ret;
1786 }
1787
1788 /*
1789 * Lock the parent containing the hlist head pointer, and add node to list of
1790  * duplicates. Failure can happen if a concurrent update changes the
1791 * parent before we get the lock. We return -EAGAIN in that case.
1792 * Return 0 on success, negative error value on failure.
1793 */
1794 static
1795 int ja_chain_node(struct cds_ja *ja,
1796 struct cds_ja_inode_flag *parent_node_flag,
1797 struct cds_ja_inode_flag **node_flag_ptr,
1798 struct cds_ja_inode_flag *node_flag,
1799 struct cds_ja_node *node)
1800 {
1801 struct cds_ja_shadow_node *shadow_node;
1802 int ret = 0;
1803
1804 shadow_node = rcuja_shadow_lookup_lock(ja->ht, parent_node_flag);
1805 if (!shadow_node) {
1806 return -EAGAIN;
1807 }
1808 if (ja_node_ptr(*node_flag_ptr) != ja_node_ptr(node_flag)) {
1809 ret = -EAGAIN;
1810 goto end;
1811 }
1812 cds_hlist_add_head_rcu(&node->list, (struct cds_hlist_head *) node_flag_ptr);
1813 end:
1814 rcuja_shadow_unlock(shadow_node);
1815 return ret;
1816 }
1817
1818 static
1819 int _cds_ja_add(struct cds_ja *ja, uint64_t key,
1820 struct cds_ja_node *new_node,
1821 struct cds_ja_node **unique_node_ret)
1822 {
1823 unsigned int tree_depth, i;
1824 struct cds_ja_inode_flag *attach_node_flag,
1825 *parent_node_flag,
1826 *parent2_node_flag,
1827 *node_flag,
1828 *parent_attach_node_flag;
1829 struct cds_ja_inode_flag **attach_node_flag_ptr,
1830 **parent_node_flag_ptr,
1831 **node_flag_ptr;
1832 int ret;
1833
1834 if (caa_unlikely(key > ja->key_max)) {
1835 return -EINVAL;
1836 }
1837 tree_depth = ja->tree_depth;
1838
1839 retry:
1840 dbg_printf("cds_ja_add attempt: key %" PRIu64 ", node %p\n",
1841 key, new_node);
1842 parent2_node_flag = NULL;
1843 parent_node_flag =
1844 (struct cds_ja_inode_flag *) &ja->root; /* Use root ptr address as key for mutex */
1845 parent_node_flag_ptr = NULL;
1846 node_flag = rcu_dereference(ja->root);
1847 node_flag_ptr = &ja->root;
1848
1849 /* Iterate on all internal levels */
1850 for (i = 1; i < tree_depth; i++) {
1851 uint8_t iter_key;
1852
1853 if (!ja_node_ptr(node_flag))
1854 break;
1855 dbg_printf("cds_ja_add iter parent2_node_flag %p parent_node_flag %p node_flag_ptr %p node_flag %p\n",
1856 parent2_node_flag, parent_node_flag, node_flag_ptr, node_flag);
1857 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1)));
1858 parent2_node_flag = parent_node_flag;
1859 parent_node_flag = node_flag;
1860 parent_node_flag_ptr = node_flag_ptr;
1861 node_flag = ja_node_get_nth(node_flag,
1862 &node_flag_ptr,
1863 iter_key);
1864 }
1865
1866 /*
1867  * We reached either the bottom of the tree or an internal NULL node;
1868  * simply add the node to the last internal level, or chain it if the
1869  * key is already present.
1870 */
1871 if (!ja_node_ptr(node_flag)) {
1872 dbg_printf("cds_ja_add NULL parent2_node_flag %p parent_node_flag %p node_flag_ptr %p node_flag %p\n",
1873 parent2_node_flag, parent_node_flag, node_flag_ptr, node_flag);
1874
1875 attach_node_flag = parent_node_flag;
1876 attach_node_flag_ptr = parent_node_flag_ptr;
1877 parent_attach_node_flag = parent2_node_flag;
1878
1879 ret = ja_attach_node(ja, attach_node_flag_ptr,
1880 attach_node_flag,
1881 parent_attach_node_flag,
1882 node_flag_ptr,
1883 node_flag,
1884 key, i, new_node);
1885 } else {
1886 if (unique_node_ret) {
1887 *unique_node_ret = (struct cds_ja_node *) ja_node_ptr(node_flag);
1888 return -EEXIST;
1889 }
1890
1891 dbg_printf("cds_ja_add duplicate parent2_node_flag %p parent_node_flag %p node_flag_ptr %p node_flag %p\n",
1892 parent2_node_flag, parent_node_flag, node_flag_ptr, node_flag);
1893
1894 attach_node_flag = node_flag;
1895 attach_node_flag_ptr = node_flag_ptr;
1896 parent_attach_node_flag = parent_node_flag;
1897
1898 ret = ja_chain_node(ja,
1899 parent_attach_node_flag,
1900 attach_node_flag_ptr,
1901 attach_node_flag,
1902 new_node);
1903 }
1904 if (ret == -EAGAIN || ret == -EEXIST)
1905 goto retry;
1906
1907 return ret;
1908 }
1909
1910 int cds_ja_add(struct cds_ja *ja, uint64_t key,
1911 struct cds_ja_node *new_node)
1912 {
1913 return _cds_ja_add(ja, key, new_node, NULL);
1914 }
1915
1916 struct cds_ja_node *cds_ja_add_unique(struct cds_ja *ja, uint64_t key,
1917 struct cds_ja_node *new_node)
1918 {
1919 int ret;
1920 struct cds_ja_node *ret_node;
1921
1922 ret = _cds_ja_add(ja, key, new_node, &ret_node);
1923 if (ret == -EEXIST)
1924 return ret_node;
1925 else
1926 return new_node;
1927 }
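
/*
 * Illustrative usage sketch (not part of the library): a caller typically
 * embeds struct cds_ja_node within its own structure and inserts it under a
 * key with cds_ja_add() or cds_ja_add_unique() (defined above).
 * "struct my_item" and my_item_insert() are hypothetical caller-side names;
 * the caller is assumed to include its chosen urcu flavor header (for
 * rcu_read_lock()/rcu_read_unlock()) and <stdlib.h>. Kept under #if 0 so it
 * is never compiled into the library.
 */
#if 0	/* usage example only */
struct my_item {
	struct cds_ja_node ja_node;	/* embedded judy array node */
	int value;			/* hypothetical payload */
};

static int my_item_insert(struct cds_ja *ja, uint64_t key, int value)
{
	struct my_item *item;
	struct cds_ja_node *existing;

	item = calloc(1, sizeof(*item));
	if (!item)
		return -ENOMEM;
	item->value = value;
	rcu_read_lock();
	/* add_unique returns the pre-existing node if the key is taken. */
	existing = cds_ja_add_unique(ja, key, &item->ja_node);
	rcu_read_unlock();
	if (existing != &item->ja_node) {
		free(item);	/* our node was not published */
		return -EEXIST;
	}
	return 0;
}
#endif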
1928
1929 /*
1930  * Note: there is no need to look up the pointer address associated with
1931  * each node's nth item after taking the lock: it has already been done by
1932  * cds_ja_del while holding the rcu read-side lock, and our node rules
1933  * ensure that when a matching value -> pointer pair is found in a node, it
1934  * is _NEVER_ changed for that node without recompaction, and recompaction
1935  * reallocates the node.
1936  * However, when a child is removed from "linear" nodes, its pointer
1937  * is set to NULL. We therefore check, while holding the locks, whether this
1938  * pointer is NULL, and return -ENOENT to the caller if that is the case.
1939 */
1940 static
1941 int ja_detach_node(struct cds_ja *ja,
1942 struct cds_ja_inode_flag **snapshot,
1943 struct cds_ja_inode_flag ***snapshot_ptr,
1944 uint8_t *snapshot_n,
1945 int nr_snapshot,
1946 uint64_t key,
1947 struct cds_ja_node *node)
1948 {
1949 struct cds_ja_shadow_node *shadow_nodes[JA_MAX_DEPTH];
1950 struct cds_ja_inode_flag **node_flag_ptr = NULL,
1951 *parent_node_flag = NULL,
1952 **parent_node_flag_ptr = NULL;
1953 struct cds_ja_inode_flag *iter_node_flag;
1954 int ret, i, nr_shadow = 0, nr_clear = 0, nr_branch = 0;
1955 uint8_t n = 0;
1956
1957 assert(nr_snapshot == ja->tree_depth + 1);
1958
1959 /*
1960  * From the last internal level node going up, get the node
1961  * lock, and check if the node has only one child left. If so,
1962  * we continue iterating upward. When we reach a node
1963  * which has more than one child left, we lock the parent, and
1964  * proceed to the node deletion (removing its children too).
1965 */
1966 for (i = nr_snapshot - 2; i >= 1; i--) {
1967 struct cds_ja_shadow_node *shadow_node;
1968
1969 shadow_node = rcuja_shadow_lookup_lock(ja->ht,
1970 snapshot[i]);
1971 if (!shadow_node) {
1972 ret = -EAGAIN;
1973 goto end;
1974 }
1975 shadow_nodes[nr_shadow++] = shadow_node;
1976
1977 /*
1978 * Check if node has been removed between RCU
1979 * lookup and lock acquisition.
1980 */
1981 assert(snapshot_ptr[i + 1]);
1982 if (ja_node_ptr(*snapshot_ptr[i + 1])
1983 != ja_node_ptr(snapshot[i + 1])) {
1984 ret = -ENOENT;
1985 goto end;
1986 }
1987
1988 assert(shadow_node->nr_child > 0);
1989 if (shadow_node->nr_child == 1 && i > 1)
1990 nr_clear++;
1991 nr_branch++;
1992 if (shadow_node->nr_child > 1 || i == 1) {
1993 /* Lock parent and break */
1994 shadow_node = rcuja_shadow_lookup_lock(ja->ht,
1995 snapshot[i - 1]);
1996 if (!shadow_node) {
1997 ret = -EAGAIN;
1998 goto end;
1999 }
2000 shadow_nodes[nr_shadow++] = shadow_node;
2001
2002 /*
2003 * Check if node has been removed between RCU
2004 * lookup and lock acquisition.
2005 */
2006 assert(snapshot_ptr[i]);
2007 if (ja_node_ptr(*snapshot_ptr[i])
2008 != ja_node_ptr(snapshot[i])) {
2009 ret = -ENOENT;
2010 goto end;
2011 }
2012
2013 node_flag_ptr = snapshot_ptr[i + 1];
2014 n = snapshot_n[i + 1];
2015 parent_node_flag_ptr = snapshot_ptr[i];
2016 parent_node_flag = snapshot[i];
2017
2018 if (i > 1) {
2019 /*
2020 * Lock parent's parent, in case we need
2021 * to recompact parent.
2022 */
2023 shadow_node = rcuja_shadow_lookup_lock(ja->ht,
2024 snapshot[i - 2]);
2025 if (!shadow_node) {
2026 ret = -EAGAIN;
2027 goto end;
2028 }
2029 shadow_nodes[nr_shadow++] = shadow_node;
2030
2031 /*
2032 * Check if node has been removed between RCU
2033 * lookup and lock acquisition.
2034 */
2035 assert(snapshot_ptr[i - 1]);
2036 if (ja_node_ptr(*snapshot_ptr[i - 1])
2037 != ja_node_ptr(snapshot[i - 1])) {
2038 ret = -ENOENT;
2039 goto end;
2040 }
2041 }
2042
2043 break;
2044 }
2045 }
2046
2047 /*
2048  * At this point, we want to delete all nodes that are about to
2049  * be removed from shadow_nodes (except the last one, which is
2050  * either the root or the parent of the uppermost node with one
2051  * child). It is OK to free the lock here, because the RCU read lock
2052  * is held, and the actual free is only performed from call_rcu.
2053 */
2054
2055 for (i = 0; i < nr_clear; i++) {
2056 ret = rcuja_shadow_clear(ja->ht,
2057 shadow_nodes[i]->node_flag,
2058 shadow_nodes[i],
2059 RCUJA_SHADOW_CLEAR_FREE_NODE
2060 | RCUJA_SHADOW_CLEAR_FREE_LOCK);
2061 assert(!ret);
2062 }
2063
2064 iter_node_flag = parent_node_flag;
2065 /* Remove from parent */
2066 ret = ja_node_clear_ptr(ja,
2067 node_flag_ptr, /* Pointer to location to nullify */
2068  &iter_node_flag, /* Old parent ptr in, new parent ptr out */
2069  shadow_nodes[nr_branch - 1], /* shadow node of parent */
2070 n, nr_branch - 1);
2071 if (ret)
2072 goto end;
2073
2074 dbg_printf("ja_detach_node: publish %p instead of %p\n",
2075 iter_node_flag, *parent_node_flag_ptr);
2076  /* Update the pointer to the parent node within its own parent */
2077 rcu_assign_pointer(*parent_node_flag_ptr, iter_node_flag);
2078
2079 end:
2080 for (i = 0; i < nr_shadow; i++)
2081 rcuja_shadow_unlock(shadow_nodes[i]);
2082 return ret;
2083 }
2084
2085 static
2086 int ja_unchain_node(struct cds_ja *ja,
2087 struct cds_ja_inode_flag *parent_node_flag,
2088 struct cds_ja_inode_flag **node_flag_ptr,
2089 struct cds_ja_inode_flag *node_flag,
2090 struct cds_ja_node *node)
2091 {
2092 struct cds_ja_shadow_node *shadow_node;
2093 struct cds_hlist_node *hlist_node;
2094 struct cds_hlist_head hlist_head;
2095 int ret = 0, count = 0, found = 0;
2096
2097 shadow_node = rcuja_shadow_lookup_lock(ja->ht, parent_node_flag);
2098 if (!shadow_node)
2099 return -EAGAIN;
2100 if (ja_node_ptr(*node_flag_ptr) != ja_node_ptr(node_flag)) {
2101 ret = -EAGAIN;
2102 goto end;
2103 }
2104 hlist_head.next = (struct cds_hlist_node *) ja_node_ptr(node_flag);
2105 /*
2106  * Retry if another thread removed all but one of the duplicates
2107  * since our check (that check was performed without holding the lock).
2108  * Ensure that the node we are about to remove is still in the
2109  * list (while holding the lock).
2110 */
2111 cds_hlist_for_each_rcu(hlist_node, &hlist_head) {
2112 if (count == 0) {
2113 /* FIXME: currently a work-around */
2114 hlist_node->prev = (struct cds_hlist_node *) node_flag_ptr;
2115 }
2116 count++;
2117 if (hlist_node == &node->list)
2118 found++;
2119 }
2120 assert(found <= 1);
2121 if (!found || count == 1) {
2122 ret = -EAGAIN;
2123 goto end;
2124 }
2125 cds_hlist_del_rcu(&node->list);
2126 /*
2127 * Validate that we indeed removed the node from linked list.
2128 */
2129 assert(ja_node_ptr(*node_flag_ptr) != (struct cds_ja_inode *) node);
2130 end:
2131 rcuja_shadow_unlock(shadow_node);
2132 return ret;
2133 }
2134
2135 /*
2136 * Called with RCU read lock held.
2137 */
2138 int cds_ja_del(struct cds_ja *ja, uint64_t key,
2139 struct cds_ja_node *node)
2140 {
2141 unsigned int tree_depth, i;
2142 struct cds_ja_inode_flag *snapshot[JA_MAX_DEPTH];
2143 struct cds_ja_inode_flag **snapshot_ptr[JA_MAX_DEPTH];
2144 uint8_t snapshot_n[JA_MAX_DEPTH];
2145 struct cds_ja_inode_flag *node_flag;
2146 struct cds_ja_inode_flag **prev_node_flag_ptr,
2147 **node_flag_ptr;
2148 int nr_snapshot;
2149 int ret;
2150
2151 if (caa_unlikely(key > ja->key_max))
2152 return -EINVAL;
2153 tree_depth = ja->tree_depth;
2154
2155 retry:
2156 nr_snapshot = 0;
2157 dbg_printf("cds_ja_del attempt: key %" PRIu64 ", node %p\n",
2158 key, node);
2159
2160 /* snapshot for level 0 is only for shadow node lookup */
2161 snapshot_n[0] = 0;
2162 snapshot_n[1] = 0;
2163 snapshot_ptr[nr_snapshot] = NULL;
2164 snapshot[nr_snapshot++] = (struct cds_ja_inode_flag *) &ja->root;
2165 node_flag = rcu_dereference(ja->root);
2166 prev_node_flag_ptr = &ja->root;
2167 node_flag_ptr = &ja->root;
2168
2169 /* Iterate on all internal levels */
2170 for (i = 1; i < tree_depth; i++) {
2171 uint8_t iter_key;
2172
2173 dbg_printf("cds_ja_del iter node_flag %p\n",
2174 node_flag);
2175 if (!ja_node_ptr(node_flag)) {
2176 return -ENOENT;
2177 }
2178 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1)));
2179 snapshot_n[nr_snapshot + 1] = iter_key;
2180 snapshot_ptr[nr_snapshot] = prev_node_flag_ptr;
2181 snapshot[nr_snapshot++] = node_flag;
2182 node_flag = ja_node_get_nth(node_flag,
2183 &node_flag_ptr,
2184 iter_key);
2185 if (node_flag)
2186 prev_node_flag_ptr = node_flag_ptr;
2187 dbg_printf("cds_ja_del iter key lookup %u finds node_flag %p, prev_node_flag_ptr %p\n",
2188 (unsigned int) iter_key, node_flag,
2189 prev_node_flag_ptr);
2190 }
2191 /*
2192  * We reached the bottom of the tree; try to find the node we are
2193  * trying to remove. Fail if we cannot find it.
2194 */
2195 if (!ja_node_ptr(node_flag)) {
2196 dbg_printf("cds_ja_del: no node found for key %" PRIu64 "\n",
2197 key);
2198 return -ENOENT;
2199 } else {
2200 struct cds_hlist_head hlist_head;
2201 struct cds_hlist_node *hlist_node;
2202 struct cds_ja_node *entry, *match = NULL;
2203 int count = 0;
2204
2205 hlist_head.next =
2206 (struct cds_hlist_node *) ja_node_ptr(node_flag);
2207 cds_hlist_for_each_entry_rcu(entry,
2208 hlist_node,
2209 &hlist_head,
2210 list) {
2211 dbg_printf("cds_ja_del: compare %p with entry %p\n", node, entry);
2212 if (entry == node)
2213 match = entry;
2214 count++;
2215 }
2216 if (!match) {
2217 dbg_printf("cds_ja_del: no node match for node %p key %" PRIu64 "\n", node, key);
2218 return -ENOENT;
2219 }
2220 assert(count > 0);
2221 if (count == 1) {
2222 /*
2223  * Removing the last of the duplicates. The last snapshot
2224  * does not have a shadow node (external leaves).
2225 */
2226 snapshot_ptr[nr_snapshot] = prev_node_flag_ptr;
2227 snapshot[nr_snapshot++] = node_flag;
2228 ret = ja_detach_node(ja, snapshot, snapshot_ptr,
2229 snapshot_n, nr_snapshot, key, node);
2230 } else {
2231 ret = ja_unchain_node(ja, snapshot[nr_snapshot - 1],
2232 node_flag_ptr, node_flag, match);
2233 }
2234 }
2235 /*
2236  * Explanation of -ENOENT handling: it is caused by a concurrent delete
2237  * occurring between the RCU lookup and the actual removal. We need to
2238  * redo the lookup and removal attempt.
2239 */
2240 if (ret == -EAGAIN || ret == -ENOENT)
2241 goto retry;
2242 return ret;
2243 }
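
/*
 * Illustrative removal sketch (not part of the library): cds_ja_del()
 * must be called with the RCU read lock held (see above), and the removed
 * node may still be referenced by concurrent readers, so it can only be
 * reclaimed after a grace period, e.g. through the caller's flavor of
 * call_rcu(). "struct my_item" is the same hypothetical caller structure
 * as in the earlier sketch; the rcu_head used is the "head" member
 * embedded in struct cds_ja_node, as also used by
 * rcuja_free_all_children() below. Kept under #if 0, never compiled.
 */
#if 0	/* usage example only */
static void my_item_free_cb(struct rcu_head *head)
{
	struct cds_ja_node *ja_node =
		caa_container_of(head, struct cds_ja_node, head);
	struct my_item *item =
		caa_container_of(ja_node, struct my_item, ja_node);

	free(item);
}

static int my_item_remove(struct cds_ja *ja, uint64_t key,
		struct my_item *item)
{
	int ret;

	rcu_read_lock();
	ret = cds_ja_del(ja, key, &item->ja_node);
	rcu_read_unlock();
	if (!ret)
		call_rcu(&item->ja_node.head, my_item_free_cb);
	return ret;
}
#endif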
2244
2245 struct cds_ja *_cds_ja_new(unsigned int key_bits,
2246 const struct rcu_flavor_struct *flavor)
2247 {
2248 struct cds_ja *ja;
2249 int ret;
2250 struct cds_ja_shadow_node *root_shadow_node;
2251
2252  ja = calloc(1, sizeof(*ja));
2253 if (!ja)
2254 goto ja_error;
2255
2256 switch (key_bits) {
2257 case 8:
2258 case 16:
2259 case 24:
2260 case 32:
2261 case 40:
2262 case 48:
2263 case 56:
2264 ja->key_max = (1ULL << key_bits) - 1;
2265 break;
2266 case 64:
2267 ja->key_max = UINT64_MAX;
2268 break;
2269 default:
2270 goto check_error;
2271 }
2272
2273 /* ja->root is NULL */
2274 /* tree_depth 0 is for pointer to root node */
2275 ja->tree_depth = (key_bits >> JA_LOG2_BITS_PER_BYTE) + 1;
2276 assert(ja->tree_depth <= JA_MAX_DEPTH);
2277 ja->ht = rcuja_create_ht(flavor);
2278 if (!ja->ht)
2279 goto ht_error;
2280
2281 /*
2282  * Note: we should not free this node until the judy array is destroyed.
2283 */
2284 root_shadow_node = rcuja_shadow_set(ja->ht,
2285 (struct cds_ja_inode_flag *) &ja->root,
2286 NULL, ja, 0);
2287 if (!root_shadow_node) {
2288 ret = -ENOMEM;
2289 goto ht_node_error;
2290 }
2291
2292 return ja;
2293
2294 ht_node_error:
2295 ret = rcuja_delete_ht(ja->ht);
2296 assert(!ret);
2297 ht_error:
2298 check_error:
2299 free(ja);
2300 ja_error:
2301 return NULL;
2302 }
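
/*
 * Illustrative creation sketch (not part of the library): build a judy
 * array keyed on 32-bit values. The flavor pointer is the
 * struct rcu_flavor_struct provided by the caller's chosen urcu flavor;
 * a public wrapper in the header would normally supply it. key_bits must
 * be one of the values accepted by the switch in _cds_ja_new() above;
 * keys larger than key_max are later rejected with -EINVAL by add/del.
 * Kept under #if 0, never compiled.
 */
#if 0	/* usage example only */
static struct cds_ja *my_array_create(const struct rcu_flavor_struct *flavor)
{
	/* 32-bit keys: accepts keys 0 .. UINT32_MAX */
	return _cds_ja_new(32, flavor);
}
#endif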
2303
2304 /*
2305 * Called from RCU read-side CS.
2306 */
2307 __attribute__((visibility("protected")))
2308 void rcuja_free_all_children(struct cds_ja_shadow_node *shadow_node,
2309 struct cds_ja_inode_flag *node_flag,
2310 void (*free_node_cb)(struct rcu_head *head))
2311 {
2312 const struct rcu_flavor_struct *flavor;
2313 unsigned int type_index;
2314 struct cds_ja_inode *node;
2315 const struct cds_ja_type *type;
2316
2317 flavor = cds_lfht_rcu_flavor(shadow_node->ja->ht);
2318 node = ja_node_ptr(node_flag);
2319 assert(node != NULL);
2320 type_index = ja_node_type(node_flag);
2321 type = &ja_types[type_index];
2322
2323 switch (type->type_class) {
2324 case RCU_JA_LINEAR:
2325 {
2326 uint8_t nr_child =
2327 ja_linear_node_get_nr_child(type, node);
2328 unsigned int i;
2329
2330 for (i = 0; i < nr_child; i++) {
2331 struct cds_ja_inode_flag *iter;
2332 struct cds_hlist_head head;
2333 struct cds_ja_node *entry;
2334 struct cds_hlist_node *pos, *tmp;
2335 uint8_t v;
2336
2337 ja_linear_node_get_ith_pos(type, node, i, &v, &iter);
2338 if (!iter)
2339 continue;
2340 head.next = (struct cds_hlist_node *) iter;
2341 cds_hlist_for_each_entry_safe(entry, pos, tmp, &head, list) {
2342 flavor->update_call_rcu(&entry->head, free_node_cb);
2343 }
2344 }
2345 break;
2346 }
2347 case RCU_JA_POOL:
2348 {
2349 unsigned int pool_nr;
2350
2351 for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) {
2352 struct cds_ja_inode *pool =
2353 ja_pool_node_get_ith_pool(type, node, pool_nr);
2354 uint8_t nr_child =
2355 ja_linear_node_get_nr_child(type, pool);
2356 unsigned int j;
2357
2358 for (j = 0; j < nr_child; j++) {
2359 struct cds_ja_inode_flag *iter;
2360 struct cds_hlist_head head;
2361 struct cds_ja_node *entry;
2362 struct cds_hlist_node *pos, *tmp;
2363 uint8_t v;
2364
2365 ja_linear_node_get_ith_pos(type, pool, j, &v, &iter);
2366 if (!iter)
2367 continue;
2368 head.next = (struct cds_hlist_node *) iter;
2369 cds_hlist_for_each_entry_safe(entry, pos, tmp, &head, list) {
2370 flavor->update_call_rcu(&entry->head, free_node_cb);
2371 }
2372 }
2373 }
2374 break;
2375 }
2376 case RCU_JA_NULL:
2377 break;
2378 case RCU_JA_PIGEON:
2379 {
2380 unsigned int i;
2381
2382 for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
2383 struct cds_ja_inode_flag *iter;
2384 struct cds_hlist_head head;
2385 struct cds_ja_node *entry;
2386 struct cds_hlist_node *pos, *tmp;
2387
2388 iter = ja_pigeon_node_get_ith_pos(type, node, i);
2389 if (!iter)
2390 continue;
2391 head.next = (struct cds_hlist_node *) iter;
2392 cds_hlist_for_each_entry_safe(entry, pos, tmp, &head, list) {
2393 flavor->update_call_rcu(&entry->head, free_node_cb);
2394 }
2395 }
2396 break;
2397 }
2398 default:
2399 assert(0);
2400 }
2401 }
2402
2403 static
2404 void print_debug_fallback_distribution(struct cds_ja *ja)
2405 {
2406 int i;
2407
2408 fprintf(stderr, "Fallback node distribution:\n");
2409 for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
2410 if (!ja->node_fallback_count_distribution[i])
2411 continue;
2412  fprintf(stderr, " %3d: %4lu\n",
2413  i, ja->node_fallback_count_distribution[i]);
2414 }
2415 }
2416
2417 static
2418 int ja_final_checks(struct cds_ja *ja)
2419 {
2420 double fallback_ratio;
2421 unsigned long na, nf, nr_fallback;
2422 int ret = 0;
2423
2424 fallback_ratio = (double) uatomic_read(&ja->nr_fallback);
2425 fallback_ratio /= (double) uatomic_read(&ja->nr_nodes_allocated);
2426 nr_fallback = uatomic_read(&ja->nr_fallback);
2427 if (nr_fallback)
2428 fprintf(stderr,
2429 "[warning] RCU Judy Array used %lu fallback node(s) (ratio: %g)\n",
2430 uatomic_read(&ja->nr_fallback),
2431 fallback_ratio);
2432
2433 na = uatomic_read(&ja->nr_nodes_allocated);
2434 nf = uatomic_read(&ja->nr_nodes_freed);
2435 dbg_printf("Nodes allocated: %lu, Nodes freed: %lu.\n", na, nf);
2436 if (nr_fallback)
2437 print_debug_fallback_distribution(ja);
2438
2439 if (na != nf) {
2440 fprintf(stderr, "[error] Judy array leaked %ld nodes. Allocated: %lu, freed: %lu.\n",
2441  (long) (na - nf), na, nf);
2442 ret = -1;
2443 }
2444 return ret;
2445 }
2446
2447 /*
2448 * There should be no more concurrent add to the judy array while it is
2449  * There should be no more concurrent adds to the judy array while it is
2450 */
2451 int cds_ja_destroy(struct cds_ja *ja,
2452 void (*free_node_cb)(struct rcu_head *head))
2453 {
2454 const struct rcu_flavor_struct *flavor;
2455 int ret;
2456
2457 flavor = cds_lfht_rcu_flavor(ja->ht);
2458 rcuja_shadow_prune(ja->ht,
2459 RCUJA_SHADOW_CLEAR_FREE_NODE | RCUJA_SHADOW_CLEAR_FREE_LOCK,
2460 free_node_cb);
2461 flavor->thread_offline();
2462 ret = rcuja_delete_ht(ja->ht);
2463 if (ret)
2464 return ret;
2465
2466 /* Wait for in-flight call_rcu free to complete. */
2467 flavor->barrier();
2468
2469 flavor->thread_online();
2470 ret = ja_final_checks(ja);
2471 free(ja);
2472 return ret;
2473 }
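
/*
 * Illustrative teardown sketch (not part of the library): destroy the
 * array once all updaters have stopped. The callback receives the
 * rcu_head embedded in struct cds_ja_node (its "head" member), as queued
 * by rcuja_shadow_prune()/rcuja_free_all_children() above, and recovers
 * the enclosing caller structure with caa_container_of().
 * "struct my_item" is the same hypothetical structure as in the earlier
 * sketches. Kept under #if 0, never compiled.
 */
#if 0	/* usage example only */
static void my_item_destroy_cb(struct rcu_head *head)
{
	struct cds_ja_node *ja_node =
		caa_container_of(head, struct cds_ja_node, head);
	struct my_item *item =
		caa_container_of(ja_node, struct my_item, ja_node);

	free(item);
}

static int my_array_destroy(struct cds_ja *ja)
{
	/* No concurrent cds_ja_add()/cds_ja_del() may run past this point. */
	return cds_ja_destroy(ja, my_item_destroy_cb);
}
#endif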