rcuja fix: update 2d distance calculation
1 /*
2 * rcuja/rcuja.c
3 *
4 * Userspace RCU library - RCU Judy Array
5 *
6 * Copyright 2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 */
22
23 #define _LGPL_SOURCE
24 #include <stdint.h>
25 #include <errno.h>
26 #include <limits.h>
27 #include <string.h>
#include <stdlib.h>	/* for posix_memalign() and free() */
28 #include <urcu/rcuja.h>
29 #include <urcu/compiler.h>
30 #include <urcu/arch.h>
31 #include <assert.h>
32 #include <urcu-pointer.h>
33 #include <urcu/uatomic.h>
35
36 #include "rcuja-internal.h"
37 #include "bitfield.h"
38
39 #ifndef abs_int
40 #define abs_int(a) ((int) (a) > 0 ? (int) (a) : -((int) (a)))
41 #endif
42
43 enum cds_ja_type_class {
44 RCU_JA_LINEAR = 0, /* Type A */
45 /* 32-bit: 1 to 25 children, 8 to 128 bytes */
46 /* 64-bit: 1 to 28 children, 16 to 256 bytes */
47 RCU_JA_POOL = 1, /* Type B */
48 /* 32-bit: 26 to 100 children, 256 to 512 bytes */
49 /* 64-bit: 29 to 112 children, 512 to 1024 bytes */
50 RCU_JA_PIGEON = 2, /* Type C */
51 /* 32-bit: 101 to 256 children, 1024 bytes */
52 /* 64-bit: 113 to 256 children, 2048 bytes */
53 /* Leaf nodes are implicit from their height in the tree */
54 RCU_JA_NR_TYPES,
55
56 RCU_JA_NULL, /* not an encoded type, but keeps code regular */
57 };
58
59 struct cds_ja_type {
60 enum cds_ja_type_class type_class;
61 uint16_t min_child; /* minimum number of children: 1 to 256 */
62 uint16_t max_child; /* maximum number of children: 1 to 256 */
63 uint16_t max_linear_child; /* per-pool max nr. children: 1 to 256 */
64 uint16_t order; /* node size is (1 << order), in bytes */
65 uint16_t nr_pool_order; /* number of pools is (1 << nr_pool_order) */
66 uint16_t pool_size_order; /* pool size is (1 << pool_size_order), in bytes */
67 };
68
69 /*
70 * Iteration on the array to find the right node size for the number of
71 * children stops when it reaches .max_child == 256 (this is the largest
72 * possible node size, which contains 256 children).
73 * The min_child overlaps with the previous max_child to provide a
74 * hysteresis loop for reallocation under patterns of cyclic add/removal
75 * within the same node.
76 * The node index within the following arrays is represented on 3
77 * bits. It identifies the node type, min/max number of children, and
78 * the size order.
79 * The max_child values for RCU_JA_POOL below result from statistical
80 * approximation: over a million generated populations, the max_child
81 * covers between 97% and 99% of them. A fallback therefore needs to
82 * exist to cover the rare extreme population unbalance cases, but it
83 * will not have a major impact on speed nor space consumption, since
84 * those cases are rare.
85 */
86
87 #if (CAA_BITS_PER_LONG < 64)
88 /* 32-bit pointers */
89 enum {
90 ja_type_0_max_child = 1,
91 ja_type_1_max_child = 3,
92 ja_type_2_max_child = 6,
93 ja_type_3_max_child = 12,
94 ja_type_4_max_child = 25,
95 ja_type_5_max_child = 48,
96 ja_type_6_max_child = 92,
97 ja_type_7_max_child = 256,
98 ja_type_8_max_child = 0, /* NULL */
99 };
100
101 enum {
102 ja_type_0_max_linear_child = 1,
103 ja_type_1_max_linear_child = 3,
104 ja_type_2_max_linear_child = 6,
105 ja_type_3_max_linear_child = 12,
106 ja_type_4_max_linear_child = 25,
107 ja_type_5_max_linear_child = 24,
108 ja_type_6_max_linear_child = 23,
109 };
110
111 enum {
112 ja_type_5_nr_pool_order = 1,
113 ja_type_6_nr_pool_order = 2,
114 };
115
116 const struct cds_ja_type ja_types[] = {
117 { .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_0_max_child, .max_linear_child = ja_type_0_max_linear_child, .order = 3, },
118 { .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_1_max_child, .max_linear_child = ja_type_1_max_linear_child, .order = 4, },
119 { .type_class = RCU_JA_LINEAR, .min_child = 3, .max_child = ja_type_2_max_child, .max_linear_child = ja_type_2_max_linear_child, .order = 5, },
120 { .type_class = RCU_JA_LINEAR, .min_child = 4, .max_child = ja_type_3_max_child, .max_linear_child = ja_type_3_max_linear_child, .order = 6, },
121 { .type_class = RCU_JA_LINEAR, .min_child = 10, .max_child = ja_type_4_max_child, .max_linear_child = ja_type_4_max_linear_child, .order = 7, },
122
123 /* Pools may fill sooner than max_child */
124 { .type_class = RCU_JA_POOL, .min_child = 20, .max_child = ja_type_5_max_child, .max_linear_child = ja_type_5_max_linear_child, .order = 8, .nr_pool_order = ja_type_5_nr_pool_order, .pool_size_order = 7, },
125 { .type_class = RCU_JA_POOL, .min_child = 45, .max_child = ja_type_6_max_child, .max_linear_child = ja_type_6_max_linear_child, .order = 9, .nr_pool_order = ja_type_6_nr_pool_order, .pool_size_order = 7, },
126
127 /*
128 * Upon node removal below min_child, if child pool is filled
129 * beyond capacity, we roll back to pigeon.
130 */
131 { .type_class = RCU_JA_PIGEON, .min_child = 89, .max_child = ja_type_7_max_child, .order = 10, },
132
133 { .type_class = RCU_JA_NULL, .min_child = 0, .max_child = ja_type_8_max_child, },
134 };
135 #else /* !(CAA_BITS_PER_LONG < 64) */
136 /* 64-bit pointers */
137 enum {
138 ja_type_0_max_child = 1,
139 ja_type_1_max_child = 3,
140 ja_type_2_max_child = 7,
141 ja_type_3_max_child = 14,
142 ja_type_4_max_child = 28,
143 ja_type_5_max_child = 54,
144 ja_type_6_max_child = 104,
145 ja_type_7_max_child = 256,
146 ja_type_8_max_child = 0, /* NULL */
147 };
148
149 enum {
150 ja_type_0_max_linear_child = 1,
151 ja_type_1_max_linear_child = 3,
152 ja_type_2_max_linear_child = 7,
153 ja_type_3_max_linear_child = 14,
154 ja_type_4_max_linear_child = 28,
155 ja_type_5_max_linear_child = 27,
156 ja_type_6_max_linear_child = 26,
157 };
158
159 enum {
160 ja_type_5_nr_pool_order = 1,
161 ja_type_6_nr_pool_order = 2,
162 };
163
164 const struct cds_ja_type ja_types[] = {
165 { .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_0_max_child, .max_linear_child = ja_type_0_max_linear_child, .order = 4, },
166 { .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_1_max_child, .max_linear_child = ja_type_1_max_linear_child, .order = 5, },
167 { .type_class = RCU_JA_LINEAR, .min_child = 3, .max_child = ja_type_2_max_child, .max_linear_child = ja_type_2_max_linear_child, .order = 6, },
168 { .type_class = RCU_JA_LINEAR, .min_child = 5, .max_child = ja_type_3_max_child, .max_linear_child = ja_type_3_max_linear_child, .order = 7, },
169 { .type_class = RCU_JA_LINEAR, .min_child = 10, .max_child = ja_type_4_max_child, .max_linear_child = ja_type_4_max_linear_child, .order = 8, },
170
171 /* Pools may fill sooner than max_child. */
172 { .type_class = RCU_JA_POOL, .min_child = 22, .max_child = ja_type_5_max_child, .max_linear_child = ja_type_5_max_linear_child, .order = 9, .nr_pool_order = ja_type_5_nr_pool_order, .pool_size_order = 8, },
173 { .type_class = RCU_JA_POOL, .min_child = 51, .max_child = ja_type_6_max_child, .max_linear_child = ja_type_6_max_linear_child, .order = 10, .nr_pool_order = ja_type_6_nr_pool_order, .pool_size_order = 8, },
174
175 /*
176 * Upon node removal below min_child, if child pool is filled
177 * beyond capacity, we roll back to pigeon.
178 */
179 { .type_class = RCU_JA_PIGEON, .min_child = 101, .max_child = ja_type_7_max_child, .order = 11, },
180
181 { .type_class = RCU_JA_NULL, .min_child = 0, .max_child = ja_type_8_max_child, },
182 };
183 #endif /* !(CAA_BITS_PER_LONG < 64) */
184
185 static inline __attribute__((unused))
186 void static_array_size_check(void)
187 {
188 CAA_BUILD_BUG_ON(CAA_ARRAY_SIZE(ja_types) < JA_TYPE_MAX_NR);
189 }
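
/*
 * Illustration only (not part of the original file): a minimal sketch of
 * how an updater could walk ja_types to pick the smallest node type able
 * to hold a given number of children. The helper name
 * ja_example_find_type_index is hypothetical.
 */
static inline __attribute__((unused))
int ja_example_find_type_index(uint16_t nr_child)
{
	unsigned int i;

	for (i = 0; i < CAA_ARRAY_SIZE(ja_types); i++) {
		if (ja_types[i].type_class == RCU_JA_NULL)
			break;
		if (nr_child <= ja_types[i].max_child)
			return (int) i;	/* 3-bit node type index */
	}
	return -1;	/* no node type can hold nr_child children */
}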
190
191 /*
192 * The cds_ja_node contains the compressed node data needed for
193 * read-side. For linear and pool node configurations, it starts with a
194 * byte counting the number of children in the node. Then, the
195 * node-specific data is placed.
196 * The node mutex, if any is needed, protecting concurrent updates of
197 * each node, is placed in a separate hash table indexed by node address.
198 * For the pigeon configuration, the number of children is also kept in
199 * a separate hash table, indexed by node address, because it is only
200 * required for updates.
201 */
202
203 #define DECLARE_LINEAR_NODE(index) \
204 struct { \
205 uint8_t nr_child; \
206 uint8_t child_value[ja_type_## index ##_max_linear_child]; \
207 struct cds_ja_inode_flag *child_ptr[ja_type_## index ##_max_linear_child]; \
208 }
209
210 #define DECLARE_POOL_NODE(index) \
211 struct { \
212 struct { \
213 uint8_t nr_child; \
214 uint8_t child_value[ja_type_## index ##_max_linear_child]; \
215 struct cds_ja_inode_flag *child_ptr[ja_type_## index ##_max_linear_child]; \
216 } linear[1U << ja_type_## index ##_nr_pool_order]; \
217 }
218
219 struct cds_ja_inode {
220 union {
221 /* Linear configuration */
222 DECLARE_LINEAR_NODE(0) conf_0;
223 DECLARE_LINEAR_NODE(1) conf_1;
224 DECLARE_LINEAR_NODE(2) conf_2;
225 DECLARE_LINEAR_NODE(3) conf_3;
226 DECLARE_LINEAR_NODE(4) conf_4;
227
228 /* Pool configuration */
229 DECLARE_POOL_NODE(5) conf_5;
230 DECLARE_POOL_NODE(6) conf_6;
231
232 /* Pigeon configuration */
233 struct {
234 struct cds_ja_inode_flag *child[ja_type_7_max_child];
235 } conf_7;
236 /* data aliasing nodes for computed accesses */
237 uint8_t data[sizeof(struct cds_ja_inode_flag *) * ja_type_7_max_child];
238 } u;
239 };
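
/*
 * Illustration only (editorial note): layout of a linear node as seen
 * through u.data, matching align_ptr_size() and the accessors below.
 * For example, for conf_1 (max_linear_child == 3) on a 64-bit target:
 *
 *   u.data[0]       nr_child
 *   u.data[1..3]    child_value[0..2]
 *   u.data[4..7]    padding up to pointer alignment
 *   u.data[8..31]   child_ptr[0..2]
 */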
240
241 enum ja_recompact {
242 JA_RECOMPACT_ADD_SAME,
243 JA_RECOMPACT_ADD_NEXT,
244 JA_RECOMPACT_DEL,
245 };
246
247 static
248 unsigned long node_fallback_count_distribution[JA_ENTRY_PER_NODE];
249
250 static
251 struct cds_ja_inode *_ja_node_mask_ptr(struct cds_ja_inode_flag *node)
252 {
253 return (struct cds_ja_inode *) (((unsigned long) node) & JA_PTR_MASK);
254 }
255
256 unsigned long ja_node_type(struct cds_ja_inode_flag *node)
257 {
258 unsigned long type;
259
260 if (_ja_node_mask_ptr(node) == NULL) {
261 return NODE_INDEX_NULL;
262 }
263 type = (unsigned int) ((unsigned long) node & JA_TYPE_MASK);
264 assert(type < (1UL << JA_TYPE_BITS));
265 return type;
266 }
267
268 struct cds_ja_inode *ja_node_ptr(struct cds_ja_inode_flag *node)
269 {
270 unsigned long type_index = ja_node_type(node);
271 const struct cds_ja_type *type;
272
273 type = &ja_types[type_index];
274 switch (type->type_class) {
275 case RCU_JA_LINEAR:
276 case RCU_JA_PIGEON: /* fall-through */
277 case RCU_JA_NULL: /* fall-through */
278 default: /* fall-through */
279 return _ja_node_mask_ptr(node);
280 case RCU_JA_POOL:
281 switch (type->nr_pool_order) {
282 case 1:
283 return (struct cds_ja_inode *) (((unsigned long) node) & ~(JA_POOL_1D_MASK | JA_TYPE_MASK));
284 case 2:
285 return (struct cds_ja_inode *) (((unsigned long) node) & ~(JA_POOL_2D_MASK | JA_POOL_1D_MASK | JA_TYPE_MASK));
286 default:
287 assert(0);
return NULL;	/* unreachable; silences missing-return warnings */
288 }
289 }
290 }
291
292 struct cds_ja_inode *alloc_cds_ja_node(const struct cds_ja_type *ja_type)
293 {
294 size_t len = 1U << ja_type->order;
295 void *p;
296 int ret;
297
298 ret = posix_memalign(&p, len, len);
299 if (ret || !p) {
300 return NULL;
301 }
302 memset(p, 0, len);
303 return p;
304 }
305
306 void free_cds_ja_node(struct cds_ja_inode *node)
307 {
308 free(node);
309 }
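
/*
 * Editorial note: posix_memalign() in alloc_cds_ja_node() aligns each
 * node on its own size (1 << order, at least 8 bytes), so the low bits
 * of every node pointer are zero. Those bits carry the 3-bit type
 * index, and, for pool nodes, the bit-selection flags, which
 * ja_node_type() and ja_node_ptr() above decode and mask away.
 */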
310
311 #define __JA_ALIGN_MASK(v, mask) (((v) + (mask)) & ~(mask))
312 #define JA_ALIGN(v, align) __JA_ALIGN_MASK(v, (typeof(v)) (align) - 1)
313 #define __JA_FLOOR_MASK(v, mask) ((v) & ~(mask))
314 #define JA_FLOOR(v, align) __JA_FLOOR_MASK(v, (typeof(v)) (align) - 1)
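
/*
 * Worked example (illustration only): JA_ALIGN(13, 8) == 16 and
 * JA_FLOOR(13, 8) == 8: the align mask (align - 1 == 7) is added
 * (for JA_ALIGN) before the low bits are cleared.
 */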
315
316 static
317 uint8_t *align_ptr_size(uint8_t *ptr)
318 {
319 return (uint8_t *) JA_ALIGN((unsigned long) ptr, sizeof(void *));
320 }
321
322 static
323 uint8_t ja_linear_node_get_nr_child(const struct cds_ja_type *type,
324 struct cds_ja_inode *node)
325 {
326 assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
327 return rcu_dereference(node->u.data[0]);
328 }
329
330 /*
331 * The order in which values and pointers are read does not matter: if
332 * a value is missing, we return NULL. If a value is there, but its
333 * associated pointer is still NULL, we return NULL too.
334 */
335 static
336 struct cds_ja_inode_flag *ja_linear_node_get_nth(const struct cds_ja_type *type,
337 struct cds_ja_inode *node,
338 struct cds_ja_inode_flag ***child_node_flag_ptr,
339 struct cds_ja_inode_flag **child_node_flag_v,
340 struct cds_ja_inode_flag ***node_flag_ptr,
341 uint8_t n)
342 {
343 uint8_t nr_child;
344 uint8_t *values;
345 struct cds_ja_inode_flag **pointers;
346 struct cds_ja_inode_flag *ptr;
347 unsigned int i;
348
349 assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
350
351 nr_child = ja_linear_node_get_nr_child(type, node);
352 cmm_smp_rmb(); /* read nr_child before values and pointers */
353 assert(nr_child <= type->max_linear_child);
354 assert(type->type_class != RCU_JA_LINEAR || nr_child >= type->min_child);
355
356 values = &node->u.data[1];
357 for (i = 0; i < nr_child; i++) {
358 if (CMM_LOAD_SHARED(values[i]) == n)
359 break;
360 }
361 if (i >= nr_child) {
362 if (caa_unlikely(node_flag_ptr))
363 *node_flag_ptr = NULL;
364 return NULL;
365 }
366 pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
367 ptr = rcu_dereference(pointers[i]);
368 if (caa_unlikely(child_node_flag_ptr) && ptr)
369 *child_node_flag_ptr = &pointers[i];
370 if (caa_unlikely(child_node_flag_v) && ptr)
371 *child_node_flag_v = ptr;
372 if (caa_unlikely(node_flag_ptr))
373 *node_flag_ptr = &pointers[i];
374 return ptr;
375 }
376
377 static
378 void ja_linear_node_get_ith_pos(const struct cds_ja_type *type,
379 struct cds_ja_inode *node,
380 uint8_t i,
381 uint8_t *v,
382 struct cds_ja_inode_flag **iter)
383 {
384 uint8_t *values;
385 struct cds_ja_inode_flag **pointers;
386
387 assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
388 assert(i < ja_linear_node_get_nr_child(type, node));
389
390 values = &node->u.data[1];
391 *v = values[i];
392 pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
393 *iter = pointers[i];
394 }
395
396 static
397 struct cds_ja_inode_flag *ja_pool_node_get_nth(const struct cds_ja_type *type,
398 struct cds_ja_inode *node,
399 struct cds_ja_inode_flag *node_flag,
400 struct cds_ja_inode_flag ***child_node_flag_ptr,
401 struct cds_ja_inode_flag **child_node_flag_v,
402 struct cds_ja_inode_flag ***node_flag_ptr,
403 uint8_t n)
404 {
405 struct cds_ja_inode *linear;
406
407 assert(type->type_class == RCU_JA_POOL);
408
409 switch (type->nr_pool_order) {
410 case 1:
411 {
412 unsigned long bitsel, index;
413
414 bitsel = ja_node_pool_1d_bitsel(node_flag);
415 assert(bitsel < CHAR_BIT);
416 index = ((unsigned long) n >> bitsel) & 0x1;
417 linear = (struct cds_ja_inode *) &node->u.data[index << type->pool_size_order];
418 break;
419 }
420 case 2:
421 {
422 unsigned long bitsel[2], index[2], rindex;
423
424 ja_node_pool_2d_bitsel(node_flag, bitsel);
425 assert(bitsel[0] < CHAR_BIT);
426 assert(bitsel[1] < CHAR_BIT);
427 index[0] = ((unsigned long) n >> bitsel[0]) & 0x1;
428 index[0] <<= 1;
429 index[1] = ((unsigned long) n >> bitsel[1]) & 0x1;
430 rindex = index[0] | index[1];
431 linear = (struct cds_ja_inode *) &node->u.data[rindex << type->pool_size_order];
432 break;
433 }
434 default:
435 linear = NULL;
436 assert(0);
437 }
438 return ja_linear_node_get_nth(type, linear, child_node_flag_ptr,
439 child_node_flag_v, node_flag_ptr, n);
440 }
441
442 static
443 struct cds_ja_inode *ja_pool_node_get_ith_pool(const struct cds_ja_type *type,
444 struct cds_ja_inode *node,
445 uint8_t i)
446 {
447 assert(type->type_class == RCU_JA_POOL);
448 return (struct cds_ja_inode *)
449 &node->u.data[(unsigned int) i << type->pool_size_order];
450 }
451
452 static
453 struct cds_ja_inode_flag *ja_pigeon_node_get_nth(const struct cds_ja_type *type,
454 struct cds_ja_inode *node,
455 struct cds_ja_inode_flag ***child_node_flag_ptr,
456 struct cds_ja_inode_flag **child_node_flag_v,
457 struct cds_ja_inode_flag ***node_flag_ptr,
458 uint8_t n)
459 {
460 struct cds_ja_inode_flag **child_node_flag;
461 struct cds_ja_inode_flag *child_node_flag_read;
462
463 assert(type->type_class == RCU_JA_PIGEON);
464 child_node_flag = &((struct cds_ja_inode_flag **) node->u.data)[n];
465 child_node_flag_read = rcu_dereference(*child_node_flag);
466 dbg_printf("ja_pigeon_node_get_nth child_node_flag_ptr %p\n",
467 child_node_flag);
468 if (caa_unlikely(child_node_flag_ptr) && child_node_flag_read)
469 *child_node_flag_ptr = child_node_flag;
470 if (caa_unlikely(child_node_flag_v) && child_node_flag_read)
471 *child_node_flag_v = child_node_flag_read;
472 if (caa_unlikely(node_flag_ptr))
473 *node_flag_ptr = child_node_flag;
474 return child_node_flag_read;
475 }
476
477 static
478 struct cds_ja_inode_flag *ja_pigeon_node_get_ith_pos(const struct cds_ja_type *type,
479 struct cds_ja_inode *node,
480 uint8_t i)
481 {
482 return ja_pigeon_node_get_nth(type, node, NULL, NULL, NULL, i);
483 }
484
485 /*
486 * ja_node_get_nth: get nth item from a node.
487 * node_flag is already rcu_dereference'd.
488 */
489 static
490 struct cds_ja_inode_flag *ja_node_get_nth(struct cds_ja_inode_flag *node_flag,
491 struct cds_ja_inode_flag ***child_node_flag_ptr,
492 struct cds_ja_inode_flag **child_node_flag,
493 struct cds_ja_inode_flag ***node_flag_ptr,
494 uint8_t n)
495 {
496 unsigned int type_index;
497 struct cds_ja_inode *node;
498 const struct cds_ja_type *type;
499
500 node = ja_node_ptr(node_flag);
501 assert(node != NULL);
502 type_index = ja_node_type(node_flag);
503 type = &ja_types[type_index];
504
505 switch (type->type_class) {
506 case RCU_JA_LINEAR:
507 return ja_linear_node_get_nth(type, node,
508 child_node_flag_ptr, child_node_flag,
509 node_flag_ptr, n);
510 case RCU_JA_POOL:
511 return ja_pool_node_get_nth(type, node, node_flag,
512 child_node_flag_ptr, child_node_flag,
513 node_flag_ptr, n);
514 case RCU_JA_PIGEON:
515 return ja_pigeon_node_get_nth(type, node,
516 child_node_flag_ptr, child_node_flag,
517 node_flag_ptr, n);
518 default:
519 assert(0);
520 return (void *) -1UL;
521 }
522 }
523
524 static
525 int ja_linear_node_set_nth(const struct cds_ja_type *type,
526 struct cds_ja_inode *node,
527 struct cds_ja_shadow_node *shadow_node,
528 uint8_t n,
529 struct cds_ja_inode_flag *child_node_flag)
530 {
531 uint8_t nr_child;
532 uint8_t *values, *nr_child_ptr;
533 struct cds_ja_inode_flag **pointers;
534 unsigned int i, unused = 0;
535
536 assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
537
538 nr_child_ptr = &node->u.data[0];
539 dbg_printf("linear set nth: nr_child_ptr %p\n", nr_child_ptr);
540 nr_child = *nr_child_ptr;
541 assert(nr_child <= type->max_linear_child);
542
543 values = &node->u.data[1];
544 pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
545 /* Check if node value is already populated */
546 for (i = 0; i < nr_child; i++) {
547 if (values[i] == n) {
548 if (pointers[i])
549 return -EEXIST;
550 else
551 break;
552 } else {
553 if (!pointers[i])
554 unused++;
555 }
556 }
557 if (i == nr_child && nr_child >= type->max_linear_child) {
558 if (unused)
559 return -ERANGE; /* recompact node */
560 else
561 return -ENOSPC; /* No space left in this node type */
562 }
563
564 assert(pointers[i] == NULL);
565 rcu_assign_pointer(pointers[i], child_node_flag);
566 /* If we expanded the nr_child, increment it */
567 if (i == nr_child) {
568 CMM_STORE_SHARED(values[nr_child], n);
569 /* write pointer and value before nr_child */
570 cmm_smp_wmb();
571 CMM_STORE_SHARED(*nr_child_ptr, nr_child + 1);
572 }
573 shadow_node->nr_child++;
574 dbg_printf("linear set nth: %u child, shadow: %u child, for node %p shadow %p\n",
575 (unsigned int) CMM_LOAD_SHARED(*nr_child_ptr),
576 (unsigned int) shadow_node->nr_child,
577 node, shadow_node);
578
579 return 0;
580 }
581
582 static
583 int ja_pool_node_set_nth(const struct cds_ja_type *type,
584 struct cds_ja_inode *node,
585 struct cds_ja_inode_flag *node_flag,
586 struct cds_ja_shadow_node *shadow_node,
587 uint8_t n,
588 struct cds_ja_inode_flag *child_node_flag)
589 {
590 struct cds_ja_inode *linear;
591
592 assert(type->type_class == RCU_JA_POOL);
593
594 switch (type->nr_pool_order) {
595 case 1:
596 {
597 unsigned long bitsel, index;
598
599 bitsel = ja_node_pool_1d_bitsel(node_flag);
600 assert(bitsel < CHAR_BIT);
601 index = ((unsigned long) n >> bitsel) & 0x1;
602 linear = (struct cds_ja_inode *) &node->u.data[index << type->pool_size_order];
603 break;
604 }
605 case 2:
606 {
607 unsigned long bitsel[2], index[2], rindex;
608
609 ja_node_pool_2d_bitsel(node_flag, bitsel);
610 assert(bitsel[0] < CHAR_BIT);
611 assert(bitsel[1] < CHAR_BIT);
612 index[0] = ((unsigned long) n >> bitsel[0]) & 0x1;
613 index[0] <<= 1;
614 index[1] = ((unsigned long) n >> bitsel[1]) & 0x1;
615 rindex = index[0] | index[1];
616 linear = (struct cds_ja_inode *) &node->u.data[rindex << type->pool_size_order];
617 break;
618 }
619 default:
620 linear = NULL;
621 assert(0);
622 }
623
624 return ja_linear_node_set_nth(type, linear, shadow_node,
625 n, child_node_flag);
626 }
627
628 static
629 int ja_pigeon_node_set_nth(const struct cds_ja_type *type,
630 struct cds_ja_inode *node,
631 struct cds_ja_shadow_node *shadow_node,
632 uint8_t n,
633 struct cds_ja_inode_flag *child_node_flag)
634 {
635 struct cds_ja_inode_flag **ptr;
636
637 assert(type->type_class == RCU_JA_PIGEON);
638 ptr = &((struct cds_ja_inode_flag **) node->u.data)[n];
639 if (*ptr)
640 return -EEXIST;
641 rcu_assign_pointer(*ptr, child_node_flag);
642 shadow_node->nr_child++;
643 return 0;
644 }
645
646 /*
647 * _ja_node_set_nth: set nth item within a node. Return an error
648 * (negative error value) if it is already there.
649 */
650 static
651 int _ja_node_set_nth(const struct cds_ja_type *type,
652 struct cds_ja_inode *node,
653 struct cds_ja_inode_flag *node_flag,
654 struct cds_ja_shadow_node *shadow_node,
655 uint8_t n,
656 struct cds_ja_inode_flag *child_node_flag)
657 {
658 switch (type->type_class) {
659 case RCU_JA_LINEAR:
660 return ja_linear_node_set_nth(type, node, shadow_node, n,
661 child_node_flag);
662 case RCU_JA_POOL:
663 return ja_pool_node_set_nth(type, node, node_flag, shadow_node, n,
664 child_node_flag);
665 case RCU_JA_PIGEON:
666 return ja_pigeon_node_set_nth(type, node, shadow_node, n,
667 child_node_flag);
668 case RCU_JA_NULL:
669 return -ENOSPC;
670 default:
671 assert(0);
672 return -EINVAL;
673 }
674
675 return 0;
676 }
677
678 static
679 int ja_linear_node_clear_ptr(const struct cds_ja_type *type,
680 struct cds_ja_inode *node,
681 struct cds_ja_shadow_node *shadow_node,
682 struct cds_ja_inode_flag **node_flag_ptr)
683 {
684 uint8_t nr_child;
685 uint8_t *nr_child_ptr;
686
687 assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
688
689 nr_child_ptr = &node->u.data[0];
690 nr_child = *nr_child_ptr;
691 assert(nr_child <= type->max_linear_child);
692
693 if (shadow_node->fallback_removal_count) {
694 shadow_node->fallback_removal_count--;
695 } else {
696 if (type->type_class == RCU_JA_LINEAR
697 && shadow_node->nr_child <= type->min_child) {
698 /* We need to try recompacting the node */
699 return -EFBIG;
700 }
701 }
702 dbg_printf("linear clear ptr: nr_child_ptr %p\n", nr_child_ptr);
703 assert(*node_flag_ptr != NULL);
704 rcu_assign_pointer(*node_flag_ptr, NULL);
705 /*
706 * Value and nr_child are never changed (would cause ABA issue).
707 * Instead, we leave the pointer to NULL and recompact the node
708 * once in a while. It is allowed to set a NULL pointer to a new
709 * value without recompaction though.
710 * Only update the shadow node accounting.
711 */
712 shadow_node->nr_child--;
713 dbg_printf("linear clear ptr: %u child, shadow: %u child, for node %p shadow %p\n",
714 (unsigned int) CMM_LOAD_SHARED(*nr_child_ptr),
715 (unsigned int) shadow_node->nr_child,
716 node, shadow_node);
717 return 0;
718 }
719
720 static
721 int ja_pool_node_clear_ptr(const struct cds_ja_type *type,
722 struct cds_ja_inode *node,
723 struct cds_ja_inode_flag *node_flag,
724 struct cds_ja_shadow_node *shadow_node,
725 struct cds_ja_inode_flag **node_flag_ptr,
726 uint8_t n)
727 {
728 struct cds_ja_inode *linear;
729
730 assert(type->type_class == RCU_JA_POOL);
731
732 if (shadow_node->fallback_removal_count) {
733 shadow_node->fallback_removal_count--;
734 } else {
735 /* We should try recompacting the node */
736 if (shadow_node->nr_child <= type->min_child)
737 return -EFBIG;
738 }
739
740 switch (type->nr_pool_order) {
741 case 1:
742 {
743 unsigned long bitsel, index;
744
745 bitsel = ja_node_pool_1d_bitsel(node_flag);
746 assert(bitsel < CHAR_BIT);
747 index = ((unsigned long) n >> bitsel) & 0x1;
748 linear = (struct cds_ja_inode *) &node->u.data[index << type->pool_size_order];
749 break;
750 }
751 case 2:
752 {
753 unsigned long bitsel[2], index[2], rindex;
754
755 ja_node_pool_2d_bitsel(node_flag, bitsel);
756 assert(bitsel[0] < CHAR_BIT);
757 assert(bitsel[1] < CHAR_BIT);
758 index[0] = ((unsigned long) n >> bitsel[0]) & 0x1;
759 index[0] <<= 1;
760 index[1] = ((unsigned long) n >> bitsel[1]) & 0x1;
761 rindex = index[0] | index[1];
762 linear = (struct cds_ja_inode *) &node->u.data[rindex << type->pool_size_order];
763 break;
764 }
765 default:
766 linear = NULL;
767 assert(0);
768 }
769
770 return ja_linear_node_clear_ptr(type, linear, shadow_node, node_flag_ptr);
771 }
772
773 static
774 int ja_pigeon_node_clear_ptr(const struct cds_ja_type *type,
775 struct cds_ja_inode *node,
776 struct cds_ja_shadow_node *shadow_node,
777 struct cds_ja_inode_flag **node_flag_ptr)
778 {
779 assert(type->type_class == RCU_JA_PIGEON);
780
781 if (shadow_node->fallback_removal_count) {
782 shadow_node->fallback_removal_count--;
783 } else {
784 /* We should try recompacting the node */
785 if (shadow_node->nr_child <= type->min_child)
786 return -EFBIG;
787 }
788 dbg_printf("ja_pigeon_node_clear_ptr: clearing ptr: %p\n", *node_flag_ptr);
789 rcu_assign_pointer(*node_flag_ptr, NULL);
790 shadow_node->nr_child--;
791 return 0;
792 }
793
794 /*
795 * _ja_node_clear_ptr: clear ptr item within a node. Return an error
796 * (negative error value) if it is not found (-ENOENT).
797 */
798 static
799 int _ja_node_clear_ptr(const struct cds_ja_type *type,
800 struct cds_ja_inode *node,
801 struct cds_ja_inode_flag *node_flag,
802 struct cds_ja_shadow_node *shadow_node,
803 struct cds_ja_inode_flag **node_flag_ptr,
804 uint8_t n)
805 {
806 switch (type->type_class) {
807 case RCU_JA_LINEAR:
808 return ja_linear_node_clear_ptr(type, node, shadow_node, node_flag_ptr);
809 case RCU_JA_POOL:
810 return ja_pool_node_clear_ptr(type, node, node_flag, shadow_node, node_flag_ptr, n);
811 case RCU_JA_PIGEON:
812 return ja_pigeon_node_clear_ptr(type, node, shadow_node, node_flag_ptr);
813 case RCU_JA_NULL:
814 return -ENOENT;
815 default:
816 assert(0);
817 return -EINVAL;
818 }
819
820 return 0;
821 }
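
/*
 * Editorial summary of the error-code protocol used by the node-level
 * setters and clearers above (derived from the code in this file):
 *   -EEXIST: value already present with a non-NULL pointer.
 *   -ERANGE: linear node full, but it holds unused (NULLed) slots; the
 *            caller recompacts into the same size (JA_RECOMPACT_ADD_SAME).
 *   -ENOSPC: no room left in this node type; the caller recompacts into
 *            the next larger type (JA_RECOMPACT_ADD_NEXT).
 *   -EFBIG:  population dropped below min_child on clear; the caller
 *            attempts a shrinking recompaction (JA_RECOMPACT_DEL).
 */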
822
823 /*
824 * Calculate bit distribution. Returns the bit (0 to 7) that splits the
825 * distribution into two sub-distributions each containing as close as
826 * possible to half of the elements.
827 */
828 static
829 unsigned int ja_node_sum_distribution_1d(enum ja_recompact mode,
830 struct cds_ja *ja,
831 unsigned int type_index,
832 const struct cds_ja_type *type,
833 struct cds_ja_inode *node,
834 struct cds_ja_shadow_node *shadow_node,
835 uint8_t n,
836 struct cds_ja_inode_flag *child_node_flag,
837 struct cds_ja_inode_flag **nullify_node_flag_ptr)
838 {
839 uint8_t nr_one[JA_BITS_PER_BYTE];
840 unsigned int bitsel = 0, bit_i, overall_best_distance = UINT_MAX;
841 unsigned int distrib_nr_child = 0;
842
843 memset(nr_one, 0, sizeof(nr_one));
844
845 switch (type->type_class) {
846 case RCU_JA_LINEAR:
847 {
848 uint8_t nr_child =
849 ja_linear_node_get_nr_child(type, node);
850 unsigned int i;
851
852 for (i = 0; i < nr_child; i++) {
853 struct cds_ja_inode_flag *iter;
854 uint8_t v;
855
856 ja_linear_node_get_ith_pos(type, node, i, &v, &iter);
857 if (!iter)
858 continue;
859 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
860 continue;
861 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
862 if (v & (1U << bit_i))
863 nr_one[bit_i]++;
864 }
865 distrib_nr_child++;
866 }
867 break;
868 }
869 case RCU_JA_POOL:
870 {
871 unsigned int pool_nr;
872
873 for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) {
874 struct cds_ja_inode *pool =
875 ja_pool_node_get_ith_pool(type,
876 node, pool_nr);
877 uint8_t nr_child =
878 ja_linear_node_get_nr_child(type, pool);
879 unsigned int j;
880
881 for (j = 0; j < nr_child; j++) {
882 struct cds_ja_inode_flag *iter;
883 uint8_t v;
884
885 ja_linear_node_get_ith_pos(type, pool,
886 j, &v, &iter);
887 if (!iter)
888 continue;
889 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
890 continue;
891 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
892 if (v & (1U << bit_i))
893 nr_one[bit_i]++;
894 }
895 distrib_nr_child++;
896 }
897 }
898 break;
899 }
900 case RCU_JA_PIGEON:
901 {
902 uint8_t nr_child;
903 unsigned int i;
904
905 assert(mode == JA_RECOMPACT_DEL);
906 nr_child = shadow_node->nr_child;
907 for (i = 0; i < nr_child; i++) {
908 struct cds_ja_inode_flag *iter;
909
910 iter = ja_pigeon_node_get_ith_pos(type, node, i);
911 if (!iter)
912 continue;
913 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
914 continue;
915 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
916 if (i & (1U << bit_i))
917 nr_one[bit_i]++;
918 }
919 distrib_nr_child++;
920 }
921 break;
922 }
923 case RCU_JA_NULL:
924 assert(mode == JA_RECOMPACT_ADD_NEXT);
925 break;
926 default:
927 assert(0);
928 break;
929 }
930
931 if (mode == JA_RECOMPACT_ADD_NEXT || mode == JA_RECOMPACT_ADD_SAME) {
932 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
933 if (n & (1U << bit_i))
934 nr_one[bit_i]++;
935 }
936 distrib_nr_child++;
937 }
938
939 /*
940 * The best bit selector is that for which the number of ones is
941 * closest to half of the number of children in the
942 * distribution. We calculate the distance using the double of
943 * the sub-distribution sizes to eliminate truncation error.
944 */
945 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
946 unsigned int distance_to_best;
947
948 distance_to_best = abs_int((nr_one[bit_i] << 1U) - distrib_nr_child);
949 if (distance_to_best < overall_best_distance) {
950 overall_best_distance = distance_to_best;
951 bitsel = bit_i;
952 }
953 }
954 dbg_printf("1 dimension pool bit selection: (%u)\n", bitsel);
955 return bitsel;
956 }
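
/*
 * Worked example (illustration only): with distrib_nr_child == 6 and a
 * bit set in 2 of the 6 children, distance_to_best == |2 * 2 - 6| == 2.
 * A bit set in exactly 3 of the 6 children gives distance 0: a perfect
 * split in halves.
 */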
957
958 /*
959 * Calculate bit distribution in two dimensions. Returns the two bits
960 * (each 0 to 7) that split the distribution into four sub-distributions
961 * each containing as close as possible to one fourth of the elements.
962 */
963 static
964 void ja_node_sum_distribution_2d(enum ja_recompact mode,
965 struct cds_ja *ja,
966 unsigned int type_index,
967 const struct cds_ja_type *type,
968 struct cds_ja_inode *node,
969 struct cds_ja_shadow_node *shadow_node,
970 uint8_t n,
971 struct cds_ja_inode_flag *child_node_flag,
972 struct cds_ja_inode_flag **nullify_node_flag_ptr,
973 unsigned int *_bitsel)
974 {
975 uint8_t nr_2d_11[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE],
976 nr_2d_10[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE],
977 nr_2d_01[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE],
978 nr_2d_00[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE];
979 unsigned int bitsel[2] = { 0, 1 };
980 unsigned int bit_i, bit_j, overall_best_distance = UINT_MAX;
981 unsigned int distrib_nr_child = 0;
982
983 memset(nr_2d_11, 0, sizeof(nr_2d_11));
984 memset(nr_2d_10, 0, sizeof(nr_2d_10));
memset(nr_2d_01, 0, sizeof(nr_2d_01));	/* read below: must be zeroed too */
memset(nr_2d_00, 0, sizeof(nr_2d_00));
985
986 switch (type->type_class) {
987 case RCU_JA_LINEAR:
988 {
989 uint8_t nr_child =
990 ja_linear_node_get_nr_child(type, node);
991 unsigned int i;
992
993 for (i = 0; i < nr_child; i++) {
994 struct cds_ja_inode_flag *iter;
995 uint8_t v;
996
997 ja_linear_node_get_ith_pos(type, node, i, &v, &iter);
998 if (!iter)
999 continue;
1000 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
1001 continue;
1002 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1003 for (bit_j = 0; bit_j < bit_i; bit_j++) {
1004 if ((v & (1U << bit_i)) && (v & (1U << bit_j))) {
1005 nr_2d_11[bit_i][bit_j]++;
1006 }
1007 if ((v & (1U << bit_i)) && !(v & (1U << bit_j))) {
1008 nr_2d_10[bit_i][bit_j]++;
1009 }
1010 if (!(v & (1U << bit_i)) && (v & (1U << bit_j))) {
1011 nr_2d_01[bit_i][bit_j]++;
1012 }
1013 if (!(v & (1U << bit_i)) && !(v & (1U << bit_j))) {
1014 nr_2d_00[bit_i][bit_j]++;
1015 }
1016 }
1017 }
1018 distrib_nr_child++;
1019 }
1020 break;
1021 }
1022 case RCU_JA_POOL:
1023 {
1024 unsigned int pool_nr;
1025
1026 for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) {
1027 struct cds_ja_inode *pool =
1028 ja_pool_node_get_ith_pool(type,
1029 node, pool_nr);
1030 uint8_t nr_child =
1031 ja_linear_node_get_nr_child(type, pool);
1032 unsigned int j;
1033
1034 for (j = 0; j < nr_child; j++) {
1035 struct cds_ja_inode_flag *iter;
1036 uint8_t v;
1037
1038 ja_linear_node_get_ith_pos(type, pool,
1039 j, &v, &iter);
1040 if (!iter)
1041 continue;
1042 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
1043 continue;
1044 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1045 for (bit_j = 0; bit_j < bit_i; bit_j++) {
1046 if ((v & (1U << bit_i)) && (v & (1U << bit_j))) {
1047 nr_2d_11[bit_i][bit_j]++;
1048 }
1049 if ((v & (1U << bit_i)) && !(v & (1U << bit_j))) {
1050 nr_2d_10[bit_i][bit_j]++;
1051 }
1052 if (!(v & (1U << bit_i)) && (v & (1U << bit_j))) {
1053 nr_2d_01[bit_i][bit_j]++;
1054 }
1055 if (!(v & (1U << bit_i)) && !(v & (1U << bit_j))) {
1056 nr_2d_00[bit_i][bit_j]++;
1057 }
1058 }
1059 }
1060 distrib_nr_child++;
1061 }
1062 }
1063 break;
1064 }
1065 case RCU_JA_PIGEON:
1066 {
1067 uint8_t nr_child;
1068 unsigned int i;
1069
1070 assert(mode == JA_RECOMPACT_DEL);
1071 nr_child = shadow_node->nr_child;
1072 for (i = 0; i < nr_child; i++) {
1073 struct cds_ja_inode_flag *iter;
1074
1075 iter = ja_pigeon_node_get_ith_pos(type, node, i);
1076 if (!iter)
1077 continue;
1078 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
1079 continue;
1080 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1081 for (bit_j = 0; bit_j < bit_i; bit_j++) {
1082 if ((i & (1U << bit_i)) && (i & (1U << bit_j))) {
1083 nr_2d_11[bit_i][bit_j]++;
1084 }
1085 if ((i & (1U << bit_i)) && !(i & (1U << bit_j))) {
1086 nr_2d_10[bit_i][bit_j]++;
1087 }
1088 if (!(i & (1U << bit_i)) && (i & (1U << bit_j))) {
1089 nr_2d_01[bit_i][bit_j]++;
1090 }
1091 if (!(i & (1U << bit_i)) && !(i & (1U << bit_j))) {
1092 nr_2d_00[bit_i][bit_j]++;
1093 }
1094 }
1095 }
1096 distrib_nr_child++;
1097 }
1098 break;
1099 }
1100 case RCU_JA_NULL:
1101 assert(mode == JA_RECOMPACT_ADD_NEXT);
1102 break;
1103 default:
1104 assert(0);
1105 break;
1106 }
1107
1108 if (mode == JA_RECOMPACT_ADD_NEXT || mode == JA_RECOMPACT_ADD_SAME) {
1109 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1110 for (bit_j = 0; bit_j < bit_i; bit_j++) {
1111 if ((n & (1U << bit_i)) && (n & (1U << bit_j))) {
1112 nr_2d_11[bit_i][bit_j]++;
1113 }
1114 if ((n & (1U << bit_i)) && !(n & (1U << bit_j))) {
1115 nr_2d_10[bit_i][bit_j]++;
1116 }
1117 if (!(n & (1U << bit_i)) && (n & (1U << bit_j))) {
1118 nr_2d_01[bit_i][bit_j]++;
1119 }
1120 if (!(n & (1U << bit_i)) && !(n & (1U << bit_j))) {
1121 nr_2d_00[bit_i][bit_j]++;
1122 }
1123 }
1124 }
1125 distrib_nr_child++;
1126 }
1127
1128 /*
1129 * The best bit selector is that for which the number of nodes
1130 * in each sub-class is closest to one-fourth of the number of
1131 * children in the distribution. We calculate the distance using
1132 * 4 times the size of the sub-distribution to eliminate
1133 * truncation error.
1134 */
1135 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1136 for (bit_j = 0; bit_j < bit_i; bit_j++) {
1137 unsigned int distance_to_best[4];
1138
1139 distance_to_best[0] = abs_int((nr_2d_11[bit_i][bit_j] << 2U) - distrib_nr_child);
1140 distance_to_best[1] = abs_int((nr_2d_10[bit_i][bit_j] << 2U) - distrib_nr_child);
1141 distance_to_best[2] = abs_int((nr_2d_01[bit_i][bit_j] << 2U) - distrib_nr_child);
1142 distance_to_best[3] = abs_int((nr_2d_00[bit_i][bit_j] << 2U) - distrib_nr_child);
1143
1144 /* Consider the worst distance to best */
1145 if (distance_to_best[1] > distance_to_best[0])
1146 distance_to_best[0] = distance_to_best[1];
1147 if (distance_to_best[2] > distance_to_best[0])
1148 distance_to_best[0] = distance_to_best[2];
1149 if (distance_to_best[3] > distance_to_best[0])
1150 distance_to_best[0] = distance_to_best[3];
1151 /*
1152 * If our worst distance is better than the overall
1153 * best, we become the new best candidate.
1154 */
1155 if (distance_to_best[0] < overall_best_distance) {
1156 overall_best_distance = distance_to_best[0];
1157 bitsel[0] = bit_i;
1158 bitsel[1] = bit_j;
1159 }
1160 }
1161 }
1162
1163 dbg_printf("2 dimensions pool bit selection: (%u,%u)\n", bitsel[0], bitsel[1]);
1164
1165 /* Return our bit selection */
1166 _bitsel[0] = bitsel[0];
1167 _bitsel[1] = bitsel[1];
1168 }
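
/*
 * Worked example (illustration only): with distrib_nr_child == 8 and a
 * bit pair splitting the children 3/3/1/1 across the four quadrants,
 * the quadrant distances are |4 * 3 - 8| == 4 and |4 * 1 - 8| == 4, so
 * the worst distance is 4. A perfect 2/2/2/2 split gives a worst
 * distance of 0.
 */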
1169
1170 /*
1171 * ja_node_recompact: recompact a node, adding or removing a child,
1172 * depending on the requested mode.
1173 * Return 0 on success, -EAGAIN if we need to retry, or another
1174 * negative error value otherwise.
1175 */
1175 static
1176 int ja_node_recompact(enum ja_recompact mode,
1177 struct cds_ja *ja,
1178 unsigned int old_type_index,
1179 const struct cds_ja_type *old_type,
1180 struct cds_ja_inode *old_node,
1181 struct cds_ja_shadow_node *shadow_node,
1182 struct cds_ja_inode_flag **old_node_flag_ptr, uint8_t n,
1183 struct cds_ja_inode_flag *child_node_flag,
1184 struct cds_ja_inode_flag **nullify_node_flag_ptr)
1185 {
1186 unsigned int new_type_index;
1187 struct cds_ja_inode *new_node;
1188 struct cds_ja_shadow_node *new_shadow_node = NULL;
1189 const struct cds_ja_type *new_type;
1190 struct cds_ja_inode_flag *new_node_flag, *old_node_flag;
1191 int ret;
1192 int fallback = 0;
1193
1194 old_node_flag = *old_node_flag_ptr;
1195
1196 switch (mode) {
1197 case JA_RECOMPACT_ADD_SAME:
1198 if (old_type->type_class == RCU_JA_POOL) {
1199 /*
1200 * For pool type, try redistributing
1201 * into a different distribution of same
1202 * size if we have not reached limits.
1203 */
1204 if (shadow_node->nr_child + 1 > old_type->max_child) {
1205 new_type_index = old_type_index + 1;
1206 } else if (shadow_node->nr_child + 1 < old_type->min_child) {
1207 new_type_index = old_type_index - 1;
1208 } else {
1209 new_type_index = old_type_index;
1210 }
1211 } else {
1212 new_type_index = old_type_index;
1213 }
1214 break;
1215 case JA_RECOMPACT_ADD_NEXT:
1216 if (!shadow_node || old_type_index == NODE_INDEX_NULL) {
1217 new_type_index = 0;
1218 } else {
1219 if (old_type->type_class == RCU_JA_POOL) {
1220 /*
1221 * For pool type, try redistributing
1222 * into a different distribution of same
1223 * size if we have not reached limits.
1224 */
1225 if (shadow_node->nr_child + 1 > old_type->max_child) {
1226 new_type_index = old_type_index + 1;
1227 } else {
1228 new_type_index = old_type_index;
1229 }
1230 } else {
1231 new_type_index = old_type_index + 1;
1232 }
1233 }
1234 break;
1235 case JA_RECOMPACT_DEL:
1236 if (old_type_index == 0) {
1237 new_type_index = NODE_INDEX_NULL;
1238 } else {
1239 if (old_type->type_class == RCU_JA_POOL) {
1240 /*
1241 * For pool type, try redistributing
1242 * into a different distribution of same
1243 * size if we have not reached limits.
1244 */
1245 if (shadow_node->nr_child - 1 < old_type->min_child) {
1246 new_type_index = old_type_index - 1;
1247 } else {
1248 new_type_index = old_type_index;
1249 }
1250 } else {
1251 new_type_index = old_type_index - 1;
1252 }
1253 }
1254 break;
1255 default:
1256 assert(0);
1257 }
1258
1259 retry: /* for fallback */
1260 dbg_printf("Recompact from type %d to type %d\n",
1261 old_type_index, new_type_index);
1262 new_type = &ja_types[new_type_index];
1263 if (new_type_index != NODE_INDEX_NULL) {
1264 new_node = alloc_cds_ja_node(new_type);
1265 if (!new_node)
1266 return -ENOMEM;
1267
1268 if (new_type->type_class == RCU_JA_POOL) {
1269 switch (new_type->nr_pool_order) {
1270 case 1:
1271 {
1272 unsigned int node_distrib_bitsel;
1273
1274 node_distrib_bitsel =
1275 ja_node_sum_distribution_1d(mode, ja,
1276 old_type_index, old_type,
1277 old_node, shadow_node,
1278 n, child_node_flag,
1279 nullify_node_flag_ptr);
1280 assert(!((unsigned long) new_node & JA_POOL_1D_MASK));
1281 new_node_flag = ja_node_flag_pool_1d(new_node,
1282 new_type_index, node_distrib_bitsel);
1283 break;
1284 }
1285 case 2:
1286 {
1287 unsigned int node_distrib_bitsel[2];
1288
1289 ja_node_sum_distribution_2d(mode, ja,
1290 old_type_index, old_type,
1291 old_node, shadow_node,
1292 n, child_node_flag,
1293 nullify_node_flag_ptr,
1294 node_distrib_bitsel);
1295 assert(!((unsigned long) new_node & JA_POOL_1D_MASK));
1296 assert(!((unsigned long) new_node & JA_POOL_2D_MASK));
1297 new_node_flag = ja_node_flag_pool_2d(new_node,
1298 new_type_index, node_distrib_bitsel);
1299 break;
1300 }
1301 default:
1302 assert(0);
1303 }
1304 } else {
1305 new_node_flag = ja_node_flag(new_node, new_type_index);
1306 }
1307
1308 dbg_printf("Recompact inherit lock from %p\n", shadow_node);
1309 new_shadow_node = rcuja_shadow_set(ja->ht, new_node_flag, shadow_node, ja);
1310 if (!new_shadow_node) {
1311 free(new_node);
1312 return -ENOMEM;
1313 }
1314 if (fallback)
1315 new_shadow_node->fallback_removal_count =
1316 JA_FALLBACK_REMOVAL_COUNT;
1317 } else {
1318 new_node = NULL;
1319 new_node_flag = NULL;
1320 }
1321
1322 assert(mode != JA_RECOMPACT_ADD_NEXT || old_type->type_class != RCU_JA_PIGEON);
1323
1324 if (new_type_index == NODE_INDEX_NULL)
1325 goto skip_copy;
1326
1327 switch (old_type->type_class) {
1328 case RCU_JA_LINEAR:
1329 {
1330 uint8_t nr_child =
1331 ja_linear_node_get_nr_child(old_type, old_node);
1332 unsigned int i;
1333
1334 for (i = 0; i < nr_child; i++) {
1335 struct cds_ja_inode_flag *iter;
1336 uint8_t v;
1337
1338 ja_linear_node_get_ith_pos(old_type, old_node, i, &v, &iter);
1339 if (!iter)
1340 continue;
1341 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
1342 continue;
1343 ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
1344 new_shadow_node,
1345 v, iter);
1346 if (new_type->type_class == RCU_JA_POOL && ret) {
1347 goto fallback_toosmall;
1348 }
1349 assert(!ret);
1350 }
1351 break;
1352 }
1353 case RCU_JA_POOL:
1354 {
1355 unsigned int pool_nr;
1356
1357 for (pool_nr = 0; pool_nr < (1U << old_type->nr_pool_order); pool_nr++) {
1358 struct cds_ja_inode *pool =
1359 ja_pool_node_get_ith_pool(old_type,
1360 old_node, pool_nr);
1361 uint8_t nr_child =
1362 ja_linear_node_get_nr_child(old_type, pool);
1363 unsigned int j;
1364
1365 for (j = 0; j < nr_child; j++) {
1366 struct cds_ja_inode_flag *iter;
1367 uint8_t v;
1368
1369 ja_linear_node_get_ith_pos(old_type, pool,
1370 j, &v, &iter);
1371 if (!iter)
1372 continue;
1373 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
1374 continue;
1375 ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
1376 new_shadow_node,
1377 v, iter);
1378 if (new_type->type_class == RCU_JA_POOL
1379 && ret) {
1380 goto fallback_toosmall;
1381 }
1382 assert(!ret);
1383 }
1384 }
1385 break;
1386 }
1387 case RCU_JA_NULL:
1388 assert(mode == JA_RECOMPACT_ADD_NEXT);
1389 break;
1390 case RCU_JA_PIGEON:
1391 {
1392 uint8_t nr_child;
1393 unsigned int i;
1394
1395 assert(mode == JA_RECOMPACT_DEL);
1396 nr_child = shadow_node->nr_child;
1397 for (i = 0; i < nr_child; i++) {
1398 struct cds_ja_inode_flag *iter;
1399
1400 iter = ja_pigeon_node_get_ith_pos(old_type, old_node, i);
1401 if (!iter)
1402 continue;
1403 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
1404 continue;
1405 ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
1406 new_shadow_node,
1407 i, iter);
1408 if (new_type->type_class == RCU_JA_POOL && ret) {
1409 goto fallback_toosmall;
1410 }
1411 assert(!ret);
1412 }
1413 break;
1414 }
1415 default:
1416 assert(0);
1417 ret = -EINVAL;
1418 goto end;
1419 }
1420 skip_copy:
1421
1422 if (mode == JA_RECOMPACT_ADD_NEXT || mode == JA_RECOMPACT_ADD_SAME) {
1423 /* add node */
1424 ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
1425 new_shadow_node,
1426 n, child_node_flag);
1427 if (new_type->type_class == RCU_JA_POOL && ret) {
1428 goto fallback_toosmall;
1429 }
1430 assert(!ret);
1431 }
1432
1433 if (fallback) {
1434 dbg_printf("Using fallback for %u children, node type index: %u, mode %s\n",
1435 new_shadow_node->nr_child, old_type_index, mode == JA_RECOMPACT_ADD_NEXT ? "add_next" :
1436 (mode == JA_RECOMPACT_DEL ? "del" : "add_same"));
1437 uatomic_inc(&node_fallback_count_distribution[new_shadow_node->nr_child]);
1438 }
1439
1440 /* Return pointer to new recompacted node through old_node_flag_ptr */
1441 *old_node_flag_ptr = new_node_flag;
1442 if (old_node) {
1443 int flags;
1444
1445 flags = RCUJA_SHADOW_CLEAR_FREE_NODE;
1446 /*
1447 * It is OK to free the lock associated with a node
1448 * going to NULL, since we are holding the parent lock.
1449 * This synchronizes removal with re-add of that node.
1450 */
1451 if (new_type_index == NODE_INDEX_NULL)
1452 flags = RCUJA_SHADOW_CLEAR_FREE_LOCK;
1453 ret = rcuja_shadow_clear(ja->ht, old_node_flag, shadow_node,
1454 flags);
1455 assert(!ret);
1456 }
1457
1458 ret = 0;
1459 end:
1460 return ret;
1461
1462 fallback_toosmall:
1463 /* fallback if next pool is too small */
1464 assert(new_shadow_node);
1465 ret = rcuja_shadow_clear(ja->ht, new_node_flag, new_shadow_node,
1466 RCUJA_SHADOW_CLEAR_FREE_NODE);
1467 assert(!ret);
1468
1469 switch (mode) {
1470 case JA_RECOMPACT_ADD_SAME:
1471 /*
1472 * JA_RECOMPACT_ADD_SAME is only triggered if a linear
1473 * node within a pool has unused entries. It should
1474 * therefore _never_ be too small.
1475 */
1476 //TODO assert(0);
1477
1478 /* Fall-through */
1479 case JA_RECOMPACT_ADD_NEXT:
1480 {
1481 const struct cds_ja_type *next_type;
1482
1483 /*
1484 * Recompaction attempt on add failed. Should only
1485 * happen if target node type is pool. Caused by
1486 * hard-to-split distribution. Recompact using the next
1487 * distribution size.
1488 */
1489 assert(new_type->type_class == RCU_JA_POOL);
1490 next_type = &ja_types[new_type_index + 1];
1491 /*
1492 * Try going to the next pool size if our population
1493 * fits within its range. This is not flagged as a
1494 * fallback.
1495 */
1496 if (shadow_node->nr_child + 1 >= next_type->min_child
1497 && shadow_node->nr_child + 1 <= next_type->max_child) {
1498 new_type_index++;
1499 goto retry;
1500 } else {
1501 new_type_index++;
1502 dbg_printf("Add fallback to type %d\n", new_type_index);
1503 uatomic_inc(&ja->nr_fallback);
1504 fallback = 1;
1505 goto retry;
1506 }
1507 break;
1508 }
1509 case JA_RECOMPACT_DEL:
1510 /*
1511 * Recompaction attempt on delete failed. Should only
1512 * happen if target node type is pool. This is caused by
1513 * a hard-to-split distribution. Recompact on same node
1514 * size, but flag current node as "fallback" to ensure
1515 * we don't attempt recompaction before some activity
1516 * has reshuffled our node.
1517 */
1518 assert(new_type->type_class == RCU_JA_POOL);
1519 new_type_index = old_type_index;
1520 dbg_printf("Delete fallback keeping type %d\n", new_type_index);
1521 uatomic_inc(&ja->nr_fallback);
1522 fallback = 1;
1523 goto retry;
1524 default:
1525 assert(0);
1526 return -EINVAL;
1527 }
1528
1529 /*
1530 * Last resort fallback: pigeon.
1531 */
1532 new_type_index = (1UL << JA_TYPE_BITS) - 1;
1533 dbg_printf("Fallback to type %d\n", new_type_index);
1534 uatomic_inc(&ja->nr_fallback);
1535 fallback = 1;
1536 goto retry;
1537 }
1538
1539 /*
1540 * Return 0 on success, -EAGAIN if we need to retry, or another negative
1541 * error value otherwise.
1542 */
1543 static
1544 int ja_node_set_nth(struct cds_ja *ja,
1545 struct cds_ja_inode_flag **node_flag, uint8_t n,
1546 struct cds_ja_inode_flag *child_node_flag,
1547 struct cds_ja_shadow_node *shadow_node)
1548 {
1549 int ret;
1550 unsigned int type_index;
1551 const struct cds_ja_type *type;
1552 struct cds_ja_inode *node;
1553
1554 dbg_printf("ja_node_set_nth for n=%u, node %p, shadow %p\n",
1555 (unsigned int) n, ja_node_ptr(*node_flag), shadow_node);
1556
1557 node = ja_node_ptr(*node_flag);
1558 type_index = ja_node_type(*node_flag);
1559 type = &ja_types[type_index];
1560 ret = _ja_node_set_nth(type, node, *node_flag, shadow_node,
1561 n, child_node_flag);
1562 switch (ret) {
1563 case -ENOSPC:
1564 /* Not enough space in node, need to recompact to next type. */
1565 ret = ja_node_recompact(JA_RECOMPACT_ADD_NEXT, ja, type_index, type, node,
1566 shadow_node, node_flag, n, child_node_flag, NULL);
1567 break;
1568 case -ERANGE:
1569 /* Node needs to be recompacted. */
1570 ret = ja_node_recompact(JA_RECOMPACT_ADD_SAME, ja, type_index, type, node,
1571 shadow_node, node_flag, n, child_node_flag, NULL);
1572 break;
1573 }
1574 return ret;
1575 }
1576
1577 /*
1578 * Return 0 on success, -EAGAIN if we need to retry, or another negative
1579 * error value otherwise.
1580 */
1581 static
1582 int ja_node_clear_ptr(struct cds_ja *ja,
1583 struct cds_ja_inode_flag **node_flag_ptr, /* Pointer to location to nullify */
1584 struct cds_ja_inode_flag **parent_node_flag_ptr, /* Address of parent ptr in its parent */
1585 struct cds_ja_shadow_node *shadow_node, /* of parent */
1586 uint8_t n)
1587 {
1588 int ret;
1589 unsigned int type_index;
1590 const struct cds_ja_type *type;
1591 struct cds_ja_inode *node;
1592
1593 dbg_printf("ja_node_clear_ptr for node %p, shadow %p, target ptr %p\n",
1594 ja_node_ptr(*parent_node_flag_ptr), shadow_node, node_flag_ptr);
1595
1596 node = ja_node_ptr(*parent_node_flag_ptr);
1597 type_index = ja_node_type(*parent_node_flag_ptr);
1598 type = &ja_types[type_index];
1599 ret = _ja_node_clear_ptr(type, node, *parent_node_flag_ptr, shadow_node, node_flag_ptr, n);
1600 if (ret == -EFBIG) {
1601 /* Should try recompaction. */
1602 ret = ja_node_recompact(JA_RECOMPACT_DEL, ja, type_index, type, node,
1603 shadow_node, parent_node_flag_ptr, n, NULL,
1604 node_flag_ptr);
1605 }
1606 return ret;
1607 }
1608
1609 struct cds_hlist_head cds_ja_lookup(struct cds_ja *ja, uint64_t key)
1610 {
1611 unsigned int tree_depth, i;
1612 struct cds_ja_inode_flag *node_flag;
1613 struct cds_hlist_head head = { NULL };
1614
1615 if (caa_unlikely(key > ja->key_max))
1616 return head;
1617 tree_depth = ja->tree_depth;
1618 node_flag = rcu_dereference(ja->root);
1619
1620 /* level 0: root node */
1621 if (!ja_node_ptr(node_flag))
1622 return head;
1623
1624 for (i = 1; i < tree_depth; i++) {
1625 uint8_t iter_key;
1626
1627 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1)));
1628 node_flag = ja_node_get_nth(node_flag, NULL, NULL, NULL,
1629 iter_key);
1630 dbg_printf("cds_ja_lookup iter key lookup %u finds node_flag %p\n",
1631 (unsigned int) iter_key, node_flag);
1632 if (!ja_node_ptr(node_flag))
1633 return head;
1634 }
1635
1636 /* Last level lookup succeeded. We got an actual match. */
1637 head.next = (struct cds_hlist_node *) node_flag;
1638 return head;
1639 }
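
/*
 * Usage sketch (illustration only, not part of the original file):
 * iterating the duplicate list returned by cds_ja_lookup(). The helper
 * name is hypothetical; the caller must hold the RCU read-side lock.
 */
static inline __attribute__((unused))
unsigned int ja_example_count_duplicates(struct cds_ja *ja, uint64_t key)
{
	struct cds_hlist_head head;
	struct cds_hlist_node *pos;
	unsigned int count = 0;

	head = cds_ja_lookup(ja, key);
	for (pos = rcu_dereference(head.next); pos;
			pos = rcu_dereference(pos->next))
		count++;	/* one duplicate entry per list node */
	return count;
}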
1640
1641 /*
1642 * We reached an unpopulated node. Create it and the children we need,
1643 * and then attach the entire branch to the current node. This may
1644 * trigger recompaction of the current node. Locks needed: node lock
1645 * (for add), and, possibly, parent node lock (to update pointer due to
1646 * node recompaction).
1647 *
1648 * First take node lock, check if recompaction is needed, then take
1649 * parent lock (if needed). Then we can proceed to create the new
1650 * branch. Publish the new branch, and release locks.
1651 * TODO: we currently always take the parent lock even when not needed.
1652 */
1653 static
1654 int ja_attach_node(struct cds_ja *ja,
1655 struct cds_ja_inode_flag **attach_node_flag_ptr,
1656 struct cds_ja_inode_flag *attach_node_flag,
1657 struct cds_ja_inode_flag **node_flag_ptr,
1658 struct cds_ja_inode_flag *node_flag,
1659 struct cds_ja_inode_flag *parent_node_flag,
1660 uint64_t key,
1661 unsigned int level,
1662 struct cds_ja_node *child_node)
1663 {
1664 struct cds_ja_shadow_node *shadow_node = NULL,
1665 *parent_shadow_node = NULL;
1666 struct cds_ja_inode *node = ja_node_ptr(node_flag);
1667 struct cds_ja_inode *parent_node = ja_node_ptr(parent_node_flag);
1668 struct cds_hlist_head head;
1669 struct cds_ja_inode_flag *iter_node_flag, *iter_dest_node_flag;
1670 int ret, i;
1671 struct cds_ja_inode_flag *created_nodes[JA_MAX_DEPTH];
1672 int nr_created_nodes = 0;
1673
1674 dbg_printf("Attach node at level %u (node %p, node_flag %p)\n",
1675 level, node, node_flag);
1676
1677 assert(node);
1678 shadow_node = rcuja_shadow_lookup_lock(ja->ht, node_flag);
1679 if (!shadow_node) {
1680 ret = -EAGAIN;
1681 goto end;
1682 }
1683 if (parent_node) {
1684 parent_shadow_node = rcuja_shadow_lookup_lock(ja->ht,
1685 parent_node_flag);
1686 if (!parent_shadow_node) {
1687 ret = -EAGAIN;
1688 goto unlock_shadow;
1689 }
1690 }
1691
1692 if (node_flag_ptr && ja_node_ptr(*node_flag_ptr)) {
1693 /*
1694 * Target node has been updated between RCU lookup and
1695 * lock acquisition. We need to re-try lookup and
1696 * attach.
1697 */
1698 ret = -EAGAIN;
1699 goto unlock_parent;
1700 }
1701
1702 if (attach_node_flag_ptr && ja_node_ptr(*attach_node_flag_ptr) !=
1703 ja_node_ptr(attach_node_flag)) {
1704 /*
1705 * Target node has been updated between RCU lookup and
1706 * lock acquisition. We need to re-try lookup and
1707 * attach.
1708 */
1709 ret = -EAGAIN;
1710 goto unlock_parent;
1711 }
1712
1713 /* Create new branch, starting from bottom */
1714 CDS_INIT_HLIST_HEAD(&head);
1715 cds_hlist_add_head_rcu(&child_node->list, &head);
1716 iter_node_flag = (struct cds_ja_inode_flag *) head.next;
1717
1718 for (i = ja->tree_depth; i > (int) level; i--) {
1719 uint8_t iter_key;
1720
1721 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (ja->tree_depth - i)));
1722 dbg_printf("branch creation level %d, key %u\n",
1723 i - 1, (unsigned int) iter_key);
1724 iter_dest_node_flag = NULL;
1725 ret = ja_node_set_nth(ja, &iter_dest_node_flag,
1726 iter_key,
1727 iter_node_flag,
1728 NULL);
1729 if (ret)
1730 goto check_error;
1731 created_nodes[nr_created_nodes++] = iter_dest_node_flag;
1732 iter_node_flag = iter_dest_node_flag;
1733 }
1734
1735 if (level > 1) {
1736 uint8_t iter_key;
1737
1738 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (ja->tree_depth - level)));
1739 /* We need to use set_nth on the previous level. */
1740 iter_dest_node_flag = node_flag;
1741 ret = ja_node_set_nth(ja, &iter_dest_node_flag,
1742 iter_key,
1743 iter_node_flag,
1744 shadow_node);
1745 if (ret)
1746 goto check_error;
1747 created_nodes[nr_created_nodes++] = iter_dest_node_flag;
1748 iter_node_flag = iter_dest_node_flag;
1749 }
1750
1751 /* Publish new branch */
1752 dbg_printf("Publish branch %p, replacing %p\n",
1753 iter_node_flag, *attach_node_flag_ptr);
1754 rcu_assign_pointer(*attach_node_flag_ptr, iter_node_flag);
1755
1756 /* Success */
1757 ret = 0;
1758
1759 check_error:
1760 if (ret) {
1761 for (i = 0; i < nr_created_nodes; i++) {
1762 int tmpret;
1763 int flags;
1764
1765 flags = RCUJA_SHADOW_CLEAR_FREE_LOCK;
1766 if (i)
1767 flags |= RCUJA_SHADOW_CLEAR_FREE_NODE;
1768 tmpret = rcuja_shadow_clear(ja->ht,
1769 created_nodes[i],
1770 NULL,
1771 flags);
1772 assert(!tmpret);
1773 }
1774 }
1775 unlock_parent:
1776 if (parent_shadow_node)
1777 rcuja_shadow_unlock(parent_shadow_node);
1778 unlock_shadow:
1779 if (shadow_node)
1780 rcuja_shadow_unlock(shadow_node);
1781 end:
1782 return ret;
1783 }
1784
1785 /*
1786 * Lock the parent containing the hlist head pointer, and add node to list of
1787 * duplicates. Failure can happen if a concurrent update changes the
1788 * parent before we get the lock. We return -EAGAIN in that case.
1789 * Return 0 on success, negative error value on failure.
1790 */
1791 static
1792 int ja_chain_node(struct cds_ja *ja,
1793 struct cds_ja_inode_flag *parent_node_flag,
1794 struct cds_ja_inode_flag **node_flag_ptr,
1795 struct cds_ja_inode_flag *node_flag,
1796 struct cds_hlist_head *head,
1797 struct cds_ja_node *node)
1798 {
1799 struct cds_ja_shadow_node *shadow_node;
1800 int ret = 0;
1801
1802 shadow_node = rcuja_shadow_lookup_lock(ja->ht, parent_node_flag);
1803 if (!shadow_node) {
1804 return -EAGAIN;
1805 }
1806 if (ja_node_ptr(*node_flag_ptr) != ja_node_ptr(node_flag)) {
1807 ret = -EAGAIN;
1808 goto end;
1809 }
1810 cds_hlist_add_head_rcu(&node->list, head);
1811 end:
1812 rcuja_shadow_unlock(shadow_node);
1813 return ret;
1814 }
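/*
 * Note: the list head used for chaining duplicates is the child slot
 * itself: cds_ja_add below passes attach_node_flag_ptr cast to a
 * struct cds_hlist_head pointer, so duplicate nodes hang directly off
 * the last internal level.
 */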
1815
1816 int cds_ja_add(struct cds_ja *ja, uint64_t key,
1817 struct cds_ja_node *new_node)
1818 {
1819 unsigned int tree_depth, i;
1820 struct cds_ja_inode_flag **attach_node_flag_ptr,
1821 **node_flag_ptr;
1822 struct cds_ja_inode_flag *node_flag,
1823 *parent_node_flag,
1824 *parent2_node_flag,
1825 *attach_node_flag;
1826 int ret;
1827
1828 if (caa_unlikely(key > ja->key_max)) {
1829 return -EINVAL;
1830 }
1831 tree_depth = ja->tree_depth;
1832
1833 retry:
1834 dbg_printf("cds_ja_add attempt: key %" PRIu64 ", node %p\n",
1835 key, new_node);
1836 parent2_node_flag = NULL;
1837 parent_node_flag =
1838 (struct cds_ja_inode_flag *) &ja->root; /* Use root ptr address as key for mutex */
1839 attach_node_flag_ptr = &ja->root;
1840 attach_node_flag = rcu_dereference(ja->root);
1841 node_flag_ptr = &ja->root;
1842 node_flag = rcu_dereference(ja->root);
1843
1844 /* Iterate on all internal levels */
1845 for (i = 1; i < tree_depth; i++) {
1846 uint8_t iter_key;
1847
1848 dbg_printf("cds_ja_add iter attach_node_flag_ptr %p node_flag_ptr %p node_flag %p\n",
1849 attach_node_flag_ptr, node_flag_ptr, node_flag);
1850 if (!ja_node_ptr(node_flag)) {
1851 ret = ja_attach_node(ja, attach_node_flag_ptr,
1852 attach_node_flag,
1853 node_flag_ptr,
1854 parent_node_flag,
1855 parent2_node_flag,
1856 key, i, new_node);
1857 if (ret == -EAGAIN || ret == -EEXIST)
1858 goto retry;
1859 else
1860 goto end;
1861 }
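/*
 * Select the key byte for this level: level 1 uses the
 * most-significant populated byte, deeper levels use progressively
 * less-significant bytes.
 */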
1862 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1)));
1863 parent2_node_flag = parent_node_flag;
1864 parent_node_flag = node_flag;
1865 node_flag = ja_node_get_nth(node_flag,
1866 &attach_node_flag_ptr,
1867 &attach_node_flag,
1868 &node_flag_ptr,
1869 iter_key);
1870 dbg_printf("cds_ja_add iter key lookup %u finds node_flag %p attach_node_flag_ptr %p node_flag_ptr %p\n",
1871 (unsigned int) iter_key, node_flag,
1872 attach_node_flag_ptr,
1873 node_flag_ptr);
1874 }
1875
1876 /*
1877 * We reached the bottom of the tree: simply add the node to the last
1878 * internal level, or chain it if the key is already present.
1879 */
1880 if (!ja_node_ptr(node_flag)) {
1881 dbg_printf("cds_ja_add attach_node_flag_ptr %p node_flag_ptr %p node_flag %p\n",
1882 attach_node_flag_ptr, node_flag_ptr, node_flag);
1883 ret = ja_attach_node(ja, attach_node_flag_ptr,
1884 attach_node_flag,
1885 node_flag_ptr, parent_node_flag,
1886 parent2_node_flag, key, i, new_node);
1887 } else {
1888 ret = ja_chain_node(ja,
1889 parent_node_flag,
1890 node_flag_ptr,
1891 node_flag,
1892 (struct cds_hlist_head *) attach_node_flag_ptr,
1893 new_node);
1894 }
1895 if (ret == -EAGAIN || ret == -EEXIST)
1896 goto retry;
1897 end:
1898 return ret;
1899 }
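/*
 * Illustrative usage sketch for cds_ja_add (not part of rcuja.c):
 * "struct my_entry" and my_insert() are hypothetical, and we assume
 * the application uses a urcu flavor providing rcu_read_lock() and
 * rcu_read_unlock(). Updates, like lookups, are performed within an
 * RCU read-side critical section. Duplicate keys are allowed: they
 * are chained through ja_chain_node above.
 */
#if 0	/* example only */
struct my_entry {
	uint64_t key;
	struct cds_ja_node node;	/* judy array linkage */
};

static int my_insert(struct cds_ja *ja, struct my_entry *entry)
{
	int ret;

	rcu_read_lock();
	ret = cds_ja_add(ja, entry->key, &entry->node);
	rcu_read_unlock();
	return ret;	/* 0 on success, -EINVAL if key > key_max */
}
#endif	/* example only */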
1900
1901 /*
1902 * Note: there is no need to look up the pointer address associated with
1903 * each node's nth item after taking the lock: it has already been done by
1904 * cds_ja_del while holding the RCU read-side lock, and our node rules
1905 * ensure that once a matching value -> pointer pair is found in a node, it is
1906 * _NEVER_ changed for that node without recompaction, and recompaction
1907 * reallocates the node.
1908 * However, when a child is removed from "linear" nodes, its pointer
1909 * is set to NULL. We therefore check, while holding the locks, if this
1910 * pointer is NULL, and return -ENOENT to the caller if so.
1911 */
1912 static
1913 int ja_detach_node(struct cds_ja *ja,
1914 struct cds_ja_inode_flag **snapshot,
1915 struct cds_ja_inode_flag ***snapshot_ptr,
1916 uint8_t *snapshot_n,
1917 int nr_snapshot,
1918 uint64_t key,
1919 struct cds_ja_node *node)
1920 {
1921 struct cds_ja_shadow_node *shadow_nodes[JA_MAX_DEPTH];
1922 struct cds_ja_inode_flag **node_flag_ptr = NULL,
1923 *parent_node_flag = NULL,
1924 **parent_node_flag_ptr = NULL;
1925 struct cds_ja_inode_flag *iter_node_flag;
1926 int ret, i, nr_shadow = 0, nr_clear = 0, nr_branch = 0;
1927 uint8_t n = 0;
1928
1929 assert(nr_snapshot == ja->tree_depth + 1);
1930
1931 /*
1932 * From the last internal level node going up, get the node
1933 * lock, check if the node has only one child left. If it is the
1934 * case, we continue iterating upward. When we reach a node
1935 * which has more than one child left, we lock the parent, and
1936 * proceed to the node deletion (removing its children too).
1937 */
1938 for (i = nr_snapshot - 2; i >= 1; i--) {
1939 struct cds_ja_shadow_node *shadow_node;
1940
1941 shadow_node = rcuja_shadow_lookup_lock(ja->ht,
1942 snapshot[i]);
1943 if (!shadow_node) {
1944 ret = -EAGAIN;
1945 goto end;
1946 }
1947 shadow_nodes[nr_shadow++] = shadow_node;
1948
1949 /*
1950 * Check if node has been removed between RCU
1951 * lookup and lock acquisition.
1952 */
1953 assert(snapshot_ptr[i + 1]);
1954 if (ja_node_ptr(*snapshot_ptr[i + 1])
1955 != ja_node_ptr(snapshot[i + 1])) {
1956 ret = -ENOENT;
1957 goto end;
1958 }
1959
1960 assert(shadow_node->nr_child > 0);
1961 if (shadow_node->nr_child == 1 && i > 1)
1962 nr_clear++;
1963 nr_branch++;
1964 if (shadow_node->nr_child > 1 || i == 1) {
1965 /* Lock parent and break */
1966 shadow_node = rcuja_shadow_lookup_lock(ja->ht,
1967 snapshot[i - 1]);
1968 if (!shadow_node) {
1969 ret = -EAGAIN;
1970 goto end;
1971 }
1972 shadow_nodes[nr_shadow++] = shadow_node;
1973
1974 /*
1975 * Check if node has been removed between RCU
1976 * lookup and lock acquisition.
1977 */
1978 assert(snapshot_ptr[i]);
1979 if (ja_node_ptr(*snapshot_ptr[i])
1980 != ja_node_ptr(snapshot[i])) {
1981 ret = -ENOENT;
1982 goto end;
1983 }
1984
1985 node_flag_ptr = snapshot_ptr[i + 1];
1986 n = snapshot_n[i + 1];
1987 parent_node_flag_ptr = snapshot_ptr[i];
1988 parent_node_flag = snapshot[i];
1989
1990 if (i > 1) {
1991 /*
1992 * Lock parent's parent, in case we need
1993 * to recompact parent.
1994 */
1995 shadow_node = rcuja_shadow_lookup_lock(ja->ht,
1996 snapshot[i - 2]);
1997 if (!shadow_node) {
1998 ret = -EAGAIN;
1999 goto end;
2000 }
2001 shadow_nodes[nr_shadow++] = shadow_node;
2002
2003 /*
2004 * Check if node has been removed between RCU
2005 * lookup and lock acquisition.
2006 */
2007 assert(snapshot_ptr[i - 1]);
2008 if (ja_node_ptr(*snapshot_ptr[i - 1])
2009 != ja_node_ptr(snapshot[i - 1])) {
2010 ret = -ENOENT;
2011 goto end;
2012 }
2013 }
2014
2015 break;
2016 }
2017 }
2018
2019 /*
2020 * At this point, we want to delete all nodes that are about to
2021 * be removed from shadow_nodes (except the last one, which is
2022 * either the root or the parent of the uppermost node with one
2023 * child). It is OK to free the lock here, because the RCU read lock
2024 * is held, and the free is only performed from call_rcu.
2025 */
2026
2027 for (i = 0; i < nr_clear; i++) {
2028 ret = rcuja_shadow_clear(ja->ht,
2029 shadow_nodes[i]->node_flag,
2030 shadow_nodes[i],
2031 RCUJA_SHADOW_CLEAR_FREE_NODE
2032 | RCUJA_SHADOW_CLEAR_FREE_LOCK);
2033 assert(!ret);
2034 }
2035
2036 iter_node_flag = parent_node_flag;
2037 /* Remove from parent */
2038 ret = ja_node_clear_ptr(ja,
2039 node_flag_ptr, /* Pointer to location to nullify */
2040 &iter_node_flag, /* In: old parent ptr; out: new (possibly recompacted) parent ptr */
2041 shadow_nodes[nr_branch - 1], /* of parent */
2042 n);
2043 if (ret)
2044 goto end;
2045
2046 dbg_printf("ja_detach_node: publish %p instead of %p\n",
2047 iter_node_flag, *parent_node_flag_ptr);
2048 /* Update address of parent ptr in its parent */
2049 rcu_assign_pointer(*parent_node_flag_ptr, iter_node_flag);
2050
2051 end:
2052 for (i = 0; i < nr_shadow; i++)
2053 rcuja_shadow_unlock(shadow_nodes[i]);
2054 return ret;
2055 }
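/*
 * Illustration (hypothetical shape): removing the last entry under a
 * chain of single-child nodes
 *
 *	parent (nr_child > 1) -> A (nr_child == 1) -> B (nr_child == 1)
 *
 * walks up from B, counts A and B for clearing (nr_clear), locks
 * "parent" and its own parent, frees A and B through
 * rcuja_shadow_clear, then nullifies the pointer to A within "parent"
 * via ja_node_clear_ptr and publishes the possibly recompacted
 * "parent".
 */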
2056
2057 static
2058 int ja_unchain_node(struct cds_ja *ja,
2059 struct cds_ja_inode_flag *parent_node_flag,
2060 struct cds_ja_inode_flag **node_flag_ptr,
2061 struct cds_ja_inode_flag *node_flag,
2062 struct cds_ja_node *node)
2063 {
2064 struct cds_ja_shadow_node *shadow_node;
2065 struct cds_hlist_node *hlist_node;
2066 struct cds_hlist_head hlist_head;
2067 int ret = 0, count = 0, found = 0;
2068
2069 shadow_node = rcuja_shadow_lookup_lock(ja->ht, parent_node_flag);
2070 if (!shadow_node)
2071 return -EAGAIN;
2072 if (ja_node_ptr(*node_flag_ptr) != ja_node_ptr(node_flag)) {
2073 ret = -EAGAIN;
2074 goto end;
2075 }
2076 hlist_head.next = (struct cds_hlist_node *) ja_node_ptr(node_flag);
2077 /*
2078 * Retry if another thread removed all but one of the duplicates
2079 * since the initial check (which was performed without the lock).
2080 * Ensure that the node we are about to remove is still in the
2081 * list (while holding lock).
2082 */
2083 cds_hlist_for_each_rcu(hlist_node, &hlist_head) {
2084 if (count == 0) {
2085 /* FIXME: work-around: repair the first element's stale prev pointer so that deleting the list head updates the right location. */
2086 hlist_node->prev = (struct cds_hlist_node *) node_flag_ptr;
2087 }
2088 count++;
2089 if (hlist_node == &node->list)
2090 found++;
2091 }
2092 assert(found <= 1);
2093 if (!found || count == 1) {
2094 ret = -EAGAIN;
2095 goto end;
2096 }
2097 cds_hlist_del_rcu(&node->list);
2098 /*
2099 * Validate that we indeed removed the node from the linked list.
2100 */
2101 assert(ja_node_ptr(*node_flag_ptr) != (struct cds_ja_inode *) node);
2102 end:
2103 rcuja_shadow_unlock(shadow_node);
2104 return ret;
2105 }
2106
2107 /*
2108 * Called with RCU read lock held.
2109 */
2110 int cds_ja_del(struct cds_ja *ja, uint64_t key,
2111 struct cds_ja_node *node)
2112 {
2113 unsigned int tree_depth, i;
2114 struct cds_ja_inode_flag *snapshot[JA_MAX_DEPTH];
2115 struct cds_ja_inode_flag **snapshot_ptr[JA_MAX_DEPTH];
2116 uint8_t snapshot_n[JA_MAX_DEPTH];
2117 struct cds_ja_inode_flag *node_flag;
2118 struct cds_ja_inode_flag **prev_node_flag_ptr,
2119 **node_flag_ptr;
2120 int nr_snapshot;
2121 int ret;
2122
2123 if (caa_unlikely(key > ja->key_max))
2124 return -EINVAL;
2125 tree_depth = ja->tree_depth;
2126
2127 retry:
2128 nr_snapshot = 0;
2129 dbg_printf("cds_ja_del attempt: key %" PRIu64 ", node %p\n",
2130 key, node);
2131
2132 /* snapshot for level 0 is only for shadow node lookup */
2133 snapshot_n[0] = 0;
2134 snapshot_n[1] = 0;
2135 snapshot_ptr[nr_snapshot] = NULL;
2136 snapshot[nr_snapshot++] = (struct cds_ja_inode_flag *) &ja->root;
2137 node_flag = rcu_dereference(ja->root);
2138 prev_node_flag_ptr = &ja->root;
2139 node_flag_ptr = &ja->root;
2140
2141 /* Iterate on all internal levels */
2142 for (i = 1; i < tree_depth; i++) {
2143 uint8_t iter_key;
2144
2145 dbg_printf("cds_ja_del iter node_flag %p\n",
2146 node_flag);
2147 if (!ja_node_ptr(node_flag)) {
2148 return -ENOENT;
2149 }
2150 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1)));
2151 snapshot_n[nr_snapshot + 1] = iter_key;
2152 snapshot_ptr[nr_snapshot] = prev_node_flag_ptr;
2153 snapshot[nr_snapshot++] = node_flag;
2154 node_flag = ja_node_get_nth(node_flag,
2155 &prev_node_flag_ptr,
2156 NULL,
2157 &node_flag_ptr,
2158 iter_key);
2159 dbg_printf("cds_ja_del iter key lookup %u finds node_flag %p, prev_node_flag_ptr %p\n",
2160 (unsigned int) iter_key, node_flag,
2161 prev_node_flag_ptr);
2162 }
2163 /*
2164 * We reached the bottom of the tree: try to find the node we are asked
2165 * to remove. Fail if we cannot find it.
2166 */
2167 if (!ja_node_ptr(node_flag)) {
2168 dbg_printf("cds_ja_del: no node found for key %" PRIu64 "\n",
2169 key);
2170 return -ENOENT;
2171 } else {
2172 struct cds_hlist_head hlist_head;
2173 struct cds_hlist_node *hlist_node;
2174 struct cds_ja_node *entry, *match = NULL;
2175 int count = 0;
2176
2177 hlist_head.next =
2178 (struct cds_hlist_node *) ja_node_ptr(node_flag);
2179 cds_hlist_for_each_entry_rcu(entry,
2180 hlist_node,
2181 &hlist_head,
2182 list) {
2183 dbg_printf("cds_ja_del: compare %p with entry %p\n", node, entry);
2184 if (entry == node)
2185 match = entry;
2186 count++;
2187 }
2188 if (!match) {
2189 dbg_printf("cds_ja_del: no node match for node %p key %" PRIu64 "\n", node, key);
2190 return -ENOENT;
2191 }
2192 assert(count > 0);
2193 if (count == 1) {
2194 /*
2195 * Removing last of duplicates. Last snapshot
2196 * does not have a shadow node (external leaves).
2197 */
2198 snapshot_ptr[nr_snapshot] = prev_node_flag_ptr;
2199 snapshot[nr_snapshot++] = node_flag;
2200 ret = ja_detach_node(ja, snapshot, snapshot_ptr,
2201 snapshot_n, nr_snapshot, key, node);
2202 } else {
2203 ret = ja_unchain_node(ja, snapshot[nr_snapshot - 1],
2204 node_flag_ptr, node_flag, match);
2205 }
2206 }
2207 /*
2208 * Explanation of -ENOENT handling: it is caused by a concurrent
2209 * delete between RCU lookup and the actual removal. We need to redo
2210 * the lookup and removal attempt.
2211 */
2212 if (ret == -EAGAIN || ret == -ENOENT)
2213 goto retry;
2214 return ret;
2215 }
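/*
 * Illustrative removal sketch (not part of rcuja.c), reusing the
 * hypothetical struct my_entry from the cds_ja_add example above.
 * cds_ja_del only unlinks the node: the actual free must be deferred
 * (e.g. through the flavor's call_rcu), since concurrent RCU readers
 * may still hold references to it.
 */
#if 0	/* example only */
static void my_entry_free_rcu(struct rcu_head *head)
{
	struct cds_ja_node *ja_node =
		caa_container_of(head, struct cds_ja_node, head);
	struct my_entry *entry =
		caa_container_of(ja_node, struct my_entry, node);

	free(entry);
}

static int my_remove(struct cds_ja *ja, struct my_entry *entry)
{
	int ret;

	rcu_read_lock();
	ret = cds_ja_del(ja, entry->key, &entry->node);
	rcu_read_unlock();
	if (!ret)
		call_rcu(&entry->node.head, my_entry_free_rcu);
	return ret;
}
#endif	/* example only */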
2216
2217 struct cds_ja *_cds_ja_new(unsigned int key_bits,
2218 const struct rcu_flavor_struct *flavor)
2219 {
2220 struct cds_ja *ja;
2221 int ret;
2222 struct cds_ja_shadow_node *root_shadow_node;
2223
2224 ja = calloc(1, sizeof(*ja));
2225 if (!ja)
2226 goto ja_error;
2227
2228 switch (key_bits) {
2229 case 8:
2230 case 16:
2231 case 24:
2232 case 32:
2233 case 40:
2234 case 48:
2235 case 56:
2236 ja->key_max = (1ULL << key_bits) - 1;
2237 break;
2238 case 64:
2239 ja->key_max = UINT64_MAX;
2240 break;
2241 default:
2242 goto check_error;
2243 }
2244
2245 /* ja->root is NULL */
2246 /* tree_depth 0 is for pointer to root node */
2247 ja->tree_depth = (key_bits >> JA_LOG2_BITS_PER_BYTE) + 1;
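	/*
	 * e.g. key_bits == 64: (64 >> 3) == 8 key-byte levels, plus one
	 * level for the root pointer, gives tree_depth == 9.
	 */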
2248 assert(ja->tree_depth <= JA_MAX_DEPTH);
2249 ja->ht = rcuja_create_ht(flavor);
2250 if (!ja->ht)
2251 goto ht_error;
2252
2253 /*
2254 * Note: we should not free this node until the judy array is destroyed.
2255 */
2256 root_shadow_node = rcuja_shadow_set(ja->ht,
2257 (struct cds_ja_inode_flag *) &ja->root,
2258 NULL, ja);
2259 if (!root_shadow_node) {
2260 ret = -ENOMEM;
2261 goto ht_node_error;
2262 }
2263 root_shadow_node->level = 0;
2264
2265 return ja;
2266
2267 ht_node_error:
2268 ret = rcuja_delete_ht(ja->ht);
2269 assert(!ret);
2270 ht_error:
2271 check_error:
2272 free(ja);
2273 ja_error:
2274 return NULL;
2275 }
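/*
 * Illustrative creation sketch (not part of rcuja.c). The rcu_flavor
 * symbol below assumes the default urcu flavor is linked in; the
 * public headers may provide a wrapper hiding this detail.
 */
#if 0	/* example only */
static struct cds_ja *my_create(void)
{
	/* 16-bit keys: key_max == 65535, tree_depth == 3. */
	return _cds_ja_new(16, &rcu_flavor);
}
#endif	/* example only */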
2276
2277 /*
2278 * Called from RCU read-side CS.
2279 */
2280 __attribute__((visibility("protected")))
2281 void rcuja_free_all_children(struct cds_ja_shadow_node *shadow_node,
2282 struct cds_ja_inode_flag *node_flag,
2283 void (*free_node_cb)(struct rcu_head *head))
2284 {
2285 const struct rcu_flavor_struct *flavor;
2286 unsigned int type_index;
2287 struct cds_ja_inode *node;
2288 const struct cds_ja_type *type;
2289
2290 flavor = cds_lfht_rcu_flavor(shadow_node->ja->ht);
2291 node = ja_node_ptr(node_flag);
2292 assert(node != NULL);
2293 type_index = ja_node_type(node_flag);
2294 type = &ja_types[type_index];
2295
2296 switch (type->type_class) {
2297 case RCU_JA_LINEAR:
2298 {
2299 uint8_t nr_child =
2300 ja_linear_node_get_nr_child(type, node);
2301 unsigned int i;
2302
2303 for (i = 0; i < nr_child; i++) {
2304 struct cds_ja_inode_flag *iter;
2305 struct cds_hlist_head head;
2306 struct cds_ja_node *entry;
2307 struct cds_hlist_node *pos;
2308 uint8_t v;
2309
2310 ja_linear_node_get_ith_pos(type, node, i, &v, &iter);
2311 if (!iter)
2312 continue;
2313 head.next = (struct cds_hlist_node *) iter;
2314 cds_hlist_for_each_entry_rcu(entry, pos, &head, list) {
2315 flavor->update_call_rcu(&entry->head, free_node_cb);
2316 }
2317 }
2318 break;
2319 }
2320 case RCU_JA_POOL:
2321 {
2322 unsigned int pool_nr;
2323
2324 for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) {
2325 struct cds_ja_inode *pool =
2326 ja_pool_node_get_ith_pool(type, node, pool_nr);
2327 uint8_t nr_child =
2328 ja_linear_node_get_nr_child(type, pool);
2329 unsigned int j;
2330
2331 for (j = 0; j < nr_child; j++) {
2332 struct cds_ja_inode_flag *iter;
2333 struct cds_hlist_head head;
2334 struct cds_ja_node *entry;
2335 struct cds_hlist_node *pos;
2336 uint8_t v;
2337
2338 ja_linear_node_get_ith_pos(type, pool, j, &v, &iter);
2339 if (!iter)
2340 continue;
2341 head.next = (struct cds_hlist_node *) iter;
2342 cds_hlist_for_each_entry_rcu(entry, pos, &head, list) {
2343 flavor->update_call_rcu(&entry->head, free_node_cb);
2344 }
2345 }
2346 }
2347 break;
2348 }
2349 case RCU_JA_NULL:
2350 break;
2351 case RCU_JA_PIGEON:
2352 {
2353 uint8_t nr_child;
2354 unsigned int i;
2355
2356 nr_child = shadow_node->nr_child;
2357 for (i = 0; i < nr_child; i++) {
2358 struct cds_ja_inode_flag *iter;
2359 struct cds_hlist_head head;
2360 struct cds_ja_node *entry;
2361 struct cds_hlist_node *pos;
2362
2363 iter = ja_pigeon_node_get_ith_pos(type, node, i);
2364 if (!iter)
2365 continue;
2366 head.next = (struct cds_hlist_node *) iter;
2367 cds_hlist_for_each_entry_rcu(entry, pos, &head, list) {
2368 flavor->update_call_rcu(&entry->head, free_node_cb);
2369 }
2370 }
2371 break;
2372 }
2373 default:
2374 assert(0);
2375 }
2376 }
2377
2378 static
2379 void print_debug_fallback_distribution(void)
2380 {
2381 int i;
2382
2383 fprintf(stderr, "Fallback node distribution:\n");
2384 for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
2385 if (!node_fallback_count_distribution[i])
2386 continue;
2387 fprintf(stderr, " %3d: %4lu\n",
2388 i, node_fallback_count_distribution[i]);
2389 }
2390 }
2391
2392 /*
2393 * There should be no more concurrent adds to the judy array while it is
2394 * being destroyed (ensured by the caller).
2395 */
2396 int cds_ja_destroy(struct cds_ja *ja,
2397 void (*free_node_cb)(struct rcu_head *head))
2398 {
2399 int ret;
2400
2401 rcuja_shadow_prune(ja->ht,
2402 RCUJA_SHADOW_CLEAR_FREE_NODE | RCUJA_SHADOW_CLEAR_FREE_LOCK,
2403 free_node_cb);
2404 ret = rcuja_delete_ht(ja->ht);
2405 if (ret)
2406 return ret;
2407 if (uatomic_read(&ja->nr_fallback))
2408 fprintf(stderr,
2409 "[warning] RCU Judy Array used %lu fallback node(s)\n",
2410 uatomic_read(&ja->nr_fallback));
2411 print_debug_fallback_distribution();
2412 free(ja);
2413 return 0;
2414 }
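/*
 * Illustrative teardown sketch (not part of rcuja.c), reusing the
 * hypothetical my_entry_free_rcu callback from the removal example:
 * cds_ja_destroy invokes free_node_cb on each remaining entry's
 * rcu_head (see rcuja_free_all_children above). The caller must
 * ensure no concurrent adds are in flight.
 */
#if 0	/* example only */
static int my_teardown(struct cds_ja *ja)
{
	return cds_ja_destroy(ja, my_entry_free_rcu);
}
#endif	/* example only */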