/*
 * rcuja/rcuja.c
 *
 * Userspace RCU library - RCU Judy Array
 *
 * Copyright 2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define _LGPL_SOURCE
#include <stdint.h>
#include <stdlib.h>	/* for calloc() and free() */
#include <errno.h>
#include <limits.h>
#include <urcu/rcuja.h>
#include <urcu/compiler.h>
#include <urcu/arch.h>
#include <assert.h>
#include <urcu-pointer.h>

#include "rcuja-internal.h"
#include "bitfield.h"

enum cds_ja_type_class {
	RCU_JA_LINEAR = 0,	/* Type A */
			/* 32-bit: 1 to 25 children, 8 to 128 bytes */
			/* 64-bit: 1 to 28 children, 16 to 256 bytes */
	RCU_JA_POOL = 1,	/* Type B */
			/* 32-bit: 26 to 100 children, 256 to 512 bytes */
			/* 64-bit: 29 to 112 children, 512 to 1024 bytes */
	RCU_JA_PIGEON = 2,	/* Type C */
			/* 32-bit: 101 to 256 children, 1024 bytes */
			/* 64-bit: 113 to 256 children, 2048 bytes */
	/* Leaf nodes are implicit from their height in the tree */
	RCU_JA_NR_TYPES,

	RCU_JA_NULL,	/* not an encoded type, but keeps code regular */
};

struct cds_ja_type {
	enum cds_ja_type_class type_class;
	uint16_t min_child;		/* minimum number of children: 1 to 256 */
	uint16_t max_child;		/* maximum number of children: 1 to 256 */
	uint16_t max_linear_child;	/* per-pool max nr. children: 1 to 256 */
	uint16_t order;			/* node size is (1 << order), in bytes */
	uint16_t nr_pool_order;		/* number of pools is (1 << nr_pool_order) */
	uint16_t pool_size_order;	/* pool size is (1 << pool_size_order), in bytes */
};

/*
 * Number of least significant pointer bits reserved to represent the
 * child type. The masks must be unsigned long wide, since they are
 * applied to pointer values.
 */
#define JA_TYPE_BITS	3
#define JA_TYPE_MAX_NR	(1UL << JA_TYPE_BITS)
#define JA_TYPE_MASK	(JA_TYPE_MAX_NR - 1)
#define JA_PTR_MASK	(~JA_TYPE_MASK)

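/*
 * Concretely, with JA_TYPE_BITS == 3: JA_TYPE_MAX_NR == 8,
 * JA_TYPE_MASK == 0x7UL and JA_PTR_MASK == ~0x7UL. A node allocated at
 * address 0x1000 and tagged with type 2 is encoded as the flag value
 * 0x1002; masking with JA_PTR_MASK recovers the address, masking with
 * JA_TYPE_MASK recovers the type. This relies on node allocations
 * being at least 8-byte aligned.
 */
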
#define JA_ENTRY_PER_NODE	256UL
#define JA_LOG2_BITS_PER_BYTE	3U
#define JA_BITS_PER_BYTE	(1U << JA_LOG2_BITS_PER_BYTE)

#define JA_MAX_INTERNAL_DEPTH	8	/* Maximum depth, excluding leaves (8 one-byte levels cover 64-bit keys) */

/*
 * Entry for NULL node is at index 8 of the table. It is never encoded
 * in flags.
 */
#define NODE_INDEX_NULL		8

/*
 * Iteration on the array to find the right node size for the number of
 * children stops when it reaches .max_child == 256 (this is the largest
 * possible node size, which contains 256 children).
 * The min_child overlaps with the previous max_child to provide a
 * hysteresis loop to reallocation for patterns of cyclic add/removal
 * within the same node.
 * The node index within the following arrays is represented on 3 bits.
 * It identifies the node type, min/max number of children, and the
 * size order.
 * The max_child values for the RCU_JA_POOL below result from
 * statistical approximation: over a million populations, the max_child
 * covers between 97% and 99% of the populations generated. Therefore, a
 * fallback should exist to cover the rare extreme population unbalance
 * cases, but it will not have a major impact on speed nor space
 * consumption, since those are rare cases.
 */

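/*
 * Illustration (not library code): selecting the smallest node type
 * able to hold nr_children could look like the following, relying on
 * the ja_types array below being sorted by increasing max_child:
 *
 *	unsigned int i;
 *
 *	for (i = 0; i < JA_TYPE_MAX_NR; i++)
 *		if (nr_children <= ja_types[i].max_child)
 *			break;
 *
 * ja_types[i] is then the smallest type fitting nr_children.
 */
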
#if (CAA_BITS_PER_LONG < 64)
/* 32-bit pointers */
enum {
	ja_type_0_max_child = 1,
	ja_type_1_max_child = 3,
	ja_type_2_max_child = 6,
	ja_type_3_max_child = 12,
	ja_type_4_max_child = 25,
	ja_type_5_max_child = 48,
	ja_type_6_max_child = 92,
	ja_type_7_max_child = 256,
	ja_type_8_max_child = 0,	/* NULL */
};

enum {
	ja_type_0_max_linear_child = 1,
	ja_type_1_max_linear_child = 3,
	ja_type_2_max_linear_child = 6,
	ja_type_3_max_linear_child = 12,
	ja_type_4_max_linear_child = 25,
	ja_type_5_max_linear_child = 24,
	ja_type_6_max_linear_child = 23,
};

enum {
	ja_type_5_nr_pool_order = 1,
	ja_type_6_nr_pool_order = 2,
};

const struct cds_ja_type ja_types[] = {
	{ .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_0_max_child, .max_linear_child = ja_type_0_max_linear_child, .order = 3, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_1_max_child, .max_linear_child = ja_type_1_max_linear_child, .order = 4, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 3, .max_child = ja_type_2_max_child, .max_linear_child = ja_type_2_max_linear_child, .order = 5, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 4, .max_child = ja_type_3_max_child, .max_linear_child = ja_type_3_max_linear_child, .order = 6, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 10, .max_child = ja_type_4_max_child, .max_linear_child = ja_type_4_max_linear_child, .order = 7, },

	/* Pools may fill sooner than max_child */
	{ .type_class = RCU_JA_POOL, .min_child = 20, .max_child = ja_type_5_max_child, .max_linear_child = ja_type_5_max_linear_child, .order = 8, .nr_pool_order = ja_type_5_nr_pool_order, .pool_size_order = 7, },
	{ .type_class = RCU_JA_POOL, .min_child = 45, .max_child = ja_type_6_max_child, .max_linear_child = ja_type_6_max_linear_child, .order = 9, .nr_pool_order = ja_type_6_nr_pool_order, .pool_size_order = 7, },

	/*
	 * TODO: Upon node removal below min_child, if child pool is
	 * filled beyond capacity, we need to roll back to pigeon.
	 */
	{ .type_class = RCU_JA_PIGEON, .min_child = 89, .max_child = ja_type_7_max_child, .order = 10, },

	{ .type_class = RCU_JA_NULL, .min_child = 0, .max_child = ja_type_8_max_child, },
};
#else /* !(CAA_BITS_PER_LONG < 64) */
/* 64-bit pointers */
enum {
	ja_type_0_max_child = 1,
	ja_type_1_max_child = 3,
	ja_type_2_max_child = 7,
	ja_type_3_max_child = 14,
	ja_type_4_max_child = 28,
	ja_type_5_max_child = 54,
	ja_type_6_max_child = 104,
	ja_type_7_max_child = 256,
	ja_type_8_max_child = 256,
};

enum {
	ja_type_0_max_linear_child = 1,
	ja_type_1_max_linear_child = 3,
	ja_type_2_max_linear_child = 7,
	ja_type_3_max_linear_child = 14,
	ja_type_4_max_linear_child = 28,
	ja_type_5_max_linear_child = 27,
	ja_type_6_max_linear_child = 26,
};

enum {
	ja_type_5_nr_pool_order = 1,
	ja_type_6_nr_pool_order = 2,
};

const struct cds_ja_type ja_types[] = {
	{ .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_0_max_child, .max_linear_child = ja_type_0_max_linear_child, .order = 4, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_1_max_child, .max_linear_child = ja_type_1_max_linear_child, .order = 5, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 3, .max_child = ja_type_2_max_child, .max_linear_child = ja_type_2_max_linear_child, .order = 6, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 5, .max_child = ja_type_3_max_child, .max_linear_child = ja_type_3_max_linear_child, .order = 7, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 10, .max_child = ja_type_4_max_child, .max_linear_child = ja_type_4_max_linear_child, .order = 8, },

	/* Pools may fill sooner than max_child. */
	{ .type_class = RCU_JA_POOL, .min_child = 22, .max_child = ja_type_5_max_child, .max_linear_child = ja_type_5_max_linear_child, .order = 9, .nr_pool_order = ja_type_5_nr_pool_order, .pool_size_order = 8, },
	{ .type_class = RCU_JA_POOL, .min_child = 51, .max_child = ja_type_6_max_child, .max_linear_child = ja_type_6_max_linear_child, .order = 10, .nr_pool_order = ja_type_6_nr_pool_order, .pool_size_order = 8, },

	/*
	 * TODO: Upon node removal below min_child, if child pool is
	 * filled beyond capacity, we need to roll back to pigeon.
	 */
	{ .type_class = RCU_JA_PIGEON, .min_child = 101, .max_child = ja_type_7_max_child, .order = 11, },

	{ .type_class = RCU_JA_NULL, .min_child = 0, .max_child = ja_type_8_max_child, },
};
#endif /* !(CAA_BITS_PER_LONG < 64) */

static inline __attribute__((unused))
void static_array_size_check(void)
{
	CAA_BUILD_BUG_ON(CAA_ARRAY_SIZE(ja_types) < JA_TYPE_MAX_NR);
}

/*
 * The cds_ja_inode contains the compressed node data needed for
 * read-side. For linear and pool node configurations, it starts with a
 * byte counting the number of children in the node. Then, the
 * node-specific data is placed.
 * The node mutex, if any is needed, protecting concurrent updates of
 * each node is placed in a separate hash table indexed by node address.
 * For the pigeon configuration, the number of children is also kept in
 * a separate hash table, indexed by node address, because it is only
 * required for updates.
 */

#define DECLARE_LINEAR_NODE(index)						\
	struct {								\
		uint8_t nr_child;						\
		uint8_t child_value[ja_type_## index ##_max_linear_child];	\
		struct cds_ja_inode_flag *child_ptr[ja_type_## index ##_max_linear_child]; \
	}

#define DECLARE_POOL_NODE(index)						\
	struct {								\
		struct {							\
			uint8_t nr_child;					\
			uint8_t child_value[ja_type_## index ##_max_linear_child]; \
			struct cds_ja_inode_flag *child_ptr[ja_type_## index ##_max_linear_child]; \
		} linear[1U << ja_type_## index ##_nr_pool_order];		\
	}

struct cds_ja_inode {
	union {
		/* Linear configuration */
		DECLARE_LINEAR_NODE(0) conf_0;
		DECLARE_LINEAR_NODE(1) conf_1;
		DECLARE_LINEAR_NODE(2) conf_2;
		DECLARE_LINEAR_NODE(3) conf_3;
		DECLARE_LINEAR_NODE(4) conf_4;

		/* Pool configuration */
		DECLARE_POOL_NODE(5) conf_5;
		DECLARE_POOL_NODE(6) conf_6;

		/* Pigeon configuration */
		struct {
			struct cds_ja_inode_flag *child[ja_type_7_max_child];
		} conf_7;
		/* data aliasing nodes for computed accesses */
		uint8_t data[sizeof(struct cds_ja_inode_flag *) * ja_type_7_max_child];
	} u;
};

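/*
 * Memory layout sketch of a linear configuration, as accessed through
 * the data aliasing array by ja_linear_node_get_nth() below: byte 0
 * holds nr_child, the child values follow, then, after padding to
 * pointer-size alignment, the child pointer array:
 *
 *	[ nr_child | child_value[0 .. max-1] | pad | child_ptr[0 .. max-1] ]
 */
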
static
struct cds_ja_inode_flag *ja_node_flag(struct cds_ja_inode *node,
		unsigned int type)
{
	assert(type < RCU_JA_NR_TYPES);
	return (struct cds_ja_inode_flag *) (((unsigned long) node) | type);
}

static
struct cds_ja_inode *ja_node_ptr(struct cds_ja_inode_flag *node)
{
	return (struct cds_ja_inode *) (((unsigned long) node) & JA_PTR_MASK);
}

static
unsigned int ja_node_type(struct cds_ja_inode_flag *node)
{
	unsigned int type;

	if (ja_node_ptr(node) == NULL) {
		return NODE_INDEX_NULL;
	}
	type = (unsigned int) ((unsigned long) node & JA_TYPE_MASK);
	assert(type < RCU_JA_NR_TYPES);
	return type;
}

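/*
 * Illustrative sketch, not part of the library build (the
 * RCUJA_EXAMPLE guard and example_node_tagging() are hypothetical
 * names): round-trip of the type encoding in the low pointer bits.
 * Assumes calloc() returns at least 8-byte aligned memory.
 */
#ifdef RCUJA_EXAMPLE
static
void example_node_tagging(void)
{
	struct cds_ja_inode *node;
	struct cds_ja_inode_flag *flag;

	node = calloc(1U << ja_types[1].order, sizeof(char));
	assert(node);
	flag = ja_node_flag(node, 1);		/* tag with type index 1 */
	assert(ja_node_type(flag) == 1);	/* type recovered from low bits */
	assert(ja_node_ptr(flag) == node);	/* address recovered by masking */
	free(node);
}
#endif /* RCUJA_EXAMPLE */
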
struct cds_ja_inode *alloc_cds_ja_node(const struct cds_ja_type *ja_type)
{
	return calloc(1U << ja_type->order, sizeof(char));
}

void free_cds_ja_node(struct cds_ja_inode *node)
{
	free(node);
}

#define __JA_ALIGN_MASK(v, mask)	(((v) + (mask)) & ~(mask))
#define JA_ALIGN(v, align)		__JA_ALIGN_MASK(v, (typeof(v)) (align) - 1)
#define __JA_FLOOR_MASK(v, mask)	((v) & ~(mask))
#define JA_FLOOR(v, align)		__JA_FLOOR_MASK(v, (typeof(v)) (align) - 1)

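/*
 * For example, with an 8-byte (pointer-size) alignment, JA_ALIGN(13, 8)
 * == 16 rounds up and JA_FLOOR(13, 8) == 8 rounds down to the requested
 * power-of-two boundary.
 */
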
static
uint8_t *align_ptr_size(uint8_t *ptr)
{
	return (uint8_t *) JA_ALIGN((unsigned long) ptr, sizeof(void *));
}

static
uint8_t ja_linear_node_get_nr_child(const struct cds_ja_type *type,
		struct cds_ja_inode *node)
{
	assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
	return CMM_LOAD_SHARED(node->u.data[0]);
}

/*
 * The order in which values and pointers are set does not matter: if
 * a value is missing, we return NULL. If a value is there, but its
 * associated pointer is still NULL, we return NULL too.
 */
static
struct cds_ja_inode_flag *ja_linear_node_get_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_inode_flag ***child_node_flag_ptr,
		uint8_t n)
{
	uint8_t nr_child;
	uint8_t *values;
	struct cds_ja_inode_flag **pointers;
	struct cds_ja_inode_flag *ptr;
	unsigned int i;

	assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);

	nr_child = ja_linear_node_get_nr_child(type, node);
	cmm_smp_rmb();	/* read nr_child before values and pointers */
	assert(nr_child <= type->max_linear_child);
	assert(type->type_class != RCU_JA_LINEAR || nr_child >= type->min_child);

	values = &node->u.data[1];
	for (i = 0; i < nr_child; i++) {
		if (CMM_LOAD_SHARED(values[i]) == n)
			break;
	}
	if (i >= nr_child)
		return NULL;
	pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
	if (caa_unlikely(child_node_flag_ptr))
		*child_node_flag_ptr = &pointers[i];
	ptr = rcu_dereference(pointers[i]);
	assert(ja_node_ptr(ptr) != NULL);
	return ptr;
}

static
void ja_linear_node_get_ith_pos(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		uint8_t i,
		uint8_t *v,
		struct cds_ja_inode_flag **iter)
{
	uint8_t *values;
	struct cds_ja_inode_flag **pointers;

	assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
	assert(i < ja_linear_node_get_nr_child(type, node));

	values = &node->u.data[1];
	*v = values[i];
	pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
	*iter = pointers[i];
}

static
struct cds_ja_inode_flag *ja_pool_node_get_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_inode_flag ***child_node_flag_ptr,
		uint8_t n)
{
	struct cds_ja_inode *linear;

	assert(type->type_class == RCU_JA_POOL);
	/*
	 * TODO: currently, we select the pool by highest bits. We
	 * should support various encodings.
	 */
	linear = (struct cds_ja_inode *)
		&node->u.data[((unsigned long) n >> (CHAR_BIT - type->nr_pool_order)) << type->pool_size_order];
	return ja_linear_node_get_nth(type, linear, child_node_flag_ptr, n);
}

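/*
 * Worked example of the pool selection above: for a two-pool type
 * (nr_pool_order == 1), n >> (CHAR_BIT - 1) yields 0 for n in [0, 127]
 * and 1 for n in [128, 255]; shifting that pool index left by
 * pool_size_order turns it into the byte offset of the selected linear
 * pool within the node.
 */
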
static
struct cds_ja_inode *ja_pool_node_get_ith_pool(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		uint8_t i)
{
	assert(type->type_class == RCU_JA_POOL);
	return (struct cds_ja_inode *)
		&node->u.data[(unsigned int) i << type->pool_size_order];
}

static
struct cds_ja_inode_flag *ja_pigeon_node_get_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_inode_flag ***child_node_flag_ptr,
		uint8_t n)
{
	struct cds_ja_inode_flag **child_node_flag;

	assert(type->type_class == RCU_JA_PIGEON);
	child_node_flag = &((struct cds_ja_inode_flag **) node->u.data)[n];
	if (caa_unlikely(child_node_flag_ptr))
		*child_node_flag_ptr = child_node_flag;
	return rcu_dereference(*child_node_flag);
}

/*
 * ja_node_get_nth: get nth item from a node.
 * node_flag is already rcu_dereference'd.
 */
static
struct cds_ja_inode_flag *ja_node_get_nth(struct cds_ja_inode_flag *node_flag,
		struct cds_ja_inode_flag ***child_node_flag_ptr,
		uint8_t n)
{
	unsigned int type_index;
	struct cds_ja_inode *node;
	const struct cds_ja_type *type;

	node = ja_node_ptr(node_flag);
	assert(node != NULL);
	type_index = ja_node_type(node_flag);
	type = &ja_types[type_index];

	switch (type->type_class) {
	case RCU_JA_LINEAR:
		return ja_linear_node_get_nth(type, node,
				child_node_flag_ptr, n);
	case RCU_JA_POOL:
		return ja_pool_node_get_nth(type, node,
				child_node_flag_ptr, n);
	case RCU_JA_PIGEON:
		return ja_pigeon_node_get_nth(type, node,
				child_node_flag_ptr, n);
	default:
		assert(0);
		return (void *) -1UL;
	}
}

/*
 * TODO: use ja_get_nr_child to monitor limits triggering shrink
 * recompaction.
 * Also use ja_get_nr_child to distinguish between resize and pool
 * change of compaction bit(s).
 */
static
unsigned int ja_get_nr_child(struct cds_ja_shadow_node *shadow_node)
{
	return shadow_node->nr_child;
}

static
int ja_linear_node_set_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_shadow_node *shadow_node,
		uint8_t n,
		struct cds_ja_inode_flag *child_node_flag)
{
	uint8_t nr_child;
	uint8_t *values, *nr_child_ptr;
	struct cds_ja_inode_flag **pointers;
	unsigned int i;

	assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);

	nr_child_ptr = &node->u.data[0];
	nr_child = *nr_child_ptr;
	assert(nr_child <= type->max_linear_child);
	assert(type->type_class != RCU_JA_LINEAR || nr_child >= type->min_child);

	values = &node->u.data[1];
	for (i = 0; i < nr_child; i++) {
		if (values[i] == n)
			return -EEXIST;
	}
	if (nr_child >= type->max_linear_child) {
		/* No space left in this node type */
		return -ENOSPC;
	}
	pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
	assert(pointers[nr_child] == NULL);
	rcu_assign_pointer(pointers[nr_child], child_node_flag);
	CMM_STORE_SHARED(values[nr_child], n);
	cmm_smp_wmb();	/* write value and pointer before nr_child */
	CMM_STORE_SHARED(*nr_child_ptr, nr_child + 1);
	shadow_node->nr_child++;
	return 0;
}

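/*
 * Publication ordering sketch: the cmm_smp_wmb() above pairs with the
 * cmm_smp_rmb() in ja_linear_node_get_nth(). The updater stores the
 * child pointer and value, issues the write barrier, then increments
 * nr_child; the reader loads nr_child, issues the read barrier, then
 * scans values and pointers. A reader therefore never scans a slot
 * within nr_child whose value/pointer pair is not yet fully published.
 */
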
static
int ja_pool_node_set_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_shadow_node *shadow_node,
		uint8_t n,
		struct cds_ja_inode_flag *child_node_flag)
{
	struct cds_ja_inode *linear;

	assert(type->type_class == RCU_JA_POOL);
	linear = (struct cds_ja_inode *)
		&node->u.data[((unsigned long) n >> (CHAR_BIT - type->nr_pool_order)) << type->pool_size_order];
	return ja_linear_node_set_nth(type, linear, shadow_node,
			n, child_node_flag);
}

static
int ja_pigeon_node_set_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_shadow_node *shadow_node,
		uint8_t n,
		struct cds_ja_inode_flag *child_node_flag)
{
	struct cds_ja_inode_flag **ptr;

	assert(type->type_class == RCU_JA_PIGEON);
	ptr = &((struct cds_ja_inode_flag **) node->u.data)[n];
	if (*ptr)
		return -EEXIST;
	rcu_assign_pointer(*ptr, child_node_flag);
	shadow_node->nr_child++;
	return 0;
}

/*
 * _ja_node_set_nth: set nth item within a node. Return a negative
 * error value if it is already there.
 * TODO: exclusive access on node.
 */
static
int _ja_node_set_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_shadow_node *shadow_node,
		uint8_t n,
		struct cds_ja_inode_flag *child_node_flag)
{
	switch (type->type_class) {
	case RCU_JA_LINEAR:
		return ja_linear_node_set_nth(type, node, shadow_node, n,
				child_node_flag);
	case RCU_JA_POOL:
		return ja_pool_node_set_nth(type, node, shadow_node, n,
				child_node_flag);
	case RCU_JA_PIGEON:
		return ja_pigeon_node_set_nth(type, node, shadow_node, n,
				child_node_flag);
	case RCU_JA_NULL:
		return -ENOSPC;
	default:
		assert(0);
		return -EINVAL;
	}
}

/*
 * ja_node_recompact_add: recompact a node, adding a new child.
 * TODO: for pool type, take selection bit(s) into account.
 * Return 0 on success, -ENOENT if need to retry, or other negative
 * error value otherwise.
 */
static
int ja_node_recompact_add(struct cds_ja *ja,
		unsigned int old_type_index,
		const struct cds_ja_type *old_type,
		struct cds_ja_inode *old_node,
		struct cds_ja_shadow_node *shadow_node,
		struct cds_ja_inode_flag **old_node_flag, uint8_t n,
		struct cds_ja_inode_flag *child_node_flag)
{
	unsigned int new_type_index;
	struct cds_ja_inode *new_node;
	const struct cds_ja_type *new_type;
	struct cds_ja_inode_flag *new_node_flag;
	int new_shadow = 0;
	int ret;

	if (!shadow_node) {
		new_type_index = 0;
	} else {
		new_type_index = old_type_index + 1;
	}
	new_type = &ja_types[new_type_index];
	new_node = alloc_cds_ja_node(new_type);
	if (!new_node)
		return -ENOMEM;
	new_node_flag = ja_node_flag(new_node, new_type_index);

	ret = rcuja_shadow_set(ja->ht, new_node, shadow_node);
	if (ret) {
		free(new_node);
		return ret;
	}

	if (!shadow_node) {
		shadow_node = rcuja_shadow_lookup_lock(ja->ht, new_node);
		assert(shadow_node);
		new_shadow = 1;
	}

	/*
	 * We need to clear nr_child, because it will be re-incremented
	 * by _ja_node_set_nth().
	 */
	shadow_node->nr_child = 0;

	assert(old_type->type_class != RCU_JA_PIGEON);
	switch (old_type->type_class) {
	case RCU_JA_LINEAR:
	{
		uint8_t nr_child =
			ja_linear_node_get_nr_child(old_type, old_node);
		unsigned int i;

		for (i = 0; i < nr_child; i++) {
			struct cds_ja_inode_flag *iter;
			uint8_t v;

			ja_linear_node_get_ith_pos(old_type, old_node, i, &v, &iter);
			if (!iter)
				continue;
			ret = _ja_node_set_nth(new_type, new_node, shadow_node,
					v, iter);
			assert(!ret);
		}
		break;
	}
	case RCU_JA_POOL:
	{
		unsigned int pool_nr;

		for (pool_nr = 0; pool_nr < (1U << old_type->nr_pool_order); pool_nr++) {
			struct cds_ja_inode *pool =
				ja_pool_node_get_ith_pool(old_type,
					old_node, pool_nr);
			uint8_t nr_child =
				ja_linear_node_get_nr_child(old_type, pool);
			unsigned int j;

			for (j = 0; j < nr_child; j++) {
				struct cds_ja_inode_flag *iter;
				uint8_t v;

				ja_linear_node_get_ith_pos(old_type, pool,
						j, &v, &iter);
				if (!iter)
					continue;
				ret = _ja_node_set_nth(new_type, new_node, shadow_node,
						v, iter);
				assert(!ret);
			}
		}
		break;
	}
	case RCU_JA_PIGEON:
	default:
		assert(0);
		ret = -EINVAL;
		goto unlock_new_shadow;
	}

	/* add node */
	ret = _ja_node_set_nth(new_type, new_node, shadow_node,
			n, child_node_flag);
	assert(!ret);
	/* Publish the new recompacted node through old_node_flag */
	*old_node_flag = new_node_flag;
	ret = rcuja_shadow_clear(ja->ht, old_node,
			RCUJA_SHADOW_CLEAR_FREE_NODE);
	assert(!ret);

	ret = 0;

unlock_new_shadow:
	if (new_shadow)
		rcuja_shadow_unlock(shadow_node);
	return ret;
}

/*
 * Return 0 on success, -ENOENT if need to retry, or other negative
 * error value otherwise.
 */
static
int ja_node_set_nth(struct cds_ja *ja,
		struct cds_ja_inode_flag **node_flag, uint8_t n,
		struct cds_ja_inode_flag *child_node_flag,
		struct cds_ja_shadow_node *shadow_node)
{
	int ret;
	unsigned int type_index;
	const struct cds_ja_type *type;
	struct cds_ja_inode *node;

	node = ja_node_ptr(*node_flag);
	type_index = ja_node_type(*node_flag);
	type = &ja_types[type_index];
	ret = _ja_node_set_nth(type, node, shadow_node,
			n, child_node_flag);
	if (ret == -ENOSPC) {
		/* Not enough space in node, need to recompact. */
		ret = ja_node_recompact_add(ja, type_index, type, node,
				shadow_node, node_flag, n, child_node_flag);
	}
	return ret;
}

struct cds_hlist_head *cds_ja_lookup(struct cds_ja *ja, uint64_t key)
{
	unsigned int tree_depth, i;
	struct cds_ja_inode_flag *node_flag;

	if (caa_unlikely(key > ja->key_max))
		return NULL;
	tree_depth = ja->tree_depth;
	node_flag = rcu_dereference(ja->root);

	/* level 0: root node */
	if (!ja_node_ptr(node_flag))
		return NULL;

	for (i = 1; i < tree_depth; i++) {
		node_flag = ja_node_get_nth(node_flag, NULL,
				(unsigned char) key);
		if (!ja_node_ptr(node_flag))
			return NULL;
		key >>= JA_BITS_PER_BYTE;
	}

	/* Last level lookup succeeded. We got an actual match. */
	return (struct cds_hlist_head *) node_flag;
}

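/*
 * Key traversal example: for a 16-bit key 0x0102 (tree_depth == 3),
 * level 1 above is indexed by the least significant byte (0x02), the
 * key is then shifted right by JA_BITS_PER_BYTE, and level 2 is
 * indexed by 0x01. Keys are thus consumed one byte per level, least
 * significant byte first.
 */
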
/*
 * We reached an unpopulated node. Create it and the children we need,
 * and then attach the entire branch to the current node. This may
 * trigger recompaction of the current node. Locks needed: node lock
 * (for add), and, possibly, parent node lock (to update pointer due to
 * node recompaction).
 *
 * First take node lock, check if recompaction is needed, then take
 * parent lock (if needed). Then we can proceed to create the new
 * branch. Publish the new branch, and release locks.
 * TODO: we currently always take the parent lock even when not needed.
 */
static
int ja_attach_node(struct cds_ja *ja,
		struct cds_ja_inode_flag **node_flag_ptr,
		struct cds_ja_inode_flag *node_flag,
		struct cds_ja_inode_flag *parent_node_flag,
		uint64_t key,
		unsigned int depth,
		struct cds_ja_node *child_node)
{
	struct cds_ja_shadow_node *shadow_node = NULL,
			*parent_shadow_node = NULL;
	struct cds_ja_inode *node = ja_node_ptr(node_flag);
	struct cds_ja_inode *parent_node = ja_node_ptr(parent_node_flag);
	struct cds_hlist_head head;
	struct cds_ja_inode_flag *iter_node_flag, *iter_dest_node_flag;
	int ret, i;
	struct cds_ja_inode_flag *created_nodes[JA_MAX_INTERNAL_DEPTH];
	int nr_created_nodes = 0;

	assert(node);
	shadow_node = rcuja_shadow_lookup_lock(ja->ht, node);
	if (!shadow_node) {
		ret = -ENOENT;
		goto end;
	}
	if (parent_node) {
		parent_shadow_node = rcuja_shadow_lookup_lock(ja->ht,
				parent_node);
		if (!parent_shadow_node) {
			ret = -ENOENT;
			goto unlock_shadow;
		}
	}

	CDS_INIT_HLIST_HEAD(&head);
	cds_hlist_add_head_rcu(&child_node->list, &head);

	iter_dest_node_flag = NULL;
	ret = ja_node_set_nth(ja, &iter_dest_node_flag,
		key >> (JA_BITS_PER_BYTE * (ja->tree_depth - 2)),
		(struct cds_ja_inode_flag *) head.next,
		NULL);
	if (ret)
		goto unlock_parent;
	created_nodes[nr_created_nodes++] = iter_dest_node_flag;
	iter_node_flag = iter_dest_node_flag;

	/* Create new branch, starting from bottom */
	for (i = ja->tree_depth - 2; i >= (int) depth; i--) {
		iter_dest_node_flag = NULL;
		ret = ja_node_set_nth(ja, &iter_dest_node_flag,
			key >> (JA_BITS_PER_BYTE * (i - 1)),
			iter_node_flag,
			NULL);
		if (ret)
			goto check_error;
		created_nodes[nr_created_nodes++] = iter_dest_node_flag;
		iter_node_flag = iter_dest_node_flag;
	}

	/* Publish new branch */
	rcu_assign_pointer(*node_flag_ptr, iter_node_flag);

	/* Success */
	ret = 0;

check_error:
	if (ret) {
		for (i = 0; i < nr_created_nodes; i++) {
			int tmpret;

			tmpret = rcuja_shadow_clear(ja->ht,
					ja_node_ptr(created_nodes[i]),
					RCUJA_SHADOW_CLEAR_FREE_NODE
					| RCUJA_SHADOW_CLEAR_FREE_LOCK);
			assert(!tmpret);
		}
	}
unlock_parent:
	if (parent_shadow_node)
		rcuja_shadow_unlock(parent_shadow_node);
unlock_shadow:
	if (shadow_node)
		rcuja_shadow_unlock(shadow_node);
end:
	return ret;
}

/*
 * Lock the hlist head shadow node mutex, and add node to list of
 * duplicates. Failure can happen if concurrent removal removes the last
 * node with same key before we get the lock.
 * Return 0 on success, negative error value on failure.
 */
static
int ja_chain_node(struct cds_ja *ja,
		struct cds_hlist_head *head,
		struct cds_ja_node *node)
{
	struct cds_ja_shadow_node *shadow_node;

	shadow_node = rcuja_shadow_lookup_lock(ja->ht,
			(struct cds_ja_inode *) head);
	if (!shadow_node)
		return -ENOENT;
	cds_hlist_add_head_rcu(&node->list, head);
	rcuja_shadow_unlock(shadow_node);
	return 0;
}

int cds_ja_add(struct cds_ja *ja, uint64_t key,
		struct cds_ja_node *new_node)
{
	unsigned int tree_depth, i;
	uint64_t iter_key;
	struct cds_ja_inode_flag **node_flag_ptr;	/* in parent */
	struct cds_ja_inode_flag *node_flag,
		*parent_node_flag,
		*parent2_node_flag;
	int ret;

	if (caa_unlikely(key > ja->key_max))
		return -EINVAL;
	tree_depth = ja->tree_depth;

retry:
	iter_key = key;
	parent2_node_flag = NULL;
	parent_node_flag =
		(struct cds_ja_inode_flag *) &ja->root;	/* Use root ptr address as key for mutex */
	node_flag_ptr = &ja->root;
	node_flag = rcu_dereference(*node_flag_ptr);

	/* Iterate on all internal levels */
	for (i = 0; i < tree_depth - 1; i++) {
		if (!ja_node_ptr(node_flag)) {
			ret = ja_attach_node(ja, node_flag_ptr,
					parent_node_flag, parent2_node_flag,
					key, i, new_node);
			if (ret == -ENOENT || ret == -EEXIST)
				goto retry;
			else
				goto end;
		}
		parent2_node_flag = parent_node_flag;
		parent_node_flag = node_flag;
		node_flag = ja_node_get_nth(node_flag,
				&node_flag_ptr,
				(unsigned char) iter_key);
		iter_key >>= JA_BITS_PER_BYTE;
	}

	/*
	 * We reached bottom of tree, simply add node to last internal
	 * level, or chain it if key is already present.
	 */
	if (!ja_node_ptr(node_flag)) {
		ret = ja_attach_node(ja, node_flag_ptr, parent_node_flag,
				parent2_node_flag, key, i, new_node);
	} else {
		ret = ja_chain_node(ja,
			(struct cds_hlist_head *) ja_node_ptr(node_flag),
			new_node);
	}
	if (ret == -ENOENT)
		goto retry;
end:
	return ret;
}

struct cds_ja *_cds_ja_new(unsigned int key_bits,
		const struct rcu_flavor_struct *flavor)
{
	struct cds_ja *ja;
	int ret;

	ja = calloc(1, sizeof(*ja));
	if (!ja)
		goto ja_error;

	switch (key_bits) {
	case 8:
		ja->key_max = UINT8_MAX;
		break;
	case 16:
		ja->key_max = UINT16_MAX;
		break;
	case 32:
		ja->key_max = UINT32_MAX;
		break;
	case 64:
		ja->key_max = UINT64_MAX;
		break;
	default:
		goto check_error;
	}

	/* ja->root is NULL */
	/* tree_depth 0 is for pointer to root node */
	ja->tree_depth = (key_bits >> JA_LOG2_BITS_PER_BYTE) + 1;
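	/*
	 * For example, key_bits == 32 yields tree_depth == 5: four
	 * one-byte internal levels plus the root pointer level.
	 */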
	assert(ja->tree_depth <= JA_MAX_INTERNAL_DEPTH + 1);
	ja->ht = rcuja_create_ht(flavor);
	if (!ja->ht)
		goto ht_error;

	/*
	 * Note: we should not free this node until judy array destroy.
	 */
	ret = rcuja_shadow_set(ja->ht,
			ja_node_ptr((struct cds_ja_inode_flag *) &ja->root),
			NULL);
	if (ret)
		goto ht_node_error;

	return ja;

ht_node_error:
	ret = rcuja_delete_ht(ja->ht);
	assert(!ret);
ht_error:
check_error:
	free(ja);
ja_error:
	return NULL;
}

/*
 * There should be no more concurrent add to the judy array while it is
 * being destroyed (ensured by the caller).
 */
int cds_ja_destroy(struct cds_ja *ja)
{
	int ret;

	rcuja_shadow_prune(ja->ht,
			RCUJA_SHADOW_CLEAR_FREE_NODE | RCUJA_SHADOW_CLEAR_FREE_LOCK);
	ret = rcuja_delete_ht(ja->ht);
	if (ret)
		return ret;
	free(ja);
	return 0;
}
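
/*
 * End-to-end usage sketch, not part of the library build (the
 * RCUJA_EXAMPLE guard, struct my_item and example_usage() are
 * hypothetical names). Assumes the default urcu flavor's rcu_flavor
 * symbol, available through <urcu.h>.
 */
#ifdef RCUJA_EXAMPLE
#include <urcu.h>

struct my_item {
	struct cds_ja_node ja_node;	/* linkage used by the judy array */
	uint64_t key;
};

static
int example_usage(void)
{
	struct cds_ja *ja;
	struct my_item *item;
	int ret;

	ja = _cds_ja_new(16, &rcu_flavor);	/* 16-bit key space */
	if (!ja)
		return -ENOMEM;
	item = calloc(1, sizeof(*item));
	if (!item)
		return -ENOMEM;
	item->key = 0x0102;
	ret = cds_ja_add(ja, item->key, &item->ja_node);
	assert(!ret);

	rcu_read_lock();
	assert(cds_ja_lookup(ja, 0x0102) != NULL);
	rcu_read_unlock();

	/* Caller is responsible for unpublishing/freeing its items. */
	ret = cds_ja_destroy(ja);
	free(item);
	return ret;
}
#endif /* RCUJA_EXAMPLE */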