rcuja: extend tests, more fixes
rcuja/rcuja.c

/*
 * rcuja/rcuja.c
 *
 * Userspace RCU library - RCU Judy Array
 *
 * Copyright 2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define _LGPL_SOURCE
#include <stdint.h>
#include <errno.h>
#include <limits.h>
#include <stdlib.h>	/* calloc(), free() */
#include <stdio.h>	/* fprintf() */
#include <inttypes.h>	/* PRIu64 */
#include <assert.h>
#include <urcu/rcuja.h>
#include <urcu/compiler.h>
#include <urcu/arch.h>
#include <urcu-pointer.h>
#include <urcu/uatomic.h>

#include "rcuja-internal.h"
#include "bitfield.h"

enum cds_ja_type_class {
	RCU_JA_LINEAR = 0,	/* Type A */
			/* 32-bit: 1 to 25 children, 8 to 128 bytes */
			/* 64-bit: 1 to 28 children, 16 to 256 bytes */
	RCU_JA_POOL = 1,	/* Type B */
			/* 32-bit: 26 to 100 children, 256 to 512 bytes */
			/* 64-bit: 29 to 112 children, 512 to 1024 bytes */
	RCU_JA_PIGEON = 2,	/* Type C */
			/* 32-bit: 101 to 256 children, 1024 bytes */
			/* 64-bit: 113 to 256 children, 2048 bytes */
	/* Leaf nodes are implicit from their height in the tree */
	RCU_JA_NR_TYPES,

	RCU_JA_NULL,	/* not an encoded type, but keeps code regular */
};

struct cds_ja_type {
	enum cds_ja_type_class type_class;
	uint16_t min_child;		/* minimum number of children: 1 to 256 */
	uint16_t max_child;		/* maximum number of children: 1 to 256 */
	uint16_t max_linear_child;	/* per-pool max nr. children: 1 to 256 */
	uint16_t order;			/* node size is (1 << order), in bytes */
	uint16_t nr_pool_order;		/* number of pools is (1 << nr_pool_order) */
	uint16_t pool_size_order;	/* pool size is (1 << pool_size_order), in bytes */
};

/*
 * Number of least significant pointer bits reserved to represent the
 * child type.
 */
#define JA_TYPE_BITS		3
#define JA_TYPE_MAX_NR		(1UL << JA_TYPE_BITS)
#define JA_TYPE_MASK		(JA_TYPE_MAX_NR - 1)
#define JA_PTR_MASK		(~JA_TYPE_MASK)

#define JA_ENTRY_PER_NODE	256UL
#define JA_LOG2_BITS_PER_BYTE	3U
#define JA_BITS_PER_BYTE	(1U << JA_LOG2_BITS_PER_BYTE)

#define JA_MAX_DEPTH		9	/* Maximum depth, including leaves */

/*
 * Entry for NULL node is at index 8 of the table. It is never encoded
 * in flags.
 */
#define NODE_INDEX_NULL		8

/*
 * Number of removals needed on a fallback node before we try to shrink
 * it.
 */
#define JA_FALLBACK_REMOVAL_COUNT	8

/*
 * Iteration on the array to find the right node size for the number of
 * children stops when it reaches .max_child == 256 (this is the largest
 * possible node size, which contains 256 children).
 * The min_child overlaps with the previous max_child to provide
 * hysteresis on reallocation for patterns of cyclic add/removal within
 * the same node.
 * The index of a node type within the following arrays is represented
 * on 3 bits. It identifies the node type, min/max number of children,
 * and the size order.
 * The max_child values for RCU_JA_POOL below result from a statistical
 * approximation: over a million generated populations, these max_child
 * values cover between 97% and 99% of the populations. A fallback
 * therefore needs to exist to cover the rare cases of extreme
 * population unbalance, but it will not have a major impact on speed
 * nor on space consumption, since those cases are rare.
 */

#if (CAA_BITS_PER_LONG < 64)
/* 32-bit pointers */
enum {
	ja_type_0_max_child = 1,
	ja_type_1_max_child = 3,
	ja_type_2_max_child = 6,
	ja_type_3_max_child = 12,
	ja_type_4_max_child = 25,
	ja_type_5_max_child = 48,
	ja_type_6_max_child = 92,
	ja_type_7_max_child = 256,
	ja_type_8_max_child = 0,	/* NULL */
};

enum {
	ja_type_0_max_linear_child = 1,
	ja_type_1_max_linear_child = 3,
	ja_type_2_max_linear_child = 6,
	ja_type_3_max_linear_child = 12,
	ja_type_4_max_linear_child = 25,
	ja_type_5_max_linear_child = 24,
	ja_type_6_max_linear_child = 23,
};

enum {
	ja_type_5_nr_pool_order = 1,
	ja_type_6_nr_pool_order = 2,
};

const struct cds_ja_type ja_types[] = {
	{ .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_0_max_child, .max_linear_child = ja_type_0_max_linear_child, .order = 3, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_1_max_child, .max_linear_child = ja_type_1_max_linear_child, .order = 4, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 3, .max_child = ja_type_2_max_child, .max_linear_child = ja_type_2_max_linear_child, .order = 5, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 4, .max_child = ja_type_3_max_child, .max_linear_child = ja_type_3_max_linear_child, .order = 6, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 10, .max_child = ja_type_4_max_child, .max_linear_child = ja_type_4_max_linear_child, .order = 7, },

	/* Pools may fill sooner than max_child. */
	{ .type_class = RCU_JA_POOL, .min_child = 20, .max_child = ja_type_5_max_child, .max_linear_child = ja_type_5_max_linear_child, .order = 8, .nr_pool_order = ja_type_5_nr_pool_order, .pool_size_order = 7, },
	{ .type_class = RCU_JA_POOL, .min_child = 45, .max_child = ja_type_6_max_child, .max_linear_child = ja_type_6_max_linear_child, .order = 9, .nr_pool_order = ja_type_6_nr_pool_order, .pool_size_order = 7, },

	/*
	 * TODO: Upon node removal below min_child, if child pool is
	 * filled beyond capacity, we need to roll back to pigeon.
	 */
	{ .type_class = RCU_JA_PIGEON, .min_child = 89, .max_child = ja_type_7_max_child, .order = 10, },

	{ .type_class = RCU_JA_NULL, .min_child = 0, .max_child = ja_type_8_max_child, },
};
#else /* !(CAA_BITS_PER_LONG < 64) */
/* 64-bit pointers */
enum {
	ja_type_0_max_child = 1,
	ja_type_1_max_child = 3,
	ja_type_2_max_child = 7,
	ja_type_3_max_child = 14,
	ja_type_4_max_child = 28,
	ja_type_5_max_child = 54,
	ja_type_6_max_child = 104,
	ja_type_7_max_child = 256,
	ja_type_8_max_child = 256,
};

enum {
	ja_type_0_max_linear_child = 1,
	ja_type_1_max_linear_child = 3,
	ja_type_2_max_linear_child = 7,
	ja_type_3_max_linear_child = 14,
	ja_type_4_max_linear_child = 28,
	ja_type_5_max_linear_child = 27,
	ja_type_6_max_linear_child = 26,
};

enum {
	ja_type_5_nr_pool_order = 1,
	ja_type_6_nr_pool_order = 2,
};

const struct cds_ja_type ja_types[] = {
	{ .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_0_max_child, .max_linear_child = ja_type_0_max_linear_child, .order = 4, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_1_max_child, .max_linear_child = ja_type_1_max_linear_child, .order = 5, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 3, .max_child = ja_type_2_max_child, .max_linear_child = ja_type_2_max_linear_child, .order = 6, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 5, .max_child = ja_type_3_max_child, .max_linear_child = ja_type_3_max_linear_child, .order = 7, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 10, .max_child = ja_type_4_max_child, .max_linear_child = ja_type_4_max_linear_child, .order = 8, },

	/* Pools may fill sooner than max_child. */
	{ .type_class = RCU_JA_POOL, .min_child = 22, .max_child = ja_type_5_max_child, .max_linear_child = ja_type_5_max_linear_child, .order = 9, .nr_pool_order = ja_type_5_nr_pool_order, .pool_size_order = 8, },
	{ .type_class = RCU_JA_POOL, .min_child = 51, .max_child = ja_type_6_max_child, .max_linear_child = ja_type_6_max_linear_child, .order = 10, .nr_pool_order = ja_type_6_nr_pool_order, .pool_size_order = 8, },

	/*
	 * TODO: Upon node removal below min_child, if child pool is
	 * filled beyond capacity, we need to roll back to pigeon.
	 */
	{ .type_class = RCU_JA_PIGEON, .min_child = 101, .max_child = ja_type_7_max_child, .order = 11, },

	{ .type_class = RCU_JA_NULL, .min_child = 0, .max_child = ja_type_8_max_child, },
};
#endif /* !(CAA_BITS_PER_LONG < 64) */

static inline __attribute__((unused))
void static_array_size_check(void)
{
	CAA_BUILD_BUG_ON(CAA_ARRAY_SIZE(ja_types) < JA_TYPE_MAX_NR);
}

/*
 * The cds_ja_inode contains the compressed node data needed for
 * read-side. For linear and pool node configurations, it starts with a
 * byte counting the number of children in the node. Then, the
 * node-specific data is placed.
 * The node mutex, if any is needed, protecting concurrent updates of
 * each node is placed in a separate hash table indexed by node address.
 * For the pigeon configuration, the number of children is also kept in
 * a separate hash table, indexed by node address, because it is only
 * required for updates.
 */

#define DECLARE_LINEAR_NODE(index)							\
	struct {									\
		uint8_t nr_child;							\
		uint8_t child_value[ja_type_## index ##_max_linear_child];		\
		struct cds_ja_inode_flag *child_ptr[ja_type_## index ##_max_linear_child]; \
	}

#define DECLARE_POOL_NODE(index)							\
	struct {									\
		struct {								\
			uint8_t nr_child;						\
			uint8_t child_value[ja_type_## index ##_max_linear_child];	\
			struct cds_ja_inode_flag *child_ptr[ja_type_## index ##_max_linear_child]; \
		} linear[1U << ja_type_## index ##_nr_pool_order];			\
	}

struct cds_ja_inode {
	union {
		/* Linear configuration */
		DECLARE_LINEAR_NODE(0) conf_0;
		DECLARE_LINEAR_NODE(1) conf_1;
		DECLARE_LINEAR_NODE(2) conf_2;
		DECLARE_LINEAR_NODE(3) conf_3;
		DECLARE_LINEAR_NODE(4) conf_4;

		/* Pool configuration */
		DECLARE_POOL_NODE(5) conf_5;
		DECLARE_POOL_NODE(6) conf_6;

		/* Pigeon configuration */
		struct {
			struct cds_ja_inode_flag *child[ja_type_7_max_child];
		} conf_7;
		/* data aliasing nodes for computed accesses */
		uint8_t data[sizeof(struct cds_ja_inode_flag *) * ja_type_7_max_child];
	} u;
};

static
struct cds_ja_inode_flag *ja_node_flag(struct cds_ja_inode *node,
		unsigned long type)
{
	assert(type < (1UL << JA_TYPE_BITS));
	return (struct cds_ja_inode_flag *) (((unsigned long) node) | type);
}

static
struct cds_ja_inode *ja_node_ptr(struct cds_ja_inode_flag *node)
{
	return (struct cds_ja_inode *) (((unsigned long) node) & JA_PTR_MASK);
}

static
unsigned long ja_node_type(struct cds_ja_inode_flag *node)
{
	unsigned long type;

	if (ja_node_ptr(node) == NULL) {
		return NODE_INDEX_NULL;
	}
	type = (unsigned int) ((unsigned long) node & JA_TYPE_MASK);
	assert(type < (1UL << JA_TYPE_BITS));
	return type;
}

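/*
 * Illustrative sketch (added for clarity, not part of the original
 * code; the helper name is hypothetical): the type tag travels in the
 * low bits of an aligned node pointer, so encoding with ja_node_flag()
 * and decoding with ja_node_ptr()/ja_node_type() round-trips
 * losslessly, as long as nodes are allocated with at least
 * (1 << JA_TYPE_BITS)-byte alignment.
 */
static inline __attribute__((unused))
void ja_node_flag_roundtrip_check(struct cds_ja_inode *node,
		unsigned long type)
{
	struct cds_ja_inode_flag *flag;

	assert(node != NULL && type < JA_TYPE_MAX_NR);
	flag = ja_node_flag(node, type);
	assert(ja_node_ptr(flag) == node);
	assert(ja_node_type(flag) == type);
}
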
struct cds_ja_inode *alloc_cds_ja_node(const struct cds_ja_type *ja_type)
{
	return calloc(1U << ja_type->order, sizeof(char));
}

void free_cds_ja_node(struct cds_ja_inode *node)
{
	free(node);
}

#define __JA_ALIGN_MASK(v, mask)	(((v) + (mask)) & ~(mask))
#define JA_ALIGN(v, align)		__JA_ALIGN_MASK(v, (typeof(v)) (align) - 1)
#define __JA_FLOOR_MASK(v, mask)	((v) & ~(mask))
#define JA_FLOOR(v, align)		__JA_FLOOR_MASK(v, (typeof(v)) (align) - 1)

static
uint8_t *align_ptr_size(uint8_t *ptr)
{
	return (uint8_t *) JA_ALIGN((unsigned long) ptr, sizeof(void *));
}

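/*
 * Worked example (added for clarity, not part of the original code;
 * the helper name is hypothetical): JA_ALIGN rounds up to the next
 * multiple of a power-of-two alignment and JA_FLOOR rounds down, which
 * is what align_ptr_size() relies on to place the child pointer array
 * on a pointer-aligned boundary right after the child value bytes.
 */
static inline __attribute__((unused))
void ja_align_example_check(void)
{
	assert(JA_ALIGN(13UL, 8) == 16UL);
	assert(JA_ALIGN(16UL, 8) == 16UL);
	assert(JA_FLOOR(13UL, 8) == 8UL);
}
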
static
uint8_t ja_linear_node_get_nr_child(const struct cds_ja_type *type,
		struct cds_ja_inode *node)
{
	assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
	return CMM_LOAD_SHARED(node->u.data[0]);
}

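/*
 * Layout sketch (added for clarity, derived from the accessors below;
 * not part of the original code): a linear node, or one linear pool
 * within a pool node, is laid out in u.data as
 *
 *   data[0]                          nr_child count (one byte)
 *   data[1 .. max_linear_child]      child key values
 *   <padding to pointer alignment>
 *   child pointer array              max_linear_child pointers
 *
 * which is why readers and writers locate the pointer array with
 * align_ptr_size(&values[type->max_linear_child]).
 */
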
/*
 * The order in which values and pointers are read does not matter: if
 * a value is missing, we return NULL. If a value is there, but its
 * associated pointer is still NULL, we return NULL too.
 */
static
struct cds_ja_inode_flag *ja_linear_node_get_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_inode_flag ***child_node_flag_ptr,
		uint8_t n)
{
	uint8_t nr_child;
	uint8_t *values;
	struct cds_ja_inode_flag **pointers;
	struct cds_ja_inode_flag *ptr;
	unsigned int i;

	assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);

	nr_child = ja_linear_node_get_nr_child(type, node);
	cmm_smp_rmb();	/* read nr_child before values and pointers */
	assert(nr_child <= type->max_linear_child);
	assert(type->type_class != RCU_JA_LINEAR || nr_child >= type->min_child);

	values = &node->u.data[1];
	for (i = 0; i < nr_child; i++) {
		if (CMM_LOAD_SHARED(values[i]) == n)
			break;
	}
	if (i >= nr_child)
		return NULL;
	pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
	if (caa_unlikely(child_node_flag_ptr))
		*child_node_flag_ptr = &pointers[i];
	ptr = rcu_dereference(pointers[i]);
	assert(ja_node_ptr(ptr) != NULL);
	return ptr;
}

static
void ja_linear_node_get_ith_pos(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		uint8_t i,
		uint8_t *v,
		struct cds_ja_inode_flag **iter)
{
	uint8_t *values;
	struct cds_ja_inode_flag **pointers;

	assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
	assert(i < ja_linear_node_get_nr_child(type, node));

	values = &node->u.data[1];
	*v = values[i];
	pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
	*iter = pointers[i];
}

static
struct cds_ja_inode_flag *ja_pool_node_get_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_inode_flag ***child_node_flag_ptr,
		uint8_t n)
{
	struct cds_ja_inode *linear;

	assert(type->type_class == RCU_JA_POOL);
	/*
	 * TODO: currently, we select the pool by highest bits. We
	 * should support various encodings.
	 */
	linear = (struct cds_ja_inode *)
		&node->u.data[((unsigned long) n >> (CHAR_BIT - type->nr_pool_order)) << type->pool_size_order];
	return ja_linear_node_get_nth(type, linear, child_node_flag_ptr, n);
}

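/*
 * Worked example (added for clarity, not part of the original code;
 * the helper name is hypothetical): the top nr_pool_order bits of the
 * 8-bit key select which linear pool to use, and each pool spans
 * (1 << pool_size_order) bytes. For the 32-bit type 5 node
 * (nr_pool_order = 1, pool_size_order = 7), key n = 200 selects pool
 * 200 >> 7 = 1, i.e. byte offset 128 within u.data.
 */
static inline __attribute__((unused))
unsigned long ja_pool_node_byte_offset(const struct cds_ja_type *type,
		uint8_t n)
{
	assert(type->type_class == RCU_JA_POOL);
	return ((unsigned long) n >> (CHAR_BIT - type->nr_pool_order))
			<< type->pool_size_order;
}
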
static
struct cds_ja_inode *ja_pool_node_get_ith_pool(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		uint8_t i)
{
	assert(type->type_class == RCU_JA_POOL);
	return (struct cds_ja_inode *)
		&node->u.data[(unsigned int) i << type->pool_size_order];
}

static
struct cds_ja_inode_flag *ja_pigeon_node_get_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_inode_flag ***child_node_flag_ptr,
		uint8_t n)
{
	struct cds_ja_inode_flag **child_node_flag;

	assert(type->type_class == RCU_JA_PIGEON);
	child_node_flag = &((struct cds_ja_inode_flag **) node->u.data)[n];
	dbg_printf("ja_pigeon_node_get_nth child_node_flag_ptr %p\n",
		child_node_flag);
	if (caa_unlikely(child_node_flag_ptr) && *child_node_flag)
		*child_node_flag_ptr = child_node_flag;
	return rcu_dereference(*child_node_flag);
}

/*
 * ja_node_get_nth: get nth item from a node.
 * node_flag is already rcu_dereference'd.
 */
static
struct cds_ja_inode_flag *ja_node_get_nth(struct cds_ja_inode_flag *node_flag,
		struct cds_ja_inode_flag ***child_node_flag_ptr,
		uint8_t n)
{
	unsigned int type_index;
	struct cds_ja_inode *node;
	const struct cds_ja_type *type;

	node = ja_node_ptr(node_flag);
	assert(node != NULL);
	type_index = ja_node_type(node_flag);
	type = &ja_types[type_index];

	switch (type->type_class) {
	case RCU_JA_LINEAR:
		return ja_linear_node_get_nth(type, node,
				child_node_flag_ptr, n);
	case RCU_JA_POOL:
		return ja_pool_node_get_nth(type, node,
				child_node_flag_ptr, n);
	case RCU_JA_PIGEON:
		return ja_pigeon_node_get_nth(type, node,
				child_node_flag_ptr, n);
	default:
		assert(0);
		return (void *) -1UL;
	}
}

/*
 * TODO: use ja_get_nr_child to monitor limits triggering shrink
 * recompaction.
 * Also use ja_get_nr_child to distinguish between a resize and a
 * change of the pool compaction bit(s).
 */
static
unsigned int ja_get_nr_child(struct cds_ja_shadow_node *shadow_node)
{
	return shadow_node->nr_child;
}

static
int ja_linear_node_set_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_shadow_node *shadow_node,
		uint8_t n,
		struct cds_ja_inode_flag *child_node_flag)
{
	uint8_t nr_child;
	uint8_t *values, *nr_child_ptr;
	struct cds_ja_inode_flag **pointers;
	unsigned int i;

	assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);

	nr_child_ptr = &node->u.data[0];
	dbg_printf("linear set nth: nr_child_ptr %p\n", nr_child_ptr);
	nr_child = *nr_child_ptr;
	assert(nr_child <= type->max_linear_child);

	values = &node->u.data[1];
	for (i = 0; i < nr_child; i++) {
		if (values[i] == n)
			return -EEXIST;
	}
	if (nr_child >= type->max_linear_child) {
		/* No space left in this node type */
		return -ENOSPC;
	}
	pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
	assert(pointers[nr_child] == NULL);
	rcu_assign_pointer(pointers[nr_child], child_node_flag);
	CMM_STORE_SHARED(values[nr_child], n);
	cmm_smp_wmb();	/* write value and pointer before nr_child */
	CMM_STORE_SHARED(*nr_child_ptr, nr_child + 1);
	shadow_node->nr_child++;
	dbg_printf("linear set nth: %u child, shadow: %u child, for node %p shadow %p\n",
		(unsigned int) CMM_LOAD_SHARED(*nr_child_ptr),
		(unsigned int) shadow_node->nr_child,
		node, shadow_node);

	return 0;
}

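/*
 * Note on ordering (added for clarity, not part of the original code):
 * the cmm_smp_wmb() above pairs with the cmm_smp_rmb() in
 * ja_linear_node_get_nth(): a reader that observes the incremented
 * nr_child is guaranteed to also observe the value and child pointer
 * stores published before it.
 */
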
static
int ja_pool_node_set_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_shadow_node *shadow_node,
		uint8_t n,
		struct cds_ja_inode_flag *child_node_flag)
{
	struct cds_ja_inode *linear;

	assert(type->type_class == RCU_JA_POOL);
	linear = (struct cds_ja_inode *)
		&node->u.data[((unsigned long) n >> (CHAR_BIT - type->nr_pool_order)) << type->pool_size_order];
	return ja_linear_node_set_nth(type, linear, shadow_node,
			n, child_node_flag);
}

static
int ja_pigeon_node_set_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_shadow_node *shadow_node,
		uint8_t n,
		struct cds_ja_inode_flag *child_node_flag)
{
	struct cds_ja_inode_flag **ptr;

	assert(type->type_class == RCU_JA_PIGEON);
	ptr = &((struct cds_ja_inode_flag **) node->u.data)[n];
	if (*ptr)
		return -EEXIST;
	rcu_assign_pointer(*ptr, child_node_flag);
	shadow_node->nr_child++;
	return 0;
}

/*
 * _ja_node_set_nth: set nth item within a node. Return an error
 * (negative error value) if the item is already there.
 * TODO: exclusive access on node.
 */
static
int _ja_node_set_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_shadow_node *shadow_node,
		uint8_t n,
		struct cds_ja_inode_flag *child_node_flag)
{
	switch (type->type_class) {
	case RCU_JA_LINEAR:
		return ja_linear_node_set_nth(type, node, shadow_node, n,
				child_node_flag);
	case RCU_JA_POOL:
		return ja_pool_node_set_nth(type, node, shadow_node, n,
				child_node_flag);
	case RCU_JA_PIGEON:
		return ja_pigeon_node_set_nth(type, node, shadow_node, n,
				child_node_flag);
	case RCU_JA_NULL:
		return -ENOSPC;
	default:
		assert(0);
		return -EINVAL;
	}

	return 0;
}

/*
 * ja_node_recompact_add: recompact a node, adding a new child.
 * TODO: for pool type, take selection bit(s) into account.
 * Return 0 on success, -ENOENT if need to retry, or other negative
 * error value otherwise.
 */
static
int ja_node_recompact_add(struct cds_ja *ja,
		unsigned int old_type_index,
		const struct cds_ja_type *old_type,
		struct cds_ja_inode *old_node,
		struct cds_ja_shadow_node *shadow_node,
		struct cds_ja_inode_flag **old_node_flag, uint8_t n,
		struct cds_ja_inode_flag *child_node_flag)
{
	unsigned int new_type_index;
	struct cds_ja_inode *new_node;
	struct cds_ja_shadow_node *new_shadow_node;
	const struct cds_ja_type *new_type;
	struct cds_ja_inode_flag *new_node_flag;
	int ret;
	int fallback = 0;

	if (!shadow_node || old_type_index == NODE_INDEX_NULL) {
		new_type_index = 0;
	} else {
		new_type_index = old_type_index + 1;
	}

retry:	/* for fallback */
	dbg_printf("Recompact from type %d to type %d\n",
		old_type_index, new_type_index);
	new_type = &ja_types[new_type_index];
	new_node = alloc_cds_ja_node(new_type);
	if (!new_node)
		return -ENOMEM;
	new_node_flag = ja_node_flag(new_node, new_type_index);

	dbg_printf("Recompact inherit lock from %p\n", shadow_node);
	new_shadow_node = rcuja_shadow_set(ja->ht, new_node, shadow_node);
	if (!new_shadow_node) {
		free(new_node);
		return -ENOMEM;
	}
	if (fallback)
		new_shadow_node->fallback_removal_count =
				JA_FALLBACK_REMOVAL_COUNT;

	assert(old_type->type_class != RCU_JA_PIGEON);
	switch (old_type->type_class) {
	case RCU_JA_LINEAR:
	{
		uint8_t nr_child =
			ja_linear_node_get_nr_child(old_type, old_node);
		unsigned int i;

		for (i = 0; i < nr_child; i++) {
			struct cds_ja_inode_flag *iter;
			uint8_t v;

			ja_linear_node_get_ith_pos(old_type, old_node, i, &v, &iter);
			if (!iter)
				continue;
			ret = _ja_node_set_nth(new_type, new_node,
					new_shadow_node,
					v, iter);
			if (new_type->type_class == RCU_JA_POOL && ret) {
				goto fallback_toosmall;
			}
			assert(!ret);
		}
		break;
	}
	case RCU_JA_POOL:
	{
		unsigned int pool_nr;

		for (pool_nr = 0; pool_nr < (1U << old_type->nr_pool_order); pool_nr++) {
			struct cds_ja_inode *pool =
				ja_pool_node_get_ith_pool(old_type,
					old_node, pool_nr);
			uint8_t nr_child =
				ja_linear_node_get_nr_child(old_type, pool);
			unsigned int j;

			for (j = 0; j < nr_child; j++) {
				struct cds_ja_inode_flag *iter;
				uint8_t v;

				ja_linear_node_get_ith_pos(old_type, pool,
						j, &v, &iter);
				if (!iter)
					continue;
				ret = _ja_node_set_nth(new_type, new_node,
						new_shadow_node,
						v, iter);
				if (new_type->type_class == RCU_JA_POOL
						&& ret) {
					goto fallback_toosmall;
				}
				assert(!ret);
			}
		}
		break;
	}
	case RCU_JA_NULL:
		/* Nothing to copy */
		break;
	case RCU_JA_PIGEON:
	default:
		assert(0);
		ret = -EINVAL;
		goto end;
	}

	/* add node */
	ret = _ja_node_set_nth(new_type, new_node,
			new_shadow_node,
			n, child_node_flag);
	assert(!ret);
	/* Return pointer to the new recompacted node through old_node_flag */
	*old_node_flag = new_node_flag;
	if (old_node) {
		ret = rcuja_shadow_clear(ja->ht, old_node, shadow_node,
				RCUJA_SHADOW_CLEAR_FREE_NODE);
		assert(!ret);
	}

	ret = 0;
end:
	return ret;

fallback_toosmall:
	/* fallback if next pool is too small */
	ret = rcuja_shadow_clear(ja->ht, new_node, new_shadow_node,
			RCUJA_SHADOW_CLEAR_FREE_NODE);
	assert(!ret);

	/* Last type: pigeon */
	new_type_index = (1UL << JA_TYPE_BITS) - 1;
	dbg_printf("Fallback to type %d\n", new_type_index);
	uatomic_inc(&ja->nr_fallback);
	fallback = 1;
	goto retry;
}

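/*
 * Worked example (added for clarity, derived from the code above; not
 * part of the original code): on 32-bit, a full type 4 linear node
 * (25 children) that receives a 26th child is recompacted into the
 * type 5 pool node (new_type_index = old_type_index + 1). If a pool
 * target rejects a child because one of its linear sub-pools is full,
 * fallback_toosmall skips directly to the pigeon type, which is the
 * last encoded type index, (1UL << JA_TYPE_BITS) - 1 == 7.
 */
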
/*
 * Return 0 on success, -ENOENT if need to retry, or other negative
 * error value otherwise.
 */
static
int ja_node_set_nth(struct cds_ja *ja,
		struct cds_ja_inode_flag **node_flag, uint8_t n,
		struct cds_ja_inode_flag *child_node_flag,
		struct cds_ja_shadow_node *shadow_node)
{
	int ret;
	unsigned int type_index;
	const struct cds_ja_type *type;
	struct cds_ja_inode *node;

	dbg_printf("ja_node_set_nth for n=%u, node %p, shadow %p\n",
		(unsigned int) n, ja_node_ptr(*node_flag), shadow_node);

	node = ja_node_ptr(*node_flag);
	type_index = ja_node_type(*node_flag);
	type = &ja_types[type_index];
	ret = _ja_node_set_nth(type, node, shadow_node,
			n, child_node_flag);
	if (ret == -ENOSPC) {
		/* Not enough space in node, need to recompact. */
		ret = ja_node_recompact_add(ja, type_index, type, node,
				shadow_node, node_flag, n, child_node_flag);
	}
	return ret;
}

struct cds_hlist_head *cds_ja_lookup(struct cds_ja *ja, uint64_t key)
{
	unsigned int tree_depth, i;
	struct cds_ja_inode_flag *node_flag;

	if (caa_unlikely(key > ja->key_max))
		return NULL;
	tree_depth = ja->tree_depth;
	node_flag = rcu_dereference(ja->root);

	/* level 0: root node */
	if (!ja_node_ptr(node_flag))
		return NULL;

	for (i = 1; i < tree_depth; i++) {
		uint8_t iter_key;

		iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1)));
		node_flag = ja_node_get_nth(node_flag, NULL,
				iter_key);
		dbg_printf("cds_ja_lookup iter key lookup %u finds node_flag %p\n",
			(unsigned int) iter_key, node_flag);
		if (!ja_node_ptr(node_flag))
			return NULL;
	}

	/* Last level lookup succeeded. We got an actual match. */
	return (struct cds_hlist_head *) node_flag;
}

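/*
 * Worked example (added for clarity, not part of the original code):
 * with 32-bit keys, tree_depth is (32 >> JA_LOG2_BITS_PER_BYTE) + 1 = 5
 * (level 0 being the root pointer), so the lookup above consumes key
 * 0x12345678 one byte per level: 0x12 at level 1, 0x34 at level 2,
 * 0x56 at level 3 and 0x78 at the last level, whose match is the head
 * of the duplicate list.
 */
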
/*
 * We reached an unpopulated node. Create it and the children we need,
 * and then attach the entire branch to the current node. This may
 * trigger recompaction of the current node. Locks needed: node lock
 * (for add), and, possibly, parent node lock (to update pointer due to
 * node recompaction).
 *
 * First take node lock, check if recompaction is needed, then take
 * parent lock (if needed). Then we can proceed to create the new
 * branch. Publish the new branch, and release locks.
 * TODO: we currently always take the parent lock even when not needed.
 */
static
int ja_attach_node(struct cds_ja *ja,
		struct cds_ja_inode_flag **node_flag_ptr,
		struct cds_ja_inode_flag *node_flag,
		struct cds_ja_inode_flag *parent_node_flag,
		uint64_t key,
		unsigned int level,
		struct cds_ja_node *child_node)
{
	struct cds_ja_shadow_node *shadow_node = NULL,
			*parent_shadow_node = NULL,
			*iter_shadow_node;
	struct cds_ja_inode *node = ja_node_ptr(node_flag);
	struct cds_ja_inode *parent_node = ja_node_ptr(parent_node_flag);
	struct cds_hlist_head head;
	struct cds_ja_inode_flag *iter_node_flag, *iter_dest_node_flag;
	int ret, i;
	struct cds_ja_inode_flag *created_nodes[JA_MAX_DEPTH];
	int nr_created_nodes = 0;

	dbg_printf("Attach node at level %u (node %p, node_flag %p)\n",
		level, node, node_flag);

	assert(node);
	shadow_node = rcuja_shadow_lookup_lock(ja->ht, node);
	if (!shadow_node) {
		ret = -ENOENT;
		goto end;
	}
	if (parent_node) {
		parent_shadow_node = rcuja_shadow_lookup_lock(ja->ht,
				parent_node);
		if (!parent_shadow_node) {
			ret = -ENOENT;
			goto unlock_shadow;
		}
	}

	/* Create new branch, starting from bottom */
	CDS_INIT_HLIST_HEAD(&head);
	cds_hlist_add_head_rcu(&child_node->list, &head);
	iter_node_flag = (struct cds_ja_inode_flag *) head.next;

	/* Create shadow node for the leaf node */
	dbg_printf("leaf shadow node creation\n");
	iter_shadow_node = rcuja_shadow_set(ja->ht,
			ja_node_ptr(iter_node_flag), NULL);
	if (!iter_shadow_node) {
		ret = -ENOMEM;
		goto check_error;
	}
	created_nodes[nr_created_nodes++] = iter_node_flag;

	for (i = ja->tree_depth; i > (int) level; i--) {
		uint8_t iter_key;

		iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (ja->tree_depth - i)));
		dbg_printf("branch creation level %d, key %u\n",
			i - 1, (unsigned int) iter_key);
		iter_dest_node_flag = NULL;
		ret = ja_node_set_nth(ja, &iter_dest_node_flag,
				iter_key,
				iter_node_flag,
				NULL);
		if (ret)
			goto check_error;
		created_nodes[nr_created_nodes++] = iter_dest_node_flag;
		iter_node_flag = iter_dest_node_flag;
	}

	if (level > 1) {
		uint8_t iter_key;

		iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (ja->tree_depth - level)));
		/* We need to use set_nth on the previous level. */
		iter_dest_node_flag = node_flag;
		ret = ja_node_set_nth(ja, &iter_dest_node_flag,
				iter_key,
				iter_node_flag,
				shadow_node);
		if (ret)
			goto check_error;
		created_nodes[nr_created_nodes++] = iter_dest_node_flag;
		iter_node_flag = iter_dest_node_flag;
	}

	/* Publish new branch */
	dbg_printf("Publish branch %p, replacing %p\n",
		iter_node_flag, *node_flag_ptr);
	rcu_assign_pointer(*node_flag_ptr, iter_node_flag);

	/* Success */
	ret = 0;

check_error:
	if (ret) {
		for (i = 0; i < nr_created_nodes; i++) {
			int tmpret;
			int flags;

			flags = RCUJA_SHADOW_CLEAR_FREE_LOCK;
			if (i)
				flags |= RCUJA_SHADOW_CLEAR_FREE_NODE;
			tmpret = rcuja_shadow_clear(ja->ht,
					ja_node_ptr(created_nodes[i]),
					NULL,
					flags);
			assert(!tmpret);
		}
	}
	if (parent_shadow_node)
		rcuja_shadow_unlock(parent_shadow_node);
unlock_shadow:
	if (shadow_node)
		rcuja_shadow_unlock(shadow_node);
end:
	return ret;
}

/*
 * Lock the hlist head shadow node mutex, and add node to the list of
 * duplicates. Failure can happen if a concurrent removal removes the
 * last node with the same key before we get the lock.
 * Return 0 on success, negative error value on failure.
 */
static
int ja_chain_node(struct cds_ja *ja,
		struct cds_hlist_head *head,
		struct cds_ja_node *node)
{
	struct cds_ja_shadow_node *shadow_node;

	shadow_node = rcuja_shadow_lookup_lock(ja->ht,
			(struct cds_ja_inode *) head);
	if (!shadow_node)
		return -ENOENT;
	cds_hlist_add_head_rcu(&node->list, head);
	rcuja_shadow_unlock(shadow_node);
	return 0;
}

int cds_ja_add(struct cds_ja *ja, uint64_t key,
		struct cds_ja_node *new_node)
{
	unsigned int tree_depth, i;
	struct cds_ja_inode_flag **node_flag_ptr;	/* in parent */
	struct cds_ja_inode_flag *node_flag,
		*parent_node_flag,
		*parent2_node_flag;
	int ret;

	if (caa_unlikely(key > ja->key_max))
		return -EINVAL;
	tree_depth = ja->tree_depth;

retry:
	dbg_printf("cds_ja_add attempt: key %" PRIu64 ", node %p\n",
		key, new_node);
	parent2_node_flag = NULL;
	parent_node_flag =
		(struct cds_ja_inode_flag *) &ja->root;	/* Use root ptr address as key for mutex */
	node_flag_ptr = &ja->root;
	node_flag = rcu_dereference(*node_flag_ptr);

	/* Iterate on all internal levels */
	for (i = 1; i < tree_depth; i++) {
		uint8_t iter_key;

		dbg_printf("cds_ja_add iter node_flag_ptr %p node_flag %p\n",
			*node_flag_ptr, node_flag);
		if (!ja_node_ptr(node_flag)) {
			ret = ja_attach_node(ja, node_flag_ptr,
					parent_node_flag, parent2_node_flag,
					key, i, new_node);
			if (ret == -ENOENT || ret == -EEXIST)
				goto retry;
			else
				goto end;
		}
		iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1)));
		parent2_node_flag = parent_node_flag;
		parent_node_flag = node_flag;
		node_flag = ja_node_get_nth(node_flag,
				&node_flag_ptr,
				iter_key);
		dbg_printf("cds_ja_add iter key lookup %u finds node_flag %p node_flag_ptr %p\n",
			(unsigned int) iter_key, node_flag, *node_flag_ptr);
	}

	/*
	 * We reached the bottom of the tree; simply add the node to the
	 * last internal level, or chain it if the key is already
	 * present.
	 */
	if (!ja_node_ptr(node_flag)) {
		dbg_printf("cds_ja_add last node_flag_ptr %p node_flag %p\n",
			*node_flag_ptr, node_flag);
		ret = ja_attach_node(ja, node_flag_ptr, parent_node_flag,
				parent2_node_flag, key, i, new_node);
	} else {
		ret = ja_chain_node(ja,
			(struct cds_hlist_head *) ja_node_ptr(node_flag),
			new_node);
	}
	if (ret == -ENOENT)
		goto retry;
end:
	return ret;
}

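/*
 * Usage sketch (added for clarity, not part of the original code; the
 * embedding structure and its field names are hypothetical): callers
 * embed a struct cds_ja_node in their own structure, add it under a
 * key, and look it up under the RCU read-side lock of the flavor the
 * array was created with, e.g.:
 *
 *	struct my_item {
 *		struct cds_ja_node ja_node;
 *		uint64_t key;
 *	};
 *
 *	ret = cds_ja_add(ja, item->key, &item->ja_node);
 *	...
 *	rcu_read_lock();
 *	head = cds_ja_lookup(ja, key);
 *	if (head) {
 *		// duplicates are chained through cds_ja_node::list
 *	}
 *	rcu_read_unlock();
 */
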
struct cds_ja *_cds_ja_new(unsigned int key_bits,
		const struct rcu_flavor_struct *flavor)
{
	struct cds_ja *ja;
	int ret;
	struct cds_ja_shadow_node *root_shadow_node;

	ja = calloc(sizeof(*ja), 1);
	if (!ja)
		goto ja_error;

	switch (key_bits) {
	case 8:
		ja->key_max = UINT8_MAX;
		break;
	case 16:
		ja->key_max = UINT16_MAX;
		break;
	case 32:
		ja->key_max = UINT32_MAX;
		break;
	case 64:
		ja->key_max = UINT64_MAX;
		break;
	default:
		goto check_error;
	}

	/* ja->root is NULL */
	/* tree_depth 0 is for pointer to root node */
	ja->tree_depth = (key_bits >> JA_LOG2_BITS_PER_BYTE) + 1;
	assert(ja->tree_depth <= JA_MAX_DEPTH);
	ja->ht = rcuja_create_ht(flavor);
	if (!ja->ht)
		goto ht_error;

	/*
	 * Note: we should not free this node until the judy array is
	 * destroyed.
	 */
	root_shadow_node = rcuja_shadow_set(ja->ht,
			ja_node_ptr((struct cds_ja_inode_flag *) &ja->root),
			NULL);
	if (!root_shadow_node) {
		ret = -ENOMEM;
		goto ht_node_error;
	}
	root_shadow_node->is_root = 1;

	return ja;

ht_node_error:
	ret = rcuja_delete_ht(ja->ht);
	assert(!ret);
ht_error:
check_error:
	free(ja);
ja_error:
	return NULL;
}

/*
 * There should be no more concurrent adds to the judy array while it is
 * being destroyed (ensured by the caller).
 */
int cds_ja_destroy(struct cds_ja *ja)
{
	int ret;

	rcuja_shadow_prune(ja->ht,
		RCUJA_SHADOW_CLEAR_FREE_NODE | RCUJA_SHADOW_CLEAR_FREE_LOCK);
	ret = rcuja_delete_ht(ja->ht);
	if (ret)
		return ret;
	if (uatomic_read(&ja->nr_fallback))
		fprintf(stderr,
			"[warning] RCU Judy Array used %lu fallback node(s)\n",
			uatomic_read(&ja->nr_fallback));
	free(ja);
	return 0;
}