Fix rcuja: fallback when adding element to full pool
[userspace-rcu.git] / rcuja / rcuja.c
61009379
MD
1/*
2 * rcuja/rcuja.c
3 *
4 * Userspace RCU library - RCU Judy Array
5 *
6 * Copyright 2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 */
22
195e72d3 23#define _LGPL_SOURCE
e5227865 24#include <stdint.h>
8e519e3c 25#include <errno.h>
d68c6810 26#include <limits.h>
61009379 27#include <urcu/rcuja.h>
d68c6810
MD
28#include <urcu/compiler.h>
29#include <urcu/arch.h>
30#include <assert.h>
8e519e3c 31#include <urcu-pointer.h>
f07b240f 32#include <urcu/uatomic.h>
b4540e8a 33#include <stdint.h>
8e519e3c 34
61009379 35#include "rcuja-internal.h"
d68c6810 36#include "bitfield.h"
61009379 37
d96bfb0d 38enum cds_ja_type_class {
e5227865 39 RCU_JA_LINEAR = 0, /* Type A */
fd800776
MD
40 /* 32-bit: 1 to 25 children, 8 to 128 bytes */
41 /* 64-bit: 1 to 28 children, 16 to 256 bytes */
42 RCU_JA_POOL = 1, /* Type B */
43 /* 32-bit: 26 to 100 children, 256 to 512 bytes */
44 /* 64-bit: 29 to 112 children, 512 to 1024 bytes */
e5227865 45 RCU_JA_PIGEON = 2, /* Type C */
fd800776
MD
46 /* 32-bit: 101 to 256 children, 1024 bytes */
47 /* 64-bit: 113 to 256 children, 2048 bytes */
e5227865 48 /* Leaf nodes are implicit from their height in the tree */
1db4943c 49 RCU_JA_NR_TYPES,
e1db2db5
MD
50
51 RCU_JA_NULL, /* not an encoded type, but keeps code regular */
e5227865
MD
52};
53
d96bfb0d
MD
54struct cds_ja_type {
55 enum cds_ja_type_class type_class;
8e519e3c
MD
56 uint16_t min_child; /* minimum number of children: 1 to 256 */
57 uint16_t max_child; /* maximum number of children: 1 to 256 */
58 uint16_t max_linear_child; /* per-pool max nr. children: 1 to 256 */
59 uint16_t order; /* node size is (1 << order), in bytes */
fd800776
MD
60 uint16_t nr_pool_order; /* number of pools */
61 uint16_t pool_size_order; /* pool size */
e5227865
MD
62};
63
64/*
65 * Iteration on the array to find the right node size for the number of
d68c6810 66 * children stops when it reaches .max_child == 256 (this is the largest
e5227865 67 * possible node size, which contains 256 children).
d68c6810
MD
 68 * The min_child overlaps with the previous max_child to provide a
 69 * hysteresis loop for reallocation under patterns of cyclic add/removal
 70 * within the same node.
 71 * The index within the following arrays is represented on 3
 72 * bits. It identifies the node type, min/max number of children, and
 73 * the size order.
3d45251f
MD
 74 * The max_child values for the RCU_JA_POOL below result from
 75 * statistical approximation: over a million populations, the max_child
 76 * covers between 97% and 99% of the populations generated. Therefore, a
 77 * fallback should exist to cover the rare extreme population imbalance
 78 * cases, but it will not have a major impact on speed or space
 79 * consumption, since those are rare cases.
e5227865 80 */
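As a rough illustration of the table walk described above, here is a minimal standalone sketch (not part of the library; example_pick_type() and its table are hypothetical) that selects the smallest type whose max_child fits a given child count, using the 32-bit max_child values listed below:

	#include <stdint.h>
	#include <stdio.h>

	struct example_type { uint16_t min_child, max_child; };

	/* max_child values taken from the 32-bit ja_types table below. */
	static const struct example_type example_types[] = {
		{ 1, 1 }, { 1, 3 }, { 3, 6 }, { 4, 12 }, { 10, 25 },
		{ 20, 48 }, { 45, 92 }, { 89, 256 },
	};

	static unsigned int example_pick_type(uint16_t nr_children)
	{
		unsigned int i;

		for (i = 0; i < sizeof(example_types) / sizeof(example_types[0]); i++) {
			if (nr_children <= example_types[i].max_child)
				return i;	/* fits in the 3-bit type index */
		}
		return i - 1;	/* largest (pigeon) type holds 256 children */
	}

	int main(void)
	{
		printf("%u\n", example_pick_type(30));	/* prints 5 (first pool type) */
		return 0;
	}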
e5227865 81
d68c6810
MD
82#if (CAA_BITS_PER_LONG < 64)
83/* 32-bit pointers */
1db4943c
MD
84enum {
85 ja_type_0_max_child = 1,
86 ja_type_1_max_child = 3,
87 ja_type_2_max_child = 6,
88 ja_type_3_max_child = 12,
89 ja_type_4_max_child = 25,
90 ja_type_5_max_child = 48,
91 ja_type_6_max_child = 92,
92 ja_type_7_max_child = 256,
e1db2db5 93 ja_type_8_max_child = 0, /* NULL */
1db4943c
MD
94};
95
8e519e3c
MD
96enum {
97 ja_type_0_max_linear_child = 1,
98 ja_type_1_max_linear_child = 3,
99 ja_type_2_max_linear_child = 6,
100 ja_type_3_max_linear_child = 12,
101 ja_type_4_max_linear_child = 25,
102 ja_type_5_max_linear_child = 24,
103 ja_type_6_max_linear_child = 23,
104};
105
1db4943c
MD
106enum {
107 ja_type_5_nr_pool_order = 1,
108 ja_type_6_nr_pool_order = 2,
109};
110
d96bfb0d 111const struct cds_ja_type ja_types[] = {
8e519e3c
MD
112 { .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_0_max_child, .max_linear_child = ja_type_0_max_linear_child, .order = 3, },
113 { .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_1_max_child, .max_linear_child = ja_type_1_max_linear_child, .order = 4, },
114 { .type_class = RCU_JA_LINEAR, .min_child = 3, .max_child = ja_type_2_max_child, .max_linear_child = ja_type_2_max_linear_child, .order = 5, },
115 { .type_class = RCU_JA_LINEAR, .min_child = 4, .max_child = ja_type_3_max_child, .max_linear_child = ja_type_3_max_linear_child, .order = 6, },
116 { .type_class = RCU_JA_LINEAR, .min_child = 10, .max_child = ja_type_4_max_child, .max_linear_child = ja_type_4_max_linear_child, .order = 7, },
e5227865 117
fd800776 118 /* Pools may fill sooner than max_child */
8e519e3c
MD
119 { .type_class = RCU_JA_POOL, .min_child = 20, .max_child = ja_type_5_max_child, .max_linear_child = ja_type_5_max_linear_child, .order = 8, .nr_pool_order = ja_type_5_nr_pool_order, .pool_size_order = 7, },
120 { .type_class = RCU_JA_POOL, .min_child = 45, .max_child = ja_type_6_max_child, .max_linear_child = ja_type_6_max_linear_child, .order = 9, .nr_pool_order = ja_type_6_nr_pool_order, .pool_size_order = 7, },
3d45251f
MD
121
122 /*
123 * TODO: Upon node removal below min_child, if child pool is
124 * filled beyond capacity, we need to roll back to pigeon.
125 */
1db4943c 126 { .type_class = RCU_JA_PIGEON, .min_child = 89, .max_child = ja_type_7_max_child, .order = 10, },
e1db2db5
MD
127
128 { .type_class = RCU_JA_NULL, .min_child = 0, .max_child = ja_type_8_max_child, },
d68c6810 129};
d68c6810
MD
130#else /* !(CAA_BITS_PER_LONG < 64) */
131/* 64-bit pointers */
1db4943c
MD
132enum {
133 ja_type_0_max_child = 1,
134 ja_type_1_max_child = 3,
135 ja_type_2_max_child = 7,
136 ja_type_3_max_child = 14,
137 ja_type_4_max_child = 28,
138 ja_type_5_max_child = 54,
139 ja_type_6_max_child = 104,
140 ja_type_7_max_child = 256,
e1db2db5 141 ja_type_8_max_child = 256,
1db4943c
MD
142};
143
8e519e3c
MD
144enum {
145 ja_type_0_max_linear_child = 1,
146 ja_type_1_max_linear_child = 3,
147 ja_type_2_max_linear_child = 7,
148 ja_type_3_max_linear_child = 14,
149 ja_type_4_max_linear_child = 28,
150 ja_type_5_max_linear_child = 27,
151 ja_type_6_max_linear_child = 26,
152};
153
1db4943c
MD
154enum {
155 ja_type_5_nr_pool_order = 1,
156 ja_type_6_nr_pool_order = 2,
157};
158
d96bfb0d 159const struct cds_ja_type ja_types[] = {
8e519e3c
MD
160 { .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_0_max_child, .max_linear_child = ja_type_0_max_linear_child, .order = 4, },
161 { .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_1_max_child, .max_linear_child = ja_type_1_max_linear_child, .order = 5, },
162 { .type_class = RCU_JA_LINEAR, .min_child = 3, .max_child = ja_type_2_max_child, .max_linear_child = ja_type_2_max_linear_child, .order = 6, },
163 { .type_class = RCU_JA_LINEAR, .min_child = 5, .max_child = ja_type_3_max_child, .max_linear_child = ja_type_3_max_linear_child, .order = 7, },
164 { .type_class = RCU_JA_LINEAR, .min_child = 10, .max_child = ja_type_4_max_child, .max_linear_child = ja_type_4_max_linear_child, .order = 8, },
e5227865 165
3d45251f 166 /* Pools may fill sooner than max_child. */
8e519e3c
MD
167 { .type_class = RCU_JA_POOL, .min_child = 22, .max_child = ja_type_5_max_child, .max_linear_child = ja_type_5_max_linear_child, .order = 9, .nr_pool_order = ja_type_5_nr_pool_order, .pool_size_order = 8, },
168 { .type_class = RCU_JA_POOL, .min_child = 51, .max_child = ja_type_6_max_child, .max_linear_child = ja_type_6_max_linear_child, .order = 10, .nr_pool_order = ja_type_6_nr_pool_order, .pool_size_order = 8, },
e5227865 169
3d45251f
MD
170 /*
171 * TODO: Upon node removal below min_child, if child pool is
172 * filled beyond capacity, we need to roll back to pigeon.
173 */
1db4943c 174 { .type_class = RCU_JA_PIGEON, .min_child = 101, .max_child = ja_type_7_max_child, .order = 11, },
e1db2db5
MD
175
176 { .type_class = RCU_JA_NULL, .min_child = 0, .max_child = ja_type_8_max_child, },
e5227865 177};
d68c6810 178#endif /* !(BITS_PER_LONG < 64) */
e5227865 179
1db4943c
MD
180static inline __attribute__((unused))
181void static_array_size_check(void)
182{
e1db2db5 183 CAA_BUILD_BUG_ON(CAA_ARRAY_SIZE(ja_types) < JA_TYPE_MAX_NR);
1db4943c
MD
184}
185
e5227865 186/*
d96bfb0d 187 * The cds_ja_node contains the compressed node data needed for
1db4943c
MD
188 * read-side. For linear and pool node configurations, it starts with a
189 * byte counting the number of children in the node. Then, the
190 * node-specific data is placed.
 191 * The node mutex, if any is needed, protecting concurrent updates of
 192 * each node is placed in a separate hash table indexed by node address.
193 * For the pigeon configuration, the number of children is also kept in
194 * a separate hash table, indexed by node address, because it is only
195 * required for updates.
e5227865 196 */
1db4943c 197
ff38c745
MD
198#define DECLARE_LINEAR_NODE(index) \
199 struct { \
200 uint8_t nr_child; \
201 uint8_t child_value[ja_type_## index ##_max_linear_child]; \
b4540e8a 202 struct cds_ja_inode_flag *child_ptr[ja_type_## index ##_max_linear_child]; \
ff38c745
MD
203 }
204
205#define DECLARE_POOL_NODE(index) \
206 struct { \
207 struct { \
208 uint8_t nr_child; \
209 uint8_t child_value[ja_type_## index ##_max_linear_child]; \
b4540e8a 210 struct cds_ja_inode_flag *child_ptr[ja_type_## index ##_max_linear_child]; \
ff38c745
MD
211 } linear[1U << ja_type_## index ##_nr_pool_order]; \
212 }
1db4943c 213
b4540e8a 214struct cds_ja_inode {
1db4943c
MD
215 union {
216 /* Linear configuration */
217 DECLARE_LINEAR_NODE(0) conf_0;
218 DECLARE_LINEAR_NODE(1) conf_1;
219 DECLARE_LINEAR_NODE(2) conf_2;
220 DECLARE_LINEAR_NODE(3) conf_3;
221 DECLARE_LINEAR_NODE(4) conf_4;
222
223 /* Pool configuration */
224 DECLARE_POOL_NODE(5) conf_5;
225 DECLARE_POOL_NODE(6) conf_6;
226
227 /* Pigeon configuration */
228 struct {
b4540e8a 229 struct cds_ja_inode_flag *child[ja_type_7_max_child];
1db4943c
MD
230 } conf_7;
231 /* data aliasing nodes for computed accesses */
b4540e8a 232 uint8_t data[sizeof(struct cds_ja_inode_flag *) * ja_type_7_max_child];
1db4943c 233 } u;
e5227865
MD
234};
235
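The layout that ja_linear_node_get_nth() and ja_linear_node_set_nth() rely on below can be summarized by a small standalone sketch (example_pointer_array_offset() is hypothetical): byte 0 of a linear node, or of a pool's linear sub-node, holds nr_child, bytes 1..max_linear_child hold the child values, and the child pointer array starts at the next pointer-aligned offset.

	#include <stddef.h>
	#include <stdio.h>

	static size_t example_pointer_array_offset(size_t max_linear_child)
	{
		size_t off = 1 + max_linear_child;	/* nr_child byte + value bytes */

		/* Round up to pointer alignment, like align_ptr_size() below. */
		return (off + sizeof(void *) - 1) & ~(sizeof(void *) - 1);
	}

	int main(void)
	{
		/* 64-bit ja_type 4 (max_linear_child == 28): pointers start at byte 32. */
		printf("%zu\n", example_pointer_array_offset(28));
		return 0;
	}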
2e313670
MD
236enum ja_recompact {
237 JA_RECOMPACT,
238 JA_RECOMPACT_ADD,
239 JA_RECOMPACT_DEL,
240};
241
b4540e8a 242struct cds_ja_inode *alloc_cds_ja_node(const struct cds_ja_type *ja_type)
e5227865 243{
1db4943c 244 return calloc(1U << ja_type->order, sizeof(char));
e5227865
MD
245}
246
b4540e8a 247void free_cds_ja_node(struct cds_ja_inode *node)
e5227865
MD
248{
249 free(node);
250}
251
d68c6810
MD
252#define __JA_ALIGN_MASK(v, mask) (((v) + (mask)) & ~(mask))
253#define JA_ALIGN(v, align) __JA_ALIGN_MASK(v, (typeof(v)) (align) - 1)
254#define __JA_FLOOR_MASK(v, mask) ((v) & ~(mask))
255#define JA_FLOOR(v, align) __JA_FLOOR_MASK(v, (typeof(v)) (align) - 1)
256
257static
1db4943c 258uint8_t *align_ptr_size(uint8_t *ptr)
d68c6810 259{
1db4943c 260 return (uint8_t *) JA_ALIGN((unsigned long) ptr, sizeof(void *));
d68c6810
MD
261}
262
11c5e016 263static
d96bfb0d 264uint8_t ja_linear_node_get_nr_child(const struct cds_ja_type *type,
b4540e8a 265 struct cds_ja_inode *node)
11c5e016
MD
266{
267 assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
2e313670 268 return rcu_dereference(node->u.data[0]);
11c5e016
MD
269}
270
13a7f5a6
MD
271/*
 272 * The order in which values and pointers are read does not matter: if
 273 * a value is missing, we return NULL. If a value is there, but its
 274 * associated pointer is still NULL, we return NULL too.
275 */
d68c6810 276static
b4540e8a
MD
277struct cds_ja_inode_flag *ja_linear_node_get_nth(const struct cds_ja_type *type,
278 struct cds_ja_inode *node,
5a9a87dd 279 struct cds_ja_inode_flag ***child_node_flag_ptr,
b62a8d0c 280 struct cds_ja_inode_flag **child_node_flag_v,
b0ca2d21 281 struct cds_ja_inode_flag ***node_flag_ptr,
8e519e3c 282 uint8_t n)
d68c6810
MD
283{
284 uint8_t nr_child;
285 uint8_t *values;
b4540e8a
MD
286 struct cds_ja_inode_flag **pointers;
287 struct cds_ja_inode_flag *ptr;
d68c6810
MD
288 unsigned int i;
289
8e519e3c 290 assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
d68c6810 291
11c5e016 292 nr_child = ja_linear_node_get_nr_child(type, node);
13a7f5a6 293 cmm_smp_rmb(); /* read nr_child before values and pointers */
8e519e3c
MD
294 assert(nr_child <= type->max_linear_child);
295 assert(type->type_class != RCU_JA_LINEAR || nr_child >= type->min_child);
d68c6810 296
1db4943c 297 values = &node->u.data[1];
d68c6810 298 for (i = 0; i < nr_child; i++) {
13a7f5a6 299 if (CMM_LOAD_SHARED(values[i]) == n)
d68c6810
MD
300 break;
301 }
b0ca2d21
MD
302 if (i >= nr_child) {
303 if (caa_unlikely(node_flag_ptr))
304 *node_flag_ptr = NULL;
d68c6810 305 return NULL;
b0ca2d21 306 }
b4540e8a 307 pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
13a7f5a6 308 ptr = rcu_dereference(pointers[i]);
2e313670
MD
309 if (caa_unlikely(child_node_flag_ptr) && ptr)
310 *child_node_flag_ptr = &pointers[i];
b62a8d0c
MD
311 if (caa_unlikely(child_node_flag_v) && ptr)
312 *child_node_flag_v = ptr;
b0ca2d21
MD
313 if (caa_unlikely(node_flag_ptr))
314 *node_flag_ptr = &pointers[i];
d68c6810
MD
315 return ptr;
316}
317
11c5e016 318static
5a9a87dd 319void ja_linear_node_get_ith_pos(const struct cds_ja_type *type,
b4540e8a 320 struct cds_ja_inode *node,
11c5e016
MD
321 uint8_t i,
322 uint8_t *v,
b4540e8a 323 struct cds_ja_inode_flag **iter)
11c5e016
MD
324{
325 uint8_t *values;
b4540e8a 326 struct cds_ja_inode_flag **pointers;
11c5e016
MD
327
328 assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
329 assert(i < ja_linear_node_get_nr_child(type, node));
330
331 values = &node->u.data[1];
332 *v = values[i];
b4540e8a 333 pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
11c5e016
MD
334 *iter = pointers[i];
335}
336
d68c6810 337static
b4540e8a
MD
338struct cds_ja_inode_flag *ja_pool_node_get_nth(const struct cds_ja_type *type,
339 struct cds_ja_inode *node,
5a9a87dd 340 struct cds_ja_inode_flag ***child_node_flag_ptr,
b62a8d0c 341 struct cds_ja_inode_flag **child_node_flag_v,
b0ca2d21 342 struct cds_ja_inode_flag ***node_flag_ptr,
8e519e3c 343 uint8_t n)
d68c6810 344{
b4540e8a 345 struct cds_ja_inode *linear;
d68c6810 346
fd800776 347 assert(type->type_class == RCU_JA_POOL);
e1db2db5
MD
348 /*
349 * TODO: currently, we select the pool by highest bits. We
350 * should support various encodings.
351 */
b4540e8a 352 linear = (struct cds_ja_inode *)
1db4943c 353 &node->u.data[((unsigned long) n >> (CHAR_BIT - type->nr_pool_order)) << type->pool_size_order];
b0ca2d21 354 return ja_linear_node_get_nth(type, linear, child_node_flag_ptr,
b62a8d0c 355 child_node_flag_v, node_flag_ptr, n);
d68c6810
MD
356}
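The pool selection arithmetic used just above can be checked with a short standalone sketch; the parameter values are those of the 64-bit ja_type 6 entry (nr_pool_order == 2, pool_size_order == 8), and the key byte 0xC7 is an arbitrary example:

	#include <limits.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int nr_pool_order = 2, pool_size_order = 8;
		unsigned char n = 0xC7;	/* key byte being looked up */

		/* The top nr_pool_order bits of n select one of the 4 pools... */
		unsigned long pool_index = (unsigned long) n >> (CHAR_BIT - nr_pool_order);
		/* ...and each pool is (1 << pool_size_order) bytes of u.data. */
		unsigned long byte_offset = pool_index << pool_size_order;

		printf("pool %lu, byte offset %lu\n", pool_index, byte_offset);	/* pool 3, 768 */
		return 0;
	}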
357
11c5e016 358static
b4540e8a
MD
359struct cds_ja_inode *ja_pool_node_get_ith_pool(const struct cds_ja_type *type,
360 struct cds_ja_inode *node,
11c5e016
MD
361 uint8_t i)
362{
363 assert(type->type_class == RCU_JA_POOL);
b4540e8a 364 return (struct cds_ja_inode *)
11c5e016
MD
365 &node->u.data[(unsigned int) i << type->pool_size_order];
366}
367
d68c6810 368static
b4540e8a
MD
369struct cds_ja_inode_flag *ja_pigeon_node_get_nth(const struct cds_ja_type *type,
370 struct cds_ja_inode *node,
5a9a87dd 371 struct cds_ja_inode_flag ***child_node_flag_ptr,
b62a8d0c 372 struct cds_ja_inode_flag **child_node_flag_v,
b0ca2d21 373 struct cds_ja_inode_flag ***node_flag_ptr,
8e519e3c 374 uint8_t n)
d68c6810 375{
5a9a87dd 376 struct cds_ja_inode_flag **child_node_flag;
b62a8d0c 377 struct cds_ja_inode_flag *child_node_flag_read;
5a9a87dd 378
d68c6810 379 assert(type->type_class == RCU_JA_PIGEON);
5a9a87dd 380 child_node_flag = &((struct cds_ja_inode_flag **) node->u.data)[n];
b62a8d0c 381 child_node_flag_read = rcu_dereference(*child_node_flag);
582a6ade
MD
382 dbg_printf("ja_pigeon_node_get_nth child_node_flag_ptr %p\n",
383 child_node_flag);
b62a8d0c 384 if (caa_unlikely(child_node_flag_ptr) && child_node_flag_read)
5a9a87dd 385 *child_node_flag_ptr = child_node_flag;
b62a8d0c
MD
386 if (caa_unlikely(child_node_flag_v) && child_node_flag_read)
387 *child_node_flag_v = child_node_flag_read;
b0ca2d21
MD
388 if (caa_unlikely(node_flag_ptr))
389 *node_flag_ptr = child_node_flag;
b62a8d0c 390 return child_node_flag_read;
d68c6810
MD
391}
392
2e313670
MD
393static
394struct cds_ja_inode_flag *ja_pigeon_node_get_ith_pos(const struct cds_ja_type *type,
395 struct cds_ja_inode *node,
396 uint8_t i)
397{
b62a8d0c 398 return ja_pigeon_node_get_nth(type, node, NULL, NULL, NULL, i);
2e313670
MD
399}
400
13a7f5a6
MD
401/*
402 * ja_node_get_nth: get nth item from a node.
403 * node_flag is already rcu_dereference'd.
404 */
d68c6810 405static
b62a8d0c 406struct cds_ja_inode_flag *ja_node_get_nth(struct cds_ja_inode_flag *node_flag,
5a9a87dd 407 struct cds_ja_inode_flag ***child_node_flag_ptr,
b62a8d0c 408 struct cds_ja_inode_flag **child_node_flag,
b0ca2d21 409 struct cds_ja_inode_flag ***node_flag_ptr,
8e519e3c 410 uint8_t n)
d68c6810
MD
411{
412 unsigned int type_index;
b4540e8a 413 struct cds_ja_inode *node;
d96bfb0d 414 const struct cds_ja_type *type;
d68c6810 415
d68c6810 416 node = ja_node_ptr(node_flag);
5a9a87dd 417 assert(node != NULL);
d68c6810
MD
418 type_index = ja_node_type(node_flag);
419 type = &ja_types[type_index];
420
421 switch (type->type_class) {
422 case RCU_JA_LINEAR:
5a9a87dd 423 return ja_linear_node_get_nth(type, node,
b62a8d0c
MD
424 child_node_flag_ptr, child_node_flag,
425 node_flag_ptr, n);
fd800776 426 case RCU_JA_POOL:
5a9a87dd 427 return ja_pool_node_get_nth(type, node,
b62a8d0c
MD
428 child_node_flag_ptr, child_node_flag,
429 node_flag_ptr, n);
d68c6810 430 case RCU_JA_PIGEON:
5a9a87dd 431 return ja_pigeon_node_get_nth(type, node,
b62a8d0c
MD
432 child_node_flag_ptr, child_node_flag,
433 node_flag_ptr, n);
d68c6810
MD
434 default:
435 assert(0);
436 return (void *) -1UL;
437 }
438}
439
8e519e3c 440static
d96bfb0d 441int ja_linear_node_set_nth(const struct cds_ja_type *type,
b4540e8a 442 struct cds_ja_inode *node,
d96bfb0d 443 struct cds_ja_shadow_node *shadow_node,
8e519e3c 444 uint8_t n,
b4540e8a 445 struct cds_ja_inode_flag *child_node_flag)
8e519e3c
MD
446{
447 uint8_t nr_child;
448 uint8_t *values, *nr_child_ptr;
b4540e8a 449 struct cds_ja_inode_flag **pointers;
2e313670 450 unsigned int i, unused = 0;
8e519e3c
MD
451
452 assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
453
454 nr_child_ptr = &node->u.data[0];
a2a7ff59 455 dbg_printf("linear set nth: nr_child_ptr %p\n", nr_child_ptr);
8e519e3c
MD
456 nr_child = *nr_child_ptr;
457 assert(nr_child <= type->max_linear_child);
8e519e3c
MD
458
459 values = &node->u.data[1];
2e313670
MD
460 pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
461 /* Check if node value is already populated */
8e519e3c 462 for (i = 0; i < nr_child; i++) {
2e313670
MD
463 if (values[i] == n) {
464 if (pointers[i])
465 return -EEXIST;
466 else
467 break;
468 } else {
469 if (!pointers[i])
470 unused++;
471 }
8e519e3c 472 }
2e313670
MD
473 if (i == nr_child && nr_child >= type->max_linear_child) {
474 if (unused)
475 return -ERANGE; /* recompact node */
476 else
477 return -ENOSPC; /* No space left in this node type */
478 }
479
480 assert(pointers[i] == NULL);
481 rcu_assign_pointer(pointers[i], child_node_flag);
482 /* If we expanded the nr_child, increment it */
483 if (i == nr_child) {
484 CMM_STORE_SHARED(values[nr_child], n);
485 /* write pointer and value before nr_child */
486 cmm_smp_wmb();
487 CMM_STORE_SHARED(*nr_child_ptr, nr_child + 1);
8e519e3c 488 }
e1db2db5 489 shadow_node->nr_child++;
a2a7ff59
MD
490 dbg_printf("linear set nth: %u child, shadow: %u child, for node %p shadow %p\n",
491 (unsigned int) CMM_LOAD_SHARED(*nr_child_ptr),
492 (unsigned int) shadow_node->nr_child,
493 node, shadow_node);
494
8e519e3c
MD
495 return 0;
496}
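The store ordering in ja_linear_node_set_nth() pairs with the nr_child / cmm_smp_rmb() / values read sequence in ja_linear_node_get_nth(): the slot is filled before nr_child is incremented, so readers never see an uninitialized entry. A minimal sketch of that write side, assuming the liburcu headers this file already uses, could look like this (example_linear and example_publish are illustrations, not library code):

	#include <urcu/arch.h>		/* cmm_smp_wmb() */
	#include <urcu/system.h>	/* CMM_STORE_SHARED() */
	#include <stdint.h>

	struct example_linear {
		uint8_t nr_child;
		uint8_t value[8];
		void *ptr[8];
	};

	static void example_publish(struct example_linear *node, uint8_t v, void *p)
	{
		uint8_t i = node->nr_child;

		node->ptr[i] = p;			/* fill the slot first... */
		CMM_STORE_SHARED(node->value[i], v);
		cmm_smp_wmb();				/* slot content before child count */
		CMM_STORE_SHARED(node->nr_child, i + 1);	/* ...then expose it */
	}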
497
498static
d96bfb0d 499int ja_pool_node_set_nth(const struct cds_ja_type *type,
b4540e8a 500 struct cds_ja_inode *node,
d96bfb0d 501 struct cds_ja_shadow_node *shadow_node,
8e519e3c 502 uint8_t n,
b4540e8a 503 struct cds_ja_inode_flag *child_node_flag)
8e519e3c 504{
b4540e8a 505 struct cds_ja_inode *linear;
8e519e3c
MD
506
507 assert(type->type_class == RCU_JA_POOL);
b4540e8a 508 linear = (struct cds_ja_inode *)
8e519e3c 509 &node->u.data[((unsigned long) n >> (CHAR_BIT - type->nr_pool_order)) << type->pool_size_order];
e1db2db5
MD
510 return ja_linear_node_set_nth(type, linear, shadow_node,
511 n, child_node_flag);
8e519e3c
MD
512}
513
514static
d96bfb0d 515int ja_pigeon_node_set_nth(const struct cds_ja_type *type,
b4540e8a 516 struct cds_ja_inode *node,
d96bfb0d 517 struct cds_ja_shadow_node *shadow_node,
8e519e3c 518 uint8_t n,
b4540e8a 519 struct cds_ja_inode_flag *child_node_flag)
8e519e3c 520{
b4540e8a 521 struct cds_ja_inode_flag **ptr;
8e519e3c
MD
522
523 assert(type->type_class == RCU_JA_PIGEON);
b4540e8a 524 ptr = &((struct cds_ja_inode_flag **) node->u.data)[n];
5a9a87dd 525 if (*ptr)
8e519e3c
MD
526 return -EEXIST;
527 rcu_assign_pointer(*ptr, child_node_flag);
e1db2db5 528 shadow_node->nr_child++;
8e519e3c
MD
529 return 0;
530}
531
d68c6810 532/*
7a0b2331 533 * _ja_node_set_nth: set nth item within a node. Return an error
8e519e3c 534 * (negative error value) if it is already there.
d68c6810 535 */
8e519e3c 536static
d96bfb0d 537int _ja_node_set_nth(const struct cds_ja_type *type,
b4540e8a 538 struct cds_ja_inode *node,
d96bfb0d 539 struct cds_ja_shadow_node *shadow_node,
e1db2db5 540 uint8_t n,
b4540e8a 541 struct cds_ja_inode_flag *child_node_flag)
8e519e3c 542{
8e519e3c
MD
543 switch (type->type_class) {
544 case RCU_JA_LINEAR:
e1db2db5 545 return ja_linear_node_set_nth(type, node, shadow_node, n,
8e519e3c
MD
546 child_node_flag);
547 case RCU_JA_POOL:
e1db2db5 548 return ja_pool_node_set_nth(type, node, shadow_node, n,
8e519e3c
MD
549 child_node_flag);
550 case RCU_JA_PIGEON:
e1db2db5 551 return ja_pigeon_node_set_nth(type, node, shadow_node, n,
8e519e3c 552 child_node_flag);
e1db2db5
MD
553 case RCU_JA_NULL:
554 return -ENOSPC;
8e519e3c
MD
555 default:
556 assert(0);
557 return -EINVAL;
558 }
559
560 return 0;
561}
7a0b2331 562
2e313670 563static
af3cbd45 564int ja_linear_node_clear_ptr(const struct cds_ja_type *type,
2e313670
MD
565 struct cds_ja_inode *node,
566 struct cds_ja_shadow_node *shadow_node,
af3cbd45 567 struct cds_ja_inode_flag **node_flag_ptr)
2e313670
MD
568{
569 uint8_t nr_child;
af3cbd45 570 uint8_t *nr_child_ptr;
2e313670
MD
571
572 assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
573
574 nr_child_ptr = &node->u.data[0];
af3cbd45 575 dbg_printf("linear clear ptr: nr_child_ptr %p\n", nr_child_ptr);
2e313670
MD
576 nr_child = *nr_child_ptr;
577 assert(nr_child <= type->max_linear_child);
578
2e313670
MD
579 if (shadow_node->fallback_removal_count) {
580 shadow_node->fallback_removal_count--;
581 } else {
582 if (shadow_node->nr_child <= type->min_child) {
583 /* We need to try recompacting the node */
584 return -EFBIG;
585 }
586 }
af3cbd45
MD
587 assert(*node_flag_ptr != NULL);
588 rcu_assign_pointer(*node_flag_ptr, NULL);
2e313670
MD
589 /*
590 * Value and nr_child are never changed (would cause ABA issue).
591 * Instead, we leave the pointer to NULL and recompact the node
592 * once in a while. It is allowed to set a NULL pointer to a new
593 * value without recompaction though.
594 * Only update the shadow node accounting.
595 */
596 shadow_node->nr_child--;
af3cbd45 597 dbg_printf("linear clear ptr: %u child, shadow: %u child, for node %p shadow %p\n",
2e313670
MD
598 (unsigned int) CMM_LOAD_SHARED(*nr_child_ptr),
599 (unsigned int) shadow_node->nr_child,
600 node, shadow_node);
601
602 return 0;
603}
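The removal policy implemented above can be restated as a tiny standalone sketch (example_should_recompact() is hypothetical): clearing a child only NULLs its pointer, a node that shrank to min_child asks for recompaction by returning -EFBIG, and a fallback node first consumes its JA_FALLBACK_REMOVAL_COUNT budget before doing so.

	/* Returns nonzero when the caller should recompact to a smaller node type. */
	static int example_should_recompact(unsigned int *fallback_budget,
			unsigned int nr_child, unsigned int min_child)
	{
		if (*fallback_budget) {
			(*fallback_budget)--;	/* fallback nodes get extra slack */
			return 0;
		}
		return nr_child <= min_child;
	}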
604
605static
af3cbd45 606int ja_pool_node_clear_ptr(const struct cds_ja_type *type,
2e313670
MD
607 struct cds_ja_inode *node,
608 struct cds_ja_shadow_node *shadow_node,
af3cbd45 609 struct cds_ja_inode_flag **node_flag_ptr,
2e313670
MD
610 uint8_t n)
611{
612 struct cds_ja_inode *linear;
613
614 assert(type->type_class == RCU_JA_POOL);
615 linear = (struct cds_ja_inode *)
616 &node->u.data[((unsigned long) n >> (CHAR_BIT - type->nr_pool_order)) << type->pool_size_order];
af3cbd45 617 return ja_linear_node_clear_ptr(type, linear, shadow_node, node_flag_ptr);
2e313670
MD
618}
619
620static
af3cbd45 621int ja_pigeon_node_clear_ptr(const struct cds_ja_type *type,
2e313670
MD
622 struct cds_ja_inode *node,
623 struct cds_ja_shadow_node *shadow_node,
af3cbd45 624 struct cds_ja_inode_flag **node_flag_ptr)
2e313670 625{
2e313670 626 assert(type->type_class == RCU_JA_PIGEON);
4d6ef45e 627 dbg_printf("ja_pigeon_node_clear_ptr: clearing ptr: %p\n", *node_flag_ptr);
af3cbd45 628 rcu_assign_pointer(*node_flag_ptr, NULL);
2e313670
MD
629 shadow_node->nr_child--;
630 return 0;
631}
632
633/*
af3cbd45 634 * _ja_node_clear_ptr: clear ptr item within a node. Return an error
2e313670
MD
635 * (negative error value) if it is not found (-ENOENT).
636 */
637static
af3cbd45 638int _ja_node_clear_ptr(const struct cds_ja_type *type,
2e313670
MD
639 struct cds_ja_inode *node,
640 struct cds_ja_shadow_node *shadow_node,
af3cbd45 641 struct cds_ja_inode_flag **node_flag_ptr,
2e313670
MD
642 uint8_t n)
643{
644 switch (type->type_class) {
645 case RCU_JA_LINEAR:
af3cbd45 646 return ja_linear_node_clear_ptr(type, node, shadow_node, node_flag_ptr);
2e313670 647 case RCU_JA_POOL:
af3cbd45 648 return ja_pool_node_clear_ptr(type, node, shadow_node, node_flag_ptr, n);
2e313670 649 case RCU_JA_PIGEON:
af3cbd45 650 return ja_pigeon_node_clear_ptr(type, node, shadow_node, node_flag_ptr);
2e313670
MD
651 case RCU_JA_NULL:
652 return -ENOENT;
653 default:
654 assert(0);
655 return -EINVAL;
656 }
657
658 return 0;
659}
660
7a0b2331
MD
661/*
 662 * ja_node_recompact: recompact a node, adding or removing a child.
e1db2db5 663 * TODO: for pool type, take selection bit(s) into account.
2e313670 664 * Return 0 on success, -EAGAIN if need to retry, or other negative
5a9a87dd 665 * error value otherwise.
7a0b2331
MD
666 */
667static
2e313670
MD
668int ja_node_recompact(enum ja_recompact mode,
669 struct cds_ja *ja,
e1db2db5 670 unsigned int old_type_index,
d96bfb0d 671 const struct cds_ja_type *old_type,
b4540e8a 672 struct cds_ja_inode *old_node,
5a9a87dd 673 struct cds_ja_shadow_node *shadow_node,
3d8fe307 674 struct cds_ja_inode_flag **old_node_flag_ptr, uint8_t n,
af3cbd45
MD
675 struct cds_ja_inode_flag *child_node_flag,
676 struct cds_ja_inode_flag **nullify_node_flag_ptr)
7a0b2331 677{
e1db2db5 678 unsigned int new_type_index;
b4540e8a 679 struct cds_ja_inode *new_node;
af3cbd45 680 struct cds_ja_shadow_node *new_shadow_node = NULL;
d96bfb0d 681 const struct cds_ja_type *new_type;
3d8fe307 682 struct cds_ja_inode_flag *new_node_flag, *old_node_flag;
7a0b2331 683 int ret;
f07b240f 684 int fallback = 0;
7a0b2331 685
3d8fe307
MD
686 old_node_flag = *old_node_flag_ptr;
687
2e313670
MD
688 switch (mode) {
689 case JA_RECOMPACT:
690 new_type_index = old_type_index;
691 break;
692 case JA_RECOMPACT_ADD:
693 if (!shadow_node || old_type_index == NODE_INDEX_NULL) {
694 new_type_index = 0;
695 } else {
696 new_type_index = old_type_index + 1;
697 }
698 break;
699 case JA_RECOMPACT_DEL:
700 if (old_type_index == 0) {
701 new_type_index = NODE_INDEX_NULL;
702 } else {
703 new_type_index = old_type_index - 1;
704 }
705 break;
706 default:
707 assert(0);
7a0b2331 708 }
a2a7ff59 709
f07b240f 710retry: /* for fallback */
582a6ade
MD
711 dbg_printf("Recompact from type %d to type %d\n",
712 old_type_index, new_type_index);
7a0b2331 713 new_type = &ja_types[new_type_index];
2e313670
MD
714 if (new_type_index != NODE_INDEX_NULL) {
715 new_node = alloc_cds_ja_node(new_type);
716 if (!new_node)
717 return -ENOMEM;
718 new_node_flag = ja_node_flag(new_node, new_type_index);
719 dbg_printf("Recompact inherit lock from %p\n", shadow_node);
3d8fe307 720 new_shadow_node = rcuja_shadow_set(ja->ht, new_node_flag, shadow_node, ja);
2e313670
MD
721 if (!new_shadow_node) {
722 free(new_node);
723 return -ENOMEM;
724 }
725 if (fallback)
726 new_shadow_node->fallback_removal_count =
727 JA_FALLBACK_REMOVAL_COUNT;
728 } else {
729 new_node = NULL;
730 new_node_flag = NULL;
e1db2db5 731 }
11c5e016 732
2e313670
MD
733 assert(mode != JA_RECOMPACT_ADD || old_type->type_class != RCU_JA_PIGEON);
734
735 if (new_type_index == NODE_INDEX_NULL)
736 goto skip_copy;
737
11c5e016
MD
738 switch (old_type->type_class) {
739 case RCU_JA_LINEAR:
740 {
741 uint8_t nr_child =
742 ja_linear_node_get_nr_child(old_type, old_node);
743 unsigned int i;
744
745 for (i = 0; i < nr_child; i++) {
b4540e8a 746 struct cds_ja_inode_flag *iter;
11c5e016
MD
747 uint8_t v;
748
749 ja_linear_node_get_ith_pos(old_type, old_node, i, &v, &iter);
750 if (!iter)
751 continue;
af3cbd45 752 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
2e313670 753 continue;
f07b240f
MD
754 ret = _ja_node_set_nth(new_type, new_node,
755 new_shadow_node,
11c5e016 756 v, iter);
f07b240f
MD
757 if (new_type->type_class == RCU_JA_POOL && ret) {
758 goto fallback_toosmall;
759 }
11c5e016
MD
760 assert(!ret);
761 }
762 break;
763 }
764 case RCU_JA_POOL:
765 {
766 unsigned int pool_nr;
767
768 for (pool_nr = 0; pool_nr < (1U << old_type->nr_pool_order); pool_nr++) {
b4540e8a 769 struct cds_ja_inode *pool =
11c5e016
MD
770 ja_pool_node_get_ith_pool(old_type,
771 old_node, pool_nr);
772 uint8_t nr_child =
773 ja_linear_node_get_nr_child(old_type, pool);
774 unsigned int j;
775
776 for (j = 0; j < nr_child; j++) {
b4540e8a 777 struct cds_ja_inode_flag *iter;
11c5e016
MD
778 uint8_t v;
779
780 ja_linear_node_get_ith_pos(old_type, pool,
781 j, &v, &iter);
782 if (!iter)
783 continue;
af3cbd45 784 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
2e313670 785 continue;
f07b240f
MD
786 ret = _ja_node_set_nth(new_type, new_node,
787 new_shadow_node,
11c5e016 788 v, iter);
f07b240f
MD
789 if (new_type->type_class == RCU_JA_POOL
790 && ret) {
791 goto fallback_toosmall;
792 }
11c5e016
MD
793 assert(!ret);
794 }
795 }
796 break;
7a0b2331 797 }
a2a7ff59 798 case RCU_JA_NULL:
2e313670 799 assert(mode == JA_RECOMPACT_ADD);
a2a7ff59 800 break;
11c5e016 801 case RCU_JA_PIGEON:
2e313670
MD
802 {
803 uint8_t nr_child;
804 unsigned int i;
805
806 assert(mode == JA_RECOMPACT_DEL);
807 nr_child = shadow_node->nr_child;
808 for (i = 0; i < nr_child; i++) {
809 struct cds_ja_inode_flag *iter;
810
811 iter = ja_pigeon_node_get_ith_pos(old_type, old_node, i);
812 if (!iter)
813 continue;
af3cbd45 814 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
2e313670
MD
815 continue;
816 ret = _ja_node_set_nth(new_type, new_node,
817 new_shadow_node,
818 i, iter);
819 if (new_type->type_class == RCU_JA_POOL && ret) {
820 goto fallback_toosmall;
821 }
822 assert(!ret);
823 }
824 break;
825 }
11c5e016
MD
826 default:
827 assert(0);
5a9a87dd 828 ret = -EINVAL;
f07b240f 829 goto end;
11c5e016 830 }
2e313670 831skip_copy:
11c5e016 832
9a1c1915 833 if (mode == JA_RECOMPACT_ADD) {
2e313670
MD
834 /* add node */
835 ret = _ja_node_set_nth(new_type, new_node,
836 new_shadow_node,
837 n, child_node_flag);
7b413155
MD
838 if (new_type->type_class == RCU_JA_POOL && ret) {
839 goto fallback_toosmall;
840 }
2e313670
MD
841 assert(!ret);
842 }
3d8fe307
MD
843 /* Return pointer to new recompacted node through old_node_flag_ptr */
844 *old_node_flag_ptr = new_node_flag;
a2a7ff59 845 if (old_node) {
2e313670
MD
846 int flags;
847
848 flags = RCUJA_SHADOW_CLEAR_FREE_NODE;
849 /*
850 * It is OK to free the lock associated with a node
851 * going to NULL, since we are holding the parent lock.
852 * This synchronizes removal with re-add of that node.
853 */
854 if (new_type_index == NODE_INDEX_NULL)
855 flags = RCUJA_SHADOW_CLEAR_FREE_LOCK;
3d8fe307 856 ret = rcuja_shadow_clear(ja->ht, old_node_flag, shadow_node,
2e313670 857 flags);
a2a7ff59
MD
858 assert(!ret);
859 }
5a9a87dd
MD
860
861 ret = 0;
f07b240f 862end:
5a9a87dd 863 return ret;
f07b240f
MD
864
865fallback_toosmall:
866 /* fallback if next pool is too small */
af3cbd45 867 assert(new_shadow_node);
3d8fe307 868 ret = rcuja_shadow_clear(ja->ht, new_node_flag, new_shadow_node,
f07b240f
MD
869 RCUJA_SHADOW_CLEAR_FREE_NODE);
870 assert(!ret);
871
2e313670 872 /* Choose fallback type: pigeon */
f07b240f
MD
873 new_type_index = (1UL << JA_TYPE_BITS) - 1;
874 dbg_printf("Fallback to type %d\n", new_type_index);
875 uatomic_inc(&ja->nr_fallback);
876 fallback = 1;
877 goto retry;
7a0b2331
MD
878}
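The resize policy of ja_node_recompact() can be summarized by a standalone sketch (example_next_type() is an illustration, not the library's API): an add moves to the next larger type, a removal to the next smaller one, and a pool that still cannot hold all the children falls back to the pigeon type, i.e. the last index, which is what the fallback_toosmall path above does.

	#include <stdio.h>

	enum { EXAMPLE_NR_TYPES = 8, EXAMPLE_NULL_INDEX = 8 };

	static unsigned int example_next_type(unsigned int old_index,
			int adding, int pool_too_small)
	{
		if (pool_too_small)
			return EXAMPLE_NR_TYPES - 1;		/* pigeon fallback */
		if (adding)
			return old_index == EXAMPLE_NULL_INDEX ? 0 : old_index + 1;
		return old_index == 0 ? EXAMPLE_NULL_INDEX : old_index - 1;
	}

	int main(void)
	{
		/* Adding to a full, unbalanced type-5 pool ends up in the pigeon type. */
		printf("%u\n", example_next_type(5, 1, 1));	/* prints 7 */
		return 0;
	}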
879
5a9a87dd 880/*
2e313670 881 * Return 0 on success, -EAGAIN if need to retry, or other negative
5a9a87dd
MD
882 * error value otherwise.
883 */
7a0b2331 884static
d96bfb0d 885int ja_node_set_nth(struct cds_ja *ja,
b4540e8a 886 struct cds_ja_inode_flag **node_flag, uint8_t n,
5a9a87dd
MD
887 struct cds_ja_inode_flag *child_node_flag,
888 struct cds_ja_shadow_node *shadow_node)
7a0b2331
MD
889{
890 int ret;
e1db2db5 891 unsigned int type_index;
d96bfb0d 892 const struct cds_ja_type *type;
b4540e8a 893 struct cds_ja_inode *node;
7a0b2331 894
a2a7ff59
MD
895 dbg_printf("ja_node_set_nth for n=%u, node %p, shadow %p\n",
896 (unsigned int) n, ja_node_ptr(*node_flag), shadow_node);
897
e1db2db5
MD
898 node = ja_node_ptr(*node_flag);
899 type_index = ja_node_type(*node_flag);
900 type = &ja_types[type_index];
e1db2db5
MD
901 ret = _ja_node_set_nth(type, node, shadow_node,
902 n, child_node_flag);
2e313670
MD
903 switch (ret) {
904 case -ENOSPC:
e1db2db5 905 /* Not enough space in node, need to recompact. */
2e313670 906 ret = ja_node_recompact(JA_RECOMPACT_ADD, ja, type_index, type, node,
af3cbd45 907 shadow_node, node_flag, n, child_node_flag, NULL);
2e313670
MD
908 break;
909 case -ERANGE:
910 /* Node needs to be recompacted. */
911 ret = ja_node_recompact(JA_RECOMPACT, ja, type_index, type, node,
af3cbd45 912 shadow_node, node_flag, n, child_node_flag, NULL);
2e313670
MD
913 break;
914 }
915 return ret;
916}
917
918/*
919 * Return 0 on success, -EAGAIN if need to retry, or other negative
920 * error value otherwise.
921 */
922static
af3cbd45
MD
923int ja_node_clear_ptr(struct cds_ja *ja,
924 struct cds_ja_inode_flag **node_flag_ptr, /* Pointer to location to nullify */
925 struct cds_ja_inode_flag **parent_node_flag_ptr, /* Address of parent ptr in its parent */
926 struct cds_ja_shadow_node *shadow_node, /* of parent */
927 uint8_t n)
2e313670
MD
928{
929 int ret;
930 unsigned int type_index;
931 const struct cds_ja_type *type;
932 struct cds_ja_inode *node;
933
af3cbd45
MD
934 dbg_printf("ja_node_clear_ptr for node %p, shadow %p, target ptr %p\n",
935 ja_node_ptr(*parent_node_flag_ptr), shadow_node, node_flag_ptr);
2e313670 936
af3cbd45
MD
937 node = ja_node_ptr(*parent_node_flag_ptr);
938 type_index = ja_node_type(*parent_node_flag_ptr);
2e313670 939 type = &ja_types[type_index];
af3cbd45 940 ret = _ja_node_clear_ptr(type, node, shadow_node, node_flag_ptr, n);
2e313670
MD
941 if (ret == -EFBIG) {
 942 /* Should try recompaction. */
943 ret = ja_node_recompact(JA_RECOMPACT_DEL, ja, type_index, type, node,
af3cbd45
MD
944 shadow_node, parent_node_flag_ptr, n, NULL,
945 node_flag_ptr);
7a0b2331
MD
946 }
947 return ret;
948}
be9a7474 949
af3cbd45 950struct cds_hlist_head cds_ja_lookup(struct cds_ja *ja, uint64_t key)
b4540e8a 951{
41975c12
MD
952 unsigned int tree_depth, i;
953 struct cds_ja_inode_flag *node_flag;
af3cbd45 954 struct cds_hlist_head head = { NULL };
41975c12
MD
955
956 if (caa_unlikely(key > ja->key_max))
af3cbd45 957 return head;
41975c12 958 tree_depth = ja->tree_depth;
5a9a87dd 959 node_flag = rcu_dereference(ja->root);
41975c12 960
5a9a87dd
MD
961 /* level 0: root node */
962 if (!ja_node_ptr(node_flag))
af3cbd45 963 return head;
5a9a87dd
MD
964
965 for (i = 1; i < tree_depth; i++) {
79b41067
MD
966 uint8_t iter_key;
967
968 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1)));
b62a8d0c 969 node_flag = ja_node_get_nth(node_flag, NULL, NULL, NULL,
79b41067 970 iter_key);
582a6ade
MD
971 dbg_printf("cds_ja_lookup iter key lookup %u finds node_flag %p\n",
972 (unsigned int) iter_key, node_flag);
41975c12 973 if (!ja_node_ptr(node_flag))
af3cbd45 974 return head;
41975c12
MD
975 }
976
5a9a87dd 977 /* Last level lookup succeeded. We got an actual match. */
af3cbd45
MD
978 head.next = (struct cds_hlist_node *) node_flag;
979 return head;
5a9a87dd
MD
980}
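A hypothetical caller of this development API would embed a struct cds_ja_node in its own structure and walk the duplicate chain under the RCU read-side lock of the flavor the array was created with; the my_item type and helper below are assumptions for illustration only, using the headers this file includes plus caa_container_of() from <urcu/compiler.h>:

	struct my_item {
		struct cds_ja_node node;	/* embedded judy array linkage (assumed usage) */
		uint64_t key;
	};

	static void example_lookup(struct cds_ja *ja, uint64_t key)
	{
		struct cds_hlist_head head;
		struct cds_ja_node *ja_node;
		struct cds_hlist_node *pos;

		rcu_read_lock();	/* read-side lock of the flavor used at creation */
		head = cds_ja_lookup(ja, key);
		cds_hlist_for_each_entry_rcu(ja_node, pos, &head, list) {
			struct my_item *item =
				caa_container_of(ja_node, struct my_item, node);

			(void) item;	/* use item->key, etc. */
		}
		rcu_read_unlock();
	}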
981
982/*
983 * We reached an unpopulated node. Create it and the children we need,
984 * and then attach the entire branch to the current node. This may
985 * trigger recompaction of the current node. Locks needed: node lock
986 * (for add), and, possibly, parent node lock (to update pointer due to
987 * node recompaction).
988 *
989 * First take node lock, check if recompaction is needed, then take
990 * parent lock (if needed). Then we can proceed to create the new
991 * branch. Publish the new branch, and release locks.
992 * TODO: we currently always take the parent lock even when not needed.
993 */
994static
995int ja_attach_node(struct cds_ja *ja,
b0ca2d21 996 struct cds_ja_inode_flag **attach_node_flag_ptr,
b62a8d0c 997 struct cds_ja_inode_flag *attach_node_flag,
5a9a87dd
MD
998 struct cds_ja_inode_flag **node_flag_ptr,
999 struct cds_ja_inode_flag *node_flag,
1000 struct cds_ja_inode_flag *parent_node_flag,
1001 uint64_t key,
79b41067 1002 unsigned int level,
5a9a87dd
MD
1003 struct cds_ja_node *child_node)
1004{
1005 struct cds_ja_shadow_node *shadow_node = NULL,
af3cbd45 1006 *parent_shadow_node = NULL;
5a9a87dd
MD
1007 struct cds_ja_inode *node = ja_node_ptr(node_flag);
1008 struct cds_ja_inode *parent_node = ja_node_ptr(parent_node_flag);
1009 struct cds_hlist_head head;
1010 struct cds_ja_inode_flag *iter_node_flag, *iter_dest_node_flag;
1011 int ret, i;
a2a7ff59 1012 struct cds_ja_inode_flag *created_nodes[JA_MAX_DEPTH];
5a9a87dd
MD
1013 int nr_created_nodes = 0;
1014
582a6ade
MD
1015 dbg_printf("Attach node at level %u (node %p, node_flag %p)\n",
1016 level, node, node_flag);
a2a7ff59 1017
5a9a87dd 1018 assert(node);
3d8fe307 1019 shadow_node = rcuja_shadow_lookup_lock(ja->ht, node_flag);
5a9a87dd 1020 if (!shadow_node) {
2e313670 1021 ret = -EAGAIN;
5a9a87dd
MD
1022 goto end;
1023 }
1024 if (parent_node) {
1025 parent_shadow_node = rcuja_shadow_lookup_lock(ja->ht,
3d8fe307 1026 parent_node_flag);
5a9a87dd 1027 if (!parent_shadow_node) {
2e313670 1028 ret = -EAGAIN;
5a9a87dd
MD
1029 goto unlock_shadow;
1030 }
1031 }
1032
b62a8d0c 1033 if (node_flag_ptr && ja_node_ptr(*node_flag_ptr)) {
b306a0fe 1034 /*
c112acaa
MD
1035 * Target node has been updated between RCU lookup and
1036 * lock acquisition. We need to re-try lookup and
1037 * attach.
1038 */
1039 ret = -EAGAIN;
1040 goto unlock_parent;
1041 }
1042
1043 if (attach_node_flag_ptr && ja_node_ptr(*attach_node_flag_ptr) !=
b62a8d0c 1044 ja_node_ptr(attach_node_flag)) {
c112acaa
MD
1045 /*
1046 * Target node has been updated between RCU lookup and
1047 * lock acquisition. We need to re-try lookup and
1048 * attach.
b306a0fe
MD
1049 */
1050 ret = -EAGAIN;
1051 goto unlock_parent;
1052 }
1053
a2a7ff59 1054 /* Create new branch, starting from bottom */
5a9a87dd
MD
1055 CDS_INIT_HLIST_HEAD(&head);
1056 cds_hlist_add_head_rcu(&child_node->list, &head);
a2a7ff59 1057 iter_node_flag = (struct cds_ja_inode_flag *) head.next;
5a9a87dd 1058
79b41067
MD
1059 for (i = ja->tree_depth; i > (int) level; i--) {
1060 uint8_t iter_key;
1061
1062 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (ja->tree_depth - i)));
1063 dbg_printf("branch creation level %d, key %u\n",
1064 i - 1, (unsigned int) iter_key);
5a9a87dd
MD
1065 iter_dest_node_flag = NULL;
1066 ret = ja_node_set_nth(ja, &iter_dest_node_flag,
79b41067 1067 iter_key,
5a9a87dd
MD
1068 iter_node_flag,
1069 NULL);
1070 if (ret)
1071 goto check_error;
1072 created_nodes[nr_created_nodes++] = iter_dest_node_flag;
1073 iter_node_flag = iter_dest_node_flag;
1074 }
1075
79b41067
MD
1076 if (level > 1) {
1077 uint8_t iter_key;
1078
1079 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (ja->tree_depth - level)));
a2a7ff59
MD
1080 /* We need to use set_nth on the previous level. */
1081 iter_dest_node_flag = node_flag;
1082 ret = ja_node_set_nth(ja, &iter_dest_node_flag,
79b41067 1083 iter_key,
a2a7ff59
MD
1084 iter_node_flag,
1085 shadow_node);
1086 if (ret)
1087 goto check_error;
1088 created_nodes[nr_created_nodes++] = iter_dest_node_flag;
1089 iter_node_flag = iter_dest_node_flag;
1090 }
1091
5a9a87dd 1092 /* Publish new branch */
a2a7ff59 1093 dbg_printf("Publish branch %p, replacing %p\n",
b0ca2d21
MD
1094 iter_node_flag, *attach_node_flag_ptr);
1095 rcu_assign_pointer(*attach_node_flag_ptr, iter_node_flag);
5a9a87dd
MD
1096
1097 /* Success */
1098 ret = 0;
1099
1100check_error:
1101 if (ret) {
1102 for (i = 0; i < nr_created_nodes; i++) {
1103 int tmpret;
a2a7ff59
MD
1104 int flags;
1105
1106 flags = RCUJA_SHADOW_CLEAR_FREE_LOCK;
1107 if (i)
1108 flags |= RCUJA_SHADOW_CLEAR_FREE_NODE;
5a9a87dd 1109 tmpret = rcuja_shadow_clear(ja->ht,
3d8fe307 1110 created_nodes[i],
a2a7ff59
MD
1111 NULL,
1112 flags);
5a9a87dd
MD
1113 assert(!tmpret);
1114 }
1115 }
b306a0fe 1116unlock_parent:
5a9a87dd
MD
1117 if (parent_shadow_node)
1118 rcuja_shadow_unlock(parent_shadow_node);
1119unlock_shadow:
1120 if (shadow_node)
1121 rcuja_shadow_unlock(shadow_node);
1122end:
1123 return ret;
1124}
1125
1126/*
af3cbd45
MD
1127 * Lock the parent containing the hlist head pointer, and add node to list of
1128 * duplicates. Failure can happen if concurrent update changes the
1129 * parent before we get the lock. We return -EAGAIN in that case.
5a9a87dd
MD
1130 * Return 0 on success, negative error value on failure.
1131 */
1132static
1133int ja_chain_node(struct cds_ja *ja,
af3cbd45 1134 struct cds_ja_inode_flag *parent_node_flag,
fa112799 1135 struct cds_ja_inode_flag **node_flag_ptr,
c112acaa 1136 struct cds_ja_inode_flag *node_flag,
5a9a87dd
MD
1137 struct cds_hlist_head *head,
1138 struct cds_ja_node *node)
1139{
1140 struct cds_ja_shadow_node *shadow_node;
fa112799 1141 int ret = 0;
5a9a87dd 1142
3d8fe307 1143 shadow_node = rcuja_shadow_lookup_lock(ja->ht, parent_node_flag);
b306a0fe 1144 if (!shadow_node) {
2e313670 1145 return -EAGAIN;
b306a0fe 1146 }
c112acaa 1147 if (ja_node_ptr(*node_flag_ptr) != ja_node_ptr(node_flag)) {
fa112799
MD
1148 ret = -EAGAIN;
1149 goto end;
1150 }
5a9a87dd 1151 cds_hlist_add_head_rcu(&node->list, head);
fa112799 1152end:
5a9a87dd 1153 rcuja_shadow_unlock(shadow_node);
fa112799 1154 return ret;
5a9a87dd
MD
1155}
1156
1157int cds_ja_add(struct cds_ja *ja, uint64_t key,
1158 struct cds_ja_node *new_node)
1159{
1160 unsigned int tree_depth, i;
b0ca2d21
MD
1161 struct cds_ja_inode_flag **attach_node_flag_ptr,
1162 **node_flag_ptr;
5a9a87dd
MD
1163 struct cds_ja_inode_flag *node_flag,
1164 *parent_node_flag,
b62a8d0c
MD
1165 *parent2_node_flag,
1166 *attach_node_flag;
5a9a87dd
MD
1167 int ret;
1168
b306a0fe 1169 if (caa_unlikely(key > ja->key_max)) {
5a9a87dd 1170 return -EINVAL;
b306a0fe 1171 }
5a9a87dd
MD
1172 tree_depth = ja->tree_depth;
1173
1174retry:
a2a7ff59
MD
1175 dbg_printf("cds_ja_add attempt: key %" PRIu64 ", node %p\n",
1176 key, new_node);
5a9a87dd 1177 parent2_node_flag = NULL;
b0f74e47
MD
1178 parent_node_flag =
1179 (struct cds_ja_inode_flag *) &ja->root; /* Use root ptr address as key for mutex */
b0ca2d21 1180 attach_node_flag_ptr = &ja->root;
b62a8d0c 1181 attach_node_flag = rcu_dereference(ja->root);
5a9a87dd 1182 node_flag_ptr = &ja->root;
35170a44 1183 node_flag = rcu_dereference(ja->root);
5a9a87dd
MD
1184
1185 /* Iterate on all internal levels */
a2a7ff59 1186 for (i = 1; i < tree_depth; i++) {
79b41067
MD
1187 uint8_t iter_key;
1188
b0ca2d21 1189 dbg_printf("cds_ja_add iter attach_node_flag_ptr %p node_flag_ptr %p node_flag %p\n",
c112acaa 1190 attach_node_flag_ptr, node_flag_ptr, node_flag);
5a9a87dd 1191 if (!ja_node_ptr(node_flag)) {
b0ca2d21 1192 ret = ja_attach_node(ja, attach_node_flag_ptr,
b62a8d0c 1193 attach_node_flag,
b0ca2d21 1194 node_flag_ptr,
c112acaa
MD
1195 parent_node_flag,
1196 parent2_node_flag,
5a9a87dd 1197 key, i, new_node);
2e313670 1198 if (ret == -EAGAIN || ret == -EEXIST)
5a9a87dd
MD
1199 goto retry;
1200 else
1201 goto end;
1202 }
79b41067 1203 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1)));
5a9a87dd
MD
1204 parent2_node_flag = parent_node_flag;
1205 parent_node_flag = node_flag;
1206 node_flag = ja_node_get_nth(node_flag,
b0ca2d21 1207 &attach_node_flag_ptr,
b62a8d0c 1208 &attach_node_flag,
5a9a87dd 1209 &node_flag_ptr,
79b41067 1210 iter_key);
b0ca2d21
MD
1211 dbg_printf("cds_ja_add iter key lookup %u finds node_flag %p attach_node_flag_ptr %p node_flag_ptr %p\n",
1212 (unsigned int) iter_key, node_flag,
c112acaa
MD
1213 attach_node_flag_ptr,
1214 node_flag_ptr);
5a9a87dd
MD
1215 }
1216
1217 /*
1218 * We reached bottom of tree, simply add node to last internal
1219 * level, or chain it if key is already present.
1220 */
1221 if (!ja_node_ptr(node_flag)) {
c112acaa
MD
1222 dbg_printf("cds_ja_add attach_node_flag_ptr %p node_flag_ptr %p node_flag %p\n",
1223 attach_node_flag_ptr, node_flag_ptr, node_flag);
b0ca2d21 1224 ret = ja_attach_node(ja, attach_node_flag_ptr,
b62a8d0c 1225 attach_node_flag,
b0ca2d21 1226 node_flag_ptr, parent_node_flag,
5a9a87dd
MD
1227 parent2_node_flag, key, i, new_node);
1228 } else {
1229 ret = ja_chain_node(ja,
af3cbd45 1230 parent_node_flag,
fa112799 1231 node_flag_ptr,
c112acaa 1232 node_flag,
b0ca2d21 1233 (struct cds_hlist_head *) attach_node_flag_ptr,
5a9a87dd
MD
1234 new_node);
1235 }
b306a0fe 1236 if (ret == -EAGAIN || ret == -EEXIST)
5a9a87dd
MD
1237 goto retry;
1238end:
1239 return ret;
b4540e8a
MD
1240}
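A matching insertion sketch, reusing the hypothetical my_item type from the lookup example and assuming, as for cds_ja_del() below, that the caller holds the RCU read-side lock:

	static int example_add(struct cds_ja *ja, struct my_item *item, uint64_t key)
	{
		int ret;

		item->key = key;
		rcu_read_lock();
		ret = cds_ja_add(ja, key, &item->node);
		rcu_read_unlock();
		return ret;	/* 0 on success, -EINVAL if key > ja->key_max, -ENOMEM, ... */
	}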
1241
af3cbd45
MD
1242/*
1243 * Note: there is no need to lookup the pointer address associated with
1244 * each node's nth item after taking the lock: it's already been done by
1245 * cds_ja_del while holding the rcu read-side lock, and our node rules
1246 * ensure that when a match value -> pointer is found in a node, it is
1247 * _NEVER_ changed for that node without recompaction, and recompaction
1248 * reallocates the node.
b306a0fe
MD
1249 * However, when a child is removed from "linear" nodes, its pointer
1250 * is set to NULL. We therefore check, while holding the locks, if this
1251 * pointer is NULL, and return -ENOENT to the caller if it is the case.
af3cbd45 1252 */
35170a44
MD
1253static
1254int ja_detach_node(struct cds_ja *ja,
1255 struct cds_ja_inode_flag **snapshot,
af3cbd45
MD
1256 struct cds_ja_inode_flag ***snapshot_ptr,
1257 uint8_t *snapshot_n,
35170a44
MD
1258 int nr_snapshot,
1259 uint64_t key,
1260 struct cds_ja_node *node)
1261{
af3cbd45
MD
1262 struct cds_ja_shadow_node *shadow_nodes[JA_MAX_DEPTH];
1263 struct cds_ja_inode_flag **node_flag_ptr = NULL,
1264 *parent_node_flag = NULL,
1265 **parent_node_flag_ptr = NULL;
b62a8d0c 1266 struct cds_ja_inode_flag *iter_node_flag;
4d6ef45e
MD
1267 int ret, i, nr_shadow = 0, nr_clear = 0, nr_branch = 0;
1268 uint8_t n = 0;
35170a44 1269
4d6ef45e 1270 assert(nr_snapshot == ja->tree_depth + 1);
35170a44 1271
af3cbd45
MD
1272 /*
1273 * From the last internal level node going up, get the node
1274 * lock, check if the node has only one child left. If it is the
1275 * case, we continue iterating upward. When we reach a node
 1276 * which has more than one child left, we lock the parent, and
1277 * proceed to the node deletion (removing its children too).
1278 */
4d6ef45e 1279 for (i = nr_snapshot - 2; i >= 1; i--) {
af3cbd45
MD
1280 struct cds_ja_shadow_node *shadow_node;
1281
1282 shadow_node = rcuja_shadow_lookup_lock(ja->ht,
3d8fe307 1283 snapshot[i]);
af3cbd45
MD
1284 if (!shadow_node) {
1285 ret = -EAGAIN;
1286 goto end;
1287 }
af3cbd45 1288 shadow_nodes[nr_shadow++] = shadow_node;
b62a8d0c
MD
1289
1290 /*
1291 * Check if node has been removed between RCU
1292 * lookup and lock acquisition.
1293 */
1294 assert(snapshot_ptr[i + 1]);
1295 if (ja_node_ptr(*snapshot_ptr[i + 1])
1296 != ja_node_ptr(snapshot[i + 1])) {
1297 ret = -ENOENT;
1298 goto end;
1299 }
1300
1301 assert(shadow_node->nr_child > 0);
d810c97f 1302 if (shadow_node->nr_child == 1 && i > 1)
4d6ef45e
MD
1303 nr_clear++;
1304 nr_branch++;
af3cbd45
MD
1305 if (shadow_node->nr_child > 1 || i == 1) {
1306 /* Lock parent and break */
1307 shadow_node = rcuja_shadow_lookup_lock(ja->ht,
3d8fe307 1308 snapshot[i - 1]);
af3cbd45
MD
1309 if (!shadow_node) {
1310 ret = -EAGAIN;
1311 goto end;
1312 }
1313 shadow_nodes[nr_shadow++] = shadow_node;
b62a8d0c 1314
c112acaa
MD
1315 /*
1316 * Check if node has been removed between RCU
1317 * lookup and lock acquisition.
1318 */
b62a8d0c
MD
1319 assert(snapshot_ptr[i]);
1320 if (ja_node_ptr(*snapshot_ptr[i])
1321 != ja_node_ptr(snapshot[i])) {
c112acaa
MD
1322 ret = -ENOENT;
1323 goto end;
1324 }
1325
b62a8d0c 1326 node_flag_ptr = snapshot_ptr[i + 1];
4d6ef45e
MD
1327 n = snapshot_n[i + 1];
1328 parent_node_flag_ptr = snapshot_ptr[i];
1329 parent_node_flag = snapshot[i];
c112acaa 1330
af3cbd45
MD
1331 if (i > 1) {
1332 /*
1333 * Lock parent's parent, in case we need
1334 * to recompact parent.
1335 */
1336 shadow_node = rcuja_shadow_lookup_lock(ja->ht,
3d8fe307 1337 snapshot[i - 2]);
af3cbd45
MD
1338 if (!shadow_node) {
1339 ret = -EAGAIN;
1340 goto end;
1341 }
1342 shadow_nodes[nr_shadow++] = shadow_node;
b62a8d0c
MD
1343
1344 /*
1345 * Check if node has been removed between RCU
1346 * lookup and lock acquisition.
1347 */
1348 assert(snapshot_ptr[i - 1]);
1349 if (ja_node_ptr(*snapshot_ptr[i - 1])
1350 != ja_node_ptr(snapshot[i - 1])) {
1351 ret = -ENOENT;
1352 goto end;
1353 }
af3cbd45 1354 }
b62a8d0c 1355
af3cbd45
MD
1356 break;
1357 }
1358 }
1359
1360 /*
4d6ef45e
MD
1361 * At this point, we want to delete all nodes that are about to
1362 * be removed from shadow_nodes (except the last one, which is
1363 * either the root or the parent of the upmost node with 1
b62a8d0c
MD
1364 * child). OK to free lock here, because RCU read lock is held,
1365 * and free only performed in call_rcu.
af3cbd45
MD
1366 */
1367
1368 for (i = 0; i < nr_clear; i++) {
1369 ret = rcuja_shadow_clear(ja->ht,
3d8fe307 1370 shadow_nodes[i]->node_flag,
af3cbd45
MD
1371 shadow_nodes[i],
1372 RCUJA_SHADOW_CLEAR_FREE_NODE
1373 | RCUJA_SHADOW_CLEAR_FREE_LOCK);
1374 assert(!ret);
1375 }
1376
1377 iter_node_flag = parent_node_flag;
1378 /* Remove from parent */
1379 ret = ja_node_clear_ptr(ja,
1380 node_flag_ptr, /* Pointer to location to nullify */
 1381 &iter_node_flag, /* Updated to new parent ptr if recompacted */
4d6ef45e 1382 shadow_nodes[nr_branch - 1], /* of parent */
af3cbd45 1383 n);
b306a0fe
MD
1384 if (ret)
1385 goto end;
af3cbd45 1386
4d6ef45e
MD
1387 dbg_printf("ja_detach_node: publish %p instead of %p\n",
1388 iter_node_flag, *parent_node_flag_ptr);
af3cbd45
MD
1389 /* Update address of parent ptr in its parent */
1390 rcu_assign_pointer(*parent_node_flag_ptr, iter_node_flag);
1391
1392end:
1393 for (i = 0; i < nr_shadow; i++)
1394 rcuja_shadow_unlock(shadow_nodes[i]);
35170a44
MD
1395 return ret;
1396}
1397
af3cbd45
MD
1398static
1399int ja_unchain_node(struct cds_ja *ja,
1400 struct cds_ja_inode_flag *parent_node_flag,
fa112799 1401 struct cds_ja_inode_flag **node_flag_ptr,
013a6083 1402 struct cds_ja_inode_flag *node_flag,
af3cbd45
MD
1403 struct cds_ja_node *node)
1404{
1405 struct cds_ja_shadow_node *shadow_node;
f2758d14 1406 struct cds_hlist_node *hlist_node;
013a6083
MD
1407 struct cds_hlist_head hlist_head;
1408 int ret = 0, count = 0, found = 0;
af3cbd45 1409
3d8fe307 1410 shadow_node = rcuja_shadow_lookup_lock(ja->ht, parent_node_flag);
af3cbd45
MD
1411 if (!shadow_node)
1412 return -EAGAIN;
013a6083 1413 if (ja_node_ptr(*node_flag_ptr) != ja_node_ptr(node_flag)) {
fa112799
MD
1414 ret = -EAGAIN;
1415 goto end;
1416 }
013a6083 1417 hlist_head.next = (struct cds_hlist_node *) ja_node_ptr(node_flag);
af3cbd45
MD
1418 /*
1419 * Retry if another thread removed all but one of duplicates
fa112799 1420 * since check (this check was performed without lock).
013a6083
MD
1421 * Ensure that the node we are about to remove is still in the
1422 * list (while holding lock).
af3cbd45 1423 */
013a6083 1424 cds_hlist_for_each_rcu(hlist_node, &hlist_head) {
ade342cb
MD
1425 if (count == 0) {
1426 /* FIXME: currently a work-around */
1427 hlist_node->prev = (struct cds_hlist_node *) node_flag_ptr;
1428 }
f2758d14 1429 count++;
013a6083
MD
1430 if (hlist_node == &node->list)
1431 found++;
f2758d14 1432 }
013a6083
MD
1433 assert(found <= 1);
1434 if (!found || count == 1) {
af3cbd45
MD
1435 ret = -EAGAIN;
1436 goto end;
1437 }
1438 cds_hlist_del_rcu(&node->list);
ade342cb
MD
1439 /*
1440 * Validate that we indeed removed the node from linked list.
1441 */
1442 assert(ja_node_ptr(*node_flag_ptr) != (struct cds_ja_inode *) node);
af3cbd45
MD
1443end:
1444 rcuja_shadow_unlock(shadow_node);
1445 return ret;
1446}
1447
1448/*
1449 * Called with RCU read lock held.
1450 */
35170a44
MD
1451int cds_ja_del(struct cds_ja *ja, uint64_t key,
1452 struct cds_ja_node *node)
1453{
1454 unsigned int tree_depth, i;
1455 struct cds_ja_inode_flag *snapshot[JA_MAX_DEPTH];
af3cbd45
MD
1456 struct cds_ja_inode_flag **snapshot_ptr[JA_MAX_DEPTH];
1457 uint8_t snapshot_n[JA_MAX_DEPTH];
35170a44 1458 struct cds_ja_inode_flag *node_flag;
fa112799
MD
1459 struct cds_ja_inode_flag **prev_node_flag_ptr,
1460 **node_flag_ptr;
4d6ef45e 1461 int nr_snapshot;
35170a44
MD
1462 int ret;
1463
1464 if (caa_unlikely(key > ja->key_max))
1465 return -EINVAL;
1466 tree_depth = ja->tree_depth;
1467
1468retry:
4d6ef45e 1469 nr_snapshot = 0;
35170a44
MD
1470 dbg_printf("cds_ja_del attempt: key %" PRIu64 ", node %p\n",
1471 key, node);
1472
1473 /* snapshot for level 0 is only for shadow node lookup */
4d6ef45e
MD
1474 snapshot_n[0] = 0;
1475 snapshot_n[1] = 0;
af3cbd45 1476 snapshot_ptr[nr_snapshot] = NULL;
35170a44
MD
1477 snapshot[nr_snapshot++] = (struct cds_ja_inode_flag *) &ja->root;
1478 node_flag = rcu_dereference(ja->root);
af3cbd45 1479 prev_node_flag_ptr = &ja->root;
fa112799 1480 node_flag_ptr = &ja->root;
35170a44
MD
1481
1482 /* Iterate on all internal levels */
1483 for (i = 1; i < tree_depth; i++) {
1484 uint8_t iter_key;
1485
1486 dbg_printf("cds_ja_del iter node_flag %p\n",
1487 node_flag);
1488 if (!ja_node_ptr(node_flag)) {
1489 return -ENOENT;
1490 }
35170a44 1491 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1)));
4d6ef45e 1492 snapshot_n[nr_snapshot + 1] = iter_key;
af3cbd45
MD
1493 snapshot_ptr[nr_snapshot] = prev_node_flag_ptr;
1494 snapshot[nr_snapshot++] = node_flag;
35170a44 1495 node_flag = ja_node_get_nth(node_flag,
af3cbd45 1496 &prev_node_flag_ptr,
b62a8d0c 1497 NULL,
fa112799 1498 &node_flag_ptr,
35170a44 1499 iter_key);
af3cbd45
MD
1500 dbg_printf("cds_ja_del iter key lookup %u finds node_flag %p, prev_node_flag_ptr %p\n",
1501 (unsigned int) iter_key, node_flag,
1502 prev_node_flag_ptr);
35170a44 1503 }
35170a44
MD
1504 /*
1505 * We reached bottom of tree, try to find the node we are trying
1506 * to remove. Fail if we cannot find it.
1507 */
1508 if (!ja_node_ptr(node_flag)) {
4d6ef45e
MD
1509 dbg_printf("cds_ja_del: no node found for key %" PRIu64 "\n",
1510 key);
35170a44
MD
1511 return -ENOENT;
1512 } else {
4d6ef45e 1513 struct cds_hlist_head hlist_head;
35170a44 1514 struct cds_hlist_node *hlist_node;
af3cbd45
MD
1515 struct cds_ja_node *entry, *match = NULL;
1516 int count = 0;
35170a44 1517
4d6ef45e
MD
1518 hlist_head.next =
1519 (struct cds_hlist_node *) ja_node_ptr(node_flag);
af3cbd45 1520 cds_hlist_for_each_entry_rcu(entry,
35170a44 1521 hlist_node,
4d6ef45e 1522 &hlist_head,
35170a44 1523 list) {
4d6ef45e 1524 dbg_printf("cds_ja_del: compare %p with entry %p\n", node, entry);
af3cbd45
MD
1525 if (entry == node)
1526 match = entry;
1527 count++;
35170a44 1528 }
4d6ef45e
MD
1529 if (!match) {
1530 dbg_printf("cds_ja_del: no node match for node %p key %" PRIu64 "\n", node, key);
35170a44 1531 return -ENOENT;
4d6ef45e 1532 }
af3cbd45
MD
1533 assert(count > 0);
1534 if (count == 1) {
1535 /*
4d6ef45e
MD
1536 * Removing last of duplicates. Last snapshot
 1537 * does not have a shadow node (external leaves).
af3cbd45
MD
1538 */
1539 snapshot_ptr[nr_snapshot] = prev_node_flag_ptr;
1540 snapshot[nr_snapshot++] = node_flag;
1541 ret = ja_detach_node(ja, snapshot, snapshot_ptr,
1542 snapshot_n, nr_snapshot, key, node);
1543 } else {
f2758d14 1544 ret = ja_unchain_node(ja, snapshot[nr_snapshot - 1],
013a6083 1545 node_flag_ptr, node_flag, match);
af3cbd45 1546 }
35170a44 1547 }
b306a0fe
MD
1548 /*
1549 * Explanation of -ENOENT handling: caused by concurrent delete
1550 * between RCU lookup and actual removal. Need to re-do the
1551 * lookup and removal attempt.
1552 */
1553 if (ret == -EAGAIN || ret == -ENOENT)
35170a44
MD
1554 goto retry;
1555 return ret;
1556}
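A removal sketch for the same hypothetical my_item type: the unlink happens under the RCU read-side lock, and the memory is only reclaimed after a grace period, here through the call_rcu() of the flavor in use (an assumption about how a caller would manage its own nodes, not something this file prescribes):

	static void example_free_item(struct rcu_head *head)
	{
		free(caa_container_of(head, struct my_item, node.head));
	}

	static int example_del(struct cds_ja *ja, struct my_item *item, uint64_t key)
	{
		int ret;

		rcu_read_lock();
		ret = cds_ja_del(ja, key, &item->node);
		rcu_read_unlock();
		if (!ret)
			call_rcu(&item->node.head, example_free_item);	/* defer reclaim */
		return ret;
	}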
1557
b4540e8a
MD
1558struct cds_ja *_cds_ja_new(unsigned int key_bits,
1559 const struct rcu_flavor_struct *flavor)
be9a7474
MD
1560{
1561 struct cds_ja *ja;
b0f74e47 1562 int ret;
f07b240f 1563 struct cds_ja_shadow_node *root_shadow_node;
be9a7474
MD
1564
1565 ja = calloc(sizeof(*ja), 1);
1566 if (!ja)
1567 goto ja_error;
b4540e8a
MD
1568
1569 switch (key_bits) {
1570 case 8:
b4540e8a 1571 case 16:
1216b3d2 1572 case 24:
b4540e8a 1573 case 32:
1216b3d2
MD
1574 case 40:
1575 case 48:
1576 case 56:
1577 ja->key_max = (1ULL << key_bits) - 1;
b4540e8a
MD
1578 break;
1579 case 64:
1580 ja->key_max = UINT64_MAX;
1581 break;
1582 default:
1583 goto check_error;
1584 }
1585
be9a7474 1586 /* ja->root is NULL */
5a9a87dd 1587 /* tree_depth 0 is for pointer to root node */
582a6ade 1588 ja->tree_depth = (key_bits >> JA_LOG2_BITS_PER_BYTE) + 1;
a2a7ff59 1589 assert(ja->tree_depth <= JA_MAX_DEPTH);
be9a7474
MD
1590 ja->ht = rcuja_create_ht(flavor);
1591 if (!ja->ht)
1592 goto ht_error;
b0f74e47
MD
1593
1594 /*
1595 * Note: we should not free this node until judy array destroy.
1596 */
f07b240f 1597 root_shadow_node = rcuja_shadow_set(ja->ht,
3d8fe307
MD
1598 (struct cds_ja_inode_flag *) &ja->root,
1599 NULL, ja);
f07b240f
MD
1600 if (!root_shadow_node) {
1601 ret = -ENOMEM;
b0f74e47 1602 goto ht_node_error;
f07b240f 1603 }
3d8fe307 1604 root_shadow_node->level = 0;
b0f74e47 1605
be9a7474
MD
1606 return ja;
1607
b0f74e47
MD
1608ht_node_error:
1609 ret = rcuja_delete_ht(ja->ht);
1610 assert(!ret);
be9a7474 1611ht_error:
b4540e8a 1612check_error:
be9a7474
MD
1613 free(ja);
1614ja_error:
1615 return NULL;
1616}
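A worked sketch of the depth and key-slicing arithmetic: with key_bits == 32, tree_depth is (32 >> JA_LOG2_BITS_PER_BYTE) + 1 = 5 (level 0 being the root pointer), and lookup, add and del all peel one key byte per internal level, most significant byte first, exactly as the iter_key computations above do (the constants 8 and 3 below stand in for JA_BITS_PER_BYTE and JA_LOG2_BITS_PER_BYTE):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int key_bits = 32, tree_depth = (key_bits >> 3) + 1, i;
		uint64_t key = 0x12345678;

		for (i = 1; i < tree_depth; i++)
			printf("level %u: key byte 0x%02x\n", i,
				(unsigned int) (uint8_t) (key >> (8 * (tree_depth - i - 1))));
		return 0;	/* prints bytes 0x12, 0x34, 0x56, 0x78 */
	}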
1617
3d8fe307
MD
1618/*
1619 * Called from RCU read-side CS.
1620 */
1621__attribute__((visibility("protected")))
1622void rcuja_free_all_children(struct cds_ja_shadow_node *shadow_node,
1623 struct cds_ja_inode_flag *node_flag,
1624 void (*free_node_cb)(struct rcu_head *head))
1625{
1626 const struct rcu_flavor_struct *flavor;
1627 unsigned int type_index;
1628 struct cds_ja_inode *node;
1629 const struct cds_ja_type *type;
1630
1631 flavor = cds_lfht_rcu_flavor(shadow_node->ja->ht);
1632 node = ja_node_ptr(node_flag);
1633 assert(node != NULL);
1634 type_index = ja_node_type(node_flag);
1635 type = &ja_types[type_index];
1636
1637 switch (type->type_class) {
1638 case RCU_JA_LINEAR:
1639 {
1640 uint8_t nr_child =
1641 ja_linear_node_get_nr_child(type, node);
1642 unsigned int i;
1643
1644 for (i = 0; i < nr_child; i++) {
1645 struct cds_ja_inode_flag *iter;
1646 struct cds_hlist_head head;
1647 struct cds_ja_node *entry;
1648 struct cds_hlist_node *pos;
1649 uint8_t v;
1650
1651 ja_linear_node_get_ith_pos(type, node, i, &v, &iter);
1652 if (!iter)
1653 continue;
1654 head.next = (struct cds_hlist_node *) iter;
1655 cds_hlist_for_each_entry_rcu(entry, pos, &head, list) {
1656 flavor->update_call_rcu(&entry->head, free_node_cb);
1657 }
1658 }
1659 break;
1660 }
1661 case RCU_JA_POOL:
1662 {
1663 unsigned int pool_nr;
1664
1665 for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) {
1666 struct cds_ja_inode *pool =
1667 ja_pool_node_get_ith_pool(type, node, pool_nr);
1668 uint8_t nr_child =
1669 ja_linear_node_get_nr_child(type, pool);
1670 unsigned int j;
1671
1672 for (j = 0; j < nr_child; j++) {
1673 struct cds_ja_inode_flag *iter;
1674 struct cds_hlist_head head;
1675 struct cds_ja_node *entry;
1676 struct cds_hlist_node *pos;
1677 uint8_t v;
1678
 1679 ja_linear_node_get_ith_pos(type, pool, j, &v, &iter);
1680 if (!iter)
1681 continue;
1682 head.next = (struct cds_hlist_node *) iter;
1683 cds_hlist_for_each_entry_rcu(entry, pos, &head, list) {
1684 flavor->update_call_rcu(&entry->head, free_node_cb);
1685 }
1686 }
1687 }
1688 break;
1689 }
1690 case RCU_JA_NULL:
1691 break;
1692 case RCU_JA_PIGEON:
1693 {
1694 uint8_t nr_child;
1695 unsigned int i;
1696
1697 nr_child = shadow_node->nr_child;
1698 for (i = 0; i < nr_child; i++) {
1699 struct cds_ja_inode_flag *iter;
1700 struct cds_hlist_head head;
1701 struct cds_ja_node *entry;
1702 struct cds_hlist_node *pos;
1703
1704 iter = ja_pigeon_node_get_ith_pos(type, node, i);
1705 if (!iter)
1706 continue;
1707 head.next = (struct cds_hlist_node *) iter;
1708 cds_hlist_for_each_entry_rcu(entry, pos, &head, list) {
1709 flavor->update_call_rcu(&entry->head, free_node_cb);
1710 }
1711 }
1712 break;
1713 }
1714 default:
1715 assert(0);
1716 }
1717}
1718
be9a7474
MD
1719/*
1720 * There should be no more concurrent add to the judy array while it is
1721 * being destroyed (ensured by the caller).
1722 */
3d8fe307
MD
1723int cds_ja_destroy(struct cds_ja *ja,
1724 void (*free_node_cb)(struct rcu_head *head))
be9a7474 1725{
b4540e8a
MD
1726 int ret;
1727
be9a7474 1728 rcuja_shadow_prune(ja->ht,
3d8fe307
MD
1729 RCUJA_SHADOW_CLEAR_FREE_NODE | RCUJA_SHADOW_CLEAR_FREE_LOCK,
1730 free_node_cb);
b4540e8a
MD
1731 ret = rcuja_delete_ht(ja->ht);
1732 if (ret)
1733 return ret;
f07b240f
MD
1734 if (uatomic_read(&ja->nr_fallback))
1735 fprintf(stderr,
1736 "[warning] RCU Judy Array used %lu fallback node(s)\n",
1737 uatomic_read(&ja->nr_fallback));
b4540e8a 1738 free(ja);
41975c12 1739 return 0;
be9a7474 1740}
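At teardown, the caller supplies the callback that frees its own nodes once the grace period has elapsed; a hypothetical version for the my_item type sketched earlier, assuming all updaters have already stopped as required above:

	static void example_destroy_free_cb(struct rcu_head *head)
	{
		free(caa_container_of(head, struct my_item, node.head));
	}

	static int example_teardown(struct cds_ja *ja)
	{
		return cds_ja_destroy(ja, example_destroy_free_cb);
	}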