/*
 * rcuja/rcuja.c
 *
 * Userspace RCU library - RCU Judy Array
 *
 * Copyright 2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define _LGPL_SOURCE
#include <stdint.h>
#include <errno.h>
#include <limits.h>
#include <urcu/rcuja.h>
#include <urcu/compiler.h>
#include <urcu/arch.h>
#include <assert.h>
#include <urcu-pointer.h>
#include <urcu/uatomic.h>

#include "rcuja-internal.h"
#include "bitfield.h"

enum cds_ja_type_class {
	RCU_JA_LINEAR = 0,	/* Type A */
			/* 32-bit: 1 to 25 children, 8 to 128 bytes */
			/* 64-bit: 1 to 28 children, 16 to 256 bytes */
	RCU_JA_POOL = 1,	/* Type B */
			/* 32-bit: 26 to 100 children, 256 to 512 bytes */
			/* 64-bit: 29 to 112 children, 512 to 1024 bytes */
	RCU_JA_PIGEON = 2,	/* Type C */
			/* 32-bit: 101 to 256 children, 1024 bytes */
			/* 64-bit: 113 to 256 children, 2048 bytes */
	/* Leaf nodes are implicit from their height in the tree */
	RCU_JA_NR_TYPES,

	RCU_JA_NULL,	/* not an encoded type, but keeps code regular */
};

struct cds_ja_type {
	enum cds_ja_type_class type_class;
	uint16_t min_child;		/* minimum number of children: 1 to 256 */
	uint16_t max_child;		/* maximum number of children: 1 to 256 */
	uint16_t max_linear_child;	/* per-pool max nr. children: 1 to 256 */
	uint16_t order;			/* node size is (1 << order), in bytes */
	uint16_t nr_pool_order;		/* number of pools is (1 << nr_pool_order) */
	uint16_t pool_size_order;	/* pool size is (1 << pool_size_order), in bytes */
};

/*
 * Number of least significant pointer bits reserved to represent the
 * child type.
 */
#define JA_TYPE_BITS		3
#define JA_TYPE_MAX_NR		(1UL << JA_TYPE_BITS)
#define JA_TYPE_MASK		(JA_TYPE_MAX_NR - 1)
#define JA_PTR_MASK		(~JA_TYPE_MASK)

#define JA_ENTRY_PER_NODE	256UL
#define JA_LOG2_BITS_PER_BYTE	3U
#define JA_BITS_PER_BYTE	(1U << JA_LOG2_BITS_PER_BYTE)

#define JA_MAX_DEPTH		9	/* Maximum depth, including leaves */

/*
 * Entry for NULL node is at index 8 of the table. It is never encoded
 * in flags.
 */
#define NODE_INDEX_NULL		8

/*
 * Number of removals needed on a fallback node before we try to shrink
 * it.
 */
#define JA_FALLBACK_REMOVAL_COUNT	8

/*
 * Iteration on the array to find the right node size for the number of
 * children stops when it reaches .max_child == 256 (this is the largest
 * possible node size, which contains 256 children).
 * The min_child overlaps with the previous max_child to provide a
 * hysteresis loop on reallocation for patterns of cyclic add/removal
 * within the same node.
 * The node index within the following arrays is represented on 3
 * bits. It identifies the node type, min/max number of children, and
 * the size order.
 * The max_child values for RCU_JA_POOL below result from a
 * statistical approximation: over a million populations, the max_child
 * covers between 97% and 99% of the populations generated. Therefore, a
 * fallback must exist to cover the rare extreme population unbalance
 * cases, but it will not have a major impact on speed or space
 * consumption, since those are rare cases.
 */

d68c6810
MD
109#if (CAA_BITS_PER_LONG < 64)
110/* 32-bit pointers */
1db4943c
MD
111enum {
112 ja_type_0_max_child = 1,
113 ja_type_1_max_child = 3,
114 ja_type_2_max_child = 6,
115 ja_type_3_max_child = 12,
116 ja_type_4_max_child = 25,
117 ja_type_5_max_child = 48,
118 ja_type_6_max_child = 92,
119 ja_type_7_max_child = 256,
e1db2db5 120 ja_type_8_max_child = 0, /* NULL */
1db4943c
MD
121};
122
8e519e3c
MD
123enum {
124 ja_type_0_max_linear_child = 1,
125 ja_type_1_max_linear_child = 3,
126 ja_type_2_max_linear_child = 6,
127 ja_type_3_max_linear_child = 12,
128 ja_type_4_max_linear_child = 25,
129 ja_type_5_max_linear_child = 24,
130 ja_type_6_max_linear_child = 23,
131};
132
1db4943c
MD
133enum {
134 ja_type_5_nr_pool_order = 1,
135 ja_type_6_nr_pool_order = 2,
136};
137
d96bfb0d 138const struct cds_ja_type ja_types[] = {
8e519e3c
MD
139 { .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_0_max_child, .max_linear_child = ja_type_0_max_linear_child, .order = 3, },
140 { .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_1_max_child, .max_linear_child = ja_type_1_max_linear_child, .order = 4, },
141 { .type_class = RCU_JA_LINEAR, .min_child = 3, .max_child = ja_type_2_max_child, .max_linear_child = ja_type_2_max_linear_child, .order = 5, },
142 { .type_class = RCU_JA_LINEAR, .min_child = 4, .max_child = ja_type_3_max_child, .max_linear_child = ja_type_3_max_linear_child, .order = 6, },
143 { .type_class = RCU_JA_LINEAR, .min_child = 10, .max_child = ja_type_4_max_child, .max_linear_child = ja_type_4_max_linear_child, .order = 7, },
e5227865 144
fd800776 145 /* Pools may fill sooner than max_child */
8e519e3c
MD
146 { .type_class = RCU_JA_POOL, .min_child = 20, .max_child = ja_type_5_max_child, .max_linear_child = ja_type_5_max_linear_child, .order = 8, .nr_pool_order = ja_type_5_nr_pool_order, .pool_size_order = 7, },
147 { .type_class = RCU_JA_POOL, .min_child = 45, .max_child = ja_type_6_max_child, .max_linear_child = ja_type_6_max_linear_child, .order = 9, .nr_pool_order = ja_type_6_nr_pool_order, .pool_size_order = 7, },
3d45251f
MD
148
149 /*
150 * TODO: Upon node removal below min_child, if child pool is
151 * filled beyond capacity, we need to roll back to pigeon.
152 */
1db4943c 153 { .type_class = RCU_JA_PIGEON, .min_child = 89, .max_child = ja_type_7_max_child, .order = 10, },
e1db2db5
MD
154
155 { .type_class = RCU_JA_NULL, .min_child = 0, .max_child = ja_type_8_max_child, },
d68c6810 156};
d68c6810
MD
157#else /* !(CAA_BITS_PER_LONG < 64) */
158/* 64-bit pointers */
1db4943c
MD
159enum {
160 ja_type_0_max_child = 1,
161 ja_type_1_max_child = 3,
162 ja_type_2_max_child = 7,
163 ja_type_3_max_child = 14,
164 ja_type_4_max_child = 28,
165 ja_type_5_max_child = 54,
166 ja_type_6_max_child = 104,
167 ja_type_7_max_child = 256,
e1db2db5 168 ja_type_8_max_child = 256,
1db4943c
MD
169};
170
8e519e3c
MD
171enum {
172 ja_type_0_max_linear_child = 1,
173 ja_type_1_max_linear_child = 3,
174 ja_type_2_max_linear_child = 7,
175 ja_type_3_max_linear_child = 14,
176 ja_type_4_max_linear_child = 28,
177 ja_type_5_max_linear_child = 27,
178 ja_type_6_max_linear_child = 26,
179};
180
1db4943c
MD
181enum {
182 ja_type_5_nr_pool_order = 1,
183 ja_type_6_nr_pool_order = 2,
184};
185
d96bfb0d 186const struct cds_ja_type ja_types[] = {
8e519e3c
MD
187 { .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_0_max_child, .max_linear_child = ja_type_0_max_linear_child, .order = 4, },
188 { .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_1_max_child, .max_linear_child = ja_type_1_max_linear_child, .order = 5, },
189 { .type_class = RCU_JA_LINEAR, .min_child = 3, .max_child = ja_type_2_max_child, .max_linear_child = ja_type_2_max_linear_child, .order = 6, },
190 { .type_class = RCU_JA_LINEAR, .min_child = 5, .max_child = ja_type_3_max_child, .max_linear_child = ja_type_3_max_linear_child, .order = 7, },
191 { .type_class = RCU_JA_LINEAR, .min_child = 10, .max_child = ja_type_4_max_child, .max_linear_child = ja_type_4_max_linear_child, .order = 8, },
e5227865 192
3d45251f 193 /* Pools may fill sooner than max_child. */
8e519e3c
MD
194 { .type_class = RCU_JA_POOL, .min_child = 22, .max_child = ja_type_5_max_child, .max_linear_child = ja_type_5_max_linear_child, .order = 9, .nr_pool_order = ja_type_5_nr_pool_order, .pool_size_order = 8, },
195 { .type_class = RCU_JA_POOL, .min_child = 51, .max_child = ja_type_6_max_child, .max_linear_child = ja_type_6_max_linear_child, .order = 10, .nr_pool_order = ja_type_6_nr_pool_order, .pool_size_order = 8, },
e5227865 196
3d45251f
MD
197 /*
198 * TODO: Upon node removal below min_child, if child pool is
199 * filled beyond capacity, we need to roll back to pigeon.
200 */
1db4943c 201 { .type_class = RCU_JA_PIGEON, .min_child = 101, .max_child = ja_type_7_max_child, .order = 11, },
e1db2db5
MD
202
203 { .type_class = RCU_JA_NULL, .min_child = 0, .max_child = ja_type_8_max_child, },
e5227865 204};
#endif /* !(CAA_BITS_PER_LONG < 64) */
e5227865 206
static inline __attribute__((unused))
void static_array_size_check(void)
{
	CAA_BUILD_BUG_ON(CAA_ARRAY_SIZE(ja_types) < JA_TYPE_MAX_NR);
}

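/*
 * Illustrative sketch, not part of the original implementation: the
 * hysteresis property described in the comment above ja_types[] (each
 * type's min_child sits at or below the previous type's max_child) can
 * be spot-checked at runtime. It ensures that a node freshly promoted
 * to a larger type is not immediately demoted again when a few children
 * are removed.
 */
static inline __attribute__((unused))
void ja_types_hysteresis_check(void)
{
	unsigned int i;

	for (i = 1; i < JA_TYPE_MAX_NR; i++)
		assert(ja_types[i].min_child <= ja_types[i - 1].max_child);
}
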
/*
 * The cds_ja_inode contains the compressed node data needed for
 * read-side. For linear and pool node configurations, it starts with a
 * byte counting the number of children in the node. Then, the
 * node-specific data is placed.
 * The node mutex, if any is needed, protecting concurrent updates of
 * each node is placed in a separate hash table indexed by node address.
 * For the pigeon configuration, the number of children is also kept in
 * a separate hash table, indexed by node address, because it is only
 * required for updates.
 */

#define DECLARE_LINEAR_NODE(index)						\
	struct {								\
		uint8_t nr_child;						\
		uint8_t child_value[ja_type_## index ##_max_linear_child];	\
		struct cds_ja_inode_flag *child_ptr[ja_type_## index ##_max_linear_child]; \
	}

#define DECLARE_POOL_NODE(index)						\
	struct {								\
		struct {							\
			uint8_t nr_child;					\
			uint8_t child_value[ja_type_## index ##_max_linear_child]; \
			struct cds_ja_inode_flag *child_ptr[ja_type_## index ##_max_linear_child]; \
		} linear[1U << ja_type_## index ##_nr_pool_order];		\
	}

struct cds_ja_inode {
	union {
		/* Linear configuration */
		DECLARE_LINEAR_NODE(0) conf_0;
		DECLARE_LINEAR_NODE(1) conf_1;
		DECLARE_LINEAR_NODE(2) conf_2;
		DECLARE_LINEAR_NODE(3) conf_3;
		DECLARE_LINEAR_NODE(4) conf_4;

		/* Pool configuration */
		DECLARE_POOL_NODE(5) conf_5;
		DECLARE_POOL_NODE(6) conf_6;

		/* Pigeon configuration */
		struct {
			struct cds_ja_inode_flag *child[ja_type_7_max_child];
		} conf_7;
		/* data aliasing nodes for computed accesses */
		uint8_t data[sizeof(struct cds_ja_inode_flag *) * ja_type_7_max_child];
	} u;
};

enum ja_recompact {
	JA_RECOMPACT,
	JA_RECOMPACT_ADD,
	JA_RECOMPACT_DEL,
};

static
struct cds_ja_inode_flag *ja_node_flag(struct cds_ja_inode *node,
		unsigned long type)
{
	assert(type < (1UL << JA_TYPE_BITS));
	return (struct cds_ja_inode_flag *) (((unsigned long) node) | type);
}

static
struct cds_ja_inode *ja_node_ptr(struct cds_ja_inode_flag *node)
{
	return (struct cds_ja_inode *) (((unsigned long) node) & JA_PTR_MASK);
}

static
unsigned long ja_node_type(struct cds_ja_inode_flag *node)
{
	unsigned long type;

	if (ja_node_ptr(node) == NULL) {
		return NODE_INDEX_NULL;
	}
	type = (unsigned int) ((unsigned long) node & JA_TYPE_MASK);
	assert(type < (1UL << JA_TYPE_BITS));
	return type;
}

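/*
 * Illustrative sketch, not part of the original implementation: the
 * low-bit tagging round-trip performed by ja_node_flag(), ja_node_ptr()
 * and ja_node_type(). It assumes a non-NULL node allocated by
 * alloc_cds_ja_node(), whose alignment guarantees that the JA_TYPE_BITS
 * least significant pointer bits are zero and therefore free to carry
 * the type index.
 */
static inline __attribute__((unused))
void ja_node_tagging_example(struct cds_ja_inode *node)
{
	struct cds_ja_inode_flag *tagged;

	tagged = ja_node_flag(node, 2);		/* encode type index 2 in the low bits */
	assert(ja_node_ptr(tagged) == node);	/* pointer recovered by masking the type bits */
	assert(ja_node_type(tagged) == 2);	/* type index recovered from the low bits */
}
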
b4540e8a 296struct cds_ja_inode *alloc_cds_ja_node(const struct cds_ja_type *ja_type)
e5227865 297{
1db4943c 298 return calloc(1U << ja_type->order, sizeof(char));
e5227865
MD
299}
300
b4540e8a 301void free_cds_ja_node(struct cds_ja_inode *node)
e5227865
MD
302{
303 free(node);
304}
305
#define __JA_ALIGN_MASK(v, mask)	(((v) + (mask)) & ~(mask))
#define JA_ALIGN(v, align)		__JA_ALIGN_MASK(v, (typeof(v)) (align) - 1)
#define __JA_FLOOR_MASK(v, mask)	((v) & ~(mask))
#define JA_FLOOR(v, align)		__JA_FLOOR_MASK(v, (typeof(v)) (align) - 1)

static
uint8_t *align_ptr_size(uint8_t *ptr)
{
	return (uint8_t *) JA_ALIGN((unsigned long) ptr, sizeof(void *));
}

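/*
 * Illustrative sketch, not part of the original implementation:
 * JA_ALIGN() rounds a value up to the next multiple of a power-of-two
 * alignment, JA_FLOOR() rounds it down. align_ptr_size() is what lets
 * the linear-node accessors below locate the child pointer array: it
 * starts at the first pointer-aligned offset following the child value
 * bytes.
 */
static inline __attribute__((unused))
void ja_align_example(struct cds_ja_inode *node)
{
	uint8_t *values = &node->u.data[1];

	assert(JA_ALIGN(13, 8) == 16);	/* round up to a multiple of 8 */
	assert(JA_FLOOR(13, 8) == 8);	/* round down to a multiple of 8 */
	/* Child pointers of a type-1 linear node (up to 3 values): */
	assert((uint8_t *) node->u.conf_1.child_ptr
		== align_ptr_size(&values[ja_type_1_max_linear_child]));
}
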
11c5e016 317static
d96bfb0d 318uint8_t ja_linear_node_get_nr_child(const struct cds_ja_type *type,
b4540e8a 319 struct cds_ja_inode *node)
11c5e016
MD
320{
321 assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
2e313670 322 return rcu_dereference(node->u.data[0]);
11c5e016
MD
323}
324
/*
 * The order in which values and pointers are read does not matter: if
 * a value is missing, we return NULL. If a value is there, but its
 * associated pointer is still NULL, we return NULL too.
 */
d68c6810 330static
b4540e8a
MD
331struct cds_ja_inode_flag *ja_linear_node_get_nth(const struct cds_ja_type *type,
332 struct cds_ja_inode *node,
5a9a87dd 333 struct cds_ja_inode_flag ***child_node_flag_ptr,
8e519e3c 334 uint8_t n)
d68c6810
MD
335{
336 uint8_t nr_child;
337 uint8_t *values;
b4540e8a
MD
338 struct cds_ja_inode_flag **pointers;
339 struct cds_ja_inode_flag *ptr;
d68c6810
MD
340 unsigned int i;
341
8e519e3c 342 assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
d68c6810 343
11c5e016 344 nr_child = ja_linear_node_get_nr_child(type, node);
13a7f5a6 345 cmm_smp_rmb(); /* read nr_child before values and pointers */
8e519e3c
MD
346 assert(nr_child <= type->max_linear_child);
347 assert(type->type_class != RCU_JA_LINEAR || nr_child >= type->min_child);
d68c6810 348
1db4943c 349 values = &node->u.data[1];
d68c6810 350 for (i = 0; i < nr_child; i++) {
13a7f5a6 351 if (CMM_LOAD_SHARED(values[i]) == n)
d68c6810
MD
352 break;
353 }
354 if (i >= nr_child)
355 return NULL;
b4540e8a 356 pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
13a7f5a6 357 ptr = rcu_dereference(pointers[i]);
2e313670
MD
358 if (caa_unlikely(child_node_flag_ptr) && ptr)
359 *child_node_flag_ptr = &pointers[i];
d68c6810
MD
360 return ptr;
361}
362
11c5e016 363static
5a9a87dd 364void ja_linear_node_get_ith_pos(const struct cds_ja_type *type,
b4540e8a 365 struct cds_ja_inode *node,
11c5e016
MD
366 uint8_t i,
367 uint8_t *v,
b4540e8a 368 struct cds_ja_inode_flag **iter)
11c5e016
MD
369{
370 uint8_t *values;
b4540e8a 371 struct cds_ja_inode_flag **pointers;
11c5e016
MD
372
373 assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
374 assert(i < ja_linear_node_get_nr_child(type, node));
375
376 values = &node->u.data[1];
377 *v = values[i];
b4540e8a 378 pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
11c5e016
MD
379 *iter = pointers[i];
380}
381
d68c6810 382static
b4540e8a
MD
383struct cds_ja_inode_flag *ja_pool_node_get_nth(const struct cds_ja_type *type,
384 struct cds_ja_inode *node,
5a9a87dd 385 struct cds_ja_inode_flag ***child_node_flag_ptr,
8e519e3c 386 uint8_t n)
d68c6810 387{
b4540e8a 388 struct cds_ja_inode *linear;
d68c6810 389
fd800776 390 assert(type->type_class == RCU_JA_POOL);
e1db2db5
MD
391 /*
392 * TODO: currently, we select the pool by highest bits. We
393 * should support various encodings.
394 */
b4540e8a 395 linear = (struct cds_ja_inode *)
1db4943c 396 &node->u.data[((unsigned long) n >> (CHAR_BIT - type->nr_pool_order)) << type->pool_size_order];
5a9a87dd 397 return ja_linear_node_get_nth(type, linear, child_node_flag_ptr, n);
d68c6810
MD
398}
399
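/*
 * Illustrative sketch, not part of the original implementation: the
 * pool selection above uses the nr_pool_order topmost bits of the
 * 8-bit key. With nr_pool_order = 2 and pool_size_order = 8 (the
 * parameters of the largest 64-bit pool type), key 200 = 0b11001000
 * selects pool 3, which starts at byte offset 3 << 8 = 768 within the
 * node.
 */
static inline __attribute__((unused))
void ja_pool_select_example(void)
{
	const struct cds_ja_type type = {
		.type_class = RCU_JA_POOL,
		.nr_pool_order = 2,
		.pool_size_order = 8,
	};
	unsigned long offset;

	offset = ((unsigned long) 200 >> (CHAR_BIT - type.nr_pool_order))
			<< type.pool_size_order;
	assert(offset == 768);
}
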
11c5e016 400static
b4540e8a
MD
401struct cds_ja_inode *ja_pool_node_get_ith_pool(const struct cds_ja_type *type,
402 struct cds_ja_inode *node,
11c5e016
MD
403 uint8_t i)
404{
405 assert(type->type_class == RCU_JA_POOL);
b4540e8a 406 return (struct cds_ja_inode *)
11c5e016
MD
407 &node->u.data[(unsigned int) i << type->pool_size_order];
408}
409
d68c6810 410static
b4540e8a
MD
411struct cds_ja_inode_flag *ja_pigeon_node_get_nth(const struct cds_ja_type *type,
412 struct cds_ja_inode *node,
5a9a87dd 413 struct cds_ja_inode_flag ***child_node_flag_ptr,
8e519e3c 414 uint8_t n)
d68c6810 415{
5a9a87dd
MD
416 struct cds_ja_inode_flag **child_node_flag;
417
d68c6810 418 assert(type->type_class == RCU_JA_PIGEON);
5a9a87dd 419 child_node_flag = &((struct cds_ja_inode_flag **) node->u.data)[n];
582a6ade
MD
420 dbg_printf("ja_pigeon_node_get_nth child_node_flag_ptr %p\n",
421 child_node_flag);
422 if (caa_unlikely(child_node_flag_ptr) && *child_node_flag)
5a9a87dd
MD
423 *child_node_flag_ptr = child_node_flag;
424 return rcu_dereference(*child_node_flag);
d68c6810
MD
425}
426
2e313670
MD
427static
428struct cds_ja_inode_flag *ja_pigeon_node_get_ith_pos(const struct cds_ja_type *type,
429 struct cds_ja_inode *node,
430 uint8_t i)
431{
432 return ja_pigeon_node_get_nth(type, node, NULL, i);
433}
434
13a7f5a6
MD
435/*
436 * ja_node_get_nth: get nth item from a node.
437 * node_flag is already rcu_dereference'd.
438 */
d68c6810 439static
41975c12 440struct cds_ja_inode_flag * ja_node_get_nth(struct cds_ja_inode_flag *node_flag,
5a9a87dd 441 struct cds_ja_inode_flag ***child_node_flag_ptr,
8e519e3c 442 uint8_t n)
d68c6810
MD
443{
444 unsigned int type_index;
b4540e8a 445 struct cds_ja_inode *node;
d96bfb0d 446 const struct cds_ja_type *type;
d68c6810 447
d68c6810 448 node = ja_node_ptr(node_flag);
5a9a87dd 449 assert(node != NULL);
d68c6810
MD
450 type_index = ja_node_type(node_flag);
451 type = &ja_types[type_index];
452
453 switch (type->type_class) {
454 case RCU_JA_LINEAR:
5a9a87dd
MD
455 return ja_linear_node_get_nth(type, node,
456 child_node_flag_ptr, n);
fd800776 457 case RCU_JA_POOL:
5a9a87dd
MD
458 return ja_pool_node_get_nth(type, node,
459 child_node_flag_ptr, n);
d68c6810 460 case RCU_JA_PIGEON:
5a9a87dd
MD
461 return ja_pigeon_node_get_nth(type, node,
462 child_node_flag_ptr, n);
d68c6810
MD
463 default:
464 assert(0);
465 return (void *) -1UL;
466 }
467}
468
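/*
 * Illustrative sketch, not part of the original implementation: each
 * tree level below the root consumes JA_BITS_PER_BYTE bits of the key,
 * most significant byte first, and the resulting 8-bit value is the
 * index passed to ja_node_get_nth(). With 32-bit keys the tree depth
 * is 5 (4 byte-sized levels plus the root pointer level), so key
 * 0x12345678 decomposes into 0x12, 0x34, 0x56, 0x78.
 */
static inline __attribute__((unused))
void ja_key_decompose_example(void)
{
	const uint64_t key = 0x12345678;
	const unsigned int tree_depth = 5;
	const uint8_t expected[] = { 0x12, 0x34, 0x56, 0x78 };
	unsigned int i;

	for (i = 1; i < tree_depth; i++) {
		uint8_t iter_key;

		iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1)));
		assert(iter_key == expected[i - 1]);
	}
}
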
8e519e3c 469static
d96bfb0d 470int ja_linear_node_set_nth(const struct cds_ja_type *type,
b4540e8a 471 struct cds_ja_inode *node,
d96bfb0d 472 struct cds_ja_shadow_node *shadow_node,
8e519e3c 473 uint8_t n,
b4540e8a 474 struct cds_ja_inode_flag *child_node_flag)
8e519e3c
MD
475{
476 uint8_t nr_child;
477 uint8_t *values, *nr_child_ptr;
b4540e8a 478 struct cds_ja_inode_flag **pointers;
2e313670 479 unsigned int i, unused = 0;
8e519e3c
MD
480
481 assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
482
483 nr_child_ptr = &node->u.data[0];
a2a7ff59 484 dbg_printf("linear set nth: nr_child_ptr %p\n", nr_child_ptr);
8e519e3c
MD
485 nr_child = *nr_child_ptr;
486 assert(nr_child <= type->max_linear_child);
8e519e3c
MD
487
488 values = &node->u.data[1];
2e313670
MD
489 pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
490 /* Check if node value is already populated */
8e519e3c 491 for (i = 0; i < nr_child; i++) {
2e313670
MD
492 if (values[i] == n) {
493 if (pointers[i])
494 return -EEXIST;
495 else
496 break;
497 } else {
498 if (!pointers[i])
499 unused++;
500 }
8e519e3c 501 }
2e313670
MD
502 if (i == nr_child && nr_child >= type->max_linear_child) {
503 if (unused)
504 return -ERANGE; /* recompact node */
505 else
506 return -ENOSPC; /* No space left in this node type */
507 }
508
509 assert(pointers[i] == NULL);
510 rcu_assign_pointer(pointers[i], child_node_flag);
511 /* If we expanded the nr_child, increment it */
512 if (i == nr_child) {
513 CMM_STORE_SHARED(values[nr_child], n);
514 /* write pointer and value before nr_child */
515 cmm_smp_wmb();
516 CMM_STORE_SHARED(*nr_child_ptr, nr_child + 1);
8e519e3c 517 }
e1db2db5 518 shadow_node->nr_child++;
a2a7ff59
MD
519 dbg_printf("linear set nth: %u child, shadow: %u child, for node %p shadow %p\n",
520 (unsigned int) CMM_LOAD_SHARED(*nr_child_ptr),
521 (unsigned int) shadow_node->nr_child,
522 node, shadow_node);
523
8e519e3c
MD
524 return 0;
525}
526
527static
d96bfb0d 528int ja_pool_node_set_nth(const struct cds_ja_type *type,
b4540e8a 529 struct cds_ja_inode *node,
d96bfb0d 530 struct cds_ja_shadow_node *shadow_node,
8e519e3c 531 uint8_t n,
b4540e8a 532 struct cds_ja_inode_flag *child_node_flag)
8e519e3c 533{
b4540e8a 534 struct cds_ja_inode *linear;
8e519e3c
MD
535
536 assert(type->type_class == RCU_JA_POOL);
b4540e8a 537 linear = (struct cds_ja_inode *)
8e519e3c 538 &node->u.data[((unsigned long) n >> (CHAR_BIT - type->nr_pool_order)) << type->pool_size_order];
e1db2db5
MD
539 return ja_linear_node_set_nth(type, linear, shadow_node,
540 n, child_node_flag);
8e519e3c
MD
541}
542
543static
d96bfb0d 544int ja_pigeon_node_set_nth(const struct cds_ja_type *type,
b4540e8a 545 struct cds_ja_inode *node,
d96bfb0d 546 struct cds_ja_shadow_node *shadow_node,
8e519e3c 547 uint8_t n,
b4540e8a 548 struct cds_ja_inode_flag *child_node_flag)
8e519e3c 549{
b4540e8a 550 struct cds_ja_inode_flag **ptr;
8e519e3c
MD
551
552 assert(type->type_class == RCU_JA_PIGEON);
b4540e8a 553 ptr = &((struct cds_ja_inode_flag **) node->u.data)[n];
5a9a87dd 554 if (*ptr)
8e519e3c
MD
555 return -EEXIST;
556 rcu_assign_pointer(*ptr, child_node_flag);
e1db2db5 557 shadow_node->nr_child++;
8e519e3c
MD
558 return 0;
559}
560
d68c6810 561/*
7a0b2331 562 * _ja_node_set_nth: set nth item within a node. Return an error
8e519e3c 563 * (negative error value) if it is already there.
d68c6810 564 */
8e519e3c 565static
d96bfb0d 566int _ja_node_set_nth(const struct cds_ja_type *type,
b4540e8a 567 struct cds_ja_inode *node,
d96bfb0d 568 struct cds_ja_shadow_node *shadow_node,
e1db2db5 569 uint8_t n,
b4540e8a 570 struct cds_ja_inode_flag *child_node_flag)
8e519e3c 571{
8e519e3c
MD
572 switch (type->type_class) {
573 case RCU_JA_LINEAR:
e1db2db5 574 return ja_linear_node_set_nth(type, node, shadow_node, n,
8e519e3c
MD
575 child_node_flag);
576 case RCU_JA_POOL:
e1db2db5 577 return ja_pool_node_set_nth(type, node, shadow_node, n,
8e519e3c
MD
578 child_node_flag);
579 case RCU_JA_PIGEON:
e1db2db5 580 return ja_pigeon_node_set_nth(type, node, shadow_node, n,
8e519e3c 581 child_node_flag);
e1db2db5
MD
582 case RCU_JA_NULL:
583 return -ENOSPC;
8e519e3c
MD
584 default:
585 assert(0);
586 return -EINVAL;
587 }
588
589 return 0;
590}
7a0b2331 591
2e313670
MD
592static
593int ja_linear_node_clear_nth(const struct cds_ja_type *type,
594 struct cds_ja_inode *node,
595 struct cds_ja_shadow_node *shadow_node,
596 uint8_t n)
597{
598 uint8_t nr_child;
599 uint8_t *values, *nr_child_ptr;
600 struct cds_ja_inode_flag **pointers;
601 unsigned int i;
602
603 assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
604
605 nr_child_ptr = &node->u.data[0];
606 dbg_printf("linear clear nth: nr_child_ptr %p\n", nr_child_ptr);
607 nr_child = *nr_child_ptr;
608 assert(nr_child <= type->max_linear_child);
609
610 values = &node->u.data[1];
611 pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
612 for (i = 0; i < nr_child; i++) {
613 if (values[i] == n) {
614 if (pointers[i])
615 break;
616 else
617 return -ENOENT;
618 }
619 }
620 if (i >= nr_child)
621 return -ENOENT;
622 if (shadow_node->fallback_removal_count) {
623 shadow_node->fallback_removal_count--;
624 } else {
625 if (shadow_node->nr_child <= type->min_child) {
626 /* We need to try recompacting the node */
627 return -EFBIG;
628 }
629 }
630 assert(pointers[i] != NULL);
631 rcu_assign_pointer(pointers[i], NULL);
632 /*
633 * Value and nr_child are never changed (would cause ABA issue).
634 * Instead, we leave the pointer to NULL and recompact the node
635 * once in a while. It is allowed to set a NULL pointer to a new
636 * value without recompaction though.
637 * Only update the shadow node accounting.
638 */
639 shadow_node->nr_child--;
640 dbg_printf("linear set nth: %u child, shadow: %u child, for node %p shadow %p\n",
641 (unsigned int) CMM_LOAD_SHARED(*nr_child_ptr),
642 (unsigned int) shadow_node->nr_child,
643 node, shadow_node);
644
645 return 0;
646}
647
648static
649int ja_pool_node_clear_nth(const struct cds_ja_type *type,
650 struct cds_ja_inode *node,
651 struct cds_ja_shadow_node *shadow_node,
652 uint8_t n)
653{
654 struct cds_ja_inode *linear;
655
656 assert(type->type_class == RCU_JA_POOL);
657 linear = (struct cds_ja_inode *)
658 &node->u.data[((unsigned long) n >> (CHAR_BIT - type->nr_pool_order)) << type->pool_size_order];
659 return ja_linear_node_clear_nth(type, linear, shadow_node, n);
660}
661
662static
663int ja_pigeon_node_clear_nth(const struct cds_ja_type *type,
664 struct cds_ja_inode *node,
665 struct cds_ja_shadow_node *shadow_node,
666 uint8_t n)
667{
668 struct cds_ja_inode_flag **ptr;
669
670 assert(type->type_class == RCU_JA_PIGEON);
671 ptr = &((struct cds_ja_inode_flag **) node->u.data)[n];
672 if (!*ptr)
673 return -ENOENT;
674 rcu_assign_pointer(*ptr, NULL);
675 shadow_node->nr_child--;
676 return 0;
677}
678
679/*
680 * _ja_node_clear_nth: clear nth item within a node. Return an error
681 * (negative error value) if it is not found (-ENOENT).
682 */
683static
684int _ja_node_clear_nth(const struct cds_ja_type *type,
685 struct cds_ja_inode *node,
686 struct cds_ja_shadow_node *shadow_node,
687 uint8_t n)
688{
689 switch (type->type_class) {
690 case RCU_JA_LINEAR:
691 return ja_linear_node_clear_nth(type, node, shadow_node, n);
692 case RCU_JA_POOL:
693 return ja_pool_node_clear_nth(type, node, shadow_node, n);
694 case RCU_JA_PIGEON:
695 return ja_pigeon_node_clear_nth(type, node, shadow_node, n);
696 case RCU_JA_NULL:
697 return -ENOENT;
698 default:
699 assert(0);
700 return -EINVAL;
701 }
702
703 return 0;
704}
705
/*
 * ja_node_recompact: recompact a node, adding or removing a child
 * depending on the recompaction mode.
 * TODO: for pool type, take selection bit(s) into account.
 * Return 0 on success, -EAGAIN if need to retry, or other negative
 * error value otherwise.
 */
MD
711 */
712static
2e313670
MD
713int ja_node_recompact(enum ja_recompact mode,
714 struct cds_ja *ja,
e1db2db5 715 unsigned int old_type_index,
d96bfb0d 716 const struct cds_ja_type *old_type,
b4540e8a 717 struct cds_ja_inode *old_node,
5a9a87dd 718 struct cds_ja_shadow_node *shadow_node,
b4540e8a
MD
719 struct cds_ja_inode_flag **old_node_flag, uint8_t n,
720 struct cds_ja_inode_flag *child_node_flag)
7a0b2331 721{
e1db2db5 722 unsigned int new_type_index;
b4540e8a 723 struct cds_ja_inode *new_node;
f07b240f 724 struct cds_ja_shadow_node *new_shadow_node;
d96bfb0d 725 const struct cds_ja_type *new_type;
b4540e8a 726 struct cds_ja_inode_flag *new_node_flag;
7a0b2331 727 int ret;
f07b240f 728 int fallback = 0;
7a0b2331 729
2e313670
MD
730 switch (mode) {
731 case JA_RECOMPACT:
732 new_type_index = old_type_index;
733 break;
734 case JA_RECOMPACT_ADD:
735 if (!shadow_node || old_type_index == NODE_INDEX_NULL) {
736 new_type_index = 0;
737 } else {
738 new_type_index = old_type_index + 1;
739 }
740 break;
741 case JA_RECOMPACT_DEL:
742 if (old_type_index == 0) {
743 new_type_index = NODE_INDEX_NULL;
744 } else {
745 new_type_index = old_type_index - 1;
746 }
747 break;
748 default:
749 assert(0);
7a0b2331 750 }
a2a7ff59 751
f07b240f 752retry: /* for fallback */
582a6ade
MD
753 dbg_printf("Recompact from type %d to type %d\n",
754 old_type_index, new_type_index);
7a0b2331 755 new_type = &ja_types[new_type_index];
2e313670
MD
756 if (new_type_index != NODE_INDEX_NULL) {
757 new_node = alloc_cds_ja_node(new_type);
758 if (!new_node)
759 return -ENOMEM;
760 new_node_flag = ja_node_flag(new_node, new_type_index);
761 dbg_printf("Recompact inherit lock from %p\n", shadow_node);
762 new_shadow_node = rcuja_shadow_set(ja->ht, new_node, shadow_node);
763 if (!new_shadow_node) {
764 free(new_node);
765 return -ENOMEM;
766 }
767 if (fallback)
768 new_shadow_node->fallback_removal_count =
769 JA_FALLBACK_REMOVAL_COUNT;
770 } else {
771 new_node = NULL;
772 new_node_flag = NULL;
e1db2db5 773 }
11c5e016 774
2e313670
MD
775 assert(mode != JA_RECOMPACT_ADD || old_type->type_class != RCU_JA_PIGEON);
776
777 if (new_type_index == NODE_INDEX_NULL)
778 goto skip_copy;
779
11c5e016
MD
780 switch (old_type->type_class) {
781 case RCU_JA_LINEAR:
782 {
783 uint8_t nr_child =
784 ja_linear_node_get_nr_child(old_type, old_node);
785 unsigned int i;
786
787 for (i = 0; i < nr_child; i++) {
b4540e8a 788 struct cds_ja_inode_flag *iter;
11c5e016
MD
789 uint8_t v;
790
791 ja_linear_node_get_ith_pos(old_type, old_node, i, &v, &iter);
792 if (!iter)
793 continue;
2e313670
MD
794 if (mode == JA_RECOMPACT_DEL && v == n)
795 continue;
f07b240f
MD
796 ret = _ja_node_set_nth(new_type, new_node,
797 new_shadow_node,
11c5e016 798 v, iter);
f07b240f
MD
799 if (new_type->type_class == RCU_JA_POOL && ret) {
800 goto fallback_toosmall;
801 }
11c5e016
MD
802 assert(!ret);
803 }
804 break;
805 }
806 case RCU_JA_POOL:
807 {
808 unsigned int pool_nr;
809
810 for (pool_nr = 0; pool_nr < (1U << old_type->nr_pool_order); pool_nr++) {
b4540e8a 811 struct cds_ja_inode *pool =
11c5e016
MD
812 ja_pool_node_get_ith_pool(old_type,
813 old_node, pool_nr);
814 uint8_t nr_child =
815 ja_linear_node_get_nr_child(old_type, pool);
816 unsigned int j;
817
818 for (j = 0; j < nr_child; j++) {
b4540e8a 819 struct cds_ja_inode_flag *iter;
11c5e016
MD
820 uint8_t v;
821
822 ja_linear_node_get_ith_pos(old_type, pool,
823 j, &v, &iter);
824 if (!iter)
825 continue;
2e313670
MD
826 if (mode == JA_RECOMPACT_DEL && v == n)
827 continue;
f07b240f
MD
828 ret = _ja_node_set_nth(new_type, new_node,
829 new_shadow_node,
11c5e016 830 v, iter);
f07b240f
MD
831 if (new_type->type_class == RCU_JA_POOL
832 && ret) {
833 goto fallback_toosmall;
834 }
11c5e016
MD
835 assert(!ret);
836 }
837 }
838 break;
7a0b2331 839 }
a2a7ff59 840 case RCU_JA_NULL:
2e313670 841 assert(mode == JA_RECOMPACT_ADD);
a2a7ff59 842 break;
11c5e016 843 case RCU_JA_PIGEON:
2e313670
MD
844 {
845 uint8_t nr_child;
846 unsigned int i;
847
848 assert(mode == JA_RECOMPACT_DEL);
849 nr_child = shadow_node->nr_child;
850 for (i = 0; i < nr_child; i++) {
851 struct cds_ja_inode_flag *iter;
852
853 iter = ja_pigeon_node_get_ith_pos(old_type, old_node, i);
854 if (!iter)
855 continue;
856 if (mode == JA_RECOMPACT_DEL && i == n)
857 continue;
858 ret = _ja_node_set_nth(new_type, new_node,
859 new_shadow_node,
860 i, iter);
861 if (new_type->type_class == RCU_JA_POOL && ret) {
862 goto fallback_toosmall;
863 }
864 assert(!ret);
865 }
866 break;
867 }
11c5e016
MD
868 default:
869 assert(0);
5a9a87dd 870 ret = -EINVAL;
f07b240f 871 goto end;
11c5e016 872 }
2e313670 873skip_copy:
11c5e016 874
	if (mode == JA_RECOMPACT_ADD) {
		/* add node */
		ret = _ja_node_set_nth(new_type, new_node,
				new_shadow_node,
				n, child_node_flag);
		assert(!ret);
	}
882 /* Return pointer to new recompacted node through old_node_flag */
5a9a87dd 883 *old_node_flag = new_node_flag;
a2a7ff59 884 if (old_node) {
2e313670
MD
885 int flags;
886
887 flags = RCUJA_SHADOW_CLEAR_FREE_NODE;
888 /*
889 * It is OK to free the lock associated with a node
890 * going to NULL, since we are holding the parent lock.
891 * This synchronizes removal with re-add of that node.
892 */
893 if (new_type_index == NODE_INDEX_NULL)
894 flags = RCUJA_SHADOW_CLEAR_FREE_LOCK;
a2a7ff59 895 ret = rcuja_shadow_clear(ja->ht, old_node, shadow_node,
2e313670 896 flags);
a2a7ff59
MD
897 assert(!ret);
898 }
5a9a87dd
MD
899
900 ret = 0;
f07b240f 901end:
5a9a87dd 902 return ret;
f07b240f
MD
903
904fallback_toosmall:
905 /* fallback if next pool is too small */
906 ret = rcuja_shadow_clear(ja->ht, new_node, new_shadow_node,
907 RCUJA_SHADOW_CLEAR_FREE_NODE);
908 assert(!ret);
909
2e313670 910 /* Choose fallback type: pigeon */
f07b240f
MD
911 new_type_index = (1UL << JA_TYPE_BITS) - 1;
912 dbg_printf("Fallback to type %d\n", new_type_index);
913 uatomic_inc(&ja->nr_fallback);
914 fallback = 1;
915 goto retry;
7a0b2331
MD
916}
917
/*
 * Return 0 on success, -EAGAIN if need to retry, or other negative
 * error value otherwise.
 */
7a0b2331 922static
d96bfb0d 923int ja_node_set_nth(struct cds_ja *ja,
b4540e8a 924 struct cds_ja_inode_flag **node_flag, uint8_t n,
5a9a87dd
MD
925 struct cds_ja_inode_flag *child_node_flag,
926 struct cds_ja_shadow_node *shadow_node)
7a0b2331
MD
927{
928 int ret;
e1db2db5 929 unsigned int type_index;
d96bfb0d 930 const struct cds_ja_type *type;
b4540e8a 931 struct cds_ja_inode *node;
7a0b2331 932
a2a7ff59
MD
933 dbg_printf("ja_node_set_nth for n=%u, node %p, shadow %p\n",
934 (unsigned int) n, ja_node_ptr(*node_flag), shadow_node);
935
e1db2db5
MD
936 node = ja_node_ptr(*node_flag);
937 type_index = ja_node_type(*node_flag);
938 type = &ja_types[type_index];
e1db2db5
MD
939 ret = _ja_node_set_nth(type, node, shadow_node,
940 n, child_node_flag);
2e313670
MD
941 switch (ret) {
942 case -ENOSPC:
e1db2db5 943 /* Not enough space in node, need to recompact. */
2e313670
MD
944 ret = ja_node_recompact(JA_RECOMPACT_ADD, ja, type_index, type, node,
945 shadow_node, node_flag, n, child_node_flag);
946 break;
947 case -ERANGE:
948 /* Node needs to be recompacted. */
949 ret = ja_node_recompact(JA_RECOMPACT, ja, type_index, type, node,
5a9a87dd 950 shadow_node, node_flag, n, child_node_flag);
2e313670
MD
951 break;
952 }
953 return ret;
954}
955
956/*
957 * Return 0 on success, -EAGAIN if need to retry, or other negative
958 * error value otherwise.
959 */
960static
961int ja_node_clear_nth(struct cds_ja *ja,
962 struct cds_ja_inode_flag **node_flag, uint8_t n,
963 struct cds_ja_shadow_node *shadow_node)
964{
965 int ret;
966 unsigned int type_index;
967 const struct cds_ja_type *type;
968 struct cds_ja_inode *node;
969
970 dbg_printf("ja_node_clear_nth for n=%u, node %p, shadow %p\n",
971 (unsigned int) n, ja_node_ptr(*node_flag), shadow_node);
972
973 node = ja_node_ptr(*node_flag);
974 type_index = ja_node_type(*node_flag);
975 type = &ja_types[type_index];
976 ret = _ja_node_clear_nth(type, node, shadow_node, n);
977 if (ret == -EFBIG) {
		/* Need to try recompaction. */
979 ret = ja_node_recompact(JA_RECOMPACT_DEL, ja, type_index, type, node,
980 shadow_node, node_flag, n, NULL);
7a0b2331
MD
981 }
982 return ret;
983}
be9a7474 984
struct cds_hlist_head *cds_ja_lookup(struct cds_ja *ja, uint64_t key)
{
	unsigned int tree_depth, i;
	struct cds_ja_inode_flag *node_flag;

	if (caa_unlikely(key > ja->key_max))
		return NULL;
	tree_depth = ja->tree_depth;
	node_flag = rcu_dereference(ja->root);

	/* level 0: root node */
	if (!ja_node_ptr(node_flag))
		return NULL;

	for (i = 1; i < tree_depth; i++) {
		uint8_t iter_key;

		iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1)));
		node_flag = ja_node_get_nth(node_flag, NULL,
				iter_key);
		dbg_printf("cds_ja_lookup iter key lookup %u finds node_flag %p\n",
				(unsigned int) iter_key, node_flag);
		if (!ja_node_ptr(node_flag))
			return NULL;
	}

	/* Last level lookup succeeded. We got an actual match. */
	return (struct cds_hlist_head *) node_flag;
}

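/*
 * Illustrative sketch, not part of the original implementation: typical
 * read-side use of cds_ja_lookup(). The returned hlist head chains all
 * nodes sharing the same key through their embedded cds_ja_node, and
 * must be traversed under the RCU read-side lock of the flavor the
 * array was created with (shown generically as rcu_read_lock() here).
 * "struct my_item" and do_something() are hypothetical, shown only for
 * the calling convention.
 */
#if 0	/* example only, not compiled */
struct my_item {
	struct cds_ja_node node;	/* judy array linkage */
	int payload;
};

static
void lookup_example(struct cds_ja *ja, uint64_t key)
{
	struct cds_hlist_head *head;
	struct cds_hlist_node *pos;
	struct my_item *item;

	rcu_read_lock();
	head = cds_ja_lookup(ja, key);
	if (head) {
		/* cds_hlist_for_each_entry_rcu() comes from urcu hlist headers */
		cds_hlist_for_each_entry_rcu(item, pos, head, node.list)
			do_something(item);
	}
	rcu_read_unlock();
}
#endif
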
/*
 * We reached an unpopulated node. Create it and the children we need,
 * and then attach the entire branch to the current node. This may
 * trigger recompaction of the current node. Locks needed: node lock
 * (for add), and, possibly, parent node lock (to update pointer due to
 * node recompaction).
 *
 * First take node lock, check if recompaction is needed, then take
 * parent lock (if needed). Then we can proceed to create the new
 * branch. Publish the new branch, and release locks.
 * TODO: we currently always take the parent lock even when not needed.
 */
1027static
1028int ja_attach_node(struct cds_ja *ja,
1029 struct cds_ja_inode_flag **node_flag_ptr,
1030 struct cds_ja_inode_flag *node_flag,
1031 struct cds_ja_inode_flag *parent_node_flag,
1032 uint64_t key,
79b41067 1033 unsigned int level,
5a9a87dd
MD
1034 struct cds_ja_node *child_node)
1035{
1036 struct cds_ja_shadow_node *shadow_node = NULL,
f07b240f
MD
1037 *parent_shadow_node = NULL,
1038 *iter_shadow_node;
5a9a87dd
MD
1039 struct cds_ja_inode *node = ja_node_ptr(node_flag);
1040 struct cds_ja_inode *parent_node = ja_node_ptr(parent_node_flag);
1041 struct cds_hlist_head head;
1042 struct cds_ja_inode_flag *iter_node_flag, *iter_dest_node_flag;
1043 int ret, i;
a2a7ff59 1044 struct cds_ja_inode_flag *created_nodes[JA_MAX_DEPTH];
5a9a87dd
MD
1045 int nr_created_nodes = 0;
1046
582a6ade
MD
1047 dbg_printf("Attach node at level %u (node %p, node_flag %p)\n",
1048 level, node, node_flag);
a2a7ff59 1049
5a9a87dd
MD
1050 assert(node);
1051 shadow_node = rcuja_shadow_lookup_lock(ja->ht, node);
1052 if (!shadow_node) {
2e313670 1053 ret = -EAGAIN;
5a9a87dd
MD
1054 goto end;
1055 }
1056 if (parent_node) {
1057 parent_shadow_node = rcuja_shadow_lookup_lock(ja->ht,
1058 parent_node);
1059 if (!parent_shadow_node) {
2e313670 1060 ret = -EAGAIN;
5a9a87dd
MD
1061 goto unlock_shadow;
1062 }
1063 }
1064
a2a7ff59 1065 /* Create new branch, starting from bottom */
5a9a87dd
MD
1066 CDS_INIT_HLIST_HEAD(&head);
1067 cds_hlist_add_head_rcu(&child_node->list, &head);
a2a7ff59 1068 iter_node_flag = (struct cds_ja_inode_flag *) head.next;
5a9a87dd 1069
a2a7ff59
MD
1070 /* Create shadow node for the leaf node */
1071 dbg_printf("leaf shadow node creation\n");
f07b240f
MD
1072 iter_shadow_node = rcuja_shadow_set(ja->ht,
1073 ja_node_ptr(iter_node_flag), NULL);
1074 if (!iter_shadow_node) {
1075 ret = -ENOMEM;
a2a7ff59 1076 goto check_error;
f07b240f 1077 }
a2a7ff59 1078 created_nodes[nr_created_nodes++] = iter_node_flag;
5a9a87dd 1079
79b41067
MD
1080 for (i = ja->tree_depth; i > (int) level; i--) {
1081 uint8_t iter_key;
1082
1083 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (ja->tree_depth - i)));
1084 dbg_printf("branch creation level %d, key %u\n",
1085 i - 1, (unsigned int) iter_key);
5a9a87dd
MD
1086 iter_dest_node_flag = NULL;
1087 ret = ja_node_set_nth(ja, &iter_dest_node_flag,
79b41067 1088 iter_key,
5a9a87dd
MD
1089 iter_node_flag,
1090 NULL);
1091 if (ret)
1092 goto check_error;
1093 created_nodes[nr_created_nodes++] = iter_dest_node_flag;
1094 iter_node_flag = iter_dest_node_flag;
1095 }
1096
79b41067
MD
1097 if (level > 1) {
1098 uint8_t iter_key;
1099
1100 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (ja->tree_depth - level)));
a2a7ff59
MD
1101 /* We need to use set_nth on the previous level. */
1102 iter_dest_node_flag = node_flag;
1103 ret = ja_node_set_nth(ja, &iter_dest_node_flag,
79b41067 1104 iter_key,
a2a7ff59
MD
1105 iter_node_flag,
1106 shadow_node);
1107 if (ret)
1108 goto check_error;
1109 created_nodes[nr_created_nodes++] = iter_dest_node_flag;
1110 iter_node_flag = iter_dest_node_flag;
1111 }
1112
5a9a87dd 1113 /* Publish new branch */
a2a7ff59
MD
1114 dbg_printf("Publish branch %p, replacing %p\n",
1115 iter_node_flag, *node_flag_ptr);
5a9a87dd
MD
1116 rcu_assign_pointer(*node_flag_ptr, iter_node_flag);
1117
1118 /* Success */
1119 ret = 0;
1120
1121check_error:
1122 if (ret) {
1123 for (i = 0; i < nr_created_nodes; i++) {
1124 int tmpret;
a2a7ff59
MD
1125 int flags;
1126
1127 flags = RCUJA_SHADOW_CLEAR_FREE_LOCK;
1128 if (i)
1129 flags |= RCUJA_SHADOW_CLEAR_FREE_NODE;
5a9a87dd
MD
1130 tmpret = rcuja_shadow_clear(ja->ht,
1131 ja_node_ptr(created_nodes[i]),
a2a7ff59
MD
1132 NULL,
1133 flags);
5a9a87dd
MD
1134 assert(!tmpret);
1135 }
1136 }
5a9a87dd
MD
1137 if (parent_shadow_node)
1138 rcuja_shadow_unlock(parent_shadow_node);
1139unlock_shadow:
1140 if (shadow_node)
1141 rcuja_shadow_unlock(shadow_node);
1142end:
1143 return ret;
1144}
1145
/*
 * Lock the hlist head shadow node mutex, and add node to list of
 * duplicates. Failure can happen if concurrent removal removes the last
 * node with the same key before we get the lock.
 * Return 0 on success, negative error value on failure.
 */
1152static
1153int ja_chain_node(struct cds_ja *ja,
1154 struct cds_hlist_head *head,
1155 struct cds_ja_node *node)
1156{
1157 struct cds_ja_shadow_node *shadow_node;
1158
1159 shadow_node = rcuja_shadow_lookup_lock(ja->ht,
1160 (struct cds_ja_inode *) head);
1161 if (!shadow_node)
2e313670 1162 return -EAGAIN;
5a9a87dd
MD
1163 cds_hlist_add_head_rcu(&node->list, head);
1164 rcuja_shadow_unlock(shadow_node);
1165 return 0;
1166}
1167
1168int cds_ja_add(struct cds_ja *ja, uint64_t key,
1169 struct cds_ja_node *new_node)
1170{
1171 unsigned int tree_depth, i;
5a9a87dd
MD
1172 struct cds_ja_inode_flag **node_flag_ptr; /* in parent */
1173 struct cds_ja_inode_flag *node_flag,
1174 *parent_node_flag,
1175 *parent2_node_flag;
1176 int ret;
1177
1178 if (caa_unlikely(key > ja->key_max))
1179 return -EINVAL;
1180 tree_depth = ja->tree_depth;
1181
1182retry:
a2a7ff59
MD
1183 dbg_printf("cds_ja_add attempt: key %" PRIu64 ", node %p\n",
1184 key, new_node);
5a9a87dd 1185 parent2_node_flag = NULL;
b0f74e47
MD
1186 parent_node_flag =
1187 (struct cds_ja_inode_flag *) &ja->root; /* Use root ptr address as key for mutex */
5a9a87dd 1188 node_flag_ptr = &ja->root;
35170a44 1189 node_flag = rcu_dereference(ja->root);
5a9a87dd
MD
1190
1191 /* Iterate on all internal levels */
a2a7ff59 1192 for (i = 1; i < tree_depth; i++) {
79b41067
MD
1193 uint8_t iter_key;
1194
582a6ade
MD
1195 dbg_printf("cds_ja_add iter node_flag_ptr %p node_flag %p\n",
1196 *node_flag_ptr, node_flag);
5a9a87dd
MD
1197 if (!ja_node_ptr(node_flag)) {
1198 ret = ja_attach_node(ja, node_flag_ptr,
1199 parent_node_flag, parent2_node_flag,
1200 key, i, new_node);
2e313670 1201 if (ret == -EAGAIN || ret == -EEXIST)
5a9a87dd
MD
1202 goto retry;
1203 else
1204 goto end;
1205 }
79b41067 1206 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1)));
5a9a87dd
MD
1207 parent2_node_flag = parent_node_flag;
1208 parent_node_flag = node_flag;
1209 node_flag = ja_node_get_nth(node_flag,
1210 &node_flag_ptr,
79b41067 1211 iter_key);
582a6ade
MD
1212 dbg_printf("cds_ja_add iter key lookup %u finds node_flag %p node_flag_ptr %p\n",
1213 (unsigned int) iter_key, node_flag, *node_flag_ptr);
5a9a87dd
MD
1214 }
1215
1216 /*
1217 * We reached bottom of tree, simply add node to last internal
1218 * level, or chain it if key is already present.
1219 */
1220 if (!ja_node_ptr(node_flag)) {
582a6ade
MD
1221 dbg_printf("cds_ja_add last node_flag_ptr %p node_flag %p\n",
1222 *node_flag_ptr, node_flag);
5a9a87dd
MD
1223 ret = ja_attach_node(ja, node_flag_ptr, parent_node_flag,
1224 parent2_node_flag, key, i, new_node);
1225 } else {
1226 ret = ja_chain_node(ja,
1227 (struct cds_hlist_head *) ja_node_ptr(node_flag),
1228 new_node);
1229 }
2e313670 1230 if (ret == -EAGAIN)
5a9a87dd
MD
1231 goto retry;
1232end:
1233 return ret;
b4540e8a
MD
1234}
1235
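/*
 * Illustrative sketch, not part of the original implementation:
 * inserting an item. The caller embeds a struct cds_ja_node in its own
 * structure and passes that embedded node; an existing entry with the
 * same key is not an error, the new node is simply chained as a
 * duplicate. "struct my_item" is hypothetical, and the call is assumed
 * to run under the RCU read-side lock, as for the other RCU data
 * structures of this library.
 */
#if 0	/* example only, not compiled */
static
int add_example(struct cds_ja *ja, uint64_t key, struct my_item *item)
{
	int ret;

	rcu_read_lock();
	ret = cds_ja_add(ja, key, &item->node);	/* -EINVAL if key > ja->key_max */
	rcu_read_unlock();
	return ret;
}
#endif
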
static
int ja_detach_node(struct cds_ja *ja,
		struct cds_ja_inode_flag **snapshot,
		int nr_snapshot,
		uint64_t key,
		struct cds_ja_node *node)
{
	assert(nr_snapshot == ja->tree_depth - 1);

	/*
	 * TODO: node detach is not implemented yet. Return a defined
	 * error instead of an uninitialized value so cds_ja_del()
	 * fails cleanly.
	 */
	return -ENOSYS;
}
1249
1250int cds_ja_del(struct cds_ja *ja, uint64_t key,
1251 struct cds_ja_node *node)
1252{
1253 unsigned int tree_depth, i;
1254 struct cds_ja_inode_flag *snapshot[JA_MAX_DEPTH];
1255 struct cds_ja_inode_flag *node_flag;
1256 int nr_snapshot = 0;
1257 int ret;
1258
1259 if (caa_unlikely(key > ja->key_max))
1260 return -EINVAL;
1261 tree_depth = ja->tree_depth;
1262
1263retry:
1264 dbg_printf("cds_ja_del attempt: key %" PRIu64 ", node %p\n",
1265 key, node);
1266
1267 /* snapshot for level 0 is only for shadow node lookup */
1268 snapshot[nr_snapshot++] = (struct cds_ja_inode_flag *) &ja->root;
1269 node_flag = rcu_dereference(ja->root);
1270
1271 /* Iterate on all internal levels */
1272 for (i = 1; i < tree_depth; i++) {
1273 uint8_t iter_key;
1274
1275 dbg_printf("cds_ja_del iter node_flag %p\n",
1276 node_flag);
1277 if (!ja_node_ptr(node_flag)) {
1278 return -ENOENT;
1279 }
1280 snapshot[nr_snapshot++] = node_flag;
1281 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1)));
1282 node_flag = ja_node_get_nth(node_flag,
1283 NULL,
1284 iter_key);
1285 dbg_printf("cds_ja_del iter key lookup %u finds node_flag %p\n",
1286 (unsigned int) iter_key, node_flag);
1287 }
1288
1289 /*
1290 * We reached bottom of tree, try to find the node we are trying
1291 * to remove. Fail if we cannot find it.
1292 */
1293 if (!ja_node_ptr(node_flag)) {
1294 return -ENOENT;
1295 } else {
1296 struct cds_hlist_head *hlist_head;
1297 struct cds_hlist_node *hlist_node;
1298 struct cds_ja_node *entry;
		int found = 0;
1300
1301 hlist_head = (struct cds_hlist_head *) ja_node_ptr(node_flag);
1302 cds_hlist_for_each_entry(entry,
1303 hlist_node,
1304 hlist_head,
1305 list) {
1306 if (entry == node) {
1307 found = 1;
1308 break;
1309 }
1310 }
1311 if (!found)
1312 return -ENOENT;
1313 ret = ja_detach_node(ja, snapshot, nr_snapshot, key, node);
1314 }
1315 if (ret == -EAGAIN)
1316 goto retry;
1317 return ret;
1318}
1319
b4540e8a
MD
1320struct cds_ja *_cds_ja_new(unsigned int key_bits,
1321 const struct rcu_flavor_struct *flavor)
be9a7474
MD
1322{
1323 struct cds_ja *ja;
b0f74e47 1324 int ret;
f07b240f 1325 struct cds_ja_shadow_node *root_shadow_node;
be9a7474
MD
1326
1327 ja = calloc(sizeof(*ja), 1);
1328 if (!ja)
1329 goto ja_error;
b4540e8a
MD
1330
1331 switch (key_bits) {
1332 case 8:
1333 ja->key_max = UINT8_MAX;
1334 break;
1335 case 16:
1336 ja->key_max = UINT16_MAX;
1337 break;
1338 case 32:
1339 ja->key_max = UINT32_MAX;
1340 break;
1341 case 64:
1342 ja->key_max = UINT64_MAX;
1343 break;
1344 default:
1345 goto check_error;
1346 }
1347
be9a7474 1348 /* ja->root is NULL */
5a9a87dd 1349 /* tree_depth 0 is for pointer to root node */
582a6ade 1350 ja->tree_depth = (key_bits >> JA_LOG2_BITS_PER_BYTE) + 1;
a2a7ff59 1351 assert(ja->tree_depth <= JA_MAX_DEPTH);
be9a7474
MD
1352 ja->ht = rcuja_create_ht(flavor);
1353 if (!ja->ht)
1354 goto ht_error;
b0f74e47
MD
1355
1356 /*
1357 * Note: we should not free this node until judy array destroy.
1358 */
f07b240f 1359 root_shadow_node = rcuja_shadow_set(ja->ht,
b0f74e47
MD
1360 ja_node_ptr((struct cds_ja_inode_flag *) &ja->root),
1361 NULL);
f07b240f
MD
1362 if (!root_shadow_node) {
1363 ret = -ENOMEM;
b0f74e47 1364 goto ht_node_error;
f07b240f 1365 }
582a6ade 1366 root_shadow_node->is_root = 1;
b0f74e47 1367
be9a7474
MD
1368 return ja;
1369
b0f74e47
MD
1370ht_node_error:
1371 ret = rcuja_delete_ht(ja->ht);
1372 assert(!ret);
be9a7474 1373ht_error:
b4540e8a 1374check_error:
be9a7474
MD
1375 free(ja);
1376ja_error:
1377 return NULL;
1378}
1379
/*
 * There should be no more concurrent adds to the judy array while it is
 * being destroyed (ensured by the caller).
 */
1384int cds_ja_destroy(struct cds_ja *ja)
1385{
b4540e8a
MD
1386 int ret;
1387
be9a7474
MD
1388 rcuja_shadow_prune(ja->ht,
1389 RCUJA_SHADOW_CLEAR_FREE_NODE | RCUJA_SHADOW_CLEAR_FREE_LOCK);
b4540e8a
MD
1390 ret = rcuja_delete_ht(ja->ht);
1391 if (ret)
1392 return ret;
f07b240f
MD
1393 if (uatomic_read(&ja->nr_fallback))
1394 fprintf(stderr,
1395 "[warning] RCU Judy Array used %lu fallback node(s)\n",
1396 uatomic_read(&ja->nr_fallback));
b4540e8a 1397 free(ja);
41975c12 1398 return 0;
be9a7474 1399}