1/*
2 * rcuja/rcuja.c
3 *
4 * Userspace RCU library - RCU Judy Array
5 *
78ed159a 6 * Copyright (C) 2000 - 2002 Hewlett-Packard Company
170e1186 7 * Copyright 2012-2013 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 *
9 * This library is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public
11 * License as published by the Free Software Foundation; either
12 * version 2.1 of the License, or (at your option) any later version.
13 *
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
18 *
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with this library; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 */
23
195e72d3 24#define _LGPL_SOURCE
e5227865 25#include <stdint.h>
8e519e3c 26#include <errno.h>
d68c6810 27#include <limits.h>
b1a90ce3 28#include <string.h>
61009379 29#include <urcu/rcuja.h>
30#include <urcu/compiler.h>
31#include <urcu/arch.h>
32#include <assert.h>
8e519e3c 33#include <urcu-pointer.h>
f07b240f 34#include <urcu/uatomic.h>
b4540e8a 35#include <stdint.h>
8e519e3c 36
37#include "rcuja-internal.h"
38
#ifndef abs_int
#define abs_int(a)	((int) (a) > 0 ? (int) (a) : -((int) (a)))
#endif
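/*
 * abs_int() computes a plain integer absolute value, e.g. abs_int(-3) == 3.
 * It is used below to compare bit-distribution distances in
 * ja_node_sum_distribution_1d().
 */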
42
d96bfb0d 43enum cds_ja_type_class {
e5227865 44 RCU_JA_LINEAR = 0, /* Type A */
fd800776
MD
45 /* 32-bit: 1 to 25 children, 8 to 128 bytes */
46 /* 64-bit: 1 to 28 children, 16 to 256 bytes */
47 RCU_JA_POOL = 1, /* Type B */
48 /* 32-bit: 26 to 100 children, 256 to 512 bytes */
49 /* 64-bit: 29 to 112 children, 512 to 1024 bytes */
e5227865 50 RCU_JA_PIGEON = 2, /* Type C */
fd800776
MD
51 /* 32-bit: 101 to 256 children, 1024 bytes */
52 /* 64-bit: 113 to 256 children, 2048 bytes */
e5227865 53 /* Leaf nodes are implicit from their height in the tree */
1db4943c 54 RCU_JA_NR_TYPES,
e1db2db5
MD
55
56 RCU_JA_NULL, /* not an encoded type, but keeps code regular */
e5227865
MD
57};
58
d96bfb0d
MD
59struct cds_ja_type {
60 enum cds_ja_type_class type_class;
8e519e3c
MD
61 uint16_t min_child; /* minimum number of children: 1 to 256 */
62 uint16_t max_child; /* maximum number of children: 1 to 256 */
63 uint16_t max_linear_child; /* per-pool max nr. children: 1 to 256 */
64 uint16_t order; /* node size is (1 << order), in bytes */
fd800776
MD
65 uint16_t nr_pool_order; /* number of pools */
66 uint16_t pool_size_order; /* pool size */
e5227865
MD
67};
68
/*
 * Iteration on the array to find the right node size for the number of
 * children stops when it reaches .max_child == 256 (this is the largest
 * possible node size, which contains 256 children).
 * The min_child overlaps with the previous max_child to provide a
 * hysteresis loop for reallocation under patterns of cyclic add/removal
 * within the same node.
 * The node index within the following arrays is represented on 3 bits.
 * It identifies the node type, min/max number of children, and the size
 * order.
 * The max_child values for the RCU_JA_POOL below result from statistical
 * approximation: over a million populations, the max_child covers between
 * 97% and 99% of the populations generated. Therefore, a fallback should
 * exist to cover the rare extreme population imbalance cases, but it will
 * not have a major impact on speed or space consumption, since those are
 * rare cases.
 */
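/*
 * Illustration (based on the 64-bit table below): a node holding 28
 * children fits type 4 (linear, max_child 28). Adding a 29th child moves
 * it to type 5 (pool, min_child 22, max_child 54). Because type 5's
 * min_child (22) overlaps type 4's max_child (28), removing that child
 * again does not immediately shrink the node back to type 4, which avoids
 * reallocation ping-pong around the boundary.
 */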
e5227865 86
d68c6810
MD
87#if (CAA_BITS_PER_LONG < 64)
88/* 32-bit pointers */
1db4943c
MD
89enum {
90 ja_type_0_max_child = 1,
91 ja_type_1_max_child = 3,
92 ja_type_2_max_child = 6,
93 ja_type_3_max_child = 12,
94 ja_type_4_max_child = 25,
95 ja_type_5_max_child = 48,
96 ja_type_6_max_child = 92,
97 ja_type_7_max_child = 256,
e1db2db5 98 ja_type_8_max_child = 0, /* NULL */
1db4943c
MD
99};
100
8e519e3c
MD
101enum {
102 ja_type_0_max_linear_child = 1,
103 ja_type_1_max_linear_child = 3,
104 ja_type_2_max_linear_child = 6,
105 ja_type_3_max_linear_child = 12,
106 ja_type_4_max_linear_child = 25,
107 ja_type_5_max_linear_child = 24,
108 ja_type_6_max_linear_child = 23,
109};
110
1db4943c
MD
111enum {
112 ja_type_5_nr_pool_order = 1,
113 ja_type_6_nr_pool_order = 2,
114};
115
d96bfb0d 116const struct cds_ja_type ja_types[] = {
8e519e3c
MD
117 { .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_0_max_child, .max_linear_child = ja_type_0_max_linear_child, .order = 3, },
118 { .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_1_max_child, .max_linear_child = ja_type_1_max_linear_child, .order = 4, },
119 { .type_class = RCU_JA_LINEAR, .min_child = 3, .max_child = ja_type_2_max_child, .max_linear_child = ja_type_2_max_linear_child, .order = 5, },
120 { .type_class = RCU_JA_LINEAR, .min_child = 4, .max_child = ja_type_3_max_child, .max_linear_child = ja_type_3_max_linear_child, .order = 6, },
121 { .type_class = RCU_JA_LINEAR, .min_child = 10, .max_child = ja_type_4_max_child, .max_linear_child = ja_type_4_max_linear_child, .order = 7, },
e5227865 122
fd800776 123 /* Pools may fill sooner than max_child */
1cee749c 124 /* This pool is hardcoded at index 5. See ja_node_ptr(). */
8e519e3c 125 { .type_class = RCU_JA_POOL, .min_child = 20, .max_child = ja_type_5_max_child, .max_linear_child = ja_type_5_max_linear_child, .order = 8, .nr_pool_order = ja_type_5_nr_pool_order, .pool_size_order = 7, },
1cee749c 126 /* This pool is hardcoded at index 6. See ja_node_ptr(). */
8e519e3c 127 { .type_class = RCU_JA_POOL, .min_child = 45, .max_child = ja_type_6_max_child, .max_linear_child = ja_type_6_max_linear_child, .order = 9, .nr_pool_order = ja_type_6_nr_pool_order, .pool_size_order = 7, },
3d45251f
MD
128
129 /*
b1a90ce3
MD
130 * Upon node removal below min_child, if child pool is filled
131 * beyond capacity, we roll back to pigeon.
3d45251f 132 */
58c16c03 133 { .type_class = RCU_JA_PIGEON, .min_child = 83, .max_child = ja_type_7_max_child, .order = 10, },
e1db2db5
MD
134
135 { .type_class = RCU_JA_NULL, .min_child = 0, .max_child = ja_type_8_max_child, },
d68c6810 136};
d68c6810
MD
137#else /* !(CAA_BITS_PER_LONG < 64) */
138/* 64-bit pointers */
1db4943c
MD
139enum {
140 ja_type_0_max_child = 1,
141 ja_type_1_max_child = 3,
142 ja_type_2_max_child = 7,
143 ja_type_3_max_child = 14,
144 ja_type_4_max_child = 28,
145 ja_type_5_max_child = 54,
146 ja_type_6_max_child = 104,
147 ja_type_7_max_child = 256,
e1db2db5 148 ja_type_8_max_child = 256,
1db4943c
MD
149};
150
8e519e3c
MD
151enum {
152 ja_type_0_max_linear_child = 1,
153 ja_type_1_max_linear_child = 3,
154 ja_type_2_max_linear_child = 7,
155 ja_type_3_max_linear_child = 14,
156 ja_type_4_max_linear_child = 28,
157 ja_type_5_max_linear_child = 27,
158 ja_type_6_max_linear_child = 26,
159};
160
1db4943c
MD
161enum {
162 ja_type_5_nr_pool_order = 1,
163 ja_type_6_nr_pool_order = 2,
164};
165
d96bfb0d 166const struct cds_ja_type ja_types[] = {
8e519e3c
MD
167 { .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_0_max_child, .max_linear_child = ja_type_0_max_linear_child, .order = 4, },
168 { .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_1_max_child, .max_linear_child = ja_type_1_max_linear_child, .order = 5, },
169 { .type_class = RCU_JA_LINEAR, .min_child = 3, .max_child = ja_type_2_max_child, .max_linear_child = ja_type_2_max_linear_child, .order = 6, },
170 { .type_class = RCU_JA_LINEAR, .min_child = 5, .max_child = ja_type_3_max_child, .max_linear_child = ja_type_3_max_linear_child, .order = 7, },
171 { .type_class = RCU_JA_LINEAR, .min_child = 10, .max_child = ja_type_4_max_child, .max_linear_child = ja_type_4_max_linear_child, .order = 8, },
e5227865 172
3d45251f 173 /* Pools may fill sooner than max_child. */
1cee749c 174 /* This pool is hardcoded at index 5. See ja_node_ptr(). */
8e519e3c 175 { .type_class = RCU_JA_POOL, .min_child = 22, .max_child = ja_type_5_max_child, .max_linear_child = ja_type_5_max_linear_child, .order = 9, .nr_pool_order = ja_type_5_nr_pool_order, .pool_size_order = 8, },
1cee749c 176 /* This pool is hardcoded at index 6. See ja_node_ptr(). */
8e519e3c 177 { .type_class = RCU_JA_POOL, .min_child = 51, .max_child = ja_type_6_max_child, .max_linear_child = ja_type_6_max_linear_child, .order = 10, .nr_pool_order = ja_type_6_nr_pool_order, .pool_size_order = 8, },
e5227865 178
3d45251f 179 /*
b1a90ce3
MD
180 * Upon node removal below min_child, if child pool is filled
181 * beyond capacity, we roll back to pigeon.
3d45251f 182 */
64457f6c 183 { .type_class = RCU_JA_PIGEON, .min_child = 95, .max_child = ja_type_7_max_child, .order = 11, },
e1db2db5
MD
184
185 { .type_class = RCU_JA_NULL, .min_child = 0, .max_child = ja_type_8_max_child, },
e5227865 186};
#endif /* !(CAA_BITS_PER_LONG < 64) */
e5227865 188
1db4943c
MD
189static inline __attribute__((unused))
190void static_array_size_check(void)
191{
e1db2db5 192 CAA_BUILD_BUG_ON(CAA_ARRAY_SIZE(ja_types) < JA_TYPE_MAX_NR);
1db4943c
MD
193}
194
/*
 * The cds_ja_inode contains the compressed node data needed on the read
 * side. For linear and pool node configurations, it starts with a byte
 * counting the number of children in the node. Then, the node-specific
 * data is placed.
 * The node mutex, if any is needed to protect concurrent updates of each
 * node, is placed in a separate hash table indexed by node address.
 * For the pigeon configuration, the number of children is also kept in a
 * separate hash table, indexed by node address, because it is only
 * required for updates.
 */
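/*
 * Layout sketch (assuming the 64-bit type 1 linear node, with
 * max_linear_child == 3): u.data[0] holds nr_child, u.data[1..3] hold the
 * child key bytes, then padding up to the next pointer-size boundary,
 * followed by the 3 child pointers. Pool nodes repeat this linear layout
 * once per sub-pool within the same allocation.
 */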
1db4943c 206
ff38c745
MD
207#define DECLARE_LINEAR_NODE(index) \
208 struct { \
209 uint8_t nr_child; \
210 uint8_t child_value[ja_type_## index ##_max_linear_child]; \
b4540e8a 211 struct cds_ja_inode_flag *child_ptr[ja_type_## index ##_max_linear_child]; \
ff38c745
MD
212 }
213
214#define DECLARE_POOL_NODE(index) \
215 struct { \
216 struct { \
217 uint8_t nr_child; \
218 uint8_t child_value[ja_type_## index ##_max_linear_child]; \
b4540e8a 219 struct cds_ja_inode_flag *child_ptr[ja_type_## index ##_max_linear_child]; \
ff38c745
MD
220 } linear[1U << ja_type_## index ##_nr_pool_order]; \
221 }
1db4943c 222
b4540e8a 223struct cds_ja_inode {
1db4943c
MD
224 union {
225 /* Linear configuration */
226 DECLARE_LINEAR_NODE(0) conf_0;
227 DECLARE_LINEAR_NODE(1) conf_1;
228 DECLARE_LINEAR_NODE(2) conf_2;
229 DECLARE_LINEAR_NODE(3) conf_3;
230 DECLARE_LINEAR_NODE(4) conf_4;
231
232 /* Pool configuration */
233 DECLARE_POOL_NODE(5) conf_5;
234 DECLARE_POOL_NODE(6) conf_6;
235
236 /* Pigeon configuration */
237 struct {
b4540e8a 238 struct cds_ja_inode_flag *child[ja_type_7_max_child];
1db4943c
MD
239 } conf_7;
240 /* data aliasing nodes for computed accesses */
b4540e8a 241 uint8_t data[sizeof(struct cds_ja_inode_flag *) * ja_type_7_max_child];
1db4943c 242 } u;
e5227865
MD
243};
244
2e313670 245enum ja_recompact {
19ddcd04
MD
246 JA_RECOMPACT_ADD_SAME,
247 JA_RECOMPACT_ADD_NEXT,
2e313670
MD
248 JA_RECOMPACT_DEL,
249};
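/*
 * JA_RECOMPACT_ADD_SAME: re-pack into the same size class to reclaim
 * NULL-ed slots before adding (triggered by -ERANGE).
 * JA_RECOMPACT_ADD_NEXT: grow to the next node type when the current one
 * is full (triggered by -ENOSPC).
 * JA_RECOMPACT_DEL: shrink (or free) the node after a child removal.
 */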
250
b023ba9f
MD
251enum ja_lookup_inequality {
252 JA_LOOKUP_BE,
253 JA_LOOKUP_AE,
254};
255
256enum ja_direction {
257 JA_LEFT,
258 JA_RIGHT,
259 JA_LEFTMOST,
260 JA_RIGHTMOST,
261};
262
b1a90ce3
MD
263static
264struct cds_ja_inode *_ja_node_mask_ptr(struct cds_ja_inode_flag *node)
265{
266 return (struct cds_ja_inode *) (((unsigned long) node) & JA_PTR_MASK);
267}
268
269unsigned long ja_node_type(struct cds_ja_inode_flag *node)
270{
271 unsigned long type;
272
273 if (_ja_node_mask_ptr(node) == NULL) {
274 return NODE_INDEX_NULL;
275 }
276 type = (unsigned int) ((unsigned long) node & JA_TYPE_MASK);
277 assert(type < (1UL << JA_TYPE_BITS));
278 return type;
279}
280
354981c2
MD
281static
282struct cds_ja_inode *alloc_cds_ja_node(struct cds_ja *ja,
283 const struct cds_ja_type *ja_type)
e5227865 284{
b1a90ce3
MD
285 size_t len = 1U << ja_type->order;
286 void *p;
287 int ret;
288
289 ret = posix_memalign(&p, len, len);
290 if (ret || !p) {
291 return NULL;
292 }
293 memset(p, 0, len);
354981c2 294 uatomic_inc(&ja->nr_nodes_allocated);
b1a90ce3 295 return p;
e5227865
MD
296}
297
354981c2 298void free_cds_ja_node(struct cds_ja *ja, struct cds_ja_inode *node)
e5227865
MD
299{
300 free(node);
48cbe001 301 if (node)
354981c2 302 uatomic_inc(&ja->nr_nodes_freed);
e5227865
MD
303}
304
d68c6810
MD
305#define __JA_ALIGN_MASK(v, mask) (((v) + (mask)) & ~(mask))
306#define JA_ALIGN(v, align) __JA_ALIGN_MASK(v, (typeof(v)) (align) - 1)
307#define __JA_FLOOR_MASK(v, mask) ((v) & ~(mask))
308#define JA_FLOOR(v, align) __JA_FLOOR_MASK(v, (typeof(v)) (align) - 1)
309
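/*
 * For example, JA_ALIGN(13, 8) == 16 and JA_FLOOR(13, 8) == 8.
 * align_ptr_size() below uses JA_ALIGN to round a byte pointer up to the
 * next sizeof(void *) boundary.
 */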
310static
1db4943c 311uint8_t *align_ptr_size(uint8_t *ptr)
d68c6810 312{
1db4943c 313 return (uint8_t *) JA_ALIGN((unsigned long) ptr, sizeof(void *));
d68c6810
MD
314}
315
11c5e016 316static
d96bfb0d 317uint8_t ja_linear_node_get_nr_child(const struct cds_ja_type *type,
b4540e8a 318 struct cds_ja_inode *node)
11c5e016
MD
319{
320 assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
2e313670 321 return rcu_dereference(node->u.data[0]);
11c5e016
MD
322}
323
/*
 * The order in which values and pointers are set does not matter: if a
 * value is missing, we return NULL. If a value is there, but its
 * associated pointer is still NULL, we return NULL too.
 */
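/*
 * Concretely: an updater publishes the child pointer before bumping
 * nr_child (see ja_linear_node_set_nth()), and a remover only NULLs the
 * pointer (see ja_linear_node_clear_ptr()), so a concurrent reader either
 * misses the entry or sees a NULL pointer; both cases are reported as a
 * failed lookup here.
 */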
d68c6810 329static
b4540e8a
MD
330struct cds_ja_inode_flag *ja_linear_node_get_nth(const struct cds_ja_type *type,
331 struct cds_ja_inode *node,
b0ca2d21 332 struct cds_ja_inode_flag ***node_flag_ptr,
8e519e3c 333 uint8_t n)
d68c6810
MD
334{
335 uint8_t nr_child;
336 uint8_t *values;
b4540e8a
MD
337 struct cds_ja_inode_flag **pointers;
338 struct cds_ja_inode_flag *ptr;
d68c6810
MD
339 unsigned int i;
340
8e519e3c 341 assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
d68c6810 342
11c5e016 343 nr_child = ja_linear_node_get_nr_child(type, node);
13a7f5a6 344 cmm_smp_rmb(); /* read nr_child before values and pointers */
8e519e3c
MD
345 assert(nr_child <= type->max_linear_child);
346 assert(type->type_class != RCU_JA_LINEAR || nr_child >= type->min_child);
d68c6810 347
1db4943c 348 values = &node->u.data[1];
d68c6810 349 for (i = 0; i < nr_child; i++) {
13a7f5a6 350 if (CMM_LOAD_SHARED(values[i]) == n)
d68c6810
MD
351 break;
352 }
b0ca2d21
MD
353 if (i >= nr_child) {
354 if (caa_unlikely(node_flag_ptr))
355 *node_flag_ptr = NULL;
d68c6810 356 return NULL;
b0ca2d21 357 }
b4540e8a 358 pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
13a7f5a6 359 ptr = rcu_dereference(pointers[i]);
b0ca2d21
MD
360 if (caa_unlikely(node_flag_ptr))
361 *node_flag_ptr = &pointers[i];
d68c6810
MD
362 return ptr;
363}
364
291b2543 365static
b023ba9f 366struct cds_ja_inode_flag *ja_linear_node_get_direction(const struct cds_ja_type *type,
291b2543 367 struct cds_ja_inode *node,
36305a3d 368 int n, uint8_t *result_key,
b023ba9f 369 enum ja_direction dir)
291b2543
MD
370{
371 uint8_t nr_child;
372 uint8_t *values;
373 struct cds_ja_inode_flag **pointers;
86d700e9 374 struct cds_ja_inode_flag *ptr = NULL;
b023ba9f
MD
375 unsigned int i;
376 int match_idx = -1, match_v;
291b2543
MD
377
378 assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
b023ba9f
MD
379 assert(dir == JA_LEFT || dir == JA_RIGHT);
380
381 if (dir == JA_LEFT) {
382 match_v = -1;
383 } else {
384 match_v = JA_ENTRY_PER_NODE;
385 }
291b2543
MD
386
387 nr_child = ja_linear_node_get_nr_child(type, node);
388 cmm_smp_rmb(); /* read nr_child before values and pointers */
389 assert(nr_child <= type->max_linear_child);
390 assert(type->type_class != RCU_JA_LINEAR || nr_child >= type->min_child);
391
392 values = &node->u.data[1];
86d700e9 393 pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
291b2543
MD
394 for (i = 0; i < nr_child; i++) {
395 unsigned int v;
396
397 v = CMM_LOAD_SHARED(values[i]);
86d700e9
MD
398 ptr = CMM_LOAD_SHARED(pointers[i]);
399 if (!ptr)
400 continue;
b023ba9f
MD
401 if (dir == JA_LEFT) {
402 if ((int) v < n && (int) v > match_v) {
403 match_v = v;
404 match_idx = i;
405 }
406 } else {
407 if ((int) v > n && (int) v < match_v) {
408 match_v = v;
409 match_idx = i;
410 }
291b2543
MD
411 }
412 }
b023ba9f
MD
413
414 if (match_idx < 0) {
291b2543
MD
415 return NULL;
416 }
b023ba9f
MD
417 assert(match_v >= 0 && match_v < JA_ENTRY_PER_NODE);
418
36305a3d 419 *result_key = (uint8_t) match_v;
291b2543
MD
420 ptr = rcu_dereference(pointers[match_idx]);
421 return ptr;
422}
423
11c5e016 424static
5a9a87dd 425void ja_linear_node_get_ith_pos(const struct cds_ja_type *type,
b4540e8a 426 struct cds_ja_inode *node,
11c5e016
MD
427 uint8_t i,
428 uint8_t *v,
b4540e8a 429 struct cds_ja_inode_flag **iter)
11c5e016
MD
430{
431 uint8_t *values;
b4540e8a 432 struct cds_ja_inode_flag **pointers;
11c5e016
MD
433
434 assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
435 assert(i < ja_linear_node_get_nr_child(type, node));
436
437 values = &node->u.data[1];
438 *v = values[i];
b4540e8a 439 pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
11c5e016
MD
440 *iter = pointers[i];
441}
442
d68c6810 443static
b4540e8a
MD
444struct cds_ja_inode_flag *ja_pool_node_get_nth(const struct cds_ja_type *type,
445 struct cds_ja_inode *node,
b1a90ce3 446 struct cds_ja_inode_flag *node_flag,
b0ca2d21 447 struct cds_ja_inode_flag ***node_flag_ptr,
8e519e3c 448 uint8_t n)
d68c6810 449{
b4540e8a 450 struct cds_ja_inode *linear;
d68c6810 451
fd800776 452 assert(type->type_class == RCU_JA_POOL);
b1a90ce3
MD
453
454 switch (type->nr_pool_order) {
455 case 1:
456 {
457 unsigned long bitsel, index;
458
459 bitsel = ja_node_pool_1d_bitsel(node_flag);
460 assert(bitsel < CHAR_BIT);
19ddcd04 461 index = ((unsigned long) n >> bitsel) & 0x1;
b1a90ce3
MD
462 linear = (struct cds_ja_inode *) &node->u.data[index << type->pool_size_order];
463 break;
464 }
465 case 2:
466 {
19ddcd04
MD
467 unsigned long bitsel[2], index[2], rindex;
468
469 ja_node_pool_2d_bitsel(node_flag, bitsel);
470 assert(bitsel[0] < CHAR_BIT);
471 assert(bitsel[1] < CHAR_BIT);
472 index[0] = ((unsigned long) n >> bitsel[0]) & 0x1;
473 index[0] <<= 1;
474 index[1] = ((unsigned long) n >> bitsel[1]) & 0x1;
475 rindex = index[0] | index[1];
476 linear = (struct cds_ja_inode *) &node->u.data[rindex << type->pool_size_order];
b1a90ce3
MD
477 break;
478 }
479 default:
480 linear = NULL;
481 assert(0);
482 }
48cbe001 483 return ja_linear_node_get_nth(type, linear, node_flag_ptr, n);
d68c6810
MD
484}
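/*
 * Worked example (1-D pool, hypothetical values): with bitsel == 3 and
 * n == 0x2a (binary 00101010), bit 3 of n is 1, so index == 1 and the
 * lookup proceeds in the second linear sub-pool, located at
 * &node->u.data[1 << pool_size_order]. The 2-D case combines two such
 * bits into a sub-pool index between 0 and 3.
 */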
485
11c5e016 486static
b4540e8a
MD
487struct cds_ja_inode *ja_pool_node_get_ith_pool(const struct cds_ja_type *type,
488 struct cds_ja_inode *node,
11c5e016
MD
489 uint8_t i)
490{
491 assert(type->type_class == RCU_JA_POOL);
b4540e8a 492 return (struct cds_ja_inode *)
11c5e016
MD
493 &node->u.data[(unsigned int) i << type->pool_size_order];
494}
495
291b2543 496static
b023ba9f 497struct cds_ja_inode_flag *ja_pool_node_get_direction(const struct cds_ja_type *type,
291b2543 498 struct cds_ja_inode *node,
36305a3d 499 int n, uint8_t *result_key,
b023ba9f 500 enum ja_direction dir)
291b2543
MD
501{
502 unsigned int pool_nr;
b023ba9f 503 int match_v;
291b2543
MD
504 struct cds_ja_inode_flag *match_node_flag = NULL;
505
506 assert(type->type_class == RCU_JA_POOL);
b023ba9f
MD
507 assert(dir == JA_LEFT || dir == JA_RIGHT);
508
509 if (dir == JA_LEFT) {
510 match_v = -1;
511 } else {
512 match_v = JA_ENTRY_PER_NODE;
513 }
291b2543
MD
514
515 for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) {
516 struct cds_ja_inode *pool =
517 ja_pool_node_get_ith_pool(type,
518 node, pool_nr);
519 uint8_t nr_child =
520 ja_linear_node_get_nr_child(type, pool);
521 unsigned int j;
522
523 for (j = 0; j < nr_child; j++) {
524 struct cds_ja_inode_flag *iter;
525 uint8_t v;
526
527 ja_linear_node_get_ith_pos(type, pool,
528 j, &v, &iter);
529 if (!iter)
530 continue;
b023ba9f
MD
531 if (dir == JA_LEFT) {
532 if ((int) v < n && (int) v > match_v) {
533 match_v = v;
534 match_node_flag = iter;
535 }
536 } else {
537 if ((int) v > n && (int) v < match_v) {
538 match_v = v;
539 match_node_flag = iter;
540 }
291b2543
MD
541 }
542 }
543 }
36305a3d
MD
544 if (match_node_flag)
545 *result_key = (uint8_t) match_v;
291b2543
MD
546 return match_node_flag;
547}
548
d68c6810 549static
b4540e8a
MD
550struct cds_ja_inode_flag *ja_pigeon_node_get_nth(const struct cds_ja_type *type,
551 struct cds_ja_inode *node,
b0ca2d21 552 struct cds_ja_inode_flag ***node_flag_ptr,
8e519e3c 553 uint8_t n)
d68c6810 554{
48cbe001
MD
555 struct cds_ja_inode_flag **child_node_flag_ptr;
556 struct cds_ja_inode_flag *child_node_flag;
5a9a87dd 557
d68c6810 558 assert(type->type_class == RCU_JA_PIGEON);
48cbe001
MD
559 child_node_flag_ptr = &((struct cds_ja_inode_flag **) node->u.data)[n];
560 child_node_flag = rcu_dereference(*child_node_flag_ptr);
582a6ade 561 dbg_printf("ja_pigeon_node_get_nth child_node_flag_ptr %p\n",
48cbe001 562 child_node_flag_ptr);
b0ca2d21 563 if (caa_unlikely(node_flag_ptr))
48cbe001
MD
564 *node_flag_ptr = child_node_flag_ptr;
565 return child_node_flag;
d68c6810
MD
566}
567
291b2543 568static
b023ba9f 569struct cds_ja_inode_flag *ja_pigeon_node_get_direction(const struct cds_ja_type *type,
291b2543 570 struct cds_ja_inode *node,
36305a3d 571 int n, uint8_t *result_key,
b023ba9f 572 enum ja_direction dir)
291b2543
MD
573{
574 struct cds_ja_inode_flag **child_node_flag_ptr;
575 struct cds_ja_inode_flag *child_node_flag;
576 int i;
577
578 assert(type->type_class == RCU_JA_PIGEON);
b023ba9f
MD
579 assert(dir == JA_LEFT || dir == JA_RIGHT);
580
581 if (dir == JA_LEFT) {
582 /* n - 1 is first value left of n */
583 for (i = n - 1; i >= 0; i--) {
584 child_node_flag_ptr = &((struct cds_ja_inode_flag **) node->u.data)[i];
585 child_node_flag = rcu_dereference(*child_node_flag_ptr);
586 if (child_node_flag) {
587 dbg_printf("ja_pigeon_node_get_left child_node_flag %p\n",
588 child_node_flag);
53f943b0 589 *result_key = (uint8_t) i;
b023ba9f
MD
590 return child_node_flag;
591 }
592 }
593 } else {
594 /* n + 1 is first value right of n */
595 for (i = n + 1; i < JA_ENTRY_PER_NODE; i++) {
596 child_node_flag_ptr = &((struct cds_ja_inode_flag **) node->u.data)[i];
597 child_node_flag = rcu_dereference(*child_node_flag_ptr);
598 if (child_node_flag) {
599 dbg_printf("ja_pigeon_node_get_right child_node_flag %p\n",
600 child_node_flag);
53f943b0 601 *result_key = (uint8_t) i;
b023ba9f
MD
602 return child_node_flag;
603 }
291b2543
MD
604 }
605 }
606 return NULL;
607}
608
2e313670
MD
609static
610struct cds_ja_inode_flag *ja_pigeon_node_get_ith_pos(const struct cds_ja_type *type,
611 struct cds_ja_inode *node,
612 uint8_t i)
613{
48cbe001 614 return ja_pigeon_node_get_nth(type, node, NULL, i);
2e313670
MD
615}
616
13a7f5a6
MD
617/*
618 * ja_node_get_nth: get nth item from a node.
619 * node_flag is already rcu_dereference'd.
620 */
d68c6810 621static
b62a8d0c 622struct cds_ja_inode_flag *ja_node_get_nth(struct cds_ja_inode_flag *node_flag,
b0ca2d21 623 struct cds_ja_inode_flag ***node_flag_ptr,
8e519e3c 624 uint8_t n)
d68c6810
MD
625{
626 unsigned int type_index;
b4540e8a 627 struct cds_ja_inode *node;
d96bfb0d 628 const struct cds_ja_type *type;
d68c6810 629
d68c6810 630 node = ja_node_ptr(node_flag);
5a9a87dd 631 assert(node != NULL);
d68c6810
MD
632 type_index = ja_node_type(node_flag);
633 type = &ja_types[type_index];
634
635 switch (type->type_class) {
636 case RCU_JA_LINEAR:
5a9a87dd 637 return ja_linear_node_get_nth(type, node,
b62a8d0c 638 node_flag_ptr, n);
fd800776 639 case RCU_JA_POOL:
b1a90ce3 640 return ja_pool_node_get_nth(type, node, node_flag,
b62a8d0c 641 node_flag_ptr, n);
d68c6810 642 case RCU_JA_PIGEON:
5a9a87dd 643 return ja_pigeon_node_get_nth(type, node,
b62a8d0c 644 node_flag_ptr, n);
d68c6810
MD
645 default:
646 assert(0);
647 return (void *) -1UL;
648 }
649}
650
291b2543 651static
b023ba9f 652struct cds_ja_inode_flag *ja_node_get_direction(struct cds_ja_inode_flag *node_flag,
36305a3d 653 int n, uint8_t *result_key,
b023ba9f 654 enum ja_direction dir)
291b2543
MD
655{
656 unsigned int type_index;
657 struct cds_ja_inode *node;
658 const struct cds_ja_type *type;
659
660 node = ja_node_ptr(node_flag);
661 assert(node != NULL);
662 type_index = ja_node_type(node_flag);
663 type = &ja_types[type_index];
664
665 switch (type->type_class) {
666 case RCU_JA_LINEAR:
36305a3d 667 return ja_linear_node_get_direction(type, node, n, result_key, dir);
291b2543 668 case RCU_JA_POOL:
36305a3d 669 return ja_pool_node_get_direction(type, node, n, result_key, dir);
291b2543 670 case RCU_JA_PIGEON:
36305a3d 671 return ja_pigeon_node_get_direction(type, node, n, result_key, dir);
291b2543
MD
672 default:
673 assert(0);
674 return (void *) -1UL;
675 }
676}
677
678static
b023ba9f 679struct cds_ja_inode_flag *ja_node_get_leftright(struct cds_ja_inode_flag *node_flag,
36305a3d 680 unsigned int n, uint8_t *result_key,
b023ba9f 681 enum ja_direction dir)
291b2543 682{
36305a3d 683 return ja_node_get_direction(node_flag, n, result_key, dir);
b023ba9f
MD
684}
685
686static
687struct cds_ja_inode_flag *ja_node_get_minmax(struct cds_ja_inode_flag *node_flag,
36305a3d 688 uint8_t *result_key,
b023ba9f
MD
689 enum ja_direction dir)
690{
691 switch (dir) {
692 case JA_LEFTMOST:
693 return ja_node_get_direction(node_flag,
36305a3d 694 -1, result_key, JA_RIGHT);
b023ba9f
MD
695 case JA_RIGHTMOST:
696 return ja_node_get_direction(node_flag,
36305a3d 697 JA_ENTRY_PER_NODE, result_key, JA_LEFT);
b023ba9f
MD
	default:
		assert(0);
		return NULL;
	}
291b2543
MD
701}
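/*
 * For example, ja_node_get_minmax(node_flag, &key, JA_LEFTMOST) returns
 * the child with the smallest populated key by searching rightward from
 * the sentinel value -1; JA_RIGHTMOST searches leftward from
 * JA_ENTRY_PER_NODE.
 */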
702
8e519e3c 703static
d96bfb0d 704int ja_linear_node_set_nth(const struct cds_ja_type *type,
b4540e8a 705 struct cds_ja_inode *node,
d96bfb0d 706 struct cds_ja_shadow_node *shadow_node,
8e519e3c 707 uint8_t n,
b4540e8a 708 struct cds_ja_inode_flag *child_node_flag)
8e519e3c
MD
709{
710 uint8_t nr_child;
711 uint8_t *values, *nr_child_ptr;
b4540e8a 712 struct cds_ja_inode_flag **pointers;
2e313670 713 unsigned int i, unused = 0;
8e519e3c
MD
714
715 assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
716
717 nr_child_ptr = &node->u.data[0];
48cbe001
MD
718 dbg_printf("linear set nth: n %u, nr_child_ptr %p\n",
719 (unsigned int) n, nr_child_ptr);
8e519e3c
MD
720 nr_child = *nr_child_ptr;
721 assert(nr_child <= type->max_linear_child);
8e519e3c
MD
722
723 values = &node->u.data[1];
2e313670
MD
724 pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
725 /* Check if node value is already populated */
8e519e3c 726 for (i = 0; i < nr_child; i++) {
2e313670
MD
727 if (values[i] == n) {
728 if (pointers[i])
729 return -EEXIST;
730 else
731 break;
732 } else {
733 if (!pointers[i])
734 unused++;
735 }
8e519e3c 736 }
2e313670
MD
737 if (i == nr_child && nr_child >= type->max_linear_child) {
738 if (unused)
739 return -ERANGE; /* recompact node */
740 else
741 return -ENOSPC; /* No space left in this node type */
742 }
743
744 assert(pointers[i] == NULL);
745 rcu_assign_pointer(pointers[i], child_node_flag);
746 /* If we expanded the nr_child, increment it */
747 if (i == nr_child) {
748 CMM_STORE_SHARED(values[nr_child], n);
749 /* write pointer and value before nr_child */
750 cmm_smp_wmb();
751 CMM_STORE_SHARED(*nr_child_ptr, nr_child + 1);
8e519e3c 752 }
e1db2db5 753 shadow_node->nr_child++;
a2a7ff59
MD
754 dbg_printf("linear set nth: %u child, shadow: %u child, for node %p shadow %p\n",
755 (unsigned int) CMM_LOAD_SHARED(*nr_child_ptr),
756 (unsigned int) shadow_node->nr_child,
757 node, shadow_node);
758
8e519e3c
MD
759 return 0;
760}
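/*
 * Ordering note: the child pointer is published with rcu_assign_pointer()
 * before the value and nr_child stores, and the cmm_smp_wmb() above pairs
 * with the cmm_smp_rmb() in ja_linear_node_get_nth(), so a reader that
 * observes the new nr_child also observes the matching value and pointer.
 */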
761
762static
d96bfb0d 763int ja_pool_node_set_nth(const struct cds_ja_type *type,
b4540e8a 764 struct cds_ja_inode *node,
b1a90ce3 765 struct cds_ja_inode_flag *node_flag,
d96bfb0d 766 struct cds_ja_shadow_node *shadow_node,
8e519e3c 767 uint8_t n,
b4540e8a 768 struct cds_ja_inode_flag *child_node_flag)
8e519e3c 769{
b4540e8a 770 struct cds_ja_inode *linear;
8e519e3c
MD
771
772 assert(type->type_class == RCU_JA_POOL);
b1a90ce3
MD
773
774 switch (type->nr_pool_order) {
775 case 1:
776 {
777 unsigned long bitsel, index;
778
779 bitsel = ja_node_pool_1d_bitsel(node_flag);
780 assert(bitsel < CHAR_BIT);
19ddcd04 781 index = ((unsigned long) n >> bitsel) & 0x1;
b1a90ce3
MD
782 linear = (struct cds_ja_inode *) &node->u.data[index << type->pool_size_order];
783 break;
784 }
785 case 2:
786 {
19ddcd04
MD
787 unsigned long bitsel[2], index[2], rindex;
788
789 ja_node_pool_2d_bitsel(node_flag, bitsel);
790 assert(bitsel[0] < CHAR_BIT);
791 assert(bitsel[1] < CHAR_BIT);
792 index[0] = ((unsigned long) n >> bitsel[0]) & 0x1;
793 index[0] <<= 1;
794 index[1] = ((unsigned long) n >> bitsel[1]) & 0x1;
795 rindex = index[0] | index[1];
796 linear = (struct cds_ja_inode *) &node->u.data[rindex << type->pool_size_order];
b1a90ce3
MD
797 break;
798 }
799 default:
800 linear = NULL;
801 assert(0);
802 }
803
e1db2db5
MD
804 return ja_linear_node_set_nth(type, linear, shadow_node,
805 n, child_node_flag);
8e519e3c
MD
806}
807
808static
d96bfb0d 809int ja_pigeon_node_set_nth(const struct cds_ja_type *type,
b4540e8a 810 struct cds_ja_inode *node,
d96bfb0d 811 struct cds_ja_shadow_node *shadow_node,
8e519e3c 812 uint8_t n,
b4540e8a 813 struct cds_ja_inode_flag *child_node_flag)
8e519e3c 814{
b4540e8a 815 struct cds_ja_inode_flag **ptr;
8e519e3c
MD
816
817 assert(type->type_class == RCU_JA_PIGEON);
b4540e8a 818 ptr = &((struct cds_ja_inode_flag **) node->u.data)[n];
5a9a87dd 819 if (*ptr)
8e519e3c
MD
820 return -EEXIST;
821 rcu_assign_pointer(*ptr, child_node_flag);
e1db2db5 822 shadow_node->nr_child++;
8e519e3c
MD
823 return 0;
824}
825
d68c6810 826/*
7a0b2331 827 * _ja_node_set_nth: set nth item within a node. Return an error
8e519e3c 828 * (negative error value) if it is already there.
d68c6810 829 */
8e519e3c 830static
d96bfb0d 831int _ja_node_set_nth(const struct cds_ja_type *type,
b4540e8a 832 struct cds_ja_inode *node,
b1a90ce3 833 struct cds_ja_inode_flag *node_flag,
d96bfb0d 834 struct cds_ja_shadow_node *shadow_node,
e1db2db5 835 uint8_t n,
b4540e8a 836 struct cds_ja_inode_flag *child_node_flag)
8e519e3c 837{
8e519e3c
MD
838 switch (type->type_class) {
839 case RCU_JA_LINEAR:
e1db2db5 840 return ja_linear_node_set_nth(type, node, shadow_node, n,
8e519e3c
MD
841 child_node_flag);
842 case RCU_JA_POOL:
b1a90ce3 843 return ja_pool_node_set_nth(type, node, node_flag, shadow_node, n,
8e519e3c
MD
844 child_node_flag);
845 case RCU_JA_PIGEON:
e1db2db5 846 return ja_pigeon_node_set_nth(type, node, shadow_node, n,
8e519e3c 847 child_node_flag);
e1db2db5
MD
848 case RCU_JA_NULL:
849 return -ENOSPC;
8e519e3c
MD
850 default:
851 assert(0);
852 return -EINVAL;
853 }
854
855 return 0;
856}
7a0b2331 857
2e313670 858static
af3cbd45 859int ja_linear_node_clear_ptr(const struct cds_ja_type *type,
2e313670
MD
860 struct cds_ja_inode *node,
861 struct cds_ja_shadow_node *shadow_node,
af3cbd45 862 struct cds_ja_inode_flag **node_flag_ptr)
2e313670
MD
863{
864 uint8_t nr_child;
af3cbd45 865 uint8_t *nr_child_ptr;
2e313670
MD
866
867 assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
868
869 nr_child_ptr = &node->u.data[0];
2e313670
MD
870 nr_child = *nr_child_ptr;
871 assert(nr_child <= type->max_linear_child);
872
48cbe001
MD
873 if (type->type_class == RCU_JA_LINEAR) {
874 assert(!shadow_node->fallback_removal_count);
875 if (shadow_node->nr_child <= type->min_child) {
2e313670
MD
876 /* We need to try recompacting the node */
877 return -EFBIG;
878 }
879 }
19ddcd04 880 dbg_printf("linear clear ptr: nr_child_ptr %p\n", nr_child_ptr);
af3cbd45
MD
881 assert(*node_flag_ptr != NULL);
882 rcu_assign_pointer(*node_flag_ptr, NULL);
2e313670
MD
883 /*
884 * Value and nr_child are never changed (would cause ABA issue).
885 * Instead, we leave the pointer to NULL and recompact the node
886 * once in a while. It is allowed to set a NULL pointer to a new
887 * value without recompaction though.
888 * Only update the shadow node accounting.
889 */
890 shadow_node->nr_child--;
af3cbd45 891 dbg_printf("linear clear ptr: %u child, shadow: %u child, for node %p shadow %p\n",
2e313670
MD
892 (unsigned int) CMM_LOAD_SHARED(*nr_child_ptr),
893 (unsigned int) shadow_node->nr_child,
894 node, shadow_node);
2e313670
MD
895 return 0;
896}
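/*
 * Rationale sketch: changing values[i] to reuse a cleared slot for a
 * different key could let a reader that already matched the old value
 * follow a pointer now belonging to another key (ABA). Hence only the
 * pointer is NULL-ed; the slot may later be re-populated with the same
 * key, and unused slots are reclaimed on recompaction.
 */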
897
898static
af3cbd45 899int ja_pool_node_clear_ptr(const struct cds_ja_type *type,
2e313670 900 struct cds_ja_inode *node,
19ddcd04 901 struct cds_ja_inode_flag *node_flag,
2e313670 902 struct cds_ja_shadow_node *shadow_node,
af3cbd45 903 struct cds_ja_inode_flag **node_flag_ptr,
2e313670
MD
904 uint8_t n)
905{
906 struct cds_ja_inode *linear;
907
908 assert(type->type_class == RCU_JA_POOL);
19ddcd04
MD
909
910 if (shadow_node->fallback_removal_count) {
911 shadow_node->fallback_removal_count--;
912 } else {
913 /* We should try recompacting the node */
914 if (shadow_node->nr_child <= type->min_child)
915 return -EFBIG;
916 }
917
918 switch (type->nr_pool_order) {
919 case 1:
920 {
921 unsigned long bitsel, index;
922
923 bitsel = ja_node_pool_1d_bitsel(node_flag);
924 assert(bitsel < CHAR_BIT);
925 index = ((unsigned long) n >> bitsel) & type->nr_pool_order;
926 linear = (struct cds_ja_inode *) &node->u.data[index << type->pool_size_order];
927 break;
928 }
929 case 2:
930 {
931 unsigned long bitsel[2], index[2], rindex;
932
933 ja_node_pool_2d_bitsel(node_flag, bitsel);
934 assert(bitsel[0] < CHAR_BIT);
935 assert(bitsel[1] < CHAR_BIT);
936 index[0] = ((unsigned long) n >> bitsel[0]) & 0x1;
937 index[0] <<= 1;
938 index[1] = ((unsigned long) n >> bitsel[1]) & 0x1;
939 rindex = index[0] | index[1];
940 linear = (struct cds_ja_inode *) &node->u.data[rindex << type->pool_size_order];
941 break;
942 }
943 default:
944 linear = NULL;
945 assert(0);
946 }
947
af3cbd45 948 return ja_linear_node_clear_ptr(type, linear, shadow_node, node_flag_ptr);
2e313670
MD
949}
950
951static
af3cbd45 952int ja_pigeon_node_clear_ptr(const struct cds_ja_type *type,
2e313670
MD
953 struct cds_ja_inode *node,
954 struct cds_ja_shadow_node *shadow_node,
af3cbd45 955 struct cds_ja_inode_flag **node_flag_ptr)
2e313670 956{
2e313670 957 assert(type->type_class == RCU_JA_PIGEON);
19ddcd04
MD
958
959 if (shadow_node->fallback_removal_count) {
960 shadow_node->fallback_removal_count--;
961 } else {
962 /* We should try recompacting the node */
963 if (shadow_node->nr_child <= type->min_child)
964 return -EFBIG;
965 }
4d6ef45e 966 dbg_printf("ja_pigeon_node_clear_ptr: clearing ptr: %p\n", *node_flag_ptr);
af3cbd45 967 rcu_assign_pointer(*node_flag_ptr, NULL);
2e313670
MD
968 shadow_node->nr_child--;
969 return 0;
970}
971
972/*
af3cbd45 973 * _ja_node_clear_ptr: clear ptr item within a node. Return an error
2e313670
MD
974 * (negative error value) if it is not found (-ENOENT).
975 */
976static
af3cbd45 977int _ja_node_clear_ptr(const struct cds_ja_type *type,
2e313670 978 struct cds_ja_inode *node,
19ddcd04 979 struct cds_ja_inode_flag *node_flag,
2e313670 980 struct cds_ja_shadow_node *shadow_node,
af3cbd45 981 struct cds_ja_inode_flag **node_flag_ptr,
2e313670
MD
982 uint8_t n)
983{
984 switch (type->type_class) {
985 case RCU_JA_LINEAR:
af3cbd45 986 return ja_linear_node_clear_ptr(type, node, shadow_node, node_flag_ptr);
2e313670 987 case RCU_JA_POOL:
19ddcd04 988 return ja_pool_node_clear_ptr(type, node, node_flag, shadow_node, node_flag_ptr, n);
2e313670 989 case RCU_JA_PIGEON:
af3cbd45 990 return ja_pigeon_node_clear_ptr(type, node, shadow_node, node_flag_ptr);
2e313670
MD
991 case RCU_JA_NULL:
992 return -ENOENT;
993 default:
994 assert(0);
995 return -EINVAL;
996 }
997
998 return 0;
999}
1000
b1a90ce3
MD
/*
 * Calculate the bit distribution. Returns the bit (0 to 7) that splits the
 * distribution into two sub-distributions containing as close as possible
 * to the same number of elements.
 */
1006static
1007unsigned int ja_node_sum_distribution_1d(enum ja_recompact mode,
1008 struct cds_ja *ja,
1009 unsigned int type_index,
1010 const struct cds_ja_type *type,
1011 struct cds_ja_inode *node,
1012 struct cds_ja_shadow_node *shadow_node,
1013 uint8_t n,
1014 struct cds_ja_inode_flag *child_node_flag,
1015 struct cds_ja_inode_flag **nullify_node_flag_ptr)
1016{
1017 uint8_t nr_one[JA_BITS_PER_BYTE];
1018 unsigned int bitsel = 0, bit_i, overall_best_distance = UINT_MAX;
1019 unsigned int distrib_nr_child = 0;
1020
1021 memset(nr_one, 0, sizeof(nr_one));
1022
1023 switch (type->type_class) {
1024 case RCU_JA_LINEAR:
1025 {
1026 uint8_t nr_child =
1027 ja_linear_node_get_nr_child(type, node);
1028 unsigned int i;
1029
1030 for (i = 0; i < nr_child; i++) {
1031 struct cds_ja_inode_flag *iter;
b1a90ce3
MD
1032 uint8_t v;
1033
1034 ja_linear_node_get_ith_pos(type, node, i, &v, &iter);
1035 if (!iter)
1036 continue;
1037 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
1038 continue;
f5531dd9
MD
1039 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1040 if (v & (1U << bit_i))
1041 nr_one[bit_i]++;
b1a90ce3
MD
1042 }
1043 distrib_nr_child++;
1044 }
1045 break;
1046 }
1047 case RCU_JA_POOL:
1048 {
1049 unsigned int pool_nr;
1050
1051 for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) {
1052 struct cds_ja_inode *pool =
1053 ja_pool_node_get_ith_pool(type,
1054 node, pool_nr);
1055 uint8_t nr_child =
1056 ja_linear_node_get_nr_child(type, pool);
1057 unsigned int j;
1058
1059 for (j = 0; j < nr_child; j++) {
1060 struct cds_ja_inode_flag *iter;
b1a90ce3
MD
1061 uint8_t v;
1062
1063 ja_linear_node_get_ith_pos(type, pool,
1064 j, &v, &iter);
1065 if (!iter)
1066 continue;
1067 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
1068 continue;
f5531dd9
MD
1069 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1070 if (v & (1U << bit_i))
1071 nr_one[bit_i]++;
b1a90ce3
MD
1072 }
1073 distrib_nr_child++;
1074 }
1075 }
1076 break;
1077 }
1078 case RCU_JA_PIGEON:
1079 {
b1a90ce3
MD
1080 unsigned int i;
1081
1082 assert(mode == JA_RECOMPACT_DEL);
48cbe001 1083 for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
b1a90ce3 1084 struct cds_ja_inode_flag *iter;
b1a90ce3
MD
1085
1086 iter = ja_pigeon_node_get_ith_pos(type, node, i);
1087 if (!iter)
1088 continue;
1089 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
1090 continue;
f5531dd9
MD
1091 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1092 if (i & (1U << bit_i))
1093 nr_one[bit_i]++;
b1a90ce3
MD
1094 }
1095 distrib_nr_child++;
1096 }
1097 break;
1098 }
1099 case RCU_JA_NULL:
19ddcd04 1100 assert(mode == JA_RECOMPACT_ADD_NEXT);
b1a90ce3
MD
1101 break;
1102 default:
1103 assert(0);
1104 break;
1105 }
1106
19ddcd04 1107 if (mode == JA_RECOMPACT_ADD_NEXT || mode == JA_RECOMPACT_ADD_SAME) {
f5531dd9
MD
1108 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1109 if (n & (1U << bit_i))
1110 nr_one[bit_i]++;
b1a90ce3
MD
1111 }
1112 distrib_nr_child++;
1113 }
1114
	/*
	 * The best bit selector is the one for which the number of ones is
	 * closest to half of the number of children in the distribution.
	 * The distance is computed as abs(2 * nr_one[bit] -
	 * distrib_nr_child), which avoids the truncation a division by two
	 * would introduce.
	 */
1121 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1122 unsigned int distance_to_best;
1123
1b34283b 1124 distance_to_best = abs_int(((unsigned int) nr_one[bit_i] << 1U) - distrib_nr_child);
b1a90ce3
MD
1125 if (distance_to_best < overall_best_distance) {
1126 overall_best_distance = distance_to_best;
1127 bitsel = bit_i;
1128 }
1129 }
1130 dbg_printf("1 dimension pool bit selection: (%u)\n", bitsel);
1131 return bitsel;
1132}
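/*
 * Worked example (hypothetical population): children with keys
 * { 0x00, 0x01, 0x80, 0x81 }. Bit 0 and bit 7 each split the set 2/2
 * (distance 0), while every other bit splits it 4/0 (distance 4), so the
 * first perfectly balanced bit encountered (bit 0) is selected.
 */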
1133
19ddcd04
MD
/*
 * Calculate the bit distribution in two dimensions. Returns the two bits
 * (each 0 to 7) that split the distribution into four sub-distributions
 * containing as close as possible to the same number of elements.
 */
1139static
1140void ja_node_sum_distribution_2d(enum ja_recompact mode,
1141 struct cds_ja *ja,
1142 unsigned int type_index,
1143 const struct cds_ja_type *type,
1144 struct cds_ja_inode *node,
1145 struct cds_ja_shadow_node *shadow_node,
1146 uint8_t n,
1147 struct cds_ja_inode_flag *child_node_flag,
1148 struct cds_ja_inode_flag **nullify_node_flag_ptr,
1149 unsigned int *_bitsel)
1150{
1151 uint8_t nr_2d_11[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE],
1152 nr_2d_10[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE],
1153 nr_2d_01[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE],
1154 nr_2d_00[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE];
1155 unsigned int bitsel[2] = { 0, 1 };
4a073c53
MD
1156 unsigned int bit_i, bit_j;
1157 int overall_best_distance = INT_MAX;
19ddcd04
MD
1158 unsigned int distrib_nr_child = 0;
1159
1160 memset(nr_2d_11, 0, sizeof(nr_2d_11));
1161 memset(nr_2d_10, 0, sizeof(nr_2d_10));
4a073c53
MD
1162 memset(nr_2d_01, 0, sizeof(nr_2d_01));
1163 memset(nr_2d_00, 0, sizeof(nr_2d_00));
19ddcd04
MD
1164
1165 switch (type->type_class) {
1166 case RCU_JA_LINEAR:
1167 {
1168 uint8_t nr_child =
1169 ja_linear_node_get_nr_child(type, node);
1170 unsigned int i;
1171
1172 for (i = 0; i < nr_child; i++) {
1173 struct cds_ja_inode_flag *iter;
1174 uint8_t v;
1175
1176 ja_linear_node_get_ith_pos(type, node, i, &v, &iter);
1177 if (!iter)
1178 continue;
1179 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
1180 continue;
1181 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1182 for (bit_j = 0; bit_j < bit_i; bit_j++) {
7f14b43a
MD
1183 if (v & (1U << bit_i)) {
1184 if (v & (1U << bit_j)) {
1185 nr_2d_11[bit_i][bit_j]++;
1186 } else {
1187 nr_2d_10[bit_i][bit_j]++;
1188 }
1189 } else {
1190 if (v & (1U << bit_j)) {
1191 nr_2d_01[bit_i][bit_j]++;
1192 } else {
1193 nr_2d_00[bit_i][bit_j]++;
1194 }
19ddcd04
MD
1195 }
1196 }
1197 }
1198 distrib_nr_child++;
1199 }
1200 break;
1201 }
1202 case RCU_JA_POOL:
1203 {
1204 unsigned int pool_nr;
1205
1206 for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) {
1207 struct cds_ja_inode *pool =
1208 ja_pool_node_get_ith_pool(type,
1209 node, pool_nr);
1210 uint8_t nr_child =
1211 ja_linear_node_get_nr_child(type, pool);
1212 unsigned int j;
1213
1214 for (j = 0; j < nr_child; j++) {
1215 struct cds_ja_inode_flag *iter;
1216 uint8_t v;
1217
1218 ja_linear_node_get_ith_pos(type, pool,
1219 j, &v, &iter);
1220 if (!iter)
1221 continue;
1222 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
1223 continue;
1224 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1225 for (bit_j = 0; bit_j < bit_i; bit_j++) {
7f14b43a
MD
1226 if (v & (1U << bit_i)) {
1227 if (v & (1U << bit_j)) {
1228 nr_2d_11[bit_i][bit_j]++;
1229 } else {
1230 nr_2d_10[bit_i][bit_j]++;
1231 }
1232 } else {
1233 if (v & (1U << bit_j)) {
1234 nr_2d_01[bit_i][bit_j]++;
1235 } else {
1236 nr_2d_00[bit_i][bit_j]++;
1237 }
19ddcd04
MD
1238 }
1239 }
1240 }
1241 distrib_nr_child++;
1242 }
1243 }
1244 break;
1245 }
1246 case RCU_JA_PIGEON:
1247 {
19ddcd04
MD
1248 unsigned int i;
1249
1250 assert(mode == JA_RECOMPACT_DEL);
48cbe001 1251 for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
19ddcd04
MD
1252 struct cds_ja_inode_flag *iter;
1253
1254 iter = ja_pigeon_node_get_ith_pos(type, node, i);
1255 if (!iter)
1256 continue;
1257 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
1258 continue;
1259 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1260 for (bit_j = 0; bit_j < bit_i; bit_j++) {
7f14b43a
MD
1261 if (i & (1U << bit_i)) {
1262 if (i & (1U << bit_j)) {
1263 nr_2d_11[bit_i][bit_j]++;
1264 } else {
1265 nr_2d_10[bit_i][bit_j]++;
1266 }
1267 } else {
1268 if (i & (1U << bit_j)) {
1269 nr_2d_01[bit_i][bit_j]++;
1270 } else {
1271 nr_2d_00[bit_i][bit_j]++;
1272 }
19ddcd04
MD
1273 }
1274 }
1275 }
1276 distrib_nr_child++;
1277 }
1278 break;
1279 }
1280 case RCU_JA_NULL:
1281 assert(mode == JA_RECOMPACT_ADD_NEXT);
1282 break;
1283 default:
1284 assert(0);
1285 break;
1286 }
1287
1288 if (mode == JA_RECOMPACT_ADD_NEXT || mode == JA_RECOMPACT_ADD_SAME) {
1289 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1290 for (bit_j = 0; bit_j < bit_i; bit_j++) {
7f14b43a
MD
1291 if (n & (1U << bit_i)) {
1292 if (n & (1U << bit_j)) {
1293 nr_2d_11[bit_i][bit_j]++;
1294 } else {
1295 nr_2d_10[bit_i][bit_j]++;
1296 }
1297 } else {
1298 if (n & (1U << bit_j)) {
1299 nr_2d_01[bit_i][bit_j]++;
1300 } else {
1301 nr_2d_00[bit_i][bit_j]++;
1302 }
19ddcd04
MD
1303 }
1304 }
1305 }
1306 distrib_nr_child++;
1307 }
1308
1309 /*
1310 * The best bit selector is that for which the number of nodes
1311 * in each sub-class is closest to one-fourth of the number of
1312 * children in the distribution. We calculate the distance using
1313 * 4 times the size of the sub-distribution to eliminate
1314 * truncation error.
1315 */
1316 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1317 for (bit_j = 0; bit_j < bit_i; bit_j++) {
4a073c53 1318 int distance_to_best[4];
19ddcd04 1319
1b34283b
MD
1320 distance_to_best[0] = ((unsigned int) nr_2d_11[bit_i][bit_j] << 2U) - distrib_nr_child;
1321 distance_to_best[1] = ((unsigned int) nr_2d_10[bit_i][bit_j] << 2U) - distrib_nr_child;
1322 distance_to_best[2] = ((unsigned int) nr_2d_01[bit_i][bit_j] << 2U) - distrib_nr_child;
1323 distance_to_best[3] = ((unsigned int) nr_2d_00[bit_i][bit_j] << 2U) - distrib_nr_child;
19ddcd04 1324
4a073c53
MD
			/* Keep the worst (largest positive) distance among the four quadrants */
1326 if (distance_to_best[1] > 0 && distance_to_best[1] > distance_to_best[0])
19ddcd04 1327 distance_to_best[0] = distance_to_best[1];
4a073c53 1328 if (distance_to_best[2] > 0 && distance_to_best[2] > distance_to_best[0])
19ddcd04 1329 distance_to_best[0] = distance_to_best[2];
4a073c53 1330 if (distance_to_best[3] > 0 && distance_to_best[3] > distance_to_best[0])
19ddcd04 1331 distance_to_best[0] = distance_to_best[3];
4a073c53 1332
19ddcd04
MD
			/*
			 * If our worst distance is better than the overall
			 * best, we become the new best candidate.
			 */
1337 if (distance_to_best[0] < overall_best_distance) {
1338 overall_best_distance = distance_to_best[0];
1339 bitsel[0] = bit_i;
1340 bitsel[1] = bit_j;
1341 }
1342 }
1343 }
1344
1345 dbg_printf("2 dimensions pool bit selection: (%u,%u)\n", bitsel[0], bitsel[1]);
1346
1347 /* Return our bit selection */
1348 _bitsel[0] = bitsel[0];
1349 _bitsel[1] = bitsel[1];
1350}
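/*
 * The 2-D selection generalizes the 1-D case: for each candidate bit pair
 * (bit_i, bit_j) the children are binned into four quadrants (11, 10, 01,
 * 00), and the pair whose most over-full quadrant deviates least from one
 * quarter of the population is retained. The two selected bits later index
 * the four linear sub-pools of a 2-D pool node (see ja_pool_node_get_nth()).
 */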
1351
48cbe001
MD
1352static
1353unsigned int find_nearest_type_index(unsigned int type_index,
1354 unsigned int nr_nodes)
1355{
1356 const struct cds_ja_type *type;
1357
1358 assert(type_index != NODE_INDEX_NULL);
1359 if (nr_nodes == 0)
1360 return NODE_INDEX_NULL;
1361 for (;;) {
1362 type = &ja_types[type_index];
1363 if (nr_nodes < type->min_child)
1364 type_index--;
1365 else if (nr_nodes > type->max_child)
1366 type_index++;
1367 else
1368 break;
1369 }
1370 return type_index;
1371}
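/*
 * For example (64-bit tables above): starting from type_index 4
 * (max_child 28) with nr_nodes == 30, the loop steps up to type 5
 * (min_child 22, max_child 54), which is returned.
 */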
1372
7a0b2331
MD
/*
 * ja_node_recompact: recompact a node, adding or removing a child.
 * Return 0 on success, -EAGAIN if we need to retry, or another negative
 * error value otherwise.
 */
1378static
2e313670
MD
1379int ja_node_recompact(enum ja_recompact mode,
1380 struct cds_ja *ja,
e1db2db5 1381 unsigned int old_type_index,
d96bfb0d 1382 const struct cds_ja_type *old_type,
b4540e8a 1383 struct cds_ja_inode *old_node,
5a9a87dd 1384 struct cds_ja_shadow_node *shadow_node,
3d8fe307 1385 struct cds_ja_inode_flag **old_node_flag_ptr, uint8_t n,
af3cbd45 1386 struct cds_ja_inode_flag *child_node_flag,
48cbe001
MD
1387 struct cds_ja_inode_flag **nullify_node_flag_ptr,
1388 int level)
7a0b2331 1389{
e1db2db5 1390 unsigned int new_type_index;
b4540e8a 1391 struct cds_ja_inode *new_node;
af3cbd45 1392 struct cds_ja_shadow_node *new_shadow_node = NULL;
d96bfb0d 1393 const struct cds_ja_type *new_type;
3d8fe307 1394 struct cds_ja_inode_flag *new_node_flag, *old_node_flag;
7a0b2331 1395 int ret;
f07b240f 1396 int fallback = 0;
7a0b2331 1397
3d8fe307
MD
1398 old_node_flag = *old_node_flag_ptr;
1399
48cbe001
MD
1400 /*
1401 * Need to find nearest type index even for ADD_SAME, because
1402 * this recompaction, when applied to linear nodes, will garbage
1403 * collect dummy (NULL) entries, and can therefore cause a few
1404 * linear representations to be skipped.
1405 */
2e313670 1406 switch (mode) {
19ddcd04 1407 case JA_RECOMPACT_ADD_SAME:
48cbe001
MD
1408 new_type_index = find_nearest_type_index(old_type_index,
1409 shadow_node->nr_child + 1);
1410 dbg_printf("Recompact for node with %u children\n",
1411 shadow_node->nr_child + 1);
2e313670 1412 break;
19ddcd04 1413 case JA_RECOMPACT_ADD_NEXT:
2e313670
MD
1414 if (!shadow_node || old_type_index == NODE_INDEX_NULL) {
1415 new_type_index = 0;
48cbe001 1416 dbg_printf("Recompact for NULL\n");
2e313670 1417 } else {
48cbe001
MD
1418 new_type_index = find_nearest_type_index(old_type_index,
1419 shadow_node->nr_child + 1);
1420 dbg_printf("Recompact for node with %u children\n",
1421 shadow_node->nr_child + 1);
2e313670
MD
1422 }
1423 break;
1424 case JA_RECOMPACT_DEL:
48cbe001
MD
1425 new_type_index = find_nearest_type_index(old_type_index,
1426 shadow_node->nr_child - 1);
1427 dbg_printf("Recompact for node with %u children\n",
1428 shadow_node->nr_child - 1);
2e313670
MD
1429 break;
1430 default:
1431 assert(0);
7a0b2331 1432 }
a2a7ff59 1433
f07b240f 1434retry: /* for fallback */
582a6ade
MD
1435 dbg_printf("Recompact from type %d to type %d\n",
1436 old_type_index, new_type_index);
7a0b2331 1437 new_type = &ja_types[new_type_index];
2e313670 1438 if (new_type_index != NODE_INDEX_NULL) {
354981c2 1439 new_node = alloc_cds_ja_node(ja, new_type);
2e313670
MD
1440 if (!new_node)
1441 return -ENOMEM;
b1a90ce3
MD
1442
1443 if (new_type->type_class == RCU_JA_POOL) {
1444 switch (new_type->nr_pool_order) {
1445 case 1:
1446 {
19ddcd04
MD
1447 unsigned int node_distrib_bitsel;
1448
b1a90ce3
MD
1449 node_distrib_bitsel =
1450 ja_node_sum_distribution_1d(mode, ja,
1451 old_type_index, old_type,
1452 old_node, shadow_node,
1453 n, child_node_flag,
1454 nullify_node_flag_ptr);
1455 assert(!((unsigned long) new_node & JA_POOL_1D_MASK));
1456 new_node_flag = ja_node_flag_pool_1d(new_node,
1457 new_type_index, node_distrib_bitsel);
1458 break;
1459 }
1460 case 2:
1461 {
19ddcd04
MD
1462 unsigned int node_distrib_bitsel[2];
1463
1464 ja_node_sum_distribution_2d(mode, ja,
1465 old_type_index, old_type,
1466 old_node, shadow_node,
1467 n, child_node_flag,
1468 nullify_node_flag_ptr,
1469 node_distrib_bitsel);
b1a90ce3
MD
1470 assert(!((unsigned long) new_node & JA_POOL_1D_MASK));
1471 assert(!((unsigned long) new_node & JA_POOL_2D_MASK));
19ddcd04
MD
1472 new_node_flag = ja_node_flag_pool_2d(new_node,
1473 new_type_index, node_distrib_bitsel);
b1a90ce3
MD
1474 break;
1475 }
1476 default:
1477 assert(0);
1478 }
1479 } else {
1480 new_node_flag = ja_node_flag(new_node, new_type_index);
1481 }
1482
2e313670 1483 dbg_printf("Recompact inherit lock from %p\n", shadow_node);
48cbe001 1484 new_shadow_node = rcuja_shadow_set(ja->ht, new_node_flag, shadow_node, ja, level);
2e313670 1485 if (!new_shadow_node) {
354981c2 1486 free_cds_ja_node(ja, new_node);
2e313670
MD
1487 return -ENOMEM;
1488 }
1489 if (fallback)
1490 new_shadow_node->fallback_removal_count =
1491 JA_FALLBACK_REMOVAL_COUNT;
1492 } else {
1493 new_node = NULL;
1494 new_node_flag = NULL;
e1db2db5 1495 }
11c5e016 1496
19ddcd04 1497 assert(mode != JA_RECOMPACT_ADD_NEXT || old_type->type_class != RCU_JA_PIGEON);
2e313670
MD
1498
1499 if (new_type_index == NODE_INDEX_NULL)
1500 goto skip_copy;
1501
11c5e016
MD
1502 switch (old_type->type_class) {
1503 case RCU_JA_LINEAR:
1504 {
1505 uint8_t nr_child =
1506 ja_linear_node_get_nr_child(old_type, old_node);
1507 unsigned int i;
1508
1509 for (i = 0; i < nr_child; i++) {
b4540e8a 1510 struct cds_ja_inode_flag *iter;
11c5e016
MD
1511 uint8_t v;
1512
1513 ja_linear_node_get_ith_pos(old_type, old_node, i, &v, &iter);
1514 if (!iter)
1515 continue;
af3cbd45 1516 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
2e313670 1517 continue;
b1a90ce3 1518 ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
f07b240f 1519 new_shadow_node,
11c5e016 1520 v, iter);
f07b240f
MD
1521 if (new_type->type_class == RCU_JA_POOL && ret) {
1522 goto fallback_toosmall;
1523 }
11c5e016
MD
1524 assert(!ret);
1525 }
1526 break;
1527 }
1528 case RCU_JA_POOL:
1529 {
1530 unsigned int pool_nr;
1531
1532 for (pool_nr = 0; pool_nr < (1U << old_type->nr_pool_order); pool_nr++) {
b4540e8a 1533 struct cds_ja_inode *pool =
11c5e016
MD
1534 ja_pool_node_get_ith_pool(old_type,
1535 old_node, pool_nr);
1536 uint8_t nr_child =
1537 ja_linear_node_get_nr_child(old_type, pool);
1538 unsigned int j;
1539
1540 for (j = 0; j < nr_child; j++) {
b4540e8a 1541 struct cds_ja_inode_flag *iter;
11c5e016
MD
1542 uint8_t v;
1543
1544 ja_linear_node_get_ith_pos(old_type, pool,
1545 j, &v, &iter);
1546 if (!iter)
1547 continue;
af3cbd45 1548 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
2e313670 1549 continue;
b1a90ce3 1550 ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
						new_shadow_node,
						v, iter);
				if (new_type->type_class == RCU_JA_POOL
						&& ret) {
					goto fallback_toosmall;
				}
				assert(!ret);
			}
		}
		break;
	}
	case RCU_JA_NULL:
		assert(mode == JA_RECOMPACT_ADD_NEXT);
		break;
	case RCU_JA_PIGEON:
	{
		unsigned int i;

		assert(mode == JA_RECOMPACT_DEL);
		for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
			struct cds_ja_inode_flag *iter;

			iter = ja_pigeon_node_get_ith_pos(old_type, old_node, i);
			if (!iter)
				continue;
			if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
				continue;
			ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
					new_shadow_node,
					i, iter);
			if (new_type->type_class == RCU_JA_POOL && ret) {
				goto fallback_toosmall;
			}
			assert(!ret);
		}
		break;
	}
	default:
		assert(0);
		ret = -EINVAL;
		goto end;
	}
skip_copy:

	if (mode == JA_RECOMPACT_ADD_NEXT || mode == JA_RECOMPACT_ADD_SAME) {
		/* add node */
		ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
				new_shadow_node,
				n, child_node_flag);
		if (new_type->type_class == RCU_JA_POOL && ret) {
			goto fallback_toosmall;
		}
		assert(!ret);
	}

	if (fallback) {
		dbg_printf("Using fallback for %u children, node type index: %u, mode %s\n",
			new_shadow_node->nr_child, old_type_index, mode == JA_RECOMPACT_ADD_NEXT ? "add_next" :
				(mode == JA_RECOMPACT_DEL ? "del" : "add_same"));
		uatomic_inc(&ja->node_fallback_count_distribution[new_shadow_node->nr_child]);
	}

	/* Return pointer to new recompacted node through old_node_flag_ptr */
	*old_node_flag_ptr = new_node_flag;
	if (old_node) {
		int flags;

		flags = RCUJA_SHADOW_CLEAR_FREE_NODE;
		/*
		 * It is OK to free the lock associated with a node
		 * going to NULL, since we are holding the parent lock.
		 * This synchronizes removal with re-add of that node.
		 */
		if (new_type_index == NODE_INDEX_NULL)
			flags |= RCUJA_SHADOW_CLEAR_FREE_LOCK;
		ret = rcuja_shadow_clear(ja->ht, old_node_flag, shadow_node,
				flags);
		assert(!ret);
	}

	ret = 0;
end:
	return ret;

fallback_toosmall:
	/* fallback if next pool is too small */
	assert(new_shadow_node);
	ret = rcuja_shadow_clear(ja->ht, new_node_flag, new_shadow_node,
			RCUJA_SHADOW_CLEAR_FREE_NODE);
	assert(!ret);

	switch (mode) {
	case JA_RECOMPACT_ADD_SAME:
		/*
		 * JA_RECOMPACT_ADD_SAME is only triggered if a linear
		 * node within a pool has unused entries. It should
		 * therefore _never_ be too small.
		 */
		assert(0);

		/* Fall-through */

	case JA_RECOMPACT_ADD_NEXT:
	{
		const struct cds_ja_type *next_type;

		/*
		 * Recompaction attempt on add failed. Should only
		 * happen if target node type is pool. Caused by
		 * hard-to-split distribution. Recompact using the next
		 * distribution size.
		 */
		assert(new_type->type_class == RCU_JA_POOL);
		next_type = &ja_types[new_type_index + 1];
		/*
		 * Try going to the next pool size if our population
		 * fits within its range. This is not flagged as a
		 * fallback.
		 */
		if (shadow_node->nr_child + 1 >= next_type->min_child
				&& shadow_node->nr_child + 1 <= next_type->max_child) {
			new_type_index++;
			goto retry;
		} else {
			new_type_index++;
			dbg_printf("Add fallback to type %d\n", new_type_index);
			uatomic_inc(&ja->nr_fallback);
			fallback = 1;
			goto retry;
		}
		break;
	}
	case JA_RECOMPACT_DEL:
		/*
		 * Recompaction attempt on delete failed. Should only
		 * happen if target node type is pool. This is caused by
		 * a hard-to-split distribution. Recompact on same node
		 * size, but flag current node as "fallback" to ensure
		 * we don't attempt recompaction before some activity
		 * has reshuffled our node.
		 */
		assert(new_type->type_class == RCU_JA_POOL);
		new_type_index = old_type_index;
		dbg_printf("Delete fallback keeping type %d\n", new_type_index);
		uatomic_inc(&ja->nr_fallback);
		fallback = 1;
		goto retry;
	default:
		assert(0);
		return -EINVAL;
	}

	/*
	 * Last resort fallback: pigeon.
	 */
	new_type_index = (1UL << JA_TYPE_BITS) - 1;
	dbg_printf("Fallback to type %d\n", new_type_index);
	uatomic_inc(&ja->nr_fallback);
	fallback = 1;
	goto retry;
}

/*
 * Return 0 on success, -EAGAIN if need to retry, or other negative
 * error value otherwise.
 */
static
int ja_node_set_nth(struct cds_ja *ja,
		struct cds_ja_inode_flag **node_flag, uint8_t n,
		struct cds_ja_inode_flag *child_node_flag,
		struct cds_ja_shadow_node *shadow_node,
		int level)
{
	int ret;
	unsigned int type_index;
	const struct cds_ja_type *type;
	struct cds_ja_inode *node;

	dbg_printf("ja_node_set_nth for n=%u, node %p, shadow %p\n",
		(unsigned int) n, ja_node_ptr(*node_flag), shadow_node);

	node = ja_node_ptr(*node_flag);
	type_index = ja_node_type(*node_flag);
	type = &ja_types[type_index];
	ret = _ja_node_set_nth(type, node, *node_flag, shadow_node,
			n, child_node_flag);
	switch (ret) {
	case -ENOSPC:
		/* Not enough space in node, need to recompact to next type. */
		ret = ja_node_recompact(JA_RECOMPACT_ADD_NEXT, ja, type_index, type, node,
				shadow_node, node_flag, n, child_node_flag, NULL, level);
		break;
	case -ERANGE:
		/* Node needs to be recompacted. */
		ret = ja_node_recompact(JA_RECOMPACT_ADD_SAME, ja, type_index, type, node,
				shadow_node, node_flag, n, child_node_flag, NULL, level);
		break;
	}
	return ret;
}

/*
 * Return 0 on success, -EAGAIN if need to retry, or other negative
 * error value otherwise.
 */
static
int ja_node_clear_ptr(struct cds_ja *ja,
		struct cds_ja_inode_flag **node_flag_ptr,	/* Pointer to location to nullify */
		struct cds_ja_inode_flag **parent_node_flag_ptr,	/* Address of parent ptr in its parent */
		struct cds_ja_shadow_node *shadow_node,		/* of parent */
		uint8_t n, int level)
{
	int ret;
	unsigned int type_index;
	const struct cds_ja_type *type;
	struct cds_ja_inode *node;

	dbg_printf("ja_node_clear_ptr for node %p, shadow %p, target ptr %p\n",
		ja_node_ptr(*parent_node_flag_ptr), shadow_node, node_flag_ptr);

	node = ja_node_ptr(*parent_node_flag_ptr);
	type_index = ja_node_type(*parent_node_flag_ptr);
	type = &ja_types[type_index];
	ret = _ja_node_clear_ptr(type, node, *parent_node_flag_ptr, shadow_node, node_flag_ptr, n);
	if (ret == -EFBIG) {
		/* Should try recompaction. */
		ret = ja_node_recompact(JA_RECOMPACT_DEL, ja, type_index, type, node,
				shadow_node, parent_node_flag_ptr, n, NULL,
				node_flag_ptr, level);
	}
	return ret;
}

struct cds_ja_node *cds_ja_lookup(struct cds_ja *ja, uint64_t key)
{
	unsigned int tree_depth, i;
	struct cds_ja_inode_flag *node_flag;

	if (caa_unlikely(key > ja->key_max))
		return NULL;
	tree_depth = ja->tree_depth;
	node_flag = rcu_dereference(ja->root);

	/* level 0: root node */
	if (!ja_node_ptr(node_flag))
		return NULL;

	for (i = 1; i < tree_depth; i++) {
		uint8_t iter_key;

		iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1)));
		node_flag = ja_node_get_nth(node_flag, NULL, iter_key);
		dbg_printf("cds_ja_lookup iter key lookup %u finds node_flag %p\n",
			(unsigned int) iter_key, node_flag);
		if (!ja_node_ptr(node_flag))
			return NULL;
	}

	/* Last level lookup succeeded. We got an actual match. */
	return (struct cds_ja_node *) node_flag;
}

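/*
 * Example (illustrative sketch, not part of this library): a typical
 * lookup. The struct my_item type, its "node" member name and the
 * my_lookup() helper are assumptions made for this example; callers
 * embed a struct cds_ja_node in their own structure and must hold the
 * RCU read-side lock across the lookup and any use of the returned
 * pointer.
 *
 *	struct my_item {
 *		struct cds_ja_node node;	// embedded judy array node
 *		uint64_t key;
 *	};
 *
 *	static struct my_item *my_lookup(struct cds_ja *ja, uint64_t key)
 *	{
 *		struct cds_ja_node *ja_node;
 *
 *		// Caller holds rcu_read_lock().
 *		ja_node = cds_ja_lookup(ja, key);
 *		if (!ja_node)
 *			return NULL;	// no item with this key
 *		// cds_ja_lookup() returns the head of the duplicate list;
 *		// walk it with cds_ja_for_each_duplicate_rcu() if needed.
 *		return caa_container_of(ja_node, struct my_item, node);
 *	}
 */
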
static
struct cds_ja_node *cds_ja_lookup_inequality(struct cds_ja *ja, uint64_t key,
		uint64_t *result_key, enum ja_lookup_inequality mode)
{
	int tree_depth, level;
	struct cds_ja_inode_flag *node_flag, *cur_node_depth[JA_MAX_DEPTH];
	uint8_t cur_key[JA_MAX_DEPTH];
	uint64_t _result_key = 0;
	enum ja_direction dir;

	switch (mode) {
	case JA_LOOKUP_BE:
		if (caa_unlikely(key > ja->key_max || key == 0))
			return NULL;
		break;
	case JA_LOOKUP_AE:
		if (caa_unlikely(key >= ja->key_max))
			return NULL;
		break;
	default:
		return NULL;
	}

	memset(cur_node_depth, 0, sizeof(cur_node_depth));
	memset(cur_key, 0, sizeof(cur_key));
	tree_depth = ja->tree_depth;
	node_flag = rcu_dereference(ja->root);
	cur_node_depth[0] = node_flag;

	/* level 0: root node */
	if (!ja_node_ptr(node_flag))
		return NULL;

	for (level = 1; level < tree_depth; level++) {
		uint8_t iter_key;

		iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - level - 1)));
		node_flag = ja_node_get_nth(node_flag, NULL, iter_key);
		if (!ja_node_ptr(node_flag))
			break;
		cur_key[level - 1] = iter_key;
		cur_node_depth[level] = node_flag;
		dbg_printf("cds_ja_lookup_inequality iter key lookup %u finds node_flag %p\n",
			(unsigned int) iter_key, node_flag);
	}

	if (level == tree_depth) {
		/* Last level lookup succeeded. We got an equal match. */
		if (result_key)
			*result_key = key;
		return (struct cds_ja_node *) node_flag;
	}

	/*
	 * Find highest value left/right of current node.
	 * Current node is cur_node_depth[level].
	 * Start at current level. If we cannot find any key left/right
	 * of ours, go one level up, seek highest value left/right of
	 * current (recursively), and when we find one, get the
	 * rightmost/leftmost child of its rightmost/leftmost child
	 * (recursively).
	 */
	switch (mode) {
	case JA_LOOKUP_BE:
		dir = JA_LEFT;
		break;
	case JA_LOOKUP_AE:
		dir = JA_RIGHT;
		break;
	default:
		assert(0);
	}
	for (; level > 0; level--) {
		uint8_t iter_key;

		iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - level - 1)));
		node_flag = ja_node_get_leftright(cur_node_depth[level - 1],
				iter_key, &cur_key[level - 1], dir);
		dbg_printf("cds_ja_lookup_inequality find sibling from %u at %u finds node_flag %p\n",
			(unsigned int) iter_key, (unsigned int) cur_key[level - 1],
			node_flag);
		/* If found left/right sibling, find rightmost/leftmost child. */
		if (ja_node_ptr(node_flag))
			break;
	}

	if (!level) {
		/* Reached the root and could not find a left/right sibling. */
		return NULL;
	}

	level++;

	/*
	 * From this point, we are guaranteed to be able to find a
	 * "below"/"above" match. ja_attach_node() and
	 * ja_detach_node() both guarantee that it is not possible for a
	 * lookup to reach a dead-end.
	 */

	/*
	 * Find rightmost/leftmost child of rightmost/leftmost child
	 * (recursively).
	 */
	switch (mode) {
	case JA_LOOKUP_BE:
		dir = JA_RIGHTMOST;
		break;
	case JA_LOOKUP_AE:
		dir = JA_LEFTMOST;
		break;
	default:
		assert(0);
	}
	for (; level < tree_depth; level++) {
		node_flag = ja_node_get_minmax(node_flag, &cur_key[level - 1], dir);
		dbg_printf("cds_ja_lookup_inequality find minmax at %u finds node_flag %p\n",
			(unsigned int) cur_key[level - 1],
			node_flag);
		if (!ja_node_ptr(node_flag))
			break;
	}

	assert(level == tree_depth);

	if (result_key) {
		for (level = 1; level < tree_depth; level++) {
			_result_key |= ((uint64_t) cur_key[level - 1])
				<< (JA_BITS_PER_BYTE * (tree_depth - level - 1));
		}
		*result_key = _result_key;
	}
	return (struct cds_ja_node *) node_flag;
}

struct cds_ja_node *cds_ja_lookup_below_equal(struct cds_ja *ja,
		uint64_t key, uint64_t *result_key)
{
	dbg_printf("cds_ja_lookup_below_equal key %" PRIu64 "\n", key);
	return cds_ja_lookup_inequality(ja, key, result_key, JA_LOOKUP_BE);
}

struct cds_ja_node *cds_ja_lookup_above_equal(struct cds_ja *ja,
		uint64_t key, uint64_t *result_key)
{
	dbg_printf("cds_ja_lookup_above_equal key %" PRIu64 "\n", key);
	return cds_ja_lookup_inequality(ja, key, result_key, JA_LOOKUP_AE);
}

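/*
 * Example (illustrative sketch, not part of this library): visiting
 * populated keys in ascending order with cds_ja_lookup_above_equal().
 * The my_visit() callback is an assumption made for this example; the
 * RCU read-side lock must be held for the whole traversal. Note that
 * cds_ja_lookup_above_equal() rejects keys >= ja->key_max, so an entry
 * stored at the maximum key is not visited by this sketch.
 *
 *	static void my_walk(struct cds_ja *ja,
 *			void (*my_visit)(struct cds_ja_node *ja_node))
 *	{
 *		struct cds_ja_node *ja_node;
 *		uint64_t key = 0, result_key;
 *
 *		// Caller holds rcu_read_lock().
 *		for (;;) {
 *			ja_node = cds_ja_lookup_above_equal(ja, key, &result_key);
 *			if (!ja_node)
 *				break;		// no key at or above "key" left
 *			my_visit(ja_node);	// head of duplicate list for result_key
 *			if (result_key == UINT64_MAX)
 *				break;
 *			key = result_key + 1;	// continue strictly above the match
 *		}
 *	}
 */
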
/*
 * We reached an unpopulated node. Create it and the children we need,
 * and then attach the entire branch to the current node. This may
 * trigger recompaction of the current node. Locks needed: node lock
 * (for add), and, possibly, parent node lock (to update pointer due to
 * node recompaction).
 *
 * First take node lock, check if recompaction is needed, then take
 * parent lock (if needed). Then we can proceed to create the new
 * branch. Publish the new branch, and release locks.
 * TODO: we currently always take the parent lock even when not needed.
 *
 * ja_attach_node() ensures that a lookup will _never_ see a branch that
 * leads to a dead-end: before attaching a branch, the entire content of
 * the new branch is populated, thus creating a cluster, before
 * attaching the cluster to the rest of the tree, thus making it visible
 * to lookups.
 */
static
int ja_attach_node(struct cds_ja *ja,
		struct cds_ja_inode_flag **attach_node_flag_ptr,
		struct cds_ja_inode_flag *attach_node_flag,
		struct cds_ja_inode_flag *parent_attach_node_flag,
		struct cds_ja_inode_flag **old_node_flag_ptr,
		struct cds_ja_inode_flag *old_node_flag,
		uint64_t key,
		unsigned int level,
		struct cds_ja_node *child_node)
{
	struct cds_ja_shadow_node *shadow_node = NULL,
			*parent_shadow_node = NULL;
	struct cds_ja_inode_flag *iter_node_flag, *iter_dest_node_flag;
	int ret, i;
	struct cds_ja_inode_flag *created_nodes[JA_MAX_DEPTH];
	int nr_created_nodes = 0;

	dbg_printf("Attach node at level %u (old_node_flag %p, attach_node_flag_ptr %p attach_node_flag %p, parent_attach_node_flag %p)\n",
		level, old_node_flag, attach_node_flag_ptr, attach_node_flag, parent_attach_node_flag);

	assert(!old_node_flag);
	if (attach_node_flag) {
		shadow_node = rcuja_shadow_lookup_lock(ja->ht, attach_node_flag);
		if (!shadow_node) {
			ret = -EAGAIN;
			goto end;
		}
	}
	if (parent_attach_node_flag) {
		parent_shadow_node = rcuja_shadow_lookup_lock(ja->ht,
				parent_attach_node_flag);
		if (!parent_shadow_node) {
			ret = -EAGAIN;
			goto unlock_shadow;
		}
	}

	if (old_node_flag_ptr && ja_node_ptr(*old_node_flag_ptr)) {
		/*
		 * Target node has been updated between RCU lookup and
		 * lock acquisition. We need to re-try lookup and
		 * attach.
		 */
		ret = -EAGAIN;
		goto unlock_parent;
	}

	/*
	 * Perform a lookup query to handle the case where
	 * old_node_flag_ptr is NULL. We cannot use it to check if the
	 * node has been populated between RCU lookup and mutex
	 * acquisition.
	 */
	if (!old_node_flag_ptr) {
		uint8_t iter_key;
		struct cds_ja_inode_flag *lookup_node_flag;
		struct cds_ja_inode_flag **lookup_node_flag_ptr;

		iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (ja->tree_depth - level)));
		lookup_node_flag = ja_node_get_nth(attach_node_flag,
			&lookup_node_flag_ptr,
			iter_key);
		if (lookup_node_flag) {
			ret = -EEXIST;
			goto unlock_parent;
		}
	}

	if (attach_node_flag_ptr && ja_node_ptr(*attach_node_flag_ptr) !=
			ja_node_ptr(attach_node_flag)) {
		/*
		 * Target node has been updated between RCU lookup and
		 * lock acquisition. We need to re-try lookup and
		 * attach.
		 */
		ret = -EAGAIN;
		goto unlock_parent;
	}

	/* Create new branch, starting from bottom */
	iter_node_flag = (struct cds_ja_inode_flag *) child_node;

	for (i = ja->tree_depth - 1; i >= (int) level; i--) {
		uint8_t iter_key;

		iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (ja->tree_depth - i - 1)));
		dbg_printf("branch creation level %d, key %u\n",
			i, (unsigned int) iter_key);
		iter_dest_node_flag = NULL;
		ret = ja_node_set_nth(ja, &iter_dest_node_flag,
			iter_key,
			iter_node_flag,
			NULL, i);
		if (ret) {
			dbg_printf("branch creation error %d\n", ret);
			goto check_error;
		}
		created_nodes[nr_created_nodes++] = iter_dest_node_flag;
		iter_node_flag = iter_dest_node_flag;
	}
	assert(level > 0);

	/* Publish branch */
	if (level == 1) {
		/*
		 * Attaching to root node.
		 */
		rcu_assign_pointer(ja->root, iter_node_flag);
	} else {
		uint8_t iter_key;

		iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (ja->tree_depth - level)));
		dbg_printf("publish branch at level %d, key %u\n",
			level - 1, (unsigned int) iter_key);
		/* We need to use set_nth on the previous level. */
		iter_dest_node_flag = attach_node_flag;
		ret = ja_node_set_nth(ja, &iter_dest_node_flag,
			iter_key,
			iter_node_flag,
			shadow_node, level - 1);
		if (ret) {
			dbg_printf("branch publish error %d\n", ret);
			goto check_error;
		}
		/*
		 * Attach branch
		 */
		rcu_assign_pointer(*attach_node_flag_ptr, iter_dest_node_flag);
	}

	/* Success */
	ret = 0;

check_error:
	if (ret) {
		for (i = 0; i < nr_created_nodes; i++) {
			int tmpret;
			int flags;

			flags = RCUJA_SHADOW_CLEAR_FREE_LOCK;
			if (i)
				flags |= RCUJA_SHADOW_CLEAR_FREE_NODE;
			tmpret = rcuja_shadow_clear(ja->ht,
					created_nodes[i],
					NULL,
					flags);
			assert(!tmpret);
		}
	}
unlock_parent:
	if (parent_shadow_node)
		rcuja_shadow_unlock(parent_shadow_node);
unlock_shadow:
	if (shadow_node)
		rcuja_shadow_unlock(shadow_node);
end:
	return ret;
}

/*
 * Lock the parent containing the pointer to list of duplicates, and add
 * node to this list. Failure can happen if concurrent update changes
 * the parent before we get the lock. We return -EAGAIN in that case.
 * Return 0 on success, negative error value on failure.
 */
static
int ja_chain_node(struct cds_ja *ja,
		struct cds_ja_inode_flag *parent_node_flag,
		struct cds_ja_inode_flag **node_flag_ptr,
		struct cds_ja_inode_flag *node_flag,
		struct cds_ja_node *node)
{
	struct cds_ja_shadow_node *shadow_node;
	int ret = 0;

	shadow_node = rcuja_shadow_lookup_lock(ja->ht, parent_node_flag);
	if (!shadow_node) {
		return -EAGAIN;
	}
	if (ja_node_ptr(*node_flag_ptr) != ja_node_ptr(node_flag)) {
		ret = -EAGAIN;
		goto end;
	}
	/*
	 * Add node to head of list. Safe against concurrent RCU read
	 * traversals.
	 */
	node->next = (struct cds_ja_node *) node_flag;
	rcu_assign_pointer(*node_flag_ptr, (struct cds_ja_inode_flag *) node);
end:
	rcuja_shadow_unlock(shadow_node);
	return ret;
}

static
int _cds_ja_add(struct cds_ja *ja, uint64_t key,
		struct cds_ja_node *node,
		struct cds_ja_node **unique_node_ret)
{
	unsigned int tree_depth, i;
	struct cds_ja_inode_flag *attach_node_flag,
		*parent_node_flag,
		*parent2_node_flag,
		*node_flag,
		*parent_attach_node_flag;
	struct cds_ja_inode_flag **attach_node_flag_ptr,
		**parent_node_flag_ptr,
		**node_flag_ptr;
	int ret;

	if (caa_unlikely(key > ja->key_max)) {
		return -EINVAL;
	}
	tree_depth = ja->tree_depth;

retry:
	dbg_printf("cds_ja_add attempt: key %" PRIu64 ", node %p\n",
		key, node);
	parent2_node_flag = NULL;
	parent_node_flag =
		(struct cds_ja_inode_flag *) &ja->root;	/* Use root ptr address as key for mutex */
	parent_node_flag_ptr = NULL;
	node_flag = rcu_dereference(ja->root);
	node_flag_ptr = &ja->root;

	/* Iterate on all internal levels */
	for (i = 1; i < tree_depth; i++) {
		uint8_t iter_key;

		if (!ja_node_ptr(node_flag))
			break;
		dbg_printf("cds_ja_add iter parent2_node_flag %p parent_node_flag %p node_flag_ptr %p node_flag %p\n",
			parent2_node_flag, parent_node_flag, node_flag_ptr, node_flag);
		iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1)));
		parent2_node_flag = parent_node_flag;
		parent_node_flag = node_flag;
		parent_node_flag_ptr = node_flag_ptr;
		node_flag = ja_node_get_nth(node_flag,
			&node_flag_ptr,
			iter_key);
	}

	/*
	 * We reached either bottom of tree or internal NULL node,
	 * simply add node to last internal level, or chain it if key is
	 * already present.
	 */
	if (!ja_node_ptr(node_flag)) {
		dbg_printf("cds_ja_add NULL parent2_node_flag %p parent_node_flag %p node_flag_ptr %p node_flag %p\n",
			parent2_node_flag, parent_node_flag, node_flag_ptr, node_flag);

		attach_node_flag = parent_node_flag;
		attach_node_flag_ptr = parent_node_flag_ptr;
		parent_attach_node_flag = parent2_node_flag;

		ret = ja_attach_node(ja, attach_node_flag_ptr,
				attach_node_flag,
				parent_attach_node_flag,
				node_flag_ptr,
				node_flag,
				key, i, node);
	} else {
		if (unique_node_ret) {
			*unique_node_ret = (struct cds_ja_node *) ja_node_ptr(node_flag);
			return -EEXIST;
		}

		dbg_printf("cds_ja_add duplicate parent2_node_flag %p parent_node_flag %p node_flag_ptr %p node_flag %p\n",
			parent2_node_flag, parent_node_flag, node_flag_ptr, node_flag);

		attach_node_flag = node_flag;
		attach_node_flag_ptr = node_flag_ptr;
		parent_attach_node_flag = parent_node_flag;

		ret = ja_chain_node(ja,
			parent_attach_node_flag,
			attach_node_flag_ptr,
			attach_node_flag,
			node);
	}
	if (ret == -EAGAIN || ret == -EEXIST)
		goto retry;

	return ret;
}

int cds_ja_add(struct cds_ja *ja, uint64_t key,
		struct cds_ja_node *node)
{
	return _cds_ja_add(ja, key, node, NULL);
}

struct cds_ja_node *cds_ja_add_unique(struct cds_ja *ja, uint64_t key,
		struct cds_ja_node *node)
{
	int ret;
	struct cds_ja_node *ret_node;

	ret = _cds_ja_add(ja, key, node, &ret_node);
	if (ret == -EEXIST)
		return ret_node;
	else
		return node;
}

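/*
 * Example (illustrative sketch, not part of this library): inserting an
 * item, either allowing duplicate keys (cds_ja_add) or enforcing key
 * uniqueness (cds_ja_add_unique). struct my_item is the type assumed in
 * the lookup example above; as for removal, callers typically perform
 * insertions with the RCU read-side lock held.
 *
 *	static int my_insert_unique(struct cds_ja *ja, struct my_item *item)
 *	{
 *		struct cds_ja_node *ret_node;
 *
 *		// Caller holds rcu_read_lock().
 *		ret_node = cds_ja_add_unique(ja, item->key, &item->node);
 *		if (ret_node != &item->node)
 *			return -EEXIST;	// another item already owns this key
 *		return 0;
 *	}
 *
 *	static int my_insert_duplicate(struct cds_ja *ja, struct my_item *item)
 *	{
 *		// Caller holds rcu_read_lock(). Duplicates for the same key
 *		// are chained and visited with cds_ja_for_each_duplicate_rcu()
 *		// on lookup.
 *		return cds_ja_add(ja, item->key, &item->node);
 *	}
 */
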
/*
 * Note: there is no need to lookup the pointer address associated with
 * each node's nth item after taking the lock: it's already been done by
 * cds_ja_del while holding the rcu read-side lock, and our node rules
 * ensure that when a match value -> pointer is found in a node, it is
 * _NEVER_ changed for that node without recompaction, and recompaction
 * reallocates the node.
 * However, when a child is removed from "linear" nodes, its pointer
 * is set to NULL. We therefore check, while holding the locks, if this
 * pointer is NULL, and return -ENOENT to the caller if it is the case.
 *
 * ja_detach_node() ensures that a lookup will _never_ see a branch that
 * leads to a dead-end: when removing a branch, it makes sure to perform
 * the "cut" at the highest node that has only one child, effectively
 * replacing it with a NULL pointer.
 */
static
int ja_detach_node(struct cds_ja *ja,
		struct cds_ja_inode_flag **snapshot,
		struct cds_ja_inode_flag ***snapshot_ptr,
		uint8_t *snapshot_n,
		int nr_snapshot,
		uint64_t key,
		struct cds_ja_node *node)
{
	struct cds_ja_shadow_node *shadow_nodes[JA_MAX_DEPTH];
	struct cds_ja_inode_flag **node_flag_ptr = NULL,
			*parent_node_flag = NULL,
			**parent_node_flag_ptr = NULL;
	struct cds_ja_inode_flag *iter_node_flag;
	int ret, i, nr_shadow = 0, nr_clear = 0, nr_branch = 0;
	uint8_t n = 0;

	assert(nr_snapshot == ja->tree_depth + 1);

	/*
	 * From the last internal level node going up, get the node
	 * lock, check if the node has only one child left. If it is the
	 * case, we continue iterating upward. When we reach a node
	 * which has more than one child left, we lock the parent, and
	 * proceed to the node deletion (removing its children too).
	 */
	for (i = nr_snapshot - 2; i >= 1; i--) {
		struct cds_ja_shadow_node *shadow_node;

		shadow_node = rcuja_shadow_lookup_lock(ja->ht,
				snapshot[i]);
		if (!shadow_node) {
			ret = -EAGAIN;
			goto end;
		}
		shadow_nodes[nr_shadow++] = shadow_node;

		/*
		 * Check if node has been removed between RCU
		 * lookup and lock acquisition.
		 */
		assert(snapshot_ptr[i + 1]);
		if (ja_node_ptr(*snapshot_ptr[i + 1])
				!= ja_node_ptr(snapshot[i + 1])) {
			ret = -ENOENT;
			goto end;
		}

		assert(shadow_node->nr_child > 0);
		if (shadow_node->nr_child == 1 && i > 1)
			nr_clear++;
		nr_branch++;
		if (shadow_node->nr_child > 1 || i == 1) {
			/* Lock parent and break */
			shadow_node = rcuja_shadow_lookup_lock(ja->ht,
					snapshot[i - 1]);
			if (!shadow_node) {
				ret = -EAGAIN;
				goto end;
			}
			shadow_nodes[nr_shadow++] = shadow_node;

			/*
			 * Check if node has been removed between RCU
			 * lookup and lock acquisition.
			 */
			assert(snapshot_ptr[i]);
			if (ja_node_ptr(*snapshot_ptr[i])
					!= ja_node_ptr(snapshot[i])) {
				ret = -ENOENT;
				goto end;
			}

			node_flag_ptr = snapshot_ptr[i + 1];
			n = snapshot_n[i + 1];
			parent_node_flag_ptr = snapshot_ptr[i];
			parent_node_flag = snapshot[i];

			if (i > 1) {
				/*
				 * Lock parent's parent, in case we need
				 * to recompact parent.
				 */
				shadow_node = rcuja_shadow_lookup_lock(ja->ht,
						snapshot[i - 2]);
				if (!shadow_node) {
					ret = -EAGAIN;
					goto end;
				}
				shadow_nodes[nr_shadow++] = shadow_node;

				/*
				 * Check if node has been removed between RCU
				 * lookup and lock acquisition.
				 */
				assert(snapshot_ptr[i - 1]);
				if (ja_node_ptr(*snapshot_ptr[i - 1])
						!= ja_node_ptr(snapshot[i - 1])) {
					ret = -ENOENT;
					goto end;
				}
			}

			break;
		}
	}

	/*
	 * At this point, we want to delete all nodes that are about to
	 * be removed from shadow_nodes (except the last one, which is
	 * either the root or the parent of the upmost node with 1
	 * child). OK to free lock here, because RCU read lock is held,
	 * and free only performed in call_rcu.
	 */

	for (i = 0; i < nr_clear; i++) {
		ret = rcuja_shadow_clear(ja->ht,
				shadow_nodes[i]->node_flag,
				shadow_nodes[i],
				RCUJA_SHADOW_CLEAR_FREE_NODE
				| RCUJA_SHADOW_CLEAR_FREE_LOCK);
		assert(!ret);
	}

	iter_node_flag = parent_node_flag;
	/* Remove from parent */
	ret = ja_node_clear_ptr(ja,
		node_flag_ptr,		/* Pointer to location to nullify */
		&iter_node_flag,	/* Old new parent ptr in its parent */
		shadow_nodes[nr_branch - 1],	/* of parent */
		n, nr_branch - 1);
	if (ret)
		goto end;

	dbg_printf("ja_detach_node: publish %p instead of %p\n",
		iter_node_flag, *parent_node_flag_ptr);
	/* Update address of parent ptr in its parent */
	rcu_assign_pointer(*parent_node_flag_ptr, iter_node_flag);

end:
	for (i = 0; i < nr_shadow; i++)
		rcuja_shadow_unlock(shadow_nodes[i]);
	return ret;
}

static
int ja_unchain_node(struct cds_ja *ja,
		struct cds_ja_inode_flag *parent_node_flag,
		struct cds_ja_inode_flag **node_flag_ptr,
		struct cds_ja_inode_flag *node_flag,
		struct cds_ja_node *node)
{
	struct cds_ja_shadow_node *shadow_node;
	struct cds_ja_node *iter_node, **iter_node_ptr, **prev_node_ptr = NULL;
	int ret = 0, count = 0, found = 0;

	shadow_node = rcuja_shadow_lookup_lock(ja->ht, parent_node_flag);
	if (!shadow_node)
		return -EAGAIN;
	if (ja_node_ptr(*node_flag_ptr) != ja_node_ptr(node_flag)) {
		ret = -EAGAIN;
		goto end;
	}
	/*
	 * Find the previous node's next pointer pointing to our node,
	 * so we can update it. Retry if another thread removed all but
	 * one of the duplicates since our check (that check was
	 * performed without the lock). Ensure that the node we are
	 * about to remove is still in the list (while holding the
	 * lock). No need for RCU traversal here since we hold the lock
	 * on the parent.
	 */
	iter_node_ptr = (struct cds_ja_node **) node_flag_ptr;
	iter_node = (struct cds_ja_node *) ja_node_ptr(node_flag);
	cds_ja_for_each_duplicate(iter_node) {
		count++;
		if (iter_node == node) {
			prev_node_ptr = iter_node_ptr;
			found++;
		}
		iter_node_ptr = &iter_node->next;
	}
	assert(found <= 1);
	if (!found || count == 1) {
		ret = -EAGAIN;
		goto end;
	}
	CMM_STORE_SHARED(*prev_node_ptr, node->next);
	/*
	 * Validate that we indeed removed the node from the linked list.
	 */
	assert(ja_node_ptr(*node_flag_ptr) != (struct cds_ja_inode *) node);
end:
	rcuja_shadow_unlock(shadow_node);
	return ret;
}

/*
 * Called with RCU read lock held.
 */
int cds_ja_del(struct cds_ja *ja, uint64_t key,
		struct cds_ja_node *node)
{
	unsigned int tree_depth, i;
	struct cds_ja_inode_flag *snapshot[JA_MAX_DEPTH];
	struct cds_ja_inode_flag **snapshot_ptr[JA_MAX_DEPTH];
	uint8_t snapshot_n[JA_MAX_DEPTH];
	struct cds_ja_inode_flag *node_flag;
	struct cds_ja_inode_flag **prev_node_flag_ptr,
		**node_flag_ptr;
	int nr_snapshot;
	int ret;

	if (caa_unlikely(key > ja->key_max))
		return -EINVAL;
	tree_depth = ja->tree_depth;

retry:
	nr_snapshot = 0;
	dbg_printf("cds_ja_del attempt: key %" PRIu64 ", node %p\n",
		key, node);

	/* snapshot for level 0 is only for shadow node lookup */
	snapshot_n[0] = 0;
	snapshot_n[1] = 0;
	snapshot_ptr[nr_snapshot] = NULL;
	snapshot[nr_snapshot++] = (struct cds_ja_inode_flag *) &ja->root;
	node_flag = rcu_dereference(ja->root);
	prev_node_flag_ptr = &ja->root;
	node_flag_ptr = &ja->root;

	/* Iterate on all internal levels */
	for (i = 1; i < tree_depth; i++) {
		uint8_t iter_key;

		dbg_printf("cds_ja_del iter node_flag %p\n",
			node_flag);
		if (!ja_node_ptr(node_flag)) {
			return -ENOENT;
		}
		iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1)));
		snapshot_n[nr_snapshot + 1] = iter_key;
		snapshot_ptr[nr_snapshot] = prev_node_flag_ptr;
		snapshot[nr_snapshot++] = node_flag;
		node_flag = ja_node_get_nth(node_flag,
			&node_flag_ptr,
			iter_key);
		if (node_flag)
			prev_node_flag_ptr = node_flag_ptr;
		dbg_printf("cds_ja_del iter key lookup %u finds node_flag %p, prev_node_flag_ptr %p\n",
			(unsigned int) iter_key, node_flag,
			prev_node_flag_ptr);
	}
	/*
	 * We reached bottom of tree, try to find the node we are trying
	 * to remove. Fail if we cannot find it.
	 */
	if (!ja_node_ptr(node_flag)) {
		dbg_printf("cds_ja_del: no node found for key %" PRIu64 "\n",
			key);
		return -ENOENT;
	} else {
		struct cds_ja_node *iter_node, *match = NULL;
		int count = 0;

		iter_node = (struct cds_ja_node *) ja_node_ptr(node_flag);
		cds_ja_for_each_duplicate_rcu(iter_node) {
			dbg_printf("cds_ja_del: compare %p with iter_node %p\n", node, iter_node);
			if (iter_node == node)
				match = iter_node;
			count++;
		}

		if (!match) {
			dbg_printf("cds_ja_del: no node match for node %p key %" PRIu64 "\n", node, key);
			return -ENOENT;
		}
		assert(count > 0);
		if (count == 1) {
			/*
			 * Removing last of duplicates. Last snapshot
			 * does not have a shadow node (external leaves).
			 */
			snapshot_ptr[nr_snapshot] = prev_node_flag_ptr;
			snapshot[nr_snapshot++] = node_flag;
			ret = ja_detach_node(ja, snapshot, snapshot_ptr,
					snapshot_n, nr_snapshot, key, node);
		} else {
			ret = ja_unchain_node(ja, snapshot[nr_snapshot - 1],
					node_flag_ptr, node_flag, match);
		}
	}
	/*
	 * Explanation of -ENOENT handling: caused by concurrent delete
	 * between RCU lookup and actual removal. Need to re-do the
	 * lookup and removal attempt.
	 */
	if (ret == -EAGAIN || ret == -ENOENT)
		goto retry;
	return ret;
}

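/*
 * Example (illustrative sketch, not part of this library): removing an
 * item and freeing it only after a grace period, since concurrent RCU
 * readers may still hold a pointer returned by cds_ja_lookup(). struct
 * my_item is the type assumed in the lookup example above;
 * synchronize_rcu() comes from the urcu flavor the application is
 * linked against (deferring the free through call_rcu() works as well).
 *
 *	static int my_remove(struct cds_ja *ja, struct my_item *item)
 *	{
 *		int ret;
 *
 *		rcu_read_lock();
 *		ret = cds_ja_del(ja, item->key, &item->node);
 *		rcu_read_unlock();
 *		if (ret)
 *			return ret;	// e.g. -ENOENT
 *		synchronize_rcu();	// wait for readers before reclaiming
 *		free(item);
 *		return 0;
 *	}
 */
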
struct cds_ja *_cds_ja_new(unsigned int key_bits,
		const struct rcu_flavor_struct *flavor)
{
	struct cds_ja *ja;
	int ret;
	struct cds_ja_shadow_node *root_shadow_node;

	ja = calloc(sizeof(*ja), 1);
	if (!ja)
		goto ja_error;

	switch (key_bits) {
	case 8:
	case 16:
	case 24:
	case 32:
	case 40:
	case 48:
	case 56:
		ja->key_max = (1ULL << key_bits) - 1;
		break;
	case 64:
		ja->key_max = UINT64_MAX;
		break;
	default:
		goto check_error;
	}

	/* ja->root is NULL */
	/* tree_depth 0 is for pointer to root node */
	ja->tree_depth = (key_bits >> JA_LOG2_BITS_PER_BYTE) + 1;
	assert(ja->tree_depth <= JA_MAX_DEPTH);
	ja->ht = rcuja_create_ht(flavor);
	if (!ja->ht)
		goto ht_error;

	/*
	 * Note: we should not free this node until judy array destroy.
	 */
	root_shadow_node = rcuja_shadow_set(ja->ht,
			(struct cds_ja_inode_flag *) &ja->root,
			NULL, ja, 0);
	if (!root_shadow_node) {
		ret = -ENOMEM;
		goto ht_node_error;
	}

	return ja;

ht_node_error:
	ret = rcuja_delete_ht(ja->ht);
	assert(!ret);
ht_error:
check_error:
	free(ja);
ja_error:
	return NULL;
}

static
void print_debug_fallback_distribution(struct cds_ja *ja)
{
	int i;

	fprintf(stderr, "Fallback node distribution:\n");
	for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
		if (!ja->node_fallback_count_distribution[i])
			continue;
		fprintf(stderr, " %3u: %4lu\n",
			i, ja->node_fallback_count_distribution[i]);
	}
}

static
int ja_final_checks(struct cds_ja *ja)
{
	double fallback_ratio;
	unsigned long na, nf, nr_fallback;
	int ret = 0;

	fallback_ratio = (double) uatomic_read(&ja->nr_fallback);
	fallback_ratio /= (double) uatomic_read(&ja->nr_nodes_allocated);
	nr_fallback = uatomic_read(&ja->nr_fallback);
	if (nr_fallback)
		fprintf(stderr,
			"[warning] RCU Judy Array used %lu fallback node(s) (ratio: %g)\n",
			uatomic_read(&ja->nr_fallback),
			fallback_ratio);

	na = uatomic_read(&ja->nr_nodes_allocated);
	nf = uatomic_read(&ja->nr_nodes_freed);
	dbg_printf("Nodes allocated: %lu, Nodes freed: %lu.\n", na, nf);
	if (nr_fallback)
		print_debug_fallback_distribution(ja);

	if (na != nf) {
		fprintf(stderr, "[error] Judy array leaked %ld nodes. Allocated: %lu, freed: %lu.\n",
			(long) na - nf, na, nf);
		ret = -1;
	}
	return ret;
}

/*
 * There should be no more concurrent add, delete, nor look-up performed
 * on the Judy array while it is being destroyed (ensured by the
 * caller).
 */
int cds_ja_destroy(struct cds_ja *ja)
{
	const struct rcu_flavor_struct *flavor;
	int ret;

	flavor = cds_lfht_rcu_flavor(ja->ht);
	rcuja_shadow_prune(ja->ht,
		RCUJA_SHADOW_CLEAR_FREE_NODE | RCUJA_SHADOW_CLEAR_FREE_LOCK);
	flavor->thread_offline();
	ret = rcuja_delete_ht(ja->ht);
	if (ret)
		return ret;

	/* Wait for in-flight call_rcu free to complete. */
	flavor->barrier();

	flavor->thread_online();
	ret = ja_final_checks(ja);
	free(ja);
	return ret;
}
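
/*
 * Example (illustrative sketch, not part of this library): creating and
 * destroying a judy array indexed by 64-bit keys. The cds_ja_new()
 * convenience wrapper and the <urcu.h> flavor choice are assumptions
 * made for this example; if no such wrapper is available, call
 * _cds_ja_new() directly with the rcu_flavor_struct of the flavor in
 * use. Before cds_ja_destroy(), all items must have been removed with
 * cds_ja_del() and freed by the caller, and no concurrent accesses may
 * remain.
 *
 *	#include <urcu.h>		// any flavor providing call_rcu()
 *	#include <urcu/rcuja.h>
 *
 *	int my_example(void)
 *	{
 *		struct cds_ja *ja;
 *		int ret;
 *
 *		ja = cds_ja_new(64);	// key_bits: 8, 16, ..., 56 or 64
 *		if (!ja)
 *			return -1;
 *		// ... add, lookup, del under rcu_read_lock() ...
 *		ret = cds_ja_destroy(ja);	// reports leaked nodes, if any
 *		return ret;
 *	}
 */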