/*
 * rcuja/rcuja.c
 *
 * Userspace RCU library - RCU Judy Array
 *
 * Copyright 2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define _LGPL_SOURCE
#include <stdint.h>
#include <stdlib.h>	/* posix_memalign(), free() */
#include <errno.h>
#include <limits.h>
#include <string.h>
#include <urcu/rcuja.h>
#include <urcu/compiler.h>
#include <urcu/arch.h>
#include <assert.h>
#include <urcu-pointer.h>
#include <urcu/uatomic.h>
#include <stdint.h>

#include "rcuja-internal.h"

#ifndef abs
#define abs_int(a)	((int) (a) > 0 ? (int) (a) : -((int) (a)))
#endif

enum cds_ja_type_class {
	RCU_JA_LINEAR = 0,	/* Type A */
			/* 32-bit: 1 to 25 children, 8 to 128 bytes */
			/* 64-bit: 1 to 28 children, 16 to 256 bytes */
	RCU_JA_POOL = 1,	/* Type B */
			/* 32-bit: 26 to 100 children, 256 to 512 bytes */
			/* 64-bit: 29 to 112 children, 512 to 1024 bytes */
	RCU_JA_PIGEON = 2,	/* Type C */
			/* 32-bit: 101 to 256 children, 1024 bytes */
			/* 64-bit: 113 to 256 children, 2048 bytes */
	/* Leaf nodes are implicit from their height in the tree */
	RCU_JA_NR_TYPES,

	RCU_JA_NULL,	/* not an encoded type, but keeps code regular */
};

struct cds_ja_type {
	enum cds_ja_type_class type_class;
	uint16_t min_child;		/* minimum number of children: 1 to 256 */
	uint16_t max_child;		/* maximum number of children: 1 to 256 */
	uint16_t max_linear_child;	/* per-pool max nr. children: 1 to 256 */
	uint16_t order;			/* node size is (1 << order), in bytes */
	uint16_t nr_pool_order;		/* number of pools */
	uint16_t pool_size_order;	/* pool size */
};

/*
 * Iteration on the array to find the right node size for the number of
 * children stops when it reaches .max_child == 256 (this is the largest
 * possible node size, which contains 256 children).
 * The min_child overlaps with the previous max_child to provide
 * hysteresis for reallocation under patterns of cyclic add/removal
 * within the same node.
 * The node index within the following arrays is represented on 3
 * bits. It identifies the node type, min/max number of children, and
 * the size order.
 * The max_child values for the RCU_JA_POOL below result from
 * statistical approximation: over a million populations, the max_child
 * covers between 97% and 99% of the populations generated. Therefore, a
 * fallback should exist to cover the rare extreme population unbalance
 * cases, but it will not have a major impact on speed nor space
 * consumption, since those are rare cases.
 */

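/*
 * Example (illustration only, not part of the implementation): the node
 * type index selects one of the ja_types[] entries below, and resizing
 * conceptually walks this array up or down until the child count fits:
 *
 *	unsigned int type_index = 0;
 *
 *	while (nr_children > ja_types[type_index].max_child)
 *		type_index++;	(stops at .max_child == 256, the pigeon type)
 *
 * The overlapping min_child/max_child ranges provide the hysteresis
 * described above: a node shrinking slightly below the size that
 * triggered its last growth is not immediately reallocated.
 */
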
#if (CAA_BITS_PER_LONG < 64)
/* 32-bit pointers */
enum {
	ja_type_0_max_child = 1,
	ja_type_1_max_child = 3,
	ja_type_2_max_child = 6,
	ja_type_3_max_child = 12,
	ja_type_4_max_child = 25,
	ja_type_5_max_child = 48,
	ja_type_6_max_child = 92,
	ja_type_7_max_child = 256,
	ja_type_8_max_child = 0,	/* NULL */
};

enum {
	ja_type_0_max_linear_child = 1,
	ja_type_1_max_linear_child = 3,
	ja_type_2_max_linear_child = 6,
	ja_type_3_max_linear_child = 12,
	ja_type_4_max_linear_child = 25,
	ja_type_5_max_linear_child = 24,
	ja_type_6_max_linear_child = 23,
};

enum {
	ja_type_5_nr_pool_order = 1,
	ja_type_6_nr_pool_order = 2,
};

const struct cds_ja_type ja_types[] = {
	{ .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_0_max_child, .max_linear_child = ja_type_0_max_linear_child, .order = 3, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_1_max_child, .max_linear_child = ja_type_1_max_linear_child, .order = 4, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 3, .max_child = ja_type_2_max_child, .max_linear_child = ja_type_2_max_linear_child, .order = 5, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 4, .max_child = ja_type_3_max_child, .max_linear_child = ja_type_3_max_linear_child, .order = 6, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 10, .max_child = ja_type_4_max_child, .max_linear_child = ja_type_4_max_linear_child, .order = 7, },

	/* Pools may fill sooner than max_child */
	/* This pool is hardcoded at index 5. See ja_node_ptr(). */
	{ .type_class = RCU_JA_POOL, .min_child = 20, .max_child = ja_type_5_max_child, .max_linear_child = ja_type_5_max_linear_child, .order = 8, .nr_pool_order = ja_type_5_nr_pool_order, .pool_size_order = 7, },
	/* This pool is hardcoded at index 6. See ja_node_ptr(). */
	{ .type_class = RCU_JA_POOL, .min_child = 45, .max_child = ja_type_6_max_child, .max_linear_child = ja_type_6_max_linear_child, .order = 9, .nr_pool_order = ja_type_6_nr_pool_order, .pool_size_order = 7, },

	/*
	 * Upon node removal below min_child, if child pool is filled
	 * beyond capacity, we roll back to pigeon.
	 */
	{ .type_class = RCU_JA_PIGEON, .min_child = 83, .max_child = ja_type_7_max_child, .order = 10, },

	{ .type_class = RCU_JA_NULL, .min_child = 0, .max_child = ja_type_8_max_child, },
};
#else /* !(CAA_BITS_PER_LONG < 64) */
/* 64-bit pointers */
enum {
	ja_type_0_max_child = 1,
	ja_type_1_max_child = 3,
	ja_type_2_max_child = 7,
	ja_type_3_max_child = 14,
	ja_type_4_max_child = 28,
	ja_type_5_max_child = 54,
	ja_type_6_max_child = 104,
	ja_type_7_max_child = 256,
	ja_type_8_max_child = 256,
};

enum {
	ja_type_0_max_linear_child = 1,
	ja_type_1_max_linear_child = 3,
	ja_type_2_max_linear_child = 7,
	ja_type_3_max_linear_child = 14,
	ja_type_4_max_linear_child = 28,
	ja_type_5_max_linear_child = 27,
	ja_type_6_max_linear_child = 26,
};

enum {
	ja_type_5_nr_pool_order = 1,
	ja_type_6_nr_pool_order = 2,
};

const struct cds_ja_type ja_types[] = {
	{ .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_0_max_child, .max_linear_child = ja_type_0_max_linear_child, .order = 4, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_1_max_child, .max_linear_child = ja_type_1_max_linear_child, .order = 5, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 3, .max_child = ja_type_2_max_child, .max_linear_child = ja_type_2_max_linear_child, .order = 6, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 5, .max_child = ja_type_3_max_child, .max_linear_child = ja_type_3_max_linear_child, .order = 7, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 10, .max_child = ja_type_4_max_child, .max_linear_child = ja_type_4_max_linear_child, .order = 8, },

	/* Pools may fill sooner than max_child. */
	/* This pool is hardcoded at index 5. See ja_node_ptr(). */
	{ .type_class = RCU_JA_POOL, .min_child = 22, .max_child = ja_type_5_max_child, .max_linear_child = ja_type_5_max_linear_child, .order = 9, .nr_pool_order = ja_type_5_nr_pool_order, .pool_size_order = 8, },
	/* This pool is hardcoded at index 6. See ja_node_ptr(). */
	{ .type_class = RCU_JA_POOL, .min_child = 51, .max_child = ja_type_6_max_child, .max_linear_child = ja_type_6_max_linear_child, .order = 10, .nr_pool_order = ja_type_6_nr_pool_order, .pool_size_order = 8, },

	/*
	 * Upon node removal below min_child, if child pool is filled
	 * beyond capacity, we roll back to pigeon.
	 */
	{ .type_class = RCU_JA_PIGEON, .min_child = 95, .max_child = ja_type_7_max_child, .order = 11, },

	{ .type_class = RCU_JA_NULL, .min_child = 0, .max_child = ja_type_8_max_child, },
};
#endif /* !(BITS_PER_LONG < 64) */

static inline __attribute__((unused))
void static_array_size_check(void)
{
	CAA_BUILD_BUG_ON(CAA_ARRAY_SIZE(ja_types) < JA_TYPE_MAX_NR);
}

/*
 * The cds_ja_node contains the compressed node data needed for
 * read-side. For linear and pool node configurations, it starts with a
 * byte counting the number of children in the node. Then, the
 * node-specific data is placed.
 * The node mutex, if any is needed, protecting concurrent updates of
 * each node is placed in a separate hash table indexed by node address.
 * For the pigeon configuration, the number of children is also kept in
 * a separate hash table, indexed by node address, because it is only
 * required for updates.
 */

#define DECLARE_LINEAR_NODE(index)						\
	struct {								\
		uint8_t nr_child;						\
		uint8_t child_value[ja_type_## index ##_max_linear_child];	\
		struct cds_ja_inode_flag *child_ptr[ja_type_## index ##_max_linear_child]; \
	}

#define DECLARE_POOL_NODE(index)						\
	struct {								\
		struct {							\
			uint8_t nr_child;					\
			uint8_t child_value[ja_type_## index ##_max_linear_child]; \
			struct cds_ja_inode_flag *child_ptr[ja_type_## index ##_max_linear_child]; \
		} linear[1U << ja_type_## index ##_nr_pool_order];		\
	}

struct cds_ja_inode {
	union {
		/* Linear configuration */
		DECLARE_LINEAR_NODE(0) conf_0;
		DECLARE_LINEAR_NODE(1) conf_1;
		DECLARE_LINEAR_NODE(2) conf_2;
		DECLARE_LINEAR_NODE(3) conf_3;
		DECLARE_LINEAR_NODE(4) conf_4;

		/* Pool configuration */
		DECLARE_POOL_NODE(5) conf_5;
		DECLARE_POOL_NODE(6) conf_6;

		/* Pigeon configuration */
		struct {
			struct cds_ja_inode_flag *child[ja_type_7_max_child];
		} conf_7;
		/* data aliasing nodes for computed accesses */
		uint8_t data[sizeof(struct cds_ja_inode_flag *) * ja_type_7_max_child];
	} u;
};

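/*
 * Access sketch (matching the helpers below): u.data aliases the typed
 * configurations so offsets can be computed at run time. For a linear
 * configuration, u.data[0] is nr_child, u.data[1] starts child_value[],
 * and child_ptr[] begins at the next pointer-aligned offset:
 *
 *	uint8_t *values = &node->u.data[1];
 *	struct cds_ja_inode_flag **pointers = (struct cds_ja_inode_flag **)
 *		align_ptr_size(&values[type->max_linear_child]);
 *
 * A pigeon configuration instead treats u.data as a flat array of 256
 * child pointers indexed directly by the key byte.
 */
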
enum ja_recompact {
	JA_RECOMPACT_ADD_SAME,
	JA_RECOMPACT_ADD_NEXT,
	JA_RECOMPACT_DEL,
};

enum ja_lookup_inequality {
	JA_LOOKUP_BE,	/* below or equal */
	JA_LOOKUP_AE,	/* above or equal */
};

enum ja_direction {
	JA_LEFT,
	JA_RIGHT,
	JA_LEFTMOST,
	JA_RIGHTMOST,
};

static
struct cds_ja_inode *_ja_node_mask_ptr(struct cds_ja_inode_flag *node)
{
	return (struct cds_ja_inode *) (((unsigned long) node) & JA_PTR_MASK);
}

unsigned long ja_node_type(struct cds_ja_inode_flag *node)
{
	unsigned long type;

	if (_ja_node_mask_ptr(node) == NULL) {
		return NODE_INDEX_NULL;
	}
	type = (unsigned int) ((unsigned long) node & JA_TYPE_MASK);
	assert(type < (1UL << JA_TYPE_BITS));
	return type;
}

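/*
 * Sketch of the pointer tagging scheme: nodes are allocated with
 * posix_memalign() on their own size (at least 8 bytes), so the low
 * bits of a node address are zero and can carry the type index.
 * JA_TYPE_MASK, JA_PTR_MASK and ja_node_flag()/ja_node_ptr() come from
 * rcuja-internal.h; conceptually:
 *
 *	flag = (struct cds_ja_inode_flag *)
 *		((unsigned long) node | type_index);	(encode)
 *	node = (struct cds_ja_inode *)
 *		((unsigned long) flag & JA_PTR_MASK);	(decode)
 *
 * Pool nodes additionally encode their bit selection in the tag (see
 * the ja_node_flag_pool_1d/2d calls in ja_node_recompact()).
 */
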
static
struct cds_ja_inode *alloc_cds_ja_node(struct cds_ja *ja,
		const struct cds_ja_type *ja_type)
{
	size_t len = 1U << ja_type->order;
	void *p;
	int ret;

	ret = posix_memalign(&p, len, len);
	if (ret || !p) {
		return NULL;
	}
	memset(p, 0, len);
	uatomic_inc(&ja->nr_nodes_allocated);
	return p;
}

void free_cds_ja_node(struct cds_ja *ja, struct cds_ja_inode *node)
{
	if (node)
		uatomic_inc(&ja->nr_nodes_freed);
	free(node);
}

#define __JA_ALIGN_MASK(v, mask)	(((v) + (mask)) & ~(mask))
#define JA_ALIGN(v, align)		__JA_ALIGN_MASK(v, (typeof(v)) (align) - 1)
#define __JA_FLOOR_MASK(v, mask)	((v) & ~(mask))
#define JA_FLOOR(v, align)		__JA_FLOOR_MASK(v, (typeof(v)) (align) - 1)

static
uint8_t *align_ptr_size(uint8_t *ptr)
{
	return (uint8_t *) JA_ALIGN((unsigned long) ptr, sizeof(void *));
}

static
uint8_t ja_linear_node_get_nr_child(const struct cds_ja_type *type,
		struct cds_ja_inode *node)
{
	assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
	return rcu_dereference(node->u.data[0]);
}

/*
 * The order in which values and pointers are read does not matter: if
 * a value is missing, we return NULL. If a value is there, but its
 * associated pointer is still NULL, we return NULL too.
 */
static
struct cds_ja_inode_flag *ja_linear_node_get_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_inode_flag ***node_flag_ptr,
		uint8_t n)
{
	uint8_t nr_child;
	uint8_t *values;
	struct cds_ja_inode_flag **pointers;
	struct cds_ja_inode_flag *ptr;
	unsigned int i;

	assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);

	nr_child = ja_linear_node_get_nr_child(type, node);
	cmm_smp_rmb();	/* read nr_child before values and pointers */
	assert(nr_child <= type->max_linear_child);
	assert(type->type_class != RCU_JA_LINEAR || nr_child >= type->min_child);

	values = &node->u.data[1];
	for (i = 0; i < nr_child; i++) {
		if (CMM_LOAD_SHARED(values[i]) == n)
			break;
	}
	if (i >= nr_child) {
		if (caa_unlikely(node_flag_ptr))
			*node_flag_ptr = NULL;
		return NULL;
	}
	pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
	ptr = rcu_dereference(pointers[i]);
	if (caa_unlikely(node_flag_ptr))
		*node_flag_ptr = &pointers[i];
	return ptr;
}

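/*
 * Usage sketch for the helper above (key_byte is a hypothetical caller
 * variable holding the 8-bit digit being looked up): a reader that only
 * needs the child passes NULL for node_flag_ptr, while an update path
 * also retrieves the slot address so it can clear or replace it later:
 *
 *	struct cds_ja_inode_flag **slot;
 *	struct cds_ja_inode_flag *child;
 *
 *	child = ja_linear_node_get_nth(type, node, &slot, key_byte);
 *
 * A NULL child can mean either "value absent" or "value present but its
 * pointer was cleared and the node has not been recompacted yet".
 */
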
static
struct cds_ja_inode_flag *ja_linear_node_get_direction(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		int n,
		enum ja_direction dir)
{
	uint8_t nr_child;
	uint8_t *values;
	struct cds_ja_inode_flag **pointers;
	struct cds_ja_inode_flag *ptr;
	unsigned int i;
	int match_idx = -1, match_v;

	assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
	assert(dir == JA_LEFT || dir == JA_RIGHT);

	if (dir == JA_LEFT) {
		match_v = -1;
	} else {
		match_v = JA_ENTRY_PER_NODE;
	}

	nr_child = ja_linear_node_get_nr_child(type, node);
	cmm_smp_rmb();	/* read nr_child before values and pointers */
	assert(nr_child <= type->max_linear_child);
	assert(type->type_class != RCU_JA_LINEAR || nr_child >= type->min_child);

	values = &node->u.data[1];
	for (i = 0; i < nr_child; i++) {
		unsigned int v;

		v = CMM_LOAD_SHARED(values[i]);
		if (dir == JA_LEFT) {
			if ((int) v < n && (int) v > match_v) {
				match_v = v;
				match_idx = i;
			}
		} else {
			if ((int) v > n && (int) v < match_v) {
				match_v = v;
				match_idx = i;
			}
		}
	}

	if (match_idx < 0) {
		return NULL;
	}
	assert(match_v >= 0 && match_v < JA_ENTRY_PER_NODE);

	pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
	ptr = rcu_dereference(pointers[match_idx]);
	return ptr;
}

static
void ja_linear_node_get_ith_pos(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		uint8_t i,
		uint8_t *v,
		struct cds_ja_inode_flag **iter)
{
	uint8_t *values;
	struct cds_ja_inode_flag **pointers;

	assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
	assert(i < ja_linear_node_get_nr_child(type, node));

	values = &node->u.data[1];
	*v = values[i];
	pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
	*iter = pointers[i];
}

static
struct cds_ja_inode_flag *ja_pool_node_get_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_inode_flag *node_flag,
		struct cds_ja_inode_flag ***node_flag_ptr,
		uint8_t n)
{
	struct cds_ja_inode *linear;

	assert(type->type_class == RCU_JA_POOL);

	switch (type->nr_pool_order) {
	case 1:
	{
		unsigned long bitsel, index;

		bitsel = ja_node_pool_1d_bitsel(node_flag);
		assert(bitsel < CHAR_BIT);
		index = ((unsigned long) n >> bitsel) & 0x1;
		linear = (struct cds_ja_inode *) &node->u.data[index << type->pool_size_order];
		break;
	}
	case 2:
	{
		unsigned long bitsel[2], index[2], rindex;

		ja_node_pool_2d_bitsel(node_flag, bitsel);
		assert(bitsel[0] < CHAR_BIT);
		assert(bitsel[1] < CHAR_BIT);
		index[0] = ((unsigned long) n >> bitsel[0]) & 0x1;
		index[0] <<= 1;
		index[1] = ((unsigned long) n >> bitsel[1]) & 0x1;
		rindex = index[0] | index[1];
		linear = (struct cds_ja_inode *) &node->u.data[rindex << type->pool_size_order];
		break;
	}
	default:
		linear = NULL;
		assert(0);
	}
	return ja_linear_node_get_nth(type, linear, node_flag_ptr, n);
}

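/*
 * Worked example (values chosen for illustration): with a 2-pool-order
 * node whose bit selection is bitsel[0] = 5 and bitsel[1] = 1, the
 * child value n = 0x2a (binary 00101010) is dispatched as follows:
 *
 *	index[0] = ((0x2a >> 5) & 1) << 1 = 2
 *	index[1] = (0x2a >> 1) & 1 = 1
 *	rindex   = 2 | 1 = 3
 *
 * so the entry lives in the fourth linear sub-pool, at offset
 * 3 << pool_size_order within u.data.
 */
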
static
struct cds_ja_inode *ja_pool_node_get_ith_pool(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		uint8_t i)
{
	assert(type->type_class == RCU_JA_POOL);
	return (struct cds_ja_inode *)
		&node->u.data[(unsigned int) i << type->pool_size_order];
}

static
struct cds_ja_inode_flag *ja_pool_node_get_direction(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		int n,
		enum ja_direction dir)
{
	unsigned int pool_nr;
	int match_v;
	struct cds_ja_inode_flag *match_node_flag = NULL;

	assert(type->type_class == RCU_JA_POOL);
	assert(dir == JA_LEFT || dir == JA_RIGHT);

	if (dir == JA_LEFT) {
		match_v = -1;
	} else {
		match_v = JA_ENTRY_PER_NODE;
	}

	for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) {
		struct cds_ja_inode *pool =
			ja_pool_node_get_ith_pool(type,
				node, pool_nr);
		uint8_t nr_child =
			ja_linear_node_get_nr_child(type, pool);
		unsigned int j;

		for (j = 0; j < nr_child; j++) {
			struct cds_ja_inode_flag *iter;
			uint8_t v;

			ja_linear_node_get_ith_pos(type, pool,
					j, &v, &iter);
			if (!iter)
				continue;
			if (dir == JA_LEFT) {
				if ((int) v < n && (int) v > match_v) {
					match_v = v;
					match_node_flag = iter;
				}
			} else {
				if ((int) v > n && (int) v < match_v) {
					match_v = v;
					match_node_flag = iter;
				}
			}
		}
	}
	return match_node_flag;
}

static
struct cds_ja_inode_flag *ja_pigeon_node_get_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_inode_flag ***node_flag_ptr,
		uint8_t n)
{
	struct cds_ja_inode_flag **child_node_flag_ptr;
	struct cds_ja_inode_flag *child_node_flag;

	assert(type->type_class == RCU_JA_PIGEON);
	child_node_flag_ptr = &((struct cds_ja_inode_flag **) node->u.data)[n];
	child_node_flag = rcu_dereference(*child_node_flag_ptr);
	dbg_printf("ja_pigeon_node_get_nth child_node_flag_ptr %p\n",
		child_node_flag_ptr);
	if (caa_unlikely(node_flag_ptr))
		*node_flag_ptr = child_node_flag_ptr;
	return child_node_flag;
}

static
struct cds_ja_inode_flag *ja_pigeon_node_get_direction(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		int n,
		enum ja_direction dir)
{
	struct cds_ja_inode_flag **child_node_flag_ptr;
	struct cds_ja_inode_flag *child_node_flag;
	int i;

	assert(type->type_class == RCU_JA_PIGEON);
	assert(dir == JA_LEFT || dir == JA_RIGHT);

	if (dir == JA_LEFT) {
		/* n - 1 is first value left of n */
		for (i = n - 1; i >= 0; i--) {
			child_node_flag_ptr = &((struct cds_ja_inode_flag **) node->u.data)[i];
			child_node_flag = rcu_dereference(*child_node_flag_ptr);
			if (child_node_flag) {
				dbg_printf("ja_pigeon_node_get_left child_node_flag %p\n",
					child_node_flag);
				return child_node_flag;
			}
		}
	} else {
		/* n + 1 is first value right of n */
		for (i = n + 1; i < JA_ENTRY_PER_NODE; i++) {
			child_node_flag_ptr = &((struct cds_ja_inode_flag **) node->u.data)[i];
			child_node_flag = rcu_dereference(*child_node_flag_ptr);
			if (child_node_flag) {
				dbg_printf("ja_pigeon_node_get_right child_node_flag %p\n",
					child_node_flag);
				return child_node_flag;
			}
		}
	}
	return NULL;
}

static
struct cds_ja_inode_flag *ja_pigeon_node_get_ith_pos(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		uint8_t i)
{
	return ja_pigeon_node_get_nth(type, node, NULL, i);
}

/*
 * ja_node_get_nth: get nth item from a node.
 * node_flag is already rcu_dereference'd.
 */
static
struct cds_ja_inode_flag *ja_node_get_nth(struct cds_ja_inode_flag *node_flag,
		struct cds_ja_inode_flag ***node_flag_ptr,
		uint8_t n)
{
	unsigned int type_index;
	struct cds_ja_inode *node;
	const struct cds_ja_type *type;

	node = ja_node_ptr(node_flag);
	assert(node != NULL);
	type_index = ja_node_type(node_flag);
	type = &ja_types[type_index];

	switch (type->type_class) {
	case RCU_JA_LINEAR:
		return ja_linear_node_get_nth(type, node,
				node_flag_ptr, n);
	case RCU_JA_POOL:
		return ja_pool_node_get_nth(type, node, node_flag,
				node_flag_ptr, n);
	case RCU_JA_PIGEON:
		return ja_pigeon_node_get_nth(type, node,
				node_flag_ptr, n);
	default:
		assert(0);
		return (void *) -1UL;
	}
}

static
struct cds_ja_inode_flag *ja_node_get_direction(struct cds_ja_inode_flag *node_flag,
		int n,
		enum ja_direction dir)
{
	unsigned int type_index;
	struct cds_ja_inode *node;
	const struct cds_ja_type *type;

	node = ja_node_ptr(node_flag);
	assert(node != NULL);
	type_index = ja_node_type(node_flag);
	type = &ja_types[type_index];

	switch (type->type_class) {
	case RCU_JA_LINEAR:
		return ja_linear_node_get_direction(type, node, n, dir);
	case RCU_JA_POOL:
		return ja_pool_node_get_direction(type, node, n, dir);
	case RCU_JA_PIGEON:
		return ja_pigeon_node_get_direction(type, node, n, dir);
	default:
		assert(0);
		return (void *) -1UL;
	}
}

static
struct cds_ja_inode_flag *ja_node_get_leftright(struct cds_ja_inode_flag *node_flag,
		unsigned int n,
		enum ja_direction dir)
{
	return ja_node_get_direction(node_flag, n, dir);
}

static
struct cds_ja_inode_flag *ja_node_get_minmax(struct cds_ja_inode_flag *node_flag,
		enum ja_direction dir)
{
	switch (dir) {
	case JA_LEFTMOST:
		return ja_node_get_direction(node_flag,
				-1, JA_RIGHT);
	case JA_RIGHTMOST:
		return ja_node_get_direction(node_flag,
				JA_ENTRY_PER_NODE, JA_LEFT);
	default:
		assert(0);
	}
}

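/*
 * Usage sketch: the below-equal/above-equal lookups are expected to use
 * these helpers to find the nearest populated slot in a node. For
 * instance, the first populated slot strictly smaller than byte n is:
 *
 *	neighbour = ja_node_get_leftright(node_flag, n, JA_LEFT);
 *
 * and the smallest/largest populated slots of a node are:
 *
 *	first = ja_node_get_minmax(node_flag, JA_LEFTMOST);
 *	last = ja_node_get_minmax(node_flag, JA_RIGHTMOST);
 *
 * JA_LEFTMOST is implemented as "first slot right of -1" and
 * JA_RIGHTMOST as "first slot left of JA_ENTRY_PER_NODE".
 */
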
static
int ja_linear_node_set_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_shadow_node *shadow_node,
		uint8_t n,
		struct cds_ja_inode_flag *child_node_flag)
{
	uint8_t nr_child;
	uint8_t *values, *nr_child_ptr;
	struct cds_ja_inode_flag **pointers;
	unsigned int i, unused = 0;

	assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);

	nr_child_ptr = &node->u.data[0];
	dbg_printf("linear set nth: n %u, nr_child_ptr %p\n",
		(unsigned int) n, nr_child_ptr);
	nr_child = *nr_child_ptr;
	assert(nr_child <= type->max_linear_child);

	values = &node->u.data[1];
	pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
	/* Check if node value is already populated */
	for (i = 0; i < nr_child; i++) {
		if (values[i] == n) {
			if (pointers[i])
				return -EEXIST;
			else
				break;
		} else {
			if (!pointers[i])
				unused++;
		}
	}
	if (i == nr_child && nr_child >= type->max_linear_child) {
		if (unused)
			return -ERANGE;	/* recompact node */
		else
			return -ENOSPC;	/* No space left in this node type */
	}

	assert(pointers[i] == NULL);
	rcu_assign_pointer(pointers[i], child_node_flag);
	/* If we expanded the nr_child, increment it */
	if (i == nr_child) {
		CMM_STORE_SHARED(values[nr_child], n);
		/* write pointer and value before nr_child */
		cmm_smp_wmb();
		CMM_STORE_SHARED(*nr_child_ptr, nr_child + 1);
	}
	shadow_node->nr_child++;
	dbg_printf("linear set nth: %u child, shadow: %u child, for node %p shadow %p\n",
		(unsigned int) CMM_LOAD_SHARED(*nr_child_ptr),
		(unsigned int) shadow_node->nr_child,
		node, shadow_node);

	return 0;
}

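/*
 * Concurrency sketch of the publication protocol used above: the child
 * pointer and value are published before nr_child is increased, and
 * readers load nr_child before scanning values[]. The barriers pair up:
 *
 *	writer (this function)			reader (ja_linear_node_get_nth)
 *	rcu_assign_pointer(pointers[i], ...)	nr_child = ..._get_nr_child()
 *	CMM_STORE_SHARED(values[i], n)		cmm_smp_rmb()
 *	cmm_smp_wmb()				CMM_LOAD_SHARED(values[i])
 *	CMM_STORE_SHARED(*nr_child_ptr, ...)	rcu_dereference(pointers[i])
 *
 * so a reader that observes the new nr_child is guaranteed to also
 * observe the freshly written value and child pointer.
 */
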
static
int ja_pool_node_set_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_inode_flag *node_flag,
		struct cds_ja_shadow_node *shadow_node,
		uint8_t n,
		struct cds_ja_inode_flag *child_node_flag)
{
	struct cds_ja_inode *linear;

	assert(type->type_class == RCU_JA_POOL);

	switch (type->nr_pool_order) {
	case 1:
	{
		unsigned long bitsel, index;

		bitsel = ja_node_pool_1d_bitsel(node_flag);
		assert(bitsel < CHAR_BIT);
		index = ((unsigned long) n >> bitsel) & 0x1;
		linear = (struct cds_ja_inode *) &node->u.data[index << type->pool_size_order];
		break;
	}
	case 2:
	{
		unsigned long bitsel[2], index[2], rindex;

		ja_node_pool_2d_bitsel(node_flag, bitsel);
		assert(bitsel[0] < CHAR_BIT);
		assert(bitsel[1] < CHAR_BIT);
		index[0] = ((unsigned long) n >> bitsel[0]) & 0x1;
		index[0] <<= 1;
		index[1] = ((unsigned long) n >> bitsel[1]) & 0x1;
		rindex = index[0] | index[1];
		linear = (struct cds_ja_inode *) &node->u.data[rindex << type->pool_size_order];
		break;
	}
	default:
		linear = NULL;
		assert(0);
	}

	return ja_linear_node_set_nth(type, linear, shadow_node,
			n, child_node_flag);
}

static
int ja_pigeon_node_set_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_shadow_node *shadow_node,
		uint8_t n,
		struct cds_ja_inode_flag *child_node_flag)
{
	struct cds_ja_inode_flag **ptr;

	assert(type->type_class == RCU_JA_PIGEON);
	ptr = &((struct cds_ja_inode_flag **) node->u.data)[n];
	if (*ptr)
		return -EEXIST;
	rcu_assign_pointer(*ptr, child_node_flag);
	shadow_node->nr_child++;
	return 0;
}

/*
 * _ja_node_set_nth: set nth item within a node. Return an error
 * (negative error value) if it is already there.
 */
static
int _ja_node_set_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_inode_flag *node_flag,
		struct cds_ja_shadow_node *shadow_node,
		uint8_t n,
		struct cds_ja_inode_flag *child_node_flag)
{
	switch (type->type_class) {
	case RCU_JA_LINEAR:
		return ja_linear_node_set_nth(type, node, shadow_node, n,
				child_node_flag);
	case RCU_JA_POOL:
		return ja_pool_node_set_nth(type, node, node_flag, shadow_node, n,
				child_node_flag);
	case RCU_JA_PIGEON:
		return ja_pigeon_node_set_nth(type, node, shadow_node, n,
				child_node_flag);
	case RCU_JA_NULL:
		return -ENOSPC;
	default:
		assert(0);
		return -EINVAL;
	}

	return 0;
}

static
int ja_linear_node_clear_ptr(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_shadow_node *shadow_node,
		struct cds_ja_inode_flag **node_flag_ptr)
{
	uint8_t nr_child;
	uint8_t *nr_child_ptr;

	assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);

	nr_child_ptr = &node->u.data[0];
	nr_child = *nr_child_ptr;
	assert(nr_child <= type->max_linear_child);

	if (type->type_class == RCU_JA_LINEAR) {
		assert(!shadow_node->fallback_removal_count);
		if (shadow_node->nr_child <= type->min_child) {
			/* We need to try recompacting the node */
			return -EFBIG;
		}
	}
	dbg_printf("linear clear ptr: nr_child_ptr %p\n", nr_child_ptr);
	assert(*node_flag_ptr != NULL);
	rcu_assign_pointer(*node_flag_ptr, NULL);
	/*
	 * Value and nr_child are never changed (would cause ABA issue).
	 * Instead, we leave the pointer to NULL and recompact the node
	 * once in a while. It is allowed to set a NULL pointer to a new
	 * value without recompaction though.
	 * Only update the shadow node accounting.
	 */
	shadow_node->nr_child--;
	dbg_printf("linear clear ptr: %u child, shadow: %u child, for node %p shadow %p\n",
		(unsigned int) CMM_LOAD_SHARED(*nr_child_ptr),
		(unsigned int) shadow_node->nr_child,
		node, shadow_node);
	return 0;
}

static
int ja_pool_node_clear_ptr(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_inode_flag *node_flag,
		struct cds_ja_shadow_node *shadow_node,
		struct cds_ja_inode_flag **node_flag_ptr,
		uint8_t n)
{
	struct cds_ja_inode *linear;

	assert(type->type_class == RCU_JA_POOL);

	if (shadow_node->fallback_removal_count) {
		shadow_node->fallback_removal_count--;
	} else {
		/* We should try recompacting the node */
		if (shadow_node->nr_child <= type->min_child)
			return -EFBIG;
	}

	switch (type->nr_pool_order) {
	case 1:
	{
		unsigned long bitsel, index;

		bitsel = ja_node_pool_1d_bitsel(node_flag);
		assert(bitsel < CHAR_BIT);
		index = ((unsigned long) n >> bitsel) & 0x1;
		linear = (struct cds_ja_inode *) &node->u.data[index << type->pool_size_order];
		break;
	}
	case 2:
	{
		unsigned long bitsel[2], index[2], rindex;

		ja_node_pool_2d_bitsel(node_flag, bitsel);
		assert(bitsel[0] < CHAR_BIT);
		assert(bitsel[1] < CHAR_BIT);
		index[0] = ((unsigned long) n >> bitsel[0]) & 0x1;
		index[0] <<= 1;
		index[1] = ((unsigned long) n >> bitsel[1]) & 0x1;
		rindex = index[0] | index[1];
		linear = (struct cds_ja_inode *) &node->u.data[rindex << type->pool_size_order];
		break;
	}
	default:
		linear = NULL;
		assert(0);
	}

	return ja_linear_node_clear_ptr(type, linear, shadow_node, node_flag_ptr);
}

static
int ja_pigeon_node_clear_ptr(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_shadow_node *shadow_node,
		struct cds_ja_inode_flag **node_flag_ptr)
{
	assert(type->type_class == RCU_JA_PIGEON);

	if (shadow_node->fallback_removal_count) {
		shadow_node->fallback_removal_count--;
	} else {
		/* We should try recompacting the node */
		if (shadow_node->nr_child <= type->min_child)
			return -EFBIG;
	}
	dbg_printf("ja_pigeon_node_clear_ptr: clearing ptr: %p\n", *node_flag_ptr);
	rcu_assign_pointer(*node_flag_ptr, NULL);
	shadow_node->nr_child--;
	return 0;
}

/*
 * _ja_node_clear_ptr: clear ptr item within a node. Return an error
 * (negative error value) if it is not found (-ENOENT).
 */
static
int _ja_node_clear_ptr(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_inode_flag *node_flag,
		struct cds_ja_shadow_node *shadow_node,
		struct cds_ja_inode_flag **node_flag_ptr,
		uint8_t n)
{
	switch (type->type_class) {
	case RCU_JA_LINEAR:
		return ja_linear_node_clear_ptr(type, node, shadow_node, node_flag_ptr);
	case RCU_JA_POOL:
		return ja_pool_node_clear_ptr(type, node, node_flag, shadow_node, node_flag_ptr, n);
	case RCU_JA_PIGEON:
		return ja_pigeon_node_clear_ptr(type, node, shadow_node, node_flag_ptr);
	case RCU_JA_NULL:
		return -ENOENT;
	default:
		assert(0);
		return -EINVAL;
	}

	return 0;
}

/*
 * Calculate bit distribution. Returns the bit (0 to 7) that splits the
 * distribution into two sub-distributions containing as close as
 * possible to the same number of elements.
 */
static
unsigned int ja_node_sum_distribution_1d(enum ja_recompact mode,
		struct cds_ja *ja,
		unsigned int type_index,
		const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_shadow_node *shadow_node,
		uint8_t n,
		struct cds_ja_inode_flag *child_node_flag,
		struct cds_ja_inode_flag **nullify_node_flag_ptr)
{
	uint8_t nr_one[JA_BITS_PER_BYTE];
	unsigned int bitsel = 0, bit_i, overall_best_distance = UINT_MAX;
	unsigned int distrib_nr_child = 0;

	memset(nr_one, 0, sizeof(nr_one));

	switch (type->type_class) {
	case RCU_JA_LINEAR:
	{
		uint8_t nr_child =
			ja_linear_node_get_nr_child(type, node);
		unsigned int i;

		for (i = 0; i < nr_child; i++) {
			struct cds_ja_inode_flag *iter;
			uint8_t v;

			ja_linear_node_get_ith_pos(type, node, i, &v, &iter);
			if (!iter)
				continue;
			if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
				continue;
			for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
				if (v & (1U << bit_i))
					nr_one[bit_i]++;
			}
			distrib_nr_child++;
		}
		break;
	}
	case RCU_JA_POOL:
	{
		unsigned int pool_nr;

		for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) {
			struct cds_ja_inode *pool =
				ja_pool_node_get_ith_pool(type,
					node, pool_nr);
			uint8_t nr_child =
				ja_linear_node_get_nr_child(type, pool);
			unsigned int j;

			for (j = 0; j < nr_child; j++) {
				struct cds_ja_inode_flag *iter;
				uint8_t v;

				ja_linear_node_get_ith_pos(type, pool,
						j, &v, &iter);
				if (!iter)
					continue;
				if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
					continue;
				for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
					if (v & (1U << bit_i))
						nr_one[bit_i]++;
				}
				distrib_nr_child++;
			}
		}
		break;
	}
	case RCU_JA_PIGEON:
	{
		unsigned int i;

		assert(mode == JA_RECOMPACT_DEL);
		for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
			struct cds_ja_inode_flag *iter;

			iter = ja_pigeon_node_get_ith_pos(type, node, i);
			if (!iter)
				continue;
			if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
				continue;
			for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
				if (i & (1U << bit_i))
					nr_one[bit_i]++;
			}
			distrib_nr_child++;
		}
		break;
	}
	case RCU_JA_NULL:
		assert(mode == JA_RECOMPACT_ADD_NEXT);
		break;
	default:
		assert(0);
		break;
	}

	if (mode == JA_RECOMPACT_ADD_NEXT || mode == JA_RECOMPACT_ADD_SAME) {
		for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
			if (n & (1U << bit_i))
				nr_one[bit_i]++;
		}
		distrib_nr_child++;
	}

	/*
	 * The best bit selector is that for which the number of ones is
	 * closest to half of the number of children in the
	 * distribution. We calculate the distance using the double of
	 * the sub-distribution sizes to eliminate truncation error.
	 */
	for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
		unsigned int distance_to_best;

		distance_to_best = abs_int(((unsigned int) nr_one[bit_i] << 1U) - distrib_nr_child);
		if (distance_to_best < overall_best_distance) {
			overall_best_distance = distance_to_best;
			bitsel = bit_i;
		}
	}
	dbg_printf("1 dimension pool bit selection: (%u)\n", bitsel);
	return bitsel;
}

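/*
 * Worked example (illustrative population): for child values
 * { 0x03, 0x07, 0x43, 0x47 }, bit 6 and bit 2 each have exactly two
 * ones out of four children, so their distance
 * abs_int((2 << 1) - 4) == 0 is optimal and either bit splits the node
 * into two equally filled linear sub-pools. Bit 0, set in all four
 * values, has distance abs_int((4 << 1) - 4) == 4 and would place every
 * child in the same sub-pool.
 */
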
/*
 * Calculate bit distribution in two dimensions. Returns the two bits
 * (each 0 to 7) that split the distribution into four sub-distributions
 * containing as close as possible to the same number of elements.
 */
static
void ja_node_sum_distribution_2d(enum ja_recompact mode,
		struct cds_ja *ja,
		unsigned int type_index,
		const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_shadow_node *shadow_node,
		uint8_t n,
		struct cds_ja_inode_flag *child_node_flag,
		struct cds_ja_inode_flag **nullify_node_flag_ptr,
		unsigned int *_bitsel)
{
	uint8_t nr_2d_11[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE],
		nr_2d_10[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE],
		nr_2d_01[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE],
		nr_2d_00[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE];
	unsigned int bitsel[2] = { 0, 1 };
	unsigned int bit_i, bit_j;
	int overall_best_distance = INT_MAX;
	unsigned int distrib_nr_child = 0;

	memset(nr_2d_11, 0, sizeof(nr_2d_11));
	memset(nr_2d_10, 0, sizeof(nr_2d_10));
	memset(nr_2d_01, 0, sizeof(nr_2d_01));
	memset(nr_2d_00, 0, sizeof(nr_2d_00));

	switch (type->type_class) {
	case RCU_JA_LINEAR:
	{
		uint8_t nr_child =
			ja_linear_node_get_nr_child(type, node);
		unsigned int i;

		for (i = 0; i < nr_child; i++) {
			struct cds_ja_inode_flag *iter;
			uint8_t v;

			ja_linear_node_get_ith_pos(type, node, i, &v, &iter);
			if (!iter)
				continue;
			if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
				continue;
			for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
				for (bit_j = 0; bit_j < bit_i; bit_j++) {
					if (v & (1U << bit_i)) {
						if (v & (1U << bit_j)) {
							nr_2d_11[bit_i][bit_j]++;
						} else {
							nr_2d_10[bit_i][bit_j]++;
						}
					} else {
						if (v & (1U << bit_j)) {
							nr_2d_01[bit_i][bit_j]++;
						} else {
							nr_2d_00[bit_i][bit_j]++;
						}
					}
				}
			}
			distrib_nr_child++;
		}
		break;
	}
	case RCU_JA_POOL:
	{
		unsigned int pool_nr;

		for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) {
			struct cds_ja_inode *pool =
				ja_pool_node_get_ith_pool(type,
					node, pool_nr);
			uint8_t nr_child =
				ja_linear_node_get_nr_child(type, pool);
			unsigned int j;

			for (j = 0; j < nr_child; j++) {
				struct cds_ja_inode_flag *iter;
				uint8_t v;

				ja_linear_node_get_ith_pos(type, pool,
						j, &v, &iter);
				if (!iter)
					continue;
				if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
					continue;
				for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
					for (bit_j = 0; bit_j < bit_i; bit_j++) {
						if (v & (1U << bit_i)) {
							if (v & (1U << bit_j)) {
								nr_2d_11[bit_i][bit_j]++;
							} else {
								nr_2d_10[bit_i][bit_j]++;
							}
						} else {
							if (v & (1U << bit_j)) {
								nr_2d_01[bit_i][bit_j]++;
							} else {
								nr_2d_00[bit_i][bit_j]++;
							}
						}
					}
				}
				distrib_nr_child++;
			}
		}
		break;
	}
	case RCU_JA_PIGEON:
	{
		unsigned int i;

		assert(mode == JA_RECOMPACT_DEL);
		for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
			struct cds_ja_inode_flag *iter;

			iter = ja_pigeon_node_get_ith_pos(type, node, i);
			if (!iter)
				continue;
			if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
				continue;
			for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
				for (bit_j = 0; bit_j < bit_i; bit_j++) {
					if (i & (1U << bit_i)) {
						if (i & (1U << bit_j)) {
							nr_2d_11[bit_i][bit_j]++;
						} else {
							nr_2d_10[bit_i][bit_j]++;
						}
					} else {
						if (i & (1U << bit_j)) {
							nr_2d_01[bit_i][bit_j]++;
						} else {
							nr_2d_00[bit_i][bit_j]++;
						}
					}
				}
			}
			distrib_nr_child++;
		}
		break;
	}
	case RCU_JA_NULL:
		assert(mode == JA_RECOMPACT_ADD_NEXT);
		break;
	default:
		assert(0);
		break;
	}

	if (mode == JA_RECOMPACT_ADD_NEXT || mode == JA_RECOMPACT_ADD_SAME) {
		for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
			for (bit_j = 0; bit_j < bit_i; bit_j++) {
				if (n & (1U << bit_i)) {
					if (n & (1U << bit_j)) {
						nr_2d_11[bit_i][bit_j]++;
					} else {
						nr_2d_10[bit_i][bit_j]++;
					}
				} else {
					if (n & (1U << bit_j)) {
						nr_2d_01[bit_i][bit_j]++;
					} else {
						nr_2d_00[bit_i][bit_j]++;
					}
				}
			}
		}
		distrib_nr_child++;
	}

	/*
	 * The best bit selector is that for which the number of nodes
	 * in each sub-class is closest to one-fourth of the number of
	 * children in the distribution. We calculate the distance using
	 * 4 times the size of the sub-distribution to eliminate
	 * truncation error.
	 */
	for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
		for (bit_j = 0; bit_j < bit_i; bit_j++) {
			int distance_to_best[4];

			distance_to_best[0] = ((unsigned int) nr_2d_11[bit_i][bit_j] << 2U) - distrib_nr_child;
			distance_to_best[1] = ((unsigned int) nr_2d_10[bit_i][bit_j] << 2U) - distrib_nr_child;
			distance_to_best[2] = ((unsigned int) nr_2d_01[bit_i][bit_j] << 2U) - distrib_nr_child;
			distance_to_best[3] = ((unsigned int) nr_2d_00[bit_i][bit_j] << 2U) - distrib_nr_child;

			/* Consider worse distance above best */
			if (distance_to_best[1] > 0 && distance_to_best[1] > distance_to_best[0])
				distance_to_best[0] = distance_to_best[1];
			if (distance_to_best[2] > 0 && distance_to_best[2] > distance_to_best[0])
				distance_to_best[0] = distance_to_best[2];
			if (distance_to_best[3] > 0 && distance_to_best[3] > distance_to_best[0])
				distance_to_best[0] = distance_to_best[3];

			/*
			 * If our worse distance is better than overall,
			 * we become new best candidate.
			 */
			if (distance_to_best[0] < overall_best_distance) {
				overall_best_distance = distance_to_best[0];
				bitsel[0] = bit_i;
				bitsel[1] = bit_j;
			}
		}
	}

	dbg_printf("2 dimensions pool bit selection: (%u,%u)\n", bitsel[0], bitsel[1]);

	/* Return our bit selection */
	_bitsel[0] = bitsel[0];
	_bitsel[1] = bitsel[1];
}

static
unsigned int find_nearest_type_index(unsigned int type_index,
		unsigned int nr_nodes)
{
	const struct cds_ja_type *type;

	assert(type_index != NODE_INDEX_NULL);
	if (nr_nodes == 0)
		return NODE_INDEX_NULL;
	for (;;) {
		type = &ja_types[type_index];
		if (nr_nodes < type->min_child)
			type_index--;
		else if (nr_nodes > type->max_child)
			type_index++;
		else
			break;
	}
	return type_index;
}

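/*
 * Example (using the 64-bit ja_types[] table above): a linear node of
 * type index 4 (max_child = 28) that reaches 29 children moves up to
 * the first pool type (index 5, min_child = 22, max_child = 54).
 * Because min_child of type 5 is well below 28, dropping back to 28 or
 * even 23 children keeps the node at type 5: this is the hysteresis
 * that avoids bouncing between representations on add/remove cycles.
 */
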
/*
 * ja_node_recompact: recompact a node, adding a new child or removing
 * one, depending on the requested mode.
 * Return 0 on success, -EAGAIN if need to retry, or other negative
 * error value otherwise.
 */
1368static
2e313670
MD
1369int ja_node_recompact(enum ja_recompact mode,
1370 struct cds_ja *ja,
e1db2db5 1371 unsigned int old_type_index,
d96bfb0d 1372 const struct cds_ja_type *old_type,
b4540e8a 1373 struct cds_ja_inode *old_node,
5a9a87dd 1374 struct cds_ja_shadow_node *shadow_node,
3d8fe307 1375 struct cds_ja_inode_flag **old_node_flag_ptr, uint8_t n,
af3cbd45 1376 struct cds_ja_inode_flag *child_node_flag,
48cbe001
MD
1377 struct cds_ja_inode_flag **nullify_node_flag_ptr,
1378 int level)
7a0b2331 1379{
e1db2db5 1380 unsigned int new_type_index;
b4540e8a 1381 struct cds_ja_inode *new_node;
af3cbd45 1382 struct cds_ja_shadow_node *new_shadow_node = NULL;
d96bfb0d 1383 const struct cds_ja_type *new_type;
3d8fe307 1384 struct cds_ja_inode_flag *new_node_flag, *old_node_flag;
7a0b2331 1385 int ret;
f07b240f 1386 int fallback = 0;
7a0b2331 1387
3d8fe307
MD
1388 old_node_flag = *old_node_flag_ptr;
1389
48cbe001
MD
1390 /*
1391 * Need to find nearest type index even for ADD_SAME, because
1392 * this recompaction, when applied to linear nodes, will garbage
1393 * collect dummy (NULL) entries, and can therefore cause a few
1394 * linear representations to be skipped.
1395 */
2e313670 1396 switch (mode) {
19ddcd04 1397 case JA_RECOMPACT_ADD_SAME:
48cbe001
MD
1398 new_type_index = find_nearest_type_index(old_type_index,
1399 shadow_node->nr_child + 1);
1400 dbg_printf("Recompact for node with %u children\n",
1401 shadow_node->nr_child + 1);
2e313670 1402 break;
19ddcd04 1403 case JA_RECOMPACT_ADD_NEXT:
2e313670
MD
1404 if (!shadow_node || old_type_index == NODE_INDEX_NULL) {
1405 new_type_index = 0;
48cbe001 1406 dbg_printf("Recompact for NULL\n");
2e313670 1407 } else {
48cbe001
MD
1408 new_type_index = find_nearest_type_index(old_type_index,
1409 shadow_node->nr_child + 1);
1410 dbg_printf("Recompact for node with %u children\n",
1411 shadow_node->nr_child + 1);
2e313670
MD
1412 }
1413 break;
1414 case JA_RECOMPACT_DEL:
48cbe001
MD
1415 new_type_index = find_nearest_type_index(old_type_index,
1416 shadow_node->nr_child - 1);
1417 dbg_printf("Recompact for node with %u children\n",
1418 shadow_node->nr_child - 1);
2e313670
MD
1419 break;
1420 default:
1421 assert(0);
7a0b2331 1422 }
a2a7ff59 1423
f07b240f 1424retry: /* for fallback */
582a6ade
MD
1425 dbg_printf("Recompact from type %d to type %d\n",
1426 old_type_index, new_type_index);
7a0b2331 1427 new_type = &ja_types[new_type_index];
2e313670 1428 if (new_type_index != NODE_INDEX_NULL) {
354981c2 1429 new_node = alloc_cds_ja_node(ja, new_type);
2e313670
MD
1430 if (!new_node)
1431 return -ENOMEM;
b1a90ce3
MD
1432
1433 if (new_type->type_class == RCU_JA_POOL) {
1434 switch (new_type->nr_pool_order) {
1435 case 1:
1436 {
19ddcd04
MD
1437 unsigned int node_distrib_bitsel;
1438
b1a90ce3
MD
1439 node_distrib_bitsel =
1440 ja_node_sum_distribution_1d(mode, ja,
1441 old_type_index, old_type,
1442 old_node, shadow_node,
1443 n, child_node_flag,
1444 nullify_node_flag_ptr);
1445 assert(!((unsigned long) new_node & JA_POOL_1D_MASK));
1446 new_node_flag = ja_node_flag_pool_1d(new_node,
1447 new_type_index, node_distrib_bitsel);
1448 break;
1449 }
1450 case 2:
1451 {
19ddcd04
MD
1452 unsigned int node_distrib_bitsel[2];
1453
1454 ja_node_sum_distribution_2d(mode, ja,
1455 old_type_index, old_type,
1456 old_node, shadow_node,
1457 n, child_node_flag,
1458 nullify_node_flag_ptr,
1459 node_distrib_bitsel);
b1a90ce3
MD
1460 assert(!((unsigned long) new_node & JA_POOL_1D_MASK));
1461 assert(!((unsigned long) new_node & JA_POOL_2D_MASK));
19ddcd04
MD
1462 new_node_flag = ja_node_flag_pool_2d(new_node,
1463 new_type_index, node_distrib_bitsel);
b1a90ce3
MD
1464 break;
1465 }
1466 default:
1467 assert(0);
1468 }
1469 } else {
1470 new_node_flag = ja_node_flag(new_node, new_type_index);
1471 }
1472
2e313670 1473 dbg_printf("Recompact inherit lock from %p\n", shadow_node);
48cbe001 1474 new_shadow_node = rcuja_shadow_set(ja->ht, new_node_flag, shadow_node, ja, level);
2e313670 1475 if (!new_shadow_node) {
354981c2 1476 free_cds_ja_node(ja, new_node);
2e313670
MD
1477 return -ENOMEM;
1478 }
1479 if (fallback)
1480 new_shadow_node->fallback_removal_count =
1481 JA_FALLBACK_REMOVAL_COUNT;
1482 } else {
1483 new_node = NULL;
1484 new_node_flag = NULL;
e1db2db5 1485 }
11c5e016 1486
19ddcd04 1487 assert(mode != JA_RECOMPACT_ADD_NEXT || old_type->type_class != RCU_JA_PIGEON);
2e313670
MD
1488
1489 if (new_type_index == NODE_INDEX_NULL)
1490 goto skip_copy;
1491
11c5e016
MD
1492 switch (old_type->type_class) {
1493 case RCU_JA_LINEAR:
1494 {
1495 uint8_t nr_child =
1496 ja_linear_node_get_nr_child(old_type, old_node);
1497 unsigned int i;
1498
1499 for (i = 0; i < nr_child; i++) {
b4540e8a 1500 struct cds_ja_inode_flag *iter;
11c5e016
MD
1501 uint8_t v;
1502
1503 ja_linear_node_get_ith_pos(old_type, old_node, i, &v, &iter);
1504 if (!iter)
1505 continue;
af3cbd45 1506 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
2e313670 1507 continue;
b1a90ce3 1508 ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
f07b240f 1509 new_shadow_node,
11c5e016 1510 v, iter);
f07b240f
MD
1511 if (new_type->type_class == RCU_JA_POOL && ret) {
1512 goto fallback_toosmall;
1513 }
11c5e016
MD
1514 assert(!ret);
1515 }
1516 break;
1517 }
1518 case RCU_JA_POOL:
1519 {
1520 unsigned int pool_nr;
1521
1522 for (pool_nr = 0; pool_nr < (1U << old_type->nr_pool_order); pool_nr++) {
b4540e8a 1523 struct cds_ja_inode *pool =
11c5e016
MD
1524 ja_pool_node_get_ith_pool(old_type,
1525 old_node, pool_nr);
1526 uint8_t nr_child =
1527 ja_linear_node_get_nr_child(old_type, pool);
1528 unsigned int j;
1529
1530 for (j = 0; j < nr_child; j++) {
b4540e8a 1531 struct cds_ja_inode_flag *iter;
11c5e016
MD
1532 uint8_t v;
1533
1534 ja_linear_node_get_ith_pos(old_type, pool,
1535 j, &v, &iter);
1536 if (!iter)
1537 continue;
af3cbd45 1538 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
2e313670 1539 continue;
b1a90ce3 1540 ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
f07b240f 1541 new_shadow_node,
11c5e016 1542 v, iter);
f07b240f
MD
1543 if (new_type->type_class == RCU_JA_POOL
1544 && ret) {
1545 goto fallback_toosmall;
1546 }
11c5e016
MD
1547 assert(!ret);
1548 }
1549 }
1550 break;
7a0b2331 1551 }
a2a7ff59 1552 case RCU_JA_NULL:
19ddcd04 1553 assert(mode == JA_RECOMPACT_ADD_NEXT);
a2a7ff59 1554 break;
11c5e016 1555 case RCU_JA_PIGEON:
2e313670 1556 {
2e313670
MD
1557 unsigned int i;
1558
1559 assert(mode == JA_RECOMPACT_DEL);
48cbe001 1560 for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
2e313670
MD
1561 struct cds_ja_inode_flag *iter;
1562
1563 iter = ja_pigeon_node_get_ith_pos(old_type, old_node, i);
1564 if (!iter)
1565 continue;
af3cbd45 1566 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
2e313670 1567 continue;
b1a90ce3 1568 ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
2e313670
MD
1569 new_shadow_node,
1570 i, iter);
1571 if (new_type->type_class == RCU_JA_POOL && ret) {
1572 goto fallback_toosmall;
1573 }
1574 assert(!ret);
1575 }
1576 break;
1577 }
11c5e016
MD
1578 default:
1579 assert(0);
5a9a87dd 1580 ret = -EINVAL;
f07b240f 1581 goto end;
11c5e016 1582 }
2e313670 1583skip_copy:
11c5e016 1584
19ddcd04 1585 if (mode == JA_RECOMPACT_ADD_NEXT || mode == JA_RECOMPACT_ADD_SAME) {
2e313670 1586 /* add node */
b1a90ce3 1587 ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
2e313670
MD
1588 new_shadow_node,
1589 n, child_node_flag);
7b413155
MD
1590 if (new_type->type_class == RCU_JA_POOL && ret) {
1591 goto fallback_toosmall;
1592 }
2e313670
MD
1593 assert(!ret);
1594 }
19ddcd04
MD
1595
1596 if (fallback) {
1597 dbg_printf("Using fallback for %u children, node type index: %u, mode %s\n",
1598 new_shadow_node->nr_child, old_type_index, mode == JA_RECOMPACT_ADD_NEXT ? "add_next" :
1599 (mode == JA_RECOMPACT_DEL ? "del" : "add_same"));
354981c2 1600 uatomic_inc(&ja->node_fallback_count_distribution[new_shadow_node->nr_child]);
19ddcd04
MD
1601 }
1602
3d8fe307
MD
1603 /* Return pointer to new recompacted node through old_node_flag_ptr */
1604 *old_node_flag_ptr = new_node_flag;
a2a7ff59 1605 if (old_node) {
2e313670
MD
1606 int flags;
1607
1608 flags = RCUJA_SHADOW_CLEAR_FREE_NODE;
1609 /*
1610 * It is OK to free the lock associated with a node
1611 * going to NULL, since we are holding the parent lock.
1612 * This synchronizes removal with re-add of that node.
1613 */
1614 if (new_type_index == NODE_INDEX_NULL)
48cbe001 1615 flags |= RCUJA_SHADOW_CLEAR_FREE_LOCK;
3d8fe307 1616 ret = rcuja_shadow_clear(ja->ht, old_node_flag, shadow_node,
2e313670 1617 flags);
a2a7ff59
MD
1618 assert(!ret);
1619 }
5a9a87dd
MD
1620
1621 ret = 0;
f07b240f 1622end:
5a9a87dd 1623 return ret;
f07b240f
MD
1624
1625fallback_toosmall:
1626 /* fallback if next pool is too small */
af3cbd45 1627 assert(new_shadow_node);
3d8fe307 1628 ret = rcuja_shadow_clear(ja->ht, new_node_flag, new_shadow_node,
f07b240f
MD
1629 RCUJA_SHADOW_CLEAR_FREE_NODE);
1630 assert(!ret);
1631
19ddcd04
MD
1632 switch (mode) {
1633 case JA_RECOMPACT_ADD_SAME:
1634 /*
1635 * JA_RECOMPACT_ADD_SAME is only triggered if a linear
1636 * node within a pool has unused entries. It should
1637 * therefore _never_ be too small.
1638 */
4a073c53 1639 assert(0);
4cde8267
MD
1640
1641 /* Fall-through */
19ddcd04
MD
1642 case JA_RECOMPACT_ADD_NEXT:
1643 {
1644 const struct cds_ja_type *next_type;
1645
1646 /*
1647 * Recompaction attempt on add failed. Should only
1648 * happen if target node type is pool. Caused by
1649 * hard-to-split distribution. Recompact using the next
1650 * distribution size.
1651 */
1652 assert(new_type->type_class == RCU_JA_POOL);
1653 next_type = &ja_types[new_type_index + 1];
1654 /*
1655 * Try going to the next pool size if our population
1656 * fits within its range. This is not flagged as a
1657 * fallback.
1658 */
1659 if (shadow_node->nr_child + 1 >= next_type->min_child
1660 && shadow_node->nr_child + 1 <= next_type->max_child) {
1661 new_type_index++;
1662 goto retry;
1663 } else {
1664 new_type_index++;
1665 dbg_printf("Add fallback to type %d\n", new_type_index);
1666 uatomic_inc(&ja->nr_fallback);
1667 fallback = 1;
1668 goto retry;
1669 }
1670 break;
1671 }
1672 case JA_RECOMPACT_DEL:
1673 /*
1674 * Recompaction attempt on delete failed. Should only
1675 * happen if target node type is pool. This is caused by
1676 * a hard-to-split distribution. Recompact on same node
1677 * size, but flag current node as "fallback" to ensure
1678 * we don't attempt recompaction before some activity
1679 * has reshuffled our node.
1680 */
1681 assert(new_type->type_class == RCU_JA_POOL);
1682 new_type_index = old_type_index;
1683 dbg_printf("Delete fallback keeping type %d\n", new_type_index);
1684 uatomic_inc(&ja->nr_fallback);
1685 fallback = 1;
1686 goto retry;
1687 default:
1688 assert(0);
1689 return -EINVAL;
1690 }
1691
1692 /*
1693 * Last resort fallback: pigeon.
1694 */
f07b240f
MD
1695 new_type_index = (1UL << JA_TYPE_BITS) - 1;
1696 dbg_printf("Fallback to type %d\n", new_type_index);
1697 uatomic_inc(&ja->nr_fallback);
1698 fallback = 1;
1699 goto retry;
7a0b2331
MD
1700}
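/*
 * Illustrative sketch, not part of rcuja.c: relating the fallback
 * events counted above (uatomic_inc(&ja->nr_fallback)) to the total
 * number of allocated nodes gives an estimate of how often a
 * hard-to-split key distribution forced recompaction into a larger
 * node type. The helper name is hypothetical; the counters are the
 * same ones read by ja_final_checks() at the end of this file.
 */
static inline double ja_debug_fallback_ratio(struct cds_ja *ja)
{
	unsigned long nr_fallback = uatomic_read(&ja->nr_fallback);
	unsigned long nr_allocated = uatomic_read(&ja->nr_nodes_allocated);

	/* Avoid dividing by zero when no node has been allocated yet. */
	return nr_allocated ? (double) nr_fallback / (double) nr_allocated : 0.0;
}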
1701
5a9a87dd 1702/*
2e313670 1703 * Return 0 on success, -EAGAIN if a retry is needed, or another
5a9a87dd
MD
1704 * negative error value otherwise.
1705 */
7a0b2331 1706static
d96bfb0d 1707int ja_node_set_nth(struct cds_ja *ja,
b4540e8a 1708 struct cds_ja_inode_flag **node_flag, uint8_t n,
5a9a87dd 1709 struct cds_ja_inode_flag *child_node_flag,
48cbe001
MD
1710 struct cds_ja_shadow_node *shadow_node,
1711 int level)
7a0b2331
MD
1712{
1713 int ret;
e1db2db5 1714 unsigned int type_index;
d96bfb0d 1715 const struct cds_ja_type *type;
b4540e8a 1716 struct cds_ja_inode *node;
7a0b2331 1717
a2a7ff59
MD
1718 dbg_printf("ja_node_set_nth for n=%u, node %p, shadow %p\n",
1719 (unsigned int) n, ja_node_ptr(*node_flag), shadow_node);
1720
e1db2db5
MD
1721 node = ja_node_ptr(*node_flag);
1722 type_index = ja_node_type(*node_flag);
1723 type = &ja_types[type_index];
b1a90ce3 1724 ret = _ja_node_set_nth(type, node, *node_flag, shadow_node,
e1db2db5 1725 n, child_node_flag);
2e313670
MD
1726 switch (ret) {
1727 case -ENOSPC:
19ddcd04
MD
1728 /* Not enough space in node, need to recompact to next type. */
1729 ret = ja_node_recompact(JA_RECOMPACT_ADD_NEXT, ja, type_index, type, node,
48cbe001 1730 shadow_node, node_flag, n, child_node_flag, NULL, level);
2e313670
MD
1731 break;
1732 case -ERANGE:
1733 /* Node needs to be recompacted. */
19ddcd04 1734 ret = ja_node_recompact(JA_RECOMPACT_ADD_SAME, ja, type_index, type, node,
48cbe001 1735 shadow_node, node_flag, n, child_node_flag, NULL, level);
2e313670
MD
1736 break;
1737 }
1738 return ret;
1739}
1740
1741/*
1742 * Return 0 on success, -EAGAIN if a retry is needed, or another
1743 * negative error value otherwise.
1744 */
1745static
af3cbd45
MD
1746int ja_node_clear_ptr(struct cds_ja *ja,
1747 struct cds_ja_inode_flag **node_flag_ptr, /* Pointer to location to nullify */
1748 struct cds_ja_inode_flag **parent_node_flag_ptr, /* Address of parent ptr in its parent */
1749 struct cds_ja_shadow_node *shadow_node, /* of parent */
48cbe001 1750 uint8_t n, int level)
2e313670
MD
1751{
1752 int ret;
1753 unsigned int type_index;
1754 const struct cds_ja_type *type;
1755 struct cds_ja_inode *node;
1756
af3cbd45
MD
1757 dbg_printf("ja_node_clear_ptr for node %p, shadow %p, target ptr %p\n",
1758 ja_node_ptr(*parent_node_flag_ptr), shadow_node, node_flag_ptr);
2e313670 1759
af3cbd45
MD
1760 node = ja_node_ptr(*parent_node_flag_ptr);
1761 type_index = ja_node_type(*parent_node_flag_ptr);
2e313670 1762 type = &ja_types[type_index];
19ddcd04 1763 ret = _ja_node_clear_ptr(type, node, *parent_node_flag_ptr, shadow_node, node_flag_ptr, n);
2e313670 1764 if (ret == -EFBIG) {
19ddcd04 1765 /* Should try recompaction. */
2e313670 1766 ret = ja_node_recompact(JA_RECOMPACT_DEL, ja, type_index, type, node,
af3cbd45 1767 shadow_node, parent_node_flag_ptr, n, NULL,
48cbe001 1768 node_flag_ptr, level);
7a0b2331
MD
1769 }
1770 return ret;
1771}
be9a7474 1772
03ec1aeb 1773struct cds_ja_node *cds_ja_lookup(struct cds_ja *ja, uint64_t key)
b4540e8a 1774{
41975c12
MD
1775 unsigned int tree_depth, i;
1776 struct cds_ja_inode_flag *node_flag;
1777
1778 if (caa_unlikely(key > ja->key_max))
03ec1aeb 1779 return NULL;
41975c12 1780 tree_depth = ja->tree_depth;
5a9a87dd 1781 node_flag = rcu_dereference(ja->root);
41975c12 1782
5a9a87dd
MD
1783 /* level 0: root node */
1784 if (!ja_node_ptr(node_flag))
03ec1aeb 1785 return NULL;
5a9a87dd
MD
1786
1787 for (i = 1; i < tree_depth; i++) {
79b41067
MD
1788 uint8_t iter_key;
1789
1790 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1)));
48cbe001 1791 node_flag = ja_node_get_nth(node_flag, NULL, iter_key);
582a6ade
MD
1792 dbg_printf("cds_ja_lookup iter key lookup %u finds node_flag %p\n",
1793 (unsigned int) iter_key, node_flag);
41975c12 1794 if (!ja_node_ptr(node_flag))
03ec1aeb 1795 return NULL;
41975c12
MD
1796 }
1797
5a9a87dd 1798 /* Last level lookup succeeded. We got an actual match. */
03ec1aeb 1799 return (struct cds_ja_node *) node_flag;
5a9a87dd
MD
1800}
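/*
 * Illustrative usage sketch, not part of rcuja.c: a caller embeds
 * struct cds_ja_node into its own structure and resolves a key while
 * holding the RCU read-side lock. "struct my_entry" and
 * my_read_payload() are hypothetical names; rcu_read_lock() and
 * rcu_read_unlock() come from the urcu flavor the application uses.
 */
struct my_entry {
	struct cds_ja_node ja_node;	/* embedded judy array node */
	int payload;
};

static int my_read_payload(struct cds_ja *ja, uint64_t key, int *payload)
{
	struct cds_ja_node *node;
	int ret = -1;

	rcu_read_lock();	/* lookups require an RCU read-side C.S. */
	node = cds_ja_lookup(ja, key);
	if (node) {
		struct my_entry *entry;

		entry = caa_container_of(node, struct my_entry, ja_node);
		*payload = entry->payload;	/* use entry while still in the C.S. */
		ret = 0;
	}
	rcu_read_unlock();
	return ret;
}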
1801
b023ba9f
MD
1802static
1803struct cds_ja_node *cds_ja_lookup_inequality(struct cds_ja *ja, uint64_t key,
1804 enum ja_lookup_inequality mode)
291b2543
MD
1805{
1806 int tree_depth, level;
1807 struct cds_ja_inode_flag *node_flag, *cur_node_depth[JA_MAX_DEPTH];
b023ba9f 1808 enum ja_direction dir;
291b2543 1809
b023ba9f
MD
1810 switch (mode) {
1811 case JA_LOOKUP_BE:
1812 if (caa_unlikely(key > ja->key_max || key == 0))
1813 return NULL;
1814 break;
1815 case JA_LOOKUP_AE:
1816 if (caa_unlikely(key >= ja->key_max))
1817 return NULL;
1818 break;
1819 default:
03ec1aeb 1820 return NULL;
b023ba9f 1821 }
291b2543
MD
1822
1823 memset(cur_node_depth, 0, sizeof(cur_node_depth));
1824 tree_depth = ja->tree_depth;
1825 node_flag = rcu_dereference(ja->root);
1826 cur_node_depth[0] = node_flag;
1827
1828 /* level 0: root node */
1829 if (!ja_node_ptr(node_flag))
03ec1aeb 1830 return NULL;
291b2543
MD
1831
1832 for (level = 1; level < tree_depth; level++) {
1833 uint8_t iter_key;
1834
1835 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - level - 1)));
1836 node_flag = ja_node_get_nth(node_flag, NULL, iter_key);
1837 if (!ja_node_ptr(node_flag))
1838 break;
1839 cur_node_depth[level] = node_flag;
b023ba9f 1840 dbg_printf("cds_ja_lookup_inequality iter key lookup %u finds node_flag %p\n",
291b2543
MD
1841 (unsigned int) iter_key, node_flag);
1842 }
1843
1844 if (level == tree_depth) {
1845 /* Last level lookup succeeded. We got an equal match. */
03ec1aeb 1846 return (struct cds_ja_node *) node_flag;
291b2543
MD
1847 }
1848
1849 /*
b023ba9f 1850 * Find highest value left/right of current node.
291b2543 1851 * Current node is cur_node_depth[level].
b023ba9f
MD
1852 * Start at current level. If we cannot find any key left/right
1853 * of ours, go one level up, seek highest value left/right of
1854 * current (recursively), and when we find one, get the
1855 * rightmost/leftmost child of its rightmost/leftmost child
1856 * (recursively).
291b2543 1857 */
b023ba9f
MD
1858 switch (mode) {
1859 case JA_LOOKUP_BE:
1860 dir = JA_LEFT;
1861 break;
1862 case JA_LOOKUP_AE:
1863 dir = JA_RIGHT;
1864 break;
1865 default:
1866 assert(0);
1867 }
291b2543
MD
1868 for (; level > 0; level--) {
1869 uint8_t iter_key;
1870
1871 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - level - 1)));
b023ba9f
MD
1872 node_flag = ja_node_get_leftright(cur_node_depth[level - 1],
1873 iter_key, dir);
1874 /* If a left/right sibling was found, stop here; its rightmost/leftmost child is located below. */
291b2543
MD
1875 if (ja_node_ptr(node_flag))
1876 break;
1877 }
1878
1879 if (!level) {
b023ba9f 1880 /* Reached the root and could not find a left/right sibling. */
03ec1aeb 1881 return NULL;
291b2543
MD
1882 }
1883
1884 level++;
3c52f0f9
MD
1885
1886 /*
4cef6f97 1887 * From this point, we are guaranteed to be able to find a
b023ba9f
MD
1888 * "below than"/"above than" match. ja_attach_node() and
1889 * ja_detach_node() both guarantee that it is not possible for a
1890 * lookup to reach a dead-end.
3c52f0f9
MD
1891 */
1892
b023ba9f
MD
1893 /*
1894 * Find rightmost/leftmost child of rightmost/leftmost child
1895 * (recursively).
1896 */
1897 switch (mode) {
1898 case JA_LOOKUP_BE:
1899 dir = JA_RIGHTMOST;
1900 break;
1901 case JA_LOOKUP_AE:
1902 dir = JA_LEFTMOST;
1903 break;
1904 default:
1905 assert(0);
1906 }
291b2543 1907 for (; level < tree_depth; level++) {
b023ba9f 1908 node_flag = ja_node_get_minmax(node_flag, dir);
291b2543
MD
1909 if (!ja_node_ptr(node_flag))
1910 break;
1911 }
1912
4cef6f97 1913 assert(level == tree_depth);
291b2543 1914
03ec1aeb 1915 return (struct cds_ja_node *) node_flag;
291b2543
MD
1916}
1917
b023ba9f
MD
1918struct cds_ja_node *cds_ja_lookup_below_equal(struct cds_ja *ja, uint64_t key)
1919{
1920 return cds_ja_lookup_inequality(ja, key, JA_LOOKUP_BE);
1921}
1922
1923struct cds_ja_node *cds_ja_lookup_above_equal(struct cds_ja *ja, uint64_t key)
1924{
1925 return cds_ja_lookup_inequality(ja, key, JA_LOOKUP_AE);
1926}
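/*
 * Illustrative sketch, not part of rcuja.c: semantics of the two
 * inequality lookups above, assuming a judy array populated with keys
 * { 10, 20, 30 } (hypothetical data set and function name).
 */
static void my_inequality_example(struct cds_ja *ja)
{
	struct cds_ja_node *node;

	rcu_read_lock();
	node = cds_ja_lookup_below_equal(ja, 25);	/* node stored at key 20 */
	node = cds_ja_lookup_above_equal(ja, 25);	/* node stored at key 30 */
	node = cds_ja_lookup_below_equal(ja, 20);	/* exact match: key 20 */
	node = cds_ja_lookup_below_equal(ja, 5);	/* NULL: no key <= 5 */
	(void) node;
	rcu_read_unlock();
}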
1927
5a9a87dd
MD
1928/*
1929 * We reached an unpopulated node. Create it and the children we need,
1930 * and then attach the entire branch to the current node. This may
1931 * trigger recompaction of the current node. Locks needed: node lock
1932 * (for add), and, possibly, parent node lock (to update pointer due to
1933 * node recompaction).
1934 *
1935 * First take node lock, check if recompaction is needed, then take
1936 * parent lock (if needed). Then we can proceed to create the new
1937 * branch. Publish the new branch, and release locks.
1938 * TODO: we currently always take the parent lock even when not needed.
47d2eab3
MD
1939 *
1940 * ja_attach_node() ensures that a lookup will _never_ see a branch that
1941 * leads to a dead-end: before attaching a branch, the entire content of
1942 * the new branch is populated, thus creating a cluster, before
1943 * attaching the cluster to the rest of the tree, thus making it visible
1944 * to lookups.
5a9a87dd
MD
1945 */
1946static
1947int ja_attach_node(struct cds_ja *ja,
b0ca2d21 1948 struct cds_ja_inode_flag **attach_node_flag_ptr,
b62a8d0c 1949 struct cds_ja_inode_flag *attach_node_flag,
48cbe001
MD
1950 struct cds_ja_inode_flag *parent_attach_node_flag,
1951 struct cds_ja_inode_flag **old_node_flag_ptr,
1952 struct cds_ja_inode_flag *old_node_flag,
5a9a87dd 1953 uint64_t key,
79b41067 1954 unsigned int level,
5a9a87dd
MD
1955 struct cds_ja_node *child_node)
1956{
1957 struct cds_ja_shadow_node *shadow_node = NULL,
af3cbd45 1958 *parent_shadow_node = NULL;
5a9a87dd
MD
1959 struct cds_ja_inode_flag *iter_node_flag, *iter_dest_node_flag;
1960 int ret, i;
a2a7ff59 1961 struct cds_ja_inode_flag *created_nodes[JA_MAX_DEPTH];
5a9a87dd
MD
1962 int nr_created_nodes = 0;
1963
48cbe001
MD
1964 dbg_printf("Attach node at level %u (old_node_flag %p, attach_node_flag_ptr %p attach_node_flag %p, parent_attach_node_flag %p)\n",
1965 level, old_node_flag, attach_node_flag_ptr, attach_node_flag, parent_attach_node_flag);
a2a7ff59 1966
48cbe001
MD
1967 assert(!old_node_flag);
1968 if (attach_node_flag) {
1969 shadow_node = rcuja_shadow_lookup_lock(ja->ht, attach_node_flag);
1970 if (!shadow_node) {
1971 ret = -EAGAIN;
1972 goto end;
1973 }
5a9a87dd 1974 }
48cbe001 1975 if (parent_attach_node_flag) {
5a9a87dd 1976 parent_shadow_node = rcuja_shadow_lookup_lock(ja->ht,
48cbe001 1977 parent_attach_node_flag);
5a9a87dd 1978 if (!parent_shadow_node) {
2e313670 1979 ret = -EAGAIN;
5a9a87dd
MD
1980 goto unlock_shadow;
1981 }
1982 }
1983
48cbe001 1984 if (old_node_flag_ptr && ja_node_ptr(*old_node_flag_ptr)) {
b306a0fe 1985 /*
c112acaa
MD
1986 * Target node has been updated between RCU lookup and
1987 * lock acquisition. We need to re-try lookup and
1988 * attach.
1989 */
1990 ret = -EAGAIN;
1991 goto unlock_parent;
1992 }
1993
9be99d4a
MD
1994 /*
1995 * Perform a lookup query to handle the case where
1996 * old_node_flag_ptr is NULL. We cannot use it to check if the
1997 * node has been populated between RCU lookup and mutex
1998 * acquisition.
1999 */
2000 if (!old_node_flag_ptr) {
2001 uint8_t iter_key;
2002 struct cds_ja_inode_flag *lookup_node_flag;
2003 struct cds_ja_inode_flag **lookup_node_flag_ptr;
2004
2005 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (ja->tree_depth - level)));
2006 lookup_node_flag = ja_node_get_nth(attach_node_flag,
2007 &lookup_node_flag_ptr,
2008 iter_key);
2009 if (lookup_node_flag) {
2010 ret = -EEXIST;
2011 goto unlock_parent;
2012 }
2013 }
2014
c112acaa 2015 if (attach_node_flag_ptr && ja_node_ptr(*attach_node_flag_ptr) !=
b62a8d0c 2016 ja_node_ptr(attach_node_flag)) {
c112acaa
MD
2017 /*
2018 * Target node has been updated between RCU lookup and
2019 * lock acquisition. We need to re-try lookup and
2020 * attach.
b306a0fe
MD
2021 */
2022 ret = -EAGAIN;
2023 goto unlock_parent;
2024 }
2025
a2a7ff59 2026 /* Create new branch, starting from bottom */
03ec1aeb 2027 iter_node_flag = (struct cds_ja_inode_flag *) child_node;
5a9a87dd 2028
48cbe001 2029 for (i = ja->tree_depth - 1; i >= (int) level; i--) {
79b41067
MD
2030 uint8_t iter_key;
2031
48cbe001 2032 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (ja->tree_depth - i - 1)));
79b41067 2033 dbg_printf("branch creation level %d, key %u\n",
48cbe001 2034 i, (unsigned int) iter_key);
5a9a87dd
MD
2035 iter_dest_node_flag = NULL;
2036 ret = ja_node_set_nth(ja, &iter_dest_node_flag,
79b41067 2037 iter_key,
5a9a87dd 2038 iter_node_flag,
48cbe001 2039 NULL, i);
9be99d4a
MD
2040 if (ret) {
2041 dbg_printf("branch creation error %d\n", ret);
5a9a87dd 2042 goto check_error;
9be99d4a 2043 }
5a9a87dd
MD
2044 created_nodes[nr_created_nodes++] = iter_dest_node_flag;
2045 iter_node_flag = iter_dest_node_flag;
2046 }
48cbe001 2047 assert(level > 0);
5a9a87dd 2048
48cbe001
MD
2049 /* Publish branch */
2050 if (level == 1) {
2051 /*
2052 * Attaching to root node.
2053 */
2054 rcu_assign_pointer(ja->root, iter_node_flag);
2055 } else {
79b41067
MD
2056 uint8_t iter_key;
2057
2058 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (ja->tree_depth - level)));
48cbe001
MD
2059 dbg_printf("publish branch at level %d, key %u\n",
2060 level - 1, (unsigned int) iter_key);
a2a7ff59 2061 /* We need to use set_nth on the previous level. */
48cbe001 2062 iter_dest_node_flag = attach_node_flag;
a2a7ff59 2063 ret = ja_node_set_nth(ja, &iter_dest_node_flag,
79b41067 2064 iter_key,
a2a7ff59 2065 iter_node_flag,
48cbe001 2066 shadow_node, level - 1);
9be99d4a
MD
2067 if (ret) {
2068 dbg_printf("branch publish error %d\n", ret);
a2a7ff59 2069 goto check_error;
9be99d4a 2070 }
48cbe001
MD
2071 /*
2072 * Attach branch
2073 */
2074 rcu_assign_pointer(*attach_node_flag_ptr, iter_dest_node_flag);
a2a7ff59
MD
2075 }
2076
5a9a87dd
MD
2077 /* Success */
2078 ret = 0;
2079
2080check_error:
2081 if (ret) {
2082 for (i = 0; i < nr_created_nodes; i++) {
2083 int tmpret;
a2a7ff59
MD
2084 int flags;
2085
2086 flags = RCUJA_SHADOW_CLEAR_FREE_LOCK;
2087 if (i)
2088 flags |= RCUJA_SHADOW_CLEAR_FREE_NODE;
5a9a87dd 2089 tmpret = rcuja_shadow_clear(ja->ht,
3d8fe307 2090 created_nodes[i],
a2a7ff59
MD
2091 NULL,
2092 flags);
5a9a87dd
MD
2093 assert(!tmpret);
2094 }
2095 }
b306a0fe 2096unlock_parent:
5a9a87dd
MD
2097 if (parent_shadow_node)
2098 rcuja_shadow_unlock(parent_shadow_node);
2099unlock_shadow:
2100 if (shadow_node)
2101 rcuja_shadow_unlock(shadow_node);
2102end:
2103 return ret;
2104}
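/*
 * Illustrative sketch, not part of rcuja.c: the publication discipline
 * used by ja_attach_node() above, reduced to its core idiom. The new
 * branch is fully built while still private to the updater, then a
 * single rcu_assign_pointer() makes it reachable, so a concurrent RCU
 * lookup observes either the previous state or the complete new
 * branch, never a partially populated one. Names are hypothetical.
 */
static void my_publish_branch(struct cds_ja_inode_flag **slot,
		struct cds_ja_inode_flag *new_branch)
{
	/*
	 * ... new_branch and all of its children are assumed to be
	 * fully initialized here, before publication ...
	 */
	rcu_assign_pointer(*slot, new_branch);	/* single publication point */
}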
2105
2106/*
03ec1aeb
MD
2107 * Lock the parent containing the pointer to list of duplicates, and add
2108 * node to this list. Failure can happen if concurrent update changes
2109 * the parent before we get the lock. We return -EAGAIN in that case.
5a9a87dd
MD
2110 * Return 0 on success, negative error value on failure.
2111 */
2112static
2113int ja_chain_node(struct cds_ja *ja,
af3cbd45 2114 struct cds_ja_inode_flag *parent_node_flag,
fa112799 2115 struct cds_ja_inode_flag **node_flag_ptr,
c112acaa 2116 struct cds_ja_inode_flag *node_flag,
5a9a87dd
MD
2117 struct cds_ja_node *node)
2118{
2119 struct cds_ja_shadow_node *shadow_node;
fa112799 2120 int ret = 0;
5a9a87dd 2121
3d8fe307 2122 shadow_node = rcuja_shadow_lookup_lock(ja->ht, parent_node_flag);
b306a0fe 2123 if (!shadow_node) {
2e313670 2124 return -EAGAIN;
b306a0fe 2125 }
c112acaa 2126 if (ja_node_ptr(*node_flag_ptr) != ja_node_ptr(node_flag)) {
fa112799
MD
2127 ret = -EAGAIN;
2128 goto end;
2129 }
03ec1aeb
MD
2130 /*
2131 * Add node to head of list. Safe against concurrent RCU read
2132 * traversals.
2133 */
2134 node->next = (struct cds_ja_node *) node_flag;
2135 rcu_assign_pointer(*node_flag_ptr, (struct cds_ja_inode_flag *) node);
fa112799 2136end:
5a9a87dd 2137 rcuja_shadow_unlock(shadow_node);
fa112799 2138 return ret;
5a9a87dd
MD
2139}
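/*
 * Illustrative sketch, not part of rcuja.c: since ja_chain_node() above
 * only inserts at the head of the duplicate list with
 * rcu_assign_pointer(), a reader can walk every node sharing a key
 * with cds_ja_for_each_duplicate_rcu() under the RCU read-side lock,
 * mirroring the traversal done in cds_ja_del() below. Reuses the
 * hypothetical struct my_entry from the earlier lookup sketch.
 */
static void my_visit_duplicates(struct cds_ja *ja, uint64_t key)
{
	struct cds_ja_node *iter;

	rcu_read_lock();
	iter = cds_ja_lookup(ja, key);	/* head of the duplicate list, or NULL */
	if (iter) {
		cds_ja_for_each_duplicate_rcu(iter) {
			struct my_entry *entry;

			entry = caa_container_of(iter, struct my_entry, ja_node);
			(void) entry;	/* process entry here */
		}
	}
	rcu_read_unlock();
}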
2140
75d573aa
MD
2141static
2142int _cds_ja_add(struct cds_ja *ja, uint64_t key,
6475613c 2143 struct cds_ja_node *node,
75d573aa 2144 struct cds_ja_node **unique_node_ret)
5a9a87dd
MD
2145{
2146 unsigned int tree_depth, i;
48cbe001 2147 struct cds_ja_inode_flag *attach_node_flag,
5a9a87dd 2148 *parent_node_flag,
b62a8d0c 2149 *parent2_node_flag,
48cbe001
MD
2150 *node_flag,
2151 *parent_attach_node_flag;
2152 struct cds_ja_inode_flag **attach_node_flag_ptr,
2153 **parent_node_flag_ptr,
2154 **node_flag_ptr;
5a9a87dd
MD
2155 int ret;
2156
b306a0fe 2157 if (caa_unlikely(key > ja->key_max)) {
5a9a87dd 2158 return -EINVAL;
b306a0fe 2159 }
5a9a87dd
MD
2160 tree_depth = ja->tree_depth;
2161
2162retry:
a2a7ff59 2163 dbg_printf("cds_ja_add attempt: key %" PRIu64 ", node %p\n",
6475613c 2164 key, node);
5a9a87dd 2165 parent2_node_flag = NULL;
b0f74e47
MD
2166 parent_node_flag =
2167 (struct cds_ja_inode_flag *) &ja->root; /* Use root ptr address as key for mutex */
48cbe001 2168 parent_node_flag_ptr = NULL;
35170a44 2169 node_flag = rcu_dereference(ja->root);
48cbe001 2170 node_flag_ptr = &ja->root;
5a9a87dd
MD
2171
2172 /* Iterate on all internal levels */
a2a7ff59 2173 for (i = 1; i < tree_depth; i++) {
79b41067
MD
2174 uint8_t iter_key;
2175
48cbe001
MD
2176 if (!ja_node_ptr(node_flag))
2177 break;
2178 dbg_printf("cds_ja_add iter parent2_node_flag %p parent_node_flag %p node_flag_ptr %p node_flag %p\n",
2179 parent2_node_flag, parent_node_flag, node_flag_ptr, node_flag);
79b41067 2180 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1)));
5a9a87dd
MD
2181 parent2_node_flag = parent_node_flag;
2182 parent_node_flag = node_flag;
48cbe001 2183 parent_node_flag_ptr = node_flag_ptr;
5a9a87dd
MD
2184 node_flag = ja_node_get_nth(node_flag,
2185 &node_flag_ptr,
79b41067 2186 iter_key);
5a9a87dd
MD
2187 }
2188
2189 /*
48cbe001
MD
2190 * We reached either the bottom of the tree or an internal NULL
2191 * node: simply add the node to the last internal level, or chain
2192 * it if the key is already present.
5a9a87dd
MD
2193 */
2194 if (!ja_node_ptr(node_flag)) {
48cbe001
MD
2195 dbg_printf("cds_ja_add NULL parent2_node_flag %p parent_node_flag %p node_flag_ptr %p node_flag %p\n",
2196 parent2_node_flag, parent_node_flag, node_flag_ptr, node_flag);
75d573aa 2197
48cbe001
MD
2198 attach_node_flag = parent_node_flag;
2199 attach_node_flag_ptr = parent_node_flag_ptr;
2200 parent_attach_node_flag = parent2_node_flag;
2201
b0ca2d21 2202 ret = ja_attach_node(ja, attach_node_flag_ptr,
b62a8d0c 2203 attach_node_flag,
48cbe001
MD
2204 parent_attach_node_flag,
2205 node_flag_ptr,
2206 node_flag,
6475613c 2207 key, i, node);
5a9a87dd 2208 } else {
75d573aa
MD
2209 if (unique_node_ret) {
2210 *unique_node_ret = (struct cds_ja_node *) ja_node_ptr(node_flag);
2211 return -EEXIST;
2212 }
2213
48cbe001
MD
2214 dbg_printf("cds_ja_add duplicate parent2_node_flag %p parent_node_flag %p node_flag_ptr %p node_flag %p\n",
2215 parent2_node_flag, parent_node_flag, node_flag_ptr, node_flag);
75d573aa 2216
48cbe001
MD
2217 attach_node_flag = node_flag;
2218 attach_node_flag_ptr = node_flag_ptr;
2219 parent_attach_node_flag = parent_node_flag;
2220
5a9a87dd 2221 ret = ja_chain_node(ja,
48cbe001
MD
2222 parent_attach_node_flag,
2223 attach_node_flag_ptr,
2224 attach_node_flag,
6475613c 2225 node);
5a9a87dd 2226 }
b306a0fe 2227 if (ret == -EAGAIN || ret == -EEXIST)
5a9a87dd 2228 goto retry;
48cbe001 2229
5a9a87dd 2230 return ret;
b4540e8a
MD
2231}
2232
75d573aa 2233int cds_ja_add(struct cds_ja *ja, uint64_t key,
6475613c 2234 struct cds_ja_node *node)
75d573aa 2235{
6475613c 2236 return _cds_ja_add(ja, key, node, NULL);
75d573aa
MD
2237}
2238
2239struct cds_ja_node *cds_ja_add_unique(struct cds_ja *ja, uint64_t key,
6475613c 2240 struct cds_ja_node *node)
75d573aa
MD
2241{
2242 int ret;
2243 struct cds_ja_node *ret_node;
2244
6475613c 2245 ret = _cds_ja_add(ja, key, node, &ret_node);
75d573aa
MD
2246 if (ret == -EEXIST)
2247 return ret_node;
2248 else
6475613c 2249 return node;
75d573aa
MD
2250}
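/*
 * Illustrative usage sketch, not part of rcuja.c: insert-if-absent with
 * cds_ja_add_unique(). The call returns the node that was passed in
 * when insertion succeeded, or the node already present for that key
 * otherwise, letting the caller detect duplicates and release its own
 * allocation. Hypothetical names; assumes calls are made from a
 * registered RCU thread within a read-side critical section.
 */
static struct my_entry *my_add_unique(struct cds_ja *ja, uint64_t key,
		struct my_entry *entry)
{
	struct cds_ja_node *ret_node;

	rcu_read_lock();
	ret_node = cds_ja_add_unique(ja, key, &entry->ja_node);
	rcu_read_unlock();
	if (ret_node != &entry->ja_node) {
		/* Key already present: our entry was never published, free it. */
		free(entry);
		return caa_container_of(ret_node, struct my_entry, ja_node);
	}
	return entry;
}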
2251
af3cbd45
MD
2252/*
2253 * Note: there is no need to lookup the pointer address associated with
2254 * each node's nth item after taking the lock: it's already been done by
2255 * cds_ja_del while holding the rcu read-side lock, and our node rules
2256 * ensure that when a match value -> pointer is found in a node, it is
2257 * _NEVER_ changed for that node without recompaction, and recompaction
2258 * reallocates the node.
b306a0fe
MD
2259 * However, when a child is removed from "linear" nodes, its pointer
2260 * is set to NULL. We therefore check, while holding the locks, if this
2261 * pointer is NULL, and return -ENOENT to the caller if it is the case.
4cef6f97
MD
2262 *
2263 * ja_detach_node() ensures that a lookup will _never_ see a branch that
2264 * leads to a dead-end: when removing branch, it makes sure to perform
2265 * the "cut" at the highest node that has only one child, effectively
2266 * replacing it with a NULL pointer.
af3cbd45 2267 */
35170a44
MD
2268static
2269int ja_detach_node(struct cds_ja *ja,
2270 struct cds_ja_inode_flag **snapshot,
af3cbd45
MD
2271 struct cds_ja_inode_flag ***snapshot_ptr,
2272 uint8_t *snapshot_n,
35170a44
MD
2273 int nr_snapshot,
2274 uint64_t key,
2275 struct cds_ja_node *node)
2276{
af3cbd45
MD
2277 struct cds_ja_shadow_node *shadow_nodes[JA_MAX_DEPTH];
2278 struct cds_ja_inode_flag **node_flag_ptr = NULL,
2279 *parent_node_flag = NULL,
2280 **parent_node_flag_ptr = NULL;
b62a8d0c 2281 struct cds_ja_inode_flag *iter_node_flag;
4d6ef45e
MD
2282 int ret, i, nr_shadow = 0, nr_clear = 0, nr_branch = 0;
2283 uint8_t n = 0;
35170a44 2284
4d6ef45e 2285 assert(nr_snapshot == ja->tree_depth + 1);
35170a44 2286
af3cbd45
MD
2287 /*
2288 * From the last internal level node going up, get the node
2289 * lock, check if the node has only one child left. If it is the
2290 * case, we continue iterating upward. When we reach a node
2291 * which has more than one child left, we lock the parent, and
2292 * proceed to the node deletion (removing its children too).
2293 */
4d6ef45e 2294 for (i = nr_snapshot - 2; i >= 1; i--) {
af3cbd45
MD
2295 struct cds_ja_shadow_node *shadow_node;
2296
2297 shadow_node = rcuja_shadow_lookup_lock(ja->ht,
3d8fe307 2298 snapshot[i]);
af3cbd45
MD
2299 if (!shadow_node) {
2300 ret = -EAGAIN;
2301 goto end;
2302 }
af3cbd45 2303 shadow_nodes[nr_shadow++] = shadow_node;
b62a8d0c
MD
2304
2305 /*
2306 * Check if node has been removed between RCU
2307 * lookup and lock acquisition.
2308 */
2309 assert(snapshot_ptr[i + 1]);
2310 if (ja_node_ptr(*snapshot_ptr[i + 1])
2311 != ja_node_ptr(snapshot[i + 1])) {
2312 ret = -ENOENT;
2313 goto end;
2314 }
2315
2316 assert(shadow_node->nr_child > 0);
d810c97f 2317 if (shadow_node->nr_child == 1 && i > 1)
4d6ef45e
MD
2318 nr_clear++;
2319 nr_branch++;
af3cbd45
MD
2320 if (shadow_node->nr_child > 1 || i == 1) {
2321 /* Lock parent and break */
2322 shadow_node = rcuja_shadow_lookup_lock(ja->ht,
3d8fe307 2323 snapshot[i - 1]);
af3cbd45
MD
2324 if (!shadow_node) {
2325 ret = -EAGAIN;
2326 goto end;
2327 }
2328 shadow_nodes[nr_shadow++] = shadow_node;
b62a8d0c 2329
c112acaa
MD
2330 /*
2331 * Check if node has been removed between RCU
2332 * lookup and lock acquisition.
2333 */
b62a8d0c
MD
2334 assert(snapshot_ptr[i]);
2335 if (ja_node_ptr(*snapshot_ptr[i])
2336 != ja_node_ptr(snapshot[i])) {
c112acaa
MD
2337 ret = -ENOENT;
2338 goto end;
2339 }
2340
b62a8d0c 2341 node_flag_ptr = snapshot_ptr[i + 1];
4d6ef45e
MD
2342 n = snapshot_n[i + 1];
2343 parent_node_flag_ptr = snapshot_ptr[i];
2344 parent_node_flag = snapshot[i];
c112acaa 2345
af3cbd45
MD
2346 if (i > 1) {
2347 /*
2348 * Lock parent's parent, in case we need
2349 * to recompact parent.
2350 */
2351 shadow_node = rcuja_shadow_lookup_lock(ja->ht,
3d8fe307 2352 snapshot[i - 2]);
af3cbd45
MD
2353 if (!shadow_node) {
2354 ret = -EAGAIN;
2355 goto end;
2356 }
2357 shadow_nodes[nr_shadow++] = shadow_node;
b62a8d0c
MD
2358
2359 /*
2360 * Check if node has been removed between RCU
2361 * lookup and lock acquisition.
2362 */
2363 assert(snapshot_ptr[i - 1]);
2364 if (ja_node_ptr(*snapshot_ptr[i - 1])
2365 != ja_node_ptr(snapshot[i - 1])) {
2366 ret = -ENOENT;
2367 goto end;
2368 }
af3cbd45 2369 }
b62a8d0c 2370
af3cbd45
MD
2371 break;
2372 }
2373 }
2374
2375 /*
4d6ef45e
MD
2376 * At this point, we want to delete all nodes that are about to
2377 * be removed from shadow_nodes (except the last one, which is
2378 * either the root or the parent of the upmost node with 1
b62a8d0c
MD
2379 * child). OK to free lock here, because RCU read lock is held,
2380 * and free only performed in call_rcu.
af3cbd45
MD
2381 */
2382
2383 for (i = 0; i < nr_clear; i++) {
2384 ret = rcuja_shadow_clear(ja->ht,
3d8fe307 2385 shadow_nodes[i]->node_flag,
af3cbd45
MD
2386 shadow_nodes[i],
2387 RCUJA_SHADOW_CLEAR_FREE_NODE
2388 | RCUJA_SHADOW_CLEAR_FREE_LOCK);
2389 assert(!ret);
2390 }
2391
2392 iter_node_flag = parent_node_flag;
2393 /* Remove from parent */
2394 ret = ja_node_clear_ptr(ja,
2395 node_flag_ptr, /* Pointer to location to nullify */
2396 &iter_node_flag, /* Old new parent ptr in its parent */
4d6ef45e 2397 shadow_nodes[nr_branch - 1], /* of parent */
48cbe001 2398 n, nr_branch - 1);
b306a0fe
MD
2399 if (ret)
2400 goto end;
af3cbd45 2401
4d6ef45e
MD
2402 dbg_printf("ja_detach_node: publish %p instead of %p\n",
2403 iter_node_flag, *parent_node_flag_ptr);
af3cbd45
MD
2404 /* Update address of parent ptr in its parent */
2405 rcu_assign_pointer(*parent_node_flag_ptr, iter_node_flag);
2406
2407end:
2408 for (i = 0; i < nr_shadow; i++)
2409 rcuja_shadow_unlock(shadow_nodes[i]);
35170a44
MD
2410 return ret;
2411}
2412
af3cbd45
MD
2413static
2414int ja_unchain_node(struct cds_ja *ja,
2415 struct cds_ja_inode_flag *parent_node_flag,
fa112799 2416 struct cds_ja_inode_flag **node_flag_ptr,
013a6083 2417 struct cds_ja_inode_flag *node_flag,
af3cbd45
MD
2418 struct cds_ja_node *node)
2419{
2420 struct cds_ja_shadow_node *shadow_node;
03ec1aeb 2421 struct cds_ja_node *iter_node, **iter_node_ptr, **prev_node_ptr = NULL;
013a6083 2422 int ret = 0, count = 0, found = 0;
af3cbd45 2423
3d8fe307 2424 shadow_node = rcuja_shadow_lookup_lock(ja->ht, parent_node_flag);
af3cbd45
MD
2425 if (!shadow_node)
2426 return -EAGAIN;
013a6083 2427 if (ja_node_ptr(*node_flag_ptr) != ja_node_ptr(node_flag)) {
fa112799
MD
2428 ret = -EAGAIN;
2429 goto end;
2430 }
af3cbd45 2431 /*
03ec1aeb
MD
2432 * Find the previous node's next pointer pointing to our node,
2433 * so we can update it. Retry if another thread removed all but
2434 * one of duplicates since check (this check was performed
2435 * without lock). Ensure that the node we are about to remove is
2436 * still in the list (while holding lock). No need for RCU
2437 * traversal here since we hold the lock on the parent.
af3cbd45 2438 */
03ec1aeb
MD
2439 iter_node_ptr = (struct cds_ja_node **) node_flag_ptr;
2440 iter_node = (struct cds_ja_node *) ja_node_ptr(node_flag);
2441 cds_ja_for_each_duplicate(iter_node) {
f2758d14 2442 count++;
03ec1aeb
MD
2443 if (iter_node == node) {
2444 prev_node_ptr = iter_node_ptr;
013a6083 2445 found++;
03ec1aeb
MD
2446 }
2447 iter_node_ptr = &iter_node->next;
f2758d14 2448 }
013a6083
MD
2449 assert(found <= 1);
2450 if (!found || count == 1) {
af3cbd45
MD
2451 ret = -EAGAIN;
2452 goto end;
2453 }
03ec1aeb 2454 CMM_STORE_SHARED(*prev_node_ptr, node->next);
ade342cb
MD
2455 /*
2456 * Validate that we indeed removed the node from linked list.
2457 */
2458 assert(ja_node_ptr(*node_flag_ptr) != (struct cds_ja_inode *) node);
af3cbd45
MD
2459end:
2460 rcuja_shadow_unlock(shadow_node);
2461 return ret;
2462}
2463
2464/*
2465 * Called with RCU read lock held.
2466 */
35170a44
MD
2467int cds_ja_del(struct cds_ja *ja, uint64_t key,
2468 struct cds_ja_node *node)
2469{
2470 unsigned int tree_depth, i;
2471 struct cds_ja_inode_flag *snapshot[JA_MAX_DEPTH];
af3cbd45
MD
2472 struct cds_ja_inode_flag **snapshot_ptr[JA_MAX_DEPTH];
2473 uint8_t snapshot_n[JA_MAX_DEPTH];
35170a44 2474 struct cds_ja_inode_flag *node_flag;
fa112799
MD
2475 struct cds_ja_inode_flag **prev_node_flag_ptr,
2476 **node_flag_ptr;
4d6ef45e 2477 int nr_snapshot;
35170a44
MD
2478 int ret;
2479
2480 if (caa_unlikely(key > ja->key_max))
2481 return -EINVAL;
2482 tree_depth = ja->tree_depth;
2483
2484retry:
4d6ef45e 2485 nr_snapshot = 0;
35170a44
MD
2486 dbg_printf("cds_ja_del attempt: key %" PRIu64 ", node %p\n",
2487 key, node);
2488
2489 /* snapshot for level 0 is only for shadow node lookup */
4d6ef45e
MD
2490 snapshot_n[0] = 0;
2491 snapshot_n[1] = 0;
af3cbd45 2492 snapshot_ptr[nr_snapshot] = NULL;
35170a44
MD
2493 snapshot[nr_snapshot++] = (struct cds_ja_inode_flag *) &ja->root;
2494 node_flag = rcu_dereference(ja->root);
af3cbd45 2495 prev_node_flag_ptr = &ja->root;
fa112799 2496 node_flag_ptr = &ja->root;
35170a44
MD
2497
2498 /* Iterate on all internal levels */
2499 for (i = 1; i < tree_depth; i++) {
2500 uint8_t iter_key;
2501
2502 dbg_printf("cds_ja_del iter node_flag %p\n",
2503 node_flag);
2504 if (!ja_node_ptr(node_flag)) {
2505 return -ENOENT;
2506 }
35170a44 2507 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1)));
4d6ef45e 2508 snapshot_n[nr_snapshot + 1] = iter_key;
af3cbd45
MD
2509 snapshot_ptr[nr_snapshot] = prev_node_flag_ptr;
2510 snapshot[nr_snapshot++] = node_flag;
35170a44 2511 node_flag = ja_node_get_nth(node_flag,
fa112799 2512 &node_flag_ptr,
35170a44 2513 iter_key);
48cbe001
MD
2514 if (node_flag)
2515 prev_node_flag_ptr = node_flag_ptr;
af3cbd45
MD
2516 dbg_printf("cds_ja_del iter key lookup %u finds node_flag %p, prev_node_flag_ptr %p\n",
2517 (unsigned int) iter_key, node_flag,
2518 prev_node_flag_ptr);
35170a44 2519 }
35170a44
MD
2520 /*
2521 * We reached bottom of tree, try to find the node we are trying
2522 * to remove. Fail if we cannot find it.
2523 */
2524 if (!ja_node_ptr(node_flag)) {
4d6ef45e
MD
2525 dbg_printf("cds_ja_del: no node found for key %" PRIu64 "\n",
2526 key);
35170a44
MD
2527 return -ENOENT;
2528 } else {
03ec1aeb 2529 struct cds_ja_node *iter_node, *match = NULL;
af3cbd45 2530 int count = 0;
35170a44 2531
03ec1aeb
MD
2532 iter_node = (struct cds_ja_node *) ja_node_ptr(node_flag);
2533 cds_ja_for_each_duplicate_rcu(iter_node) {
2534 dbg_printf("cds_ja_del: compare %p with iter_node %p\n", node, iter_node);
2535 if (iter_node == node)
2536 match = iter_node;
af3cbd45 2537 count++;
35170a44 2538 }
03ec1aeb 2539
4d6ef45e
MD
2540 if (!match) {
2541 dbg_printf("cds_ja_del: no node match for node %p key %" PRIu64 "\n", node, key);
35170a44 2542 return -ENOENT;
4d6ef45e 2543 }
af3cbd45
MD
2544 assert(count > 0);
2545 if (count == 1) {
2546 /*
4d6ef45e
MD
2547 * Removing last of duplicates. Last snapshot
2548 * does not have a shadow node (external leaves).
af3cbd45
MD
2549 */
2550 snapshot_ptr[nr_snapshot] = prev_node_flag_ptr;
2551 snapshot[nr_snapshot++] = node_flag;
2552 ret = ja_detach_node(ja, snapshot, snapshot_ptr,
2553 snapshot_n, nr_snapshot, key, node);
2554 } else {
f2758d14 2555 ret = ja_unchain_node(ja, snapshot[nr_snapshot - 1],
013a6083 2556 node_flag_ptr, node_flag, match);
af3cbd45 2557 }
35170a44 2558 }
b306a0fe
MD
2559 /*
2560 * Explanation of -ENOENT handling: caused by concurrent delete
2561 * between RCU lookup and actual removal. Need to re-do the
2562 * lookup and removal attempt.
2563 */
2564 if (ret == -EAGAIN || ret == -ENOENT)
35170a44
MD
2565 goto retry;
2566 return ret;
2567}
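/*
 * Illustrative usage sketch, not part of rcuja.c: removing an entry and
 * deferring its reclamation. After a successful cds_ja_del(),
 * concurrent readers may still hold references to the node, so the
 * enclosing structure must only be freed after a grace period, for
 * instance via call_rcu(). Assumes the hypothetical struct my_entry
 * additionally embeds a struct rcu_head named rcu_head.
 */
static void my_free_entry_rcu(struct rcu_head *head)
{
	free(caa_container_of(head, struct my_entry, rcu_head));
}

static int my_del(struct cds_ja *ja, uint64_t key, struct my_entry *entry)
{
	int ret;

	rcu_read_lock();	/* cds_ja_del() is called with the RCU read lock held */
	ret = cds_ja_del(ja, key, &entry->ja_node);
	rcu_read_unlock();
	if (!ret)
		call_rcu(&entry->rcu_head, my_free_entry_rcu);
	return ret;
}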
2568
b4540e8a
MD
2569struct cds_ja *_cds_ja_new(unsigned int key_bits,
2570 const struct rcu_flavor_struct *flavor)
be9a7474
MD
2571{
2572 struct cds_ja *ja;
b0f74e47 2573 int ret;
f07b240f 2574 struct cds_ja_shadow_node *root_shadow_node;
be9a7474
MD
2575
2576 ja = calloc(sizeof(*ja), 1);
2577 if (!ja)
2578 goto ja_error;
b4540e8a
MD
2579
2580 switch (key_bits) {
2581 case 8:
b4540e8a 2582 case 16:
1216b3d2 2583 case 24:
b4540e8a 2584 case 32:
1216b3d2
MD
2585 case 40:
2586 case 48:
2587 case 56:
2588 ja->key_max = (1ULL << key_bits) - 1;
b4540e8a
MD
2589 break;
2590 case 64:
2591 ja->key_max = UINT64_MAX;
2592 break;
2593 default:
2594 goto check_error;
2595 }
2596
be9a7474 2597 /* ja->root is NULL */
5a9a87dd 2598 /* tree_depth 0 is for pointer to root node */
582a6ade 2599 ja->tree_depth = (key_bits >> JA_LOG2_BITS_PER_BYTE) + 1;
a2a7ff59 2600 assert(ja->tree_depth <= JA_MAX_DEPTH);
be9a7474
MD
2601 ja->ht = rcuja_create_ht(flavor);
2602 if (!ja->ht)
2603 goto ht_error;
b0f74e47
MD
2604
2605 /*
2606 * Note: we should not free this node until judy array destroy.
2607 */
f07b240f 2608 root_shadow_node = rcuja_shadow_set(ja->ht,
3d8fe307 2609 (struct cds_ja_inode_flag *) &ja->root,
48cbe001 2610 NULL, ja, 0);
f07b240f
MD
2611 if (!root_shadow_node) {
2612 ret = -ENOMEM;
b0f74e47 2613 goto ht_node_error;
f07b240f 2614 }
b0f74e47 2615
be9a7474
MD
2616 return ja;
2617
b0f74e47
MD
2618ht_node_error:
2619 ret = rcuja_delete_ht(ja->ht);
2620 assert(!ret);
be9a7474 2621ht_error:
b4540e8a 2622check_error:
be9a7474
MD
2623 free(ja);
2624ja_error:
2625 return NULL;
2626}
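/*
 * Worked example for the sizing logic above (illustrative note, not
 * additional code): key_bits must be a multiple of 8 between 8 and 64.
 * With key_bits = 16, key_max = (1ULL << 16) - 1 = 65535 and
 * tree_depth = 16/8 + 1 = 3: one level for the root pointer plus one
 * internal level per key byte. With key_bits = 64, key_max = UINT64_MAX
 * and tree_depth = 64/8 + 1 = 9.
 */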
2627
3d8fe307
MD
2628/*
2629 * Called from RCU read-side CS.
2630 */
2631__attribute__((visibility("protected")))
2632void rcuja_free_all_children(struct cds_ja_shadow_node *shadow_node,
2633 struct cds_ja_inode_flag *node_flag,
21ac4c56 2634 void (*rcu_free_node)(struct cds_ja_node *node))
3d8fe307 2635{
3d8fe307
MD
2636 unsigned int type_index;
2637 struct cds_ja_inode *node;
2638 const struct cds_ja_type *type;
2639
3d8fe307
MD
2640 node = ja_node_ptr(node_flag);
2641 assert(node != NULL);
2642 type_index = ja_node_type(node_flag);
2643 type = &ja_types[type_index];
2644
2645 switch (type->type_class) {
2646 case RCU_JA_LINEAR:
2647 {
2648 uint8_t nr_child =
2649 ja_linear_node_get_nr_child(type, node);
2650 unsigned int i;
2651
2652 for (i = 0; i < nr_child; i++) {
2653 struct cds_ja_inode_flag *iter;
03ec1aeb 2654 struct cds_ja_node *node_iter, *n;
3d8fe307
MD
2655 uint8_t v;
2656
2657 ja_linear_node_get_ith_pos(type, node, i, &v, &iter);
03ec1aeb
MD
2658 node_iter = (struct cds_ja_node *) iter;
2659 cds_ja_for_each_duplicate_safe(node_iter, n) {
2660 rcu_free_node(node_iter);
3d8fe307
MD
2661 }
2662 }
2663 break;
2664 }
2665 case RCU_JA_POOL:
2666 {
2667 unsigned int pool_nr;
2668
2669 for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) {
2670 struct cds_ja_inode *pool =
2671 ja_pool_node_get_ith_pool(type, node, pool_nr);
2672 uint8_t nr_child =
2673 ja_linear_node_get_nr_child(type, pool);
2674 unsigned int j;
2675
2676 for (j = 0; j < nr_child; j++) {
2677 struct cds_ja_inode_flag *iter;
03ec1aeb 2678 struct cds_ja_node *node_iter, *n;
3d8fe307
MD
2679 uint8_t v;
2680
75d573aa 2681 ja_linear_node_get_ith_pos(type, pool, j, &v, &iter);
03ec1aeb
MD
2682 node_iter = (struct cds_ja_node *) iter;
2683 cds_ja_for_each_duplicate_safe(node_iter, n) {
2684 rcu_free_node(node_iter);
3d8fe307
MD
2685 }
2686 }
2687 }
2688 break;
2689 }
2690 case RCU_JA_NULL:
2691 break;
2692 case RCU_JA_PIGEON:
2693 {
3d8fe307
MD
2694 unsigned int i;
2695
48cbe001 2696 for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
3d8fe307 2697 struct cds_ja_inode_flag *iter;
03ec1aeb 2698 struct cds_ja_node *node_iter, *n;
3d8fe307
MD
2699
2700 iter = ja_pigeon_node_get_ith_pos(type, node, i);
03ec1aeb
MD
2701 node_iter = (struct cds_ja_node *) iter;
2702 cds_ja_for_each_duplicate_safe(node_iter, n) {
2703 rcu_free_node(node_iter);
3d8fe307
MD
2704 }
2705 }
2706 break;
2707 }
2708 default:
2709 assert(0);
2710 }
2711}
2712
19ddcd04 2713static
354981c2 2714void print_debug_fallback_distribution(struct cds_ja *ja)
19ddcd04
MD
2715{
2716 int i;
2717
2718 fprintf(stderr, "Fallback node distribution:\n");
2719 for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
354981c2 2720 if (!ja->node_fallback_count_distribution[i])
19ddcd04
MD
2721 continue;
2722 fprintf(stderr, " %3u: %4lu\n",
354981c2 2723 i, ja->node_fallback_count_distribution[i]);
19ddcd04
MD
2724 }
2725}
2726
021c72c0 2727static
19a748d9 2728int ja_final_checks(struct cds_ja *ja)
021c72c0
MD
2729{
2730 double fallback_ratio;
2731 unsigned long na, nf, nr_fallback;
19a748d9 2732 int ret = 0;
021c72c0
MD
2733
2734 fallback_ratio = (double) uatomic_read(&ja->nr_fallback);
2735 fallback_ratio /= (double) uatomic_read(&ja->nr_nodes_allocated);
2736 nr_fallback = uatomic_read(&ja->nr_fallback);
2737 if (nr_fallback)
2738 fprintf(stderr,
2739 "[warning] RCU Judy Array used %lu fallback node(s) (ratio: %g)\n",
2740 uatomic_read(&ja->nr_fallback),
2741 fallback_ratio);
2742
2743 na = uatomic_read(&ja->nr_nodes_allocated);
2744 nf = uatomic_read(&ja->nr_nodes_freed);
19a748d9
MD
2745 dbg_printf("Nodes allocated: %lu, Nodes freed: %lu.\n", na, nf);
2746 if (nr_fallback)
2747 print_debug_fallback_distribution(ja);
2748
021c72c0
MD
2749 if (na != nf) {
2750 fprintf(stderr, "[error] Judy array leaked %ld nodes. Allocated: %lu, freed: %lu.\n",
2751 (long) na - nf, na, nf);
19a748d9 2752 ret = -1;
021c72c0 2753 }
19a748d9 2754 return ret;
021c72c0
MD
2755}
2756
be9a7474 2757/*
dc0e9798
MD
2758 * There should be no concurrent add, delete, or lookup performed
2759 * on the Judy array while it is being destroyed (ensured by the
2760 * caller).
be9a7474 2761 */
3d8fe307 2762int cds_ja_destroy(struct cds_ja *ja,
dc0e9798 2763 void (*free_node_cb)(struct cds_ja_node *node))
be9a7474 2764{
48cbe001 2765 const struct rcu_flavor_struct *flavor;
b4540e8a
MD
2766 int ret;
2767
48cbe001 2768 flavor = cds_lfht_rcu_flavor(ja->ht);
be9a7474 2769 rcuja_shadow_prune(ja->ht,
3d8fe307 2770 RCUJA_SHADOW_CLEAR_FREE_NODE | RCUJA_SHADOW_CLEAR_FREE_LOCK,
dc0e9798 2771 free_node_cb);
48cbe001 2772 flavor->thread_offline();
b4540e8a
MD
2773 ret = rcuja_delete_ht(ja->ht);
2774 if (ret)
2775 return ret;
f2ae7af7
MD
2776
2777 /* Wait for in-flight call_rcu free to complete. */
2778 flavor->barrier();
2779
48cbe001 2780 flavor->thread_online();
19a748d9 2781 ret = ja_final_checks(ja);
b4540e8a 2782 free(ja);
19a748d9 2783 return ret;
be9a7474 2784}
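/*
 * Illustrative usage sketch, not part of rcuja.c: tearing down a judy
 * array once the caller has ensured no concurrent add, delete or
 * lookup can occur. The callback receives each remaining
 * struct cds_ja_node and is expected to free the enclosing user
 * structure ("struct my_entry" is the hypothetical embedding structure
 * used in the earlier sketches).
 */
static void my_free_node_cb(struct cds_ja_node *node)
{
	free(caa_container_of(node, struct my_entry, ja_node));
}

static int my_teardown(struct cds_ja *ja)
{
	return cds_ja_destroy(ja, my_free_node_cb);
}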