/*
 * rcuja/rcuja.c
 *
 * Userspace RCU library - RCU Judy Array
 *
 * Copyright (C) 2000 - 2002 Hewlett-Packard Company
 * Copyright 2012-2013 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define _LGPL_SOURCE
#include <stdint.h>
#include <errno.h>
#include <limits.h>
#include <string.h>
#include <assert.h>
#include <urcu/rcuja.h>
#include <urcu/compiler.h>
#include <urcu/arch.h>
#include <urcu-pointer.h>
#include <urcu/uatomic.h>

#include "rcuja-internal.h"

#ifndef abs_int
#define abs_int(a)	((int) (a) > 0 ? (int) (a) : -((int) (a)))
#endif

enum cds_ja_type_class {
	RCU_JA_LINEAR = 0,	/* Type A */
			/* 32-bit: 1 to 25 children, 8 to 128 bytes */
			/* 64-bit: 1 to 28 children, 16 to 256 bytes */
	RCU_JA_POOL = 1,	/* Type B */
			/* 32-bit: 26 to 100 children, 256 to 512 bytes */
			/* 64-bit: 29 to 112 children, 512 to 1024 bytes */
	RCU_JA_PIGEON = 2,	/* Type C */
			/* 32-bit: 101 to 256 children, 1024 bytes */
			/* 64-bit: 113 to 256 children, 2048 bytes */
	/* Leaf nodes are implicit from their height in the tree */
	RCU_JA_NR_TYPES,

	RCU_JA_NULL,	/* not an encoded type, but keeps code regular */
};

struct cds_ja_type {
	enum cds_ja_type_class type_class;
	uint16_t min_child;		/* minimum number of children: 1 to 256 */
	uint16_t max_child;		/* maximum number of children: 1 to 256 */
	uint16_t max_linear_child;	/* per-pool max nr. children: 1 to 256 */
	uint16_t order;			/* node size is (1 << order), in bytes */
	uint16_t nr_pool_order;		/* number of pools */
	uint16_t pool_size_order;	/* pool size */
};

/*
 * Iteration on the array to find the right node size for the number of
 * children stops when it reaches .max_child == 256 (this is the largest
 * possible node size, which contains 256 children).
 * The min_child overlaps with the previous max_child to provide a
 * hysteresis loop to reallocation for patterns of cyclic add/removal
 * within the same node.
 * The node index within the following arrays is represented on 3
 * bits. It identifies the node type, min/max number of children, and
 * the size order.
 * The max_child values for the RCU_JA_POOL below result from
 * statistical approximation: over a million populations, the max_child
 * covers between 97% and 99% of the populations generated. Therefore, a
 * fallback should exist to cover the rare extreme population unbalance
 * cases, but it will not have a major impact on speed nor space
 * consumption, since those are rare cases.
 */

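/*
 * Illustration: on 32-bit, a node holding 25 children uses the type 4
 * linear node below; adding a 26th child recompacts it into the type 5
 * pool node. Because type 5 declares min_child == 20 (overlapping
 * type 4's max_child of 25), the node is not considered for demotion
 * back to a smaller type until its population falls back to 20, so a
 * workload oscillating around 25/26 children does not reallocate on
 * every add/removal.
 */
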
#if (CAA_BITS_PER_LONG < 64)
/* 32-bit pointers */
enum {
	ja_type_0_max_child = 1,
	ja_type_1_max_child = 3,
	ja_type_2_max_child = 6,
	ja_type_3_max_child = 12,
	ja_type_4_max_child = 25,
	ja_type_5_max_child = 48,
	ja_type_6_max_child = 92,
	ja_type_7_max_child = 256,
	ja_type_8_max_child = 0,	/* NULL */
};

enum {
	ja_type_0_max_linear_child = 1,
	ja_type_1_max_linear_child = 3,
	ja_type_2_max_linear_child = 6,
	ja_type_3_max_linear_child = 12,
	ja_type_4_max_linear_child = 25,
	ja_type_5_max_linear_child = 24,
	ja_type_6_max_linear_child = 23,
};

enum {
	ja_type_5_nr_pool_order = 1,
	ja_type_6_nr_pool_order = 2,
};

const struct cds_ja_type ja_types[] = {
	{ .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_0_max_child, .max_linear_child = ja_type_0_max_linear_child, .order = 3, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_1_max_child, .max_linear_child = ja_type_1_max_linear_child, .order = 4, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 3, .max_child = ja_type_2_max_child, .max_linear_child = ja_type_2_max_linear_child, .order = 5, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 4, .max_child = ja_type_3_max_child, .max_linear_child = ja_type_3_max_linear_child, .order = 6, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 10, .max_child = ja_type_4_max_child, .max_linear_child = ja_type_4_max_linear_child, .order = 7, },

	/* Pools may fill sooner than max_child */
	/* This pool is hardcoded at index 5. See ja_node_ptr(). */
	{ .type_class = RCU_JA_POOL, .min_child = 20, .max_child = ja_type_5_max_child, .max_linear_child = ja_type_5_max_linear_child, .order = 8, .nr_pool_order = ja_type_5_nr_pool_order, .pool_size_order = 7, },
	/* This pool is hardcoded at index 6. See ja_node_ptr(). */
	{ .type_class = RCU_JA_POOL, .min_child = 45, .max_child = ja_type_6_max_child, .max_linear_child = ja_type_6_max_linear_child, .order = 9, .nr_pool_order = ja_type_6_nr_pool_order, .pool_size_order = 7, },

	/*
	 * Upon node removal below min_child, if child pool is filled
	 * beyond capacity, we roll back to pigeon.
	 */
	{ .type_class = RCU_JA_PIGEON, .min_child = 83, .max_child = ja_type_7_max_child, .order = 10, },

	{ .type_class = RCU_JA_NULL, .min_child = 0, .max_child = ja_type_8_max_child, },
};
#else /* !(CAA_BITS_PER_LONG < 64) */
/* 64-bit pointers */
enum {
	ja_type_0_max_child = 1,
	ja_type_1_max_child = 3,
	ja_type_2_max_child = 7,
	ja_type_3_max_child = 14,
	ja_type_4_max_child = 28,
	ja_type_5_max_child = 54,
	ja_type_6_max_child = 104,
	ja_type_7_max_child = 256,
	ja_type_8_max_child = 256,
};

enum {
	ja_type_0_max_linear_child = 1,
	ja_type_1_max_linear_child = 3,
	ja_type_2_max_linear_child = 7,
	ja_type_3_max_linear_child = 14,
	ja_type_4_max_linear_child = 28,
	ja_type_5_max_linear_child = 27,
	ja_type_6_max_linear_child = 26,
};

enum {
	ja_type_5_nr_pool_order = 1,
	ja_type_6_nr_pool_order = 2,
};

const struct cds_ja_type ja_types[] = {
	{ .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_0_max_child, .max_linear_child = ja_type_0_max_linear_child, .order = 4, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_1_max_child, .max_linear_child = ja_type_1_max_linear_child, .order = 5, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 3, .max_child = ja_type_2_max_child, .max_linear_child = ja_type_2_max_linear_child, .order = 6, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 5, .max_child = ja_type_3_max_child, .max_linear_child = ja_type_3_max_linear_child, .order = 7, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 10, .max_child = ja_type_4_max_child, .max_linear_child = ja_type_4_max_linear_child, .order = 8, },

	/* Pools may fill sooner than max_child. */
	/* This pool is hardcoded at index 5. See ja_node_ptr(). */
	{ .type_class = RCU_JA_POOL, .min_child = 22, .max_child = ja_type_5_max_child, .max_linear_child = ja_type_5_max_linear_child, .order = 9, .nr_pool_order = ja_type_5_nr_pool_order, .pool_size_order = 8, },
	/* This pool is hardcoded at index 6. See ja_node_ptr(). */
	{ .type_class = RCU_JA_POOL, .min_child = 51, .max_child = ja_type_6_max_child, .max_linear_child = ja_type_6_max_linear_child, .order = 10, .nr_pool_order = ja_type_6_nr_pool_order, .pool_size_order = 8, },

	/*
	 * Upon node removal below min_child, if child pool is filled
	 * beyond capacity, we roll back to pigeon.
	 */
	{ .type_class = RCU_JA_PIGEON, .min_child = 95, .max_child = ja_type_7_max_child, .order = 11, },

	{ .type_class = RCU_JA_NULL, .min_child = 0, .max_child = ja_type_8_max_child, },
};
#endif /* !(BITS_PER_LONG < 64) */

static inline __attribute__((unused))
void static_array_size_check(void)
{
	CAA_BUILD_BUG_ON(CAA_ARRAY_SIZE(ja_types) < JA_TYPE_MAX_NR);
}

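/*
 * Note: CAA_BUILD_BUG_ON() is a compile-time assertion, so the check
 * above breaks the build if ja_types[] ever contains fewer entries
 * than JA_TYPE_MAX_NR (defined in rcuja-internal.h).
 */
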
/*
 * The cds_ja_node contains the compressed node data needed for
 * read-side. For linear and pool node configurations, it starts with a
 * byte counting the number of children in the node. Then, the
 * node-specific data is placed.
 * The node mutex, if any is needed, protecting concurrent updates of
 * each node is placed in a separate hash table indexed by node address.
 * For the pigeon configuration, the number of children is also kept in
 * a separate hash table, indexed by node address, because it is only
 * required for updates.
 */

#define DECLARE_LINEAR_NODE(index)						\
	struct {								\
		uint8_t nr_child;						\
		uint8_t child_value[ja_type_## index ##_max_linear_child];	\
		struct cds_ja_inode_flag *child_ptr[ja_type_## index ##_max_linear_child]; \
	}

#define DECLARE_POOL_NODE(index)						\
	struct {								\
		struct {							\
			uint8_t nr_child;					\
			uint8_t child_value[ja_type_## index ##_max_linear_child]; \
			struct cds_ja_inode_flag *child_ptr[ja_type_## index ##_max_linear_child]; \
		} linear[1U << ja_type_## index ##_nr_pool_order];		\
	}

struct cds_ja_inode {
	union {
		/* Linear configuration */
		DECLARE_LINEAR_NODE(0) conf_0;
		DECLARE_LINEAR_NODE(1) conf_1;
		DECLARE_LINEAR_NODE(2) conf_2;
		DECLARE_LINEAR_NODE(3) conf_3;
		DECLARE_LINEAR_NODE(4) conf_4;

		/* Pool configuration */
		DECLARE_POOL_NODE(5) conf_5;
		DECLARE_POOL_NODE(6) conf_6;

		/* Pigeon configuration */
		struct {
			struct cds_ja_inode_flag *child[ja_type_7_max_child];
		} conf_7;
		/* data aliasing nodes for computed accesses */
		uint8_t data[sizeof(struct cds_ja_inode_flag *) * ja_type_7_max_child];
	} u;
};

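/*
 * Illustration of the aliased u.data[] layout for a linear node (e.g.
 * 32-bit type index 1, max_linear_child == 3):
 *
 *   u.data[0]            nr_child (0 to 3)
 *   u.data[1 .. 3]       child_value[] key bytes
 *   <pad to pointer alignment>
 *   child_ptr[0 .. 2]    one cds_ja_inode_flag pointer per child
 *
 * The accessors below (ja_linear_node_get_nth() and friends) recompute
 * these offsets from u.data[] rather than using the typed conf_N views.
 */
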
enum ja_recompact {
	JA_RECOMPACT_ADD_SAME,
	JA_RECOMPACT_ADD_NEXT,
	JA_RECOMPACT_DEL,
};

enum ja_lookup_inequality {
	JA_LOOKUP_BE,
	JA_LOOKUP_AE,
};

enum ja_direction {
	JA_LEFT,
	JA_RIGHT,
	JA_LEFTMOST,
	JA_RIGHTMOST,
};

static
struct cds_ja_inode *_ja_node_mask_ptr(struct cds_ja_inode_flag *node)
{
	return (struct cds_ja_inode *) (((unsigned long) node) & JA_PTR_MASK);
}

unsigned long ja_node_type(struct cds_ja_inode_flag *node)
{
	unsigned long type;

	if (_ja_node_mask_ptr(node) == NULL) {
		return NODE_INDEX_NULL;
	}
	type = (unsigned int) ((unsigned long) node & JA_TYPE_MASK);
	assert(type < (1UL << JA_TYPE_BITS));
	return type;
}

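/*
 * A cds_ja_inode_flag pointer is a tagged pointer: because nodes are
 * allocated aligned on their own size (see alloc_cds_ja_node() below),
 * the low bits of the address are free to carry the ja_types[] index
 * (JA_TYPE_MASK/JA_TYPE_BITS) and, for pool nodes, the bit-selector
 * encoding (JA_POOL_1D_MASK/JA_POOL_2D_MASK). ja_node_ptr() and
 * _ja_node_mask_ptr() mask those bits out to recover the node address.
 */
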
static
struct cds_ja_inode *alloc_cds_ja_node(struct cds_ja *ja,
		const struct cds_ja_type *ja_type)
{
	size_t len = 1U << ja_type->order;
	void *p;
	int ret;

	ret = posix_memalign(&p, len, len);
	if (ret || !p) {
		return NULL;
	}
	memset(p, 0, len);
	if (ja_debug_counters())
		uatomic_inc(&ja->nr_nodes_allocated);
	return p;
}

void free_cds_ja_node(struct cds_ja *ja, struct cds_ja_inode *node)
{
	if (ja_debug_counters() && node)
		uatomic_inc(&ja->nr_nodes_freed);
	free(node);
}

#define __JA_ALIGN_MASK(v, mask)	(((v) + (mask)) & ~(mask))
#define JA_ALIGN(v, align)		__JA_ALIGN_MASK(v, (typeof(v)) (align) - 1)
#define __JA_FLOOR_MASK(v, mask)	((v) & ~(mask))
#define JA_FLOOR(v, align)		__JA_FLOOR_MASK(v, (typeof(v)) (align) - 1)

static
uint8_t *align_ptr_size(uint8_t *ptr)
{
	return (uint8_t *) JA_ALIGN((unsigned long) ptr, sizeof(void *));
}

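/*
 * Example: with 8-byte pointers, JA_ALIGN(0x1005, 8) == 0x1008 and
 * JA_FLOOR(0x1005, 8) == 0x1000. align_ptr_size() above rounds a byte
 * pointer up to the next pointer-sized boundary; it is used to locate
 * the child pointer array that follows the child_value[] bytes in a
 * linear node.
 */
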
static
uint8_t ja_linear_node_get_nr_child(const struct cds_ja_type *type,
		struct cds_ja_inode *node)
{
	assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
	return rcu_dereference(node->u.data[0]);
}

/*
 * The order in which values and pointers are set does not matter: if
 * a value is missing, we return NULL. If a value is there, but its
 * associated pointer is still NULL, we return NULL too.
 */
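/*
 * Concurrent reads are safe here because updaters publish a new child
 * in two steps (see ja_linear_node_set_nth()): the pointer and value
 * are written first, then, after a cmm_smp_wmb(), nr_child is
 * incremented. The cmm_smp_rmb() below pairs with that barrier, so a
 * reader that observes the new nr_child also observes the matching
 * value and pointer.
 */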
static
struct cds_ja_inode_flag *ja_linear_node_get_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_inode_flag ***node_flag_ptr,
		uint8_t n)
{
	uint8_t nr_child;
	uint8_t *values;
	struct cds_ja_inode_flag **pointers;
	struct cds_ja_inode_flag *ptr;
	unsigned int i;

	assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);

	nr_child = ja_linear_node_get_nr_child(type, node);
	cmm_smp_rmb();	/* read nr_child before values and pointers */
	assert(nr_child <= type->max_linear_child);
	assert(type->type_class != RCU_JA_LINEAR || nr_child >= type->min_child);

	values = &node->u.data[1];
	for (i = 0; i < nr_child; i++) {
		if (CMM_LOAD_SHARED(values[i]) == n)
			break;
	}
	if (i >= nr_child) {
		if (caa_unlikely(node_flag_ptr))
			*node_flag_ptr = NULL;
		return NULL;
	}
	pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
	ptr = rcu_dereference(pointers[i]);
	if (caa_unlikely(node_flag_ptr))
		*node_flag_ptr = &pointers[i];
	return ptr;
}

static
struct cds_ja_inode_flag *ja_linear_node_get_direction(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		int n, uint8_t *result_key,
		enum ja_direction dir)
{
	uint8_t nr_child;
	uint8_t *values;
	struct cds_ja_inode_flag **pointers;
	struct cds_ja_inode_flag *ptr, *match_ptr = NULL;
	unsigned int i;
	int match_v;

	assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
	assert(dir == JA_LEFT || dir == JA_RIGHT);

	if (dir == JA_LEFT) {
		match_v = -1;
	} else {
		match_v = JA_ENTRY_PER_NODE;
	}

	nr_child = ja_linear_node_get_nr_child(type, node);
	cmm_smp_rmb();	/* read nr_child before values and pointers */
	assert(nr_child <= type->max_linear_child);
	assert(type->type_class != RCU_JA_LINEAR || nr_child >= type->min_child);

	values = &node->u.data[1];
	pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
	for (i = 0; i < nr_child; i++) {
		unsigned int v;

		v = CMM_LOAD_SHARED(values[i]);
		ptr = CMM_LOAD_SHARED(pointers[i]);
		if (!ptr)
			continue;
		if (dir == JA_LEFT) {
			if ((int) v < n && (int) v > match_v) {
				match_v = v;
				match_ptr = ptr;
			}
		} else {
			if ((int) v > n && (int) v < match_v) {
				match_v = v;
				match_ptr = ptr;
			}
		}
	}

	if (!match_ptr) {
		return NULL;
	}
	assert(match_v >= 0 && match_v < JA_ENTRY_PER_NODE);

	*result_key = (uint8_t) match_v;
	return rcu_dereference(match_ptr);
}

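/*
 * Illustration for ja_linear_node_get_direction() above: with children
 * at key bytes { 3, 7, 200 }, a JA_LEFT search for n == 100 returns the
 * child at 7 (largest key strictly below n), and a JA_RIGHT search for
 * n == 100 returns the child at 200 (smallest key strictly above n).
 * When no child qualifies, NULL is returned.
 */
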
static
void ja_linear_node_get_ith_pos(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		uint8_t i,
		uint8_t *v,
		struct cds_ja_inode_flag **iter)
{
	uint8_t *values;
	struct cds_ja_inode_flag **pointers;

	assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
	assert(i < ja_linear_node_get_nr_child(type, node));

	values = &node->u.data[1];
	*v = values[i];
	pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
	*iter = pointers[i];
}

static
struct cds_ja_inode_flag *ja_pool_node_get_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_inode_flag *node_flag,
		struct cds_ja_inode_flag ***node_flag_ptr,
		uint8_t n)
{
	struct cds_ja_inode *linear;

	assert(type->type_class == RCU_JA_POOL);

	switch (type->nr_pool_order) {
	case 1:
	{
		unsigned long bitsel, index;

		bitsel = ja_node_pool_1d_bitsel(node_flag);
		assert(bitsel < CHAR_BIT);
		index = ((unsigned long) n >> bitsel) & 0x1;
		linear = (struct cds_ja_inode *) &node->u.data[index << type->pool_size_order];
		break;
	}
	case 2:
	{
		unsigned long bitsel[2], index[2], rindex;

		ja_node_pool_2d_bitsel(node_flag, bitsel);
		assert(bitsel[0] < CHAR_BIT);
		assert(bitsel[1] < CHAR_BIT);
		index[0] = ((unsigned long) n >> bitsel[0]) & 0x1;
		index[0] <<= 1;
		index[1] = ((unsigned long) n >> bitsel[1]) & 0x1;
		rindex = index[0] | index[1];
		linear = (struct cds_ja_inode *) &node->u.data[rindex << type->pool_size_order];
		break;
	}
	default:
		linear = NULL;
		assert(0);
	}
	return ja_linear_node_get_nth(type, linear, node_flag_ptr, n);
}

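/*
 * Illustration for ja_pool_node_get_nth() above: with nr_pool_order == 2
 * and bit selectors { 6, 3 }, key byte n == 0x4a (0b01001010) gives
 * index[0] = ((n >> 6) & 1) << 1 == 2 and index[1] = (n >> 3) & 1 == 1,
 * so rindex == 3: the lookup continues in the fourth linear sub-pool of
 * the node.
 */
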
static
struct cds_ja_inode *ja_pool_node_get_ith_pool(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		uint8_t i)
{
	assert(type->type_class == RCU_JA_POOL);
	return (struct cds_ja_inode *)
		&node->u.data[(unsigned int) i << type->pool_size_order];
}

static
struct cds_ja_inode_flag *ja_pool_node_get_direction(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		int n, uint8_t *result_key,
		enum ja_direction dir)
{
	unsigned int pool_nr;
	int match_v;
	struct cds_ja_inode_flag *match_node_flag = NULL;

	assert(type->type_class == RCU_JA_POOL);
	assert(dir == JA_LEFT || dir == JA_RIGHT);

	if (dir == JA_LEFT) {
		match_v = -1;
	} else {
		match_v = JA_ENTRY_PER_NODE;
	}

	for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) {
		struct cds_ja_inode *pool =
			ja_pool_node_get_ith_pool(type,
				node, pool_nr);
		uint8_t nr_child =
			ja_linear_node_get_nr_child(type, pool);
		unsigned int j;

		for (j = 0; j < nr_child; j++) {
			struct cds_ja_inode_flag *iter;
			uint8_t v;

			ja_linear_node_get_ith_pos(type, pool,
					j, &v, &iter);
			if (!iter)
				continue;
			if (dir == JA_LEFT) {
				if ((int) v < n && (int) v > match_v) {
					match_v = v;
					match_node_flag = iter;
				}
			} else {
				if ((int) v > n && (int) v < match_v) {
					match_v = v;
					match_node_flag = iter;
				}
			}
		}
	}
	if (match_node_flag)
		*result_key = (uint8_t) match_v;
	return match_node_flag;
}

static
struct cds_ja_inode_flag *ja_pigeon_node_get_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_inode_flag ***node_flag_ptr,
		uint8_t n)
{
	struct cds_ja_inode_flag **child_node_flag_ptr;
	struct cds_ja_inode_flag *child_node_flag;

	assert(type->type_class == RCU_JA_PIGEON);
	child_node_flag_ptr = &((struct cds_ja_inode_flag **) node->u.data)[n];
	child_node_flag = rcu_dereference(*child_node_flag_ptr);
	dbg_printf("ja_pigeon_node_get_nth child_node_flag_ptr %p\n",
		child_node_flag_ptr);
	if (caa_unlikely(node_flag_ptr))
		*node_flag_ptr = child_node_flag_ptr;
	return child_node_flag;
}

static
struct cds_ja_inode_flag *ja_pigeon_node_get_direction(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		int n, uint8_t *result_key,
		enum ja_direction dir)
{
	struct cds_ja_inode_flag **child_node_flag_ptr;
	struct cds_ja_inode_flag *child_node_flag;
	int i;

	assert(type->type_class == RCU_JA_PIGEON);
	assert(dir == JA_LEFT || dir == JA_RIGHT);

	if (dir == JA_LEFT) {
		/* n - 1 is first value left of n */
		for (i = n - 1; i >= 0; i--) {
			child_node_flag_ptr = &((struct cds_ja_inode_flag **) node->u.data)[i];
			child_node_flag = rcu_dereference(*child_node_flag_ptr);
			if (child_node_flag) {
				dbg_printf("ja_pigeon_node_get_left child_node_flag %p\n",
					child_node_flag);
				*result_key = (uint8_t) i;
				return child_node_flag;
			}
		}
	} else {
		/* n + 1 is first value right of n */
		for (i = n + 1; i < JA_ENTRY_PER_NODE; i++) {
			child_node_flag_ptr = &((struct cds_ja_inode_flag **) node->u.data)[i];
			child_node_flag = rcu_dereference(*child_node_flag_ptr);
			if (child_node_flag) {
				dbg_printf("ja_pigeon_node_get_right child_node_flag %p\n",
					child_node_flag);
				*result_key = (uint8_t) i;
				return child_node_flag;
			}
		}
	}
	return NULL;
}

static
struct cds_ja_inode_flag *ja_pigeon_node_get_ith_pos(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		uint8_t i)
{
	return ja_pigeon_node_get_nth(type, node, NULL, i);
}

/*
 * ja_node_get_nth: get nth item from a node.
 * node_flag is already rcu_dereference'd.
 */
static
struct cds_ja_inode_flag *ja_node_get_nth(struct cds_ja_inode_flag *node_flag,
		struct cds_ja_inode_flag ***node_flag_ptr,
		uint8_t n)
{
	unsigned int type_index;
	struct cds_ja_inode *node;
	const struct cds_ja_type *type;

	node = ja_node_ptr(node_flag);
	assert(node != NULL);
	type_index = ja_node_type(node_flag);
	type = &ja_types[type_index];

	switch (type->type_class) {
	case RCU_JA_LINEAR:
		return ja_linear_node_get_nth(type, node,
				node_flag_ptr, n);
	case RCU_JA_POOL:
		return ja_pool_node_get_nth(type, node, node_flag,
				node_flag_ptr, n);
	case RCU_JA_PIGEON:
		return ja_pigeon_node_get_nth(type, node,
				node_flag_ptr, n);
	default:
		assert(0);
		return (void *) -1UL;
	}
}

static
struct cds_ja_inode_flag *ja_node_get_direction(struct cds_ja_inode_flag *node_flag,
		int n, uint8_t *result_key,
		enum ja_direction dir)
{
	unsigned int type_index;
	struct cds_ja_inode *node;
	const struct cds_ja_type *type;

	node = ja_node_ptr(node_flag);
	assert(node != NULL);
	type_index = ja_node_type(node_flag);
	type = &ja_types[type_index];

	switch (type->type_class) {
	case RCU_JA_LINEAR:
		return ja_linear_node_get_direction(type, node, n, result_key, dir);
	case RCU_JA_POOL:
		return ja_pool_node_get_direction(type, node, n, result_key, dir);
	case RCU_JA_PIGEON:
		return ja_pigeon_node_get_direction(type, node, n, result_key, dir);
	default:
		assert(0);
		return (void *) -1UL;
	}
}

static
struct cds_ja_inode_flag *ja_node_get_leftright(struct cds_ja_inode_flag *node_flag,
		unsigned int n, uint8_t *result_key,
		enum ja_direction dir)
{
	return ja_node_get_direction(node_flag, n, result_key, dir);
}

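/*
 * Leftmost/rightmost lookups reuse the directional search: the leftmost
 * child is the first child found to the right of key -1, and the
 * rightmost child is the first child found to the left of
 * JA_ENTRY_PER_NODE (one past the largest possible key byte).
 */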
static
struct cds_ja_inode_flag *ja_node_get_minmax(struct cds_ja_inode_flag *node_flag,
		uint8_t *result_key,
		enum ja_direction dir)
{
	switch (dir) {
	case JA_LEFTMOST:
		return ja_node_get_direction(node_flag,
				-1, result_key, JA_RIGHT);
	case JA_RIGHTMOST:
		return ja_node_get_direction(node_flag,
				JA_ENTRY_PER_NODE, result_key, JA_LEFT);
	default:
		assert(0);
	}
}

8e519e3c 702static
d96bfb0d 703int ja_linear_node_set_nth(const struct cds_ja_type *type,
b4540e8a 704 struct cds_ja_inode *node,
d96bfb0d 705 struct cds_ja_shadow_node *shadow_node,
8e519e3c 706 uint8_t n,
b4540e8a 707 struct cds_ja_inode_flag *child_node_flag)
8e519e3c
MD
708{
709 uint8_t nr_child;
710 uint8_t *values, *nr_child_ptr;
b4540e8a 711 struct cds_ja_inode_flag **pointers;
2e313670 712 unsigned int i, unused = 0;
8e519e3c
MD
713
714 assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
715
716 nr_child_ptr = &node->u.data[0];
48cbe001
MD
717 dbg_printf("linear set nth: n %u, nr_child_ptr %p\n",
718 (unsigned int) n, nr_child_ptr);
8e519e3c
MD
719 nr_child = *nr_child_ptr;
720 assert(nr_child <= type->max_linear_child);
8e519e3c
MD
721
722 values = &node->u.data[1];
2e313670
MD
723 pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
724 /* Check if node value is already populated */
8e519e3c 725 for (i = 0; i < nr_child; i++) {
2e313670
MD
726 if (values[i] == n) {
727 if (pointers[i])
728 return -EEXIST;
729 else
730 break;
731 } else {
732 if (!pointers[i])
733 unused++;
734 }
8e519e3c 735 }
2e313670
MD
736 if (i == nr_child && nr_child >= type->max_linear_child) {
737 if (unused)
738 return -ERANGE; /* recompact node */
739 else
740 return -ENOSPC; /* No space left in this node type */
741 }
742
743 assert(pointers[i] == NULL);
744 rcu_assign_pointer(pointers[i], child_node_flag);
745 /* If we expanded the nr_child, increment it */
746 if (i == nr_child) {
747 CMM_STORE_SHARED(values[nr_child], n);
748 /* write pointer and value before nr_child */
749 cmm_smp_wmb();
750 CMM_STORE_SHARED(*nr_child_ptr, nr_child + 1);
8e519e3c 751 }
e1db2db5 752 shadow_node->nr_child++;
a2a7ff59
MD
753 dbg_printf("linear set nth: %u child, shadow: %u child, for node %p shadow %p\n",
754 (unsigned int) CMM_LOAD_SHARED(*nr_child_ptr),
755 (unsigned int) shadow_node->nr_child,
756 node, shadow_node);
757
8e519e3c
MD
758 return 0;
759}
760
761static
d96bfb0d 762int ja_pool_node_set_nth(const struct cds_ja_type *type,
b4540e8a 763 struct cds_ja_inode *node,
b1a90ce3 764 struct cds_ja_inode_flag *node_flag,
d96bfb0d 765 struct cds_ja_shadow_node *shadow_node,
8e519e3c 766 uint8_t n,
b4540e8a 767 struct cds_ja_inode_flag *child_node_flag)
8e519e3c 768{
b4540e8a 769 struct cds_ja_inode *linear;
8e519e3c
MD
770
771 assert(type->type_class == RCU_JA_POOL);
b1a90ce3
MD
772
773 switch (type->nr_pool_order) {
774 case 1:
775 {
776 unsigned long bitsel, index;
777
778 bitsel = ja_node_pool_1d_bitsel(node_flag);
779 assert(bitsel < CHAR_BIT);
19ddcd04 780 index = ((unsigned long) n >> bitsel) & 0x1;
b1a90ce3
MD
781 linear = (struct cds_ja_inode *) &node->u.data[index << type->pool_size_order];
782 break;
783 }
784 case 2:
785 {
19ddcd04
MD
786 unsigned long bitsel[2], index[2], rindex;
787
788 ja_node_pool_2d_bitsel(node_flag, bitsel);
789 assert(bitsel[0] < CHAR_BIT);
790 assert(bitsel[1] < CHAR_BIT);
791 index[0] = ((unsigned long) n >> bitsel[0]) & 0x1;
792 index[0] <<= 1;
793 index[1] = ((unsigned long) n >> bitsel[1]) & 0x1;
794 rindex = index[0] | index[1];
795 linear = (struct cds_ja_inode *) &node->u.data[rindex << type->pool_size_order];
b1a90ce3
MD
796 break;
797 }
798 default:
799 linear = NULL;
800 assert(0);
801 }
802
e1db2db5
MD
803 return ja_linear_node_set_nth(type, linear, shadow_node,
804 n, child_node_flag);
8e519e3c
MD
805}
806
807static
d96bfb0d 808int ja_pigeon_node_set_nth(const struct cds_ja_type *type,
b4540e8a 809 struct cds_ja_inode *node,
d96bfb0d 810 struct cds_ja_shadow_node *shadow_node,
8e519e3c 811 uint8_t n,
b4540e8a 812 struct cds_ja_inode_flag *child_node_flag)
8e519e3c 813{
b4540e8a 814 struct cds_ja_inode_flag **ptr;
8e519e3c
MD
815
816 assert(type->type_class == RCU_JA_PIGEON);
b4540e8a 817 ptr = &((struct cds_ja_inode_flag **) node->u.data)[n];
5a9a87dd 818 if (*ptr)
8e519e3c
MD
819 return -EEXIST;
820 rcu_assign_pointer(*ptr, child_node_flag);
e1db2db5 821 shadow_node->nr_child++;
8e519e3c
MD
822 return 0;
823}
824
d68c6810 825/*
7a0b2331 826 * _ja_node_set_nth: set nth item within a node. Return an error
8e519e3c 827 * (negative error value) if it is already there.
d68c6810 828 */
8e519e3c 829static
d96bfb0d 830int _ja_node_set_nth(const struct cds_ja_type *type,
b4540e8a 831 struct cds_ja_inode *node,
b1a90ce3 832 struct cds_ja_inode_flag *node_flag,
d96bfb0d 833 struct cds_ja_shadow_node *shadow_node,
e1db2db5 834 uint8_t n,
b4540e8a 835 struct cds_ja_inode_flag *child_node_flag)
8e519e3c 836{
8e519e3c
MD
837 switch (type->type_class) {
838 case RCU_JA_LINEAR:
e1db2db5 839 return ja_linear_node_set_nth(type, node, shadow_node, n,
8e519e3c
MD
840 child_node_flag);
841 case RCU_JA_POOL:
b1a90ce3 842 return ja_pool_node_set_nth(type, node, node_flag, shadow_node, n,
8e519e3c
MD
843 child_node_flag);
844 case RCU_JA_PIGEON:
e1db2db5 845 return ja_pigeon_node_set_nth(type, node, shadow_node, n,
8e519e3c 846 child_node_flag);
e1db2db5
MD
847 case RCU_JA_NULL:
848 return -ENOSPC;
8e519e3c
MD
849 default:
850 assert(0);
851 return -EINVAL;
852 }
853
854 return 0;
855}
7a0b2331 856
2e313670 857static
af3cbd45 858int ja_linear_node_clear_ptr(const struct cds_ja_type *type,
2e313670
MD
859 struct cds_ja_inode *node,
860 struct cds_ja_shadow_node *shadow_node,
af3cbd45 861 struct cds_ja_inode_flag **node_flag_ptr)
2e313670
MD
862{
863 uint8_t nr_child;
af3cbd45 864 uint8_t *nr_child_ptr;
2e313670
MD
865
866 assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
867
868 nr_child_ptr = &node->u.data[0];
2e313670
MD
869 nr_child = *nr_child_ptr;
870 assert(nr_child <= type->max_linear_child);
871
48cbe001
MD
872 if (type->type_class == RCU_JA_LINEAR) {
873 assert(!shadow_node->fallback_removal_count);
874 if (shadow_node->nr_child <= type->min_child) {
2e313670
MD
875 /* We need to try recompacting the node */
876 return -EFBIG;
877 }
878 }
19ddcd04 879 dbg_printf("linear clear ptr: nr_child_ptr %p\n", nr_child_ptr);
af3cbd45
MD
880 assert(*node_flag_ptr != NULL);
881 rcu_assign_pointer(*node_flag_ptr, NULL);
2e313670
MD
882 /*
883 * Value and nr_child are never changed (would cause ABA issue).
884 * Instead, we leave the pointer to NULL and recompact the node
885 * once in a while. It is allowed to set a NULL pointer to a new
886 * value without recompaction though.
887 * Only update the shadow node accounting.
888 */
889 shadow_node->nr_child--;
af3cbd45 890 dbg_printf("linear clear ptr: %u child, shadow: %u child, for node %p shadow %p\n",
2e313670
MD
891 (unsigned int) CMM_LOAD_SHARED(*nr_child_ptr),
892 (unsigned int) shadow_node->nr_child,
893 node, shadow_node);
2e313670
MD
894 return 0;
895}
896
897static
af3cbd45 898int ja_pool_node_clear_ptr(const struct cds_ja_type *type,
2e313670 899 struct cds_ja_inode *node,
19ddcd04 900 struct cds_ja_inode_flag *node_flag,
2e313670 901 struct cds_ja_shadow_node *shadow_node,
af3cbd45 902 struct cds_ja_inode_flag **node_flag_ptr,
2e313670
MD
903 uint8_t n)
904{
905 struct cds_ja_inode *linear;
906
907 assert(type->type_class == RCU_JA_POOL);
19ddcd04
MD
908
909 if (shadow_node->fallback_removal_count) {
910 shadow_node->fallback_removal_count--;
911 } else {
912 /* We should try recompacting the node */
913 if (shadow_node->nr_child <= type->min_child)
914 return -EFBIG;
915 }
916
917 switch (type->nr_pool_order) {
918 case 1:
919 {
920 unsigned long bitsel, index;
921
922 bitsel = ja_node_pool_1d_bitsel(node_flag);
923 assert(bitsel < CHAR_BIT);
924 index = ((unsigned long) n >> bitsel) & type->nr_pool_order;
925 linear = (struct cds_ja_inode *) &node->u.data[index << type->pool_size_order];
926 break;
927 }
928 case 2:
929 {
930 unsigned long bitsel[2], index[2], rindex;
931
932 ja_node_pool_2d_bitsel(node_flag, bitsel);
933 assert(bitsel[0] < CHAR_BIT);
934 assert(bitsel[1] < CHAR_BIT);
935 index[0] = ((unsigned long) n >> bitsel[0]) & 0x1;
936 index[0] <<= 1;
937 index[1] = ((unsigned long) n >> bitsel[1]) & 0x1;
938 rindex = index[0] | index[1];
939 linear = (struct cds_ja_inode *) &node->u.data[rindex << type->pool_size_order];
940 break;
941 }
942 default:
943 linear = NULL;
944 assert(0);
945 }
946
af3cbd45 947 return ja_linear_node_clear_ptr(type, linear, shadow_node, node_flag_ptr);
2e313670
MD
948}
949
950static
af3cbd45 951int ja_pigeon_node_clear_ptr(const struct cds_ja_type *type,
2e313670
MD
952 struct cds_ja_inode *node,
953 struct cds_ja_shadow_node *shadow_node,
af3cbd45 954 struct cds_ja_inode_flag **node_flag_ptr)
2e313670 955{
2e313670 956 assert(type->type_class == RCU_JA_PIGEON);
19ddcd04
MD
957
958 if (shadow_node->fallback_removal_count) {
959 shadow_node->fallback_removal_count--;
960 } else {
961 /* We should try recompacting the node */
962 if (shadow_node->nr_child <= type->min_child)
963 return -EFBIG;
964 }
4d6ef45e 965 dbg_printf("ja_pigeon_node_clear_ptr: clearing ptr: %p\n", *node_flag_ptr);
af3cbd45 966 rcu_assign_pointer(*node_flag_ptr, NULL);
2e313670
MD
967 shadow_node->nr_child--;
968 return 0;
969}
970
971/*
af3cbd45 972 * _ja_node_clear_ptr: clear ptr item within a node. Return an error
2e313670
MD
973 * (negative error value) if it is not found (-ENOENT).
974 */
975static
af3cbd45 976int _ja_node_clear_ptr(const struct cds_ja_type *type,
2e313670 977 struct cds_ja_inode *node,
19ddcd04 978 struct cds_ja_inode_flag *node_flag,
2e313670 979 struct cds_ja_shadow_node *shadow_node,
af3cbd45 980 struct cds_ja_inode_flag **node_flag_ptr,
2e313670
MD
981 uint8_t n)
982{
983 switch (type->type_class) {
984 case RCU_JA_LINEAR:
af3cbd45 985 return ja_linear_node_clear_ptr(type, node, shadow_node, node_flag_ptr);
2e313670 986 case RCU_JA_POOL:
19ddcd04 987 return ja_pool_node_clear_ptr(type, node, node_flag, shadow_node, node_flag_ptr, n);
2e313670 988 case RCU_JA_PIGEON:
af3cbd45 989 return ja_pigeon_node_clear_ptr(type, node, shadow_node, node_flag_ptr);
2e313670
MD
990 case RCU_JA_NULL:
991 return -ENOENT;
992 default:
993 assert(0);
994 return -EINVAL;
995 }
996
997 return 0;
998}
999
b1a90ce3
MD
1000/*
 * Calculate bit distribution. Returns the bit (0 to 7) that splits the
 * distribution into two sub-distributions each containing as close as
 * possible to the same number of elements.
1004 */
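/*
 * Illustration: for children with key bytes { 0x10, 0x11, 0x18, 0x19 },
 * bit 3 has nr_one == 2 out of 4 children, so its distance
 * abs_int((2 << 1) - 4) == 0 is optimal: bit 3 splits the children
 * evenly into { 0x10, 0x11 } and { 0x18, 0x19 }, and a 1-d pool node
 * would dispatch on that bit.
 */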
1005static
1006unsigned int ja_node_sum_distribution_1d(enum ja_recompact mode,
1007 struct cds_ja *ja,
1008 unsigned int type_index,
1009 const struct cds_ja_type *type,
1010 struct cds_ja_inode *node,
1011 struct cds_ja_shadow_node *shadow_node,
1012 uint8_t n,
1013 struct cds_ja_inode_flag *child_node_flag,
1014 struct cds_ja_inode_flag **nullify_node_flag_ptr)
1015{
1016 uint8_t nr_one[JA_BITS_PER_BYTE];
1017 unsigned int bitsel = 0, bit_i, overall_best_distance = UINT_MAX;
1018 unsigned int distrib_nr_child = 0;
1019
1020 memset(nr_one, 0, sizeof(nr_one));
1021
1022 switch (type->type_class) {
1023 case RCU_JA_LINEAR:
1024 {
1025 uint8_t nr_child =
1026 ja_linear_node_get_nr_child(type, node);
1027 unsigned int i;
1028
1029 for (i = 0; i < nr_child; i++) {
1030 struct cds_ja_inode_flag *iter;
b1a90ce3
MD
1031 uint8_t v;
1032
1033 ja_linear_node_get_ith_pos(type, node, i, &v, &iter);
1034 if (!iter)
1035 continue;
1036 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
1037 continue;
f5531dd9
MD
1038 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1039 if (v & (1U << bit_i))
1040 nr_one[bit_i]++;
b1a90ce3
MD
1041 }
1042 distrib_nr_child++;
1043 }
1044 break;
1045 }
1046 case RCU_JA_POOL:
1047 {
1048 unsigned int pool_nr;
1049
1050 for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) {
1051 struct cds_ja_inode *pool =
1052 ja_pool_node_get_ith_pool(type,
1053 node, pool_nr);
1054 uint8_t nr_child =
1055 ja_linear_node_get_nr_child(type, pool);
1056 unsigned int j;
1057
1058 for (j = 0; j < nr_child; j++) {
1059 struct cds_ja_inode_flag *iter;
b1a90ce3
MD
1060 uint8_t v;
1061
1062 ja_linear_node_get_ith_pos(type, pool,
1063 j, &v, &iter);
1064 if (!iter)
1065 continue;
1066 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
1067 continue;
f5531dd9
MD
1068 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1069 if (v & (1U << bit_i))
1070 nr_one[bit_i]++;
b1a90ce3
MD
1071 }
1072 distrib_nr_child++;
1073 }
1074 }
1075 break;
1076 }
1077 case RCU_JA_PIGEON:
1078 {
b1a90ce3
MD
1079 unsigned int i;
1080
1081 assert(mode == JA_RECOMPACT_DEL);
48cbe001 1082 for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
b1a90ce3 1083 struct cds_ja_inode_flag *iter;
b1a90ce3
MD
1084
1085 iter = ja_pigeon_node_get_ith_pos(type, node, i);
1086 if (!iter)
1087 continue;
1088 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
1089 continue;
f5531dd9
MD
1090 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1091 if (i & (1U << bit_i))
1092 nr_one[bit_i]++;
b1a90ce3
MD
1093 }
1094 distrib_nr_child++;
1095 }
1096 break;
1097 }
1098 case RCU_JA_NULL:
19ddcd04 1099 assert(mode == JA_RECOMPACT_ADD_NEXT);
b1a90ce3
MD
1100 break;
1101 default:
1102 assert(0);
1103 break;
1104 }
1105
19ddcd04 1106 if (mode == JA_RECOMPACT_ADD_NEXT || mode == JA_RECOMPACT_ADD_SAME) {
f5531dd9
MD
1107 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1108 if (n & (1U << bit_i))
1109 nr_one[bit_i]++;
b1a90ce3
MD
1110 }
1111 distrib_nr_child++;
1112 }
1113
1114 /*
1115 * The best bit selector is that for which the number of ones is
1116 * closest to half of the number of children in the
f5531dd9
MD
1117 * distribution. We calculate the distance using the double of
1118 * the sub-distribution sizes to eliminate truncation error.
b1a90ce3
MD
1119 */
1120 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1121 unsigned int distance_to_best;
1122
1b34283b 1123 distance_to_best = abs_int(((unsigned int) nr_one[bit_i] << 1U) - distrib_nr_child);
b1a90ce3
MD
1124 if (distance_to_best < overall_best_distance) {
1125 overall_best_distance = distance_to_best;
1126 bitsel = bit_i;
1127 }
1128 }
1129 dbg_printf("1 dimension pool bit selection: (%u)\n", bitsel);
1130 return bitsel;
1131}
1132
19ddcd04
MD
1133/*
 * Calculate bit distribution in two dimensions. Returns the two bits
 * (each 0 to 7) that split the distribution into four sub-distributions
 * each containing as close as possible to the same number of elements.
1137 */
1138static
1139void ja_node_sum_distribution_2d(enum ja_recompact mode,
1140 struct cds_ja *ja,
1141 unsigned int type_index,
1142 const struct cds_ja_type *type,
1143 struct cds_ja_inode *node,
1144 struct cds_ja_shadow_node *shadow_node,
1145 uint8_t n,
1146 struct cds_ja_inode_flag *child_node_flag,
1147 struct cds_ja_inode_flag **nullify_node_flag_ptr,
1148 unsigned int *_bitsel)
1149{
1150 uint8_t nr_2d_11[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE],
1151 nr_2d_10[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE],
1152 nr_2d_01[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE],
1153 nr_2d_00[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE];
1154 unsigned int bitsel[2] = { 0, 1 };
4a073c53
MD
1155 unsigned int bit_i, bit_j;
1156 int overall_best_distance = INT_MAX;
19ddcd04
MD
1157 unsigned int distrib_nr_child = 0;
1158
1159 memset(nr_2d_11, 0, sizeof(nr_2d_11));
1160 memset(nr_2d_10, 0, sizeof(nr_2d_10));
4a073c53
MD
1161 memset(nr_2d_01, 0, sizeof(nr_2d_01));
1162 memset(nr_2d_00, 0, sizeof(nr_2d_00));
19ddcd04
MD
1163
1164 switch (type->type_class) {
1165 case RCU_JA_LINEAR:
1166 {
1167 uint8_t nr_child =
1168 ja_linear_node_get_nr_child(type, node);
1169 unsigned int i;
1170
1171 for (i = 0; i < nr_child; i++) {
1172 struct cds_ja_inode_flag *iter;
1173 uint8_t v;
1174
1175 ja_linear_node_get_ith_pos(type, node, i, &v, &iter);
1176 if (!iter)
1177 continue;
1178 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
1179 continue;
1180 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1181 for (bit_j = 0; bit_j < bit_i; bit_j++) {
7f14b43a
MD
1182 if (v & (1U << bit_i)) {
1183 if (v & (1U << bit_j)) {
1184 nr_2d_11[bit_i][bit_j]++;
1185 } else {
1186 nr_2d_10[bit_i][bit_j]++;
1187 }
1188 } else {
1189 if (v & (1U << bit_j)) {
1190 nr_2d_01[bit_i][bit_j]++;
1191 } else {
1192 nr_2d_00[bit_i][bit_j]++;
1193 }
19ddcd04
MD
1194 }
1195 }
1196 }
1197 distrib_nr_child++;
1198 }
1199 break;
1200 }
1201 case RCU_JA_POOL:
1202 {
1203 unsigned int pool_nr;
1204
1205 for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) {
1206 struct cds_ja_inode *pool =
1207 ja_pool_node_get_ith_pool(type,
1208 node, pool_nr);
1209 uint8_t nr_child =
1210 ja_linear_node_get_nr_child(type, pool);
1211 unsigned int j;
1212
1213 for (j = 0; j < nr_child; j++) {
1214 struct cds_ja_inode_flag *iter;
1215 uint8_t v;
1216
1217 ja_linear_node_get_ith_pos(type, pool,
1218 j, &v, &iter);
1219 if (!iter)
1220 continue;
1221 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
1222 continue;
1223 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1224 for (bit_j = 0; bit_j < bit_i; bit_j++) {
7f14b43a
MD
1225 if (v & (1U << bit_i)) {
1226 if (v & (1U << bit_j)) {
1227 nr_2d_11[bit_i][bit_j]++;
1228 } else {
1229 nr_2d_10[bit_i][bit_j]++;
1230 }
1231 } else {
1232 if (v & (1U << bit_j)) {
1233 nr_2d_01[bit_i][bit_j]++;
1234 } else {
1235 nr_2d_00[bit_i][bit_j]++;
1236 }
19ddcd04
MD
1237 }
1238 }
1239 }
1240 distrib_nr_child++;
1241 }
1242 }
1243 break;
1244 }
1245 case RCU_JA_PIGEON:
1246 {
19ddcd04
MD
1247 unsigned int i;
1248
1249 assert(mode == JA_RECOMPACT_DEL);
48cbe001 1250 for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
19ddcd04
MD
1251 struct cds_ja_inode_flag *iter;
1252
1253 iter = ja_pigeon_node_get_ith_pos(type, node, i);
1254 if (!iter)
1255 continue;
1256 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
1257 continue;
1258 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1259 for (bit_j = 0; bit_j < bit_i; bit_j++) {
7f14b43a
MD
1260 if (i & (1U << bit_i)) {
1261 if (i & (1U << bit_j)) {
1262 nr_2d_11[bit_i][bit_j]++;
1263 } else {
1264 nr_2d_10[bit_i][bit_j]++;
1265 }
1266 } else {
1267 if (i & (1U << bit_j)) {
1268 nr_2d_01[bit_i][bit_j]++;
1269 } else {
1270 nr_2d_00[bit_i][bit_j]++;
1271 }
19ddcd04
MD
1272 }
1273 }
1274 }
1275 distrib_nr_child++;
1276 }
1277 break;
1278 }
1279 case RCU_JA_NULL:
1280 assert(mode == JA_RECOMPACT_ADD_NEXT);
1281 break;
1282 default:
1283 assert(0);
1284 break;
1285 }
1286
1287 if (mode == JA_RECOMPACT_ADD_NEXT || mode == JA_RECOMPACT_ADD_SAME) {
1288 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1289 for (bit_j = 0; bit_j < bit_i; bit_j++) {
7f14b43a
MD
1290 if (n & (1U << bit_i)) {
1291 if (n & (1U << bit_j)) {
1292 nr_2d_11[bit_i][bit_j]++;
1293 } else {
1294 nr_2d_10[bit_i][bit_j]++;
1295 }
1296 } else {
1297 if (n & (1U << bit_j)) {
1298 nr_2d_01[bit_i][bit_j]++;
1299 } else {
1300 nr_2d_00[bit_i][bit_j]++;
1301 }
19ddcd04
MD
1302 }
1303 }
1304 }
1305 distrib_nr_child++;
1306 }
1307
1308 /*
1309 * The best bit selector is that for which the number of nodes
1310 * in each sub-class is closest to one-fourth of the number of
1311 * children in the distribution. We calculate the distance using
1312 * 4 times the size of the sub-distribution to eliminate
1313 * truncation error.
1314 */
1315 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1316 for (bit_j = 0; bit_j < bit_i; bit_j++) {
4a073c53 1317 int distance_to_best[4];
19ddcd04 1318
1b34283b
MD
1319 distance_to_best[0] = ((unsigned int) nr_2d_11[bit_i][bit_j] << 2U) - distrib_nr_child;
1320 distance_to_best[1] = ((unsigned int) nr_2d_10[bit_i][bit_j] << 2U) - distrib_nr_child;
1321 distance_to_best[2] = ((unsigned int) nr_2d_01[bit_i][bit_j] << 2U) - distrib_nr_child;
1322 distance_to_best[3] = ((unsigned int) nr_2d_00[bit_i][bit_j] << 2U) - distrib_nr_child;
19ddcd04 1323
4a073c53
MD
1324 /* Consider worse distance above best */
1325 if (distance_to_best[1] > 0 && distance_to_best[1] > distance_to_best[0])
19ddcd04 1326 distance_to_best[0] = distance_to_best[1];
4a073c53 1327 if (distance_to_best[2] > 0 && distance_to_best[2] > distance_to_best[0])
19ddcd04 1328 distance_to_best[0] = distance_to_best[2];
4a073c53 1329 if (distance_to_best[3] > 0 && distance_to_best[3] > distance_to_best[0])
19ddcd04 1330 distance_to_best[0] = distance_to_best[3];
4a073c53 1331
19ddcd04
MD
1332 /*
1333 * If our worse distance is better than overall,
1334 * we become new best candidate.
1335 */
1336 if (distance_to_best[0] < overall_best_distance) {
1337 overall_best_distance = distance_to_best[0];
1338 bitsel[0] = bit_i;
1339 bitsel[1] = bit_j;
1340 }
1341 }
1342 }
1343
1344 dbg_printf("2 dimensions pool bit selection: (%u,%u)\n", bitsel[0], bitsel[1]);
1345
1346 /* Return our bit selection */
1347 _bitsel[0] = bitsel[0];
1348 _bitsel[1] = bitsel[1];
1349}
1350
48cbe001
MD
1351static
1352unsigned int find_nearest_type_index(unsigned int type_index,
1353 unsigned int nr_nodes)
1354{
1355 const struct cds_ja_type *type;
1356
1357 assert(type_index != NODE_INDEX_NULL);
1358 if (nr_nodes == 0)
1359 return NODE_INDEX_NULL;
1360 for (;;) {
1361 type = &ja_types[type_index];
1362 if (nr_nodes < type->min_child)
1363 type_index--;
1364 else if (nr_nodes > type->max_child)
1365 type_index++;
1366 else
1367 break;
1368 }
1369 return type_index;
1370}
1371
7a0b2331
MD
1372/*
1373 * ja_node_recompact_add: recompact a node, adding a new child.
2e313670 1374 * Return 0 on success, -EAGAIN if need to retry, or other negative
5a9a87dd 1375 * error value otherwise.
7a0b2331
MD
1376 */
1377static
2e313670
MD
1378int ja_node_recompact(enum ja_recompact mode,
1379 struct cds_ja *ja,
e1db2db5 1380 unsigned int old_type_index,
d96bfb0d 1381 const struct cds_ja_type *old_type,
b4540e8a 1382 struct cds_ja_inode *old_node,
5a9a87dd 1383 struct cds_ja_shadow_node *shadow_node,
3d8fe307 1384 struct cds_ja_inode_flag **old_node_flag_ptr, uint8_t n,
af3cbd45 1385 struct cds_ja_inode_flag *child_node_flag,
48cbe001
MD
1386 struct cds_ja_inode_flag **nullify_node_flag_ptr,
1387 int level)
7a0b2331 1388{
e1db2db5 1389 unsigned int new_type_index;
b4540e8a 1390 struct cds_ja_inode *new_node;
af3cbd45 1391 struct cds_ja_shadow_node *new_shadow_node = NULL;
d96bfb0d 1392 const struct cds_ja_type *new_type;
3d8fe307 1393 struct cds_ja_inode_flag *new_node_flag, *old_node_flag;
7a0b2331 1394 int ret;
f07b240f 1395 int fallback = 0;
7a0b2331 1396
3d8fe307
MD
1397 old_node_flag = *old_node_flag_ptr;
1398
48cbe001
MD
1399 /*
1400 * Need to find nearest type index even for ADD_SAME, because
1401 * this recompaction, when applied to linear nodes, will garbage
1402 * collect dummy (NULL) entries, and can therefore cause a few
1403 * linear representations to be skipped.
1404 */
2e313670 1405 switch (mode) {
19ddcd04 1406 case JA_RECOMPACT_ADD_SAME:
48cbe001
MD
1407 new_type_index = find_nearest_type_index(old_type_index,
1408 shadow_node->nr_child + 1);
1409 dbg_printf("Recompact for node with %u children\n",
1410 shadow_node->nr_child + 1);
2e313670 1411 break;
19ddcd04 1412 case JA_RECOMPACT_ADD_NEXT:
2e313670
MD
1413 if (!shadow_node || old_type_index == NODE_INDEX_NULL) {
1414 new_type_index = 0;
48cbe001 1415 dbg_printf("Recompact for NULL\n");
2e313670 1416 } else {
48cbe001
MD
1417 new_type_index = find_nearest_type_index(old_type_index,
1418 shadow_node->nr_child + 1);
1419 dbg_printf("Recompact for node with %u children\n",
1420 shadow_node->nr_child + 1);
2e313670
MD
1421 }
1422 break;
1423 case JA_RECOMPACT_DEL:
48cbe001
MD
1424 new_type_index = find_nearest_type_index(old_type_index,
1425 shadow_node->nr_child - 1);
1426 dbg_printf("Recompact for node with %u children\n",
1427 shadow_node->nr_child - 1);
2e313670
MD
1428 break;
1429 default:
1430 assert(0);
7a0b2331 1431 }
a2a7ff59 1432
f07b240f 1433retry: /* for fallback */
582a6ade
MD
1434 dbg_printf("Recompact from type %d to type %d\n",
1435 old_type_index, new_type_index);
7a0b2331 1436 new_type = &ja_types[new_type_index];
2e313670 1437 if (new_type_index != NODE_INDEX_NULL) {
354981c2 1438 new_node = alloc_cds_ja_node(ja, new_type);
2e313670
MD
1439 if (!new_node)
1440 return -ENOMEM;
b1a90ce3
MD
1441
1442 if (new_type->type_class == RCU_JA_POOL) {
1443 switch (new_type->nr_pool_order) {
1444 case 1:
1445 {
19ddcd04
MD
1446 unsigned int node_distrib_bitsel;
1447
b1a90ce3
MD
1448 node_distrib_bitsel =
1449 ja_node_sum_distribution_1d(mode, ja,
1450 old_type_index, old_type,
1451 old_node, shadow_node,
1452 n, child_node_flag,
1453 nullify_node_flag_ptr);
1454 assert(!((unsigned long) new_node & JA_POOL_1D_MASK));
1455 new_node_flag = ja_node_flag_pool_1d(new_node,
1456 new_type_index, node_distrib_bitsel);
1457 break;
1458 }
1459 case 2:
1460 {
19ddcd04
MD
1461 unsigned int node_distrib_bitsel[2];
1462
1463 ja_node_sum_distribution_2d(mode, ja,
1464 old_type_index, old_type,
1465 old_node, shadow_node,
1466 n, child_node_flag,
1467 nullify_node_flag_ptr,
1468 node_distrib_bitsel);
b1a90ce3
MD
1469 assert(!((unsigned long) new_node & JA_POOL_1D_MASK));
1470 assert(!((unsigned long) new_node & JA_POOL_2D_MASK));
19ddcd04
MD
1471 new_node_flag = ja_node_flag_pool_2d(new_node,
1472 new_type_index, node_distrib_bitsel);
b1a90ce3
MD
1473 break;
1474 }
1475 default:
1476 assert(0);
1477 }
1478 } else {
1479 new_node_flag = ja_node_flag(new_node, new_type_index);
1480 }
1481
2e313670 1482 dbg_printf("Recompact inherit lock from %p\n", shadow_node);
48cbe001 1483 new_shadow_node = rcuja_shadow_set(ja->ht, new_node_flag, shadow_node, ja, level);
2e313670 1484 if (!new_shadow_node) {
354981c2 1485 free_cds_ja_node(ja, new_node);
2e313670
MD
1486 return -ENOMEM;
1487 }
1488 if (fallback)
1489 new_shadow_node->fallback_removal_count =
1490 JA_FALLBACK_REMOVAL_COUNT;
1491 } else {
1492 new_node = NULL;
1493 new_node_flag = NULL;
e1db2db5 1494 }
11c5e016 1495
19ddcd04 1496 assert(mode != JA_RECOMPACT_ADD_NEXT || old_type->type_class != RCU_JA_PIGEON);
2e313670
MD
1497
1498 if (new_type_index == NODE_INDEX_NULL)
1499 goto skip_copy;
1500
11c5e016
MD
1501 switch (old_type->type_class) {
1502 case RCU_JA_LINEAR:
1503 {
1504 uint8_t nr_child =
1505 ja_linear_node_get_nr_child(old_type, old_node);
1506 unsigned int i;
1507
1508 for (i = 0; i < nr_child; i++) {
b4540e8a 1509 struct cds_ja_inode_flag *iter;
11c5e016
MD
1510 uint8_t v;
1511
1512 ja_linear_node_get_ith_pos(old_type, old_node, i, &v, &iter);
1513 if (!iter)
1514 continue;
af3cbd45 1515 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
2e313670 1516 continue;
b1a90ce3 1517 ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
f07b240f 1518 new_shadow_node,
11c5e016 1519 v, iter);
f07b240f
MD
1520 if (new_type->type_class == RCU_JA_POOL && ret) {
1521 goto fallback_toosmall;
1522 }
11c5e016
MD
1523 assert(!ret);
1524 }
1525 break;
1526 }
1527 case RCU_JA_POOL:
1528 {
1529 unsigned int pool_nr;
1530
1531 for (pool_nr = 0; pool_nr < (1U << old_type->nr_pool_order); pool_nr++) {
b4540e8a 1532 struct cds_ja_inode *pool =
11c5e016
MD
1533 ja_pool_node_get_ith_pool(old_type,
1534 old_node, pool_nr);
1535 uint8_t nr_child =
1536 ja_linear_node_get_nr_child(old_type, pool);
1537 unsigned int j;
1538
1539 for (j = 0; j < nr_child; j++) {
b4540e8a 1540 struct cds_ja_inode_flag *iter;
11c5e016
MD
1541 uint8_t v;
1542
1543 ja_linear_node_get_ith_pos(old_type, pool,
1544 j, &v, &iter);
1545 if (!iter)
1546 continue;
af3cbd45 1547 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
2e313670 1548 continue;
b1a90ce3 1549 ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
f07b240f 1550 new_shadow_node,
11c5e016 1551 v, iter);
f07b240f
MD
1552 if (new_type->type_class == RCU_JA_POOL
1553 && ret) {
1554 goto fallback_toosmall;
1555 }
11c5e016
MD
1556 assert(!ret);
1557 }
1558 }
1559 break;
7a0b2331 1560 }
a2a7ff59 1561 case RCU_JA_NULL:
19ddcd04 1562 assert(mode == JA_RECOMPACT_ADD_NEXT);
a2a7ff59 1563 break;
11c5e016 1564 case RCU_JA_PIGEON:
2e313670 1565 {
2e313670
MD
1566 unsigned int i;
1567
1568 assert(mode == JA_RECOMPACT_DEL);
48cbe001 1569 for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
2e313670
MD
1570 struct cds_ja_inode_flag *iter;
1571
1572 iter = ja_pigeon_node_get_ith_pos(old_type, old_node, i);
1573 if (!iter)
1574 continue;
af3cbd45 1575 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
2e313670 1576 continue;
b1a90ce3 1577 ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
2e313670
MD
1578 new_shadow_node,
1579 i, iter);
1580 if (new_type->type_class == RCU_JA_POOL && ret) {
1581 goto fallback_toosmall;
1582 }
1583 assert(!ret);
1584 }
1585 break;
1586 }
11c5e016
MD
1587 default:
1588 assert(0);
5a9a87dd 1589 ret = -EINVAL;
f07b240f 1590 goto end;
11c5e016 1591 }
2e313670 1592skip_copy:
11c5e016 1593
19ddcd04 1594 if (mode == JA_RECOMPACT_ADD_NEXT || mode == JA_RECOMPACT_ADD_SAME) {
2e313670 1595 /* add node */
b1a90ce3 1596 ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
2e313670
MD
1597 new_shadow_node,
1598 n, child_node_flag);
7b413155
MD
1599 if (new_type->type_class == RCU_JA_POOL && ret) {
1600 goto fallback_toosmall;
1601 }
2e313670
MD
1602 assert(!ret);
1603 }
19ddcd04
MD
1604
1605 if (fallback) {
1606 dbg_printf("Using fallback for %u children, node type index: %u, mode %s\n",
1607 new_shadow_node->nr_child, old_type_index, mode == JA_RECOMPACT_ADD_NEXT ? "add_next" :
1608 (mode == JA_RECOMPACT_DEL ? "del" : "add_same"));
f83b3e90
MD
1609 if (ja_debug_counters())
1610 uatomic_inc(&ja->node_fallback_count_distribution[new_shadow_node->nr_child]);
19ddcd04
MD
1611 }
1612
3d8fe307
MD
1613 /* Return pointer to new recompacted node through old_node_flag_ptr */
1614 *old_node_flag_ptr = new_node_flag;
a2a7ff59 1615 if (old_node) {
2e313670
MD
1616 int flags;
1617
1618 flags = RCUJA_SHADOW_CLEAR_FREE_NODE;
1619 /*
1620 * It is OK to free the lock associated with a node
1621 * going to NULL, since we are holding the parent lock.
1622 * This synchronizes removal with re-add of that node.
1623 */
1624 if (new_type_index == NODE_INDEX_NULL)
48cbe001 1625 flags |= RCUJA_SHADOW_CLEAR_FREE_LOCK;
3d8fe307 1626 ret = rcuja_shadow_clear(ja->ht, old_node_flag, shadow_node,
2e313670 1627 flags);
a2a7ff59
MD
1628 assert(!ret);
1629 }
5a9a87dd
MD
1630
1631 ret = 0;
f07b240f 1632end:
5a9a87dd 1633 return ret;
f07b240f
MD
1634
1635fallback_toosmall:
1636 /* fallback if next pool is too small */
af3cbd45 1637 assert(new_shadow_node);
3d8fe307 1638 ret = rcuja_shadow_clear(ja->ht, new_node_flag, new_shadow_node,
f07b240f
MD
1639 RCUJA_SHADOW_CLEAR_FREE_NODE);
1640 assert(!ret);
1641
19ddcd04
MD
1642 switch (mode) {
1643 case JA_RECOMPACT_ADD_SAME:
1644 /*
1645 * JA_RECOMPACT_ADD_SAME is only triggered if a linear
1646 * node within a pool has unused entries. It should
1647 * therefore _never_ be too small.
1648 */
4a073c53 1649 assert(0);
4cde8267
MD
1650
1651 /* Fall-through */
19ddcd04
MD
1652 case JA_RECOMPACT_ADD_NEXT:
1653 {
1654 const struct cds_ja_type *next_type;
1655
1656 /*
1657 * Recompaction attempt on add failed. Should only
1658 * happen if target node type is pool. Caused by
1659 * hard-to-split distribution. Recompact using the next
1660 * distribution size.
1661 */
1662 assert(new_type->type_class == RCU_JA_POOL);
1663 next_type = &ja_types[new_type_index + 1];
1664 /*
1665 * Try going to the next pool size if our population
1666 * fits within its range. This is not flagged as a
1667 * fallback.
1668 */
1669 if (shadow_node->nr_child + 1 >= next_type->min_child
1670 && shadow_node->nr_child + 1 <= next_type->max_child) {
1671 new_type_index++;
1672 goto retry;
1673 } else {
1674 new_type_index++;
1675 dbg_printf("Add fallback to type %d\n", new_type_index);
f83b3e90
MD
1676 if (ja_debug_counters())
1677 uatomic_inc(&ja->nr_fallback);
19ddcd04
MD
1678 fallback = 1;
1679 goto retry;
1680 }
1681 break;
1682 }
1683 case JA_RECOMPACT_DEL:
1684 /*
1685 * Recompaction attempt on delete failed. Should only
1686 * happen if target node type is pool. This is caused by
1687 * a hard-to-split distribution. Recompact on same node
1688 * size, but flag current node as "fallback" to ensure
1689 * we don't attempt recompaction before some activity
1690 * has reshuffled our node.
1691 */
1692 assert(new_type->type_class == RCU_JA_POOL);
1693 new_type_index = old_type_index;
1694 dbg_printf("Delete fallback keeping type %d\n", new_type_index);
1695 uatomic_inc(&ja->nr_fallback);
1696 fallback = 1;
1697 goto retry;
1698 default:
1699 assert(0);
1700 return -EINVAL;
1701 }
1702
1703 /*
1704 * Last resort fallback: pigeon.
1705 */
f07b240f
MD
1706 new_type_index = (1UL << JA_TYPE_BITS) - 1;
1707 dbg_printf("Fallback to type %d\n", new_type_index);
1708 uatomic_inc(&ja->nr_fallback);
1709 fallback = 1;
1710 goto retry;
7a0b2331
MD
1711}
1712
5a9a87dd 1713/*
2e313670 1714 * Return 0 on success, -EAGAIN if a retry is needed, or other negative
5a9a87dd
MD
1715 * error value otherwise.
1716 */
7a0b2331 1717static
d96bfb0d 1718int ja_node_set_nth(struct cds_ja *ja,
b4540e8a 1719 struct cds_ja_inode_flag **node_flag, uint8_t n,
5a9a87dd 1720 struct cds_ja_inode_flag *child_node_flag,
48cbe001
MD
1721 struct cds_ja_shadow_node *shadow_node,
1722 int level)
7a0b2331
MD
1723{
1724 int ret;
e1db2db5 1725 unsigned int type_index;
d96bfb0d 1726 const struct cds_ja_type *type;
b4540e8a 1727 struct cds_ja_inode *node;
7a0b2331 1728
a2a7ff59
MD
1729 dbg_printf("ja_node_set_nth for n=%u, node %p, shadow %p\n",
1730 (unsigned int) n, ja_node_ptr(*node_flag), shadow_node);
1731
e1db2db5
MD
1732 node = ja_node_ptr(*node_flag);
1733 type_index = ja_node_type(*node_flag);
1734 type = &ja_types[type_index];
b1a90ce3 1735 ret = _ja_node_set_nth(type, node, *node_flag, shadow_node,
e1db2db5 1736 n, child_node_flag);
2e313670
MD
1737 switch (ret) {
1738 case -ENOSPC:
19ddcd04
MD
1739 /* Not enough space in node, need to recompact to next type. */
1740 ret = ja_node_recompact(JA_RECOMPACT_ADD_NEXT, ja, type_index, type, node,
48cbe001 1741 shadow_node, node_flag, n, child_node_flag, NULL, level);
2e313670
MD
1742 break;
1743 case -ERANGE:
1744 /* Node needs to be recompacted. */
19ddcd04 1745 ret = ja_node_recompact(JA_RECOMPACT_ADD_SAME, ja, type_index, type, node,
48cbe001 1746 shadow_node, node_flag, n, child_node_flag, NULL, level);
2e313670
MD
1747 break;
1748 }
1749 return ret;
1750}
1751
1752/*
1753 * Return 0 on success, -EAGAIN if a retry is needed, or other negative
1754 * error value otherwise.
1755 */
1756static
af3cbd45
MD
1757int ja_node_clear_ptr(struct cds_ja *ja,
1758 struct cds_ja_inode_flag **node_flag_ptr, /* Pointer to location to nullify */
1759 struct cds_ja_inode_flag **parent_node_flag_ptr, /* Address of parent ptr in its parent */
1760 struct cds_ja_shadow_node *shadow_node, /* of parent */
48cbe001 1761 uint8_t n, int level)
2e313670
MD
1762{
1763 int ret;
1764 unsigned int type_index;
1765 const struct cds_ja_type *type;
1766 struct cds_ja_inode *node;
1767
af3cbd45
MD
1768 dbg_printf("ja_node_clear_ptr for node %p, shadow %p, target ptr %p\n",
1769 ja_node_ptr(*parent_node_flag_ptr), shadow_node, node_flag_ptr);
2e313670 1770
af3cbd45
MD
1771 node = ja_node_ptr(*parent_node_flag_ptr);
1772 type_index = ja_node_type(*parent_node_flag_ptr);
2e313670 1773 type = &ja_types[type_index];
19ddcd04 1774 ret = _ja_node_clear_ptr(type, node, *parent_node_flag_ptr, shadow_node, node_flag_ptr, n);
2e313670 1775 if (ret == -EFBIG) {
19ddcd04 1776 /* Should try recompaction. */
2e313670 1777 ret = ja_node_recompact(JA_RECOMPACT_DEL, ja, type_index, type, node,
af3cbd45 1778 shadow_node, parent_node_flag_ptr, n, NULL,
48cbe001 1779 node_flag_ptr, level);
7a0b2331
MD
1780 }
1781 return ret;
1782}
be9a7474 1783
03ec1aeb 1784struct cds_ja_node *cds_ja_lookup(struct cds_ja *ja, uint64_t key)
b4540e8a 1785{
41975c12
MD
1786 unsigned int tree_depth, i;
1787 struct cds_ja_inode_flag *node_flag;
1788
102b3189 1789 if (caa_unlikely(key > ja->key_max || key == UINT64_MAX))
03ec1aeb 1790 return NULL;
41975c12 1791 tree_depth = ja->tree_depth;
5a9a87dd 1792 node_flag = rcu_dereference(ja->root);
41975c12 1793
5a9a87dd
MD
1794 /* level 0: root node */
1795 if (!ja_node_ptr(node_flag))
03ec1aeb 1796 return NULL;
5a9a87dd
MD
1797
1798 for (i = 1; i < tree_depth; i++) {
79b41067
MD
1799 uint8_t iter_key;
1800
1801 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1)));
48cbe001 1802 node_flag = ja_node_get_nth(node_flag, NULL, iter_key);
582a6ade
MD
1803 dbg_printf("cds_ja_lookup iter key lookup %u finds node_flag %p\n",
1804 (unsigned int) iter_key, node_flag);
41975c12 1805 if (!ja_node_ptr(node_flag))
03ec1aeb 1806 return NULL;
41975c12
MD
1807 }
1808
5a9a87dd 1809 /* Last level lookup succeeded. We got an actual match. */
03ec1aeb 1810 return (struct cds_ja_node *) node_flag;
5a9a87dd
MD
1811}
1812
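/*
 * Editor's illustration (not part of the original file): typical use of
 * cds_ja_lookup(). Callers embed struct cds_ja_node in their own
 * structure and resolve it with caa_container_of() while holding the
 * RCU read-side lock. "struct myobj" and its "ja_node" field are
 * hypothetical caller-side names.
 *
 *	struct myobj {
 *		struct cds_ja_node ja_node;
 *		int payload;
 *	};
 *
 *	struct cds_ja_node *node;
 *	struct myobj *obj;
 *
 *	rcu_read_lock();
 *	node = cds_ja_lookup(ja, key);
 *	if (node) {
 *		obj = caa_container_of(node, struct myobj, ja_node);
 *		(void) obj->payload;	(valid while the read lock is held)
 *	}
 *	rcu_read_unlock();
 *
 * The loop above consumes one byte of the key per level, most
 * significant byte first, so a lookup visits at most tree_depth - 1
 * internal nodes.
 */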
b023ba9f
MD
1813static
1814struct cds_ja_node *cds_ja_lookup_inequality(struct cds_ja *ja, uint64_t key,
36305a3d 1815 uint64_t *result_key, enum ja_lookup_inequality mode)
291b2543
MD
1816{
1817 int tree_depth, level;
1818 struct cds_ja_inode_flag *node_flag, *cur_node_depth[JA_MAX_DEPTH];
36305a3d
MD
1819 uint8_t cur_key[JA_MAX_DEPTH];
1820 uint64_t _result_key = 0;
b023ba9f 1821 enum ja_direction dir;
291b2543 1822
b023ba9f
MD
1823 switch (mode) {
1824 case JA_LOOKUP_BE:
b023ba9f 1825 case JA_LOOKUP_AE:
102b3189 1826 if (caa_unlikely(key > ja->key_max || key == UINT64_MAX))
b023ba9f
MD
1827 return NULL;
1828 break;
1829 default:
03ec1aeb 1830 return NULL;
b023ba9f 1831 }
291b2543
MD
1832
1833 memset(cur_node_depth, 0, sizeof(cur_node_depth));
36305a3d 1834 memset(cur_key, 0, sizeof(cur_key));
291b2543
MD
1835 tree_depth = ja->tree_depth;
1836 node_flag = rcu_dereference(ja->root);
1837 cur_node_depth[0] = node_flag;
1838
1839 /* level 0: root node */
1840 if (!ja_node_ptr(node_flag))
03ec1aeb 1841 return NULL;
291b2543
MD
1842
1843 for (level = 1; level < tree_depth; level++) {
1844 uint8_t iter_key;
1845
1846 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - level - 1)));
1847 node_flag = ja_node_get_nth(node_flag, NULL, iter_key);
1848 if (!ja_node_ptr(node_flag))
1849 break;
36305a3d 1850 cur_key[level - 1] = iter_key;
291b2543 1851 cur_node_depth[level] = node_flag;
b023ba9f 1852 dbg_printf("cds_ja_lookup_inequality iter key lookup %u finds node_flag %p\n",
291b2543
MD
1853 (unsigned int) iter_key, node_flag);
1854 }
1855
1856 if (level == tree_depth) {
1857 /* Last level lookup succeeded. We got an equal match. */
36305a3d
MD
1858 if (result_key)
1859 *result_key = key;
03ec1aeb 1860 return (struct cds_ja_node *) node_flag;
291b2543
MD
1861 }
1862
1863 /*
b023ba9f 1864 * Find the nearest populated key to the left/right of the current
291b2543 1865 * position. Current node is cur_node_depth[level].
b023ba9f
MD
1866 * Start at the current level. If we cannot find any key left/right
1867 * of ours, go one level up, seek the nearest key left/right of the
1868 * current one (recursively), and when we find one, get the
1869 * rightmost/leftmost child of its rightmost/leftmost child
1870 * (recursively).
291b2543 1871 */
b023ba9f
MD
1872 switch (mode) {
1873 case JA_LOOKUP_BE:
1874 dir = JA_LEFT;
1875 break;
1876 case JA_LOOKUP_AE:
1877 dir = JA_RIGHT;
1878 break;
1879 default:
1880 assert(0);
1881 }
291b2543
MD
1882 for (; level > 0; level--) {
1883 uint8_t iter_key;
1884
1885 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - level - 1)));
b023ba9f 1886 node_flag = ja_node_get_leftright(cur_node_depth[level - 1],
36305a3d 1887 iter_key, &cur_key[level - 1], dir);
86d700e9
MD
1888 dbg_printf("cds_ja_lookup_inequality find sibling from %u at %u finds node_flag %p\n",
1889 (unsigned int) iter_key, (unsigned int) cur_key[level - 1],
1890 node_flag);
36305a3d 1891 /* If found left/right sibling, find rightmost/leftmost child. */
291b2543
MD
1892 if (ja_node_ptr(node_flag))
1893 break;
1894 }
1895
1896 if (!level) {
b023ba9f 1897 /* Reached the root and could not find a left/right sibling. */
03ec1aeb 1898 return NULL;
291b2543
MD
1899 }
1900
1901 level++;
3c52f0f9
MD
1902
1903 /*
4cef6f97 1904 * From this point, we are guaranteed to be able to find a
b023ba9f
MD
1905 * "below than"/"above than" match. ja_attach_node() and
1906 * ja_detach_node() both guarantee that it is not possible for a
1907 * lookup to reach a dead-end.
3c52f0f9
MD
1908 */
1909
b023ba9f
MD
1910 /*
1911 * Find rightmost/leftmost child of rightmost/leftmost child
1912 * (recursively).
1913 */
1914 switch (mode) {
1915 case JA_LOOKUP_BE:
1916 dir = JA_RIGHTMOST;
1917 break;
1918 case JA_LOOKUP_AE:
1919 dir = JA_LEFTMOST;
1920 break;
1921 default:
1922 assert(0);
1923 }
291b2543 1924 for (; level < tree_depth; level++) {
36305a3d 1925 node_flag = ja_node_get_minmax(node_flag, &cur_key[level - 1], dir);
86d700e9
MD
1926 dbg_printf("cds_ja_lookup_inequality find minmax at %u finds node_flag %p\n",
1927 (unsigned int) cur_key[level - 1],
1928 node_flag);
291b2543
MD
1929 if (!ja_node_ptr(node_flag))
1930 break;
1931 }
1932
4cef6f97 1933 assert(level == tree_depth);
291b2543 1934
36305a3d
MD
1935 if (result_key) {
1936 for (level = 1; level < tree_depth; level++) {
1937 _result_key |= ((uint64_t) cur_key[level - 1])
1938 << (JA_BITS_PER_BYTE * (tree_depth - level - 1));
1939 }
1940 *result_key = _result_key;
1941 }
03ec1aeb 1942 return (struct cds_ja_node *) node_flag;
291b2543
MD
1943}
1944
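/*
 * Editor's illustration (not part of the original file): a worked
 * below-or-equal lookup, assuming a 16-bit array (tree_depth == 3)
 * populated with keys 0x1120 and 0x3040 only.
 *
 * cds_ja_lookup_below_equal(ja, 0x2050, &result_key):
 *  - level 1 looks for child 0x20 under the first-level node; only
 *    children 0x11 and 0x30 exist, so the equal-match descent stops.
 *  - the sibling loop then finds the nearest child to the left of
 *    0x20, which is 0x11 (cur_key[0] = 0x11).
 *  - the rightmost descent follows the rightmost child of the 0x11
 *    subtree, which is 0x20 (cur_key[1] = 0x20).
 *  - result_key is rebuilt from cur_key[] as 0x1120, and the node
 *    stored under key 0x1120 is returned.
 *
 * An above-or-equal lookup is symmetric, using right siblings and
 * leftmost descents.
 */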
36305a3d
MD
1945struct cds_ja_node *cds_ja_lookup_below_equal(struct cds_ja *ja,
1946 uint64_t key, uint64_t *result_key)
b023ba9f 1947{
86d700e9 1948 dbg_printf("cds_ja_lookup_below_equal key %" PRIu64 "\n", key);
36305a3d 1949 return cds_ja_lookup_inequality(ja, key, result_key, JA_LOOKUP_BE);
b023ba9f
MD
1950}
1951
36305a3d
MD
1952struct cds_ja_node *cds_ja_lookup_above_equal(struct cds_ja *ja,
1953 uint64_t key, uint64_t *result_key)
b023ba9f 1954{
86d700e9 1955 dbg_printf("cds_ja_lookup_above_equal key %" PRIu64 "\n", key);
36305a3d 1956 return cds_ja_lookup_inequality(ja, key, result_key, JA_LOOKUP_AE);
b023ba9f
MD
1957}
1958
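/*
 * Editor's illustration (not part of the original file): result_key
 * reports which key actually matched, since the match may differ from
 * the key that was queried.
 *
 *	uint64_t match_key;
 *	struct cds_ja_node *node;
 *
 *	rcu_read_lock();
 *	node = cds_ja_lookup_below_equal(ja, 100, &match_key);
 *	if (node)
 *		assert(match_key <= 100);
 *	rcu_read_unlock();
 *
 * Passing NULL as result_key is supported when the matching key value
 * is not needed.
 */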
5a9a87dd
MD
1959/*
1960 * We reached an unpopulated node. Create it and the children we need,
1961 * and then attach the entire branch to the current node. This may
1962 * trigger recompaction of the current node. Locks needed: node lock
1963 * (for add), and, possibly, parent node lock (to update pointer due to
1964 * node recompaction).
1965 *
1966 * First take node lock, check if recompaction is needed, then take
1967 * parent lock (if needed). Then we can proceed to create the new
1968 * branch. Publish the new branch, and release locks.
1969 * TODO: we currently always take the parent lock even when not needed.
47d2eab3
MD
1970 *
1971 * ja_attach_node() ensures that a lookup will _never_ see a branch that
1972 * leads to a dead-end: the entire content of the new branch is
1973 * populated first, creating a complete cluster, and only then is the
1974 * cluster attached to the rest of the tree, making it visible
1975 * to lookups.
5a9a87dd
MD
1976 */
1977static
1978int ja_attach_node(struct cds_ja *ja,
b0ca2d21 1979 struct cds_ja_inode_flag **attach_node_flag_ptr,
b62a8d0c 1980 struct cds_ja_inode_flag *attach_node_flag,
48cbe001
MD
1981 struct cds_ja_inode_flag *parent_attach_node_flag,
1982 struct cds_ja_inode_flag **old_node_flag_ptr,
1983 struct cds_ja_inode_flag *old_node_flag,
5a9a87dd 1984 uint64_t key,
79b41067 1985 unsigned int level,
5a9a87dd
MD
1986 struct cds_ja_node *child_node)
1987{
1988 struct cds_ja_shadow_node *shadow_node = NULL,
af3cbd45 1989 *parent_shadow_node = NULL;
5a9a87dd
MD
1990 struct cds_ja_inode_flag *iter_node_flag, *iter_dest_node_flag;
1991 int ret, i;
a2a7ff59 1992 struct cds_ja_inode_flag *created_nodes[JA_MAX_DEPTH];
5a9a87dd
MD
1993 int nr_created_nodes = 0;
1994
48cbe001
MD
1995 dbg_printf("Attach node at level %u (old_node_flag %p, attach_node_flag_ptr %p attach_node_flag %p, parent_attach_node_flag %p)\n",
1996 level, old_node_flag, attach_node_flag_ptr, attach_node_flag, parent_attach_node_flag);
a2a7ff59 1997
48cbe001
MD
1998 assert(!old_node_flag);
1999 if (attach_node_flag) {
2000 shadow_node = rcuja_shadow_lookup_lock(ja->ht, attach_node_flag);
2001 if (!shadow_node) {
2002 ret = -EAGAIN;
2003 goto end;
2004 }
5a9a87dd 2005 }
48cbe001 2006 if (parent_attach_node_flag) {
5a9a87dd 2007 parent_shadow_node = rcuja_shadow_lookup_lock(ja->ht,
48cbe001 2008 parent_attach_node_flag);
5a9a87dd 2009 if (!parent_shadow_node) {
2e313670 2010 ret = -EAGAIN;
5a9a87dd
MD
2011 goto unlock_shadow;
2012 }
2013 }
2014
48cbe001 2015 if (old_node_flag_ptr && ja_node_ptr(*old_node_flag_ptr)) {
b306a0fe 2016 /*
c112acaa
MD
2017 * Target node has been updated between RCU lookup and
2018 * lock acquisition. We need to re-try lookup and
2019 * attach.
2020 */
2021 ret = -EAGAIN;
2022 goto unlock_parent;
2023 }
2024
9be99d4a
MD
2025 /*
2026 * Perform a lookup query to handle the case where
2027 * old_node_flag_ptr is NULL. We cannot use it to check if the
2028 * node has been populated between RCU lookup and mutex
2029 * acquisition.
2030 */
2031 if (!old_node_flag_ptr) {
2032 uint8_t iter_key;
2033 struct cds_ja_inode_flag *lookup_node_flag;
2034 struct cds_ja_inode_flag **lookup_node_flag_ptr;
2035
2036 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (ja->tree_depth - level)));
2037 lookup_node_flag = ja_node_get_nth(attach_node_flag,
2038 &lookup_node_flag_ptr,
2039 iter_key);
2040 if (lookup_node_flag) {
2041 ret = -EEXIST;
2042 goto unlock_parent;
2043 }
2044 }
2045
c112acaa 2046 if (attach_node_flag_ptr && ja_node_ptr(*attach_node_flag_ptr) !=
b62a8d0c 2047 ja_node_ptr(attach_node_flag)) {
c112acaa
MD
2048 /*
2049 * Target node has been updated between RCU lookup and
2050 * lock acquisition. We need to re-try lookup and
2051 * attach.
b306a0fe
MD
2052 */
2053 ret = -EAGAIN;
2054 goto unlock_parent;
2055 }
2056
a2a7ff59 2057 /* Create new branch, starting from bottom */
03ec1aeb 2058 iter_node_flag = (struct cds_ja_inode_flag *) child_node;
5a9a87dd 2059
48cbe001 2060 for (i = ja->tree_depth - 1; i >= (int) level; i--) {
79b41067
MD
2061 uint8_t iter_key;
2062
48cbe001 2063 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (ja->tree_depth - i - 1)));
79b41067 2064 dbg_printf("branch creation level %d, key %u\n",
48cbe001 2065 i, (unsigned int) iter_key);
5a9a87dd
MD
2066 iter_dest_node_flag = NULL;
2067 ret = ja_node_set_nth(ja, &iter_dest_node_flag,
79b41067 2068 iter_key,
5a9a87dd 2069 iter_node_flag,
48cbe001 2070 NULL, i);
9be99d4a
MD
2071 if (ret) {
2072 dbg_printf("branch creation error %d\n", ret);
5a9a87dd 2073 goto check_error;
9be99d4a 2074 }
5a9a87dd
MD
2075 created_nodes[nr_created_nodes++] = iter_dest_node_flag;
2076 iter_node_flag = iter_dest_node_flag;
2077 }
48cbe001 2078 assert(level > 0);
5a9a87dd 2079
48cbe001
MD
2080 /* Publish branch */
2081 if (level == 1) {
2082 /*
2083 * Attaching to root node.
2084 */
2085 rcu_assign_pointer(ja->root, iter_node_flag);
2086 } else {
79b41067
MD
2087 uint8_t iter_key;
2088
2089 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (ja->tree_depth - level)));
48cbe001
MD
2090 dbg_printf("publish branch at level %d, key %u\n",
2091 level - 1, (unsigned int) iter_key);
a2a7ff59 2092 /* We need to use set_nth on the previous level. */
48cbe001 2093 iter_dest_node_flag = attach_node_flag;
a2a7ff59 2094 ret = ja_node_set_nth(ja, &iter_dest_node_flag,
79b41067 2095 iter_key,
a2a7ff59 2096 iter_node_flag,
48cbe001 2097 shadow_node, level - 1);
9be99d4a
MD
2098 if (ret) {
2099 dbg_printf("branch publish error %d\n", ret);
a2a7ff59 2100 goto check_error;
9be99d4a 2101 }
48cbe001
MD
2102 /*
2103 * Attach branch
2104 */
2105 rcu_assign_pointer(*attach_node_flag_ptr, iter_dest_node_flag);
a2a7ff59
MD
2106 }
2107
5a9a87dd
MD
2108 /* Success */
2109 ret = 0;
2110
2111check_error:
2112 if (ret) {
2113 for (i = 0; i < nr_created_nodes; i++) {
2114 int tmpret;
a2a7ff59
MD
2115 int flags;
2116
2117 flags = RCUJA_SHADOW_CLEAR_FREE_LOCK;
2118 if (i)
2119 flags |= RCUJA_SHADOW_CLEAR_FREE_NODE;
5a9a87dd 2120 tmpret = rcuja_shadow_clear(ja->ht,
3d8fe307 2121 created_nodes[i],
a2a7ff59
MD
2122 NULL,
2123 flags);
5a9a87dd
MD
2124 assert(!tmpret);
2125 }
2126 }
b306a0fe 2127unlock_parent:
5a9a87dd
MD
2128 if (parent_shadow_node)
2129 rcuja_shadow_unlock(parent_shadow_node);
2130unlock_shadow:
2131 if (shadow_node)
2132 rcuja_shadow_unlock(shadow_node);
2133end:
2134 return ret;
2135}
2136
2137/*
03ec1aeb
MD
2138 * Lock the parent containing the pointer to the list of duplicates, and
2139 * add the node to this list. Failure can happen if a concurrent update
2140 * changes the parent before we get the lock. We return -EAGAIN in that case.
5a9a87dd
MD
2141 * Return 0 on success, negative error value on failure.
2142 */
2143static
2144int ja_chain_node(struct cds_ja *ja,
af3cbd45 2145 struct cds_ja_inode_flag *parent_node_flag,
fa112799 2146 struct cds_ja_inode_flag **node_flag_ptr,
c112acaa 2147 struct cds_ja_inode_flag *node_flag,
28fd1038 2148 struct cds_ja_node *last_node,
5a9a87dd
MD
2149 struct cds_ja_node *node)
2150{
2151 struct cds_ja_shadow_node *shadow_node;
28fd1038
MD
2152 struct cds_ja_node *iter_node;
2153 int ret = 0, found = 0;
5a9a87dd 2154
3d8fe307 2155 shadow_node = rcuja_shadow_lookup_lock(ja->ht, parent_node_flag);
b306a0fe 2156 if (!shadow_node) {
2e313670 2157 return -EAGAIN;
b306a0fe 2158 }
28fd1038
MD
2159 /*
2160 * Ensure that the previous node is still present at the end of the list.
2161 */
2162 iter_node = (struct cds_ja_node *) ja_node_ptr(node_flag);
2163 if ((struct cds_ja_node *) ja_node_ptr(*node_flag_ptr) != iter_node) {
2164 ret = -EAGAIN;
2165 goto end;
2166 }
2167 cds_ja_for_each_duplicate(iter_node) {
2168 if (iter_node == last_node)
2169 found = 1;
2170 }
2171 if (!found) {
fa112799
MD
2172 ret = -EAGAIN;
2173 goto end;
2174 }
03ec1aeb 2175 /*
28fd1038
MD
2176 * Add the node to the tail of the list to ensure that RCU traversals
2177 * will always see either the prior node or the newly added one when
2178 * executed concurrently with a sequence of add followed by del
2179 * on the same key. Safe against concurrent RCU read traversals.
03ec1aeb 2180 */
28fd1038
MD
2181 node->next = NULL;
2182 rcu_assign_pointer(last_node->next, node);
fa112799 2183end:
5a9a87dd 2184 rcuja_shadow_unlock(shadow_node);
fa112799 2185 return ret;
5a9a87dd
MD
2186}
2187
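/*
 * Editor's note (not part of the original file): all cds_ja_node
 * entries sharing the same key are chained into a singly-linked list
 * through their "next" field; the last-level node slot points to the
 * first duplicate:
 *
 *	slot -> node A -> node B -> NULL
 *
 * Readers walk this list with cds_ja_for_each_duplicate_rcu();
 * updaters use cds_ja_for_each_duplicate() while holding the parent
 * shadow-node lock, as ja_chain_node() above does.
 */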
75d573aa
MD
2188static
2189int _cds_ja_add(struct cds_ja *ja, uint64_t key,
6475613c 2190 struct cds_ja_node *node,
75d573aa 2191 struct cds_ja_node **unique_node_ret)
5a9a87dd
MD
2192{
2193 unsigned int tree_depth, i;
48cbe001 2194 struct cds_ja_inode_flag *attach_node_flag,
5a9a87dd 2195 *parent_node_flag,
b62a8d0c 2196 *parent2_node_flag,
48cbe001
MD
2197 *node_flag,
2198 *parent_attach_node_flag;
2199 struct cds_ja_inode_flag **attach_node_flag_ptr,
2200 **parent_node_flag_ptr,
2201 **node_flag_ptr;
5a9a87dd
MD
2202 int ret;
2203
102b3189 2204 if (caa_unlikely(key > ja->key_max || key == UINT64_MAX)) {
5a9a87dd 2205 return -EINVAL;
b306a0fe 2206 }
5a9a87dd
MD
2207 tree_depth = ja->tree_depth;
2208
2209retry:
a2a7ff59 2210 dbg_printf("cds_ja_add attempt: key %" PRIu64 ", node %p\n",
6475613c 2211 key, node);
5a9a87dd 2212 parent2_node_flag = NULL;
b0f74e47
MD
2213 parent_node_flag =
2214 (struct cds_ja_inode_flag *) &ja->root; /* Use root ptr address as key for mutex */
48cbe001 2215 parent_node_flag_ptr = NULL;
35170a44 2216 node_flag = rcu_dereference(ja->root);
48cbe001 2217 node_flag_ptr = &ja->root;
5a9a87dd
MD
2218
2219 /* Iterate on all internal levels */
a2a7ff59 2220 for (i = 1; i < tree_depth; i++) {
79b41067
MD
2221 uint8_t iter_key;
2222
48cbe001
MD
2223 if (!ja_node_ptr(node_flag))
2224 break;
2225 dbg_printf("cds_ja_add iter parent2_node_flag %p parent_node_flag %p node_flag_ptr %p node_flag %p\n",
2226 parent2_node_flag, parent_node_flag, node_flag_ptr, node_flag);
79b41067 2227 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1)));
5a9a87dd
MD
2228 parent2_node_flag = parent_node_flag;
2229 parent_node_flag = node_flag;
48cbe001 2230 parent_node_flag_ptr = node_flag_ptr;
5a9a87dd
MD
2231 node_flag = ja_node_get_nth(node_flag,
2232 &node_flag_ptr,
79b41067 2233 iter_key);
5a9a87dd
MD
2234 }
2235
2236 /*
48cbe001
MD
2237 * We reached either the bottom of the tree or an internal NULL node:
2238 * simply add the node at the last internal level, or chain it if the
2239 * key is already present.
5a9a87dd
MD
2240 */
2241 if (!ja_node_ptr(node_flag)) {
48cbe001
MD
2242 dbg_printf("cds_ja_add NULL parent2_node_flag %p parent_node_flag %p node_flag_ptr %p node_flag %p\n",
2243 parent2_node_flag, parent_node_flag, node_flag_ptr, node_flag);
75d573aa 2244
48cbe001
MD
2245 attach_node_flag = parent_node_flag;
2246 attach_node_flag_ptr = parent_node_flag_ptr;
2247 parent_attach_node_flag = parent2_node_flag;
2248
b0ca2d21 2249 ret = ja_attach_node(ja, attach_node_flag_ptr,
b62a8d0c 2250 attach_node_flag,
48cbe001
MD
2251 parent_attach_node_flag,
2252 node_flag_ptr,
2253 node_flag,
6475613c 2254 key, i, node);
5a9a87dd 2255 } else {
28fd1038
MD
2256 struct cds_ja_node *iter_node, *last_node = NULL;
2257
75d573aa
MD
2258 if (unique_node_ret) {
2259 *unique_node_ret = (struct cds_ja_node *) ja_node_ptr(node_flag);
2260 return -EEXIST;
2261 }
2262
28fd1038
MD
2263 /* Find last duplicate */
2264 iter_node = (struct cds_ja_node *) ja_node_ptr(node_flag);
2265 cds_ja_for_each_duplicate_rcu(iter_node)
2266 last_node = iter_node;
2267
48cbe001
MD
2268 dbg_printf("cds_ja_add duplicate parent2_node_flag %p parent_node_flag %p node_flag_ptr %p node_flag %p\n",
2269 parent2_node_flag, parent_node_flag, node_flag_ptr, node_flag);
75d573aa 2270
48cbe001
MD
2271 attach_node_flag = node_flag;
2272 attach_node_flag_ptr = node_flag_ptr;
2273 parent_attach_node_flag = parent_node_flag;
2274
5a9a87dd 2275 ret = ja_chain_node(ja,
48cbe001
MD
2276 parent_attach_node_flag,
2277 attach_node_flag_ptr,
2278 attach_node_flag,
28fd1038 2279 last_node,
6475613c 2280 node);
5a9a87dd 2281 }
b306a0fe 2282 if (ret == -EAGAIN || ret == -EEXIST)
5a9a87dd 2283 goto retry;
48cbe001 2284
5a9a87dd 2285 return ret;
b4540e8a
MD
2286}
2287
75d573aa 2288int cds_ja_add(struct cds_ja *ja, uint64_t key,
6475613c 2289 struct cds_ja_node *node)
75d573aa 2290{
6475613c 2291 return _cds_ja_add(ja, key, node, NULL);
75d573aa
MD
2292}
2293
2294struct cds_ja_node *cds_ja_add_unique(struct cds_ja *ja, uint64_t key,
6475613c 2295 struct cds_ja_node *node)
75d573aa
MD
2296{
2297 int ret;
2298 struct cds_ja_node *ret_node;
2299
6475613c 2300 ret = _cds_ja_add(ja, key, node, &ret_node);
75d573aa
MD
2301 if (ret == -EEXIST)
2302 return ret_node;
2303 else
6475613c 2304 return node;
75d573aa
MD
2305}
2306
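/*
 * Editor's illustration (not part of the original file): add-unique
 * idiom. The caller learns whether its node was inserted by comparing
 * the return value with the node it passed in. "myobj", its "ja_node"
 * field and handle_existing() are hypothetical caller-side names; as
 * with cds_ja_del() below, the call is assumed to run inside an RCU
 * read-side critical section.
 *
 *	struct cds_ja_node *ret_node;
 *
 *	rcu_read_lock();
 *	ret_node = cds_ja_add_unique(ja, key, &myobj->ja_node);
 *	rcu_read_unlock();
 *	if (ret_node != &myobj->ja_node)
 *		handle_existing(ret_node);	(key already present, myobj not added)
 */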
af3cbd45
MD
2307/*
2308 * Note: there is no need to look up the pointer address associated with
2309 * each node's nth item after taking the lock: it's already been done by
2310 * cds_ja_del while holding the rcu read-side lock, and our node rules
2311 * ensure that when a match value -> pointer is found in a node, it is
2312 * _NEVER_ changed for that node without recompaction, and recompaction
2313 * reallocates the node.
b306a0fe
MD
2314 * However, when a child is removed from "linear" nodes, its pointer
2315 * is set to NULL. We therefore check, while holding the locks, if this
2316 * pointer is NULL, and return -ENOENT to the caller if it is the case.
4cef6f97
MD
2317 *
2318 * ja_detach_node() ensures that a lookup will _never_ see a branch that
2319 * leads to a dead-end: when removing branch, it makes sure to perform
2320 * the "cut" at the highest node that has only one child, effectively
2321 * replacing it with a NULL pointer.
af3cbd45 2322 */
35170a44
MD
2323static
2324int ja_detach_node(struct cds_ja *ja,
2325 struct cds_ja_inode_flag **snapshot,
af3cbd45
MD
2326 struct cds_ja_inode_flag ***snapshot_ptr,
2327 uint8_t *snapshot_n,
35170a44
MD
2328 int nr_snapshot,
2329 uint64_t key,
2330 struct cds_ja_node *node)
2331{
af3cbd45
MD
2332 struct cds_ja_shadow_node *shadow_nodes[JA_MAX_DEPTH];
2333 struct cds_ja_inode_flag **node_flag_ptr = NULL,
2334 *parent_node_flag = NULL,
2335 **parent_node_flag_ptr = NULL;
b62a8d0c 2336 struct cds_ja_inode_flag *iter_node_flag;
4d6ef45e
MD
2337 int ret, i, nr_shadow = 0, nr_clear = 0, nr_branch = 0;
2338 uint8_t n = 0;
35170a44 2339
4d6ef45e 2340 assert(nr_snapshot == ja->tree_depth + 1);
35170a44 2341
af3cbd45
MD
2342 /*
2343 * From the last internal level node going up, get the node
2344 * lock, check if the node has only one child left. If it is the
2345 * case, we continue iterating upward. When we reach a node
2346 * which has more than one child left, we lock the parent, and
2347 * proceed to the node deletion (removing its children too).
2348 */
4d6ef45e 2349 for (i = nr_snapshot - 2; i >= 1; i--) {
af3cbd45
MD
2350 struct cds_ja_shadow_node *shadow_node;
2351
2352 shadow_node = rcuja_shadow_lookup_lock(ja->ht,
3d8fe307 2353 snapshot[i]);
af3cbd45
MD
2354 if (!shadow_node) {
2355 ret = -EAGAIN;
2356 goto end;
2357 }
af3cbd45 2358 shadow_nodes[nr_shadow++] = shadow_node;
b62a8d0c
MD
2359
2360 /*
2361 * Check if node has been removed between RCU
2362 * lookup and lock acquisition.
2363 */
2364 assert(snapshot_ptr[i + 1]);
2365 if (ja_node_ptr(*snapshot_ptr[i + 1])
2366 != ja_node_ptr(snapshot[i + 1])) {
2367 ret = -ENOENT;
2368 goto end;
2369 }
2370
2371 assert(shadow_node->nr_child > 0);
d810c97f 2372 if (shadow_node->nr_child == 1 && i > 1)
4d6ef45e
MD
2373 nr_clear++;
2374 nr_branch++;
af3cbd45
MD
2375 if (shadow_node->nr_child > 1 || i == 1) {
2376 /* Lock parent and break */
2377 shadow_node = rcuja_shadow_lookup_lock(ja->ht,
3d8fe307 2378 snapshot[i - 1]);
af3cbd45
MD
2379 if (!shadow_node) {
2380 ret = -EAGAIN;
2381 goto end;
2382 }
2383 shadow_nodes[nr_shadow++] = shadow_node;
b62a8d0c 2384
c112acaa
MD
2385 /*
2386 * Check if node has been removed between RCU
2387 * lookup and lock acquisition.
2388 */
b62a8d0c
MD
2389 assert(snapshot_ptr[i]);
2390 if (ja_node_ptr(*snapshot_ptr[i])
2391 != ja_node_ptr(snapshot[i])) {
c112acaa
MD
2392 ret = -ENOENT;
2393 goto end;
2394 }
2395
b62a8d0c 2396 node_flag_ptr = snapshot_ptr[i + 1];
4d6ef45e
MD
2397 n = snapshot_n[i + 1];
2398 parent_node_flag_ptr = snapshot_ptr[i];
2399 parent_node_flag = snapshot[i];
c112acaa 2400
af3cbd45
MD
2401 if (i > 1) {
2402 /*
2403 * Lock parent's parent, in case we need
2404 * to recompact parent.
2405 */
2406 shadow_node = rcuja_shadow_lookup_lock(ja->ht,
3d8fe307 2407 snapshot[i - 2]);
af3cbd45
MD
2408 if (!shadow_node) {
2409 ret = -EAGAIN;
2410 goto end;
2411 }
2412 shadow_nodes[nr_shadow++] = shadow_node;
b62a8d0c
MD
2413
2414 /*
2415 * Check if node has been removed between RCU
2416 * lookup and lock acquisition.
2417 */
2418 assert(snapshot_ptr[i - 1]);
2419 if (ja_node_ptr(*snapshot_ptr[i - 1])
2420 != ja_node_ptr(snapshot[i - 1])) {
2421 ret = -ENOENT;
2422 goto end;
2423 }
af3cbd45 2424 }
b62a8d0c 2425
af3cbd45
MD
2426 break;
2427 }
2428 }
2429
2430 /*
4d6ef45e
MD
2431 * At this point, we want to delete all nodes that are about to
2432 * be removed from shadow_nodes (except the last one, which is
2433 * either the root or the parent of the upmost node with 1
b62a8d0c
MD
2434 * child). OK to free lock here, because RCU read lock is held,
2435 * and free only performed in call_rcu.
af3cbd45
MD
2436 */
2437
2438 for (i = 0; i < nr_clear; i++) {
2439 ret = rcuja_shadow_clear(ja->ht,
3d8fe307 2440 shadow_nodes[i]->node_flag,
af3cbd45
MD
2441 shadow_nodes[i],
2442 RCUJA_SHADOW_CLEAR_FREE_NODE
2443 | RCUJA_SHADOW_CLEAR_FREE_LOCK);
2444 assert(!ret);
2445 }
2446
2447 iter_node_flag = parent_node_flag;
2448 /* Remove from parent */
2449 ret = ja_node_clear_ptr(ja,
2450 node_flag_ptr, /* Pointer to location to nullify */
2451 &iter_node_flag, /* Old parent ptr in, new (recompacted) parent ptr out */
4d6ef45e 2452 shadow_nodes[nr_branch - 1], /* of parent */
48cbe001 2453 n, nr_branch - 1);
b306a0fe
MD
2454 if (ret)
2455 goto end;
af3cbd45 2456
4d6ef45e
MD
2457 dbg_printf("ja_detach_node: publish %p instead of %p\n",
2458 iter_node_flag, *parent_node_flag_ptr);
af3cbd45
MD
2459 /* Update address of parent ptr in its parent */
2460 rcu_assign_pointer(*parent_node_flag_ptr, iter_node_flag);
2461
2462end:
2463 for (i = 0; i < nr_shadow; i++)
2464 rcuja_shadow_unlock(shadow_nodes[i]);
35170a44
MD
2465 return ret;
2466}
2467
af3cbd45
MD
2468static
2469int ja_unchain_node(struct cds_ja *ja,
2470 struct cds_ja_inode_flag *parent_node_flag,
fa112799 2471 struct cds_ja_inode_flag **node_flag_ptr,
013a6083 2472 struct cds_ja_inode_flag *node_flag,
af3cbd45
MD
2473 struct cds_ja_node *node)
2474{
2475 struct cds_ja_shadow_node *shadow_node;
03ec1aeb 2476 struct cds_ja_node *iter_node, **iter_node_ptr, **prev_node_ptr = NULL;
013a6083 2477 int ret = 0, count = 0, found = 0;
af3cbd45 2478
3d8fe307 2479 shadow_node = rcuja_shadow_lookup_lock(ja->ht, parent_node_flag);
af3cbd45
MD
2480 if (!shadow_node)
2481 return -EAGAIN;
013a6083 2482 if (ja_node_ptr(*node_flag_ptr) != ja_node_ptr(node_flag)) {
fa112799
MD
2483 ret = -EAGAIN;
2484 goto end;
2485 }
af3cbd45 2486 /*
03ec1aeb
MD
2487 * Find the previous node's next pointer pointing to our node,
2488 * so we can update it. Retry if another thread removed all but
2489 * one of the duplicates since our check (that check was performed
2490 * without the lock). Ensure that the node we are about to remove is
2491 * still in the list (while holding the lock). No need for RCU
2492 * traversal here since we hold the lock on the parent.
af3cbd45 2493 */
03ec1aeb
MD
2494 iter_node_ptr = (struct cds_ja_node **) node_flag_ptr;
2495 iter_node = (struct cds_ja_node *) ja_node_ptr(node_flag);
2496 cds_ja_for_each_duplicate(iter_node) {
f2758d14 2497 count++;
03ec1aeb
MD
2498 if (iter_node == node) {
2499 prev_node_ptr = iter_node_ptr;
013a6083 2500 found++;
03ec1aeb
MD
2501 }
2502 iter_node_ptr = &iter_node->next;
f2758d14 2503 }
013a6083
MD
2504 assert(found <= 1);
2505 if (!found || count == 1) {
af3cbd45
MD
2506 ret = -EAGAIN;
2507 goto end;
2508 }
03ec1aeb 2509 CMM_STORE_SHARED(*prev_node_ptr, node->next);
ade342cb
MD
2510 /*
2511 * Validate that we indeed removed the node from linked list.
2512 */
2513 assert(ja_node_ptr(*node_flag_ptr) != (struct cds_ja_inode *) node);
af3cbd45
MD
2514end:
2515 rcuja_shadow_unlock(shadow_node);
2516 return ret;
2517}
2518
2519/*
2520 * Called with RCU read lock held.
2521 */
35170a44
MD
2522int cds_ja_del(struct cds_ja *ja, uint64_t key,
2523 struct cds_ja_node *node)
2524{
2525 unsigned int tree_depth, i;
2526 struct cds_ja_inode_flag *snapshot[JA_MAX_DEPTH];
af3cbd45
MD
2527 struct cds_ja_inode_flag **snapshot_ptr[JA_MAX_DEPTH];
2528 uint8_t snapshot_n[JA_MAX_DEPTH];
35170a44 2529 struct cds_ja_inode_flag *node_flag;
fa112799
MD
2530 struct cds_ja_inode_flag **prev_node_flag_ptr,
2531 **node_flag_ptr;
4d6ef45e 2532 int nr_snapshot;
35170a44
MD
2533 int ret;
2534
102b3189 2535 if (caa_unlikely(key > ja->key_max || key == UINT64_MAX))
35170a44
MD
2536 return -EINVAL;
2537 tree_depth = ja->tree_depth;
2538
2539retry:
4d6ef45e 2540 nr_snapshot = 0;
35170a44
MD
2541 dbg_printf("cds_ja_del attempt: key %" PRIu64 ", node %p\n",
2542 key, node);
2543
2544 /* snapshot for level 0 is only for shadow node lookup */
4d6ef45e
MD
2545 snapshot_n[0] = 0;
2546 snapshot_n[1] = 0;
af3cbd45 2547 snapshot_ptr[nr_snapshot] = NULL;
35170a44
MD
2548 snapshot[nr_snapshot++] = (struct cds_ja_inode_flag *) &ja->root;
2549 node_flag = rcu_dereference(ja->root);
af3cbd45 2550 prev_node_flag_ptr = &ja->root;
fa112799 2551 node_flag_ptr = &ja->root;
35170a44
MD
2552
2553 /* Iterate on all internal levels */
2554 for (i = 1; i < tree_depth; i++) {
2555 uint8_t iter_key;
2556
2557 dbg_printf("cds_ja_del iter node_flag %p\n",
2558 node_flag);
2559 if (!ja_node_ptr(node_flag)) {
2560 return -ENOENT;
2561 }
35170a44 2562 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1)));
4d6ef45e 2563 snapshot_n[nr_snapshot + 1] = iter_key;
af3cbd45
MD
2564 snapshot_ptr[nr_snapshot] = prev_node_flag_ptr;
2565 snapshot[nr_snapshot++] = node_flag;
35170a44 2566 node_flag = ja_node_get_nth(node_flag,
fa112799 2567 &node_flag_ptr,
35170a44 2568 iter_key);
48cbe001
MD
2569 if (node_flag)
2570 prev_node_flag_ptr = node_flag_ptr;
af3cbd45
MD
2571 dbg_printf("cds_ja_del iter key lookup %u finds node_flag %p, prev_node_flag_ptr %p\n",
2572 (unsigned int) iter_key, node_flag,
2573 prev_node_flag_ptr);
35170a44 2574 }
35170a44
MD
2575 /*
2576 * We reached bottom of tree, try to find the node we are trying
2577 * to remove. Fail if we cannot find it.
2578 */
2579 if (!ja_node_ptr(node_flag)) {
4d6ef45e
MD
2580 dbg_printf("cds_ja_del: no node found for key %" PRIu64 "\n",
2581 key);
35170a44
MD
2582 return -ENOENT;
2583 } else {
03ec1aeb 2584 struct cds_ja_node *iter_node, *match = NULL;
af3cbd45 2585 int count = 0;
35170a44 2586
03ec1aeb
MD
2587 iter_node = (struct cds_ja_node *) ja_node_ptr(node_flag);
2588 cds_ja_for_each_duplicate_rcu(iter_node) {
2589 dbg_printf("cds_ja_del: compare %p with iter_node %p\n", node, iter_node);
2590 if (iter_node == node)
2591 match = iter_node;
af3cbd45 2592 count++;
35170a44 2593 }
03ec1aeb 2594
4d6ef45e
MD
2595 if (!match) {
2596 dbg_printf("cds_ja_del: no node match for node %p key %" PRIu64 "\n", node, key);
35170a44 2597 return -ENOENT;
4d6ef45e 2598 }
af3cbd45
MD
2599 assert(count > 0);
2600 if (count == 1) {
2601 /*
4d6ef45e
MD
2602 * Removing last of duplicates. Last snapshot
2603 * does not have a shadow node (external leaves).
af3cbd45
MD
2604 */
2605 snapshot_ptr[nr_snapshot] = prev_node_flag_ptr;
2606 snapshot[nr_snapshot++] = node_flag;
2607 ret = ja_detach_node(ja, snapshot, snapshot_ptr,
2608 snapshot_n, nr_snapshot, key, node);
2609 } else {
f2758d14 2610 ret = ja_unchain_node(ja, snapshot[nr_snapshot - 1],
013a6083 2611 node_flag_ptr, node_flag, match);
af3cbd45 2612 }
35170a44 2613 }
b306a0fe
MD
2614 /*
2615 * Explanation of -ENOENT handling: caused by concurrent delete
2616 * between RCU lookup and actual removal. Need to re-do the
2617 * lookup and removal attempt.
2618 */
2619 if (ret == -EAGAIN || ret == -ENOENT)
35170a44
MD
2620 goto retry;
2621 return ret;
2622}
2623
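/*
 * Editor's note (not part of the original file): cds_ja_del() only
 * unlinks the caller's node; it does not reclaim it. Following the
 * usual RCU convention, the caller should wait for a grace period of
 * the flavor used by the array before freeing or reusing the removed
 * node, since concurrent readers may still reference it. A minimal
 * sketch, assuming caller-defined "myobj", an "rcu_head" member and a
 * "free_myobj_rcu" callback:
 *
 *	int ret;
 *
 *	rcu_read_lock();
 *	ret = cds_ja_del(ja, key, &myobj->ja_node);
 *	rcu_read_unlock();
 *	if (!ret)
 *		call_rcu(&myobj->rcu_head, free_myobj_rcu);
 */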
b4540e8a
MD
2624struct cds_ja *_cds_ja_new(unsigned int key_bits,
2625 const struct rcu_flavor_struct *flavor)
be9a7474
MD
2626{
2627 struct cds_ja *ja;
b0f74e47 2628 int ret;
f07b240f 2629 struct cds_ja_shadow_node *root_shadow_node;
be9a7474
MD
2630
2631 ja = calloc(sizeof(*ja), 1);
2632 if (!ja)
2633 goto ja_error;
b4540e8a
MD
2634
2635 switch (key_bits) {
2636 case 8:
b4540e8a 2637 case 16:
1216b3d2 2638 case 24:
b4540e8a 2639 case 32:
1216b3d2
MD
2640 case 40:
2641 case 48:
2642 case 56:
2643 ja->key_max = (1ULL << key_bits) - 1;
b4540e8a
MD
2644 break;
2645 case 64:
2646 ja->key_max = UINT64_MAX;
2647 break;
2648 default:
2649 goto check_error;
2650 }
2651
be9a7474 2652 /* ja->root is NULL */
5a9a87dd 2653 /* tree_depth 0 is for pointer to root node */
582a6ade 2654 ja->tree_depth = (key_bits >> JA_LOG2_BITS_PER_BYTE) + 1;
a2a7ff59 2655 assert(ja->tree_depth <= JA_MAX_DEPTH);
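	/*
	 * Editor's note: one tree level per key byte, plus the extra
	 * level 0 used for the root pointer itself. For example,
	 * key_bits == 32 gives tree_depth == (32 >> 3) + 1 == 5, and
	 * key_bits == 64 gives the deepest tree, tree_depth == 9.
	 */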
be9a7474
MD
2656 ja->ht = rcuja_create_ht(flavor);
2657 if (!ja->ht)
2658 goto ht_error;
b0f74e47
MD
2659
2660 /*
2661 * Note: we should not free this node until the judy array is destroyed.
2662 */
f07b240f 2663 root_shadow_node = rcuja_shadow_set(ja->ht,
3d8fe307 2664 (struct cds_ja_inode_flag *) &ja->root,
48cbe001 2665 NULL, ja, 0);
f07b240f
MD
2666 if (!root_shadow_node) {
2667 ret = -ENOMEM;
b0f74e47 2668 goto ht_node_error;
f07b240f 2669 }
b0f74e47 2670
be9a7474
MD
2671 return ja;
2672
b0f74e47
MD
2673ht_node_error:
2674 ret = rcuja_delete_ht(ja->ht);
2675 assert(!ret);
be9a7474 2676ht_error:
b4540e8a 2677check_error:
be9a7474
MD
2678 free(ja);
2679ja_error:
2680 return NULL;
2681}
2682
19ddcd04 2683static
354981c2 2684void print_debug_fallback_distribution(struct cds_ja *ja)
19ddcd04
MD
2685{
2686 int i;
2687
2688 fprintf(stderr, "Fallback node distribution:\n");
2689 for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
354981c2 2690 if (!ja->node_fallback_count_distribution[i])
19ddcd04
MD
2691 continue;
2692 fprintf(stderr, " %3u: %4lu\n",
354981c2 2693 i, ja->node_fallback_count_distribution[i]);
19ddcd04
MD
2694 }
2695}
2696
021c72c0 2697static
19a748d9 2698int ja_final_checks(struct cds_ja *ja)
021c72c0
MD
2699{
2700 double fallback_ratio;
2701 unsigned long na, nf, nr_fallback;
19a748d9 2702 int ret = 0;
021c72c0 2703
f83b3e90
MD
2704 if (!ja_debug_counters())
2705 return 0;
2706
021c72c0
MD
2707 fallback_ratio = (double) uatomic_read(&ja->nr_fallback);
2708 fallback_ratio /= (double) uatomic_read(&ja->nr_nodes_allocated);
2709 nr_fallback = uatomic_read(&ja->nr_fallback);
2710 if (nr_fallback)
2711 fprintf(stderr,
2712 "[warning] RCU Judy Array used %lu fallback node(s) (ratio: %g)\n",
2713 uatomic_read(&ja->nr_fallback),
2714 fallback_ratio);
2715
2716 na = uatomic_read(&ja->nr_nodes_allocated);
2717 nf = uatomic_read(&ja->nr_nodes_freed);
19a748d9
MD
2718 dbg_printf("Nodes allocated: %lu, Nodes freed: %lu.\n", na, nf);
2719 if (nr_fallback)
2720 print_debug_fallback_distribution(ja);
2721
021c72c0
MD
2722 if (na != nf) {
2723 fprintf(stderr, "[error] Judy array leaked %ld nodes. Allocated: %lu, freed: %lu.\n",
2724 (long) na - nf, na, nf);
19a748d9 2725 ret = -1;
021c72c0 2726 }
19a748d9 2727 return ret;
021c72c0
MD
2728}
2729
be9a7474 2730/*
dc0e9798
MD
2731 * There should be no more concurrent add, delete, or lookup operations performed
2732 * on the Judy array while it is being destroyed (ensured by the
2733 * caller).
be9a7474 2734 */
99e6e3dc 2735int cds_ja_destroy(struct cds_ja *ja)
be9a7474 2736{
48cbe001 2737 const struct rcu_flavor_struct *flavor;
b4540e8a
MD
2738 int ret;
2739
48cbe001 2740 flavor = cds_lfht_rcu_flavor(ja->ht);
be9a7474 2741 rcuja_shadow_prune(ja->ht,
99e6e3dc 2742 RCUJA_SHADOW_CLEAR_FREE_NODE | RCUJA_SHADOW_CLEAR_FREE_LOCK);
48cbe001 2743 flavor->thread_offline();
b4540e8a
MD
2744 ret = rcuja_delete_ht(ja->ht);
2745 if (ret)
2746 return ret;
f2ae7af7
MD
2747
2748 /* Wait for in-flight call_rcu free to complete. */
2749 flavor->barrier();
2750
48cbe001 2751 flavor->thread_online();
19a748d9 2752 ret = ja_final_checks(ja);
b4540e8a 2753 free(ja);
19a748d9 2754 return ret;
be9a7474 2755}