rcuja API: remove dependency on hlist
/*
 * rcuja/rcuja.c
 *
 * Userspace RCU library - RCU Judy Array
 *
 * Copyright 2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define _LGPL_SOURCE
#include <stdint.h>
#include <errno.h>
#include <limits.h>
#include <string.h>
#include <urcu/rcuja.h>
#include <urcu/compiler.h>
#include <urcu/arch.h>
#include <assert.h>
#include <urcu-pointer.h>
#include <urcu/uatomic.h>

#include "rcuja-internal.h"

#ifndef abs_int
#define abs_int(a)	((int) (a) > 0 ? (int) (a) : -((int) (a)))
#endif

enum cds_ja_type_class {
	RCU_JA_LINEAR = 0,	/* Type A */
			/* 32-bit: 1 to 25 children, 8 to 128 bytes */
			/* 64-bit: 1 to 28 children, 16 to 256 bytes */
	RCU_JA_POOL = 1,	/* Type B */
			/* 32-bit: 26 to 100 children, 256 to 512 bytes */
			/* 64-bit: 29 to 112 children, 512 to 1024 bytes */
	RCU_JA_PIGEON = 2,	/* Type C */
			/* 32-bit: 101 to 256 children, 1024 bytes */
			/* 64-bit: 113 to 256 children, 2048 bytes */
	/* Leaf nodes are implicit from their height in the tree */
	RCU_JA_NR_TYPES,

	RCU_JA_NULL,	/* not an encoded type, but keeps code regular */
};

struct cds_ja_type {
	enum cds_ja_type_class type_class;
	uint16_t min_child;		/* minimum number of children: 1 to 256 */
	uint16_t max_child;		/* maximum number of children: 1 to 256 */
	uint16_t max_linear_child;	/* per-pool max nr. children: 1 to 256 */
	uint16_t order;			/* node size is (1 << order), in bytes */
	uint16_t nr_pool_order;		/* number of pools */
	uint16_t pool_size_order;	/* pool size */
};

/*
 * Iteration on the array to find the right node size for the number of
 * children stops when it reaches .max_child == 256 (this is the largest
 * possible node size, which contains 256 children).
 * The min_child overlaps with the previous max_child to provide a
 * hysteresis loop to reallocation for patterns of cyclic add/removal
 * within the same node.
 * The node type index within the following arrays is represented on 3
 * bits. It identifies the node type, min/max number of children, and
 * the size order.
 * The max_child values for the RCU_JA_POOL below result from
 * statistical approximation: over a million populations, the max_child
 * covers between 97% and 99% of the populations generated. Therefore, a
 * fallback should exist to cover the rare extreme population unbalance
 * cases, but it will not have a major impact on speed nor space
 * consumption, since those are rare cases.
 */

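/*
 * Worked example of the hysteresis (illustration added for clarity,
 * using the 64-bit table below): growing a type 3 node (max_child == 14)
 * past 14 children recompacts it into type 4 (min_child == 10,
 * max_child == 28). The node is only recompacted back down once its
 * population drops below 10 children, so a workload oscillating between
 * 13 and 15 children reallocates the node once rather than on every
 * add/removal. See find_nearest_type_index() later in this file for the
 * selection loop that walks this array.
 */
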
#if (CAA_BITS_PER_LONG < 64)
/* 32-bit pointers */
enum {
	ja_type_0_max_child = 1,
	ja_type_1_max_child = 3,
	ja_type_2_max_child = 6,
	ja_type_3_max_child = 12,
	ja_type_4_max_child = 25,
	ja_type_5_max_child = 48,
	ja_type_6_max_child = 92,
	ja_type_7_max_child = 256,
	ja_type_8_max_child = 0,	/* NULL */
};

enum {
	ja_type_0_max_linear_child = 1,
	ja_type_1_max_linear_child = 3,
	ja_type_2_max_linear_child = 6,
	ja_type_3_max_linear_child = 12,
	ja_type_4_max_linear_child = 25,
	ja_type_5_max_linear_child = 24,
	ja_type_6_max_linear_child = 23,
};

enum {
	ja_type_5_nr_pool_order = 1,
	ja_type_6_nr_pool_order = 2,
};

const struct cds_ja_type ja_types[] = {
	{ .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_0_max_child, .max_linear_child = ja_type_0_max_linear_child, .order = 3, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_1_max_child, .max_linear_child = ja_type_1_max_linear_child, .order = 4, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 3, .max_child = ja_type_2_max_child, .max_linear_child = ja_type_2_max_linear_child, .order = 5, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 4, .max_child = ja_type_3_max_child, .max_linear_child = ja_type_3_max_linear_child, .order = 6, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 10, .max_child = ja_type_4_max_child, .max_linear_child = ja_type_4_max_linear_child, .order = 7, },

	/* Pools may fill sooner than max_child */
	{ .type_class = RCU_JA_POOL, .min_child = 20, .max_child = ja_type_5_max_child, .max_linear_child = ja_type_5_max_linear_child, .order = 8, .nr_pool_order = ja_type_5_nr_pool_order, .pool_size_order = 7, },
	{ .type_class = RCU_JA_POOL, .min_child = 45, .max_child = ja_type_6_max_child, .max_linear_child = ja_type_6_max_linear_child, .order = 9, .nr_pool_order = ja_type_6_nr_pool_order, .pool_size_order = 7, },

	/*
	 * Upon node removal below min_child, if child pool is filled
	 * beyond capacity, we roll back to pigeon.
	 */
	{ .type_class = RCU_JA_PIGEON, .min_child = 83, .max_child = ja_type_7_max_child, .order = 10, },

	{ .type_class = RCU_JA_NULL, .min_child = 0, .max_child = ja_type_8_max_child, },
};
#else /* !(CAA_BITS_PER_LONG < 64) */
/* 64-bit pointers */
enum {
	ja_type_0_max_child = 1,
	ja_type_1_max_child = 3,
	ja_type_2_max_child = 7,
	ja_type_3_max_child = 14,
	ja_type_4_max_child = 28,
	ja_type_5_max_child = 54,
	ja_type_6_max_child = 104,
	ja_type_7_max_child = 256,
	ja_type_8_max_child = 256,
};

enum {
	ja_type_0_max_linear_child = 1,
	ja_type_1_max_linear_child = 3,
	ja_type_2_max_linear_child = 7,
	ja_type_3_max_linear_child = 14,
	ja_type_4_max_linear_child = 28,
	ja_type_5_max_linear_child = 27,
	ja_type_6_max_linear_child = 26,
};

enum {
	ja_type_5_nr_pool_order = 1,
	ja_type_6_nr_pool_order = 2,
};

const struct cds_ja_type ja_types[] = {
	{ .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_0_max_child, .max_linear_child = ja_type_0_max_linear_child, .order = 4, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_1_max_child, .max_linear_child = ja_type_1_max_linear_child, .order = 5, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 3, .max_child = ja_type_2_max_child, .max_linear_child = ja_type_2_max_linear_child, .order = 6, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 5, .max_child = ja_type_3_max_child, .max_linear_child = ja_type_3_max_linear_child, .order = 7, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 10, .max_child = ja_type_4_max_child, .max_linear_child = ja_type_4_max_linear_child, .order = 8, },

	/* Pools may fill sooner than max_child. */
	{ .type_class = RCU_JA_POOL, .min_child = 22, .max_child = ja_type_5_max_child, .max_linear_child = ja_type_5_max_linear_child, .order = 9, .nr_pool_order = ja_type_5_nr_pool_order, .pool_size_order = 8, },
	{ .type_class = RCU_JA_POOL, .min_child = 51, .max_child = ja_type_6_max_child, .max_linear_child = ja_type_6_max_linear_child, .order = 10, .nr_pool_order = ja_type_6_nr_pool_order, .pool_size_order = 8, },

	/*
	 * Upon node removal below min_child, if child pool is filled
	 * beyond capacity, we roll back to pigeon.
	 */
	{ .type_class = RCU_JA_PIGEON, .min_child = 95, .max_child = ja_type_7_max_child, .order = 11, },

	{ .type_class = RCU_JA_NULL, .min_child = 0, .max_child = ja_type_8_max_child, },
};
#endif /* !(CAA_BITS_PER_LONG < 64) */

static inline __attribute__((unused))
void static_array_size_check(void)
{
	CAA_BUILD_BUG_ON(CAA_ARRAY_SIZE(ja_types) < JA_TYPE_MAX_NR);
}

/*
 * The cds_ja_inode contains the compressed node data needed for
 * read-side. For linear and pool node configurations, it starts with a
 * byte counting the number of children in the node. Then, the
 * node-specific data is placed.
 * The node mutex, if any is needed, protecting concurrent updates of
 * each node is placed in a separate hash table indexed by node address.
 * For the pigeon configuration, the number of children is also kept in
 * a separate hash table, indexed by node address, because it is only
 * required for updates.
 */

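/*
 * Layout sketch of a linear node (illustration added for clarity; the
 * access pattern shown mirrors ja_linear_node_get_nth() below):
 *
 *	u.data[0]                       : uint8_t nr_child
 *	u.data[1 .. max_linear_child]   : uint8_t child_value[]
 *	(aligned on sizeof(void *))     : struct cds_ja_inode_flag *child_ptr[]
 *
 *	values = &node->u.data[1];
 *	pointers = (struct cds_ja_inode_flag **)
 *		align_ptr_size(&values[type->max_linear_child]);
 *	child = rcu_dereference(pointers[i]);	// where values[i] == n
 */
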
#define DECLARE_LINEAR_NODE(index)								\
	struct {										\
		uint8_t nr_child;								\
		uint8_t child_value[ja_type_## index ##_max_linear_child];			\
		struct cds_ja_inode_flag *child_ptr[ja_type_## index ##_max_linear_child];	\
	}

#define DECLARE_POOL_NODE(index)								\
	struct {										\
		struct {									\
			uint8_t nr_child;							\
			uint8_t child_value[ja_type_## index ##_max_linear_child];		\
			struct cds_ja_inode_flag *child_ptr[ja_type_## index ##_max_linear_child]; \
		} linear[1U << ja_type_## index ##_nr_pool_order];				\
	}

struct cds_ja_inode {
	union {
		/* Linear configuration */
		DECLARE_LINEAR_NODE(0) conf_0;
		DECLARE_LINEAR_NODE(1) conf_1;
		DECLARE_LINEAR_NODE(2) conf_2;
		DECLARE_LINEAR_NODE(3) conf_3;
		DECLARE_LINEAR_NODE(4) conf_4;

		/* Pool configuration */
		DECLARE_POOL_NODE(5) conf_5;
		DECLARE_POOL_NODE(6) conf_6;

		/* Pigeon configuration */
		struct {
			struct cds_ja_inode_flag *child[ja_type_7_max_child];
		} conf_7;
		/* data aliasing nodes for computed accesses */
		uint8_t data[sizeof(struct cds_ja_inode_flag *) * ja_type_7_max_child];
	} u;
};

enum ja_recompact {
	JA_RECOMPACT_ADD_SAME,
	JA_RECOMPACT_ADD_NEXT,
	JA_RECOMPACT_DEL,
};

static
struct cds_ja_inode *_ja_node_mask_ptr(struct cds_ja_inode_flag *node)
{
	return (struct cds_ja_inode *) (((unsigned long) node) & JA_PTR_MASK);
}

unsigned long ja_node_type(struct cds_ja_inode_flag *node)
{
	unsigned long type;

	if (_ja_node_mask_ptr(node) == NULL) {
		return NODE_INDEX_NULL;
	}
	type = (unsigned int) ((unsigned long) node & JA_TYPE_MASK);
	assert(type < (1UL << JA_TYPE_BITS));
	return type;
}

struct cds_ja_inode *ja_node_ptr(struct cds_ja_inode_flag *node)
{
	unsigned long type_index = ja_node_type(node);
	const struct cds_ja_type *type;

	type = &ja_types[type_index];
	switch (type->type_class) {
	case RCU_JA_LINEAR:
	case RCU_JA_PIGEON:	/* fall-through */
	case RCU_JA_NULL:	/* fall-through */
	default:		/* fall-through */
		return _ja_node_mask_ptr(node);
	case RCU_JA_POOL:
		switch (type->nr_pool_order) {
		case 1:
			return (struct cds_ja_inode *) (((unsigned long) node) & ~(JA_POOL_1D_MASK | JA_TYPE_MASK));
		case 2:
			return (struct cds_ja_inode *) (((unsigned long) node) & ~(JA_POOL_2D_MASK | JA_POOL_1D_MASK | JA_TYPE_MASK));
		default:
			assert(0);
		}
	}
}

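/*
 * Tagged-pointer sketch (illustration added for clarity; the exact mask
 * values live in rcuja-internal.h and the flag construction shown for
 * ja_node_flag() is an assumption): node memory is allocated aligned on
 * its own size (see alloc_cds_ja_node() below), so the low-order bits
 * of a struct cds_ja_inode pointer are zero and can carry metadata in
 * the cds_ja_inode_flag value stored in the parent:
 *
 *	flag = ptr | type_index;		// see ja_node_flag()
 *	type_index = flag & JA_TYPE_MASK;	// see ja_node_type()
 *	ptr = flag & JA_PTR_MASK;		// see _ja_node_mask_ptr()
 *
 * Pool nodes additionally encode their bit selector(s) in the
 * JA_POOL_1D_MASK / JA_POOL_2D_MASK bits, which ja_node_ptr() above
 * masks out.
 */
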
static
struct cds_ja_inode *alloc_cds_ja_node(struct cds_ja *ja,
		const struct cds_ja_type *ja_type)
{
	size_t len = 1U << ja_type->order;
	void *p;
	int ret;

	ret = posix_memalign(&p, len, len);
	if (ret || !p) {
		return NULL;
	}
	memset(p, 0, len);
	uatomic_inc(&ja->nr_nodes_allocated);
	return p;
}

void free_cds_ja_node(struct cds_ja *ja, struct cds_ja_inode *node)
{
	free(node);
	if (node)
		uatomic_inc(&ja->nr_nodes_freed);
}

#define __JA_ALIGN_MASK(v, mask)	(((v) + (mask)) & ~(mask))
#define JA_ALIGN(v, align)		__JA_ALIGN_MASK(v, (typeof(v)) (align) - 1)
#define __JA_FLOOR_MASK(v, mask)	((v) & ~(mask))
#define JA_FLOOR(v, align)		__JA_FLOOR_MASK(v, (typeof(v)) (align) - 1)

static
uint8_t *align_ptr_size(uint8_t *ptr)
{
	return (uint8_t *) JA_ALIGN((unsigned long) ptr, sizeof(void *));
}

static
uint8_t ja_linear_node_get_nr_child(const struct cds_ja_type *type,
		struct cds_ja_inode *node)
{
	assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
	return rcu_dereference(node->u.data[0]);
}

/*
 * The order in which values and pointers are read does not matter: if
 * a value is missing, we return NULL. If a value is there, but its
 * associated pointer is still NULL, we return NULL too.
 */
static
struct cds_ja_inode_flag *ja_linear_node_get_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_inode_flag ***node_flag_ptr,
		uint8_t n)
{
	uint8_t nr_child;
	uint8_t *values;
	struct cds_ja_inode_flag **pointers;
	struct cds_ja_inode_flag *ptr;
	unsigned int i;

	assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);

	nr_child = ja_linear_node_get_nr_child(type, node);
	cmm_smp_rmb();	/* read nr_child before values and pointers */
	assert(nr_child <= type->max_linear_child);
	assert(type->type_class != RCU_JA_LINEAR || nr_child >= type->min_child);

	values = &node->u.data[1];
	for (i = 0; i < nr_child; i++) {
		if (CMM_LOAD_SHARED(values[i]) == n)
			break;
	}
	if (i >= nr_child) {
		if (caa_unlikely(node_flag_ptr))
			*node_flag_ptr = NULL;
		return NULL;
	}
	pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
	ptr = rcu_dereference(pointers[i]);
	if (caa_unlikely(node_flag_ptr))
		*node_flag_ptr = &pointers[i];
	return ptr;
}

static
struct cds_ja_inode_flag *ja_linear_node_get_left(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		unsigned int n)
{
	uint8_t nr_child;
	uint8_t *values;
	struct cds_ja_inode_flag **pointers;
	struct cds_ja_inode_flag *ptr;
	unsigned int i, match_idx;
	int match_v = -1;

	assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);

	nr_child = ja_linear_node_get_nr_child(type, node);
	cmm_smp_rmb();	/* read nr_child before values and pointers */
	assert(nr_child <= type->max_linear_child);
	assert(type->type_class != RCU_JA_LINEAR || nr_child >= type->min_child);

	values = &node->u.data[1];
	for (i = 0; i < nr_child; i++) {
		unsigned int v;

		v = CMM_LOAD_SHARED(values[i]);
		if (v < n && (int) v > match_v) {
			match_v = v;
			match_idx = i;
		}
	}
	if (match_v < 0) {
		return NULL;
	}
	pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
	ptr = rcu_dereference(pointers[match_idx]);
	return ptr;
}

static
void ja_linear_node_get_ith_pos(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		uint8_t i,
		uint8_t *v,
		struct cds_ja_inode_flag **iter)
{
	uint8_t *values;
	struct cds_ja_inode_flag **pointers;

	assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
	assert(i < ja_linear_node_get_nr_child(type, node));

	values = &node->u.data[1];
	*v = values[i];
	pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
	*iter = pointers[i];
}

static
struct cds_ja_inode_flag *ja_pool_node_get_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_inode_flag *node_flag,
		struct cds_ja_inode_flag ***node_flag_ptr,
		uint8_t n)
{
	struct cds_ja_inode *linear;

	assert(type->type_class == RCU_JA_POOL);

	switch (type->nr_pool_order) {
	case 1:
	{
		unsigned long bitsel, index;

		bitsel = ja_node_pool_1d_bitsel(node_flag);
		assert(bitsel < CHAR_BIT);
		index = ((unsigned long) n >> bitsel) & 0x1;
		linear = (struct cds_ja_inode *) &node->u.data[index << type->pool_size_order];
		break;
	}
	case 2:
	{
		unsigned long bitsel[2], index[2], rindex;

		ja_node_pool_2d_bitsel(node_flag, bitsel);
		assert(bitsel[0] < CHAR_BIT);
		assert(bitsel[1] < CHAR_BIT);
		index[0] = ((unsigned long) n >> bitsel[0]) & 0x1;
		index[0] <<= 1;
		index[1] = ((unsigned long) n >> bitsel[1]) & 0x1;
		rindex = index[0] | index[1];
		linear = (struct cds_ja_inode *) &node->u.data[rindex << type->pool_size_order];
		break;
	}
	default:
		linear = NULL;
		assert(0);
	}
	return ja_linear_node_get_nth(type, linear, node_flag_ptr, n);
}

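/*
 * Worked example of the pool dispatch above (selector values assumed
 * for illustration): with nr_pool_order == 1 and a bit selector of 3,
 * child value n is looked up in sub-pool (n >> 3) & 0x1, so n == 0x12
 * (0b10010) goes to sub-pool 0 and n == 0x1a (0b11010) to sub-pool 1.
 * With nr_pool_order == 2 and selectors {5, 3}, the two selected bits
 * are concatenated into a 2-bit sub-pool index, giving four linear
 * sub-arrays of (1 << pool_size_order) bytes each inside the node.
 */
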
static
struct cds_ja_inode *ja_pool_node_get_ith_pool(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		uint8_t i)
{
	assert(type->type_class == RCU_JA_POOL);
	return (struct cds_ja_inode *)
		&node->u.data[(unsigned int) i << type->pool_size_order];
}

static
struct cds_ja_inode_flag *ja_pool_node_get_left(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		unsigned int n)
{
	unsigned int pool_nr;
	int match_v = -1;
	struct cds_ja_inode_flag *match_node_flag = NULL;

	assert(type->type_class == RCU_JA_POOL);

	for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) {
		struct cds_ja_inode *pool =
			ja_pool_node_get_ith_pool(type,
				node, pool_nr);
		uint8_t nr_child =
			ja_linear_node_get_nr_child(type, pool);
		unsigned int j;

		for (j = 0; j < nr_child; j++) {
			struct cds_ja_inode_flag *iter;
			uint8_t v;

			ja_linear_node_get_ith_pos(type, pool,
					j, &v, &iter);
			if (!iter)
				continue;
			if (v < n && (int) v > match_v) {
				match_v = v;
				match_node_flag = iter;
			}
		}
	}
	return match_node_flag;
}

static
struct cds_ja_inode_flag *ja_pigeon_node_get_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_inode_flag ***node_flag_ptr,
		uint8_t n)
{
	struct cds_ja_inode_flag **child_node_flag_ptr;
	struct cds_ja_inode_flag *child_node_flag;

	assert(type->type_class == RCU_JA_PIGEON);
	child_node_flag_ptr = &((struct cds_ja_inode_flag **) node->u.data)[n];
	child_node_flag = rcu_dereference(*child_node_flag_ptr);
	dbg_printf("ja_pigeon_node_get_nth child_node_flag_ptr %p\n",
		child_node_flag_ptr);
	if (caa_unlikely(node_flag_ptr))
		*node_flag_ptr = child_node_flag_ptr;
	return child_node_flag;
}

static
struct cds_ja_inode_flag *ja_pigeon_node_get_left(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		unsigned int n)
{
	struct cds_ja_inode_flag **child_node_flag_ptr;
	struct cds_ja_inode_flag *child_node_flag;
	int i;

	assert(type->type_class == RCU_JA_PIGEON);

	/* n - 1 is first value left of n */
	for (i = n - 1; i >= 0; i--) {
		child_node_flag_ptr = &((struct cds_ja_inode_flag **) node->u.data)[i];
		child_node_flag = rcu_dereference(*child_node_flag_ptr);
		if (child_node_flag) {
			dbg_printf("ja_pigeon_node_get_left child_node_flag %p\n",
				child_node_flag);
			return child_node_flag;
		}
	}
	return NULL;
}

static
struct cds_ja_inode_flag *ja_pigeon_node_get_ith_pos(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		uint8_t i)
{
	return ja_pigeon_node_get_nth(type, node, NULL, i);
}

/*
 * ja_node_get_nth: get nth item from a node.
 * node_flag is already rcu_dereference'd.
 */
static
struct cds_ja_inode_flag *ja_node_get_nth(struct cds_ja_inode_flag *node_flag,
		struct cds_ja_inode_flag ***node_flag_ptr,
		uint8_t n)
{
	unsigned int type_index;
	struct cds_ja_inode *node;
	const struct cds_ja_type *type;

	node = ja_node_ptr(node_flag);
	assert(node != NULL);
	type_index = ja_node_type(node_flag);
	type = &ja_types[type_index];

	switch (type->type_class) {
	case RCU_JA_LINEAR:
		return ja_linear_node_get_nth(type, node,
				node_flag_ptr, n);
	case RCU_JA_POOL:
		return ja_pool_node_get_nth(type, node, node_flag,
				node_flag_ptr, n);
	case RCU_JA_PIGEON:
		return ja_pigeon_node_get_nth(type, node,
				node_flag_ptr, n);
	default:
		assert(0);
		return (void *) -1UL;
	}
}

static
struct cds_ja_inode_flag *ja_node_get_left(struct cds_ja_inode_flag *node_flag,
		unsigned int n)
{
	unsigned int type_index;
	struct cds_ja_inode *node;
	const struct cds_ja_type *type;

	node = ja_node_ptr(node_flag);
	assert(node != NULL);
	type_index = ja_node_type(node_flag);
	type = &ja_types[type_index];

	switch (type->type_class) {
	case RCU_JA_LINEAR:
		return ja_linear_node_get_left(type, node, n);
	case RCU_JA_POOL:
		return ja_pool_node_get_left(type, node, n);
	case RCU_JA_PIGEON:
		return ja_pigeon_node_get_left(type, node, n);
	default:
		assert(0);
		return (void *) -1UL;
	}
}

static
struct cds_ja_inode_flag *ja_node_get_rightmost(struct cds_ja_inode_flag *node_flag)
{
	return ja_node_get_left(node_flag, JA_ENTRY_PER_NODE);
}

static
int ja_linear_node_set_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_shadow_node *shadow_node,
		uint8_t n,
		struct cds_ja_inode_flag *child_node_flag)
{
	uint8_t nr_child;
	uint8_t *values, *nr_child_ptr;
	struct cds_ja_inode_flag **pointers;
	unsigned int i, unused = 0;

	assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);

	nr_child_ptr = &node->u.data[0];
	dbg_printf("linear set nth: n %u, nr_child_ptr %p\n",
		(unsigned int) n, nr_child_ptr);
	nr_child = *nr_child_ptr;
	assert(nr_child <= type->max_linear_child);

	values = &node->u.data[1];
	pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
	/* Check if node value is already populated */
	for (i = 0; i < nr_child; i++) {
		if (values[i] == n) {
			if (pointers[i])
				return -EEXIST;
			else
				break;
		} else {
			if (!pointers[i])
				unused++;
		}
	}
	if (i == nr_child && nr_child >= type->max_linear_child) {
		if (unused)
			return -ERANGE;	/* recompact node */
		else
			return -ENOSPC;	/* No space left in this node type */
	}

	assert(pointers[i] == NULL);
	rcu_assign_pointer(pointers[i], child_node_flag);
	/* If we expanded the nr_child, increment it */
	if (i == nr_child) {
		CMM_STORE_SHARED(values[nr_child], n);
		/* write pointer and value before nr_child */
		cmm_smp_wmb();
		CMM_STORE_SHARED(*nr_child_ptr, nr_child + 1);
	}
	shadow_node->nr_child++;
	dbg_printf("linear set nth: %u child, shadow: %u child, for node %p shadow %p\n",
		(unsigned int) CMM_LOAD_SHARED(*nr_child_ptr),
		(unsigned int) shadow_node->nr_child,
		node, shadow_node);

	return 0;
}

static
int ja_pool_node_set_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_inode_flag *node_flag,
		struct cds_ja_shadow_node *shadow_node,
		uint8_t n,
		struct cds_ja_inode_flag *child_node_flag)
{
	struct cds_ja_inode *linear;

	assert(type->type_class == RCU_JA_POOL);

	switch (type->nr_pool_order) {
	case 1:
	{
		unsigned long bitsel, index;

		bitsel = ja_node_pool_1d_bitsel(node_flag);
		assert(bitsel < CHAR_BIT);
		index = ((unsigned long) n >> bitsel) & 0x1;
		linear = (struct cds_ja_inode *) &node->u.data[index << type->pool_size_order];
		break;
	}
	case 2:
	{
		unsigned long bitsel[2], index[2], rindex;

		ja_node_pool_2d_bitsel(node_flag, bitsel);
		assert(bitsel[0] < CHAR_BIT);
		assert(bitsel[1] < CHAR_BIT);
		index[0] = ((unsigned long) n >> bitsel[0]) & 0x1;
		index[0] <<= 1;
		index[1] = ((unsigned long) n >> bitsel[1]) & 0x1;
		rindex = index[0] | index[1];
		linear = (struct cds_ja_inode *) &node->u.data[rindex << type->pool_size_order];
		break;
	}
	default:
		linear = NULL;
		assert(0);
	}

	return ja_linear_node_set_nth(type, linear, shadow_node,
			n, child_node_flag);
}

static
int ja_pigeon_node_set_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_shadow_node *shadow_node,
		uint8_t n,
		struct cds_ja_inode_flag *child_node_flag)
{
	struct cds_ja_inode_flag **ptr;

	assert(type->type_class == RCU_JA_PIGEON);
	ptr = &((struct cds_ja_inode_flag **) node->u.data)[n];
	if (*ptr)
		return -EEXIST;
	rcu_assign_pointer(*ptr, child_node_flag);
	shadow_node->nr_child++;
	return 0;
}

/*
 * _ja_node_set_nth: set nth item within a node. Return an error
 * (negative error value) if it is already there.
 */
static
int _ja_node_set_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_inode_flag *node_flag,
		struct cds_ja_shadow_node *shadow_node,
		uint8_t n,
		struct cds_ja_inode_flag *child_node_flag)
{
	switch (type->type_class) {
	case RCU_JA_LINEAR:
		return ja_linear_node_set_nth(type, node, shadow_node, n,
				child_node_flag);
	case RCU_JA_POOL:
		return ja_pool_node_set_nth(type, node, node_flag, shadow_node, n,
				child_node_flag);
	case RCU_JA_PIGEON:
		return ja_pigeon_node_set_nth(type, node, shadow_node, n,
				child_node_flag);
	case RCU_JA_NULL:
		return -ENOSPC;
	default:
		assert(0);
		return -EINVAL;
	}

	return 0;
}

static
int ja_linear_node_clear_ptr(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_shadow_node *shadow_node,
		struct cds_ja_inode_flag **node_flag_ptr)
{
	uint8_t nr_child;
	uint8_t *nr_child_ptr;

	assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);

	nr_child_ptr = &node->u.data[0];
	nr_child = *nr_child_ptr;
	assert(nr_child <= type->max_linear_child);

	if (type->type_class == RCU_JA_LINEAR) {
		assert(!shadow_node->fallback_removal_count);
		if (shadow_node->nr_child <= type->min_child) {
			/* We need to try recompacting the node */
			return -EFBIG;
		}
	}
	dbg_printf("linear clear ptr: nr_child_ptr %p\n", nr_child_ptr);
	assert(*node_flag_ptr != NULL);
	rcu_assign_pointer(*node_flag_ptr, NULL);
	/*
	 * Value and nr_child are never changed (would cause ABA issue).
	 * Instead, we leave the pointer to NULL and recompact the node
	 * once in a while. It is allowed to set a NULL pointer to a new
	 * value without recompaction though.
	 * Only update the shadow node accounting.
	 */
	shadow_node->nr_child--;
	dbg_printf("linear clear ptr: %u child, shadow: %u child, for node %p shadow %p\n",
		(unsigned int) CMM_LOAD_SHARED(*nr_child_ptr),
		(unsigned int) shadow_node->nr_child,
		node, shadow_node);
	return 0;
}

static
int ja_pool_node_clear_ptr(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_inode_flag *node_flag,
		struct cds_ja_shadow_node *shadow_node,
		struct cds_ja_inode_flag **node_flag_ptr,
		uint8_t n)
{
	struct cds_ja_inode *linear;

	assert(type->type_class == RCU_JA_POOL);

	if (shadow_node->fallback_removal_count) {
		shadow_node->fallback_removal_count--;
	} else {
		/* We should try recompacting the node */
		if (shadow_node->nr_child <= type->min_child)
			return -EFBIG;
	}

	switch (type->nr_pool_order) {
	case 1:
	{
		unsigned long bitsel, index;

		bitsel = ja_node_pool_1d_bitsel(node_flag);
		assert(bitsel < CHAR_BIT);
		index = ((unsigned long) n >> bitsel) & type->nr_pool_order;
		linear = (struct cds_ja_inode *) &node->u.data[index << type->pool_size_order];
		break;
	}
	case 2:
	{
		unsigned long bitsel[2], index[2], rindex;

		ja_node_pool_2d_bitsel(node_flag, bitsel);
		assert(bitsel[0] < CHAR_BIT);
		assert(bitsel[1] < CHAR_BIT);
		index[0] = ((unsigned long) n >> bitsel[0]) & 0x1;
		index[0] <<= 1;
		index[1] = ((unsigned long) n >> bitsel[1]) & 0x1;
		rindex = index[0] | index[1];
		linear = (struct cds_ja_inode *) &node->u.data[rindex << type->pool_size_order];
		break;
	}
	default:
		linear = NULL;
		assert(0);
	}

	return ja_linear_node_clear_ptr(type, linear, shadow_node, node_flag_ptr);
}

static
int ja_pigeon_node_clear_ptr(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_shadow_node *shadow_node,
		struct cds_ja_inode_flag **node_flag_ptr)
{
	assert(type->type_class == RCU_JA_PIGEON);

	if (shadow_node->fallback_removal_count) {
		shadow_node->fallback_removal_count--;
	} else {
		/* We should try recompacting the node */
		if (shadow_node->nr_child <= type->min_child)
			return -EFBIG;
	}
	dbg_printf("ja_pigeon_node_clear_ptr: clearing ptr: %p\n", *node_flag_ptr);
	rcu_assign_pointer(*node_flag_ptr, NULL);
	shadow_node->nr_child--;
	return 0;
}

/*
 * _ja_node_clear_ptr: clear ptr item within a node. Return an error
 * (negative error value) if it is not found (-ENOENT).
 */
static
int _ja_node_clear_ptr(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_inode_flag *node_flag,
		struct cds_ja_shadow_node *shadow_node,
		struct cds_ja_inode_flag **node_flag_ptr,
		uint8_t n)
{
	switch (type->type_class) {
	case RCU_JA_LINEAR:
		return ja_linear_node_clear_ptr(type, node, shadow_node, node_flag_ptr);
	case RCU_JA_POOL:
		return ja_pool_node_clear_ptr(type, node, node_flag, shadow_node, node_flag_ptr, n);
	case RCU_JA_PIGEON:
		return ja_pigeon_node_clear_ptr(type, node, shadow_node, node_flag_ptr);
	case RCU_JA_NULL:
		return -ENOENT;
	default:
		assert(0);
		return -EINVAL;
	}

	return 0;
}

/*
 * Calculate bit distribution. Returns the bit (0 to 7) that splits the
 * distribution into two sub-distributions containing as close to the
 * same number of elements as possible.
 */
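
/*
 * Worked example (child values assumed for illustration): for values
 * { 0x00, 0x01, 0x02, 0x03 }, bit 0 and bit 1 each see two ones out of
 * four children, so either bit splits the population 2/2 and would be
 * selected; bits 2..7 see zero ones and would split it 4/0. The
 * distance metric below compares (nr_one << 1) against the total
 * population, so a perfect split has distance 0.
 */
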
static
unsigned int ja_node_sum_distribution_1d(enum ja_recompact mode,
		struct cds_ja *ja,
		unsigned int type_index,
		const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_shadow_node *shadow_node,
		uint8_t n,
		struct cds_ja_inode_flag *child_node_flag,
		struct cds_ja_inode_flag **nullify_node_flag_ptr)
{
	uint8_t nr_one[JA_BITS_PER_BYTE];
	unsigned int bitsel = 0, bit_i, overall_best_distance = UINT_MAX;
	unsigned int distrib_nr_child = 0;

	memset(nr_one, 0, sizeof(nr_one));

	switch (type->type_class) {
	case RCU_JA_LINEAR:
	{
		uint8_t nr_child =
			ja_linear_node_get_nr_child(type, node);
		unsigned int i;

		for (i = 0; i < nr_child; i++) {
			struct cds_ja_inode_flag *iter;
			uint8_t v;

			ja_linear_node_get_ith_pos(type, node, i, &v, &iter);
			if (!iter)
				continue;
			if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
				continue;
			for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
				if (v & (1U << bit_i))
					nr_one[bit_i]++;
			}
			distrib_nr_child++;
		}
		break;
	}
	case RCU_JA_POOL:
	{
		unsigned int pool_nr;

		for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) {
			struct cds_ja_inode *pool =
				ja_pool_node_get_ith_pool(type,
					node, pool_nr);
			uint8_t nr_child =
				ja_linear_node_get_nr_child(type, pool);
			unsigned int j;

			for (j = 0; j < nr_child; j++) {
				struct cds_ja_inode_flag *iter;
				uint8_t v;

				ja_linear_node_get_ith_pos(type, pool,
						j, &v, &iter);
				if (!iter)
					continue;
				if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
					continue;
				for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
					if (v & (1U << bit_i))
						nr_one[bit_i]++;
				}
				distrib_nr_child++;
			}
		}
		break;
	}
	case RCU_JA_PIGEON:
	{
		unsigned int i;

		assert(mode == JA_RECOMPACT_DEL);
		for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
			struct cds_ja_inode_flag *iter;

			iter = ja_pigeon_node_get_ith_pos(type, node, i);
			if (!iter)
				continue;
			if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
				continue;
			for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
				if (i & (1U << bit_i))
					nr_one[bit_i]++;
			}
			distrib_nr_child++;
		}
		break;
	}
	case RCU_JA_NULL:
		assert(mode == JA_RECOMPACT_ADD_NEXT);
		break;
	default:
		assert(0);
		break;
	}

	if (mode == JA_RECOMPACT_ADD_NEXT || mode == JA_RECOMPACT_ADD_SAME) {
		for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
			if (n & (1U << bit_i))
				nr_one[bit_i]++;
		}
		distrib_nr_child++;
	}

	/*
	 * The best bit selector is that for which the number of ones is
	 * closest to half of the number of children in the
	 * distribution. We calculate the distance using the double of
	 * the sub-distribution sizes to eliminate truncation error.
	 */
	for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
		unsigned int distance_to_best;

		distance_to_best = abs_int((nr_one[bit_i] << 1U) - distrib_nr_child);
		if (distance_to_best < overall_best_distance) {
			overall_best_distance = distance_to_best;
			bitsel = bit_i;
		}
	}
	dbg_printf("1 dimension pool bit selection: (%u)\n", bitsel);
	return bitsel;
}

/*
 * Calculate bit distribution in two dimensions. Returns the two bits
 * (each 0 to 7) that split the distribution into four sub-distributions
 * containing as close to the same number of elements as possible.
 */
static
void ja_node_sum_distribution_2d(enum ja_recompact mode,
		struct cds_ja *ja,
		unsigned int type_index,
		const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_shadow_node *shadow_node,
		uint8_t n,
		struct cds_ja_inode_flag *child_node_flag,
		struct cds_ja_inode_flag **nullify_node_flag_ptr,
		unsigned int *_bitsel)
{
	uint8_t nr_2d_11[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE],
		nr_2d_10[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE],
		nr_2d_01[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE],
		nr_2d_00[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE];
	unsigned int bitsel[2] = { 0, 1 };
	unsigned int bit_i, bit_j;
	int overall_best_distance = INT_MAX;
	unsigned int distrib_nr_child = 0;

	memset(nr_2d_11, 0, sizeof(nr_2d_11));
	memset(nr_2d_10, 0, sizeof(nr_2d_10));
	memset(nr_2d_01, 0, sizeof(nr_2d_01));
	memset(nr_2d_00, 0, sizeof(nr_2d_00));

	switch (type->type_class) {
	case RCU_JA_LINEAR:
	{
		uint8_t nr_child =
			ja_linear_node_get_nr_child(type, node);
		unsigned int i;

		for (i = 0; i < nr_child; i++) {
			struct cds_ja_inode_flag *iter;
			uint8_t v;

			ja_linear_node_get_ith_pos(type, node, i, &v, &iter);
			if (!iter)
				continue;
			if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
				continue;
			for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
				for (bit_j = 0; bit_j < bit_i; bit_j++) {
					if ((v & (1U << bit_i)) && (v & (1U << bit_j))) {
						nr_2d_11[bit_i][bit_j]++;
					}
					if ((v & (1U << bit_i)) && !(v & (1U << bit_j))) {
						nr_2d_10[bit_i][bit_j]++;
					}
					if (!(v & (1U << bit_i)) && (v & (1U << bit_j))) {
						nr_2d_01[bit_i][bit_j]++;
					}
					if (!(v & (1U << bit_i)) && !(v & (1U << bit_j))) {
						nr_2d_00[bit_i][bit_j]++;
					}
				}
			}
			distrib_nr_child++;
		}
		break;
	}
	case RCU_JA_POOL:
	{
		unsigned int pool_nr;

		for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) {
			struct cds_ja_inode *pool =
				ja_pool_node_get_ith_pool(type,
					node, pool_nr);
			uint8_t nr_child =
				ja_linear_node_get_nr_child(type, pool);
			unsigned int j;

			for (j = 0; j < nr_child; j++) {
				struct cds_ja_inode_flag *iter;
				uint8_t v;

				ja_linear_node_get_ith_pos(type, pool,
						j, &v, &iter);
				if (!iter)
					continue;
				if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
					continue;
				for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
					for (bit_j = 0; bit_j < bit_i; bit_j++) {
						if ((v & (1U << bit_i)) && (v & (1U << bit_j))) {
							nr_2d_11[bit_i][bit_j]++;
						}
						if ((v & (1U << bit_i)) && !(v & (1U << bit_j))) {
							nr_2d_10[bit_i][bit_j]++;
						}
						if (!(v & (1U << bit_i)) && (v & (1U << bit_j))) {
							nr_2d_01[bit_i][bit_j]++;
						}
						if (!(v & (1U << bit_i)) && !(v & (1U << bit_j))) {
							nr_2d_00[bit_i][bit_j]++;
						}
					}
				}
				distrib_nr_child++;
			}
		}
		break;
	}
	case RCU_JA_PIGEON:
	{
		unsigned int i;

		assert(mode == JA_RECOMPACT_DEL);
		for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
			struct cds_ja_inode_flag *iter;

			iter = ja_pigeon_node_get_ith_pos(type, node, i);
			if (!iter)
				continue;
			if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
				continue;
			for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
				for (bit_j = 0; bit_j < bit_i; bit_j++) {
					if ((i & (1U << bit_i)) && (i & (1U << bit_j))) {
						nr_2d_11[bit_i][bit_j]++;
					}
					if ((i & (1U << bit_i)) && !(i & (1U << bit_j))) {
						nr_2d_10[bit_i][bit_j]++;
					}
					if (!(i & (1U << bit_i)) && (i & (1U << bit_j))) {
						nr_2d_01[bit_i][bit_j]++;
					}
					if (!(i & (1U << bit_i)) && !(i & (1U << bit_j))) {
						nr_2d_00[bit_i][bit_j]++;
					}
				}
			}
			distrib_nr_child++;
		}
		break;
	}
	case RCU_JA_NULL:
		assert(mode == JA_RECOMPACT_ADD_NEXT);
		break;
	default:
		assert(0);
		break;
	}

	if (mode == JA_RECOMPACT_ADD_NEXT || mode == JA_RECOMPACT_ADD_SAME) {
		for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
			for (bit_j = 0; bit_j < bit_i; bit_j++) {
				if ((n & (1U << bit_i)) && (n & (1U << bit_j))) {
					nr_2d_11[bit_i][bit_j]++;
				}
				if ((n & (1U << bit_i)) && !(n & (1U << bit_j))) {
					nr_2d_10[bit_i][bit_j]++;
				}
				if (!(n & (1U << bit_i)) && (n & (1U << bit_j))) {
					nr_2d_01[bit_i][bit_j]++;
				}
				if (!(n & (1U << bit_i)) && !(n & (1U << bit_j))) {
					nr_2d_00[bit_i][bit_j]++;
				}
			}
		}
		distrib_nr_child++;
	}

	/*
	 * The best bit selector is that for which the number of nodes
	 * in each sub-class is closest to one-fourth of the number of
	 * children in the distribution. We calculate the distance using
	 * 4 times the size of the sub-distribution to eliminate
	 * truncation error.
	 */
	for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
		for (bit_j = 0; bit_j < bit_i; bit_j++) {
			int distance_to_best[4];

			distance_to_best[0] = (nr_2d_11[bit_i][bit_j] << 2U) - distrib_nr_child;
			distance_to_best[1] = (nr_2d_10[bit_i][bit_j] << 2U) - distrib_nr_child;
			distance_to_best[2] = (nr_2d_01[bit_i][bit_j] << 2U) - distrib_nr_child;
			distance_to_best[3] = (nr_2d_00[bit_i][bit_j] << 2U) - distrib_nr_child;

			/* Consider worse distance above best */
			if (distance_to_best[1] > 0 && distance_to_best[1] > distance_to_best[0])
				distance_to_best[0] = distance_to_best[1];
			if (distance_to_best[2] > 0 && distance_to_best[2] > distance_to_best[0])
				distance_to_best[0] = distance_to_best[2];
			if (distance_to_best[3] > 0 && distance_to_best[3] > distance_to_best[0])
				distance_to_best[0] = distance_to_best[3];

			/*
			 * If our worse distance is better than overall,
			 * we become new best candidate.
			 */
			if (distance_to_best[0] < overall_best_distance) {
				overall_best_distance = distance_to_best[0];
				bitsel[0] = bit_i;
				bitsel[1] = bit_j;
			}
		}
	}

	dbg_printf("2 dimensions pool bit selection: (%u,%u)\n", bitsel[0], bitsel[1]);

	/* Return our bit selection */
	_bitsel[0] = bitsel[0];
	_bitsel[1] = bitsel[1];
}

static
unsigned int find_nearest_type_index(unsigned int type_index,
		unsigned int nr_nodes)
{
	const struct cds_ja_type *type;

	assert(type_index != NODE_INDEX_NULL);
	if (nr_nodes == 0)
		return NODE_INDEX_NULL;
	for (;;) {
		type = &ja_types[type_index];
		if (nr_nodes < type->min_child)
			type_index--;
		else if (nr_nodes > type->max_child)
			type_index++;
		else
			break;
	}
	return type_index;
}

MD
1300/*
1301 * ja_node_recompact_add: recompact a node, adding a new child.
2e313670 1302 * Return 0 on success, -EAGAIN if need to retry, or other negative
5a9a87dd 1303 * error value otherwise.
7a0b2331
MD
1304 */
1305static
2e313670
MD
1306int ja_node_recompact(enum ja_recompact mode,
1307 struct cds_ja *ja,
e1db2db5 1308 unsigned int old_type_index,
d96bfb0d 1309 const struct cds_ja_type *old_type,
b4540e8a 1310 struct cds_ja_inode *old_node,
5a9a87dd 1311 struct cds_ja_shadow_node *shadow_node,
3d8fe307 1312 struct cds_ja_inode_flag **old_node_flag_ptr, uint8_t n,
af3cbd45 1313 struct cds_ja_inode_flag *child_node_flag,
48cbe001
MD
1314 struct cds_ja_inode_flag **nullify_node_flag_ptr,
1315 int level)
7a0b2331 1316{
e1db2db5 1317 unsigned int new_type_index;
b4540e8a 1318 struct cds_ja_inode *new_node;
af3cbd45 1319 struct cds_ja_shadow_node *new_shadow_node = NULL;
d96bfb0d 1320 const struct cds_ja_type *new_type;
3d8fe307 1321 struct cds_ja_inode_flag *new_node_flag, *old_node_flag;
7a0b2331 1322 int ret;
f07b240f 1323 int fallback = 0;
7a0b2331 1324
3d8fe307
MD
1325 old_node_flag = *old_node_flag_ptr;
1326
48cbe001
MD
1327 /*
1328 * Need to find nearest type index even for ADD_SAME, because
1329 * this recompaction, when applied to linear nodes, will garbage
1330 * collect dummy (NULL) entries, and can therefore cause a few
1331 * linear representations to be skipped.
1332 */
2e313670 1333 switch (mode) {
19ddcd04 1334 case JA_RECOMPACT_ADD_SAME:
48cbe001
MD
1335 new_type_index = find_nearest_type_index(old_type_index,
1336 shadow_node->nr_child + 1);
1337 dbg_printf("Recompact for node with %u children\n",
1338 shadow_node->nr_child + 1);
2e313670 1339 break;
19ddcd04 1340 case JA_RECOMPACT_ADD_NEXT:
2e313670
MD
1341 if (!shadow_node || old_type_index == NODE_INDEX_NULL) {
1342 new_type_index = 0;
48cbe001 1343 dbg_printf("Recompact for NULL\n");
2e313670 1344 } else {
48cbe001
MD
1345 new_type_index = find_nearest_type_index(old_type_index,
1346 shadow_node->nr_child + 1);
1347 dbg_printf("Recompact for node with %u children\n",
1348 shadow_node->nr_child + 1);
2e313670
MD
1349 }
1350 break;
1351 case JA_RECOMPACT_DEL:
48cbe001
MD
1352 new_type_index = find_nearest_type_index(old_type_index,
1353 shadow_node->nr_child - 1);
1354 dbg_printf("Recompact for node with %u children\n",
1355 shadow_node->nr_child - 1);
2e313670
MD
1356 break;
1357 default:
1358 assert(0);
7a0b2331 1359 }
a2a7ff59 1360
f07b240f 1361retry: /* for fallback */
582a6ade
MD
1362 dbg_printf("Recompact from type %d to type %d\n",
1363 old_type_index, new_type_index);
7a0b2331 1364 new_type = &ja_types[new_type_index];
2e313670 1365 if (new_type_index != NODE_INDEX_NULL) {
354981c2 1366 new_node = alloc_cds_ja_node(ja, new_type);
2e313670
MD
1367 if (!new_node)
1368 return -ENOMEM;
b1a90ce3
MD
1369
1370 if (new_type->type_class == RCU_JA_POOL) {
1371 switch (new_type->nr_pool_order) {
1372 case 1:
1373 {
19ddcd04
MD
1374 unsigned int node_distrib_bitsel;
1375
b1a90ce3
MD
1376 node_distrib_bitsel =
1377 ja_node_sum_distribution_1d(mode, ja,
1378 old_type_index, old_type,
1379 old_node, shadow_node,
1380 n, child_node_flag,
1381 nullify_node_flag_ptr);
1382 assert(!((unsigned long) new_node & JA_POOL_1D_MASK));
1383 new_node_flag = ja_node_flag_pool_1d(new_node,
1384 new_type_index, node_distrib_bitsel);
1385 break;
1386 }
1387 case 2:
1388 {
19ddcd04
MD
1389 unsigned int node_distrib_bitsel[2];
1390
1391 ja_node_sum_distribution_2d(mode, ja,
1392 old_type_index, old_type,
1393 old_node, shadow_node,
1394 n, child_node_flag,
1395 nullify_node_flag_ptr,
1396 node_distrib_bitsel);
b1a90ce3
MD
1397 assert(!((unsigned long) new_node & JA_POOL_1D_MASK));
1398 assert(!((unsigned long) new_node & JA_POOL_2D_MASK));
19ddcd04
MD
1399 new_node_flag = ja_node_flag_pool_2d(new_node,
1400 new_type_index, node_distrib_bitsel);
b1a90ce3
MD
1401 break;
1402 }
1403 default:
1404 assert(0);
1405 }
1406 } else {
1407 new_node_flag = ja_node_flag(new_node, new_type_index);
1408 }
1409
2e313670 1410 dbg_printf("Recompact inherit lock from %p\n", shadow_node);
48cbe001 1411 new_shadow_node = rcuja_shadow_set(ja->ht, new_node_flag, shadow_node, ja, level);
2e313670 1412 if (!new_shadow_node) {
354981c2 1413 free_cds_ja_node(ja, new_node);
2e313670
MD
1414 return -ENOMEM;
1415 }
1416 if (fallback)
1417 new_shadow_node->fallback_removal_count =
1418 JA_FALLBACK_REMOVAL_COUNT;
1419 } else {
1420 new_node = NULL;
1421 new_node_flag = NULL;
e1db2db5 1422 }
11c5e016 1423
19ddcd04 1424 assert(mode != JA_RECOMPACT_ADD_NEXT || old_type->type_class != RCU_JA_PIGEON);
2e313670
MD
1425
1426 if (new_type_index == NODE_INDEX_NULL)
1427 goto skip_copy;
1428
11c5e016
MD
1429 switch (old_type->type_class) {
1430 case RCU_JA_LINEAR:
1431 {
1432 uint8_t nr_child =
1433 ja_linear_node_get_nr_child(old_type, old_node);
1434 unsigned int i;
1435
1436 for (i = 0; i < nr_child; i++) {
b4540e8a 1437 struct cds_ja_inode_flag *iter;
11c5e016
MD
1438 uint8_t v;
1439
1440 ja_linear_node_get_ith_pos(old_type, old_node, i, &v, &iter);
1441 if (!iter)
1442 continue;
af3cbd45 1443 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
2e313670 1444 continue;
b1a90ce3 1445 ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
f07b240f 1446 new_shadow_node,
11c5e016 1447 v, iter);
f07b240f
MD
1448 if (new_type->type_class == RCU_JA_POOL && ret) {
1449 goto fallback_toosmall;
1450 }
11c5e016
MD
1451 assert(!ret);
1452 }
1453 break;
1454 }
1455 case RCU_JA_POOL:
1456 {
1457 unsigned int pool_nr;
1458
1459 for (pool_nr = 0; pool_nr < (1U << old_type->nr_pool_order); pool_nr++) {
b4540e8a 1460 struct cds_ja_inode *pool =
11c5e016
MD
1461 ja_pool_node_get_ith_pool(old_type,
1462 old_node, pool_nr);
1463 uint8_t nr_child =
1464 ja_linear_node_get_nr_child(old_type, pool);
1465 unsigned int j;
1466
1467 for (j = 0; j < nr_child; j++) {
b4540e8a 1468 struct cds_ja_inode_flag *iter;
11c5e016
MD
1469 uint8_t v;
1470
1471 ja_linear_node_get_ith_pos(old_type, pool,
1472 j, &v, &iter);
1473 if (!iter)
1474 continue;
af3cbd45 1475 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
2e313670 1476 continue;
b1a90ce3 1477 ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
f07b240f 1478 new_shadow_node,
11c5e016 1479 v, iter);
f07b240f
MD
1480 if (new_type->type_class == RCU_JA_POOL
1481 && ret) {
1482 goto fallback_toosmall;
1483 }
11c5e016
MD
1484 assert(!ret);
1485 }
1486 }
1487 break;
7a0b2331 1488 }
a2a7ff59 1489 case RCU_JA_NULL:
19ddcd04 1490 assert(mode == JA_RECOMPACT_ADD_NEXT);
a2a7ff59 1491 break;
11c5e016 1492 case RCU_JA_PIGEON:
2e313670 1493 {
2e313670
MD
1494 unsigned int i;
1495
1496 assert(mode == JA_RECOMPACT_DEL);
48cbe001 1497 for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
2e313670
MD
1498 struct cds_ja_inode_flag *iter;
1499
1500 iter = ja_pigeon_node_get_ith_pos(old_type, old_node, i);
1501 if (!iter)
1502 continue;
af3cbd45 1503 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
2e313670 1504 continue;
b1a90ce3 1505 ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
2e313670
MD
1506 new_shadow_node,
1507 i, iter);
1508 if (new_type->type_class == RCU_JA_POOL && ret) {
1509 goto fallback_toosmall;
1510 }
1511 assert(!ret);
1512 }
1513 break;
1514 }
11c5e016
MD
1515 default:
1516 assert(0);
5a9a87dd 1517 ret = -EINVAL;
f07b240f 1518 goto end;
11c5e016 1519 }
2e313670 1520skip_copy:
11c5e016 1521
19ddcd04 1522 if (mode == JA_RECOMPACT_ADD_NEXT || mode == JA_RECOMPACT_ADD_SAME) {
2e313670 1523 /* add node */
b1a90ce3 1524 ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
2e313670
MD
1525 new_shadow_node,
1526 n, child_node_flag);
7b413155
MD
1527 if (new_type->type_class == RCU_JA_POOL && ret) {
1528 goto fallback_toosmall;
1529 }
2e313670
MD
1530 assert(!ret);
1531 }
19ddcd04
MD
1532
1533 if (fallback) {
1534 dbg_printf("Using fallback for %u children, node type index: %u, mode %s\n",
1535 new_shadow_node->nr_child, old_type_index, mode == JA_RECOMPACT_ADD_NEXT ? "add_next" :
1536 (mode == JA_RECOMPACT_DEL ? "del" : "add_same"));
354981c2 1537 uatomic_inc(&ja->node_fallback_count_distribution[new_shadow_node->nr_child]);
19ddcd04
MD
1538 }
1539
3d8fe307
MD
1540 /* Return pointer to new recompacted node through old_node_flag_ptr */
1541 *old_node_flag_ptr = new_node_flag;
a2a7ff59 1542 if (old_node) {
2e313670
MD
1543 int flags;
1544
1545 flags = RCUJA_SHADOW_CLEAR_FREE_NODE;
1546 /*
1547 * It is OK to free the lock associated with a node
1548 * going to NULL, since we are holding the parent lock.
1549 * This synchronizes removal with re-add of that node.
1550 */
1551 if (new_type_index == NODE_INDEX_NULL)
48cbe001 1552 flags |= RCUJA_SHADOW_CLEAR_FREE_LOCK;
3d8fe307 1553 ret = rcuja_shadow_clear(ja->ht, old_node_flag, shadow_node,
2e313670 1554 flags);
a2a7ff59
MD
1555 assert(!ret);
1556 }
5a9a87dd
MD
1557
1558 ret = 0;
f07b240f 1559end:
5a9a87dd 1560 return ret;
f07b240f
MD
1561
1562fallback_toosmall:
1563 /* fallback if next pool is too small */
af3cbd45 1564 assert(new_shadow_node);
3d8fe307 1565 ret = rcuja_shadow_clear(ja->ht, new_node_flag, new_shadow_node,
f07b240f
MD
1566 RCUJA_SHADOW_CLEAR_FREE_NODE);
1567 assert(!ret);
1568
19ddcd04
MD
1569 switch (mode) {
1570 case JA_RECOMPACT_ADD_SAME:
1571 /*
1572 * JA_RECOMPACT_ADD_SAME is only triggered if a linear
1573 * node within a pool has unused entries. It should
1574 * therefore _never_ be too small.
1575 */
4a073c53 1576 assert(0);
4cde8267
MD
1577
1578 /* Fall-through */
19ddcd04
MD
1579 case JA_RECOMPACT_ADD_NEXT:
1580 {
1581 const struct cds_ja_type *next_type;
1582
1583 /*
1584 * Recompaction attempt on add failed. Should only
1585 * happen if target node type is pool. Caused by
1586 * hard-to-split distribution. Recompact using the next
1587 * distribution size.
1588 */
1589 assert(new_type->type_class == RCU_JA_POOL);
1590 next_type = &ja_types[new_type_index + 1];
1591 /*
1592 * Try going to the next pool size if our population
1593 * fits within its range. This is not flagged as a
1594 * fallback.
1595 */
1596 if (shadow_node->nr_child + 1 >= next_type->min_child
1597 && shadow_node->nr_child + 1 <= next_type->max_child) {
1598 new_type_index++;
1599 goto retry;
1600 } else {
1601 new_type_index++;
1602 dbg_printf("Add fallback to type %d\n", new_type_index);
1603 uatomic_inc(&ja->nr_fallback);
1604 fallback = 1;
1605 goto retry;
1606 }
1607 break;
1608 }
1609 case JA_RECOMPACT_DEL:
1610 /*
1611 * Recompaction attempt on delete failed. Should only
1612 * happen if target node type is pool. This is caused by
1613 * a hard-to-split distribution. Recompact on same node
1614 * size, but flag current node as "fallback" to ensure
1615 * we don't attempt recompaction before some activity
1616 * has reshuffled our node.
1617 */
1618 assert(new_type->type_class == RCU_JA_POOL);
1619 new_type_index = old_type_index;
1620 dbg_printf("Delete fallback keeping type %d\n", new_type_index);
1621 uatomic_inc(&ja->nr_fallback);
1622 fallback = 1;
1623 goto retry;
1624 default:
1625 assert(0);
1626 return -EINVAL;
1627 }
1628
1629 /*
1630 * Last resort fallback: pigeon.
1631 */
f07b240f
MD
1632 new_type_index = (1UL << JA_TYPE_BITS) - 1;
1633 dbg_printf("Fallback to type %d\n", new_type_index);
1634 uatomic_inc(&ja->nr_fallback);
1635 fallback = 1;
1636 goto retry;
7a0b2331
MD
1637}
1638
5a9a87dd 1639/*
2e313670 1640 * Return 0 on success, -EAGAIN if need to retry, or other negative
5a9a87dd
MD
1641 * error value otherwise.
1642 */
7a0b2331 1643static
d96bfb0d 1644int ja_node_set_nth(struct cds_ja *ja,
b4540e8a 1645 struct cds_ja_inode_flag **node_flag, uint8_t n,
5a9a87dd 1646 struct cds_ja_inode_flag *child_node_flag,
48cbe001
MD
1647 struct cds_ja_shadow_node *shadow_node,
1648 int level)
7a0b2331
MD
1649{
1650 int ret;
e1db2db5 1651 unsigned int type_index;
d96bfb0d 1652 const struct cds_ja_type *type;
b4540e8a 1653 struct cds_ja_inode *node;
7a0b2331 1654
a2a7ff59
MD
1655 dbg_printf("ja_node_set_nth for n=%u, node %p, shadow %p\n",
1656 (unsigned int) n, ja_node_ptr(*node_flag), shadow_node);
1657
e1db2db5
MD
1658 node = ja_node_ptr(*node_flag);
1659 type_index = ja_node_type(*node_flag);
1660 type = &ja_types[type_index];
b1a90ce3 1661 ret = _ja_node_set_nth(type, node, *node_flag, shadow_node,
e1db2db5 1662 n, child_node_flag);
2e313670
MD
1663 switch (ret) {
1664 case -ENOSPC:
19ddcd04
MD
1665 /* Not enough space in node, need to recompact to next type. */
1666 ret = ja_node_recompact(JA_RECOMPACT_ADD_NEXT, ja, type_index, type, node,
48cbe001 1667 shadow_node, node_flag, n, child_node_flag, NULL, level);
2e313670
MD
1668 break;
1669 case -ERANGE:
1670 /* Node needs to be recompacted. */
19ddcd04 1671 ret = ja_node_recompact(JA_RECOMPACT_ADD_SAME, ja, type_index, type, node,
48cbe001 1672 shadow_node, node_flag, n, child_node_flag, NULL, level);
2e313670
MD
1673 break;
1674 }
1675 return ret;
1676}
1677
1678/*
1679 * Return 0 on success, -EAGAIN if need to retry, or other negative
1680 * error value otherwise.
1681 */
1682static
af3cbd45
MD
1683int ja_node_clear_ptr(struct cds_ja *ja,
1684 struct cds_ja_inode_flag **node_flag_ptr, /* Pointer to location to nullify */
1685 struct cds_ja_inode_flag **parent_node_flag_ptr, /* Address of parent ptr in its parent */
1686 struct cds_ja_shadow_node *shadow_node, /* of parent */
48cbe001 1687 uint8_t n, int level)
2e313670
MD
1688{
1689 int ret;
1690 unsigned int type_index;
1691 const struct cds_ja_type *type;
1692 struct cds_ja_inode *node;
1693
af3cbd45
MD
1694 dbg_printf("ja_node_clear_ptr for node %p, shadow %p, target ptr %p\n",
1695 ja_node_ptr(*parent_node_flag_ptr), shadow_node, node_flag_ptr);
2e313670 1696
af3cbd45
MD
1697 node = ja_node_ptr(*parent_node_flag_ptr);
1698 type_index = ja_node_type(*parent_node_flag_ptr);
2e313670 1699 type = &ja_types[type_index];
19ddcd04 1700 ret = _ja_node_clear_ptr(type, node, *parent_node_flag_ptr, shadow_node, node_flag_ptr, n);
2e313670 1701 if (ret == -EFBIG) {
19ddcd04 1702 /* Should try recompaction. */
2e313670 1703 ret = ja_node_recompact(JA_RECOMPACT_DEL, ja, type_index, type, node,
af3cbd45 1704 shadow_node, parent_node_flag_ptr, n, NULL,
48cbe001 1705 node_flag_ptr, level);
7a0b2331
MD
1706 }
1707 return ret;
1708}
be9a7474 1709
03ec1aeb 1710struct cds_ja_node *cds_ja_lookup(struct cds_ja *ja, uint64_t key)
b4540e8a 1711{
41975c12
MD
1712 unsigned int tree_depth, i;
1713 struct cds_ja_inode_flag *node_flag;
1714
1715 if (caa_unlikely(key > ja->key_max))
03ec1aeb 1716 return NULL;
41975c12 1717 tree_depth = ja->tree_depth;
5a9a87dd 1718 node_flag = rcu_dereference(ja->root);
41975c12 1719
5a9a87dd
MD
1720 /* level 0: root node */
1721 if (!ja_node_ptr(node_flag))
03ec1aeb 1722 return NULL;
5a9a87dd
MD
1723
1724 for (i = 1; i < tree_depth; i++) {
79b41067
MD
1725 uint8_t iter_key;
1726
1727 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1)));
48cbe001 1728 node_flag = ja_node_get_nth(node_flag, NULL, iter_key);
582a6ade
MD
1729 dbg_printf("cds_ja_lookup iter key lookup %u finds node_flag %p\n",
1730 (unsigned int) iter_key, node_flag);
41975c12 1731 if (!ja_node_ptr(node_flag))
03ec1aeb 1732 return NULL;
41975c12
MD
1733 }
1734
5a9a87dd 1735	/* Last level lookup succeeded. We got an actual match. */
03ec1aeb 1736 return (struct cds_ja_node *) node_flag;
5a9a87dd
MD
1737}
1738
03ec1aeb 1739struct cds_ja_node *cds_ja_lookup_lower_equal(struct cds_ja *ja, uint64_t key)
291b2543
MD
1740{
1741 int tree_depth, level;
1742 struct cds_ja_inode_flag *node_flag, *cur_node_depth[JA_MAX_DEPTH];
291b2543
MD
1743
1744 if (caa_unlikely(key > ja->key_max || !key))
03ec1aeb 1745 return NULL;
291b2543
MD
1746
1747 memset(cur_node_depth, 0, sizeof(cur_node_depth));
1748 tree_depth = ja->tree_depth;
1749 node_flag = rcu_dereference(ja->root);
1750 cur_node_depth[0] = node_flag;
1751
1752 /* level 0: root node */
1753 if (!ja_node_ptr(node_flag))
03ec1aeb 1754 return NULL;
291b2543
MD
1755
1756 for (level = 1; level < tree_depth; level++) {
1757 uint8_t iter_key;
1758
1759 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - level - 1)));
1760 node_flag = ja_node_get_nth(node_flag, NULL, iter_key);
1761 if (!ja_node_ptr(node_flag))
1762 break;
1763 cur_node_depth[level] = node_flag;
1764 dbg_printf("cds_ja_lookup iter key lookup %u finds node_flag %p\n",
1765 (unsigned int) iter_key, node_flag);
1766 }
1767
1768 if (level == tree_depth) {
 1769		/* Last level lookup succeeded. We got an equal match. */
03ec1aeb 1770 return (struct cds_ja_node *) node_flag;
291b2543
MD
1771 }
1772
1773 /*
1774 * Find highest value left of current node.
1775 * Current node is cur_node_depth[level].
1776 * Start at current level. If we cannot find any key left of
1777 * ours, go one level up, seek highest value left of current
1778 * (recursively), and when we find one, get the rightmost child
1779 * of its rightmost child (recursively).
1780 */
1781 for (; level > 0; level--) {
1782 uint8_t iter_key;
1783
1784 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - level - 1)));
1785 node_flag = ja_node_get_left(cur_node_depth[level - 1],
1786 iter_key);
1787 /* If found left sibling, find rightmost child. */
1788 if (ja_node_ptr(node_flag))
1789 break;
1790 }
1791
1792 if (!level) {
1793 /* Reached the root and could not find a left sibling. */
03ec1aeb 1794 return NULL;
291b2543
MD
1795 }
1796
1797 level++;
3c52f0f9
MD
1798
1799 /*
4cef6f97 1800 * From this point, we are guaranteed to be able to find a
47d2eab3
MD
1801 * "lower than" match. ja_attach_node() and ja_detach_node()
1802 * both guarantee that it is not possible for a lookup to reach
1803 * a dead-end.
3c52f0f9
MD
1804 */
1805
291b2543
MD
1806 /* Find rightmost child of rightmost child (recursively). */
1807 for (; level < tree_depth; level++) {
1808 node_flag = ja_node_get_rightmost(node_flag);
 1809		/* Defensive check: should always find a child (see guarantee above). */
1810 if (!ja_node_ptr(node_flag))
1811 break;
1812 }
1813
4cef6f97 1814 assert(level == tree_depth);
291b2543 1815
03ec1aeb 1816 return (struct cds_ja_node *) node_flag;
291b2543
MD
1817}
1818
5a9a87dd
MD
1819/*
1820 * We reached an unpopulated node. Create it and the children we need,
1821 * and then attach the entire branch to the current node. This may
1822 * trigger recompaction of the current node. Locks needed: node lock
1823 * (for add), and, possibly, parent node lock (to update pointer due to
1824 * node recompaction).
1825 *
1826 * First take node lock, check if recompaction is needed, then take
1827 * parent lock (if needed). Then we can proceed to create the new
1828 * branch. Publish the new branch, and release locks.
1829 * TODO: we currently always take the parent lock even when not needed.
47d2eab3
MD
1830 *
1831 * ja_attach_node() ensures that a lookup will _never_ see a branch that
1832 * leads to a dead-end: before attaching a branch, the entire content of
1833 * the new branch is populated, thus creating a cluster, before
1834 * attaching the cluster to the rest of the tree, thus making it visible
1835 * to lookups.
5a9a87dd
MD
1836 */
1837static
1838int ja_attach_node(struct cds_ja *ja,
b0ca2d21 1839 struct cds_ja_inode_flag **attach_node_flag_ptr,
b62a8d0c 1840 struct cds_ja_inode_flag *attach_node_flag,
48cbe001
MD
1841 struct cds_ja_inode_flag *parent_attach_node_flag,
1842 struct cds_ja_inode_flag **old_node_flag_ptr,
1843 struct cds_ja_inode_flag *old_node_flag,
5a9a87dd 1844 uint64_t key,
79b41067 1845 unsigned int level,
5a9a87dd
MD
1846 struct cds_ja_node *child_node)
1847{
1848 struct cds_ja_shadow_node *shadow_node = NULL,
af3cbd45 1849 *parent_shadow_node = NULL;
5a9a87dd
MD
1850 struct cds_ja_inode_flag *iter_node_flag, *iter_dest_node_flag;
1851 int ret, i;
a2a7ff59 1852 struct cds_ja_inode_flag *created_nodes[JA_MAX_DEPTH];
5a9a87dd
MD
1853 int nr_created_nodes = 0;
1854
48cbe001
MD
1855 dbg_printf("Attach node at level %u (old_node_flag %p, attach_node_flag_ptr %p attach_node_flag %p, parent_attach_node_flag %p)\n",
1856 level, old_node_flag, attach_node_flag_ptr, attach_node_flag, parent_attach_node_flag);
a2a7ff59 1857
48cbe001
MD
1858 assert(!old_node_flag);
1859 if (attach_node_flag) {
1860 shadow_node = rcuja_shadow_lookup_lock(ja->ht, attach_node_flag);
1861 if (!shadow_node) {
1862 ret = -EAGAIN;
1863 goto end;
1864 }
5a9a87dd 1865 }
48cbe001 1866 if (parent_attach_node_flag) {
5a9a87dd 1867 parent_shadow_node = rcuja_shadow_lookup_lock(ja->ht,
48cbe001 1868 parent_attach_node_flag);
5a9a87dd 1869 if (!parent_shadow_node) {
2e313670 1870 ret = -EAGAIN;
5a9a87dd
MD
1871 goto unlock_shadow;
1872 }
1873 }
1874
48cbe001 1875 if (old_node_flag_ptr && ja_node_ptr(*old_node_flag_ptr)) {
b306a0fe 1876 /*
c112acaa
MD
1877 * Target node has been updated between RCU lookup and
1878 * lock acquisition. We need to re-try lookup and
1879 * attach.
1880 */
1881 ret = -EAGAIN;
1882 goto unlock_parent;
1883 }
1884
9be99d4a
MD
1885 /*
1886 * Perform a lookup query to handle the case where
1887 * old_node_flag_ptr is NULL. We cannot use it to check if the
1888 * node has been populated between RCU lookup and mutex
1889 * acquisition.
1890 */
1891 if (!old_node_flag_ptr) {
1892 uint8_t iter_key;
1893 struct cds_ja_inode_flag *lookup_node_flag;
1894 struct cds_ja_inode_flag **lookup_node_flag_ptr;
1895
1896 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (ja->tree_depth - level)));
1897 lookup_node_flag = ja_node_get_nth(attach_node_flag,
1898 &lookup_node_flag_ptr,
1899 iter_key);
1900 if (lookup_node_flag) {
1901 ret = -EEXIST;
1902 goto unlock_parent;
1903 }
1904 }
1905
c112acaa 1906 if (attach_node_flag_ptr && ja_node_ptr(*attach_node_flag_ptr) !=
b62a8d0c 1907 ja_node_ptr(attach_node_flag)) {
c112acaa
MD
1908 /*
1909 * Target node has been updated between RCU lookup and
1910 * lock acquisition. We need to re-try lookup and
1911 * attach.
b306a0fe
MD
1912 */
1913 ret = -EAGAIN;
1914 goto unlock_parent;
1915 }
1916
a2a7ff59 1917 /* Create new branch, starting from bottom */
03ec1aeb 1918 iter_node_flag = (struct cds_ja_inode_flag *) child_node;
5a9a87dd 1919
48cbe001 1920 for (i = ja->tree_depth - 1; i >= (int) level; i--) {
79b41067
MD
1921 uint8_t iter_key;
1922
48cbe001 1923 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (ja->tree_depth - i - 1)));
79b41067 1924 dbg_printf("branch creation level %d, key %u\n",
48cbe001 1925 i, (unsigned int) iter_key);
5a9a87dd
MD
1926 iter_dest_node_flag = NULL;
1927 ret = ja_node_set_nth(ja, &iter_dest_node_flag,
79b41067 1928 iter_key,
5a9a87dd 1929 iter_node_flag,
48cbe001 1930 NULL, i);
9be99d4a
MD
1931 if (ret) {
1932 dbg_printf("branch creation error %d\n", ret);
5a9a87dd 1933 goto check_error;
9be99d4a 1934 }
5a9a87dd
MD
1935 created_nodes[nr_created_nodes++] = iter_dest_node_flag;
1936 iter_node_flag = iter_dest_node_flag;
1937 }
48cbe001 1938 assert(level > 0);
5a9a87dd 1939
48cbe001
MD
1940 /* Publish branch */
1941 if (level == 1) {
1942 /*
1943 * Attaching to root node.
1944 */
1945 rcu_assign_pointer(ja->root, iter_node_flag);
1946 } else {
79b41067
MD
1947 uint8_t iter_key;
1948
1949 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (ja->tree_depth - level)));
48cbe001
MD
1950 dbg_printf("publish branch at level %d, key %u\n",
1951 level - 1, (unsigned int) iter_key);
a2a7ff59 1952 /* We need to use set_nth on the previous level. */
48cbe001 1953 iter_dest_node_flag = attach_node_flag;
a2a7ff59 1954 ret = ja_node_set_nth(ja, &iter_dest_node_flag,
79b41067 1955 iter_key,
a2a7ff59 1956 iter_node_flag,
48cbe001 1957 shadow_node, level - 1);
9be99d4a
MD
1958 if (ret) {
1959 dbg_printf("branch publish error %d\n", ret);
a2a7ff59 1960 goto check_error;
9be99d4a 1961 }
48cbe001
MD
1962 /*
1963 * Attach branch
1964 */
1965 rcu_assign_pointer(*attach_node_flag_ptr, iter_dest_node_flag);
a2a7ff59
MD
1966 }
1967
5a9a87dd
MD
1968 /* Success */
1969 ret = 0;
1970
1971check_error:
1972 if (ret) {
1973 for (i = 0; i < nr_created_nodes; i++) {
1974 int tmpret;
a2a7ff59
MD
1975 int flags;
1976
1977 flags = RCUJA_SHADOW_CLEAR_FREE_LOCK;
1978 if (i)
1979 flags |= RCUJA_SHADOW_CLEAR_FREE_NODE;
5a9a87dd 1980 tmpret = rcuja_shadow_clear(ja->ht,
3d8fe307 1981 created_nodes[i],
a2a7ff59
MD
1982 NULL,
1983 flags);
5a9a87dd
MD
1984 assert(!tmpret);
1985 }
1986 }
b306a0fe 1987unlock_parent:
5a9a87dd
MD
1988 if (parent_shadow_node)
1989 rcuja_shadow_unlock(parent_shadow_node);
1990unlock_shadow:
1991 if (shadow_node)
1992 rcuja_shadow_unlock(shadow_node);
1993end:
1994 return ret;
1995}
1996
1997/*
03ec1aeb
MD
 1998 * Lock the parent containing the pointer to the list of duplicates, and
 1999 * add the node to this list. Failure can happen if a concurrent update
 2000 * changes the parent before we get the lock. We return -EAGAIN in that case.
5a9a87dd
MD
2001 * Return 0 on success, negative error value on failure.
2002 */
2003static
2004int ja_chain_node(struct cds_ja *ja,
af3cbd45 2005 struct cds_ja_inode_flag *parent_node_flag,
fa112799 2006 struct cds_ja_inode_flag **node_flag_ptr,
c112acaa 2007 struct cds_ja_inode_flag *node_flag,
5a9a87dd
MD
2008 struct cds_ja_node *node)
2009{
2010 struct cds_ja_shadow_node *shadow_node;
fa112799 2011 int ret = 0;
5a9a87dd 2012
3d8fe307 2013 shadow_node = rcuja_shadow_lookup_lock(ja->ht, parent_node_flag);
b306a0fe 2014 if (!shadow_node) {
2e313670 2015 return -EAGAIN;
b306a0fe 2016 }
c112acaa 2017 if (ja_node_ptr(*node_flag_ptr) != ja_node_ptr(node_flag)) {
fa112799
MD
2018 ret = -EAGAIN;
2019 goto end;
2020 }
03ec1aeb
MD
2021 /*
2022 * Add node to head of list. Safe against concurrent RCU read
2023 * traversals.
2024 */
2025 node->next = (struct cds_ja_node *) node_flag;
2026 rcu_assign_pointer(*node_flag_ptr, (struct cds_ja_inode_flag *) node);
fa112799 2027end:
5a9a87dd 2028 rcuja_shadow_unlock(shadow_node);
fa112799 2029 return ret;
5a9a87dd
MD
2030}
2031
75d573aa
MD
2032static
2033int _cds_ja_add(struct cds_ja *ja, uint64_t key,
2034 struct cds_ja_node *new_node,
2035 struct cds_ja_node **unique_node_ret)
5a9a87dd
MD
2036{
2037 unsigned int tree_depth, i;
48cbe001 2038 struct cds_ja_inode_flag *attach_node_flag,
5a9a87dd 2039 *parent_node_flag,
b62a8d0c 2040 *parent2_node_flag,
48cbe001
MD
2041 *node_flag,
2042 *parent_attach_node_flag;
2043 struct cds_ja_inode_flag **attach_node_flag_ptr,
2044 **parent_node_flag_ptr,
2045 **node_flag_ptr;
5a9a87dd
MD
2046 int ret;
2047
b306a0fe 2048 if (caa_unlikely(key > ja->key_max)) {
5a9a87dd 2049 return -EINVAL;
b306a0fe 2050 }
5a9a87dd
MD
2051 tree_depth = ja->tree_depth;
2052
2053retry:
a2a7ff59
MD
2054 dbg_printf("cds_ja_add attempt: key %" PRIu64 ", node %p\n",
2055 key, new_node);
5a9a87dd 2056 parent2_node_flag = NULL;
b0f74e47
MD
2057 parent_node_flag =
2058 (struct cds_ja_inode_flag *) &ja->root; /* Use root ptr address as key for mutex */
48cbe001 2059 parent_node_flag_ptr = NULL;
35170a44 2060 node_flag = rcu_dereference(ja->root);
48cbe001 2061 node_flag_ptr = &ja->root;
5a9a87dd
MD
2062
2063 /* Iterate on all internal levels */
a2a7ff59 2064 for (i = 1; i < tree_depth; i++) {
79b41067
MD
2065 uint8_t iter_key;
2066
48cbe001
MD
2067 if (!ja_node_ptr(node_flag))
2068 break;
2069 dbg_printf("cds_ja_add iter parent2_node_flag %p parent_node_flag %p node_flag_ptr %p node_flag %p\n",
2070 parent2_node_flag, parent_node_flag, node_flag_ptr, node_flag);
79b41067 2071 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1)));
5a9a87dd
MD
2072 parent2_node_flag = parent_node_flag;
2073 parent_node_flag = node_flag;
48cbe001 2074 parent_node_flag_ptr = node_flag_ptr;
5a9a87dd
MD
2075 node_flag = ja_node_get_nth(node_flag,
2076 &node_flag_ptr,
79b41067 2077 iter_key);
5a9a87dd
MD
2078 }
2079
2080 /*
48cbe001
MD
 2081	 * We reached either the bottom of the tree or an internal NULL
 2082	 * node: simply add the node to the last internal level, or chain
 2083	 * it if the key is already present.
5a9a87dd
MD
2084 */
2085 if (!ja_node_ptr(node_flag)) {
48cbe001
MD
2086 dbg_printf("cds_ja_add NULL parent2_node_flag %p parent_node_flag %p node_flag_ptr %p node_flag %p\n",
2087 parent2_node_flag, parent_node_flag, node_flag_ptr, node_flag);
75d573aa 2088
48cbe001
MD
2089 attach_node_flag = parent_node_flag;
2090 attach_node_flag_ptr = parent_node_flag_ptr;
2091 parent_attach_node_flag = parent2_node_flag;
2092
b0ca2d21 2093 ret = ja_attach_node(ja, attach_node_flag_ptr,
b62a8d0c 2094 attach_node_flag,
48cbe001
MD
2095 parent_attach_node_flag,
2096 node_flag_ptr,
2097 node_flag,
2098 key, i, new_node);
5a9a87dd 2099 } else {
75d573aa
MD
2100 if (unique_node_ret) {
2101 *unique_node_ret = (struct cds_ja_node *) ja_node_ptr(node_flag);
2102 return -EEXIST;
2103 }
2104
48cbe001
MD
2105 dbg_printf("cds_ja_add duplicate parent2_node_flag %p parent_node_flag %p node_flag_ptr %p node_flag %p\n",
2106 parent2_node_flag, parent_node_flag, node_flag_ptr, node_flag);
75d573aa 2107
48cbe001
MD
2108 attach_node_flag = node_flag;
2109 attach_node_flag_ptr = node_flag_ptr;
2110 parent_attach_node_flag = parent_node_flag;
2111
5a9a87dd 2112 ret = ja_chain_node(ja,
48cbe001
MD
2113 parent_attach_node_flag,
2114 attach_node_flag_ptr,
2115 attach_node_flag,
5a9a87dd
MD
2116 new_node);
2117 }
b306a0fe 2118 if (ret == -EAGAIN || ret == -EEXIST)
5a9a87dd 2119 goto retry;
48cbe001 2120
5a9a87dd 2121 return ret;
b4540e8a
MD
2122}
2123
75d573aa
MD
2124int cds_ja_add(struct cds_ja *ja, uint64_t key,
2125 struct cds_ja_node *new_node)
2126{
2127 return _cds_ja_add(ja, key, new_node, NULL);
2128}
2129
2130struct cds_ja_node *cds_ja_add_unique(struct cds_ja *ja, uint64_t key,
2131 struct cds_ja_node *new_node)
2132{
2133 int ret;
2134 struct cds_ja_node *ret_node;
2135
2136 ret = _cds_ja_add(ja, key, new_node, &ret_node);
2137 if (ret == -EEXIST)
2138 return ret_node;
2139 else
2140 return new_node;
2141}
2142
af3cbd45
MD
2143/*
 2144 * Note: there is no need to look up the pointer address associated with
 2145 * each node's nth item after taking the lock: it has already been done by
 2146 * cds_ja_del while holding the rcu read-side lock, and our node rules
 2147 * ensure that when a matching value -> pointer entry is found in a node, it is
2148 * _NEVER_ changed for that node without recompaction, and recompaction
2149 * reallocates the node.
b306a0fe
MD
2150 * However, when a child is removed from "linear" nodes, its pointer
2151 * is set to NULL. We therefore check, while holding the locks, if this
2152 * pointer is NULL, and return -ENOENT to the caller if it is the case.
4cef6f97
MD
2153 *
2154 * ja_detach_node() ensures that a lookup will _never_ see a branch that
2155 * leads to a dead-end: when removing branch, it makes sure to perform
2156 * the "cut" at the highest node that has only one child, effectively
2157 * replacing it with a NULL pointer.
af3cbd45 2158 */
35170a44
MD
2159static
2160int ja_detach_node(struct cds_ja *ja,
2161 struct cds_ja_inode_flag **snapshot,
af3cbd45
MD
2162 struct cds_ja_inode_flag ***snapshot_ptr,
2163 uint8_t *snapshot_n,
35170a44
MD
2164 int nr_snapshot,
2165 uint64_t key,
2166 struct cds_ja_node *node)
2167{
af3cbd45
MD
2168 struct cds_ja_shadow_node *shadow_nodes[JA_MAX_DEPTH];
2169 struct cds_ja_inode_flag **node_flag_ptr = NULL,
2170 *parent_node_flag = NULL,
2171 **parent_node_flag_ptr = NULL;
b62a8d0c 2172 struct cds_ja_inode_flag *iter_node_flag;
4d6ef45e
MD
2173 int ret, i, nr_shadow = 0, nr_clear = 0, nr_branch = 0;
2174 uint8_t n = 0;
35170a44 2175
4d6ef45e 2176 assert(nr_snapshot == ja->tree_depth + 1);
35170a44 2177
af3cbd45
MD
2178 /*
2179 * From the last internal level node going up, get the node
2180 * lock, check if the node has only one child left. If it is the
2181 * case, we continue iterating upward. When we reach a node
 2182	 * which has more than one child left, we lock the parent, and
2183 * proceed to the node deletion (removing its children too).
2184 */
4d6ef45e 2185 for (i = nr_snapshot - 2; i >= 1; i--) {
af3cbd45
MD
2186 struct cds_ja_shadow_node *shadow_node;
2187
2188 shadow_node = rcuja_shadow_lookup_lock(ja->ht,
3d8fe307 2189 snapshot[i]);
af3cbd45
MD
2190 if (!shadow_node) {
2191 ret = -EAGAIN;
2192 goto end;
2193 }
af3cbd45 2194 shadow_nodes[nr_shadow++] = shadow_node;
b62a8d0c
MD
2195
2196 /*
2197 * Check if node has been removed between RCU
2198 * lookup and lock acquisition.
2199 */
2200 assert(snapshot_ptr[i + 1]);
2201 if (ja_node_ptr(*snapshot_ptr[i + 1])
2202 != ja_node_ptr(snapshot[i + 1])) {
2203 ret = -ENOENT;
2204 goto end;
2205 }
2206
2207 assert(shadow_node->nr_child > 0);
d810c97f 2208 if (shadow_node->nr_child == 1 && i > 1)
4d6ef45e
MD
2209 nr_clear++;
2210 nr_branch++;
af3cbd45
MD
2211 if (shadow_node->nr_child > 1 || i == 1) {
2212 /* Lock parent and break */
2213 shadow_node = rcuja_shadow_lookup_lock(ja->ht,
3d8fe307 2214 snapshot[i - 1]);
af3cbd45
MD
2215 if (!shadow_node) {
2216 ret = -EAGAIN;
2217 goto end;
2218 }
2219 shadow_nodes[nr_shadow++] = shadow_node;
b62a8d0c 2220
c112acaa
MD
2221 /*
2222 * Check if node has been removed between RCU
2223 * lookup and lock acquisition.
2224 */
b62a8d0c
MD
2225 assert(snapshot_ptr[i]);
2226 if (ja_node_ptr(*snapshot_ptr[i])
2227 != ja_node_ptr(snapshot[i])) {
c112acaa
MD
2228 ret = -ENOENT;
2229 goto end;
2230 }
2231
b62a8d0c 2232 node_flag_ptr = snapshot_ptr[i + 1];
4d6ef45e
MD
2233 n = snapshot_n[i + 1];
2234 parent_node_flag_ptr = snapshot_ptr[i];
2235 parent_node_flag = snapshot[i];
c112acaa 2236
af3cbd45
MD
2237 if (i > 1) {
2238 /*
2239 * Lock parent's parent, in case we need
2240 * to recompact parent.
2241 */
2242 shadow_node = rcuja_shadow_lookup_lock(ja->ht,
3d8fe307 2243 snapshot[i - 2]);
af3cbd45
MD
2244 if (!shadow_node) {
2245 ret = -EAGAIN;
2246 goto end;
2247 }
2248 shadow_nodes[nr_shadow++] = shadow_node;
b62a8d0c
MD
2249
2250 /*
2251 * Check if node has been removed between RCU
2252 * lookup and lock acquisition.
2253 */
2254 assert(snapshot_ptr[i - 1]);
2255 if (ja_node_ptr(*snapshot_ptr[i - 1])
2256 != ja_node_ptr(snapshot[i - 1])) {
2257 ret = -ENOENT;
2258 goto end;
2259 }
af3cbd45 2260 }
b62a8d0c 2261
af3cbd45
MD
2262 break;
2263 }
2264 }
2265
2266 /*
4d6ef45e
MD
 2267	 * At this point, we want to free all the nodes that are about to
 2268	 * be removed from shadow_nodes (except the last one, which is
 2269	 * either the root or the parent of the topmost node with one
b62a8d0c
MD
2270 * child). OK to free lock here, because RCU read lock is held,
2271 * and free only performed in call_rcu.
af3cbd45
MD
2272 */
2273
2274 for (i = 0; i < nr_clear; i++) {
2275 ret = rcuja_shadow_clear(ja->ht,
3d8fe307 2276 shadow_nodes[i]->node_flag,
af3cbd45
MD
2277 shadow_nodes[i],
2278 RCUJA_SHADOW_CLEAR_FREE_NODE
2279 | RCUJA_SHADOW_CLEAR_FREE_LOCK);
2280 assert(!ret);
2281 }
2282
2283 iter_node_flag = parent_node_flag;
2284 /* Remove from parent */
2285 ret = ja_node_clear_ptr(ja,
2286 node_flag_ptr, /* Pointer to location to nullify */
 2287			&iter_node_flag,	/* In: old parent ptr; out: new parent ptr (if recompacted) */
4d6ef45e 2288 shadow_nodes[nr_branch - 1], /* of parent */
48cbe001 2289 n, nr_branch - 1);
b306a0fe
MD
2290 if (ret)
2291 goto end;
af3cbd45 2292
4d6ef45e
MD
2293 dbg_printf("ja_detach_node: publish %p instead of %p\n",
2294 iter_node_flag, *parent_node_flag_ptr);
af3cbd45
MD
2295 /* Update address of parent ptr in its parent */
2296 rcu_assign_pointer(*parent_node_flag_ptr, iter_node_flag);
2297
2298end:
2299 for (i = 0; i < nr_shadow; i++)
2300 rcuja_shadow_unlock(shadow_nodes[i]);
35170a44
MD
2301 return ret;
2302}
2303
af3cbd45
MD
2304static
2305int ja_unchain_node(struct cds_ja *ja,
2306 struct cds_ja_inode_flag *parent_node_flag,
fa112799 2307 struct cds_ja_inode_flag **node_flag_ptr,
013a6083 2308 struct cds_ja_inode_flag *node_flag,
af3cbd45
MD
2309 struct cds_ja_node *node)
2310{
2311 struct cds_ja_shadow_node *shadow_node;
03ec1aeb 2312 struct cds_ja_node *iter_node, **iter_node_ptr, **prev_node_ptr = NULL;
013a6083 2313 int ret = 0, count = 0, found = 0;
af3cbd45 2314
3d8fe307 2315 shadow_node = rcuja_shadow_lookup_lock(ja->ht, parent_node_flag);
af3cbd45
MD
2316 if (!shadow_node)
2317 return -EAGAIN;
013a6083 2318 if (ja_node_ptr(*node_flag_ptr) != ja_node_ptr(node_flag)) {
fa112799
MD
2319 ret = -EAGAIN;
2320 goto end;
2321 }
af3cbd45 2322 /*
03ec1aeb
MD
2323 * Find the previous node's next pointer pointing to our node,
 2324	 * so we can update it. Retry if another thread removed all but
 2325	 * one of the duplicates since the check (that check was performed
 2326	 * without the lock). Ensure that the node we are about to remove is
 2327	 * still in the list (while holding the lock). No need for RCU
2328 * traversal here since we hold the lock on the parent.
af3cbd45 2329 */
03ec1aeb
MD
2330 iter_node_ptr = (struct cds_ja_node **) node_flag_ptr;
2331 iter_node = (struct cds_ja_node *) ja_node_ptr(node_flag);
2332 cds_ja_for_each_duplicate(iter_node) {
f2758d14 2333 count++;
03ec1aeb
MD
2334 if (iter_node == node) {
2335 prev_node_ptr = iter_node_ptr;
013a6083 2336 found++;
03ec1aeb
MD
2337 }
2338 iter_node_ptr = &iter_node->next;
f2758d14 2339 }
013a6083
MD
2340 assert(found <= 1);
2341 if (!found || count == 1) {
af3cbd45
MD
2342 ret = -EAGAIN;
2343 goto end;
2344 }
03ec1aeb 2345 CMM_STORE_SHARED(*prev_node_ptr, node->next);
ade342cb
MD
2346 /*
2347 * Validate that we indeed removed the node from linked list.
2348 */
2349 assert(ja_node_ptr(*node_flag_ptr) != (struct cds_ja_inode *) node);
af3cbd45
MD
2350end:
2351 rcuja_shadow_unlock(shadow_node);
2352 return ret;
2353}
2354
2355/*
2356 * Called with RCU read lock held.
2357 */
35170a44
MD
2358int cds_ja_del(struct cds_ja *ja, uint64_t key,
2359 struct cds_ja_node *node)
2360{
2361 unsigned int tree_depth, i;
2362 struct cds_ja_inode_flag *snapshot[JA_MAX_DEPTH];
af3cbd45
MD
2363 struct cds_ja_inode_flag **snapshot_ptr[JA_MAX_DEPTH];
2364 uint8_t snapshot_n[JA_MAX_DEPTH];
35170a44 2365 struct cds_ja_inode_flag *node_flag;
fa112799
MD
2366 struct cds_ja_inode_flag **prev_node_flag_ptr,
2367 **node_flag_ptr;
4d6ef45e 2368 int nr_snapshot;
35170a44
MD
2369 int ret;
2370
2371 if (caa_unlikely(key > ja->key_max))
2372 return -EINVAL;
2373 tree_depth = ja->tree_depth;
2374
2375retry:
4d6ef45e 2376 nr_snapshot = 0;
35170a44
MD
2377 dbg_printf("cds_ja_del attempt: key %" PRIu64 ", node %p\n",
2378 key, node);
2379
2380 /* snapshot for level 0 is only for shadow node lookup */
4d6ef45e
MD
2381 snapshot_n[0] = 0;
2382 snapshot_n[1] = 0;
af3cbd45 2383 snapshot_ptr[nr_snapshot] = NULL;
35170a44
MD
2384 snapshot[nr_snapshot++] = (struct cds_ja_inode_flag *) &ja->root;
2385 node_flag = rcu_dereference(ja->root);
af3cbd45 2386 prev_node_flag_ptr = &ja->root;
fa112799 2387 node_flag_ptr = &ja->root;
35170a44
MD
2388
2389 /* Iterate on all internal levels */
2390 for (i = 1; i < tree_depth; i++) {
2391 uint8_t iter_key;
2392
2393 dbg_printf("cds_ja_del iter node_flag %p\n",
2394 node_flag);
2395 if (!ja_node_ptr(node_flag)) {
2396 return -ENOENT;
2397 }
35170a44 2398 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1)));
4d6ef45e 2399 snapshot_n[nr_snapshot + 1] = iter_key;
af3cbd45
MD
2400 snapshot_ptr[nr_snapshot] = prev_node_flag_ptr;
2401 snapshot[nr_snapshot++] = node_flag;
35170a44 2402 node_flag = ja_node_get_nth(node_flag,
fa112799 2403 &node_flag_ptr,
35170a44 2404 iter_key);
48cbe001
MD
2405 if (node_flag)
2406 prev_node_flag_ptr = node_flag_ptr;
af3cbd45
MD
2407 dbg_printf("cds_ja_del iter key lookup %u finds node_flag %p, prev_node_flag_ptr %p\n",
2408 (unsigned int) iter_key, node_flag,
2409 prev_node_flag_ptr);
35170a44 2410 }
35170a44
MD
2411 /*
2412 * We reached bottom of tree, try to find the node we are trying
2413 * to remove. Fail if we cannot find it.
2414 */
2415 if (!ja_node_ptr(node_flag)) {
4d6ef45e
MD
2416 dbg_printf("cds_ja_del: no node found for key %" PRIu64 "\n",
2417 key);
35170a44
MD
2418 return -ENOENT;
2419 } else {
03ec1aeb 2420 struct cds_ja_node *iter_node, *match = NULL;
af3cbd45 2421 int count = 0;
35170a44 2422
03ec1aeb
MD
2423 iter_node = (struct cds_ja_node *) ja_node_ptr(node_flag);
2424 cds_ja_for_each_duplicate_rcu(iter_node) {
2425 dbg_printf("cds_ja_del: compare %p with iter_node %p\n", node, iter_node);
2426 if (iter_node == node)
2427 match = iter_node;
af3cbd45 2428 count++;
35170a44 2429 }
03ec1aeb 2430
4d6ef45e
MD
2431 if (!match) {
2432 dbg_printf("cds_ja_del: no node match for node %p key %" PRIu64 "\n", node, key);
35170a44 2433 return -ENOENT;
4d6ef45e 2434 }
af3cbd45
MD
2435 assert(count > 0);
2436 if (count == 1) {
2437 /*
4d6ef45e
MD
2438 * Removing last of duplicates. Last snapshot
 2439			 * does not have a shadow node (external leaves).
af3cbd45
MD
2440 */
2441 snapshot_ptr[nr_snapshot] = prev_node_flag_ptr;
2442 snapshot[nr_snapshot++] = node_flag;
2443 ret = ja_detach_node(ja, snapshot, snapshot_ptr,
2444 snapshot_n, nr_snapshot, key, node);
2445 } else {
f2758d14 2446 ret = ja_unchain_node(ja, snapshot[nr_snapshot - 1],
013a6083 2447 node_flag_ptr, node_flag, match);
af3cbd45 2448 }
35170a44 2449 }
b306a0fe
MD
2450 /*
2451 * Explanation of -ENOENT handling: caused by concurrent delete
2452 * between RCU lookup and actual removal. Need to re-do the
2453 * lookup and removal attempt.
2454 */
2455 if (ret == -EAGAIN || ret == -ENOENT)
35170a44
MD
2456 goto retry;
2457 return ret;
2458}
2459
b4540e8a
MD
2460struct cds_ja *_cds_ja_new(unsigned int key_bits,
2461 const struct rcu_flavor_struct *flavor)
be9a7474
MD
2462{
2463 struct cds_ja *ja;
b0f74e47 2464 int ret;
f07b240f 2465 struct cds_ja_shadow_node *root_shadow_node;
be9a7474
MD
2466
2467 ja = calloc(sizeof(*ja), 1);
2468 if (!ja)
2469 goto ja_error;
b4540e8a
MD
2470
2471 switch (key_bits) {
2472 case 8:
b4540e8a 2473 case 16:
1216b3d2 2474 case 24:
b4540e8a 2475 case 32:
1216b3d2
MD
2476 case 40:
2477 case 48:
2478 case 56:
2479 ja->key_max = (1ULL << key_bits) - 1;
b4540e8a
MD
2480 break;
2481 case 64:
2482 ja->key_max = UINT64_MAX;
2483 break;
2484 default:
2485 goto check_error;
2486 }
2487
be9a7474 2488 /* ja->root is NULL */
5a9a87dd 2489 /* tree_depth 0 is for pointer to root node */
582a6ade 2490 ja->tree_depth = (key_bits >> JA_LOG2_BITS_PER_BYTE) + 1;
a2a7ff59 2491 assert(ja->tree_depth <= JA_MAX_DEPTH);
be9a7474
MD
2492 ja->ht = rcuja_create_ht(flavor);
2493 if (!ja->ht)
2494 goto ht_error;
b0f74e47
MD
2495
2496 /*
2497 * Note: we should not free this node until judy array destroy.
2498 */
f07b240f 2499 root_shadow_node = rcuja_shadow_set(ja->ht,
3d8fe307 2500 (struct cds_ja_inode_flag *) &ja->root,
48cbe001 2501 NULL, ja, 0);
f07b240f
MD
2502 if (!root_shadow_node) {
2503 ret = -ENOMEM;
b0f74e47 2504 goto ht_node_error;
f07b240f 2505 }
b0f74e47 2506
be9a7474
MD
2507 return ja;
2508
b0f74e47
MD
2509ht_node_error:
2510 ret = rcuja_delete_ht(ja->ht);
2511 assert(!ret);
be9a7474 2512ht_error:
b4540e8a 2513check_error:
be9a7474
MD
2514 free(ja);
2515ja_error:
2516 return NULL;
2517}
2518
3d8fe307
MD
2519/*
2520 * Called from RCU read-side CS.
2521 */
2522__attribute__((visibility("protected")))
2523void rcuja_free_all_children(struct cds_ja_shadow_node *shadow_node,
2524 struct cds_ja_inode_flag *node_flag,
21ac4c56 2525 void (*rcu_free_node)(struct cds_ja_node *node))
3d8fe307 2526{
3d8fe307
MD
2527 unsigned int type_index;
2528 struct cds_ja_inode *node;
2529 const struct cds_ja_type *type;
2530
3d8fe307
MD
2531 node = ja_node_ptr(node_flag);
2532 assert(node != NULL);
2533 type_index = ja_node_type(node_flag);
2534 type = &ja_types[type_index];
2535
2536 switch (type->type_class) {
2537 case RCU_JA_LINEAR:
2538 {
2539 uint8_t nr_child =
2540 ja_linear_node_get_nr_child(type, node);
2541 unsigned int i;
2542
2543 for (i = 0; i < nr_child; i++) {
2544 struct cds_ja_inode_flag *iter;
03ec1aeb 2545 struct cds_ja_node *node_iter, *n;
3d8fe307
MD
2546 uint8_t v;
2547
2548 ja_linear_node_get_ith_pos(type, node, i, &v, &iter);
03ec1aeb
MD
2549 node_iter = (struct cds_ja_node *) iter;
2550 cds_ja_for_each_duplicate_safe(node_iter, n) {
2551 rcu_free_node(node_iter);
3d8fe307
MD
2552 }
2553 }
2554 break;
2555 }
2556 case RCU_JA_POOL:
2557 {
2558 unsigned int pool_nr;
2559
2560 for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) {
2561 struct cds_ja_inode *pool =
2562 ja_pool_node_get_ith_pool(type, node, pool_nr);
2563 uint8_t nr_child =
2564 ja_linear_node_get_nr_child(type, pool);
2565 unsigned int j;
2566
2567 for (j = 0; j < nr_child; j++) {
2568 struct cds_ja_inode_flag *iter;
03ec1aeb 2569 struct cds_ja_node *node_iter, *n;
3d8fe307
MD
2570 uint8_t v;
2571
75d573aa 2572 ja_linear_node_get_ith_pos(type, pool, j, &v, &iter);
03ec1aeb
MD
2573 node_iter = (struct cds_ja_node *) iter;
2574 cds_ja_for_each_duplicate_safe(node_iter, n) {
2575 rcu_free_node(node_iter);
3d8fe307
MD
2576 }
2577 }
2578 }
2579 break;
2580 }
2581 case RCU_JA_NULL:
2582 break;
2583 case RCU_JA_PIGEON:
2584 {
3d8fe307
MD
2585 unsigned int i;
2586
48cbe001 2587 for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
3d8fe307 2588 struct cds_ja_inode_flag *iter;
03ec1aeb 2589 struct cds_ja_node *node_iter, *n;
3d8fe307
MD
2590
2591 iter = ja_pigeon_node_get_ith_pos(type, node, i);
03ec1aeb
MD
2592 node_iter = (struct cds_ja_node *) iter;
2593 cds_ja_for_each_duplicate_safe(node_iter, n) {
2594 rcu_free_node(node_iter);
3d8fe307
MD
2595 }
2596 }
2597 break;
2598 }
2599 default:
2600 assert(0);
2601 }
2602}
2603
19ddcd04 2604static
354981c2 2605void print_debug_fallback_distribution(struct cds_ja *ja)
19ddcd04
MD
2606{
2607 int i;
2608
2609 fprintf(stderr, "Fallback node distribution:\n");
2610 for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
354981c2 2611 if (!ja->node_fallback_count_distribution[i])
19ddcd04
MD
2612 continue;
2613 fprintf(stderr, " %3u: %4lu\n",
354981c2 2614 i, ja->node_fallback_count_distribution[i]);
19ddcd04
MD
2615 }
2616}
2617
021c72c0 2618static
19a748d9 2619int ja_final_checks(struct cds_ja *ja)
021c72c0
MD
2620{
2621 double fallback_ratio;
2622 unsigned long na, nf, nr_fallback;
19a748d9 2623 int ret = 0;
021c72c0
MD
2624
2625 fallback_ratio = (double) uatomic_read(&ja->nr_fallback);
2626 fallback_ratio /= (double) uatomic_read(&ja->nr_nodes_allocated);
2627 nr_fallback = uatomic_read(&ja->nr_fallback);
2628 if (nr_fallback)
2629 fprintf(stderr,
2630 "[warning] RCU Judy Array used %lu fallback node(s) (ratio: %g)\n",
2631 uatomic_read(&ja->nr_fallback),
2632 fallback_ratio);
2633
2634 na = uatomic_read(&ja->nr_nodes_allocated);
2635 nf = uatomic_read(&ja->nr_nodes_freed);
19a748d9
MD
2636 dbg_printf("Nodes allocated: %lu, Nodes freed: %lu.\n", na, nf);
2637 if (nr_fallback)
2638 print_debug_fallback_distribution(ja);
2639
021c72c0
MD
2640 if (na != nf) {
2641 fprintf(stderr, "[error] Judy array leaked %ld nodes. Allocated: %lu, freed: %lu.\n",
2642 (long) na - nf, na, nf);
19a748d9 2643 ret = -1;
021c72c0 2644 }
19a748d9 2645 return ret;
021c72c0
MD
2646}
2647
be9a7474
MD
2648/*
2649 * There should be no more concurrent add to the judy array while it is
2650 * being destroyed (ensured by the caller).
2651 */
3d8fe307 2652int cds_ja_destroy(struct cds_ja *ja,
21ac4c56 2653 void (*rcu_free_node)(struct cds_ja_node *node))
be9a7474 2654{
48cbe001 2655 const struct rcu_flavor_struct *flavor;
b4540e8a
MD
2656 int ret;
2657
48cbe001 2658 flavor = cds_lfht_rcu_flavor(ja->ht);
be9a7474 2659 rcuja_shadow_prune(ja->ht,
3d8fe307 2660 RCUJA_SHADOW_CLEAR_FREE_NODE | RCUJA_SHADOW_CLEAR_FREE_LOCK,
21ac4c56 2661 rcu_free_node);
48cbe001 2662 flavor->thread_offline();
b4540e8a
MD
2663 ret = rcuja_delete_ht(ja->ht);
2664 if (ret)
2665 return ret;
f2ae7af7
MD
2666
2667 /* Wait for in-flight call_rcu free to complete. */
2668 flavor->barrier();
2669
48cbe001 2670 flavor->thread_online();
19a748d9 2671 ret = ja_final_checks(ja);
b4540e8a 2672 free(ja);
19a748d9 2673 return ret;
be9a7474 2674}
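
/*
 * Illustrative usage sketch (not part of the original file): tearing
 * down a judy array once the caller has stopped all concurrent adds.
 * my_free_node() and my_teardown() are hypothetical names; the
 * callback receives each user node still present and here frees it
 * directly, assuming no reader can still hold a reference at destroy
 * time (which is the caller's responsibility).
 */
static void my_free_node(struct cds_ja_node *node)
{
	free(caa_container_of(node, struct my_entry, ja_node));
}

static int my_teardown(struct cds_ja *ja)
{
	return cds_ja_destroy(ja, my_free_node);
}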