rcuja: implement ja_node_ptr fast path
[urcu.git] / rcuja / rcuja.c
1/*
2 * rcuja/rcuja.c
3 *
4 * Userspace RCU library - RCU Judy Array
5 *
6 * Copyright 2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 */
22
195e72d3 23#define _LGPL_SOURCE
e5227865 24#include <stdint.h>
8e519e3c 25#include <errno.h>
d68c6810 26#include <limits.h>
b1a90ce3 27#include <string.h>
61009379 28#include <urcu/rcuja.h>
d68c6810
MD
29#include <urcu/compiler.h>
30#include <urcu/arch.h>
31#include <assert.h>
8e519e3c 32#include <urcu-pointer.h>
f07b240f 33#include <urcu/uatomic.h>
b4540e8a 34#include <stdint.h>
8e519e3c 35
61009379
MD
36#include "rcuja-internal.h"
37
b1a90ce3
MD
38#ifndef abs
39#define abs_int(a) ((int) (a) > 0 ? (int) (a) : -((int) (a)))
40#endif
41
d96bfb0d 42enum cds_ja_type_class {
e5227865 43 RCU_JA_LINEAR = 0, /* Type A */
fd800776
MD
44 /* 32-bit: 1 to 25 children, 8 to 128 bytes */
45 /* 64-bit: 1 to 28 children, 16 to 256 bytes */
46 RCU_JA_POOL = 1, /* Type B */
47 /* 32-bit: 26 to 100 children, 256 to 512 bytes */
48 /* 64-bit: 29 to 112 children, 512 to 1024 bytes */
e5227865 49 RCU_JA_PIGEON = 2, /* Type C */
fd800776
MD
50 /* 32-bit: 101 to 256 children, 1024 bytes */
51 /* 64-bit: 113 to 256 children, 2048 bytes */
e5227865 52 /* Leaf nodes are implicit from their height in the tree */
1db4943c 53 RCU_JA_NR_TYPES,
e1db2db5
MD
54
55 RCU_JA_NULL, /* not an encoded type, but keeps code regular */
e5227865
MD
56};
57
d96bfb0d
MD
58struct cds_ja_type {
59 enum cds_ja_type_class type_class;
8e519e3c
MD
60 uint16_t min_child; /* minimum number of children: 1 to 256 */
61 uint16_t max_child; /* maximum number of children: 1 to 256 */
62 uint16_t max_linear_child; /* per-pool max nr. children: 1 to 256 */
63 uint16_t order; /* node size is (1 << order), in bytes */
fd800776
MD
64 uint16_t nr_pool_order; /* number of pools */
65 uint16_t pool_size_order; /* pool size */
e5227865
MD
66};
67
/*
 * Iteration on the array to find the right node size for the number of
 * children stops when it reaches .max_child == 256 (this is the largest
 * possible node size, which contains 256 children).
 * The min_child overlaps with the previous max_child to provide a
 * hysteresis loop for reallocation under patterns of cyclic add/removal
 * within the same node.
 * The index of a node within the following arrays is represented on 3
 * bits. It identifies the node type, the min/max number of children,
 * and the size order.
 * The max_child values for RCU_JA_POOL below result from statistical
 * approximation: over a million generated populations, these max_child
 * values cover between 97% and 99% of the populations. A fallback
 * therefore needs to exist to cover the rare cases of extreme
 * population unbalance, but it will not have a major impact on speed
 * nor space consumption, since those cases are rare.
 */
e5227865 85
d68c6810
MD
86#if (CAA_BITS_PER_LONG < 64)
87/* 32-bit pointers */
1db4943c
MD
88enum {
89 ja_type_0_max_child = 1,
90 ja_type_1_max_child = 3,
91 ja_type_2_max_child = 6,
92 ja_type_3_max_child = 12,
93 ja_type_4_max_child = 25,
94 ja_type_5_max_child = 48,
95 ja_type_6_max_child = 92,
96 ja_type_7_max_child = 256,
e1db2db5 97 ja_type_8_max_child = 0, /* NULL */
1db4943c
MD
98};
99
8e519e3c
MD
100enum {
101 ja_type_0_max_linear_child = 1,
102 ja_type_1_max_linear_child = 3,
103 ja_type_2_max_linear_child = 6,
104 ja_type_3_max_linear_child = 12,
105 ja_type_4_max_linear_child = 25,
106 ja_type_5_max_linear_child = 24,
107 ja_type_6_max_linear_child = 23,
108};
109
1db4943c
MD
110enum {
111 ja_type_5_nr_pool_order = 1,
112 ja_type_6_nr_pool_order = 2,
113};
114
d96bfb0d 115const struct cds_ja_type ja_types[] = {
8e519e3c
MD
116 { .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_0_max_child, .max_linear_child = ja_type_0_max_linear_child, .order = 3, },
117 { .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_1_max_child, .max_linear_child = ja_type_1_max_linear_child, .order = 4, },
118 { .type_class = RCU_JA_LINEAR, .min_child = 3, .max_child = ja_type_2_max_child, .max_linear_child = ja_type_2_max_linear_child, .order = 5, },
119 { .type_class = RCU_JA_LINEAR, .min_child = 4, .max_child = ja_type_3_max_child, .max_linear_child = ja_type_3_max_linear_child, .order = 6, },
120 { .type_class = RCU_JA_LINEAR, .min_child = 10, .max_child = ja_type_4_max_child, .max_linear_child = ja_type_4_max_linear_child, .order = 7, },
e5227865 121
fd800776 122 /* Pools may fill sooner than max_child */
1cee749c 123 /* This pool is hardcoded at index 5. See ja_node_ptr(). */
8e519e3c 124 { .type_class = RCU_JA_POOL, .min_child = 20, .max_child = ja_type_5_max_child, .max_linear_child = ja_type_5_max_linear_child, .order = 8, .nr_pool_order = ja_type_5_nr_pool_order, .pool_size_order = 7, },
1cee749c 125 /* This pool is hardcoded at index 6. See ja_node_ptr(). */
8e519e3c 126 { .type_class = RCU_JA_POOL, .min_child = 45, .max_child = ja_type_6_max_child, .max_linear_child = ja_type_6_max_linear_child, .order = 9, .nr_pool_order = ja_type_6_nr_pool_order, .pool_size_order = 7, },
3d45251f
MD
127
128 /*
b1a90ce3
MD
129 * Upon node removal below min_child, if child pool is filled
130 * beyond capacity, we roll back to pigeon.
3d45251f 131 */
58c16c03 132 { .type_class = RCU_JA_PIGEON, .min_child = 83, .max_child = ja_type_7_max_child, .order = 10, },
e1db2db5
MD
133
134 { .type_class = RCU_JA_NULL, .min_child = 0, .max_child = ja_type_8_max_child, },
d68c6810 135};
d68c6810
MD
136#else /* !(CAA_BITS_PER_LONG < 64) */
137/* 64-bit pointers */
1db4943c
MD
138enum {
139 ja_type_0_max_child = 1,
140 ja_type_1_max_child = 3,
141 ja_type_2_max_child = 7,
142 ja_type_3_max_child = 14,
143 ja_type_4_max_child = 28,
144 ja_type_5_max_child = 54,
145 ja_type_6_max_child = 104,
146 ja_type_7_max_child = 256,
e1db2db5 147 ja_type_8_max_child = 256,
1db4943c
MD
148};
149
8e519e3c
MD
150enum {
151 ja_type_0_max_linear_child = 1,
152 ja_type_1_max_linear_child = 3,
153 ja_type_2_max_linear_child = 7,
154 ja_type_3_max_linear_child = 14,
155 ja_type_4_max_linear_child = 28,
156 ja_type_5_max_linear_child = 27,
157 ja_type_6_max_linear_child = 26,
158};
159
1db4943c
MD
160enum {
161 ja_type_5_nr_pool_order = 1,
162 ja_type_6_nr_pool_order = 2,
163};
164
d96bfb0d 165const struct cds_ja_type ja_types[] = {
8e519e3c
MD
166 { .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_0_max_child, .max_linear_child = ja_type_0_max_linear_child, .order = 4, },
167 { .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_1_max_child, .max_linear_child = ja_type_1_max_linear_child, .order = 5, },
168 { .type_class = RCU_JA_LINEAR, .min_child = 3, .max_child = ja_type_2_max_child, .max_linear_child = ja_type_2_max_linear_child, .order = 6, },
169 { .type_class = RCU_JA_LINEAR, .min_child = 5, .max_child = ja_type_3_max_child, .max_linear_child = ja_type_3_max_linear_child, .order = 7, },
170 { .type_class = RCU_JA_LINEAR, .min_child = 10, .max_child = ja_type_4_max_child, .max_linear_child = ja_type_4_max_linear_child, .order = 8, },
e5227865 171
3d45251f 172 /* Pools may fill sooner than max_child. */
1cee749c 173 /* This pool is hardcoded at index 5. See ja_node_ptr(). */
8e519e3c 174 { .type_class = RCU_JA_POOL, .min_child = 22, .max_child = ja_type_5_max_child, .max_linear_child = ja_type_5_max_linear_child, .order = 9, .nr_pool_order = ja_type_5_nr_pool_order, .pool_size_order = 8, },
1cee749c 175 /* This pool is hardcoded at index 6. See ja_node_ptr(). */
8e519e3c 176 { .type_class = RCU_JA_POOL, .min_child = 51, .max_child = ja_type_6_max_child, .max_linear_child = ja_type_6_max_linear_child, .order = 10, .nr_pool_order = ja_type_6_nr_pool_order, .pool_size_order = 8, },
e5227865 177
3d45251f 178 /*
b1a90ce3
MD
179 * Upon node removal below min_child, if child pool is filled
180 * beyond capacity, we roll back to pigeon.
3d45251f 181 */
64457f6c 182 { .type_class = RCU_JA_PIGEON, .min_child = 95, .max_child = ja_type_7_max_child, .order = 11, },
e1db2db5
MD
183
184 { .type_class = RCU_JA_NULL, .min_child = 0, .max_child = ja_type_8_max_child, },
e5227865 185};
d68c6810 186#endif /* !(BITS_PER_LONG < 64) */
e5227865 187
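/*
 * Illustrative sketch (unused, and a deliberate simplification, not the
 * original resizing algorithm): pick the smallest node type able to
 * hold a given number of children by scanning ja_types[]. The real
 * logic, find_nearest_type_index() further down, additionally honours
 * the min_child hysteresis described above.
 */
static inline __attribute__((unused))
unsigned int ja_example_smallest_type_index(unsigned int nr_children)
{
	unsigned int i;

	for (i = 0; i < CAA_ARRAY_SIZE(ja_types); i++) {
		if (ja_types[i].type_class == RCU_JA_NULL)
			break;
		if (nr_children <= ja_types[i].max_child)
			return i;
	}
	return i;	/* No encoded type can hold that many children. */
}
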
1db4943c
MD
188static inline __attribute__((unused))
189void static_array_size_check(void)
190{
e1db2db5 191 CAA_BUILD_BUG_ON(CAA_ARRAY_SIZE(ja_types) < JA_TYPE_MAX_NR);
1db4943c
MD
192}
193
e5227865 194/*
 * The cds_ja_node contains the compressed node data needed for the
 * read-side. For linear and pool node configurations, it starts with a
 * byte counting the number of children in the node. Then, the
 * node-specific data is placed.
 * The node mutex, if one is needed to protect concurrent updates of a
 * given node, is placed in a separate hash table indexed by node
 * address.
 * For the pigeon configuration, the number of children is also kept in
 * a separate hash table, indexed by node address, because it is only
 * required for updates.
 */
1db4943c 205
ff38c745
MD
206#define DECLARE_LINEAR_NODE(index) \
207 struct { \
208 uint8_t nr_child; \
209 uint8_t child_value[ja_type_## index ##_max_linear_child]; \
b4540e8a 210 struct cds_ja_inode_flag *child_ptr[ja_type_## index ##_max_linear_child]; \
ff38c745
MD
211 }
212
213#define DECLARE_POOL_NODE(index) \
214 struct { \
215 struct { \
216 uint8_t nr_child; \
217 uint8_t child_value[ja_type_## index ##_max_linear_child]; \
b4540e8a 218 struct cds_ja_inode_flag *child_ptr[ja_type_## index ##_max_linear_child]; \
ff38c745
MD
219 } linear[1U << ja_type_## index ##_nr_pool_order]; \
220 }
1db4943c 221
b4540e8a 222struct cds_ja_inode {
1db4943c
MD
223 union {
224 /* Linear configuration */
225 DECLARE_LINEAR_NODE(0) conf_0;
226 DECLARE_LINEAR_NODE(1) conf_1;
227 DECLARE_LINEAR_NODE(2) conf_2;
228 DECLARE_LINEAR_NODE(3) conf_3;
229 DECLARE_LINEAR_NODE(4) conf_4;
230
231 /* Pool configuration */
232 DECLARE_POOL_NODE(5) conf_5;
233 DECLARE_POOL_NODE(6) conf_6;
234
235 /* Pigeon configuration */
236 struct {
b4540e8a 237 struct cds_ja_inode_flag *child[ja_type_7_max_child];
1db4943c
MD
238 } conf_7;
239 /* data aliasing nodes for computed accesses */
b4540e8a 240 uint8_t data[sizeof(struct cds_ja_inode_flag *) * ja_type_7_max_child];
1db4943c 241 } u;
e5227865
MD
242};
243
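/*
 * Illustrative sketch (unused, a simplified helper that is not part of
 * the implementation): where the child value bytes and the child
 * pointer array live inside a linear node, or inside one linear
 * sub-pool of a pool node. The real accessors below obtain the same
 * address through align_ptr_size().
 */
static inline __attribute__((unused))
struct cds_ja_inode_flag **ja_example_linear_child_ptrs(const struct cds_ja_type *type,
		struct cds_ja_inode *node)
{
	/* u.data[0] is nr_child, u.data[1 ..] holds the child values. */
	uint8_t *values = &node->u.data[1];
	unsigned long p = (unsigned long) &values[type->max_linear_child];

	/* Round up to pointer alignment: the pointer array follows. */
	p = (p + sizeof(void *) - 1) & ~(sizeof(void *) - 1);
	return (struct cds_ja_inode_flag **) p;
}
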
2e313670 244enum ja_recompact {
19ddcd04
MD
245 JA_RECOMPACT_ADD_SAME,
246 JA_RECOMPACT_ADD_NEXT,
2e313670
MD
247 JA_RECOMPACT_DEL,
248};
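
/*
 * The recompaction modes above: JA_RECOMPACT_ADD_SAME recompacts into
 * the same node size to garbage-collect NULL slots left by prior
 * removals (the -ERANGE path of ja_linear_node_set_nth()),
 * JA_RECOMPACT_ADD_NEXT grows the node to the next type when it is
 * genuinely full (-ENOSPC), and JA_RECOMPACT_DEL shrinks a node whose
 * population falls below min_child (-EFBIG).
 */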
249
b1a90ce3
MD
250static
251struct cds_ja_inode *_ja_node_mask_ptr(struct cds_ja_inode_flag *node)
252{
253 return (struct cds_ja_inode *) (((unsigned long) node) & JA_PTR_MASK);
254}
255
256unsigned long ja_node_type(struct cds_ja_inode_flag *node)
257{
258 unsigned long type;
259
260 if (_ja_node_mask_ptr(node) == NULL) {
261 return NODE_INDEX_NULL;
262 }
263 type = (unsigned int) ((unsigned long) node & JA_TYPE_MASK);
264 assert(type < (1UL << JA_TYPE_BITS));
265 return type;
266}
267
354981c2
MD
268static
269struct cds_ja_inode *alloc_cds_ja_node(struct cds_ja *ja,
270 const struct cds_ja_type *ja_type)
e5227865 271{
b1a90ce3
MD
272 size_t len = 1U << ja_type->order;
273 void *p;
274 int ret;
275
276 ret = posix_memalign(&p, len, len);
277 if (ret || !p) {
278 return NULL;
279 }
280 memset(p, 0, len);
354981c2 281 uatomic_inc(&ja->nr_nodes_allocated);
b1a90ce3 282 return p;
e5227865
MD
283}
284
354981c2 285void free_cds_ja_node(struct cds_ja *ja, struct cds_ja_inode *node)
e5227865
MD
286{
287 free(node);
48cbe001 288 if (node)
354981c2 289 uatomic_inc(&ja->nr_nodes_freed);
e5227865
MD
290}
291
d68c6810
MD
292#define __JA_ALIGN_MASK(v, mask) (((v) + (mask)) & ~(mask))
293#define JA_ALIGN(v, align) __JA_ALIGN_MASK(v, (typeof(v)) (align) - 1)
294#define __JA_FLOOR_MASK(v, mask) ((v) & ~(mask))
295#define JA_FLOOR(v, align) __JA_FLOOR_MASK(v, (typeof(v)) (align) - 1)
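
/*
 * Worked example (illustrative): with align = 8, JA_ALIGN(13, 8) == 16
 * and JA_FLOOR(13, 8) == 8. align_ptr_size() below uses JA_ALIGN with
 * sizeof(void *) so that the child pointer array stored after the child
 * value bytes of a linear node is naturally aligned.
 */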
296
297static
1db4943c 298uint8_t *align_ptr_size(uint8_t *ptr)
d68c6810 299{
1db4943c 300 return (uint8_t *) JA_ALIGN((unsigned long) ptr, sizeof(void *));
d68c6810
MD
301}
302
11c5e016 303static
d96bfb0d 304uint8_t ja_linear_node_get_nr_child(const struct cds_ja_type *type,
b4540e8a 305 struct cds_ja_inode *node)
11c5e016
MD
306{
307 assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
2e313670 308 return rcu_dereference(node->u.data[0]);
11c5e016
MD
309}
310
13a7f5a6
MD
311/*
 * The order in which values and pointers are read does not matter: if
 * a value is missing, we return NULL. If a value is there, but its
 * associated pointer is still NULL, we return NULL too.
315 */
d68c6810 316static
b4540e8a
MD
317struct cds_ja_inode_flag *ja_linear_node_get_nth(const struct cds_ja_type *type,
318 struct cds_ja_inode *node,
b0ca2d21 319 struct cds_ja_inode_flag ***node_flag_ptr,
8e519e3c 320 uint8_t n)
d68c6810
MD
321{
322 uint8_t nr_child;
323 uint8_t *values;
b4540e8a
MD
324 struct cds_ja_inode_flag **pointers;
325 struct cds_ja_inode_flag *ptr;
d68c6810
MD
326 unsigned int i;
327
8e519e3c 328 assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
d68c6810 329
11c5e016 330 nr_child = ja_linear_node_get_nr_child(type, node);
13a7f5a6 331 cmm_smp_rmb(); /* read nr_child before values and pointers */
8e519e3c
MD
332 assert(nr_child <= type->max_linear_child);
333 assert(type->type_class != RCU_JA_LINEAR || nr_child >= type->min_child);
d68c6810 334
1db4943c 335 values = &node->u.data[1];
d68c6810 336 for (i = 0; i < nr_child; i++) {
13a7f5a6 337 if (CMM_LOAD_SHARED(values[i]) == n)
d68c6810
MD
338 break;
339 }
b0ca2d21
MD
340 if (i >= nr_child) {
341 if (caa_unlikely(node_flag_ptr))
342 *node_flag_ptr = NULL;
d68c6810 343 return NULL;
b0ca2d21 344 }
b4540e8a 345 pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
13a7f5a6 346 ptr = rcu_dereference(pointers[i]);
b0ca2d21
MD
347 if (caa_unlikely(node_flag_ptr))
348 *node_flag_ptr = &pointers[i];
d68c6810
MD
349 return ptr;
350}
351
291b2543
MD
352static
353struct cds_ja_inode_flag *ja_linear_node_get_left(const struct cds_ja_type *type,
354 struct cds_ja_inode *node,
355 unsigned int n)
356{
357 uint8_t nr_child;
358 uint8_t *values;
359 struct cds_ja_inode_flag **pointers;
360 struct cds_ja_inode_flag *ptr;
361 unsigned int i, match_idx;
362 int match_v = -1;
363
364 assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
365
366 nr_child = ja_linear_node_get_nr_child(type, node);
367 cmm_smp_rmb(); /* read nr_child before values and pointers */
368 assert(nr_child <= type->max_linear_child);
369 assert(type->type_class != RCU_JA_LINEAR || nr_child >= type->min_child);
370
371 values = &node->u.data[1];
372 for (i = 0; i < nr_child; i++) {
373 unsigned int v;
374
375 v = CMM_LOAD_SHARED(values[i]);
376 if (v < n && (int) v > match_v) {
377 match_v = v;
378 match_idx = i;
379 }
380 }
381 if (match_v < 0) {
382 return NULL;
383 }
384 pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
385 ptr = rcu_dereference(pointers[match_idx]);
386 return ptr;
387}
388
11c5e016 389static
5a9a87dd 390void ja_linear_node_get_ith_pos(const struct cds_ja_type *type,
b4540e8a 391 struct cds_ja_inode *node,
11c5e016
MD
392 uint8_t i,
393 uint8_t *v,
b4540e8a 394 struct cds_ja_inode_flag **iter)
11c5e016
MD
395{
396 uint8_t *values;
b4540e8a 397 struct cds_ja_inode_flag **pointers;
11c5e016
MD
398
399 assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
400 assert(i < ja_linear_node_get_nr_child(type, node));
401
402 values = &node->u.data[1];
403 *v = values[i];
b4540e8a 404 pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
11c5e016
MD
405 *iter = pointers[i];
406}
407
d68c6810 408static
b4540e8a
MD
409struct cds_ja_inode_flag *ja_pool_node_get_nth(const struct cds_ja_type *type,
410 struct cds_ja_inode *node,
b1a90ce3 411 struct cds_ja_inode_flag *node_flag,
b0ca2d21 412 struct cds_ja_inode_flag ***node_flag_ptr,
8e519e3c 413 uint8_t n)
d68c6810 414{
b4540e8a 415 struct cds_ja_inode *linear;
d68c6810 416
fd800776 417 assert(type->type_class == RCU_JA_POOL);
b1a90ce3
MD
418
419 switch (type->nr_pool_order) {
420 case 1:
421 {
422 unsigned long bitsel, index;
423
424 bitsel = ja_node_pool_1d_bitsel(node_flag);
425 assert(bitsel < CHAR_BIT);
19ddcd04 426 index = ((unsigned long) n >> bitsel) & 0x1;
b1a90ce3
MD
427 linear = (struct cds_ja_inode *) &node->u.data[index << type->pool_size_order];
428 break;
429 }
430 case 2:
431 {
19ddcd04
MD
432 unsigned long bitsel[2], index[2], rindex;
433
434 ja_node_pool_2d_bitsel(node_flag, bitsel);
435 assert(bitsel[0] < CHAR_BIT);
436 assert(bitsel[1] < CHAR_BIT);
437 index[0] = ((unsigned long) n >> bitsel[0]) & 0x1;
438 index[0] <<= 1;
439 index[1] = ((unsigned long) n >> bitsel[1]) & 0x1;
440 rindex = index[0] | index[1];
441 linear = (struct cds_ja_inode *) &node->u.data[rindex << type->pool_size_order];
b1a90ce3
MD
442 break;
443 }
444 default:
445 linear = NULL;
446 assert(0);
447 }
48cbe001 448 return ja_linear_node_get_nth(type, linear, node_flag_ptr, n);
d68c6810
MD
449}
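
/*
 * Worked example (illustrative sketch, unused): mapping a key byte to
 * one of the four linear sub-pools of a 2-dimension pool node, as in
 * the nr_pool_order == 2 case above. With bit selectors 5 and 1 and
 * n == 0x32, the sub-pool index is (1 << 1) | 1 == 3.
 */
static inline __attribute__((unused))
unsigned long ja_example_pool_2d_index(uint8_t n, unsigned long bitsel_hi,
		unsigned long bitsel_lo)
{
	unsigned long index_hi, index_lo;

	index_hi = (((unsigned long) n >> bitsel_hi) & 0x1) << 1;
	index_lo = ((unsigned long) n >> bitsel_lo) & 0x1;
	return index_hi | index_lo;
}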
450
11c5e016 451static
b4540e8a
MD
452struct cds_ja_inode *ja_pool_node_get_ith_pool(const struct cds_ja_type *type,
453 struct cds_ja_inode *node,
11c5e016
MD
454 uint8_t i)
455{
456 assert(type->type_class == RCU_JA_POOL);
b4540e8a 457 return (struct cds_ja_inode *)
11c5e016
MD
458 &node->u.data[(unsigned int) i << type->pool_size_order];
459}
460
291b2543
MD
461static
462struct cds_ja_inode_flag *ja_pool_node_get_left(const struct cds_ja_type *type,
463 struct cds_ja_inode *node,
464 unsigned int n)
465{
466 unsigned int pool_nr;
467 int match_v = -1;
468 struct cds_ja_inode_flag *match_node_flag = NULL;
469
470 assert(type->type_class == RCU_JA_POOL);
471
472 for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) {
473 struct cds_ja_inode *pool =
474 ja_pool_node_get_ith_pool(type,
475 node, pool_nr);
476 uint8_t nr_child =
477 ja_linear_node_get_nr_child(type, pool);
478 unsigned int j;
479
480 for (j = 0; j < nr_child; j++) {
481 struct cds_ja_inode_flag *iter;
482 uint8_t v;
483
484 ja_linear_node_get_ith_pos(type, pool,
485 j, &v, &iter);
486 if (!iter)
487 continue;
488 if (v < n && (int) v > match_v) {
489 match_v = v;
490 match_node_flag = iter;
491 }
492 }
493 }
494 return match_node_flag;
495}
496
d68c6810 497static
b4540e8a
MD
498struct cds_ja_inode_flag *ja_pigeon_node_get_nth(const struct cds_ja_type *type,
499 struct cds_ja_inode *node,
b0ca2d21 500 struct cds_ja_inode_flag ***node_flag_ptr,
8e519e3c 501 uint8_t n)
d68c6810 502{
48cbe001
MD
503 struct cds_ja_inode_flag **child_node_flag_ptr;
504 struct cds_ja_inode_flag *child_node_flag;
5a9a87dd 505
d68c6810 506 assert(type->type_class == RCU_JA_PIGEON);
48cbe001
MD
507 child_node_flag_ptr = &((struct cds_ja_inode_flag **) node->u.data)[n];
508 child_node_flag = rcu_dereference(*child_node_flag_ptr);
582a6ade 509 dbg_printf("ja_pigeon_node_get_nth child_node_flag_ptr %p\n",
48cbe001 510 child_node_flag_ptr);
b0ca2d21 511 if (caa_unlikely(node_flag_ptr))
48cbe001
MD
512 *node_flag_ptr = child_node_flag_ptr;
513 return child_node_flag;
d68c6810
MD
514}
515
291b2543
MD
516static
517struct cds_ja_inode_flag *ja_pigeon_node_get_left(const struct cds_ja_type *type,
518 struct cds_ja_inode *node,
519 unsigned int n)
520{
521 struct cds_ja_inode_flag **child_node_flag_ptr;
522 struct cds_ja_inode_flag *child_node_flag;
523 int i;
524
525 assert(type->type_class == RCU_JA_PIGEON);
526
527 /* n - 1 is first value left of n */
528 for (i = n - 1; i >= 0; i--) {
529 child_node_flag_ptr = &((struct cds_ja_inode_flag **) node->u.data)[i];
530 child_node_flag = rcu_dereference(*child_node_flag_ptr);
531 if (child_node_flag) {
532 dbg_printf("ja_pigeon_node_get_left child_node_flag %p\n",
533 child_node_flag);
534 return child_node_flag;
535 }
536 }
537 return NULL;
538}
539
2e313670
MD
540static
541struct cds_ja_inode_flag *ja_pigeon_node_get_ith_pos(const struct cds_ja_type *type,
542 struct cds_ja_inode *node,
543 uint8_t i)
544{
48cbe001 545 return ja_pigeon_node_get_nth(type, node, NULL, i);
2e313670
MD
546}
547
13a7f5a6
MD
548/*
549 * ja_node_get_nth: get nth item from a node.
550 * node_flag is already rcu_dereference'd.
551 */
d68c6810 552static
b62a8d0c 553struct cds_ja_inode_flag *ja_node_get_nth(struct cds_ja_inode_flag *node_flag,
b0ca2d21 554 struct cds_ja_inode_flag ***node_flag_ptr,
8e519e3c 555 uint8_t n)
d68c6810
MD
556{
557 unsigned int type_index;
b4540e8a 558 struct cds_ja_inode *node;
d96bfb0d 559 const struct cds_ja_type *type;
d68c6810 560
d68c6810 561 node = ja_node_ptr(node_flag);
5a9a87dd 562 assert(node != NULL);
d68c6810
MD
563 type_index = ja_node_type(node_flag);
564 type = &ja_types[type_index];
565
566 switch (type->type_class) {
567 case RCU_JA_LINEAR:
5a9a87dd 568 return ja_linear_node_get_nth(type, node,
b62a8d0c 569 node_flag_ptr, n);
fd800776 570 case RCU_JA_POOL:
b1a90ce3 571 return ja_pool_node_get_nth(type, node, node_flag,
b62a8d0c 572 node_flag_ptr, n);
d68c6810 573 case RCU_JA_PIGEON:
5a9a87dd 574 return ja_pigeon_node_get_nth(type, node,
b62a8d0c 575 node_flag_ptr, n);
d68c6810
MD
576 default:
577 assert(0);
578 return (void *) -1UL;
579 }
580}
581
291b2543
MD
582static
583struct cds_ja_inode_flag *ja_node_get_left(struct cds_ja_inode_flag *node_flag,
584 unsigned int n)
585{
586 unsigned int type_index;
587 struct cds_ja_inode *node;
588 const struct cds_ja_type *type;
589
590 node = ja_node_ptr(node_flag);
591 assert(node != NULL);
592 type_index = ja_node_type(node_flag);
593 type = &ja_types[type_index];
594
595 switch (type->type_class) {
596 case RCU_JA_LINEAR:
597 return ja_linear_node_get_left(type, node, n);
598 case RCU_JA_POOL:
599 return ja_pool_node_get_left(type, node, n);
600 case RCU_JA_PIGEON:
601 return ja_pigeon_node_get_left(type, node, n);
602 default:
603 assert(0);
604 return (void *) -1UL;
605 }
606}
607
608static
609struct cds_ja_inode_flag *ja_node_get_rightmost(struct cds_ja_inode_flag *node_flag)
610{
611 return ja_node_get_left(node_flag, JA_ENTRY_PER_NODE);
612}
613
8e519e3c 614static
d96bfb0d 615int ja_linear_node_set_nth(const struct cds_ja_type *type,
b4540e8a 616 struct cds_ja_inode *node,
d96bfb0d 617 struct cds_ja_shadow_node *shadow_node,
8e519e3c 618 uint8_t n,
b4540e8a 619 struct cds_ja_inode_flag *child_node_flag)
8e519e3c
MD
620{
621 uint8_t nr_child;
622 uint8_t *values, *nr_child_ptr;
b4540e8a 623 struct cds_ja_inode_flag **pointers;
2e313670 624 unsigned int i, unused = 0;
8e519e3c
MD
625
626 assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
627
628 nr_child_ptr = &node->u.data[0];
48cbe001
MD
629 dbg_printf("linear set nth: n %u, nr_child_ptr %p\n",
630 (unsigned int) n, nr_child_ptr);
8e519e3c
MD
631 nr_child = *nr_child_ptr;
632 assert(nr_child <= type->max_linear_child);
8e519e3c
MD
633
634 values = &node->u.data[1];
2e313670
MD
635 pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
636 /* Check if node value is already populated */
8e519e3c 637 for (i = 0; i < nr_child; i++) {
2e313670
MD
638 if (values[i] == n) {
639 if (pointers[i])
640 return -EEXIST;
641 else
642 break;
643 } else {
644 if (!pointers[i])
645 unused++;
646 }
8e519e3c 647 }
2e313670
MD
648 if (i == nr_child && nr_child >= type->max_linear_child) {
649 if (unused)
650 return -ERANGE; /* recompact node */
651 else
652 return -ENOSPC; /* No space left in this node type */
653 }
654
655 assert(pointers[i] == NULL);
656 rcu_assign_pointer(pointers[i], child_node_flag);
657 /* If we expanded the nr_child, increment it */
658 if (i == nr_child) {
659 CMM_STORE_SHARED(values[nr_child], n);
660 /* write pointer and value before nr_child */
661 cmm_smp_wmb();
662 CMM_STORE_SHARED(*nr_child_ptr, nr_child + 1);
8e519e3c 663 }
e1db2db5 664 shadow_node->nr_child++;
a2a7ff59
MD
665 dbg_printf("linear set nth: %u child, shadow: %u child, for node %p shadow %p\n",
666 (unsigned int) CMM_LOAD_SHARED(*nr_child_ptr),
667 (unsigned int) shadow_node->nr_child,
668 node, shadow_node);
669
8e519e3c
MD
670 return 0;
671}
672
673static
d96bfb0d 674int ja_pool_node_set_nth(const struct cds_ja_type *type,
b4540e8a 675 struct cds_ja_inode *node,
b1a90ce3 676 struct cds_ja_inode_flag *node_flag,
d96bfb0d 677 struct cds_ja_shadow_node *shadow_node,
8e519e3c 678 uint8_t n,
b4540e8a 679 struct cds_ja_inode_flag *child_node_flag)
8e519e3c 680{
b4540e8a 681 struct cds_ja_inode *linear;
8e519e3c
MD
682
683 assert(type->type_class == RCU_JA_POOL);
b1a90ce3
MD
684
685 switch (type->nr_pool_order) {
686 case 1:
687 {
688 unsigned long bitsel, index;
689
690 bitsel = ja_node_pool_1d_bitsel(node_flag);
691 assert(bitsel < CHAR_BIT);
19ddcd04 692 index = ((unsigned long) n >> bitsel) & 0x1;
b1a90ce3
MD
693 linear = (struct cds_ja_inode *) &node->u.data[index << type->pool_size_order];
694 break;
695 }
696 case 2:
697 {
19ddcd04
MD
698 unsigned long bitsel[2], index[2], rindex;
699
700 ja_node_pool_2d_bitsel(node_flag, bitsel);
701 assert(bitsel[0] < CHAR_BIT);
702 assert(bitsel[1] < CHAR_BIT);
703 index[0] = ((unsigned long) n >> bitsel[0]) & 0x1;
704 index[0] <<= 1;
705 index[1] = ((unsigned long) n >> bitsel[1]) & 0x1;
706 rindex = index[0] | index[1];
707 linear = (struct cds_ja_inode *) &node->u.data[rindex << type->pool_size_order];
b1a90ce3
MD
708 break;
709 }
710 default:
711 linear = NULL;
712 assert(0);
713 }
714
e1db2db5
MD
715 return ja_linear_node_set_nth(type, linear, shadow_node,
716 n, child_node_flag);
8e519e3c
MD
717}
718
719static
d96bfb0d 720int ja_pigeon_node_set_nth(const struct cds_ja_type *type,
b4540e8a 721 struct cds_ja_inode *node,
d96bfb0d 722 struct cds_ja_shadow_node *shadow_node,
8e519e3c 723 uint8_t n,
b4540e8a 724 struct cds_ja_inode_flag *child_node_flag)
8e519e3c 725{
b4540e8a 726 struct cds_ja_inode_flag **ptr;
8e519e3c
MD
727
728 assert(type->type_class == RCU_JA_PIGEON);
b4540e8a 729 ptr = &((struct cds_ja_inode_flag **) node->u.data)[n];
5a9a87dd 730 if (*ptr)
8e519e3c
MD
731 return -EEXIST;
732 rcu_assign_pointer(*ptr, child_node_flag);
e1db2db5 733 shadow_node->nr_child++;
8e519e3c
MD
734 return 0;
735}
736
d68c6810 737/*
7a0b2331 738 * _ja_node_set_nth: set nth item within a node. Return an error
8e519e3c 739 * (negative error value) if it is already there.
d68c6810 740 */
8e519e3c 741static
d96bfb0d 742int _ja_node_set_nth(const struct cds_ja_type *type,
b4540e8a 743 struct cds_ja_inode *node,
b1a90ce3 744 struct cds_ja_inode_flag *node_flag,
d96bfb0d 745 struct cds_ja_shadow_node *shadow_node,
e1db2db5 746 uint8_t n,
b4540e8a 747 struct cds_ja_inode_flag *child_node_flag)
8e519e3c 748{
8e519e3c
MD
749 switch (type->type_class) {
750 case RCU_JA_LINEAR:
e1db2db5 751 return ja_linear_node_set_nth(type, node, shadow_node, n,
8e519e3c
MD
752 child_node_flag);
753 case RCU_JA_POOL:
b1a90ce3 754 return ja_pool_node_set_nth(type, node, node_flag, shadow_node, n,
8e519e3c
MD
755 child_node_flag);
756 case RCU_JA_PIGEON:
e1db2db5 757 return ja_pigeon_node_set_nth(type, node, shadow_node, n,
8e519e3c 758 child_node_flag);
e1db2db5
MD
759 case RCU_JA_NULL:
760 return -ENOSPC;
8e519e3c
MD
761 default:
762 assert(0);
763 return -EINVAL;
764 }
765
766 return 0;
767}
7a0b2331 768
2e313670 769static
af3cbd45 770int ja_linear_node_clear_ptr(const struct cds_ja_type *type,
2e313670
MD
771 struct cds_ja_inode *node,
772 struct cds_ja_shadow_node *shadow_node,
af3cbd45 773 struct cds_ja_inode_flag **node_flag_ptr)
2e313670
MD
774{
775 uint8_t nr_child;
af3cbd45 776 uint8_t *nr_child_ptr;
2e313670
MD
777
778 assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
779
780 nr_child_ptr = &node->u.data[0];
2e313670
MD
781 nr_child = *nr_child_ptr;
782 assert(nr_child <= type->max_linear_child);
783
48cbe001
MD
784 if (type->type_class == RCU_JA_LINEAR) {
785 assert(!shadow_node->fallback_removal_count);
786 if (shadow_node->nr_child <= type->min_child) {
2e313670
MD
787 /* We need to try recompacting the node */
788 return -EFBIG;
789 }
790 }
19ddcd04 791 dbg_printf("linear clear ptr: nr_child_ptr %p\n", nr_child_ptr);
af3cbd45
MD
792 assert(*node_flag_ptr != NULL);
793 rcu_assign_pointer(*node_flag_ptr, NULL);
2e313670
MD
794 /*
795 * Value and nr_child are never changed (would cause ABA issue).
796 * Instead, we leave the pointer to NULL and recompact the node
797 * once in a while. It is allowed to set a NULL pointer to a new
798 * value without recompaction though.
799 * Only update the shadow node accounting.
800 */
801 shadow_node->nr_child--;
af3cbd45 802 dbg_printf("linear clear ptr: %u child, shadow: %u child, for node %p shadow %p\n",
2e313670
MD
803 (unsigned int) CMM_LOAD_SHARED(*nr_child_ptr),
804 (unsigned int) shadow_node->nr_child,
805 node, shadow_node);
2e313670
MD
806 return 0;
807}
808
809static
af3cbd45 810int ja_pool_node_clear_ptr(const struct cds_ja_type *type,
2e313670 811 struct cds_ja_inode *node,
19ddcd04 812 struct cds_ja_inode_flag *node_flag,
2e313670 813 struct cds_ja_shadow_node *shadow_node,
af3cbd45 814 struct cds_ja_inode_flag **node_flag_ptr,
2e313670
MD
815 uint8_t n)
816{
817 struct cds_ja_inode *linear;
818
819 assert(type->type_class == RCU_JA_POOL);
19ddcd04
MD
820
821 if (shadow_node->fallback_removal_count) {
822 shadow_node->fallback_removal_count--;
823 } else {
824 /* We should try recompacting the node */
825 if (shadow_node->nr_child <= type->min_child)
826 return -EFBIG;
827 }
828
829 switch (type->nr_pool_order) {
830 case 1:
831 {
832 unsigned long bitsel, index;
833
834 bitsel = ja_node_pool_1d_bitsel(node_flag);
835 assert(bitsel < CHAR_BIT);
836 index = ((unsigned long) n >> bitsel) & type->nr_pool_order;
837 linear = (struct cds_ja_inode *) &node->u.data[index << type->pool_size_order];
838 break;
839 }
840 case 2:
841 {
842 unsigned long bitsel[2], index[2], rindex;
843
844 ja_node_pool_2d_bitsel(node_flag, bitsel);
845 assert(bitsel[0] < CHAR_BIT);
846 assert(bitsel[1] < CHAR_BIT);
847 index[0] = ((unsigned long) n >> bitsel[0]) & 0x1;
848 index[0] <<= 1;
849 index[1] = ((unsigned long) n >> bitsel[1]) & 0x1;
850 rindex = index[0] | index[1];
851 linear = (struct cds_ja_inode *) &node->u.data[rindex << type->pool_size_order];
852 break;
853 }
854 default:
855 linear = NULL;
856 assert(0);
857 }
858
af3cbd45 859 return ja_linear_node_clear_ptr(type, linear, shadow_node, node_flag_ptr);
2e313670
MD
860}
861
862static
af3cbd45 863int ja_pigeon_node_clear_ptr(const struct cds_ja_type *type,
2e313670
MD
864 struct cds_ja_inode *node,
865 struct cds_ja_shadow_node *shadow_node,
af3cbd45 866 struct cds_ja_inode_flag **node_flag_ptr)
2e313670 867{
2e313670 868 assert(type->type_class == RCU_JA_PIGEON);
19ddcd04
MD
869
870 if (shadow_node->fallback_removal_count) {
871 shadow_node->fallback_removal_count--;
872 } else {
873 /* We should try recompacting the node */
874 if (shadow_node->nr_child <= type->min_child)
875 return -EFBIG;
876 }
4d6ef45e 877 dbg_printf("ja_pigeon_node_clear_ptr: clearing ptr: %p\n", *node_flag_ptr);
af3cbd45 878 rcu_assign_pointer(*node_flag_ptr, NULL);
2e313670
MD
879 shadow_node->nr_child--;
880 return 0;
881}
882
883/*
af3cbd45 884 * _ja_node_clear_ptr: clear ptr item within a node. Return an error
2e313670
MD
885 * (negative error value) if it is not found (-ENOENT).
886 */
887static
af3cbd45 888int _ja_node_clear_ptr(const struct cds_ja_type *type,
2e313670 889 struct cds_ja_inode *node,
19ddcd04 890 struct cds_ja_inode_flag *node_flag,
2e313670 891 struct cds_ja_shadow_node *shadow_node,
af3cbd45 892 struct cds_ja_inode_flag **node_flag_ptr,
2e313670
MD
893 uint8_t n)
894{
895 switch (type->type_class) {
896 case RCU_JA_LINEAR:
af3cbd45 897 return ja_linear_node_clear_ptr(type, node, shadow_node, node_flag_ptr);
2e313670 898 case RCU_JA_POOL:
19ddcd04 899 return ja_pool_node_clear_ptr(type, node, node_flag, shadow_node, node_flag_ptr, n);
2e313670 900 case RCU_JA_PIGEON:
af3cbd45 901 return ja_pigeon_node_clear_ptr(type, node, shadow_node, node_flag_ptr);
2e313670
MD
902 case RCU_JA_NULL:
903 return -ENOENT;
904 default:
905 assert(0);
906 return -EINVAL;
907 }
908
909 return 0;
910}
911
b1a90ce3
MD
912/*
 * Calculate bit distribution. Returns the bit (0 to 7) that splits the
 * distribution into two sub-distributions containing numbers of
 * elements as close to each other as possible.
916 */
917static
918unsigned int ja_node_sum_distribution_1d(enum ja_recompact mode,
919 struct cds_ja *ja,
920 unsigned int type_index,
921 const struct cds_ja_type *type,
922 struct cds_ja_inode *node,
923 struct cds_ja_shadow_node *shadow_node,
924 uint8_t n,
925 struct cds_ja_inode_flag *child_node_flag,
926 struct cds_ja_inode_flag **nullify_node_flag_ptr)
927{
928 uint8_t nr_one[JA_BITS_PER_BYTE];
929 unsigned int bitsel = 0, bit_i, overall_best_distance = UINT_MAX;
930 unsigned int distrib_nr_child = 0;
931
932 memset(nr_one, 0, sizeof(nr_one));
933
934 switch (type->type_class) {
935 case RCU_JA_LINEAR:
936 {
937 uint8_t nr_child =
938 ja_linear_node_get_nr_child(type, node);
939 unsigned int i;
940
941 for (i = 0; i < nr_child; i++) {
942 struct cds_ja_inode_flag *iter;
b1a90ce3
MD
943 uint8_t v;
944
945 ja_linear_node_get_ith_pos(type, node, i, &v, &iter);
946 if (!iter)
947 continue;
948 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
949 continue;
f5531dd9
MD
950 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
951 if (v & (1U << bit_i))
952 nr_one[bit_i]++;
b1a90ce3
MD
953 }
954 distrib_nr_child++;
955 }
956 break;
957 }
958 case RCU_JA_POOL:
959 {
960 unsigned int pool_nr;
961
962 for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) {
963 struct cds_ja_inode *pool =
964 ja_pool_node_get_ith_pool(type,
965 node, pool_nr);
966 uint8_t nr_child =
967 ja_linear_node_get_nr_child(type, pool);
968 unsigned int j;
969
970 for (j = 0; j < nr_child; j++) {
971 struct cds_ja_inode_flag *iter;
b1a90ce3
MD
972 uint8_t v;
973
974 ja_linear_node_get_ith_pos(type, pool,
975 j, &v, &iter);
976 if (!iter)
977 continue;
978 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
979 continue;
f5531dd9
MD
980 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
981 if (v & (1U << bit_i))
982 nr_one[bit_i]++;
b1a90ce3
MD
983 }
984 distrib_nr_child++;
985 }
986 }
987 break;
988 }
989 case RCU_JA_PIGEON:
990 {
b1a90ce3
MD
991 unsigned int i;
992
993 assert(mode == JA_RECOMPACT_DEL);
48cbe001 994 for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
b1a90ce3 995 struct cds_ja_inode_flag *iter;
b1a90ce3
MD
996
997 iter = ja_pigeon_node_get_ith_pos(type, node, i);
998 if (!iter)
999 continue;
1000 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
1001 continue;
f5531dd9
MD
1002 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1003 if (i & (1U << bit_i))
1004 nr_one[bit_i]++;
b1a90ce3
MD
1005 }
1006 distrib_nr_child++;
1007 }
1008 break;
1009 }
1010 case RCU_JA_NULL:
19ddcd04 1011 assert(mode == JA_RECOMPACT_ADD_NEXT);
b1a90ce3
MD
1012 break;
1013 default:
1014 assert(0);
1015 break;
1016 }
1017
19ddcd04 1018 if (mode == JA_RECOMPACT_ADD_NEXT || mode == JA_RECOMPACT_ADD_SAME) {
f5531dd9
MD
1019 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1020 if (n & (1U << bit_i))
1021 nr_one[bit_i]++;
b1a90ce3
MD
1022 }
1023 distrib_nr_child++;
1024 }
1025
1026 /*
1027 * The best bit selector is that for which the number of ones is
1028 * closest to half of the number of children in the
f5531dd9
MD
1029 * distribution. We calculate the distance using the double of
1030 * the sub-distribution sizes to eliminate truncation error.
b1a90ce3
MD
1031 */
1032 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1033 unsigned int distance_to_best;
1034
f5531dd9 1035 distance_to_best = abs_int((nr_one[bit_i] << 1U) - distrib_nr_child);
b1a90ce3
MD
1036 if (distance_to_best < overall_best_distance) {
1037 overall_best_distance = distance_to_best;
1038 bitsel = bit_i;
1039 }
1040 }
1041 dbg_printf("1 dimension pool bit selection: (%u)\n", bitsel);
1042 return bitsel;
1043}
1044
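/*
 * Worked example (illustrative sketch, unused): the per-bit distance
 * computed by ja_node_sum_distribution_1d() above. For children values
 * { 0x10, 0x12, 0x30, 0x32 }, bit 1 and bit 5 each see two ones out of
 * four children, so their distance is 0; the scan above keeps the
 * lowest such bit, bit 1, splitting the children into { 0x10, 0x30 }
 * and { 0x12, 0x32 }.
 */
static inline __attribute__((unused))
unsigned int ja_example_1d_bit_distance(const uint8_t *values,
		unsigned int nr_child, unsigned int bit)
{
	unsigned int i, nr_one = 0;

	for (i = 0; i < nr_child; i++) {
		if (values[i] & (1U << bit))
			nr_one++;
	}
	/* Distance to a perfect half/half split, without truncation. */
	return abs_int((nr_one << 1U) - nr_child);
}
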
19ddcd04
MD
1045/*
 * Calculate bit distribution in two dimensions. Returns the two bits
 * (each 0 to 7) that split the distribution into four sub-distributions
 * containing numbers of elements as close to each other as possible.
1049 */
1050static
1051void ja_node_sum_distribution_2d(enum ja_recompact mode,
1052 struct cds_ja *ja,
1053 unsigned int type_index,
1054 const struct cds_ja_type *type,
1055 struct cds_ja_inode *node,
1056 struct cds_ja_shadow_node *shadow_node,
1057 uint8_t n,
1058 struct cds_ja_inode_flag *child_node_flag,
1059 struct cds_ja_inode_flag **nullify_node_flag_ptr,
1060 unsigned int *_bitsel)
1061{
1062 uint8_t nr_2d_11[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE],
1063 nr_2d_10[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE],
1064 nr_2d_01[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE],
1065 nr_2d_00[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE];
1066 unsigned int bitsel[2] = { 0, 1 };
4a073c53
MD
1067 unsigned int bit_i, bit_j;
1068 int overall_best_distance = INT_MAX;
19ddcd04
MD
1069 unsigned int distrib_nr_child = 0;
1070
1071 memset(nr_2d_11, 0, sizeof(nr_2d_11));
1072 memset(nr_2d_10, 0, sizeof(nr_2d_10));
4a073c53
MD
1073 memset(nr_2d_01, 0, sizeof(nr_2d_01));
1074 memset(nr_2d_00, 0, sizeof(nr_2d_00));
19ddcd04
MD
1075
1076 switch (type->type_class) {
1077 case RCU_JA_LINEAR:
1078 {
1079 uint8_t nr_child =
1080 ja_linear_node_get_nr_child(type, node);
1081 unsigned int i;
1082
1083 for (i = 0; i < nr_child; i++) {
1084 struct cds_ja_inode_flag *iter;
1085 uint8_t v;
1086
1087 ja_linear_node_get_ith_pos(type, node, i, &v, &iter);
1088 if (!iter)
1089 continue;
1090 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
1091 continue;
1092 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1093 for (bit_j = 0; bit_j < bit_i; bit_j++) {
1094 if ((v & (1U << bit_i)) && (v & (1U << bit_j))) {
1095 nr_2d_11[bit_i][bit_j]++;
1096 }
1097 if ((v & (1U << bit_i)) && !(v & (1U << bit_j))) {
1098 nr_2d_10[bit_i][bit_j]++;
1099 }
1100 if (!(v & (1U << bit_i)) && (v & (1U << bit_j))) {
1101 nr_2d_01[bit_i][bit_j]++;
1102 }
1103 if (!(v & (1U << bit_i)) && !(v & (1U << bit_j))) {
1104 nr_2d_00[bit_i][bit_j]++;
1105 }
1106 }
1107 }
1108 distrib_nr_child++;
1109 }
1110 break;
1111 }
1112 case RCU_JA_POOL:
1113 {
1114 unsigned int pool_nr;
1115
1116 for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) {
1117 struct cds_ja_inode *pool =
1118 ja_pool_node_get_ith_pool(type,
1119 node, pool_nr);
1120 uint8_t nr_child =
1121 ja_linear_node_get_nr_child(type, pool);
1122 unsigned int j;
1123
1124 for (j = 0; j < nr_child; j++) {
1125 struct cds_ja_inode_flag *iter;
1126 uint8_t v;
1127
1128 ja_linear_node_get_ith_pos(type, pool,
1129 j, &v, &iter);
1130 if (!iter)
1131 continue;
1132 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
1133 continue;
1134 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1135 for (bit_j = 0; bit_j < bit_i; bit_j++) {
1136 if ((v & (1U << bit_i)) && (v & (1U << bit_j))) {
1137 nr_2d_11[bit_i][bit_j]++;
1138 }
1139 if ((v & (1U << bit_i)) && !(v & (1U << bit_j))) {
1140 nr_2d_10[bit_i][bit_j]++;
1141 }
1142 if (!(v & (1U << bit_i)) && (v & (1U << bit_j))) {
1143 nr_2d_01[bit_i][bit_j]++;
1144 }
1145 if (!(v & (1U << bit_i)) && !(v & (1U << bit_j))) {
1146 nr_2d_00[bit_i][bit_j]++;
1147 }
1148 }
1149 }
1150 distrib_nr_child++;
1151 }
1152 }
1153 break;
1154 }
1155 case RCU_JA_PIGEON:
1156 {
19ddcd04
MD
1157 unsigned int i;
1158
1159 assert(mode == JA_RECOMPACT_DEL);
48cbe001 1160 for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
19ddcd04
MD
1161 struct cds_ja_inode_flag *iter;
1162
1163 iter = ja_pigeon_node_get_ith_pos(type, node, i);
1164 if (!iter)
1165 continue;
1166 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
1167 continue;
1168 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1169 for (bit_j = 0; bit_j < bit_i; bit_j++) {
1170 if ((i & (1U << bit_i)) && (i & (1U << bit_j))) {
1171 nr_2d_11[bit_i][bit_j]++;
1172 }
1173 if ((i & (1U << bit_i)) && !(i & (1U << bit_j))) {
1174 nr_2d_10[bit_i][bit_j]++;
1175 }
1176 if (!(i & (1U << bit_i)) && (i & (1U << bit_j))) {
1177 nr_2d_01[bit_i][bit_j]++;
1178 }
1179 if (!(i & (1U << bit_i)) && !(i & (1U << bit_j))) {
1180 nr_2d_00[bit_i][bit_j]++;
1181 }
1182 }
1183 }
1184 distrib_nr_child++;
1185 }
1186 break;
1187 }
1188 case RCU_JA_NULL:
1189 assert(mode == JA_RECOMPACT_ADD_NEXT);
1190 break;
1191 default:
1192 assert(0);
1193 break;
1194 }
1195
1196 if (mode == JA_RECOMPACT_ADD_NEXT || mode == JA_RECOMPACT_ADD_SAME) {
1197 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1198 for (bit_j = 0; bit_j < bit_i; bit_j++) {
1199 if ((n & (1U << bit_i)) && (n & (1U << bit_j))) {
1200 nr_2d_11[bit_i][bit_j]++;
1201 }
1202 if ((n & (1U << bit_i)) && !(n & (1U << bit_j))) {
1203 nr_2d_10[bit_i][bit_j]++;
1204 }
1205 if (!(n & (1U << bit_i)) && (n & (1U << bit_j))) {
1206 nr_2d_01[bit_i][bit_j]++;
1207 }
1208 if (!(n & (1U << bit_i)) && !(n & (1U << bit_j))) {
1209 nr_2d_00[bit_i][bit_j]++;
1210 }
1211 }
1212 }
1213 distrib_nr_child++;
1214 }
1215
1216 /*
1217 * The best bit selector is that for which the number of nodes
1218 * in each sub-class is closest to one-fourth of the number of
1219 * children in the distribution. We calculate the distance using
1220 * 4 times the size of the sub-distribution to eliminate
1221 * truncation error.
1222 */
1223 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1224 for (bit_j = 0; bit_j < bit_i; bit_j++) {
4a073c53 1225 int distance_to_best[4];
19ddcd04 1226
4a073c53
MD
1227 distance_to_best[0] = (nr_2d_11[bit_i][bit_j] << 2U) - distrib_nr_child;
1228 distance_to_best[1] = (nr_2d_10[bit_i][bit_j] << 2U) - distrib_nr_child;
1229 distance_to_best[2] = (nr_2d_01[bit_i][bit_j] << 2U) - distrib_nr_child;
1230 distance_to_best[3] = (nr_2d_00[bit_i][bit_j] << 2U) - distrib_nr_child;
19ddcd04 1231
4a073c53
MD
1232 /* Consider worse distance above best */
1233 if (distance_to_best[1] > 0 && distance_to_best[1] > distance_to_best[0])
19ddcd04 1234 distance_to_best[0] = distance_to_best[1];
4a073c53 1235 if (distance_to_best[2] > 0 && distance_to_best[2] > distance_to_best[0])
19ddcd04 1236 distance_to_best[0] = distance_to_best[2];
4a073c53 1237 if (distance_to_best[3] > 0 && distance_to_best[3] > distance_to_best[0])
19ddcd04 1238 distance_to_best[0] = distance_to_best[3];
4a073c53 1239
19ddcd04
MD
1240 /*
1241 * If our worse distance is better than overall,
1242 * we become new best candidate.
1243 */
1244 if (distance_to_best[0] < overall_best_distance) {
1245 overall_best_distance = distance_to_best[0];
1246 bitsel[0] = bit_i;
1247 bitsel[1] = bit_j;
1248 }
1249 }
1250 }
1251
1252 dbg_printf("2 dimensions pool bit selection: (%u,%u)\n", bitsel[0], bitsel[1]);
1253
1254 /* Return our bit selection */
1255 _bitsel[0] = bitsel[0];
1256 _bitsel[1] = bitsel[1];
1257}
1258
48cbe001
MD
1259static
1260unsigned int find_nearest_type_index(unsigned int type_index,
1261 unsigned int nr_nodes)
1262{
1263 const struct cds_ja_type *type;
1264
1265 assert(type_index != NODE_INDEX_NULL);
1266 if (nr_nodes == 0)
1267 return NODE_INDEX_NULL;
1268 for (;;) {
1269 type = &ja_types[type_index];
1270 if (nr_nodes < type->min_child)
1271 type_index--;
1272 else if (nr_nodes > type->max_child)
1273 type_index++;
1274 else
1275 break;
1276 }
1277 return type_index;
1278}
1279
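/*
 * Usage sketch (illustrative; numbers taken from the 64-bit ja_types[]
 * above): the min_child/max_child overlap gives hysteresis between
 * neighbouring node sizes.
 *
 *	find_nearest_type_index(3, 15);	returns 4 (15 > ja_types[3].max_child)
 *	find_nearest_type_index(4, 14);	returns 4 (14 >= ja_types[4].min_child)
 *	find_nearest_type_index(4, 9);	returns 3 (9 < ja_types[4].min_child)
 */
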
7a0b2331
MD
1280/*
1281 * ja_node_recompact_add: recompact a node, adding a new child.
2e313670 1282 * Return 0 on success, -EAGAIN if need to retry, or other negative
5a9a87dd 1283 * error value otherwise.
7a0b2331
MD
1284 */
1285static
2e313670
MD
1286int ja_node_recompact(enum ja_recompact mode,
1287 struct cds_ja *ja,
e1db2db5 1288 unsigned int old_type_index,
d96bfb0d 1289 const struct cds_ja_type *old_type,
b4540e8a 1290 struct cds_ja_inode *old_node,
5a9a87dd 1291 struct cds_ja_shadow_node *shadow_node,
3d8fe307 1292 struct cds_ja_inode_flag **old_node_flag_ptr, uint8_t n,
af3cbd45 1293 struct cds_ja_inode_flag *child_node_flag,
48cbe001
MD
1294 struct cds_ja_inode_flag **nullify_node_flag_ptr,
1295 int level)
7a0b2331 1296{
e1db2db5 1297 unsigned int new_type_index;
b4540e8a 1298 struct cds_ja_inode *new_node;
af3cbd45 1299 struct cds_ja_shadow_node *new_shadow_node = NULL;
d96bfb0d 1300 const struct cds_ja_type *new_type;
3d8fe307 1301 struct cds_ja_inode_flag *new_node_flag, *old_node_flag;
7a0b2331 1302 int ret;
f07b240f 1303 int fallback = 0;
7a0b2331 1304
3d8fe307
MD
1305 old_node_flag = *old_node_flag_ptr;
1306
48cbe001
MD
1307 /*
1308 * Need to find nearest type index even for ADD_SAME, because
1309 * this recompaction, when applied to linear nodes, will garbage
1310 * collect dummy (NULL) entries, and can therefore cause a few
1311 * linear representations to be skipped.
1312 */
2e313670 1313 switch (mode) {
19ddcd04 1314 case JA_RECOMPACT_ADD_SAME:
48cbe001
MD
1315 new_type_index = find_nearest_type_index(old_type_index,
1316 shadow_node->nr_child + 1);
1317 dbg_printf("Recompact for node with %u children\n",
1318 shadow_node->nr_child + 1);
2e313670 1319 break;
19ddcd04 1320 case JA_RECOMPACT_ADD_NEXT:
2e313670
MD
1321 if (!shadow_node || old_type_index == NODE_INDEX_NULL) {
1322 new_type_index = 0;
48cbe001 1323 dbg_printf("Recompact for NULL\n");
2e313670 1324 } else {
48cbe001
MD
1325 new_type_index = find_nearest_type_index(old_type_index,
1326 shadow_node->nr_child + 1);
1327 dbg_printf("Recompact for node with %u children\n",
1328 shadow_node->nr_child + 1);
2e313670
MD
1329 }
1330 break;
1331 case JA_RECOMPACT_DEL:
48cbe001
MD
1332 new_type_index = find_nearest_type_index(old_type_index,
1333 shadow_node->nr_child - 1);
1334 dbg_printf("Recompact for node with %u children\n",
1335 shadow_node->nr_child - 1);
2e313670
MD
1336 break;
1337 default:
1338 assert(0);
7a0b2331 1339 }
a2a7ff59 1340
f07b240f 1341retry: /* for fallback */
582a6ade
MD
1342 dbg_printf("Recompact from type %d to type %d\n",
1343 old_type_index, new_type_index);
7a0b2331 1344 new_type = &ja_types[new_type_index];
2e313670 1345 if (new_type_index != NODE_INDEX_NULL) {
354981c2 1346 new_node = alloc_cds_ja_node(ja, new_type);
2e313670
MD
1347 if (!new_node)
1348 return -ENOMEM;
b1a90ce3
MD
1349
1350 if (new_type->type_class == RCU_JA_POOL) {
1351 switch (new_type->nr_pool_order) {
1352 case 1:
1353 {
19ddcd04
MD
1354 unsigned int node_distrib_bitsel;
1355
b1a90ce3
MD
1356 node_distrib_bitsel =
1357 ja_node_sum_distribution_1d(mode, ja,
1358 old_type_index, old_type,
1359 old_node, shadow_node,
1360 n, child_node_flag,
1361 nullify_node_flag_ptr);
1362 assert(!((unsigned long) new_node & JA_POOL_1D_MASK));
1363 new_node_flag = ja_node_flag_pool_1d(new_node,
1364 new_type_index, node_distrib_bitsel);
1365 break;
1366 }
1367 case 2:
1368 {
19ddcd04
MD
1369 unsigned int node_distrib_bitsel[2];
1370
1371 ja_node_sum_distribution_2d(mode, ja,
1372 old_type_index, old_type,
1373 old_node, shadow_node,
1374 n, child_node_flag,
1375 nullify_node_flag_ptr,
1376 node_distrib_bitsel);
b1a90ce3
MD
1377 assert(!((unsigned long) new_node & JA_POOL_1D_MASK));
1378 assert(!((unsigned long) new_node & JA_POOL_2D_MASK));
19ddcd04
MD
1379 new_node_flag = ja_node_flag_pool_2d(new_node,
1380 new_type_index, node_distrib_bitsel);
b1a90ce3
MD
1381 break;
1382 }
1383 default:
1384 assert(0);
1385 }
1386 } else {
1387 new_node_flag = ja_node_flag(new_node, new_type_index);
1388 }
1389
2e313670 1390 dbg_printf("Recompact inherit lock from %p\n", shadow_node);
48cbe001 1391 new_shadow_node = rcuja_shadow_set(ja->ht, new_node_flag, shadow_node, ja, level);
2e313670 1392 if (!new_shadow_node) {
354981c2 1393 free_cds_ja_node(ja, new_node);
2e313670
MD
1394 return -ENOMEM;
1395 }
1396 if (fallback)
1397 new_shadow_node->fallback_removal_count =
1398 JA_FALLBACK_REMOVAL_COUNT;
1399 } else {
1400 new_node = NULL;
1401 new_node_flag = NULL;
e1db2db5 1402 }
11c5e016 1403
19ddcd04 1404 assert(mode != JA_RECOMPACT_ADD_NEXT || old_type->type_class != RCU_JA_PIGEON);
2e313670
MD
1405
1406 if (new_type_index == NODE_INDEX_NULL)
1407 goto skip_copy;
1408
11c5e016
MD
1409 switch (old_type->type_class) {
1410 case RCU_JA_LINEAR:
1411 {
1412 uint8_t nr_child =
1413 ja_linear_node_get_nr_child(old_type, old_node);
1414 unsigned int i;
1415
1416 for (i = 0; i < nr_child; i++) {
b4540e8a 1417 struct cds_ja_inode_flag *iter;
11c5e016
MD
1418 uint8_t v;
1419
1420 ja_linear_node_get_ith_pos(old_type, old_node, i, &v, &iter);
1421 if (!iter)
1422 continue;
af3cbd45 1423 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
2e313670 1424 continue;
b1a90ce3 1425 ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
f07b240f 1426 new_shadow_node,
11c5e016 1427 v, iter);
f07b240f
MD
1428 if (new_type->type_class == RCU_JA_POOL && ret) {
1429 goto fallback_toosmall;
1430 }
11c5e016
MD
1431 assert(!ret);
1432 }
1433 break;
1434 }
1435 case RCU_JA_POOL:
1436 {
1437 unsigned int pool_nr;
1438
1439 for (pool_nr = 0; pool_nr < (1U << old_type->nr_pool_order); pool_nr++) {
b4540e8a 1440 struct cds_ja_inode *pool =
11c5e016
MD
1441 ja_pool_node_get_ith_pool(old_type,
1442 old_node, pool_nr);
1443 uint8_t nr_child =
1444 ja_linear_node_get_nr_child(old_type, pool);
1445 unsigned int j;
1446
1447 for (j = 0; j < nr_child; j++) {
b4540e8a 1448 struct cds_ja_inode_flag *iter;
11c5e016
MD
1449 uint8_t v;
1450
1451 ja_linear_node_get_ith_pos(old_type, pool,
1452 j, &v, &iter);
1453 if (!iter)
1454 continue;
af3cbd45 1455 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
2e313670 1456 continue;
b1a90ce3 1457 ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
f07b240f 1458 new_shadow_node,
11c5e016 1459 v, iter);
f07b240f
MD
1460 if (new_type->type_class == RCU_JA_POOL
1461 && ret) {
1462 goto fallback_toosmall;
1463 }
11c5e016
MD
1464 assert(!ret);
1465 }
1466 }
1467 break;
7a0b2331 1468 }
a2a7ff59 1469 case RCU_JA_NULL:
19ddcd04 1470 assert(mode == JA_RECOMPACT_ADD_NEXT);
a2a7ff59 1471 break;
11c5e016 1472 case RCU_JA_PIGEON:
2e313670 1473 {
2e313670
MD
1474 unsigned int i;
1475
1476 assert(mode == JA_RECOMPACT_DEL);
48cbe001 1477 for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
2e313670
MD
1478 struct cds_ja_inode_flag *iter;
1479
1480 iter = ja_pigeon_node_get_ith_pos(old_type, old_node, i);
1481 if (!iter)
1482 continue;
af3cbd45 1483 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
2e313670 1484 continue;
b1a90ce3 1485 ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
2e313670
MD
1486 new_shadow_node,
1487 i, iter);
1488 if (new_type->type_class == RCU_JA_POOL && ret) {
1489 goto fallback_toosmall;
1490 }
1491 assert(!ret);
1492 }
1493 break;
1494 }
11c5e016
MD
1495 default:
1496 assert(0);
5a9a87dd 1497 ret = -EINVAL;
f07b240f 1498 goto end;
11c5e016 1499 }
2e313670 1500skip_copy:
11c5e016 1501
19ddcd04 1502 if (mode == JA_RECOMPACT_ADD_NEXT || mode == JA_RECOMPACT_ADD_SAME) {
2e313670 1503 /* add node */
b1a90ce3 1504 ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
2e313670
MD
1505 new_shadow_node,
1506 n, child_node_flag);
7b413155
MD
1507 if (new_type->type_class == RCU_JA_POOL && ret) {
1508 goto fallback_toosmall;
1509 }
2e313670
MD
1510 assert(!ret);
1511 }
19ddcd04
MD
1512
1513 if (fallback) {
1514 dbg_printf("Using fallback for %u children, node type index: %u, mode %s\n",
1515 new_shadow_node->nr_child, old_type_index, mode == JA_RECOMPACT_ADD_NEXT ? "add_next" :
1516 (mode == JA_RECOMPACT_DEL ? "del" : "add_same"));
354981c2 1517 uatomic_inc(&ja->node_fallback_count_distribution[new_shadow_node->nr_child]);
19ddcd04
MD
1518 }
1519
3d8fe307
MD
1520 /* Return pointer to new recompacted node through old_node_flag_ptr */
1521 *old_node_flag_ptr = new_node_flag;
a2a7ff59 1522 if (old_node) {
2e313670
MD
1523 int flags;
1524
1525 flags = RCUJA_SHADOW_CLEAR_FREE_NODE;
1526 /*
1527 * It is OK to free the lock associated with a node
1528 * going to NULL, since we are holding the parent lock.
1529 * This synchronizes removal with re-add of that node.
1530 */
1531 if (new_type_index == NODE_INDEX_NULL)
48cbe001 1532 flags |= RCUJA_SHADOW_CLEAR_FREE_LOCK;
3d8fe307 1533 ret = rcuja_shadow_clear(ja->ht, old_node_flag, shadow_node,
2e313670 1534 flags);
a2a7ff59
MD
1535 assert(!ret);
1536 }
5a9a87dd
MD
1537
1538 ret = 0;
f07b240f 1539end:
5a9a87dd 1540 return ret;
f07b240f
MD
1541
1542fallback_toosmall:
1543 /* fallback if next pool is too small */
af3cbd45 1544 assert(new_shadow_node);
3d8fe307 1545 ret = rcuja_shadow_clear(ja->ht, new_node_flag, new_shadow_node,
f07b240f
MD
1546 RCUJA_SHADOW_CLEAR_FREE_NODE);
1547 assert(!ret);
1548
19ddcd04
MD
1549 switch (mode) {
1550 case JA_RECOMPACT_ADD_SAME:
1551 /*
1552 * JA_RECOMPACT_ADD_SAME is only triggered if a linear
1553 * node within a pool has unused entries. It should
1554 * therefore _never_ be too small.
1555 */
4a073c53 1556 assert(0);
4cde8267
MD
1557
1558 /* Fall-through */
19ddcd04
MD
1559 case JA_RECOMPACT_ADD_NEXT:
1560 {
1561 const struct cds_ja_type *next_type;
1562
1563 /*
1564 * Recompaction attempt on add failed. Should only
1565 * happen if target node type is pool. Caused by
1566 * hard-to-split distribution. Recompact using the next
1567 * distribution size.
1568 */
1569 assert(new_type->type_class == RCU_JA_POOL);
1570 next_type = &ja_types[new_type_index + 1];
1571 /*
1572 * Try going to the next pool size if our population
1573 * fits within its range. This is not flagged as a
1574 * fallback.
1575 */
1576 if (shadow_node->nr_child + 1 >= next_type->min_child
1577 && shadow_node->nr_child + 1 <= next_type->max_child) {
1578 new_type_index++;
1579 goto retry;
1580 } else {
1581 new_type_index++;
1582 dbg_printf("Add fallback to type %d\n", new_type_index);
1583 uatomic_inc(&ja->nr_fallback);
1584 fallback = 1;
1585 goto retry;
1586 }
1587 break;
1588 }
1589 case JA_RECOMPACT_DEL:
1590 /*
1591 * Recompaction attempt on delete failed. Should only
1592 * happen if target node type is pool. This is caused by
1593 * a hard-to-split distribution. Recompact on same node
1594 * size, but flag current node as "fallback" to ensure
1595 * we don't attempt recompaction before some activity
1596 * has reshuffled our node.
1597 */
1598 assert(new_type->type_class == RCU_JA_POOL);
1599 new_type_index = old_type_index;
1600 dbg_printf("Delete fallback keeping type %d\n", new_type_index);
1601 uatomic_inc(&ja->nr_fallback);
1602 fallback = 1;
1603 goto retry;
1604 default:
1605 assert(0);
1606 return -EINVAL;
1607 }
1608
1609 /*
1610 * Last resort fallback: pigeon.
1611 */
f07b240f
MD
1612 new_type_index = (1UL << JA_TYPE_BITS) - 1;
1613 dbg_printf("Fallback to type %d\n", new_type_index);
1614 uatomic_inc(&ja->nr_fallback);
1615 fallback = 1;
1616 goto retry;
7a0b2331
MD
1617}
1618
5a9a87dd 1619/*
2e313670 1620 * Return 0 on success, -EAGAIN if need to retry, or other negative
5a9a87dd
MD
1621 * error value otherwise.
1622 */
7a0b2331 1623static
d96bfb0d 1624int ja_node_set_nth(struct cds_ja *ja,
b4540e8a 1625 struct cds_ja_inode_flag **node_flag, uint8_t n,
5a9a87dd 1626 struct cds_ja_inode_flag *child_node_flag,
48cbe001
MD
1627 struct cds_ja_shadow_node *shadow_node,
1628 int level)
7a0b2331
MD
1629{
1630 int ret;
e1db2db5 1631 unsigned int type_index;
d96bfb0d 1632 const struct cds_ja_type *type;
b4540e8a 1633 struct cds_ja_inode *node;
7a0b2331 1634
a2a7ff59
MD
1635 dbg_printf("ja_node_set_nth for n=%u, node %p, shadow %p\n",
1636 (unsigned int) n, ja_node_ptr(*node_flag), shadow_node);
1637
e1db2db5
MD
1638 node = ja_node_ptr(*node_flag);
1639 type_index = ja_node_type(*node_flag);
1640 type = &ja_types[type_index];
b1a90ce3 1641 ret = _ja_node_set_nth(type, node, *node_flag, shadow_node,
e1db2db5 1642 n, child_node_flag);
2e313670
MD
1643 switch (ret) {
1644 case -ENOSPC:
19ddcd04
MD
1645 /* Not enough space in node, need to recompact to next type. */
1646 ret = ja_node_recompact(JA_RECOMPACT_ADD_NEXT, ja, type_index, type, node,
48cbe001 1647 shadow_node, node_flag, n, child_node_flag, NULL, level);
2e313670
MD
1648 break;
1649 case -ERANGE:
1650 /* Node needs to be recompacted. */
19ddcd04 1651 ret = ja_node_recompact(JA_RECOMPACT_ADD_SAME, ja, type_index, type, node,
48cbe001 1652 shadow_node, node_flag, n, child_node_flag, NULL, level);
2e313670
MD
1653 break;
1654 }
1655 return ret;
1656}
1657
1658/*
1659 * Return 0 on success, -EAGAIN if a retry is needed, or another negative
1660 * error value otherwise.
1661 */
1662static
af3cbd45
MD
1663int ja_node_clear_ptr(struct cds_ja *ja,
1664 struct cds_ja_inode_flag **node_flag_ptr, /* Pointer to location to nullify */
1665 struct cds_ja_inode_flag **parent_node_flag_ptr, /* Address of parent ptr in its parent */
1666 struct cds_ja_shadow_node *shadow_node, /* of parent */
48cbe001 1667 uint8_t n, int level)
2e313670
MD
1668{
1669 int ret;
1670 unsigned int type_index;
1671 const struct cds_ja_type *type;
1672 struct cds_ja_inode *node;
1673
af3cbd45
MD
1674 dbg_printf("ja_node_clear_ptr for node %p, shadow %p, target ptr %p\n",
1675 ja_node_ptr(*parent_node_flag_ptr), shadow_node, node_flag_ptr);
2e313670 1676
af3cbd45
MD
1677 node = ja_node_ptr(*parent_node_flag_ptr);
1678 type_index = ja_node_type(*parent_node_flag_ptr);
2e313670 1679 type = &ja_types[type_index];
19ddcd04 1680 ret = _ja_node_clear_ptr(type, node, *parent_node_flag_ptr, shadow_node, node_flag_ptr, n);
2e313670 1681 if (ret == -EFBIG) {
19ddcd04 1682 /* Should try recompaction. */
2e313670 1683 ret = ja_node_recompact(JA_RECOMPACT_DEL, ja, type_index, type, node,
af3cbd45 1684 shadow_node, parent_node_flag_ptr, n, NULL,
48cbe001 1685 node_flag_ptr, level);
7a0b2331
MD
1686 }
1687 return ret;
1688}
be9a7474 1689
03ec1aeb 1690struct cds_ja_node *cds_ja_lookup(struct cds_ja *ja, uint64_t key)
b4540e8a 1691{
41975c12
MD
1692 unsigned int tree_depth, i;
1693 struct cds_ja_inode_flag *node_flag;
1694
1695 if (caa_unlikely(key > ja->key_max))
03ec1aeb 1696 return NULL;
41975c12 1697 tree_depth = ja->tree_depth;
5a9a87dd 1698 node_flag = rcu_dereference(ja->root);
41975c12 1699
5a9a87dd
MD
1700 /* level 0: root node */
1701 if (!ja_node_ptr(node_flag))
03ec1aeb 1702 return NULL;
5a9a87dd
MD
1703
1704 for (i = 1; i < tree_depth; i++) {
79b41067
MD
1705 uint8_t iter_key;
1706
1707 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1)));
48cbe001 1708 node_flag = ja_node_get_nth(node_flag, NULL, iter_key);
582a6ade
MD
1709 dbg_printf("cds_ja_lookup iter key lookup %u finds node_flag %p\n",
1710 (unsigned int) iter_key, node_flag);
41975c12 1711 if (!ja_node_ptr(node_flag))
03ec1aeb 1712 return NULL;
41975c12
MD
1713 }
1714
5a9a87dd 1715 /* Last level lookup succeeded. We got an actual match. */
03ec1aeb 1716 return (struct cds_ja_node *) node_flag;
5a9a87dd
MD
1717}
1718
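/*
 * Usage sketch (illustration only, not compiled): one way a caller can
 * perform a lookup. struct my_item and my_read_payload() are
 * hypothetical caller-side names; the caller is assumed to embed
 * struct cds_ja_node in its own structure, to include an RCU flavor
 * header providing rcu_read_lock()/rcu_read_unlock(), and to use
 * caa_container_of() from urcu/compiler.h. The returned node is only
 * guaranteed to remain valid while the read-side lock is held.
 */
#if 0
struct my_item {
	struct cds_ja_node ja_node;	/* linkage into the judy array */
	uint64_t key;
	int payload;
};

static int my_read_payload(struct cds_ja *ja, uint64_t key, int *payload)
{
	struct cds_ja_node *node;
	int ret = -ENOENT;

	rcu_read_lock();	/* node stays valid within this critical section */
	node = cds_ja_lookup(ja, key);	/* head of duplicate chain, or NULL */
	if (node) {
		*payload = caa_container_of(node, struct my_item, ja_node)->payload;
		ret = 0;
	}
	rcu_read_unlock();
	return ret;
}
#endif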
03ec1aeb 1719struct cds_ja_node *cds_ja_lookup_lower_equal(struct cds_ja *ja, uint64_t key)
291b2543
MD
1720{
1721 int tree_depth, level;
1722 struct cds_ja_inode_flag *node_flag, *cur_node_depth[JA_MAX_DEPTH];
291b2543
MD
1723
1724 if (caa_unlikely(key > ja->key_max || !key))
03ec1aeb 1725 return NULL;
291b2543
MD
1726
1727 memset(cur_node_depth, 0, sizeof(cur_node_depth));
1728 tree_depth = ja->tree_depth;
1729 node_flag = rcu_dereference(ja->root);
1730 cur_node_depth[0] = node_flag;
1731
1732 /* level 0: root node */
1733 if (!ja_node_ptr(node_flag))
03ec1aeb 1734 return NULL;
291b2543
MD
1735
1736 for (level = 1; level < tree_depth; level++) {
1737 uint8_t iter_key;
1738
1739 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - level - 1)));
1740 node_flag = ja_node_get_nth(node_flag, NULL, iter_key);
1741 if (!ja_node_ptr(node_flag))
1742 break;
1743 cur_node_depth[level] = node_flag;
1744 dbg_printf("cds_ja_lookup_lower_equal iter key lookup %u finds node_flag %p\n",
1745 (unsigned int) iter_key, node_flag);
1746 }
1747
1748 if (level == tree_depth) {
1749 /* Last level lookup succeeded. We got an equal match. */
03ec1aeb 1750 return (struct cds_ja_node *) node_flag;
291b2543
MD
1751 }
1752
1753 /*
1754 * Find highest value left of current node.
1755 * Current node is cur_node_depth[level].
1756 * Start at current level. If we cannot find any key left of
1757 * ours, go one level up, seek highest value left of current
1758 * (recursively), and when we find one, get the rightmost child
1759 * of its rightmost child (recursively).
1760 */
1761 for (; level > 0; level--) {
1762 uint8_t iter_key;
1763
1764 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - level - 1)));
1765 node_flag = ja_node_get_left(cur_node_depth[level - 1],
1766 iter_key);
1767 /* If found left sibling, find rightmost child. */
1768 if (ja_node_ptr(node_flag))
1769 break;
1770 }
1771
1772 if (!level) {
1773 /* Reached the root and could not find a left sibling. */
03ec1aeb 1774 return NULL;
291b2543
MD
1775 }
1776
1777 level++;
3c52f0f9
MD
1778
1779 /*
4cef6f97 1780 * From this point, we are guaranteed to be able to find a
47d2eab3
MD
1781 * "lower than" match. ja_attach_node() and ja_detach_node()
1782 * both guarantee that it is not possible for a lookup to reach
1783 * a dead-end.
3c52f0f9
MD
1784 */
1785
291b2543
MD
1786 /* Find rightmost child of rightmost child (recursively). */
1787 for (; level < tree_depth; level++) {
1788 node_flag = ja_node_get_rightmost(node_flag);
1789 /* Stop if the node has no child (not expected, see guarantee above). */
1790 if (!ja_node_ptr(node_flag))
1791 break;
1792 }
1793
4cef6f97 1794 assert(level == tree_depth);
291b2543 1795
03ec1aeb 1796 return (struct cds_ja_node *) node_flag;
291b2543
MD
1797}
1798
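/*
 * Usage sketch (illustration only, not compiled): a "floor" lookup. For
 * example, with keys 10, 20 and 30 populated, a lower-equal lookup on
 * 25 returns the item keyed 20 (the highest populated key <= 25), while
 * a lookup on 5 returns NULL. struct my_item is the hypothetical caller
 * structure from the sketch above; the caller is assumed to hold the
 * RCU read-side lock across the call and the use of the result.
 */
#if 0
static struct my_item *my_floor_lookup(struct cds_ja *ja, uint64_t key)
{
	struct cds_ja_node *node;

	node = cds_ja_lookup_lower_equal(ja, key);	/* highest key <= key */
	if (!node)
		return NULL;	/* all populated keys are greater than key */
	return caa_container_of(node, struct my_item, ja_node);
}
#endif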
5a9a87dd
MD
1799/*
1800 * We reached an unpopulated node. Create it and the children we need,
1801 * and then attach the entire branch to the current node. This may
1802 * trigger recompaction of the current node. Locks needed: node lock
1803 * (for add), and, possibly, parent node lock (to update pointer due to
1804 * node recompaction).
1805 *
1806 * First take node lock, check if recompaction is needed, then take
1807 * parent lock (if needed). Then we can proceed to create the new
1808 * branch. Publish the new branch, and release locks.
1809 * TODO: we currently always take the parent lock even when not needed.
47d2eab3
MD
1810 *
1811 * ja_attach_node() ensures that a lookup will _never_ see a branch that
1812 * leads to a dead-end: the entire content of the new branch is
1813 * populated first, creating a self-contained cluster, and only then is
1814 * the cluster attached to the rest of the tree, thus making it visible
1815 * to lookups.
5a9a87dd
MD
1816 */
1817static
1818int ja_attach_node(struct cds_ja *ja,
b0ca2d21 1819 struct cds_ja_inode_flag **attach_node_flag_ptr,
b62a8d0c 1820 struct cds_ja_inode_flag *attach_node_flag,
48cbe001
MD
1821 struct cds_ja_inode_flag *parent_attach_node_flag,
1822 struct cds_ja_inode_flag **old_node_flag_ptr,
1823 struct cds_ja_inode_flag *old_node_flag,
5a9a87dd 1824 uint64_t key,
79b41067 1825 unsigned int level,
5a9a87dd
MD
1826 struct cds_ja_node *child_node)
1827{
1828 struct cds_ja_shadow_node *shadow_node = NULL,
af3cbd45 1829 *parent_shadow_node = NULL;
5a9a87dd
MD
1830 struct cds_ja_inode_flag *iter_node_flag, *iter_dest_node_flag;
1831 int ret, i;
a2a7ff59 1832 struct cds_ja_inode_flag *created_nodes[JA_MAX_DEPTH];
5a9a87dd
MD
1833 int nr_created_nodes = 0;
1834
48cbe001
MD
1835 dbg_printf("Attach node at level %u (old_node_flag %p, attach_node_flag_ptr %p attach_node_flag %p, parent_attach_node_flag %p)\n",
1836 level, old_node_flag, attach_node_flag_ptr, attach_node_flag, parent_attach_node_flag);
a2a7ff59 1837
48cbe001
MD
1838 assert(!old_node_flag);
1839 if (attach_node_flag) {
1840 shadow_node = rcuja_shadow_lookup_lock(ja->ht, attach_node_flag);
1841 if (!shadow_node) {
1842 ret = -EAGAIN;
1843 goto end;
1844 }
5a9a87dd 1845 }
48cbe001 1846 if (parent_attach_node_flag) {
5a9a87dd 1847 parent_shadow_node = rcuja_shadow_lookup_lock(ja->ht,
48cbe001 1848 parent_attach_node_flag);
5a9a87dd 1849 if (!parent_shadow_node) {
2e313670 1850 ret = -EAGAIN;
5a9a87dd
MD
1851 goto unlock_shadow;
1852 }
1853 }
1854
48cbe001 1855 if (old_node_flag_ptr && ja_node_ptr(*old_node_flag_ptr)) {
b306a0fe 1856 /*
c112acaa
MD
1857 * Target node has been updated between RCU lookup and
1858 * lock acquisition. We need to re-try lookup and
1859 * attach.
1860 */
1861 ret = -EAGAIN;
1862 goto unlock_parent;
1863 }
1864
9be99d4a
MD
1865 /*
1866 * Perform a lookup query to handle the case where
1867 * old_node_flag_ptr is NULL. We cannot use it to check if the
1868 * node has been populated between RCU lookup and mutex
1869 * acquisition.
1870 */
1871 if (!old_node_flag_ptr) {
1872 uint8_t iter_key;
1873 struct cds_ja_inode_flag *lookup_node_flag;
1874 struct cds_ja_inode_flag **lookup_node_flag_ptr;
1875
1876 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (ja->tree_depth - level)));
1877 lookup_node_flag = ja_node_get_nth(attach_node_flag,
1878 &lookup_node_flag_ptr,
1879 iter_key);
1880 if (lookup_node_flag) {
1881 ret = -EEXIST;
1882 goto unlock_parent;
1883 }
1884 }
1885
c112acaa 1886 if (attach_node_flag_ptr && ja_node_ptr(*attach_node_flag_ptr) !=
b62a8d0c 1887 ja_node_ptr(attach_node_flag)) {
c112acaa
MD
1888 /*
1889 * Target node has been updated between RCU lookup and
1890 * lock acquisition. We need to re-try lookup and
1891 * attach.
b306a0fe
MD
1892 */
1893 ret = -EAGAIN;
1894 goto unlock_parent;
1895 }
1896
a2a7ff59 1897 /* Create new branch, starting from bottom */
03ec1aeb 1898 iter_node_flag = (struct cds_ja_inode_flag *) child_node;
5a9a87dd 1899
48cbe001 1900 for (i = ja->tree_depth - 1; i >= (int) level; i--) {
79b41067
MD
1901 uint8_t iter_key;
1902
48cbe001 1903 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (ja->tree_depth - i - 1)));
79b41067 1904 dbg_printf("branch creation level %d, key %u\n",
48cbe001 1905 i, (unsigned int) iter_key);
5a9a87dd
MD
1906 iter_dest_node_flag = NULL;
1907 ret = ja_node_set_nth(ja, &iter_dest_node_flag,
79b41067 1908 iter_key,
5a9a87dd 1909 iter_node_flag,
48cbe001 1910 NULL, i);
9be99d4a
MD
1911 if (ret) {
1912 dbg_printf("branch creation error %d\n", ret);
5a9a87dd 1913 goto check_error;
9be99d4a 1914 }
5a9a87dd
MD
1915 created_nodes[nr_created_nodes++] = iter_dest_node_flag;
1916 iter_node_flag = iter_dest_node_flag;
1917 }
48cbe001 1918 assert(level > 0);
5a9a87dd 1919
48cbe001
MD
1920 /* Publish branch */
1921 if (level == 1) {
1922 /*
1923 * Attaching to root node.
1924 */
1925 rcu_assign_pointer(ja->root, iter_node_flag);
1926 } else {
79b41067
MD
1927 uint8_t iter_key;
1928
1929 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (ja->tree_depth - level)));
48cbe001
MD
1930 dbg_printf("publish branch at level %d, key %u\n",
1931 level - 1, (unsigned int) iter_key);
a2a7ff59 1932 /* We need to use set_nth on the previous level. */
48cbe001 1933 iter_dest_node_flag = attach_node_flag;
a2a7ff59 1934 ret = ja_node_set_nth(ja, &iter_dest_node_flag,
79b41067 1935 iter_key,
a2a7ff59 1936 iter_node_flag,
48cbe001 1937 shadow_node, level - 1);
9be99d4a
MD
1938 if (ret) {
1939 dbg_printf("branch publish error %d\n", ret);
a2a7ff59 1940 goto check_error;
9be99d4a 1941 }
48cbe001
MD
1942 /*
1943 * Attach branch
1944 */
1945 rcu_assign_pointer(*attach_node_flag_ptr, iter_dest_node_flag);
a2a7ff59
MD
1946 }
1947
5a9a87dd
MD
1948 /* Success */
1949 ret = 0;
1950
1951check_error:
1952 if (ret) {
1953 for (i = 0; i < nr_created_nodes; i++) {
1954 int tmpret;
a2a7ff59
MD
1955 int flags;
1956
1957 flags = RCUJA_SHADOW_CLEAR_FREE_LOCK;
1958 if (i)
1959 flags |= RCUJA_SHADOW_CLEAR_FREE_NODE;
5a9a87dd 1960 tmpret = rcuja_shadow_clear(ja->ht,
3d8fe307 1961 created_nodes[i],
a2a7ff59
MD
1962 NULL,
1963 flags);
5a9a87dd
MD
1964 assert(!tmpret);
1965 }
1966 }
b306a0fe 1967unlock_parent:
5a9a87dd
MD
1968 if (parent_shadow_node)
1969 rcuja_shadow_unlock(parent_shadow_node);
1970unlock_shadow:
1971 if (shadow_node)
1972 rcuja_shadow_unlock(shadow_node);
1973end:
1974 return ret;
1975}
1976
1977/*
03ec1aeb
MD
1978 * Lock the parent containing the pointer to the list of duplicates, and
1979 * add the node to this list. Failure can happen if a concurrent update
1980 * changes the parent before we get the lock. We return -EAGAIN in that case.
5a9a87dd
MD
1981 * Return 0 on success, negative error value on failure.
1982 */
1983static
1984int ja_chain_node(struct cds_ja *ja,
af3cbd45 1985 struct cds_ja_inode_flag *parent_node_flag,
fa112799 1986 struct cds_ja_inode_flag **node_flag_ptr,
c112acaa 1987 struct cds_ja_inode_flag *node_flag,
5a9a87dd
MD
1988 struct cds_ja_node *node)
1989{
1990 struct cds_ja_shadow_node *shadow_node;
fa112799 1991 int ret = 0;
5a9a87dd 1992
3d8fe307 1993 shadow_node = rcuja_shadow_lookup_lock(ja->ht, parent_node_flag);
b306a0fe 1994 if (!shadow_node) {
2e313670 1995 return -EAGAIN;
b306a0fe 1996 }
c112acaa 1997 if (ja_node_ptr(*node_flag_ptr) != ja_node_ptr(node_flag)) {
fa112799
MD
1998 ret = -EAGAIN;
1999 goto end;
2000 }
03ec1aeb
MD
2001 /*
2002 * Add node to head of list. Safe against concurrent RCU read
2003 * traversals.
2004 */
2005 node->next = (struct cds_ja_node *) node_flag;
2006 rcu_assign_pointer(*node_flag_ptr, (struct cds_ja_inode_flag *) node);
fa112799 2007end:
5a9a87dd 2008 rcuja_shadow_unlock(shadow_node);
fa112799 2009 return ret;
5a9a87dd
MD
2010}
2011
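/*
 * Usage sketch (illustration only, not compiled): walking the duplicate
 * chain stored under a single key with the cds_ja_for_each_duplicate_rcu()
 * iterator also used further below in cds_ja_del(). The whole traversal
 * is assumed to run under the RCU read-side lock; my_visit_key() and the
 * visit callback are hypothetical caller-side names, and struct my_item
 * is the caller structure from the earlier lookup sketch.
 */
#if 0
static void my_visit_key(struct cds_ja *ja, uint64_t key,
		void (*visit)(struct my_item *item))
{
	struct cds_ja_node *iter;

	iter = cds_ja_lookup(ja, key);	/* head of the duplicate chain, or NULL */
	cds_ja_for_each_duplicate_rcu(iter) {
		visit(caa_container_of(iter, struct my_item, ja_node));
	}
}
#endif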
75d573aa
MD
2012static
2013int _cds_ja_add(struct cds_ja *ja, uint64_t key,
2014 struct cds_ja_node *new_node,
2015 struct cds_ja_node **unique_node_ret)
5a9a87dd
MD
2016{
2017 unsigned int tree_depth, i;
48cbe001 2018 struct cds_ja_inode_flag *attach_node_flag,
5a9a87dd 2019 *parent_node_flag,
b62a8d0c 2020 *parent2_node_flag,
48cbe001
MD
2021 *node_flag,
2022 *parent_attach_node_flag;
2023 struct cds_ja_inode_flag **attach_node_flag_ptr,
2024 **parent_node_flag_ptr,
2025 **node_flag_ptr;
5a9a87dd
MD
2026 int ret;
2027
b306a0fe 2028 if (caa_unlikely(key > ja->key_max)) {
5a9a87dd 2029 return -EINVAL;
b306a0fe 2030 }
5a9a87dd
MD
2031 tree_depth = ja->tree_depth;
2032
2033retry:
a2a7ff59
MD
2034 dbg_printf("cds_ja_add attempt: key %" PRIu64 ", node %p\n",
2035 key, new_node);
5a9a87dd 2036 parent2_node_flag = NULL;
b0f74e47
MD
2037 parent_node_flag =
2038 (struct cds_ja_inode_flag *) &ja->root; /* Use root ptr address as key for mutex */
48cbe001 2039 parent_node_flag_ptr = NULL;
35170a44 2040 node_flag = rcu_dereference(ja->root);
48cbe001 2041 node_flag_ptr = &ja->root;
5a9a87dd
MD
2042
2043 /* Iterate on all internal levels */
a2a7ff59 2044 for (i = 1; i < tree_depth; i++) {
79b41067
MD
2045 uint8_t iter_key;
2046
48cbe001
MD
2047 if (!ja_node_ptr(node_flag))
2048 break;
2049 dbg_printf("cds_ja_add iter parent2_node_flag %p parent_node_flag %p node_flag_ptr %p node_flag %p\n",
2050 parent2_node_flag, parent_node_flag, node_flag_ptr, node_flag);
79b41067 2051 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1)));
5a9a87dd
MD
2052 parent2_node_flag = parent_node_flag;
2053 parent_node_flag = node_flag;
48cbe001 2054 parent_node_flag_ptr = node_flag_ptr;
5a9a87dd
MD
2055 node_flag = ja_node_get_nth(node_flag,
2056 &node_flag_ptr,
79b41067 2057 iter_key);
5a9a87dd
MD
2058 }
2059
2060 /*
48cbe001
MD
2061 * We reached either the bottom of the tree or an internal NULL node:
2062 * simply add the node to the last internal level, or chain it if the
2063 * key is already present.
5a9a87dd
MD
2064 */
2065 if (!ja_node_ptr(node_flag)) {
48cbe001
MD
2066 dbg_printf("cds_ja_add NULL parent2_node_flag %p parent_node_flag %p node_flag_ptr %p node_flag %p\n",
2067 parent2_node_flag, parent_node_flag, node_flag_ptr, node_flag);
75d573aa 2068
48cbe001
MD
2069 attach_node_flag = parent_node_flag;
2070 attach_node_flag_ptr = parent_node_flag_ptr;
2071 parent_attach_node_flag = parent2_node_flag;
2072
b0ca2d21 2073 ret = ja_attach_node(ja, attach_node_flag_ptr,
b62a8d0c 2074 attach_node_flag,
48cbe001
MD
2075 parent_attach_node_flag,
2076 node_flag_ptr,
2077 node_flag,
2078 key, i, new_node);
5a9a87dd 2079 } else {
75d573aa
MD
2080 if (unique_node_ret) {
2081 *unique_node_ret = (struct cds_ja_node *) ja_node_ptr(node_flag);
2082 return -EEXIST;
2083 }
2084
48cbe001
MD
2085 dbg_printf("cds_ja_add duplicate parent2_node_flag %p parent_node_flag %p node_flag_ptr %p node_flag %p\n",
2086 parent2_node_flag, parent_node_flag, node_flag_ptr, node_flag);
75d573aa 2087
48cbe001
MD
2088 attach_node_flag = node_flag;
2089 attach_node_flag_ptr = node_flag_ptr;
2090 parent_attach_node_flag = parent_node_flag;
2091
5a9a87dd 2092 ret = ja_chain_node(ja,
48cbe001
MD
2093 parent_attach_node_flag,
2094 attach_node_flag_ptr,
2095 attach_node_flag,
5a9a87dd
MD
2096 new_node);
2097 }
b306a0fe 2098 if (ret == -EAGAIN || ret == -EEXIST)
5a9a87dd 2099 goto retry;
48cbe001 2100
5a9a87dd 2101 return ret;
b4540e8a
MD
2102}
2103
75d573aa
MD
2104int cds_ja_add(struct cds_ja *ja, uint64_t key,
2105 struct cds_ja_node *new_node)
2106{
2107 return _cds_ja_add(ja, key, new_node, NULL);
2108}
2109
2110struct cds_ja_node *cds_ja_add_unique(struct cds_ja *ja, uint64_t key,
2111 struct cds_ja_node *new_node)
2112{
2113 int ret;
2114 struct cds_ja_node *ret_node;
2115
2116 ret = _cds_ja_add(ja, key, new_node, &ret_node);
2117 if (ret == -EEXIST)
2118 return ret_node;
2119 else
2120 return new_node;
2121}
2122
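/*
 * Usage sketch (illustration only, not compiled): inserting an item with
 * cds_ja_add_unique(). The item is fully initialized before being
 * published; zero-allocation also clears the embedded node (including
 * its duplicate-chain pointer). If the key is already present,
 * cds_ja_add_unique() returns the existing node instead of new_node.
 * struct my_item is the hypothetical caller structure from the lookup
 * sketch; like the other operations here, the call is assumed to run
 * under the RCU read-side lock.
 */
#if 0
static struct my_item *my_insert_unique(struct cds_ja *ja, uint64_t key,
		int payload)
{
	struct my_item *item;
	struct cds_ja_node *ret_node;

	item = calloc(1, sizeof(*item));
	if (!item)
		return NULL;
	item->key = key;
	item->payload = payload;
	ret_node = cds_ja_add_unique(ja, key, &item->ja_node);
	if (ret_node != &item->ja_node) {
		/* Key already present: discard our candidate, return the winner. */
		free(item);
		item = caa_container_of(ret_node, struct my_item, ja_node);
	}
	return item;
}
#endif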
af3cbd45
MD
2123/*
2124 * Note: there is no need to lookup the pointer address associated with
2125 * each node's nth item after taking the lock: it's already been done by
2126 * cds_ja_del while holding the rcu read-side lock, and our node rules
2127 * ensure that when a match value -> pointer is found in a node, it is
2128 * _NEVER_ changed for that node without recompaction, and recompaction
2129 * reallocates the node.
b306a0fe
MD
2130 * However, when a child is removed from a "linear" node, its pointer
2131 * is set to NULL. We therefore check, while holding the locks, if this
2132 * pointer is NULL, and return -ENOENT to the caller if it is the case.
4cef6f97
MD
2133 *
2134 * ja_detach_node() ensures that a lookup will _never_ see a branch that
2135 * leads to a dead-end: when removing a branch, it makes sure to perform
2136 * the "cut" at the highest node that has only one child, effectively
2137 * replacing it with a NULL pointer.
af3cbd45 2138 */
35170a44
MD
2139static
2140int ja_detach_node(struct cds_ja *ja,
2141 struct cds_ja_inode_flag **snapshot,
af3cbd45
MD
2142 struct cds_ja_inode_flag ***snapshot_ptr,
2143 uint8_t *snapshot_n,
35170a44
MD
2144 int nr_snapshot,
2145 uint64_t key,
2146 struct cds_ja_node *node)
2147{
af3cbd45
MD
2148 struct cds_ja_shadow_node *shadow_nodes[JA_MAX_DEPTH];
2149 struct cds_ja_inode_flag **node_flag_ptr = NULL,
2150 *parent_node_flag = NULL,
2151 **parent_node_flag_ptr = NULL;
b62a8d0c 2152 struct cds_ja_inode_flag *iter_node_flag;
4d6ef45e
MD
2153 int ret, i, nr_shadow = 0, nr_clear = 0, nr_branch = 0;
2154 uint8_t n = 0;
35170a44 2155
4d6ef45e 2156 assert(nr_snapshot == ja->tree_depth + 1);
35170a44 2157
af3cbd45
MD
2158 /*
2159 * From the last internal level node going up, get the node
2160 * lock, check if the node has only one child left. If it is the
2161 * case, we continue iterating upward. When we reach a node
2162 * which has more than one child left, we lock the parent, and
2163 * proceed to the node deletion (removing its children too).
2164 */
4d6ef45e 2165 for (i = nr_snapshot - 2; i >= 1; i--) {
af3cbd45
MD
2166 struct cds_ja_shadow_node *shadow_node;
2167
2168 shadow_node = rcuja_shadow_lookup_lock(ja->ht,
3d8fe307 2169 snapshot[i]);
af3cbd45
MD
2170 if (!shadow_node) {
2171 ret = -EAGAIN;
2172 goto end;
2173 }
af3cbd45 2174 shadow_nodes[nr_shadow++] = shadow_node;
b62a8d0c
MD
2175
2176 /*
2177 * Check if node has been removed between RCU
2178 * lookup and lock acquisition.
2179 */
2180 assert(snapshot_ptr[i + 1]);
2181 if (ja_node_ptr(*snapshot_ptr[i + 1])
2182 != ja_node_ptr(snapshot[i + 1])) {
2183 ret = -ENOENT;
2184 goto end;
2185 }
2186
2187 assert(shadow_node->nr_child > 0);
d810c97f 2188 if (shadow_node->nr_child == 1 && i > 1)
4d6ef45e
MD
2189 nr_clear++;
2190 nr_branch++;
af3cbd45
MD
2191 if (shadow_node->nr_child > 1 || i == 1) {
2192 /* Lock parent and break */
2193 shadow_node = rcuja_shadow_lookup_lock(ja->ht,
3d8fe307 2194 snapshot[i - 1]);
af3cbd45
MD
2195 if (!shadow_node) {
2196 ret = -EAGAIN;
2197 goto end;
2198 }
2199 shadow_nodes[nr_shadow++] = shadow_node;
b62a8d0c 2200
c112acaa
MD
2201 /*
2202 * Check if node has been removed between RCU
2203 * lookup and lock acquisition.
2204 */
b62a8d0c
MD
2205 assert(snapshot_ptr[i]);
2206 if (ja_node_ptr(*snapshot_ptr[i])
2207 != ja_node_ptr(snapshot[i])) {
c112acaa
MD
2208 ret = -ENOENT;
2209 goto end;
2210 }
2211
b62a8d0c 2212 node_flag_ptr = snapshot_ptr[i + 1];
4d6ef45e
MD
2213 n = snapshot_n[i + 1];
2214 parent_node_flag_ptr = snapshot_ptr[i];
2215 parent_node_flag = snapshot[i];
c112acaa 2216
af3cbd45
MD
2217 if (i > 1) {
2218 /*
2219 * Lock parent's parent, in case we need
2220 * to recompact parent.
2221 */
2222 shadow_node = rcuja_shadow_lookup_lock(ja->ht,
3d8fe307 2223 snapshot[i - 2]);
af3cbd45
MD
2224 if (!shadow_node) {
2225 ret = -EAGAIN;
2226 goto end;
2227 }
2228 shadow_nodes[nr_shadow++] = shadow_node;
b62a8d0c
MD
2229
2230 /*
2231 * Check if node has been removed between RCU
2232 * lookup and lock acquisition.
2233 */
2234 assert(snapshot_ptr[i - 1]);
2235 if (ja_node_ptr(*snapshot_ptr[i - 1])
2236 != ja_node_ptr(snapshot[i - 1])) {
2237 ret = -ENOENT;
2238 goto end;
2239 }
af3cbd45 2240 }
b62a8d0c 2241
af3cbd45
MD
2242 break;
2243 }
2244 }
2245
2246 /*
4d6ef45e
MD
2247 * At this point, we want to delete all nodes that are about to
2248 * be removed from shadow_nodes (except the last one, which is
2249 * either the root or the parent of the uppermost node with 1
b62a8d0c
MD
2250 * child). It is OK to free the lock here, because the RCU read lock is
2251 * held, and the actual free is only performed from call_rcu.
af3cbd45
MD
2252 */
2253
2254 for (i = 0; i < nr_clear; i++) {
2255 ret = rcuja_shadow_clear(ja->ht,
3d8fe307 2256 shadow_nodes[i]->node_flag,
af3cbd45
MD
2257 shadow_nodes[i],
2258 RCUJA_SHADOW_CLEAR_FREE_NODE
2259 | RCUJA_SHADOW_CLEAR_FREE_LOCK);
2260 assert(!ret);
2261 }
2262
2263 iter_node_flag = parent_node_flag;
2264 /* Remove from parent */
2265 ret = ja_node_clear_ptr(ja,
2266 node_flag_ptr, /* Pointer to location to nullify */
2267 &iter_node_flag, /* In: old parent ptr; out: new parent ptr to publish in its parent */
4d6ef45e 2268 shadow_nodes[nr_branch - 1], /* of parent */
48cbe001 2269 n, nr_branch - 1);
b306a0fe
MD
2270 if (ret)
2271 goto end;
af3cbd45 2272
4d6ef45e
MD
2273 dbg_printf("ja_detach_node: publish %p instead of %p\n",
2274 iter_node_flag, *parent_node_flag_ptr);
af3cbd45
MD
2275 /* Update address of parent ptr in its parent */
2276 rcu_assign_pointer(*parent_node_flag_ptr, iter_node_flag);
2277
2278end:
2279 for (i = 0; i < nr_shadow; i++)
2280 rcuja_shadow_unlock(shadow_nodes[i]);
35170a44
MD
2281 return ret;
2282}
2283
af3cbd45
MD
2284static
2285int ja_unchain_node(struct cds_ja *ja,
2286 struct cds_ja_inode_flag *parent_node_flag,
fa112799 2287 struct cds_ja_inode_flag **node_flag_ptr,
013a6083 2288 struct cds_ja_inode_flag *node_flag,
af3cbd45
MD
2289 struct cds_ja_node *node)
2290{
2291 struct cds_ja_shadow_node *shadow_node;
03ec1aeb 2292 struct cds_ja_node *iter_node, **iter_node_ptr, **prev_node_ptr = NULL;
013a6083 2293 int ret = 0, count = 0, found = 0;
af3cbd45 2294
3d8fe307 2295 shadow_node = rcuja_shadow_lookup_lock(ja->ht, parent_node_flag);
af3cbd45
MD
2296 if (!shadow_node)
2297 return -EAGAIN;
013a6083 2298 if (ja_node_ptr(*node_flag_ptr) != ja_node_ptr(node_flag)) {
fa112799
MD
2299 ret = -EAGAIN;
2300 goto end;
2301 }
af3cbd45 2302 /*
03ec1aeb
MD
2303 * Find the previous node's next pointer pointing to our node,
2304 * so we can update it. Retry if another thread removed all but one
2305 * of the duplicates since our check (that check was performed without
2306 * the lock). While holding the lock, ensure that the node we are
2307 * about to remove is still in the list. No need for RCU
2308 * traversal here since we hold the lock on the parent.
af3cbd45 2309 */
03ec1aeb
MD
2310 iter_node_ptr = (struct cds_ja_node **) node_flag_ptr;
2311 iter_node = (struct cds_ja_node *) ja_node_ptr(node_flag);
2312 cds_ja_for_each_duplicate(iter_node) {
f2758d14 2313 count++;
03ec1aeb
MD
2314 if (iter_node == node) {
2315 prev_node_ptr = iter_node_ptr;
013a6083 2316 found++;
03ec1aeb
MD
2317 }
2318 iter_node_ptr = &iter_node->next;
f2758d14 2319 }
013a6083
MD
2320 assert(found <= 1);
2321 if (!found || count == 1) {
af3cbd45
MD
2322 ret = -EAGAIN;
2323 goto end;
2324 }
03ec1aeb 2325 CMM_STORE_SHARED(*prev_node_ptr, node->next);
ade342cb
MD
2326 /*
2327 * Validate that we indeed removed the node from linked list.
2328 */
2329 assert(ja_node_ptr(*node_flag_ptr) != (struct cds_ja_inode *) node);
af3cbd45
MD
2330end:
2331 rcuja_shadow_unlock(shadow_node);
2332 return ret;
2333}
2334
2335/*
2336 * Called with RCU read lock held.
2337 */
35170a44
MD
2338int cds_ja_del(struct cds_ja *ja, uint64_t key,
2339 struct cds_ja_node *node)
2340{
2341 unsigned int tree_depth, i;
2342 struct cds_ja_inode_flag *snapshot[JA_MAX_DEPTH];
af3cbd45
MD
2343 struct cds_ja_inode_flag **snapshot_ptr[JA_MAX_DEPTH];
2344 uint8_t snapshot_n[JA_MAX_DEPTH];
35170a44 2345 struct cds_ja_inode_flag *node_flag;
fa112799
MD
2346 struct cds_ja_inode_flag **prev_node_flag_ptr,
2347 **node_flag_ptr;
4d6ef45e 2348 int nr_snapshot;
35170a44
MD
2349 int ret;
2350
2351 if (caa_unlikely(key > ja->key_max))
2352 return -EINVAL;
2353 tree_depth = ja->tree_depth;
2354
2355retry:
4d6ef45e 2356 nr_snapshot = 0;
35170a44
MD
2357 dbg_printf("cds_ja_del attempt: key %" PRIu64 ", node %p\n",
2358 key, node);
2359
2360 /* snapshot for level 0 is only for shadow node lookup */
4d6ef45e
MD
2361 snapshot_n[0] = 0;
2362 snapshot_n[1] = 0;
af3cbd45 2363 snapshot_ptr[nr_snapshot] = NULL;
35170a44
MD
2364 snapshot[nr_snapshot++] = (struct cds_ja_inode_flag *) &ja->root;
2365 node_flag = rcu_dereference(ja->root);
af3cbd45 2366 prev_node_flag_ptr = &ja->root;
fa112799 2367 node_flag_ptr = &ja->root;
35170a44
MD
2368
2369 /* Iterate on all internal levels */
2370 for (i = 1; i < tree_depth; i++) {
2371 uint8_t iter_key;
2372
2373 dbg_printf("cds_ja_del iter node_flag %p\n",
2374 node_flag);
2375 if (!ja_node_ptr(node_flag)) {
2376 return -ENOENT;
2377 }
35170a44 2378 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1)));
4d6ef45e 2379 snapshot_n[nr_snapshot + 1] = iter_key;
af3cbd45
MD
2380 snapshot_ptr[nr_snapshot] = prev_node_flag_ptr;
2381 snapshot[nr_snapshot++] = node_flag;
35170a44 2382 node_flag = ja_node_get_nth(node_flag,
fa112799 2383 &node_flag_ptr,
35170a44 2384 iter_key);
48cbe001
MD
2385 if (node_flag)
2386 prev_node_flag_ptr = node_flag_ptr;
af3cbd45
MD
2387 dbg_printf("cds_ja_del iter key lookup %u finds node_flag %p, prev_node_flag_ptr %p\n",
2388 (unsigned int) iter_key, node_flag,
2389 prev_node_flag_ptr);
35170a44 2390 }
35170a44
MD
2391 /*
2392 * We reached the bottom of the tree; try to find the node we are trying
2393 * to remove. Fail if we cannot find it.
2394 */
2395 if (!ja_node_ptr(node_flag)) {
4d6ef45e
MD
2396 dbg_printf("cds_ja_del: no node found for key %" PRIu64 "\n",
2397 key);
35170a44
MD
2398 return -ENOENT;
2399 } else {
03ec1aeb 2400 struct cds_ja_node *iter_node, *match = NULL;
af3cbd45 2401 int count = 0;
35170a44 2402
03ec1aeb
MD
2403 iter_node = (struct cds_ja_node *) ja_node_ptr(node_flag);
2404 cds_ja_for_each_duplicate_rcu(iter_node) {
2405 dbg_printf("cds_ja_del: compare %p with iter_node %p\n", node, iter_node);
2406 if (iter_node == node)
2407 match = iter_node;
af3cbd45 2408 count++;
35170a44 2409 }
03ec1aeb 2410
4d6ef45e
MD
2411 if (!match) {
2412 dbg_printf("cds_ja_del: no node match for node %p key %" PRIu64 "\n", node, key);
35170a44 2413 return -ENOENT;
4d6ef45e 2414 }
af3cbd45
MD
2415 assert(count > 0);
2416 if (count == 1) {
2417 /*
4d6ef45e
MD
2418 * Removing the last of the duplicates. The last snapshot
2419 * does not have a shadow node (external leaves).
af3cbd45
MD
2420 */
2421 snapshot_ptr[nr_snapshot] = prev_node_flag_ptr;
2422 snapshot[nr_snapshot++] = node_flag;
2423 ret = ja_detach_node(ja, snapshot, snapshot_ptr,
2424 snapshot_n, nr_snapshot, key, node);
2425 } else {
f2758d14 2426 ret = ja_unchain_node(ja, snapshot[nr_snapshot - 1],
013a6083 2427 node_flag_ptr, node_flag, match);
af3cbd45 2428 }
35170a44 2429 }
b306a0fe
MD
2430 /*
2431 * Explanation of -ENOENT handling: caused by concurrent delete
2432 * between RCU lookup and actual removal. Need to re-do the
2433 * lookup and removal attempt.
2434 */
2435 if (ret == -EAGAIN || ret == -ENOENT)
35170a44
MD
2436 goto retry;
2437 return ret;
2438}
2439
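/*
 * Usage sketch (illustration only, not compiled): removal followed by
 * deferred reclaim. A successful cds_ja_del() only unlinks the node;
 * concurrent readers may still hold references obtained under their RCU
 * read-side lock, so the memory is handed to call_rcu() instead of
 * being freed immediately. It assumes struct my_item from the earlier
 * sketches additionally embeds a struct rcu_head rcu_head member, and
 * that an RCU flavor header providing call_rcu() is included; as noted
 * above, cds_ja_del() itself is called with the RCU read lock held.
 */
#if 0
static void free_my_item_rcu(struct rcu_head *head)
{
	free(caa_container_of(head, struct my_item, rcu_head));
}

static int my_remove(struct cds_ja *ja, struct my_item *item)
{
	int ret;

	ret = cds_ja_del(ja, item->key, &item->ja_node);
	if (!ret)
		call_rcu(&item->rcu_head, free_my_item_rcu);	/* free after grace period */
	return ret;
}
#endif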
b4540e8a
MD
2440struct cds_ja *_cds_ja_new(unsigned int key_bits,
2441 const struct rcu_flavor_struct *flavor)
be9a7474
MD
2442{
2443 struct cds_ja *ja;
b0f74e47 2444 int ret;
f07b240f 2445 struct cds_ja_shadow_node *root_shadow_node;
be9a7474
MD
2446
2447 ja = calloc(sizeof(*ja), 1);
2448 if (!ja)
2449 goto ja_error;
b4540e8a
MD
2450
2451 switch (key_bits) {
2452 case 8:
b4540e8a 2453 case 16:
1216b3d2 2454 case 24:
b4540e8a 2455 case 32:
1216b3d2
MD
2456 case 40:
2457 case 48:
2458 case 56:
2459 ja->key_max = (1ULL << key_bits) - 1;
b4540e8a
MD
2460 break;
2461 case 64:
2462 ja->key_max = UINT64_MAX;
2463 break;
2464 default:
2465 goto check_error;
2466 }
2467
be9a7474 2468 /* ja->root is NULL */
5a9a87dd 2469 /* tree_depth 0 is for pointer to root node */
582a6ade 2470 ja->tree_depth = (key_bits >> JA_LOG2_BITS_PER_BYTE) + 1;
a2a7ff59 2471 assert(ja->tree_depth <= JA_MAX_DEPTH);
be9a7474
MD
2472 ja->ht = rcuja_create_ht(flavor);
2473 if (!ja->ht)
2474 goto ht_error;
b0f74e47
MD
2475
2476 /*
2477 * Note: we should not free this node until judy array destroy.
2478 */
f07b240f 2479 root_shadow_node = rcuja_shadow_set(ja->ht,
3d8fe307 2480 (struct cds_ja_inode_flag *) &ja->root,
48cbe001 2481 NULL, ja, 0);
f07b240f
MD
2482 if (!root_shadow_node) {
2483 ret = -ENOMEM;
b0f74e47 2484 goto ht_node_error;
f07b240f 2485 }
b0f74e47 2486
be9a7474
MD
2487 return ja;
2488
b0f74e47
MD
2489ht_node_error:
2490 ret = rcuja_delete_ht(ja->ht);
2491 assert(!ret);
be9a7474 2492ht_error:
b4540e8a 2493check_error:
be9a7474
MD
2494 free(ja);
2495ja_error:
2496 return NULL;
2497}
2498
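/*
 * Usage sketch (illustration only, not compiled): array lifecycle. It
 * assumes the public header exposes a cds_ja_new(key_bits) wrapper
 * bound to the default RCU flavor; if not, _cds_ja_new(key_bits, flavor)
 * can be called directly with the chosen flavor. free_node_cb() and
 * struct my_item are hypothetical caller-side names; destruction
 * requires that no concurrent add is in progress, as documented at
 * cds_ja_destroy() further below.
 */
#if 0
static void free_node_cb(struct cds_ja_node *node)
{
	free(caa_container_of(node, struct my_item, ja_node));
}

static int my_lifecycle(void)
{
	struct cds_ja *ja;
	int ret;

	ja = cds_ja_new(16);	/* 16-bit key space: keys 0 to 65535 */
	if (!ja)
		return -ENOMEM;
	/* ... add, look up and delete items here ... */
	ret = cds_ja_destroy(ja, free_node_cb);
	return ret;
}
#endif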
3d8fe307
MD
2499/*
2500 * Called from RCU read-side CS.
2501 */
2502__attribute__((visibility("protected")))
2503void rcuja_free_all_children(struct cds_ja_shadow_node *shadow_node,
2504 struct cds_ja_inode_flag *node_flag,
21ac4c56 2505 void (*rcu_free_node)(struct cds_ja_node *node))
3d8fe307 2506{
3d8fe307
MD
2507 unsigned int type_index;
2508 struct cds_ja_inode *node;
2509 const struct cds_ja_type *type;
2510
3d8fe307
MD
2511 node = ja_node_ptr(node_flag);
2512 assert(node != NULL);
2513 type_index = ja_node_type(node_flag);
2514 type = &ja_types[type_index];
2515
2516 switch (type->type_class) {
2517 case RCU_JA_LINEAR:
2518 {
2519 uint8_t nr_child =
2520 ja_linear_node_get_nr_child(type, node);
2521 unsigned int i;
2522
2523 for (i = 0; i < nr_child; i++) {
2524 struct cds_ja_inode_flag *iter;
03ec1aeb 2525 struct cds_ja_node *node_iter, *n;
3d8fe307
MD
2526 uint8_t v;
2527
2528 ja_linear_node_get_ith_pos(type, node, i, &v, &iter);
03ec1aeb
MD
2529 node_iter = (struct cds_ja_node *) iter;
2530 cds_ja_for_each_duplicate_safe(node_iter, n) {
2531 rcu_free_node(node_iter);
3d8fe307
MD
2532 }
2533 }
2534 break;
2535 }
2536 case RCU_JA_POOL:
2537 {
2538 unsigned int pool_nr;
2539
2540 for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) {
2541 struct cds_ja_inode *pool =
2542 ja_pool_node_get_ith_pool(type, node, pool_nr);
2543 uint8_t nr_child =
2544 ja_linear_node_get_nr_child(type, pool);
2545 unsigned int j;
2546
2547 for (j = 0; j < nr_child; j++) {
2548 struct cds_ja_inode_flag *iter;
03ec1aeb 2549 struct cds_ja_node *node_iter, *n;
3d8fe307
MD
2550 uint8_t v;
2551
75d573aa 2552 ja_linear_node_get_ith_pos(type, pool, j, &v, &iter);
03ec1aeb
MD
2553 node_iter = (struct cds_ja_node *) iter;
2554 cds_ja_for_each_duplicate_safe(node_iter, n) {
2555 rcu_free_node(node_iter);
3d8fe307
MD
2556 }
2557 }
2558 }
2559 break;
2560 }
2561 case RCU_JA_NULL:
2562 break;
2563 case RCU_JA_PIGEON:
2564 {
3d8fe307
MD
2565 unsigned int i;
2566
48cbe001 2567 for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
3d8fe307 2568 struct cds_ja_inode_flag *iter;
03ec1aeb 2569 struct cds_ja_node *node_iter, *n;
3d8fe307
MD
2570
2571 iter = ja_pigeon_node_get_ith_pos(type, node, i);
03ec1aeb
MD
2572 node_iter = (struct cds_ja_node *) iter;
2573 cds_ja_for_each_duplicate_safe(node_iter, n) {
2574 rcu_free_node(node_iter);
3d8fe307
MD
2575 }
2576 }
2577 break;
2578 }
2579 default:
2580 assert(0);
2581 }
2582}
2583
19ddcd04 2584static
354981c2 2585void print_debug_fallback_distribution(struct cds_ja *ja)
19ddcd04
MD
2586{
2587 int i;
2588
2589 fprintf(stderr, "Fallback node distribution:\n");
2590 for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
354981c2 2591 if (!ja->node_fallback_count_distribution[i])
19ddcd04
MD
2592 continue;
2593 fprintf(stderr, " %3u: %4lu\n",
354981c2 2594 i, ja->node_fallback_count_distribution[i]);
19ddcd04
MD
2595 }
2596}
2597
021c72c0 2598static
19a748d9 2599int ja_final_checks(struct cds_ja *ja)
021c72c0
MD
2600{
2601 double fallback_ratio;
2602 unsigned long na, nf, nr_fallback;
19a748d9 2603 int ret = 0;
021c72c0
MD
2604
2605 fallback_ratio = (double) uatomic_read(&ja->nr_fallback);
2606 fallback_ratio /= (double) uatomic_read(&ja->nr_nodes_allocated);
2607 nr_fallback = uatomic_read(&ja->nr_fallback);
2608 if (nr_fallback)
2609 fprintf(stderr,
2610 "[warning] RCU Judy Array used %lu fallback node(s) (ratio: %g)\n",
2611 uatomic_read(&ja->nr_fallback),
2612 fallback_ratio);
2613
2614 na = uatomic_read(&ja->nr_nodes_allocated);
2615 nf = uatomic_read(&ja->nr_nodes_freed);
19a748d9
MD
2616 dbg_printf("Nodes allocated: %lu, Nodes freed: %lu.\n", na, nf);
2617 if (nr_fallback)
2618 print_debug_fallback_distribution(ja);
2619
021c72c0
MD
2620 if (na != nf) {
2621 fprintf(stderr, "[error] Judy array leaked %ld nodes. Allocated: %lu, freed: %lu.\n",
2622 (long) na - nf, na, nf);
19a748d9 2623 ret = -1;
021c72c0 2624 }
19a748d9 2625 return ret;
021c72c0
MD
2626}
2627
be9a7474
MD
2628/*
2629 * There should be no more concurrent add to the judy array while it is
2630 * being destroyed (ensured by the caller).
2631 */
3d8fe307 2632int cds_ja_destroy(struct cds_ja *ja,
21ac4c56 2633 void (*rcu_free_node)(struct cds_ja_node *node))
be9a7474 2634{
48cbe001 2635 const struct rcu_flavor_struct *flavor;
b4540e8a
MD
2636 int ret;
2637
48cbe001 2638 flavor = cds_lfht_rcu_flavor(ja->ht);
be9a7474 2639 rcuja_shadow_prune(ja->ht,
3d8fe307 2640 RCUJA_SHADOW_CLEAR_FREE_NODE | RCUJA_SHADOW_CLEAR_FREE_LOCK,
21ac4c56 2641 rcu_free_node);
48cbe001 2642 flavor->thread_offline();
b4540e8a
MD
2643 ret = rcuja_delete_ht(ja->ht);
2644 if (ret)
2645 return ret;
f2ae7af7
MD
2646
2647 /* Wait for in-flight call_rcu free to complete. */
2648 flavor->barrier();
2649
48cbe001 2650 flavor->thread_online();
19a748d9 2651 ret = ja_final_checks(ja);
b4540e8a 2652 free(ja);
19a748d9 2653 return ret;
be9a7474 2654}