rcuja fix: fix 2d distance calculation
1/*
2 * rcuja/rcuja.c
3 *
4 * Userspace RCU library - RCU Judy Array
5 *
6 * Copyright 2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 */
22
195e72d3 23#define _LGPL_SOURCE
e5227865 24#include <stdint.h>
8e519e3c 25#include <errno.h>
d68c6810 26#include <limits.h>
b1a90ce3 27#include <string.h>
61009379 28#include <urcu/rcuja.h>
d68c6810
MD
29#include <urcu/compiler.h>
30#include <urcu/arch.h>
31#include <assert.h>
8e519e3c 32#include <urcu-pointer.h>
f07b240f 33#include <urcu/uatomic.h>
b4540e8a 34#include <stdint.h>
8e519e3c 35
61009379 36#include "rcuja-internal.h"
d68c6810 37#include "bitfield.h"
61009379 38
b1a90ce3
MD
 39#ifndef abs_int
40#define abs_int(a) ((int) (a) > 0 ? (int) (a) : -((int) (a)))
41#endif
42
d96bfb0d 43enum cds_ja_type_class {
e5227865 44 RCU_JA_LINEAR = 0, /* Type A */
fd800776
MD
45 /* 32-bit: 1 to 25 children, 8 to 128 bytes */
46 /* 64-bit: 1 to 28 children, 16 to 256 bytes */
47 RCU_JA_POOL = 1, /* Type B */
48 /* 32-bit: 26 to 100 children, 256 to 512 bytes */
49 /* 64-bit: 29 to 112 children, 512 to 1024 bytes */
e5227865 50 RCU_JA_PIGEON = 2, /* Type C */
fd800776
MD
51 /* 32-bit: 101 to 256 children, 1024 bytes */
52 /* 64-bit: 113 to 256 children, 2048 bytes */
e5227865 53 /* Leaf nodes are implicit from their height in the tree */
1db4943c 54 RCU_JA_NR_TYPES,
e1db2db5
MD
55
56 RCU_JA_NULL, /* not an encoded type, but keeps code regular */
e5227865
MD
57};
58
d96bfb0d
MD
59struct cds_ja_type {
60 enum cds_ja_type_class type_class;
8e519e3c
MD
61 uint16_t min_child; /* minimum number of children: 1 to 256 */
62 uint16_t max_child; /* maximum number of children: 1 to 256 */
63 uint16_t max_linear_child; /* per-pool max nr. children: 1 to 256 */
64 uint16_t order; /* node size is (1 << order), in bytes */
fd800776
MD
65 uint16_t nr_pool_order; /* number of pools */
66 uint16_t pool_size_order; /* pool size */
e5227865
MD
67};
68
69/*
70 * Iteration on the array to find the right node size for the number of
d68c6810 71 * children stops when it reaches .max_child == 256 (this is the largest
e5227865 72 * possible node size, which contains 256 children).
d68c6810
MD
 73 * The min_child overlaps with the previous max_child to provide a
 74 * hysteresis loop for reallocation on patterns of cyclic add/removal
 75 * within the same node.
 76 * The node index within the following arrays is represented on 3
 77 * bits. It identifies the node type, min/max number of children, and
 78 * the size order.
3d45251f
MD
 79 * The max_child values for the RCU_JA_POOL below result from
 80 * statistical approximation: over a million populations, the max_child
 81 * covers between 97% and 99% of the populations generated. Therefore, a
 82 * fallback should exist to cover the rare extreme population imbalance
 83 * cases, but it will not have a major impact on speed or space
 84 * consumption, since those are rare cases.
e5227865 85 */
e5227865 86
d68c6810
MD
87#if (CAA_BITS_PER_LONG < 64)
88/* 32-bit pointers */
1db4943c
MD
89enum {
90 ja_type_0_max_child = 1,
91 ja_type_1_max_child = 3,
92 ja_type_2_max_child = 6,
93 ja_type_3_max_child = 12,
94 ja_type_4_max_child = 25,
95 ja_type_5_max_child = 48,
96 ja_type_6_max_child = 92,
97 ja_type_7_max_child = 256,
e1db2db5 98 ja_type_8_max_child = 0, /* NULL */
1db4943c
MD
99};
100
8e519e3c
MD
101enum {
102 ja_type_0_max_linear_child = 1,
103 ja_type_1_max_linear_child = 3,
104 ja_type_2_max_linear_child = 6,
105 ja_type_3_max_linear_child = 12,
106 ja_type_4_max_linear_child = 25,
107 ja_type_5_max_linear_child = 24,
108 ja_type_6_max_linear_child = 23,
109};
110
1db4943c
MD
111enum {
112 ja_type_5_nr_pool_order = 1,
113 ja_type_6_nr_pool_order = 2,
114};
115
d96bfb0d 116const struct cds_ja_type ja_types[] = {
8e519e3c
MD
117 { .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_0_max_child, .max_linear_child = ja_type_0_max_linear_child, .order = 3, },
118 { .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_1_max_child, .max_linear_child = ja_type_1_max_linear_child, .order = 4, },
119 { .type_class = RCU_JA_LINEAR, .min_child = 3, .max_child = ja_type_2_max_child, .max_linear_child = ja_type_2_max_linear_child, .order = 5, },
120 { .type_class = RCU_JA_LINEAR, .min_child = 4, .max_child = ja_type_3_max_child, .max_linear_child = ja_type_3_max_linear_child, .order = 6, },
121 { .type_class = RCU_JA_LINEAR, .min_child = 10, .max_child = ja_type_4_max_child, .max_linear_child = ja_type_4_max_linear_child, .order = 7, },
e5227865 122
fd800776 123 /* Pools may fill sooner than max_child */
8e519e3c
MD
124 { .type_class = RCU_JA_POOL, .min_child = 20, .max_child = ja_type_5_max_child, .max_linear_child = ja_type_5_max_linear_child, .order = 8, .nr_pool_order = ja_type_5_nr_pool_order, .pool_size_order = 7, },
125 { .type_class = RCU_JA_POOL, .min_child = 45, .max_child = ja_type_6_max_child, .max_linear_child = ja_type_6_max_linear_child, .order = 9, .nr_pool_order = ja_type_6_nr_pool_order, .pool_size_order = 7, },
3d45251f
MD
126
127 /*
b1a90ce3
MD
128 * Upon node removal below min_child, if child pool is filled
129 * beyond capacity, we roll back to pigeon.
3d45251f 130 */
1db4943c 131 { .type_class = RCU_JA_PIGEON, .min_child = 89, .max_child = ja_type_7_max_child, .order = 10, },
e1db2db5
MD
132
133 { .type_class = RCU_JA_NULL, .min_child = 0, .max_child = ja_type_8_max_child, },
d68c6810 134};
d68c6810
MD
135#else /* !(CAA_BITS_PER_LONG < 64) */
136/* 64-bit pointers */
1db4943c
MD
137enum {
138 ja_type_0_max_child = 1,
139 ja_type_1_max_child = 3,
140 ja_type_2_max_child = 7,
141 ja_type_3_max_child = 14,
142 ja_type_4_max_child = 28,
143 ja_type_5_max_child = 54,
144 ja_type_6_max_child = 104,
145 ja_type_7_max_child = 256,
e1db2db5 146 ja_type_8_max_child = 256,
1db4943c
MD
147};
148
8e519e3c
MD
149enum {
150 ja_type_0_max_linear_child = 1,
151 ja_type_1_max_linear_child = 3,
152 ja_type_2_max_linear_child = 7,
153 ja_type_3_max_linear_child = 14,
154 ja_type_4_max_linear_child = 28,
155 ja_type_5_max_linear_child = 27,
156 ja_type_6_max_linear_child = 26,
157};
158
1db4943c
MD
159enum {
160 ja_type_5_nr_pool_order = 1,
161 ja_type_6_nr_pool_order = 2,
162};
163
d96bfb0d 164const struct cds_ja_type ja_types[] = {
8e519e3c
MD
165 { .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_0_max_child, .max_linear_child = ja_type_0_max_linear_child, .order = 4, },
166 { .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_1_max_child, .max_linear_child = ja_type_1_max_linear_child, .order = 5, },
167 { .type_class = RCU_JA_LINEAR, .min_child = 3, .max_child = ja_type_2_max_child, .max_linear_child = ja_type_2_max_linear_child, .order = 6, },
168 { .type_class = RCU_JA_LINEAR, .min_child = 5, .max_child = ja_type_3_max_child, .max_linear_child = ja_type_3_max_linear_child, .order = 7, },
169 { .type_class = RCU_JA_LINEAR, .min_child = 10, .max_child = ja_type_4_max_child, .max_linear_child = ja_type_4_max_linear_child, .order = 8, },
e5227865 170
3d45251f 171 /* Pools may fill sooner than max_child. */
8e519e3c
MD
172 { .type_class = RCU_JA_POOL, .min_child = 22, .max_child = ja_type_5_max_child, .max_linear_child = ja_type_5_max_linear_child, .order = 9, .nr_pool_order = ja_type_5_nr_pool_order, .pool_size_order = 8, },
173 { .type_class = RCU_JA_POOL, .min_child = 51, .max_child = ja_type_6_max_child, .max_linear_child = ja_type_6_max_linear_child, .order = 10, .nr_pool_order = ja_type_6_nr_pool_order, .pool_size_order = 8, },
e5227865 174
3d45251f 175 /*
b1a90ce3
MD
176 * Upon node removal below min_child, if child pool is filled
177 * beyond capacity, we roll back to pigeon.
3d45251f 178 */
1db4943c 179 { .type_class = RCU_JA_PIGEON, .min_child = 101, .max_child = ja_type_7_max_child, .order = 11, },
e1db2db5
MD
180
181 { .type_class = RCU_JA_NULL, .min_child = 0, .max_child = ja_type_8_max_child, },
e5227865 182};
d68c6810 183#endif /* !(BITS_PER_LONG < 64) */
e5227865 184
1db4943c
MD
185static inline __attribute__((unused))
186void static_array_size_check(void)
187{
e1db2db5 188 CAA_BUILD_BUG_ON(CAA_ARRAY_SIZE(ja_types) < JA_TYPE_MAX_NR);
1db4943c
MD
189}
190
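/*
 * Editorial illustration (not part of the original file): a minimal,
 * hypothetical helper sketching how the ja_types[] array above can be
 * scanned to find the smallest node type able to hold a given number of
 * children. The real code walks the types incrementally during
 * recompaction (with min_child hysteresis); this sketch only shows the
 * table layout and the RCU_JA_NULL terminator.
 */
static inline __attribute__((unused))
int ja_example_smallest_type_index(uint16_t nr_child)
{
	unsigned int i;

	for (i = 0; i < CAA_ARRAY_SIZE(ja_types); i++) {
		if (ja_types[i].type_class == RCU_JA_NULL)
			break;		/* End of encoded node types. */
		if (nr_child <= ja_types[i].max_child)
			return (int) i;	/* First type large enough. */
	}
	return -1;	/* More than 256 children cannot be represented. */
}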
e5227865 191/*
d96bfb0d 192 * The cds_ja_node contains the compressed node data needed for
1db4943c
MD
193 * read-side. For linear and pool node configurations, it starts with a
194 * byte counting the number of children in the node. Then, the
195 * node-specific data is placed.
 196 * The node mutex, if any is needed, protecting concurrent updates of
197 * each node is placed in a separate hash table indexed by node address.
198 * For the pigeon configuration, the number of children is also kept in
199 * a separate hash table, indexed by node address, because it is only
200 * required for updates.
e5227865 201 */
1db4943c 202
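/*
 * Editorial illustration: layout of a linear node (or of one linear entry
 * of a pool node) as declared below, e.g. with max_linear_child == 3 on a
 * 64-bit target:
 *
 *   byte  0      nr_child
 *   bytes 1..3   child_value[3]
 *   (padding up to the next sizeof(void *) boundary, see align_ptr_size())
 *   bytes 8..31  child_ptr[3]
 */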
ff38c745
MD
203#define DECLARE_LINEAR_NODE(index) \
204 struct { \
205 uint8_t nr_child; \
206 uint8_t child_value[ja_type_## index ##_max_linear_child]; \
b4540e8a 207 struct cds_ja_inode_flag *child_ptr[ja_type_## index ##_max_linear_child]; \
ff38c745
MD
208 }
209
210#define DECLARE_POOL_NODE(index) \
211 struct { \
212 struct { \
213 uint8_t nr_child; \
214 uint8_t child_value[ja_type_## index ##_max_linear_child]; \
b4540e8a 215 struct cds_ja_inode_flag *child_ptr[ja_type_## index ##_max_linear_child]; \
ff38c745
MD
216 } linear[1U << ja_type_## index ##_nr_pool_order]; \
217 }
1db4943c 218
b4540e8a 219struct cds_ja_inode {
1db4943c
MD
220 union {
221 /* Linear configuration */
222 DECLARE_LINEAR_NODE(0) conf_0;
223 DECLARE_LINEAR_NODE(1) conf_1;
224 DECLARE_LINEAR_NODE(2) conf_2;
225 DECLARE_LINEAR_NODE(3) conf_3;
226 DECLARE_LINEAR_NODE(4) conf_4;
227
228 /* Pool configuration */
229 DECLARE_POOL_NODE(5) conf_5;
230 DECLARE_POOL_NODE(6) conf_6;
231
232 /* Pigeon configuration */
233 struct {
b4540e8a 234 struct cds_ja_inode_flag *child[ja_type_7_max_child];
1db4943c
MD
235 } conf_7;
236 /* data aliasing nodes for computed accesses */
b4540e8a 237 uint8_t data[sizeof(struct cds_ja_inode_flag *) * ja_type_7_max_child];
1db4943c 238 } u;
e5227865
MD
239};
240
2e313670 241enum ja_recompact {
19ddcd04
MD
242 JA_RECOMPACT_ADD_SAME,
243 JA_RECOMPACT_ADD_NEXT,
2e313670
MD
244 JA_RECOMPACT_DEL,
245};
246
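/*
 * Editorial note: JA_RECOMPACT_ADD_SAME recompacts into the same node size
 * (e.g. a pool node whose linear entries contain unused NULL slots),
 * JA_RECOMPACT_ADD_NEXT grows to the next node type because the current one
 * is full, and JA_RECOMPACT_DEL shrinks or redistributes after a removal
 * brings the child count to or below min_child.
 */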
19ddcd04
MD
247static
248unsigned long node_fallback_count_distribution[JA_ENTRY_PER_NODE];
249
b1a90ce3
MD
250static
251struct cds_ja_inode *_ja_node_mask_ptr(struct cds_ja_inode_flag *node)
252{
253 return (struct cds_ja_inode *) (((unsigned long) node) & JA_PTR_MASK);
254}
255
256unsigned long ja_node_type(struct cds_ja_inode_flag *node)
257{
258 unsigned long type;
259
260 if (_ja_node_mask_ptr(node) == NULL) {
261 return NODE_INDEX_NULL;
262 }
263 type = (unsigned int) ((unsigned long) node & JA_TYPE_MASK);
264 assert(type < (1UL << JA_TYPE_BITS));
265 return type;
266}
267
268struct cds_ja_inode *ja_node_ptr(struct cds_ja_inode_flag *node)
269{
270 unsigned long type_index = ja_node_type(node);
271 const struct cds_ja_type *type;
272
273 type = &ja_types[type_index];
274 switch (type->type_class) {
275 case RCU_JA_LINEAR:
276 case RCU_JA_PIGEON: /* fall-through */
277 case RCU_JA_NULL: /* fall-through */
278 default: /* fall-through */
279 return _ja_node_mask_ptr(node);
280 case RCU_JA_POOL:
281 switch (type->nr_pool_order) {
282 case 1:
283 return (struct cds_ja_inode *) (((unsigned long) node) & ~(JA_POOL_1D_MASK | JA_TYPE_MASK));
284 case 2:
285 return (struct cds_ja_inode *) (((unsigned long) node) & ~(JA_POOL_2D_MASK | JA_POOL_1D_MASK | JA_TYPE_MASK));
286 default:
287 assert(0);
288 }
289 }
290}
291
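/*
 * Editorial note: node pointers are tagged. alloc_cds_ja_node() below
 * allocates each node with posix_memalign() using the node size as
 * alignment, so the low-order bits of a node address are zero and can
 * carry the type index (JA_TYPE_MASK) and, for pool nodes, the 1D/2D bit
 * selectors (JA_POOL_1D_MASK/JA_POOL_2D_MASK). ja_node_ptr() above strips
 * those flags before the node is dereferenced.
 */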
b4540e8a 292struct cds_ja_inode *alloc_cds_ja_node(const struct cds_ja_type *ja_type)
e5227865 293{
b1a90ce3
MD
294 size_t len = 1U << ja_type->order;
295 void *p;
296 int ret;
297
298 ret = posix_memalign(&p, len, len);
299 if (ret || !p) {
300 return NULL;
301 }
302 memset(p, 0, len);
303 return p;
e5227865
MD
304}
305
b4540e8a 306void free_cds_ja_node(struct cds_ja_inode *node)
e5227865
MD
307{
308 free(node);
309}
310
d68c6810
MD
311#define __JA_ALIGN_MASK(v, mask) (((v) + (mask)) & ~(mask))
312#define JA_ALIGN(v, align) __JA_ALIGN_MASK(v, (typeof(v)) (align) - 1)
313#define __JA_FLOOR_MASK(v, mask) ((v) & ~(mask))
314#define JA_FLOOR(v, align) __JA_FLOOR_MASK(v, (typeof(v)) (align) - 1)
315
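/*
 * Editorial example: JA_ALIGN(13, 8) == 16 and JA_FLOOR(13, 8) == 8.
 * align_ptr_size() below uses JA_ALIGN to round an address up to the next
 * sizeof(void *) multiple, which is how the child pointer array is located
 * right after the byte-sized child value array within a linear node.
 */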
316static
1db4943c 317uint8_t *align_ptr_size(uint8_t *ptr)
d68c6810 318{
1db4943c 319 return (uint8_t *) JA_ALIGN((unsigned long) ptr, sizeof(void *));
d68c6810
MD
320}
321
11c5e016 322static
d96bfb0d 323uint8_t ja_linear_node_get_nr_child(const struct cds_ja_type *type,
b4540e8a 324 struct cds_ja_inode *node)
11c5e016
MD
325{
326 assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
2e313670 327 return rcu_dereference(node->u.data[0]);
11c5e016
MD
328}
329
13a7f5a6
MD
330/*
 331 * The order in which values and pointers are read does not matter: if
 332 * a value is missing, we return NULL. If a value is there, but its
 333 * associated pointer is still NULL, we return NULL too.
334 */
d68c6810 335static
b4540e8a
MD
336struct cds_ja_inode_flag *ja_linear_node_get_nth(const struct cds_ja_type *type,
337 struct cds_ja_inode *node,
5a9a87dd 338 struct cds_ja_inode_flag ***child_node_flag_ptr,
b62a8d0c 339 struct cds_ja_inode_flag **child_node_flag_v,
b0ca2d21 340 struct cds_ja_inode_flag ***node_flag_ptr,
8e519e3c 341 uint8_t n)
d68c6810
MD
342{
343 uint8_t nr_child;
344 uint8_t *values;
b4540e8a
MD
345 struct cds_ja_inode_flag **pointers;
346 struct cds_ja_inode_flag *ptr;
d68c6810
MD
347 unsigned int i;
348
8e519e3c 349 assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
d68c6810 350
11c5e016 351 nr_child = ja_linear_node_get_nr_child(type, node);
13a7f5a6 352 cmm_smp_rmb(); /* read nr_child before values and pointers */
8e519e3c
MD
353 assert(nr_child <= type->max_linear_child);
354 assert(type->type_class != RCU_JA_LINEAR || nr_child >= type->min_child);
d68c6810 355
1db4943c 356 values = &node->u.data[1];
d68c6810 357 for (i = 0; i < nr_child; i++) {
13a7f5a6 358 if (CMM_LOAD_SHARED(values[i]) == n)
d68c6810
MD
359 break;
360 }
b0ca2d21
MD
361 if (i >= nr_child) {
362 if (caa_unlikely(node_flag_ptr))
363 *node_flag_ptr = NULL;
d68c6810 364 return NULL;
b0ca2d21 365 }
b4540e8a 366 pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
13a7f5a6 367 ptr = rcu_dereference(pointers[i]);
2e313670
MD
368 if (caa_unlikely(child_node_flag_ptr) && ptr)
369 *child_node_flag_ptr = &pointers[i];
b62a8d0c
MD
370 if (caa_unlikely(child_node_flag_v) && ptr)
371 *child_node_flag_v = ptr;
b0ca2d21
MD
372 if (caa_unlikely(node_flag_ptr))
373 *node_flag_ptr = &pointers[i];
d68c6810
MD
374 return ptr;
375}
376
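/*
 * Editorial note: the cmm_smp_rmb() in ja_linear_node_get_nth() pairs with
 * the cmm_smp_wmb() in ja_linear_node_set_nth(): the writer publishes the
 * new value/pointer pair before incrementing nr_child, so a reader that
 * observes the incremented nr_child is guaranteed to also see a fully
 * initialized entry.
 */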
11c5e016 377static
5a9a87dd 378void ja_linear_node_get_ith_pos(const struct cds_ja_type *type,
b4540e8a 379 struct cds_ja_inode *node,
11c5e016
MD
380 uint8_t i,
381 uint8_t *v,
b4540e8a 382 struct cds_ja_inode_flag **iter)
11c5e016
MD
383{
384 uint8_t *values;
b4540e8a 385 struct cds_ja_inode_flag **pointers;
11c5e016
MD
386
387 assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
388 assert(i < ja_linear_node_get_nr_child(type, node));
389
390 values = &node->u.data[1];
391 *v = values[i];
b4540e8a 392 pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
11c5e016
MD
393 *iter = pointers[i];
394}
395
d68c6810 396static
b4540e8a
MD
397struct cds_ja_inode_flag *ja_pool_node_get_nth(const struct cds_ja_type *type,
398 struct cds_ja_inode *node,
b1a90ce3 399 struct cds_ja_inode_flag *node_flag,
5a9a87dd 400 struct cds_ja_inode_flag ***child_node_flag_ptr,
b62a8d0c 401 struct cds_ja_inode_flag **child_node_flag_v,
b0ca2d21 402 struct cds_ja_inode_flag ***node_flag_ptr,
8e519e3c 403 uint8_t n)
d68c6810 404{
b4540e8a 405 struct cds_ja_inode *linear;
d68c6810 406
fd800776 407 assert(type->type_class == RCU_JA_POOL);
b1a90ce3
MD
408
409 switch (type->nr_pool_order) {
410 case 1:
411 {
412 unsigned long bitsel, index;
413
414 bitsel = ja_node_pool_1d_bitsel(node_flag);
415 assert(bitsel < CHAR_BIT);
19ddcd04 416 index = ((unsigned long) n >> bitsel) & 0x1;
b1a90ce3
MD
417 linear = (struct cds_ja_inode *) &node->u.data[index << type->pool_size_order];
418 break;
419 }
420 case 2:
421 {
19ddcd04
MD
422 unsigned long bitsel[2], index[2], rindex;
423
424 ja_node_pool_2d_bitsel(node_flag, bitsel);
425 assert(bitsel[0] < CHAR_BIT);
426 assert(bitsel[1] < CHAR_BIT);
427 index[0] = ((unsigned long) n >> bitsel[0]) & 0x1;
428 index[0] <<= 1;
429 index[1] = ((unsigned long) n >> bitsel[1]) & 0x1;
430 rindex = index[0] | index[1];
431 linear = (struct cds_ja_inode *) &node->u.data[rindex << type->pool_size_order];
b1a90ce3
MD
432 break;
433 }
434 default:
435 linear = NULL;
436 assert(0);
437 }
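	/*
	 * Editorial example: with nr_pool_order == 2, bitsel = {6, 3} and
	 * n == 0x4b (0b01001011), index[0] == (bit 6 of n) << 1 == 2 and
	 * index[1] == bit 3 of n == 1, so rindex == 3 and the lookup is
	 * directed to the fourth linear sub-pool of this node.
	 */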
b0ca2d21 438 return ja_linear_node_get_nth(type, linear, child_node_flag_ptr,
b62a8d0c 439 child_node_flag_v, node_flag_ptr, n);
d68c6810
MD
440}
441
11c5e016 442static
b4540e8a
MD
443struct cds_ja_inode *ja_pool_node_get_ith_pool(const struct cds_ja_type *type,
444 struct cds_ja_inode *node,
11c5e016
MD
445 uint8_t i)
446{
447 assert(type->type_class == RCU_JA_POOL);
b4540e8a 448 return (struct cds_ja_inode *)
11c5e016
MD
449 &node->u.data[(unsigned int) i << type->pool_size_order];
450}
451
d68c6810 452static
b4540e8a
MD
453struct cds_ja_inode_flag *ja_pigeon_node_get_nth(const struct cds_ja_type *type,
454 struct cds_ja_inode *node,
5a9a87dd 455 struct cds_ja_inode_flag ***child_node_flag_ptr,
b62a8d0c 456 struct cds_ja_inode_flag **child_node_flag_v,
b0ca2d21 457 struct cds_ja_inode_flag ***node_flag_ptr,
8e519e3c 458 uint8_t n)
d68c6810 459{
5a9a87dd 460 struct cds_ja_inode_flag **child_node_flag;
b62a8d0c 461 struct cds_ja_inode_flag *child_node_flag_read;
5a9a87dd 462
d68c6810 463 assert(type->type_class == RCU_JA_PIGEON);
5a9a87dd 464 child_node_flag = &((struct cds_ja_inode_flag **) node->u.data)[n];
b62a8d0c 465 child_node_flag_read = rcu_dereference(*child_node_flag);
582a6ade
MD
466 dbg_printf("ja_pigeon_node_get_nth child_node_flag_ptr %p\n",
467 child_node_flag);
b62a8d0c 468 if (caa_unlikely(child_node_flag_ptr) && child_node_flag_read)
5a9a87dd 469 *child_node_flag_ptr = child_node_flag;
b62a8d0c
MD
470 if (caa_unlikely(child_node_flag_v) && child_node_flag_read)
471 *child_node_flag_v = child_node_flag_read;
b0ca2d21
MD
472 if (caa_unlikely(node_flag_ptr))
473 *node_flag_ptr = child_node_flag;
b62a8d0c 474 return child_node_flag_read;
d68c6810
MD
475}
476
2e313670
MD
477static
478struct cds_ja_inode_flag *ja_pigeon_node_get_ith_pos(const struct cds_ja_type *type,
479 struct cds_ja_inode *node,
480 uint8_t i)
481{
b62a8d0c 482 return ja_pigeon_node_get_nth(type, node, NULL, NULL, NULL, i);
2e313670
MD
483}
484
13a7f5a6
MD
485/*
486 * ja_node_get_nth: get nth item from a node.
487 * node_flag is already rcu_dereference'd.
488 */
d68c6810 489static
b62a8d0c 490struct cds_ja_inode_flag *ja_node_get_nth(struct cds_ja_inode_flag *node_flag,
5a9a87dd 491 struct cds_ja_inode_flag ***child_node_flag_ptr,
b62a8d0c 492 struct cds_ja_inode_flag **child_node_flag,
b0ca2d21 493 struct cds_ja_inode_flag ***node_flag_ptr,
8e519e3c 494 uint8_t n)
d68c6810
MD
495{
496 unsigned int type_index;
b4540e8a 497 struct cds_ja_inode *node;
d96bfb0d 498 const struct cds_ja_type *type;
d68c6810 499
d68c6810 500 node = ja_node_ptr(node_flag);
5a9a87dd 501 assert(node != NULL);
d68c6810
MD
502 type_index = ja_node_type(node_flag);
503 type = &ja_types[type_index];
504
505 switch (type->type_class) {
506 case RCU_JA_LINEAR:
5a9a87dd 507 return ja_linear_node_get_nth(type, node,
b62a8d0c
MD
508 child_node_flag_ptr, child_node_flag,
509 node_flag_ptr, n);
fd800776 510 case RCU_JA_POOL:
b1a90ce3 511 return ja_pool_node_get_nth(type, node, node_flag,
b62a8d0c
MD
512 child_node_flag_ptr, child_node_flag,
513 node_flag_ptr, n);
d68c6810 514 case RCU_JA_PIGEON:
5a9a87dd 515 return ja_pigeon_node_get_nth(type, node,
b62a8d0c
MD
516 child_node_flag_ptr, child_node_flag,
517 node_flag_ptr, n);
d68c6810
MD
518 default:
519 assert(0);
520 return (void *) -1UL;
521 }
522}
523
8e519e3c 524static
d96bfb0d 525int ja_linear_node_set_nth(const struct cds_ja_type *type,
b4540e8a 526 struct cds_ja_inode *node,
d96bfb0d 527 struct cds_ja_shadow_node *shadow_node,
8e519e3c 528 uint8_t n,
b4540e8a 529 struct cds_ja_inode_flag *child_node_flag)
8e519e3c
MD
530{
531 uint8_t nr_child;
532 uint8_t *values, *nr_child_ptr;
b4540e8a 533 struct cds_ja_inode_flag **pointers;
2e313670 534 unsigned int i, unused = 0;
8e519e3c
MD
535
536 assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
537
538 nr_child_ptr = &node->u.data[0];
a2a7ff59 539 dbg_printf("linear set nth: nr_child_ptr %p\n", nr_child_ptr);
8e519e3c
MD
540 nr_child = *nr_child_ptr;
541 assert(nr_child <= type->max_linear_child);
8e519e3c
MD
542
543 values = &node->u.data[1];
2e313670
MD
544 pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
545 /* Check if node value is already populated */
8e519e3c 546 for (i = 0; i < nr_child; i++) {
2e313670
MD
547 if (values[i] == n) {
548 if (pointers[i])
549 return -EEXIST;
550 else
551 break;
552 } else {
553 if (!pointers[i])
554 unused++;
555 }
8e519e3c 556 }
2e313670
MD
557 if (i == nr_child && nr_child >= type->max_linear_child) {
558 if (unused)
559 return -ERANGE; /* recompact node */
560 else
561 return -ENOSPC; /* No space left in this node type */
562 }
563
564 assert(pointers[i] == NULL);
565 rcu_assign_pointer(pointers[i], child_node_flag);
566 /* If we expanded the nr_child, increment it */
567 if (i == nr_child) {
568 CMM_STORE_SHARED(values[nr_child], n);
569 /* write pointer and value before nr_child */
570 cmm_smp_wmb();
571 CMM_STORE_SHARED(*nr_child_ptr, nr_child + 1);
8e519e3c 572 }
e1db2db5 573 shadow_node->nr_child++;
a2a7ff59
MD
574 dbg_printf("linear set nth: %u child, shadow: %u child, for node %p shadow %p\n",
575 (unsigned int) CMM_LOAD_SHARED(*nr_child_ptr),
576 (unsigned int) shadow_node->nr_child,
577 node, shadow_node);
578
8e519e3c
MD
579 return 0;
580}
581
582static
d96bfb0d 583int ja_pool_node_set_nth(const struct cds_ja_type *type,
b4540e8a 584 struct cds_ja_inode *node,
b1a90ce3 585 struct cds_ja_inode_flag *node_flag,
d96bfb0d 586 struct cds_ja_shadow_node *shadow_node,
8e519e3c 587 uint8_t n,
b4540e8a 588 struct cds_ja_inode_flag *child_node_flag)
8e519e3c 589{
b4540e8a 590 struct cds_ja_inode *linear;
8e519e3c
MD
591
592 assert(type->type_class == RCU_JA_POOL);
b1a90ce3
MD
593
594 switch (type->nr_pool_order) {
595 case 1:
596 {
597 unsigned long bitsel, index;
598
599 bitsel = ja_node_pool_1d_bitsel(node_flag);
600 assert(bitsel < CHAR_BIT);
19ddcd04 601 index = ((unsigned long) n >> bitsel) & 0x1;
b1a90ce3
MD
602 linear = (struct cds_ja_inode *) &node->u.data[index << type->pool_size_order];
603 break;
604 }
605 case 2:
606 {
19ddcd04
MD
607 unsigned long bitsel[2], index[2], rindex;
608
609 ja_node_pool_2d_bitsel(node_flag, bitsel);
610 assert(bitsel[0] < CHAR_BIT);
611 assert(bitsel[1] < CHAR_BIT);
612 index[0] = ((unsigned long) n >> bitsel[0]) & 0x1;
613 index[0] <<= 1;
614 index[1] = ((unsigned long) n >> bitsel[1]) & 0x1;
615 rindex = index[0] | index[1];
616 linear = (struct cds_ja_inode *) &node->u.data[rindex << type->pool_size_order];
b1a90ce3
MD
617 break;
618 }
619 default:
620 linear = NULL;
621 assert(0);
622 }
623
e1db2db5
MD
624 return ja_linear_node_set_nth(type, linear, shadow_node,
625 n, child_node_flag);
8e519e3c
MD
626}
627
628static
d96bfb0d 629int ja_pigeon_node_set_nth(const struct cds_ja_type *type,
b4540e8a 630 struct cds_ja_inode *node,
d96bfb0d 631 struct cds_ja_shadow_node *shadow_node,
8e519e3c 632 uint8_t n,
b4540e8a 633 struct cds_ja_inode_flag *child_node_flag)
8e519e3c 634{
b4540e8a 635 struct cds_ja_inode_flag **ptr;
8e519e3c
MD
636
637 assert(type->type_class == RCU_JA_PIGEON);
b4540e8a 638 ptr = &((struct cds_ja_inode_flag **) node->u.data)[n];
5a9a87dd 639 if (*ptr)
8e519e3c
MD
640 return -EEXIST;
641 rcu_assign_pointer(*ptr, child_node_flag);
e1db2db5 642 shadow_node->nr_child++;
8e519e3c
MD
643 return 0;
644}
645
d68c6810 646/*
7a0b2331 647 * _ja_node_set_nth: set nth item within a node. Return an error
8e519e3c 648 * (negative error value) if it is already there.
d68c6810 649 */
8e519e3c 650static
d96bfb0d 651int _ja_node_set_nth(const struct cds_ja_type *type,
b4540e8a 652 struct cds_ja_inode *node,
b1a90ce3 653 struct cds_ja_inode_flag *node_flag,
d96bfb0d 654 struct cds_ja_shadow_node *shadow_node,
e1db2db5 655 uint8_t n,
b4540e8a 656 struct cds_ja_inode_flag *child_node_flag)
8e519e3c 657{
8e519e3c
MD
658 switch (type->type_class) {
659 case RCU_JA_LINEAR:
e1db2db5 660 return ja_linear_node_set_nth(type, node, shadow_node, n,
8e519e3c
MD
661 child_node_flag);
662 case RCU_JA_POOL:
b1a90ce3 663 return ja_pool_node_set_nth(type, node, node_flag, shadow_node, n,
8e519e3c
MD
664 child_node_flag);
665 case RCU_JA_PIGEON:
e1db2db5 666 return ja_pigeon_node_set_nth(type, node, shadow_node, n,
8e519e3c 667 child_node_flag);
e1db2db5
MD
668 case RCU_JA_NULL:
669 return -ENOSPC;
8e519e3c
MD
670 default:
671 assert(0);
672 return -EINVAL;
673 }
674
675 return 0;
676}
7a0b2331 677
2e313670 678static
af3cbd45 679int ja_linear_node_clear_ptr(const struct cds_ja_type *type,
2e313670
MD
680 struct cds_ja_inode *node,
681 struct cds_ja_shadow_node *shadow_node,
af3cbd45 682 struct cds_ja_inode_flag **node_flag_ptr)
2e313670
MD
683{
684 uint8_t nr_child;
af3cbd45 685 uint8_t *nr_child_ptr;
2e313670
MD
686
687 assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
688
689 nr_child_ptr = &node->u.data[0];
2e313670
MD
690 nr_child = *nr_child_ptr;
691 assert(nr_child <= type->max_linear_child);
692
2e313670
MD
693 if (shadow_node->fallback_removal_count) {
694 shadow_node->fallback_removal_count--;
695 } else {
19ddcd04
MD
696 if (type->type_class == RCU_JA_LINEAR
697 && shadow_node->nr_child <= type->min_child) {
2e313670
MD
698 /* We need to try recompacting the node */
699 return -EFBIG;
700 }
701 }
19ddcd04 702 dbg_printf("linear clear ptr: nr_child_ptr %p\n", nr_child_ptr);
af3cbd45
MD
703 assert(*node_flag_ptr != NULL);
704 rcu_assign_pointer(*node_flag_ptr, NULL);
2e313670
MD
705 /*
706 * Value and nr_child are never changed (would cause ABA issue).
707 * Instead, we leave the pointer to NULL and recompact the node
708 * once in a while. It is allowed to set a NULL pointer to a new
709 * value without recompaction though.
710 * Only update the shadow node accounting.
711 */
712 shadow_node->nr_child--;
af3cbd45 713 dbg_printf("linear clear ptr: %u child, shadow: %u child, for node %p shadow %p\n",
2e313670
MD
714 (unsigned int) CMM_LOAD_SHARED(*nr_child_ptr),
715 (unsigned int) shadow_node->nr_child,
716 node, shadow_node);
2e313670
MD
717 return 0;
718}
719
720static
af3cbd45 721int ja_pool_node_clear_ptr(const struct cds_ja_type *type,
2e313670 722 struct cds_ja_inode *node,
19ddcd04 723 struct cds_ja_inode_flag *node_flag,
2e313670 724 struct cds_ja_shadow_node *shadow_node,
af3cbd45 725 struct cds_ja_inode_flag **node_flag_ptr,
2e313670
MD
726 uint8_t n)
727{
728 struct cds_ja_inode *linear;
729
730 assert(type->type_class == RCU_JA_POOL);
19ddcd04
MD
731
732 if (shadow_node->fallback_removal_count) {
733 shadow_node->fallback_removal_count--;
734 } else {
735 /* We should try recompacting the node */
736 if (shadow_node->nr_child <= type->min_child)
737 return -EFBIG;
738 }
739
740 switch (type->nr_pool_order) {
741 case 1:
742 {
743 unsigned long bitsel, index;
744
745 bitsel = ja_node_pool_1d_bitsel(node_flag);
746 assert(bitsel < CHAR_BIT);
 747 index = ((unsigned long) n >> bitsel) & 0x1;
748 linear = (struct cds_ja_inode *) &node->u.data[index << type->pool_size_order];
749 break;
750 }
751 case 2:
752 {
753 unsigned long bitsel[2], index[2], rindex;
754
755 ja_node_pool_2d_bitsel(node_flag, bitsel);
756 assert(bitsel[0] < CHAR_BIT);
757 assert(bitsel[1] < CHAR_BIT);
758 index[0] = ((unsigned long) n >> bitsel[0]) & 0x1;
759 index[0] <<= 1;
760 index[1] = ((unsigned long) n >> bitsel[1]) & 0x1;
761 rindex = index[0] | index[1];
762 linear = (struct cds_ja_inode *) &node->u.data[rindex << type->pool_size_order];
763 break;
764 }
765 default:
766 linear = NULL;
767 assert(0);
768 }
769
af3cbd45 770 return ja_linear_node_clear_ptr(type, linear, shadow_node, node_flag_ptr);
2e313670
MD
771}
772
773static
af3cbd45 774int ja_pigeon_node_clear_ptr(const struct cds_ja_type *type,
2e313670
MD
775 struct cds_ja_inode *node,
776 struct cds_ja_shadow_node *shadow_node,
af3cbd45 777 struct cds_ja_inode_flag **node_flag_ptr)
2e313670 778{
2e313670 779 assert(type->type_class == RCU_JA_PIGEON);
19ddcd04
MD
780
781 if (shadow_node->fallback_removal_count) {
782 shadow_node->fallback_removal_count--;
783 } else {
784 /* We should try recompacting the node */
785 if (shadow_node->nr_child <= type->min_child)
786 return -EFBIG;
787 }
4d6ef45e 788 dbg_printf("ja_pigeon_node_clear_ptr: clearing ptr: %p\n", *node_flag_ptr);
af3cbd45 789 rcu_assign_pointer(*node_flag_ptr, NULL);
2e313670
MD
790 shadow_node->nr_child--;
791 return 0;
792}
793
794/*
af3cbd45 795 * _ja_node_clear_ptr: clear ptr item within a node. Return an error
2e313670
MD
796 * (negative error value) if it is not found (-ENOENT).
797 */
798static
af3cbd45 799int _ja_node_clear_ptr(const struct cds_ja_type *type,
2e313670 800 struct cds_ja_inode *node,
19ddcd04 801 struct cds_ja_inode_flag *node_flag,
2e313670 802 struct cds_ja_shadow_node *shadow_node,
af3cbd45 803 struct cds_ja_inode_flag **node_flag_ptr,
2e313670
MD
804 uint8_t n)
805{
806 switch (type->type_class) {
807 case RCU_JA_LINEAR:
af3cbd45 808 return ja_linear_node_clear_ptr(type, node, shadow_node, node_flag_ptr);
2e313670 809 case RCU_JA_POOL:
19ddcd04 810 return ja_pool_node_clear_ptr(type, node, node_flag, shadow_node, node_flag_ptr, n);
2e313670 811 case RCU_JA_PIGEON:
af3cbd45 812 return ja_pigeon_node_clear_ptr(type, node, shadow_node, node_flag_ptr);
2e313670
MD
813 case RCU_JA_NULL:
814 return -ENOENT;
815 default:
816 assert(0);
817 return -EINVAL;
818 }
819
820 return 0;
821}
822
b1a90ce3
MD
823/*
 824 * Calculate bit distribution. Returns the bit (0 to 7) that splits the
 825 * distribution into two sub-distributions containing as equal a number
 826 * of elements as possible.
827 */
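/*
 * Editorial example: with children at values {0x00, 0x01, 0x80, 0x81},
 * bits 0 and 7 each count nr_one == 2, exactly half of the 4 children,
 * so either is an optimal selector (distance |2*2 - 4| == 0), while bits
 * 1 to 6 count nr_one == 0 and have distance 4.
 */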
828static
829unsigned int ja_node_sum_distribution_1d(enum ja_recompact mode,
830 struct cds_ja *ja,
831 unsigned int type_index,
832 const struct cds_ja_type *type,
833 struct cds_ja_inode *node,
834 struct cds_ja_shadow_node *shadow_node,
835 uint8_t n,
836 struct cds_ja_inode_flag *child_node_flag,
837 struct cds_ja_inode_flag **nullify_node_flag_ptr)
838{
839 uint8_t nr_one[JA_BITS_PER_BYTE];
840 unsigned int bitsel = 0, bit_i, overall_best_distance = UINT_MAX;
841 unsigned int distrib_nr_child = 0;
842
843 memset(nr_one, 0, sizeof(nr_one));
844
845 switch (type->type_class) {
846 case RCU_JA_LINEAR:
847 {
848 uint8_t nr_child =
849 ja_linear_node_get_nr_child(type, node);
850 unsigned int i;
851
852 for (i = 0; i < nr_child; i++) {
853 struct cds_ja_inode_flag *iter;
b1a90ce3
MD
854 uint8_t v;
855
856 ja_linear_node_get_ith_pos(type, node, i, &v, &iter);
857 if (!iter)
858 continue;
859 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
860 continue;
f5531dd9
MD
861 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
862 if (v & (1U << bit_i))
863 nr_one[bit_i]++;
b1a90ce3
MD
864 }
865 distrib_nr_child++;
866 }
867 break;
868 }
869 case RCU_JA_POOL:
870 {
871 unsigned int pool_nr;
872
873 for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) {
874 struct cds_ja_inode *pool =
875 ja_pool_node_get_ith_pool(type,
876 node, pool_nr);
877 uint8_t nr_child =
878 ja_linear_node_get_nr_child(type, pool);
879 unsigned int j;
880
881 for (j = 0; j < nr_child; j++) {
882 struct cds_ja_inode_flag *iter;
b1a90ce3
MD
883 uint8_t v;
884
885 ja_linear_node_get_ith_pos(type, pool,
886 j, &v, &iter);
887 if (!iter)
888 continue;
889 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
890 continue;
f5531dd9
MD
891 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
892 if (v & (1U << bit_i))
893 nr_one[bit_i]++;
b1a90ce3
MD
894 }
895 distrib_nr_child++;
896 }
897 }
898 break;
899 }
900 case RCU_JA_PIGEON:
901 {
902 uint8_t nr_child;
903 unsigned int i;
904
905 assert(mode == JA_RECOMPACT_DEL);
906 nr_child = shadow_node->nr_child;
907 for (i = 0; i < nr_child; i++) {
908 struct cds_ja_inode_flag *iter;
b1a90ce3
MD
909
910 iter = ja_pigeon_node_get_ith_pos(type, node, i);
911 if (!iter)
912 continue;
913 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
914 continue;
f5531dd9
MD
915 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
916 if (i & (1U << bit_i))
917 nr_one[bit_i]++;
b1a90ce3
MD
918 }
919 distrib_nr_child++;
920 }
921 break;
922 }
923 case RCU_JA_NULL:
19ddcd04 924 assert(mode == JA_RECOMPACT_ADD_NEXT);
b1a90ce3
MD
925 break;
926 default:
927 assert(0);
928 break;
929 }
930
19ddcd04 931 if (mode == JA_RECOMPACT_ADD_NEXT || mode == JA_RECOMPACT_ADD_SAME) {
f5531dd9
MD
932 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
933 if (n & (1U << bit_i))
934 nr_one[bit_i]++;
b1a90ce3
MD
935 }
936 distrib_nr_child++;
937 }
938
939 /*
940 * The best bit selector is that for which the number of ones is
941 * closest to half of the number of children in the
f5531dd9
MD
 942 * distribution. We calculate the distance using twice the
 943 * sub-distribution size to eliminate truncation error.
b1a90ce3
MD
944 */
945 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
946 unsigned int distance_to_best;
947
f5531dd9 948 distance_to_best = abs_int((nr_one[bit_i] << 1U) - distrib_nr_child);
b1a90ce3
MD
949 if (distance_to_best < overall_best_distance) {
950 overall_best_distance = distance_to_best;
951 bitsel = bit_i;
952 }
953 }
954 dbg_printf("1 dimension pool bit selection: (%u)\n", bitsel);
955 return bitsel;
956}
957
19ddcd04
MD
958/*
 959 * Calculate bit distribution in two dimensions. Returns the two bits
 960 * (each 0 to 7) that split the distribution into four sub-distributions
 961 * containing as equal a number of elements as possible.
962 */
963static
964void ja_node_sum_distribution_2d(enum ja_recompact mode,
965 struct cds_ja *ja,
966 unsigned int type_index,
967 const struct cds_ja_type *type,
968 struct cds_ja_inode *node,
969 struct cds_ja_shadow_node *shadow_node,
970 uint8_t n,
971 struct cds_ja_inode_flag *child_node_flag,
972 struct cds_ja_inode_flag **nullify_node_flag_ptr,
973 unsigned int *_bitsel)
974{
975 uint8_t nr_2d_11[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE],
976 nr_2d_10[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE],
977 nr_2d_01[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE],
978 nr_2d_00[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE];
979 unsigned int bitsel[2] = { 0, 1 };
4a073c53
MD
980 unsigned int bit_i, bit_j;
981 int overall_best_distance = INT_MAX;
19ddcd04
MD
982 unsigned int distrib_nr_child = 0;
983
984 memset(nr_2d_11, 0, sizeof(nr_2d_11));
985 memset(nr_2d_10, 0, sizeof(nr_2d_10));
4a073c53
MD
986 memset(nr_2d_01, 0, sizeof(nr_2d_01));
987 memset(nr_2d_00, 0, sizeof(nr_2d_00));
19ddcd04
MD
988
989 switch (type->type_class) {
990 case RCU_JA_LINEAR:
991 {
992 uint8_t nr_child =
993 ja_linear_node_get_nr_child(type, node);
994 unsigned int i;
995
996 for (i = 0; i < nr_child; i++) {
997 struct cds_ja_inode_flag *iter;
998 uint8_t v;
999
1000 ja_linear_node_get_ith_pos(type, node, i, &v, &iter);
1001 if (!iter)
1002 continue;
1003 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
1004 continue;
1005 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1006 for (bit_j = 0; bit_j < bit_i; bit_j++) {
1007 if ((v & (1U << bit_i)) && (v & (1U << bit_j))) {
1008 nr_2d_11[bit_i][bit_j]++;
1009 }
1010 if ((v & (1U << bit_i)) && !(v & (1U << bit_j))) {
1011 nr_2d_10[bit_i][bit_j]++;
1012 }
1013 if (!(v & (1U << bit_i)) && (v & (1U << bit_j))) {
1014 nr_2d_01[bit_i][bit_j]++;
1015 }
1016 if (!(v & (1U << bit_i)) && !(v & (1U << bit_j))) {
1017 nr_2d_00[bit_i][bit_j]++;
1018 }
1019 }
1020 }
1021 distrib_nr_child++;
1022 }
1023 break;
1024 }
1025 case RCU_JA_POOL:
1026 {
1027 unsigned int pool_nr;
1028
1029 for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) {
1030 struct cds_ja_inode *pool =
1031 ja_pool_node_get_ith_pool(type,
1032 node, pool_nr);
1033 uint8_t nr_child =
1034 ja_linear_node_get_nr_child(type, pool);
1035 unsigned int j;
1036
1037 for (j = 0; j < nr_child; j++) {
1038 struct cds_ja_inode_flag *iter;
1039 uint8_t v;
1040
1041 ja_linear_node_get_ith_pos(type, pool,
1042 j, &v, &iter);
1043 if (!iter)
1044 continue;
1045 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
1046 continue;
1047 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1048 for (bit_j = 0; bit_j < bit_i; bit_j++) {
1049 if ((v & (1U << bit_i)) && (v & (1U << bit_j))) {
1050 nr_2d_11[bit_i][bit_j]++;
1051 }
1052 if ((v & (1U << bit_i)) && !(v & (1U << bit_j))) {
1053 nr_2d_10[bit_i][bit_j]++;
1054 }
1055 if (!(v & (1U << bit_i)) && (v & (1U << bit_j))) {
1056 nr_2d_01[bit_i][bit_j]++;
1057 }
1058 if (!(v & (1U << bit_i)) && !(v & (1U << bit_j))) {
1059 nr_2d_00[bit_i][bit_j]++;
1060 }
1061 }
1062 }
1063 distrib_nr_child++;
1064 }
1065 }
1066 break;
1067 }
1068 case RCU_JA_PIGEON:
1069 {
1070 uint8_t nr_child;
1071 unsigned int i;
1072
1073 assert(mode == JA_RECOMPACT_DEL);
1074 nr_child = shadow_node->nr_child;
1075 for (i = 0; i < nr_child; i++) {
1076 struct cds_ja_inode_flag *iter;
1077
1078 iter = ja_pigeon_node_get_ith_pos(type, node, i);
1079 if (!iter)
1080 continue;
1081 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
1082 continue;
1083 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1084 for (bit_j = 0; bit_j < bit_i; bit_j++) {
1085 if ((i & (1U << bit_i)) && (i & (1U << bit_j))) {
1086 nr_2d_11[bit_i][bit_j]++;
1087 }
1088 if ((i & (1U << bit_i)) && !(i & (1U << bit_j))) {
1089 nr_2d_10[bit_i][bit_j]++;
1090 }
1091 if (!(i & (1U << bit_i)) && (i & (1U << bit_j))) {
1092 nr_2d_01[bit_i][bit_j]++;
1093 }
1094 if (!(i & (1U << bit_i)) && !(i & (1U << bit_j))) {
1095 nr_2d_00[bit_i][bit_j]++;
1096 }
1097 }
1098 }
1099 distrib_nr_child++;
1100 }
1101 break;
1102 }
1103 case RCU_JA_NULL:
1104 assert(mode == JA_RECOMPACT_ADD_NEXT);
1105 break;
1106 default:
1107 assert(0);
1108 break;
1109 }
1110
1111 if (mode == JA_RECOMPACT_ADD_NEXT || mode == JA_RECOMPACT_ADD_SAME) {
1112 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1113 for (bit_j = 0; bit_j < bit_i; bit_j++) {
1114 if ((n & (1U << bit_i)) && (n & (1U << bit_j))) {
1115 nr_2d_11[bit_i][bit_j]++;
1116 }
1117 if ((n & (1U << bit_i)) && !(n & (1U << bit_j))) {
1118 nr_2d_10[bit_i][bit_j]++;
1119 }
1120 if (!(n & (1U << bit_i)) && (n & (1U << bit_j))) {
1121 nr_2d_01[bit_i][bit_j]++;
1122 }
1123 if (!(n & (1U << bit_i)) && !(n & (1U << bit_j))) {
1124 nr_2d_00[bit_i][bit_j]++;
1125 }
1126 }
1127 }
1128 distrib_nr_child++;
1129 }
1130
1131 /*
1132 * The best bit selector is that for which the number of nodes
1133 * in each sub-class is closest to one-fourth of the number of
1134 * children in the distribution. We calculate the distance using
1135 * 4 times the size of the sub-distribution to eliminate
1136 * truncation error.
1137 */
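	/*
	 * Editorial example: with 8 children and a candidate bit pair
	 * splitting them 4/2/1/1 across the four sub-classes, the signed
	 * distances are (4 << 2) - 8 = 8, (2 << 2) - 8 = 0 and
	 * (1 << 2) - 8 = -4 (twice); the retained worst-case distance is 8.
	 * A balanced 2/2/2/2 candidate scores 0 and is preferred.
	 */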
1138 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1139 for (bit_j = 0; bit_j < bit_i; bit_j++) {
4a073c53 1140 int distance_to_best[4];
19ddcd04 1141
4a073c53
MD
1142 distance_to_best[0] = (nr_2d_11[bit_i][bit_j] << 2U) - distrib_nr_child;
1143 distance_to_best[1] = (nr_2d_10[bit_i][bit_j] << 2U) - distrib_nr_child;
1144 distance_to_best[2] = (nr_2d_01[bit_i][bit_j] << 2U) - distrib_nr_child;
1145 distance_to_best[3] = (nr_2d_00[bit_i][bit_j] << 2U) - distrib_nr_child;
19ddcd04 1146
4a073c53
MD
 1147 /* Consider the worst distance above the current best */
1148 if (distance_to_best[1] > 0 && distance_to_best[1] > distance_to_best[0])
19ddcd04 1149 distance_to_best[0] = distance_to_best[1];
4a073c53 1150 if (distance_to_best[2] > 0 && distance_to_best[2] > distance_to_best[0])
19ddcd04 1151 distance_to_best[0] = distance_to_best[2];
4a073c53 1152 if (distance_to_best[3] > 0 && distance_to_best[3] > distance_to_best[0])
19ddcd04 1153 distance_to_best[0] = distance_to_best[3];
4a073c53 1154
19ddcd04
MD
1155 /*
 1156 * If our worst distance is better than the overall best,
 1157 * we become the new best candidate.
1158 */
1159 if (distance_to_best[0] < overall_best_distance) {
1160 overall_best_distance = distance_to_best[0];
1161 bitsel[0] = bit_i;
1162 bitsel[1] = bit_j;
1163 }
1164 }
1165 }
1166
1167 dbg_printf("2 dimensions pool bit selection: (%u,%u)\n", bitsel[0], bitsel[1]);
1168
1169 /* Return our bit selection */
1170 _bitsel[0] = bitsel[0];
1171 _bitsel[1] = bitsel[1];
1172}
1173
7a0b2331
MD
1174/*
 1175 * ja_node_recompact: recompact a node, adding or removing a child.
2e313670 1176 * Return 0 on success, -EAGAIN if need to retry, or other negative
5a9a87dd 1177 * error value otherwise.
7a0b2331
MD
1178 */
1179static
2e313670
MD
1180int ja_node_recompact(enum ja_recompact mode,
1181 struct cds_ja *ja,
e1db2db5 1182 unsigned int old_type_index,
d96bfb0d 1183 const struct cds_ja_type *old_type,
b4540e8a 1184 struct cds_ja_inode *old_node,
5a9a87dd 1185 struct cds_ja_shadow_node *shadow_node,
3d8fe307 1186 struct cds_ja_inode_flag **old_node_flag_ptr, uint8_t n,
af3cbd45
MD
1187 struct cds_ja_inode_flag *child_node_flag,
1188 struct cds_ja_inode_flag **nullify_node_flag_ptr)
7a0b2331 1189{
e1db2db5 1190 unsigned int new_type_index;
b4540e8a 1191 struct cds_ja_inode *new_node;
af3cbd45 1192 struct cds_ja_shadow_node *new_shadow_node = NULL;
d96bfb0d 1193 const struct cds_ja_type *new_type;
3d8fe307 1194 struct cds_ja_inode_flag *new_node_flag, *old_node_flag;
7a0b2331 1195 int ret;
f07b240f 1196 int fallback = 0;
7a0b2331 1197
3d8fe307
MD
1198 old_node_flag = *old_node_flag_ptr;
1199
2e313670 1200 switch (mode) {
19ddcd04
MD
1201 case JA_RECOMPACT_ADD_SAME:
1202 if (old_type->type_class == RCU_JA_POOL) {
1203 /*
1204 * For pool type, try redistributing
1205 * into a different distribution of same
1206 * size if we have not reached limits.
1207 */
1208 if (shadow_node->nr_child + 1 > old_type->max_child) {
1209 new_type_index = old_type_index + 1;
1210 } else if (shadow_node->nr_child + 1 < old_type->min_child) {
1211 new_type_index = old_type_index - 1;
1212 } else {
1213 new_type_index = old_type_index;
1214 }
1215 } else {
1216 new_type_index = old_type_index;
1217 }
2e313670 1218 break;
19ddcd04 1219 case JA_RECOMPACT_ADD_NEXT:
2e313670
MD
1220 if (!shadow_node || old_type_index == NODE_INDEX_NULL) {
1221 new_type_index = 0;
1222 } else {
19ddcd04
MD
1223 if (old_type->type_class == RCU_JA_POOL) {
1224 /*
1225 * For pool type, try redistributing
1226 * into a different distribution of same
1227 * size if we have not reached limits.
1228 */
1229 if (shadow_node->nr_child + 1 > old_type->max_child) {
1230 new_type_index = old_type_index + 1;
1231 } else {
1232 new_type_index = old_type_index;
1233 }
1234 } else {
1235 new_type_index = old_type_index + 1;
1236 }
2e313670
MD
1237 }
1238 break;
1239 case JA_RECOMPACT_DEL:
1240 if (old_type_index == 0) {
1241 new_type_index = NODE_INDEX_NULL;
1242 } else {
19ddcd04
MD
1243 if (old_type->type_class == RCU_JA_POOL) {
1244 /*
1245 * For pool type, try redistributing
1246 * into a different distribution of same
1247 * size if we have not reached limits.
1248 */
1249 if (shadow_node->nr_child - 1 < old_type->min_child) {
1250 new_type_index = old_type_index - 1;
1251 } else {
1252 new_type_index = old_type_index;
1253 }
1254 } else {
1255 new_type_index = old_type_index - 1;
1256 }
2e313670
MD
1257 }
1258 break;
1259 default:
1260 assert(0);
7a0b2331 1261 }
a2a7ff59 1262
f07b240f 1263retry: /* for fallback */
582a6ade
MD
1264 dbg_printf("Recompact from type %d to type %d\n",
1265 old_type_index, new_type_index);
7a0b2331 1266 new_type = &ja_types[new_type_index];
2e313670
MD
1267 if (new_type_index != NODE_INDEX_NULL) {
1268 new_node = alloc_cds_ja_node(new_type);
1269 if (!new_node)
1270 return -ENOMEM;
b1a90ce3
MD
1271
1272 if (new_type->type_class == RCU_JA_POOL) {
1273 switch (new_type->nr_pool_order) {
1274 case 1:
1275 {
19ddcd04
MD
1276 unsigned int node_distrib_bitsel;
1277
b1a90ce3
MD
1278 node_distrib_bitsel =
1279 ja_node_sum_distribution_1d(mode, ja,
1280 old_type_index, old_type,
1281 old_node, shadow_node,
1282 n, child_node_flag,
1283 nullify_node_flag_ptr);
1284 assert(!((unsigned long) new_node & JA_POOL_1D_MASK));
1285 new_node_flag = ja_node_flag_pool_1d(new_node,
1286 new_type_index, node_distrib_bitsel);
1287 break;
1288 }
1289 case 2:
1290 {
19ddcd04
MD
1291 unsigned int node_distrib_bitsel[2];
1292
1293 ja_node_sum_distribution_2d(mode, ja,
1294 old_type_index, old_type,
1295 old_node, shadow_node,
1296 n, child_node_flag,
1297 nullify_node_flag_ptr,
1298 node_distrib_bitsel);
b1a90ce3
MD
1299 assert(!((unsigned long) new_node & JA_POOL_1D_MASK));
1300 assert(!((unsigned long) new_node & JA_POOL_2D_MASK));
19ddcd04
MD
1301 new_node_flag = ja_node_flag_pool_2d(new_node,
1302 new_type_index, node_distrib_bitsel);
b1a90ce3
MD
1303 break;
1304 }
1305 default:
1306 assert(0);
1307 }
1308 } else {
1309 new_node_flag = ja_node_flag(new_node, new_type_index);
1310 }
1311
2e313670 1312 dbg_printf("Recompact inherit lock from %p\n", shadow_node);
3d8fe307 1313 new_shadow_node = rcuja_shadow_set(ja->ht, new_node_flag, shadow_node, ja);
2e313670
MD
1314 if (!new_shadow_node) {
1315 free(new_node);
1316 return -ENOMEM;
1317 }
1318 if (fallback)
1319 new_shadow_node->fallback_removal_count =
1320 JA_FALLBACK_REMOVAL_COUNT;
1321 } else {
1322 new_node = NULL;
1323 new_node_flag = NULL;
e1db2db5 1324 }
11c5e016 1325
19ddcd04 1326 assert(mode != JA_RECOMPACT_ADD_NEXT || old_type->type_class != RCU_JA_PIGEON);
2e313670
MD
1327
1328 if (new_type_index == NODE_INDEX_NULL)
1329 goto skip_copy;
1330
11c5e016
MD
1331 switch (old_type->type_class) {
1332 case RCU_JA_LINEAR:
1333 {
1334 uint8_t nr_child =
1335 ja_linear_node_get_nr_child(old_type, old_node);
1336 unsigned int i;
1337
1338 for (i = 0; i < nr_child; i++) {
b4540e8a 1339 struct cds_ja_inode_flag *iter;
11c5e016
MD
1340 uint8_t v;
1341
1342 ja_linear_node_get_ith_pos(old_type, old_node, i, &v, &iter);
1343 if (!iter)
1344 continue;
af3cbd45 1345 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
2e313670 1346 continue;
b1a90ce3 1347 ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
f07b240f 1348 new_shadow_node,
11c5e016 1349 v, iter);
f07b240f
MD
1350 if (new_type->type_class == RCU_JA_POOL && ret) {
1351 goto fallback_toosmall;
1352 }
11c5e016
MD
1353 assert(!ret);
1354 }
1355 break;
1356 }
1357 case RCU_JA_POOL:
1358 {
1359 unsigned int pool_nr;
1360
1361 for (pool_nr = 0; pool_nr < (1U << old_type->nr_pool_order); pool_nr++) {
b4540e8a 1362 struct cds_ja_inode *pool =
11c5e016
MD
1363 ja_pool_node_get_ith_pool(old_type,
1364 old_node, pool_nr);
1365 uint8_t nr_child =
1366 ja_linear_node_get_nr_child(old_type, pool);
1367 unsigned int j;
1368
1369 for (j = 0; j < nr_child; j++) {
b4540e8a 1370 struct cds_ja_inode_flag *iter;
11c5e016
MD
1371 uint8_t v;
1372
1373 ja_linear_node_get_ith_pos(old_type, pool,
1374 j, &v, &iter);
1375 if (!iter)
1376 continue;
af3cbd45 1377 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
2e313670 1378 continue;
b1a90ce3 1379 ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
f07b240f 1380 new_shadow_node,
11c5e016 1381 v, iter);
f07b240f
MD
1382 if (new_type->type_class == RCU_JA_POOL
1383 && ret) {
1384 goto fallback_toosmall;
1385 }
11c5e016
MD
1386 assert(!ret);
1387 }
1388 }
1389 break;
7a0b2331 1390 }
a2a7ff59 1391 case RCU_JA_NULL:
19ddcd04 1392 assert(mode == JA_RECOMPACT_ADD_NEXT);
a2a7ff59 1393 break;
11c5e016 1394 case RCU_JA_PIGEON:
2e313670
MD
1395 {
1396 uint8_t nr_child;
1397 unsigned int i;
1398
1399 assert(mode == JA_RECOMPACT_DEL);
1400 nr_child = shadow_node->nr_child;
1401 for (i = 0; i < nr_child; i++) {
1402 struct cds_ja_inode_flag *iter;
1403
1404 iter = ja_pigeon_node_get_ith_pos(old_type, old_node, i);
1405 if (!iter)
1406 continue;
af3cbd45 1407 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
2e313670 1408 continue;
b1a90ce3 1409 ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
2e313670
MD
1410 new_shadow_node,
1411 i, iter);
1412 if (new_type->type_class == RCU_JA_POOL && ret) {
1413 goto fallback_toosmall;
1414 }
1415 assert(!ret);
1416 }
1417 break;
1418 }
11c5e016
MD
1419 default:
1420 assert(0);
5a9a87dd 1421 ret = -EINVAL;
f07b240f 1422 goto end;
11c5e016 1423 }
2e313670 1424skip_copy:
11c5e016 1425
19ddcd04 1426 if (mode == JA_RECOMPACT_ADD_NEXT || mode == JA_RECOMPACT_ADD_SAME) {
2e313670 1427 /* add node */
b1a90ce3 1428 ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
2e313670
MD
1429 new_shadow_node,
1430 n, child_node_flag);
7b413155
MD
1431 if (new_type->type_class == RCU_JA_POOL && ret) {
1432 goto fallback_toosmall;
1433 }
2e313670
MD
1434 assert(!ret);
1435 }
19ddcd04
MD
1436
1437 if (fallback) {
1438 dbg_printf("Using fallback for %u children, node type index: %u, mode %s\n",
1439 new_shadow_node->nr_child, old_type_index, mode == JA_RECOMPACT_ADD_NEXT ? "add_next" :
1440 (mode == JA_RECOMPACT_DEL ? "del" : "add_same"));
1441 uatomic_inc(&node_fallback_count_distribution[new_shadow_node->nr_child]);
1442 }
1443
3d8fe307
MD
1444 /* Return pointer to new recompacted node through old_node_flag_ptr */
1445 *old_node_flag_ptr = new_node_flag;
a2a7ff59 1446 if (old_node) {
2e313670
MD
1447 int flags;
1448
1449 flags = RCUJA_SHADOW_CLEAR_FREE_NODE;
1450 /*
1451 * It is OK to free the lock associated with a node
1452 * going to NULL, since we are holding the parent lock.
1453 * This synchronizes removal with re-add of that node.
1454 */
1455 if (new_type_index == NODE_INDEX_NULL)
1456 flags = RCUJA_SHADOW_CLEAR_FREE_LOCK;
3d8fe307 1457 ret = rcuja_shadow_clear(ja->ht, old_node_flag, shadow_node,
2e313670 1458 flags);
a2a7ff59
MD
1459 assert(!ret);
1460 }
5a9a87dd
MD
1461
1462 ret = 0;
f07b240f 1463end:
5a9a87dd 1464 return ret;
f07b240f
MD
1465
1466fallback_toosmall:
1467 /* fallback if next pool is too small */
af3cbd45 1468 assert(new_shadow_node);
3d8fe307 1469 ret = rcuja_shadow_clear(ja->ht, new_node_flag, new_shadow_node,
f07b240f
MD
1470 RCUJA_SHADOW_CLEAR_FREE_NODE);
1471 assert(!ret);
1472
19ddcd04
MD
1473 switch (mode) {
1474 case JA_RECOMPACT_ADD_SAME:
1475 /*
1476 * JA_RECOMPACT_ADD_SAME is only triggered if a linear
1477 * node within a pool has unused entries. It should
1478 * therefore _never_ be too small.
1479 */
4a073c53 1480 assert(0);
4cde8267
MD
1481
1482 /* Fall-through */
19ddcd04
MD
1483 case JA_RECOMPACT_ADD_NEXT:
1484 {
1485 const struct cds_ja_type *next_type;
1486
1487 /*
1488 * Recompaction attempt on add failed. Should only
1489 * happen if target node type is pool. Caused by
1490 * hard-to-split distribution. Recompact using the next
1491 * distribution size.
1492 */
1493 assert(new_type->type_class == RCU_JA_POOL);
1494 next_type = &ja_types[new_type_index + 1];
1495 /*
1496 * Try going to the next pool size if our population
1497 * fits within its range. This is not flagged as a
1498 * fallback.
1499 */
1500 if (shadow_node->nr_child + 1 >= next_type->min_child
1501 && shadow_node->nr_child + 1 <= next_type->max_child) {
1502 new_type_index++;
1503 goto retry;
1504 } else {
1505 new_type_index++;
1506 dbg_printf("Add fallback to type %d\n", new_type_index);
1507 uatomic_inc(&ja->nr_fallback);
1508 fallback = 1;
1509 goto retry;
1510 }
1511 break;
1512 }
1513 case JA_RECOMPACT_DEL:
1514 /*
1515 * Recompaction attempt on delete failed. Should only
1516 * happen if target node type is pool. This is caused by
1517 * a hard-to-split distribution. Recompact on same node
1518 * size, but flag current node as "fallback" to ensure
1519 * we don't attempt recompaction before some activity
1520 * has reshuffled our node.
1521 */
1522 assert(new_type->type_class == RCU_JA_POOL);
1523 new_type_index = old_type_index;
1524 dbg_printf("Delete fallback keeping type %d\n", new_type_index);
1525 uatomic_inc(&ja->nr_fallback);
1526 fallback = 1;
1527 goto retry;
1528 default:
1529 assert(0);
1530 return -EINVAL;
1531 }
1532
1533 /*
1534 * Last resort fallback: pigeon.
1535 */
f07b240f
MD
1536 new_type_index = (1UL << JA_TYPE_BITS) - 1;
1537 dbg_printf("Fallback to type %d\n", new_type_index);
1538 uatomic_inc(&ja->nr_fallback);
1539 fallback = 1;
1540 goto retry;
7a0b2331
MD
1541}
1542
5a9a87dd 1543/*
2e313670 1544 * Return 0 on success, -EAGAIN if need to retry, or other negative
5a9a87dd
MD
1545 * error value otherwise.
1546 */
7a0b2331 1547static
d96bfb0d 1548int ja_node_set_nth(struct cds_ja *ja,
b4540e8a 1549 struct cds_ja_inode_flag **node_flag, uint8_t n,
5a9a87dd
MD
1550 struct cds_ja_inode_flag *child_node_flag,
1551 struct cds_ja_shadow_node *shadow_node)
7a0b2331
MD
1552{
1553 int ret;
e1db2db5 1554 unsigned int type_index;
d96bfb0d 1555 const struct cds_ja_type *type;
b4540e8a 1556 struct cds_ja_inode *node;
7a0b2331 1557
a2a7ff59
MD
1558 dbg_printf("ja_node_set_nth for n=%u, node %p, shadow %p\n",
1559 (unsigned int) n, ja_node_ptr(*node_flag), shadow_node);
1560
e1db2db5
MD
1561 node = ja_node_ptr(*node_flag);
1562 type_index = ja_node_type(*node_flag);
1563 type = &ja_types[type_index];
b1a90ce3 1564 ret = _ja_node_set_nth(type, node, *node_flag, shadow_node,
e1db2db5 1565 n, child_node_flag);
2e313670
MD
1566 switch (ret) {
1567 case -ENOSPC:
19ddcd04
MD
1568 /* Not enough space in node, need to recompact to next type. */
1569 ret = ja_node_recompact(JA_RECOMPACT_ADD_NEXT, ja, type_index, type, node,
af3cbd45 1570 shadow_node, node_flag, n, child_node_flag, NULL);
2e313670
MD
1571 break;
1572 case -ERANGE:
1573 /* Node needs to be recompacted. */
19ddcd04 1574 ret = ja_node_recompact(JA_RECOMPACT_ADD_SAME, ja, type_index, type, node,
af3cbd45 1575 shadow_node, node_flag, n, child_node_flag, NULL);
2e313670
MD
1576 break;
1577 }
1578 return ret;
1579}
1580
1581/*
1582 * Return 0 on success, -EAGAIN if need to retry, or other negative
1583 * error value otherwise.
1584 */
1585static
af3cbd45
MD
1586int ja_node_clear_ptr(struct cds_ja *ja,
1587 struct cds_ja_inode_flag **node_flag_ptr, /* Pointer to location to nullify */
1588 struct cds_ja_inode_flag **parent_node_flag_ptr, /* Address of parent ptr in its parent */
1589 struct cds_ja_shadow_node *shadow_node, /* of parent */
1590 uint8_t n)
2e313670
MD
1591{
1592 int ret;
1593 unsigned int type_index;
1594 const struct cds_ja_type *type;
1595 struct cds_ja_inode *node;
1596
af3cbd45
MD
1597 dbg_printf("ja_node_clear_ptr for node %p, shadow %p, target ptr %p\n",
1598 ja_node_ptr(*parent_node_flag_ptr), shadow_node, node_flag_ptr);
2e313670 1599
af3cbd45
MD
1600 node = ja_node_ptr(*parent_node_flag_ptr);
1601 type_index = ja_node_type(*parent_node_flag_ptr);
2e313670 1602 type = &ja_types[type_index];
19ddcd04 1603 ret = _ja_node_clear_ptr(type, node, *parent_node_flag_ptr, shadow_node, node_flag_ptr, n);
2e313670 1604 if (ret == -EFBIG) {
19ddcd04 1605 /* Should try recompaction. */
2e313670 1606 ret = ja_node_recompact(JA_RECOMPACT_DEL, ja, type_index, type, node,
af3cbd45
MD
1607 shadow_node, parent_node_flag_ptr, n, NULL,
1608 node_flag_ptr);
7a0b2331
MD
1609 }
1610 return ret;
1611}
be9a7474 1612
af3cbd45 1613struct cds_hlist_head cds_ja_lookup(struct cds_ja *ja, uint64_t key)
b4540e8a 1614{
41975c12
MD
1615 unsigned int tree_depth, i;
1616 struct cds_ja_inode_flag *node_flag;
af3cbd45 1617 struct cds_hlist_head head = { NULL };
41975c12
MD
1618
1619 if (caa_unlikely(key > ja->key_max))
af3cbd45 1620 return head;
41975c12 1621 tree_depth = ja->tree_depth;
5a9a87dd 1622 node_flag = rcu_dereference(ja->root);
41975c12 1623
5a9a87dd
MD
1624 /* level 0: root node */
1625 if (!ja_node_ptr(node_flag))
af3cbd45 1626 return head;
5a9a87dd
MD
1627
1628 for (i = 1; i < tree_depth; i++) {
79b41067
MD
1629 uint8_t iter_key;
1630
1631 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1)));
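		/*
		 * Editorial example: with tree_depth == 5 (e.g. a 32-bit key
		 * space) and key == 0x12345678, levels 1 to 4 successively
		 * use the key bytes 0x12, 0x34, 0x56 and 0x78 as iter_key.
		 */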
b62a8d0c 1632 node_flag = ja_node_get_nth(node_flag, NULL, NULL, NULL,
79b41067 1633 iter_key);
582a6ade
MD
1634 dbg_printf("cds_ja_lookup iter key lookup %u finds node_flag %p\n",
1635 (unsigned int) iter_key, node_flag);
41975c12 1636 if (!ja_node_ptr(node_flag))
af3cbd45 1637 return head;
41975c12
MD
1638 }
1639
5a9a87dd 1640	/* Last level lookup succeeded. We got an actual match. */
af3cbd45
MD
1641 head.next = (struct cds_hlist_node *) node_flag;
1642 return head;
5a9a87dd
MD
1643}
1644
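
/*
 * Usage sketch (editor's addition, not part of rcuja.c): how a caller is
 * expected to consume the head returned by cds_ja_lookup(). The
 * "struct my_entry" type, its "node" member name and the my_lookup()
 * wrapper are hypothetical; only cds_ja_lookup(), struct cds_ja_node,
 * cds_hlist_for_each_entry_rcu() and caa_container_of() come from this
 * code base. The caller is assumed to hold the read-side lock of the RCU
 * flavor the judy array was created with, because duplicates for a key
 * are chained through an RCU hlist.
 */
struct my_entry {
	struct cds_ja_node node;	/* linked into the judy array */
	uint64_t key;
	void *data;
};

static struct my_entry *my_lookup(struct cds_ja *ja, uint64_t key)
{
	struct cds_hlist_head head;
	struct cds_hlist_node *pos;
	struct cds_ja_node *ja_node;

	head = cds_ja_lookup(ja, key);
	cds_hlist_for_each_entry_rcu(ja_node, pos, &head, list) {
		/* Return the first duplicate chained under this key. */
		return caa_container_of(ja_node, struct my_entry, node);
	}
	return NULL;	/* Key not present. */
}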
1645/*
1646 * We reached an unpopulated node. Create it and the children we need,
1647 * and then attach the entire branch to the current node. This may
1648 * trigger recompaction of the current node. Locks needed: node lock
1649 * (for add), and, possibly, parent node lock (to update pointer due to
1650 * node recompaction).
1651 *
1652 * First take node lock, check if recompaction is needed, then take
1653 * parent lock (if needed). Then we can proceed to create the new
1654 * branch. Publish the new branch, and release locks.
1655 * TODO: we currently always take the parent lock even when not needed.
1656 */
1657static
1658int ja_attach_node(struct cds_ja *ja,
b0ca2d21 1659 struct cds_ja_inode_flag **attach_node_flag_ptr,
b62a8d0c 1660 struct cds_ja_inode_flag *attach_node_flag,
5a9a87dd
MD
1661 struct cds_ja_inode_flag **node_flag_ptr,
1662 struct cds_ja_inode_flag *node_flag,
1663 struct cds_ja_inode_flag *parent_node_flag,
1664 uint64_t key,
79b41067 1665 unsigned int level,
5a9a87dd
MD
1666 struct cds_ja_node *child_node)
1667{
1668 struct cds_ja_shadow_node *shadow_node = NULL,
af3cbd45 1669 *parent_shadow_node = NULL;
5a9a87dd
MD
1670 struct cds_ja_inode *node = ja_node_ptr(node_flag);
1671 struct cds_ja_inode *parent_node = ja_node_ptr(parent_node_flag);
1672 struct cds_hlist_head head;
1673 struct cds_ja_inode_flag *iter_node_flag, *iter_dest_node_flag;
1674 int ret, i;
a2a7ff59 1675 struct cds_ja_inode_flag *created_nodes[JA_MAX_DEPTH];
5a9a87dd
MD
1676 int nr_created_nodes = 0;
1677
582a6ade
MD
1678 dbg_printf("Attach node at level %u (node %p, node_flag %p)\n",
1679 level, node, node_flag);
a2a7ff59 1680
5a9a87dd 1681 assert(node);
3d8fe307 1682 shadow_node = rcuja_shadow_lookup_lock(ja->ht, node_flag);
5a9a87dd 1683 if (!shadow_node) {
2e313670 1684 ret = -EAGAIN;
5a9a87dd
MD
1685 goto end;
1686 }
1687 if (parent_node) {
1688 parent_shadow_node = rcuja_shadow_lookup_lock(ja->ht,
3d8fe307 1689 parent_node_flag);
5a9a87dd 1690 if (!parent_shadow_node) {
2e313670 1691 ret = -EAGAIN;
5a9a87dd
MD
1692 goto unlock_shadow;
1693 }
1694 }
1695
b62a8d0c 1696 if (node_flag_ptr && ja_node_ptr(*node_flag_ptr)) {
b306a0fe 1697 /*
c112acaa
MD
1698 * Target node has been updated between RCU lookup and
1699 * lock acquisition. We need to re-try lookup and
1700 * attach.
1701 */
1702 ret = -EAGAIN;
1703 goto unlock_parent;
1704 }
1705
1706 if (attach_node_flag_ptr && ja_node_ptr(*attach_node_flag_ptr) !=
b62a8d0c 1707 ja_node_ptr(attach_node_flag)) {
c112acaa
MD
1708 /*
1709 * Target node has been updated between RCU lookup and
1710 * lock acquisition. We need to re-try lookup and
1711 * attach.
b306a0fe
MD
1712 */
1713 ret = -EAGAIN;
1714 goto unlock_parent;
1715 }
1716
a2a7ff59 1717 /* Create new branch, starting from bottom */
5a9a87dd
MD
1718 CDS_INIT_HLIST_HEAD(&head);
1719 cds_hlist_add_head_rcu(&child_node->list, &head);
a2a7ff59 1720 iter_node_flag = (struct cds_ja_inode_flag *) head.next;
5a9a87dd 1721
79b41067
MD
1722 for (i = ja->tree_depth; i > (int) level; i--) {
1723 uint8_t iter_key;
1724
1725 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (ja->tree_depth - i)));
1726 dbg_printf("branch creation level %d, key %u\n",
1727 i - 1, (unsigned int) iter_key);
5a9a87dd
MD
1728 iter_dest_node_flag = NULL;
1729 ret = ja_node_set_nth(ja, &iter_dest_node_flag,
79b41067 1730 iter_key,
5a9a87dd
MD
1731 iter_node_flag,
1732 NULL);
1733 if (ret)
1734 goto check_error;
1735 created_nodes[nr_created_nodes++] = iter_dest_node_flag;
1736 iter_node_flag = iter_dest_node_flag;
1737 }
1738
79b41067
MD
1739 if (level > 1) {
1740 uint8_t iter_key;
1741
1742 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (ja->tree_depth - level)));
a2a7ff59
MD
1743 /* We need to use set_nth on the previous level. */
1744 iter_dest_node_flag = node_flag;
1745 ret = ja_node_set_nth(ja, &iter_dest_node_flag,
79b41067 1746 iter_key,
a2a7ff59
MD
1747 iter_node_flag,
1748 shadow_node);
1749 if (ret)
1750 goto check_error;
1751 created_nodes[nr_created_nodes++] = iter_dest_node_flag;
1752 iter_node_flag = iter_dest_node_flag;
1753 }
1754
5a9a87dd 1755 /* Publish new branch */
a2a7ff59 1756 dbg_printf("Publish branch %p, replacing %p\n",
b0ca2d21
MD
1757 iter_node_flag, *attach_node_flag_ptr);
1758 rcu_assign_pointer(*attach_node_flag_ptr, iter_node_flag);
5a9a87dd
MD
1759
1760 /* Success */
1761 ret = 0;
1762
1763check_error:
1764 if (ret) {
1765 for (i = 0; i < nr_created_nodes; i++) {
1766 int tmpret;
a2a7ff59
MD
1767 int flags;
1768
1769 flags = RCUJA_SHADOW_CLEAR_FREE_LOCK;
1770 if (i)
1771 flags |= RCUJA_SHADOW_CLEAR_FREE_NODE;
5a9a87dd 1772 tmpret = rcuja_shadow_clear(ja->ht,
3d8fe307 1773 created_nodes[i],
a2a7ff59
MD
1774 NULL,
1775 flags);
5a9a87dd
MD
1776 assert(!tmpret);
1777 }
1778 }
b306a0fe 1779unlock_parent:
5a9a87dd
MD
1780 if (parent_shadow_node)
1781 rcuja_shadow_unlock(parent_shadow_node);
1782unlock_shadow:
1783 if (shadow_node)
1784 rcuja_shadow_unlock(shadow_node);
1785end:
1786 return ret;
1787}
1788
1789/*
af3cbd45
MD
 1790 * Lock the parent containing the hlist head pointer, and add the node to the
 1791 * list of duplicates. Failure can happen if a concurrent update changes the
1792 * parent before we get the lock. We return -EAGAIN in that case.
5a9a87dd
MD
1793 * Return 0 on success, negative error value on failure.
1794 */
1795static
1796int ja_chain_node(struct cds_ja *ja,
af3cbd45 1797 struct cds_ja_inode_flag *parent_node_flag,
fa112799 1798 struct cds_ja_inode_flag **node_flag_ptr,
c112acaa 1799 struct cds_ja_inode_flag *node_flag,
5a9a87dd
MD
1800 struct cds_hlist_head *head,
1801 struct cds_ja_node *node)
1802{
1803 struct cds_ja_shadow_node *shadow_node;
fa112799 1804 int ret = 0;
5a9a87dd 1805
3d8fe307 1806 shadow_node = rcuja_shadow_lookup_lock(ja->ht, parent_node_flag);
b306a0fe 1807 if (!shadow_node) {
2e313670 1808 return -EAGAIN;
b306a0fe 1809 }
c112acaa 1810 if (ja_node_ptr(*node_flag_ptr) != ja_node_ptr(node_flag)) {
fa112799
MD
1811 ret = -EAGAIN;
1812 goto end;
1813 }
5a9a87dd 1814 cds_hlist_add_head_rcu(&node->list, head);
fa112799 1815end:
5a9a87dd 1816 rcuja_shadow_unlock(shadow_node);
fa112799 1817 return ret;
5a9a87dd
MD
1818}
1819
1820int cds_ja_add(struct cds_ja *ja, uint64_t key,
1821 struct cds_ja_node *new_node)
1822{
1823 unsigned int tree_depth, i;
b0ca2d21
MD
1824 struct cds_ja_inode_flag **attach_node_flag_ptr,
1825 **node_flag_ptr;
5a9a87dd
MD
1826 struct cds_ja_inode_flag *node_flag,
1827 *parent_node_flag,
b62a8d0c
MD
1828 *parent2_node_flag,
1829 *attach_node_flag;
5a9a87dd
MD
1830 int ret;
1831
b306a0fe 1832 if (caa_unlikely(key > ja->key_max)) {
5a9a87dd 1833 return -EINVAL;
b306a0fe 1834 }
5a9a87dd
MD
1835 tree_depth = ja->tree_depth;
1836
1837retry:
a2a7ff59
MD
1838 dbg_printf("cds_ja_add attempt: key %" PRIu64 ", node %p\n",
1839 key, new_node);
5a9a87dd 1840 parent2_node_flag = NULL;
b0f74e47
MD
1841 parent_node_flag =
1842 (struct cds_ja_inode_flag *) &ja->root; /* Use root ptr address as key for mutex */
b0ca2d21 1843 attach_node_flag_ptr = &ja->root;
b62a8d0c 1844 attach_node_flag = rcu_dereference(ja->root);
5a9a87dd 1845 node_flag_ptr = &ja->root;
35170a44 1846 node_flag = rcu_dereference(ja->root);
5a9a87dd
MD
1847
1848 /* Iterate on all internal levels */
a2a7ff59 1849 for (i = 1; i < tree_depth; i++) {
79b41067
MD
1850 uint8_t iter_key;
1851
b0ca2d21 1852 dbg_printf("cds_ja_add iter attach_node_flag_ptr %p node_flag_ptr %p node_flag %p\n",
c112acaa 1853 attach_node_flag_ptr, node_flag_ptr, node_flag);
5a9a87dd 1854 if (!ja_node_ptr(node_flag)) {
b0ca2d21 1855 ret = ja_attach_node(ja, attach_node_flag_ptr,
b62a8d0c 1856 attach_node_flag,
b0ca2d21 1857 node_flag_ptr,
c112acaa
MD
1858 parent_node_flag,
1859 parent2_node_flag,
5a9a87dd 1860 key, i, new_node);
2e313670 1861 if (ret == -EAGAIN || ret == -EEXIST)
5a9a87dd
MD
1862 goto retry;
1863 else
1864 goto end;
1865 }
79b41067 1866 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1)));
5a9a87dd
MD
1867 parent2_node_flag = parent_node_flag;
1868 parent_node_flag = node_flag;
1869 node_flag = ja_node_get_nth(node_flag,
b0ca2d21 1870 &attach_node_flag_ptr,
b62a8d0c 1871 &attach_node_flag,
5a9a87dd 1872 &node_flag_ptr,
79b41067 1873 iter_key);
b0ca2d21
MD
1874 dbg_printf("cds_ja_add iter key lookup %u finds node_flag %p attach_node_flag_ptr %p node_flag_ptr %p\n",
1875 (unsigned int) iter_key, node_flag,
c112acaa
MD
1876 attach_node_flag_ptr,
1877 node_flag_ptr);
5a9a87dd
MD
1878 }
1879
1880 /*
 1881 * We reached the bottom of the tree: simply add the node to the last
 1882 * internal level, or chain it if the key is already present.
1883 */
1884 if (!ja_node_ptr(node_flag)) {
c112acaa
MD
1885 dbg_printf("cds_ja_add attach_node_flag_ptr %p node_flag_ptr %p node_flag %p\n",
1886 attach_node_flag_ptr, node_flag_ptr, node_flag);
b0ca2d21 1887 ret = ja_attach_node(ja, attach_node_flag_ptr,
b62a8d0c 1888 attach_node_flag,
b0ca2d21 1889 node_flag_ptr, parent_node_flag,
5a9a87dd
MD
1890 parent2_node_flag, key, i, new_node);
1891 } else {
1892 ret = ja_chain_node(ja,
af3cbd45 1893 parent_node_flag,
fa112799 1894 node_flag_ptr,
c112acaa 1895 node_flag,
b0ca2d21 1896 (struct cds_hlist_head *) attach_node_flag_ptr,
5a9a87dd
MD
1897 new_node);
1898 }
b306a0fe 1899 if (ret == -EAGAIN || ret == -EEXIST)
5a9a87dd
MD
1900 goto retry;
1901end:
1902 return ret;
b4540e8a
MD
1903}
1904
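
/*
 * Usage sketch (editor's addition, not part of rcuja.c): inserting an
 * entry. my_insert() and "struct my_entry" (from the lookup sketch
 * above) are hypothetical; cds_ja_add() chains the node as a duplicate
 * if the key is already present, and retries internally on -EAGAIN and
 * -EEXIST. The caller is assumed to hold the read-side lock of the RCU
 * flavor the array was created with, and the key must fit within the
 * key_bits passed at creation time (otherwise -EINVAL is returned).
 */
static int my_insert(struct cds_ja *ja, struct my_entry *entry)
{
	int ret;

	ret = cds_ja_add(ja, entry->key, &entry->node);
	if (ret)
		fprintf(stderr, "Insertion of key %" PRIu64 " failed: %d\n",
			entry->key, ret);
	return ret;
}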
af3cbd45
MD
1905/*
 1906 * Note: there is no need to look up the pointer address associated with
 1907 * each node's nth item after taking the lock: it has already been done by
 1908 * cds_ja_del while holding the rcu read-side lock, and our node rules
 1909 * ensure that when a matching value -> pointer entry is found in a node, it is
1910 * _NEVER_ changed for that node without recompaction, and recompaction
1911 * reallocates the node.
b306a0fe
MD
1912 * However, when a child is removed from "linear" nodes, its pointer
1913 * is set to NULL. We therefore check, while holding the locks, if this
1914 * pointer is NULL, and return -ENOENT to the caller if it is the case.
af3cbd45 1915 */
35170a44
MD
1916static
1917int ja_detach_node(struct cds_ja *ja,
1918 struct cds_ja_inode_flag **snapshot,
af3cbd45
MD
1919 struct cds_ja_inode_flag ***snapshot_ptr,
1920 uint8_t *snapshot_n,
35170a44
MD
1921 int nr_snapshot,
1922 uint64_t key,
1923 struct cds_ja_node *node)
1924{
af3cbd45
MD
1925 struct cds_ja_shadow_node *shadow_nodes[JA_MAX_DEPTH];
1926 struct cds_ja_inode_flag **node_flag_ptr = NULL,
1927 *parent_node_flag = NULL,
1928 **parent_node_flag_ptr = NULL;
b62a8d0c 1929 struct cds_ja_inode_flag *iter_node_flag;
4d6ef45e
MD
1930 int ret, i, nr_shadow = 0, nr_clear = 0, nr_branch = 0;
1931 uint8_t n = 0;
35170a44 1932
4d6ef45e 1933 assert(nr_snapshot == ja->tree_depth + 1);
35170a44 1934
af3cbd45
MD
1935 /*
1936 * From the last internal level node going up, get the node
1937 * lock, check if the node has only one child left. If it is the
1938 * case, we continue iterating upward. When we reach a node
1939 * which has more that one child left, we lock the parent, and
1940 * proceed to the node deletion (removing its children too).
1941 */
4d6ef45e 1942 for (i = nr_snapshot - 2; i >= 1; i--) {
af3cbd45
MD
1943 struct cds_ja_shadow_node *shadow_node;
1944
1945 shadow_node = rcuja_shadow_lookup_lock(ja->ht,
3d8fe307 1946 snapshot[i]);
af3cbd45
MD
1947 if (!shadow_node) {
1948 ret = -EAGAIN;
1949 goto end;
1950 }
af3cbd45 1951 shadow_nodes[nr_shadow++] = shadow_node;
b62a8d0c
MD
1952
1953 /*
1954 * Check if node has been removed between RCU
1955 * lookup and lock acquisition.
1956 */
1957 assert(snapshot_ptr[i + 1]);
1958 if (ja_node_ptr(*snapshot_ptr[i + 1])
1959 != ja_node_ptr(snapshot[i + 1])) {
1960 ret = -ENOENT;
1961 goto end;
1962 }
1963
1964 assert(shadow_node->nr_child > 0);
d810c97f 1965 if (shadow_node->nr_child == 1 && i > 1)
4d6ef45e
MD
1966 nr_clear++;
1967 nr_branch++;
af3cbd45
MD
1968 if (shadow_node->nr_child > 1 || i == 1) {
1969 /* Lock parent and break */
1970 shadow_node = rcuja_shadow_lookup_lock(ja->ht,
3d8fe307 1971 snapshot[i - 1]);
af3cbd45
MD
1972 if (!shadow_node) {
1973 ret = -EAGAIN;
1974 goto end;
1975 }
1976 shadow_nodes[nr_shadow++] = shadow_node;
b62a8d0c 1977
c112acaa
MD
1978 /*
1979 * Check if node has been removed between RCU
1980 * lookup and lock acquisition.
1981 */
b62a8d0c
MD
1982 assert(snapshot_ptr[i]);
1983 if (ja_node_ptr(*snapshot_ptr[i])
1984 != ja_node_ptr(snapshot[i])) {
c112acaa
MD
1985 ret = -ENOENT;
1986 goto end;
1987 }
1988
b62a8d0c 1989 node_flag_ptr = snapshot_ptr[i + 1];
4d6ef45e
MD
1990 n = snapshot_n[i + 1];
1991 parent_node_flag_ptr = snapshot_ptr[i];
1992 parent_node_flag = snapshot[i];
c112acaa 1993
af3cbd45
MD
1994 if (i > 1) {
1995 /*
1996 * Lock parent's parent, in case we need
1997 * to recompact parent.
1998 */
1999 shadow_node = rcuja_shadow_lookup_lock(ja->ht,
3d8fe307 2000 snapshot[i - 2]);
af3cbd45
MD
2001 if (!shadow_node) {
2002 ret = -EAGAIN;
2003 goto end;
2004 }
2005 shadow_nodes[nr_shadow++] = shadow_node;
b62a8d0c
MD
2006
2007 /*
2008 * Check if node has been removed between RCU
2009 * lookup and lock acquisition.
2010 */
2011 assert(snapshot_ptr[i - 1]);
2012 if (ja_node_ptr(*snapshot_ptr[i - 1])
2013 != ja_node_ptr(snapshot[i - 1])) {
2014 ret = -ENOENT;
2015 goto end;
2016 }
af3cbd45 2017 }
b62a8d0c 2018
af3cbd45
MD
2019 break;
2020 }
2021 }
2022
2023 /*
4d6ef45e
MD
2024 * At this point, we want to delete all nodes that are about to
2025 * be removed from shadow_nodes (except the last one, which is
2026 * either the root or the parent of the upmost node with 1
b62a8d0c
MD
 2027 * child). It is OK to free the lock here, because the RCU read lock is held,
 2028 * and the actual free is only performed from a call_rcu callback.
af3cbd45
MD
2029 */
2030
2031 for (i = 0; i < nr_clear; i++) {
2032 ret = rcuja_shadow_clear(ja->ht,
3d8fe307 2033 shadow_nodes[i]->node_flag,
af3cbd45
MD
2034 shadow_nodes[i],
2035 RCUJA_SHADOW_CLEAR_FREE_NODE
2036 | RCUJA_SHADOW_CLEAR_FREE_LOCK);
2037 assert(!ret);
2038 }
2039
2040 iter_node_flag = parent_node_flag;
2041 /* Remove from parent */
2042 ret = ja_node_clear_ptr(ja,
2043 node_flag_ptr, /* Pointer to location to nullify */
 2044		&iter_node_flag,	/* Old/new parent ptr in its parent */
4d6ef45e 2045 shadow_nodes[nr_branch - 1], /* of parent */
af3cbd45 2046 n);
b306a0fe
MD
2047 if (ret)
2048 goto end;
af3cbd45 2049
4d6ef45e
MD
2050 dbg_printf("ja_detach_node: publish %p instead of %p\n",
2051 iter_node_flag, *parent_node_flag_ptr);
af3cbd45
MD
2052 /* Update address of parent ptr in its parent */
2053 rcu_assign_pointer(*parent_node_flag_ptr, iter_node_flag);
2054
2055end:
2056 for (i = 0; i < nr_shadow; i++)
2057 rcuja_shadow_unlock(shadow_nodes[i]);
35170a44
MD
2058 return ret;
2059}
2060
af3cbd45
MD
2061static
2062int ja_unchain_node(struct cds_ja *ja,
2063 struct cds_ja_inode_flag *parent_node_flag,
fa112799 2064 struct cds_ja_inode_flag **node_flag_ptr,
013a6083 2065 struct cds_ja_inode_flag *node_flag,
af3cbd45
MD
2066 struct cds_ja_node *node)
2067{
2068 struct cds_ja_shadow_node *shadow_node;
f2758d14 2069 struct cds_hlist_node *hlist_node;
013a6083
MD
2070 struct cds_hlist_head hlist_head;
2071 int ret = 0, count = 0, found = 0;
af3cbd45 2072
3d8fe307 2073 shadow_node = rcuja_shadow_lookup_lock(ja->ht, parent_node_flag);
af3cbd45
MD
2074 if (!shadow_node)
2075 return -EAGAIN;
013a6083 2076 if (ja_node_ptr(*node_flag_ptr) != ja_node_ptr(node_flag)) {
fa112799
MD
2077 ret = -EAGAIN;
2078 goto end;
2079 }
013a6083 2080 hlist_head.next = (struct cds_hlist_node *) ja_node_ptr(node_flag);
af3cbd45
MD
2081 /*
 2082 * Retry if another thread removed all but one of the duplicates
fa112799 2083 * since our check (that check was performed without the lock).
013a6083
MD
2084 * Ensure that the node we are about to remove is still in the
2085 * list (while holding lock).
af3cbd45 2086 */
013a6083 2087 cds_hlist_for_each_rcu(hlist_node, &hlist_head) {
ade342cb
MD
2088 if (count == 0) {
2089 /* FIXME: currently a work-around */
2090 hlist_node->prev = (struct cds_hlist_node *) node_flag_ptr;
2091 }
f2758d14 2092 count++;
013a6083
MD
2093 if (hlist_node == &node->list)
2094 found++;
f2758d14 2095 }
013a6083
MD
2096 assert(found <= 1);
2097 if (!found || count == 1) {
af3cbd45
MD
2098 ret = -EAGAIN;
2099 goto end;
2100 }
2101 cds_hlist_del_rcu(&node->list);
ade342cb
MD
2102 /*
 2103 * Validate that we indeed removed the node from the linked list.
2104 */
2105 assert(ja_node_ptr(*node_flag_ptr) != (struct cds_ja_inode *) node);
af3cbd45
MD
2106end:
2107 rcuja_shadow_unlock(shadow_node);
2108 return ret;
2109}
2110
2111/*
2112 * Called with RCU read lock held.
2113 */
35170a44
MD
2114int cds_ja_del(struct cds_ja *ja, uint64_t key,
2115 struct cds_ja_node *node)
2116{
2117 unsigned int tree_depth, i;
2118 struct cds_ja_inode_flag *snapshot[JA_MAX_DEPTH];
af3cbd45
MD
2119 struct cds_ja_inode_flag **snapshot_ptr[JA_MAX_DEPTH];
2120 uint8_t snapshot_n[JA_MAX_DEPTH];
35170a44 2121 struct cds_ja_inode_flag *node_flag;
fa112799
MD
2122 struct cds_ja_inode_flag **prev_node_flag_ptr,
2123 **node_flag_ptr;
4d6ef45e 2124 int nr_snapshot;
35170a44
MD
2125 int ret;
2126
2127 if (caa_unlikely(key > ja->key_max))
2128 return -EINVAL;
2129 tree_depth = ja->tree_depth;
2130
2131retry:
4d6ef45e 2132 nr_snapshot = 0;
35170a44
MD
2133 dbg_printf("cds_ja_del attempt: key %" PRIu64 ", node %p\n",
2134 key, node);
2135
2136 /* snapshot for level 0 is only for shadow node lookup */
4d6ef45e
MD
2137 snapshot_n[0] = 0;
2138 snapshot_n[1] = 0;
af3cbd45 2139 snapshot_ptr[nr_snapshot] = NULL;
35170a44
MD
2140 snapshot[nr_snapshot++] = (struct cds_ja_inode_flag *) &ja->root;
2141 node_flag = rcu_dereference(ja->root);
af3cbd45 2142 prev_node_flag_ptr = &ja->root;
fa112799 2143 node_flag_ptr = &ja->root;
35170a44
MD
2144
2145 /* Iterate on all internal levels */
2146 for (i = 1; i < tree_depth; i++) {
2147 uint8_t iter_key;
2148
2149 dbg_printf("cds_ja_del iter node_flag %p\n",
2150 node_flag);
2151 if (!ja_node_ptr(node_flag)) {
2152 return -ENOENT;
2153 }
35170a44 2154 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1)));
4d6ef45e 2155 snapshot_n[nr_snapshot + 1] = iter_key;
af3cbd45
MD
2156 snapshot_ptr[nr_snapshot] = prev_node_flag_ptr;
2157 snapshot[nr_snapshot++] = node_flag;
35170a44 2158 node_flag = ja_node_get_nth(node_flag,
af3cbd45 2159 &prev_node_flag_ptr,
b62a8d0c 2160 NULL,
fa112799 2161 &node_flag_ptr,
35170a44 2162 iter_key);
af3cbd45
MD
2163 dbg_printf("cds_ja_del iter key lookup %u finds node_flag %p, prev_node_flag_ptr %p\n",
2164 (unsigned int) iter_key, node_flag,
2165 prev_node_flag_ptr);
35170a44 2166 }
35170a44
MD
2167 /*
 2168 * We reached the bottom of the tree; try to find the node we are trying
 2169 * to remove. Fail if we cannot find it.
2170 */
2171 if (!ja_node_ptr(node_flag)) {
4d6ef45e
MD
2172 dbg_printf("cds_ja_del: no node found for key %" PRIu64 "\n",
2173 key);
35170a44
MD
2174 return -ENOENT;
2175 } else {
4d6ef45e 2176 struct cds_hlist_head hlist_head;
35170a44 2177 struct cds_hlist_node *hlist_node;
af3cbd45
MD
2178 struct cds_ja_node *entry, *match = NULL;
2179 int count = 0;
35170a44 2180
4d6ef45e
MD
2181 hlist_head.next =
2182 (struct cds_hlist_node *) ja_node_ptr(node_flag);
af3cbd45 2183 cds_hlist_for_each_entry_rcu(entry,
35170a44 2184 hlist_node,
4d6ef45e 2185 &hlist_head,
35170a44 2186 list) {
4d6ef45e 2187 dbg_printf("cds_ja_del: compare %p with entry %p\n", node, entry);
af3cbd45
MD
2188 if (entry == node)
2189 match = entry;
2190 count++;
35170a44 2191 }
4d6ef45e
MD
2192 if (!match) {
2193 dbg_printf("cds_ja_del: no node match for node %p key %" PRIu64 "\n", node, key);
35170a44 2194 return -ENOENT;
4d6ef45e 2195 }
af3cbd45
MD
2196 assert(count > 0);
2197 if (count == 1) {
2198 /*
4d6ef45e
MD
2199 * Removing last of duplicates. Last snapshot
 2200 * does not have a shadow node (external leaves).
af3cbd45
MD
2201 */
2202 snapshot_ptr[nr_snapshot] = prev_node_flag_ptr;
2203 snapshot[nr_snapshot++] = node_flag;
2204 ret = ja_detach_node(ja, snapshot, snapshot_ptr,
2205 snapshot_n, nr_snapshot, key, node);
2206 } else {
f2758d14 2207 ret = ja_unchain_node(ja, snapshot[nr_snapshot - 1],
013a6083 2208 node_flag_ptr, node_flag, match);
af3cbd45 2209 }
35170a44 2210 }
b306a0fe
MD
2211 /*
 2212 * Explanation of -ENOENT handling: it is caused by a concurrent delete
 2213 * between the RCU lookup and the actual removal. We need to re-do the
2214 * lookup and removal attempt.
2215 */
2216 if (ret == -EAGAIN || ret == -ENOENT)
35170a44
MD
2217 goto retry;
2218 return ret;
2219}
2220
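
/*
 * Usage sketch (editor's addition, not part of rcuja.c): removing an
 * entry and deferring its reclaim. my_remove(), my_free_entry_rcu() and
 * "struct my_entry" (from the lookup sketch above) are hypothetical, and
 * the plain call_rcu() symbol assumes the default urcu flavor is linked
 * in. cds_ja_del() must be called with the RCU read-side lock held, as
 * documented above, and the entry may only be freed after a grace
 * period, since concurrent readers may still hold a reference obtained
 * through cds_ja_lookup().
 */
static void my_free_entry_rcu(struct rcu_head *head)
{
	struct cds_ja_node *ja_node =
		caa_container_of(head, struct cds_ja_node, head);

	free(caa_container_of(ja_node, struct my_entry, node));
}

static int my_remove(struct cds_ja *ja, struct my_entry *entry)
{
	int ret;

	ret = cds_ja_del(ja, entry->key, &entry->node);
	if (!ret) {
		/*
		 * Reuse the rcu_head embedded in struct cds_ja_node, the
		 * same one rcuja_free_all_children() uses at destroy time.
		 */
		call_rcu(&entry->node.head, my_free_entry_rcu);
	}
	return ret;
}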
b4540e8a
MD
2221struct cds_ja *_cds_ja_new(unsigned int key_bits,
2222 const struct rcu_flavor_struct *flavor)
be9a7474
MD
2223{
2224 struct cds_ja *ja;
b0f74e47 2225 int ret;
f07b240f 2226 struct cds_ja_shadow_node *root_shadow_node;
be9a7474
MD
2227
 2228	ja = calloc(1, sizeof(*ja));
2229 if (!ja)
2230 goto ja_error;
b4540e8a
MD
2231
2232 switch (key_bits) {
2233 case 8:
b4540e8a 2234 case 16:
1216b3d2 2235 case 24:
b4540e8a 2236 case 32:
1216b3d2
MD
2237 case 40:
2238 case 48:
2239 case 56:
2240 ja->key_max = (1ULL << key_bits) - 1;
b4540e8a
MD
2241 break;
2242 case 64:
2243 ja->key_max = UINT64_MAX;
2244 break;
2245 default:
2246 goto check_error;
2247 }
2248
be9a7474 2249 /* ja->root is NULL */
5a9a87dd 2250 /* tree_depth 0 is for pointer to root node */
582a6ade 2251 ja->tree_depth = (key_bits >> JA_LOG2_BITS_PER_BYTE) + 1;
a2a7ff59 2252 assert(ja->tree_depth <= JA_MAX_DEPTH);
be9a7474
MD
2253 ja->ht = rcuja_create_ht(flavor);
2254 if (!ja->ht)
2255 goto ht_error;
b0f74e47
MD
2256
2257 /*
 2258	 * Note: we should not free this node until the judy array is destroyed.
2259 */
f07b240f 2260 root_shadow_node = rcuja_shadow_set(ja->ht,
3d8fe307
MD
2261 (struct cds_ja_inode_flag *) &ja->root,
2262 NULL, ja);
f07b240f
MD
2263 if (!root_shadow_node) {
2264 ret = -ENOMEM;
b0f74e47 2265 goto ht_node_error;
f07b240f 2266 }
3d8fe307 2267 root_shadow_node->level = 0;
b0f74e47 2268
be9a7474
MD
2269 return ja;
2270
b0f74e47
MD
2271ht_node_error:
2272 ret = rcuja_delete_ht(ja->ht);
2273 assert(!ret);
be9a7474 2274ht_error:
b4540e8a 2275check_error:
be9a7474
MD
2276 free(ja);
2277ja_error:
2278 return NULL;
2279}
2280
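
/*
 * Worked example of the sizing above (editor's addition, not part of
 * rcuja.c), assuming JA_LOG2_BITS_PER_BYTE is 3, i.e. an 8-bit radix
 * per internal level:
 *   key_bits = 16  ->  key_max = (1ULL << 16) - 1 = 65535
 *                      tree_depth = (16 >> 3) + 1 = 3
 *                      (1 level for the root pointer + 2 internal levels)
 *   key_bits = 64  ->  key_max = UINT64_MAX
 *                      tree_depth = (64 >> 3) + 1 = 9,
 *                      which must stay within JA_MAX_DEPTH.
 */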
3d8fe307
MD
2281/*
2282 * Called from RCU read-side CS.
2283 */
2284__attribute__((visibility("protected")))
2285void rcuja_free_all_children(struct cds_ja_shadow_node *shadow_node,
2286 struct cds_ja_inode_flag *node_flag,
2287 void (*free_node_cb)(struct rcu_head *head))
2288{
2289 const struct rcu_flavor_struct *flavor;
2290 unsigned int type_index;
2291 struct cds_ja_inode *node;
2292 const struct cds_ja_type *type;
2293
2294 flavor = cds_lfht_rcu_flavor(shadow_node->ja->ht);
2295 node = ja_node_ptr(node_flag);
2296 assert(node != NULL);
2297 type_index = ja_node_type(node_flag);
2298 type = &ja_types[type_index];
2299
2300 switch (type->type_class) {
2301 case RCU_JA_LINEAR:
2302 {
2303 uint8_t nr_child =
2304 ja_linear_node_get_nr_child(type, node);
2305 unsigned int i;
2306
2307 for (i = 0; i < nr_child; i++) {
2308 struct cds_ja_inode_flag *iter;
2309 struct cds_hlist_head head;
2310 struct cds_ja_node *entry;
2311 struct cds_hlist_node *pos;
2312 uint8_t v;
2313
2314 ja_linear_node_get_ith_pos(type, node, i, &v, &iter);
2315 if (!iter)
2316 continue;
2317 head.next = (struct cds_hlist_node *) iter;
2318 cds_hlist_for_each_entry_rcu(entry, pos, &head, list) {
2319 flavor->update_call_rcu(&entry->head, free_node_cb);
2320 }
2321 }
2322 break;
2323 }
2324 case RCU_JA_POOL:
2325 {
2326 unsigned int pool_nr;
2327
2328 for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) {
2329 struct cds_ja_inode *pool =
2330 ja_pool_node_get_ith_pool(type, node, pool_nr);
2331 uint8_t nr_child =
2332 ja_linear_node_get_nr_child(type, pool);
2333 unsigned int j;
2334
2335 for (j = 0; j < nr_child; j++) {
2336 struct cds_ja_inode_flag *iter;
2337 struct cds_hlist_head head;
2338 struct cds_ja_node *entry;
2339 struct cds_hlist_node *pos;
2340 uint8_t v;
2341
 2342				ja_linear_node_get_ith_pos(type, pool, j, &v, &iter);
2343 if (!iter)
2344 continue;
2345 head.next = (struct cds_hlist_node *) iter;
2346 cds_hlist_for_each_entry_rcu(entry, pos, &head, list) {
2347 flavor->update_call_rcu(&entry->head, free_node_cb);
2348 }
2349 }
2350 }
2351 break;
2352 }
2353 case RCU_JA_NULL:
2354 break;
2355 case RCU_JA_PIGEON:
2356 {
2357 uint8_t nr_child;
2358 unsigned int i;
2359
2360 nr_child = shadow_node->nr_child;
2361 for (i = 0; i < nr_child; i++) {
2362 struct cds_ja_inode_flag *iter;
2363 struct cds_hlist_head head;
2364 struct cds_ja_node *entry;
2365 struct cds_hlist_node *pos;
2366
2367 iter = ja_pigeon_node_get_ith_pos(type, node, i);
2368 if (!iter)
2369 continue;
2370 head.next = (struct cds_hlist_node *) iter;
2371 cds_hlist_for_each_entry_rcu(entry, pos, &head, list) {
2372 flavor->update_call_rcu(&entry->head, free_node_cb);
2373 }
2374 }
2375 break;
2376 }
2377 default:
2378 assert(0);
2379 }
2380}
2381
19ddcd04
MD
2382static
2383void print_debug_fallback_distribution(void)
2384{
2385 int i;
2386
2387 fprintf(stderr, "Fallback node distribution:\n");
2388 for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
2389 if (!node_fallback_count_distribution[i])
2390 continue;
2391 fprintf(stderr, " %3u: %4lu\n",
2392 i, node_fallback_count_distribution[i]);
2393 }
2394}
2395
be9a7474
MD
2396/*
 2397 * There should be no more concurrent additions to the judy array while it is
2398 * being destroyed (ensured by the caller).
2399 */
3d8fe307
MD
2400int cds_ja_destroy(struct cds_ja *ja,
2401 void (*free_node_cb)(struct rcu_head *head))
be9a7474 2402{
b4540e8a
MD
2403 int ret;
2404
be9a7474 2405 rcuja_shadow_prune(ja->ht,
3d8fe307
MD
2406 RCUJA_SHADOW_CLEAR_FREE_NODE | RCUJA_SHADOW_CLEAR_FREE_LOCK,
2407 free_node_cb);
b4540e8a
MD
2408 ret = rcuja_delete_ht(ja->ht);
2409 if (ret)
2410 return ret;
f07b240f
MD
2411 if (uatomic_read(&ja->nr_fallback))
2412 fprintf(stderr,
2413 "[warning] RCU Judy Array used %lu fallback node(s)\n",
2414 uatomic_read(&ja->nr_fallback));
19ddcd04 2415 print_debug_fallback_distribution();
b4540e8a 2416 free(ja);
41975c12 2417 return 0;
be9a7474 2418}
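
/*
 * Usage sketch (editor's addition, not part of rcuja.c): tearing down
 * the array once the caller guarantees no more concurrent additions,
 * reusing the hypothetical my_free_entry_rcu() callback from the
 * removal sketch above. cds_ja_destroy() queues every remaining entry
 * through the flavor's call_rcu with this callback, so the same reclaim
 * function serves both individual removal and full teardown.
 */
static int my_teardown(struct cds_ja *ja)
{
	return cds_ja_destroy(ja, my_free_entry_rcu);
}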