rcuja: implement add
[userspace-rcu.git] / rcuja / rcuja.c
/*
 * rcuja/rcuja.c
 *
 * Userspace RCU library - RCU Judy Array
 *
 * Copyright 2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define _LGPL_SOURCE
#include <stdint.h>
#include <stdlib.h>
#include <errno.h>
#include <limits.h>
#include <urcu/rcuja.h>
#include <urcu/compiler.h>
#include <urcu/arch.h>
#include <assert.h>
#include <urcu-pointer.h>

#include "rcuja-internal.h"
#include "bitfield.h"

enum cds_ja_type_class {
        RCU_JA_LINEAR = 0,      /* Type A */
                        /* 32-bit: 1 to 25 children, 8 to 128 bytes */
                        /* 64-bit: 1 to 28 children, 16 to 256 bytes */
        RCU_JA_POOL = 1,        /* Type B */
                        /* 32-bit: 26 to 100 children, 256 to 512 bytes */
                        /* 64-bit: 29 to 112 children, 512 to 1024 bytes */
        RCU_JA_PIGEON = 2,      /* Type C */
                        /* 32-bit: 101 to 256 children, 1024 bytes */
                        /* 64-bit: 113 to 256 children, 2048 bytes */
        /* Leaf nodes are implicit from their height in the tree */
        RCU_JA_NR_TYPES,

        RCU_JA_NULL,    /* not an encoded type, but keeps code regular */
};

struct cds_ja_type {
        enum cds_ja_type_class type_class;
        uint16_t min_child;             /* minimum number of children: 1 to 256 */
        uint16_t max_child;             /* maximum number of children: 1 to 256 */
        uint16_t max_linear_child;      /* per-pool max nr. children: 1 to 256 */
        uint16_t order;                 /* node size is (1 << order), in bytes */
        uint16_t nr_pool_order;         /* number of pools */
        uint16_t pool_size_order;       /* pool size */
};

/*
 * Number of least significant pointer bits reserved to represent the
 * child type.
 */
#define JA_TYPE_BITS            3
#define JA_TYPE_MAX_NR          (1UL << JA_TYPE_BITS)
#define JA_TYPE_MASK            (JA_TYPE_MAX_NR - 1)
#define JA_PTR_MASK             (~JA_TYPE_MASK)

#define JA_ENTRY_PER_NODE       256UL
#define JA_LOG2_BITS_PER_BYTE   3U
#define JA_BITS_PER_BYTE        (1U << JA_LOG2_BITS_PER_BYTE)

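/*
 * Encoding example (illustrative only, not part of the implementation):
 * child pointers are stored together with the type index of the child
 * node in the JA_TYPE_BITS least significant bits, which are always
 * zero for allocations of at least 8-byte alignment. A child node
 * allocated at a hypothetical address 0x7f0000001000 with type index 5
 * is therefore stored as the flag value 0x7f0000001005; ja_node_ptr()
 * below recovers the address by masking with JA_PTR_MASK, and
 * ja_node_type() extracts the index with JA_TYPE_MASK.
 */
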
#define JA_MAX_INTERNAL_DEPTH   5       /* Maximum depth, excluding leaves */

/*
 * Entry for NULL node is at index 8 of the table. It is never encoded
 * in flags.
 */
#define NODE_INDEX_NULL         8

/*
 * Iteration on the array to find the right node size for the number of
 * children stops when it reaches .max_child == 256 (this is the largest
 * possible node size, which contains 256 children).
 * The min_child overlaps with the previous max_child to provide a
 * hysteresis loop on reallocation for patterns of cyclic add/removal
 * within the same node.
 * The node index within the following arrays is represented on 3 bits.
 * It identifies the node type, min/max number of children, and the
 * size order.
 * The max_child values for RCU_JA_POOL below result from statistical
 * approximation: over a million sample populations, these max_child
 * values cover between 97% and 99% of the populations generated.
 * Therefore, a fallback should exist to cover the rare cases of extreme
 * population unbalance, but it will not have a major impact on speed
 * nor space consumption, since those cases are rare.
 */

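/*
 * Illustration of the hysteresis (values from the 32-bit table below):
 * a linear node of the type with max_child == 6 that receives a 7th
 * child is recompacted into the next type (min_child == 4,
 * max_child == 12). It would only be recompacted back down once its
 * child count drops below 4, so a workload cycling around the 6/7
 * boundary does not reallocate the node on every add/removal. (Shrink
 * recompaction itself is still a TODO at this stage, see below.)
 */
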
#if (CAA_BITS_PER_LONG < 64)
/* 32-bit pointers */
enum {
        ja_type_0_max_child = 1,
        ja_type_1_max_child = 3,
        ja_type_2_max_child = 6,
        ja_type_3_max_child = 12,
        ja_type_4_max_child = 25,
        ja_type_5_max_child = 48,
        ja_type_6_max_child = 92,
        ja_type_7_max_child = 256,
        ja_type_8_max_child = 0,        /* NULL */
};

enum {
        ja_type_0_max_linear_child = 1,
        ja_type_1_max_linear_child = 3,
        ja_type_2_max_linear_child = 6,
        ja_type_3_max_linear_child = 12,
        ja_type_4_max_linear_child = 25,
        ja_type_5_max_linear_child = 24,
        ja_type_6_max_linear_child = 23,
};

enum {
        ja_type_5_nr_pool_order = 1,
        ja_type_6_nr_pool_order = 2,
};

const struct cds_ja_type ja_types[] = {
        { .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_0_max_child, .max_linear_child = ja_type_0_max_linear_child, .order = 3, },
        { .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_1_max_child, .max_linear_child = ja_type_1_max_linear_child, .order = 4, },
        { .type_class = RCU_JA_LINEAR, .min_child = 3, .max_child = ja_type_2_max_child, .max_linear_child = ja_type_2_max_linear_child, .order = 5, },
        { .type_class = RCU_JA_LINEAR, .min_child = 4, .max_child = ja_type_3_max_child, .max_linear_child = ja_type_3_max_linear_child, .order = 6, },
        { .type_class = RCU_JA_LINEAR, .min_child = 10, .max_child = ja_type_4_max_child, .max_linear_child = ja_type_4_max_linear_child, .order = 7, },

        /* Pools may fill sooner than max_child. */
        { .type_class = RCU_JA_POOL, .min_child = 20, .max_child = ja_type_5_max_child, .max_linear_child = ja_type_5_max_linear_child, .order = 8, .nr_pool_order = ja_type_5_nr_pool_order, .pool_size_order = 7, },
        { .type_class = RCU_JA_POOL, .min_child = 45, .max_child = ja_type_6_max_child, .max_linear_child = ja_type_6_max_linear_child, .order = 9, .nr_pool_order = ja_type_6_nr_pool_order, .pool_size_order = 7, },

        /*
         * TODO: Upon node removal below min_child, if child pool is
         * filled beyond capacity, we need to roll back to pigeon.
         */
        { .type_class = RCU_JA_PIGEON, .min_child = 89, .max_child = ja_type_7_max_child, .order = 10, },

        { .type_class = RCU_JA_NULL, .min_child = 0, .max_child = ja_type_8_max_child, },
};
#else /* !(CAA_BITS_PER_LONG < 64) */
/* 64-bit pointers */
enum {
        ja_type_0_max_child = 1,
        ja_type_1_max_child = 3,
        ja_type_2_max_child = 7,
        ja_type_3_max_child = 14,
        ja_type_4_max_child = 28,
        ja_type_5_max_child = 54,
        ja_type_6_max_child = 104,
        ja_type_7_max_child = 256,
        ja_type_8_max_child = 256,
};

enum {
        ja_type_0_max_linear_child = 1,
        ja_type_1_max_linear_child = 3,
        ja_type_2_max_linear_child = 7,
        ja_type_3_max_linear_child = 14,
        ja_type_4_max_linear_child = 28,
        ja_type_5_max_linear_child = 27,
        ja_type_6_max_linear_child = 26,
};

enum {
        ja_type_5_nr_pool_order = 1,
        ja_type_6_nr_pool_order = 2,
};

const struct cds_ja_type ja_types[] = {
        { .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_0_max_child, .max_linear_child = ja_type_0_max_linear_child, .order = 4, },
        { .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_1_max_child, .max_linear_child = ja_type_1_max_linear_child, .order = 5, },
        { .type_class = RCU_JA_LINEAR, .min_child = 3, .max_child = ja_type_2_max_child, .max_linear_child = ja_type_2_max_linear_child, .order = 6, },
        { .type_class = RCU_JA_LINEAR, .min_child = 5, .max_child = ja_type_3_max_child, .max_linear_child = ja_type_3_max_linear_child, .order = 7, },
        { .type_class = RCU_JA_LINEAR, .min_child = 10, .max_child = ja_type_4_max_child, .max_linear_child = ja_type_4_max_linear_child, .order = 8, },

        /* Pools may fill sooner than max_child. */
        { .type_class = RCU_JA_POOL, .min_child = 22, .max_child = ja_type_5_max_child, .max_linear_child = ja_type_5_max_linear_child, .order = 9, .nr_pool_order = ja_type_5_nr_pool_order, .pool_size_order = 8, },
        { .type_class = RCU_JA_POOL, .min_child = 51, .max_child = ja_type_6_max_child, .max_linear_child = ja_type_6_max_linear_child, .order = 10, .nr_pool_order = ja_type_6_nr_pool_order, .pool_size_order = 8, },

        /*
         * TODO: Upon node removal below min_child, if child pool is
         * filled beyond capacity, we need to roll back to pigeon.
         */
        { .type_class = RCU_JA_PIGEON, .min_child = 101, .max_child = ja_type_7_max_child, .order = 11, },

        { .type_class = RCU_JA_NULL, .min_child = 0, .max_child = ja_type_8_max_child, },
};
#endif /* !(CAA_BITS_PER_LONG < 64) */

static inline __attribute__((unused))
void static_array_size_check(void)
{
        CAA_BUILD_BUG_ON(CAA_ARRAY_SIZE(ja_types) < JA_TYPE_MAX_NR);
}

/*
 * The cds_ja_inode contains the compressed node data needed for
 * read-side. For linear and pool node configurations, it starts with a
 * byte counting the number of children in the node. Then, the
 * node-specific data is placed.
 * The node mutex, if any is needed, protecting concurrent updates of
 * each node is placed in a separate hash table indexed by node address.
 * For the pigeon configuration, the number of children is also kept in
 * a separate hash table, indexed by node address, because it is only
 * required for updates.
 */

#define DECLARE_LINEAR_NODE(index)                                              \
        struct {                                                                \
                uint8_t nr_child;                                               \
                uint8_t child_value[ja_type_## index ##_max_linear_child];      \
                struct cds_ja_inode_flag *child_ptr[ja_type_## index ##_max_linear_child]; \
        }

#define DECLARE_POOL_NODE(index)                                                \
        struct {                                                                \
                struct {                                                        \
                        uint8_t nr_child;                                       \
                        uint8_t child_value[ja_type_## index ##_max_linear_child]; \
                        struct cds_ja_inode_flag *child_ptr[ja_type_## index ##_max_linear_child]; \
                } linear[1U << ja_type_## index ##_nr_pool_order];              \
        }

struct cds_ja_inode {
        union {
                /* Linear configuration */
                DECLARE_LINEAR_NODE(0) conf_0;
                DECLARE_LINEAR_NODE(1) conf_1;
                DECLARE_LINEAR_NODE(2) conf_2;
                DECLARE_LINEAR_NODE(3) conf_3;
                DECLARE_LINEAR_NODE(4) conf_4;

                /* Pool configuration */
                DECLARE_POOL_NODE(5) conf_5;
                DECLARE_POOL_NODE(6) conf_6;

                /* Pigeon configuration */
                struct {
                        struct cds_ja_inode_flag *child[ja_type_7_max_child];
                } conf_7;
                /* data aliasing nodes for computed accesses */
                uint8_t data[sizeof(struct cds_ja_inode_flag *) * ja_type_7_max_child];
        } u;
};

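/*
 * Layout sketch (illustrative): for a 32-bit linear configuration with
 * max_linear_child == 6 (conf_2 above), the aliased u.data[] bytes are:
 *
 *   data[0]        nr_child
 *   data[1..6]     child_value[0..5]
 *   (padding up to the next pointer-size boundary)
 *   data[8..31]    child_ptr[0..5]
 *
 * The accessors below do not dereference the conf_* fields directly:
 * they recompute these offsets from u.data[] with align_ptr_size(), so
 * both views of the layout must stay in sync.
 */
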
static
struct cds_ja_inode_flag *ja_node_flag(struct cds_ja_inode *node,
                unsigned int type)
{
        assert(type < RCU_JA_NR_TYPES);
        return (struct cds_ja_inode_flag *) (((unsigned long) node) | type);
}

static
struct cds_ja_inode *ja_node_ptr(struct cds_ja_inode_flag *node)
{
        return (struct cds_ja_inode *) (((unsigned long) node) & JA_PTR_MASK);
}

static
unsigned int ja_node_type(struct cds_ja_inode_flag *node)
{
        unsigned int type;

        if (ja_node_ptr(node) == NULL) {
                return NODE_INDEX_NULL;
        }
        type = (unsigned int) ((unsigned long) node & JA_TYPE_MASK);
        assert(type < RCU_JA_NR_TYPES);
        return type;
}

struct cds_ja_inode *alloc_cds_ja_node(const struct cds_ja_type *ja_type)
{
        return calloc(1U << ja_type->order, sizeof(char));
}

void free_cds_ja_node(struct cds_ja_inode *node)
{
        free(node);
}

#define __JA_ALIGN_MASK(v, mask)        (((v) + (mask)) & ~(mask))
#define JA_ALIGN(v, align)              __JA_ALIGN_MASK(v, (typeof(v)) (align) - 1)
#define __JA_FLOOR_MASK(v, mask)        ((v) & ~(mask))
#define JA_FLOOR(v, align)              __JA_FLOOR_MASK(v, (typeof(v)) (align) - 1)

static
uint8_t *align_ptr_size(uint8_t *ptr)
{
        return (uint8_t *) JA_ALIGN((unsigned long) ptr, sizeof(void *));
}

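/*
 * Examples (illustrative): JA_ALIGN(13, 8) == 16 and JA_FLOOR(13, 8) == 8.
 * align_ptr_size() rounds a byte address up to the next sizeof(void *)
 * boundary; it is used below to locate the child pointer array that
 * follows the child value bytes in linear nodes.
 */
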
static
uint8_t ja_linear_node_get_nr_child(const struct cds_ja_type *type,
                struct cds_ja_inode *node)
{
        assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
        return CMM_LOAD_SHARED(node->u.data[0]);
}

/*
 * The order in which values and pointers are read does not matter: if
 * a value is missing, we return NULL. If a value is there, but its
 * associated pointer is still NULL, we return NULL too.
 */
static
struct cds_ja_inode_flag *ja_linear_node_get_nth(const struct cds_ja_type *type,
                struct cds_ja_inode *node,
                struct cds_ja_inode_flag ***child_node_flag_ptr,
                uint8_t n)
{
        uint8_t nr_child;
        uint8_t *values;
        struct cds_ja_inode_flag **pointers;
        struct cds_ja_inode_flag *ptr;
        unsigned int i;

        assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);

        nr_child = ja_linear_node_get_nr_child(type, node);
        cmm_smp_rmb();  /* read nr_child before values and pointers */
        assert(nr_child <= type->max_linear_child);
        assert(type->type_class != RCU_JA_LINEAR || nr_child >= type->min_child);

        values = &node->u.data[1];
        for (i = 0; i < nr_child; i++) {
                if (CMM_LOAD_SHARED(values[i]) == n)
                        break;
        }
        if (i >= nr_child)
                return NULL;
        pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
        if (caa_unlikely(child_node_flag_ptr))
                *child_node_flag_ptr = &pointers[i];
        ptr = rcu_dereference(pointers[i]);
        assert(ja_node_ptr(ptr) != NULL);
        return ptr;
}

static
void ja_linear_node_get_ith_pos(const struct cds_ja_type *type,
                struct cds_ja_inode *node,
                uint8_t i,
                uint8_t *v,
                struct cds_ja_inode_flag **iter)
{
        uint8_t *values;
        struct cds_ja_inode_flag **pointers;

        assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
        assert(i < ja_linear_node_get_nr_child(type, node));

        values = &node->u.data[1];
        *v = values[i];
        pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
        *iter = pointers[i];
}

static
struct cds_ja_inode_flag *ja_pool_node_get_nth(const struct cds_ja_type *type,
                struct cds_ja_inode *node,
                struct cds_ja_inode_flag ***child_node_flag_ptr,
                uint8_t n)
{
        struct cds_ja_inode *linear;

        assert(type->type_class == RCU_JA_POOL);
        /*
         * TODO: currently, we select the pool by highest bits. We
         * should support various encodings.
         */
        linear = (struct cds_ja_inode *)
                &node->u.data[((unsigned long) n >> (CHAR_BIT - type->nr_pool_order)) << type->pool_size_order];
        return ja_linear_node_get_nth(type, linear, child_node_flag_ptr, n);
}

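/*
 * Worked example of the pool selection above (illustrative only): with
 * nr_pool_order == 1 and pool_size_order == 7 (32-bit type 5), the top
 * bit of n (n >> 7) selects pool 0 or pool 1, and the shift by
 * pool_size_order turns that into a byte offset of 0 or 128 into
 * u.data[]. Each pool is then searched as a regular linear node.
 */
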
static
struct cds_ja_inode *ja_pool_node_get_ith_pool(const struct cds_ja_type *type,
                struct cds_ja_inode *node,
                uint8_t i)
{
        assert(type->type_class == RCU_JA_POOL);
        return (struct cds_ja_inode *)
                &node->u.data[(unsigned int) i << type->pool_size_order];
}

static
struct cds_ja_inode_flag *ja_pigeon_node_get_nth(const struct cds_ja_type *type,
                struct cds_ja_inode *node,
                struct cds_ja_inode_flag ***child_node_flag_ptr,
                uint8_t n)
{
        struct cds_ja_inode_flag **child_node_flag;

        assert(type->type_class == RCU_JA_PIGEON);
        child_node_flag = &((struct cds_ja_inode_flag **) node->u.data)[n];
        if (caa_unlikely(child_node_flag_ptr))
                *child_node_flag_ptr = child_node_flag;
        return rcu_dereference(*child_node_flag);
}

/*
 * ja_node_get_nth: get nth item from a node.
 * node_flag is already rcu_dereference'd.
 */
static
struct cds_ja_inode_flag *ja_node_get_nth(struct cds_ja_inode_flag *node_flag,
                struct cds_ja_inode_flag ***child_node_flag_ptr,
                uint8_t n)
{
        unsigned int type_index;
        struct cds_ja_inode *node;
        const struct cds_ja_type *type;

        node = ja_node_ptr(node_flag);
        assert(node != NULL);
        type_index = ja_node_type(node_flag);
        type = &ja_types[type_index];

        switch (type->type_class) {
        case RCU_JA_LINEAR:
                return ja_linear_node_get_nth(type, node,
                                child_node_flag_ptr, n);
        case RCU_JA_POOL:
                return ja_pool_node_get_nth(type, node,
                                child_node_flag_ptr, n);
        case RCU_JA_PIGEON:
                return ja_pigeon_node_get_nth(type, node,
                                child_node_flag_ptr, n);
        default:
                assert(0);
                return (void *) -1UL;
        }
}

/*
 * TODO: use ja_get_nr_child to monitor limits triggering shrink
 * recompaction.
 * Also use ja_get_nr_child to distinguish between a resize and a
 * change of the pool compaction bit(s).
 */
static
unsigned int ja_get_nr_child(struct cds_ja_shadow_node *shadow_node)
{
        return shadow_node->nr_child;
}

static
int ja_linear_node_set_nth(const struct cds_ja_type *type,
                struct cds_ja_inode *node,
                struct cds_ja_shadow_node *shadow_node,
                uint8_t n,
                struct cds_ja_inode_flag *child_node_flag)
{
        uint8_t nr_child;
        uint8_t *values, *nr_child_ptr;
        struct cds_ja_inode_flag **pointers;
        unsigned int i;

        assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);

        nr_child_ptr = &node->u.data[0];
        nr_child = *nr_child_ptr;
        assert(nr_child <= type->max_linear_child);
        assert(type->type_class != RCU_JA_LINEAR || nr_child >= type->min_child);

        values = &node->u.data[1];
        for (i = 0; i < nr_child; i++) {
                if (values[i] == n)
                        return -EEXIST;
        }
        if (nr_child >= type->max_linear_child) {
                /* No space left in this node type */
                return -ENOSPC;
        }
        pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
        assert(pointers[nr_child] == NULL);
        rcu_assign_pointer(pointers[nr_child], child_node_flag);
        CMM_STORE_SHARED(values[nr_child], n);
        cmm_smp_wmb();  /* write value and pointer before nr_child */
        CMM_STORE_SHARED(*nr_child_ptr, nr_child + 1);
        shadow_node->nr_child++;
        return 0;
}

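/*
 * Note on ordering: the cmm_smp_wmb() in ja_linear_node_set_nth()
 * (write value and pointer before nr_child) pairs with the
 * cmm_smp_rmb() in ja_linear_node_get_nth() (read nr_child before
 * values and pointers), so a reader that observes the incremented
 * nr_child is guaranteed to also observe the newly stored value and
 * child pointer.
 */
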
static
int ja_pool_node_set_nth(const struct cds_ja_type *type,
                struct cds_ja_inode *node,
                struct cds_ja_shadow_node *shadow_node,
                uint8_t n,
                struct cds_ja_inode_flag *child_node_flag)
{
        struct cds_ja_inode *linear;

        assert(type->type_class == RCU_JA_POOL);
        linear = (struct cds_ja_inode *)
                &node->u.data[((unsigned long) n >> (CHAR_BIT - type->nr_pool_order)) << type->pool_size_order];
        return ja_linear_node_set_nth(type, linear, shadow_node,
                        n, child_node_flag);
}

static
int ja_pigeon_node_set_nth(const struct cds_ja_type *type,
                struct cds_ja_inode *node,
                struct cds_ja_shadow_node *shadow_node,
                uint8_t n,
                struct cds_ja_inode_flag *child_node_flag)
{
        struct cds_ja_inode_flag **ptr;

        assert(type->type_class == RCU_JA_PIGEON);
        ptr = &((struct cds_ja_inode_flag **) node->u.data)[n];
        if (*ptr)
                return -EEXIST;
        rcu_assign_pointer(*ptr, child_node_flag);
        shadow_node->nr_child++;
        return 0;
}

/*
 * _ja_node_set_nth: set nth item within a node. Return an error
 * (negative error value) if it is already there.
 * TODO: exclusive access on node.
 */
static
int _ja_node_set_nth(const struct cds_ja_type *type,
                struct cds_ja_inode *node,
                struct cds_ja_shadow_node *shadow_node,
                uint8_t n,
                struct cds_ja_inode_flag *child_node_flag)
{
        switch (type->type_class) {
        case RCU_JA_LINEAR:
                return ja_linear_node_set_nth(type, node, shadow_node, n,
                                child_node_flag);
        case RCU_JA_POOL:
                return ja_pool_node_set_nth(type, node, shadow_node, n,
                                child_node_flag);
        case RCU_JA_PIGEON:
                return ja_pigeon_node_set_nth(type, node, shadow_node, n,
                                child_node_flag);
        case RCU_JA_NULL:
                return -ENOSPC;
        default:
                assert(0);
                return -EINVAL;
        }

        return 0;
}

/*
 * ja_node_recompact_add: recompact a node, adding a new child.
 * TODO: for pool type, take selection bit(s) into account.
 * Return 0 on success, -ENOENT if need to retry, or other negative
 * error value otherwise.
 */
static
int ja_node_recompact_add(struct cds_ja *ja,
                unsigned int old_type_index,
                const struct cds_ja_type *old_type,
                struct cds_ja_inode *old_node,
                struct cds_ja_shadow_node *shadow_node,
                struct cds_ja_inode_flag **old_node_flag, uint8_t n,
                struct cds_ja_inode_flag *child_node_flag)
{
        unsigned int new_type_index;
        struct cds_ja_inode *new_node;
        const struct cds_ja_type *new_type;
        struct cds_ja_inode_flag *new_node_flag;
        int new_shadow = 0;
        int ret;

        if (!shadow_node) {
                new_type_index = 0;
        } else {
                new_type_index = old_type_index + 1;
        }
        new_type = &ja_types[new_type_index];
        new_node = alloc_cds_ja_node(new_type);
        if (!new_node)
                return -ENOMEM;
        new_node_flag = ja_node_flag(new_node, new_type_index);

        ret = rcuja_shadow_set(ja->ht, new_node, shadow_node);
        if (ret) {
                free(new_node);
                return ret;
        }

        if (!shadow_node) {
                shadow_node = rcuja_shadow_lookup_lock(ja->ht, new_node);
                assert(shadow_node);
                new_shadow = 1;
        }

        /*
         * We need to clear nr_child, because it will be re-incremented
         * by _ja_node_set_nth().
         */
        shadow_node->nr_child = 0;

        assert(old_type->type_class != RCU_JA_PIGEON);
        switch (old_type->type_class) {
        case RCU_JA_LINEAR:
        {
                uint8_t nr_child =
                        ja_linear_node_get_nr_child(old_type, old_node);
                unsigned int i;

                for (i = 0; i < nr_child; i++) {
                        struct cds_ja_inode_flag *iter;
                        uint8_t v;

                        ja_linear_node_get_ith_pos(old_type, old_node, i, &v, &iter);
                        if (!iter)
                                continue;
                        ret = _ja_node_set_nth(new_type, new_node, shadow_node,
                                        v, iter);
                        assert(!ret);
                }
                break;
        }
        case RCU_JA_POOL:
        {
                unsigned int pool_nr;

                for (pool_nr = 0; pool_nr < (1U << old_type->nr_pool_order); pool_nr++) {
                        struct cds_ja_inode *pool =
                                ja_pool_node_get_ith_pool(old_type,
                                        old_node, pool_nr);
                        uint8_t nr_child =
                                ja_linear_node_get_nr_child(old_type, pool);
                        unsigned int j;

                        for (j = 0; j < nr_child; j++) {
                                struct cds_ja_inode_flag *iter;
                                uint8_t v;

                                ja_linear_node_get_ith_pos(old_type, pool,
                                                j, &v, &iter);
                                if (!iter)
                                        continue;
                                ret = _ja_node_set_nth(new_type, new_node, shadow_node,
                                                v, iter);
                                assert(!ret);
                        }
                }
                break;
        }
        case RCU_JA_NULL:
                /* Nothing to copy: the previous "node" was an empty slot. */
                break;
        case RCU_JA_PIGEON:
        default:
                assert(0);
                ret = -EINVAL;
                goto unlock_new_shadow;
        }

        /* add node */
        ret = _ja_node_set_nth(new_type, new_node, shadow_node,
                        n, child_node_flag);
        assert(!ret);
        /* Return pointer to the new recompacted node through old_node_flag */
        *old_node_flag = new_node_flag;
        if (old_node) {
                ret = rcuja_shadow_clear(ja->ht, old_node,
                                RCUJA_SHADOW_CLEAR_FREE_NODE);
                assert(!ret);
        }

        ret = 0;

unlock_new_shadow:
        if (new_shadow)
                rcuja_shadow_unlock(shadow_node);
        return ret;
}

/*
 * Return 0 on success, -ENOENT if need to retry, or other negative
 * error value otherwise.
 */
static
int ja_node_set_nth(struct cds_ja *ja,
                struct cds_ja_inode_flag **node_flag, uint8_t n,
                struct cds_ja_inode_flag *child_node_flag,
                struct cds_ja_shadow_node *shadow_node)
{
        int ret;
        unsigned int type_index;
        const struct cds_ja_type *type;
        struct cds_ja_inode *node;

        node = ja_node_ptr(*node_flag);
        type_index = ja_node_type(*node_flag);
        type = &ja_types[type_index];
        ret = _ja_node_set_nth(type, node, shadow_node,
                        n, child_node_flag);
        if (ret == -ENOSPC) {
                /* Not enough space in node, need to recompact. */
                ret = ja_node_recompact_add(ja, type_index, type, node,
                                shadow_node, node_flag, n, child_node_flag);
        }
        return ret;
}

struct cds_hlist_head *cds_ja_lookup(struct cds_ja *ja, uint64_t key)
{
        unsigned int tree_depth, i;
        struct cds_ja_inode_flag *node_flag;

        if (caa_unlikely(key > ja->key_max))
                return NULL;
        tree_depth = ja->tree_depth;
        node_flag = rcu_dereference(ja->root);

        /* level 0: root node */
        if (!ja_node_ptr(node_flag))
                return NULL;

        for (i = 1; i < tree_depth; i++) {
                node_flag = ja_node_get_nth(node_flag, NULL,
                                (unsigned char) key);
                if (!ja_node_ptr(node_flag))
                        return NULL;
                key >>= JA_BITS_PER_BYTE;
        }

        /* Last level lookup succeeded. We got an actual match. */
        return (struct cds_hlist_head *) node_flag;
}

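/*
 * Usage sketch (illustrative, not part of this file's API surface):
 * the intended pattern is for callers to embed a struct cds_ja_node in
 * their own structure and iterate the hlist of duplicates returned for
 * a key, within an RCU read-side critical section of the flavor the
 * array was created with. The "struct my_entry" type and "data" field
 * below are hypothetical.
 *
 *      struct my_entry {
 *              int data;
 *              struct cds_ja_node node;
 *      };
 *
 *      struct cds_hlist_head *head;
 *      struct cds_hlist_node *pos;
 *
 *      rcu_read_lock();
 *      head = cds_ja_lookup(ja, key);
 *      if (head) {
 *              for (pos = rcu_dereference(head->next); pos;
 *                              pos = rcu_dereference(pos->next)) {
 *                      struct my_entry *entry = caa_container_of(pos,
 *                                      struct my_entry, node.list);
 *                      // use entry->data ...
 *              }
 *      }
 *      rcu_read_unlock();
 */
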
/*
 * We reached an unpopulated node. Create it and the children we need,
 * and then attach the entire branch to the current node. This may
 * trigger recompaction of the current node. Locks needed: node lock
 * (for add), and, possibly, parent node lock (to update pointer due to
 * node recompaction).
 *
 * First take node lock, check if recompaction is needed, then take
 * parent lock (if needed). Then we can proceed to create the new
 * branch. Publish the new branch, and release locks.
 * TODO: we currently always take the parent lock even when not needed.
 */
static
int ja_attach_node(struct cds_ja *ja,
                struct cds_ja_inode_flag **node_flag_ptr,
                struct cds_ja_inode_flag *node_flag,
                struct cds_ja_inode_flag *parent_node_flag,
                uint64_t key,
                unsigned int depth,
                struct cds_ja_node *child_node)
{
        struct cds_ja_shadow_node *shadow_node = NULL,
                        *parent_shadow_node = NULL;
        struct cds_ja_inode *node = ja_node_ptr(node_flag);
        struct cds_ja_inode *parent_node = ja_node_ptr(parent_node_flag);
        struct cds_hlist_head head;
        struct cds_ja_inode_flag *iter_node_flag, *iter_dest_node_flag;
        int ret, i;
        struct cds_ja_inode_flag *created_nodes[JA_MAX_INTERNAL_DEPTH];
        int nr_created_nodes = 0;

        assert(node);
        shadow_node = rcuja_shadow_lookup_lock(ja->ht, node);
        if (!shadow_node) {
                ret = -ENOENT;
                goto end;
        }
        if (parent_node) {
                parent_shadow_node = rcuja_shadow_lookup_lock(ja->ht,
                                parent_node);
                if (!parent_shadow_node) {
                        ret = -ENOENT;
                        goto unlock_shadow;
                }
        }

        CDS_INIT_HLIST_HEAD(&head);
        cds_hlist_add_head_rcu(&child_node->list, &head);

        iter_dest_node_flag = NULL;
        ret = ja_node_set_nth(ja, &iter_dest_node_flag,
                key >> (JA_BITS_PER_BYTE * (ja->tree_depth - 2)),
                (struct cds_ja_inode_flag *) head.next,
                NULL);
        if (ret)
                goto unlock_parent;
        created_nodes[nr_created_nodes++] = iter_dest_node_flag;
        iter_node_flag = iter_dest_node_flag;

        /* Create new branch, starting from bottom */
        for (i = ja->tree_depth - 2; i >= (int) depth; i--) {
                iter_dest_node_flag = NULL;
                ret = ja_node_set_nth(ja, &iter_dest_node_flag,
                        key >> (JA_BITS_PER_BYTE * (i - 1)),
                        iter_node_flag,
                        NULL);
                if (ret)
                        goto check_error;
                created_nodes[nr_created_nodes++] = iter_dest_node_flag;
                iter_node_flag = iter_dest_node_flag;
        }

        /* Publish new branch */
        rcu_assign_pointer(*node_flag_ptr, iter_node_flag);

        /* Success */
        ret = 0;

check_error:
        if (ret) {
                for (i = 0; i < nr_created_nodes; i++) {
                        int tmpret;
                        tmpret = rcuja_shadow_clear(ja->ht,
                                        ja_node_ptr(created_nodes[i]),
                                        RCUJA_SHADOW_CLEAR_FREE_NODE
                                        | RCUJA_SHADOW_CLEAR_FREE_LOCK);
                        assert(!tmpret);
                }
        }
unlock_parent:
        if (parent_shadow_node)
                rcuja_shadow_unlock(parent_shadow_node);
unlock_shadow:
        if (shadow_node)
                rcuja_shadow_unlock(shadow_node);
end:
        return ret;
}

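/*
 * Note: the new branch built by ja_attach_node() is only made visible
 * to readers by the final rcu_assign_pointer() into *node_flag_ptr, so
 * concurrent lookups either see the complete branch or none of it;
 * they never observe a partially constructed sub-tree.
 */
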
/*
 * Lock the hlist head shadow node mutex, and add node to the list of
 * duplicates. Failure can happen if concurrent removal removes the last
 * node with the same key before we get the lock.
 * Return 0 on success, negative error value on failure.
 */
static
int ja_chain_node(struct cds_ja *ja,
                struct cds_hlist_head *head,
                struct cds_ja_node *node)
{
        struct cds_ja_shadow_node *shadow_node;

        shadow_node = rcuja_shadow_lookup_lock(ja->ht,
                        (struct cds_ja_inode *) head);
        if (!shadow_node)
                return -ENOENT;
        cds_hlist_add_head_rcu(&node->list, head);
        rcuja_shadow_unlock(shadow_node);
        return 0;
}

int cds_ja_add(struct cds_ja *ja, uint64_t key,
                struct cds_ja_node *new_node)
{
        unsigned int tree_depth, i;
        uint64_t iter_key;
        struct cds_ja_inode_flag **node_flag_ptr;       /* in parent */
        struct cds_ja_inode_flag *node_flag,
                *parent_node_flag,
                *parent2_node_flag;
        int ret;

        if (caa_unlikely(key > ja->key_max))
                return -EINVAL;
        tree_depth = ja->tree_depth;

retry:
        iter_key = key;
        parent2_node_flag = NULL;
        parent_node_flag = NULL;
        node_flag_ptr = &ja->root;
        node_flag = rcu_dereference(*node_flag_ptr);

        /* Iterate on all internal levels */
        for (i = 0; i < tree_depth - 1; i++) {
                if (!ja_node_ptr(node_flag)) {
                        ret = ja_attach_node(ja, node_flag_ptr,
                                        parent_node_flag, parent2_node_flag,
                                        key, i, new_node);
                        if (ret == -ENOENT || ret == -EEXIST)
                                goto retry;
                        else
                                goto end;
                }
                parent2_node_flag = parent_node_flag;
                parent_node_flag = node_flag;
                node_flag = ja_node_get_nth(node_flag,
                                &node_flag_ptr,
                                (unsigned char) iter_key);
                iter_key >>= JA_BITS_PER_BYTE;
        }

        /*
         * We reached the bottom of the tree: simply add the node to the
         * last internal level, or chain it if the key is already present.
         */
        if (!ja_node_ptr(node_flag)) {
                ret = ja_attach_node(ja, node_flag_ptr, parent_node_flag,
                                parent2_node_flag, key, i, new_node);
        } else {
                ret = ja_chain_node(ja,
                        (struct cds_hlist_head *) ja_node_ptr(node_flag),
                        new_node);
        }
        if (ret == -ENOENT)
                goto retry;
end:
        return ret;
}

struct cds_ja *_cds_ja_new(unsigned int key_bits,
                const struct rcu_flavor_struct *flavor)
{
        struct cds_ja *ja;

        ja = calloc(sizeof(*ja), 1);
        if (!ja)
                goto ja_error;

        switch (key_bits) {
        case 8:
                ja->key_max = UINT8_MAX;
                break;
        case 16:
                ja->key_max = UINT16_MAX;
                break;
        case 32:
                ja->key_max = UINT32_MAX;
                break;
        case 64:
                ja->key_max = UINT64_MAX;
                break;
        default:
                goto check_error;
        }

        /* ja->root is NULL */
        /* tree_depth 0 is for pointer to root node */
        ja->tree_depth = (key_bits >> JA_LOG2_BITS_PER_BYTE) + 1;
        assert(ja->tree_depth <= JA_MAX_INTERNAL_DEPTH + 1);
        ja->ht = rcuja_create_ht(flavor);
        if (!ja->ht)
                goto ht_error;
        return ja;

ht_error:
check_error:
        free(ja);
ja_error:
        return NULL;
}

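/*
 * Lifecycle sketch (illustrative): a caller creates the array for a
 * given key width and RCU flavor, adds entries embedding a
 * struct cds_ja_node, and destroys the array once no readers or
 * updaters remain. "my_flavor" stands for the rcu_flavor_struct of the
 * flavor in use and "my_entry" is a hypothetical caller structure
 * embedding a struct cds_ja_node named "node".
 *
 *      struct cds_ja *ja;
 *      struct my_entry *entry;
 *      int ret;
 *
 *      ja = _cds_ja_new(16, my_flavor);        // keys are 16-bit
 *      assert(ja);
 *      entry = calloc(1, sizeof(*entry));
 *      ret = cds_ja_add(ja, 42, &entry->node); // key 42
 *      assert(!ret);
 *      // ... lookups / more adds ...
 *      ret = cds_ja_destroy(ja);
 *      assert(!ret);
 */
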
/*
 * There should be no more concurrent add to the judy array while it is
 * being destroyed (ensured by the caller).
 */
int cds_ja_destroy(struct cds_ja *ja)
{
        int ret;

        rcuja_shadow_prune(ja->ht,
                        RCUJA_SHADOW_CLEAR_FREE_NODE | RCUJA_SHADOW_CLEAR_FREE_LOCK);
        ret = rcuja_delete_ht(ja->ht);
        if (ret)
                return ret;
        free(ja);
        return 0;
}