rcuja: use rcu ja app flavor for shadow hash table
/*
 * rcuja/rcuja.c
 *
 * Userspace RCU library - RCU Judy Array
 *
 * Copyright 2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define _LGPL_SOURCE
#include <stdint.h>
#include <stdlib.h>	/* calloc(), free() */
#include <errno.h>
#include <limits.h>
#include <urcu/rcuja.h>
#include <urcu/compiler.h>
#include <urcu/arch.h>
#include <assert.h>
#include <urcu-pointer.h>

#include "rcuja-internal.h"
#include "bitfield.h"

enum rcu_ja_type_class {
        RCU_JA_LINEAR = 0,      /* Type A */
                        /* 32-bit: 1 to 25 children, 8 to 128 bytes */
                        /* 64-bit: 1 to 28 children, 16 to 256 bytes */
        RCU_JA_POOL = 1,        /* Type B */
                        /* 32-bit: 26 to 100 children, 256 to 512 bytes */
                        /* 64-bit: 29 to 112 children, 512 to 1024 bytes */
        RCU_JA_PIGEON = 2,      /* Type C */
                        /* 32-bit: 101 to 256 children, 1024 bytes */
                        /* 64-bit: 113 to 256 children, 2048 bytes */
        /* Leaf nodes are implicit from their height in the tree */
        RCU_JA_NR_TYPES,
};

struct rcu_ja_type {
        enum rcu_ja_type_class type_class;
        uint16_t min_child;             /* minimum number of children: 1 to 256 */
        uint16_t max_child;             /* maximum number of children: 1 to 256 */
        uint16_t max_linear_child;      /* per-pool max nr. children: 1 to 256 */
        uint16_t order;                 /* node size is (1 << order), in bytes */
        uint16_t nr_pool_order;         /* number of linear sub-pools is (1 << nr_pool_order) */
        uint16_t pool_size_order;       /* each sub-pool is (1 << pool_size_order) bytes */
};

/*
 * Number of least significant pointer bits reserved to represent the
 * child type.
 */
#define JA_TYPE_BITS    3
#define JA_TYPE_MAX_NR  (1U << JA_TYPE_BITS)
#define JA_TYPE_MASK    (JA_TYPE_MAX_NR - 1)
#define JA_PTR_MASK     (~JA_TYPE_MASK)
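
/*
 * Example (illustrative): with JA_TYPE_BITS == 3, a node allocated at
 * address 0x7f0012340000 and tagged with type index 5 is stored as the
 * flagged pointer 0x7f0012340005. The type index is recovered with
 * (flag & JA_TYPE_MASK) and the node address with (flag & JA_PTR_MASK).
 * This relies on node allocations being aligned on at least
 * (1 << JA_TYPE_BITS) bytes, which leaves the low bits available.
 */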

#define JA_ENTRY_PER_NODE       256UL

/*
 * Iteration over the ja_types array to find the right node size for a
 * given number of children stops when it reaches .max_child == 256
 * (the largest possible node size, which holds 256 children).
 * Each min_child overlaps with the previous type's max_child to provide
 * a hysteresis loop that avoids reallocation thrashing under patterns
 * of cyclic add/removal within the same node.
 * The index of a node type within the following arrays is represented
 * on 3 bits. It identifies the node type class, the min/max number of
 * children, and the size order.
 * The max_child values for the RCU_JA_POOL types below result from a
 * statistical approximation: over a million generated populations, the
 * chosen max_child covers between 97% and 99% of them. A fallback
 * therefore needs to exist to cover the rare, extremely unbalanced
 * populations, but it will not have a major impact on speed or space
 * consumption, since those cases are rare.
 */
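
/*
 * Hysteresis example (64-bit): a linear node of type 4 that grows past
 * ja_type_4_max_child == 28 children is recompacted into the type 5
 * pool node. Since ja_types[5].min_child == 22 lies below 28, removing
 * a few children afterwards does not immediately make the node a
 * candidate for shrinking back; only dropping below 22 children would.
 */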

#if (CAA_BITS_PER_LONG < 64)
/* 32-bit pointers */
enum {
        ja_type_0_max_child = 1,
        ja_type_1_max_child = 3,
        ja_type_2_max_child = 6,
        ja_type_3_max_child = 12,
        ja_type_4_max_child = 25,
        ja_type_5_max_child = 48,
        ja_type_6_max_child = 92,
        ja_type_7_max_child = 256,
};

enum {
        ja_type_0_max_linear_child = 1,
        ja_type_1_max_linear_child = 3,
        ja_type_2_max_linear_child = 6,
        ja_type_3_max_linear_child = 12,
        ja_type_4_max_linear_child = 25,
        ja_type_5_max_linear_child = 24,
        ja_type_6_max_linear_child = 23,
};

enum {
        ja_type_5_nr_pool_order = 1,
        ja_type_6_nr_pool_order = 2,
};

const struct rcu_ja_type ja_types[] = {
        { .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_0_max_child, .max_linear_child = ja_type_0_max_linear_child, .order = 3, },
        { .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_1_max_child, .max_linear_child = ja_type_1_max_linear_child, .order = 4, },
        { .type_class = RCU_JA_LINEAR, .min_child = 3, .max_child = ja_type_2_max_child, .max_linear_child = ja_type_2_max_linear_child, .order = 5, },
        { .type_class = RCU_JA_LINEAR, .min_child = 4, .max_child = ja_type_3_max_child, .max_linear_child = ja_type_3_max_linear_child, .order = 6, },
        { .type_class = RCU_JA_LINEAR, .min_child = 10, .max_child = ja_type_4_max_child, .max_linear_child = ja_type_4_max_linear_child, .order = 7, },

        /* Pools may fill sooner than max_child. */
        { .type_class = RCU_JA_POOL, .min_child = 20, .max_child = ja_type_5_max_child, .max_linear_child = ja_type_5_max_linear_child, .order = 8, .nr_pool_order = ja_type_5_nr_pool_order, .pool_size_order = 7, },
        { .type_class = RCU_JA_POOL, .min_child = 45, .max_child = ja_type_6_max_child, .max_linear_child = ja_type_6_max_linear_child, .order = 9, .nr_pool_order = ja_type_6_nr_pool_order, .pool_size_order = 7, },

        /*
         * TODO: Upon node removal below min_child, if child pool is
         * filled beyond capacity, we need to roll back to pigeon.
         */
        { .type_class = RCU_JA_PIGEON, .min_child = 89, .max_child = ja_type_7_max_child, .order = 10, },
};
#else /* !(CAA_BITS_PER_LONG < 64) */
/* 64-bit pointers */
enum {
        ja_type_0_max_child = 1,
        ja_type_1_max_child = 3,
        ja_type_2_max_child = 7,
        ja_type_3_max_child = 14,
        ja_type_4_max_child = 28,
        ja_type_5_max_child = 54,
        ja_type_6_max_child = 104,
        ja_type_7_max_child = 256,
};

enum {
        ja_type_0_max_linear_child = 1,
        ja_type_1_max_linear_child = 3,
        ja_type_2_max_linear_child = 7,
        ja_type_3_max_linear_child = 14,
        ja_type_4_max_linear_child = 28,
        ja_type_5_max_linear_child = 27,
        ja_type_6_max_linear_child = 26,
};

enum {
        ja_type_5_nr_pool_order = 1,
        ja_type_6_nr_pool_order = 2,
};

const struct rcu_ja_type ja_types[] = {
        { .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_0_max_child, .max_linear_child = ja_type_0_max_linear_child, .order = 4, },
        { .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_1_max_child, .max_linear_child = ja_type_1_max_linear_child, .order = 5, },
        { .type_class = RCU_JA_LINEAR, .min_child = 3, .max_child = ja_type_2_max_child, .max_linear_child = ja_type_2_max_linear_child, .order = 6, },
        { .type_class = RCU_JA_LINEAR, .min_child = 5, .max_child = ja_type_3_max_child, .max_linear_child = ja_type_3_max_linear_child, .order = 7, },
        { .type_class = RCU_JA_LINEAR, .min_child = 10, .max_child = ja_type_4_max_child, .max_linear_child = ja_type_4_max_linear_child, .order = 8, },

        /* Pools may fill sooner than max_child. */
        { .type_class = RCU_JA_POOL, .min_child = 22, .max_child = ja_type_5_max_child, .max_linear_child = ja_type_5_max_linear_child, .order = 9, .nr_pool_order = ja_type_5_nr_pool_order, .pool_size_order = 8, },
        { .type_class = RCU_JA_POOL, .min_child = 51, .max_child = ja_type_6_max_child, .max_linear_child = ja_type_6_max_linear_child, .order = 10, .nr_pool_order = ja_type_6_nr_pool_order, .pool_size_order = 8, },

        /*
         * TODO: Upon node removal below min_child, if child pool is
         * filled beyond capacity, we need to roll back to pigeon.
         */
        { .type_class = RCU_JA_PIGEON, .min_child = 101, .max_child = ja_type_7_max_child, .order = 11, },
};
#endif /* !(CAA_BITS_PER_LONG < 64) */

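/*
 * Build-time check: every entry of ja_types must be addressable by a
 * type index that fits within the JA_TYPE_BITS low pointer bits.
 */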
static inline __attribute__((unused))
void static_array_size_check(void)
{
        CAA_BUILD_BUG_ON(CAA_ARRAY_SIZE(ja_types) > JA_TYPE_MAX_NR);
}

/*
 * The rcu_ja_node contains the compressed node data needed for
 * read-side. For linear and pool node configurations, it starts with a
 * byte counting the number of children in the node. Then, the
 * node-specific data is placed.
 * The node mutex, if any is needed, protecting concurrent updates of
 * each node is placed in a separate hash table indexed by node address.
 * For the pigeon configuration, the number of children is also kept in
 * a separate hash table, indexed by node address, because it is only
 * required for updates.
 */
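
/*
 * Layout of a linear node (and of each linear sub-node within a pool
 * node), as seen through the u.data byte array:
 *
 *      byte 0:                         nr_child
 *      bytes 1 .. max_linear_child:    child_value[]
 *      (padding up to the next pointer-size boundary)
 *      then:                           child_ptr[max_linear_child]
 *
 * The accessors below recompute the child_ptr offset with
 * align_ptr_size() instead of going through the conf_* union members.
 */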

#define DECLARE_LINEAR_NODE(index)                                              \
        struct {                                                                \
                uint8_t nr_child;                                               \
                uint8_t child_value[ja_type_## index ##_max_linear_child];      \
                struct rcu_ja_node_flag *child_ptr[ja_type_## index ##_max_linear_child]; \
        }

#define DECLARE_POOL_NODE(index)                                                \
        struct {                                                                \
                struct {                                                        \
                        uint8_t nr_child;                                       \
                        uint8_t child_value[ja_type_## index ##_max_linear_child]; \
                        struct rcu_ja_node_flag *child_ptr[ja_type_## index ##_max_linear_child]; \
                } linear[1U << ja_type_## index ##_nr_pool_order];              \
        }

struct rcu_ja_node {
        union {
                /* Linear configuration */
                DECLARE_LINEAR_NODE(0) conf_0;
                DECLARE_LINEAR_NODE(1) conf_1;
                DECLARE_LINEAR_NODE(2) conf_2;
                DECLARE_LINEAR_NODE(3) conf_3;
                DECLARE_LINEAR_NODE(4) conf_4;

                /* Pool configuration */
                DECLARE_POOL_NODE(5) conf_5;
                DECLARE_POOL_NODE(6) conf_6;

                /* Pigeon configuration */
                struct {
                        struct rcu_ja_node_flag *child[ja_type_7_max_child];
                } conf_7;
                /* data aliasing nodes for computed accesses */
                uint8_t data[sizeof(struct rcu_ja_node_flag *) * ja_type_7_max_child];
        } u;
};

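/*
 * Pointer tagging helpers: ja_node_flag() packs a node type index into
 * the low bits of a node pointer, ja_node_type() extracts that index,
 * and ja_node_ptr() strips the tag to recover the node address.
 */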
static
struct rcu_ja_node_flag *ja_node_flag(struct rcu_ja_node *node,
                unsigned int type)
{
        assert(type < JA_TYPE_MAX_NR);
        return (struct rcu_ja_node_flag *) (((unsigned long) node) | type);
}

static
unsigned int ja_node_type(struct rcu_ja_node_flag *node)
{
        unsigned int type;

        type = (unsigned int) ((unsigned long) node & JA_TYPE_MASK);
        assert(type < JA_TYPE_MAX_NR);
        return type;
}

static
struct rcu_ja_node *ja_node_ptr(struct rcu_ja_node_flag *node)
{
        return (struct rcu_ja_node *) (((unsigned long) node) & JA_PTR_MASK);
}

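/*
 * Allocate a zeroed node of (1 << order) bytes. calloc() returns memory
 * suitably aligned for any built-in type, so the JA_TYPE_BITS low bits
 * of the returned address are expected to be zero and usable for the
 * type tag.
 */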
struct rcu_ja_node *alloc_rcu_ja_node(const struct rcu_ja_type *ja_type)
{
        return calloc(1U << ja_type->order, sizeof(char));
}

void free_rcu_ja_node(struct rcu_ja_node *node)
{
        free(node);
}

#define __JA_ALIGN_MASK(v, mask)        (((v) + (mask)) & ~(mask))
#define JA_ALIGN(v, align)              __JA_ALIGN_MASK(v, (typeof(v)) (align) - 1)
#define __JA_FLOOR_MASK(v, mask)        ((v) & ~(mask))
#define JA_FLOOR(v, align)              __JA_FLOOR_MASK(v, (typeof(v)) (align) - 1)
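/* Examples: JA_ALIGN(13, 8) == 16, JA_FLOOR(13, 8) == 8. */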

static
uint8_t *align_ptr_size(uint8_t *ptr)
{
        return (uint8_t *) JA_ALIGN((unsigned long) ptr, sizeof(void *));
}

/*
 * The order in which values and pointers are set does not matter: if
 * a value is missing, we return NULL. If a value is there, but its
 * associated pointer is still NULL, we return NULL too.
 */
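/*
 * Memory ordering: nr_child is loaded first, followed by cmm_smp_rmb(),
 * which pairs with the cmm_smp_wmb() in ja_linear_node_set_nth(). A
 * reader observing nr_child == k is thus guaranteed to also observe the
 * k child values and pointers published before that count was written.
 */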
static
struct rcu_ja_node_flag *ja_linear_node_get_nth(const struct rcu_ja_type *type,
                struct rcu_ja_node *node,
                uint8_t n)
{
        uint8_t nr_child;
        uint8_t *values;
        struct rcu_ja_node_flag **pointers;
        struct rcu_ja_node_flag *ptr;
        unsigned int i;

        assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);

        nr_child = CMM_LOAD_SHARED(node->u.data[0]);
        cmm_smp_rmb();  /* read nr_child before values and pointers */
        assert(nr_child <= type->max_linear_child);
        assert(type->type_class != RCU_JA_LINEAR || nr_child >= type->min_child);

        values = &node->u.data[1];
        for (i = 0; i < nr_child; i++) {
                if (CMM_LOAD_SHARED(values[i]) == n)
                        break;
        }
        if (i >= nr_child)
                return NULL;
        pointers = (struct rcu_ja_node_flag **) align_ptr_size(&values[type->max_linear_child]);
        ptr = rcu_dereference(pointers[i]);
        assert(ja_node_ptr(ptr) != NULL);
        return ptr;
}

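/*
 * A pool node contains (1 << nr_pool_order) linear sub-nodes of
 * (1 << pool_size_order) bytes each. The topmost nr_pool_order bits of
 * n select the sub-node; the lookup then proceeds as in a plain linear
 * node.
 */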
static
struct rcu_ja_node_flag *ja_pool_node_get_nth(const struct rcu_ja_type *type,
                struct rcu_ja_node *node,
                uint8_t n)
{
        struct rcu_ja_node *linear;

        assert(type->type_class == RCU_JA_POOL);
        linear = (struct rcu_ja_node *)
                &node->u.data[((unsigned long) n >> (CHAR_BIT - type->nr_pool_order)) << type->pool_size_order];
        return ja_linear_node_get_nth(type, linear, n);
}

static
struct rcu_ja_node_flag *ja_pigeon_node_get_nth(const struct rcu_ja_type *type,
                struct rcu_ja_node *node,
                uint8_t n)
{
        assert(type->type_class == RCU_JA_PIGEON);
        return rcu_dereference(((struct rcu_ja_node_flag **) node->u.data)[n]);
}

/*
 * ja_node_get_nth: get nth item from a node.
 * node_flag is already rcu_dereference'd.
 */
static
struct rcu_ja_node_flag *ja_node_get_nth(struct rcu_ja_node_flag *node_flag,
                uint8_t n)
{
        unsigned int type_index;
        struct rcu_ja_node *node;
        const struct rcu_ja_type *type;

        node = ja_node_ptr(node_flag);
        assert(node != NULL);
        type_index = ja_node_type(node_flag);
        type = &ja_types[type_index];

        switch (type->type_class) {
        case RCU_JA_LINEAR:
                return ja_linear_node_get_nth(type, node, n);
        case RCU_JA_POOL:
                return ja_pool_node_get_nth(type, node, n);
        case RCU_JA_PIGEON:
                return ja_pigeon_node_get_nth(type, node, n);
        default:
                assert(0);
                return (void *) -1UL;
        }
}

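/*
 * Update side for linear (and pool) nodes: the child pointer and value
 * are stored first, then cmm_smp_wmb() orders those stores before the
 * nr_child increment, so concurrent readers never see a slot that is
 * counted but not yet initialized. Mutual exclusion between updaters is
 * assumed (see the TODO in _ja_node_set_nth()).
 */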
static
int ja_linear_node_set_nth(const struct rcu_ja_type *type,
                struct rcu_ja_node *node,
                uint8_t n,
                struct rcu_ja_node_flag *child_node_flag)
{
        uint8_t nr_child;
        uint8_t *values, *nr_child_ptr;
        struct rcu_ja_node_flag **pointers;
        unsigned int i;

        assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);

        nr_child_ptr = &node->u.data[0];
        nr_child = *nr_child_ptr;
        assert(nr_child <= type->max_linear_child);
        assert(type->type_class != RCU_JA_LINEAR || nr_child >= type->min_child);

        values = &node->u.data[1];
        for (i = 0; i < nr_child; i++) {
                if (values[i] == n)
                        return -EEXIST;
        }
        if (nr_child >= type->max_linear_child) {
                /* No space left in this node type */
                return -ENOSPC;
        }
        pointers = (struct rcu_ja_node_flag **) align_ptr_size(&values[type->max_linear_child]);
        assert(pointers[nr_child] == NULL);
        rcu_assign_pointer(pointers[nr_child], child_node_flag);
        CMM_STORE_SHARED(values[nr_child], n);
        cmm_smp_wmb();  /* write value and pointer before nr_child */
        CMM_STORE_SHARED(*nr_child_ptr, nr_child + 1);
        return 0;
}

static
int ja_pool_node_set_nth(const struct rcu_ja_type *type,
                struct rcu_ja_node *node,
                uint8_t n,
                struct rcu_ja_node_flag *child_node_flag)
{
        struct rcu_ja_node *linear;

        assert(type->type_class == RCU_JA_POOL);
        linear = (struct rcu_ja_node *)
                &node->u.data[((unsigned long) n >> (CHAR_BIT - type->nr_pool_order)) << type->pool_size_order];
        return ja_linear_node_set_nth(type, linear, n, child_node_flag);
}

static
int ja_pigeon_node_set_nth(const struct rcu_ja_type *type,
                struct rcu_ja_node *node,
                uint8_t n,
                struct rcu_ja_node_flag *child_node_flag)
{
        struct rcu_ja_node_flag **ptr;

        assert(type->type_class == RCU_JA_PIGEON);
        ptr = &((struct rcu_ja_node_flag **) node->u.data)[n];
        if (*ptr != NULL)
                return -EEXIST;
        rcu_assign_pointer(*ptr, child_node_flag);
        return 0;
}


/*
 * _ja_node_set_nth: set nth item within a node. Return -EEXIST if the
 * slot is already populated, -ENOSPC if the current node type has no
 * free slot left, 0 on success.
 * TODO: exclusive access on node.
 */
static
int _ja_node_set_nth(struct rcu_ja_node_flag *node_flag, uint8_t n,
                struct rcu_ja_node_flag *child_node_flag)
{
        unsigned int type_index;
        struct rcu_ja_node *node;
        const struct rcu_ja_type *type;

        node = ja_node_ptr(node_flag);
        assert(node != NULL);
        type_index = ja_node_type(node_flag);
        type = &ja_types[type_index];

        switch (type->type_class) {
        case RCU_JA_LINEAR:
                return ja_linear_node_set_nth(type, node, n,
                                child_node_flag);
        case RCU_JA_POOL:
                return ja_pool_node_set_nth(type, node, n,
                                child_node_flag);
        case RCU_JA_PIGEON:
                return ja_pigeon_node_set_nth(type, node, n,
                                child_node_flag);
        default:
                assert(0);
                return -EINVAL;
        }

        return 0;
}

/*
 * ja_node_recompact_add: recompact a node, adding a new child.
 */
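/*
 * The replacement node uses the next node type (one index larger, or
 * index 0 when there was no previous node). Existing children are
 * copied into it, the new child is added, and the new flagged pointer
 * is published with rcu_assign_pointer(). The old node must only be
 * reclaimed after a grace period (see the call_rcu TODO below).
 */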
static
int ja_node_recompact_add(struct rcu_ja_node_flag **old_node_flag, uint8_t n,
                struct rcu_ja_node_flag *child_node_flag)
{
        unsigned int old_type_index, new_type_index;
        struct rcu_ja_node *old_node, *new_node;
        const struct rcu_ja_type *old_type, *new_type;
        struct rcu_ja_node_flag *new_node_flag;
        unsigned int i;
        int ret;

        old_node = ja_node_ptr(*old_node_flag);
        if (old_node == NULL) {
                /* No previous node: allocate the smallest node type. */
                old_type = NULL;
                new_type_index = 0;
        } else {
                old_type_index = ja_node_type(*old_node_flag);
                old_type = &ja_types[old_type_index];
                new_type_index = old_type_index + 1;
        }
        new_type = &ja_types[new_type_index];
        new_node = alloc_rcu_ja_node(new_type);
        if (!new_node)
                return -ENOMEM;
        new_node_flag = ja_node_flag(new_node, new_type_index);

        /* Nothing to copy when there was no previous node. */
        for (i = 0; old_type && i < old_type->max_child; i++) {
                struct rcu_ja_node_flag *iter;

                iter = ja_node_get_nth(*old_node_flag, i);
                if (!iter)
                        continue;
                ret = _ja_node_set_nth(new_node_flag, i, iter);
                assert(!ret);
        }
        /* add node */
        ret = _ja_node_set_nth(new_node_flag, n, child_node_flag);
        assert(!ret);
        /* Replace the old node with the new recompacted one */
        rcu_assign_pointer(*old_node_flag, new_node_flag);
        /* TODO: free old_node (call_rcu) */
        return 0;
}

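/*
 * ja_node_set_nth: set the nth child of the node referenced by
 * *node_flag. The child is first added in place; if the current node
 * type has no free slot left (-ENOSPC), the node is recompacted into
 * the next larger type and the child is added as part of the
 * recompaction.
 */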
static
int ja_node_set_nth(struct rcu_ja_node_flag **node_flag, uint8_t n,
                struct rcu_ja_node_flag *child_node_flag)
{
        int ret;

        ret = _ja_node_set_nth(*node_flag, n, child_node_flag);
        if (ret == -ENOSPC) {
                /* Not enough space in node, need to recompact */
                ret = ja_node_recompact_add(node_flag, n,
                                child_node_flag);
                if (ret < 0)
                        return ret;
        }
        return ret;
}
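
/*
 * Illustrative usage sketch (not part of this file; root, key and
 * key_depth are hypothetical): a lookup in the eventual Judy array is
 * expected to walk one node per 8-bit key slice, calling
 * ja_node_get_nth() at each level, e.g.:
 *
 *      struct rcu_ja_node_flag *node_flag = rcu_dereference(root);
 *      unsigned int level;
 *
 *      for (level = 0; node_flag && level < key_depth; level++) {
 *              uint8_t slice = (uint8_t) (key >> ((key_depth - 1 - level) * CHAR_BIT));
 *              node_flag = ja_node_get_nth(node_flag, slice);
 *      }
 *
 * This file only provides the per-node accessors; the tree walk and
 * locking strategy live elsewhere.
 */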