rculfhash: document flags
[urcu.git] / rculfhash.c
CommitLineData
5e28c532 1/*
abc490a1
MD
2 * rculfhash.c
3 *
4 * Userspace RCU library - Lock-Free Expandable RCU Hash Table
5 *
6 * Copyright 2010-2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
5e28c532
MD
21 */
22
#define _LGPL_SOURCE
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <assert.h>
#include <pthread.h>
#include <poll.h>	/* poll(): used by ht_destroy() to drain in-flight resizes */

#include <urcu.h>
#include <urcu-call-rcu.h>
#include <urcu/arch.h>
#include <urcu/uatomic.h>
#include <urcu/jhash.h>
#include <urcu/compiler.h>
#include <urcu/rculfhash.h>

#define DEBUG			/* Test */

/*
 * Debug printout helper.  Uses the standard C99 variadic-macro form
 * (__VA_ARGS__) instead of the GNU-specific "args..." named-variadic
 * extension, for portability.  The disabled variant expands to an
 * empty statement so "dbg_printf(...);" stays syntactically valid.
 */
#ifdef DEBUG
#define dbg_printf(...)		printf(__VA_ARGS__)
#else
#define dbg_printf(...)		do { } while (0)
#endif
48
/*
 * Chain length accounting: once a bucket chain reaches
 * CHAIN_LEN_RESIZE_THRESHOLD nodes, check_resize() schedules a table
 * growth sized by how far the chain exceeds CHAIN_LEN_TARGET.
 */
#define CHAIN_LEN_TARGET		1
#define CHAIN_LEN_RESIZE_THRESHOLD	2
2ed95849 51
/*
 * NOTE(review): classic max() macro — evaluates each argument twice,
 * so it must not be called with side-effecting expressions.  All uses
 * in this file pass plain variables/constants.
 */
#ifndef max
#define max(a, b) ((a) > (b) ? (a) : (b))
#endif
2ed95849 55
/*
 * The removed flag needs to be updated atomically with the pointer.
 * The dummy flag does not require to be updated atomically with the
 * pointer, but it is added as a pointer low bit flag to save space.
 * Both flags live in the two low-order bits of node "next" pointers,
 * which is why clear_flag() must be applied before dereferencing.
 */
#define REMOVED_FLAG	(1UL << 0)
#define DUMMY_FLAG	(1UL << 1)
#define FLAGS_MASK	((1UL << 2) - 1)
d37166c6 64
/*
 * Bucket index table.  Replaced wholesale (via rcu_assign_pointer on
 * ht->t) on resize, so readers always see a consistent (size, tbl[])
 * pair; the old table is reclaimed through call_rcu (see head).
 */
struct rcu_table {
	unsigned long size;	/* always a power of 2 */
	unsigned long resize_target;	/* desired size; only ever grows (see _uatomic_max) */
	int resize_initiated;	/* set while a resize is queued or running */
	struct rcu_head head;	/* for deferred free of the old table */
	struct rcu_ht_node *tbl[0];	/* bucket heads, GNU zero-length trailing array */
};
72
/*
 * Hash table instance state.
 */
struct rcu_ht {
	struct rcu_table *t;		/* shared */
	ht_hash_fct hash_fct;		/* key -> hash value */
	ht_compare_fct compare_fct;	/* key comparison; returns 0 on match */
	unsigned long hash_seed;	/* seed forwarded to hash_fct */
	pthread_mutex_t resize_mutex;	/* resize mutex: add/del mutex */
	unsigned int in_progress_resize;	/* count of pending resize callbacks */
	/* defer-execution hook (e.g. call_rcu) used for resize work and
	 * old-table reclaim */
	void (*ht_call_rcu)(struct rcu_head *head,
		      void (*func)(struct rcu_head *head));
};
83
/* Deferred resize request, scheduled through ht->ht_call_rcu(). */
struct rcu_resize_work {
	struct rcu_head head;	/* RCU callback linkage */
	struct rcu_ht *ht;	/* table to resize */
};
2ed95849 88
/*
 * Algorithm to reverse bits in a word by lookup table, extended to
 * 64-bit words.
 * Source:
 * http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
 * Originally from Public Domain.
 */

static const uint8_t BitReverseTable256[256] =
{
#define R2(n) (n), (n) + 2*64, (n) + 1*64, (n) + 3*64
#define R4(n) R2(n), R2((n) + 2*16), R2((n) + 1*16), R2((n) + 3*16)
#define R6(n) R4(n), R4((n) + 2*4 ), R4((n) + 1*4 ), R4((n) + 3*4 )
	R6(0), R6(2), R6(1), R6(3)
};
#undef R2
#undef R4
#undef R6

/* Reverse the bit order of one byte via the lookup table. */
static
uint8_t bit_reverse_u8(uint8_t v)
{
	return BitReverseTable256[v];
}

/*
 * Reverse the bit order of a 32-bit word: bit-reverse each byte and
 * place it at the mirrored byte position.
 */
static __attribute__((unused))
uint32_t bit_reverse_u32(uint32_t v)
{
	uint32_t ret = 0;
	unsigned int shift;

	for (shift = 0; shift < 32; shift += 8)
		ret |= (uint32_t) bit_reverse_u8(v >> shift) << (24 - shift);
	return ret;
}

/* Same byte-mirroring scheme, for 64-bit words. */
static __attribute__((unused))
uint64_t bit_reverse_u64(uint64_t v)
{
	uint64_t ret = 0;
	unsigned int shift;

	for (shift = 0; shift < 64; shift += 8)
		ret |= (uint64_t) bit_reverse_u8(v >> shift) << (56 - shift);
	return ret;
}

/* Bit-reverse an unsigned long, dispatching on the native word size. */
static
unsigned long bit_reverse_ulong(unsigned long v)
{
#if (CAA_BITS_PER_LONG == 32)
	return bit_reverse_u32(v);
#else
	return bit_reverse_u64(v);
#endif
}
145
/*
 * Algorithm to find the log2 of a 32-bit unsigned integer.
 * source: http://graphics.stanford.edu/~seander/bithacks.html#IntegerLogLookup
 * Originally from Public Domain.
 */
static const char LogTable256[256] =
{
#define LT(n) n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n
	-1, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
	LT(4), LT(5), LT(5), LT(6), LT(6), LT(6), LT(6),
	LT(7), LT(7), LT(7), LT(7), LT(7), LT(7), LT(7), LT(7)
};

/*
 * Returns floor(log2(v)) for v > 0.  Note: for v == 0 the table yields
 * -1, which wraps to (uint32_t) -1 — callers must pass non-zero input.
 */
uint32_t log2_u32(uint32_t v)
{
	uint32_t high16 = v >> 16;

	if (high16) {
		uint32_t high8 = high16 >> 8;

		return high8 ? 24 + LogTable256[high8]
			     : 16 + LogTable256[high16];
	} else {
		uint32_t mid8 = v >> 8;

		return mid8 ? 8 + LogTable256[mid8]
			    : LogTable256[v];
	}
}
172
173static
174void ht_resize_lazy(struct rcu_ht *ht, struct rcu_table *t, int growth);
175
176static
177void check_resize(struct rcu_ht *ht, struct rcu_table *t,
178 uint32_t chain_len)
179{
3390d470
MD
180 if (chain_len >= CHAIN_LEN_RESIZE_THRESHOLD)
181 ht_resize_lazy(ht, t,
65e8e729 182 log2_u32(chain_len - CHAIN_LEN_TARGET - 1));
f9830efd
MD
183}
184
abc490a1
MD
185static
186struct rcu_ht_node *clear_flag(struct rcu_ht_node *node)
187{
d37166c6 188 return (struct rcu_ht_node *) (((unsigned long) node) & ~FLAGS_MASK);
abc490a1
MD
189}
190
191static
192int is_removed(struct rcu_ht_node *node)
193{
d37166c6 194 return ((unsigned long) node) & REMOVED_FLAG;
abc490a1
MD
195}
196
197static
198struct rcu_ht_node *flag_removed(struct rcu_ht_node *node)
199{
d37166c6 200 return (struct rcu_ht_node *) (((unsigned long) node) | REMOVED_FLAG);
abc490a1
MD
201}
202
f5596c94
MD
203static
204int is_dummy(struct rcu_ht_node *node)
205{
206 return ((unsigned long) node) & DUMMY_FLAG;
207}
208
209static
210struct rcu_ht_node *flag_dummy(struct rcu_ht_node *node)
211{
212 return (struct rcu_ht_node *) (((unsigned long) node) | DUMMY_FLAG);
213}
214
/*
 * Atomically raise *ptr to at least v.  Returns the resulting value:
 * either the pre-existing value when it was already >= v, or v once the
 * update lands.
 */
static
unsigned long _uatomic_max(unsigned long *ptr, unsigned long v)
{
	unsigned long cur = uatomic_read(ptr);

	for (;;) {
		unsigned long prev;

		if (cur >= v)
			return cur;
		prev = uatomic_cmpxchg(ptr, cur, v);
		if (prev == cur)
			return v;
		/* Lost the race; retry against the value that won. */
		cur = prev;
	}
}
228
/*
 * Remove all logically deleted nodes from a bucket up to a certain node key.
 *
 * Walks the chain starting at "dummy", unlinking any node whose next
 * pointer carries the REMOVED flag, stopping once a node with a
 * reverse-hash greater than node->p.reverse_hash (or the chain end) is
 * reached.  The unlink is a single cmpxchg on the predecessor's next
 * pointer; on contention the outer loop simply rescans from the dummy
 * head.  Caller context per file conventions: within an RCU read-side
 * critical section.
 */
static
void _ht_gc_bucket(struct rcu_ht_node *dummy, struct rcu_ht_node *node)
{
	struct rcu_ht_node *iter_prev, *iter, *next, *new_next;

	for (;;) {
		iter_prev = dummy;
		/* We can always skip the dummy node initially */
		iter = rcu_dereference(iter_prev->p.next);
		assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
		for (;;) {
			/* End of chain: nothing left to collect. */
			if (unlikely(!clear_flag(iter)))
				return;
			/* Past the target key range: done. */
			if (clear_flag(iter)->p.reverse_hash > node->p.reverse_hash)
				return;
			next = rcu_dereference(clear_flag(iter)->p.next);
			/* Found a logically removed node: go unlink it. */
			if (is_removed(next))
				break;
			iter_prev = iter;
			iter = next;
		}
		assert(!is_removed(iter));
		/* Preserve the predecessor's dummy flag across the unlink. */
		if (is_dummy(iter))
			new_next = flag_dummy(clear_flag(next));
		else
			new_next = clear_flag(next);
		/* Best-effort unlink; on failure the outer loop rescans. */
		(void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
	}
}
261
/*
 * Insert "node" at its reverse-hash-ordered position in the bucket
 * chain (lock-free, cmpxchg-based).
 *
 * @unique: when set, refuse to insert a second node whose key compares
 *          equal (compare_fct returning 0) to an existing non-dummy
 *          node; the existing node is returned instead.
 * @dummy:  when set, "node" is a bucket dummy node and is inserted with
 *          the DUMMY flag in its successor's pointer to it.
 *
 * Returns "node" on successful insertion (including the initial-head
 * special case when t->size == 0), or the pre-existing matching node
 * when @unique detects a duplicate.  Logically removed nodes found
 * during the scan are garbage collected along the way (gc_node /
 * gc_end paths).
 */
static
struct rcu_ht_node *_ht_add(struct rcu_ht *ht, struct rcu_table *t,
			    struct rcu_ht_node *node, int unique, int dummy)
{
	struct rcu_ht_node *iter_prev, *iter, *next, *new_node, *new_next,
			*dummy_node;
	unsigned long hash;

	if (!t->size) {
		assert(dummy);
		node->p.next = flag_dummy(NULL);
		return node;	/* Initial first add (head) */
	}
	hash = bit_reverse_ulong(node->p.reverse_hash);
	for (;;) {
		uint32_t chain_len = 0;

		/*
		 * iter_prev points to the non-removed node prior to the
		 * insert location.
		 */
		iter_prev = rcu_dereference(t->tbl[hash & (t->size - 1)]);
		/* We can always skip the dummy node initially */
		iter = rcu_dereference(iter_prev->p.next);
		assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
		for (;;) {
			if (unlikely(!clear_flag(iter)))
				goto insert;
			if (clear_flag(iter)->p.reverse_hash > node->p.reverse_hash)
				goto insert;
			next = rcu_dereference(clear_flag(iter)->p.next);
			if (is_removed(next))
				goto gc_node;
			if (unique
			    && !is_dummy(next)
			    && !ht->compare_fct(node->key, node->key_len,
						clear_flag(iter)->key,
						clear_flag(iter)->key_len))
				return clear_flag(iter);
			/* Only account for identical reverse hash once */
			if (iter_prev->p.reverse_hash != clear_flag(iter)->p.reverse_hash)
				check_resize(ht, t, ++chain_len);
			iter_prev = clear_flag(iter);
			iter = next;
		}
	insert:
		assert(node != clear_flag(iter));
		assert(!is_removed(iter_prev));
		assert(iter_prev != node);
		/* Link node in front of iter, carrying our dummy flag if any. */
		if (!dummy)
			node->p.next = clear_flag(iter);
		else
			node->p.next = flag_dummy(clear_flag(iter));
		/* Preserve the successor's dummy flag on the new link. */
		if (is_dummy(iter))
			new_node = flag_dummy(node);
		else
			new_node = node;
		if (uatomic_cmpxchg(&iter_prev->p.next, iter,
				    new_node) != iter)
			continue;	/* retry */
		else
			goto gc_end;
	gc_node:
		assert(!is_removed(iter));
		/* Unlink the logically removed node found during the scan. */
		if (is_dummy(iter))
			new_next = flag_dummy(clear_flag(next));
		else
			new_next = clear_flag(next);
		(void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
		/* retry */
	}
gc_end:
	/* Garbage collect logically removed nodes in the bucket */
	dummy_node = rcu_dereference(t->tbl[hash & (t->size - 1)]);
	_ht_gc_bucket(dummy_node, node);
	return node;
}
464a1ec9 339
/*
 * Two-phase removal: (1) atomically set the REMOVED flag in node's next
 * pointer ("logical" deletion); (2) garbage-collect the bucket so the
 * node is physically unlinked and no longer reachable by readers.
 *
 * Returns 0 when this caller performed the logical deletion, -ENOENT
 * when the node was already flagged removed (someone else deleted it).
 */
static
int _ht_remove(struct rcu_ht *ht, struct rcu_table *t, struct rcu_ht_node *node)
{
	struct rcu_ht_node *dummy, *next, *old;
	int flagged = 0;
	unsigned long hash;

	/* logically delete the node */
	old = rcu_dereference(node->p.next);
	do {
		next = old;
		/* Already flagged by a concurrent remover: we lose. */
		if (is_removed(next))
			goto end;
		assert(!is_dummy(next));
		old = uatomic_cmpxchg(&node->p.next, next,
				      flag_removed(next));
	} while (old != next);

	/* We performed the (logical) deletion. */
	flagged = 1;

	/*
	 * Ensure that the node is not visible to readers anymore: lookup for
	 * the node, and remove it (along with any other logically removed node)
	 * if found.
	 */
	hash = bit_reverse_ulong(node->p.reverse_hash);
	dummy = rcu_dereference(t->tbl[hash & (t->size - 1)]);
	_ht_gc_bucket(dummy, node);
end:
	/*
	 * Only the flagging action indicated that we (and no other)
	 * removed the node from the hash.
	 */
	if (flagged) {
		assert(is_removed(rcu_dereference(node->p.next)));
		return 0;
	} else
		return -ENOENT;
}
2ed95849 380
/*
 * Populate bucket dummy nodes for table slots [first, first+len) and
 * insert each into the ordered chain.  t->size is bumped to each power
 * of two as it is crossed so that _ht_add() only ever hashes into
 * already-initialized buckets.  Called with ht->resize_mutex held (see
 * ht_new / _do_ht_resize call sites).
 *
 * NOTE(review): calloc() result is used unchecked — OOM here would
 * dereference NULL; confirm whether abort-on-OOM is the intended policy.
 */
static
void init_table(struct rcu_ht *ht, struct rcu_table *t,
		unsigned long first, unsigned long len)
{
	unsigned long i, end;

	end = first + len;
	for (i = first; i < end; i++) {
		/* Update table size when power of two */
		if (i != 0 && !(i & (i - 1)))
			t->size = i;
		t->tbl[i] = calloc(1, sizeof(struct _rcu_ht_node));
		t->tbl[i]->p.reverse_hash = bit_reverse_ulong(i);
		(void) _ht_add(ht, t, t->tbl[i], 0, 1);
	}
	/* Publish final size; clear the pending-resize latch. */
	t->resize_target = t->size = end;
	t->resize_initiated = 0;
}
399
abc490a1 400struct rcu_ht *ht_new(ht_hash_fct hash_fct,
732ad076
MD
401 ht_compare_fct compare_fct,
402 unsigned long hash_seed,
abc490a1
MD
403 unsigned long init_size,
404 void (*ht_call_rcu)(struct rcu_head *head,
405 void (*func)(struct rcu_head *head)))
406{
407 struct rcu_ht *ht;
408
409 ht = calloc(1, sizeof(struct rcu_ht));
410 ht->hash_fct = hash_fct;
732ad076
MD
411 ht->compare_fct = compare_fct;
412 ht->hash_seed = hash_seed;
f000907d 413 ht->ht_call_rcu = ht_call_rcu;
848d4088 414 ht->in_progress_resize = 0;
abc490a1
MD
415 /* this mutex should not nest in read-side C.S. */
416 pthread_mutex_init(&ht->resize_mutex, NULL);
417 ht->t = calloc(1, sizeof(struct rcu_table)
418 + (max(init_size, 1) * sizeof(struct rcu_ht_node *)));
419 ht->t->size = 0;
f000907d 420 pthread_mutex_lock(&ht->resize_mutex);
abc490a1 421 init_table(ht, ht->t, 0, max(init_size, 1));
f000907d 422 pthread_mutex_unlock(&ht->resize_mutex);
abc490a1
MD
423 return ht;
424}
425
/*
 * Look up a node by key.  Walks the reverse-hash-ordered bucket chain,
 * skipping logically removed nodes and dummies, until it finds a node
 * whose key compares equal (compare_fct returning 0) or passes the key's
 * reverse-hash position.  Returns the node, or NULL when absent.
 * Caller context per file conventions: within an RCU read-side C.S.
 */
struct rcu_ht_node *ht_lookup(struct rcu_ht *ht, void *key, size_t key_len)
{
	struct rcu_table *t;
	struct rcu_ht_node *node, *next;
	unsigned long hash, reverse_hash;

	hash = ht->hash_fct(key, key_len, ht->hash_seed);
	reverse_hash = bit_reverse_ulong(hash);

	t = rcu_dereference(ht->t);
	node = rcu_dereference(t->tbl[hash & (t->size - 1)]);
	for (;;) {
		if (unlikely(!node))
			break;
		/* Ordered chain: past the key's position means not present. */
		if (unlikely(node->p.reverse_hash > reverse_hash)) {
			node = NULL;
			break;
		}
		next = rcu_dereference(node->p.next);
		if (likely(!is_removed(next))
		    && !is_dummy(next)
		    && likely(!ht->compare_fct(node->key, node->key_len, key, key_len))) {
			break;
		}
		node = clear_flag(next);
	}
	/* A dummy node must never be returned to the caller. */
	assert(!node || !is_dummy(rcu_dereference(node->p.next)));
	return node;
}
e0ba718a 455
f000907d 456void ht_add(struct rcu_ht *ht, struct rcu_ht_node *node)
abc490a1
MD
457{
458 struct rcu_table *t;
49c2e2d6 459 unsigned long hash;
ab7d5fc6 460
49c2e2d6 461 hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
cc4fcb10 462 node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);
2ed95849 463
abc490a1 464 t = rcu_dereference(ht->t);
f5596c94 465 (void) _ht_add(ht, t, node, 0, 0);
3eca1b8c
MD
466}
467
18117871 468struct rcu_ht_node *ht_add_unique(struct rcu_ht *ht, struct rcu_ht_node *node)
3eca1b8c
MD
469{
470 struct rcu_table *t;
49c2e2d6 471 unsigned long hash;
3eca1b8c 472
49c2e2d6 473 hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
cc4fcb10 474 node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);
3eca1b8c
MD
475
476 t = rcu_dereference(ht->t);
f5596c94 477 return _ht_add(ht, t, node, 1, 0);
2ed95849
MD
478}
479
abc490a1 480int ht_remove(struct rcu_ht *ht, struct rcu_ht_node *node)
2ed95849 481{
abc490a1
MD
482 struct rcu_table *t;
483
484 t = rcu_dereference(ht->t);
abc490a1 485 return _ht_remove(ht, t, node);
2ed95849 486}
ab7d5fc6 487
/*
 * Free all bucket dummy nodes, provided the table contains nothing but
 * dummies.  Returns -EPERM (table not empty) as soon as a non-dummy
 * node is encountered; returns 0 on success.  Helper for ht_destroy().
 */
static
int ht_delete_dummy(struct rcu_ht *ht)
{
	struct rcu_table *t;
	struct rcu_ht_node *node;
	unsigned long i;

	t = ht->t;
	/* Check that the table is empty */
	node = t->tbl[0];
	do {
		node = clear_flag(node)->p.next;
		if (!is_dummy(node))
			return -EPERM;
		assert(!is_removed(node));
	} while (clear_flag(node));
	/* Internal sanity check: all nodes left should be dummy */
	for (i = 0; i < t->size; i++) {
		assert(is_dummy(t->tbl[i]->p.next));
		free(t->tbl[i]);
	}
	return 0;
}
511
512/*
513 * Should only be called when no more concurrent readers nor writers can
514 * possibly access the table.
515 */
5e28c532 516int ht_destroy(struct rcu_ht *ht)
674f7a69 517{
5e28c532
MD
518 int ret;
519
848d4088
MD
520 /* Wait for in-flight resize operations to complete */
521 while (uatomic_read(&ht->in_progress_resize))
522 poll(NULL, 0, 100); /* wait for 100ms */
abc490a1
MD
523 ret = ht_delete_dummy(ht);
524 if (ret)
525 return ret;
395270b6 526 free(ht->t);
674f7a69 527 free(ht);
5e28c532 528 return ret;
674f7a69
MD
529}
530
/*
 * Accounting helper: walk the chain starting at bucket 0 and report, via
 * out-parameters, the number of live (non-dummy, non-removed) nodes and
 * the number of logically removed nodes still awaiting unlink.
 */
void ht_count_nodes(struct rcu_ht *ht,
		unsigned long *count,
		unsigned long *removed)
{
	struct rcu_table *t;
	struct rcu_ht_node *node, *next;

	*count = 0;
	*removed = 0;

	t = rcu_dereference(ht->t);
	/* Check that the table is empty */
	node = rcu_dereference(t->tbl[0]);
	do {
		next = rcu_dereference(node->p.next);
		if (is_removed(next)) {
			/* Dummy nodes are never logically removed. */
			assert(!is_dummy(next));
			(*removed)++;
		} else if (!is_dummy(next))
			(*count)++;
		node = clear_flag(next);
	} while (node);
}
554
abc490a1
MD
555static
556void ht_free_table_cb(struct rcu_head *head)
557{
558 struct rcu_table *t =
559 caa_container_of(head, struct rcu_table, head);
560 free(t);
561}
562
563/* called with resize mutex held */
564static
565void _do_ht_resize(struct rcu_ht *ht)
464a1ec9 566{
abc490a1 567 unsigned long new_size, old_size;
395270b6 568 struct rcu_table *new_t, *old_t;
464a1ec9 569
395270b6
MD
570 old_t = ht->t;
571 old_size = old_t->size;
464a1ec9 572
f9830efd
MD
573 new_size = CMM_LOAD_SHARED(old_t->resize_target);
574 dbg_printf("rculfhash: resize from %lu to %lu buckets\n",
575 old_size, new_size);
abc490a1 576 if (old_size == new_size)
464a1ec9 577 return;
f000907d 578 new_t = malloc(sizeof(struct rcu_table)
abc490a1 579 + (new_size * sizeof(struct rcu_ht_node *)));
f000907d
MD
580 assert(new_size > old_size);
581 memcpy(&new_t->tbl, &old_t->tbl,
582 old_size * sizeof(struct rcu_ht_node *));
583 init_table(ht, new_t, old_size, new_size - old_size);
f000907d
MD
584 /* Changing table and size atomically wrt lookups */
585 rcu_assign_pointer(ht->t, new_t);
586 ht->ht_call_rcu(&old_t->head, ht_free_table_cb);
464a1ec9
MD
587}
588
abc490a1 589static
f9830efd
MD
590unsigned long resize_target_update(struct rcu_table *t,
591 int growth_order)
464a1ec9 592{
f9830efd
MD
593 return _uatomic_max(&t->resize_target,
594 t->size << growth_order);
464a1ec9
MD
595}
596
464a1ec9
MD
597void ht_resize(struct rcu_ht *ht, int growth)
598{
f9830efd
MD
599 struct rcu_table *t = rcu_dereference(ht->t);
600 unsigned long target_size;
601
602 target_size = resize_target_update(t, growth);
603 if (t->size < target_size) {
11519af6 604 CMM_STORE_SHARED(t->resize_initiated, 1);
f9830efd
MD
605 pthread_mutex_lock(&ht->resize_mutex);
606 _do_ht_resize(ht);
607 pthread_mutex_unlock(&ht->resize_mutex);
608 }
abc490a1 609}
464a1ec9 610
/*
 * Deferred-resize callback, invoked through ht_call_rcu().  Performs the
 * actual resize under the resize mutex, then releases its slot in
 * in_progress_resize (the barrier orders the resize work before the
 * decrement that ht_destroy() waits on).
 */
static
void do_resize_cb(struct rcu_head *head)
{
	struct rcu_resize_work *work =
		caa_container_of(head, struct rcu_resize_work, head);
	struct rcu_ht *ht = work->ht;

	pthread_mutex_lock(&ht->resize_mutex);
	_do_ht_resize(ht);
	pthread_mutex_unlock(&ht->resize_mutex);
	free(work);
	cmm_smp_mb();	/* finish resize before decrement */
	uatomic_dec(&ht->in_progress_resize);
}
625
/*
 * Queue a deferred resize through ht_call_rcu() unless one is already
 * initiated or the table already meets the raised target.  The
 * in_progress_resize counter is incremented before scheduling (barrier
 * ordered) so ht_destroy() can wait for the callback to finish.
 *
 * NOTE(review): malloc() result is used unchecked here — OOM would
 * dereference NULL in do_resize_cb; confirm intended OOM policy.
 */
static
void ht_resize_lazy(struct rcu_ht *ht, struct rcu_table *t, int growth)
{
	struct rcu_resize_work *work;
	unsigned long target_size;

	target_size = resize_target_update(t, growth);
	if (!CMM_LOAD_SHARED(t->resize_initiated) && t->size < target_size) {
		uatomic_inc(&ht->in_progress_resize);
		cmm_smp_mb();	/* increment resize count before calling it */
		work = malloc(sizeof(*work));
		work->ht = ht;
		ht->ht_call_rcu(&work->head, do_resize_cb);
		CMM_STORE_SHARED(t->resize_initiated, 1);
	}
}
This page took 0.054489 seconds and 4 git commands to generate.