rculfhash: merge dummy into next ptr
/*
 * rculfhash.c
 *
 * Userspace RCU library - Lock-Free Expandable RCU Hash Table
 *
 * Copyright 2010-2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define _LGPL_SOURCE
#include <stdlib.h>
#include <errno.h>
#include <assert.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <pthread.h>
#include <poll.h>	/* poll(), used by ht_destroy() to wait for resizes */

#include <urcu.h>
#include <urcu-call-rcu.h>
#include <urcu/arch.h>
#include <urcu/uatomic.h>
#include <urcu/jhash.h>
#include <urcu/compiler.h>
#include <urcu/rculfhash.h>

#define DEBUG		/* Test */

#ifdef DEBUG
#define dbg_printf(args...)	printf(args)
#else
#define dbg_printf(args...)
#endif

#define CHAIN_LEN_TARGET		1
#define CHAIN_LEN_RESIZE_THRESHOLD	2

#ifndef max
#define max(a, b)	((a) > (b) ? (a) : (b))
#endif

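/*
 * The two least-significant bits of a node's next pointer hold its
 * flags: REMOVED_FLAG marks the node owning the field as logically
 * deleted, DUMMY_FLAG marks it as a bucket-head dummy node. Node
 * addresses are at least 4-byte aligned, so these bits are otherwise
 * always zero; a next pointer must be passed through clear_flag()
 * before being dereferenced.
 */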
#define REMOVED_FLAG	(1UL << 0)
#define DUMMY_FLAG	(1UL << 1)
#define FLAGS_MASK	((1UL << 2) - 1)

struct rcu_table {
	unsigned long size;	/* always a power of 2 */
	unsigned long resize_target;
	int resize_initiated;
	struct rcu_head head;
	struct rcu_ht_node *tbl[0];
};

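/*
 * tbl[] above is a zero-length (flexible) array: the bucket pointers
 * are allocated contiguously with the struct, in one block of
 * sizeof(struct rcu_table) + size * sizeof(struct rcu_ht_node *)
 * bytes, so a resize publishes the new size and the new buckets with a
 * single rcu_assign_pointer() of ht->t.
 */
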
struct rcu_ht {
	struct rcu_table *t;		/* shared */
	ht_hash_fct hash_fct;
	ht_compare_fct compare_fct;
	unsigned long hash_seed;
	pthread_mutex_t resize_mutex;	/* resize mutex: add/del mutex */
	unsigned int in_progress_resize;
	void (*ht_call_rcu)(struct rcu_head *head,
		      void (*func)(struct rcu_head *head));
};

struct rcu_resize_work {
	struct rcu_head head;
	struct rcu_ht *ht;
};

/*
 * Algorithm to reverse bits in a word by lookup table, extended to
 * 64-bit words.
 * Source:
 * http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
 * Originally from Public Domain.
 */

static const uint8_t BitReverseTable256[256] =
{
#define R2(n) (n),   (n) + 2*64,     (n) + 1*64,     (n) + 3*64
#define R4(n) R2(n), R2((n) + 2*16), R2((n) + 1*16), R2((n) + 3*16)
#define R6(n) R4(n), R4((n) + 2*4 ), R4((n) + 1*4 ), R4((n) + 3*4 )
	R6(0), R6(2), R6(1), R6(3)
};
#undef R2
#undef R4
#undef R6

static
uint8_t bit_reverse_u8(uint8_t v)
{
	return BitReverseTable256[v];
}

static __attribute__((unused))
uint32_t bit_reverse_u32(uint32_t v)
{
	return ((uint32_t) bit_reverse_u8(v) << 24) |
		((uint32_t) bit_reverse_u8(v >> 8) << 16) |
		((uint32_t) bit_reverse_u8(v >> 16) << 8) |
		((uint32_t) bit_reverse_u8(v >> 24));
}

static __attribute__((unused))
uint64_t bit_reverse_u64(uint64_t v)
{
	return ((uint64_t) bit_reverse_u8(v) << 56) |
		((uint64_t) bit_reverse_u8(v >> 8) << 48) |
		((uint64_t) bit_reverse_u8(v >> 16) << 40) |
		((uint64_t) bit_reverse_u8(v >> 24) << 32) |
		((uint64_t) bit_reverse_u8(v >> 32) << 24) |
		((uint64_t) bit_reverse_u8(v >> 40) << 16) |
		((uint64_t) bit_reverse_u8(v >> 48) << 8) |
		((uint64_t) bit_reverse_u8(v >> 56));
}

static
unsigned long bit_reverse_ulong(unsigned long v)
{
#if (CAA_BITS_PER_LONG == 32)
	return bit_reverse_u32(v);
#else
	return bit_reverse_u64(v);
#endif
}

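/*
 * The hash table keeps its linked list sorted by bit-reversed hash
 * value (a "split-ordered list"). With this ordering, growing the
 * table from 2^k to 2^(k+1) buckets only requires linking in the new
 * dummy nodes: every existing bucket chain splits in two, and no node
 * ever has to move.
 */
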
/*
 * Algorithm to find the log2 of a 32-bit unsigned integer.
 * source: http://graphics.stanford.edu/~seander/bithacks.html#IntegerLogLookup
 * Originally from Public Domain.
 */
static const char LogTable256[256] =
{
#define LT(n) n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n
	-1, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
	LT(4), LT(5), LT(5), LT(6), LT(6), LT(6), LT(6),
	LT(7), LT(7), LT(7), LT(7), LT(7), LT(7), LT(7), LT(7)
};
#undef LT

static
uint32_t log2_u32(uint32_t v)
{
	uint32_t t, tt;

	if ((tt = (v >> 16)))
		return (t = (tt >> 8))
				? 24 + LogTable256[t]
				: 16 + LogTable256[tt];
	else
		return (t = (v >> 8))
				? 8 + LogTable256[t]
				: LogTable256[v];
}

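/*
 * log2_u32() returns the floor of log2(v): log2_u32(2) == 1,
 * log2_u32(1000) == 9. Note that log2_u32(0) yields (uint32_t) -1
 * (LogTable256[0] is -1), so callers must never pass 0.
 */
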
static
void ht_resize_lazy(struct rcu_ht *ht, struct rcu_table *t, int growth);

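/*
 * A bucket chain longer than CHAIN_LEN_RESIZE_THRESHOLD triggers a
 * lazy resize; the requested growth order scales with how far the
 * chain exceeds CHAIN_LEN_TARGET.
 */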
static
void check_resize(struct rcu_ht *ht, struct rcu_table *t,
		  uint32_t chain_len)
{
	if (chain_len >= CHAIN_LEN_RESIZE_THRESHOLD)
		ht_resize_lazy(ht, t,
			log2_u32(chain_len - (CHAIN_LEN_TARGET - 1)));
}

static
struct rcu_ht_node *clear_flag(struct rcu_ht_node *node)
{
	return (struct rcu_ht_node *) (((unsigned long) node) & ~FLAGS_MASK);
}

static
int is_removed(struct rcu_ht_node *node)
{
	return ((unsigned long) node) & REMOVED_FLAG;
}

static
struct rcu_ht_node *flag_removed(struct rcu_ht_node *node)
{
	return (struct rcu_ht_node *) (((unsigned long) node) | REMOVED_FLAG);
}

static
int is_dummy(struct rcu_ht_node *node)
{
	return ((unsigned long) node) & DUMMY_FLAG;
}

static
struct rcu_ht_node *flag_dummy(struct rcu_ht_node *node)
{
	return (struct rcu_ht_node *) (((unsigned long) node) | DUMMY_FLAG);
}

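/*
 * Atomically update *ptr to the maximum of its current value and v.
 * Returns the value that ends up stored in *ptr.
 */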
static
unsigned long _uatomic_max(unsigned long *ptr, unsigned long v)
{
	unsigned long old1, old2;

	old1 = uatomic_read(ptr);
	do {
		old2 = old1;
		if (old2 >= v)
			return old2;
	} while ((old1 = uatomic_cmpxchg(ptr, old2, v)) != old2);
	return v;
}

/*
 * Remove all logically deleted nodes from a bucket up to a certain node key.
 */
static
void _ht_gc_bucket(struct rcu_ht_node *dummy, struct rcu_ht_node *node)
{
	struct rcu_ht_node *iter_prev, *iter, *next, *new_next;

	for (;;) {
		iter_prev = dummy;
		/* We can always skip the dummy node initially */
		iter = rcu_dereference(iter_prev->p.next);
		assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
		for (;;) {
			if (unlikely(!clear_flag(iter)))
				return;
			if (clear_flag(iter)->p.reverse_hash > node->p.reverse_hash)
				return;
			next = rcu_dereference(clear_flag(iter)->p.next);
			if (is_removed(next))
				break;
			iter_prev = clear_flag(iter);
			iter = next;
		}
		assert(!is_removed(iter));
		if (is_dummy(iter))
			new_next = flag_dummy(clear_flag(next));
		else
			new_next = clear_flag(next);
		(void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
	}
}

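/*
 * Insert a node at its split-order position in the bucket list,
 * retrying whenever a concurrent update wins the cmpxchg race. With
 * "unique" set, return the already-present node with a matching key
 * instead of inserting. With "dummy" set, the node is a bucket head:
 * its own next pointer carries DUMMY_FLAG. Insertion finishes by
 * garbage-collecting any logically removed nodes encountered in the
 * bucket.
 */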
static
struct rcu_ht_node *_ht_add(struct rcu_ht *ht, struct rcu_table *t,
			    struct rcu_ht_node *node, int unique, int dummy)
{
	struct rcu_ht_node *iter_prev, *iter, *next, *new_node, *new_next,
			*dummy_node;
	unsigned long hash;

	if (!t->size) {
		assert(dummy);
		node->p.next = flag_dummy(NULL);
		return node;	/* Initial first add (head) */
	}
	hash = bit_reverse_ulong(node->p.reverse_hash);
	for (;;) {
		uint32_t chain_len = 0;

		/*
		 * iter_prev points to the non-removed node prior to the
		 * insert location.
		 */
		iter_prev = rcu_dereference(t->tbl[hash & (t->size - 1)]);
		/* We can always skip the dummy node initially */
		iter = rcu_dereference(iter_prev->p.next);
		assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
		for (;;) {
			if (unlikely(!clear_flag(iter)))
				goto insert;
			if (clear_flag(iter)->p.reverse_hash > node->p.reverse_hash)
				goto insert;
			next = rcu_dereference(clear_flag(iter)->p.next);
			if (is_removed(next))
				goto gc_node;
			if (unique
			    && !is_dummy(next)
			    && !ht->compare_fct(node->key, node->key_len,
						clear_flag(iter)->key,
						clear_flag(iter)->key_len))
				return clear_flag(iter);
			/* Only account for identical reverse hash once */
			if (iter_prev->p.reverse_hash != clear_flag(iter)->p.reverse_hash)
				check_resize(ht, t, ++chain_len);
			iter_prev = clear_flag(iter);
			iter = next;
		}
	insert:
		assert(node != clear_flag(iter));
		assert(!is_removed(iter_prev));
		assert(iter_prev != node);
		if (!dummy)
			node->p.next = clear_flag(iter);
		else
			node->p.next = flag_dummy(clear_flag(iter));
		if (is_dummy(iter))
			new_node = flag_dummy(node);
		else
			new_node = node;
		if (uatomic_cmpxchg(&iter_prev->p.next, iter,
				    new_node) != iter)
			continue;	/* retry */
		else
			goto gc_end;
	gc_node:
		assert(!is_removed(iter));
		if (is_dummy(iter))
			new_next = flag_dummy(clear_flag(next));
		else
			new_next = clear_flag(next);
		(void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
		/* retry */
	}
gc_end:
	/* Garbage collect logically removed nodes in the bucket */
	dummy_node = rcu_dereference(t->tbl[hash & (t->size - 1)]);
	_ht_gc_bucket(dummy_node, node);
	return node;
}

static
int _ht_remove(struct rcu_ht *ht, struct rcu_table *t, struct rcu_ht_node *node)
{
	struct rcu_ht_node *dummy, *next, *old;
	int flagged = 0;
	unsigned long hash;

	/* logically delete the node */
	old = rcu_dereference(node->p.next);
	do {
		next = old;
		if (is_removed(next))
			goto end;
		assert(!is_dummy(next));
		old = uatomic_cmpxchg(&node->p.next, next,
				      flag_removed(next));
	} while (old != next);

	/* We performed the (logical) deletion. */
	flagged = 1;

	/*
	 * Ensure that the node is not visible to readers anymore: look up
	 * the node, and remove it (along with any other logically removed
	 * nodes) if found.
	 */
	hash = bit_reverse_ulong(node->p.reverse_hash);
	dummy = rcu_dereference(t->tbl[hash & (t->size - 1)]);
	_ht_gc_bucket(dummy, node);
end:
	/*
	 * Only the thread that successfully flagged the node (and no
	 * other) actually removed it from the hash table.
	 */
	if (flagged) {
		assert(is_removed(rcu_dereference(node->p.next)));
		return 0;
	} else
		return -ENOENT;
}

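/*
 * Populate buckets [first, first + len) with their dummy nodes and
 * splice them into the split-ordered list. Dummy nodes only need the
 * struct _rcu_ht_node prefix of a full node, hence the smaller
 * allocation. Called with resize_mutex held.
 */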
static
void init_table(struct rcu_ht *ht, struct rcu_table *t,
		unsigned long first, unsigned long len)
{
	unsigned long i, end;

	end = first + len;
	for (i = first; i < end; i++) {
		/* Update table size when power of two */
		if (i != 0 && !(i & (i - 1)))
			t->size = i;
		t->tbl[i] = calloc(1, sizeof(struct _rcu_ht_node));
		t->tbl[i]->p.reverse_hash = bit_reverse_ulong(i);
		(void) _ht_add(ht, t, t->tbl[i], 0, 1);
	}
	t->resize_target = t->size = end;
	t->resize_initiated = 0;
}

struct rcu_ht *ht_new(ht_hash_fct hash_fct,
		      ht_compare_fct compare_fct,
		      unsigned long hash_seed,
		      unsigned long init_size,
		      void (*ht_call_rcu)(struct rcu_head *head,
				void (*func)(struct rcu_head *head)))
{
	struct rcu_ht *ht;

	ht = calloc(1, sizeof(struct rcu_ht));
	ht->hash_fct = hash_fct;
	ht->compare_fct = compare_fct;
	ht->hash_seed = hash_seed;
	ht->ht_call_rcu = ht_call_rcu;
	ht->in_progress_resize = 0;
	/* this mutex should not nest in read-side C.S. */
	pthread_mutex_init(&ht->resize_mutex, NULL);
	ht->t = calloc(1, sizeof(struct rcu_table)
		       + (max(init_size, 1) * sizeof(struct rcu_ht_node *)));
	ht->t->size = 0;
	pthread_mutex_lock(&ht->resize_mutex);
	init_table(ht, ht->t, 0, max(init_size, 1));
	pthread_mutex_unlock(&ht->resize_mutex);
	return ht;
}

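/*
 * Usage sketch (illustrative only: test_hash and test_compare stand in
 * for caller-provided callbacks, and call_rcu is the urcu-call-rcu
 * worker function):
 *
 *	struct rcu_ht *ht;
 *
 *	ht = ht_new(test_hash, test_compare, 0x42UL, 1024, call_rcu);
 *	...
 *	ret = ht_destroy(ht);
 */
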
struct rcu_ht_node *ht_lookup(struct rcu_ht *ht, void *key, size_t key_len)
{
	struct rcu_table *t;
	struct rcu_ht_node *node, *next;
	unsigned long hash, reverse_hash;

	hash = ht->hash_fct(key, key_len, ht->hash_seed);
	reverse_hash = bit_reverse_ulong(hash);

	t = rcu_dereference(ht->t);
	node = rcu_dereference(t->tbl[hash & (t->size - 1)]);
	for (;;) {
		if (unlikely(!node))
			break;
		if (unlikely(node->p.reverse_hash > reverse_hash)) {
			node = NULL;
			break;
		}
		next = rcu_dereference(node->p.next);
		if (likely(!is_removed(next))
		    && !is_dummy(next)
		    && likely(!ht->compare_fct(node->key, node->key_len, key, key_len))) {
			break;
		}
		node = clear_flag(next);
	}
	assert(!node || !is_dummy(rcu_dereference(node->p.next)));
	return node;
}

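/*
 * Lookups must be performed within an RCU read-side critical section,
 * which also protects the returned node until rcu_read_unlock().
 * Sketch (key and key_len are caller-supplied):
 *
 *	rcu_read_lock();
 *	node = ht_lookup(ht, key, key_len);
 *	if (node)
 *		... use node within this read-side critical section ...
 *	rcu_read_unlock();
 */
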
void ht_add(struct rcu_ht *ht, struct rcu_ht_node *node)
{
	struct rcu_table *t;
	unsigned long hash;

	hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
	node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);

	t = rcu_dereference(ht->t);
	(void) _ht_add(ht, t, node, 0, 0);
}

struct rcu_ht_node *ht_add_unique(struct rcu_ht *ht, struct rcu_ht_node *node)
{
	struct rcu_table *t;
	unsigned long hash;

	hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
	node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);

	t = rcu_dereference(ht->t);
	return _ht_add(ht, t, node, 1, 0);
}

int ht_remove(struct rcu_ht *ht, struct rcu_ht_node *node)
{
	struct rcu_table *t;

	t = rcu_dereference(ht->t);
	return _ht_remove(ht, t, node);
}

static
int ht_delete_dummy(struct rcu_ht *ht)
{
	struct rcu_table *t;
	struct rcu_ht_node *node;
	unsigned long i;

	t = ht->t;
	/* Check that the table is empty */
	node = t->tbl[0];
	do {
		node = clear_flag(node)->p.next;
		if (!is_dummy(node))
			return -EPERM;
		assert(!is_removed(node));
	} while (clear_flag(node));
	/* Internal sanity check: all nodes left should be dummy */
	for (i = 0; i < t->size; i++) {
		assert(is_dummy(t->tbl[i]->p.next));
		free(t->tbl[i]);
	}
	return 0;
}

/*
 * Should only be called when no more concurrent readers nor writers can
 * possibly access the table.
 */
int ht_destroy(struct rcu_ht *ht)
{
	int ret;

	/* Wait for in-flight resize operations to complete */
	while (uatomic_read(&ht->in_progress_resize))
		poll(NULL, 0, 100);	/* wait for 100ms */
	ret = ht_delete_dummy(ht);
	if (ret)
		return ret;
	free(ht->t);
	free(ht);
	return ret;
}

void ht_count_nodes(struct rcu_ht *ht,
		    unsigned long *count,
		    unsigned long *removed)
{
	struct rcu_table *t;
	struct rcu_ht_node *node, *next;

	*count = 0;
	*removed = 0;

	t = rcu_dereference(ht->t);
	/* Walk the whole list, counting live and logically removed nodes */
	node = rcu_dereference(t->tbl[0]);
	do {
		next = rcu_dereference(node->p.next);
		if (is_removed(next)) {
			assert(!is_dummy(next));
			(*removed)++;
		} else if (!is_dummy(next))
			(*count)++;
		node = clear_flag(next);
	} while (node);
}

static
void ht_free_table_cb(struct rcu_head *head)
{
	struct rcu_table *t =
		caa_container_of(head, struct rcu_table, head);
	free(t);
}

/* called with resize mutex held */
static
void _do_ht_resize(struct rcu_ht *ht)
{
	unsigned long new_size, old_size;
	struct rcu_table *new_t, *old_t;

	old_t = ht->t;
	old_size = old_t->size;

	new_size = CMM_LOAD_SHARED(old_t->resize_target);
	dbg_printf("rculfhash: resize from %lu to %lu buckets\n",
		   old_size, new_size);
	if (old_size == new_size)
		return;
	new_t = malloc(sizeof(struct rcu_table)
		       + (new_size * sizeof(struct rcu_ht_node *)));
	assert(new_size > old_size);
	memcpy(&new_t->tbl, &old_t->tbl,
	       old_size * sizeof(struct rcu_ht_node *));
	init_table(ht, new_t, old_size, new_size - old_size);
	/* Changing table and size atomically wrt lookups */
	rcu_assign_pointer(ht->t, new_t);
	ht->ht_call_rcu(&old_t->head, ht_free_table_cb);
}

static
unsigned long resize_target_update(struct rcu_table *t,
				   int growth_order)
{
	return _uatomic_max(&t->resize_target,
			    t->size << growth_order);
}

void ht_resize(struct rcu_ht *ht, int growth)
{
	struct rcu_table *t = rcu_dereference(ht->t);
	unsigned long target_size;

	target_size = resize_target_update(t, growth);
	if (t->size < target_size) {
		CMM_STORE_SHARED(t->resize_initiated, 1);
		pthread_mutex_lock(&ht->resize_mutex);
		_do_ht_resize(ht);
		pthread_mutex_unlock(&ht->resize_mutex);
	}
}

static
void do_resize_cb(struct rcu_head *head)
{
	struct rcu_resize_work *work =
		caa_container_of(head, struct rcu_resize_work, head);
	struct rcu_ht *ht = work->ht;

	pthread_mutex_lock(&ht->resize_mutex);
	_do_ht_resize(ht);
	pthread_mutex_unlock(&ht->resize_mutex);
	free(work);
	cmm_smp_mb();	/* finish resize before decrement */
	uatomic_dec(&ht->in_progress_resize);
}

static
void ht_resize_lazy(struct rcu_ht *ht, struct rcu_table *t, int growth)
{
	struct rcu_resize_work *work;
	unsigned long target_size;

	target_size = resize_target_update(t, growth);
	if (!CMM_LOAD_SHARED(t->resize_initiated) && t->size < target_size) {
		uatomic_inc(&ht->in_progress_resize);
		cmm_smp_mb();	/* increment resize count before calling it */
		work = malloc(sizeof(*work));
		work->ht = ht;
		ht->ht_call_rcu(&work->head, do_resize_cb);
		CMM_STORE_SHARED(t->resize_initiated, 1);
	}
}