Update resize thresholds
[urcu.git] / rculfhash.c
/*
 * rculfhash.c
 *
 * Userspace RCU library - Lock-Free Expandable RCU Hash Table
 *
 * Copyright 2010-2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define _LGPL_SOURCE
#include <stdlib.h>
#include <errno.h>
#include <assert.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#include <urcu.h>
#include <urcu-call-rcu.h>
#include <urcu/arch.h>
#include <urcu/uatomic.h>
#include <urcu/jhash.h>
#include <urcu/compiler.h>
#include <urcu/rculfhash.h>
#include <pthread.h>

#define DEBUG		/* Test */

#ifdef DEBUG
#define dbg_printf(args...)	printf(args)
#else
#define dbg_printf(args...)
#endif

#define CHAIN_LEN_TARGET		1
#define CHAIN_LEN_RESIZE_THRESHOLD	2

#ifndef max
#define max(a, b)	((a) > (b) ? (a) : (b))
#endif

struct rcu_table {
	unsigned long size;	/* always a power of 2 */
	unsigned long resize_target;
	int resize_initiated;
	struct rcu_head head;
	struct rcu_ht_node *tbl[0];
};

struct rcu_ht {
	struct rcu_table *t;		/* shared */
	ht_hash_fct hash_fct;
	ht_compare_fct compare_fct;
	unsigned long hash_seed;
	pthread_mutex_t resize_mutex;	/* resize mutex: add/del mutex */
	void (*ht_call_rcu)(struct rcu_head *head,
		      void (*func)(struct rcu_head *head));
};

struct rcu_resize_work {
	struct rcu_head head;
	struct rcu_ht *ht;
};

/*
 * Algorithm to reverse bits in a word by lookup table, extended to
 * 64-bit words.
 * Source:
 * http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
 * Originally from the public domain.
 */

static const uint8_t BitReverseTable256[256] =
{
#define R2(n)	(n),   (n) + 2*64,     (n) + 1*64,     (n) + 3*64
#define R4(n)	R2(n), R2((n) + 2*16), R2((n) + 1*16), R2((n) + 3*16)
#define R6(n)	R4(n), R4((n) + 2*4 ), R4((n) + 1*4 ), R4((n) + 3*4 )
	R6(0), R6(2), R6(1), R6(3)
};
#undef R2
#undef R4
#undef R6

static
uint8_t bit_reverse_u8(uint8_t v)
{
	return BitReverseTable256[v];
}

static __attribute__((unused))
uint32_t bit_reverse_u32(uint32_t v)
{
	return ((uint32_t) bit_reverse_u8(v) << 24) |
		((uint32_t) bit_reverse_u8(v >> 8) << 16) |
		((uint32_t) bit_reverse_u8(v >> 16) << 8) |
		((uint32_t) bit_reverse_u8(v >> 24));
}

static __attribute__((unused))
uint64_t bit_reverse_u64(uint64_t v)
{
	return ((uint64_t) bit_reverse_u8(v) << 56) |
		((uint64_t) bit_reverse_u8(v >> 8) << 48) |
		((uint64_t) bit_reverse_u8(v >> 16) << 40) |
		((uint64_t) bit_reverse_u8(v >> 24) << 32) |
		((uint64_t) bit_reverse_u8(v >> 32) << 24) |
		((uint64_t) bit_reverse_u8(v >> 40) << 16) |
		((uint64_t) bit_reverse_u8(v >> 48) << 8) |
		((uint64_t) bit_reverse_u8(v >> 56));
}

static
unsigned long bit_reverse_ulong(unsigned long v)
{
#if (CAA_BITS_PER_LONG == 32)
	return bit_reverse_u32(v);
#else
	return bit_reverse_u64(v);
#endif
}

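/*
 * Background note: the table keeps all nodes, dummy bucket heads
 * included, in a single linked list ordered by bit-reversed hash. With
 * that ordering, the bucket heads of a table of size 2^k are a subset
 * of the bucket heads of size 2^(k+1), so growing the table only
 * requires inserting new dummy nodes, never moving existing nodes.
 * This is the split-ordered list technique (Shalev and Shavit); the
 * helpers above provide the bit reversal it relies on.
 */
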
/*
 * Algorithm to find the log2 of a 32-bit unsigned integer.
 * Source: http://graphics.stanford.edu/~seander/bithacks.html#IntegerLogLookup
 * Originally from the public domain.
 */
static const char LogTable256[256] =
{
#define LT(n)	n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n
	-1, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
	LT(4), LT(5), LT(5), LT(6), LT(6), LT(6), LT(6),
	LT(7), LT(7), LT(7), LT(7), LT(7), LT(7), LT(7), LT(7)
};

uint32_t log2_u32(uint32_t v)
{
	uint32_t t, tt;

	if ((tt = (v >> 16)))
		return (t = (tt >> 8))
				? 24 + LogTable256[t]
				: 16 + LogTable256[tt];
	else
		return (t = (v >> 8))
				? 8 + LogTable256[t]
				: LogTable256[v];
}

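/*
 * log2_u32() returns the integer floor of log2(v): for instance,
 * log2_u32(4) and log2_u32(5) both yield 2. A zero argument hits the
 * -1 entry of LogTable256, so callers should pass v > 0.
 */
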
static
void ht_resize_lazy(struct rcu_ht *ht, struct rcu_table *t, int growth);

static
void check_resize(struct rcu_ht *ht, struct rcu_table *t,
		  uint32_t chain_len)
{
	if (chain_len >= CHAIN_LEN_RESIZE_THRESHOLD)
		ht_resize_lazy(ht, t,
			log2_u32(chain_len - CHAIN_LEN_TARGET - 1));
}

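/*
 * Worked example for check_resize() above: with CHAIN_LEN_TARGET == 1
 * and CHAIN_LEN_RESIZE_THRESHOLD == 2, a chain of length 4 requests a
 * growth order of log2_u32(4 - 1 - 1) == 1 (doubling the bucket
 * count), and a chain of length 8 requests order 2 (a 4x growth).
 */
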
static
struct rcu_ht_node *clear_flag(struct rcu_ht_node *node)
{
	return (struct rcu_ht_node *) (((unsigned long) node) & ~0x1);
}

static
int is_removed(struct rcu_ht_node *node)
{
	return ((unsigned long) node) & 0x1;
}

static
struct rcu_ht_node *flag_removed(struct rcu_ht_node *node)
{
	return (struct rcu_ht_node *) (((unsigned long) node) | 0x1);
}

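/*
 * The helpers above implement pointer tagging: nodes are allocated
 * with at least word alignment, so the low-order bit of a next pointer
 * is free to serve as the "logically removed" mark. clear_flag() must
 * therefore be applied before dereferencing any next pointer read from
 * the list.
 */
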
static
unsigned long _uatomic_max(unsigned long *ptr, unsigned long v)
{
	unsigned long old1, old2;

	old1 = uatomic_read(ptr);
	do {
		old2 = old1;
		if (old2 >= v)
			return old2;
	} while ((old1 = uatomic_cmpxchg(ptr, old2, v)) != old2);
	return v;
}

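/*
 * _ht_add inserts a node at its bit-reversed-hash position: it walks
 * the bucket chain, skipping logically removed entries, until it finds
 * the first node with a greater reverse hash, then links the new node
 * in with a cmpxchg on the predecessor's next pointer, restarting from
 * the bucket head whenever a concurrent update wins the race.
 */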
static
void _ht_add(struct rcu_ht *ht, struct rcu_table *t, struct rcu_ht_node *node)
{
	struct rcu_ht_node *iter_prev, *iter, *iter_prev_next, *next;

	if (!t->size)
		return;
	for (;;) {
		uint32_t chain_len = 0;

		/*
		 * iter_prev points to the non-removed node prior to the
		 * insert location.
		 * iter iterates until it finds the next non-removed
		 * node.
		 */
		iter_prev = rcu_dereference(t->tbl[node->hash & (t->size - 1)]);
		/* We can always skip the dummy node initially */
		iter_prev_next = next = rcu_dereference(iter_prev->next);
		assert(iter_prev);
		assert(iter_prev->reverse_hash <= node->reverse_hash);
		for (;;) {
			iter = next;
			if (unlikely(!clear_flag(iter)))
				break;
			next = rcu_dereference(clear_flag(iter)->next);
			if (unlikely(is_removed(next)))
				continue;
			if (clear_flag(iter)->reverse_hash > node->reverse_hash)
				break;
			/* Only account for identical reverse hash once */
			if (iter_prev->reverse_hash != clear_flag(iter)->reverse_hash)
				check_resize(ht, t, ++chain_len);
			iter_prev = clear_flag(iter);
			iter_prev_next = next;
		}
		assert(node != iter);
		assert(!is_removed(iter_prev));
		assert(iter_prev != node);
		node->next = iter;
		if (uatomic_cmpxchg(&iter_prev->next, iter_prev_next,
				    node) != iter_prev_next)
			continue;
		else
			break;
	}
}

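/*
 * _ht_remove is a two-phase removal: it first flags the node's next
 * pointer as removed (winning this cmpxchg decides which thread owns
 * the removal), then unlinks the node from its predecessor, retrying
 * the traversal if a concurrent update interferes.
 */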
static
int _ht_remove(struct rcu_ht *ht, struct rcu_table *t, struct rcu_ht_node *node)
{
	struct rcu_ht_node *iter_prev, *iter, *iter_prev_next, *next, *old;
	unsigned long chain_len;
	int found;
	int flagged = 0;

retry:
	chain_len = 0;
	found = 0;
	/*
	 * iter_prev points to the non-removed node prior to the remove
	 * location.
	 * node is the node to remove.
	 */
	iter_prev = rcu_dereference(t->tbl[node->hash & (t->size - 1)]);
	/* We can always skip the dummy node initially */
	iter_prev_next = next = rcu_dereference(iter_prev->next);
	assert(iter_prev);
	assert(iter_prev->reverse_hash <= node->reverse_hash);
	for (;;) {
		iter = next;
		if (unlikely(!clear_flag(iter)))
			break;
		next = rcu_dereference(clear_flag(iter)->next);
		if (iter == node) {
			found = 1;
			break;
		}
		if (unlikely(is_removed(next)))
			continue;
		if (clear_flag(iter)->reverse_hash > node->reverse_hash)
			break;
		iter_prev = clear_flag(iter);
		iter_prev_next = next;
	}
	if (!found)
		goto end;
	if (!flagged) {
		if (is_removed(next))
			goto end;
		/* set deletion flag */
		if ((old = uatomic_cmpxchg(&iter->next, next,
					   flag_removed(next))) != next) {
			if (old == flag_removed(next))
				goto end;
			else
				goto retry;
		}
		flagged = 1;
	}
	/*
	 * Remove the element from the list.
	 * - Retry if there has been a concurrent add before us.
	 * - Retry if the prev node has been deleted (its next removed
	 *   flag would be set).
	 * - There cannot be a concurrent delete for our position, because
	 *   we won the deletion flag cmpxchg.
	 * - If there is a concurrent add or remove after us while our
	 *   removed flag is set, it will skip us and link directly after
	 *   the prior non-removed node before us. In this case, the
	 *   retry will not find the node in the list anymore.
	 */
	if (uatomic_cmpxchg(&iter_prev->next, iter_prev_next,
			    clear_flag(next)) != iter_prev_next)
		goto retry;
end:
	/*
	 * Only the thread that won the flagging cmpxchg (and no other)
	 * actually removed the node from the hash.
	 */
	if (flagged)
		return 0;
	else
		return -ENOENT;
}

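/*
 * init_table creates the dummy head node of every bucket in
 * [first, first + len). t->size is only bumped at each power of two,
 * so that _ht_add hashes each new dummy node into the portion of the
 * table that is already initialized.
 */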
static
void init_table(struct rcu_ht *ht, struct rcu_table *t,
		unsigned long first, unsigned long len)
{
	unsigned long i, end;

	end = first + len;
	for (i = first; i < end; i++) {
		/* Update table size when power of two */
		if (i != 0 && !(i & (i - 1)))
			t->size = i;
		t->tbl[i] = calloc(1, sizeof(struct rcu_ht_node));
		t->tbl[i]->dummy = 1;
		t->tbl[i]->hash = i;
		t->tbl[i]->reverse_hash = bit_reverse_ulong(i);
		_ht_add(ht, t, t->tbl[i]);
	}
	t->resize_target = t->size = end;
	t->resize_initiated = 0;
}

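/*
 * ht_new allocates a table of max(init_size, 1) buckets. The caller
 * supplies the hash and compare callbacks, a hash seed, and the
 * call_rcu() implementation of the urcu flavor in use, which the table
 * uses to defer freeing of old bucket arrays and to run lazy resizes.
 */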
struct rcu_ht *ht_new(ht_hash_fct hash_fct,
		      ht_compare_fct compare_fct,
		      unsigned long hash_seed,
		      unsigned long init_size,
		      void (*ht_call_rcu)(struct rcu_head *head,
				void (*func)(struct rcu_head *head)))
{
	struct rcu_ht *ht;

	ht = calloc(1, sizeof(struct rcu_ht));
	ht->hash_fct = hash_fct;
	ht->compare_fct = compare_fct;
	ht->hash_seed = hash_seed;
	ht->ht_call_rcu = ht_call_rcu;
	/* this mutex should not nest in read-side C.S. */
	pthread_mutex_init(&ht->resize_mutex, NULL);
	ht->t = calloc(1, sizeof(struct rcu_table)
		       + (max(init_size, 1) * sizeof(struct rcu_ht_node *)));
	ht->t->size = 0;
	pthread_mutex_lock(&ht->resize_mutex);
	init_table(ht, ht->t, 0, max(init_size, 1));
	pthread_mutex_unlock(&ht->resize_mutex);
	return ht;
}

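/*
 * Usage sketch (hypothetical caller, not part of this file). It
 * assumes a caller-provided ht_hash_fct (a jhash wrapper named
 * test_hash here) and the call_rcu() implementation declared in
 * urcu-call-rcu.h. Compare callbacks return 0 on match, memcmp-style:
 *
 *	static
 *	int test_compare(void *key1, size_t key1_len,
 *			 void *key2, size_t key2_len)
 *	{
 *		if (key1_len != key2_len)
 *			return -1;
 *		return memcmp(key1, key2, key1_len);
 *	}
 *
 *	struct rcu_ht *ht;
 *	struct rcu_ht_node *node;
 *
 *	ht = ht_new(test_hash, test_compare, 0x42UL, 8, call_rcu);
 *	node->key = key;
 *	node->key_len = key_len;
 *	ht_add(ht, node);
 *	...
 *	rcu_read_lock();
 *	node = ht_lookup(ht, key, key_len);
 *	rcu_read_unlock();
 */
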
struct rcu_ht_node *ht_lookup(struct rcu_ht *ht, void *key, size_t key_len)
{
	struct rcu_table *t;
	struct rcu_ht_node *node;
	unsigned long hash, reverse_hash;

	hash = ht->hash_fct(key, key_len, ht->hash_seed);
	reverse_hash = bit_reverse_ulong(hash);

	t = rcu_dereference(ht->t);
	node = rcu_dereference(t->tbl[hash & (t->size - 1)]);
	for (;;) {
		if (unlikely(!node))
			break;
		if (unlikely(node->reverse_hash > reverse_hash)) {
			node = NULL;
			break;
		}
		if (!ht->compare_fct(node->key, node->key_len, key, key_len)) {
			if (unlikely(is_removed(rcu_dereference(node->next))))
				node = NULL;
			break;
		}
		node = clear_flag(rcu_dereference(node->next));
	}
	return node;
}

void ht_add(struct rcu_ht *ht, struct rcu_ht_node *node)
{
	struct rcu_table *t;

	node->hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
	node->reverse_hash = bit_reverse_ulong((unsigned long) node->hash);

	t = rcu_dereference(ht->t);
	_ht_add(ht, t, node);
}

int ht_remove(struct rcu_ht *ht, struct rcu_ht_node *node)
{
	struct rcu_table *t;

	t = rcu_dereference(ht->t);
	return _ht_remove(ht, t, node);
}

static
int ht_delete_dummy(struct rcu_ht *ht)
{
	struct rcu_table *t;
	struct rcu_ht_node *node;
	unsigned long i;

	t = ht->t;
	/* Check that the table is empty */
	node = t->tbl[0];
	do {
		if (!node->dummy)
			return -EPERM;
		node = node->next;
	} while (node);
	/* Internal sanity check: all nodes left should be dummy */
	for (i = 0; i < t->size; i++) {
		assert(t->tbl[i]->dummy);
		free(t->tbl[i]);
	}
	return 0;
}

/*
 * Should only be called when no concurrent readers or writers can
 * possibly access the table.
 */
int ht_destroy(struct rcu_ht *ht)
{
	int ret;

	ret = ht_delete_dummy(ht);
	if (ret)
		return ret;
	free(ht->t);
	free(ht);
	return ret;
}

static
void ht_free_table_cb(struct rcu_head *head)
{
	struct rcu_table *t =
		caa_container_of(head, struct rcu_table, head);
	free(t);
}

/* called with resize mutex held */
static
void _do_ht_resize(struct rcu_ht *ht)
{
	unsigned long new_size, old_size;
	struct rcu_table *new_t, *old_t;

	old_t = ht->t;
	old_size = old_t->size;

	new_size = CMM_LOAD_SHARED(old_t->resize_target);
	dbg_printf("rculfhash: resize from %lu to %lu buckets\n",
		   old_size, new_size);
	if (old_size == new_size)
		return;
	new_t = malloc(sizeof(struct rcu_table)
		       + (new_size * sizeof(struct rcu_ht_node *)));
	assert(new_size > old_size);
	memcpy(&new_t->tbl, &old_t->tbl,
	       old_size * sizeof(struct rcu_ht_node *));
	init_table(ht, new_t, old_size, new_size - old_size);
	/* Changing table and size atomically wrt lookups */
	rcu_assign_pointer(ht->t, new_t);
	ht->ht_call_rcu(&old_t->head, ht_free_table_cb);
}

static
unsigned long resize_target_update(struct rcu_table *t,
				   int growth_order)
{
	return _uatomic_max(&t->resize_target,
			    t->size << growth_order);
}

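/*
 * ht_resize grows the table synchronously in the calling thread by
 * "growth" powers of two, whereas ht_resize_lazy() below defers the
 * same work to a call_rcu worker. Both paths take the resize mutex
 * around _do_ht_resize(), and the resize_target/resize_initiated
 * fields keep redundant resize requests cheap.
 */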
void ht_resize(struct rcu_ht *ht, int growth)
{
	struct rcu_table *t = rcu_dereference(ht->t);
	unsigned long target_size;

	target_size = resize_target_update(t, growth);
	if (t->size < target_size) {
		CMM_STORE_SHARED(t->resize_initiated, 1);
		pthread_mutex_lock(&ht->resize_mutex);
		_do_ht_resize(ht);
		pthread_mutex_unlock(&ht->resize_mutex);
	}
}

static
void do_resize_cb(struct rcu_head *head)
{
	struct rcu_resize_work *work =
		caa_container_of(head, struct rcu_resize_work, head);
	struct rcu_ht *ht = work->ht;

	pthread_mutex_lock(&ht->resize_mutex);
	_do_ht_resize(ht);
	pthread_mutex_unlock(&ht->resize_mutex);
	free(work);
}

static
void ht_resize_lazy(struct rcu_ht *ht, struct rcu_table *t, int growth)
{
	struct rcu_resize_work *work;
	unsigned long target_size;

	target_size = resize_target_update(t, growth);
	if (!CMM_LOAD_SHARED(t->resize_initiated) && t->size < target_size) {
		work = malloc(sizeof(*work));
		work->ht = ht;
		ht->ht_call_rcu(&work->head, do_resize_cb);
		CMM_STORE_SHARED(t->resize_initiated, 1);
	}
}