rculfhash: fix resize (use log2 of chain length)
[urcu.git] / rculfhash.c
/*
 * rculfhash.c
 *
 * Userspace RCU library - Lock-Free Expandable RCU Hash Table
 *
 * Copyright 2010-2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define _LGPL_SOURCE
#include <stdlib.h>
#include <errno.h>
#include <assert.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#include <urcu.h>
#include <urcu-call-rcu.h>
#include <urcu/arch.h>
#include <urcu/uatomic.h>
#include <urcu/jhash.h>
#include <urcu/compiler.h>
#include <urcu/rculfhash.h>
#include <pthread.h>

#define DEBUG		/* Test */

#ifdef DEBUG
#define dbg_printf(args...)	printf(args)
#else
#define dbg_printf(args...)
#endif

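/*
 * Once a bucket chain reaches this many nodes, a resize is triggered.
 * The growth order requested is log2 of the observed chain length, so
 * longer chains ask for proportionally larger tables.
 */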
#define BUCKET_SIZE_RESIZE_THRESHOLD	8

#ifndef max
#define max(a, b)	((a) > (b) ? (a) : (b))
#endif

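/*
 * The table is an array of bucket heads, each pointing to a dummy node.
 * size is always a power of 2, so a bucket is selected with
 * hash & (size - 1).
 */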
struct rcu_table {
	unsigned long size;	/* always a power of 2 */
	unsigned long resize_target;
	struct rcu_head head;
	struct rcu_ht_node *tbl[0];
};

struct rcu_ht {
	struct rcu_table *t;		/* shared */
	ht_hash_fct hash_fct;
	void *hashseed;
	pthread_mutex_t resize_mutex;	/* resize mutex: add/del mutex */
	void (*ht_call_rcu)(struct rcu_head *head,
			    void (*func)(struct rcu_head *head));
};

struct rcu_resize_work {
	struct rcu_head head;
	struct rcu_ht *ht;
};

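/*
 * Nodes are kept in a single linked list, ordered by the bit-reversed
 * value of their hash. With this "split-ordered" ordering (Shalev and
 * Shavit), growing the table only requires inserting new dummy nodes:
 * existing nodes never move, which is what makes lock-free expansion
 * possible.
 */
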
/*
 * Algorithm to reverse bits in a word by lookup table, extended to
 * 64-bit words.
 * Source:
 * http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
 * Originally from Public Domain.
 */

static const uint8_t BitReverseTable256[256] =
{
#define R2(n)	(n),   (n) + 2*64,     (n) + 1*64,     (n) + 3*64
#define R4(n)	R2(n), R2((n) + 2*16), R2((n) + 1*16), R2((n) + 3*16)
#define R6(n)	R4(n), R4((n) + 2*4 ), R4((n) + 1*4 ), R4((n) + 3*4 )
	R6(0), R6(2), R6(1), R6(3)
};
#undef R2
#undef R4
#undef R6

static
uint8_t bit_reverse_u8(uint8_t v)
{
	return BitReverseTable256[v];
}

static __attribute__((unused))
uint32_t bit_reverse_u32(uint32_t v)
{
	return ((uint32_t) bit_reverse_u8(v) << 24) |
		((uint32_t) bit_reverse_u8(v >> 8) << 16) |
		((uint32_t) bit_reverse_u8(v >> 16) << 8) |
		((uint32_t) bit_reverse_u8(v >> 24));
}

static __attribute__((unused))
uint64_t bit_reverse_u64(uint64_t v)
{
	return ((uint64_t) bit_reverse_u8(v) << 56) |
		((uint64_t) bit_reverse_u8(v >> 8) << 48) |
		((uint64_t) bit_reverse_u8(v >> 16) << 40) |
		((uint64_t) bit_reverse_u8(v >> 24) << 32) |
		((uint64_t) bit_reverse_u8(v >> 32) << 24) |
		((uint64_t) bit_reverse_u8(v >> 40) << 16) |
		((uint64_t) bit_reverse_u8(v >> 48) << 8) |
		((uint64_t) bit_reverse_u8(v >> 56));
}

static
unsigned long bit_reverse_ulong(unsigned long v)
{
#if (CAA_BITS_PER_LONG == 32)
	return bit_reverse_u32(v);
#else
	return bit_reverse_u64(v);
#endif
}

/*
 * Algorithm to find the log2 of a 32-bit unsigned integer.
 * Source: http://graphics.stanford.edu/~seander/bithacks.html#IntegerLogLookup
 * Originally from Public Domain.
 */
static const char LogTable256[256] =
{
#define LT(n)	n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n
	-1, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
	LT(4), LT(5), LT(5), LT(6), LT(6), LT(6), LT(6),
	LT(7), LT(7), LT(7), LT(7), LT(7), LT(7), LT(7), LT(7)
};
#undef LT

static
uint32_t log2_u32(uint32_t v)
{
	uint32_t t, tt;

	if ((tt = (v >> 16)))
		return (t = (tt >> 8))
				? 24 + LogTable256[t]
				: 16 + LogTable256[tt];
	else
		return (t = (v >> 8))
				? 8 + LogTable256[t]
				: LogTable256[v];
}

static
void ht_resize_lazy(struct rcu_ht *ht, struct rcu_table *t, int growth);

static
void check_resize(struct rcu_ht *ht, struct rcu_table *t,
		  uint32_t chain_len)
{
	if (chain_len >= BUCKET_SIZE_RESIZE_THRESHOLD)
		ht_resize_lazy(ht, t, log2_u32(chain_len));
}

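/*
 * The least-significant bit of a node's next pointer flags the node as
 * logically removed. The helpers below strip, test, and set that bit.
 */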
static
struct rcu_ht_node *clear_flag(struct rcu_ht_node *node)
{
	return (struct rcu_ht_node *) (((unsigned long) node) & ~0x1);
}

static
int is_removed(struct rcu_ht_node *node)
{
	return ((unsigned long) node) & 0x1;
}

static
struct rcu_ht_node *flag_removed(struct rcu_ht_node *node)
{
	return (struct rcu_ht_node *) (((unsigned long) node) | 0x1);
}

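/*
 * Atomically update *ptr to max(*ptr, v) and return the resulting
 * maximum. Retries the cmpxchg until either the stored value is already
 * at least v or the store succeeds.
 */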
static
unsigned long _uatomic_max(unsigned long *ptr, unsigned long v)
{
	unsigned long old1, old2;

	old1 = uatomic_read(ptr);
	do {
		old2 = old1;
		if (old2 >= v)
			return old2;
	} while ((old1 = uatomic_cmpxchg(ptr, old2, v)) != old2);
	return v;
}

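/*
 * Lock-free insert: walk the bucket chain from its dummy node, keeping
 * the reverse-hash order, then link the new node in with a cmpxchg on
 * the predecessor's next pointer. Restart from the bucket head whenever
 * a concurrent update makes the cmpxchg fail. Chain length is sampled
 * during the walk to trigger a resize when needed.
 */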
static
void _ht_add(struct rcu_ht *ht, struct rcu_table *t, struct rcu_ht_node *node)
{
	struct rcu_ht_node *iter_prev = NULL, *iter = NULL;

	if (!t->size)
		return;
	for (;;) {
		uint32_t chain_len = 0;

		iter_prev = rcu_dereference(t->tbl[node->hash & (t->size - 1)]);
		assert(iter_prev);
		assert(iter_prev->reverse_hash <= node->reverse_hash);
		for (;;) {
			iter = clear_flag(rcu_dereference(iter_prev->next));
			if (unlikely(!iter))
				break;
			if (iter->reverse_hash < node->reverse_hash)
				break;
			iter_prev = iter;
			check_resize(ht, t, ++chain_len);
		}
		/* add in iter_prev->next */
		if (is_removed(iter))
			continue;
		assert(node != iter);
		node->next = iter;
		assert(iter_prev != node);
		if (uatomic_cmpxchg(&iter_prev->next, iter, node) != iter)
			continue;
		break;
	}
}

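/*
 * Two-phase lock-free removal: first mark the node logically removed by
 * setting the low bit of its next pointer with cmpxchg (only one thread
 * can win this), then physically unlink it from its predecessor. Either
 * step retries from the bucket head if a concurrent add interferes.
 */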
static
int _ht_remove(struct rcu_ht *ht, struct rcu_table *t, struct rcu_ht_node *node)
{
	struct rcu_ht_node *iter_prev, *iter, *next, *old;
	int found, ret = 0;
	int flagged = 0;

retry:
	found = 0;
	iter_prev = rcu_dereference(t->tbl[node->hash & (t->size - 1)]);
	assert(iter_prev);
	assert(iter_prev->reverse_hash <= node->reverse_hash);
	for (;;) {
		iter = clear_flag(rcu_dereference(iter_prev->next));
		if (unlikely(!iter))
			break;
		if (iter->reverse_hash < node->reverse_hash)
			break;
		if (iter == node) {
			found = 1;
			break;
		}
		iter_prev = iter;
	}
	if (!found) {
		ret = -ENOENT;
		goto end;
	}
	next = rcu_dereference(iter->next);
	if (!flagged) {
		if (is_removed(next)) {
			ret = -ENOENT;
			goto end;
		}
		/* set deletion flag */
		if ((old = uatomic_cmpxchg(&iter->next, next,
					   flag_removed(next))) != next) {
			if (old == flag_removed(next)) {
				ret = -ENOENT;
				goto end;
			} else {
				goto retry;
			}
		}
		flagged = 1;
	}
	/*
	 * Remove the element from the list. Retry if there has been a
	 * concurrent add (there cannot be a concurrent delete, because
	 * we won the deletion flag cmpxchg).
	 */
	if (uatomic_cmpxchg(&iter_prev->next, iter, clear_flag(next)) != iter)
		goto retry;
end:
	return ret;
}

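/*
 * Populate buckets [first, first + len) with dummy nodes. Each dummy
 * node carries the bucket index as its hash; adding it to the
 * reverse-hash-ordered list splits an existing chain in place, so old
 * buckets keep working while the table grows.
 */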
static
void init_table(struct rcu_ht *ht, struct rcu_table *t,
		unsigned long first, unsigned long len)
{
	unsigned long i, end;

	end = first + len;
	for (i = first; i < end; i++) {
		/* Update table size when power of two */
		if (i != 0 && !(i & (i - 1)))
			t->size = i;
		t->tbl[i] = calloc(1, sizeof(struct rcu_ht_node));
		t->tbl[i]->dummy = 1;
		t->tbl[i]->hash = i;
		t->tbl[i]->reverse_hash = bit_reverse_ulong(i);
		_ht_add(ht, t, t->tbl[i]);
	}
	t->resize_target = t->size = end;
}

struct rcu_ht *ht_new(ht_hash_fct hash_fct,
		      void *hashseed,
		      unsigned long init_size,
		      void (*ht_call_rcu)(struct rcu_head *head,
				void (*func)(struct rcu_head *head)))
{
	struct rcu_ht *ht;

	ht = calloc(1, sizeof(struct rcu_ht));
	ht->hash_fct = hash_fct;
	ht->hashseed = hashseed;
	ht->ht_call_rcu = ht_call_rcu;
	/* this mutex should not nest in read-side C.S. */
	pthread_mutex_init(&ht->resize_mutex, NULL);
	ht->t = calloc(1, sizeof(struct rcu_table)
		       + (max(init_size, 1) * sizeof(struct rcu_ht_node *)));
	ht->t->size = 0;
	pthread_mutex_lock(&ht->resize_mutex);
	init_table(ht, ht->t, 0, max(init_size, 1));
	pthread_mutex_unlock(&ht->resize_mutex);
	return ht;
}

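/*
 * Lookup walks the bucket chain in reverse-hash order and can stop as
 * soon as a node with a larger reverse hash is seen. A matching node
 * whose next pointer carries the removal flag is treated as absent.
 */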
struct rcu_ht_node *ht_lookup(struct rcu_ht *ht, void *key)
{
	struct rcu_table *t;
	struct rcu_ht_node *node;
	unsigned long hash, reverse_hash;

	hash = ht->hash_fct(ht->hashseed, key);
	reverse_hash = bit_reverse_ulong(hash);

	t = rcu_dereference(ht->t);
	node = rcu_dereference(t->tbl[hash & (t->size - 1)]);
	for (;;) {
		if (unlikely(!node))
			break;
		if (node->reverse_hash > reverse_hash) {
			node = NULL;
			break;
		}
		if (node->key == key) {
			if (is_removed(rcu_dereference(node->next)))
				node = NULL;
			break;
		}
		node = clear_flag(rcu_dereference(node->next));
	}
	return node;
}

void ht_add(struct rcu_ht *ht, struct rcu_ht_node *node)
{
	struct rcu_table *t;

	node->hash = ht->hash_fct(ht->hashseed, node->key);
	node->reverse_hash = bit_reverse_ulong((unsigned long) node->hash);

	t = rcu_dereference(ht->t);
	_ht_add(ht, t, node);
}

int ht_remove(struct rcu_ht *ht, struct rcu_ht_node *node)
{
	struct rcu_table *t;

	t = rcu_dereference(ht->t);
	return _ht_remove(ht, t, node);
}

static
int ht_delete_dummy(struct rcu_ht *ht)
{
	struct rcu_table *t;
	struct rcu_ht_node *node;
	unsigned long i;

	t = ht->t;
	/* Check that the table is empty */
	node = t->tbl[0];
	do {
		if (!node->dummy)
			return -EPERM;
		node = node->next;
	} while (node);
	/* Internal sanity check: all nodes left should be dummy */
	for (i = 0; i < t->size; i++) {
		assert(t->tbl[i]->dummy);
		free(t->tbl[i]);
	}
	return 0;
}

/*
 * Should only be called when no more concurrent readers nor writers can
 * possibly access the table.
 */
int ht_destroy(struct rcu_ht *ht)
{
	int ret;

	ret = ht_delete_dummy(ht);
	if (ret)
		return ret;
	free(ht->t);
	free(ht);
	return ret;
}

static
void ht_free_table_cb(struct rcu_head *head)
{
	struct rcu_table *t =
		caa_container_of(head, struct rcu_table, head);
	free(t);
}

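/*
 * Grow the table to the current resize target. Called with the resize
 * mutex held: allocate a larger table, copy the existing bucket
 * pointers, initialize the new buckets, then publish the new table with
 * rcu_assign_pointer(). The old table is freed through call_rcu once
 * all pre-existing readers have finished with it.
 */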
static
void _do_ht_resize(struct rcu_ht *ht)
{
	unsigned long new_size, old_size;
	struct rcu_table *new_t, *old_t;

	old_t = ht->t;
	old_size = old_t->size;

	new_size = CMM_LOAD_SHARED(old_t->resize_target);
	dbg_printf("rculfhash: resize from %lu to %lu buckets\n",
		   old_size, new_size);
	if (old_size == new_size)
		return;
	assert(new_size > old_size);
	new_t = malloc(sizeof(struct rcu_table)
		       + (new_size * sizeof(struct rcu_ht_node *)));
	memcpy(&new_t->tbl, &old_t->tbl,
	       old_size * sizeof(struct rcu_ht_node *));
	init_table(ht, new_t, old_size, new_size - old_size);
	new_t->size = new_size;
	/* Changing table and size atomically wrt lookups */
	rcu_assign_pointer(ht->t, new_t);
	ht->ht_call_rcu(&old_t->head, ht_free_table_cb);
}

static
unsigned long resize_target_update(struct rcu_table *t,
				   int growth_order)
{
	return _uatomic_max(&t->resize_target,
			    t->size << growth_order);
}

void ht_resize(struct rcu_ht *ht, int growth)
{
	struct rcu_table *t = rcu_dereference(ht->t);
	unsigned long target_size;

	target_size = resize_target_update(t, growth);
	if (t->size < target_size) {
		pthread_mutex_lock(&ht->resize_mutex);
		_do_ht_resize(ht);
		pthread_mutex_unlock(&ht->resize_mutex);
	}
}

static
void do_resize_cb(struct rcu_head *head)
{
	struct rcu_resize_work *work =
		caa_container_of(head, struct rcu_resize_work, head);
	struct rcu_ht *ht = work->ht;

	pthread_mutex_lock(&ht->resize_mutex);
	_do_ht_resize(ht);
	pthread_mutex_unlock(&ht->resize_mutex);
	free(work);
}

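/*
 * Defer the resize to a call_rcu worker thread so the updater that
 * detected a long chain does not block; the worker takes the resize
 * mutex and performs the actual resize.
 */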
static
void ht_resize_lazy(struct rcu_ht *ht, struct rcu_table *t, int growth)
{
	struct rcu_resize_work *work;
	unsigned long target_size;

	target_size = resize_target_update(t, growth);
	if (t->size < target_size) {
		work = malloc(sizeof(*work));
		work->ht = ht;
		ht->ht_call_rcu(&work->head, do_resize_cb);
	}
}