Comment list behavior wrt lock-freedom
[urcu.git] / rculfhash.c
/*
 * rculfhash.c
 *
 * Userspace RCU library - Lock-Free Expandable RCU Hash Table
 *
 * Copyright 2010-2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define _LGPL_SOURCE
#include <stdlib.h>
#include <errno.h>
#include <assert.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#include <urcu.h>
#include <urcu-call-rcu.h>
#include <urcu/arch.h>
#include <urcu/uatomic.h>
#include <urcu/jhash.h>
#include <urcu/compiler.h>
#include <urcu/rculfhash.h>
#include <pthread.h>

#define DEBUG /* Test */

#ifdef DEBUG
#define dbg_printf(args...) printf(args)
#else
#define dbg_printf(args...)
#endif

#define CHAIN_LEN_TARGET 4
#define CHAIN_LEN_RESIZE_THRESHOLD 16

#ifndef max
#define max(a, b) ((a) > (b) ? (a) : (b))
#endif

struct rcu_table {
        unsigned long size; /* always a power of 2 */
        unsigned long resize_target;
        struct rcu_head head;
        struct rcu_ht_node *tbl[0];
};

struct rcu_ht {
        struct rcu_table *t; /* shared */
        ht_hash_fct hash_fct;
        ht_compare_fct compare_fct;
        unsigned long hash_seed;
        pthread_mutex_t resize_mutex; /* resize mutex: add/del mutex */
        void (*ht_call_rcu)(struct rcu_head *head,
                        void (*func)(struct rcu_head *head));
};

struct rcu_resize_work {
        struct rcu_head head;
        struct rcu_ht *ht;
};

/*
 * Algorithm to reverse bits in a word by lookup table, extended to
 * 64-bit words.
 * Source:
 * http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
 * Originally from Public Domain.
 */

static const uint8_t BitReverseTable256[256] =
{
#define R2(n) (n), (n) + 2*64, (n) + 1*64, (n) + 3*64
#define R4(n) R2(n), R2((n) + 2*16), R2((n) + 1*16), R2((n) + 3*16)
#define R6(n) R4(n), R4((n) + 2*4 ), R4((n) + 1*4 ), R4((n) + 3*4 )
        R6(0), R6(2), R6(1), R6(3)
};
#undef R2
#undef R4
#undef R6

static
uint8_t bit_reverse_u8(uint8_t v)
{
        return BitReverseTable256[v];
}

static __attribute__((unused))
uint32_t bit_reverse_u32(uint32_t v)
{
        return ((uint32_t) bit_reverse_u8(v) << 24) |
                ((uint32_t) bit_reverse_u8(v >> 8) << 16) |
                ((uint32_t) bit_reverse_u8(v >> 16) << 8) |
                ((uint32_t) bit_reverse_u8(v >> 24));
}

static __attribute__((unused))
uint64_t bit_reverse_u64(uint64_t v)
{
        return ((uint64_t) bit_reverse_u8(v) << 56) |
                ((uint64_t) bit_reverse_u8(v >> 8) << 48) |
                ((uint64_t) bit_reverse_u8(v >> 16) << 40) |
                ((uint64_t) bit_reverse_u8(v >> 24) << 32) |
                ((uint64_t) bit_reverse_u8(v >> 32) << 24) |
                ((uint64_t) bit_reverse_u8(v >> 40) << 16) |
                ((uint64_t) bit_reverse_u8(v >> 48) << 8) |
                ((uint64_t) bit_reverse_u8(v >> 56));
}

static
unsigned long bit_reverse_ulong(unsigned long v)
{
#if (CAA_BITS_PER_LONG == 32)
        return bit_reverse_u32(v);
#else
        return bit_reverse_u64(v);
#endif
}

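/*
 * Illustrative example (8-bit values for brevity): nodes are kept in a
 * single list ordered by bit-reversed hash, so growing the table only
 * appends new dummy buckets and never moves existing nodes between
 * chains.  For instance:
 *
 *   bit_reverse_u8(0x01) == 0x80
 *   bit_reverse_u8(0x02) == 0x40
 *   bit_reverse_u8(0x03) == 0xc0
 *
 * With a 4-bucket table, the dummy nodes for buckets 0, 2, 1, 3 thus
 * appear in that list order (0x00 < 0x40 < 0x80 < 0xc0), and every key
 * whose hash ends in bits "b" sorts right after bucket b's dummy node.
 */
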
/*
 * Algorithm to find the log2 of a 32-bit unsigned integer.
 * source: http://graphics.stanford.edu/~seander/bithacks.html#IntegerLogLookup
 * Originally from Public Domain.
 */
static const char LogTable256[256] =
{
#define LT(n) n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n
        -1, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
        LT(4), LT(5), LT(5), LT(6), LT(6), LT(6), LT(6),
        LT(7), LT(7), LT(7), LT(7), LT(7), LT(7), LT(7), LT(7)
};

uint32_t log2_u32(uint32_t v)
{
        uint32_t t, tt;

        if ((tt = (v >> 16)))
                return (t = (tt >> 8))
                        ? 24 + LogTable256[t]
                        : 16 + LogTable256[tt];
        else
                return (t = (v >> 8))
                        ? 8 + LogTable256[t]
                        : LogTable256[v];
}

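/*
 * Example: log2_u32() returns the floor of log2(v) for v > 0, e.g.
 * log2_u32(1) == 0, log2_u32(16) == 4 and log2_u32(1000) == 9.
 */
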
static
void ht_resize_lazy(struct rcu_ht *ht, struct rcu_table *t, int growth);

static
void check_resize(struct rcu_ht *ht, struct rcu_table *t,
                uint32_t chain_len)
{
        if (chain_len >= CHAIN_LEN_RESIZE_THRESHOLD)
                ht_resize_lazy(ht, t,
                        log2_u32(chain_len - CHAIN_LEN_TARGET));
}

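/*
 * Example: a walk that observes a chain of length 16 requests a growth
 * order of log2_u32(16 - 4) == 3, i.e. a target table 2^3 = 8 times
 * larger, which brings the expected chain length back under
 * CHAIN_LEN_TARGET.
 */
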
static
struct rcu_ht_node *clear_flag(struct rcu_ht_node *node)
{
        return (struct rcu_ht_node *) (((unsigned long) node) & ~0x1);
}

static
int is_removed(struct rcu_ht_node *node)
{
        return ((unsigned long) node) & 0x1;
}

static
struct rcu_ht_node *flag_removed(struct rcu_ht_node *node)
{
        return (struct rcu_ht_node *) (((unsigned long) node) | 0x1);
}

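/*
 * Note: the removal flag lives in the low bit of a node's next pointer
 * (nodes are at least word-aligned), in the style of Harris' lock-free
 * linked lists: a node is first marked logically removed by setting the
 * bit, then physically unlinked by updating its predecessor's next
 * pointer (see _ht_remove() below).
 */
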
static
unsigned long _uatomic_max(unsigned long *ptr, unsigned long v)
{
        unsigned long old1, old2;

        old1 = uatomic_read(ptr);
        do {
                old2 = old1;
                if (old2 >= v)
                        return old2;
        } while ((old1 = uatomic_cmpxchg(ptr, old2, v)) != old2);
        return v;
}

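/*
 * _uatomic_max() atomically raises *ptr to at least v and returns the
 * resulting maximum, so concurrent callers proposing different resize
 * targets converge on the largest one.
 */
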
static
void _ht_add(struct rcu_ht *ht, struct rcu_table *t, struct rcu_ht_node *node)
{
        struct rcu_ht_node *iter_prev = NULL, *iter = NULL;

        if (!t->size)
                return;
        for (;;) {
                uint32_t chain_len = 0;

                iter_prev = rcu_dereference(t->tbl[node->hash & (t->size - 1)]);
                assert(iter_prev);
                assert(iter_prev->reverse_hash <= node->reverse_hash);
                for (;;) {
                        iter = clear_flag(rcu_dereference(iter_prev->next));
                        if (unlikely(!iter))
                                break;
                        if (iter->reverse_hash > node->reverse_hash)
                                break;
                        iter_prev = iter;
                        check_resize(ht, t, ++chain_len);
                }
                /*
                 * Add at iter_prev->next. TODO: check whether a
                 * concurrent delete needs helping here, for
                 * lock-freedom.
                 */
                if (is_removed(iter))
                        continue;
                assert(node != iter);
                node->next = iter;
                assert(iter_prev != node);
                if (uatomic_cmpxchg(&iter_prev->next, iter, node) != iter)
                        continue;
                break;
        }
}

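/*
 * Example: inserting a node with reverse_hash 0x60 into the chain
 * 0x00 -> 0x40 -> 0x80 stops the walk with iter_prev at 0x40 and iter
 * at 0x80; the cmpxchg on iter_prev->next then either splices the node
 * in (0x00 -> 0x40 -> 0x60 -> 0x80) or fails and restarts the walk if a
 * concurrent update changed iter_prev->next in the meantime.
 */
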
static
int _ht_remove(struct rcu_ht *ht, struct rcu_table *t, struct rcu_ht_node *node)
{
        struct rcu_ht_node *iter_prev, *iter, *next, *old;
        unsigned long chain_len;
        int found, ret = 0;
        int flagged = 0;

retry:
        chain_len = 0;
        found = 0;
        iter_prev = rcu_dereference(t->tbl[node->hash & (t->size - 1)]);
        assert(iter_prev);
        assert(iter_prev->reverse_hash <= node->reverse_hash);
        for (;;) {
                iter = clear_flag(rcu_dereference(iter_prev->next));
                if (unlikely(!iter))
                        break;
                if (unlikely(iter->reverse_hash > node->reverse_hash))
                        break;
                if (iter == node) {
                        found = 1;
                        break;
                }
                iter_prev = iter;
        }
        if (!found) {
                ret = -ENOENT;
                goto end;
        }
        next = rcu_dereference(iter->next);
        if (!flagged) {
                if (is_removed(next)) {
                        ret = -ENOENT;
                        goto end;
                }
                /* set deletion flag */
                if ((old = uatomic_cmpxchg(&iter->next, next,
                                flag_removed(next))) != next) {
                        if (old == flag_removed(next)) {
                                ret = -ENOENT;
                                goto end;
                        } else {
                                goto retry;
                        }
                }
                flagged = 1;
        }
        /*
         * Remove the element from the list.
         * Retry if a concurrent add inserted a node before us, or if
         * the predecessor node has itself been removed.
         * There cannot be a concurrent delete of our position, because
         * we won the deletion flag cmpxchg.
         * If there is a concurrent add after us, our deletion flag
         * makes it busy-loop (FIXME: not lock-free).
         */
        if (uatomic_cmpxchg(&iter_prev->next, iter, clear_flag(next)) != iter)
                goto retry;
end:
        return ret;
}

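/*
 * Removal is therefore two-phase: the node is first marked logically
 * removed by flagging its next pointer, then physically unlinked by
 * swinging the predecessor's next pointer past it.  A lookup racing
 * with the second phase still sees the flag and reports the node as
 * absent.
 */
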
static
void init_table(struct rcu_ht *ht, struct rcu_table *t,
                unsigned long first, unsigned long len)
{
        unsigned long i, end;

        end = first + len;
        for (i = first; i < end; i++) {
                /* Update table size when i reaches a power of two */
                if (i != 0 && !(i & (i - 1)))
                        t->size = i;
                t->tbl[i] = calloc(1, sizeof(struct rcu_ht_node));
                t->tbl[i]->dummy = 1;
                t->tbl[i]->hash = i;
                t->tbl[i]->reverse_hash = bit_reverse_ulong(i);
                _ht_add(ht, t, t->tbl[i]);
        }
        t->resize_target = t->size = end;
}

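/*
 * Example: init_table(ht, t, 0, 4) allocates dummy nodes for buckets
 * 0..3, links them into the ordered list via _ht_add(), and finally
 * publishes size = resize_target = 4.  A later grow to 8 buckets only
 * appends dummies 4..7; the existing dummy nodes are reused as-is.
 */
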
struct rcu_ht *ht_new(ht_hash_fct hash_fct,
                ht_compare_fct compare_fct,
                unsigned long hash_seed,
                unsigned long init_size,
                void (*ht_call_rcu)(struct rcu_head *head,
                                void (*func)(struct rcu_head *head)))
{
        struct rcu_ht *ht;

        ht = calloc(1, sizeof(struct rcu_ht));
        ht->hash_fct = hash_fct;
        ht->compare_fct = compare_fct;
        ht->hash_seed = hash_seed;
        ht->ht_call_rcu = ht_call_rcu;
        /* this mutex should not nest in read-side C.S. */
        pthread_mutex_init(&ht->resize_mutex, NULL);
        ht->t = calloc(1, sizeof(struct rcu_table)
                        + (max(init_size, 1) * sizeof(struct rcu_ht_node *)));
        ht->t->size = 0;
        pthread_mutex_lock(&ht->resize_mutex);
        init_table(ht, ht->t, 0, max(init_size, 1));
        pthread_mutex_unlock(&ht->resize_mutex);
        return ht;
}

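/*
 * Usage sketch (illustrative only; my_hash, my_compare, my_key and
 * my_key_len are application-provided and must match the ht_hash_fct /
 * ht_compare_fct signatures used above, with the compare callback
 * returning 0 on a key match):
 *
 *      struct rcu_ht *ht;
 *      struct rcu_ht_node *node;
 *
 *      ht = ht_new(my_hash, my_compare, 0x42UL, 64, call_rcu);
 *      node = calloc(1, sizeof(*node));
 *      node->key = my_key;
 *      node->key_len = my_key_len;
 *      ht_add(ht, node);
 *
 * call_rcu() is the helper declared in urcu-call-rcu.h; the table uses
 * it to defer freeing of old bucket arrays and to run lazy resizes.
 */
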
struct rcu_ht_node *ht_lookup(struct rcu_ht *ht, void *key, size_t key_len)
{
        struct rcu_table *t;
        struct rcu_ht_node *node;
        unsigned long hash, reverse_hash;

        hash = ht->hash_fct(key, key_len, ht->hash_seed);
        reverse_hash = bit_reverse_ulong(hash);

        t = rcu_dereference(ht->t);
        node = rcu_dereference(t->tbl[hash & (t->size - 1)]);
        for (;;) {
                if (unlikely(!node))
                        break;
                if (unlikely(node->reverse_hash > reverse_hash)) {
                        node = NULL;
                        break;
                }
                if (!ht->compare_fct(node->key, node->key_len, key, key_len)) {
                        if (unlikely(is_removed(rcu_dereference(node->next))))
                                node = NULL;
                        break;
                }
                node = clear_flag(rcu_dereference(node->next));
        }
        return node;
}

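/*
 * Lookups rely on rcu_dereference() and must therefore run inside an
 * RCU read-side critical section; the returned node is only guaranteed
 * to exist until the matching rcu_read_unlock().  Illustrative call
 * site (my_key/my_key_len as above):
 *
 *      rcu_read_lock();
 *      node = ht_lookup(ht, my_key, my_key_len);
 *      if (node)
 *              use(node);
 *      rcu_read_unlock();
 */
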
void ht_add(struct rcu_ht *ht, struct rcu_ht_node *node)
{
        struct rcu_table *t;

        node->hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
        node->reverse_hash = bit_reverse_ulong((unsigned long) node->hash);

        t = rcu_dereference(ht->t);
        _ht_add(ht, t, node);
}

int ht_remove(struct rcu_ht *ht, struct rcu_ht_node *node)
{
        struct rcu_table *t;

        t = rcu_dereference(ht->t);
        return _ht_remove(ht, t, node);
}

static
int ht_delete_dummy(struct rcu_ht *ht)
{
        struct rcu_table *t;
        struct rcu_ht_node *node;
        unsigned long i;

        t = ht->t;
        /* Check that the table is empty */
        node = t->tbl[0];
        do {
                if (!node->dummy)
                        return -EPERM;
                node = node->next;
        } while (node);
        /* Internal sanity check: all nodes left should be dummy */
        for (i = 0; i < t->size; i++) {
                assert(t->tbl[i]->dummy);
                free(t->tbl[i]);
        }
        return 0;
}

/*
 * Should only be called when no concurrent readers or writers can
 * possibly access the table anymore.
 */
int ht_destroy(struct rcu_ht *ht)
{
        int ret;

        ret = ht_delete_dummy(ht);
        if (ret)
                return ret;
        free(ht->t);
        free(ht);
        return ret;
}

static
void ht_free_table_cb(struct rcu_head *head)
{
        struct rcu_table *t =
                caa_container_of(head, struct rcu_table, head);
        free(t);
}

/* called with resize mutex held */
static
void _do_ht_resize(struct rcu_ht *ht)
{
        unsigned long new_size, old_size;
        struct rcu_table *new_t, *old_t;

        old_t = ht->t;
        old_size = old_t->size;

        new_size = CMM_LOAD_SHARED(old_t->resize_target);
        dbg_printf("rculfhash: resize from %lu to %lu buckets\n",
                        old_size, new_size);
        if (old_size == new_size)
                return;
        new_t = malloc(sizeof(struct rcu_table)
                        + (new_size * sizeof(struct rcu_ht_node *)));
        assert(new_size > old_size);
        memcpy(&new_t->tbl, &old_t->tbl,
                        old_size * sizeof(struct rcu_ht_node *));
        init_table(ht, new_t, old_size, new_size - old_size);
        /* Changing table and size atomically wrt lookups */
        rcu_assign_pointer(ht->t, new_t);
        ht->ht_call_rcu(&old_t->head, ht_free_table_cb);
}

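/*
 * Example: with old_size == 4 and resize_target == 32, the new table
 * reuses the 4 existing bucket pointers (memcpy), init_table() creates
 * dummies 4..31, rcu_assign_pointer() publishes the new table to
 * lookups, and the old bucket array (not the shared dummy nodes) is
 * freed after a grace period through ht_call_rcu().
 */
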
static
unsigned long resize_target_update(struct rcu_table *t,
                int growth_order)
{
        return _uatomic_max(&t->resize_target,
                        t->size << growth_order);
}

void ht_resize(struct rcu_ht *ht, int growth)
{
        struct rcu_table *t = rcu_dereference(ht->t);
        unsigned long target_size;

        target_size = resize_target_update(t, growth);
        if (t->size < target_size) {
                pthread_mutex_lock(&ht->resize_mutex);
                _do_ht_resize(ht);
                pthread_mutex_unlock(&ht->resize_mutex);
        }
}

static
void do_resize_cb(struct rcu_head *head)
{
        struct rcu_resize_work *work =
                caa_container_of(head, struct rcu_resize_work, head);
        struct rcu_ht *ht = work->ht;

        pthread_mutex_lock(&ht->resize_mutex);
        _do_ht_resize(ht);
        pthread_mutex_unlock(&ht->resize_mutex);
        free(work);
}

static
void ht_resize_lazy(struct rcu_ht *ht, struct rcu_table *t, int growth)
{
        struct rcu_resize_work *work;
        unsigned long target_size;

        target_size = resize_target_update(t, growth);
        if (t->size < target_size) {
                work = malloc(sizeof(*work));
                work->ht = ht;
                ht->ht_call_rcu(&work->head, do_resize_cb);
        }
}
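
/*
 * Note: ht_resize_lazy() only publishes the new target size and queues
 * the actual resize as a call_rcu() callback.  This is presumably so
 * that a writer calling _ht_add() from within an RCU read-side critical
 * section never takes resize_mutex directly (see the comment in
 * ht_new(): the mutex must not nest inside a read-side C.S.).
 */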