/*
 * rculfhash.c
 *
 * Userspace RCU library - Lock-Free Resizable RCU Hash Table
 *
 * Copyright 2010-2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright 2011 - Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/*
 * Based on the following articles:
 * - Ori Shalev and Nir Shavit. Split-ordered lists: Lock-free
 *   extensible hash tables. J. ACM 53, 3 (May 2006), 379-405.
 * - Michael, M. M. High performance dynamic lock-free hash tables
 *   and list-based sets. In Proceedings of the fourteenth annual ACM
 *   symposium on Parallel algorithms and architectures, ACM Press,
 *   (2002), 73-82.
 *
 * Some specificities of this Lock-Free Resizable RCU Hash Table
 * implementation:
 *
 * - RCU read-side critical section allows readers to perform hash
 *   table lookups and use the returned objects safely by delaying
 *   memory reclaim for a grace period.
 * - Add and remove operations are lock-free, and do not need to
 *   allocate memory. They need to be executed within RCU read-side
 *   critical section to ensure the objects they read are valid and to
 *   deal with the cmpxchg ABA problem.
 * - add and add_unique operations are supported. add_unique checks if
 *   the node key already exists in the hash table. It ensures no key
 *   duplicates exist.
 * - The resize operation executes concurrently with add/remove/lookup.
 * - Hash table nodes are contained within a split-ordered list. This
 *   list is ordered by increasing reversed-bits-hash value.
 * - An index of bucket nodes is kept. These bucket nodes are the hash
 *   table "buckets", and they are also chained together in the
 *   split-ordered list, which allows recursive expansion.
 * - The resize operation for small tables only allows expanding the hash table.
 *   It is triggered automatically by detecting long chains in the add
 *   operation.
 * - The resize operation for larger tables (and available through an
 *   API) allows both expanding and shrinking the hash table.
 * - Split-counters are used to keep track of the number of
 *   nodes within the hash table for automatic resize triggering.
 * - Resize operation initiated by long chain detection is executed by a
 *   call_rcu thread, which keeps lock-freedom of add and remove.
 * - Resize operations are protected by a mutex.
 * - The removal operation is split in two parts: first, a "removed"
 *   flag is set in the next pointer within the node to remove. Then,
 *   a "garbage collection" is performed in the bucket containing the
 *   removed node (from the start of the bucket up to the removed node).
 *   All encountered nodes with "removed" flag set in their next
 *   pointers are removed from the linked-list. If the cmpxchg used for
 *   removal fails (due to concurrent garbage-collection or concurrent
 *   add), we retry from the beginning of the bucket. This ensures that
 *   the node with "removed" flag set is removed from the hash table
 *   (not visible to lookups anymore) before the RCU read-side critical
 *   section held across removal ends. Furthermore, this ensures that
 *   the node with "removed" flag set is removed from the linked-list
 *   before its memory is reclaimed. Only the thread whose cmpxchg
 *   successfully set the "removed" flag into a node's next pointer is
 *   considered to have succeeded its removal (and thus owns the node
 *   to reclaim). Because we garbage-collect starting from an
 *   invariant node (the start-of-bucket bucket node) up to the
 *   "removed" node (or find a reverse-hash that is higher), we are sure
 *   that a successful traversal of the chain leads to a chain that is
 *   present in the linked-list (the start node is never removed) and
 *   that it does not contain the "removed" node anymore, even if
 *   concurrent delete/add operations are changing the structure of the
 *   list concurrently.
 * - The add operation performs garbage collection of buckets if it
 *   encounters nodes with removed flag set in the bucket where it wants
 *   to add its new node. This ensures lock-freedom of the add operation
 *   by helping the remover unlink nodes from the list rather than
 *   waiting for it to do so.
 * - An RCU "order table" indexed by log2(hash index) is copied and
 *   expanded by the resize operation. This order table allows finding
 *   the "bucket node" tables.
 * - There is one bucket node table per hash index order. The size of
 *   each bucket node table is half the number of hashes contained in
 *   this order (except for order 0).
 * - synchronize_rcu is used to garbage-collect the old bucket node table.
 * - The per-order bucket node tables contain a compact version of the
 *   hash table nodes. These tables are invariant after they are
 *   populated into the hash table.
 *
 * Bucket node tables:
 *
 * hash table   hash table      the last        all bucket node tables
 * order        size            bucket node     0   1   2   3   4   5   6(index)
 *                              table size
 * 0            1               1               1
 * 1            2               1               1   1
 * 2            4               2               1   1   2
 * 3            8               4               1   1   2   4
 * 4            16              8               1   1   2   4   8
 * 5            32              16              1   1   2   4   8  16
 * 6            64              32              1   1   2   4   8  16  32
 *
 * When growing/shrinking, we only focus on the last bucket node table
 * whose size is (!order ? 1 : (1 << (order - 1))).
 *
 * Example for growing/shrinking:
 * grow hash table from order 5 to 6: init the index=6 bucket node table
 * shrink hash table from order 6 to 5: fini the index=6 bucket node table
 *
 * A bit of ascii art explanation:
 *
 * The order index is off-by-one compared to the actual power of 2
 * because we use index 0 to deal with the 0 special-case.
 *
 * This shows the nodes for a small table ordered by reversed bits:
 *
 *    bits   reverse
 * 0  000        000
 * 4  100        001
 * 2  010        010
 * 6  110        011
 * 1  001        100
 * 5  101        101
 * 3  011        110
 * 7  111        111
 *
 * This shows the nodes in order of non-reversed bits, linked by
 * reversed-bit order.
 *
 * order              bits       reverse
 * 0                  0  000        000
 * 1                  |  1  001        100             <-
 * 2                  |  |  2  010        010    <-     |
 *                    |  |  |  3  011        110  | <-  |
 * 3                  -> |  |  |  4  100        001  |  |
 *                       -> |  |     5  101        101  |
 *                          -> |     6  110        011
 *                             ->    7  111        111
 */
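
/*
 * Editor's illustration (an added sketch, not part of the original
 * sources): with a table of size 8, hash 6 (binary 110) belongs to
 * bucket 6 & (8 - 1) = 6, and nodes are chained in reversed-bit
 * order, so hash 6 (reverse 011) sits between hash 2 (reverse 010)
 * and hash 1 (reverse 100) in the split-ordered list. Doubling the
 * table to 16 buckets therefore only inserts new bucket nodes between
 * existing ones; no node ever needs to move.
 */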

#define _LGPL_SOURCE
#include <stdlib.h>
#include <errno.h>
#include <assert.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#include "config.h"
#include <urcu.h>
#include <urcu-call-rcu.h>
#include <urcu-flavor.h>
#include <urcu/arch.h>
#include <urcu/uatomic.h>
#include <urcu/compiler.h>
#include <urcu/rculfhash.h>
#include <rculfhash-internal.h>
#include <pthread.h>

/*
 * Split-counters lazily update the global counter every 1024
 * additions/removals. They automatically keep track of when a resize
 * is required. We use the bucket length as indicator of the need to
 * expand for small tables and machines lacking per-cpu data support.
 */
#define COUNT_COMMIT_ORDER		10
#define DEFAULT_SPLIT_COUNT_MASK	0xFUL
#define CHAIN_LEN_TARGET		1
#define CHAIN_LEN_RESIZE_THRESHOLD	3

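/*
 * Editor's note (added illustration): with COUNT_COMMIT_ORDER == 10,
 * each split-counter folds its local count into the global ht->count
 * once every 1 << 10 = 1024 additions or removals, so the global
 * counter is an approximation that can lag each split-counter by up
 * to 1023 operations.
 */
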
/*
 * Define the minimum table size.
 */
#define MIN_TABLE_ORDER			0
#define MIN_TABLE_SIZE			(1UL << MIN_TABLE_ORDER)

/*
 * Minimum number of bucket nodes to touch per thread to parallelize grow/shrink.
 */
#define MIN_PARTITION_PER_THREAD_ORDER	12
#define MIN_PARTITION_PER_THREAD	(1UL << MIN_PARTITION_PER_THREAD_ORDER)

/*
 * The removed flag needs to be updated atomically with the pointer.
 * It indicates that no node must attach to the node scheduled for
 * removal, and that node garbage collection must be performed.
 * The bucket flag does not need to be updated atomically with the
 * pointer, but it is added as a pointer low bit flag to save space.
 */
#define REMOVED_FLAG		(1UL << 0)
#define BUCKET_FLAG		(1UL << 1)
#define FLAGS_MASK		((1UL << 2) - 1)

/* Value of the end pointer. Should not interact with flags. */
#define END_VALUE		NULL

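/*
 * Editor's note (added illustration): node structures are at least
 * 4-byte aligned, so the two low bits of a next pointer are always
 * zero and are free to carry the flags above. A next pointer whose
 * real target is 0x1000 is stored as 0x1001 once REMOVED_FLAG is set;
 * clear_flag() below masks with ~FLAGS_MASK to recover the address.
 */
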
/*
 * ht_items_count: Split-counters counting the number of node addition
 * and removal in the table. Only used if the CDS_LFHT_ACCOUNTING flag
 * is set at hash table creation.
 *
 * These are free-running counters, never reset to zero. They count the
 * number of add/remove, and trigger every (1 << COUNT_COMMIT_ORDER)
 * operations to update the global counter. We choose a power-of-2 value
 * for the trigger to deal with 32 or 64-bit overflow of the counter.
 */
struct ht_items_count {
	unsigned long add, del;
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));

/*
 * rcu_resize_work: Contains arguments passed to RCU worker thread
 * responsible for performing lazy resize.
 */
struct rcu_resize_work {
	struct rcu_head head;
	struct cds_lfht *ht;
};

/*
 * partition_resize_work: Contains arguments passed to worker threads
 * executing the hash table resize on partitions of the hash table
 * assigned to each processor's worker thread.
 */
struct partition_resize_work {
	pthread_t thread_id;
	struct cds_lfht *ht;
	unsigned long i, start, len;
	void (*fct)(struct cds_lfht *ht, unsigned long i,
		    unsigned long start, unsigned long len);
};

/*
 * Algorithm to reverse bits in a word by lookup table, extended to
 * 64-bit words.
 * Source:
 * http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
 * Originally from Public Domain.
 */

static const uint8_t BitReverseTable256[256] =
{
#define R2(n) (n),   (n) + 2*64,     (n) + 1*64,     (n) + 3*64
#define R4(n) R2(n), R2((n) + 2*16), R2((n) + 1*16), R2((n) + 3*16)
#define R6(n) R4(n), R4((n) + 2*4 ), R4((n) + 1*4 ), R4((n) + 3*4 )
	R6(0), R6(2), R6(1), R6(3)
};
#undef R2
#undef R4
#undef R6

static
uint8_t bit_reverse_u8(uint8_t v)
{
	return BitReverseTable256[v];
}

static __attribute__((unused))
uint32_t bit_reverse_u32(uint32_t v)
{
	return ((uint32_t) bit_reverse_u8(v) << 24) |
		((uint32_t) bit_reverse_u8(v >> 8) << 16) |
		((uint32_t) bit_reverse_u8(v >> 16) << 8) |
		((uint32_t) bit_reverse_u8(v >> 24));
}

static __attribute__((unused))
uint64_t bit_reverse_u64(uint64_t v)
{
	return ((uint64_t) bit_reverse_u8(v) << 56) |
		((uint64_t) bit_reverse_u8(v >> 8) << 48) |
		((uint64_t) bit_reverse_u8(v >> 16) << 40) |
		((uint64_t) bit_reverse_u8(v >> 24) << 32) |
		((uint64_t) bit_reverse_u8(v >> 32) << 24) |
		((uint64_t) bit_reverse_u8(v >> 40) << 16) |
		((uint64_t) bit_reverse_u8(v >> 48) << 8) |
		((uint64_t) bit_reverse_u8(v >> 56));
}

static
unsigned long bit_reverse_ulong(unsigned long v)
{
#if (CAA_BITS_PER_LONG == 32)
	return bit_reverse_u32(v);
#else
	return bit_reverse_u64(v);
#endif
}

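/*
 * Editor's example (added, hypothetical helper): a few sample values
 * for the table-driven byte reversal above.
 */
static __attribute__((unused))
void bit_reverse_example(void)
{
	/* 00000001 reversed is 10000000 */
	assert(bit_reverse_u8(0x01) == 0x80);
	/* 00001111 reversed is 11110000 */
	assert(bit_reverse_u8(0x0F) == 0xF0);
	/* 10100101 is palindromic, so it maps to itself */
	assert(bit_reverse_u8(0xA5) == 0xA5);
}
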
/*
 * fls: returns the position of the most significant bit.
 * Returns 0 if no bit is set, else returns the position of the most
 * significant bit (from 1 to 32 on 32-bit, from 1 to 64 on 64-bit).
 */
#if defined(__i386) || defined(__x86_64)
static inline
unsigned int fls_u32(uint32_t x)
{
	int r;

	asm("bsrl %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movl $-1,%0\n\t"
	    "1:\n\t"
	    : "=r" (r) : "rm" (x));
	return r + 1;
}
#define HAS_FLS_U32
#endif

#if defined(__x86_64)
static inline
unsigned int fls_u64(uint64_t x)
{
	long r;

	asm("bsrq %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movq $-1,%0\n\t"
	    "1:\n\t"
	    : "=r" (r) : "rm" (x));
	return r + 1;
}
#define HAS_FLS_U64
#endif

#ifndef HAS_FLS_U64
static __attribute__((unused))
unsigned int fls_u64(uint64_t x)
{
	unsigned int r = 64;

	if (!x)
		return 0;

	if (!(x & 0xFFFFFFFF00000000ULL)) {
		x <<= 32;
		r -= 32;
	}
	if (!(x & 0xFFFF000000000000ULL)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xFF00000000000000ULL)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xF000000000000000ULL)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xC000000000000000ULL)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x8000000000000000ULL)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}
#endif

#ifndef HAS_FLS_U32
static __attribute__((unused))
unsigned int fls_u32(uint32_t x)
{
	unsigned int r = 32;

	if (!x)
		return 0;
	if (!(x & 0xFFFF0000U)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xFF000000U)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xF0000000U)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xC0000000U)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000U)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}
#endif

unsigned int fls_ulong(unsigned long x)
{
#if (CAA_BITS_PER_LONG == 32)
	return fls_u32(x);
#else
	return fls_u64(x);
#endif
}

/*
 * Return the minimum order for which x <= (1UL << order).
 * Return -1 if x is 0.
 */
int get_count_order_u32(uint32_t x)
{
	if (!x)
		return -1;

	return fls_u32(x - 1);
}

/*
 * Return the minimum order for which x <= (1UL << order).
 * Return -1 if x is 0.
 */
int get_count_order_ulong(unsigned long x)
{
	if (!x)
		return -1;

	return fls_ulong(x - 1);
}

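/*
 * Editor's example (added, hypothetical helper): worked values for
 * the order computation above. 5 fits in 1 << 3 = 8, and an exact
 * power of two maps to its own order.
 */
static __attribute__((unused))
void count_order_example(void)
{
	assert(get_count_order_u32(1) == 0);
	assert(get_count_order_u32(5) == 3);
	assert(get_count_order_u32(8) == 3);
	assert(get_count_order_u32(9) == 4);
}
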
static
void cds_lfht_resize_lazy_grow(struct cds_lfht *ht, unsigned long size, int growth);

static
void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size,
				unsigned long count);

static long nr_cpus_mask = -1;
static long split_count_mask = -1;

#if defined(HAVE_SYSCONF)
static void ht_init_nr_cpus_mask(void)
{
	long maxcpus;

	maxcpus = sysconf(_SC_NPROCESSORS_CONF);
	if (maxcpus <= 0) {
		nr_cpus_mask = -2;
		return;
	}
	/*
	 * round up number of CPUs to next power of two, so we
	 * can use & for modulo.
	 */
	maxcpus = 1UL << get_count_order_ulong(maxcpus);
	nr_cpus_mask = maxcpus - 1;
}
#else /* #if defined(HAVE_SYSCONF) */
static void ht_init_nr_cpus_mask(void)
{
	nr_cpus_mask = -2;
}
#endif /* #else #if defined(HAVE_SYSCONF) */

static
void alloc_split_items_count(struct cds_lfht *ht)
{
	struct ht_items_count *count;

	if (nr_cpus_mask == -1) {
		ht_init_nr_cpus_mask();
		if (nr_cpus_mask < 0)
			split_count_mask = DEFAULT_SPLIT_COUNT_MASK;
		else
			split_count_mask = nr_cpus_mask;
	}

	assert(split_count_mask >= 0);

	if (ht->flags & CDS_LFHT_ACCOUNTING) {
		ht->split_count = calloc(split_count_mask + 1, sizeof(*count));
		assert(ht->split_count);
	} else {
		ht->split_count = NULL;
	}
}

static
void free_split_items_count(struct cds_lfht *ht)
{
	poison_free(ht->split_count);
}

#if defined(HAVE_SCHED_GETCPU)
static
int ht_get_split_count_index(unsigned long hash)
{
	int cpu;

	assert(split_count_mask >= 0);
	cpu = sched_getcpu();
	if (caa_unlikely(cpu < 0))
		return hash & split_count_mask;
	else
		return cpu & split_count_mask;
}
#else /* #if defined(HAVE_SCHED_GETCPU) */
static
int ht_get_split_count_index(unsigned long hash)
{
	return hash & split_count_mask;
}
#endif /* #else #if defined(HAVE_SCHED_GETCPU) */

static
void ht_count_add(struct cds_lfht *ht, unsigned long size, unsigned long hash)
{
	unsigned long split_count;
	int index;
	long count;

	if (caa_unlikely(!ht->split_count))
		return;
	index = ht_get_split_count_index(hash);
	split_count = uatomic_add_return(&ht->split_count[index].add, 1);
	if (caa_likely(split_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))
		return;
	/* Only if number of add multiple of 1UL << COUNT_COMMIT_ORDER */

	dbg_printf("add split count %lu\n", split_count);
	count = uatomic_add_return(&ht->count,
			1UL << COUNT_COMMIT_ORDER);
	if (caa_likely(count & (count - 1)))
		return;
	/* Only if global count is power of 2 */

	if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) < size)
		return;
	dbg_printf("add set global %ld\n", count);
	cds_lfht_resize_lazy_count(ht, size,
		count >> (CHAIN_LEN_TARGET - 1));
}

static
void ht_count_del(struct cds_lfht *ht, unsigned long size, unsigned long hash)
{
	unsigned long split_count;
	int index;
	long count;

	if (caa_unlikely(!ht->split_count))
		return;
	index = ht_get_split_count_index(hash);
	split_count = uatomic_add_return(&ht->split_count[index].del, 1);
	if (caa_likely(split_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))
		return;
	/* Only if number of deletes multiple of 1UL << COUNT_COMMIT_ORDER */

	dbg_printf("del split count %lu\n", split_count);
	count = uatomic_add_return(&ht->count,
			-(1UL << COUNT_COMMIT_ORDER));
	if (caa_likely(count & (count - 1)))
		return;
	/* Only if global count is power of 2 */

	if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) >= size)
		return;
	dbg_printf("del set global %ld\n", count);
	/*
	 * Don't shrink table if the number of nodes is below a
	 * certain threshold.
	 */
	if (count < (1UL << COUNT_COMMIT_ORDER) * (split_count_mask + 1))
		return;
	cds_lfht_resize_lazy_count(ht, size,
		count >> (CHAIN_LEN_TARGET - 1));
}

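/*
 * Editor's note (added illustration): `count & (count - 1)` is zero
 * only when count is a power of two, so the resize heuristics above
 * run only when the approximated global count crosses a 2^k boundary,
 * keeping the common add/del path cheap.
 */
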
static
void check_resize(struct cds_lfht *ht, unsigned long size, uint32_t chain_len)
{
	unsigned long count;

	if (!(ht->flags & CDS_LFHT_AUTO_RESIZE))
		return;
	count = uatomic_read(&ht->count);
	/*
	 * Use bucket-local length for small table expand and for
	 * environments lacking per-cpu data support.
	 */
	if (count >= (1UL << COUNT_COMMIT_ORDER))
		return;
	if (chain_len > 100)
		dbg_printf("WARNING: large chain length: %u.\n",
			   chain_len);
	if (chain_len >= CHAIN_LEN_RESIZE_THRESHOLD)
		cds_lfht_resize_lazy_grow(ht, size,
			get_count_order_u32(chain_len - (CHAIN_LEN_TARGET - 1)));
}

static
struct cds_lfht_node *clear_flag(struct cds_lfht_node *node)
{
	return (struct cds_lfht_node *) (((unsigned long) node) & ~FLAGS_MASK);
}

static
int is_removed(struct cds_lfht_node *node)
{
	return ((unsigned long) node) & REMOVED_FLAG;
}

static
struct cds_lfht_node *flag_removed(struct cds_lfht_node *node)
{
	return (struct cds_lfht_node *) (((unsigned long) node) | REMOVED_FLAG);
}

static
int is_bucket(struct cds_lfht_node *node)
{
	return ((unsigned long) node) & BUCKET_FLAG;
}

static
struct cds_lfht_node *flag_bucket(struct cds_lfht_node *node)
{
	return (struct cds_lfht_node *) (((unsigned long) node) | BUCKET_FLAG);
}

static
struct cds_lfht_node *get_end(void)
{
	return (struct cds_lfht_node *) END_VALUE;
}

static
int is_end(struct cds_lfht_node *node)
{
	return clear_flag(node) == (struct cds_lfht_node *) END_VALUE;
}

static
unsigned long _uatomic_xchg_monotonic_increase(unsigned long *ptr,
		unsigned long v)
{
	unsigned long old1, old2;

	old1 = uatomic_read(ptr);
	do {
		old2 = old1;
		if (old2 >= v)
			return old2;
	} while ((old1 = uatomic_cmpxchg(ptr, old2, v)) != old2);
	return old2;
}

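/*
 * Editor's note (added illustration): the helper above is an atomic
 * "store-max". If *ptr holds 4 and two threads race to publish 8 and
 * 6, *ptr ends up at 8 whatever the interleaving, which is how
 * concurrent resize requests can only ever raise the resize target.
 */
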
static
void cds_lfht_alloc_bucket_table(struct cds_lfht *ht, unsigned long order)
{
	ht->mm->alloc_bucket_table(ht, order);
}

/*
 * cds_lfht_free_bucket_table() should be called with decreasing order.
 * When cds_lfht_free_bucket_table(0) is called, it means the whole
 * lfht is destroyed.
 */
static
void cds_lfht_free_bucket_table(struct cds_lfht *ht, unsigned long order)
{
	ht->mm->free_bucket_table(ht, order);
}

static inline
struct cds_lfht_node *bucket_at(struct cds_lfht *ht, unsigned long index)
{
	return ht->bucket_at(ht, index);
}

static inline
struct cds_lfht_node *lookup_bucket(struct cds_lfht *ht, unsigned long size,
		unsigned long hash)
{
	assert(size > 0);
	return bucket_at(ht, hash & (size - 1));
}

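/*
 * Editor's example (added illustration): with size == 8, a key whose
 * hash is 13 (binary 1101) maps to bucket 13 & 7 == 5. Masking with
 * (size - 1) works because the table size is always a power of two.
 */
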
/*
 * Remove all logically deleted nodes from a bucket up to a certain node key.
 */
static
void _cds_lfht_gc_bucket(struct cds_lfht_node *bucket, struct cds_lfht_node *node)
{
	struct cds_lfht_node *iter_prev, *iter, *next, *new_next;

	assert(!is_bucket(bucket));
	assert(!is_removed(bucket));
	assert(!is_bucket(node));
	assert(!is_removed(node));
	for (;;) {
		iter_prev = bucket;
		/* We can always skip the bucket node initially */
		iter = rcu_dereference(iter_prev->next);
		assert(!is_removed(iter));
		assert(iter_prev->reverse_hash <= node->reverse_hash);
		/*
		 * We should never be called with bucket (start of chain)
		 * and logically removed node (end of path compression
		 * marker) being the actual same node. This would be a
		 * bug in the algorithm implementation.
		 */
		assert(bucket != node);
		for (;;) {
			if (caa_unlikely(is_end(iter)))
				return;
			if (caa_likely(clear_flag(iter)->reverse_hash > node->reverse_hash))
				return;
			next = rcu_dereference(clear_flag(iter)->next);
			if (caa_unlikely(is_removed(next)))
				break;
			iter_prev = clear_flag(iter);
			iter = next;
		}
		assert(!is_removed(iter));
		if (is_bucket(iter))
			new_next = flag_bucket(clear_flag(next));
		else
			new_next = clear_flag(next);
		(void) uatomic_cmpxchg(&iter_prev->next, iter, new_next);
	}
	return;
}

static
int _cds_lfht_replace(struct cds_lfht *ht, unsigned long size,
		struct cds_lfht_node *old_node,
		struct cds_lfht_node *old_next,
		struct cds_lfht_node *new_node)
{
	struct cds_lfht_node *bucket, *ret_next;

	if (!old_node)	/* Return -ENOENT if asked to replace NULL node */
		return -ENOENT;

	assert(!is_removed(old_node));
	assert(!is_bucket(old_node));
	assert(!is_removed(new_node));
	assert(!is_bucket(new_node));
	assert(new_node != old_node);
	for (;;) {
		/* Insert after node to be replaced */
		if (is_removed(old_next)) {
			/*
			 * Too late, the old node has been removed under us
			 * between lookup and replace. Fail.
			 */
			return -ENOENT;
		}
		assert(!is_bucket(old_next));
		assert(new_node != clear_flag(old_next));
		new_node->next = clear_flag(old_next);
		/*
		 * Here is the whole trick for lock-free replace: we add
		 * the replacement node _after_ the node we want to
		 * replace by atomically setting its next pointer at the
		 * same time we set its removal flag. Given that
		 * the lookups/get next use an iterator aware of the
		 * next pointer, they will either skip the old node due
		 * to the removal flag and see the new node, or use
		 * the old node, but will not see the new one.
		 */
		ret_next = uatomic_cmpxchg(&old_node->next,
			old_next, flag_removed(new_node));
		if (ret_next == old_next)
			break;		/* We performed the replacement. */
		old_next = ret_next;
	}

	/*
	 * Ensure that the old node is not visible to readers anymore:
	 * lookup for the node, and remove it (along with any other
	 * logically removed node) if found.
	 */
	bucket = lookup_bucket(ht, size, bit_reverse_ulong(old_node->reverse_hash));
	_cds_lfht_gc_bucket(bucket, new_node);

	assert(is_removed(rcu_dereference(old_node->next)));
	return 0;
}

/*
 * A non-NULL unique_ret pointer uses the "add unique" (or uniquify) add
 * mode. A NULL unique_ret allows creation of duplicate keys.
 */
static
void _cds_lfht_add(struct cds_lfht *ht,
		cds_lfht_match_fct match,
		const void *key,
		unsigned long size,
		struct cds_lfht_node *node,
		struct cds_lfht_iter *unique_ret,
		int bucket_flag)
{
	struct cds_lfht_node *iter_prev, *iter, *next, *new_node, *new_next,
			*return_node;
	struct cds_lfht_node *bucket;

	assert(!is_bucket(node));
	assert(!is_removed(node));
	bucket = lookup_bucket(ht, size, bit_reverse_ulong(node->reverse_hash));
	for (;;) {
		uint32_t chain_len = 0;

		/*
		 * iter_prev points to the non-removed node prior to the
		 * insert location.
		 */
		iter_prev = bucket;
		/* We can always skip the bucket node initially */
		iter = rcu_dereference(iter_prev->next);
		assert(iter_prev->reverse_hash <= node->reverse_hash);
		for (;;) {
			if (caa_unlikely(is_end(iter)))
				goto insert;
			if (caa_likely(clear_flag(iter)->reverse_hash > node->reverse_hash))
				goto insert;

			/* bucket node is the first node of the identical-hash-value chain */
			if (bucket_flag && clear_flag(iter)->reverse_hash == node->reverse_hash)
				goto insert;

			next = rcu_dereference(clear_flag(iter)->next);
			if (caa_unlikely(is_removed(next)))
				goto gc_node;

			/* uniquely add */
			if (unique_ret
			    && !is_bucket(next)
			    && clear_flag(iter)->reverse_hash == node->reverse_hash) {
				struct cds_lfht_iter d_iter = { .node = node, .next = iter, };

				/*
				 * uniquely adding inserts the node as the first
				 * node of the identical-hash-value node chain.
				 *
				 * This semantic ensures no duplicated keys
				 * should ever be observable in the table
				 * (including observing nodes one by one
				 * during forward iteration).
				 */
				cds_lfht_next_duplicate(ht, match, key, &d_iter);
				if (!d_iter.node)
					goto insert;

				*unique_ret = d_iter;
				return;
			}

			/* Only account for identical reverse hash once */
			if (iter_prev->reverse_hash != clear_flag(iter)->reverse_hash
			    && !is_bucket(next))
				check_resize(ht, size, ++chain_len);
			iter_prev = clear_flag(iter);
			iter = next;
		}

	insert:
		assert(node != clear_flag(iter));
		assert(!is_removed(iter_prev));
		assert(!is_removed(iter));
		assert(iter_prev != node);
		if (!bucket_flag)
			node->next = clear_flag(iter);
		else
			node->next = flag_bucket(clear_flag(iter));
		if (is_bucket(iter))
			new_node = flag_bucket(node);
		else
			new_node = node;
		if (uatomic_cmpxchg(&iter_prev->next, iter,
				    new_node) != iter) {
			continue;	/* retry */
		} else {
			return_node = node;
			goto end;
		}

	gc_node:
		assert(!is_removed(iter));
		if (is_bucket(iter))
			new_next = flag_bucket(clear_flag(next));
		else
			new_next = clear_flag(next);
		(void) uatomic_cmpxchg(&iter_prev->next, iter, new_next);
		/* retry */
	}
end:
	if (unique_ret) {
		unique_ret->node = return_node;
		/* unique_ret->next left unset, never used. */
	}
}

static
int _cds_lfht_del(struct cds_lfht *ht, unsigned long size,
		struct cds_lfht_node *node,
		int bucket_removal)
{
	struct cds_lfht_node *bucket, *next, *old;

	if (!node)	/* Return -ENOENT if asked to delete NULL node */
		return -ENOENT;

	/* logically delete the node */
	assert(!is_bucket(node));
	assert(!is_removed(node));
	old = rcu_dereference(node->next);
	do {
		struct cds_lfht_node *new_next;

		next = old;
		if (caa_unlikely(is_removed(next)))
			return -ENOENT;
		if (bucket_removal)
			assert(is_bucket(next));
		else
			assert(!is_bucket(next));
		new_next = flag_removed(next);
		old = uatomic_cmpxchg(&node->next, next, new_next);
	} while (old != next);
	/* We performed the (logical) deletion. */

	/*
	 * Ensure that the node is not visible to readers anymore: lookup for
	 * the node, and remove it (along with any other logically removed node)
	 * if found.
	 */
	bucket = lookup_bucket(ht, size, bit_reverse_ulong(node->reverse_hash));
	_cds_lfht_gc_bucket(bucket, node);

	assert(is_removed(rcu_dereference(node->next)));
	return 0;
}

static
void *partition_resize_thread(void *arg)
{
	struct partition_resize_work *work = arg;

	work->ht->flavor->register_thread();
	work->fct(work->ht, work->i, work->start, work->len);
	work->ht->flavor->unregister_thread();
	return NULL;
}

static
void partition_resize_helper(struct cds_lfht *ht, unsigned long i,
		unsigned long len,
		void (*fct)(struct cds_lfht *ht, unsigned long i,
			unsigned long start, unsigned long len))
{
	unsigned long partition_len;
	struct partition_resize_work *work;
	int thread, ret;
	unsigned long nr_threads;

	/*
	 * Note: nr_cpus_mask + 1 is always power of 2.
	 * We spawn just the number of threads we need to satisfy the minimum
	 * partition size, up to the number of CPUs in the system.
	 */
	if (nr_cpus_mask > 0) {
		nr_threads = min(nr_cpus_mask + 1,
				 len >> MIN_PARTITION_PER_THREAD_ORDER);
	} else {
		nr_threads = 1;
	}
	partition_len = len >> get_count_order_ulong(nr_threads);
	work = calloc(nr_threads, sizeof(*work));
	assert(work);
	for (thread = 0; thread < nr_threads; thread++) {
		work[thread].ht = ht;
		work[thread].i = i;
		work[thread].len = partition_len;
		work[thread].start = thread * partition_len;
		work[thread].fct = fct;
		ret = pthread_create(&(work[thread].thread_id), ht->resize_attr,
			partition_resize_thread, &work[thread]);
		assert(!ret);
	}
	for (thread = 0; thread < nr_threads; thread++) {
		ret = pthread_join(work[thread].thread_id, NULL);
		assert(!ret);
	}
	free(work);
}

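/*
 * Editor's note (added illustration): with 8 CPUs (nr_cpus_mask == 7)
 * and len == 1 << 20 buckets, nr_threads = min(8, 1 << (20 - 12)) = 8
 * and each worker handles partition_len = 1 << 17 buckets.
 */
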
/*
 * Holding RCU read lock to protect _cds_lfht_add against memory
 * reclaim that could be performed by other call_rcu worker threads (ABA
 * problem).
 *
 * When we reach a certain length, we can split this population phase over
 * many worker threads, based on the number of CPUs available in the system.
 * This should therefore take care of not having the expand lag behind too
 * many concurrent insertion threads, by using the scheduler's ability to
 * schedule bucket node population fairly with insertions.
 */
static
void init_table_populate_partition(struct cds_lfht *ht, unsigned long i,
				   unsigned long start, unsigned long len)
{
	unsigned long j, size = 1UL << (i - 1);

	assert(i > MIN_TABLE_ORDER);
	ht->flavor->read_lock();
	for (j = size + start; j < size + start + len; j++) {
		struct cds_lfht_node *new_node = bucket_at(ht, j);

		assert(j >= size && j < (size << 1));
		dbg_printf("init populate: order %lu index %lu hash %lu\n",
			   i, j, j);
		new_node->reverse_hash = bit_reverse_ulong(j);
		_cds_lfht_add(ht, NULL, NULL, size, new_node, NULL, 1);
	}
	ht->flavor->read_unlock();
}

static
void init_table_populate(struct cds_lfht *ht, unsigned long i,
			 unsigned long len)
{
	assert(nr_cpus_mask != -1);
	if (nr_cpus_mask < 0 || len < 2 * MIN_PARTITION_PER_THREAD) {
		ht->flavor->thread_online();
		init_table_populate_partition(ht, i, 0, len);
		ht->flavor->thread_offline();
		return;
	}
	partition_resize_helper(ht, i, len, init_table_populate_partition);
}

static
void init_table(struct cds_lfht *ht,
		unsigned long first_order, unsigned long last_order)
{
	unsigned long i;

	dbg_printf("init table: first_order %lu last_order %lu\n",
		   first_order, last_order);
	assert(first_order > MIN_TABLE_ORDER);
	for (i = first_order; i <= last_order; i++) {
		unsigned long len;

		len = 1UL << (i - 1);
		dbg_printf("init order %lu len: %lu\n", i, len);

		/* Stop expand if the resize target changes under us */
		if (CMM_LOAD_SHARED(ht->resize_target) < (1UL << i))
			break;

		cds_lfht_alloc_bucket_table(ht, i);

		/*
		 * Set all bucket nodes reverse hash values for a level and
		 * link all bucket nodes into the table.
		 */
		init_table_populate(ht, i, len);

		/*
		 * Update table size.
		 */
		cmm_smp_wmb();	/* populate data before RCU size */
		CMM_STORE_SHARED(ht->size, 1UL << i);

		dbg_printf("init new size: %lu\n", 1UL << i);
		if (CMM_LOAD_SHARED(ht->in_progress_destroy))
			break;
	}
}

/*
 * Holding RCU read lock to protect _cds_lfht_remove against memory
 * reclaim that could be performed by other call_rcu worker threads (ABA
 * problem).
 * For a single level, we logically remove and garbage collect each node.
 *
 * As a design choice, we perform logical removal and garbage collection on a
 * node-per-node basis to simplify this algorithm. We also assume keeping good
 * cache locality of the operation would outweigh possible performance gain
 * that could be achieved by batching garbage collection for multiple levels.
 * However, this would have to be justified by benchmarks.
 *
 * Concurrent removal and add operations are helping us perform garbage
 * collection of logically removed nodes. We guarantee that all logically
 * removed nodes have been garbage-collected (unlinked) before call_rcu is
 * invoked to free a whole level of bucket nodes (after a grace period).
 *
 * Logical removal and garbage collection can therefore be done in batch or on a
 * node-per-node basis, as long as the guarantee above holds.
 *
 * When we reach a certain length, we can split this removal over many worker
 * threads, based on the number of CPUs available in the system. This should
 * take care of not letting the resize process lag behind too many concurrent
 * updater threads actively inserting into the hash table.
 */
static
void remove_table_partition(struct cds_lfht *ht, unsigned long i,
			    unsigned long start, unsigned long len)
{
	unsigned long j, size = 1UL << (i - 1);

	assert(i > MIN_TABLE_ORDER);
	ht->flavor->read_lock();
	for (j = size + start; j < size + start + len; j++) {
		struct cds_lfht_node *fini_node = bucket_at(ht, j);

		assert(j >= size && j < (size << 1));
		dbg_printf("remove entry: order %lu index %lu hash %lu\n",
			   i, j, j);
		fini_node->reverse_hash = bit_reverse_ulong(j);
		(void) _cds_lfht_del(ht, size, fini_node, 1);
	}
	ht->flavor->read_unlock();
}

static
void remove_table(struct cds_lfht *ht, unsigned long i, unsigned long len)
{

	assert(nr_cpus_mask != -1);
	if (nr_cpus_mask < 0 || len < 2 * MIN_PARTITION_PER_THREAD) {
		ht->flavor->thread_online();
		remove_table_partition(ht, i, 0, len);
		ht->flavor->thread_offline();
		return;
	}
	partition_resize_helper(ht, i, len, remove_table_partition);
}

/*
 * fini_table() is never called for first_order == 0, which is why
 * free_by_rcu_order == 0 can be used as criterion to know if free must
 * be called.
 */
static
void fini_table(struct cds_lfht *ht,
		unsigned long first_order, unsigned long last_order)
{
	long i;
	unsigned long free_by_rcu_order = 0;

	dbg_printf("fini table: first_order %lu last_order %lu\n",
		   first_order, last_order);
	assert(first_order > MIN_TABLE_ORDER);
	for (i = last_order; i >= first_order; i--) {
		unsigned long len;

		len = 1UL << (i - 1);
		dbg_printf("fini order %lu len: %lu\n", i, len);

		/* Stop shrink if the resize target changes under us */
		if (CMM_LOAD_SHARED(ht->resize_target) > (1UL << (i - 1)))
			break;

		cmm_smp_wmb();	/* populate data before RCU size */
		CMM_STORE_SHARED(ht->size, 1UL << (i - 1));

		/*
		 * We need to wait for all add operations to reach Q.S. (and
		 * thus use the new table for lookups) before we can start
		 * releasing the old bucket nodes. Otherwise their lookup will
		 * return a logically removed node as insert position.
		 */
		ht->flavor->update_synchronize_rcu();
		if (free_by_rcu_order)
			cds_lfht_free_bucket_table(ht, free_by_rcu_order);

		/*
		 * Set "removed" flag in bucket nodes about to be removed.
		 * Unlink all now-logically-removed bucket node pointers.
		 * Concurrent add/remove operations are helping us do
		 * the gc.
		 */
		remove_table(ht, i, len);

		free_by_rcu_order = i;

		dbg_printf("fini new size: %lu\n", 1UL << i);
		if (CMM_LOAD_SHARED(ht->in_progress_destroy))
			break;
	}

	if (free_by_rcu_order) {
		ht->flavor->update_synchronize_rcu();
		cds_lfht_free_bucket_table(ht, free_by_rcu_order);
	}
}

static
void cds_lfht_create_bucket(struct cds_lfht *ht, unsigned long size)
{
	struct cds_lfht_node *prev, *node;
	unsigned long order, len, i;

	cds_lfht_alloc_bucket_table(ht, 0);

	dbg_printf("create bucket: order 0 index 0 hash 0\n");
	node = bucket_at(ht, 0);
	node->next = flag_bucket(get_end());
	node->reverse_hash = 0;

	for (order = 1; order < get_count_order_ulong(size) + 1; order++) {
		len = 1UL << (order - 1);
		cds_lfht_alloc_bucket_table(ht, order);

		for (i = 0; i < len; i++) {
			/*
			 * Now, we are trying to init the node with the
			 * hash=(len+i) (which is also a bucket with the
			 * index=(len+i)) and insert it into the hash table,
			 * so this node has to be inserted after the bucket
			 * with the index=(len+i)&(len-1)=i. And because
			 * there is no other non-bucket node nor bucket node
			 * with a larger index/hash inserted yet, the bucket
			 * node being inserted can be linked directly after
			 * the bucket node with index=i.
			 */
			prev = bucket_at(ht, i);
			node = bucket_at(ht, len + i);

			dbg_printf("create bucket: order %lu index %lu hash %lu\n",
				   order, len + i, len + i);
			node->reverse_hash = bit_reverse_ulong(len + i);

			/* insert after prev */
			assert(is_bucket(prev->next));
			node->next = prev->next;
			prev->next = flag_bucket(node);
		}
	}
}

struct cds_lfht *_cds_lfht_new(unsigned long init_size,
			unsigned long min_nr_alloc_buckets,
			unsigned long max_nr_buckets,
			int flags,
			const struct cds_lfht_mm_type *mm,
			const struct rcu_flavor_struct *flavor,
			pthread_attr_t *attr)
{
	struct cds_lfht *ht;
	unsigned long order;

	/* min_nr_alloc_buckets must be power of two */
	if (!min_nr_alloc_buckets || (min_nr_alloc_buckets & (min_nr_alloc_buckets - 1)))
		return NULL;

	/* init_size must be power of two */
	if (!init_size || (init_size & (init_size - 1)))
		return NULL;

	/*
	 * Memory management plugin default.
	 */
	if (!mm) {
		if (CAA_BITS_PER_LONG > 32
				&& max_nr_buckets
				&& max_nr_buckets <= (1ULL << 32)) {
			/*
			 * For 64-bit architectures, with max number of
			 * buckets small enough not to use the entire
			 * 64-bit memory mapping space (and allowing a
			 * fair number of hash table instances), use the
			 * mmap allocator, which is faster than the
			 * order allocator.
			 */
			mm = &cds_lfht_mm_mmap;
		} else {
			/*
			 * The fallback is to use the order allocator.
			 */
			mm = &cds_lfht_mm_order;
		}
	}

	/* max_nr_buckets == 0 for order based mm means infinite */
	if (mm == &cds_lfht_mm_order && !max_nr_buckets)
		max_nr_buckets = 1UL << (MAX_TABLE_ORDER - 1);

	/* max_nr_buckets must be power of two */
	if (!max_nr_buckets || (max_nr_buckets & (max_nr_buckets - 1)))
		return NULL;

	min_nr_alloc_buckets = max(min_nr_alloc_buckets, MIN_TABLE_SIZE);
	init_size = max(init_size, MIN_TABLE_SIZE);
	max_nr_buckets = max(max_nr_buckets, min_nr_alloc_buckets);
	init_size = min(init_size, max_nr_buckets);

	ht = mm->alloc_cds_lfht(min_nr_alloc_buckets, max_nr_buckets);
	assert(ht);
	assert(ht->mm == mm);
	assert(ht->bucket_at == mm->bucket_at);

	ht->flags = flags;
	ht->flavor = flavor;
	ht->resize_attr = attr;
	alloc_split_items_count(ht);
	/* this mutex should not nest in read-side C.S. */
	pthread_mutex_init(&ht->resize_mutex, NULL);
	order = get_count_order_ulong(init_size);
	ht->resize_target = 1UL << order;
	cds_lfht_create_bucket(ht, 1UL << order);
	ht->size = 1UL << order;
	return ht;
}

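/*
 * Editor's example (a sketch, assuming the public cds_lfht_new()
 * wrapper from <urcu/rculfhash.h>, which fills in the memory
 * management plugin and RCU flavor before calling _cds_lfht_new()):
 *
 *	struct cds_lfht *ht;
 *
 *	ht = cds_lfht_new(1, 1, 0,
 *			CDS_LFHT_AUTO_RESIZE | CDS_LFHT_ACCOUNTING,
 *			NULL);
 *	if (!ht)
 *		abort();	(invalid parameters)
 */
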
6f554439 | 1329 | void cds_lfht_lookup(struct cds_lfht *ht, unsigned long hash, |
996ff57c | 1330 | cds_lfht_match_fct match, const void *key, |
6f554439 | 1331 | struct cds_lfht_iter *iter) |
2ed95849 | 1332 | { |
04db56f8 | 1333 | struct cds_lfht_node *node, *next, *bucket; |
0422d92c | 1334 | unsigned long reverse_hash, size; |
2ed95849 | 1335 | |
abc490a1 | 1336 | reverse_hash = bit_reverse_ulong(hash); |
464a1ec9 | 1337 | |
7b3893e4 | 1338 | size = rcu_dereference(ht->size); |
04db56f8 | 1339 | bucket = lookup_bucket(ht, size, hash); |
1ee8f000 | 1340 | /* We can always skip the bucket node initially */ |
04db56f8 | 1341 | node = rcu_dereference(bucket->next); |
bb7b2f26 | 1342 | node = clear_flag(node); |
2ed95849 | 1343 | for (;;) { |
8ed51e04 | 1344 | if (caa_unlikely(is_end(node))) { |
96ad1112 | 1345 | node = next = NULL; |
abc490a1 | 1346 | break; |
bb7b2f26 | 1347 | } |
04db56f8 | 1348 | if (caa_unlikely(node->reverse_hash > reverse_hash)) { |
96ad1112 | 1349 | node = next = NULL; |
abc490a1 | 1350 | break; |
2ed95849 | 1351 | } |
04db56f8 | 1352 | next = rcu_dereference(node->next); |
7f52427b | 1353 | assert(node == clear_flag(node)); |
8ed51e04 | 1354 | if (caa_likely(!is_removed(next)) |
1ee8f000 | 1355 | && !is_bucket(next) |
04db56f8 | 1356 | && node->reverse_hash == reverse_hash |
0422d92c | 1357 | && caa_likely(match(node, key))) { |
273399de | 1358 | break; |
2ed95849 | 1359 | } |
1b81fe1a | 1360 | node = clear_flag(next); |
2ed95849 | 1361 | } |
1ee8f000 | 1362 | assert(!node || !is_bucket(rcu_dereference(node->next))); |
adc0de68 MD |
1363 | iter->node = node; |
1364 | iter->next = next; | |
abc490a1 | 1365 | } |
e0ba718a | 1366 | |
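/*
 * Lookup usage sketch (assumed caller-side code, not from this file):
 * lookups must run inside an RCU read-side critical section, and the
 * embedded cds_lfht_node is converted back to the enclosing object with
 * caa_container_of(). struct mynode, mynode_match(), free_mynode() and
 * example_lookup() are hypothetical names reused by the sketches below;
 * they assume <urcu.h>, <urcu/rculfhash.h>, <errno.h> and <stdlib.h>
 * are included.
 */
struct mynode {
	int key;
	struct cds_lfht_node node;	/* chaining in the hash table */
	struct rcu_head rcu_head;	/* for call_rcu() deferred reclaim */
};

static int mynode_match(struct cds_lfht_node *ht_node, const void *key)
{
	struct mynode *n = caa_container_of(ht_node, struct mynode, node);

	return n->key == *(const int *) key;
}

static void free_mynode(struct rcu_head *head)
{
	free(caa_container_of(head, struct mynode, rcu_head));
}

/*
 * Caller must hold rcu_read_lock() and may only use the returned node
 * until the matching rcu_read_unlock(). The key is used directly as the
 * hash for brevity; a real caller would run it through a hash function.
 */
static struct mynode *example_lookup(struct cds_lfht *ht, int key)
{
	struct cds_lfht_iter iter;
	struct cds_lfht_node *ht_node;

	cds_lfht_lookup(ht, (unsigned long) key, mynode_match, &key, &iter);
	ht_node = cds_lfht_iter_get_node(&iter);
	return ht_node ? caa_container_of(ht_node, struct mynode, node)
			: NULL;
}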
0422d92c | 1367 | void cds_lfht_next_duplicate(struct cds_lfht *ht, cds_lfht_match_fct match, |
996ff57c | 1368 | const void *key, struct cds_lfht_iter *iter) |
a481e5ff | 1369 | { |
adc0de68 | 1370 | struct cds_lfht_node *node, *next; |
a481e5ff | 1371 | unsigned long reverse_hash; |
a481e5ff | 1372 | |
adc0de68 | 1373 | node = iter->node; |
04db56f8 | 1374 | reverse_hash = node->reverse_hash; |
adc0de68 | 1375 | next = iter->next; |
a481e5ff MD |
1376 | node = clear_flag(next); |
1377 | ||
1378 | for (;;) { | |
8ed51e04 | 1379 | if (caa_unlikely(is_end(node))) { |
96ad1112 | 1380 | node = next = NULL; |
a481e5ff | 1381 | break; |
bb7b2f26 | 1382 | } |
04db56f8 | 1383 | if (caa_unlikely(node->reverse_hash > reverse_hash)) { |
96ad1112 | 1384 | node = next = NULL; |
a481e5ff MD |
1385 | break; |
1386 | } | |
04db56f8 | 1387 | next = rcu_dereference(node->next); |
8ed51e04 | 1388 | if (caa_likely(!is_removed(next)) |
1ee8f000 | 1389 | && !is_bucket(next) |
04db56f8 | 1390 | && caa_likely(match(node, key))) { |
a481e5ff MD |
1391 | break; |
1392 | } | |
1393 | node = clear_flag(next); | |
1394 | } | |
1ee8f000 | 1395 | assert(!node || !is_bucket(rcu_dereference(node->next))); |
adc0de68 MD |
1396 | iter->node = node; |
1397 | iter->next = next; | |
a481e5ff MD |
1398 | } |
1399 | ||
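/*
 * Duplicate-iteration sketch (assumed caller-side code, reusing the
 * hypothetical mynode helpers above): after a successful lookup,
 * cds_lfht_next_duplicate() advances the iterator to the next node with
 * an equal key, so all duplicates inserted with cds_lfht_add() can be
 * visited in one pass. Caller must hold rcu_read_lock().
 */
static unsigned long example_count_duplicates(struct cds_lfht *ht, int key)
{
	struct cds_lfht_iter iter;
	unsigned long nr = 0;

	cds_lfht_lookup(ht, (unsigned long) key, mynode_match, &key, &iter);
	while (cds_lfht_iter_get_node(&iter)) {
		nr++;
		cds_lfht_next_duplicate(ht, mynode_match, &key, &iter);
	}
	return nr;
}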
4e9b9fbf MD |
1400 | void cds_lfht_next(struct cds_lfht *ht, struct cds_lfht_iter *iter) |
1401 | { | |
1402 | struct cds_lfht_node *node, *next; | |
1403 | ||
853395e1 | 1404 | node = clear_flag(iter->next); |
4e9b9fbf | 1405 | for (;;) { |
8ed51e04 | 1406 | if (caa_unlikely(is_end(node))) { |
4e9b9fbf MD |
1407 | node = next = NULL; |
1408 | break; | |
1409 | } | |
04db56f8 | 1410 | next = rcu_dereference(node->next); |
8ed51e04 | 1411 | if (caa_likely(!is_removed(next)) |
1ee8f000 | 1412 | && !is_bucket(next)) { |
4e9b9fbf MD |
1413 | break; |
1414 | } | |
1415 | node = clear_flag(next); | |
1416 | } | |
1ee8f000 | 1417 | assert(!node || !is_bucket(rcu_dereference(node->next))); |
4e9b9fbf MD |
1418 | iter->node = node; |
1419 | iter->next = next; | |
1420 | } | |
1421 | ||
1422 | void cds_lfht_first(struct cds_lfht *ht, struct cds_lfht_iter *iter) | |
1423 | { | |
4e9b9fbf | 1424 | /* |
1ee8f000 | 1425 | * Get next after first bucket node. The first bucket node is the |
4e9b9fbf MD |
1426 | * first node of the linked list. |
1427 | */ | |
9d72a73f | 1428 | iter->next = bucket_at(ht, 0)->next; |
4e9b9fbf MD |
1429 | cds_lfht_next(ht, iter); |
1430 | } | |
1431 | ||
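/*
 * Full-traversal sketch (assumed caller-side code): walk every
 * non-bucket node with cds_lfht_first()/cds_lfht_next() under
 * rcu_read_lock(). Nodes may be added or removed concurrently, so the
 * count is only a snapshot. example_count_all() is a hypothetical name.
 */
static unsigned long example_count_all(struct cds_lfht *ht)
{
	struct cds_lfht_iter iter;
	unsigned long nr = 0;

	rcu_read_lock();
	for (cds_lfht_first(ht, &iter);
			cds_lfht_iter_get_node(&iter) != NULL;
			cds_lfht_next(ht, &iter))
		nr++;
	rcu_read_unlock();
	return nr;
}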
0422d92c MD |
1432 | void cds_lfht_add(struct cds_lfht *ht, unsigned long hash, |
1433 | struct cds_lfht_node *node) | |
abc490a1 | 1434 | { |
0422d92c | 1435 | unsigned long size; |
ab7d5fc6 | 1436 | |
04db56f8 | 1437 | node->reverse_hash = bit_reverse_ulong((unsigned long) hash); |
7b3893e4 | 1438 | size = rcu_dereference(ht->size); |
04db56f8 | 1439 | _cds_lfht_add(ht, NULL, NULL, size, node, NULL, 0); |
14360f1c | 1440 | ht_count_add(ht, size, hash); |
3eca1b8c MD |
1441 | } |
1442 | ||
14044b37 | 1443 | struct cds_lfht_node *cds_lfht_add_unique(struct cds_lfht *ht, |
6f554439 | 1444 | unsigned long hash, |
0422d92c | 1445 | cds_lfht_match_fct match, |
996ff57c | 1446 | const void *key, |
48ed1c18 | 1447 | struct cds_lfht_node *node) |
3eca1b8c | 1448 | { |
0422d92c | 1449 | unsigned long size; |
83beee94 | 1450 | struct cds_lfht_iter iter; |
3eca1b8c | 1451 | |
04db56f8 | 1452 | node->reverse_hash = bit_reverse_ulong((unsigned long) hash); |
7b3893e4 | 1453 | size = rcu_dereference(ht->size); |
04db56f8 | 1454 | _cds_lfht_add(ht, match, key, size, node, &iter, 0); |
83beee94 | 1455 | if (iter.node == node) |
14360f1c | 1456 | ht_count_add(ht, size, hash); |
83beee94 | 1457 | return iter.node; |
2ed95849 MD |
1458 | } |
1459 | ||
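/*
 * Unique-insertion sketch (assumed caller-side code): cds_lfht_add()
 * always inserts and therefore permits duplicate keys, whereas
 * cds_lfht_add_unique() returns the pre-existing node when the key is
 * already present, in which case the caller's node was not added and
 * remains owned by the caller. Reuses the hypothetical mynode helpers
 * above; add operations must also run under rcu_read_lock().
 */
static int example_insert_unique(struct cds_lfht *ht, struct mynode *n)
{
	struct cds_lfht_node *ret;

	rcu_read_lock();
	ret = cds_lfht_add_unique(ht, (unsigned long) n->key,
			mynode_match, &n->key, &n->node);
	rcu_read_unlock();
	return ret == &n->node ? 0 : -EEXIST;	/* 0 on successful insert */
}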
9357c415 | 1460 | struct cds_lfht_node *cds_lfht_add_replace(struct cds_lfht *ht, |
6f554439 | 1461 | unsigned long hash, |
0422d92c | 1462 | cds_lfht_match_fct match, |
996ff57c | 1463 | const void *key, |
48ed1c18 MD |
1464 | struct cds_lfht_node *node) |
1465 | { | |
0422d92c | 1466 | unsigned long size; |
83beee94 | 1467 | struct cds_lfht_iter iter; |
48ed1c18 | 1468 | |
04db56f8 | 1469 | node->reverse_hash = bit_reverse_ulong((unsigned long) hash); |
7b3893e4 | 1470 | size = rcu_dereference(ht->size); |
83beee94 | 1471 | for (;;) { |
04db56f8 | 1472 | _cds_lfht_add(ht, match, key, size, node, &iter, 0); |
83beee94 | 1473 | if (iter.node == node) { |
14360f1c | 1474 | ht_count_add(ht, size, hash); |
83beee94 MD |
1475 | return NULL; |
1476 | } | |
1477 | ||
1478 | if (!_cds_lfht_replace(ht, size, iter.node, iter.next, node)) | |
1479 | return iter.node; | |
1480 | } | |
48ed1c18 MD |
1481 | } |
1482 | ||
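/*
 * Replace sketch (assumed caller-side code): cds_lfht_add_replace()
 * atomically publishes the new node and returns the node it displaced,
 * or NULL if the key was absent. Like a deleted node, the displaced node
 * may only be reclaimed after a grace period, here via call_rcu() and
 * the hypothetical free_mynode() callback above.
 */
static void example_upsert(struct cds_lfht *ht, struct mynode *n)
{
	struct cds_lfht_node *old;

	rcu_read_lock();
	old = cds_lfht_add_replace(ht, (unsigned long) n->key,
			mynode_match, &n->key, &n->node);
	rcu_read_unlock();
	if (old)
		call_rcu(&caa_container_of(old, struct mynode,
				node)->rcu_head, free_mynode);
}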
9357c415 MD |
1483 | int cds_lfht_replace(struct cds_lfht *ht, struct cds_lfht_iter *old_iter, |
1484 | struct cds_lfht_node *new_node) | |
1485 | { | |
1486 | unsigned long size; | |
1487 | ||
7b3893e4 | 1488 | size = rcu_dereference(ht->size); |
9357c415 MD |
1489 | return _cds_lfht_replace(ht, size, old_iter->node, old_iter->next, |
1490 | new_node); | |
1491 | } | |
1492 | ||
1493 | int cds_lfht_del(struct cds_lfht *ht, struct cds_lfht_iter *iter) | |
2ed95849 | 1494 | { |
14360f1c | 1495 | unsigned long size, hash; |
df44348d | 1496 | int ret; |
abc490a1 | 1497 | |
7b3893e4 | 1498 | size = rcu_dereference(ht->size); |
9357c415 | 1499 | ret = _cds_lfht_del(ht, size, iter->node, 0); |
14360f1c | 1500 | if (!ret) { |
04db56f8 | 1501 | hash = bit_reverse_ulong(iter->node->reverse_hash); |
14360f1c LJ |
1502 | ht_count_del(ht, size, hash); |
1503 | } | |
df44348d | 1504 | return ret; |
2ed95849 | 1505 | } |
ab7d5fc6 | 1506 | |
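/*
 * Removal sketch (assumed caller-side code): deletion takes the iterator
 * filled by a prior lookup. On success the node is only logically
 * removed; its memory may be reclaimed solely after a grace period, here
 * via call_rcu() and the hypothetical free_mynode() callback above.
 */
static int example_delete(struct cds_lfht *ht, int key)
{
	struct cds_lfht_iter iter;
	struct cds_lfht_node *ht_node;
	int ret = -ENOENT;

	rcu_read_lock();
	cds_lfht_lookup(ht, (unsigned long) key, mynode_match, &key, &iter);
	ht_node = cds_lfht_iter_get_node(&iter);
	if (ht_node)
		ret = cds_lfht_del(ht, &iter);
	rcu_read_unlock();
	if (!ret)	/* we won the deletion race: we own the reclaim */
		call_rcu(&caa_container_of(ht_node, struct mynode,
				node)->rcu_head, free_mynode);
	return ret;
}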
abc490a1 | 1507 | static |
1ee8f000 | 1508 | int cds_lfht_delete_bucket(struct cds_lfht *ht) |
674f7a69 | 1509 | { |
14044b37 | 1510 | struct cds_lfht_node *node; |
4105056a | 1511 | unsigned long order, i, size; |
674f7a69 | 1512 | |
abc490a1 | 1513 | /* Check that the table is empty */ |
9d72a73f | 1514 | node = bucket_at(ht, 0); |
abc490a1 | 1515 | do { |
04db56f8 | 1516 | node = clear_flag(node)->next; |
1ee8f000 | 1517 | if (!is_bucket(node)) |
abc490a1 | 1518 | return -EPERM; |
273399de | 1519 | assert(!is_removed(node)); |
bb7b2f26 | 1520 | } while (!is_end(node)); |
4105056a MD |
1521 | /* |
1522 | * size accessed without rcu_dereference because hash table is | |
1523 | * being destroyed. | |
1524 | */ | |
7b3893e4 | 1525 | size = ht->size; |
1ee8f000 | 1526 | /* Internal sanity check: all remaining nodes should be bucket nodes */
48f1b16d LJ |
1527 | for (i = 0; i < size; i++) { |
1528 | node = bucket_at(ht, i); | |
1529 | dbg_printf("delete bucket: index %lu expected hash %lu actual hash %lu\n", |
1530 | i, i, bit_reverse_ulong(node->reverse_hash)); | |
1531 | assert(is_bucket(node->next)); | |
1532 | } | |
24365af7 | 1533 | |
48f1b16d LJ |
1534 | for (order = get_count_order_ulong(size); (long)order >= 0; order--) |
1535 | cds_lfht_free_bucket_table(ht, order); | |
5488222b | 1536 | |
abc490a1 | 1537 | return 0; |
674f7a69 MD |
1538 | } |
1539 | ||
1540 | /* | |
1541 | * Should only be called once no concurrent readers or writers can |
1542 | * possibly access the table anymore. |
1543 | */ | |
b7d619b0 | 1544 | int cds_lfht_destroy(struct cds_lfht *ht, pthread_attr_t **attr) |
674f7a69 | 1545 | { |
5e28c532 MD |
1546 | int ret; |
1547 | ||
848d4088 | 1548 | /* Wait for in-flight resize operations to complete */ |
24953e08 MD |
1549 | _CMM_STORE_SHARED(ht->in_progress_destroy, 1); |
1550 | cmm_smp_mb(); /* Store destroy before load resize */ | |
848d4088 MD |
1551 | while (uatomic_read(&ht->in_progress_resize)) |
1552 | poll(NULL, 0, 100); /* wait for 100ms */ | |
1ee8f000 | 1553 | ret = cds_lfht_delete_bucket(ht); |
abc490a1 MD |
1554 | if (ret) |
1555 | return ret; | |
5afadd12 | 1556 | free_split_items_count(ht); |
b7d619b0 MD |
1557 | if (attr) |
1558 | *attr = ht->resize_attr; | |
98808fb1 | 1559 | poison_free(ht); |
5e28c532 | 1560 | return ret; |
674f7a69 MD |
1561 | } |
1562 | ||
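/*
 * Teardown sketch (assumed caller-side code): cds_lfht_destroy() fails
 * with -EPERM (via cds_lfht_delete_bucket()) on a non-empty table, so
 * every node must be deleted first. A real application must also ensure
 * all call_rcu() reclaim callbacks have run before it considers the
 * nodes' memory fully released. example_teardown() is a hypothetical
 * name; destroy must not be called from a read-side critical section.
 */
static int example_teardown(struct cds_lfht *ht)
{
	struct cds_lfht_iter iter;
	struct cds_lfht_node *ht_node;

	rcu_read_lock();
	cds_lfht_first(ht, &iter);
	while ((ht_node = cds_lfht_iter_get_node(&iter)) != NULL) {
		if (!cds_lfht_del(ht, &iter))
			call_rcu(&caa_container_of(ht_node, struct mynode,
					node)->rcu_head, free_mynode);
		cds_lfht_next(ht, &iter);
	}
	rcu_read_unlock();
	return cds_lfht_destroy(ht, NULL);	/* also waits for resizes */
}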
14044b37 | 1563 | void cds_lfht_count_nodes(struct cds_lfht *ht, |
d933dd0e | 1564 | long *approx_before, |
273399de | 1565 | unsigned long *count, |
973e5e1b | 1566 | unsigned long *removed, |
d933dd0e | 1567 | long *approx_after) |
273399de | 1568 | { |
14044b37 | 1569 | struct cds_lfht_node *node, *next; |
1ee8f000 | 1570 | unsigned long nr_bucket = 0; |
273399de | 1571 | |
7ed7682f | 1572 | *approx_before = 0; |
5afadd12 | 1573 | if (ht->split_count) { |
973e5e1b MD |
1574 | int i; |
1575 | ||
4c42f1b8 LJ |
1576 | for (i = 0; i < split_count_mask + 1; i++) { |
1577 | *approx_before += uatomic_read(&ht->split_count[i].add); | |
1578 | *approx_before -= uatomic_read(&ht->split_count[i].del); | |
973e5e1b MD |
1579 | } |
1580 | } | |
1581 | ||
273399de MD |
1582 | *count = 0; |
1583 | *removed = 0; | |
1584 | ||
1ee8f000 | 1585 | /* Count non-bucket nodes in the table */ |
9d72a73f | 1586 | node = bucket_at(ht, 0); |
273399de | 1587 | do { |
04db56f8 | 1588 | next = rcu_dereference(node->next); |
b198f0fd | 1589 | if (is_removed(next)) { |
1ee8f000 | 1590 | if (!is_bucket(next)) |
973e5e1b MD |
1591 | (*removed)++; |
1592 | else | |
1ee8f000 LJ |
1593 | (nr_bucket)++; |
1594 | } else if (!is_bucket(next)) | |
273399de | 1595 | (*count)++; |
24365af7 | 1596 | else |
1ee8f000 | 1597 | (nr_bucket)++; |
273399de | 1598 | node = clear_flag(next); |
bb7b2f26 | 1599 | } while (!is_end(node)); |
1ee8f000 | 1600 | dbg_printf("number of bucket nodes: %lu\n", nr_bucket); |
7ed7682f | 1601 | *approx_after = 0; |
5afadd12 | 1602 | if (ht->split_count) { |
973e5e1b MD |
1603 | int i; |
1604 | ||
4c42f1b8 LJ |
1605 | for (i = 0; i < split_count_mask + 1; i++) { |
1606 | *approx_after += uatomic_read(&ht->split_count[i].add); | |
1607 | *approx_after -= uatomic_read(&ht->split_count[i].del); | |
973e5e1b MD |
1608 | } |
1609 | } | |
273399de MD |
1610 | } |
1611 | ||
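/*
 * Accounting sketch (assumed caller-side code): the split-counters
 * provide cheap approximations around the exact, O(n) traversal
 * performed above. Must be called under rcu_read_lock(); assumes
 * <stdio.h> is included. example_report_size() is a hypothetical name.
 */
static void example_report_size(struct cds_lfht *ht)
{
	long approx_before, approx_after;
	unsigned long count, removed;

	rcu_read_lock();
	cds_lfht_count_nodes(ht, &approx_before, &count, &removed,
			&approx_after);
	rcu_read_unlock();
	printf("~%ld before, %lu live, %lu logically removed, ~%ld after\n",
		approx_before, count, removed, approx_after);
}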
1475579c | 1612 | /* called with resize mutex held */ |
abc490a1 | 1613 | static |
4105056a | 1614 | void _do_cds_lfht_grow(struct cds_lfht *ht, |
1475579c | 1615 | unsigned long old_size, unsigned long new_size) |
abc490a1 | 1616 | { |
1475579c | 1617 | unsigned long old_order, new_order; |
1475579c | 1618 | |
93d46c39 LJ |
1619 | old_order = get_count_order_ulong(old_size); |
1620 | new_order = get_count_order_ulong(new_size); | |
1a401918 LJ |
1621 | dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n", |
1622 | old_size, old_order, new_size, new_order); | |
1475579c | 1623 | assert(new_size > old_size); |
93d46c39 | 1624 | init_table(ht, old_order + 1, new_order); |
abc490a1 MD |
1625 | } |
1626 | ||
1627 | /* called with resize mutex held */ | |
1628 | static | |
4105056a | 1629 | void _do_cds_lfht_shrink(struct cds_lfht *ht, |
1475579c | 1630 | unsigned long old_size, unsigned long new_size) |
464a1ec9 | 1631 | { |
1475579c | 1632 | unsigned long old_order, new_order; |
464a1ec9 | 1633 | |
d0d8f9aa | 1634 | new_size = max(new_size, MIN_TABLE_SIZE); |
93d46c39 LJ |
1635 | old_order = get_count_order_ulong(old_size); |
1636 | new_order = get_count_order_ulong(new_size); | |
1a401918 LJ |
1637 | dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n", |
1638 | old_size, old_order, new_size, new_order); | |
1475579c | 1639 | assert(new_size < old_size); |
1475579c | 1640 | |
1ee8f000 | 1641 | /* Unlink and remove all bucket nodes beyond the new size. */
93d46c39 | 1642 | fini_table(ht, new_order + 1, old_order); |
464a1ec9 MD |
1643 | } |
1644 | ||
1475579c MD |
1645 | |
1646 | /* called with resize mutex held */ | |
1647 | static | |
1648 | void _do_cds_lfht_resize(struct cds_lfht *ht) | |
1649 | { | |
1650 | unsigned long new_size, old_size; | |
4105056a MD |
1651 | |
1652 | /* | |
1653 | * Resize the table; redo if the target size has changed under us. |
1654 | */ | |
1655 | do { | |
d2be3620 MD |
1656 | assert(uatomic_read(&ht->in_progress_resize)); |
1657 | if (CMM_LOAD_SHARED(ht->in_progress_destroy)) | |
1658 | break; | |
7b3893e4 LJ |
1659 | ht->resize_initiated = 1; |
1660 | old_size = ht->size; | |
1661 | new_size = CMM_LOAD_SHARED(ht->resize_target); | |
4105056a MD |
1662 | if (old_size < new_size) |
1663 | _do_cds_lfht_grow(ht, old_size, new_size); | |
1664 | else if (old_size > new_size) | |
1665 | _do_cds_lfht_shrink(ht, old_size, new_size); | |
7b3893e4 | 1666 | ht->resize_initiated = 0; |
4105056a MD |
1667 | /* write resize_initiated before read resize_target */ |
1668 | cmm_smp_mb(); | |
7b3893e4 | 1669 | } while (ht->size != CMM_LOAD_SHARED(ht->resize_target)); |
1475579c MD |
1670 | } |
1671 | ||
abc490a1 | 1672 | static |
ab65b890 | 1673 | unsigned long resize_target_grow(struct cds_lfht *ht, unsigned long new_size) |
464a1ec9 | 1674 | { |
7b3893e4 | 1675 | return _uatomic_xchg_monotonic_increase(&ht->resize_target, new_size); |
464a1ec9 MD |
1676 | } |
1677 | ||
1475579c | 1678 | static |
4105056a | 1679 | void resize_target_update_count(struct cds_lfht *ht, |
b8af5011 | 1680 | unsigned long count) |
1475579c | 1681 | { |
d0d8f9aa | 1682 | count = max(count, MIN_TABLE_SIZE); |
747d725c | 1683 | count = min(count, ht->max_nr_buckets); |
7b3893e4 | 1684 | uatomic_set(&ht->resize_target, count); |
1475579c MD |
1685 | } |
1686 | ||
1687 | void cds_lfht_resize(struct cds_lfht *ht, unsigned long new_size) | |
464a1ec9 | 1688 | { |
4105056a | 1689 | resize_target_update_count(ht, new_size); |
7b3893e4 | 1690 | CMM_STORE_SHARED(ht->resize_initiated, 1); |
7b17c13e | 1691 | ht->flavor->thread_offline(); |
1475579c MD |
1692 | pthread_mutex_lock(&ht->resize_mutex); |
1693 | _do_cds_lfht_resize(ht); | |
1694 | pthread_mutex_unlock(&ht->resize_mutex); | |
7b17c13e | 1695 | ht->flavor->thread_online(); |
abc490a1 | 1696 | } |
464a1ec9 | 1697 | |
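/*
 * Manual resize sketch (assumed caller-side code): the blocking
 * cds_lfht_resize() must not be called from within a read-side critical
 * section, since it puts the calling thread offline while holding the
 * resize mutex. Pre-sizing before a known bulk insert avoids the
 * incremental grow steps the add path would otherwise trigger;
 * example_presize() is a hypothetical name.
 */
static void example_presize(struct cds_lfht *ht, unsigned long expected_nodes)
{
	/* The target is clamped and rounded internally like any size. */
	cds_lfht_resize(ht, expected_nodes);
}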
abc490a1 MD |
1698 | static |
1699 | void do_resize_cb(struct rcu_head *head) | |
1700 | { | |
1701 | struct rcu_resize_work *work = | |
1702 | caa_container_of(head, struct rcu_resize_work, head); | |
14044b37 | 1703 | struct cds_lfht *ht = work->ht; |
abc490a1 | 1704 | |
7b17c13e | 1705 | ht->flavor->thread_offline(); |
abc490a1 | 1706 | pthread_mutex_lock(&ht->resize_mutex); |
14044b37 | 1707 | _do_cds_lfht_resize(ht); |
abc490a1 | 1708 | pthread_mutex_unlock(&ht->resize_mutex); |
7b17c13e | 1709 | ht->flavor->thread_online(); |
98808fb1 | 1710 | poison_free(work); |
848d4088 MD |
1711 | cmm_smp_mb(); /* finish resize before decrement */ |
1712 | uatomic_dec(&ht->in_progress_resize); | |
464a1ec9 MD |
1713 | } |
1714 | ||
abc490a1 | 1715 | static |
f1f119ee | 1716 | void __cds_lfht_resize_lazy_launch(struct cds_lfht *ht) |
ab7d5fc6 | 1717 | { |
abc490a1 MD |
1718 | struct rcu_resize_work *work; |
1719 | ||
4105056a MD |
1720 | /* Store resize_target before read resize_initiated */ |
1721 | cmm_smp_mb(); | |
7b3893e4 | 1722 | if (!CMM_LOAD_SHARED(ht->resize_initiated)) { |
848d4088 | 1723 | uatomic_inc(&ht->in_progress_resize); |
59290e9d | 1724 | cmm_smp_mb(); /* increment resize count before load destroy */ |
ed35e6d8 MD |
1725 | if (CMM_LOAD_SHARED(ht->in_progress_destroy)) { |
1726 | uatomic_dec(&ht->in_progress_resize); | |
59290e9d | 1727 | return; |
ed35e6d8 | 1728 | } |
f9830efd MD |
1729 | work = malloc(sizeof(*work));
	if (!work) {
		/* Allocation failure: give up on this lazy resize attempt. */
		uatomic_dec(&ht->in_progress_resize);
		return;
	}
1730 | work->ht = ht; |
7b17c13e | 1731 | ht->flavor->update_call_rcu(&work->head, do_resize_cb); |
7b3893e4 | 1732 | CMM_STORE_SHARED(ht->resize_initiated, 1); |
f9830efd | 1733 | } |
ab7d5fc6 | 1734 | } |
3171717f | 1735 | |
f1f119ee LJ |
1736 | static |
1737 | void cds_lfht_resize_lazy_grow(struct cds_lfht *ht, unsigned long size, int growth) | |
1738 | { | |
1739 | unsigned long target_size = size << growth; | |
1740 | ||
747d725c | 1741 | target_size = min(target_size, ht->max_nr_buckets); |
f1f119ee LJ |
1742 | if (resize_target_grow(ht, target_size) >= target_size) |
1743 | return; | |
1744 | ||
1745 | __cds_lfht_resize_lazy_launch(ht); | |
1746 | } | |
1747 | ||
89bb121d LJ |
1748 | /* |
1749 | * We favor grow operations over shrink operations: a shrink never |
1750 | * occurs while a grow is queued for lazy execution, and a grow |
1751 | * cancels any lazily queued shrink. |
1752 | */ | |
3171717f | 1753 | static |
4105056a | 1754 | void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size, |
3171717f MD |
1755 | unsigned long count) |
1756 | { | |
b8af5011 MD |
1757 | if (!(ht->flags & CDS_LFHT_AUTO_RESIZE)) |
1758 | return; | |
d0d8f9aa | 1759 | count = max(count, MIN_TABLE_SIZE); |
747d725c | 1760 | count = min(count, ht->max_nr_buckets); |
89bb121d LJ |
1761 | if (count == size) |
1762 | return; /* Already the right size, no resize needed */ | |
1763 | if (count > size) { /* lazy grow */ | |
1764 | if (resize_target_grow(ht, count) >= count) | |
1765 | return; | |
1766 | } else { /* lazy shrink */ | |
1767 | for (;;) { | |
1768 | unsigned long s; | |
1769 | ||
7b3893e4 | 1770 | s = uatomic_cmpxchg(&ht->resize_target, size, count); |
89bb121d LJ |
1771 | if (s == size) |
1772 | break; /* no resize needed */ | |
1773 | if (s > size) | |
1774 | return; /* a grow is (or was just) in progress */ |
1775 | if (s <= count) | |
1776 | return; /* another thread already requested an equal or deeper shrink */ |
1777 | size = s; | |
1778 | } | |
1779 | } | |
f1f119ee | 1780 | __cds_lfht_resize_lazy_launch(ht); |
3171717f | 1781 | } |