/*
 * urcu-bp.c
 *
 * Userspace RCU library, "bulletproof" version.
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */

#define _LGPL_SOURCE
#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <poll.h>
#include <unistd.h>
#include <sys/mman.h>

#include "urcu/arch.h"
#include "urcu/wfcqueue.h"
#include "urcu/map/urcu-bp.h"
#include "urcu/static/urcu-bp.h"
#include "urcu-pointer.h"
#include "urcu/tls-compat.h"

#include "urcu-die.h"

/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#undef _LGPL_SOURCE
#include "urcu-bp.h"
#define _LGPL_SOURCE

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

#ifdef __linux__
static
void *mremap_wrapper(void *old_address, size_t old_size,
		size_t new_size, int flags)
{
	return mremap(old_address, old_size, new_size, flags);
}
#else

#define MREMAP_MAYMOVE	1
#define MREMAP_FIXED	2

/*
 * mremap wrapper for non-Linux systems not allowing MAYMOVE.
 * This is not generic.
 */
static
void *mremap_wrapper(void *old_address, size_t old_size,
		size_t new_size, int flags)
{
	assert(!(flags & MREMAP_MAYMOVE));

	return MAP_FAILED;
}
#endif

/* Sleep delay in ms */
#define RCU_SLEEP_DELAY_MS	10
#define INIT_NR_THREADS		8
#define ARENA_INIT_ALLOC		\
	sizeof(struct registry_chunk)	\
	+ INIT_NR_THREADS * sizeof(struct rcu_reader)

/*
 * Active attempts to check for reader Q.S. before calling sleep().
 */
#define RCU_QS_ACTIVE_ATTEMPTS 100

static
int rcu_bp_refcount;

/* If the headers do not support the membarrier system call, fall back on smp_mb. */
#ifdef __NR_membarrier
# define membarrier(...)	syscall(__NR_membarrier, __VA_ARGS__)
#else
# define membarrier(...)	-ENOSYS
#endif

enum membarrier_cmd {
	MEMBARRIER_CMD_QUERY = 0,
	MEMBARRIER_CMD_SHARED = (1 << 0),
};

static
void __attribute__((constructor)) rcu_bp_init(void);
static
void __attribute__((destructor)) rcu_bp_exit(void);

#ifndef CONFIG_RCU_FORCE_SYS_MEMBARRIER
int urcu_bp_has_sys_membarrier;
#endif

/*
 * rcu_gp_lock ensures mutual exclusion between threads calling
 * synchronize_rcu().
 */
static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
/*
 * rcu_registry_lock ensures mutual exclusion between threads
 * registering and unregistering themselves to/from the registry, and
 * with threads reading that registry from synchronize_rcu(). However,
 * this lock is not held throughout the entire grace-period wait: it is
 * sporadically released between iterations on the registry.
 * rcu_registry_lock may nest inside rcu_gp_lock.
 */
static pthread_mutex_t rcu_registry_lock = PTHREAD_MUTEX_INITIALIZER;
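
/*
 * Illustration (added commentary, not part of the original code): the
 * only lock order used in this file, e.g. by synchronize_rcu() below,
 * is rcu_gp_lock taken first, then rcu_registry_lock nested inside it:
 *
 *	mutex_lock(&rcu_gp_lock);
 *	mutex_lock(&rcu_registry_lock);
 *	...
 *	mutex_unlock(&rcu_registry_lock);
 *	mutex_unlock(&rcu_gp_lock);
 *
 * Taking them in the opposite order could deadlock against a writer.
 */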

static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
static int initialized;

static pthread_key_t urcu_bp_key;

struct rcu_gp rcu_gp = { .ctr = RCU_GP_COUNT };

/*
 * Pointer to registry elements. Written to only by each individual
 * reader. Read by both the reader and the writers.
 */
DEFINE_URCU_TLS(struct rcu_reader *, rcu_reader);

static CDS_LIST_HEAD(registry);

struct registry_chunk {
	size_t data_len;		/* data length */
	size_t used;			/* amount of data used */
	struct cds_list_head node;	/* chunk_list node */
	char data[];
};

struct registry_arena {
	struct cds_list_head chunk_list;
};

static struct registry_arena registry_arena = {
	.chunk_list = CDS_LIST_HEAD_INIT(registry_arena.chunk_list),
};

/* Saved fork signal mask, protected by rcu_gp_lock */
static sigset_t saved_fork_signal_mask;

static void mutex_lock(pthread_mutex_t *mutex)
{
	int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
	ret = pthread_mutex_lock(mutex);
	if (ret)
		urcu_die(ret);
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
	while ((ret = pthread_mutex_trylock(mutex)) != 0) {
		if (ret != EBUSY && ret != EINTR)
			urcu_die(ret);
		poll(NULL, 0, 10);
	}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}

static void mutex_unlock(pthread_mutex_t *mutex)
{
	int ret;

	ret = pthread_mutex_unlock(mutex);
	if (ret)
		urcu_die(ret);
}

static void smp_mb_master(void)
{
	if (caa_likely(urcu_bp_has_sys_membarrier))
		(void) membarrier(MEMBARRIER_CMD_SHARED, 0);
	else
		cmm_smp_mb();
}
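
/*
 * Design note (added commentary, hedged): when sys_membarrier() is
 * available, the writer-side smp_mb_master() above forces a barrier on
 * every running thread, which is what allows the matching read-side
 * barriers to be demoted to mere compiler barriers (see
 * urcu/static/urcu-bp.h). Readers thus pay almost nothing; the cost is
 * shifted onto the infrequent grace-period path.
 */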

/*
 * Always called with rcu_registry_lock held. Releases this lock between
 * iterations and grabs it again. Holds the lock when it returns.
 */
static void wait_for_readers(struct cds_list_head *input_readers,
		struct cds_list_head *cur_snap_readers,
		struct cds_list_head *qsreaders)
{
	unsigned int wait_loops = 0;
	struct rcu_reader *index, *tmp;

	/*
	 * Wait for each thread URCU_TLS(rcu_reader).ctr to either
	 * indicate quiescence (not nested), or observe the current
	 * rcu_gp.ctr value.
	 */
	for (;;) {
		if (wait_loops < RCU_QS_ACTIVE_ATTEMPTS)
			wait_loops++;

		cds_list_for_each_entry_safe(index, tmp, input_readers, node) {
			switch (rcu_reader_state(&index->ctr)) {
			case RCU_READER_ACTIVE_CURRENT:
				if (cur_snap_readers) {
					cds_list_move(&index->node,
						cur_snap_readers);
					break;
				}
				/* Fall-through */
			case RCU_READER_INACTIVE:
				cds_list_move(&index->node, qsreaders);
				break;
			case RCU_READER_ACTIVE_OLD:
				/*
				 * Old snapshot. Leaving node in
				 * input_readers will make us busy-loop
				 * until the snapshot becomes current or
				 * the reader becomes inactive.
				 */
				break;
			}
		}

		if (cds_list_empty(input_readers)) {
			break;
		} else {
			/* Temporarily unlock the registry lock. */
			mutex_unlock(&rcu_registry_lock);
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS)
				(void) poll(NULL, 0, RCU_SLEEP_DELAY_MS);
			else
				caa_cpu_relax();
			/* Re-lock the registry lock before the next loop. */
			mutex_lock(&rcu_registry_lock);
		}
	}
}

void synchronize_rcu(void)
{
	CDS_LIST_HEAD(cur_snap_readers);
	CDS_LIST_HEAD(qsreaders);
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	assert(!ret);
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	assert(!ret);

	mutex_lock(&rcu_gp_lock);

	mutex_lock(&rcu_registry_lock);

	if (cds_list_empty(&registry))
		goto out;

	/*
	 * All threads should read qparity before accessing the data
	 * structure pointed to by the new ptr. Write the new ptr
	 * before changing the qparity.
	 */
	smp_mb_master();

	/*
	 * Wait for readers to observe original parity or be quiescent.
	 * wait_for_readers() can release and grab again rcu_registry_lock
	 * internally.
	 */
	wait_for_readers(&registry, &cur_snap_readers, &qsreaders);

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/* Switch parity: 0 -> 1, 1 -> 0 */
	CMM_STORE_SHARED(rcu_gp.ctr, rcu_gp.ctr ^ RCU_GP_CTR_PHASE);

	/*
	 * Must commit qparity update to memory before waiting for other parity
	 * quiescent state. Failure to do so could result in the writer waiting
	 * forever while new readers are always accessing data (no progress).
	 * Ensured by CMM_STORE_SHARED and CMM_LOAD_SHARED.
	 */

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/*
	 * Wait for readers to observe new parity or be quiescent.
	 * wait_for_readers() can release and grab again rcu_registry_lock
	 * internally.
	 */
	wait_for_readers(&cur_snap_readers, NULL, &qsreaders);

	/*
	 * Put quiescent reader list back into registry.
	 */
	cds_list_splice(&qsreaders, &registry);

	/*
	 * Finish waiting for reader threads before letting the old ptr be
	 * freed.
	 */
	smp_mb_master();
out:
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	assert(!ret);
}

/*
 * library wrappers to be used by non-LGPL compatible source code.
 */

void rcu_read_lock(void)
{
	_rcu_read_lock();
}

void rcu_read_unlock(void)
{
	_rcu_read_unlock();
}

int rcu_read_ongoing(void)
{
	return _rcu_read_ongoing();
}
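
/*
 * Example (hypothetical application code, not part of this file): a
 * typical read-side critical section using the wrappers above together
 * with rcu_dereference(). "shared_ptr" and "use()" are illustrative
 * names only:
 *
 *	struct mydata *p;
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(shared_ptr);
 *	if (p)
 *		use(p);
 *	rcu_read_unlock();
 */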

/*
 * Only grow for now. If empty, allocate an ARENA_INIT_ALLOC-sized chunk.
 * Else, try expanding the last chunk. If this fails, allocate a new
 * chunk twice as big as the last chunk.
 * Memory used by chunks _never_ moves. A chunk could theoretically be
 * freed when all "used" slots are released, but we don't do it at this
 * point.
 */
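/*
 * Worked example (added commentary): with INIT_NR_THREADS = 8, the
 * first chunk holds exactly 8 struct rcu_reader slots. Each subsequent
 * allocation doubles the previous chunk size, so total capacity grows
 * roughly as 8 + 16 + 32 + ... reader slots (give or take the
 * per-chunk header), while every already-handed-out slot keeps its
 * address.
 */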
static
void expand_arena(struct registry_arena *arena)
{
	struct registry_chunk *new_chunk, *last_chunk;
	size_t old_chunk_len, new_chunk_len;

	/* No chunk. */
	if (cds_list_empty(&arena->chunk_list)) {
		assert(ARENA_INIT_ALLOC >=
			sizeof(struct registry_chunk)
			+ sizeof(struct rcu_reader));
		new_chunk_len = ARENA_INIT_ALLOC;
		new_chunk = mmap(NULL, new_chunk_len,
			PROT_READ | PROT_WRITE,
			MAP_ANONYMOUS | MAP_PRIVATE,
			-1, 0);
		if (new_chunk == MAP_FAILED)
			abort();
		memset(new_chunk, 0, new_chunk_len);
		new_chunk->data_len =
			new_chunk_len - sizeof(struct registry_chunk);
		cds_list_add_tail(&new_chunk->node, &arena->chunk_list);
		return;		/* We're done. */
	}

	/* Try expanding last chunk. */
	last_chunk = cds_list_entry(arena->chunk_list.prev,
		struct registry_chunk, node);
	old_chunk_len =
		last_chunk->data_len + sizeof(struct registry_chunk);
	new_chunk_len = old_chunk_len << 1;

	/* Don't allow memory mapping to move, just expand. */
	new_chunk = mremap_wrapper(last_chunk, old_chunk_len,
		new_chunk_len, 0);
	if (new_chunk != MAP_FAILED) {
		/* Should not have moved. */
		assert(new_chunk == last_chunk);
		memset((char *) last_chunk + old_chunk_len, 0,
			new_chunk_len - old_chunk_len);
		last_chunk->data_len =
			new_chunk_len - sizeof(struct registry_chunk);
		return;		/* We're done. */
	}

	/* Remap did not succeed, we need to add a new chunk. */
	new_chunk = mmap(NULL, new_chunk_len,
		PROT_READ | PROT_WRITE,
		MAP_ANONYMOUS | MAP_PRIVATE,
		-1, 0);
	if (new_chunk == MAP_FAILED)
		abort();
	memset(new_chunk, 0, new_chunk_len);
	new_chunk->data_len =
		new_chunk_len - sizeof(struct registry_chunk);
	cds_list_add_tail(&new_chunk->node, &arena->chunk_list);
}

static
struct rcu_reader *arena_alloc(struct registry_arena *arena)
{
	struct registry_chunk *chunk;
	struct rcu_reader *rcu_reader_reg;
	int expand_done = 0;	/* Only allow to expand once per alloc */
	size_t len = sizeof(struct rcu_reader);

retry:
	cds_list_for_each_entry(chunk, &arena->chunk_list, node) {
		if (chunk->data_len - chunk->used < len)
			continue;
		/* Find spot */
		for (rcu_reader_reg = (struct rcu_reader *) &chunk->data[0];
				rcu_reader_reg < (struct rcu_reader *) &chunk->data[chunk->data_len];
				rcu_reader_reg++) {
			if (!rcu_reader_reg->alloc) {
				rcu_reader_reg->alloc = 1;
				chunk->used += len;
				return rcu_reader_reg;
			}
		}
	}

	if (!expand_done) {
		expand_arena(arena);
		expand_done = 1;
		goto retry;
	}

	return NULL;
}

/* Called with signals off and mutex locked */
static
void add_thread(void)
{
	struct rcu_reader *rcu_reader_reg;
	int ret;

	rcu_reader_reg = arena_alloc(&registry_arena);
	if (!rcu_reader_reg)
		abort();
	ret = pthread_setspecific(urcu_bp_key, rcu_reader_reg);
	if (ret)
		abort();

	/* Add to registry */
	rcu_reader_reg->tid = pthread_self();
	assert(rcu_reader_reg->ctr == 0);
	cds_list_add(&rcu_reader_reg->node, &registry);
	/*
	 * Reader threads are pointing to the reader registry. This is
	 * why its memory should never be relocated.
	 */
	URCU_TLS(rcu_reader) = rcu_reader_reg;
}

/* Called with mutex locked */
static
void cleanup_thread(struct registry_chunk *chunk,
		struct rcu_reader *rcu_reader_reg)
{
	rcu_reader_reg->ctr = 0;
	cds_list_del(&rcu_reader_reg->node);
	rcu_reader_reg->tid = 0;
	rcu_reader_reg->alloc = 0;
	chunk->used -= sizeof(struct rcu_reader);
}

static
struct registry_chunk *find_chunk(struct rcu_reader *rcu_reader_reg)
{
	struct registry_chunk *chunk;

	cds_list_for_each_entry(chunk, &registry_arena.chunk_list, node) {
		if (rcu_reader_reg < (struct rcu_reader *) &chunk->data[0])
			continue;
		if (rcu_reader_reg >= (struct rcu_reader *) &chunk->data[chunk->data_len])
			continue;
		return chunk;
	}
	return NULL;
}

/* Called with signals off and mutex locked */
static
void remove_thread(struct rcu_reader *rcu_reader_reg)
{
	cleanup_thread(find_chunk(rcu_reader_reg), rcu_reader_reg);
	URCU_TLS(rcu_reader) = NULL;
}

/* Disable signals, take mutex, add to registry */
void rcu_bp_register(void)
{
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();

	/*
	 * Check if a signal handler concurrently registered our thread
	 * since the check in rcu_read_lock().
	 */
	if (URCU_TLS(rcu_reader))
		goto end;

	/*
	 * Take care of early registration before urcu_bp constructor.
	 */
	rcu_bp_init();

	mutex_lock(&rcu_registry_lock);
	add_thread();
	mutex_unlock(&rcu_registry_lock);
end:
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
}

/* Disable signals, take mutex, remove from registry */
static
void rcu_bp_unregister(struct rcu_reader *rcu_reader_reg)
{
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();

	mutex_lock(&rcu_registry_lock);
	remove_thread(rcu_reader_reg);
	mutex_unlock(&rcu_registry_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
	rcu_bp_exit();
}

/*
 * Remove thread from the registry when it exits, and flag it as
 * destroyed so garbage collection can take care of it.
 */
static
void urcu_bp_thread_exit_notifier(void *rcu_key)
{
	rcu_bp_unregister(rcu_key);
}

#ifdef CONFIG_RCU_FORCE_SYS_MEMBARRIER
static
void rcu_sys_membarrier_status(int available)
{
	if (!available)
		abort();
}
#else
static
void rcu_sys_membarrier_status(int available)
{
	if (available)
		urcu_bp_has_sys_membarrier = 1;
}
#endif

static
void rcu_bp_init(void)
{
	mutex_lock(&init_lock);
	if (!rcu_bp_refcount++) {
		int ret;

		ret = pthread_key_create(&urcu_bp_key,
				urcu_bp_thread_exit_notifier);
		if (ret)
			abort();
		ret = membarrier(MEMBARRIER_CMD_QUERY, 0);
		rcu_sys_membarrier_status(ret >= 0
				&& (ret & MEMBARRIER_CMD_SHARED));
		initialized = 1;
	}
	mutex_unlock(&init_lock);
}

static
void rcu_bp_exit(void)
{
	mutex_lock(&init_lock);
	if (!--rcu_bp_refcount) {
		struct registry_chunk *chunk, *tmp;
		int ret;

		cds_list_for_each_entry_safe(chunk, tmp,
				&registry_arena.chunk_list, node) {
			munmap(chunk, chunk->data_len
					+ sizeof(struct registry_chunk));
		}
		CDS_INIT_LIST_HEAD(&registry_arena.chunk_list);
		ret = pthread_key_delete(urcu_bp_key);
		if (ret)
			abort();
	}
	mutex_unlock(&init_lock);
}

/*
 * Holding the rcu_gp_lock and rcu_registry_lock across fork will make
 * sure fork() does not race with a concurrent thread executing with
 * any of those locks held. This ensures that the registry and data
 * protected by rcu_gp_lock are in a coherent state in the child.
 * (A wiring example follows rcu_bp_after_fork_child() below.)
 */
void rcu_bp_before_fork(void)
{
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	assert(!ret);
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	assert(!ret);
	mutex_lock(&rcu_gp_lock);
	mutex_lock(&rcu_registry_lock);
	saved_fork_signal_mask = oldmask;
}

void rcu_bp_after_fork_parent(void)
{
	sigset_t oldmask;
	int ret;

	oldmask = saved_fork_signal_mask;
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	assert(!ret);
}

/*
 * Prune all entries from registry except our own thread. Fits the Linux
 * fork behavior. Called with rcu_gp_lock and rcu_registry_lock held.
 */
static
void urcu_bp_prune_registry(void)
{
	struct registry_chunk *chunk;
	struct rcu_reader *rcu_reader_reg;

	cds_list_for_each_entry(chunk, &registry_arena.chunk_list, node) {
		for (rcu_reader_reg = (struct rcu_reader *) &chunk->data[0];
				rcu_reader_reg < (struct rcu_reader *) &chunk->data[chunk->data_len];
				rcu_reader_reg++) {
			if (!rcu_reader_reg->alloc)
				continue;
			if (rcu_reader_reg->tid == pthread_self())
				continue;
			cleanup_thread(chunk, rcu_reader_reg);
		}
	}
}

void rcu_bp_after_fork_child(void)
{
	sigset_t oldmask;
	int ret;

	urcu_bp_prune_registry();
	oldmask = saved_fork_signal_mask;
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	assert(!ret);
}
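
/*
 * Example (hypothetical application code): one way to wire up the fork
 * handlers above is pthread_atfork(), called once, early in the
 * program. This is only a sketch of the intended call order; consult
 * the library documentation for your version's exact requirements:
 *
 *	ret = pthread_atfork(rcu_bp_before_fork,
 *			rcu_bp_after_fork_parent,
 *			rcu_bp_after_fork_child);
 *	if (ret)
 *		abort();
 */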

void *rcu_dereference_sym_bp(void *p)
{
	return _rcu_dereference(p);
}

void *rcu_set_pointer_sym_bp(void **p, void *v)
{
	cmm_wmb();
	uatomic_set(p, v);
	return v;
}

void *rcu_xchg_pointer_sym_bp(void **p, void *v)
{
	cmm_wmb();
	return uatomic_xchg(p, v);
}

void *rcu_cmpxchg_pointer_sym_bp(void **p, void *old, void *_new)
{
	cmm_wmb();
	return uatomic_cmpxchg(p, old, _new);
}
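
/*
 * Example (hypothetical application code): a typical writer-side
 * update built on the publication wrappers above. "shared_ptr" and
 * "init()" are illustrative names; applications normally go through
 * the rcu_xchg_pointer() macro rather than the _sym_bp symbol
 * directly:
 *
 *	struct mydata *newp, *oldp;
 *
 *	newp = malloc(sizeof(*newp));
 *	if (!newp)
 *		abort();
 *	init(newp);
 *	oldp = rcu_xchg_pointer(&shared_ptr, newp);
 *	synchronize_rcu();	// wait for all pre-existing readers
 *	free(oldp);		// safe: no reader can still see oldp
 */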

DEFINE_RCU_FLAVOR(rcu_flavor);

#include "urcu-call-rcu-impl.h"
#include "urcu-defer-impl.h"