Fix: urcu-bp: misaligned reader accesses
[urcu.git] / src / urcu-bp.c
/*
 * urcu-bp.c
 *
 * Userspace RCU library, "bulletproof" version.
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */

#define URCU_NO_COMPAT_IDENTIFIERS
#define _LGPL_SOURCE
#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <poll.h>
#include <unistd.h>
#include <stdbool.h>
#include <sys/mman.h>

#include <urcu/config.h>
#include <urcu/arch.h>
#include <urcu/wfcqueue.h>
#include <urcu/map/urcu-bp.h>
#include <urcu/static/urcu-bp.h>
#include <urcu/pointer.h>
#include <urcu/tls-compat.h>

#include "urcu-die.h"
#include "urcu-utils.h"

#define URCU_API_MAP
/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#undef _LGPL_SOURCE
#include <urcu/urcu-bp.h>
#define _LGPL_SOURCE

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS	MAP_ANON
#endif

#ifdef __linux__
static
void *mremap_wrapper(void *old_address, size_t old_size,
		size_t new_size, int flags)
{
	return mremap(old_address, old_size, new_size, flags);
}
#else

#define MREMAP_MAYMOVE	1
#define MREMAP_FIXED	2

/*
 * mremap wrapper for non-Linux systems. It does not allow
 * MREMAP_MAYMOVE, and is not a generic mremap replacement.
 */
static
void *mremap_wrapper(void *old_address __attribute__((unused)),
		size_t old_size __attribute__((unused)),
		size_t new_size __attribute__((unused)),
		int flags)
{
	assert(!(flags & MREMAP_MAYMOVE));

	return MAP_FAILED;
}
#endif
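
/*
 * Note: expand_arena() below relies on mremap_wrapper() growing a
 * chunk strictly in place (no MREMAP_MAYMOVE). Reader threads keep
 * pointers into registry chunks via URCU_TLS(urcu_bp_reader), so
 * chunk memory must never be relocated; when in-place expansion
 * fails, a new chunk is allocated instead.
 */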

/* Sleep delay in ms */
#define RCU_SLEEP_DELAY_MS	10
#define INIT_READER_COUNT	8

/*
 * Active attempts to check for reader Q.S. before calling sleep().
 */
#define RCU_QS_ACTIVE_ATTEMPTS	100

static
int urcu_bp_refcount;

/* If the headers do not support the membarrier system call, fall back on smp_mb. */
#ifdef __NR_membarrier
# define membarrier(...)	syscall(__NR_membarrier, __VA_ARGS__)
#else
# define membarrier(...)	-ENOSYS
#endif

enum membarrier_cmd {
	MEMBARRIER_CMD_QUERY				= 0,
	MEMBARRIER_CMD_SHARED				= (1 << 0),
	/* reserved for MEMBARRIER_CMD_SHARED_EXPEDITED (1 << 1) */
	/* reserved for MEMBARRIER_CMD_PRIVATE (1 << 2) */
	MEMBARRIER_CMD_PRIVATE_EXPEDITED		= (1 << 3),
	MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED	= (1 << 4),
};
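
/*
 * Usage protocol (see urcu_bp_sys_membarrier_init() below): first
 * issue MEMBARRIER_CMD_QUERY to learn which commands the kernel
 * supports, register with MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED
 * when available, and only then may smp_mb_master() issue
 * MEMBARRIER_CMD_PRIVATE_EXPEDITED instead of a full memory barrier.
 */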

static
void __attribute__((constructor)) _urcu_bp_init(void);
static
void urcu_bp_exit(void);
static
void __attribute__((destructor)) urcu_bp_exit_destructor(void);
static void urcu_call_rcu_exit(void);

#ifndef CONFIG_RCU_FORCE_SYS_MEMBARRIER
int urcu_bp_has_sys_membarrier;
#endif

/*
 * rcu_gp_lock ensures mutual exclusion between threads calling
 * synchronize_rcu().
 */
static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
/*
 * rcu_registry_lock ensures mutual exclusion between threads
 * registering and unregistering themselves to/from the registry, and
 * with threads reading that registry from synchronize_rcu(). However,
 * this lock is not held continuously while awaiting completion of the
 * grace period: it is released sporadically between iterations on the
 * registry.
 * rcu_registry_lock may nest inside rcu_gp_lock.
 */
static pthread_mutex_t rcu_registry_lock = PTHREAD_MUTEX_INITIALIZER;

static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
static int initialized;

static pthread_key_t urcu_bp_key;

struct urcu_bp_gp urcu_bp_gp = { .ctr = URCU_BP_GP_COUNT };

/*
 * Pointer to registry elements. Written to only by each individual
 * reader. Read by both the reader and the writers.
 */
DEFINE_URCU_TLS(struct urcu_bp_reader *, urcu_bp_reader);

static CDS_LIST_HEAD(registry);

struct registry_chunk {
	size_t capacity;		/* capacity of this chunk (in elements) */
	size_t used;			/* count of elements used */
	struct cds_list_head node;	/* chunk_list node */
	struct urcu_bp_reader readers[];
};
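
/*
 * Chunk memory layout (illustrative): the header fields are followed
 * directly by the flexible array of reader slots, all within a single
 * mapping:
 *
 *	[ capacity | used | node ][ readers[0] | readers[1] | ... ]
 *
 * A slot is in use when its ->alloc flag is set. Declaring readers[]
 * as a typed flexible array member guarantees natural alignment of
 * each struct urcu_bp_reader, which is what the commit subject
 * ("misaligned reader accesses") refers to.
 */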

struct registry_arena {
	struct cds_list_head chunk_list;
};

static struct registry_arena registry_arena = {
	.chunk_list = CDS_LIST_HEAD_INIT(registry_arena.chunk_list),
};

/* Saved fork signal mask, protected by rcu_gp_lock */
static sigset_t saved_fork_signal_mask;

static void mutex_lock(pthread_mutex_t *mutex)
{
	int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
	ret = pthread_mutex_lock(mutex);
	if (ret)
		urcu_die(ret);
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
	while ((ret = pthread_mutex_trylock(mutex)) != 0) {
		if (ret != EBUSY && ret != EINTR)
			urcu_die(ret);
		poll(NULL, 0, 10);
	}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}

static void mutex_unlock(pthread_mutex_t *mutex)
{
	int ret;

	ret = pthread_mutex_unlock(mutex);
	if (ret)
		urcu_die(ret);
}

static void smp_mb_master(void)
{
	if (caa_likely(urcu_bp_has_sys_membarrier)) {
		if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0))
			urcu_die(errno);
	} else {
		cmm_smp_mb();
	}
}

/* Get the size of a chunk's allocation from its capacity (an element count). */
static size_t chunk_allocation_size(size_t capacity)
{
	return (capacity * sizeof(struct urcu_bp_reader)) +
		sizeof(struct registry_chunk);
}
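
/*
 * Worked example (illustrative): with INIT_READER_COUNT == 8, the
 * initial chunk mapping spans sizeof(struct registry_chunk) +
 * 8 * sizeof(struct urcu_bp_reader) bytes; each doubling of the
 * capacity yields allocations sized for 16, 32, ... readers.
 */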

/*
 * Always called with rcu_registry_lock held. Releases this lock
 * between iterations and grabs it again. Holds the lock when it
 * returns.
 */
static void wait_for_readers(struct cds_list_head *input_readers,
			struct cds_list_head *cur_snap_readers,
			struct cds_list_head *qsreaders)
{
	unsigned int wait_loops = 0;
	struct urcu_bp_reader *index, *tmp;

	/*
	 * Wait for each thread URCU_TLS(urcu_bp_reader).ctr to either
	 * indicate quiescence (not nested), or observe the current
	 * rcu_gp.ctr value.
	 */
	for (;;) {
		if (wait_loops < RCU_QS_ACTIVE_ATTEMPTS)
			wait_loops++;

		cds_list_for_each_entry_safe(index, tmp, input_readers, node) {
			switch (urcu_bp_reader_state(&index->ctr)) {
			case URCU_BP_READER_ACTIVE_CURRENT:
				if (cur_snap_readers) {
					cds_list_move(&index->node,
						cur_snap_readers);
					break;
				}
				/* Fall-through */
			case URCU_BP_READER_INACTIVE:
				cds_list_move(&index->node, qsreaders);
				break;
			case URCU_BP_READER_ACTIVE_OLD:
				/*
				 * Old snapshot. Leaving node in
				 * input_readers will make us busy-loop
				 * until the snapshot becomes current or
				 * the reader becomes inactive.
				 */
				break;
			}
		}

		if (cds_list_empty(input_readers)) {
			break;
		} else {
			/* Temporarily unlock the registry lock. */
			mutex_unlock(&rcu_registry_lock);
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS)
				(void) poll(NULL, 0, RCU_SLEEP_DELAY_MS);
			else
				caa_cpu_relax();
			/* Re-lock the registry lock before the next loop. */
			mutex_lock(&rcu_registry_lock);
		}
	}
}
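
/*
 * Note on the wait strategy above: the writer spins (caa_cpu_relax())
 * for up to RCU_QS_ACTIVE_ATTEMPTS passes over the registry, then
 * falls back to sleeping RCU_SLEEP_DELAY_MS between passes, releasing
 * rcu_registry_lock around each wait so that readers may register or
 * unregister while a grace period is pending.
 */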

void urcu_bp_synchronize_rcu(void)
{
	CDS_LIST_HEAD(cur_snap_readers);
	CDS_LIST_HEAD(qsreaders);
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	assert(!ret);
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	assert(!ret);

	mutex_lock(&rcu_gp_lock);

	mutex_lock(&rcu_registry_lock);

	if (cds_list_empty(&registry))
		goto out;

	/*
	 * All threads should read qparity before accessing the data
	 * structure pointed to by the new ptr. Write the new ptr
	 * before changing the qparity.
	 */
	smp_mb_master();

	/*
	 * Wait for readers to observe original parity or be quiescent.
	 * wait_for_readers() can release and grab again rcu_registry_lock
	 * internally.
	 */
	wait_for_readers(&registry, &cur_snap_readers, &qsreaders);

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/* Switch parity: 0 -> 1, 1 -> 0 */
	CMM_STORE_SHARED(rcu_gp.ctr, rcu_gp.ctr ^ URCU_BP_GP_CTR_PHASE);

	/*
	 * Must commit qparity update to memory before waiting for other parity
	 * quiescent state. Failure to do so could result in the writer waiting
	 * forever while new readers are always accessing data (no progress).
	 * Ensured by CMM_STORE_SHARED and CMM_LOAD_SHARED.
	 */

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/*
	 * Wait for readers to observe new parity or be quiescent.
	 * wait_for_readers() can release and grab again rcu_registry_lock
	 * internally.
	 */
	wait_for_readers(&cur_snap_readers, NULL, &qsreaders);

	/*
	 * Put quiescent reader list back into registry.
	 */
	cds_list_splice(&qsreaders, &registry);

	/*
	 * Finish waiting for reader threads before letting the old ptr
	 * be freed.
	 */
	smp_mb_master();
out:
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	assert(!ret);
}
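
/*
 * Typical writer-side pattern (illustrative sketch, not part of this
 * file; gp_ptr, create_node() and old_node/new_node are hypothetical):
 *
 *	new_node = create_node(...);
 *	old_node = gp_ptr;
 *	rcu_assign_pointer(gp_ptr, new_node);
 *	urcu_bp_synchronize_rcu();	(waits out pre-existing readers)
 *	free(old_node);
 */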

/*
 * library wrappers to be used by non-LGPL compatible source code.
 */

void urcu_bp_read_lock(void)
{
	_urcu_bp_read_lock();
}

void urcu_bp_read_unlock(void)
{
	_urcu_bp_read_unlock();
}

int urcu_bp_read_ongoing(void)
{
	return _urcu_bp_read_ongoing();
}
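
/*
 * Typical read-side pattern (illustrative sketch; gp_ptr and use()
 * are hypothetical). urcu-bp registers the calling thread lazily on
 * its first read lock, so no explicit registration is required:
 *
 *	urcu_bp_read_lock();
 *	p = rcu_dereference(gp_ptr);
 *	if (p)
 *		use(p);
 *	urcu_bp_read_unlock();
 */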

/*
 * Only grow for now. If empty, allocate a chunk sized for
 * INIT_READER_COUNT readers. Else, try expanding the last chunk. If
 * this fails, allocate a new chunk twice as big as the last chunk.
 * Memory used by chunks _never_ moves. A chunk could theoretically be
 * freed when all "used" slots are released, but we don't do it at this
 * point.
 */
static
void expand_arena(struct registry_arena *arena)
{
	struct registry_chunk *new_chunk, *last_chunk;
	size_t old_chunk_size_bytes, new_chunk_size_bytes, new_capacity;

	/* No chunk. */
	if (cds_list_empty(&arena->chunk_list)) {
		new_chunk_size_bytes = chunk_allocation_size(INIT_READER_COUNT);
		new_chunk = (struct registry_chunk *) mmap(NULL,
			new_chunk_size_bytes,
			PROT_READ | PROT_WRITE,
			MAP_ANONYMOUS | MAP_PRIVATE,
			-1, 0);
		if (new_chunk == MAP_FAILED)
			abort();
		memset(new_chunk, 0, new_chunk_size_bytes);
		new_chunk->capacity = INIT_READER_COUNT;
		cds_list_add_tail(&new_chunk->node, &arena->chunk_list);
		return;		/* We're done. */
	}

	/* Try expanding last chunk. */
	last_chunk = cds_list_entry(arena->chunk_list.prev,
		struct registry_chunk, node);
	old_chunk_size_bytes = chunk_allocation_size(last_chunk->capacity);
	new_capacity = last_chunk->capacity << 1;
	new_chunk_size_bytes = chunk_allocation_size(new_capacity);

	/* Don't allow memory mapping to move, just expand. */
	new_chunk = mremap_wrapper(last_chunk, old_chunk_size_bytes,
		new_chunk_size_bytes, 0);
	if (new_chunk != MAP_FAILED) {
		/* Should not have moved. */
		assert(new_chunk == last_chunk);
		memset((char *) last_chunk + old_chunk_size_bytes, 0,
			new_chunk_size_bytes - old_chunk_size_bytes);
		last_chunk->capacity = new_capacity;
		return;		/* We're done. */
	}

	/* Remap did not succeed, we need to add a new chunk. */
	new_chunk = (struct registry_chunk *) mmap(NULL,
		new_chunk_size_bytes,
		PROT_READ | PROT_WRITE,
		MAP_ANONYMOUS | MAP_PRIVATE,
		-1, 0);
	if (new_chunk == MAP_FAILED)
		abort();
	memset(new_chunk, 0, new_chunk_size_bytes);
	new_chunk->capacity = new_capacity;
	cds_list_add_tail(&new_chunk->node, &arena->chunk_list);
}
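
/*
 * Growth pattern (illustrative): with INIT_READER_COUNT == 8, chunk
 * capacities double as 8 -> 16 -> 32 -> ... A successful in-place
 * mremap extends the last chunk; otherwise a fresh chunk with the
 * doubled capacity is mapped and appended. Existing readers stay at
 * their original addresses either way.
 */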

static
struct rcu_reader *arena_alloc(struct registry_arena *arena)
{
	struct registry_chunk *chunk;
	int expand_done = 0;	/* Only allow one expansion per alloc. */

retry:
	cds_list_for_each_entry(chunk, &arena->chunk_list, node) {
		size_t spot_idx;

		/* Skip fully used chunks. */
		if (chunk->used == chunk->capacity) {
			continue;
		}

		/* Find a spot. */
		for (spot_idx = 0; spot_idx < chunk->capacity; spot_idx++) {
			if (!chunk->readers[spot_idx].alloc) {
				chunk->readers[spot_idx].alloc = 1;
				chunk->used++;
				return &chunk->readers[spot_idx];
			}
		}
	}

	if (!expand_done) {
		expand_arena(arena);
		expand_done = 1;
		goto retry;
	}

	return NULL;
}

/* Called with signals off and mutex locked */
static
void add_thread(void)
{
	struct rcu_reader *rcu_reader_reg;
	int ret;

	rcu_reader_reg = arena_alloc(&registry_arena);
	if (!rcu_reader_reg)
		abort();
	ret = pthread_setspecific(urcu_bp_key, rcu_reader_reg);
	if (ret)
		abort();

	/* Add to registry */
	rcu_reader_reg->tid = pthread_self();
	assert(rcu_reader_reg->ctr == 0);
	cds_list_add(&rcu_reader_reg->node, &registry);
	/*
	 * Reader threads point into the reader registry. This is why
	 * its memory must never be relocated.
	 */
	URCU_TLS(urcu_bp_reader) = rcu_reader_reg;
}

/* Called with mutex locked */
static
void cleanup_thread(struct registry_chunk *chunk,
		struct rcu_reader *rcu_reader_reg)
{
	rcu_reader_reg->ctr = 0;
	cds_list_del(&rcu_reader_reg->node);
	rcu_reader_reg->tid = 0;
	rcu_reader_reg->alloc = 0;
	chunk->used--;
}

static
struct registry_chunk *find_chunk(struct rcu_reader *rcu_reader_reg)
{
	struct registry_chunk *chunk;

	cds_list_for_each_entry(chunk, &registry_arena.chunk_list, node) {
		if (rcu_reader_reg < (struct urcu_bp_reader *) &chunk->readers[0])
			continue;
		if (rcu_reader_reg >= (struct urcu_bp_reader *) &chunk->readers[chunk->capacity])
			continue;
		return chunk;
	}
	return NULL;
}
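
/*
 * find_chunk() locates the owning chunk by address range: a
 * registered reader always lives inside exactly one chunk's readers[]
 * array, and chunks never move, so comparing against &readers[0] and
 * &readers[capacity] is sufficient.
 */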

/* Called with signals off and mutex locked */
static
void remove_thread(struct rcu_reader *rcu_reader_reg)
{
	cleanup_thread(find_chunk(rcu_reader_reg), rcu_reader_reg);
	URCU_TLS(urcu_bp_reader) = NULL;
}

/* Disable signals, take mutex, add to registry */
void urcu_bp_register(void)
{
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();

	/*
	 * Check if a signal handler concurrently registered our
	 * thread since the check in rcu_read_lock().
	 */
	if (URCU_TLS(urcu_bp_reader))
		goto end;

	/*
	 * Take care of early registration before urcu_bp constructor.
	 */
	_urcu_bp_init();

	mutex_lock(&rcu_registry_lock);
	add_thread();
	mutex_unlock(&rcu_registry_lock);
end:
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
}

void urcu_bp_register_thread(void)
{
	if (caa_unlikely(!URCU_TLS(urcu_bp_reader)))
		urcu_bp_register(); /* If not yet registered. */
}

/* Disable signals, take mutex, remove from registry */
static
void urcu_bp_unregister(struct rcu_reader *rcu_reader_reg)
{
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();

	mutex_lock(&rcu_registry_lock);
	remove_thread(rcu_reader_reg);
	mutex_unlock(&rcu_registry_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
	urcu_bp_exit();
}

/*
 * Remove the thread from the registry when it exits, via the
 * pthread key destructor set up in _urcu_bp_init().
 */
static
void urcu_bp_thread_exit_notifier(void *rcu_key)
{
	urcu_bp_unregister(rcu_key);
}

#ifdef CONFIG_RCU_FORCE_SYS_MEMBARRIER
static
void urcu_bp_sys_membarrier_status(bool available)
{
	if (!available)
		abort();
}
#else
static
void urcu_bp_sys_membarrier_status(bool available)
{
	if (!available)
		return;
	urcu_bp_has_sys_membarrier = 1;
}
#endif

static
void urcu_bp_sys_membarrier_init(void)
{
	bool available = false;
	int mask;

	mask = membarrier(MEMBARRIER_CMD_QUERY, 0);
	if (mask >= 0) {
		if (mask & MEMBARRIER_CMD_PRIVATE_EXPEDITED) {
			if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0))
				urcu_die(errno);
			available = true;
		}
	}
	urcu_bp_sys_membarrier_status(available);
}
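
/*
 * With CONFIG_RCU_FORCE_SYS_MEMBARRIER, lack of kernel support is
 * fatal; otherwise availability is merely recorded and
 * smp_mb_master() falls back on cmm_smp_mb() when the expedited
 * membarrier command cannot be used.
 */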

static
void _urcu_bp_init(void)
{
	mutex_lock(&init_lock);
	if (!urcu_bp_refcount++) {
		int ret;

		ret = pthread_key_create(&urcu_bp_key,
				urcu_bp_thread_exit_notifier);
		if (ret)
			abort();
		urcu_bp_sys_membarrier_init();
		initialized = 1;
	}
	mutex_unlock(&init_lock);
}

static
void urcu_bp_exit(void)
{
	mutex_lock(&init_lock);
	if (!--urcu_bp_refcount) {
		struct registry_chunk *chunk, *tmp;
		int ret;

		cds_list_for_each_entry_safe(chunk, tmp,
				&registry_arena.chunk_list, node) {
			munmap((void *) chunk, chunk_allocation_size(chunk->capacity));
		}
		CDS_INIT_LIST_HEAD(&registry_arena.chunk_list);
		ret = pthread_key_delete(urcu_bp_key);
		if (ret)
			abort();
	}
	mutex_unlock(&init_lock);
}

static
void urcu_bp_exit_destructor(void)
{
	urcu_call_rcu_exit();
	urcu_bp_exit();
}

/*
 * Holding the rcu_gp_lock and rcu_registry_lock across fork makes
 * sure that fork() does not race with a concurrent thread holding
 * any of those locks. This ensures that the registry and the data
 * protected by rcu_gp_lock are in a coherent state in the child.
 */
void urcu_bp_before_fork(void)
{
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	assert(!ret);
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	assert(!ret);
	mutex_lock(&rcu_gp_lock);
	mutex_lock(&rcu_registry_lock);
	saved_fork_signal_mask = oldmask;
}

void urcu_bp_after_fork_parent(void)
{
	sigset_t oldmask;
	int ret;

	oldmask = saved_fork_signal_mask;
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	assert(!ret);
}

/*
 * Prune all registry entries except our own thread. This matches the
 * Linux fork() behavior, where only the forking thread survives in
 * the child. Called with rcu_gp_lock and rcu_registry_lock held.
 */
static
void urcu_bp_prune_registry(void)
{
	struct registry_chunk *chunk;

	cds_list_for_each_entry(chunk, &registry_arena.chunk_list, node) {
		size_t spot_idx;

		for (spot_idx = 0; spot_idx < chunk->capacity; spot_idx++) {
			struct urcu_bp_reader *reader = &chunk->readers[spot_idx];

			if (!reader->alloc)
				continue;
			if (reader->tid == pthread_self())
				continue;
			cleanup_thread(chunk, reader);
		}
	}
}

void urcu_bp_after_fork_child(void)
{
	sigset_t oldmask;
	int ret;

	urcu_bp_prune_registry();
	oldmask = saved_fork_signal_mask;
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	assert(!ret);
}
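
/*
 * Typical usage (illustrative): hook the three fork handlers with
 * pthread_atfork() early in the program so that a child process
 * starts out with a registry containing only its own thread:
 *
 *	ret = pthread_atfork(urcu_bp_before_fork,
 *			urcu_bp_after_fork_parent,
 *			urcu_bp_after_fork_child);
 */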

void *urcu_bp_dereference_sym(void *p)
{
	return _rcu_dereference(p);
}

void *urcu_bp_set_pointer_sym(void **p, void *v)
{
	cmm_wmb();
	uatomic_set(p, v);
	return v;
}

void *urcu_bp_xchg_pointer_sym(void **p, void *v)
{
	cmm_wmb();
	return uatomic_xchg(p, v);
}

void *urcu_bp_cmpxchg_pointer_sym(void **p, void *old, void *_new)
{
	cmm_wmb();
	return uatomic_cmpxchg(p, old, _new);
}
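
/*
 * The cmm_wmb() preceding each pointer update above orders
 * initialization of the pointed-to object before its publication, so
 * readers that observe the new pointer through rcu_dereference() also
 * observe the object's initialized contents.
 */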

DEFINE_RCU_FLAVOR(rcu_flavor);

#include "urcu-call-rcu-impl.h"
#include "urcu-defer-impl.h"