/*
 * Userspace RCU library
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */
#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <errno.h>
#include <poll.h>

#include "urcu-static.h"
/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#include "urcu.h"
#ifdef RCU_MEMBARRIER
static int init_done;
int has_sys_membarrier;

void __attribute__((constructor)) rcu_init(void);
#endif

#ifdef RCU_MB
void rcu_init(void)
{
}
#endif

#ifdef RCU_SIGNAL
static int init_done;

void __attribute__((constructor)) rcu_init(void);
void __attribute__((destructor)) rcu_exit(void);
#endif
static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;

int gp_futex;
/*
 * Global grace period counter.
 * Contains the current RCU_GP_CTR_PHASE.
 * Also has a RCU_GP_COUNT of 1, to accelerate the reader fast path.
 * Written to only by writer with mutex taken. Read by both writer and readers.
 */
unsigned long rcu_gp_ctr = RCU_GP_COUNT;
/*
 * Written to only by each individual reader. Read by both the reader and the
 * writers.
 */
struct rcu_reader __thread rcu_reader;
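
/*
 * Added commentary (sketch): how the two counters above cooperate, assuming
 * the usual urcu-static.h encoding where RCU_GP_COUNT is 1, RCU_GP_CTR_PHASE
 * is a single high-order bit, and RCU_GP_CTR_NEST_MASK covers the low-order
 * nesting bits:
 *
 *	outermost rcu_read_lock():  rcu_reader.ctr = rcu_gp_ctr;
 *	                            (snapshots phase, sets nest count to 1)
 *	nested rcu_read_lock():     rcu_reader.ctr += RCU_GP_COUNT;
 *	rcu_read_unlock():          rcu_reader.ctr -= RCU_GP_COUNT;
 *
 * A reader still blocks the grace period when its nest count is non-zero
 * and its snapshotted phase differs from the current rcu_gp_ctr phase;
 * that is the test rcu_gp_ongoing() performs below.
 */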
unsigned int yield_active;
unsigned int __thread rand_yield;
static CDS_LIST_HEAD(registry);
static void mutex_lock(pthread_mutex_t *mutex)
{
	int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
	ret = pthread_mutex_lock(mutex);
	if (ret) {
		perror("Error in pthread mutex lock");
		exit(-1);
	}
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
	while ((ret = pthread_mutex_trylock(mutex)) != 0) {
		if (ret != EBUSY && ret != EINTR) {
			printf("ret = %d, errno = %d\n", ret, errno);
			perror("Error in pthread mutex lock");
			exit(-1);
		}
		if (CMM_LOAD_SHARED(rcu_reader.need_mb)) {
			cmm_smp_mb();
			_CMM_STORE_SHARED(rcu_reader.need_mb, 0);
			cmm_smp_mb();
		}
		poll(NULL, 0, 10);
	}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}
static void mutex_unlock(pthread_mutex_t *mutex)
{
	int ret;

	ret = pthread_mutex_unlock(mutex);
	if (ret) {
		perror("Error in pthread mutex unlock");
		exit(-1);
	}
}
#ifdef RCU_MEMBARRIER
static void smp_mb_master(int group)
{
	if (likely(has_sys_membarrier))
		membarrier(MEMBARRIER_EXPEDITED);
	else
		cmm_smp_mb();
}
#endif

#ifdef RCU_MB
static void smp_mb_master(int group)
{
	cmm_smp_mb();
}
#endif
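
/*
 * Added commentary (sketch): with RCU_MEMBARRIER, the reader fast path only
 * issues compiler barriers around its rcu_reader.ctr updates while
 * sys_membarrier() is available; smp_mb_master() then upgrades those
 * compiler barriers into real memory barriers on demand, so the
 * grace-period writer pays the synchronization cost and readers stay
 * nearly free. When the system call is unavailable, both sides fall back
 * to plain cmm_smp_mb(), which is also what the RCU_MB flavour above uses
 * unconditionally.
 */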
#ifdef RCU_SIGNAL
static void force_mb_all_readers(void)
{
	struct rcu_reader *index;

	/*
	 * Ask each thread to execute a cmm_smp_mb(), so we can consider the
	 * compiler barriers around rcu read lock as real memory barriers.
	 */
	if (cds_list_empty(&registry))
		return;
	/*
	 * pthread_kill has a cmm_smp_mb(). But beware, we assume it performs
	 * a cache flush on architectures with non-coherent cache. Let's play
	 * it safe and not assume anything: we use cmm_smp_mc() to make sure the
	 * cache flush is enforced.
	 */
	cds_list_for_each_entry(index, &registry, node) {
		CMM_STORE_SHARED(index->need_mb, 1);
		pthread_kill(index->tid, SIGRCU);
	}
	/*
	 * Wait for sighandler (and thus mb()) to execute on every thread.
	 *
	 * Note that the pthread_kill() will never be executed on systems
	 * that correctly deliver signals in a timely manner. However, it
	 * is not uncommon for kernels to have bugs that can result in
	 * lost or unduly delayed signals.
	 *
	 * If you are seeing the below pthread_kill() executing much at
	 * all, we suggest testing the underlying kernel and filing the
	 * relevant bug report. For Linux kernels, we recommend getting
	 * the Linux Test Project (LTP).
	 */
	cds_list_for_each_entry(index, &registry, node) {
		while (CMM_LOAD_SHARED(index->need_mb)) {
			pthread_kill(index->tid, SIGRCU);
			poll(NULL, 0, 1);
		}
	}
	cmm_smp_mb();	/* read ->need_mb before ending the barrier */
}

static void smp_mb_master(int group)
{
	force_mb_all_readers();
}
#endif /* #ifdef RCU_SIGNAL */
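
/*
 * Added commentary (sketch of the handshake above): the writer stores
 * need_mb = 1 and signals each registered reader; sigrcu_handler() runs a
 * cmm_smp_mb(), clears need_mb, then runs another cmm_smp_mb(); the writer
 * spins until it observes need_mb == 0 for every thread. The paired
 * stores and loads on need_mb order each reader's memory accesses against
 * the writer's, emulating sys_membarrier() with signals.
 */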
/*
 * synchronize_rcu() waiting. Single thread.
 */
static void wait_gp(void)
{
	/* Read reader_gp before read futex */
	smp_mb_master(RCU_MB_GROUP);
	if (uatomic_read(&gp_futex) == -1)
		futex_async(&gp_futex, FUTEX_WAIT, -1,
			NULL, NULL, 0);
}
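
/*
 * Added commentary (sketch): gp_futex implements the writer's sleep/wake
 * protocol. The writer announces its intent to block by decrementing
 * gp_futex from 0 to -1 in update_counter_and_wait(), re-checks the reader
 * registry, then sleeps in FUTEX_WAIT while the value is still -1. The
 * matching wake-up comes from the reader side: the outermost
 * rcu_read_unlock() (in urcu-static.h) resets gp_futex to 0 and issues a
 * FUTEX_WAKE when it observes the -1.
 */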
void update_counter_and_wait(void)
{
	CDS_LIST_HEAD(qsreaders);
	int wait_loops = 0;
	struct rcu_reader *index, *tmp;

	/* Switch parity: 0 -> 1, 1 -> 0 */
	CMM_STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR_PHASE);

	/*
	 * Must commit rcu_gp_ctr update to memory before waiting for quiescent
	 * state. Failure to do so could result in the writer waiting forever
	 * while new readers are always accessing data (no progress). Enforce
	 * compiler-order of store to rcu_gp_ctr before load rcu_reader ctr.
	 */
	cmm_barrier();

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/*
	 * Wait for each thread rcu_reader.ctr count to become 0.
	 */
	for (;;) {
		wait_loops++;
		if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
			uatomic_dec(&gp_futex);
			/* Write futex before read reader_gp */
			smp_mb_master(RCU_MB_GROUP);
		}

		cds_list_for_each_entry_safe(index, tmp, &registry, node) {
			if (!rcu_gp_ongoing(&index->ctr))
				cds_list_move(&index->node, &qsreaders);
		}

#ifndef HAS_INCOHERENT_CACHES
		if (cds_list_empty(&registry)) {
			if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
				/* Read reader_gp before write futex */
				smp_mb_master(RCU_MB_GROUP);
				uatomic_set(&gp_futex, 0);
			}
			break;
		} else {
			if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS)
				wait_gp();
			else
				caa_cpu_relax();
		}
#else /* #ifndef HAS_INCOHERENT_CACHES */
		/*
		 * BUSY-LOOP. Force the reader thread to commit its
		 * rcu_reader.ctr update to memory if we wait for too long.
		 */
		if (cds_list_empty(&registry)) {
			if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
				/* Read reader_gp before write futex */
				smp_mb_master(RCU_MB_GROUP);
				uatomic_set(&gp_futex, 0);
			}
			break;
		} else {
			switch (wait_loops) {
			case RCU_QS_ACTIVE_ATTEMPTS:
				wait_gp();
				break; /* only escape switch */
			case KICK_READER_LOOPS:
				smp_mb_master(RCU_MB_GROUP);
				wait_loops = 0;
				break; /* only escape switch */
			default:
				caa_cpu_relax();
			}
		}
#endif /* #else #ifndef HAS_INCOHERENT_CACHES */
	}
	/* put back the reader list in the registry */
	cds_list_splice(&qsreaders, &registry);
}
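
/*
 * Added commentary: each update_counter_and_wait() call only waits for
 * readers whose snapshot carries the previous phase bit. Readers arriving
 * after the parity flip copy the new phase, so rcu_gp_ongoing() ignores
 * them; they cannot hold a reference to data being reclaimed, since the
 * new pointer was published before the grace period began.
 */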
void synchronize_rcu(void)
{
	mutex_lock(&rcu_gp_lock);

	if (cds_list_empty(&registry))
		goto out;

	/* All threads should read qparity before accessing data structure
	 * where new ptr points to. Must be done within rcu_gp_lock because it
	 * iterates on reader threads. */
	/* Write new ptr before changing the qparity */
	smp_mb_master(RCU_MB_GROUP);

	/*
	 * Wait for previous parity to be empty of readers.
	 */
	update_counter_and_wait();	/* 0 -> 1, wait readers in parity 0 */

	/*
	 * Must finish waiting for quiescent state for parity 0 before
	 * committing next rcu_gp_ctr update to memory. Failure to do so could
	 * result in the writer waiting forever while new readers are always
	 * accessing data (no progress). Enforce compiler-order of load
	 * rcu_reader ctr before store to rcu_gp_ctr.
	 */
	cmm_barrier();

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/*
	 * Wait for previous parity to be empty of readers.
	 */
	update_counter_and_wait();	/* 1 -> 0, wait readers in parity 1 */

	/* Finish waiting for reader threads before letting the old ptr be
	 * freed. Must be done within rcu_gp_lock because it iterates on reader
	 * threads. */
	smp_mb_master(RCU_MB_GROUP);
out:
	mutex_unlock(&rcu_gp_lock);
}
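
/*
 * Usage sketch (hypothetical application code, not part of this library):
 * a writer replacing a shared structure. `shared_foo` and `struct foo` are
 * illustrative names; rcu_xchg_pointer() comes from the public urcu API.
 *
 *	struct foo *old_foo, *new_foo;
 *
 *	new_foo = malloc(sizeof(*new_foo));
 *	new_foo->value = 42;
 *	old_foo = rcu_xchg_pointer(&shared_foo, new_foo); // publish
 *	synchronize_rcu();	// no reader can still hold old_foo after this
 *	free(old_foo);		// safe to reclaim
 */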
/*
 * library wrappers to be used by non-LGPL compatible source code.
 */

void rcu_read_lock(void)
{
	_rcu_read_lock();
}

void rcu_read_unlock(void)
{
	_rcu_read_unlock();
}
void rcu_register_thread(void)
{
	rcu_reader.tid = pthread_self();
	assert(rcu_reader.need_mb == 0);
	assert(!(rcu_reader.ctr & RCU_GP_CTR_NEST_MASK));

	mutex_lock(&rcu_gp_lock);
	rcu_init();	/* In case gcc does not support constructor attribute */
	cds_list_add(&rcu_reader.node, &registry);
	mutex_unlock(&rcu_gp_lock);
}
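
/*
 * Usage sketch (hypothetical application code): every thread that takes
 * read-side critical sections must register first and unregister before
 * it exits. `shared_foo`, `struct foo` and use() are illustrative names.
 *
 *	void *reader_thread(void *arg)
 *	{
 *		struct foo *p;
 *
 *		rcu_register_thread();
 *		rcu_read_lock();
 *		p = rcu_dereference(shared_foo);
 *		if (p)
 *			use(p->value);	// p stays valid until unlock
 *		rcu_read_unlock();
 *		rcu_unregister_thread();
 *		return NULL;
 *	}
 */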
void rcu_unregister_thread(void)
{
	mutex_lock(&rcu_gp_lock);
	cds_list_del(&rcu_reader.node);
	mutex_unlock(&rcu_gp_lock);
}
#ifdef RCU_MEMBARRIER
void rcu_init(void)
{
	if (init_done)
		return;
	init_done = 1;
	if (!membarrier(MEMBARRIER_EXPEDITED | MEMBARRIER_QUERY))
		has_sys_membarrier = 1;
}
#endif
#ifdef RCU_SIGNAL
static void sigrcu_handler(int signo, siginfo_t *siginfo, void *context)
{
	/*
	 * Executing this cmm_smp_mb() is the only purpose of this signal handler.
	 * It punctually promotes cmm_barrier() into cmm_smp_mb() on every thread
	 * it is executed on.
	 */
	cmm_smp_mb();
	_CMM_STORE_SHARED(rcu_reader.need_mb, 0);
	cmm_smp_mb();
}
/*
 * rcu_init constructor. Called when the library is linked, but also when
 * reader threads are calling rcu_register_thread().
 * Should only be called by a single thread at a given time. This is ensured by
 * holding the rcu_gp_lock from rcu_register_thread() or by running at library
 * load time, which should not be executed by multiple threads nor concurrently
 * with rcu_register_thread() anyway.
 */
void rcu_init(void)
{
	struct sigaction act;
	int ret;

	if (init_done)
		return;
	init_done = 1;

	act.sa_sigaction = sigrcu_handler;
	act.sa_flags = SA_SIGINFO | SA_RESTART;
	sigemptyset(&act.sa_mask);
	ret = sigaction(SIGRCU, &act, NULL);
	if (ret) {
		perror("Error in sigaction");
		exit(-1);
	}
}
void rcu_exit(void)
{
	struct sigaction act;
	int ret;

	ret = sigaction(SIGRCU, NULL, &act);
	if (ret) {
		perror("Error in sigaction");
		exit(-1);
	}
	assert(act.sa_sigaction == sigrcu_handler);
	assert(cds_list_empty(&registry));
}
#endif /* #ifdef RCU_SIGNAL */
#include "urcu-call-rcu-impl.h"