/*
 * urcu.c
 *
 * Userspace RCU library
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */

#define _BSD_SOURCE
#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <poll.h>

#include "urcu-static.h"
/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#include "urcu.h"

#ifdef RCU_MEMBARRIER
static int init_done;
int has_sys_membarrier;

void __attribute__((constructor)) rcu_init(void);
#endif

#ifdef RCU_MB
void rcu_init(void)
{
}
#endif

#ifdef RCU_SIGNAL
static int init_done;

void __attribute__((constructor)) rcu_init(void);
void __attribute__((destructor)) rcu_exit(void);
#endif

static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;

int gp_futex;

/*
 * Global grace period counter.
 * Contains the current RCU_GP_CTR_PHASE.
 * Also has a RCU_GP_COUNT of 1, to accelerate the reader fast path.
 * Written to only by writer with mutex taken. Read by both writer and readers.
 */
unsigned long rcu_gp_ctr = RCU_GP_COUNT;
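
/*
 * Illustrative sketch, assuming the RCU_GP_CTR_* layout defined in
 * urcu-static.h: the low-order bits of a reader's counter snapshot hold its
 * rcu_read_lock() nesting count, and RCU_GP_CTR_PHASE is a single
 * higher-order phase bit toggled by the writer.  A reader still holds a
 * reference to the previous grace period roughly when:
 *
 *	v = LOAD_SHARED(rcu_reader.ctr);
 *	ongoing = (v & RCU_GP_CTR_NEST_MASK)
 *		&& ((v ^ rcu_gp_ctr) & RCU_GP_CTR_PHASE);
 *
 * which is, in sketch form, the test performed by rcu_gp_ongoing() used in
 * update_counter_and_wait() below.
 */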

/*
 * Written to only by each individual reader. Read by both the reader and the
 * writers.
 */
struct rcu_reader __thread rcu_reader;

#ifdef DEBUG_YIELD
unsigned int yield_active;
unsigned int __thread rand_yield;
#endif

static LIST_HEAD(registry);

static void mutex_lock(pthread_mutex_t *mutex)
{
	int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
	ret = pthread_mutex_lock(mutex);
	if (ret) {
		perror("Error in pthread mutex lock");
		exit(-1);
	}
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
	while ((ret = pthread_mutex_trylock(mutex)) != 0) {
		if (ret != EBUSY && ret != EINTR) {
			printf("ret = %d, errno = %d\n", ret, errno);
			perror("Error in pthread mutex lock");
			exit(-1);
		}
		if (LOAD_SHARED(rcu_reader.need_mb)) {
			smp_mb();
			_STORE_SHARED(rcu_reader.need_mb, 0);
			smp_mb();
		}
		poll(NULL, 0, 10);
	}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}
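
/*
 * Note on the DISTRUST_SIGNALS_EXTREME path above: the lock is acquired with
 * pthread_mutex_trylock() in a loop so that a thread blocked on rcu_gp_lock
 * can still acknowledge force_mb_all_readers().  If need_mb is observed set
 * while spinning, this thread issues the memory barrier and clears the flag
 * itself rather than depending on delivery of the SIGRCU handler.
 */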

static void mutex_unlock(pthread_mutex_t *mutex)
{
	int ret;

	ret = pthread_mutex_unlock(mutex);
	if (ret) {
		perror("Error in pthread mutex unlock");
		exit(-1);
	}
}

#ifdef RCU_MEMBARRIER
static void smp_mb_master(int group)
{
	if (likely(has_sys_membarrier))
		membarrier(MEMBARRIER_EXPEDITED);
	else
		smp_mb();
}
#endif
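
/*
 * With RCU_MEMBARRIER, when rcu_init() below detects kernel support for the
 * expedited membarrier call (has_sys_membarrier set), the matching
 * reader-side helper in urcu-static.h can rely on compiler-only barriers,
 * and smp_mb_master() promotes them to real memory barriers on all running
 * threads on demand; without kernel support, both sides fall back to a full
 * smp_mb().
 */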

#ifdef RCU_MB
static void smp_mb_master(int group)
{
	smp_mb();
}
#endif

#ifdef RCU_SIGNAL
static void force_mb_all_readers(void)
{
	struct rcu_reader *index;

	/*
	 * Ask each thread to execute an smp_mb() so we can consider the
	 * compiler barriers around rcu read lock as real memory barriers.
	 */
	if (list_empty(&registry))
		return;
	/*
	 * pthread_kill has an smp_mb(). But beware, we assume it performs
	 * a cache flush on architectures with non-coherent cache. Let's play
	 * it safe and not assume anything: we use smp_mc() to make sure the
	 * cache flush is enforced.
	 */
	list_for_each_entry(index, &registry, head) {
		STORE_SHARED(index->need_mb, 1);
		pthread_kill(index->tid, SIGRCU);
	}
	/*
	 * Wait for sighandler (and thus mb()) to execute on every thread.
	 *
	 * Note that the pthread_kill() will never be executed on systems
	 * that correctly deliver signals in a timely manner. However, it
	 * is not uncommon for kernels to have bugs that can result in
	 * lost or unduly delayed signals.
	 *
	 * If you see the pthread_kill() below executing much at all, we
	 * suggest testing the underlying kernel and filing the relevant
	 * bug report. For Linux kernels, we recommend getting the Linux
	 * Test Project (LTP).
	 */
	list_for_each_entry(index, &registry, head) {
		while (LOAD_SHARED(index->need_mb)) {
			pthread_kill(index->tid, SIGRCU);
			poll(NULL, 0, 1);
		}
	}
	smp_mb();	/* read ->need_mb before ending the barrier */
}

static void smp_mb_master(int group)
{
	force_mb_all_readers();
}
#endif /* #ifdef RCU_SIGNAL */

/*
 * synchronize_rcu() waiting. Single thread.
 */
static void wait_gp(void)
{
	/* Read reader_gp before read futex */
	smp_mb_master(RCU_MB_GROUP);
	if (uatomic_read(&gp_futex) == -1)
		futex_async(&gp_futex, FUTEX_WAIT, -1,
			NULL, NULL, 0);
}
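
/*
 * Sketch of the wake-up protocol (illustrative; the reader side lives in
 * urcu-static.h): the writer advertises that it is about to sleep by
 * decrementing gp_futex to -1, re-scans the registry, then blocks in
 * FUTEX_WAIT above.  A reader leaving its outermost read-side critical
 * section is expected to do roughly:
 *
 *	if (unlikely(uatomic_read(&gp_futex) == -1)) {
 *		uatomic_set(&gp_futex, 0);
 *		futex_async(&gp_futex, FUTEX_WAKE, 1, NULL, NULL, 0);
 *	}
 *
 * so that synchronize_rcu() wakes up as soon as the last blocking reader
 * finishes.
 */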

void update_counter_and_wait(void)
{
	LIST_HEAD(qsreaders);
	int wait_loops = 0;
	struct rcu_reader *index, *tmp;

	/* Switch parity: 0 -> 1, 1 -> 0 */
	STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR_PHASE);

	/*
	 * Must commit qparity update to memory before waiting for other parity
	 * quiescent state. Failure to do so could result in the writer waiting
	 * forever while new readers are always accessing data (no progress).
	 * Ensured by STORE_SHARED and LOAD_SHARED.
	 */

	/*
	 * Adding an smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	smp_mb();

	/*
	 * Wait for each thread's rcu_reader.ctr count to become 0.
	 */
	for (;;) {
		wait_loops++;
		if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
			uatomic_dec(&gp_futex);
			/* Write futex before read reader_gp */
			smp_mb_master(RCU_MB_GROUP);
		}

		list_for_each_entry_safe(index, tmp, &registry, head) {
			if (!rcu_gp_ongoing(&index->ctr))
				list_move(&index->head, &qsreaders);
		}

#ifndef HAS_INCOHERENT_CACHES
		if (list_empty(&registry)) {
			if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
				/* Read reader_gp before write futex */
				smp_mb_master(RCU_MB_GROUP);
				uatomic_set(&gp_futex, 0);
			}
			break;
		} else {
			if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS)
				wait_gp();
			else
				cpu_relax();
		}
#else /* #ifndef HAS_INCOHERENT_CACHES */
		/*
		 * BUSY-LOOP. Force the reader thread to commit its
		 * rcu_reader.ctr update to memory if we wait for too long.
		 */
		if (list_empty(&registry)) {
			if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
				/* Read reader_gp before write futex */
				smp_mb_master(RCU_MB_GROUP);
				uatomic_set(&gp_futex, 0);
			}
			break;
		} else {
			switch (wait_loops) {
			case RCU_QS_ACTIVE_ATTEMPTS:
				wait_gp();
				break; /* only escape switch */
			case KICK_READER_LOOPS:
				smp_mb_master(RCU_MB_GROUP);
				wait_loops = 0;
				break; /* only escape switch */
			default:
				cpu_relax();
			}
		}
#endif /* #else #ifndef HAS_INCOHERENT_CACHES */
	}
	/* put back the reader list in the registry */
	list_splice(&qsreaders, &registry);
}
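
/*
 * Note: synchronize_rcu() below flips the parity and waits twice per grace
 * period.  The second pass guards, roughly, against a reader that fetched
 * rcu_gp_ctr just before a flip but only published its snapshot afterwards:
 * with a single flip per grace period, such a reader's snapshot could carry
 * the parity a later grace period considers current and be wrongly treated
 * as quiescent while it is still inside a read-side critical section.
 */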

void synchronize_rcu(void)
{
	mutex_lock(&rcu_gp_lock);

	if (list_empty(&registry))
		goto out;

	/* All threads should read qparity before accessing the data structure
	 * pointed to by the new ptr. Must be done within rcu_gp_lock because it
	 * iterates on reader threads. */
	/* Write new ptr before changing the qparity */
	smp_mb_master(RCU_MB_GROUP);

	/*
	 * Wait for previous parity to be empty of readers.
	 */
	update_counter_and_wait();	/* 0 -> 1, wait readers in parity 0 */

	/*
	 * Must finish waiting for quiescent state for parity 0 before
	 * committing qparity update to memory. Failure to do so could result in
	 * the writer waiting forever while new readers are always accessing
	 * data (no progress).
	 * Ensured by STORE_SHARED and LOAD_SHARED.
	 */

	/*
	 * Adding an smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	smp_mb();

	/*
	 * Wait for previous parity to be empty of readers.
	 */
	update_counter_and_wait();	/* 1 -> 0, wait readers in parity 1 */

	/* Finish waiting for reader threads before letting the old ptr be
	 * freed. Must be done within rcu_gp_lock because it iterates on reader
	 * threads. */
	smp_mb_master(RCU_MB_GROUP);
out:
	mutex_unlock(&rcu_gp_lock);
}
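
/*
 * Minimal writer-side usage sketch (illustrative only; gp_foo, struct foo
 * and writer_lock are hypothetical application-side names):
 *
 *	struct foo *new_foo, *old_foo;
 *
 *	new_foo = malloc(sizeof(*new_foo));
 *	new_foo->data = 42;
 *	pthread_mutex_lock(&writer_lock);
 *	old_foo = gp_foo;
 *	rcu_assign_pointer(gp_foo, new_foo);
 *	pthread_mutex_unlock(&writer_lock);
 *	synchronize_rcu();	-- no pre-existing reader can still hold old_foo
 *	free(old_foo);
 */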

/*
 * library wrappers to be used by non-LGPL compatible source code.
 */

void rcu_read_lock(void)
{
	_rcu_read_lock();
}

void rcu_read_unlock(void)
{
	_rcu_read_unlock();
}

void rcu_register_thread(void)
{
	rcu_reader.tid = pthread_self();
	assert(rcu_reader.need_mb == 0);
	assert(rcu_reader.ctr == 0);

	mutex_lock(&rcu_gp_lock);
	rcu_init();	/* In case gcc does not support constructor attribute */
	list_add(&rcu_reader.head, &registry);
	mutex_unlock(&rcu_gp_lock);
}

void rcu_unregister_thread(void)
{
	mutex_lock(&rcu_gp_lock);
	list_del(&rcu_reader.head);
	mutex_unlock(&rcu_gp_lock);
}
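
/*
 * Minimal reader-side usage sketch (illustrative only; gp_foo and consume()
 * are hypothetical application-side names): every reader thread registers
 * itself once before its first read-side critical section and unregisters
 * before exiting.
 *
 *	rcu_register_thread();
 *	...
 *	rcu_read_lock();
 *	p = rcu_dereference(gp_foo);
 *	if (p)
 *		consume(p->data);
 *	rcu_read_unlock();
 *	...
 *	rcu_unregister_thread();
 */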

#ifdef RCU_MEMBARRIER
void rcu_init(void)
{
	if (init_done)
		return;
	init_done = 1;
	if (!membarrier(MEMBARRIER_EXPEDITED | MEMBARRIER_QUERY))
		has_sys_membarrier = 1;
}
#endif

#ifdef RCU_SIGNAL
static void sigrcu_handler(int signo, siginfo_t *siginfo, void *context)
{
	/*
	 * Executing this smp_mb() is the only purpose of this signal handler.
	 * It promotes, on the spot, barrier() into smp_mb() on every thread
	 * it is executed on.
	 */
	smp_mb();
	_STORE_SHARED(rcu_reader.need_mb, 0);
	smp_mb();
}
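
/*
 * The handler above pairs with force_mb_all_readers(): the writer sets
 * need_mb for each registered reader and sends SIGRCU; each target thread
 * executes a real smp_mb() and clears its need_mb flag, with the
 * surrounding barriers ordering that store so the writer's wait loop can
 * safely conclude the memory barrier has executed on every reader.
 */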

/*
 * rcu_init constructor. Called when the library is linked, but also when
 * reader threads are calling rcu_register_thread().
 * Should only be called by a single thread at a given time. This is ensured by
 * holding the rcu_gp_lock from rcu_register_thread() or by running at library
 * load time, which should not be executed by multiple threads nor concurrently
 * with rcu_register_thread() anyway.
 */
void rcu_init(void)
{
	struct sigaction act;
	int ret;

	if (init_done)
		return;
	init_done = 1;

	act.sa_sigaction = sigrcu_handler;
	act.sa_flags = SA_SIGINFO | SA_RESTART;
	sigemptyset(&act.sa_mask);
	ret = sigaction(SIGRCU, &act, NULL);
	if (ret) {
		perror("Error in sigaction");
		exit(-1);
	}
}

void rcu_exit(void)
{
	struct sigaction act;
	int ret;

	ret = sigaction(SIGRCU, NULL, &act);
	if (ret) {
		perror("Error in sigaction");
		exit(-1);
	}
	assert(act.sa_sigaction == sigrcu_handler);
	assert(list_empty(&registry));
}
#endif /* #ifdef RCU_SIGNAL */