/*
 * urcu.c
 *
 * Userspace RCU library
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */

#define _BSD_SOURCE
#define _GNU_SOURCE
#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <poll.h>

#include "urcu/map/urcu.h"

#include "urcu/static/urcu.h"
/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#include "urcu.h"

#ifdef RCU_MEMBARRIER
static int init_done;
int has_sys_membarrier;

void __attribute__((constructor)) rcu_init(void);
#endif

#ifdef RCU_MB
void rcu_init(void)
{
}
#endif

#ifdef RCU_SIGNAL
static int init_done;

void __attribute__((constructor)) rcu_init(void);
void __attribute__((destructor)) rcu_exit(void);
#endif

static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;

int gp_futex;

/*
 * Global grace period counter.
 * Contains the current RCU_GP_CTR_PHASE.
 * Also has a RCU_GP_COUNT of 1, to accelerate the reader fast path.
 * Written to only by writer with mutex taken. Read by both writer and readers.
 */
unsigned long rcu_gp_ctr = RCU_GP_COUNT;
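
/*
 * Illustrative sketch (not part of the build): assuming the definitions in
 * urcu/static/urcu.h, the low-order bits of a reader's ctr hold its
 * rcu_read_lock() nesting count and a single higher-order bit holds the
 * grace period phase. Because rcu_gp_ctr already carries a RCU_GP_COUNT of
 * 1, an outermost rcu_read_lock() can snapshot it directly (phase + nesting
 * count of 1). The writer then decides whether a reader still blocks the
 * grace period roughly as follows; reader_blocks_gp() is only a
 * hypothetical restatement of the real check, rcu_gp_ongoing(), used by
 * update_counter_and_wait() below.
 *
 *	static inline int reader_blocks_gp(unsigned long reader_ctr,
 *					   unsigned long gp_ctr)
 *	{
 *		return (reader_ctr & RCU_GP_CTR_NEST_MASK)
 *		    && ((reader_ctr ^ gp_ctr) & RCU_GP_CTR_PHASE);
 *	}
 */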

/*
 * Written to only by each individual reader. Read by both the reader and the
 * writers.
 */
struct rcu_reader __thread rcu_reader;

#ifdef DEBUG_YIELD
unsigned int yield_active;
unsigned int __thread rand_yield;
#endif

static CDS_LIST_HEAD(registry);

static void mutex_lock(pthread_mutex_t *mutex)
{
	int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
	ret = pthread_mutex_lock(mutex);
	if (ret) {
		perror("Error in pthread mutex lock");
		exit(-1);
	}
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
	while ((ret = pthread_mutex_trylock(mutex)) != 0) {
		if (ret != EBUSY && ret != EINTR) {
			printf("ret = %d, errno = %d\n", ret, errno);
			perror("Error in pthread mutex lock");
			exit(-1);
		}
		if (CMM_LOAD_SHARED(rcu_reader.need_mb)) {
			cmm_smp_mb();
			_CMM_STORE_SHARED(rcu_reader.need_mb, 0);
			cmm_smp_mb();
		}
		poll(NULL, 0, 10);
	}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}

static void mutex_unlock(pthread_mutex_t *mutex)
{
	int ret;

	ret = pthread_mutex_unlock(mutex);
	if (ret) {
		perror("Error in pthread mutex unlock");
		exit(-1);
	}
}

#ifdef RCU_MEMBARRIER
static void smp_mb_master(int group)
{
	if (likely(has_sys_membarrier))
		membarrier(MEMBARRIER_EXPEDITED);
	else
		cmm_smp_mb();
}
#endif

#ifdef RCU_MB
static void smp_mb_master(int group)
{
	cmm_smp_mb();
}
#endif

#ifdef RCU_SIGNAL
static void force_mb_all_readers(void)
{
	struct rcu_reader *index;

	/*
	 * Ask each thread to execute a cmm_smp_mb(), so we can consider the
	 * compiler barriers around the rcu read lock as real memory barriers.
	 */
	if (cds_list_empty(&registry))
		return;
	/*
	 * pthread_kill has a cmm_smp_mb(). But beware, we assume it performs
	 * a cache flush on architectures with non-coherent cache. Let's play
	 * it safe and not assume anything: we use cmm_smp_mc() to make sure
	 * the cache flush is enforced.
	 */
	cds_list_for_each_entry(index, &registry, node) {
		CMM_STORE_SHARED(index->need_mb, 1);
		pthread_kill(index->tid, SIGRCU);
	}
	/*
	 * Wait for the sighandler (and thus the mb()) to execute on every
	 * thread.
	 *
	 * Note that the pthread_kill() below will never be executed on
	 * systems that correctly deliver signals in a timely manner.
	 * However, it is not uncommon for kernels to have bugs that can
	 * result in lost or unduly delayed signals.
	 *
	 * If you are seeing the below pthread_kill() executing much at all,
	 * we suggest testing the underlying kernel and filing the relevant
	 * bug report. For Linux kernels, we recommend getting the Linux Test
	 * Project (LTP).
	 */
	cds_list_for_each_entry(index, &registry, node) {
		while (CMM_LOAD_SHARED(index->need_mb)) {
			pthread_kill(index->tid, SIGRCU);
			poll(NULL, 0, 1);
		}
	}
	cmm_smp_mb();	/* read ->need_mb before ending the barrier */
}

static void smp_mb_master(int group)
{
	force_mb_all_readers();
}
#endif /* #ifdef RCU_SIGNAL */

/*
 * synchronize_rcu() waiting. Single thread.
 */
static void wait_gp(void)
{
	/* Read reader_gp before read futex */
	smp_mb_master(RCU_MB_GROUP);
	if (uatomic_read(&gp_futex) == -1)
		futex_async(&gp_futex, FUTEX_WAIT, -1,
			    NULL, NULL, 0);
}

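/*
 * Illustrative sketch of the wake-up side (not part of this file): once the
 * writer has armed gp_futex (decremented to -1 in update_counter_and_wait()
 * below) and blocked in wait_gp(), the read-side exit path is expected to
 * reset the futex and issue a FUTEX_WAKE, roughly as sketched here. The
 * helper name wake_up_writer() is hypothetical; the actual wake-up logic
 * lives in the read-side fast path provided by urcu/static/urcu.h.
 *
 *	static void wake_up_writer(void)
 *	{
 *		if (unlikely(uatomic_read(&gp_futex) == -1)) {
 *			uatomic_set(&gp_futex, 0);
 *			futex_async(&gp_futex, FUTEX_WAKE, 1, NULL, NULL, 0);
 *		}
 *	}
 */
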
void update_counter_and_wait(void)
{
	CDS_LIST_HEAD(qsreaders);
	int wait_loops = 0;
	struct rcu_reader *index, *tmp;

	/* Switch parity: 0 -> 1, 1 -> 0 */
	CMM_STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR_PHASE);

	/*
	 * Must commit rcu_gp_ctr update to memory before waiting for quiescent
	 * state. Failure to do so could result in the writer waiting forever
	 * while new readers are always accessing data (no progress). Enforce
	 * compiler-order of store to rcu_gp_ctr before load rcu_reader ctr.
	 */
	cmm_barrier();

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes
	 * the model easier to understand. It does not have a big performance
	 * impact anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/*
	 * Wait for each thread's rcu_reader.ctr count to become 0.
	 */
	for (;;) {
		wait_loops++;
		if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
			uatomic_dec(&gp_futex);
			/* Write futex before read reader_gp */
			smp_mb_master(RCU_MB_GROUP);
		}

		cds_list_for_each_entry_safe(index, tmp, &registry, node) {
			if (!rcu_gp_ongoing(&index->ctr))
				cds_list_move(&index->node, &qsreaders);
		}

#ifndef HAS_INCOHERENT_CACHES
		if (cds_list_empty(&registry)) {
			if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
				/* Read reader_gp before write futex */
				smp_mb_master(RCU_MB_GROUP);
				uatomic_set(&gp_futex, 0);
			}
			break;
		} else {
			if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS)
				wait_gp();
			else
				caa_cpu_relax();
		}
#else /* #ifndef HAS_INCOHERENT_CACHES */
		/*
		 * BUSY-LOOP. Force the reader thread to commit its
		 * rcu_reader.ctr update to memory if we wait for too long.
		 */
		if (cds_list_empty(&registry)) {
			if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
				/* Read reader_gp before write futex */
				smp_mb_master(RCU_MB_GROUP);
				uatomic_set(&gp_futex, 0);
			}
			break;
		} else {
			switch (wait_loops) {
			case RCU_QS_ACTIVE_ATTEMPTS:
				wait_gp();
				break; /* only escape switch */
			case KICK_READER_LOOPS:
				smp_mb_master(RCU_MB_GROUP);
				wait_loops = 0;
				break; /* only escape switch */
			default:
				caa_cpu_relax();
			}
		}
#endif /* #else #ifndef HAS_INCOHERENT_CACHES */
	}
	/* put back the reader list into the registry */
	cds_list_splice(&qsreaders, &registry);
}

void synchronize_rcu(void)
{
	mutex_lock(&rcu_gp_lock);

	if (cds_list_empty(&registry))
		goto out;

	/*
	 * All threads should read qparity before accessing the data structure
	 * the new ptr points to. Must be done within rcu_gp_lock because it
	 * iterates on reader threads.
	 */
	/* Write new ptr before changing the qparity */
	smp_mb_master(RCU_MB_GROUP);

	/*
	 * Wait for previous parity to be empty of readers.
	 */
	update_counter_and_wait();	/* 0 -> 1, wait readers in parity 0 */

	/*
	 * Must finish waiting for quiescent state for parity 0 before
	 * committing next rcu_gp_ctr update to memory. Failure to do so could
	 * result in the writer waiting forever while new readers are always
	 * accessing data (no progress). Enforce compiler-order of load
	 * rcu_reader ctr before store to rcu_gp_ctr.
	 */
	cmm_barrier();

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes
	 * the model easier to understand. It does not have a big performance
	 * impact anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/*
	 * Wait for previous parity to be empty of readers.
	 */
	update_counter_and_wait();	/* 1 -> 0, wait readers in parity 1 */

	/*
	 * Finish waiting for reader threads before letting the old ptr be
	 * freed. Must be done within rcu_gp_lock because it iterates on
	 * reader threads.
	 */
	smp_mb_master(RCU_MB_GROUP);
out:
	mutex_unlock(&rcu_gp_lock);
}
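
/*
 * Illustrative writer-side usage sketch (not part of this file): a typical
 * update publishes a new version with rcu_assign_pointer(), waits for a
 * grace period with synchronize_rcu(), then reclaims the old version. The
 * names struct foo and gp_ptr below are hypothetical.
 *
 *	struct foo *old, *new;
 *
 *	new = malloc(sizeof(*new));
 *	new->value = 42;
 *	old = gp_ptr;
 *	rcu_assign_pointer(gp_ptr, new);
 *	synchronize_rcu();	(wait for pre-existing readers to finish)
 *	free(old);		(no reader can still reference old)
 */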

/*
 * library wrappers to be used by non-LGPL compatible source code.
 */

void rcu_read_lock(void)
{
	_rcu_read_lock();
}

void rcu_read_unlock(void)
{
	_rcu_read_unlock();
}
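
/*
 * Illustrative reader-side usage sketch (not part of this file): read-side
 * critical sections nest and delimit the window during which a pointer
 * obtained with rcu_dereference() may be used. The names struct foo, gp_ptr
 * and do_something_with() below are hypothetical; the calling thread must
 * already be registered with rcu_register_thread().
 *
 *	struct foo *p;
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp_ptr);
 *	if (p)
 *		do_something_with(p->value);
 *	rcu_read_unlock();
 */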

void rcu_register_thread(void)
{
	rcu_reader.tid = pthread_self();
	assert(rcu_reader.need_mb == 0);
	assert(!(rcu_reader.ctr & RCU_GP_CTR_NEST_MASK));

	mutex_lock(&rcu_gp_lock);
	rcu_init();	/* In case gcc does not support constructor attribute */
	cds_list_add(&rcu_reader.node, &registry);
	mutex_unlock(&rcu_gp_lock);
}

void rcu_unregister_thread(void)
{
	mutex_lock(&rcu_gp_lock);
	cds_list_del(&rcu_reader.node);
	mutex_unlock(&rcu_gp_lock);
}
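
/*
 * Illustrative thread lifecycle sketch (not part of this file): every
 * thread that takes read-side critical sections must register itself once
 * before its first rcu_read_lock() and unregister before exiting, so that
 * synchronize_rcu() can find its rcu_reader state in the registry. The
 * functions reader_thread() and need_to_stop() below are hypothetical.
 *
 *	void *reader_thread(void *arg)
 *	{
 *		rcu_register_thread();
 *
 *		while (!need_to_stop()) {
 *			rcu_read_lock();
 *			... read-side accesses via rcu_dereference() ...
 *			rcu_read_unlock();
 *		}
 *
 *		rcu_unregister_thread();
 *		return NULL;
 *	}
 */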

#ifdef RCU_MEMBARRIER
void rcu_init(void)
{
	if (init_done)
		return;
	init_done = 1;
	if (!membarrier(MEMBARRIER_EXPEDITED | MEMBARRIER_QUERY))
		has_sys_membarrier = 1;
}
#endif

#ifdef RCU_SIGNAL
static void sigrcu_handler(int signo, siginfo_t *siginfo, void *context)
{
	/*
	 * Executing this cmm_smp_mb() is the only purpose of this signal
	 * handler. It promotes, on demand, a cmm_barrier() into a
	 * cmm_smp_mb() on every thread it is executed on.
	 */
	cmm_smp_mb();
	_CMM_STORE_SHARED(rcu_reader.need_mb, 0);
	cmm_smp_mb();
}

/*
 * rcu_init constructor. Called when the library is loaded, but also when
 * reader threads are calling rcu_register_thread().
 * Should only be called by a single thread at a given time. This is ensured
 * by holding the rcu_gp_lock from rcu_register_thread() or by running at
 * library load time, which is not executed by multiple threads nor
 * concurrently with rcu_register_thread() anyway.
 */
void rcu_init(void)
{
	struct sigaction act;
	int ret;

	if (init_done)
		return;
	init_done = 1;

	act.sa_sigaction = sigrcu_handler;
	act.sa_flags = SA_SIGINFO | SA_RESTART;
	sigemptyset(&act.sa_mask);
	ret = sigaction(SIGRCU, &act, NULL);
	if (ret) {
		perror("Error in sigaction");
		exit(-1);
	}
}

void rcu_exit(void)
{
	struct sigaction act;
	int ret;

	ret = sigaction(SIGRCU, NULL, &act);
	if (ret) {
		perror("Error in sigaction");
		exit(-1);
	}
	assert(act.sa_sigaction == sigrcu_handler);
	assert(cds_list_empty(&registry));
}

#endif /* #ifdef RCU_SIGNAL */

#include "urcu-call-rcu-impl.h"
#include "urcu-defer-impl.h"