urcu: use unsigned long instead of long
/*
 * urcu.c
 *
 * Userspace RCU library
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */

#define _BSD_SOURCE
#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <poll.h>

#include "urcu-static.h"
/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#include "urcu.h"

#ifdef RCU_MEMBARRIER
static int init_done;
int has_sys_membarrier;

void __attribute__((constructor)) rcu_init(void);
#endif

#ifdef RCU_MB
void rcu_init(void)
{
}
#endif

#ifdef RCU_SIGNAL
static int init_done;

void __attribute__((constructor)) rcu_init(void);
void __attribute__((destructor)) rcu_exit(void);
#endif

static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;

int gp_futex;

/*
 * Global grace period counter.
 * Contains the current RCU_GP_CTR_PHASE.
 * Also has a RCU_GP_COUNT of 1, to accelerate the reader fast path.
 * Written to only by writer with mutex taken. Read by both writer and readers.
 */
unsigned long rcu_gp_ctr = RCU_GP_COUNT;

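/*
 * For reference, the counter layout the comment above relies on is defined
 * in urcu-static.h. The following is a minimal illustrative sketch of those
 * definitions, an assumption based on this commit's unsigned long
 * conversion; see urcu-static.h for the authoritative values. Kept out of
 * compilation on purpose.
 */
#if 0
#define RCU_GP_COUNT		(1UL << 0)	/* nesting count increment */
/* Phase bit, stored in the upper half of the unsigned long: */
#define RCU_GP_CTR_PHASE	(1UL << (sizeof(unsigned long) << 2))
#define RCU_GP_CTR_NEST_MASK	(RCU_GP_CTR_PHASE - 1)
#endif
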
/*
 * Written to only by each individual reader. Read by both the reader and the
 * writers.
 */
struct rcu_reader __thread rcu_reader;

#ifdef DEBUG_YIELD
unsigned int yield_active;
unsigned int __thread rand_yield;
#endif

static LIST_HEAD(registry);

static void mutex_lock(pthread_mutex_t *mutex)
{
	int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
	ret = pthread_mutex_lock(mutex);
	if (ret) {
		perror("Error in pthread mutex lock");
		exit(-1);
	}
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
	while ((ret = pthread_mutex_trylock(mutex)) != 0) {
		if (ret != EBUSY && ret != EINTR) {
			printf("ret = %d, errno = %d\n", ret, errno);
			perror("Error in pthread mutex lock");
			exit(-1);
		}
		if (rcu_reader.need_mb) {
			smp_mb();
			rcu_reader.need_mb = 0;
			smp_mb();
		}
		poll(NULL, 0, 10);
	}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}

static void mutex_unlock(pthread_mutex_t *mutex)
{
	int ret;

	ret = pthread_mutex_unlock(mutex);
	if (ret) {
		perror("Error in pthread mutex unlock");
		exit(-1);
	}
}

#ifdef RCU_MEMBARRIER
static void smp_mb_master(int group)
{
	if (likely(has_sys_membarrier))
		membarrier(MEMBARRIER_EXPEDITED);
	else
		smp_mb();
}
#endif

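/*
 * The reader-side counterpart of smp_mb_master() lives in urcu-static.h:
 * when sys_membarrier is available, readers only need a compiler barrier,
 * while the writer pays for the expedited membarrier. A sketch of what
 * that slave side looks like, as an assumption; consult urcu-static.h for
 * the real definition:
 */
#if 0
static inline void smp_mb_slave(int group)
{
	if (likely(has_sys_membarrier))
		barrier();	/* compiler barrier only: cheap reader fast path */
	else
		smp_mb();	/* full barrier when the syscall is unavailable */
}
#endif
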
#ifdef RCU_MB
static void smp_mb_master(int group)
{
	smp_mb();
}
#endif

#ifdef RCU_SIGNAL
static void force_mb_all_readers(void)
{
	struct rcu_reader *index;

	/*
	 * Ask each thread to execute a smp_mb() so we can consider the
	 * compiler barriers around rcu read lock as real memory barriers.
	 */
	if (list_empty(&registry))
		return;
	/*
	 * pthread_kill has a smp_mb(). But beware, we cannot assume it
	 * performs a cache flush on architectures with non-coherent caches.
	 * Let's play safe and don't assume anything: we use smp_mc() to make
	 * sure the cache flush is enforced.
	 */
	list_for_each_entry(index, &registry, head) {
		index->need_mb = 1;
		smp_mc();	/* write need_mb before sending the signal */
		pthread_kill(index->tid, SIGRCU);
	}
	/*
	 * Wait for sighandler (and thus mb()) to execute on every thread.
	 *
	 * Note that the pthread_kill() below will never be executed on
	 * systems that correctly deliver signals in a timely manner.
	 * However, it is not uncommon for kernels to have bugs that can
	 * result in lost or unduly delayed signals.
	 *
	 * If you see the pthread_kill() below executing much at all, we
	 * suggest testing the underlying kernel and filing the relevant
	 * bug report. For Linux kernels, we recommend getting the Linux
	 * Test Project (LTP).
	 */
	list_for_each_entry(index, &registry, head) {
		while (index->need_mb) {
			pthread_kill(index->tid, SIGRCU);
			poll(NULL, 0, 1);
		}
	}
	smp_mb();	/* read ->need_mb before ending the barrier */
}

static void smp_mb_master(int group)
{
	force_mb_all_readers();
}
#endif /* #ifdef RCU_SIGNAL */

/*
 * synchronize_rcu() waiting. Single thread.
 */
static void wait_gp(void)
{
	/* Read reader_gp before read futex */
	smp_mb_master(RCU_MB_GROUP);
	if (uatomic_read(&gp_futex) == -1)
		futex_async(&gp_futex, FUTEX_WAIT, -1,
			NULL, NULL, 0);
}

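/*
 * The wake-up counterpart of wait_gp() runs on the reader side, in
 * urcu-static.h, when a reader exits its critical section while the writer
 * sleeps on gp_futex. A sketch of that logic, as an assumption; the
 * authoritative code is wake_up_gp() in urcu-static.h:
 */
#if 0
static inline void wake_up_gp(void)
{
	if (unlikely(uatomic_read(&gp_futex) == -1)) {
		uatomic_set(&gp_futex, 0);
		/* Wake a single waiter: only the writer sleeps on gp_futex. */
		futex_async(&gp_futex, FUTEX_WAKE, 1, NULL, NULL, 0);
	}
}
#endif
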
void update_counter_and_wait(void)
{
	LIST_HEAD(qsreaders);
	int wait_loops = 0;
	struct rcu_reader *index, *tmp;

	/* Switch parity: 0 -> 1, 1 -> 0 */
	STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR_PHASE);

	/*
	 * Must commit qparity update to memory before waiting for other parity
	 * quiescent state. Failure to do so could result in the writer waiting
	 * forever while new readers are always accessing data (no progress).
	 * Ensured by STORE_SHARED and LOAD_SHARED.
	 */

	/*
	 * Adding a smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	smp_mb();

	/*
	 * Wait for each thread's rcu_reader.ctr count to become 0.
	 */
	for (;;) {
		wait_loops++;
		if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
			uatomic_dec(&gp_futex);
			/* Write futex before read reader_gp */
			smp_mb_master(RCU_MB_GROUP);
		}

		list_for_each_entry_safe(index, tmp, &registry, head) {
			if (!rcu_old_gp_ongoing(&index->ctr))
				list_move(&index->head, &qsreaders);
		}

#ifndef HAS_INCOHERENT_CACHES
		if (list_empty(&registry)) {
			if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
				/* Read reader_gp before write futex */
				smp_mb_master(RCU_MB_GROUP);
				uatomic_set(&gp_futex, 0);
			}
			break;
		} else {
			if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS)
				wait_gp();
			else
				cpu_relax();
		}
#else /* #ifndef HAS_INCOHERENT_CACHES */
		/*
		 * BUSY-LOOP. Force the reader thread to commit its
		 * rcu_reader.ctr update to memory if we wait for too long.
		 */
		if (list_empty(&registry)) {
			if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
				/* Read reader_gp before write futex */
				smp_mb_master(RCU_MB_GROUP);
				uatomic_set(&gp_futex, 0);
			}
			break;
		} else {
			switch (wait_loops) {
			case RCU_QS_ACTIVE_ATTEMPTS:
				wait_gp();
				break; /* only escape switch */
			case KICK_READER_LOOPS:
				smp_mb_master(RCU_MB_GROUP);
				wait_loops = 0;
				break; /* only escape switch */
			default:
				cpu_relax();
			}
		}
#endif /* #else #ifndef HAS_INCOHERENT_CACHES */
	}
	/* Put the reader list back into the registry. */
	list_splice(&qsreaders, &registry);
}

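/*
 * rcu_old_gp_ongoing(), used in the loop above, comes from urcu-static.h.
 * Conceptually, a reader still blocks the grace period if it is nested in
 * a read-side critical section (non-zero nest count) and its snapshot
 * carries the previous phase bit. A sketch of that test, as an assumption;
 * see urcu-static.h for the real code:
 */
#if 0
static inline int rcu_old_gp_ongoing(unsigned long *ctr)
{
	unsigned long v = LOAD_SHARED(*ctr);

	/* Ongoing if nested and the phase differs from the current one. */
	return (v & RCU_GP_CTR_NEST_MASK) &&
		((v ^ rcu_gp_ctr) & RCU_GP_CTR_PHASE);
}
#endif
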
void synchronize_rcu(void)
{
	mutex_lock(&rcu_gp_lock);

	if (list_empty(&registry))
		goto out;

	/*
	 * All threads should read qparity before accessing the data structure
	 * the new ptr points to. Must be done within rcu_gp_lock because it
	 * iterates on reader threads.
	 */
	/* Write new ptr before changing the qparity */
	smp_mb_master(RCU_MB_GROUP);

	/*
	 * Wait for previous parity to be empty of readers.
	 */
	update_counter_and_wait();	/* 0 -> 1, wait readers in parity 0 */

	/*
	 * Must finish waiting for quiescent state for parity 0 before
	 * committing qparity update to memory. Failure to do so could result in
	 * the writer waiting forever while new readers are always accessing
	 * data (no progress).
	 * Ensured by STORE_SHARED and LOAD_SHARED.
	 */

	/*
	 * Adding a smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	smp_mb();

	/*
	 * Wait for previous parity to be empty of readers.
	 */
	update_counter_and_wait();	/* 1 -> 0, wait readers in parity 1 */

	/*
	 * Finish waiting for reader threads before letting the old ptr be
	 * freed. Must be done within rcu_gp_lock because it iterates on reader
	 * threads.
	 */
	smp_mb_master(RCU_MB_GROUP);
out:
	mutex_unlock(&rcu_gp_lock);
}

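/*
 * Typical write-side usage of synchronize_rcu(): publish a new version of
 * a structure, wait for a grace period, then reclaim the old one. This is
 * an illustrative sketch only; struct mydata, shared_ptr and
 * update_shared() are hypothetical names, not part of this library.
 */
#if 0
struct mydata *shared_ptr;

static void update_shared(struct mydata *new_version)
{
	struct mydata *old = shared_ptr;

	rcu_assign_pointer(shared_ptr, new_version);	/* publish */
	synchronize_rcu();	/* wait until no reader can still hold "old" */
	free(old);		/* now safe to reclaim */
}
#endif
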
/*
 * library wrappers to be used by non-LGPL compatible source code.
 */

void rcu_read_lock(void)
{
	_rcu_read_lock();
}

void rcu_read_unlock(void)
{
	_rcu_read_unlock();
}

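/*
 * Read-side usage: LGPL-compatible code may #define _LGPL_SOURCE before
 * including urcu.h to get the inline fast path directly, while other code
 * links against the wrappers above. An illustrative sketch only, reusing
 * the hypothetical struct mydata and shared_ptr from the write-side
 * example; keep the critical section short, since the writer waits for it:
 */
#if 0
static void read_shared(void)
{
	struct mydata *p;

	rcu_read_lock();
	p = rcu_dereference(shared_ptr);	/* dependency-ordered load */
	if (p)
		do_something_with(p);	/* hypothetical consumer */
	rcu_read_unlock();
}
#endif
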
void rcu_register_thread(void)
{
	rcu_reader.tid = pthread_self();
	assert(rcu_reader.need_mb == 0);
	assert(rcu_reader.ctr == 0);

	mutex_lock(&rcu_gp_lock);
	rcu_init();	/* In case gcc does not support constructor attribute */
	list_add(&rcu_reader.head, &registry);
	mutex_unlock(&rcu_gp_lock);
}

void rcu_unregister_thread(void)
{
	mutex_lock(&rcu_gp_lock);
	list_del(&rcu_reader.head);
	mutex_unlock(&rcu_gp_lock);
}

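/*
 * Every thread that runs read-side critical sections must register first
 * and unregister before exiting. A sketch of a reader thread's lifecycle,
 * illustrative only, with the per-iteration work left as a comment:
 */
#if 0
static void *reader_thread(void *arg)
{
	int i;

	rcu_register_thread();	/* before the first rcu_read_lock() */

	for (i = 0; i < 1000; i++) {
		rcu_read_lock();
		/* ... read shared data via rcu_dereference() ... */
		rcu_read_unlock();
	}

	rcu_unregister_thread();	/* after the last read-side section */
	return NULL;
}
#endif
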
#ifdef RCU_MEMBARRIER
void rcu_init(void)
{
	if (init_done)
		return;
	init_done = 1;
	if (!membarrier(MEMBARRIER_EXPEDITED | MEMBARRIER_QUERY))
		has_sys_membarrier = 1;
}
#endif

#ifdef RCU_SIGNAL
static void sigrcu_handler(int signo, siginfo_t *siginfo, void *context)
{
	/*
	 * Executing this smp_mb() is the only purpose of this signal handler.
	 * It promptly promotes a compiler barrier() into a smp_mb() on every
	 * thread it is executed on.
	 */
	smp_mb();
	rcu_reader.need_mb = 0;
	smp_mb();
}

/*
 * rcu_init constructor. Called when the library is linked, but also when
 * reader threads are calling rcu_register_thread().
 * Should only be called by a single thread at a given time. This is ensured by
 * holding the rcu_gp_lock from rcu_register_thread() or by running at library
 * load time, which should not be executed by multiple threads nor concurrently
 * with rcu_register_thread() anyway.
 */
void rcu_init(void)
{
	struct sigaction act;
	int ret;

	if (init_done)
		return;
	init_done = 1;

	act.sa_sigaction = sigrcu_handler;
	act.sa_flags = SA_SIGINFO | SA_RESTART;
	sigemptyset(&act.sa_mask);
	ret = sigaction(SIGRCU, &act, NULL);
	if (ret) {
		perror("Error in sigaction");
		exit(-1);
	}
}

void rcu_exit(void)
{
	struct sigaction act;
	int ret;

	ret = sigaction(SIGRCU, NULL, &act);
	if (ret) {
		perror("Error in sigaction");
		exit(-1);
	}
	assert(act.sa_sigaction == sigrcu_handler);
	assert(list_empty(&registry));
}
#endif /* #ifdef RCU_SIGNAL */