/*
 * urcu.c
 *
 * Userspace RCU library
 *
 * Copyright February 2009 - Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 *
 * Distributed under LGPLv2.1
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */

#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <poll.h>

#include "urcu-static.h"
/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#include "urcu.h"

pthread_mutex_t urcu_mutex = PTHREAD_MUTEX_INITIALIZER;

/*
 * Global grace period counter.
 * Contains the current RCU_GP_CTR_BIT.
 * Also carries a nesting count of 1 (RCU_GP_COUNT) to accelerate the
 * reader fast path.
 * Written to only by writer with mutex taken. Read by both writer and readers.
 */
long urcu_gp_ctr = RCU_GP_COUNT;

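/*
 * Editorial illustration (not from the original source): in the classic
 * urcu encoding assumed here, the low-order bits of urcu_gp_ctr hold a
 * nesting count and RCU_GP_CTR_BIT is the grace-period parity bit that
 * synchronize_rcu() flips. A reader's outermost rcu_read_lock() snapshots
 * urcu_gp_ctr into its urcu_active_readers, so the writer can later tell
 * whether that reader entered its critical section before or after the
 * last parity flip.
 */
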
/*
 * Written to only by each individual reader. Read by both the reader and the
 * writers.
 */
long __thread urcu_active_readers;

/* Thread IDs of registered readers */
#define INIT_NUM_THREADS 4

struct reader_registry {
        pthread_t tid;
        long *urcu_active_readers;
        char *need_mb;
};

#ifdef DEBUG_YIELD
unsigned int yield_active;
unsigned int __thread rand_yield;
#endif

static struct reader_registry *registry;
static char __thread need_mb;
static int num_readers, alloc_readers;

void internal_urcu_lock(void)
{
        int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
        ret = pthread_mutex_lock(&urcu_mutex);
        if (ret) {
                perror("Error in pthread mutex lock");
                exit(-1);
        }
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
        while ((ret = pthread_mutex_trylock(&urcu_mutex)) != 0) {
                if (ret != EBUSY && ret != EINTR) {
                        printf("ret = %d, errno = %d\n", ret, errno);
                        perror("Error in pthread mutex lock");
                        exit(-1);
                }
                if (need_mb) {
                        smp_mb();
                        need_mb = 0;
                        smp_mb();
                }
                poll(NULL, 0, 10);
        }
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}

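/*
 * Editorial note on the DISTRUST_SIGNALS_EXTREME path above: a thread
 * blocked in pthread_mutex_lock() cannot service its need_mb flag if the
 * kernel loses or unduly delays the writer's signal. The writer, which
 * holds urcu_mutex and busy-waits on that flag in force_mb_all_threads(),
 * would then never release the mutex: deadlock. The trylock loop breaks
 * the cycle by servicing need_mb while waiting for the mutex.
 */
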
void internal_urcu_unlock(void)
{
        int ret;

        ret = pthread_mutex_unlock(&urcu_mutex);
        if (ret) {
                perror("Error in pthread mutex unlock");
                exit(-1);
        }
}

/*
 * Called with urcu_mutex held.
 */
static void switch_next_urcu_qparity(void)
{
        STORE_SHARED(urcu_gp_ctr, urcu_gp_ctr ^ RCU_GP_CTR_BIT);
}

#ifdef DEBUG_FULL_MB
#ifdef HAS_INCOHERENT_CACHES
static void force_mb_single_thread(struct reader_registry *index)
{
        smp_mb();
}
#endif /* #ifdef HAS_INCOHERENT_CACHES */

static void force_mb_all_threads(void)
{
        smp_mb();
}
#else /* #ifdef DEBUG_FULL_MB */
#ifdef HAS_INCOHERENT_CACHES
static void force_mb_single_thread(struct reader_registry *index)
{
        assert(registry);
        /*
         * pthread_kill has a smp_mb(). But beware, we assume it performs
         * a cache flush on architectures with non-coherent cache. Let's play
         * it safe and not assume anything: we use smp_mc() to make sure the
         * cache flush is enforced.
         */
        *index->need_mb = 1;
        smp_mc();       /* write ->need_mb before sending the signal */
        pthread_kill(index->tid, SIGURCU);
        smp_mb();
        /*
         * Wait for the sighandler (and thus the mb()) to execute on the
         * target thread. BUSY-LOOP.
         */
        while (*index->need_mb) {
                poll(NULL, 0, 1);
        }
        smp_mb();       /* read ->need_mb before ending the barrier */
}
#endif /* #ifdef HAS_INCOHERENT_CACHES */

static void force_mb_all_threads(void)
{
        struct reader_registry *index;
        /*
         * Ask each thread to execute a smp_mb() so we can consider the
         * compiler barriers around rcu read lock as real memory barriers.
         */
        if (!registry)
                return;
        /*
         * pthread_kill has a smp_mb(). But beware, we assume it performs
         * a cache flush on architectures with non-coherent cache. Let's play
         * it safe and not assume anything: we use smp_mc() to make sure the
         * cache flush is enforced.
         */
        for (index = registry; index < registry + num_readers; index++) {
                *index->need_mb = 1;
                smp_mc();       /* write need_mb before sending the signal */
                pthread_kill(index->tid, SIGURCU);
        }
        /*
         * Wait for the sighandler (and thus the mb()) to execute on every
         * thread.
         *
         * Note that the pthread_kill() below will never be executed on
         * systems that correctly deliver signals in a timely manner.
         * However, it is not uncommon for kernels to have bugs that can
         * result in lost or unduly delayed signals.
         *
         * If you see the pthread_kill() below executing much at all, we
         * suggest testing the underlying kernel and filing the relevant
         * bug report. For Linux kernels, we recommend getting the Linux
         * Test Project (LTP).
         */
        for (index = registry; index < registry + num_readers; index++) {
                while (*index->need_mb) {
                        pthread_kill(index->tid, SIGURCU);
                        poll(NULL, 0, 1);
                }
        }
        smp_mb();       /* read ->need_mb before ending the barrier */
}
#endif /* #else #ifdef DEBUG_FULL_MB */

void wait_for_quiescent_state(void)
{
        struct reader_registry *index;

        if (!registry)
                return;
        /*
         * Wait for each thread's urcu_active_readers count to become 0.
         */
        for (index = registry; index < registry + num_readers; index++) {
#ifndef HAS_INCOHERENT_CACHES
                while (rcu_old_gp_ongoing(index->urcu_active_readers))
                        cpu_relax();
#else /* #ifndef HAS_INCOHERENT_CACHES */
                int wait_loops = 0;
                /*
                 * BUSY-LOOP. Force the reader thread to commit its
                 * urcu_active_readers update to memory if we wait for too long.
                 */
                while (rcu_old_gp_ongoing(index->urcu_active_readers)) {
                        if (wait_loops++ == KICK_READER_LOOPS) {
                                force_mb_single_thread(index);
                                wait_loops = 0;
                        } else {
                                cpu_relax();
                        }
                }
#endif /* #else #ifndef HAS_INCOHERENT_CACHES */
        }
}

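/*
 * For reference, an editorial sketch of the test rcu_old_gp_ongoing()
 * performs (it is defined in urcu-static.h; this illustrates the classic
 * encoding and is not a verbatim copy):
 *
 *      v = LOAD_SHARED(*value);
 *      return (v & RCU_GP_CTR_NEST_MASK) &&
 *              ((v ^ urcu_gp_ctr) & RCU_GP_CTR_BIT);
 *
 * i.e. the reader is currently inside a read-side critical section
 * (non-zero nesting count) and entered it under the previous parity.
 */
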
void synchronize_rcu(void)
{
        internal_urcu_lock();

        /*
         * All threads should read qparity before accessing the data
         * structure pointed to by the new ptr. Must be done within
         * internal_urcu_lock because it iterates on reader threads.
         */
        /* Write new ptr before changing the qparity */
        force_mb_all_threads();

        switch_next_urcu_qparity();     /* 0 -> 1 */

        /*
         * Must commit qparity update to memory before waiting for parity
         * 0 quiescent state. Failure to do so could result in the writer
         * waiting forever while new readers are always accessing data (no
         * progress).
         * Ensured by STORE_SHARED and LOAD_SHARED.
         */

        /*
         * Wait for previous parity to be empty of readers.
         */
        wait_for_quiescent_state();     /* Wait for readers in parity 0 */

        /*
         * Must finish waiting for quiescent state for parity 0 before
         * committing qparity update to memory. Failure to do so could
         * result in the writer waiting forever while new readers are
         * always accessing data (no progress).
         * Ensured by STORE_SHARED and LOAD_SHARED.
         */

        switch_next_urcu_qparity();     /* 1 -> 0 */

        /*
         * Must commit qparity update to memory before waiting for parity
         * 1 quiescent state. Failure to do so could result in the writer
         * waiting forever while new readers are always accessing data (no
         * progress).
         * Ensured by STORE_SHARED and LOAD_SHARED.
         */

        /*
         * Wait for previous parity to be empty of readers.
         */
        wait_for_quiescent_state();     /* Wait for readers in parity 1 */

        /*
         * Finish waiting for reader threads before letting the old ptr be
         * freed. Must be done within internal_urcu_lock because it
         * iterates on reader threads.
         */
        force_mb_all_threads();

        internal_urcu_unlock();
}
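
/*
 * Illustrative writer-side usage of synchronize_rcu() (editorial sketch,
 * not part of the library; "struct foo", "shared_ptr" and the init code
 * are hypothetical caller-side names):
 *
 *      struct foo *new, *old;
 *
 *      new = malloc(sizeof(*new));
 *      new->a = ...;                           // initialize before publication
 *      old = shared_ptr;
 *      rcu_assign_pointer(shared_ptr, new);    // publish the new version
 *      synchronize_rcu();                      // wait out pre-existing readers
 *      free(old);                              // safe: no reader still holds old
 */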

/*
 * library wrappers to be used by non-LGPL compatible source code.
 */

void rcu_read_lock(void)
{
        _rcu_read_lock();
}

void rcu_read_unlock(void)
{
        _rcu_read_unlock();
}

void *rcu_dereference(void *p)
{
        return _rcu_dereference(p);
}

void *rcu_assign_pointer_sym(void **p, void *v)
{
        wmb();
        return STORE_SHARED(p, v);
}

void *rcu_xchg_pointer_sym(void **p, void *v)
{
        wmb();
        return xchg(p, v);
}

void *rcu_publish_content_sym(void **p, void *v)
{
        void *oldptr;

        oldptr = _rcu_xchg_pointer(p, v);
        synchronize_rcu();
        return oldptr;
}
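
/*
 * Illustrative reader-side usage of the wrappers above (editorial sketch;
 * "shared_ptr" and "do_something_with" are hypothetical):
 *
 *      struct foo *p;
 *
 *      rcu_read_lock();
 *      p = rcu_dereference(shared_ptr);
 *      if (p)
 *              do_something_with(p);   // must not block waiting on a writer
 *      rcu_read_unlock();
 */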

static void rcu_add_reader(pthread_t id)
{
        struct reader_registry *oldarray;

        if (!registry) {
                alloc_readers = INIT_NUM_THREADS;
                num_readers = 0;
                registry =
                        malloc(sizeof(struct reader_registry) * alloc_readers);
        }
        if (alloc_readers < num_readers + 1) {
                oldarray = registry;
                registry = malloc(sizeof(struct reader_registry)
                                * (alloc_readers << 1));
                memcpy(registry, oldarray,
                        sizeof(struct reader_registry) * alloc_readers);
                alloc_readers <<= 1;
                free(oldarray);
        }
        registry[num_readers].tid = id;
        /* reference to the TLS of _this_ reader thread. */
        registry[num_readers].urcu_active_readers = &urcu_active_readers;
        registry[num_readers].need_mb = &need_mb;
        num_readers++;
}

/*
 * Never shrink (implementation limitation).
 * This is O(nb threads). Eventually use a hash table.
 */
static void rcu_remove_reader(pthread_t id)
{
        struct reader_registry *index;

        assert(registry != NULL);
        for (index = registry; index < registry + num_readers; index++) {
                if (pthread_equal(index->tid, id)) {
                        memcpy(index, &registry[num_readers - 1],
                                sizeof(struct reader_registry));
                        registry[num_readers - 1].tid = 0;
                        registry[num_readers - 1].urcu_active_readers = NULL;
                        num_readers--;
                        return;
                }
        }
        /* Hrm, not found. Did the thread forget to register? */
        assert(0);
}

void rcu_register_thread(void)
{
        internal_urcu_lock();
        rcu_add_reader(pthread_self());
        internal_urcu_unlock();
}

void rcu_unregister_thread(void)
{
        internal_urcu_lock();
        rcu_remove_reader(pthread_self());
        internal_urcu_unlock();
}
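
/*
 * Illustrative reader thread lifecycle (editorial sketch with hypothetical
 * names): every thread that calls rcu_read_lock() must register itself
 * first, and unregister before exiting.
 *
 *      void *reader_thread(void *arg)
 *      {
 *              rcu_register_thread();
 *              while (!stop) {                 // "stop" is a caller-defined flag
 *                      rcu_read_lock();
 *                      ...access RCU-protected data...
 *                      rcu_read_unlock();
 *              }
 *              rcu_unregister_thread();
 *              return NULL;
 *      }
 */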

#ifndef DEBUG_FULL_MB
static void sigurcu_handler(int signo, siginfo_t *siginfo, void *context)
{
        /*
         * Executing this smp_mb() is the only purpose of this signal
         * handler. It momentarily promotes barrier() into smp_mb() on
         * every thread it is executed on.
         */
        smp_mb();
        need_mb = 0;
        smp_mb();
}

void __attribute__((constructor)) urcu_init(void)
{
        struct sigaction act;
        int ret;

        act.sa_sigaction = sigurcu_handler;
        act.sa_flags = SA_SIGINFO;      /* required when using sa_sigaction */
        sigemptyset(&act.sa_mask);
        ret = sigaction(SIGURCU, &act, NULL);
        if (ret) {
                perror("Error in sigaction");
                exit(-1);
        }
}
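
/*
 * Editorial note: SIGURCU is defined in urcu.h (typically SIGUSR1 in this
 * codebase, if not overridden). The library reserves that signal for
 * force_mb_all_threads(), so applications must not install their own
 * handler for it unless they build with DEBUG_FULL_MB, in which case no
 * signal is used at all.
 */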

void __attribute__((destructor)) urcu_exit(void)
{
        struct sigaction act;
        int ret;

        ret = sigaction(SIGURCU, NULL, &act);
        if (ret) {
                perror("Error in sigaction");
                exit(-1);
        }
        assert(act.sa_sigaction == sigurcu_handler);
        free(registry);
}
#endif /* #ifndef DEBUG_FULL_MB */