Fix opensuse powerpc build
[urcu.git] / urcu.c
/*
 * urcu.c
 *
 * Userspace RCU library
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */

#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <poll.h>

#include "urcu-static.h"
/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#include "urcu.h"

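/* Serializes writers; also protects the reader registry below. */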
pthread_mutex_t urcu_mutex = PTHREAD_MUTEX_INITIALIZER;

/*
 * Global grace period counter.
 * Contains the current RCU_GP_CTR_BIT.
 * Also has a RCU_GP_COUNT of 1, to accelerate the reader fast path.
 * Written to only by writer with mutex taken. Read by both writer and readers.
 */
long urcu_gp_ctr = RCU_GP_COUNT;

/*
 * Written to only by each individual reader. Read by both the reader and the
 * writers.
 */
long __thread urcu_active_readers;

/* Thread IDs of registered readers */
#define INIT_NUM_THREADS 4

struct reader_registry {
	pthread_t tid;
	long *urcu_active_readers;
	char *need_mb;
};

#ifdef DEBUG_YIELD
unsigned int yield_active;
unsigned int __thread rand_yield;
#endif

static struct reader_registry *registry;
static char __thread need_mb;
static int num_readers, alloc_readers;

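/*
 * Take urcu_mutex. Under DISTRUST_SIGNALS_EXTREME, spin with trylock and
 * service our own need_mb flag while waiting, so that a writer blocked in
 * force_mb_all_threads() can still make progress if our SIGURCU signal was
 * lost or unduly delayed.
 */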
void internal_urcu_lock(void)
{
	int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
	ret = pthread_mutex_lock(&urcu_mutex);
	if (ret) {
		perror("Error in pthread mutex lock");
		exit(-1);
	}
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
	while ((ret = pthread_mutex_trylock(&urcu_mutex)) != 0) {
		if (ret != EBUSY && ret != EINTR) {
			printf("ret = %d, errno = %d\n", ret, errno);
			perror("Error in pthread mutex lock");
			exit(-1);
		}
		if (need_mb) {
			smp_mb();
			need_mb = 0;
			smp_mb();
		}
		poll(NULL, 0, 10);
	}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}

void internal_urcu_unlock(void)
{
	int ret;

	ret = pthread_mutex_unlock(&urcu_mutex);
	if (ret) {
		perror("Error in pthread mutex unlock");
		exit(-1);
	}
}

/*
 * Called with urcu_mutex held.
 */
static void switch_next_urcu_qparity(void)
{
	STORE_SHARED(urcu_gp_ctr, urcu_gp_ctr ^ RCU_GP_CTR_BIT);
}

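/*
 * How the flip is observed (per the definitions in urcu-static.h): the
 * low-order bits of urcu_gp_ctr hold a nesting count (in units of
 * RCU_GP_COUNT) while RCU_GP_CTR_BIT selects the grace-period phase. An
 * outermost rcu_read_lock() snapshots urcu_gp_ctr into urcu_active_readers,
 * so after the XOR above rcu_old_gp_ongoing() can distinguish readers still
 * in the previous phase (which must be waited for) from readers that
 * started after the flip.
 */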
#ifdef DEBUG_FULL_MB
#ifdef HAS_INCOHERENT_CACHES
static void force_mb_single_thread(struct reader_registry *index)
{
	smp_mb();
}
#endif /* #ifdef HAS_INCOHERENT_CACHES */

static void force_mb_all_threads(void)
{
	smp_mb();
}
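/*
 * With DEBUG_FULL_MB, the read-side primitives in urcu-static.h use real
 * smp_mb() instead of mere compiler barriers, so a local smp_mb() is all
 * the writer needs and the signal-based promotion scheme below is not
 * required.
 */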
#else /* #ifdef DEBUG_FULL_MB */
#ifdef HAS_INCOHERENT_CACHES
static void force_mb_single_thread(struct reader_registry *index)
{
	assert(registry);
	/*
	 * pthread_kill has a smp_mb(). But beware, we assume it performs
	 * a cache flush on architectures with non-coherent cache. Let's play
	 * it safe and not assume anything: we use smp_mc() to make sure the
	 * cache flush is enforced.
	 */
	*index->need_mb = 1;
	smp_mc();	/* write ->need_mb before sending the signal */
	pthread_kill(index->tid, SIGURCU);
	smp_mb();
	/*
	 * Wait for the sighandler (and thus mb()) to execute on the target
	 * thread. BUSY-LOOP.
	 */
	while (*index->need_mb) {
		poll(NULL, 0, 1);
	}
	smp_mb();	/* read ->need_mb before ending the barrier */
}
#endif /* #ifdef HAS_INCOHERENT_CACHES */

static void force_mb_all_threads(void)
{
	struct reader_registry *index;
	/*
	 * Ask each thread to execute a smp_mb() so we can consider the
	 * compiler barriers around rcu read lock as real memory barriers.
	 */
	if (!registry)
		return;
	/*
	 * pthread_kill has a smp_mb(). But beware, we assume it performs
	 * a cache flush on architectures with non-coherent cache. Let's play
	 * it safe and not assume anything: we use smp_mc() to make sure the
	 * cache flush is enforced.
	 */
	for (index = registry; index < registry + num_readers; index++) {
		*index->need_mb = 1;
		smp_mc();	/* write need_mb before sending the signal */
		pthread_kill(index->tid, SIGURCU);
	}
	/*
	 * Wait for the sighandler (and thus mb()) to execute on every thread.
	 *
	 * Note that the pthread_kill() below will never be executed on
	 * systems that correctly deliver signals in a timely manner.
	 * However, it is not uncommon for kernels to have bugs that can
	 * result in lost or unduly delayed signals.
	 *
	 * If you are seeing the below pthread_kill() executing much at
	 * all, we suggest testing the underlying kernel and filing the
	 * relevant bug report. For Linux kernels, we recommend getting
	 * the Linux Test Project (LTP).
	 */
	for (index = registry; index < registry + num_readers; index++) {
		while (*index->need_mb) {
			pthread_kill(index->tid, SIGURCU);
			poll(NULL, 0, 1);
		}
	}
	smp_mb();	/* read ->need_mb before ending the barrier */
}
#endif /* #else #ifdef DEBUG_FULL_MB */

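/*
 * Summary of the need_mb handshake used above: the writer sets each
 * reader's *need_mb, sends SIGURCU, and busy-waits until the reader's
 * signal handler has executed smp_mb() and cleared the flag. This pairs a
 * full memory barrier on the reader side with the writer's own smp_mb()
 * before it goes on to read urcu_active_readers.
 */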
void wait_for_quiescent_state(void)
{
	struct reader_registry *index;

	if (!registry)
		return;
	/*
	 * Wait for each thread's urcu_active_readers count to become 0.
	 */
	for (index = registry; index < registry + num_readers; index++) {
#ifndef HAS_INCOHERENT_CACHES
		while (rcu_old_gp_ongoing(index->urcu_active_readers))
			cpu_relax();
#else /* #ifndef HAS_INCOHERENT_CACHES */
		int wait_loops = 0;
		/*
		 * BUSY-LOOP. Force the reader thread to commit its
		 * urcu_active_readers update to memory if we wait for too
		 * long.
		 */
		while (rcu_old_gp_ongoing(index->urcu_active_readers)) {
			if (wait_loops++ == KICK_READER_LOOPS) {
				force_mb_single_thread(index);
				wait_loops = 0;
			} else {
				cpu_relax();
			}
		}
#endif /* #else #ifndef HAS_INCOHERENT_CACHES */
	}
}

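/*
 * Note on the double flip performed by synchronize_rcu() below: two parity
 * phases are used because a single flip-and-wait can race with a reader
 * that fetched urcu_gp_ctr just before the flip but had not yet published
 * its urcu_active_readers; the second flip-and-wait closes that window, so
 * every reader that could still hold a reference to the old data has
 * completed before synchronize_rcu() returns.
 */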
void synchronize_rcu(void)
{
	internal_urcu_lock();

	/* All threads should read qparity before accessing the data structure
	 * pointed to by the new ptr. Must be done within internal_urcu_lock
	 * because it iterates on reader threads. */
	/* Write new ptr before changing the qparity */
	force_mb_all_threads();

	switch_next_urcu_qparity();	/* 0 -> 1 */

	/*
	 * Must commit qparity update to memory before waiting for parity
	 * 0 quiescent state. Failure to do so could result in the writer
	 * waiting forever while new readers are always accessing data (no
	 * progress).
	 * Ensured by STORE_SHARED and LOAD_SHARED.
	 */

	/*
	 * The current RCU formal verification model assumes sequential
	 * execution of the write-side. Add core synchronization instructions.
	 * These can be removed if the formal model is extended to prove that
	 * reordering is still correct.
	 */
	sync_core();	/* Formal model assumes serialized execution */

	/*
	 * Wait for previous parity to be empty of readers.
	 */
	wait_for_quiescent_state();	/* Wait for readers in parity 0 */

	/*
	 * Must finish waiting for quiescent state for parity 0 before
	 * committing qparity update to memory. Failure to do so could result
	 * in the writer waiting forever while new readers are always
	 * accessing data (no progress).
	 * Ensured by STORE_SHARED and LOAD_SHARED.
	 */

	sync_core();	/* Formal model assumes serialized execution */

	switch_next_urcu_qparity();	/* 1 -> 0 */

	/*
	 * Must commit qparity update to memory before waiting for parity
	 * 1 quiescent state. Failure to do so could result in the writer
	 * waiting forever while new readers are always accessing data (no
	 * progress).
	 * Ensured by STORE_SHARED and LOAD_SHARED.
	 */

	sync_core();	/* Formal model assumes serialized execution */

	/*
	 * Wait for previous parity to be empty of readers.
	 */
	wait_for_quiescent_state();	/* Wait for readers in parity 1 */

	/* Finish waiting for reader threads before letting the old ptr be
	 * freed. Must be done within internal_urcu_lock because it iterates
	 * on reader threads. */
	force_mb_all_threads();

	internal_urcu_unlock();
}

/*
 * Library wrappers to be used by non-LGPL-compatible source code.
 */

void rcu_read_lock(void)
{
	_rcu_read_lock();
}

void rcu_read_unlock(void)
{
	_rcu_read_unlock();
}

void *rcu_dereference(void *p)
{
	return _rcu_dereference(p);
}

void *rcu_assign_pointer_sym(void **p, void *v)
{
	wmb();
	return STORE_SHARED(p, v);
}

void *rcu_xchg_pointer_sym(void **p, void *v)
{
	wmb();
	return xchg(p, v);
}

void *rcu_publish_content_sym(void **p, void *v)
{
	void *oldptr;

	oldptr = _rcu_xchg_pointer(p, v);
	synchronize_rcu();
	return oldptr;
}

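/*
 * Usage sketch for the wrappers above (illustration only, not part of the
 * library; "struct mydata", "global_ptr" and "do_something" are
 * hypothetical, and both threads are assumed to have called
 * rcu_register_thread()):
 *
 *	Writer:
 *		struct mydata *new = malloc(sizeof(*new));
 *		new->val = 42;
 *		struct mydata *old =
 *			rcu_publish_content_sym((void **)&global_ptr, new);
 *		free(old);	(safe: a grace period elapsed inside)
 *
 *	Reader:
 *		rcu_read_lock();
 *		struct mydata *p = rcu_dereference(global_ptr);
 *		if (p)
 *			do_something(p->val);
 *		rcu_read_unlock();
 */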
static void rcu_add_reader(pthread_t id)
{
	struct reader_registry *oldarray;

	if (!registry) {
		alloc_readers = INIT_NUM_THREADS;
		num_readers = 0;
		registry =
			malloc(sizeof(struct reader_registry) * alloc_readers);
	}
	if (alloc_readers < num_readers + 1) {
		oldarray = registry;
		registry = malloc(sizeof(struct reader_registry)
				* (alloc_readers << 1));
		memcpy(registry, oldarray,
		       sizeof(struct reader_registry) * alloc_readers);
		alloc_readers <<= 1;
		free(oldarray);
	}
	registry[num_readers].tid = id;
	/* reference to the TLS of _this_ reader thread. */
	registry[num_readers].urcu_active_readers = &urcu_active_readers;
	registry[num_readers].need_mb = &need_mb;
	num_readers++;
}

/*
 * Never shrink (implementation limitation).
 * This is O(nb threads). Eventually use a hash table.
 */
static void rcu_remove_reader(pthread_t id)
{
	struct reader_registry *index;

	assert(registry != NULL);
	for (index = registry; index < registry + num_readers; index++) {
		if (pthread_equal(index->tid, id)) {
			memcpy(index, &registry[num_readers - 1],
				sizeof(struct reader_registry));
			registry[num_readers - 1].tid = 0;
			registry[num_readers - 1].urcu_active_readers = NULL;
			num_readers--;
			return;
		}
	}
	/* Hrm, not found. Forgot to register? */
	assert(0);
}

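/*
 * Every reader thread must call rcu_register_thread() before its first
 * rcu_read_lock() and rcu_unregister_thread() before it exits: writers
 * only consult the registry, so an unregistered reader is invisible to
 * wait_for_quiescent_state().
 */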
void rcu_register_thread(void)
{
	internal_urcu_lock();
	rcu_add_reader(pthread_self());
	internal_urcu_unlock();
}

void rcu_unregister_thread(void)
{
	internal_urcu_lock();
	rcu_remove_reader(pthread_self());
	internal_urcu_unlock();
}

#ifndef DEBUG_FULL_MB
static void sigurcu_handler(int signo, siginfo_t *siginfo, void *context)
{
	/*
	 * Executing this smp_mb() is the only purpose of this signal handler.
	 * It temporarily promotes the read-side barrier() into a full
	 * smp_mb() on every thread it runs on.
	 */
	smp_mb();
	need_mb = 0;
	smp_mb();
}

void __attribute__((constructor)) urcu_init(void)
{
	struct sigaction act;
	int ret;

	act.sa_sigaction = sigurcu_handler;
	/* sa_sigaction is only honored when SA_SIGINFO is set; also start
	 * from an empty signal mask rather than uninitialized stack data. */
	act.sa_flags = SA_SIGINFO;
	sigemptyset(&act.sa_mask);
	ret = sigaction(SIGURCU, &act, NULL);
	if (ret) {
		perror("Error in sigaction");
		exit(-1);
	}
}

void __attribute__((destructor)) urcu_exit(void)
{
	struct sigaction act;
	int ret;

	ret = sigaction(SIGURCU, NULL, &act);
	if (ret) {
		perror("Error in sigaction");
		exit(-1);
	}
	assert(act.sa_sigaction == sigurcu_handler);
	free(registry);
}
#endif /* #ifndef DEBUG_FULL_MB */