/*
 * urcu.c
 *
 * Userspace RCU library
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */

#define _BSD_SOURCE
#define _GNU_SOURCE
#define _LGPL_SOURCE
#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <poll.h>

#include "urcu/wfqueue.h"
#include "urcu/map/urcu.h"
#include "urcu/static/urcu.h"
#include "urcu-pointer.h"
#include "urcu/tls-compat.h"

#include "urcu-die.h"

/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#undef _LGPL_SOURCE
#include "urcu.h"
#define _LGPL_SOURCE

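/*
 * Why the #undef/#define dance above matters -- an illustrative sketch,
 * assuming the usual mapping done by the urcu headers: with _LGPL_SOURCE
 * defined, LGPL-compatible callers inline the read-side fast path, e.g.
 *
 *	#define _LGPL_SOURCE
 *	#include <urcu.h>
 *	...
 *	rcu_read_lock();	// resolves to inline _rcu_read_lock()
 *
 * Including urcu.h here with _LGPL_SOURCE undefined instead declares
 * rcu_read_lock() and friends as external symbols, which is what allows
 * this file to emit the out-of-line wrappers defined further below.
 */
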
/*
 * If a reader is really non-cooperative and refuses to commit its
 * rcu_active_readers count to memory (there is no barrier in the reader
 * per se), kick it after 10 loops waiting for it.
 */
#define KICK_READER_LOOPS 10

/*
 * Active attempts to check for reader Q.S. before calling futex().
 */
#define RCU_QS_ACTIVE_ATTEMPTS 100

#ifdef RCU_MEMBARRIER
static int init_done;
int has_sys_membarrier;

void __attribute__((constructor)) rcu_init(void);
#endif

#ifdef RCU_MB
void rcu_init(void)
{
}
#endif

#ifdef RCU_SIGNAL
static int init_done;

void __attribute__((constructor)) rcu_init(void);
void __attribute__((destructor)) rcu_exit(void);
#endif

/*
 * rcu_gp_lock ensures mutual exclusion between threads calling
 * synchronize_rcu().
 */
static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
/*
 * rcu_registry_lock ensures mutual exclusion between threads
 * registering and unregistering themselves to/from the registry, and
 * with threads reading that registry from synchronize_rcu(). However,
 * this lock is not held through the entire grace-period wait: it is
 * sporadically released between iterations on the registry.
 * rcu_registry_lock may nest inside rcu_gp_lock.
 */
static pthread_mutex_t rcu_registry_lock = PTHREAD_MUTEX_INITIALIZER;

int32_t gp_futex;

/*
 * Global grace period counter.
 * Contains the current RCU_GP_CTR_PHASE.
 * Also has a RCU_GP_COUNT of 1, to accelerate the reader fast path.
 * Written to only by writer with mutex taken. Read by both writer and readers.
 */
unsigned long rcu_gp_ctr = RCU_GP_COUNT;
/*
 * Written to only by each individual reader. Read by both the reader and the
 * writers.
 */
__DEFINE_URCU_TLS_GLOBAL(struct rcu_reader, rcu_reader);

#ifdef DEBUG_YIELD
unsigned int yield_active;
__DEFINE_URCU_TLS_GLOBAL(unsigned int, rand_yield);
#endif

static CDS_LIST_HEAD(registry);

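/*
 * How the counter layout above is consumed -- a sketch, assuming the
 * RCU_GP_COUNT/RCU_GP_CTR_PHASE/RCU_GP_CTR_NEST_MASK definitions from
 * urcu/static/urcu.h:
 *
 *	// Outermost rcu_read_lock() snapshots the global counter,
 *	// phase bit included, into the per-thread reader count:
 *	_CMM_STORE_SHARED(URCU_TLS(rcu_reader).ctr,
 *			  _CMM_LOAD_SHARED(rcu_gp_ctr));
 *
 * The writer later flips RCU_GP_CTR_PHASE and waits only for readers
 * whose snapshot still carries the old phase bit; readers that started
 * after the flip can safely be ignored.
 */
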
static void mutex_lock(pthread_mutex_t *mutex)
{
	int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
	ret = pthread_mutex_lock(mutex);
	if (ret)
		urcu_die(ret);
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
	while ((ret = pthread_mutex_trylock(mutex)) != 0) {
		if (ret != EBUSY && ret != EINTR)
			urcu_die(ret);
		if (CMM_LOAD_SHARED(URCU_TLS(rcu_reader).need_mb)) {
			cmm_smp_mb();
			_CMM_STORE_SHARED(URCU_TLS(rcu_reader).need_mb, 0);
			cmm_smp_mb();
		}
		poll(NULL, 0, 10);
	}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}

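/*
 * Note on the DISTRUST_SIGNALS_EXTREME path above (intent inferred from
 * the code, not a definitive statement): when SIGRCU delivery cannot be
 * trusted, a thread spinning on pthread_mutex_trylock() acknowledges
 * ->need_mb itself (barrier, clear flag, barrier), so a writer waiting
 * in force_mb_all_readers() can still make progress even if the signal
 * is lost while this thread contends for the mutex.
 */
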
static void mutex_unlock(pthread_mutex_t *mutex)
{
	int ret;

	ret = pthread_mutex_unlock(mutex);
	if (ret)
		urcu_die(ret);
}

#ifdef RCU_MEMBARRIER
static void smp_mb_master(int group)
{
	if (caa_likely(has_sys_membarrier))
		membarrier(MEMBARRIER_EXPEDITED);
	else
		cmm_smp_mb();
}
#endif

#ifdef RCU_MB
static void smp_mb_master(int group)
{
	cmm_smp_mb();
}
#endif

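/*
 * Barrier pairing sketch for the RCU_MEMBARRIER flavor (assuming the
 * smp_mb_slave() definition in urcu/static/urcu.h): readers only issue
 * compiler barriers around their ->ctr accesses, and smp_mb_master()
 * promotes them to full memory barriers on all running threads via
 * sys_membarrier(). The cost of the barrier is thus paid on the rare
 * write side instead of the reader fast path:
 *
 *	// reader side: compiler barrier only, no fence instruction
 *	smp_mb_slave(RCU_MB_GROUP);
 *	// writer side: force a memory barrier on every thread
 *	smp_mb_master(RCU_MB_GROUP);
 */
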
#ifdef RCU_SIGNAL
static void force_mb_all_readers(void)
{
	struct rcu_reader *index;

	/*
	 * Ask each thread to execute a cmm_smp_mb() so we can consider the
	 * compiler barriers around rcu read lock as real memory barriers.
	 */
	if (cds_list_empty(&registry))
		return;
	/*
	 * pthread_kill has a cmm_smp_mb(). But beware, we assume it performs
	 * a cache flush on architectures with non-coherent cache. Let's play
	 * it safe and not assume anything: we use cmm_smp_mc() to make sure
	 * the cache flush is enforced.
	 */
	cds_list_for_each_entry(index, &registry, node) {
		CMM_STORE_SHARED(index->need_mb, 1);
		pthread_kill(index->tid, SIGRCU);
	}
	/*
	 * Wait for sighandler (and thus mb()) to execute on every thread.
	 *
	 * Note that the pthread_kill() will never be executed on systems
	 * that correctly deliver signals in a timely manner. However, it
	 * is not uncommon for kernels to have bugs that can result in
	 * lost or unduly delayed signals.
	 *
	 * If you are seeing the below pthread_kill() executing much at
	 * all, we suggest testing the underlying kernel and filing the
	 * relevant bug report. For Linux kernels, we recommend getting
	 * the Linux Test Project (LTP).
	 */
	cds_list_for_each_entry(index, &registry, node) {
		while (CMM_LOAD_SHARED(index->need_mb)) {
			pthread_kill(index->tid, SIGRCU);
			poll(NULL, 0, 1);
		}
	}
	cmm_smp_mb();	/* read ->need_mb before ending the barrier */
}

static void smp_mb_master(int group)
{
	force_mb_all_readers();
}
#endif /* #ifdef RCU_SIGNAL */

/*
 * synchronize_rcu() waiting. Single thread.
 */
static void wait_gp(void)
{
	/* Read reader_gp before read futex */
	smp_mb_master(RCU_MB_GROUP);
	if (uatomic_read(&gp_futex) == -1)
		futex_async(&gp_futex, FUTEX_WAIT, -1,
			NULL, NULL, 0);
}

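/*
 * Futex protocol sketch, inferred from wait_gp() and
 * update_counter_and_wait() below: the writer decrements gp_futex to -1
 * to announce it is about to block, issues its barrier, re-scans the
 * registry, and only then sleeps in FUTEX_WAIT while gp_futex is still
 * -1. A reader exiting its critical section (wake_up_gp() in
 * urcu/static/urcu.h) resets gp_futex to 0 and calls FUTEX_WAKE, so the
 * wakeup cannot be lost between the re-check and the sleep.
 */
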
/*
 * Always called with rcu_registry_lock held. Releases this lock between
 * iterations and grabs it again. Holds the lock when it returns.
 */
void update_counter_and_wait(void)
{
	CDS_LIST_HEAD(qsreaders);
	unsigned int wait_loops = 0;
	struct rcu_reader *index, *tmp;
#ifdef HAS_INCOHERENT_CACHES
	unsigned int wait_gp_loops = 0;
#endif /* HAS_INCOHERENT_CACHES */

	/* Switch parity: 0 -> 1, 1 -> 0 */
	CMM_STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR_PHASE);

	/*
	 * Must commit rcu_gp_ctr update to memory before waiting for quiescent
	 * state. Failure to do so could result in the writer waiting forever
	 * while new readers are always accessing data (no progress). Enforce
	 * compiler-order of store to rcu_gp_ctr before load rcu_reader ctr.
	 */
	cmm_barrier();

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes
	 * the model easier to understand. It does not have a big performance
	 * impact anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/*
	 * Wait for each thread URCU_TLS(rcu_reader).ctr count to become 0.
	 */
	for (;;) {
		if (wait_loops < RCU_QS_ACTIVE_ATTEMPTS)
			wait_loops++;
		if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
			uatomic_dec(&gp_futex);
			/* Write futex before read reader_gp */
			smp_mb_master(RCU_MB_GROUP);
		}

		cds_list_for_each_entry_safe(index, tmp, &registry, node) {
			if (!rcu_gp_ongoing(&index->ctr))
				cds_list_move(&index->node, &qsreaders);
		}

#ifndef HAS_INCOHERENT_CACHES
		if (cds_list_empty(&registry)) {
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
				/* Read reader_gp before write futex */
				smp_mb_master(RCU_MB_GROUP);
				uatomic_set(&gp_futex, 0);
			}
			break;
		} else {
			/* Temporarily unlock the registry lock. */
			mutex_unlock(&rcu_registry_lock);
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS)
				wait_gp();
			else
				caa_cpu_relax();
			/* Re-lock the registry lock before the next loop. */
			mutex_lock(&rcu_registry_lock);
		}
#else /* #ifndef HAS_INCOHERENT_CACHES */
		/*
		 * BUSY-LOOP. Force the reader thread to commit its
		 * URCU_TLS(rcu_reader).ctr update to memory if we wait
		 * for too long.
		 */
		if (cds_list_empty(&registry)) {
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
				/* Read reader_gp before write futex */
				smp_mb_master(RCU_MB_GROUP);
				uatomic_set(&gp_futex, 0);
			}
			break;
		} else {
			if (wait_gp_loops == KICK_READER_LOOPS) {
				smp_mb_master(RCU_MB_GROUP);
				wait_gp_loops = 0;
			}
			/* Temporarily unlock the registry lock. */
			mutex_unlock(&rcu_registry_lock);
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
				wait_gp();
				wait_gp_loops++;
			} else {
				caa_cpu_relax();
			}
			/* Re-lock the registry lock before the next loop. */
			mutex_lock(&rcu_registry_lock);
		}
#endif /* #else #ifndef HAS_INCOHERENT_CACHES */
	}
	/* put back the reader list in the registry */
	cds_list_splice(&qsreaders, &registry);
}

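/*
 * For reference, a sketch of the quiescence test used above, matching
 * the rcu_gp_ongoing() definition in urcu/static/urcu.h (which remains
 * the authoritative one):
 *
 *	static inline int rcu_gp_ongoing(unsigned long *ctr)
 *	{
 *		unsigned long v = CMM_LOAD_SHARED(*ctr);
 *
 *		// Ongoing only if inside a read-side critical section
 *		// (nonzero nest count) tagged with the old phase bit.
 *		return (v & RCU_GP_CTR_NEST_MASK) &&
 *			((v ^ rcu_gp_ctr) & RCU_GP_CTR_PHASE);
 *	}
 */
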
void synchronize_rcu(void)
{
	mutex_lock(&rcu_gp_lock);
	mutex_lock(&rcu_registry_lock);

	if (cds_list_empty(&registry))
		goto out;

	/*
	 * All threads should read qparity before accessing the data
	 * structure pointed to by the new ptr. Must be done within
	 * rcu_registry_lock because it iterates on reader threads.
	 */
	/* Write new ptr before changing the qparity */
	smp_mb_master(RCU_MB_GROUP);

	/*
	 * Wait for previous parity to be empty of readers.
	 * update_counter_and_wait() can release and grab again
	 * rcu_registry_lock internally.
	 */
	update_counter_and_wait();	/* 0 -> 1, wait readers in parity 0 */

	/*
	 * Must finish waiting for quiescent state for parity 0 before
	 * committing next rcu_gp_ctr update to memory. Failure to do so could
	 * result in the writer waiting forever while new readers are always
	 * accessing data (no progress). Enforce compiler-order of load
	 * URCU_TLS(rcu_reader).ctr before store to rcu_gp_ctr.
	 */
	cmm_barrier();

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes
	 * the model easier to understand. It does not have a big performance
	 * impact anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/*
	 * Wait for previous parity to be empty of readers.
	 * update_counter_and_wait() can release and grab again
	 * rcu_registry_lock internally.
	 */
	update_counter_and_wait();	/* 1 -> 0, wait readers in parity 1 */

	/*
	 * Finish waiting for reader threads before letting the old ptr
	 * be freed. Must be done within rcu_registry_lock because it
	 * iterates on reader threads.
	 */
	smp_mb_master(RCU_MB_GROUP);
out:
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
}

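/*
 * Why two parity flips (a sketch of the standard two-phase argument,
 * not text from the original): a reader can load rcu_gp_ctr just
 * before a flip and store the snapshot into its ->ctr just after it,
 * so after a single flip a pre-existing reader may appear to belong to
 * the new parity and be ignored. Waiting for both parities in turn
 * guarantees that every read-side critical section that began before
 * synchronize_rcu() has completed before the old pointer is reclaimed.
 */
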
/*
 * library wrappers to be used by non-LGPL compatible source code.
 */

void rcu_read_lock(void)
{
	_rcu_read_lock();
}

void rcu_read_unlock(void)
{
	_rcu_read_unlock();
}

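/*
 * Typical caller-side pattern these entry points support (illustrative
 * only; struct foo, shared_ptr and use() are hypothetical caller code,
 * not part of this library):
 *
 *	struct foo *p;
 *
 *	rcu_register_thread();
 *	...
 *	rcu_read_lock();
 *	p = rcu_dereference(shared_ptr);
 *	if (p)
 *		use(p);
 *	rcu_read_unlock();
 *	...
 *	rcu_unregister_thread();
 *
 * The write side publishes with rcu_assign_pointer() and reclaims only
 * after synchronize_rcu() returns (or asynchronously via call_rcu()).
 */
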
void rcu_register_thread(void)
{
	URCU_TLS(rcu_reader).tid = pthread_self();
	assert(URCU_TLS(rcu_reader).need_mb == 0);
	assert(!(URCU_TLS(rcu_reader).ctr & RCU_GP_CTR_NEST_MASK));

	mutex_lock(&rcu_registry_lock);
	rcu_init();	/* In case gcc does not support constructor attribute */
	cds_list_add(&URCU_TLS(rcu_reader).node, &registry);
	mutex_unlock(&rcu_registry_lock);
}

void rcu_unregister_thread(void)
{
	mutex_lock(&rcu_registry_lock);
	cds_list_del(&URCU_TLS(rcu_reader).node);
	mutex_unlock(&rcu_registry_lock);
}

#ifdef RCU_MEMBARRIER
void rcu_init(void)
{
	if (init_done)
		return;
	init_done = 1;
	if (!membarrier(MEMBARRIER_EXPEDITED | MEMBARRIER_QUERY))
		has_sys_membarrier = 1;
}
#endif

#ifdef RCU_SIGNAL
static void sigrcu_handler(int signo, siginfo_t *siginfo, void *context)
{
	/*
	 * Executing this cmm_smp_mb() is the only purpose of this signal
	 * handler. It punctually promotes cmm_barrier() into cmm_smp_mb()
	 * on every thread it is executed on.
	 */
	cmm_smp_mb();
	_CMM_STORE_SHARED(URCU_TLS(rcu_reader).need_mb, 0);
	cmm_smp_mb();
}

/*
 * rcu_init constructor. Called when the library is linked, but also when
 * reader threads are calling rcu_register_thread().
 * Should only be called by a single thread at a given time. This is ensured by
 * holding the rcu_registry_lock from rcu_register_thread() or by running
 * at library load time, which should not be executed by multiple
 * threads nor concurrently with rcu_register_thread() anyway.
 */
void rcu_init(void)
{
	struct sigaction act;
	int ret;

	if (init_done)
		return;
	init_done = 1;

	act.sa_sigaction = sigrcu_handler;
	act.sa_flags = SA_SIGINFO | SA_RESTART;
	sigemptyset(&act.sa_mask);
	ret = sigaction(SIGRCU, &act, NULL);
	if (ret)
		urcu_die(errno);
}

void rcu_exit(void)
{
	struct sigaction act;
	int ret;

	ret = sigaction(SIGRCU, NULL, &act);
	if (ret)
		urcu_die(errno);
	assert(act.sa_sigaction == sigrcu_handler);
	assert(cds_list_empty(&registry));
}

#endif /* #ifdef RCU_SIGNAL */

DEFINE_RCU_FLAVOR(rcu_flavor);

#include "urcu-call-rcu-impl.h"
#include "urcu-defer-impl.h"