/*
 * urcu.c
 *
 * Userspace RCU library
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */

#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <poll.h>

#include "urcu-static.h"
/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#include "urcu.h"

#ifndef URCU_MB
static int init_done;

void __attribute__((constructor)) urcu_init(void);
void __attribute__((destructor)) urcu_exit(void);
#else
void urcu_init(void)
{
}
#endif

static pthread_mutex_t urcu_mutex = PTHREAD_MUTEX_INITIALIZER;

int gp_futex;

/*
 * Global grace period counter.
 * Contains the current RCU_GP_CTR_BIT.
 * Also has a RCU_GP_COUNT of 1, to accelerate the reader fast path.
 * Written to only by writer with mutex taken. Read by both writer and readers.
 */
long urcu_gp_ctr = RCU_GP_COUNT;

/*
 * Written to only by each individual reader. Read by both the reader and the
 * writers.
 */
struct urcu_reader __thread urcu_reader;

/* Thread IDs of registered readers */
#define INIT_NUM_THREADS 4

#ifdef DEBUG_YIELD
unsigned int yield_active;
unsigned int __thread rand_yield;
#endif

static LIST_HEAD(registry);

static void internal_urcu_lock(void)
{
	int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
	ret = pthread_mutex_lock(&urcu_mutex);
	if (ret) {
		perror("Error in pthread mutex lock");
		exit(-1);
	}
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
	while ((ret = pthread_mutex_trylock(&urcu_mutex)) != 0) {
		if (ret != EBUSY && ret != EINTR) {
			printf("ret = %d, errno = %d\n", ret, errno);
			perror("Error in pthread mutex lock");
			exit(-1);
		}
		if (urcu_reader.need_mb) {
			smp_mb();
			urcu_reader.need_mb = 0;
			smp_mb();
		}
		poll(NULL, 0, 10);
	}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}

static void internal_urcu_unlock(void)
{
	int ret;

	ret = pthread_mutex_unlock(&urcu_mutex);
	if (ret) {
		perror("Error in pthread mutex unlock");
		exit(-1);
	}
}

/*
 * Called with urcu_mutex held.
 */
static void switch_next_urcu_qparity(void)
{
	STORE_SHARED(urcu_gp_ctr, urcu_gp_ctr ^ RCU_GP_CTR_BIT);
}

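/*
 * Illustrative note (values hypothetical; the real constants live in
 * urcu-static.h): with RCU_GP_COUNT == 1 and RCU_GP_CTR_BIT == 0x10000,
 * urcu_gp_ctr alternates between 0x00001 and 0x10001. The outermost
 * rcu_read_lock() snapshots urcu_gp_ctr into urcu_reader.ctr, so after
 * the writer flips the parity bit, rcu_old_gp_ongoing() can distinguish
 * pre-existing readers (old parity) from readers started afterwards
 * (new parity), which need not be waited for.
 */
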
#ifdef URCU_MB
static void force_mb_single_thread(struct urcu_reader *index)
{
	smp_mb();
}

static void force_mb_all_threads(void)
{
	smp_mb();
}
#else /* #ifdef URCU_MB */
static void force_mb_single_thread(struct urcu_reader *index)
{
	assert(!list_empty(&registry));
	/*
	 * pthread_kill has a smp_mb(). But beware, we assume it performs
	 * a cache flush on architectures with non-coherent cache. Let's play
	 * safe and don't assume anything: we use smp_mc() to make sure the
	 * cache flush is enforced.
	 */
	index->need_mb = 1;
	smp_mc();	/* write ->need_mb before sending the signal */
	pthread_kill(index->tid, SIGURCU);
	smp_mb();
	/*
	 * Wait for sighandler (and thus mb()) to execute on this thread.
	 * BUSY-LOOP.
	 */
	while (index->need_mb) {
		poll(NULL, 0, 1);
	}
	smp_mb();	/* read ->need_mb before ending the barrier */
}

static void force_mb_all_threads(void)
{
	struct urcu_reader *index;

	/*
	 * Ask each thread to execute a smp_mb() so we can consider the
	 * compiler barriers around rcu read lock as real memory barriers.
	 */
	if (list_empty(&registry))
		return;
	/*
	 * pthread_kill has a smp_mb(). But beware, we assume it performs
	 * a cache flush on architectures with non-coherent cache. Let's play
	 * safe and don't assume anything: we use smp_mc() to make sure the
	 * cache flush is enforced.
	 */
	list_for_each_entry(index, &registry, head) {
		index->need_mb = 1;
		smp_mc();	/* write need_mb before sending the signal */
		pthread_kill(index->tid, SIGURCU);
	}
	/*
	 * Wait for sighandler (and thus mb()) to execute on every thread.
	 *
	 * Note that the pthread_kill() will never be executed on systems
	 * that correctly deliver signals in a timely manner. However, it
	 * is not uncommon for kernels to have bugs that can result in
	 * lost or unduly delayed signals.
	 *
	 * If you are seeing the below pthread_kill() executing much at
	 * all, we suggest testing the underlying kernel and filing the
	 * relevant bug report. For Linux kernels, we recommend getting
	 * the Linux Test Project (LTP).
	 */
	list_for_each_entry(index, &registry, head) {
		while (index->need_mb) {
			pthread_kill(index->tid, SIGURCU);
			poll(NULL, 0, 1);
		}
	}
	smp_mb();	/* read ->need_mb before ending the barrier */
}
#endif /* #else #ifdef URCU_MB */

/*
 * synchronize_rcu() waiting. Single thread.
 */
static void wait_gp(struct urcu_reader *index)
{
	uatomic_dec(&gp_futex);
	force_mb_single_thread(index); /* Write futex before read reader_gp */
	if (!rcu_old_gp_ongoing(&index->ctr)) {
		/* Read reader_gp before write futex */
		force_mb_single_thread(index);
		/* Callbacks are queued, don't wait. */
		uatomic_set(&gp_futex, 0);
	} else {
		/* Read reader_gp before read futex */
		force_mb_single_thread(index);
		if (uatomic_read(&gp_futex) == -1)
			futex(&gp_futex, FUTEX_WAIT, -1,
			      NULL, NULL, 0);
	}
}

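/*
 * Note on the futex protocol above (descriptive; the wake-up side lives
 * in the read-side code, not in this file): gp_futex is decremented to
 * -1 to announce that the writer is about to block, and the reader is
 * expected to set it back to 0 and FUTEX_WAKE the writer when it exits
 * its read-side critical section and observes -1. Re-checking
 * rcu_old_gp_ongoing() after publishing -1 closes the window where the
 * reader could have exited between our last check and the FUTEX_WAIT,
 * which would otherwise leave the writer sleeping with no wake-up
 * coming.
 */
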
void wait_for_quiescent_state(void)
{
	struct urcu_reader *index;

	if (list_empty(&registry))
		return;
	/*
	 * Wait for each thread's urcu_reader.ctr count to become 0.
	 */
	list_for_each_entry(index, &registry, head) {
		int wait_loops = 0;
#ifndef HAS_INCOHERENT_CACHES
		while (rcu_old_gp_ongoing(&index->ctr)) {
			if (wait_loops++ == RCU_QS_ACTIVE_ATTEMPTS) {
				wait_gp(index);
			} else {
				cpu_relax();
			}
		}
#else /* #ifndef HAS_INCOHERENT_CACHES */
		/*
		 * BUSY-LOOP. Force the reader thread to commit its
		 * urcu_reader.ctr update to memory if we wait for too long.
		 */
		while (rcu_old_gp_ongoing(&index->ctr)) {
			switch (wait_loops++) {
			case RCU_QS_ACTIVE_ATTEMPTS:
				wait_gp(index);
				break;
			case KICK_READER_LOOPS:
				force_mb_single_thread(index);
				wait_loops = 0;
				break;
			default:
				cpu_relax();
			}
		}
#endif /* #else #ifndef HAS_INCOHERENT_CACHES */
	}
}

void synchronize_rcu(void)
{
	internal_urcu_lock();

	/* All threads should read qparity before accessing the data
	 * structure where the new ptr points to. Must be done within
	 * internal_urcu_lock because it iterates on reader threads. */
	/* Write new ptr before changing the qparity */
	force_mb_all_threads();

	switch_next_urcu_qparity();	/* 0 -> 1 */

	/*
	 * Must commit qparity update to memory before waiting for parity
	 * 0 quiescent state. Failure to do so could result in the writer
	 * waiting forever while new readers are always accessing data (no
	 * progress).
	 * Ensured by STORE_SHARED and LOAD_SHARED.
	 */

	/*
	 * Adding a smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	smp_mb();

	/*
	 * Wait for previous parity to be empty of readers.
	 */
	wait_for_quiescent_state();	/* Wait readers in parity 0 */

	/*
	 * Must finish waiting for quiescent state for parity 0 before
	 * committing qparity update to memory. Failure to do so could result in
	 * the writer waiting forever while new readers are always accessing
	 * data (no progress).
	 * Ensured by STORE_SHARED and LOAD_SHARED.
	 */

	/*
	 * Adding a smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	smp_mb();

	switch_next_urcu_qparity();	/* 1 -> 0 */

	/*
	 * Must commit qparity update to memory before waiting for parity
	 * 1 quiescent state. Failure to do so could result in the writer
	 * waiting forever while new readers are always accessing data (no
	 * progress).
	 * Ensured by STORE_SHARED and LOAD_SHARED.
	 */

	/*
	 * Adding a smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	smp_mb();

	/*
	 * Wait for previous parity to be empty of readers.
	 */
	wait_for_quiescent_state();	/* Wait readers in parity 1 */

	/* Finish waiting for reader threads before letting the old ptr be
	 * freed. Must be done within internal_urcu_lock because it iterates
	 * on reader threads. */
	force_mb_all_threads();

	internal_urcu_unlock();
}

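/*
 * Illustrative usage sketch (not part of this file's compiled code):
 * the classical RCU update pattern, assuming a global RCU-protected
 * pointer `gp` and a hypothetical destructor free_foo():
 *
 *	struct foo *new, *old;
 *
 *	new = malloc(sizeof(*new));
 *	new->a = 1;
 *	old = rcu_xchg_pointer(&gp, new);	-- publish the new version
 *	synchronize_rcu();	-- wait for pre-existing readers to finish
 *	free_foo(old);	-- safe: no reader can still hold the old pointer
 */
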
/*
 * Library wrappers to be used by non-LGPL compatible source code.
 */

void rcu_read_lock(void)
{
	_rcu_read_lock();
}

void rcu_read_unlock(void)
{
	_rcu_read_unlock();
}

void *rcu_dereference(void *p)
{
	return _rcu_dereference(p);
}

void *rcu_assign_pointer_sym(void **p, void *v)
{
	wmb();
	return STORE_SHARED(p, v);
}

void *rcu_xchg_pointer_sym(void **p, void *v)
{
	wmb();
	return uatomic_xchg(p, v);
}

void *rcu_cmpxchg_pointer_sym(void **p, void *old, void *_new)
{
	wmb();
	return uatomic_cmpxchg(p, old, _new);
}

void *rcu_publish_content_sym(void **p, void *v)
{
	void *oldptr;

	oldptr = _rcu_xchg_pointer(p, v);
	synchronize_rcu();
	return oldptr;
}

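/*
 * Usage note (illustrative; `gp` is a hypothetical RCU-protected
 * pointer): rcu_publish_content() combines the pointer exchange with
 * synchronize_rcu(), so the returned old pointer may be reclaimed
 * immediately by the caller:
 *
 *	old = rcu_publish_content(&gp, new);
 *	free(old);	-- safe: a grace period has already elapsed
 */
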
void rcu_register_thread(void)
{
	internal_urcu_lock();
	urcu_init();	/* In case gcc does not support constructor attribute */
	urcu_reader.tid = pthread_self();
	assert(urcu_reader.need_mb == 0);
	assert(urcu_reader.ctr == 0);
	list_add(&urcu_reader.head, &registry);
	internal_urcu_unlock();
}

void rcu_unregister_thread(void)
{
	internal_urcu_lock();
	list_del(&urcu_reader.head);
	internal_urcu_unlock();
}

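/*
 * Illustrative reader-thread sketch (not compiled here; `gp`, `stop`
 * and read_side_use() are hypothetical):
 *
 *	void *reader_fn(void *arg)
 *	{
 *		struct foo *p;
 *
 *		rcu_register_thread();
 *		while (!stop) {
 *			rcu_read_lock();
 *			p = rcu_dereference(gp);
 *			if (p)
 *				read_side_use(p);
 *			rcu_read_unlock();
 *		}
 *		rcu_unregister_thread();
 *		return NULL;
 *	}
 */
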
#ifndef URCU_MB
static void sigurcu_handler(int signo, siginfo_t *siginfo, void *context)
{
	/*
	 * Executing this smp_mb() is the only purpose of this signal handler.
	 * It punctually promotes barrier() into smp_mb() on every thread it is
	 * executed on.
	 */
	smp_mb();
	urcu_reader.need_mb = 0;
	smp_mb();
}

/*
 * urcu_init constructor. Called when the library is linked, but also when
 * reader threads are calling rcu_register_thread().
 * Should only be called by a single thread at a given time. This is ensured by
 * holding the internal_urcu_lock() from rcu_register_thread() or by running at
 * library load time, which should not be executed by multiple threads nor
 * concurrently with rcu_register_thread() anyway.
 */
void urcu_init(void)
{
	struct sigaction act;
	int ret;

	if (init_done)
		return;
	init_done = 1;

	act.sa_sigaction = sigurcu_handler;
	act.sa_flags = SA_SIGINFO | SA_RESTART;
	sigemptyset(&act.sa_mask);
	ret = sigaction(SIGURCU, &act, NULL);
	if (ret) {
		perror("Error in sigaction");
		exit(-1);
	}
}

void urcu_exit(void)
{
	struct sigaction act;
	int ret;

	ret = sigaction(SIGURCU, NULL, &act);
	if (ret) {
		perror("Error in sigaction");
		exit(-1);
	}
	assert(act.sa_sigaction == sigurcu_handler);
	assert(list_empty(&registry));
}
#endif /* #ifndef URCU_MB */