/*
 * urcu.c
 *
 * Userspace RCU library
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */

#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <poll.h>

#include "urcu-static.h"
/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#include "urcu.h"

#ifndef URCU_MB
static int init_done;

void __attribute__((constructor)) urcu_init(void);
void __attribute__((destructor)) urcu_exit(void);
#else
void urcu_init(void)
{
}
#endif

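/*
 * Two flavors are built from this file. With URCU_MB defined, the
 * read-side primitives use real memory barriers, no signal support is
 * needed, and urcu_init() is an empty stub. Without URCU_MB (the
 * signal-based flavor), the constructor and destructor declared above
 * install and check the SIGURCU handler used to promote the read-side
 * compiler barriers into memory barriers on demand.
 */
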
static pthread_mutex_t urcu_mutex = PTHREAD_MUTEX_INITIALIZER;

int gp_futex;

/*
 * Global grace period counter.
 * Contains the current RCU_GP_CTR_BIT.
 * Also has a RCU_GP_COUNT of 1, to accelerate the reader fast path.
 * Written to only by writer with mutex taken. Read by both writer and readers.
 */
long urcu_gp_ctr = RCU_GP_COUNT;

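/*
 * Why the global counter already carries a RCU_GP_COUNT of 1: the
 * outermost rcu_read_lock() can then publish both the grace-period
 * phase and a nesting count of one with a single copy of urcu_gp_ctr
 * into urcu_reader.ctr, instead of loading the counter and adding
 * RCU_GP_COUNT. Rough sketch of that fast path (the actual code lives
 * in urcu-static.h; names are approximate):
 *
 *	tmp = urcu_reader.ctr;
 *	if (!(tmp & RCU_GP_CTR_NEST_MASK))
 *		_STORE_SHARED(urcu_reader.ctr, _LOAD_SHARED(urcu_gp_ctr));
 *	else
 *		_STORE_SHARED(urcu_reader.ctr, tmp + RCU_GP_COUNT);
 */
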
/*
 * Written to only by each individual reader. Read by both the reader and the
 * writers.
 */
struct urcu_reader __thread urcu_reader;

#ifdef DEBUG_YIELD
unsigned int yield_active;
unsigned int __thread rand_yield;
#endif

static LIST_HEAD(registry);

static void internal_urcu_lock(void)
{
	int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
	ret = pthread_mutex_lock(&urcu_mutex);
	if (ret) {
		perror("Error in pthread mutex lock");
		exit(-1);
	}
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
	while ((ret = pthread_mutex_trylock(&urcu_mutex)) != 0) {
		if (ret != EBUSY && ret != EINTR) {
			printf("ret = %d, errno = %d\n", ret, errno);
			perror("Error in pthread mutex lock");
			exit(-1);
		}
		if (urcu_reader.need_mb) {
			smp_mb();
			urcu_reader.need_mb = 0;
			smp_mb();
		}
		poll(NULL, 0, 10);
	}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}

static void internal_urcu_unlock(void)
{
	int ret;

	ret = pthread_mutex_unlock(&urcu_mutex);
	if (ret) {
		perror("Error in pthread mutex unlock");
		exit(-1);
	}
}

/*
 * Called with urcu_mutex held.
 */
static void switch_next_urcu_qparity(void)
{
	STORE_SHARED(urcu_gp_ctr, urcu_gp_ctr ^ RCU_GP_CTR_BIT);
}

#ifdef URCU_MB
#if 0 /* unused */
static void force_mb_single_thread(struct urcu_reader *index)
{
	smp_mb();
}
#endif /* 0 */

static void force_mb_all_threads(void)
{
	smp_mb();
}
#else /* #ifdef URCU_MB */
#if 0 /* unused */
static void force_mb_single_thread(struct urcu_reader *index)
{
	assert(!list_empty(&registry));
	/*
	 * pthread_kill has a smp_mb(). But beware, we assume it performs
	 * a cache flush on architectures with non-coherent cache. Let's play
	 * it safe and don't assume anything: we use smp_mc() to make sure the
	 * cache flush is enforced.
	 */
	index->need_mb = 1;
	smp_mc();	/* write ->need_mb before sending the signal */
	pthread_kill(index->tid, SIGURCU);
	smp_mb();
	/*
	 * Wait for the sighandler (and thus mb()) to execute on that thread.
	 * BUSY-LOOP.
	 */
	while (index->need_mb) {
		poll(NULL, 0, 1);
	}
	smp_mb();	/* read ->need_mb before ending the barrier */
}
#endif /* 0 */

static void force_mb_all_threads(void)
{
	struct urcu_reader *index;

	/*
	 * Ask each thread to execute an smp_mb() so we can consider the
	 * compiler barriers around rcu read lock as real memory barriers.
	 */
	if (list_empty(&registry))
		return;
	/*
	 * pthread_kill has a smp_mb(). But beware, we assume it performs
	 * a cache flush on architectures with non-coherent cache. Let's play
	 * it safe and don't assume anything: we use smp_mc() to make sure the
	 * cache flush is enforced.
	 */
	list_for_each_entry(index, &registry, head) {
		index->need_mb = 1;
		smp_mc();	/* write need_mb before sending the signal */
		pthread_kill(index->tid, SIGURCU);
	}
	/*
	 * Wait for the sighandler (and thus mb()) to execute on every thread.
	 *
	 * Note that the pthread_kill() below will never be executed on systems
	 * that correctly deliver signals in a timely manner. However, it
	 * is not uncommon for kernels to have bugs that can result in
	 * lost or unduly delayed signals.
	 *
	 * If you are seeing the below pthread_kill() executing much at
	 * all, we suggest testing the underlying kernel and filing the
	 * relevant bug report. For Linux kernels, we recommend getting
	 * the Linux Test Project (LTP).
	 */
	list_for_each_entry(index, &registry, head) {
		while (index->need_mb) {
			pthread_kill(index->tid, SIGURCU);
			poll(NULL, 0, 1);
		}
	}
	smp_mb();	/* read ->need_mb before ending the barrier */
}
#endif /* #else #ifdef URCU_MB */

/*
 * synchronize_rcu() waiting. Single thread.
 */
static void wait_gp(void)
{
	/* Read reader_gp before read futex */
	force_mb_all_threads();
	if (uatomic_read(&gp_futex) == -1)
		futex(&gp_futex, FUTEX_WAIT, -1,
		      NULL, NULL, 0);
}

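/*
 * Note on the gp_futex protocol: wait_for_quiescent_state() arms the
 * futex by decrementing gp_futex to -1 once it has spun for
 * RCU_QS_ACTIVE_ATTEMPTS iterations, then blocks here in wait_gp().
 * The matching wake-up is expected to come from the read side: the
 * outermost rcu_read_unlock() slow path resets gp_futex to 0 and
 * issues a FUTEX_WAKE (see wake_up_gp() in urcu-static.h). This
 * description assumes the urcu-static.h shipped with this library
 * version.
 */
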
void wait_for_quiescent_state(void)
{
	LIST_HEAD(qsreaders);
	int wait_loops = 0;
	struct urcu_reader *index, *tmp;

	if (list_empty(&registry))
		return;
	/*
	 * Wait for each thread's urcu_reader.ctr count to become 0.
	 */
	for (;;) {
		wait_loops++;
		if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
			uatomic_dec(&gp_futex);
			/* Write futex before read reader_gp */
			force_mb_all_threads();
		}

		list_for_each_entry_safe(index, tmp, &registry, head) {
			if (!rcu_old_gp_ongoing(&index->ctr))
				list_move(&index->head, &qsreaders);
		}

#ifndef HAS_INCOHERENT_CACHES
		if (list_empty(&registry)) {
			if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
				/* Read reader_gp before write futex */
				force_mb_all_threads();
				uatomic_set(&gp_futex, 0);
			}
			break;
		} else {
			if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS)
				wait_gp();
			else
				cpu_relax();
		}
#else /* #ifndef HAS_INCOHERENT_CACHES */
		/*
		 * BUSY-LOOP. Force the reader thread to commit its
		 * urcu_reader.ctr update to memory if we wait for too long.
		 */
		if (list_empty(&registry)) {
			if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
				/* Read reader_gp before write futex */
				force_mb_all_threads();
				uatomic_set(&gp_futex, 0);
			}
			break;
		} else {
			switch (wait_loops) {
			case RCU_QS_ACTIVE_ATTEMPTS:
				wait_gp();
				break; /* only escape switch */
			case KICK_READER_LOOPS:
				force_mb_all_threads();
				wait_loops = 0;
				break; /* only escape switch */
			default:
				cpu_relax();
			}
		}
#endif /* #else #ifndef HAS_INCOHERENT_CACHES */
	}
	/* Put back the reader list in the registry. */
	list_splice(&qsreaders, &registry);
}

void synchronize_rcu(void)
{
	internal_urcu_lock();

	/*
	 * All threads should read qparity before accessing the data structure
	 * the new ptr points to. Must be done within internal_urcu_lock
	 * because it iterates on reader threads.
	 */
	/* Write new ptr before changing the qparity */
	force_mb_all_threads();

	switch_next_urcu_qparity();	/* 0 -> 1 */

	/*
	 * Must commit qparity update to memory before waiting for parity
	 * 0 quiescent state. Failure to do so could result in the writer
	 * waiting forever while new readers are always accessing data (no
	 * progress).
	 * Ensured by STORE_SHARED and LOAD_SHARED.
	 */

	/*
	 * Adding a smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	smp_mb();

	/*
	 * Wait for previous parity to be empty of readers.
	 */
	wait_for_quiescent_state();	/* Wait readers in parity 0 */

	/*
	 * Must finish waiting for quiescent state for parity 0 before
	 * committing qparity update to memory. Failure to do so could result in
	 * the writer waiting forever while new readers are always accessing
	 * data (no progress).
	 * Ensured by STORE_SHARED and LOAD_SHARED.
	 */

	/*
	 * Adding a smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	smp_mb();

	switch_next_urcu_qparity();	/* 1 -> 0 */

	/*
	 * Must commit qparity update to memory before waiting for parity
	 * 1 quiescent state. Failure to do so could result in the writer
	 * waiting forever while new readers are always accessing data (no
	 * progress).
	 * Ensured by STORE_SHARED and LOAD_SHARED.
	 */

	/*
	 * Adding a smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	smp_mb();

	/*
	 * Wait for previous parity to be empty of readers.
	 */
	wait_for_quiescent_state();	/* Wait readers in parity 1 */

	/*
	 * Finish waiting for reader threads before letting the old ptr be
	 * freed. Must be done within internal_urcu_lock because it iterates on
	 * reader threads.
	 */
	force_mb_all_threads();

	internal_urcu_unlock();
}

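/*
 * Typical update-side usage of synchronize_rcu() (illustrative sketch
 * only; rcu_assign_pointer() comes from urcu.h, and "shared_ptr",
 * "old" and "new" are placeholders):
 *
 *	old = shared_ptr;
 *	rcu_assign_pointer(shared_ptr, new);
 *	synchronize_rcu();	wait for pre-existing readers to finish
 *	free(old);
 */
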
/*
 * library wrappers to be used by non-LGPL compatible source code.
 */

void rcu_read_lock(void)
{
	_rcu_read_lock();
}

void rcu_read_unlock(void)
{
	_rcu_read_unlock();
}

void rcu_register_thread(void)
{
	urcu_reader.tid = pthread_self();
	assert(urcu_reader.need_mb == 0);
	assert(urcu_reader.ctr == 0);

	internal_urcu_lock();
	urcu_init();	/* In case gcc does not support constructor attribute */
	list_add(&urcu_reader.head, &registry);
	internal_urcu_unlock();
}

void rcu_unregister_thread(void)
{
	internal_urcu_lock();
	list_del(&urcu_reader.head);
	internal_urcu_unlock();
}

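/*
 * Reader-side usage sketch (illustrative only; "shared_ptr" is a
 * placeholder and rcu_dereference() comes from urcu.h):
 *
 *	rcu_register_thread();		once per reader thread
 *	...
 *	rcu_read_lock();
 *	p = rcu_dereference(shared_ptr);
 *	if (p)
 *		... read *p ...
 *	rcu_read_unlock();
 *	...
 *	rcu_unregister_thread();	before the thread exits
 */
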
#ifndef URCU_MB
static void sigurcu_handler(int signo, siginfo_t *siginfo, void *context)
{
	/*
	 * Executing this smp_mb() is the only purpose of this signal handler.
	 * It punctually promotes barrier() into smp_mb() on every thread it is
	 * executed on.
	 */
	smp_mb();
	urcu_reader.need_mb = 0;
	smp_mb();
}

/*
 * urcu_init constructor. Called when the library is linked, but also when
 * reader threads are calling rcu_register_thread().
 * Should only be called by a single thread at a given time. This is ensured by
 * holding the internal_urcu_lock() from rcu_register_thread() or by running at
 * library load time, which should not be executed by multiple threads nor
 * concurrently with rcu_register_thread() anyway.
 */
void urcu_init(void)
{
	struct sigaction act;
	int ret;

	if (init_done)
		return;
	init_done = 1;

	act.sa_sigaction = sigurcu_handler;
	act.sa_flags = SA_SIGINFO | SA_RESTART;
	sigemptyset(&act.sa_mask);
	ret = sigaction(SIGURCU, &act, NULL);
	if (ret) {
		perror("Error in sigaction");
		exit(-1);
	}
}

void urcu_exit(void)
{
	struct sigaction act;
	int ret;

	ret = sigaction(SIGURCU, NULL, &act);
	if (ret) {
		perror("Error in sigaction");
		exit(-1);
	}
	assert(act.sa_sigaction == sigurcu_handler);
	assert(list_empty(&registry));
}
#endif /* #ifndef URCU_MB */