src/urcu.c
// SPDX-FileCopyrightText: 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
// SPDX-FileCopyrightText: 2009 Paul E. McKenney, IBM Corporation.
//
// SPDX-License-Identifier: LGPL-2.1-or-later

/*
 * Userspace RCU library
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */

#define URCU_NO_COMPAT_IDENTIFIERS
#define _BSD_SOURCE
#define _LGPL_SOURCE
#define _DEFAULT_SOURCE
#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <stdbool.h>
#include <poll.h>

#include <urcu/config.h>
#include <urcu/annotate.h>
#include <urcu/assert.h>
#include <urcu/arch.h>
#include <urcu/wfcqueue.h>
#include <urcu/map/urcu.h>
#include <urcu/static/urcu.h>
#include <urcu/pointer.h>
#include <urcu/tls-compat.h>

#include "urcu-die.h"
#include "urcu-wait.h"
#include "urcu-utils.h"

#define URCU_API_MAP
/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#undef _LGPL_SOURCE
#include <urcu/urcu.h>
#define _LGPL_SOURCE

/*
 * If a reader is really non-cooperative and refuses to commit its
 * URCU_TLS(rcu_reader).ctr count to memory (there is no barrier in the
 * reader per se), kick it after 10 loops waiting for it.
 */
#define KICK_READER_LOOPS	10

/*
 * Active attempts to check for reader Q.S. before calling futex().
 */
#define RCU_QS_ACTIVE_ATTEMPTS	100

/* If the headers do not support the membarrier system call, fall back on RCU_MB */
#ifdef __NR_membarrier
# define membarrier(...)	syscall(__NR_membarrier, __VA_ARGS__)
#else
# define membarrier(...)	-ENOSYS
#endif

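/*
 * Subset of the membarrier(2) commands this library uses. The values
 * duplicate the kernel UAPI constants so the build does not depend on
 * <linux/membarrier.h> being present.
 */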
enum membarrier_cmd {
	MEMBARRIER_CMD_QUERY				= 0,
	MEMBARRIER_CMD_SHARED				= (1 << 0),
	/* reserved for MEMBARRIER_CMD_SHARED_EXPEDITED (1 << 1) */
	/* reserved for MEMBARRIER_CMD_PRIVATE (1 << 2) */
	MEMBARRIER_CMD_PRIVATE_EXPEDITED		= (1 << 3),
	MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED	= (1 << 4),
};

#ifdef RCU_MEMBARRIER
static int init_done;
static int urcu_memb_has_sys_membarrier_private_expedited;

#ifndef CONFIG_RCU_FORCE_SYS_MEMBARRIER
/*
 * Explicitly initialize to zero because we can't alias a non-static
 * uninitialized variable.
 */
int urcu_memb_has_sys_membarrier = 0;
#endif

void __attribute__((constructor)) rcu_init(void);
#endif

#if defined(RCU_MB)
void rcu_init(void)
{
}
#endif

void __attribute__((destructor)) rcu_exit(void);
static void urcu_call_rcu_exit(void);

/*
 * rcu_gp_lock ensures mutual exclusion between threads calling
 * synchronize_rcu().
 */
static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
/*
 * rcu_registry_lock ensures mutual exclusion between threads registering
 * and unregistering themselves to/from the registry, and with threads
 * reading that registry from synchronize_rcu(). However, this lock is not
 * held continuously while waiting for the grace period to complete: it is
 * released sporadically between iterations over the registry.
 * rcu_registry_lock may nest inside rcu_gp_lock.
 */
static pthread_mutex_t rcu_registry_lock = PTHREAD_MUTEX_INITIALIZER;
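/*
 * Global grace-period state: rcu_gp.ctr carries the grace-period phase bit
 * and count that readers snapshot in rcu_read_lock(); rcu_gp.futex is used
 * by the writer to sleep until readers report progress.
 */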
struct urcu_gp rcu_gp = { .ctr = URCU_GP_COUNT };

/*
 * Written to only by each individual reader. Read by both the reader and the
 * writers.
 */
DEFINE_URCU_TLS(struct urcu_reader, rcu_reader);

static CDS_LIST_HEAD(registry);

/*
 * Queue of threads waiting for their turn to wait for a grace period.
 * Contains struct gp_waiters_thread objects.
 */
static DEFINE_URCU_WAIT_QUEUE(gp_waiters);

static void mutex_lock(pthread_mutex_t *mutex)
{
	int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
	ret = pthread_mutex_lock(mutex);
	if (ret)
		urcu_die(ret);
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
	while ((ret = pthread_mutex_trylock(mutex)) != 0) {
		if (ret != EBUSY && ret != EINTR)
			urcu_die(ret);
		if (CMM_LOAD_SHARED(URCU_TLS(rcu_reader).need_mb)) {
			cmm_smp_mb();
			_CMM_STORE_SHARED(URCU_TLS(rcu_reader).need_mb, 0);
			cmm_smp_mb();
		}
		(void) poll(NULL, 0, 10);
	}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}

static void mutex_unlock(pthread_mutex_t *mutex)
{
	int ret;

	ret = pthread_mutex_unlock(mutex);
	if (ret)
		urcu_die(ret);
}

#ifdef RCU_MEMBARRIER
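/*
 * Write-side memory barrier for the membarrier flavor: when the
 * membarrier(2) system call is available, the writer issues the costly
 * barrier on behalf of all reader threads, which lets the read-side fast
 * path get away with a simple compiler barrier. Otherwise fall back to a
 * local full memory barrier.
 */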
static void smp_mb_master(void)
{
	if (caa_likely(urcu_memb_has_sys_membarrier)) {
		if (membarrier(urcu_memb_has_sys_membarrier_private_expedited ?
				MEMBARRIER_CMD_PRIVATE_EXPEDITED :
				MEMBARRIER_CMD_SHARED, 0))
			urcu_die(errno);
	} else {
		cmm_smp_mb();
	}
}
#endif

#if defined(RCU_MB)
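/*
 * The -mb flavor does not rely on sys_membarrier: both the readers and the
 * writer issue full memory barriers, trading read-side speed for
 * portability.
 */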
static void smp_mb_master(void)
{
	cmm_smp_mb();
}
#endif

/*
 * synchronize_rcu() waiting. Single thread.
 * Always called with rcu_registry_lock held. Releases this lock, grabs it
 * again, and holds it when it returns.
 */
static void wait_gp(void)
{
	/*
	 * Read reader_gp before read futex.
	 */
	smp_mb_master();
	/* Temporarily unlock the registry lock. */
	mutex_unlock(&rcu_registry_lock);
	while (uatomic_read(&rcu_gp.futex) == -1) {
		if (!futex_async(&rcu_gp.futex, FUTEX_WAIT, -1, NULL, NULL, 0)) {
			/*
			 * Wakeups previously queued by unrelated code
			 * using the same address can cause futex wait to
			 * return 0 even though the futex value is still
			 * -1 (spurious wakeups). Check the value again
			 * in user-space to validate whether it really
			 * differs from -1.
			 */
			continue;
		}
		switch (errno) {
		case EAGAIN:
			/* Value already changed. */
			goto end;
		case EINTR:
			/* Retry if interrupted by signal. */
			break;	/* Get out of switch. Check again. */
		default:
			/* Unexpected error. */
			urcu_die(errno);
		}
	}
end:
	/*
	 * Re-lock the registry lock before the next loop.
	 */
	mutex_lock(&rcu_registry_lock);
}

/*
 * Always called with rcu_registry_lock held. Releases this lock between
 * iterations and grabs it again. Holds the lock when it returns.
 */
static void wait_for_readers(struct cds_list_head *input_readers,
			struct cds_list_head *cur_snap_readers,
			struct cds_list_head *qsreaders,
			cmm_annotate_t *group)
{
	unsigned int wait_loops = 0;
	struct urcu_reader *index, *tmp;
#ifdef HAS_INCOHERENT_CACHES
	unsigned int wait_gp_loops = 0;
#endif /* HAS_INCOHERENT_CACHES */

	/*
	 * Wait for each thread's URCU_TLS(rcu_reader).ctr to either
	 * indicate quiescence (not nested), or observe the current
	 * rcu_gp.ctr value.
	 */
	for (;;) {
		if (wait_loops < RCU_QS_ACTIVE_ATTEMPTS)
			wait_loops++;
		if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
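			/*
			 * Let the readers know the writer is about to
			 * block: rcu_gp.futex goes from 0 to -1, so the
			 * read-side unlock path knows it must issue a
			 * FUTEX_WAKE.
			 */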
			uatomic_dec(&rcu_gp.futex);
			/* Write futex before read reader_gp */
			smp_mb_master();
		}

		cds_list_for_each_entry_safe(index, tmp, input_readers, node) {
			switch (urcu_common_reader_state(&rcu_gp, &index->ctr, group)) {
			case URCU_READER_ACTIVE_CURRENT:
				if (cur_snap_readers) {
					cds_list_move(&index->node,
						cur_snap_readers);
					break;
				}
				/* Fall-through */
			case URCU_READER_INACTIVE:
				cds_list_move(&index->node, qsreaders);
				break;
			case URCU_READER_ACTIVE_OLD:
				/*
				 * Old snapshot. Leaving node in
				 * input_readers will make us busy-loop
				 * until the snapshot becomes current or
				 * the reader becomes inactive.
				 */
				break;
			}
		}

#ifndef HAS_INCOHERENT_CACHES
		if (cds_list_empty(input_readers)) {
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
				/* Read reader_gp before write futex */
				smp_mb_master();
				uatomic_set(&rcu_gp.futex, 0);
			}
			break;
		} else {
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
				/* wait_gp unlocks/locks registry lock. */
				wait_gp();
			} else {
				/* Temporarily unlock the registry lock. */
				mutex_unlock(&rcu_registry_lock);
				caa_cpu_relax();
				/*
				 * Re-lock the registry lock before the
				 * next loop.
				 */
				mutex_lock(&rcu_registry_lock);
			}
		}
#else /* #ifndef HAS_INCOHERENT_CACHES */
		/*
		 * BUSY-LOOP. Force the reader thread to commit its
		 * URCU_TLS(rcu_reader).ctr update to memory if we wait
		 * for too long.
		 */
		if (cds_list_empty(input_readers)) {
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
				/* Read reader_gp before write futex */
				smp_mb_master();
				uatomic_set(&rcu_gp.futex, 0);
			}
			break;
		} else {
			if (wait_gp_loops == KICK_READER_LOOPS) {
				smp_mb_master();
				wait_gp_loops = 0;
			}
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
				/* wait_gp unlocks/locks registry lock. */
				wait_gp();
				wait_gp_loops++;
			} else {
				/* Temporarily unlock the registry lock. */
				mutex_unlock(&rcu_registry_lock);
				caa_cpu_relax();
				/*
				 * Re-lock the registry lock before the
				 * next loop.
				 */
				mutex_lock(&rcu_registry_lock);
			}
		}
#endif /* #else #ifndef HAS_INCOHERENT_CACHES */
	}
}

void synchronize_rcu(void)
{
	cmm_annotate_define(acquire_group);
	cmm_annotate_define(release_group);
	CDS_LIST_HEAD(cur_snap_readers);
	CDS_LIST_HEAD(qsreaders);
	DEFINE_URCU_WAIT_NODE(wait, URCU_WAIT_WAITING);
	struct urcu_waiters waiters;

	/*
	 * Add ourselves to the gp_waiters queue of threads waiting to wait
	 * for a grace period. Proceed to perform the grace period only if
	 * we are the first thread added into the queue.
	 * The implicit memory barrier before urcu_wait_add() orders prior
	 * memory accesses of threads put into the wait queue before their
	 * insertion into the wait queue.
	 */
	if (urcu_wait_add(&gp_waiters, &wait) != 0) {
		/*
		 * Not first in queue: will be awakened by another thread.
		 * Implies a memory barrier after grace period.
		 */
		urcu_adaptative_busy_wait(&wait);
		return;
	}
	/* We won't need to wake ourselves up */
	urcu_wait_set_state(&wait, URCU_WAIT_RUNNING);

	mutex_lock(&rcu_gp_lock);

	/*
	 * Move all waiters into our local queue.
	 */
	urcu_move_waiters(&waiters, &gp_waiters);
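	/*
	 * Every waiter moved above shares this single grace period: they
	 * will all be woken at the end, batching concurrent
	 * synchronize_rcu() calls into one grace period.
	 */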

	mutex_lock(&rcu_registry_lock);

	if (cds_list_empty(&registry))
		goto out;

	/*
	 * All threads should read qparity before accessing the data
	 * structure pointed to by the new ptr. Must be done within
	 * rcu_registry_lock because it iterates on reader threads.
	 */
	/* Write new ptr before changing the qparity */
	smp_mb_master();
	cmm_annotate_group_mb_release(&release_group);

	/*
	 * Wait for readers to observe original parity or be quiescent.
	 * wait_for_readers() can release and re-acquire rcu_registry_lock
	 * internally.
	 */
	wait_for_readers(&registry, &cur_snap_readers, &qsreaders, &acquire_group);

	/*
	 * Must finish waiting for quiescent state for original parity before
	 * committing next rcu_gp.ctr update to memory. Failure to do so could
	 * result in the writer waiting forever while new readers are always
	 * accessing data (no progress). Enforce compiler-order of load
	 * URCU_TLS(rcu_reader).ctr before store to rcu_gp.ctr.
	 */
	cmm_barrier();

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/* Switch parity: 0 -> 1, 1 -> 0 */
	cmm_annotate_group_mem_release(&release_group, &rcu_gp.ctr);
	uatomic_store(&rcu_gp.ctr, rcu_gp.ctr ^ URCU_GP_CTR_PHASE, CMM_RELAXED);

	/*
	 * Must commit rcu_gp.ctr update to memory before waiting for quiescent
	 * state. Failure to do so could result in the writer waiting forever
	 * while new readers are always accessing data (no progress). Enforce
	 * compiler-order of store to rcu_gp.ctr before load rcu_reader ctr.
	 */
	cmm_barrier();

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/*
	 * Wait for readers to observe new parity or be quiescent.
	 * wait_for_readers() can release and re-acquire rcu_registry_lock
	 * internally.
	 */
	wait_for_readers(&cur_snap_readers, NULL, &qsreaders, &acquire_group);

	/*
	 * Put quiescent reader list back into registry.
	 */
	cds_list_splice(&qsreaders, &registry);

	/*
	 * Finish waiting for reader threads before letting the old ptr be
	 * freed. Must be done within rcu_registry_lock because it iterates
	 * on reader threads.
	 */
	smp_mb_master();
	cmm_annotate_group_mb_acquire(&acquire_group);
out:
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);

	/*
	 * Wake up waiters only after we have completed the grace period
	 * and have ensured the memory barriers at the end of the grace
	 * period have been issued.
	 */
	urcu_wake_all_waiters(&waiters);
}

/*
 * Library wrappers to be used by non-LGPL-compatible source code.
 */

void rcu_read_lock(void)
{
	_rcu_read_lock();
}

void rcu_read_unlock(void)
{
	_rcu_read_unlock();
}

int rcu_read_ongoing(void)
{
	return _rcu_read_ongoing();
}

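/*
 * Each reader thread must call rcu_register_thread() before its first
 * read-side critical section: synchronize_rcu() only waits for threads
 * present in the registry.
 */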
void rcu_register_thread(void)
{
	URCU_TLS(rcu_reader).tid = pthread_self();
	urcu_posix_assert(URCU_TLS(rcu_reader).need_mb == 0);
	urcu_posix_assert(!(URCU_TLS(rcu_reader).ctr & URCU_GP_CTR_NEST_MASK));

	mutex_lock(&rcu_registry_lock);
	urcu_posix_assert(!URCU_TLS(rcu_reader).registered);
	URCU_TLS(rcu_reader).registered = 1;
	rcu_init();	/* In case gcc does not support constructor attribute */
	cds_list_add(&URCU_TLS(rcu_reader).node, &registry);
	mutex_unlock(&rcu_registry_lock);
}

void rcu_unregister_thread(void)
{
	mutex_lock(&rcu_registry_lock);
	urcu_posix_assert(URCU_TLS(rcu_reader).registered);
	URCU_TLS(rcu_reader).registered = 0;
	cds_list_del(&URCU_TLS(rcu_reader).node);
	mutex_unlock(&rcu_registry_lock);
}

#ifdef RCU_MEMBARRIER

#ifdef CONFIG_RCU_FORCE_SYS_MEMBARRIER
static
void rcu_sys_membarrier_status(bool available)
{
	if (!available)
		abort();
}
#else
static
void rcu_sys_membarrier_status(bool available)
{
	if (!available)
		return;
	urcu_memb_has_sys_membarrier = 1;
}
#endif

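/*
 * Query the kernel for membarrier(2) support and register for the private
 * expedited command when it is available: it only needs to interrupt the
 * threads of this process, which makes it cheaper than the process-wide
 * MEMBARRIER_CMD_SHARED fallback.
 */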
static
void rcu_sys_membarrier_init(void)
{
	bool available = false;
	int mask;

	mask = membarrier(MEMBARRIER_CMD_QUERY, 0);
	if (mask >= 0) {
		if (mask & MEMBARRIER_CMD_PRIVATE_EXPEDITED) {
			if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0))
				urcu_die(errno);
			urcu_memb_has_sys_membarrier_private_expedited = 1;
			available = true;
		} else if (mask & MEMBARRIER_CMD_SHARED) {
			available = true;
		}
	}
	rcu_sys_membarrier_status(available);
}

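/*
 * Called both as a library constructor and from rcu_register_thread();
 * init_done makes the initialization idempotent.
 */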
void rcu_init(void)
{
	if (init_done)
		return;
	init_done = 1;
	rcu_sys_membarrier_init();
}
#endif

void rcu_exit(void)
{
	urcu_call_rcu_exit();
}

DEFINE_RCU_FLAVOR(rcu_flavor);

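/*
 * The call_rcu, defer and grace-period polling implementations are pulled
 * into this translation unit so that each RCU flavor carries its own copy
 * of them.
 */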
#include "urcu-call-rcu-impl.h"
#include "urcu-defer-impl.h"
#include "urcu-poll-impl.h"