/*
 * urcu-qsbr.c
 *
 * Userspace RCU QSBR library
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */

#define _GNU_SOURCE
#define _LGPL_SOURCE
#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <poll.h>

#include "urcu/wfcqueue.h"
#include "urcu/map/urcu-qsbr.h"
#define BUILD_QSBR_LIB
#include "urcu/static/urcu-qsbr.h"
#include "urcu-pointer.h"
#include "urcu/tls-compat.h"

#include "urcu-die.h"
#include "urcu-wait.h"

/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#undef _LGPL_SOURCE
#include "urcu-qsbr.h"
#define _LGPL_SOURCE

void __attribute__((destructor)) rcu_exit(void);

static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
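
/*
 * Global grace-period state. rcu_gp.ctr is the counter each reader
 * snapshots into URCU_TLS(rcu_reader).ctr; rcu_gp.futex lets the writer
 * sleep while waiting for slow readers.
 */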
struct rcu_gp rcu_gp = { .ctr = RCU_GP_ONLINE };

/*
 * Active attempts to check for reader Q.S. before calling futex().
 */
#define RCU_QS_ACTIVE_ATTEMPTS 100

/*
 * Written to only by each individual reader. Read by both the reader and the
 * writers.
 */
DEFINE_URCU_TLS(struct rcu_reader, rcu_reader);

#ifdef DEBUG_YIELD
unsigned int rcu_yield_active;
DEFINE_URCU_TLS(unsigned int, rcu_rand_yield);
#endif

static CDS_LIST_HEAD(registry);

/*
 * Queue of threads awaiting a grace period. Contains
 * struct gp_waiters_thread objects.
 */
static DEFINE_URCU_WAIT_QUEUE(gp_waiters);

static void mutex_lock(pthread_mutex_t *mutex)
{
	int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
	ret = pthread_mutex_lock(mutex);
	if (ret)
		urcu_die(ret);
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
	while ((ret = pthread_mutex_trylock(mutex)) != 0) {
		if (ret != EBUSY && ret != EINTR)
			urcu_die(ret);
		poll(NULL, 0, 10);
	}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}

static void mutex_unlock(pthread_mutex_t *mutex)
{
	int ret;

	ret = pthread_mutex_unlock(mutex);
	if (ret)
		urcu_die(ret);
}

/*
 * synchronize_rcu() waiting. Single thread.
 */
static void wait_gp(void)
{
	/* Read reader_gp before read futex */
	cmm_smp_rmb();
	if (uatomic_read(&rcu_gp.futex) == -1)
		futex_noasync(&rcu_gp.futex, FUTEX_WAIT, -1,
			NULL, NULL, 0);
}

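/*
 * Helper for synchronize_rcu(): scan input_readers, moving each reader
 * that has already observed the current rcu_gp.ctr value to
 * cur_snap_readers (or straight to qsreaders when cur_snap_readers is
 * NULL), and each offline reader to qsreaders. Readers still holding an
 * old counter snapshot stay in input_readers; we busy-loop on them,
 * switching to futex-based waiting after RCU_QS_ACTIVE_ATTEMPTS
 * iterations.
 */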
static void wait_for_readers(struct cds_list_head *input_readers,
			struct cds_list_head *cur_snap_readers,
			struct cds_list_head *qsreaders)
{
	int wait_loops = 0;
	struct rcu_reader *index, *tmp;

	/*
	 * Wait for each thread URCU_TLS(rcu_reader).ctr to either
	 * indicate quiescence (offline), or for them to observe the
	 * current rcu_gp.ctr value.
	 */
	for (;;) {
		wait_loops++;
		if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
			uatomic_set(&rcu_gp.futex, -1);
			/*
			 * Write futex before write waiting (the other side
			 * reads them in the opposite order).
			 */
			cmm_smp_wmb();
			cds_list_for_each_entry(index, input_readers, node) {
				_CMM_STORE_SHARED(index->waiting, 1);
			}
			/* Write futex before read reader_gp */
			cmm_smp_mb();
		}
		cds_list_for_each_entry_safe(index, tmp, input_readers, node) {
			switch (rcu_reader_state(&index->ctr)) {
			case RCU_READER_ACTIVE_CURRENT:
				if (cur_snap_readers) {
					cds_list_move(&index->node,
						cur_snap_readers);
					break;
				}
				/* Fall-through */
			case RCU_READER_INACTIVE:
				cds_list_move(&index->node, qsreaders);
				break;
			case RCU_READER_ACTIVE_OLD:
				/*
				 * Old snapshot. Leaving node in
				 * input_readers will make us busy-loop
				 * until the snapshot becomes current or
				 * the reader becomes inactive.
				 */
				break;
			}
		}

		if (cds_list_empty(input_readers)) {
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
				/* Read reader_gp before write futex */
				cmm_smp_mb();
				uatomic_set(&rcu_gp.futex, 0);
			}
			break;
		} else {
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
				wait_gp();
			} else {
#ifndef HAS_INCOHERENT_CACHES
				caa_cpu_relax();
#else /* #ifndef HAS_INCOHERENT_CACHES */
				cmm_smp_mb();
#endif /* #else #ifndef HAS_INCOHERENT_CACHES */
			}
		}
	}
}

/*
 * Use a two-subphase algorithm on architectures where the long type is
 * smaller than 64 bits, to ensure the grace-period counter cannot hit an
 * overflow bug.
 */
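
/*
 * Illustrative failure mode avoided here (numbers are an example): with
 * a 32-bit rcu_gp.ctr incremented once per grace period, a reader that
 * never announces a quiescent state while the counter wraps around
 * could end up holding a stale snapshot that once again compares as
 * "current", fooling the writer into ending a grace period early.
 * Flipping a single parity bit across two wait sub-phases avoids
 * comparing potentially wrapped counter values: each sub-phase only
 * needs to distinguish old parity from new parity.
 */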

#if (CAA_BITS_PER_LONG < 64)
void synchronize_rcu(void)
{
	CDS_LIST_HEAD(cur_snap_readers);
	CDS_LIST_HEAD(qsreaders);
	unsigned long was_online;
	DEFINE_URCU_WAIT_NODE(wait, URCU_WAIT_WAITING);
	struct urcu_waiters waiters;

	was_online = rcu_read_ongoing();

	/*
	 * All threads should read qparity before accessing the data
	 * structure pointed to by the new ptr. In the online case below,
	 * rcu_thread_offline() includes a memory barrier.
	 *
	 * Mark the writer thread offline to make sure we don't wait for
	 * our own quiescent state. This allows using synchronize_rcu()
	 * in threads registered as readers.
	 */
	if (was_online)
		rcu_thread_offline();
	else
		cmm_smp_mb();

	/*
	 * Add ourselves to the gp_waiters queue of threads awaiting a
	 * grace period. Proceed to perform the grace period only if we
	 * are the first thread added to the queue.
	 */
	if (urcu_wait_add(&gp_waiters, &wait) != 0) {
		/* Not first in queue: will be awakened by another thread. */
		urcu_adaptative_busy_wait(&wait);
		goto gp_end;
	}
	/* We won't need to wake ourselves up */
	urcu_wait_set_state(&wait, URCU_WAIT_RUNNING);

	mutex_lock(&rcu_gp_lock);

	/*
	 * Move all waiters into our local queue.
	 */
	urcu_move_waiters(&waiters, &gp_waiters);

	if (cds_list_empty(&registry))
		goto out;

	/*
	 * Wait for readers to observe original parity or be quiescent.
	 */
	wait_for_readers(&registry, &cur_snap_readers, &qsreaders);

	/*
	 * Must finish waiting for quiescent state for original parity
	 * before committing next rcu_gp.ctr update to memory. Failure
	 * to do so could result in the writer waiting forever while new
	 * readers are always accessing data (no progress). Enforce
	 * compiler-order of load URCU_TLS(rcu_reader).ctr before store
	 * to rcu_gp.ctr.
	 */
	cmm_barrier();

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/* Switch parity: 0 -> 1, 1 -> 0 */
	CMM_STORE_SHARED(rcu_gp.ctr, rcu_gp.ctr ^ RCU_GP_CTR);

	/*
	 * Must commit rcu_gp.ctr update to memory before waiting for
	 * quiescent state. Failure to do so could result in the writer
	 * waiting forever while new readers are always accessing data
	 * (no progress). Enforce compiler-order of store to rcu_gp.ctr
	 * before load URCU_TLS(rcu_reader).ctr.
	 */
	cmm_barrier();

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/*
	 * Wait for readers to observe new parity or be quiescent.
	 */
	wait_for_readers(&cur_snap_readers, NULL, &qsreaders);

	/*
	 * Put quiescent reader list back into registry.
	 */
	cds_list_splice(&qsreaders, &registry);
out:
	mutex_unlock(&rcu_gp_lock);
	urcu_wake_all_waiters(&waiters);
gp_end:
	/*
	 * Finish waiting for reader threads before letting the old ptr
	 * be freed.
	 */
	if (was_online)
		rcu_thread_online();
	else
		cmm_smp_mb();
}
#else /* !(CAA_BITS_PER_LONG < 64) */
void synchronize_rcu(void)
{
	CDS_LIST_HEAD(qsreaders);
	unsigned long was_online;
	DEFINE_URCU_WAIT_NODE(wait, URCU_WAIT_WAITING);
	struct urcu_waiters waiters;

	was_online = rcu_read_ongoing();

	/*
	 * Mark the writer thread offline to make sure we don't wait for
	 * our own quiescent state. This allows using synchronize_rcu()
	 * in threads registered as readers.
	 */
	if (was_online)
		rcu_thread_offline();
	else
		cmm_smp_mb();

	/*
	 * Add ourselves to the gp_waiters queue of threads awaiting a
	 * grace period. Proceed to perform the grace period only if we
	 * are the first thread added to the queue.
	 */
	if (urcu_wait_add(&gp_waiters, &wait) != 0) {
		/* Not first in queue: will be awakened by another thread. */
		urcu_adaptative_busy_wait(&wait);
		goto gp_end;
	}
	/* We won't need to wake ourselves up */
	urcu_wait_set_state(&wait, URCU_WAIT_RUNNING);

	mutex_lock(&rcu_gp_lock);

	/*
	 * Move all waiters into our local queue.
	 */
	urcu_move_waiters(&waiters, &gp_waiters);

	if (cds_list_empty(&registry))
		goto out;

	/* Increment current G.P. */
	CMM_STORE_SHARED(rcu_gp.ctr, rcu_gp.ctr + RCU_GP_CTR);

	/*
	 * Must commit rcu_gp.ctr update to memory before waiting for
	 * quiescent state. Failure to do so could result in the writer
	 * waiting forever while new readers are always accessing data
	 * (no progress). Enforce compiler-order of store to rcu_gp.ctr
	 * before load URCU_TLS(rcu_reader).ctr.
	 */
	cmm_barrier();

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/*
	 * Wait for readers to observe the new count or be quiescent.
	 */
	wait_for_readers(&registry, NULL, &qsreaders);

	/*
	 * Put quiescent reader list back into registry.
	 */
	cds_list_splice(&qsreaders, &registry);
out:
	mutex_unlock(&rcu_gp_lock);
	urcu_wake_all_waiters(&waiters);
gp_end:
	if (was_online)
		rcu_thread_online();
	else
		cmm_smp_mb();
}
#endif /* !(CAA_BITS_PER_LONG < 64) */

/*
 * library wrappers to be used by non-LGPL compatible source code.
 */

void rcu_read_lock(void)
{
	_rcu_read_lock();
}

void rcu_read_unlock(void)
{
	_rcu_read_unlock();
}

int rcu_read_ongoing(void)
{
	return _rcu_read_ongoing();
}

void rcu_quiescent_state(void)
{
	_rcu_quiescent_state();
}

void rcu_thread_offline(void)
{
	_rcu_thread_offline();
}

void rcu_thread_online(void)
{
	_rcu_thread_online();
}

void rcu_register_thread(void)
{
	URCU_TLS(rcu_reader).tid = pthread_self();
	assert(URCU_TLS(rcu_reader).ctr == 0);

	mutex_lock(&rcu_gp_lock);
	cds_list_add(&URCU_TLS(rcu_reader).node, &registry);
	mutex_unlock(&rcu_gp_lock);
	_rcu_thread_online();
}

void rcu_unregister_thread(void)
{
	/*
	 * We have to make the thread offline, otherwise we end up
	 * deadlocking with a waiting writer.
	 */
	_rcu_thread_offline();
	mutex_lock(&rcu_gp_lock);
	cds_list_del(&URCU_TLS(rcu_reader).node);
	mutex_unlock(&rcu_gp_lock);
}
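
/*
 * Illustrative sketch (not part of this library) of a typical QSBR
 * reader thread built on the wrappers above; the reader loop shown is
 * hypothetical:
 *
 *	rcu_register_thread();
 *	for (;;) {
 *		rcu_read_lock();
 *		(read RCU-protected data via rcu_dereference())
 *		rcu_read_unlock();
 *		rcu_quiescent_state();	(announce a quiescent state)
 *	}
 *	rcu_unregister_thread();
 *
 * An updater publishes a new version with rcu_assign_pointer(), waits
 * for every registered reader to pass through a quiescent state with
 * synchronize_rcu(), then frees the old version.
 */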

void rcu_exit(void)
{
	/*
	 * Assertion disabled because call_rcu threads are now rcu
	 * readers, and left running at exit.
	 * assert(cds_list_empty(&registry));
	 */
}

DEFINE_RCU_FLAVOR(rcu_flavor);

#include "urcu-call-rcu-impl.h"
#include "urcu-defer-impl.h"