/*
 * SPDX-License-Identifier: LGPL-2.1-or-later
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 *
 * Userspace RCU library for LTTng-UST, derived from liburcu "bulletproof" version.
 */

#define _LGPL_SOURCE
#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <poll.h>
#include <unistd.h>
#include <stdbool.h>
#include <sys/mman.h>

#include <urcu/arch.h>
#include <urcu/wfcqueue.h>
#include <lttng/urcu/static/urcu-ust.h>
#include <lttng/urcu/pointer.h>
#include <urcu/tls-compat.h>

/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#undef _LGPL_SOURCE
#include <lttng/urcu/urcu-ust.h>
#define _LGPL_SOURCE

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

#ifdef __linux__
static
void *mremap_wrapper(void *old_address, size_t old_size,
		size_t new_size, int flags)
{
	return mremap(old_address, old_size, new_size, flags);
}
#else

#define MREMAP_MAYMOVE	1
#define MREMAP_FIXED	2

/*
 * mremap wrapper for non-Linux systems not allowing MAYMOVE.
 * This is not generic.
 */
static
void *mremap_wrapper(void *old_address, size_t old_size,
		size_t new_size, int flags)
{
	assert(!(flags & MREMAP_MAYMOVE));

	return MAP_FAILED;
}
#endif
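
/*
 * Note: on non-Linux systems the wrapper above always returns MAP_FAILED,
 * so in-place expansion of the registry arena never succeeds there and
 * expand_arena() below falls back to mmap()ing a new, larger chunk.
 */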

/* Sleep delay in ms */
#define RCU_SLEEP_DELAY_MS	10
#define INIT_NR_THREADS		8
#define ARENA_INIT_ALLOC				\
	sizeof(struct registry_chunk)			\
	+ INIT_NR_THREADS * sizeof(struct lttng_ust_urcu_reader)
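/*
 * For instance, with INIT_NR_THREADS == 8, the initial allocation covers
 * one struct registry_chunk header plus room for 8 reader structures.
 */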

/*
 * Number of active busy-wait attempts to check for reader quiescent
 * state before falling back to sleeping.
 */
#define RCU_QS_ACTIVE_ATTEMPTS 100

static
int lttng_ust_urcu_refcount;

/* If the headers do not support the membarrier system call, fall back on smp_mb. */
#ifdef __NR_membarrier
# define membarrier(...)	syscall(__NR_membarrier, __VA_ARGS__)
#else
# define membarrier(...)	-ENOSYS
#endif

enum membarrier_cmd {
	MEMBARRIER_CMD_QUERY				= 0,
	MEMBARRIER_CMD_SHARED				= (1 << 0),
	/* reserved for MEMBARRIER_CMD_SHARED_EXPEDITED (1 << 1) */
	/* reserved for MEMBARRIER_CMD_PRIVATE (1 << 2) */
	MEMBARRIER_CMD_PRIVATE_EXPEDITED		= (1 << 3),
	MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED	= (1 << 4),
};

static
void _lttng_ust_urcu_init(void)
	__attribute__((constructor));
static
void __attribute__((destructor)) lttng_ust_urcu_exit(void);

#ifndef CONFIG_RCU_FORCE_SYS_MEMBARRIER
int lttng_ust_urcu_has_sys_membarrier;
#endif

/*
 * rcu_gp_lock ensures mutual exclusion between threads calling
 * synchronize_rcu().
 */
static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
/*
 * rcu_registry_lock ensures mutual exclusion between threads
 * registering and unregistering themselves to/from the registry, and
 * with threads reading that registry from synchronize_rcu(). However,
 * this lock is not held through the entire wait for the grace period
 * to complete: it is released sporadically between iterations on the
 * registry.
 * rcu_registry_lock may nest inside rcu_gp_lock.
 */
static pthread_mutex_t rcu_registry_lock = PTHREAD_MUTEX_INITIALIZER;

static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
static int initialized;

static pthread_key_t lttng_ust_urcu_key;

struct lttng_ust_urcu_gp lttng_ust_urcu_gp = { .ctr = LTTNG_UST_URCU_GP_COUNT };

/*
 * Pointer to registry elements. Written to only by each individual reader. Read
 * by both the reader and the writers.
 */
DEFINE_URCU_TLS(struct lttng_ust_urcu_reader *, lttng_ust_urcu_reader);

static CDS_LIST_HEAD(registry);

struct registry_chunk {
	size_t data_len;		/* data length */
	size_t used;			/* amount of data used */
	struct cds_list_head node;	/* chunk_list node */
	char data[];
};
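
/*
 * Chunk memory layout (each chunk is one mmap()ed region):
 *
 *	+------------------------+ <- chunk base
 *	| struct registry_chunk  |    (data_len, used, node)
 *	+------------------------+ <- data[0]
 *	| reader slot 0          |    struct lttng_ust_urcu_reader
 *	| reader slot 1          |
 *	| ...                    |
 *	+------------------------+ <- data[data_len]
 */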

struct registry_arena {
	struct cds_list_head chunk_list;
};

static struct registry_arena registry_arena = {
	.chunk_list = CDS_LIST_HEAD_INIT(registry_arena.chunk_list),
};

/* Saved fork signal mask, protected by rcu_gp_lock */
static sigset_t saved_fork_signal_mask;

static void mutex_lock(pthread_mutex_t *mutex)
{
	int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
	ret = pthread_mutex_lock(mutex);
	if (ret)
		abort();
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
	while ((ret = pthread_mutex_trylock(mutex)) != 0) {
		if (ret != EBUSY && ret != EINTR)
			abort();
		poll(NULL, 0, 10);
	}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}

static void mutex_unlock(pthread_mutex_t *mutex)
{
	int ret;

	ret = pthread_mutex_unlock(mutex);
	if (ret)
		abort();
}

static void smp_mb_master(void)
{
	if (caa_likely(lttng_ust_urcu_has_sys_membarrier)) {
		if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0))
			abort();
	} else {
		cmm_smp_mb();
	}
}
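
/*
 * Design note: MEMBARRIER_CMD_PRIVATE_EXPEDITED issues memory barriers on
 * all running threads of the calling process. Pairing write-side barriers
 * with it is what allows the read-side fast path to get away with cheaper
 * barriers than full memory barriers.
 */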

/*
 * Always called with rcu_registry_lock held. Releases this lock between
 * iterations and grabs it again. Holds the lock when it returns.
 */
static void wait_for_readers(struct cds_list_head *input_readers,
			struct cds_list_head *cur_snap_readers,
			struct cds_list_head *qsreaders)
{
	unsigned int wait_loops = 0;
	struct lttng_ust_urcu_reader *index, *tmp;

	/*
	 * Wait for each thread URCU_TLS(lttng_ust_urcu_reader).ctr to either
	 * indicate quiescence (not nested), or observe the current
	 * rcu_gp.ctr value.
	 */
	for (;;) {
		if (wait_loops < RCU_QS_ACTIVE_ATTEMPTS)
			wait_loops++;

		cds_list_for_each_entry_safe(index, tmp, input_readers, node) {
			switch (lttng_ust_urcu_reader_state(&index->ctr)) {
			case LTTNG_UST_URCU_READER_ACTIVE_CURRENT:
				if (cur_snap_readers) {
					cds_list_move(&index->node,
						cur_snap_readers);
					break;
				}
				/* Fall-through */
			case LTTNG_UST_URCU_READER_INACTIVE:
				cds_list_move(&index->node, qsreaders);
				break;
			case LTTNG_UST_URCU_READER_ACTIVE_OLD:
				/*
				 * Old snapshot. Leaving node in
				 * input_readers will make us busy-loop
				 * until the snapshot becomes current or
				 * the reader becomes inactive.
				 */
				break;
			}
		}

		if (cds_list_empty(input_readers)) {
			break;
		} else {
			/* Temporarily unlock the registry lock. */
			mutex_unlock(&rcu_registry_lock);
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS)
				(void) poll(NULL, 0, RCU_SLEEP_DELAY_MS);
			else
				caa_cpu_relax();
			/* Re-lock the registry lock before the next loop. */
			mutex_lock(&rcu_registry_lock);
		}
	}
}
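
/*
 * Dataflow sketch for wait_for_readers(): entries start on input_readers;
 * readers observed in the current grace period move to cur_snap_readers
 * (when that list is provided), quiescent readers move to qsreaders, and
 * the loop exits once input_readers is empty.
 */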

void lttng_ust_urcu_synchronize_rcu(void)
{
	CDS_LIST_HEAD(cur_snap_readers);
	CDS_LIST_HEAD(qsreaders);
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	assert(!ret);
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	assert(!ret);

	mutex_lock(&rcu_gp_lock);

	mutex_lock(&rcu_registry_lock);

	if (cds_list_empty(&registry))
		goto out;

	/*
	 * All threads should read qparity before accessing the data
	 * structure pointed to by the new ptr.
	 */
	/* Write new ptr before changing the qparity */
	smp_mb_master();

	/*
	 * Wait for readers to observe original parity or be quiescent.
	 * wait_for_readers() can release and grab again rcu_registry_lock
	 * internally.
	 */
	wait_for_readers(&registry, &cur_snap_readers, &qsreaders);

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/* Switch parity: 0 -> 1, 1 -> 0 */
	CMM_STORE_SHARED(lttng_ust_urcu_gp.ctr, lttng_ust_urcu_gp.ctr ^ LTTNG_UST_URCU_GP_CTR_PHASE);

	/*
	 * Must commit qparity update to memory before waiting for other parity
	 * quiescent state. Failure to do so could result in the writer waiting
	 * forever while new readers are always accessing data (no progress).
	 * Ensured by CMM_STORE_SHARED and CMM_LOAD_SHARED.
	 */

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/*
	 * Wait for readers to observe new parity or be quiescent.
	 * wait_for_readers() can release and grab again rcu_registry_lock
	 * internally.
	 */
	wait_for_readers(&cur_snap_readers, NULL, &qsreaders);

	/*
	 * Put quiescent reader list back into registry.
	 */
	cds_list_splice(&qsreaders, &registry);

	/*
	 * Finish waiting for reader threads before letting the old ptr be
	 * freed.
	 */
	smp_mb_master();
out:
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	assert(!ret);
}
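
/*
 * Writer-side usage sketch (hypothetical caller code, not part of this
 * file). The pointer exchange helper is assumed to come from
 * <lttng/urcu/pointer.h>:
 *
 *	struct mystruct *old;
 *
 *	old = lttng_ust_rcu_xchg_pointer(&shared_ptr, new);
 *	lttng_ust_urcu_synchronize_rcu();
 *	free(old);	// no pre-existing reader can still hold 'old'
 */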

/*
 * Library wrappers to be used by non-LGPL-compatible source code.
 */

void lttng_ust_urcu_read_lock(void)
{
	_lttng_ust_urcu_read_lock();
}

void lttng_ust_urcu_read_unlock(void)
{
	_lttng_ust_urcu_read_unlock();
}

int lttng_ust_urcu_read_ongoing(void)
{
	return _lttng_ust_urcu_read_ongoing();
}
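
/*
 * Reader-side usage sketch (hypothetical caller code, not part of this
 * file). lttng_ust_rcu_dereference() is assumed to come from
 * <lttng/urcu/pointer.h>:
 *
 *	lttng_ust_urcu_read_lock();
 *	p = lttng_ust_rcu_dereference(shared_ptr);
 *	if (p)
 *		use(p);	// 'p' stays valid until the matching unlock
 *	lttng_ust_urcu_read_unlock();
 */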

/*
 * Only grow for now. If empty, allocate an ARENA_INIT_ALLOC-sized chunk.
 * Otherwise, try expanding the last chunk. If this fails, allocate a new
 * chunk twice as big as the last chunk.
 * Memory used by chunks _never_ moves. A chunk could theoretically be
 * freed when all "used" slots are released, but we don't do it at this
 * point.
 */
static
void expand_arena(struct registry_arena *arena)
{
	struct registry_chunk *new_chunk, *last_chunk;
	size_t old_chunk_len, new_chunk_len;

	/* No chunk. */
	if (cds_list_empty(&arena->chunk_list)) {
		assert(ARENA_INIT_ALLOC >=
			sizeof(struct registry_chunk)
			+ sizeof(struct lttng_ust_urcu_reader));
		new_chunk_len = ARENA_INIT_ALLOC;
		new_chunk = (struct registry_chunk *) mmap(NULL,
			new_chunk_len,
			PROT_READ | PROT_WRITE,
			MAP_ANONYMOUS | MAP_PRIVATE,
			-1, 0);
		if (new_chunk == MAP_FAILED)
			abort();
		memset(new_chunk, 0, new_chunk_len);
		new_chunk->data_len =
			new_chunk_len - sizeof(struct registry_chunk);
		cds_list_add_tail(&new_chunk->node, &arena->chunk_list);
		return;		/* We're done. */
	}

	/* Try expanding last chunk. */
	last_chunk = cds_list_entry(arena->chunk_list.prev,
		struct registry_chunk, node);
	old_chunk_len =
		last_chunk->data_len + sizeof(struct registry_chunk);
	new_chunk_len = old_chunk_len << 1;

	/* Don't allow memory mapping to move, just expand. */
	new_chunk = mremap_wrapper(last_chunk, old_chunk_len,
		new_chunk_len, 0);
	if (new_chunk != MAP_FAILED) {
		/* Should not have moved. */
		assert(new_chunk == last_chunk);
		memset((char *) last_chunk + old_chunk_len, 0,
			new_chunk_len - old_chunk_len);
		last_chunk->data_len =
			new_chunk_len - sizeof(struct registry_chunk);
		return;		/* We're done. */
	}

	/* Remap did not succeed, we need to add a new chunk. */
	new_chunk = (struct registry_chunk *) mmap(NULL,
		new_chunk_len,
		PROT_READ | PROT_WRITE,
		MAP_ANONYMOUS | MAP_PRIVATE,
		-1, 0);
	if (new_chunk == MAP_FAILED)
		abort();
	memset(new_chunk, 0, new_chunk_len);
	new_chunk->data_len =
		new_chunk_len - sizeof(struct registry_chunk);
	cds_list_add_tail(&new_chunk->node, &arena->chunk_list);
}
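
/*
 * Growth sequence example: the first chunk is ARENA_INIT_ALLOC bytes; a
 * successful mremap() doubles it in place (2x, 4x, ...), and when mremap()
 * fails (or is unavailable) a fresh chunk twice the size of the last one
 * is mmap()ed instead, leaving prior chunks untouched.
 */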

static
struct lttng_ust_urcu_reader *arena_alloc(struct registry_arena *arena)
{
	struct registry_chunk *chunk;
	struct lttng_ust_urcu_reader *rcu_reader_reg;
	int expand_done = 0;	/* Only allow expanding once per alloc. */
	size_t len = sizeof(struct lttng_ust_urcu_reader);

retry:
	cds_list_for_each_entry(chunk, &arena->chunk_list, node) {
		if (chunk->data_len - chunk->used < len)
			continue;
		/* Find spot */
		for (rcu_reader_reg = (struct lttng_ust_urcu_reader *) &chunk->data[0];
				rcu_reader_reg < (struct lttng_ust_urcu_reader *) &chunk->data[chunk->data_len];
				rcu_reader_reg++) {
			if (!rcu_reader_reg->alloc) {
				rcu_reader_reg->alloc = 1;
				chunk->used += len;
				return rcu_reader_reg;
			}
		}
	}

	if (!expand_done) {
		expand_arena(arena);
		expand_done = 1;
		goto retry;
	}

	return NULL;
}

/* Called with signals off and mutex locked */
static
void add_thread(void)
{
	struct lttng_ust_urcu_reader *rcu_reader_reg;
	int ret;

	rcu_reader_reg = arena_alloc(&registry_arena);
	if (!rcu_reader_reg)
		abort();
	ret = pthread_setspecific(lttng_ust_urcu_key, rcu_reader_reg);
	if (ret)
		abort();

	/* Add to registry */
	rcu_reader_reg->tid = pthread_self();
	assert(rcu_reader_reg->ctr == 0);
	cds_list_add(&rcu_reader_reg->node, &registry);
	/*
	 * Reader threads are pointing to the reader registry. This is
	 * why its memory should never be relocated.
	 */
	URCU_TLS(lttng_ust_urcu_reader) = rcu_reader_reg;
}

/* Called with mutex locked */
static
void cleanup_thread(struct registry_chunk *chunk,
		struct lttng_ust_urcu_reader *rcu_reader_reg)
{
	rcu_reader_reg->ctr = 0;
	cds_list_del(&rcu_reader_reg->node);
	rcu_reader_reg->tid = 0;
	rcu_reader_reg->alloc = 0;
	chunk->used -= sizeof(struct lttng_ust_urcu_reader);
}

static
struct registry_chunk *find_chunk(struct lttng_ust_urcu_reader *rcu_reader_reg)
{
	struct registry_chunk *chunk;

	cds_list_for_each_entry(chunk, &registry_arena.chunk_list, node) {
		if (rcu_reader_reg < (struct lttng_ust_urcu_reader *) &chunk->data[0])
			continue;
		if (rcu_reader_reg >= (struct lttng_ust_urcu_reader *) &chunk->data[chunk->data_len])
			continue;
		return chunk;
	}
	return NULL;
}

/* Called with signals off and mutex locked */
static
void remove_thread(struct lttng_ust_urcu_reader *rcu_reader_reg)
{
	cleanup_thread(find_chunk(rcu_reader_reg), rcu_reader_reg);
	URCU_TLS(lttng_ust_urcu_reader) = NULL;
}

/* Disable signals, take mutex, add to registry */
void lttng_ust_urcu_register(void)
{
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();

	/*
	 * Check if a signal handler concurrently registered our thread
	 * since the check in rcu_read_lock().
	 */
	if (URCU_TLS(lttng_ust_urcu_reader))
		goto end;

	/*
	 * Take care of early registration before lttng_ust_urcu constructor.
	 */
	_lttng_ust_urcu_init();

	mutex_lock(&rcu_registry_lock);
	add_thread();
	mutex_unlock(&rcu_registry_lock);
end:
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
}

void lttng_ust_urcu_register_thread(void)
{
	if (caa_unlikely(!URCU_TLS(lttng_ust_urcu_reader)))
		lttng_ust_urcu_register(); /* If not yet registered. */
}
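
/*
 * Note: registration also happens implicitly on the first read-side
 * critical section (see the check referenced in lttng_ust_urcu_register()
 * above). Calling lttng_ust_urcu_register_thread() eagerly simply moves
 * that cost out of the first call to lttng_ust_urcu_read_lock().
 */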

/* Disable signals, take mutex, remove from registry */
static
void lttng_ust_urcu_unregister(struct lttng_ust_urcu_reader *rcu_reader_reg)
{
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();

	mutex_lock(&rcu_registry_lock);
	remove_thread(rcu_reader_reg);
	mutex_unlock(&rcu_registry_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
	lttng_ust_urcu_exit();
}

/*
 * Remove thread from the registry when it exits, and flag it as
 * destroyed so garbage collection can take care of it.
 */
static
void lttng_ust_urcu_thread_exit_notifier(void *rcu_key)
{
	lttng_ust_urcu_unregister(rcu_key);
}

#ifdef CONFIG_RCU_FORCE_SYS_MEMBARRIER
static
void lttng_ust_urcu_sys_membarrier_status(bool available)
{
	if (!available)
		abort();
}
#else
static
void lttng_ust_urcu_sys_membarrier_status(bool available)
{
	if (!available)
		return;
	lttng_ust_urcu_has_sys_membarrier = 1;
}
#endif

static
void lttng_ust_urcu_sys_membarrier_init(void)
{
	bool available = false;
	int mask;

	mask = membarrier(MEMBARRIER_CMD_QUERY, 0);
	if (mask >= 0) {
		if (mask & MEMBARRIER_CMD_PRIVATE_EXPEDITED) {
			if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0))
				abort();
			available = true;
		}
	}
	lttng_ust_urcu_sys_membarrier_status(available);
}

static
void _lttng_ust_urcu_init(void)
{
	mutex_lock(&init_lock);
	if (!lttng_ust_urcu_refcount++) {
		int ret;

		ret = pthread_key_create(&lttng_ust_urcu_key,
				lttng_ust_urcu_thread_exit_notifier);
		if (ret)
			abort();
		lttng_ust_urcu_sys_membarrier_init();
		initialized = 1;
	}
	mutex_unlock(&init_lock);
}

static
void lttng_ust_urcu_exit(void)
{
	mutex_lock(&init_lock);
	if (!--lttng_ust_urcu_refcount) {
		struct registry_chunk *chunk, *tmp;
		int ret;

		cds_list_for_each_entry_safe(chunk, tmp,
				&registry_arena.chunk_list, node) {
			munmap((void *) chunk, chunk->data_len
					+ sizeof(struct registry_chunk));
		}
		CDS_INIT_LIST_HEAD(&registry_arena.chunk_list);
		ret = pthread_key_delete(lttng_ust_urcu_key);
		if (ret)
			abort();
	}
	mutex_unlock(&init_lock);
}

/*
 * Holding rcu_gp_lock and rcu_registry_lock across fork makes sure
 * fork() does not race with a concurrent thread executing with any of
 * those locks held. This ensures that the registry and data protected
 * by rcu_gp_lock are in a coherent state in the child.
 */
void lttng_ust_urcu_before_fork(void)
{
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	assert(!ret);
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	assert(!ret);
	mutex_lock(&rcu_gp_lock);
	mutex_lock(&rcu_registry_lock);
	saved_fork_signal_mask = oldmask;
}

void lttng_ust_urcu_after_fork_parent(void)
{
	sigset_t oldmask;
	int ret;

	oldmask = saved_fork_signal_mask;
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	assert(!ret);
}

/*
 * Prune all entries from registry except our own thread. Matches the
 * Linux fork() behavior, where only the forking thread survives in the
 * child. Called with rcu_gp_lock and rcu_registry_lock held.
 */
static
void lttng_ust_urcu_prune_registry(void)
{
	struct registry_chunk *chunk;
	struct lttng_ust_urcu_reader *rcu_reader_reg;

	cds_list_for_each_entry(chunk, &registry_arena.chunk_list, node) {
		for (rcu_reader_reg = (struct lttng_ust_urcu_reader *) &chunk->data[0];
				rcu_reader_reg < (struct lttng_ust_urcu_reader *) &chunk->data[chunk->data_len];
				rcu_reader_reg++) {
			if (!rcu_reader_reg->alloc)
				continue;
			if (rcu_reader_reg->tid == pthread_self())
				continue;
			cleanup_thread(chunk, rcu_reader_reg);
		}
	}
}

void lttng_ust_urcu_after_fork_child(void)
{
	sigset_t oldmask;
	int ret;

	lttng_ust_urcu_prune_registry();
	oldmask = saved_fork_signal_mask;
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	assert(!ret);
}
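
/*
 * Fork-handler wiring sketch (hypothetical application code, not part of
 * this file):
 *
 *	ret = pthread_atfork(lttng_ust_urcu_before_fork,
 *		lttng_ust_urcu_after_fork_parent,
 *		lttng_ust_urcu_after_fork_child);
 *	if (ret)
 *		abort();
 */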