X-Git-Url: http://git.liburcu.org/?a=blobdiff_plain;f=liblttng-ust%2Flttng-ust-comm.c;h=378ca21cfc71f354a282fe4a9a521cf77319ef02;hb=3df53fae30ba42a0ce6f61054fe38b15f13ceeb6;hp=0c96f012f528fd4929581107e45ba9314430a92e;hpb=bd703713989223b66362ed670e2b3b0097118f46;p=lttng-ust.git

diff --git a/liblttng-ust/lttng-ust-comm.c b/liblttng-ust/lttng-ust-comm.c
index 0c96f012..378ca21c 100644
--- a/liblttng-ust/lttng-ust-comm.c
+++ b/liblttng-ust/lttng-ust-comm.c
@@ -34,7 +34,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -68,9 +67,21 @@ static int initialized;
  * probe registration.
  *
  * ust_exit_mutex must never nest in ust_mutex.
+ *
+ * ust_fork_mutex must never nest in ust_mutex.
+ *
+ * ust_mutex_nest is a per-thread nesting counter, allowing the perf
+ * counter lazy initialization called by events within the statedump,
+ * which traces while the ust_mutex is held.
+ *
+ * ust_lock nests within the dynamic loader lock (within glibc) because
+ * it is taken within the library constructor.
  */
 static pthread_mutex_t ust_mutex = PTHREAD_MUTEX_INITIALIZER;
 
+/* Allow nesting the ust_mutex within the same thread. */
+static DEFINE_URCU_TLS(int, ust_mutex_nest);
+
 /*
  * ust_exit_mutex protects thread_active variable wrt thread exit. It
  * cannot be done by ust_mutex because pthread_cancel(), which takes an
@@ -80,16 +91,38 @@ static pthread_mutex_t ust_mutex = PTHREAD_MUTEX_INITIALIZER;
  */
 static pthread_mutex_t ust_exit_mutex = PTHREAD_MUTEX_INITIALIZER;
 
+/*
+ * ust_fork_mutex protects base address statedump tracing against forks. It
+ * prevents the dynamic loader lock to be taken (by base address statedump
+ * tracing) while a fork is happening, thus preventing deadlock issues with
+ * the dynamic loader lock.
+ */
+static pthread_mutex_t ust_fork_mutex = PTHREAD_MUTEX_INITIALIZER;
+
 /* Should the ust comm thread quit ? */
 static int lttng_ust_comm_should_quit;
 
 /*
- * Return 0 on success, -1 if should quilt.
+ * Return 0 on success, -1 if should quit.
  * The lock is taken in both cases.
+ * Signal-safe.
  */
 int ust_lock(void)
 {
-	pthread_mutex_lock(&ust_mutex);
+	sigset_t sig_all_blocked, orig_mask;
+	int ret;
+
+	sigfillset(&sig_all_blocked);
+	ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
+	if (ret) {
+		ERR("pthread_sigmask: %s", strerror(ret));
+	}
+	if (!URCU_TLS(ust_mutex_nest)++)
+		pthread_mutex_lock(&ust_mutex);
+	ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
+	if (ret) {
+		ERR("pthread_sigmask: %s", strerror(ret));
+	}
 	if (lttng_ust_comm_should_quit) {
 		return -1;
 	} else {
@@ -101,15 +134,45 @@ int ust_lock(void)
  * ust_lock_nocheck() can be used in constructors/destructors, because
  * they are already nested within the dynamic loader lock, and therefore
  * have exclusive access against execution of liblttng-ust destructor.
+ * Signal-safe.
  */
 void ust_lock_nocheck(void)
 {
-	pthread_mutex_lock(&ust_mutex);
+	sigset_t sig_all_blocked, orig_mask;
+	int ret;
+
+	sigfillset(&sig_all_blocked);
+	ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
+	if (ret) {
+		ERR("pthread_sigmask: %s", strerror(ret));
+	}
+	if (!URCU_TLS(ust_mutex_nest)++)
+		pthread_mutex_lock(&ust_mutex);
+	ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
+	if (ret) {
+		ERR("pthread_sigmask: %s", strerror(ret));
+	}
 }
 
+/*
+ * Signal-safe.
+ */
 void ust_unlock(void)
 {
-	pthread_mutex_unlock(&ust_mutex);
+	sigset_t sig_all_blocked, orig_mask;
+	int ret;
+
+	sigfillset(&sig_all_blocked);
+	ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
+	if (ret) {
+		ERR("pthread_sigmask: %s", strerror(ret));
+	}
+	if (!--URCU_TLS(ust_mutex_nest))
+		pthread_mutex_unlock(&ust_mutex);
+	ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
+	if (ret) {
+		ERR("pthread_sigmask: %s", strerror(ret));
+	}
 }
 
 /*
@@ -266,6 +329,22 @@ void lttng_fixup_nest_count_tls(void)
 	asm volatile ("" : : "m" (URCU_TLS(lttng_ust_nest_count)));
 }
 
+static
+void lttng_fixup_ust_mutex_nest_tls(void)
+{
+	asm volatile ("" : : "m" (URCU_TLS(ust_mutex_nest)));
+}
+
+/*
+ * Fixup urcu bp TLS.
+ */
+static
+void lttng_fixup_urcu_bp_tls(void)
+{
+	rcu_read_lock();
+	rcu_read_unlock();
+}
+
 int lttng_get_notify_socket(void *owner)
 {
 	struct sock_info *info = owner;
@@ -450,7 +529,9 @@ void handle_pending_statedump(struct sock_info *sock_info)
 
 	if (ctor_passed && sock_info->statedump_pending) {
 		sock_info->statedump_pending = 0;
+		pthread_mutex_lock(&ust_fork_mutex);
 		lttng_handle_pending_statedump(sock_info);
+		pthread_mutex_unlock(&ust_fork_mutex);
 	}
 }
 
@@ -820,9 +901,14 @@ void cleanup_sock_info(struct sock_info *sock_info, int exiting)
 		sock_info->notify_socket = -1;
 	}
 	if (sock_info->wait_shm_mmap) {
-		ret = munmap(sock_info->wait_shm_mmap, sysconf(_SC_PAGE_SIZE));
-		if (ret) {
-			ERR("Error unmapping wait shm");
+		long page_size;
+
+		page_size = sysconf(_SC_PAGE_SIZE);
+		if (page_size > 0) {
+			ret = munmap(sock_info->wait_shm_mmap, page_size);
+			if (ret) {
+				ERR("Error unmapping wait shm");
+			}
 		}
 		sock_info->wait_shm_mmap = NULL;
 	}
@@ -995,15 +1081,20 @@ error_close:
 static
 char *get_map_shm(struct sock_info *sock_info)
 {
-	size_t mmap_size = sysconf(_SC_PAGE_SIZE);
+	long page_size;
 	int wait_shm_fd, ret;
 	char *wait_shm_mmap;
 
-	wait_shm_fd = get_wait_shm(sock_info, mmap_size);
+	page_size = sysconf(_SC_PAGE_SIZE);
+	if (page_size < 0) {
+		goto error;
+	}
+
+	wait_shm_fd = get_wait_shm(sock_info, page_size);
 	if (wait_shm_fd < 0) {
 		goto error;
 	}
-	wait_shm_mmap = mmap(NULL, mmap_size, PROT_READ,
+	wait_shm_mmap = mmap(NULL, page_size, PROT_READ,
 		  MAP_SHARED, wait_shm_fd, 0);
 	/* close shm fd immediately after taking the mmap reference */
 	ret = close(wait_shm_fd);
@@ -1310,6 +1401,14 @@ quit:
 	return NULL;
 }
 
+/*
+ * Weak symbol to call when the ust malloc wrapper is not loaded.
+ */
+__attribute__((weak))
+void lttng_ust_malloc_wrapper_init(void)
+{
+}
+
 /*
  * sessiond monitoring thread: monitor presence of global and per-user
  * sessiond by polling the application common named pipe.
@@ -1330,10 +1429,12 @@ void __attribute__((constructor)) lttng_ust_init(void)
 	 * to be the dynamic linker mutex) and ust_lock, taken within
 	 * the ust lock.
 	 */
+	lttng_fixup_urcu_bp_tls();
 	lttng_fixup_ringbuffer_tls();
 	lttng_fixup_vtid_tls();
 	lttng_fixup_nest_count_tls();
 	lttng_fixup_procname_tls();
+	lttng_fixup_ust_mutex_nest_tls();
 
 	/*
 	 * We want precise control over the order in which we construct
@@ -1349,7 +1450,12 @@ void __attribute__((constructor)) lttng_ust_init(void)
 	lttng_ring_buffer_client_overwrite_rt_init();
 	lttng_ring_buffer_client_discard_init();
 	lttng_ring_buffer_client_discard_rt_init();
+	lttng_perf_counter_init();
 	lttng_context_init();
+	/*
+	 * Invoke ust malloc wrapper init before starting other threads.
+	 */
+	lttng_ust_malloc_wrapper_init();
 
 	timeout_mode = get_constructor_timeout(&constructor_timeout);
 
@@ -1453,6 +1559,7 @@ void lttng_ust_cleanup(int exiting)
 	lttng_ust_abi_exit();
 	lttng_ust_events_exit();
 	lttng_context_exit();
+	lttng_perf_counter_exit();
 	lttng_ring_buffer_client_discard_rt_exit();
 	lttng_ring_buffer_client_discard_exit();
 	lttng_ring_buffer_client_overwrite_rt_exit();
@@ -1546,6 +1653,9 @@ void ust_before_fork(sigset_t *save_sigset)
 	if (ret == -1) {
 		PERROR("sigprocmask");
 	}
+
+	pthread_mutex_lock(&ust_fork_mutex);
+
 	ust_lock_nocheck();
 	rcu_bp_before_fork();
 }
@@ -1556,6 +1666,9 @@ static void ust_after_fork_common(sigset_t *restore_sigset)
 
 	DBG("process %d", getpid());
 	ust_unlock();
+
+	pthread_mutex_unlock(&ust_fork_mutex);
+
 	/* Restore signals */
 	ret = sigprocmask(SIG_SETMASK, restore_sigset, NULL);
 	if (ret == -1) {
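
The core change above makes ust_lock()/ust_unlock() signal-safe and same-thread reentrant: all signals are blocked around the lock/unlock step so a signal handler on the same thread cannot interleave with it, and a per-thread nesting counter lets only the outermost level touch the non-recursive mutex. This is what allows the perf counter lazy initialization, invoked from statedump events that run while ust_mutex is already held, to go through the lock path again without self-deadlocking. Below is a minimal standalone sketch of the same pattern; the sketch_* names are illustrative and not part of lttng-ust:

#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t sketch_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread int sketch_nest;	/* per-thread nesting counter */

static void sketch_lock(void)
{
	sigset_t all_blocked, orig_mask;
	int ret;

	/* Block all signals so a handler cannot interleave with us. */
	sigfillset(&all_blocked);
	ret = pthread_sigmask(SIG_SETMASK, &all_blocked, &orig_mask);
	if (ret)
		fprintf(stderr, "pthread_sigmask: %s\n", strerror(ret));
	if (!sketch_nest++)		/* outermost level takes the mutex */
		pthread_mutex_lock(&sketch_mutex);
	ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
	if (ret)
		fprintf(stderr, "pthread_sigmask: %s\n", strerror(ret));
}

static void sketch_unlock(void)
{
	sigset_t all_blocked, orig_mask;
	int ret;

	sigfillset(&all_blocked);
	ret = pthread_sigmask(SIG_SETMASK, &all_blocked, &orig_mask);
	if (ret)
		fprintf(stderr, "pthread_sigmask: %s\n", strerror(ret));
	if (!--sketch_nest)		/* outermost level releases it */
		pthread_mutex_unlock(&sketch_mutex);
	ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
	if (ret)
		fprintf(stderr, "pthread_sigmask: %s\n", strerror(ret));
}

int main(void)
{
	sketch_lock();
	sketch_lock();		/* nested: counter goes 1 -> 2, no deadlock */
	sketch_unlock();
	sketch_unlock();	/* counter back to 0: mutex actually released */
	return 0;
}

Note that signals are only blocked around the counter update and mutex operation, not for the whole critical section: the pair therefore appears atomic to same-thread signal handlers, which is all the pattern needs.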
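The lttng_fixup_*_tls() helpers added to the constructor all use the same idiom: touch each TLS variable once, early, while it is still safe to take the allocator and dynamic loader locks, so that glibc's lazy TLS allocation cannot trigger later in an awkward context (e.g. while ust_mutex is held). The empty asm statement with an "m" input constraint forces the compiler to materialize a real memory reference without emitting any instruction. A sketch, assuming liburcu's urcu/tls-compat.h and using a hypothetical variable name:

#include <urcu/tls-compat.h>	/* DEFINE_URCU_TLS() / URCU_TLS() */

/* Hypothetical per-thread counter, for illustration only. */
static DEFINE_URCU_TLS(int, example_nest);

static void fixup_example_tls(void)
{
	/* Force allocation of the TLS storage right now. */
	asm volatile ("" : : "m" (URCU_TLS(example_nest)));
}

int main(void)
{
	fixup_example_tls();
	return 0;
}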
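ust_fork_mutex serializes fork() against base address statedump tracing: handle_pending_statedump() holds it across lttng_handle_pending_statedump(), while ust_before_fork() and ust_after_fork_common() hold it across the fork itself, so the dynamic loader lock taken while walking the loaded objects can never be caught mid-fork. Stripped to its skeleton (illustrative names, not the lttng-ust API):

#include <pthread.h>

static pthread_mutex_t fork_guard = PTHREAD_MUTEX_INITIALIZER;

/* Fork side: called around fork(), e.g. from a fork wrapper. */
static void guard_before_fork(void)
{
	pthread_mutex_lock(&fork_guard);	/* exclude statedump */
}

static void guard_after_fork(void)
{
	pthread_mutex_unlock(&fork_guard);
}

/* Statedump side: never runs concurrently with a fork. */
static void guarded_statedump(void)
{
	pthread_mutex_lock(&fork_guard);
	/* ... iterate loaded objects (takes the dynamic loader lock) ... */
	pthread_mutex_unlock(&fork_guard);
}

int main(void)
{
	guarded_statedump();	/* statedump side */
	guard_before_fork();	/* fork side: statedump now excluded */
	/* ... fork() would happen here ... */
	guard_after_fork();
	return 0;
}

Since fork() duplicates only the calling thread, the after-fork release must run in both the parent and the child, which is why the real code funnels both paths through ust_after_fork_common().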
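The weak lttng_ust_malloc_wrapper_init() definition is a common hook pattern: the core library unconditionally calls the function, and when the companion library that provides a strong definition (here, the ust malloc wrapper) is not loaded, the empty weak fallback is linked in instead. A generic sketch of the mechanism, with a hypothetical hook name:

#include <stdio.h>

/* Default no-op, overridable by a strong definition elsewhere. */
__attribute__((weak))
void optional_hook_init(void)
{
}

int main(void)
{
	/*
	 * Always safe to call: binds to the strong definition when a
	 * companion library provides one, to the no-op above otherwise.
	 */
	optional_hook_init();
	printf("initialized\n");
	return 0;
}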
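Finally, the get_map_shm() and cleanup_sock_info() hunks replace direct use of sysconf(_SC_PAGE_SIZE) as a size with an error-checked long: sysconf() returns -1 on failure, and the old code would have silently converted that to a huge size_t before handing it to mmap()/munmap(). The checked idiom, as a small standalone example:

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	long page_size;

	page_size = sysconf(_SC_PAGE_SIZE);
	if (page_size < 0) {		/* -1 on error, errno set */
		perror("sysconf");
		return 1;
	}
	printf("page size: %ld bytes\n", page_size);
	return 0;
}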