diff --git a/libust/lttng-ust-comm.c b/libust/lttng-ust-comm.c
index 1496408f..58e9a648 100644
--- a/libust/lttng-ust-comm.c
+++ b/libust/lttng-ust-comm.c
@@ -21,21 +21,34 @@
 #include <sys/types.h>
 #include <sys/socket.h>
+#include <sys/prctl.h>
 #include <unistd.h>
 #include <errno.h>
-#include <lttng-ust-comm.h>
-#include <ust/usterr-signal-safe.h>
-#include <ust/lttng-ust-abi.h>
 #include <pthread.h>
 #include <semaphore.h>
 #include <time.h>
 #include <assert.h>
+#include <signal.h>
+#include <urcu/uatomic.h>
+
+#include <lttng-ust-comm.h>
+#include <ust/usterr-signal-safe.h>
+#include <ust/lttng-ust-abi.h>
+#include <ust/tracepoint.h>
+#include <ust/tracepoint-internal.h>
+#include <ust/ust.h>
+#include "ltt-tracer-core.h"
 
 /*
- * communication thread mutex. Held when handling a command, also held
- * by fork() to deal with removal of threads, and by exit path.
+ * Has the lttng ust comm constructor been called?
+ */
+static int initialized;
+
+/*
+ * The ust_lock/ust_unlock lock is used as a communication thread mutex.
+ * Held when handling a command, also held by fork() to deal with
+ * removal of threads, and by exit path.
  */
-static pthread_mutex_t lttng_ust_comm_mutex = PTHREAD_MUTEX_INITIALIZER;
 
 /* Should the ust comm thread quit ? */
 static int lttng_ust_comm_should_quit;
@@ -51,6 +64,10 @@ static int lttng_ust_comm_should_quit;
  * daemon problems).
  */
static sem_t constructor_wait;
+/*
+ * One count for the global and one for the per-user (local) sessiond.
+ */
+static int sem_count = 2;
 
 /*
  * Info about socket and associated listener thread.
@@ -61,6 +78,8 @@ struct sock_info {
 	int socket;
 	pthread_t ust_listener;	/* listener thread */
 	int root_handle;
+	int constructor_sem_posted;
+	int allowed;
 };
 
 /* Socket from app (connect) to session daemon (listen) for communication */
@@ -69,6 +88,7 @@ struct sock_info global_apps = {
 	.sock_path = DEFAULT_GLOBAL_APPS_UNIX_SOCK,
 	.socket = -1,
 	.root_handle = -1,
+	.allowed = 1,
 };
 
 /* TODO: allow global_apps_sock_path override */
@@ -77,13 +97,30 @@ struct sock_info local_apps = {
 	.name = "local",
 	.socket = -1,
 	.root_handle = -1,
+	.allowed = 0,	/* Check setuid bit first */
 };
 
+extern void ltt_ring_buffer_client_overwrite_init(void);
+extern void ltt_ring_buffer_client_discard_init(void);
+extern void ltt_ring_buffer_metadata_client_init(void);
+extern void ltt_ring_buffer_client_overwrite_exit(void);
+extern void ltt_ring_buffer_client_discard_exit(void);
+extern void ltt_ring_buffer_metadata_client_exit(void);
+
 static
-int setup_local_apps_socket(void)
+int setup_local_apps(void)
 {
 	const char *home_dir;
 
+	/*
+	 * Disallow per-user tracing for setuid binaries.
+ */ + if (getuid() != geteuid()) { + local_apps.allowed = 0; + return 0; + } else { + local_apps.allowed = 1; + } home_dir = (const char *) getenv("HOME"); if (!home_dir) return -ENOENT; @@ -96,17 +133,28 @@ static int register_app_to_sessiond(int socket) { ssize_t ret; + int prctl_ret; struct { uint32_t major; uint32_t minor; pid_t pid; + pid_t ppid; uid_t uid; + gid_t gid; + char name[16]; /* process name */ } reg_msg; reg_msg.major = LTTNG_UST_COMM_VERSION_MAJOR; reg_msg.minor = LTTNG_UST_COMM_VERSION_MINOR; reg_msg.pid = getpid(); + reg_msg.ppid = getppid(); reg_msg.uid = getuid(); + reg_msg.gid = getgid(); + prctl_ret = prctl(PR_GET_NAME, (unsigned long) reg_msg.name, 0, 0, 0); + if (prctl_ret) { + ERR("Error executing prctl"); + return -errno; + } ret = lttcomm_send_unix_sock(socket, ®_msg, sizeof(reg_msg)); if (ret >= 0 && ret != sizeof(reg_msg)) @@ -137,12 +185,18 @@ int send_reply(int sock, struct lttcomm_ust_reply *lur) } static -int handle_register_done(void) +int handle_register_done(struct sock_info *sock_info) { int ret; - ret = sem_post(&constructor_wait); - assert(!ret); + if (sock_info->constructor_sem_posted) + return 0; + sock_info->constructor_sem_posted = 1; + ret = uatomic_add_return(&sem_count, -1); + if (ret == 0) { + ret = sem_post(&constructor_wait); + assert(!ret); + } return 0; } @@ -154,7 +208,7 @@ int handle_message(struct sock_info *sock_info, const struct objd_ops *ops; struct lttcomm_ust_reply lur; - pthread_mutex_lock(<tng_ust_comm_mutex); + ust_lock(); memset(&lur, 0, sizeof(lur)); @@ -172,7 +226,7 @@ int handle_message(struct sock_info *sock_info, switch (lum->cmd) { case LTTNG_UST_REGISTER_DONE: if (lum->handle == LTTNG_UST_ROOT_HANDLE) - ret = handle_register_done(); + ret = handle_register_done(sock_info); else ret = -EINVAL; break; @@ -202,7 +256,7 @@ end: } ret = send_reply(sock, &lur); - pthread_mutex_unlock(<tng_ust_comm_mutex); + ust_unlock(); return ret; } @@ -242,10 +296,10 @@ void *ust_listener_thread(void *arg) /* Restart trying to connect to the session daemon */ restart: - pthread_mutex_lock(<tng_ust_comm_mutex); + ust_lock(); if (lttng_ust_comm_should_quit) { - pthread_mutex_unlock(<tng_ust_comm_mutex); + ust_unlock(); goto quit; } @@ -267,9 +321,9 @@ restart: * If we cannot find the sessiond daemon, don't delay * constructor execution. */ - ret = handle_register_done(); + ret = handle_register_done(sock_info); assert(!ret); - pthread_mutex_unlock(<tng_ust_comm_mutex); + ust_unlock(); sleep(5); goto restart; } @@ -284,7 +338,7 @@ restart: ret = lttng_abi_create_root_handle(); if (ret) { ERR("Error creating root handle"); - pthread_mutex_unlock(<tng_ust_comm_mutex); + ust_unlock(); goto quit; } sock_info->root_handle = ret; @@ -297,13 +351,13 @@ restart: * If we cannot register to the sessiond daemon, don't * delay constructor execution. 
 		 */
-		ret = handle_register_done();
+		ret = handle_register_done(sock_info);
 		assert(!ret);
-		pthread_mutex_unlock(&lttng_ust_comm_mutex);
+		ust_unlock();
 		sleep(5);
 		goto restart;
 	}
-	pthread_mutex_unlock(&lttng_ust_comm_mutex);
+	ust_unlock();
 
 	for (;;) {
 		ssize_t len;
@@ -369,9 +423,9 @@ int get_timeout(struct timespec *constructor_timeout)
 	if (ret) {
 		return -1;
 	}
-
-	constructor_timeout->tv_nsec =
-		constructor_timeout->tv_nsec + (constructor_delay_ms * 1000000UL);
+	constructor_timeout->tv_sec += constructor_delay_ms / 1000UL;
+	constructor_timeout->tv_nsec +=
+		(constructor_delay_ms % 1000UL) * 1000000UL;
 	if (constructor_timeout->tv_nsec >= 1000000000UL) {
 		constructor_timeout->tv_sec++;
 		constructor_timeout->tv_nsec -= 1000000000UL;
@@ -385,39 +439,52 @@ int get_timeout(struct timespec *constructor_timeout)
  */
 
 /* TODO */
-void __attribute__((constructor)) lttng_ust_comm_init(void)
+void __attribute__((constructor)) lttng_ust_init(void)
 {
 	struct timespec constructor_timeout;
 	int timeout_mode;
 	int ret;
 
+	if (uatomic_xchg(&initialized, 1) == 1)
+		return;
+
+	/*
+	 * We want precise control over the order in which we construct
+	 * our sub-libraries vs starting to receive commands from
+	 * sessiond (otherwise leading to errors when trying to create
+	 * sessions before the init functions are completed).
+	 */
 	init_usterr();
+	init_tracepoint();
+	ltt_ring_buffer_metadata_client_init();
+	ltt_ring_buffer_client_overwrite_init();
+	ltt_ring_buffer_client_discard_init();
 
 	timeout_mode = get_timeout(&constructor_timeout);
 
-	ret = sem_init(&constructor_wait, 0, 2);
+	ret = sem_init(&constructor_wait, 0, 0);
 	assert(!ret);
 
-	ret = setup_local_apps_socket();
+	ret = setup_local_apps();
 	if (ret) {
-		ERR("Error setting up to local apps socket");
+		ERR("Error setting up local apps");
 	}
-
-	/*
-	 * Wait for the pthread cond to let us continue to main program
-	 * execution. Hold mutex across thread creation, so we start
-	 * waiting for the condition before the threads can signal its
-	 * completion.
- */ - pthread_mutex_lock(<tng_ust_comm_mutex); - ret = pthread_create(&global_apps.ust_listener, NULL, - ust_listener_thread, &global_apps); ret = pthread_create(&local_apps.ust_listener, NULL, ust_listener_thread, &local_apps); + if (local_apps.allowed) { + ret = pthread_create(&global_apps.ust_listener, NULL, + ust_listener_thread, &global_apps); + } else { + handle_register_done(&local_apps); + } + switch (timeout_mode) { case 1: /* timeout wait */ - ret = sem_timedwait(&constructor_wait, &constructor_timeout); + do { + ret = sem_timedwait(&constructor_wait, + &constructor_timeout); + } while (ret < 0 && errno == EINTR); if (ret < 0 && errno == ETIMEDOUT) { ERR("Timed out waiting for ltt-sessiond"); } else { @@ -425,16 +492,38 @@ void __attribute__((constructor)) lttng_ust_comm_init(void) } break; case -1:/* wait forever */ - ret = sem_wait(&constructor_wait); + do { + ret = sem_wait(&constructor_wait); + } while (ret < 0 && errno == EINTR); assert(!ret); break; case 0: /* no timeout */ break; } - pthread_mutex_unlock(<tng_ust_comm_mutex); } -void __attribute__((destructor)) lttng_ust_comm_exit(void) +static +void lttng_ust_cleanup(int exiting) +{ + cleanup_sock_info(&global_apps); + if (local_apps.allowed) { + cleanup_sock_info(&local_apps); + } + lttng_ust_abi_exit(); + ltt_events_exit(); + ltt_ring_buffer_client_discard_exit(); + ltt_ring_buffer_client_overwrite_exit(); + ltt_ring_buffer_metadata_client_exit(); + exit_tracepoint(); + if (!exiting) { + /* Reinitialize values for fork */ + sem_count = 2; + lttng_ust_comm_should_quit = 0; + initialized = 0; + } +} + +void __attribute__((destructor)) lttng_ust_exit(void) { int ret; @@ -449,26 +538,88 @@ void __attribute__((destructor)) lttng_ust_comm_exit(void) * mutexes to ensure it is not in a mutex critical section when * pthread_cancel is later called. */ - pthread_mutex_lock(<tng_ust_comm_mutex); + ust_lock(); lttng_ust_comm_should_quit = 1; - pthread_mutex_unlock(<tng_ust_comm_mutex); + ust_unlock(); -#if 0 ret = pthread_cancel(global_apps.ust_listener); if (ret) { ERR("Error cancelling global ust listener thread"); } -#endif //0 + if (local_apps.allowed) { + ret = pthread_cancel(local_apps.ust_listener); + if (ret) { + ERR("Error cancelling local ust listener thread"); + } + } + lttng_ust_cleanup(1); +} - cleanup_sock_info(&global_apps); +/* + * We exclude the worker threads across fork and clone (except + * CLONE_VM), because these system calls only keep the forking thread + * running in the child. Therefore, we don't want to call fork or clone + * in the middle of an tracepoint or ust tracing state modification. + * Holding this mutex protects these structures across fork and clone. + */ +void ust_before_fork(ust_fork_info_t *fork_info) +{ + /* + * Disable signals. This is to avoid that the child intervenes + * before it is properly setup for tracing. It is safer to + * disable all signals, because then we know we are not breaking + * anything by restoring the original mask. 
+ */ + sigset_t all_sigs; + int ret; - ret = pthread_cancel(local_apps.ust_listener); - if (ret) { - ERR("Error cancelling local ust listener thread"); + /* Disable signals */ + sigfillset(&all_sigs); + ret = sigprocmask(SIG_BLOCK, &all_sigs, &fork_info->orig_sigs); + if (ret == -1) { + PERROR("sigprocmask"); } + ust_lock(); + rcu_bp_before_fork(); +} - cleanup_sock_info(&local_apps); +static void ust_after_fork_common(ust_fork_info_t *fork_info) +{ + int ret; - lttng_ust_abi_exit(); - ltt_events_exit(); + DBG("process %d", getpid()); + ust_unlock(); + /* Restore signals */ + ret = sigprocmask(SIG_SETMASK, &fork_info->orig_sigs, NULL); + if (ret == -1) { + PERROR("sigprocmask"); + } +} + +void ust_after_fork_parent(ust_fork_info_t *fork_info) +{ + DBG("process %d", getpid()); + rcu_bp_after_fork_parent(); + /* Release mutexes and reenable signals */ + ust_after_fork_common(fork_info); +} + +/* + * After fork, in the child, we need to cleanup all the leftover state, + * except the worker thread which already magically disappeared thanks + * to the weird Linux fork semantics. After tyding up, we call + * lttng_ust_init() again to start over as a new PID. + * + * This is meant for forks() that have tracing in the child between the + * fork and following exec call (if there is any). + */ +void ust_after_fork_child(ust_fork_info_t *fork_info) +{ + DBG("process %d", getpid()); + /* Release urcu mutexes */ + rcu_bp_after_fork_child(); + lttng_ust_cleanup(0); + lttng_ust_init(); + /* Release mutexes and reenable signals */ + ust_after_fork_common(fork_info); }