#include <lttng/ust.h>
#include <lttng/ust-error.h>
#include <lttng/ust-ctl.h>
+#include <lttng/ust-cancelstate.h>
#include <urcu/tls-compat.h>
#include <ust-comm.h>
#include <ust-fd.h>
*/
int lttng_ust_loaded __attribute__((weak));
+/*
+ * Notes on async-signal-safety of ust lock: a few libc functions are used
+ * which are not strictly async-signal-safe:
+ *
+ * - pthread_setcancelstate
+ * - pthread_mutex_lock
+ * - pthread_mutex_unlock
+ *
+ * As of glibc 2.35, the implementation of pthread_setcancelstate only
+ * touches TLS data, and it appears to be safe to use from signal
+ * handlers. If the libc implementation changes, this will need to be
+ * revisited, and we may ask glibc to provide an async-signal-safe
+ * pthread_setcancelstate.
+ *
+ * As of glibc 2.35, the implementation of pthread_mutex_lock/unlock
+ * for fast mutexes only relies on the pthread_mutex_t structure.
+ * Disabling signals around all uses of this mutex ensures
+ * signal-safety. If the libc implementation changes and eventually uses
+ * other global resources, this will need to be revisited and we may
+ * need to implement our own mutex.
+ */
+
/*
* Return 0 on success, -1 if should quit.
* The lock is taken in both cases.
int ust_lock(void)
{
sigset_t sig_all_blocked, orig_mask;
- int ret, oldstate;
+ int ret;
- ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
- if (ret) {
- ERR("pthread_setcancelstate: %s", strerror(ret));
- }
- if (oldstate != PTHREAD_CANCEL_ENABLE) {
- ERR("pthread_setcancelstate: unexpected oldstate");
+ if (lttng_ust_cancelstate_disable_push()) {
+ ERR("lttng_ust_cancelstate_disable_push");
}
sigfillset(&sig_all_blocked);
ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
if (ret) {
- ERR("pthread_sigmask: %s", strerror(ret));
+ ERR("pthread_sigmask: ret=%d", ret);
}
if (!URCU_TLS(ust_mutex_nest)++)
pthread_mutex_lock(&ust_mutex);
ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
if (ret) {
- ERR("pthread_sigmask: %s", strerror(ret));
+ ERR("pthread_sigmask: ret=%d", ret);
}
if (lttng_ust_comm_should_quit) {
return -1;
+/*
+ * Take the ust mutex without checking lttng_ust_comm_should_quit.
+ * Thread cancellation is disabled (pushed) and all signals are blocked
+ * around pthread_mutex_lock() to keep the lock usable where signal
+ * re-entry or async cancellation would otherwise be unsafe (see the
+ * async-signal-safety notes near the top of this file). The per-thread
+ * ust_mutex_nest counter makes the lock recursive.
+ */
void ust_lock_nocheck(void)
{
sigset_t sig_all_blocked, orig_mask;
- int ret, oldstate;
+ int ret;
- ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
- if (ret) {
- ERR("pthread_setcancelstate: %s", strerror(ret));
- }
- if (oldstate != PTHREAD_CANCEL_ENABLE) {
- ERR("pthread_setcancelstate: unexpected oldstate");
+ if (lttng_ust_cancelstate_disable_push()) {
+ ERR("lttng_ust_cancelstate_disable_push");
}
+ /*
+ * Block every signal while the mutex is acquired; error messages use
+ * ret=%d rather than strerror(), presumably because strerror() is not
+ * async-signal-safe — confirm against the file's safety notes.
+ */
sigfillset(&sig_all_blocked);
ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
if (ret) {
- ERR("pthread_sigmask: %s", strerror(ret));
+ ERR("pthread_sigmask: ret=%d", ret);
}
+ /* Only the outermost caller actually takes the mutex. */
if (!URCU_TLS(ust_mutex_nest)++)
pthread_mutex_lock(&ust_mutex);
ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
if (ret) {
- ERR("pthread_sigmask: %s", strerror(ret));
+ ERR("pthread_sigmask: ret=%d", ret);
}
}
+/*
+ * Release the ust mutex taken by ust_lock()/ust_lock_nocheck().
+ * All signals are blocked across pthread_mutex_unlock(), and the
+ * cancel state pushed at lock time is popped last, restoring the
+ * caller's previous cancellation setting.
+ */
void ust_unlock(void)
{
sigset_t sig_all_blocked, orig_mask;
- int ret, oldstate;
+ int ret;
sigfillset(&sig_all_blocked);
ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
if (ret) {
- ERR("pthread_sigmask: %s", strerror(ret));
+ ERR("pthread_sigmask: ret=%d", ret);
}
+ /* Only the outermost unlock drops the mutex (recursive nesting). */
if (!--URCU_TLS(ust_mutex_nest))
pthread_mutex_unlock(&ust_mutex);
ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
if (ret) {
- ERR("pthread_sigmask: %s", strerror(ret));
+ ERR("pthread_sigmask: ret=%d", ret);
}
- ret = pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &oldstate);
- if (ret) {
- ERR("pthread_setcancelstate: %s", strerror(ret));
- }
- if (oldstate != PTHREAD_CANCEL_DISABLE) {
- ERR("pthread_setcancelstate: unexpected oldstate");
+ if (lttng_ust_cancelstate_disable_pop()) {
+ ERR("lttng_ust_cancelstate_disable_pop");
}
}
/* Keep track of lazy state dump not performed yet. */
int statedump_pending;
int initial_statedump_done;
+ /* Keep procname for statedump */
+ char procname[LTTNG_UST_PROCNAME_LEN];
};
/* Socket from app (connect) to session daemon (listen) for communication */
.statedump_pending = 0,
.initial_statedump_done = 0,
+ .procname[0] = '\0'
};
/* TODO: allow global_apps_sock_path override */
.statedump_pending = 0,
.initial_statedump_done = 0,
+ .procname[0] = '\0'
};
static int wait_poll_fallback;
lttng_fixup_ust_mutex_nest_tls();
lttng_ust_fixup_perf_counter_tls();
lttng_ust_fixup_fd_tracker_tls();
+ lttng_fixup_cgroup_ns_tls();
+ lttng_fixup_ipc_ns_tls();
+ lttng_fixup_net_ns_tls();
+ lttng_fixup_uts_ns_tls();
}
int lttng_get_notify_socket(void *owner)
return info->notify_socket;
}
+
+/*
+ * Return the process name cached in the sock_info owner structure
+ * (populated via lttng_ust_getprocname() during app registration;
+ * kept for the statedump).
+ */
+LTTNG_HIDDEN
+char* lttng_ust_sockinfo_get_procname(void *owner)
+{
+ struct sock_info *info = owner;
+
+ return info->procname;
+}
+
static
void print_cmd(int cmd, int handle)
{
}
global_apps.allowed = 1;
+ lttng_ust_getprocname(global_apps.procname);
error:
return ret;
}
ret = -EIO;
goto end;
}
+
+ lttng_ust_getprocname(local_apps.procname);
end:
return ret;
}
&args, sock_info);
else
ret = -ENOSYS;
+ if (args.channel.wakeup_fd >= 0) {
+ int close_ret;
+
+ lttng_ust_lock_fd_tracker();
+ close_ret = close(args.channel.wakeup_fd);
+ lttng_ust_unlock_fd_tracker();
+ args.channel.wakeup_fd = -1;
+ if (close_ret)
+ PERROR("close");
+ }
+ free(args.channel.chan_data);
break;
}
case LTTNG_UST_STREAM:
{
+ int close_ret;
+
/* Receive shm_fd, wakeup_fd */
ret = ustcomm_recv_stream_from_sessiond(sock,
NULL,
&args, sock_info);
else
ret = -ENOSYS;
+ if (args.stream.shm_fd >= 0) {
+ lttng_ust_lock_fd_tracker();
+ close_ret = close(args.stream.shm_fd);
+ lttng_ust_unlock_fd_tracker();
+ args.stream.shm_fd = -1;
+ if (close_ret)
+ PERROR("close");
+ }
+ if (args.stream.wakeup_fd >= 0) {
+ lttng_ust_lock_fd_tracker();
+ close_ret = close(args.stream.wakeup_fd);
+ lttng_ust_unlock_fd_tracker();
+ args.stream.wakeup_fd = -1;
+ if (close_ret)
+ PERROR("close");
+ }
break;
}
case LTTNG_UST_CONTEXT:
}
sock_info->root_handle = -1;
}
- sock_info->registration_done = 0;
- sock_info->initial_statedump_done = 0;
+
/*
* wait_shm_mmap, socket and notify socket are used by listener
if (exiting)
return;
+ sock_info->registration_done = 0;
+ sock_info->initial_statedump_done = 0;
+
if (sock_info->socket != -1) {
ret = ustcomm_close_unix_sock(sock_info->socket);
if (ret) {
pid = fork();
URCU_TLS(lttng_ust_nest_count)--;
if (pid > 0) {
- int status;
+ int status, wait_ret;
/*
* Parent: wait for child to return, in which case the
* shared memory map will have been created.
*/
- pid = wait(&status);
- if (pid < 0 || !WIFEXITED(status) || WEXITSTATUS(status) != 0) {
+ wait_ret = waitpid(pid, &status, 0);
+ if (wait_ret < 0 || !WIFEXITED(status) || WEXITSTATUS(status) != 0) {
wait_shm_fd = -1;
goto end;
}
DBG("Waiting for %s apps sessiond", sock_info->name);
/* Wait for futex wakeup */
- if (uatomic_read((int32_t *) sock_info->wait_shm_mmap))
- goto end_wait;
-
- while (futex_async((int32_t *) sock_info->wait_shm_mmap,
- FUTEX_WAIT, 0, NULL, NULL, 0)) {
+ while (!uatomic_read((int32_t *) sock_info->wait_shm_mmap)) {
+ if (!futex_async((int32_t *) sock_info->wait_shm_mmap, FUTEX_WAIT, 0, NULL, NULL, 0)) {
+ /*
+ * Prior wakeups queued by unrelated code
+ * using the same address can cause futex wait to
+ * return 0 even though the futex value is still
+ * 0 (spurious wakeups). Check the value again
+ * in user-space to validate whether it really
+ * differs from 0.
+ */
+ continue;
+ }
switch (errno) {
- case EWOULDBLOCK:
+ case EAGAIN:
/* Value already changed. */
goto end_wait;
case EINTR:
/* Retry if interrupted by signal. */
- break; /* Get out of switch. */
+ break; /* Get out of switch. Check again. */
case EFAULT:
wait_poll_fallback = 1;
DBG(
lttng_ust_cleanup(1);
}
+/*
+ * Reset every cached namespace-identifier context (pid, cgroup, ipc,
+ * mnt, net, user, uts) so the values are re-read after the process's
+ * namespaces may have changed (fork child, setns, unshare).
+ */
+static
+void ust_context_ns_reset(void)
+{
+ lttng_context_pid_ns_reset();
+ lttng_context_cgroup_ns_reset();
+ lttng_context_ipc_ns_reset();
+ lttng_context_mnt_ns_reset();
+ lttng_context_net_ns_reset();
+ lttng_context_user_ns_reset();
+ lttng_context_uts_ns_reset();
+}
+
+/*
+ * Reset the cached virtual uid contexts (vuid/veuid/vsuid) so they are
+ * re-read after a credential-changing call (set*uid family).
+ */
+static
+void ust_context_vuids_reset(void)
+{
+ lttng_context_vuid_reset();
+ lttng_context_veuid_reset();
+ lttng_context_vsuid_reset();
+}
+
+/*
+ * Reset the cached virtual gid contexts (vgid/vegid/vsgid) so they are
+ * re-read after a credential-changing call (set*gid family).
+ */
+static
+void ust_context_vgids_reset(void)
+{
+ lttng_context_vgid_reset();
+ lttng_context_vegid_reset();
+ lttng_context_vsgid_reset();
+}
+
/*
* We exclude the worker threads across fork and clone (except
* CLONE_VM), because these system calls only keep the forking thread
lttng_context_vpid_reset();
lttng_context_vtid_reset();
lttng_context_procname_reset();
+ ust_context_ns_reset();
+ ust_context_vuids_reset();
+ ust_context_vgids_reset();
DBG("process %d", getpid());
/* Release urcu mutexes */
urcu_bp_after_fork_child();
lttng_ust_init();
}
+/*
+ * Hook invoked after setns(2): namespaces and credentials may have
+ * changed, so drop all cached ns/uid/gid context values.
+ */
+void ust_after_setns(void)
+{
+ ust_context_ns_reset();
+ ust_context_vuids_reset();
+ ust_context_vgids_reset();
+}
+
+/*
+ * Hook invoked after unshare(2): namespaces and credentials may have
+ * changed, so drop all cached ns/uid/gid context values.
+ */
+void ust_after_unshare(void)
+{
+ ust_context_ns_reset();
+ ust_context_vuids_reset();
+ ust_context_vgids_reset();
+}
+
+/* Hook invoked after setuid(2): invalidate cached virtual uid contexts. */
+void ust_after_setuid(void)
+{
+ ust_context_vuids_reset();
+}
+
+/* Hook invoked after seteuid(2): invalidate cached virtual uid contexts. */
+void ust_after_seteuid(void)
+{
+ ust_context_vuids_reset();
+}
+
+/* Hook invoked after setreuid(2): invalidate cached virtual uid contexts. */
+void ust_after_setreuid(void)
+{
+ ust_context_vuids_reset();
+}
+
+/* Hook invoked after setresuid(2): invalidate cached virtual uid contexts. */
+void ust_after_setresuid(void)
+{
+ ust_context_vuids_reset();
+}
+
+/* Hook invoked after setgid(2): invalidate cached virtual gid contexts. */
+void ust_after_setgid(void)
+{
+ ust_context_vgids_reset();
+}
+
+/* Hook invoked after setegid(2): invalidate cached virtual gid contexts. */
+void ust_after_setegid(void)
+{
+ ust_context_vgids_reset();
+}
+
+/* Hook invoked after setregid(2): invalidate cached virtual gid contexts. */
+void ust_after_setregid(void)
+{
+ ust_context_vgids_reset();
+}
+
+/* Hook invoked after setresgid(2): invalidate cached virtual gid contexts. */
+void ust_after_setresgid(void)
+{
+ ust_context_vgids_reset();
+}
+
void lttng_ust_sockinfo_session_enabled(void *owner)
{
struct sock_info *sock_info = owner;