Add procname to lttng_ust_statedump information
diff --git a/liblttng-ust/lttng-ust-comm.c b/liblttng-ust/lttng-ust-comm.c
index a09f676a42978146fd8d7d12b1e2638738c37334..9d0c010d502079a164e453cb67778f3a547cc2d1 100644
@@ -61,7 +61,7 @@
 #include "getenv.h"
 
 /* Concatenate lttng ust shared library name with its major version number. */
-#define LTTNG_UST_LIB_SO_NAME "liblttng-ust.so." LTTNG_UST_LIBRARY_VERSION_MAJOR
+#define LTTNG_UST_LIB_SO_NAME "liblttng-ust.so." __ust_stringify(CONFIG_LTTNG_UST_LIBRARY_VERSION_MAJOR)
 
 /*
  * Has lttng ust comm constructor been called ?
@@ -86,6 +86,8 @@ static int initialized;
  *
  * ust_lock nests within the dynamic loader lock (within glibc) because
  * it is taken within the library constructor.
+ *
+ * The ust fd tracker lock nests within the ust_mutex.
  */
 static pthread_mutex_t ust_mutex = PTHREAD_MUTEX_INITIALIZER;
 
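The comment above encodes a strict lock hierarchy: the glibc dynamic loader lock is outermost, ust_mutex nests inside it, and the newly documented fd tracker lock nests inside ust_mutex. A minimal standalone sketch of that acquire/release discipline (the mutex names are illustrative stand-ins, not the real LTTng-UST symbols):

```c
#include <pthread.h>

/* Illustrative stand-ins for the three locks named in the comment. */
static pthread_mutex_t dl_lock = PTHREAD_MUTEX_INITIALIZER;          /* dynamic loader (glibc) */
static pthread_mutex_t ust_mutex_sketch = PTHREAD_MUTEX_INITIALIZER; /* ust_mutex */
static pthread_mutex_t fd_tracker_lock = PTHREAD_MUTEX_INITIALIZER;  /* ust fd tracker */

int main(void)
{
	/* Acquire in the documented order: loader, then ust, then fd tracker... */
	pthread_mutex_lock(&dl_lock);
	pthread_mutex_lock(&ust_mutex_sketch);
	pthread_mutex_lock(&fd_tracker_lock);
	/* ...and release in reverse order. Any thread taking them in a
	 * different order could deadlock against the constructor path. */
	pthread_mutex_unlock(&fd_tracker_lock);
	pthread_mutex_unlock(&ust_mutex_sketch);
	pthread_mutex_unlock(&dl_lock);
	return 0;
}
```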
@@ -228,7 +230,11 @@ static sem_t constructor_wait;
 /*
  * Doing this for both the global and local sessiond.
  */
-static int sem_count = { 2 };
+enum {
+       sem_count_initial_value = 4,
+};
+
+static int sem_count = sem_count_initial_value;
 
 /*
  * Counting nesting within lttng-ust. Used to ensure that calling fork()
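Why 4: each of the two session daemons (global and per-user) must now report two completion events before the constructor is released, "registration done" and "initial statedump done", instead of one. A worked restatement of the arithmetic (the enumerator names here are hypothetical):

```c
/* Hypothetical derivation of sem_count_initial_value: */
enum {
	nr_sessiond = 2,         /* global + per-user (local) daemon */
	events_per_sessiond = 2, /* registration done + initial statedump done */
	derived_initial_value = nr_sessiond * events_per_sessiond, /* == 4 */
};
```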
@@ -243,7 +249,7 @@ struct sock_info {
        const char *name;
        pthread_t ust_listener; /* listener thread */
        int root_handle;
-       int constructor_sem_posted;
+       int registration_done;
        int allowed;
        int global;
        int thread_active;
@@ -256,6 +262,9 @@ struct sock_info {
        char *wait_shm_mmap;
        /* Keep track of lazy state dump not performed yet. */
        int statedump_pending;
+       int initial_statedump_done;
+       /* Keep procname for statedump */
+       char procname[LTTNG_UST_PROCNAME_LEN];
 };
 
 /* Socket from app (connect) to session daemon (listen) for communication */
@@ -264,7 +273,8 @@ struct sock_info global_apps = {
        .global = 1,
 
        .root_handle = -1,
-       .allowed = 1,
+       .registration_done = 0,
+       .allowed = 0,
        .thread_active = 0,
 
        .sock_path = LTTNG_DEFAULT_RUNDIR "/" LTTNG_UST_SOCK_FILENAME,
@@ -274,6 +284,8 @@ struct sock_info global_apps = {
        .wait_shm_path = "/" LTTNG_UST_WAIT_FILENAME,
 
        .statedump_pending = 0,
+       .initial_statedump_done = 0,
+       .procname[0] = '\0'
 };
 
 /* TODO: allow global_apps_sock_path override */
@@ -282,6 +294,7 @@ struct sock_info local_apps = {
        .name = "local",
        .global = 0,
        .root_handle = -1,
+       .registration_done = 0,
        .allowed = 0,   /* Check setuid bit first */
        .thread_active = 0,
 
@@ -289,6 +302,8 @@ struct sock_info local_apps = {
        .notify_socket = -1,
 
        .statedump_pending = 0,
+       .initial_statedump_done = 0,
+       .procname[0] = '\0'
 };
 
 static int wait_poll_fallback;
@@ -343,6 +358,8 @@ extern void lttng_ring_buffer_client_discard_exit(void);
 extern void lttng_ring_buffer_client_discard_rt_exit(void);
 extern void lttng_ring_buffer_metadata_client_exit(void);
 
+static char *get_map_shm(struct sock_info *sock_info);
+
 ssize_t lttng_ust_read(int fd, void *buf, size_t len)
 {
        ssize_t ret;
@@ -410,7 +427,12 @@ void lttng_ust_fixup_tls(void)
        lttng_fixup_nest_count_tls();
        lttng_fixup_procname_tls();
        lttng_fixup_ust_mutex_nest_tls();
+       lttng_ust_fixup_perf_counter_tls();
        lttng_ust_fixup_fd_tracker_tls();
+       lttng_fixup_cgroup_ns_tls();
+       lttng_fixup_ipc_ns_tls();
+       lttng_fixup_net_ns_tls();
+       lttng_fixup_uts_ns_tls();
 }
 
 int lttng_get_notify_socket(void *owner)
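lttng_ust_fixup_tls() now also prefaults the perf-counter and namespace-context TLS variables. Each fixup forces the thread-local slot to be allocated eagerly, so that its first access never happens somewhere an implicit allocation by the dynamic linker would be unsafe (signal handlers, instrumented code running under the ust lock). A hedged sketch of the pattern with a made-up variable:

```c
/* Hypothetical TLS fixup, mirroring the lttng_fixup_*_tls() pattern. */
static __thread int cached_ns_sketch;

static void fixup_cached_ns_tls(void)
{
	/* The "m" constraint forces the compiler to materialize the TLS
	 * slot here without the access being optimized away. */
	asm volatile ("" : : "m" (cached_ns_sketch));
}
```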
@@ -420,6 +442,15 @@ int lttng_get_notify_socket(void *owner)
        return info->notify_socket;
 }
 
+
+LTTNG_HIDDEN
+char* lttng_ust_sockinfo_get_procname(void *owner)
+{
+       struct sock_info *info = owner;
+
+       return info->procname;
+}
+
 static
 void print_cmd(int cmd, int handle)
 {
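The new procname field is filled once at setup time (see setup_global_apps()/setup_local_apps() below) and exposed through lttng_ust_sockinfo_get_procname(), keyed on the opaque owner pointer identifying the sock_info. A hedged sketch of a consumer, as the statedump code might use it (the emission step is illustrative, not an API defined by this patch):

```c
extern char *lttng_ust_sockinfo_get_procname(void *owner);

/* Hypothetical statedump helper: 'owner' is the struct sock_info the
 * listener thread is servicing. */
static void statedump_record_procname(void *owner)
{
	char *procname = lttng_ust_sockinfo_get_procname(owner);

	/* Illustrative: include procname in the statedump event payload,
	 * so the statedump carries the name captured at constructor time. */
	(void) procname;
}
```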
@@ -434,25 +465,49 @@ void print_cmd(int cmd, int handle)
                lttng_ust_obj_get_name(handle), handle);
 }
 
+static
+int setup_global_apps(void)
+{
+       int ret = 0;
+       assert(!global_apps.wait_shm_mmap);
+
+       global_apps.wait_shm_mmap = get_map_shm(&global_apps);
+       if (!global_apps.wait_shm_mmap) {
+               WARN("Unable to get map shm for global apps. Disabling LTTng-UST global tracing.");
+               global_apps.allowed = 0;
+               ret = -EIO;
+               goto error;
+       }
+
+       global_apps.allowed = 1;
+       lttng_ust_getprocname(global_apps.procname);
+error:
+       return ret;
+}
 static
 int setup_local_apps(void)
 {
+       int ret = 0;
        const char *home_dir;
        uid_t uid;
 
+       assert(!local_apps.wait_shm_mmap);
+
        uid = getuid();
        /*
         * Disallow per-user tracing for setuid binaries.
         */
        if (uid != geteuid()) {
                assert(local_apps.allowed == 0);
-               return 0;
+               ret = 0;
+               goto end;
        }
        home_dir = get_lttng_home_dir();
        if (!home_dir) {
                WARN("HOME environment variable not set. Disabling LTTng-UST per-user tracing.");
                assert(local_apps.allowed == 0);
-               return -ENOENT;
+               ret = -ENOENT;
+               goto end;
        }
        local_apps.allowed = 1;
        snprintf(local_apps.sock_path, PATH_MAX, "%s/%s/%s",
@@ -462,7 +517,18 @@ int setup_local_apps(void)
        snprintf(local_apps.wait_shm_path, PATH_MAX, "/%s-%u",
                LTTNG_UST_WAIT_FILENAME,
                uid);
-       return 0;
+
+       local_apps.wait_shm_mmap = get_map_shm(&local_apps);
+       if (!local_apps.wait_shm_mmap) {
+               WARN("Unable to get map shm for local apps. Disabling LTTng-UST per-user tracing.");
+               local_apps.allowed = 0;
+               ret = -EIO;
+               goto end;
+       }
+
+       lttng_ust_getprocname(local_apps.procname);
+end:
+       return ret;
 }
 
 /*
@@ -587,45 +653,85 @@ int send_reply(int sock, struct ustcomm_ust_reply *lur)
 }
 
 static
-int handle_register_done(struct sock_info *sock_info)
+void decrement_sem_count(unsigned int count)
 {
        int ret;
 
-       if (sock_info->constructor_sem_posted)
-               return 0;
-       sock_info->constructor_sem_posted = 1;
+       assert(uatomic_read(&sem_count) >= count);
+
        if (uatomic_read(&sem_count) <= 0) {
-               return 0;
+               return;
        }
-       ret = uatomic_add_return(&sem_count, -1);
+
+       ret = uatomic_add_return(&sem_count, -count);
        if (ret == 0) {
                ret = sem_post(&constructor_wait);
                assert(!ret);
        }
+}
+
+static
+int handle_register_done(struct sock_info *sock_info)
+{
+       if (sock_info->registration_done)
+               return 0;
+       sock_info->registration_done = 1;
+
+       decrement_sem_count(1);
+       if (!sock_info->statedump_pending) {
+               sock_info->initial_statedump_done = 1;
+               decrement_sem_count(1);
+       }
+
+       return 0;
+}
+
+static
+int handle_register_failed(struct sock_info *sock_info)
+{
+       if (sock_info->registration_done)
+               return 0;
+       sock_info->registration_done = 1;
+       sock_info->initial_statedump_done = 1;
+
+       decrement_sem_count(2);
+
        return 0;
 }
 
 /*
  * Only execute pending statedump after the constructor semaphore has
- * been posted by each listener thread. This means statedump will only
- * be performed after the "registration done" command is received from
- * each session daemon the application is connected to.
+ * been posted by the current listener thread. This means statedump will
+ * only be performed after the "registration done" command is received
+ * from this thread's session daemon.
  *
  * This ensures we don't run into deadlock issues with the dynamic
  * loader mutex, which is held while the constructor is called and
  * waiting on the constructor semaphore. All operations requiring this
  * dynamic loader lock need to be postponed using this mechanism.
+ *
+ * In a scenario with two session daemons connected to the application,
+ * it is possible that the first listener thread which receives the
+ * registration done command issues its statedump while the dynamic
+ * loader lock is still held by the application constructor waiting on
+ * the semaphore. It will however be allowed to proceed when the
+ * second session daemon sends the registration done command to the
+ * second listener thread. This situation therefore does not produce
+ * a deadlock.
  */
 static
 void handle_pending_statedump(struct sock_info *sock_info)
 {
-       int ctor_passed = sock_info->constructor_sem_posted;
-
-       if (ctor_passed && sock_info->statedump_pending) {
+       if (sock_info->registration_done && sock_info->statedump_pending) {
                sock_info->statedump_pending = 0;
                pthread_mutex_lock(&ust_fork_mutex);
                lttng_handle_pending_statedump(sock_info);
                pthread_mutex_unlock(&ust_fork_mutex);
+
+               if (!sock_info->initial_statedump_done) {
+                       sock_info->initial_statedump_done = 1;
+                       decrement_sem_count(1);
+               }
        }
 }
 
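Putting the accounting together: per socket, one semaphore credit is retired when "registration done" arrives, and the second when the initial statedump completes, either immediately in handle_register_done() when nothing is pending, or later in handle_pending_statedump(). The failure path retires both at once. A compact, runnable simulation of one daemon's lifecycle (initial count 2 for a single daemon), assuming a statedump was pending:

```c
#include <assert.h>
#include <stdbool.h>

static int sem_count = 2; /* one daemon: registration + initial statedump */
static bool registration_done;
static bool statedump_pending = true;
static bool initial_statedump_done;

static void on_registration_done(void)
{
	if (registration_done)
		return;
	registration_done = true;
	sem_count--;                     /* credit 1: registration done */
	if (!statedump_pending) {
		initial_statedump_done = true;
		sem_count--;             /* credit 2: no statedump needed */
	}
}

static void on_statedump_complete(void)
{
	if (registration_done && !initial_statedump_done) {
		initial_statedump_done = true;
		sem_count--;             /* credit 2: statedump finished */
	}
}

int main(void)
{
	on_registration_done();          /* consumes only the first credit */
	on_statedump_complete();         /* consumes the second */
	assert(sem_count == 0);          /* constructor may now proceed */
	return 0;
}
```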
@@ -843,12 +949,13 @@ int handle_message(struct sock_info *sock_info,
        {
                /* Receive shm_fd, wakeup_fd */
                ret = ustcomm_recv_stream_from_sessiond(sock,
-                       &lum->u.stream.len,
+                       NULL,
                        &args.stream.shm_fd,
                        &args.stream.wakeup_fd);
                if (ret) {
                        goto error;
                }
+
                if (ops->cmd)
                        ret = ops->cmd(lum->handle, lum->cmd,
                                        (unsigned long) &lum->u,
@@ -1034,7 +1141,8 @@ void cleanup_sock_info(struct sock_info *sock_info, int exiting)
                }
                sock_info->root_handle = -1;
        }
-       sock_info->constructor_sem_posted = 0;
+       sock_info->registration_done = 0;
+       sock_info->initial_statedump_done = 0;
 
        /*
         * wait_shm_mmap, socket and notify socket are used by listener
@@ -1305,19 +1413,17 @@ error:
 static
 void wait_for_sessiond(struct sock_info *sock_info)
 {
+       /* Use ust_lock to check if we should quit. */
        if (ust_lock()) {
                goto quit;
        }
        if (wait_poll_fallback) {
                goto error;
        }
-       if (!sock_info->wait_shm_mmap) {
-               sock_info->wait_shm_mmap = get_map_shm(sock_info);
-               if (!sock_info->wait_shm_mmap)
-                       goto error;
-       }
        ust_unlock();
 
+       assert(sock_info->wait_shm_mmap);
+
        DBG("Waiting for %s apps sessiond", sock_info->name);
        /* Wait for futex wakeup */
        if (uatomic_read((int32_t *) sock_info->wait_shm_mmap))
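With the shared-memory page now mapped eagerly by setup_global_apps()/setup_local_apps(), the lazy get_map_shm() call here becomes an assertion. The wait itself is the usual futex idiom: sleep only while the word in the wait page is still 0, rechecking after every wakeup. A hedged, Linux-specific sketch of that idiom (not the exact LTTng-UST code, which goes through its own futex helpers):

```c
#define _GNU_SOURCE
#include <linux/futex.h>
#include <stdint.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Block until the session daemon flips *wait_shm to non-zero. */
static void wait_for_sessiond_sketch(int32_t *wait_shm)
{
	while (__atomic_load_n(wait_shm, __ATOMIC_SEQ_CST) == 0) {
		/* FUTEX_WAIT returns immediately unless *wait_shm still
		 * equals 0; spurious wakeups and EINTR make the loop
		 * re-check the condition. */
		syscall(SYS_futex, wait_shm, FUTEX_WAIT, 0, NULL, NULL, 0);
	}
}
```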
@@ -1444,7 +1550,7 @@ restart:
                 * If we cannot find the sessiond daemon, don't delay
                 * constructor execution.
                 */
-               ret = handle_register_done(sock_info);
+               ret = handle_register_failed(sock_info);
                assert(!ret);
                ust_unlock();
                goto restart;
@@ -1498,7 +1604,7 @@ restart:
                 * If we cannot register to the sessiond daemon, don't
                 * delay constructor execution.
                 */
-               ret = handle_register_done(sock_info);
+               ret = handle_register_failed(sock_info);
                assert(!ret);
                ust_unlock();
                goto restart;
@@ -1527,7 +1633,7 @@ restart:
                 * If we cannot find the sessiond daemon, don't delay
                 * constructor execution.
                 */
-               ret = handle_register_done(sock_info);
+               ret = handle_register_failed(sock_info);
                assert(!ret);
                ust_unlock();
                goto restart;
@@ -1591,7 +1697,7 @@ restart:
                 * If we cannot register to the sessiond daemon, don't
                 * delay constructor execution.
                 */
-               ret = handle_register_done(sock_info);
+               ret = handle_register_failed(sock_info);
                assert(!ret);
                ust_unlock();
                goto restart;
@@ -1621,7 +1727,7 @@ restart:
                         * If we cannot register to the sessiond daemon, don't
                         * delay constructor execution.
                         */
-                       ret = handle_register_done(sock_info);
+                       ret = handle_register_failed(sock_info);
                        assert(!ret);
                        ust_unlock();
                        goto end;
@@ -1756,8 +1862,15 @@ void __attribute__((constructor)) lttng_ust_init(void)
                PERROR("sem_init");
        }
 
+       ret = setup_global_apps();
+       if (ret) {
+               assert(global_apps.allowed == 0);
+               DBG("global apps setup returned %d", ret);
+       }
+
        ret = setup_local_apps();
        if (ret) {
+               assert(local_apps.allowed == 0);
                DBG("local apps setup returned %d", ret);
        }
 
@@ -1781,14 +1894,18 @@ void __attribute__((constructor)) lttng_ust_init(void)
                ERR("pthread_attr_setdetachstate: %s", strerror(ret));
        }
 
-       pthread_mutex_lock(&ust_exit_mutex);
-       ret = pthread_create(&global_apps.ust_listener, &thread_attr,
-                       ust_listener_thread, &global_apps);
-       if (ret) {
-               ERR("pthread_create global: %s", strerror(ret));
+       if (global_apps.allowed) {
+               pthread_mutex_lock(&ust_exit_mutex);
+               ret = pthread_create(&global_apps.ust_listener, &thread_attr,
+                               ust_listener_thread, &global_apps);
+               if (ret) {
+                       ERR("pthread_create global: %s", strerror(ret));
+               }
+               global_apps.thread_active = 1;
+               pthread_mutex_unlock(&ust_exit_mutex);
+       } else {
+               handle_register_done(&global_apps);
        }
-       global_apps.thread_active = 1;
-       pthread_mutex_unlock(&ust_exit_mutex);
 
        if (local_apps.allowed) {
                pthread_mutex_lock(&ust_exit_mutex);
@@ -1859,6 +1976,7 @@ void lttng_ust_cleanup(int exiting)
        cleanup_sock_info(&global_apps, exiting);
        cleanup_sock_info(&local_apps, exiting);
        local_apps.allowed = 0;
+       global_apps.allowed = 0;
        /*
         * The teardown in this function all affect data structures
         * accessed under the UST lock by the listener thread. This
@@ -1878,7 +1996,7 @@ void lttng_ust_cleanup(int exiting)
        exit_tracepoint();
        if (!exiting) {
                /* Reinitialize values for fork */
-               sem_count = 2;
+               sem_count = sem_count_initial_value;
                lttng_ust_comm_should_quit = 0;
                initialized = 0;
        }
@@ -1936,6 +2054,34 @@ void __attribute__((destructor)) lttng_ust_exit(void)
        lttng_ust_cleanup(1);
 }
 
+static
+void ust_context_ns_reset(void)
+{
+       lttng_context_pid_ns_reset();
+       lttng_context_cgroup_ns_reset();
+       lttng_context_ipc_ns_reset();
+       lttng_context_mnt_ns_reset();
+       lttng_context_net_ns_reset();
+       lttng_context_user_ns_reset();
+       lttng_context_uts_ns_reset();
+}
+
+static
+void ust_context_vuids_reset(void)
+{
+       lttng_context_vuid_reset();
+       lttng_context_veuid_reset();
+       lttng_context_vsuid_reset();
+}
+
+static
+void ust_context_vgids_reset(void)
+{
+       lttng_context_vgid_reset();
+       lttng_context_vegid_reset();
+       lttng_context_vsgid_reset();
+}
+
 /*
  * We exclude the worker threads across fork and clone (except
  * CLONE_VM), because these system calls only keep the forking thread
@@ -1970,6 +2116,8 @@ void ust_before_fork(sigset_t *save_sigset)
 
        ust_lock_nocheck();
        urcu_bp_before_fork();
+       lttng_ust_lock_fd_tracker();
+       lttng_perf_lock();
 }
 
 static void ust_after_fork_common(sigset_t *restore_sigset)
@@ -1977,6 +2125,8 @@ static void ust_after_fork_common(sigset_t *restore_sigset)
        int ret;
 
        DBG("process %d", getpid());
+       lttng_perf_unlock();
+       lttng_ust_unlock_fd_tracker();
        ust_unlock();
 
        pthread_mutex_unlock(&ust_fork_mutex);
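ust_before_fork()/ust_after_fork_common() extend the set of locks held across fork() to include the fd tracker and perf locks: everything the child might need must be acquired before the fork and released on both sides, in reverse order of acquisition. A standalone sketch of the pattern via pthread_atfork() (LTTng-UST drives these hooks from its own fork wrapper rather than atfork handlers):

```c
#include <pthread.h>
#include <unistd.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER; /* e.g. ust_mutex */
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER; /* e.g. fd tracker */

static void before_fork(void)  /* parent, immediately before fork() */
{
	pthread_mutex_lock(&lock_a);
	pthread_mutex_lock(&lock_b);
}

static void after_fork(void)   /* parent and child, after fork() */
{
	/* Release in reverse order; the child inherits both locks in a
	 * known state instead of copied mid-critical-section. */
	pthread_mutex_unlock(&lock_b);
	pthread_mutex_unlock(&lock_a);
}

int main(void)
{
	pthread_atfork(before_fork, after_fork, after_fork);
	if (fork() == 0)
		_exit(0);
	return 0;
}
```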
@@ -2014,6 +2164,9 @@ void ust_after_fork_child(sigset_t *restore_sigset)
        lttng_context_vpid_reset();
        lttng_context_vtid_reset();
        lttng_context_procname_reset();
+       ust_context_ns_reset();
+       ust_context_vuids_reset();
+       ust_context_vgids_reset();
        DBG("process %d", getpid());
        /* Release urcu mutexes */
        urcu_bp_after_fork_child();
@@ -2023,6 +2176,60 @@ void ust_after_fork_child(sigset_t *restore_sigset)
        lttng_ust_init();
 }
 
+void ust_after_setns(void)
+{
+       ust_context_ns_reset();
+       ust_context_vuids_reset();
+       ust_context_vgids_reset();
+}
+
+void ust_after_unshare(void)
+{
+       ust_context_ns_reset();
+       ust_context_vuids_reset();
+       ust_context_vgids_reset();
+}
+
+void ust_after_setuid(void)
+{
+       ust_context_vuids_reset();
+}
+
+void ust_after_seteuid(void)
+{
+       ust_context_vuids_reset();
+}
+
+void ust_after_setreuid(void)
+{
+       ust_context_vuids_reset();
+}
+
+void ust_after_setresuid(void)
+{
+       ust_context_vuids_reset();
+}
+
+void ust_after_setgid(void)
+{
+       ust_context_vgids_reset();
+}
+
+void ust_after_setegid(void)
+{
+       ust_context_vgids_reset();
+}
+
+void ust_after_setregid(void)
+{
+       ust_context_vgids_reset();
+}
+
+void ust_after_setresgid(void)
+{
+       ust_context_vgids_reset();
+}
+
 void lttng_ust_sockinfo_session_enabled(void *owner)
 {
        struct sock_info *sock_info = owner;
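The ust_after_setns()/ust_after_unshare()/ust_after_set*id() hooks exist so a wrapper library can invalidate the cached namespace and virtual uid/gid contexts the moment the corresponding syscall succeeds; the next recorded event then re-reads the values instead of emitting stale ones. A hedged sketch of such a wrapper, assuming LD_PRELOAD-style dlsym interposition (the wrapper itself is not part of this file):

```c
#define _GNU_SOURCE
#include <dlfcn.h>
#include <sched.h>

extern void ust_after_setns(void);

/* Hypothetical interposed setns(): forward to libc, then drop the
 * cached ns contexts so they are re-read on the next event. */
int setns(int fd, int nstype)
{
	static int (*real_setns)(int, int);
	int ret;

	if (!real_setns)
		real_setns = (int (*)(int, int)) dlsym(RTLD_NEXT, "setns");
	ret = real_setns(fd, nstype);
	if (!ret)
		ust_after_setns();
	return ret;
}
```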