Fix: Value stored to 'has_waited' is never read
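No commit message body is shown on this page, so a short note on the titled fix: the hunk at @@ -1095,8 +1225,9 @@ moves the 'has_waited = 1' assignment into an else branch of the retry back-off, removing a store to 'has_waited' whose value could never be read (the warning in the title). A minimal sketch of the resulting pattern follows, with the listener thread's retry logic reduced to a standalone helper; the helper name and its int-pointer parameters are illustrative, not the real surrounding code:

#include <unistd.h>

/*
 * Sketch of the retry back-off around 'has_waited', simplified from the
 * hunk's context lines (the real code sits in the listener thread's
 * 'restart:' path and carries more state).
 */
void retry_backoff(int *has_waited, int *prev_connect_failed)
{
	if (*prev_connect_failed) {
		if (*has_waited) {
			/*
			 * Sleep for 5 seconds before retrying after a
			 * sequence of failure / wait / failure.  This
			 * deals with a killed or broken session daemon.
			 */
			sleep(5);
		} else {
			/*
			 * Set the flag only when we did not just sleep.
			 * Before the fix this assignment sat unconditionally
			 * after the if block; the analyzer reported a store
			 * to 'has_waited' that was never read, and moving
			 * the assignment here resolves it.
			 */
			*has_waited = 1;
		}
		*prev_connect_failed = 0;
	}
}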
lttng-ust.git: liblttng-ust/lttng-ust-comm.c
index 0c96f012f528fd4929581107e45ba9314430a92e..a7398286104d75a776124181549c33d9bc40e02c 100644
--- a/liblttng-ust/lttng-ust-comm.c
+++ b/liblttng-ust/lttng-ust-comm.c
@@ -34,7 +34,6 @@
 #include <time.h>
 #include <assert.h>
 #include <signal.h>
-#include <dlfcn.h>
 #include <urcu/uatomic.h>
 #include <urcu/futex.h>
 #include <urcu/compiler.h>
@@ -53,6 +52,7 @@
 #include "compat.h"
 #include "../libringbuffer/tlsfixup.h"
 #include "lttng-ust-baddr.h"
+#include "getenv.h"
 
 /*
  * Has lttng ust comm constructor been called ?
@@ -68,9 +68,21 @@ static int initialized;
  * probe registration.
  *
  * ust_exit_mutex must never nest in ust_mutex.
+ *
+ * ust_fork_mutex must never nest in ust_mutex.
+ *
+ * ust_mutex_nest is a per-thread nesting counter, allowing the perf
+ * counter lazy initialization called by events within the statedump,
+ * which traces while the ust_mutex is held.
+ *
+ * ust_lock nests within the dynamic loader lock (within glibc) because
+ * it is taken within the library constructor.
  */
 static pthread_mutex_t ust_mutex = PTHREAD_MUTEX_INITIALIZER;
 
+/* Allow nesting the ust_mutex within the same thread. */
+static DEFINE_URCU_TLS(int, ust_mutex_nest);
+
 /*
  * ust_exit_mutex protects thread_active variable wrt thread exit. It
  * cannot be done by ust_mutex because pthread_cancel(), which takes an
@@ -80,16 +92,45 @@ static pthread_mutex_t ust_mutex = PTHREAD_MUTEX_INITIALIZER;
  */
 static pthread_mutex_t ust_exit_mutex = PTHREAD_MUTEX_INITIALIZER;
 
+/*
+ * ust_fork_mutex protects base address statedump tracing against forks. It
+ * prevents the dynamic loader lock from being taken (by base address
+ * statedump tracing) while a fork is happening, avoiding deadlocks on the
+ * dynamic loader lock.
+ */
+static pthread_mutex_t ust_fork_mutex = PTHREAD_MUTEX_INITIALIZER;
+
 /* Should the ust comm thread quit ? */
 static int lttng_ust_comm_should_quit;
 
 /*
- * Return 0 on success, -1 if should quilt.
+ * Return 0 on success, -1 if should quit.
  * The lock is taken in both cases.
+ * Signal-safe.
  */
 int ust_lock(void)
 {
-       pthread_mutex_lock(&ust_mutex);
+       sigset_t sig_all_blocked, orig_mask;
+       int ret, oldstate;
+
+       ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
+       if (ret) {
+               ERR("pthread_setcancelstate: %s", strerror(ret));
+       }
+       if (oldstate != PTHREAD_CANCEL_ENABLE) {
+               ERR("pthread_setcancelstate: unexpected oldstate");
+       }
+       sigfillset(&sig_all_blocked);
+       ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
+       if (ret) {
+               ERR("pthread_sigmask: %s", strerror(ret));
+       }
+       if (!URCU_TLS(ust_mutex_nest)++)
+               pthread_mutex_lock(&ust_mutex);
+       ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
+       if (ret) {
+               ERR("pthread_sigmask: %s", strerror(ret));
+       }
        if (lttng_ust_comm_should_quit) {
                return -1;
        } else {
@@ -101,15 +142,59 @@ int ust_lock(void)
  * ust_lock_nocheck() can be used in constructors/destructors, because
  * they are already nested within the dynamic loader lock, and therefore
  * have exclusive access against execution of liblttng-ust destructor.
+ * Signal-safe.
  */
 void ust_lock_nocheck(void)
 {
-       pthread_mutex_lock(&ust_mutex);
+       sigset_t sig_all_blocked, orig_mask;
+       int ret, oldstate;
+
+       ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
+       if (ret) {
+               ERR("pthread_setcancelstate: %s", strerror(ret));
+       }
+       if (oldstate != PTHREAD_CANCEL_ENABLE) {
+               ERR("pthread_setcancelstate: unexpected oldstate");
+       }
+       sigfillset(&sig_all_blocked);
+       ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
+       if (ret) {
+               ERR("pthread_sigmask: %s", strerror(ret));
+       }
+       if (!URCU_TLS(ust_mutex_nest)++)
+               pthread_mutex_lock(&ust_mutex);
+       ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
+       if (ret) {
+               ERR("pthread_sigmask: %s", strerror(ret));
+       }
 }
 
+/*
+ * Signal-safe.
+ */
 void ust_unlock(void)
 {
-       pthread_mutex_unlock(&ust_mutex);
+       sigset_t sig_all_blocked, orig_mask;
+       int ret, oldstate;
+
+       sigfillset(&sig_all_blocked);
+       ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
+       if (ret) {
+               ERR("pthread_sigmask: %s", strerror(ret));
+       }
+       if (!--URCU_TLS(ust_mutex_nest))
+               pthread_mutex_unlock(&ust_mutex);
+       ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
+       if (ret) {
+               ERR("pthread_sigmask: %s", strerror(ret));
+       }
+       ret = pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &oldstate);
+       if (ret) {
+               ERR("pthread_setcancelstate: %s", strerror(ret));
+       }
+       if (oldstate != PTHREAD_CANCEL_DISABLE) {
+               ERR("pthread_setcancelstate: unexpected oldstate");
+       }
 }
 
 /*
@@ -250,11 +335,11 @@ const char *get_lttng_home_dir(void)
 {
        const char *val;
 
-       val = (const char *) getenv("LTTNG_HOME");
+       val = (const char *) lttng_secure_getenv("LTTNG_HOME");
        if (val != NULL) {
                return val;
        }
-       return (const char *) getenv("HOME");
+       return (const char *) lttng_secure_getenv("HOME");
 }
 
 /*
@@ -266,6 +351,22 @@ void lttng_fixup_nest_count_tls(void)
        asm volatile ("" : : "m" (URCU_TLS(lttng_ust_nest_count)));
 }
 
+static
+void lttng_fixup_ust_mutex_nest_tls(void)
+{
+       asm volatile ("" : : "m" (URCU_TLS(ust_mutex_nest)));
+}
+
+/*
+ * Fixup urcu bp TLS.
+ */
+static
+void lttng_fixup_urcu_bp_tls(void)
+{
+       rcu_read_lock();
+       rcu_read_unlock();
+}
+
 int lttng_get_notify_socket(void *owner)
 {
        struct sock_info *info = owner;
@@ -320,7 +421,7 @@ int setup_local_apps(void)
 
 /*
  * Get notify_sock timeout, in ms.
- * -1: don't wait. 0: wait forever. >0: timeout, in ms.
+ * -1: wait forever. 0: don't wait. >0: timeout, in ms.
  */
 static
 long get_timeout(void)
@@ -343,7 +444,7 @@ long get_notify_sock_timeout(void)
 }
 
 /*
- * Return values: -1: don't wait. 0: wait forever. 1: timeout wait.
+ * Return values: -1: wait forever. 0: don't wait. 1: timeout wait.
  */
 static
 int get_constructor_timeout(struct timespec *constructor_timeout)
@@ -366,7 +467,8 @@ int get_constructor_timeout(struct timespec *constructor_timeout)
         */
        ret = clock_gettime(CLOCK_REALTIME, constructor_timeout);
        if (ret) {
-               return -1;
+               /* Don't wait. */
+               return 0;
        }
        constructor_timeout->tv_sec += constructor_delay_ms / 1000UL;
        constructor_timeout->tv_nsec +=
@@ -375,6 +477,7 @@ int get_constructor_timeout(struct timespec *constructor_timeout)
                constructor_timeout->tv_sec++;
                constructor_timeout->tv_nsec -= 1000000000UL;
        }
+       /* Timeout wait (constructor_delay_ms). */
        return 1;
 }
 
@@ -450,7 +553,9 @@ void handle_pending_statedump(struct sock_info *sock_info)
 
        if (ctor_passed && sock_info->statedump_pending) {
                sock_info->statedump_pending = 0;
+               pthread_mutex_lock(&ust_fork_mutex);
                lttng_handle_pending_statedump(sock_info);
+               pthread_mutex_unlock(&ust_fork_mutex);
        }
 }
 
@@ -468,13 +573,13 @@ int handle_message(struct sock_info *sock_info,
 
        if (ust_lock()) {
                ret = -LTTNG_UST_ERR_EXITING;
-               goto end;
+               goto error;
        }
 
        ops = objd_ops(lum->handle);
        if (!ops) {
                ret = -ENOENT;
-               goto end;
+               goto error;
        }
 
        switch (lum->cmd) {
@@ -535,12 +640,12 @@ int handle_message(struct sock_info *sock_info,
                                }
                                ret = len;
                                free(bytecode);
-                               goto end;
+                               goto error;
                        } else {
                                DBG("incorrect filter data message size: %zd", len);
                                ret = -EINVAL;
                                free(bytecode);
-                               goto end;
+                               goto error;
                        }
                }
                bytecode->bc.len = lum->u.filter.data_size;
@@ -600,12 +705,12 @@ int handle_message(struct sock_info *sock_info,
                                }
                                ret = len;
                                free(node);
-                               goto end;
+                               goto error;
                        } else {
                                DBG("Incorrect exclusion data message size: %zd", len);
                                ret = -EINVAL;
                                free(node);
-                               goto end;
+                               goto error;
                        }
                }
                if (ops->cmd) {
@@ -646,11 +751,11 @@ int handle_message(struct sock_info *sock_info,
                                        goto error;
                                }
                                ret = len;
-                               goto end;
+                               goto error;
                        } else {
                                DBG("incorrect channel data message size: %zd", len);
                                ret = -EINVAL;
-                               goto end;
+                               goto error;
                        }
                }
                args.channel.chan_data = chan_data;
@@ -671,7 +776,7 @@ int handle_message(struct sock_info *sock_info,
                        &args.stream.shm_fd,
                        &args.stream.wakeup_fd);
                if (ret) {
-                       goto end;
+                       goto error;
                }
                if (ops->cmd)
                        ret = ops->cmd(lum->handle, lum->cmd,
@@ -691,7 +796,6 @@ int handle_message(struct sock_info *sock_info,
                break;
        }
 
-end:
        lur.handle = lum->handle;
        lur.cmd = lum->cmd;
        lur.ret_val = ret;
@@ -820,9 +924,19 @@ void cleanup_sock_info(struct sock_info *sock_info, int exiting)
                sock_info->notify_socket = -1;
        }
        if (sock_info->wait_shm_mmap) {
-               ret = munmap(sock_info->wait_shm_mmap, sysconf(_SC_PAGE_SIZE));
-               if (ret) {
-                       ERR("Error unmapping wait shm");
+               long page_size;
+
+               page_size = sysconf(_SC_PAGE_SIZE);
+               if (page_size <= 0) {
+                       if (!page_size) {
+                               errno = EINVAL;
+                       }
+                       PERROR("Error in sysconf(_SC_PAGE_SIZE)");
+               } else {
+                       ret = munmap(sock_info->wait_shm_mmap, page_size);
+                       if (ret) {
+                               ERR("Error unmapping wait shm");
+                       }
                }
                sock_info->wait_shm_mmap = NULL;
        }
@@ -995,15 +1109,24 @@ error_close:
 static
 char *get_map_shm(struct sock_info *sock_info)
 {
-       size_t mmap_size = sysconf(_SC_PAGE_SIZE);
+       long page_size;
        int wait_shm_fd, ret;
        char *wait_shm_mmap;
 
-       wait_shm_fd = get_wait_shm(sock_info, mmap_size);
+       page_size = sysconf(_SC_PAGE_SIZE);
+       if (page_size <= 0) {
+               if (!page_size) {
+                       errno = EINVAL;
+               }
+               PERROR("Error in sysconf(_SC_PAGE_SIZE)");
+               goto error;
+       }
+
+       wait_shm_fd = get_wait_shm(sock_info, page_size);
        if (wait_shm_fd < 0) {
                goto error;
        }
-       wait_shm_mmap = mmap(NULL, mmap_size, PROT_READ,
+       wait_shm_mmap = mmap(NULL, page_size, PROT_READ,
                  MAP_SHARED, wait_shm_fd, 0);
        /* close shm fd immediately after taking the mmap reference */
        ret = close(wait_shm_fd);
@@ -1023,8 +1146,6 @@ error:
 static
 void wait_for_sessiond(struct sock_info *sock_info)
 {
-       int ret;
-
        if (ust_lock()) {
                goto quit;
        }
@@ -1040,23 +1161,32 @@ void wait_for_sessiond(struct sock_info *sock_info)
 
        DBG("Waiting for %s apps sessiond", sock_info->name);
        /* Wait for futex wakeup */
-       if (uatomic_read((int32_t *) sock_info->wait_shm_mmap) == 0) {
-               ret = futex_async((int32_t *) sock_info->wait_shm_mmap,
-                       FUTEX_WAIT, 0, NULL, NULL, 0);
-               if (ret < 0) {
-                       if (errno == EFAULT) {
-                               wait_poll_fallback = 1;
-                               DBG(
+       if (uatomic_read((int32_t *) sock_info->wait_shm_mmap))
+               goto end_wait;
+
+       while (futex_async((int32_t *) sock_info->wait_shm_mmap,
+                       FUTEX_WAIT, 0, NULL, NULL, 0)) {
+               switch (errno) {
+               case EWOULDBLOCK:
+                       /* Value already changed. */
+                       goto end_wait;
+               case EINTR:
+                       /* Retry if interrupted by signal. */
+                       break;  /* Get out of switch. */
+               case EFAULT:
+                       wait_poll_fallback = 1;
+                       DBG(
 "Linux kernels 2.6.33 to 3.0 (with the exception of stable versions) "
 "do not support FUTEX_WAKE on read-only memory mappings correctly. "
 "Please upgrade your kernel "
 "(fix is commit 9ea71503a8ed9184d2d0b8ccc4d269d05f7940ae in Linux kernel "
 "mainline). LTTng-UST will use polling mode fallback.");
-                               if (ust_debug())
-                                       PERROR("futex");
-                       }
+                       if (ust_debug())
+                               PERROR("futex");
+                       goto end_wait;
                }
        }
+end_wait:
        return;
 
 quit:
@@ -1095,8 +1225,9 @@ restart:
                         * deals with a killed or broken session daemon.
                         */
                        sleep(5);
+               } else {
+                       has_waited = 1;
                }
-               has_waited = 1;
                prev_connect_failed = 0;
        }
 
@@ -1275,7 +1406,13 @@ restart:
                        print_cmd(lum.cmd, lum.handle);
                        ret = handle_message(sock_info, sock, &lum);
                        if (ret) {
-                               ERR("Error handling message for %s socket", sock_info->name);
+                               ERR("Error handling message for %s socket",
+                                       sock_info->name);
+                               /*
+                                * Close socket if protocol error is
+                                * detected.
+                                */
+                               goto end;
                        }
                        continue;
                default:
@@ -1310,6 +1447,14 @@ quit:
        return NULL;
 }
 
+/*
+ * Weak symbol to call when the ust malloc wrapper is not loaded.
+ */
+__attribute__((weak))
+void lttng_ust_malloc_wrapper_init(void)
+{
+}
+
 /*
  * sessiond monitoring thread: monitor presence of global and per-user
  * sessiond by polling the application common named pipe.
@@ -1330,10 +1475,12 @@ void __attribute__((constructor)) lttng_ust_init(void)
         * to be the dynamic linker mutex) and ust_lock, taken within
         * the ust lock.
         */
+       lttng_fixup_urcu_bp_tls();
        lttng_fixup_ringbuffer_tls();
        lttng_fixup_vtid_tls();
        lttng_fixup_nest_count_tls();
        lttng_fixup_procname_tls();
+       lttng_fixup_ust_mutex_nest_tls();
 
        /*
         * We want precise control over the order in which we construct
@@ -1349,7 +1496,12 @@ void __attribute__((constructor)) lttng_ust_init(void)
        lttng_ring_buffer_client_overwrite_rt_init();
        lttng_ring_buffer_client_discard_init();
        lttng_ring_buffer_client_discard_rt_init();
+       lttng_perf_counter_init();
        lttng_context_init();
+       /*
+        * Invoke ust malloc wrapper init before starting other threads.
+        */
+       lttng_ust_malloc_wrapper_init();
 
        timeout_mode = get_constructor_timeout(&constructor_timeout);
 
@@ -1440,9 +1592,7 @@ static
 void lttng_ust_cleanup(int exiting)
 {
        cleanup_sock_info(&global_apps, exiting);
-       if (local_apps.allowed) {
-               cleanup_sock_info(&local_apps, exiting);
-       }
+       cleanup_sock_info(&local_apps, exiting);
        /*
         * The teardown in this function all affect data structures
         * accessed under the UST lock by the listener thread. This
@@ -1453,6 +1603,7 @@ void lttng_ust_cleanup(int exiting)
        lttng_ust_abi_exit();
        lttng_ust_events_exit();
        lttng_context_exit();
+       lttng_perf_counter_exit();
        lttng_ring_buffer_client_discard_rt_exit();
        lttng_ring_buffer_client_discard_exit();
        lttng_ring_buffer_client_overwrite_rt_exit();
@@ -1546,6 +1697,9 @@ void ust_before_fork(sigset_t *save_sigset)
        if (ret == -1) {
                PERROR("sigprocmask");
        }
+
+       pthread_mutex_lock(&ust_fork_mutex);
+
        ust_lock_nocheck();
        rcu_bp_before_fork();
 }
@@ -1556,6 +1710,9 @@ static void ust_after_fork_common(sigset_t *restore_sigset)
 
        DBG("process %d", getpid());
        ust_unlock();
+
+       pthread_mutex_unlock(&ust_fork_mutex);
+
        /* Restore signals */
        ret = sigprocmask(SIG_SETMASK, restore_sigset, NULL);
        if (ret == -1) {
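Beyond that one-line fix, most of the hunks above introduce a signal-safe, nestable acquisition of ust_mutex: cancellation is deferred, all signals are blocked around the mutex operation, and a per-thread nesting counter (ust_mutex_nest) lets the same thread re-enter the lock, e.g. when perf counter lazy initialization fires from an event traced while the lock is held. Below is a condensed, self-contained sketch of that pattern using plain __thread storage in place of liburcu's DEFINE_URCU_TLS; my_lock, my_unlock, and the_mutex are placeholder names, not the library's API:

#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t the_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread int mutex_nest;		/* per-thread nesting counter */

void my_lock(void)
{
	sigset_t all_blocked, orig_mask;
	int ret, oldstate;

	/* Defer cancellation so the thread cannot vanish while holding the mutex. */
	ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
	if (ret)
		fprintf(stderr, "pthread_setcancelstate: %s\n", strerror(ret));
	/* Block all signals while the nesting counter and mutex are updated. */
	sigfillset(&all_blocked);
	ret = pthread_sigmask(SIG_SETMASK, &all_blocked, &orig_mask);
	if (ret)
		fprintf(stderr, "pthread_sigmask: %s\n", strerror(ret));
	/* Only the outermost call actually takes the mutex. */
	if (!mutex_nest++)
		pthread_mutex_lock(&the_mutex);
	ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
	if (ret)
		fprintf(stderr, "pthread_sigmask: %s\n", strerror(ret));
}

void my_unlock(void)
{
	sigset_t all_blocked, orig_mask;
	int ret, oldstate;

	sigfillset(&all_blocked);
	ret = pthread_sigmask(SIG_SETMASK, &all_blocked, &orig_mask);
	if (ret)
		fprintf(stderr, "pthread_sigmask: %s\n", strerror(ret));
	/* Only the outermost unlock releases the mutex. */
	if (!--mutex_nest)
		pthread_mutex_unlock(&the_mutex);
	ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
	if (ret)
		fprintf(stderr, "pthread_sigmask: %s\n", strerror(ret));
	/* Re-enable cancellation once the mutex is no longer held. */
	ret = pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &oldstate);
	if (ret)
		fprintf(stderr, "pthread_setcancelstate: %s\n", strerror(ret));
}

Re-enabling cancellation unconditionally in the unlock path mirrors the diff; a stricter variant would restore the state saved by the matching lock call, which the real code approximates by checking oldstate and logging when it is unexpected.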