*/
#define _LGPL_SOURCE
+#define _GNU_SOURCE
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/mman.h>
#include <lttng/ust-ctl.h>
#include <urcu/tls-compat.h>
#include <ust-comm.h>
+#include <ust-fd.h>
#include <usterr-signal-safe.h>
#include <helper.h>
#include "tracepoint-internal.h"
#include "lttng-tracer-core.h"
#include "compat.h"
#include "../libringbuffer/tlsfixup.h"
+#include "lttng-ust-statedump.h"
+#include "clock.h"
+#include "../libringbuffer/getcpu.h"
+#include "getenv.h"
/*
* Has the lttng ust comm constructor been called?
* The ust_lock/ust_unlock lock is used as a communication thread mutex.
* Held when handling a command, also held by fork() to deal with
* removal of threads, and by exit path.
+ *
+ * The UST lock is the centralized mutex across UST tracing control and
+ * probe registration.
+ *
+ * ust_exit_mutex must never nest in ust_mutex.
+ *
+ * ust_fork_mutex must never nest in ust_mutex.
+ *
+ * ust_mutex_nest is a per-thread nesting counter. It allows the lazy
+ * perf counter initialization, triggered by events fired within the
+ * statedump (which traces while ust_mutex is held), to re-enter the
+ * lock without deadlocking.
+ *
+ * ust_lock nests within the dynamic loader lock (within glibc) because
+ * it is taken within the library constructor.
+ */
+static pthread_mutex_t ust_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+/* Allow nesting the ust_mutex within the same thread. */
+static DEFINE_URCU_TLS(int, ust_mutex_nest);
+
+/*
+ * ust_exit_mutex protects the thread_active variable with respect to
+ * thread exit. This cannot be done by ust_mutex because
+ * pthread_cancel(), which takes an internal libc lock, cannot nest
+ * within ust_mutex.
+ *
+ * It never nests within ust_mutex.
*/
+static pthread_mutex_t ust_exit_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+/*
+ * ust_fork_mutex protects base address statedump tracing against forks. It
+ * prevents the dynamic loader lock from being taken (by base address
+ * statedump tracing) while a fork is happening, thus avoiding deadlocks
+ * with the dynamic loader lock.
+ */
+static pthread_mutex_t ust_fork_mutex = PTHREAD_MUTEX_INITIALIZER;
/* Should the ust comm thread quit? */
static int lttng_ust_comm_should_quit;
+/*
+ * This variable can be tested by applications to check whether
+ * lttng-ust is loaded. They simply have to define their own
+ * "lttng_ust_loaded" weak symbol, and test it. It is set to 1 by the
+ * library constructor.
+ */
+int lttng_ust_loaded __attribute__((weak));
+
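/*
 * A minimal usage sketch of the mechanism above: an application
 * defines the same weak symbol and tests it at run time; it reads 0
 * unless the liblttng-ust constructor has run. app_has_lttng_ust() is
 * a hypothetical helper name, not part of the patch.
 */
int lttng_ust_loaded __attribute__((weak));

static int app_has_lttng_ust(void)
{
	return lttng_ust_loaded;
}
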
+/*
+ * Return 0 on success, -1 if the caller should quit.
+ * The lock is taken in both cases.
+ * Signal-safe.
+ */
+int ust_lock(void)
+{
+ sigset_t sig_all_blocked, orig_mask;
+ int ret, oldstate;
+
+ ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
+	if (ret) {
+		ERR("pthread_setcancelstate: %s", strerror(ret));
+	} else if (oldstate != PTHREAD_CANCEL_ENABLE) {
+		/* Only check oldstate when the call succeeded. */
+		ERR("pthread_setcancelstate: unexpected oldstate");
+	}
+ sigfillset(&sig_all_blocked);
+ ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
+ if (ret) {
+ ERR("pthread_sigmask: %s", strerror(ret));
+ }
+ if (!URCU_TLS(ust_mutex_nest)++)
+ pthread_mutex_lock(&ust_mutex);
+ ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
+ if (ret) {
+ ERR("pthread_sigmask: %s", strerror(ret));
+ }
+ if (lttng_ust_comm_should_quit) {
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+/*
+ * ust_lock_nocheck() can be used in constructors/destructors, because
+ * they are already nested within the dynamic loader lock, and therefore
+ * cannot run concurrently with the liblttng-ust destructor.
+ * Signal-safe.
+ */
+void ust_lock_nocheck(void)
+{
+ sigset_t sig_all_blocked, orig_mask;
+ int ret, oldstate;
+
+ ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
+	if (ret) {
+		ERR("pthread_setcancelstate: %s", strerror(ret));
+	} else if (oldstate != PTHREAD_CANCEL_ENABLE) {
+		/* Only check oldstate when the call succeeded. */
+		ERR("pthread_setcancelstate: unexpected oldstate");
+	}
+ sigfillset(&sig_all_blocked);
+ ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
+ if (ret) {
+ ERR("pthread_sigmask: %s", strerror(ret));
+ }
+ if (!URCU_TLS(ust_mutex_nest)++)
+ pthread_mutex_lock(&ust_mutex);
+ ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
+ if (ret) {
+ ERR("pthread_sigmask: %s", strerror(ret));
+ }
+}
+
+/*
+ * Signal-safe.
+ */
+void ust_unlock(void)
+{
+ sigset_t sig_all_blocked, orig_mask;
+ int ret, oldstate;
+
+ sigfillset(&sig_all_blocked);
+ ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
+ if (ret) {
+ ERR("pthread_sigmask: %s", strerror(ret));
+ }
+ if (!--URCU_TLS(ust_mutex_nest))
+ pthread_mutex_unlock(&ust_mutex);
+ ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
+ if (ret) {
+ ERR("pthread_sigmask: %s", strerror(ret));
+ }
+ ret = pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &oldstate);
+	if (ret) {
+		ERR("pthread_setcancelstate: %s", strerror(ret));
+	} else if (oldstate != PTHREAD_CANCEL_DISABLE) {
+		/* Only check oldstate when the call succeeded. */
+		ERR("pthread_setcancelstate: unexpected oldstate");
+	}
+}
+
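/*
 * A minimal sketch of the expected ust_lock()/ust_unlock() pairing:
 * the mutex is held even when ust_lock() returns nonzero (exiting),
 * so every path must go through ust_unlock(). do_command() is a
 * hypothetical placeholder, not part of the patch.
 */
static int example_with_ust_lock(void)
{
	int ret;

	if (ust_lock()) {
		ret = -LTTNG_UST_ERR_EXITING;
		goto end;
	}
	ret = do_command();
end:
	ust_unlock();
	return ret;
}
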
/*
* Wait for either of these before continuing to the main
* program:
char wait_shm_path[PATH_MAX];
char *wait_shm_mmap;
+	/* Keep track of a lazy statedump not yet performed. */
+ int statedump_pending;
};
/* Socket from app (connect) to session daemon (listen) for communication */
.notify_socket = -1,
.wait_shm_path = "/" LTTNG_UST_WAIT_FILENAME,
+
+ .statedump_pending = 0,
};
/* TODO: allow global_apps_sock_path override */
.socket = -1,
.notify_socket = -1,
+
+ .statedump_pending = 0,
};
static int wait_poll_fallback;
extern void lttng_ring_buffer_client_discard_rt_exit(void);
extern void lttng_ring_buffer_metadata_client_exit(void);
+ssize_t lttng_ust_read(int fd, void *buf, size_t len)
+{
+ ssize_t ret;
+ size_t copied = 0, to_copy = len;
+
+ do {
+ ret = read(fd, buf + copied, to_copy);
+ if (ret > 0) {
+ copied += ret;
+ to_copy -= ret;
+ }
+ } while ((ret > 0 && to_copy > 0)
+ || (ret < 0 && errno == EINTR));
+ if (ret > 0) {
+ ret = copied;
+ }
+ return ret;
+}
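/*
 * A sketch of receiving a fixed-size message with lttng_ust_read():
 * the helper retries on EINTR and short reads, so the possible
 * outcomes are the full length, 0 (orderly shutdown), or -1 with
 * errno set. example_read_msg() is a hypothetical caller.
 */
static int example_read_msg(int fd, struct ustcomm_ust_msg *lum)
{
	ssize_t len;

	len = lttng_ust_read(fd, lum, sizeof(*lum));
	if (len != sizeof(*lum))
		return -1;	/* Shutdown or read error. */
	return 0;
}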
/*
* Returns the HOME directory path. Caller MUST NOT free(3) the returned
* pointer.
{
const char *val;
- val = (const char *) getenv("LTTNG_HOME");
+ val = (const char *) lttng_secure_getenv("LTTNG_HOME");
if (val != NULL) {
return val;
}
- return (const char *) getenv("HOME");
+ return (const char *) lttng_secure_getenv("HOME");
}
/*
asm volatile ("" : : "m" (URCU_TLS(lttng_ust_nest_count)));
}
+static
+void lttng_fixup_ust_mutex_nest_tls(void)
+{
+ asm volatile ("" : : "m" (URCU_TLS(ust_mutex_nest)));
+}
+
+/*
+ * Fixup urcu bp TLS by entering and exiting a read-side critical
+ * section, which forces early allocation of the thread's urcu state.
+ */
+static
+void lttng_fixup_urcu_bp_tls(void)
+{
+ rcu_read_lock();
+ rcu_read_unlock();
+}
+
+void lttng_ust_fixup_tls(void)
+{
+ lttng_fixup_urcu_bp_tls();
+ lttng_fixup_ringbuffer_tls();
+ lttng_fixup_vtid_tls();
+ lttng_fixup_nest_count_tls();
+ lttng_fixup_procname_tls();
+ lttng_fixup_ust_mutex_nest_tls();
+ lttng_ust_fixup_fd_tracker_tls();
+}
+
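/*
 * A minimal sketch of the TLS fixup idiom used above, with a
 * hypothetical variable: touching the TLS variable through an empty
 * asm statement forces its allocation at thread start, so later
 * accesses (e.g. from signal handlers, or while ust_mutex is held)
 * cannot trigger a lazy TLS allocation.
 */
static DEFINE_URCU_TLS(int, example_tls_var);

static void lttng_fixup_example_tls(void)
{
	asm volatile ("" : : "m" (URCU_TLS(example_tls_var)));
}
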
int lttng_get_notify_socket(void *owner)
{
struct sock_info *info = owner;
}
/*
- * Get notify_sock timeout, in ms.
- * -1: don't wait. 0: wait forever. >0: timeout, in ms.
+ * Get socket timeout, in ms.
+ * -1: wait forever. 0: don't wait. >0: timeout, in ms.
*/
static
long get_timeout(void)
}
if (str_timeout)
constructor_delay_ms = strtol(str_timeout, NULL, 10);
+	/* All negative values are treated as "-1". */
+ if (constructor_delay_ms < -1)
+ constructor_delay_ms = -1;
return constructor_delay_ms;
}
+/* Timeout for notify socket send and recv. */
static
long get_notify_sock_timeout(void)
{
return get_timeout();
}
+/* Timeout for connecting to cmd and notify sockets. */
+static
+long get_connect_sock_timeout(void)
+{
+ return get_timeout();
+}
+
/*
- * Return values: -1: don't wait. 0: wait forever. 1: timeout wait.
+ * Return values: -1: wait forever. 0: don't wait. 1: timeout wait.
*/
static
int get_constructor_timeout(struct timespec *constructor_timeout)
*/
ret = clock_gettime(CLOCK_REALTIME, constructor_timeout);
if (ret) {
- return -1;
+ /* Don't wait. */
+ return 0;
}
constructor_timeout->tv_sec += constructor_delay_ms / 1000UL;
constructor_timeout->tv_nsec +=
constructor_timeout->tv_sec++;
constructor_timeout->tv_nsec -= 1000000000UL;
}
+ /* Timeout wait (constructor_delay_ms). */
return 1;
}
return 0;
}
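/*
 * A minimal sketch of the relative-to-absolute timeout conversion
 * performed above (error handling omitted for brevity): the
 * millisecond delay is added to the current CLOCK_REALTIME time,
 * normalizing any tv_nsec overflow into tv_sec.
 */
static void example_abs_timeout(struct timespec *ts, long delay_ms)
{
	(void) clock_gettime(CLOCK_REALTIME, ts);
	ts->tv_sec += delay_ms / 1000UL;
	ts->tv_nsec += (delay_ms % 1000UL) * 1000000UL;
	if (ts->tv_nsec >= 1000000000UL) {
		ts->tv_sec++;
		ts->tv_nsec -= 1000000000UL;
	}
}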
+/*
+ * Only execute pending statedump after the constructor semaphore has
+ * been posted by each listener thread. This means statedump will only
+ * be performed after the "registration done" command is received from
+ * each session daemon the application is connected to.
+ *
+ * This ensures we don't run into deadlock issues with the dynamic
+ * loader mutex, which is held while the constructor runs and waits
+ * on the constructor semaphore. All operations requiring this
+ * dynamic loader lock need to be postponed using this mechanism.
+ */
+static
+void handle_pending_statedump(struct sock_info *sock_info)
+{
+ int ctor_passed = sock_info->constructor_sem_posted;
+
+ if (ctor_passed && sock_info->statedump_pending) {
+ sock_info->statedump_pending = 0;
+ pthread_mutex_lock(&ust_fork_mutex);
+ lttng_handle_pending_statedump(sock_info);
+ pthread_mutex_unlock(&ust_fork_mutex);
+ }
+}
+
static
int handle_message(struct sock_info *sock_info,
int sock, struct ustcomm_ust_msg *lum)
const struct lttng_ust_objd_ops *ops;
struct ustcomm_ust_reply lur;
union ust_args args;
+ char ctxstr[LTTNG_UST_SYM_NAME_LEN]; /* App context string. */
ssize_t len;
- ust_lock();
-
memset(&lur, 0, sizeof(lur));
- if (lttng_ust_comm_should_quit) {
+ if (ust_lock()) {
ret = -LTTNG_UST_ERR_EXITING;
- goto end;
+ goto error;
}
ops = objd_ops(lum->handle);
if (!ops) {
ret = -ENOENT;
- goto end;
+ goto error;
}
switch (lum->cmd) {
}
ret = len;
free(bytecode);
- goto end;
+ goto error;
} else {
DBG("incorrect filter data message size: %zd", len);
ret = -EINVAL;
free(bytecode);
- goto end;
+ goto error;
}
}
bytecode->bc.len = lum->u.filter.data_size;
}
break;
}
+ case LTTNG_UST_EXCLUSION:
+ {
+ /* Receive exclusion names */
+ struct lttng_ust_excluder_node *node;
+ unsigned int count;
+
+ count = lum->u.exclusion.count;
+ if (count == 0) {
+ /* There are no names to read */
+ ret = 0;
+ goto error;
+ }
+ node = zmalloc(sizeof(*node) +
+ count * LTTNG_UST_SYM_NAME_LEN);
+ if (!node) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ node->excluder.count = count;
+ len = ustcomm_recv_unix_sock(sock, node->excluder.names,
+ count * LTTNG_UST_SYM_NAME_LEN);
+ switch (len) {
+ case 0: /* orderly shutdown */
+ ret = 0;
+ free(node);
+ goto error;
+ default:
+ if (len == count * LTTNG_UST_SYM_NAME_LEN) {
+ DBG("Exclusion data received");
+ break;
+ } else if (len < 0) {
+ DBG("Receive failed from lttng-sessiond with errno %d", (int) -len);
+ if (len == -ECONNRESET) {
+ ERR("%s remote end closed connection", sock_info->name);
+ ret = len;
+ free(node);
+ goto error;
+ }
+ ret = len;
+ free(node);
+ goto error;
+ } else {
+ DBG("Incorrect exclusion data message size: %zd", len);
+ ret = -EINVAL;
+ free(node);
+ goto error;
+ }
+ }
+ if (ops->cmd) {
+ ret = ops->cmd(lum->handle, lum->cmd,
+ (unsigned long) node,
+ &args, sock_info);
+ if (ret) {
+ free(node);
+ }
+ /* Don't free exclusion data if everything went fine. */
+ } else {
+ ret = -ENOSYS;
+ free(node);
+ }
+ break;
+ }
case LTTNG_UST_CHANNEL:
{
void *chan_data;
goto error;
}
ret = len;
- goto end;
+ goto error;
} else {
DBG("incorrect channel data message size: %zd", len);
ret = -EINVAL;
- goto end;
+ goto error;
}
}
args.channel.chan_data = chan_data;
&args.stream.shm_fd,
&args.stream.wakeup_fd);
if (ret) {
- goto end;
+ goto error;
}
if (ops->cmd)
ret = ops->cmd(lum->handle, lum->cmd,
ret = -ENOSYS;
break;
}
+ case LTTNG_UST_CONTEXT:
+ switch (lum->u.context.ctx) {
+ case LTTNG_UST_CONTEXT_APP_CONTEXT:
+ {
+ char *p;
+ size_t ctxlen, recvlen;
+
+ ctxlen = strlen("$app.") + lum->u.context.u.app_ctx.provider_name_len - 1
+ + strlen(":") + lum->u.context.u.app_ctx.ctx_name_len;
+ if (ctxlen >= LTTNG_UST_SYM_NAME_LEN) {
+			ERR("Application context string length is too large: %zu bytes",
+ ctxlen);
+ ret = -EINVAL;
+ goto error;
+ }
+ strcpy(ctxstr, "$app.");
+ p = &ctxstr[strlen("$app.")];
+ recvlen = ctxlen - strlen("$app.");
+ len = ustcomm_recv_unix_sock(sock, p, recvlen);
+ switch (len) {
+ case 0: /* orderly shutdown */
+ ret = 0;
+ goto error;
+ default:
+ if (len == recvlen) {
+ DBG("app context data received");
+ break;
+ } else if (len < 0) {
+ DBG("Receive failed from lttng-sessiond with errno %d", (int) -len);
+ if (len == -ECONNRESET) {
+ ERR("%s remote end closed connection", sock_info->name);
+ ret = len;
+ goto error;
+ }
+ ret = len;
+ goto error;
+ } else {
+ DBG("incorrect app context data message size: %zd", len);
+ ret = -EINVAL;
+ goto error;
+ }
+ }
+			/*
+			 * Put ':' between provider and ctxname,
+			 * e.g. "$app.myprovider:myctx".
+			 */
+ p[lum->u.context.u.app_ctx.provider_name_len - 1] = ':';
+ args.app_context.ctxname = ctxstr;
+ break;
+ }
+ default:
+ break;
+ }
+ if (ops->cmd) {
+ ret = ops->cmd(lum->handle, lum->cmd,
+ (unsigned long) &lum->u,
+ &args, sock_info);
+ } else {
+ ret = -ENOSYS;
+ }
+ break;
default:
if (ops->cmd)
ret = ops->cmd(lum->handle, lum->cmd,
break;
}
-end:
lur.handle = lum->handle;
lur.cmd = lum->cmd;
lur.ret_val = ret;
}
}
DBG("Return value: %d", lur.ret_val);
+
+ ust_unlock();
+
+ /*
+	 * Perform delayed statedump operations outside of the UST
+ * lock. We need to take the dynamic loader lock before we take
+ * the UST lock internally within handle_pending_statedump().
+ */
+ handle_pending_statedump(sock_info);
+
+ if (ust_lock()) {
+ ret = -LTTNG_UST_ERR_EXITING;
+ goto error;
+ }
+
ret = send_reply(sock, &lur);
if (ret < 0) {
DBG("error sending reply");
error:
ust_unlock();
+
return ret;
}
sock_info->notify_socket = -1;
}
if (sock_info->wait_shm_mmap) {
- ret = munmap(sock_info->wait_shm_mmap, sysconf(_SC_PAGE_SIZE));
- if (ret) {
- ERR("Error unmapping wait shm");
+ long page_size;
+
+ page_size = sysconf(_SC_PAGE_SIZE);
+ if (page_size <= 0) {
+ if (!page_size) {
+ errno = EINVAL;
+ }
+ PERROR("Error in sysconf(_SC_PAGE_SIZE)");
+ } else {
+ ret = munmap(sock_info->wait_shm_mmap, page_size);
+ if (ret) {
+ ERR("Error unmapping wait shm");
+ }
}
sock_info->wait_shm_mmap = NULL;
}
static
char *get_map_shm(struct sock_info *sock_info)
{
- size_t mmap_size = sysconf(_SC_PAGE_SIZE);
+ long page_size;
int wait_shm_fd, ret;
char *wait_shm_mmap;
- wait_shm_fd = get_wait_shm(sock_info, mmap_size);
+ page_size = sysconf(_SC_PAGE_SIZE);
+ if (page_size <= 0) {
+ if (!page_size) {
+ errno = EINVAL;
+ }
+ PERROR("Error in sysconf(_SC_PAGE_SIZE)");
+ goto error;
+ }
+
+ lttng_ust_lock_fd_tracker();
+ wait_shm_fd = get_wait_shm(sock_info, page_size);
if (wait_shm_fd < 0) {
+ lttng_ust_unlock_fd_tracker();
goto error;
}
- wait_shm_mmap = mmap(NULL, mmap_size, PROT_READ,
+ lttng_ust_add_fd_to_tracker(wait_shm_fd);
+ lttng_ust_unlock_fd_tracker();
+
+ wait_shm_mmap = mmap(NULL, page_size, PROT_READ,
MAP_SHARED, wait_shm_fd, 0);
+
/* close shm fd immediately after taking the mmap reference */
+ lttng_ust_lock_fd_tracker();
ret = close(wait_shm_fd);
- if (ret) {
+ if (!ret) {
+ lttng_ust_delete_fd_from_tracker(wait_shm_fd);
+ } else {
PERROR("Error closing fd");
}
+ lttng_ust_unlock_fd_tracker();
+
if (wait_shm_mmap == MAP_FAILED) {
DBG("mmap error (can be caused by race with sessiond). Fallback to poll mode.");
goto error;
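/*
 * A sketch of the fd tracker discipline applied above: every file
 * descriptor the library opens or closes is registered or
 * unregistered under the fd tracker lock, protecting the library's
 * own fds from applications that close fd ranges blindly.
 * example_tracked_open() is a hypothetical helper, not from the patch.
 */
static int example_tracked_open(const char *path)
{
	int fd;

	lttng_ust_lock_fd_tracker();
	fd = open(path, O_RDONLY);
	if (fd >= 0)
		lttng_ust_add_fd_to_tracker(fd);
	lttng_ust_unlock_fd_tracker();
	return fd;
}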
static
void wait_for_sessiond(struct sock_info *sock_info)
{
- int ret;
-
- ust_lock();
- if (lttng_ust_comm_should_quit) {
+ if (ust_lock()) {
goto quit;
}
if (wait_poll_fallback) {
DBG("Waiting for %s apps sessiond", sock_info->name);
/* Wait for futex wakeup */
- if (uatomic_read((int32_t *) sock_info->wait_shm_mmap) == 0) {
- ret = futex_async((int32_t *) sock_info->wait_shm_mmap,
- FUTEX_WAIT, 0, NULL, NULL, 0);
- if (ret < 0) {
- if (errno == EFAULT) {
- wait_poll_fallback = 1;
- DBG(
+ if (uatomic_read((int32_t *) sock_info->wait_shm_mmap))
+ goto end_wait;
+
+ while (futex_async((int32_t *) sock_info->wait_shm_mmap,
+ FUTEX_WAIT, 0, NULL, NULL, 0)) {
+ switch (errno) {
+ case EWOULDBLOCK:
+ /* Value already changed. */
+ goto end_wait;
+ case EINTR:
+ /* Retry if interrupted by signal. */
+ break; /* Get out of switch. */
+ case EFAULT:
+ wait_poll_fallback = 1;
+ DBG(
"Linux kernels 2.6.33 to 3.0 (with the exception of stable versions) "
"do not support FUTEX_WAKE on read-only memory mappings correctly. "
"Please upgrade your kernel "
"(fix is commit 9ea71503a8ed9184d2d0b8ccc4d269d05f7940ae in Linux kernel "
"mainline). LTTng-UST will use polling mode fallback.");
- if (ust_debug())
- PERROR("futex");
- }
+ if (ust_debug())
+ PERROR("futex");
+ goto end_wait;
}
}
+end_wait:
return;
quit:
int sock, ret, prev_connect_failed = 0, has_waited = 0;
long timeout;
+ lttng_ust_fixup_tls();
+ /*
+ * If available, add '-ust' to the end of this thread's
+	 * process name.
+ */
+ ret = lttng_ust_setustprocname();
+ if (ret) {
+ ERR("Unable to set UST process name");
+ }
+
/* Restart trying to connect to the session daemon */
restart:
if (prev_connect_failed) {
* deals with a killed or broken session daemon.
*/
sleep(5);
+ } else {
+ has_waited = 1;
}
- has_waited = 1;
prev_connect_failed = 0;
}
if (sock_info->socket != -1) {
+ /* FD tracker is updated by ustcomm_close_unix_sock() */
ret = ustcomm_close_unix_sock(sock_info->socket);
if (ret) {
ERR("Error closing %s ust cmd socket",
sock_info->socket = -1;
}
if (sock_info->notify_socket != -1) {
+ /* FD tracker is updated by ustcomm_close_unix_sock() */
ret = ustcomm_close_unix_sock(sock_info->notify_socket);
if (ret) {
ERR("Error closing %s ust notify socket",
sock_info->notify_socket = -1;
}
+ if (ust_lock()) {
+ goto quit;
+ }
+
/*
* Register. We need to perform both connect and sending
* registration message before doing the next connect otherwise
* first connect registration message.
*/
/* Connect cmd socket */
- ret = ustcomm_connect_unix_sock(sock_info->sock_path);
+ lttng_ust_lock_fd_tracker();
+ ret = ustcomm_connect_unix_sock(sock_info->sock_path,
+ get_connect_sock_timeout());
if (ret < 0) {
+ lttng_ust_unlock_fd_tracker();
DBG("Info: sessiond not accepting connections to %s apps socket", sock_info->name);
prev_connect_failed = 1;
- ust_lock();
-
- if (lttng_ust_comm_should_quit) {
- goto quit;
- }
-
/*
* If we cannot find the sessiond daemon, don't delay
* constructor execution.
ust_unlock();
goto restart;
}
+ lttng_ust_add_fd_to_tracker(ret);
+ lttng_ust_unlock_fd_tracker();
sock_info->socket = ret;
- ust_lock();
-
- if (lttng_ust_comm_should_quit) {
+ ust_unlock();
+ /*
+ * Unlock/relock ust lock because connect is blocking (with
+ * timeout). Don't delay constructors on the ust lock for too
+ * long.
+ */
+ if (ust_lock()) {
goto quit;
}
}
ust_unlock();
+ /*
+ * Unlock/relock ust lock because connect is blocking (with
+ * timeout). Don't delay constructors on the ust lock for too
+ * long.
+ */
+ if (ust_lock()) {
+ goto quit;
+ }
/* Connect notify socket */
- ret = ustcomm_connect_unix_sock(sock_info->sock_path);
+ lttng_ust_lock_fd_tracker();
+ ret = ustcomm_connect_unix_sock(sock_info->sock_path,
+ get_connect_sock_timeout());
if (ret < 0) {
+ lttng_ust_unlock_fd_tracker();
DBG("Info: sessiond not accepting connections to %s apps socket", sock_info->name);
prev_connect_failed = 1;
- ust_lock();
-
- if (lttng_ust_comm_should_quit) {
- goto quit;
- }
-
/*
* If we cannot find the sessiond daemon, don't delay
* constructor execution.
ust_unlock();
goto restart;
}
+ lttng_ust_add_fd_to_tracker(ret);
+ lttng_ust_unlock_fd_tracker();
sock_info->notify_socket = ret;
+ ust_unlock();
+ /*
+ * Unlock/relock ust lock because connect is blocking (with
+ * timeout). Don't delay constructors on the ust lock for too
+ * long.
+ */
+ if (ust_lock()) {
+ goto quit;
+ }
+
timeout = get_notify_sock_timeout();
if (timeout >= 0) {
/*
WARN("Unsupported timeout value %ld", timeout);
}
- ust_lock();
-
- if (lttng_ust_comm_should_quit) {
- goto quit;
- }
-
ret = register_to_sessiond(sock_info->notify_socket,
USTCTL_SOCKET_NOTIFY);
if (ret < 0) {
switch (len) {
case 0: /* orderly shutdown */
DBG("%s lttng-sessiond has performed an orderly shutdown", sock_info->name);
- ust_lock();
- if (lttng_ust_comm_should_quit) {
+ if (ust_lock()) {
goto quit;
}
/*
print_cmd(lum.cmd, lum.handle);
ret = handle_message(sock_info, sock, &lum);
if (ret) {
- ERR("Error handling message for %s socket", sock_info->name);
+ ERR("Error handling message for %s socket",
+ sock_info->name);
+ /*
+			 * Close the socket if a protocol error is
+			 * detected.
+ */
+ goto end;
}
continue;
default:
}
end:
- ust_lock();
- if (lttng_ust_comm_should_quit) {
+ if (ust_lock()) {
goto quit;
}
/* Cleanup socket handles before trying to reconnect */
goto restart; /* try to reconnect */
quit:
- sock_info->thread_active = 0;
ust_unlock();
+
+ pthread_mutex_lock(&ust_exit_mutex);
+ sock_info->thread_active = 0;
+ pthread_mutex_unlock(&ust_exit_mutex);
return NULL;
}
+/*
+ * Weak symbol to call when the ust malloc wrapper is not loaded.
+ */
+__attribute__((weak))
+void lttng_ust_malloc_wrapper_init(void)
+{
+}
+
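/*
 * A sketch of overriding the weak no-op above: a wrapper library,
 * such as the ust malloc wrapper, provides the strong definition in
 * its own translation unit, and the linker calls it instead of the
 * weak stub. The body below is an assumed example, not actual
 * wrapper code.
 */
void lttng_ust_malloc_wrapper_init(void)
{
	/* Wrapper-specific setup would run here, in the wrapper library. */
}
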
/*
* sessiond monitoring thread: monitor presence of global and per-user
* sessiond by polling the application common named pipe.
* to be the dynamic linker mutex) and ust_lock, taken within
* the ust lock.
*/
- lttng_fixup_ringbuffer_tls();
- lttng_fixup_vtid_tls();
- lttng_fixup_nest_count_tls();
- lttng_fixup_procname_tls();
+ lttng_ust_fixup_tls();
+
+ lttng_ust_loaded = 1;
/*
* We want precise control over the order in which we construct
*/
init_usterr();
init_tracepoint();
+ lttng_ust_init_fd_tracker();
+ lttng_ust_clock_init();
+ lttng_ust_getcpu_init();
+ lttng_ust_statedump_init();
lttng_ring_buffer_metadata_client_init();
lttng_ring_buffer_client_overwrite_init();
lttng_ring_buffer_client_overwrite_rt_init();
lttng_ring_buffer_client_discard_init();
lttng_ring_buffer_client_discard_rt_init();
- lttng_context_init();
+ lttng_perf_counter_init();
+ /*
+ * Invoke ust malloc wrapper init before starting other threads.
+ */
+ lttng_ust_malloc_wrapper_init();
timeout_mode = get_constructor_timeout(&constructor_timeout);
ret = sem_init(&constructor_wait, 0, 0);
- assert(!ret);
+ if (ret) {
+ PERROR("sem_init");
+ }
ret = setup_local_apps();
if (ret) {
ERR("pthread_attr_setdetachstate: %s", strerror(ret));
}
- ust_lock();
+ pthread_mutex_lock(&ust_exit_mutex);
ret = pthread_create(&global_apps.ust_listener, &thread_attr,
ust_listener_thread, &global_apps);
if (ret) {
ERR("pthread_create global: %s", strerror(ret));
}
global_apps.thread_active = 1;
- ust_unlock();
+ pthread_mutex_unlock(&ust_exit_mutex);
if (local_apps.allowed) {
- ust_lock();
+ pthread_mutex_lock(&ust_exit_mutex);
ret = pthread_create(&local_apps.ust_listener, &thread_attr,
ust_listener_thread, &local_apps);
if (ret) {
ERR("pthread_create local: %s", strerror(ret));
}
local_apps.thread_active = 1;
- ust_unlock();
+ pthread_mutex_unlock(&ust_exit_mutex);
} else {
handle_register_done(&local_apps);
}
ret = sem_timedwait(&constructor_wait,
&constructor_timeout);
} while (ret < 0 && errno == EINTR);
- if (ret < 0 && errno == ETIMEDOUT) {
- ERR("Timed out waiting for lttng-sessiond");
- } else {
- assert(!ret);
+ if (ret < 0) {
+ switch (errno) {
+ case ETIMEDOUT:
+ ERR("Timed out waiting for lttng-sessiond");
+ break;
+ case EINVAL:
+ PERROR("sem_timedwait");
+ break;
+ default:
+ ERR("Unexpected error \"%s\" returned by sem_timedwait",
+ strerror(errno));
+ }
}
break;
case -1:/* wait forever */
do {
ret = sem_wait(&constructor_wait);
} while (ret < 0 && errno == EINTR);
- assert(!ret);
+ if (ret < 0) {
+ switch (errno) {
+ case EINVAL:
+ PERROR("sem_wait");
+ break;
+ default:
+ ERR("Unexpected error \"%s\" returned by sem_wait",
+ strerror(errno));
+ }
+ }
break;
case 0: /* no timeout */
break;
void lttng_ust_cleanup(int exiting)
{
cleanup_sock_info(&global_apps, exiting);
- if (local_apps.allowed) {
- cleanup_sock_info(&local_apps, exiting);
- }
+ cleanup_sock_info(&local_apps, exiting);
+ local_apps.allowed = 0;
/*
* The teardown in this function all affect data structures
* accessed under the UST lock by the listener thread. This
*/
lttng_ust_abi_exit();
lttng_ust_events_exit();
- lttng_context_exit();
+ lttng_perf_counter_exit();
lttng_ring_buffer_client_discard_rt_exit();
lttng_ring_buffer_client_discard_exit();
lttng_ring_buffer_client_overwrite_rt_exit();
lttng_ring_buffer_client_overwrite_exit();
lttng_ring_buffer_metadata_client_exit();
+ lttng_ust_statedump_destroy();
exit_tracepoint();
if (!exiting) {
/* Reinitialize values for fork */
* mutexes to ensure it is not in a mutex critical section when
* pthread_cancel is later called.
*/
- ust_lock();
+ ust_lock_nocheck();
lttng_ust_comm_should_quit = 1;
+ ust_unlock();
+ pthread_mutex_lock(&ust_exit_mutex);
/* cancel threads */
if (global_apps.thread_active) {
ret = pthread_cancel(global_apps.ust_listener);
local_apps.thread_active = 0;
}
}
- ust_unlock();
+ pthread_mutex_unlock(&ust_exit_mutex);
/*
* Do NOT join threads: use of sys_futex makes it impossible to
sigset_t all_sigs;
int ret;
+ /* Fixup lttng-ust TLS. */
+ lttng_ust_fixup_tls();
+
if (URCU_TLS(lttng_ust_nest_count))
return;
/* Disable signals */
if (ret == -1) {
PERROR("sigprocmask");
}
- ust_lock();
+
+ pthread_mutex_lock(&ust_fork_mutex);
+
+ ust_lock_nocheck();
rcu_bp_before_fork();
}
DBG("process %d", getpid());
ust_unlock();
+
+ pthread_mutex_unlock(&ust_fork_mutex);
+
/* Restore signals */
ret = sigprocmask(SIG_SETMASK, restore_sigset, NULL);
if (ret == -1) {
{
if (URCU_TLS(lttng_ust_nest_count))
return;
+ lttng_context_vtid_reset();
DBG("process %d", getpid());
/* Release urcu mutexes */
rcu_bp_after_fork_child();
lttng_ust_cleanup(0);
- lttng_context_vtid_reset();
/* Release mutexes and reenable signals */
ust_after_fork_common(restore_sigset);
lttng_ust_init();
}
+
+void lttng_ust_sockinfo_session_enabled(void *owner)
+{
+	struct sock_info *sock_info = owner;
+
+	sock_info->statedump_pending = 1;
+}
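/*
 * A minimal sketch of the delayed statedump flow wired up above: the
 * command handler marks the statedump pending when a session is
 * enabled, and the listener performs it later, outside the UST lock,
 * once the constructor semaphore has been posted. The sequencing is
 * condensed here for illustration only.
 */
static void example_statedump_flow(struct sock_info *sock_info)
{
	lttng_ust_sockinfo_session_enabled(sock_info);	/* Set pending flag. */
	handle_pending_statedump(sock_info);	/* Consume it, outside ust_lock. */
}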