static int kernel_tracer_fd;
static int kernel_poll_pipe[2];
+/*
+ * Quit pipe for all threads. This permits a single cancellation point
+ * for all threads when receiving an event on the pipe.
+ */
+static int thread_quit_pipe[2];
+
/* Pthread, Mutexes and Semaphores */
static pthread_t kconsumerd_thread;
static pthread_t apps_thread;
* This points to the tracing session list containing the session count and a
* mutex lock. The lock MUST be taken if you iterate over the list. The lock
* MUST NOT be taken if you call a public function in session.c.
+ *
+ * The lock is nested inside the structure: session_list_ptr->lock.
*/
static struct ltt_session_list *session_list_ptr;
+/*
+ * Init quit pipe.
+ *
+ * Return -1 on error or 0 if all pipes are created.
+ */
+static int init_thread_quit_pipe(void)
+{
+ int ret;
+
+ ret = pipe2(thread_quit_pipe, O_CLOEXEC);
+ if (ret < 0) {
+ perror("thread quit pipe");
+ goto error;
+ }
+
+error:
+ return ret;
+}
+
/*
* teardown_kernel_session
*
}
/*
- * cleanup
- *
- * Cleanup the daemon on exit
+ * Cleanup the daemon
*/
static void cleanup()
{
DBG("Cleaning up");
/* <fun> */
- MSG("\n%c[%d;%dm*** assert failed *** ==> %c[%dm", 27,1,31,27,0);
- MSG("%c[%d;%dmMatthew, BEET driven development works!%c[%dm",27,1,33,27,0);
+ MSG("\n%c[%d;%dm*** assert failed *** ==> %c[%dm%c[%d;%dm"
+ "Matthew, BEET driven development works!%c[%dm",
+ 27, 1, 31, 27, 0, 27, 1, 33, 27, 0);
/* </fun> */
/* Stopping all threads */
DBG("Terminating all threads");
- pthread_cancel(client_thread);
- pthread_cancel(apps_thread);
- pthread_cancel(kernel_thread);
- if (kconsumerd_pid != 0) {
- pthread_cancel(kconsumerd_thread);
- }
-
- DBG("Unlinking all unix socket");
- unlink(client_unix_sock_path);
- unlink(apps_unix_sock_path);
- unlink(kconsumerd_err_unix_sock_path);
+ close(thread_quit_pipe[0]);
+ close(thread_quit_pipe[1]);
DBG("Removing %s directory", LTTNG_RUNDIR);
ret = asprintf(&cmd, "rm -rf " LTTNG_RUNDIR);
}
DBG("Cleaning up all session");
- /* Cleanup ALL session */
- cds_list_for_each_entry(sess, &session_list_ptr->head, list) {
- teardown_kernel_session(sess);
- // TODO complete session cleanup (including UST)
- }
/* Destroy session list mutex */
- pthread_mutex_destroy(&session_list_ptr->lock);
+ if (session_list_ptr != NULL) {
+ pthread_mutex_destroy(&session_list_ptr->lock);
+
+ /* Cleanup ALL session */
+ cds_list_for_each_entry(sess, &session_list_ptr->head, list) {
+ teardown_kernel_session(sess);
+ // TODO complete session cleanup (including UST)
+ }
+ }
+
+ pthread_mutex_destroy(&kconsumerd_pid_mutex);
DBG("Closing kernel fd");
close(kernel_tracer_fd);
- close(kernel_poll_pipe[0]);
- close(kernel_poll_pipe[1]);
}
/*
*
* Free memory of a command context structure.
*/
-static void clean_command_ctx(struct command_ctx *cmd_ctx)
+static void clean_command_ctx(struct command_ctx **cmd_ctx)
{
- DBG("Clean command context structure %p", cmd_ctx);
- if (cmd_ctx) {
- if (cmd_ctx->llm) {
- free(cmd_ctx->llm);
+ DBG("Clean command context structure");
+ if (*cmd_ctx) {
+ if ((*cmd_ctx)->llm) {
+ free((*cmd_ctx)->llm);
}
- if (cmd_ctx->lsm) {
- free(cmd_ctx->lsm);
+ if ((*cmd_ctx)->lsm) {
+ free((*cmd_ctx)->lsm);
}
- free(cmd_ctx);
- cmd_ctx = NULL;
+ free(*cmd_ctx);
+ *cmd_ctx = NULL;
}
}
static int update_kernel_pollfd(void)
{
int i = 0;
- unsigned int nb_fd = 1;
+ /*
+	 * The wakeup pipe and the quit pipe are needed so the number of fds starts
+ * at 2 for those pipes.
+ */
+ unsigned int nb_fd = 2;
struct ltt_session *session;
struct ltt_kernel_channel *channel;
DBG("Updating kernel_pollfd");
/* Get the number of channel of all kernel session */
- pthread_mutex_lock(&session_list_ptr->lock);
+ lock_session_list();
cds_list_for_each_entry(session, &session_list_ptr->head, list) {
lock_session(session);
if (session->kernel_session == NULL) {
}
unlock_session(session);
}
- pthread_mutex_unlock(&session_list_ptr->lock);
+ unlock_session_list();
/* Adding wake up pipe */
- kernel_pollfd[nb_fd - 1].fd = kernel_poll_pipe[0];
- kernel_pollfd[nb_fd - 1].events = POLLIN;
+ kernel_pollfd[nb_fd - 2].fd = kernel_poll_pipe[0];
+ kernel_pollfd[nb_fd - 2].events = POLLIN;
+
+ /* Adding the quit pipe */
+ kernel_pollfd[nb_fd - 1].fd = thread_quit_pipe[0];
return nb_fd;
error:
- pthread_mutex_unlock(&session_list_ptr->lock);
+ unlock_session_list();
return -1;
}
DBG("Updating kernel streams for channel fd %d", fd);
- pthread_mutex_lock(&session_list_ptr->lock);
+ lock_session_list();
cds_list_for_each_entry(session, &session_list_ptr->head, list) {
lock_session(session);
if (session->kernel_session == NULL) {
}
end:
+ unlock_session_list();
if (session) {
unlock_session(session);
}
- pthread_mutex_unlock(&session_list_ptr->lock);
return ret;
}
continue;
}
+ /* Thread quit pipe has been closed. Killing thread. */
+ if (kernel_pollfd[nb_fd - 1].revents == POLLNVAL) {
+ goto error;
+ }
+
DBG("Kernel poll event triggered");
/*
* Check if the wake up pipe was triggered. If so, the kernel_pollfd
* must be updated.
*/
- switch (kernel_pollfd[nb_fd - 1].revents) {
+ switch (kernel_pollfd[nb_fd - 2].revents) {
case POLLIN:
ret = read(kernel_poll_pipe[0], &tmp, 1);
update_poll_flag = 1;
if (kernel_pollfd) {
free(kernel_pollfd);
}
+
+ close(kernel_poll_pipe[0]);
+ close(kernel_poll_pipe[1]);
return NULL;
}
*/
static void *thread_manage_kconsumerd(void *data)
{
- int sock, ret;
+ int sock = 0, ret;
enum lttcomm_return_code code;
+ struct pollfd pollfd[2];
DBG("[thread] Manage kconsumerd started");
goto error;
}
+ /* First fd is always the quit pipe */
+ pollfd[0].fd = thread_quit_pipe[0];
+
+	/* Kconsumerd error socket */
+ pollfd[1].fd = kconsumerd_err_sock;
+ pollfd[1].events = POLLIN;
+
+	/* Infinite blocking call, waiting for transmission */
+ ret = poll(pollfd, 2, -1);
+ if (ret < 0) {
+ perror("poll kconsumerd thread");
+ goto error;
+ }
+
+ /* Thread quit pipe has been closed. Killing thread. */
+ if (pollfd[0].revents == POLLNVAL) {
+ goto error;
+ } else if (pollfd[1].revents == POLLERR) {
+ ERR("Kconsumerd err socket poll error");
+ goto error;
+ }
+
sock = lttcomm_accept_unix_sock(kconsumerd_err_sock);
if (sock < 0) {
goto error;
ERR("Kconsumerd return code : %s", lttcomm_get_readable_code(-code));
error:
- kconsumerd_pid = 0;
DBG("Kconsumerd thread dying");
+ if (kconsumerd_err_sock) {
+ close(kconsumerd_err_sock);
+ }
+ if (kconsumerd_cmd_sock) {
+ close(kconsumerd_cmd_sock);
+ }
+ if (sock) {
+ close(sock);
+ }
+
+ unlink(kconsumerd_err_unix_sock_path);
+ unlink(kconsumerd_cmd_unix_sock_path);
+
+ kconsumerd_pid = 0;
return NULL;
}
*/
static void *thread_manage_apps(void *data)
{
- int sock, ret;
+ int sock = 0, ret;
+ struct pollfd pollfd[2];
/* TODO: Something more elegant is needed but fine for now */
/* FIXME: change all types to either uint8_t, uint32_t, uint64_t
goto error;
}
+ /* First fd is always the quit pipe */
+ pollfd[0].fd = thread_quit_pipe[0];
+
+ /* Apps socket */
+ pollfd[1].fd = apps_sock;
+ pollfd[1].events = POLLIN;
+
/* Notify all applications to register */
notify_apps(default_global_apps_pipe);
while (1) {
DBG("Accepting application registration");
- /* Blocking call, waiting for transmission */
+
+		/* Infinite blocking call, waiting for transmission */
+ ret = poll(pollfd, 2, -1);
+ if (ret < 0) {
+ perror("poll apps thread");
+ goto error;
+ }
+
+ /* Thread quit pipe has been closed. Killing thread. */
+ if (pollfd[0].revents == POLLNVAL) {
+ goto error;
+ } else if (pollfd[1].revents == POLLERR) {
+ ERR("Apps socket poll error");
+ goto error;
+ }
+
sock = lttcomm_accept_unix_sock(apps_sock);
if (sock < 0) {
goto error;
}
- /* Basic recv here to handle the very simple data
+ /*
+ * Basic recv here to handle the very simple data
* that the libust send to register (reg_msg).
*/
ret = recv(sock, ®_msg, sizeof(reg_msg), 0);
}
error:
+ DBG("Apps thread dying");
+ if (apps_sock) {
+ close(apps_sock);
+ }
+ if (sock) {
+ close(sock);
+ }
+ unlink(apps_unix_sock_path);
return NULL;
}
pthread_mutex_lock(&kconsumerd_pid_mutex);
if (kconsumerd_pid != 0) {
+ pthread_mutex_unlock(&kconsumerd_pid_mutex);
goto end;
}
}
end:
- pthread_mutex_unlock(&kconsumerd_pid_mutex);
return 0;
error:
return ret;
}
+/*
+ * Using the session list, fill an lttng_session array to send back to the
+ * client for session listing.
+ *
+ * The session list lock MUST be acquired before calling this function. Use
+ * lock_session_list() and unlock_session_list().
+ */
+static void list_lttng_sessions(struct lttng_session *sessions)
+{
+ int i = 0;
+ struct ltt_session *session;
+
+ DBG("Getting all available session");
+ /*
+ * Iterate over session list and append data after the control struct in
+ * the buffer.
+ */
+ cds_list_for_each_entry(session, &session_list_ptr->head, list) {
+ strncpy(sessions[i].path, session->path, PATH_MAX);
+ strncpy(sessions[i].name, session->name, NAME_MAX);
+ i++;
+ }
+}
+
/*
* process_client_msg
*
*/
case LTTNG_LIST_SESSIONS:
{
- unsigned int session_count;
+ lock_session_list();
- session_count = get_session_count();
- if (session_count == 0) {
+ if (session_list_ptr->count == 0) {
ret = LTTCOMM_NO_SESSION;
goto error;
}
- ret = setup_lttng_msg(cmd_ctx, sizeof(struct lttng_session) * session_count);
+ ret = setup_lttng_msg(cmd_ctx, sizeof(struct lttng_session) *
+ session_list_ptr->count);
if (ret < 0) {
goto setup_error;
}
- get_lttng_session((struct lttng_session *)(cmd_ctx->llm->payload));
+ /* Filled the session array */
+ list_lttng_sessions((struct lttng_session *)(cmd_ctx->llm->payload));
+
+ unlock_session_list();
ret = LTTCOMM_OK;
break;
*/
static void *thread_manage_clients(void *data)
{
- int sock, ret;
- struct command_ctx *cmd_ctx;
+ int sock = 0, ret;
+ struct command_ctx *cmd_ctx = NULL;
+ struct pollfd pollfd[2];
DBG("[thread] Manage client started");
goto error;
}
+ /* First fd is always the quit pipe */
+ pollfd[0].fd = thread_quit_pipe[0];
+
+	/* Client socket */
+ pollfd[1].fd = client_sock;
+ pollfd[1].events = POLLIN;
+
/* Notify parent pid that we are ready
* to accept command for client side.
*/
}
while (1) {
- /* Blocking call, waiting for transmission */
DBG("Accepting client command ...");
+
+		/* Infinite blocking call, waiting for transmission */
+ ret = poll(pollfd, 2, -1);
+ if (ret < 0) {
+ perror("poll client thread");
+ goto error;
+ }
+
+ /* Thread quit pipe has been closed. Killing thread. */
+ if (pollfd[0].revents == POLLNVAL) {
+ goto error;
+ } else if (pollfd[1].revents == POLLERR) {
+ ERR("Client socket poll error");
+ goto error;
+ }
+
sock = lttcomm_accept_unix_sock(client_sock);
if (sock < 0) {
goto error;
/* TODO: Inform client somehow of the fatal error. At this point,
* ret < 0 means that a malloc failed (ENOMEM). */
/* Error detected but still accept command */
- clean_command_ctx(cmd_ctx);
+ clean_command_ctx(&cmd_ctx);
continue;
}
ERR("Failed to send data back to client");
}
- clean_command_ctx(cmd_ctx);
+ clean_command_ctx(&cmd_ctx);
/* End of transmission */
close(sock);
}
error:
+ DBG("Client thread dying");
+ if (client_sock) {
+ close(client_sock);
+ }
+ if (sock) {
+ close(sock);
+ }
+
+ unlink(client_unix_sock_path);
+
+ clean_command_ctx(&cmd_ctx);
return NULL;
}
int ret;
struct rlimit lim;
+	/* The kernel does not allow an infinite limit for open files */
lim.rlim_cur = 65535;
lim.rlim_max = 65535;
void *status;
const char *home_path;
+ /* Create thread quit pipe */
+ if (init_thread_quit_pipe() < 0) {
+ goto exit;
+ }
+
/* Parse arguments */
progname = argv[0];
if ((ret = parse_args(argc, argv) < 0)) {
- goto error;
+ goto exit;
}
/* Daemonize */
ret = daemon(0, 0);
if (ret < 0) {
perror("daemon");
- goto error;
+ goto exit;
}
}
if (is_root) {
ret = create_lttng_rundir();
if (ret < 0) {
- goto error;
+ goto exit;
}
if (strlen(apps_unix_sock_path) == 0) {
snprintf(client_unix_sock_path, PATH_MAX,
DEFAULT_GLOBAL_CLIENT_UNIX_SOCK);
}
-
- ret = set_kconsumerd_sockets();
- if (ret < 0) {
- goto error;
- }
-
- /* Setup kernel tracer */
- init_kernel_tracer();
-
- /* Set ulimit for open files */
- set_ulimit();
} else {
home_path = get_home_dir();
if (home_path == NULL) {
- ERR("Can't get HOME directory for sockets creation.\n \
- Please specify --socket PATH.");
- goto error;
+ /* TODO: Add --socket PATH option */
+ ERR("Can't get HOME directory for sockets creation.");
+ goto exit;
}
if (strlen(apps_unix_sock_path) == 0) {
DBG("Client socket path %s", client_unix_sock_path);
DBG("Application socket path %s", apps_unix_sock_path);
- /* See if daemon already exist. If any of the two
- * socket needed by the daemon are present, this test fails
+ /*
+ * See if daemon already exist. If any of the two socket needed by the
+ * daemon are present, this test fails. However, if the daemon is killed
+ * with a SIGKILL, those unix socket must be unlinked by hand.
*/
if ((ret = check_existing_daemon()) == 0) {
ERR("Already running daemon.\n");
- /* We do not goto error because we must not
- * cleanup() because a daemon is already running.
+ /*
+ * We do not goto error because we must not cleanup() because a daemon
+ * is already running.
*/
- exit(EXIT_FAILURE);
+ goto exit;
+ }
+
+ /* After this point, we can safely call cleanup() so goto error is used */
+
+ /*
+ * These actions must be executed as root. We do that *after* setting up
+ * the sockets path because we MUST make the check for another daemon using
+ * those paths *before* trying to set the kernel consumer sockets and init
+ * kernel tracer.
+ */
+ if (is_root) {
+ ret = set_kconsumerd_sockets();
+ if (ret < 0) {
+ goto error;
+ }
+
+ /* Setup kernel tracer */
+ init_kernel_tracer();
+
+ /* Set ulimit for open files */
+ set_ulimit();
}
if (set_signal_handler() < 0) {
goto error;
}
- /* Get session list pointer */
+ /*
+ * Get session list pointer. This pointer MUST NOT be free().
+ * This list is statically declared in session.c
+ */
session_list_ptr = get_session_list();
while (1) {
error:
cleanup();
+
+exit:
exit(EXIT_FAILURE);
}