#include "ust-consumer.h"
#include "utils.h"
#include "fd-limit.h"
-#include "filter.h"
#include "health.h"
#include "testpoint.h"
struct health_state health_thread_app_reg;
struct health_state health_thread_kernel;
+/*
+ * Socket timeout for receiving and sending, in seconds. Set in main() from
+ * the DEFAULT_APP_SOCKET_TIMEOUT_ENV environment variable, with
+ * DEFAULT_APP_SOCKET_RW_TIMEOUT as the fallback.
+ */
+static int app_socket_timeout;
+
static
void setup_consumerd_path(void)
{
static void cleanup(void)
{
int ret;
- char *cmd;
+ char *cmd = NULL;
struct ltt_session *sess, *stmp;
DBG("Cleaning up");
ERR("Unable to clean %s", rundir);
}
free(cmd);
+ free(rundir);
DBG("Cleaning up all sessions");
modprobe_remove_lttng_all();
}
- utils_close_pipe(kernel_poll_pipe);
- utils_close_pipe(apps_cmd_pipe);
-
/* <fun> */
DBG("%c[%d;%dm*** assert failed :-) *** ==> %c[%dm%c[%d;%dm"
"Matthew, BEET driven development works!%c[%dm",
static int send_unix_sock(int sock, void *buf, size_t len)
{
/* Check valid length */
- if (len <= 0) {
+ if (len == 0) {
return -1;
}
assert(socket->fd >= 0);
pthread_mutex_lock(socket->lock);
- ret = kernel_consumer_send_channel_stream(socket->fd,
+ ret = kernel_consumer_send_channel_stream(socket,
channel, ksess);
pthread_mutex_unlock(socket->lock);
if (ret < 0) {
char tmp;
struct lttng_poll_event events;
- DBG("Thread manage kernel started");
+ DBG("[thread] Thread manage kernel started");
- testpoint(thread_manage_kernel);
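+ /*
+  * testpoint() is assumed to return non-zero when a test hook injects a
+  * failure; checking the return value lets tests force this thread to
+  * abort before any resources are set up.
+  */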
+ if (testpoint(thread_manage_kernel)) {
+ goto error_testpoint;
+ }
health_code_update(&health_thread_kernel);
- testpoint(thread_manage_kernel_before_loop);
-
ret = create_thread_poll_set(&events, 2);
if (ret < 0) {
goto error_poll_create;
goto error;
}
+ if (testpoint(thread_manage_kernel_before_loop)) {
+ goto error;
+ }
+
while (1) {
health_code_update(&health_thread_kernel);
update_poll_flag = 0;
}
- nb_fd = LTTNG_POLL_GETNB(&events);
-
- DBG("Thread kernel polling on %d fds", nb_fd);
-
- /* Zeroed the poll events */
- lttng_poll_reset(&events);
+ DBG("Thread kernel polling on %d fds", events.nb_fd);
/* Poll with an infinite timeout */
restart:
continue;
}
+ nb_fd = ret;
+
for (i = 0; i < nb_fd; i++) {
/* Fetch once the poll data */
revents = LTTNG_POLL_GETEV(&events, i);
error:
lttng_poll_clean(&events);
error_poll_create:
+error_testpoint:
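+ /*
+  * Reset the pipe fds to -1 so any later user can tell the pipe is closed;
+  * writers are expected to check for a negative fd first (the
+  * apps_cmd_pipe[1] >= 0 guard further down uses the same pattern).
+  */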
+ utils_close_pipe(kernel_poll_pipe);
+ kernel_poll_pipe[0] = kernel_poll_pipe[1] = -1;
if (err) {
health_error(&health_thread_kernel);
ERR("Health error occurred in %s", __func__);
+ WARN("Kernel thread died unexpectedly. "
+ "Kernel tracing can continue but CPU hotplug is disabled.");
}
health_exit(&health_thread_kernel);
DBG("Kernel thread dying");
DBG("[thread] Manage consumer started");
- health_code_update(&consumer_data->health);
-
- ret = lttcomm_listen_unix_sock(consumer_data->err_sock);
- if (ret < 0) {
- goto error_listen;
- }
+ /*
+  * Since the consumer thread can be spawned at any moment in time, we init
+  * the health to a poll status (1, which is a valid health over time).
+  * When the thread starts, we update the health to a "code" state (an even
+  * value) so that this thread, once it reaches its poll wait, does not
+  * read as an error through an even counter value.
+  *
+  * Here is the use case we avoid:
+  *
+  * +1: the first poll update during initialization (main())
+  * +2x: multiple code updates once in this thread.
+  * +1: poll wait in this thread (being a good health state).
+  * == an even number, which after the wait period reads as bad health.
+  *
+  * In a nutshell, the following poll update brings the health state back
+  * to an even value, meaning a code path.
+  */
+ health_poll_update(&consumer_data->health);
/*
* Pass 2 as size here for the thread quit pipe and kconsumerd_err_sock.
goto error_poll;
}
+ /*
+ * The error socket here is already in a listening state which was done
+ * just before spawning this thread to avoid a race between the consumer
+ * daemon exec trying to connect and the listen() call.
+ */
ret = lttng_poll_add(&events, consumer_data->err_sock, LPOLLIN | LPOLLRDHUP);
if (ret < 0) {
goto error;
}
- nb_fd = LTTNG_POLL_GETNB(&events);
-
health_code_update(&consumer_data->health);
/* Infinite blocking call, waiting for transmission */
restart:
health_poll_update(&consumer_data->health);
- testpoint(thread_manage_consumer);
+ if (testpoint(thread_manage_consumer)) {
+ goto error;
+ }
ret = lttng_poll_wait(&events, -1);
health_poll_update(&consumer_data->health);
goto error;
}
+ nb_fd = ret;
+
for (i = 0; i < nb_fd; i++) {
/* Fetch once the poll data */
revents = LTTNG_POLL_GETEV(&events, i);
goto error;
}
+ /*
+ * Set the CLOEXEC flag. Return code is useless because either way, the
+ * show must go on.
+ */
+ (void) utils_set_fd_cloexec(sock);
+
health_code_update(&consumer_data->health);
DBG2("Receiving code from consumer err_sock");
health_code_update(&consumer_data->health);
- /* Update number of fd */
- nb_fd = LTTNG_POLL_GETNB(&events);
-
/* Infinite blocking call, waiting for transmission */
restart_poll:
health_poll_update(&consumer_data->health);
goto error;
}
+ nb_fd = ret;
+
for (i = 0; i < nb_fd; i++) {
/* Fetch once the poll data */
revents = LTTNG_POLL_GETEV(&events, i);
lttng_poll_clean(&events);
error_poll:
-error_listen:
if (err) {
health_error(&consumer_data->health);
ERR("Health error occurred in %s", __func__);
DBG("[thread] Manage application started");
- testpoint(thread_manage_apps);
-
rcu_register_thread();
rcu_thread_online();
+ if (testpoint(thread_manage_apps)) {
+ goto error_testpoint;
+ }
+
health_code_update(&health_thread_app_manage);
ret = create_thread_poll_set(&events, 2);
goto error;
}
- testpoint(thread_manage_apps_before_loop);
+ if (testpoint(thread_manage_apps_before_loop)) {
+ goto error;
+ }
health_code_update(&health_thread_app_manage);
while (1) {
- /* Zeroed the events structure */
- lttng_poll_reset(&events);
-
- nb_fd = LTTNG_POLL_GETNB(&events);
-
- DBG("Apps thread polling on %d fds", nb_fd);
+ DBG("Apps thread polling on %d fds", events.nb_fd);
/* Infinite blocking call, waiting for transmission */
restart:
goto error;
}
+ nb_fd = ret;
+
for (i = 0; i < nb_fd; i++) {
/* Fetch once the poll data */
revents = LTTNG_POLL_GETEV(&events, i);
ust_app_unregister(ust_cmd.sock);
} else {
/*
- * We just need here to monitor the close of the UST
- * socket and poll set monitor those by default.
- * Listen on POLLIN (even if we never expect any
- * data) to ensure that hangup wakes us.
+ * We only monitor the error events of the socket. This
+ * thread does not handle any incoming data from UST
+ * (POLLIN).
*/
- ret = lttng_poll_add(&events, ust_cmd.sock, LPOLLIN);
+ ret = lttng_poll_add(&events, ust_cmd.sock,
+ LPOLLERR & LPOLLHUP & LPOLLRDHUP);
if (ret < 0) {
goto error;
}
+ /* Set socket timeout for both receiving and sending */
+ (void) lttcomm_setsockopt_rcv_timeout(ust_cmd.sock,
+ app_socket_timeout);
+ (void) lttcomm_setsockopt_snd_timeout(ust_cmd.sock,
+ app_socket_timeout);
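+ /*
+  * The timeout value is read once from the environment in main(). Failures
+  * are deliberately ignored: the socket then simply keeps its default
+  * blocking behaviour.
+  */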
+
DBG("Apps with sock %d added to poll set",
ust_cmd.sock);
}
error:
lttng_poll_clean(&events);
error_poll_create:
+error_testpoint:
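+ /*
+  * Same sentinel pattern as the kernel pipe above: -1 tells the dispatch
+  * path (which checks apps_cmd_pipe[1] >= 0 before writing) that this
+  * thread can no longer process registrations.
+  */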
+ utils_close_pipe(apps_cmd_pipe);
+ apps_cmd_pipe[0] = apps_cmd_pipe[1] = -1;
+
+ /*
+  * We don't clean the UST app hash table here since already registered
+  * applications can still be controlled, so let them be until the session
+  * daemon dies or the applications stop.
+  */
+
if (err) {
health_error(&health_thread_app_manage);
ERR("Health error occurred in %s", __func__);
* call is blocking, so we can be assured that the data will be read
* at some point in time, or we will wait until the end of the world :)
*/
- ret = write(apps_cmd_pipe[1], ust_cmd,
- sizeof(struct ust_command));
- if (ret < 0) {
- PERROR("write apps cmd pipe");
- if (errno == EBADF) {
- /*
- * We can't inform the application thread to process
- * registration. We will exit or else application
- * registration will not occur and tracing will never
- * start.
- */
- goto error;
+ if (apps_cmd_pipe[1] >= 0) {
+ ret = write(apps_cmd_pipe[1], ust_cmd,
+ sizeof(struct ust_command));
+ if (ret < 0) {
+ PERROR("write apps cmd pipe");
+ if (errno == EBADF) {
+ /*
+ * We can't inform the application thread to process
+ * registration. We will exit or else application
+ * registration will not occur and tracing will never
+ * start.
+ */
+ goto error;
+ }
+ }
+ } else {
+ /* Application manager thread is not available. */
+ ret = close(ust_cmd->sock);
+ if (ret < 0) {
+ PERROR("close ust_cmd sock");
}
}
free(ust_cmd);
DBG("[thread] Manage application registration started");
- testpoint(thread_registration_apps);
+ if (testpoint(thread_registration_apps)) {
+ goto error_testpoint;
+ }
ret = lttcomm_listen_unix_sock(apps_sock);
if (ret < 0) {
while (1) {
DBG("Accepting application registration");
- nb_fd = LTTNG_POLL_GETNB(&events);
-
/* Infinite blocking call, waiting for transmission */
restart:
health_poll_update(&health_thread_app_reg);
goto error;
}
+ nb_fd = ret;
+
for (i = 0; i < nb_fd; i++) {
health_code_update(&health_thread_app_reg);
goto error;
}
+ /*
+ * Set the CLOEXEC flag. Return code is useless because
+ * either way, the show must go on.
+ */
+ (void) utils_set_fd_cloexec(sock);
+
/* Create UST registration command for enqueuing */
ust_cmd = zmalloc(sizeof(struct ust_command));
if (ust_cmd == NULL) {
health_error(&health_thread_app_reg);
ERR("Health error occurred in %s", __func__);
}
- health_exit(&health_thread_app_reg);
/* Notify that the registration thread is gone */
notify_ust_apps(0);
lttng_poll_clean(&events);
error_listen:
error_create_poll:
+error_testpoint:
DBG("UST Registration thread cleanup complete");
+ health_exit(&health_thread_app_reg);
return NULL;
}
static int join_consumer_thread(struct consumer_data *consumer_data)
{
void *status;
- int ret;
/* Consumer pid must be a real one. */
if (consumer_data->pid > 0) {
+ int ret;
ret = kill(consumer_data->pid, SIGTERM);
if (ret) {
ERR("Error killing consumer daemon");
{
int ret;
+ /*
+  * Set the listen() state on the socket since there is a possible race
+  * between the exec() of the consumer daemon and this call, if it were
+  * placed in the consumer thread. See bug #366 for more details.
+  */
+ ret = lttcomm_listen_unix_sock(consumer_data->err_sock);
+ if (ret < 0) {
+ goto error;
+ }
+
pthread_mutex_lock(&consumer_data->pid_mutex);
if (consumer_data->pid != 0) {
pthread_mutex_unlock(&consumer_data->pid_mutex);
return 0;
error:
+ /* Cleanup already created socket on error. */
+ if (consumer_data->err_sock >= 0) {
+ int err;
+
+ err = close(consumer_data->err_sock);
+ if (err < 0) {
+ PERROR("close consumer data error socket");
+ }
+ }
return ret;
}
switch (domain) {
case LTTNG_DOMAIN_KERNEL:
DBG3("Copying tracing session consumer output in kernel session");
+ /*
+  * XXX: We should audit the session creation and whatever "extra" work
+  * this function does, in order to avoid this destroy; the function is
+  * only used in domain session creation (kernel and UST). Same goes for
+  * the UST domain below.
+  */
+ if (session->kernel_session->consumer) {
+ consumer_destroy_output(session->kernel_session->consumer);
+ }
session->kernel_session->consumer =
consumer_copy_output(session->consumer);
/* Ease our life a bit for the next part */
break;
case LTTNG_DOMAIN_UST:
DBG3("Copying tracing session consumer output in UST session");
+ if (session->ust_session->consumer) {
+ consumer_destroy_output(session->ust_session->consumer);
+ }
session->ust_session->consumer =
consumer_copy_output(session->consumer);
/* Ease our life a bit for the next part */
case LTTNG_LIST_DOMAINS:
case LTTNG_START_TRACE:
case LTTNG_STOP_TRACE:
+ case LTTNG_DATA_PENDING:
need_domain = 0;
break;
default:
{
ret = cmd_add_context(cmd_ctx->session, cmd_ctx->lsm->domain.type,
cmd_ctx->lsm->u.context.channel_name,
- cmd_ctx->lsm->u.context.event_name,
- &cmd_ctx->lsm->u.context.ctx);
+ &cmd_ctx->lsm->u.context.ctx, kernel_poll_pipe[1]);
break;
}
case LTTNG_DISABLE_CHANNEL:
{
ret = cmd_enable_event(cmd_ctx->session, cmd_ctx->lsm->domain.type,
cmd_ctx->lsm->u.enable.channel_name,
- &cmd_ctx->lsm->u.enable.event, kernel_poll_pipe[1]);
+ &cmd_ctx->lsm->u.enable.event, NULL, kernel_poll_pipe[1]);
break;
}
case LTTNG_ENABLE_ALL_EVENT:
ret = cmd_enable_event_all(cmd_ctx->session, cmd_ctx->lsm->domain.type,
cmd_ctx->lsm->u.enable.channel_name,
- cmd_ctx->lsm->u.enable.event.type, kernel_poll_pipe[1]);
+ cmd_ctx->lsm->u.enable.event.type, NULL, kernel_poll_pipe[1]);
break;
}
case LTTNG_LIST_TRACEPOINTS:
DBG("No URIs received from client... continuing");
*sock_error = 1;
ret = LTTNG_ERR_SESSION_FAIL;
+ free(uris);
goto error;
}
ret = cmd_set_consumer_uri(cmd_ctx->lsm->domain.type, cmd_ctx->session,
nb_uri, uris);
if (ret != LTTNG_OK) {
+ free(uris);
goto error;
}
}
}
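+ /*
+  * The URI array was received from the client into heap memory;
+  * cmd_set_consumer_uri() is assumed to keep its own copy, so the buffer
+  * is freed on every path out of this handler.
+  */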
+ free(uris);
+
break;
}
case LTTNG_START_TRACE:
DBG("No URIs received from client... continuing");
*sock_error = 1;
ret = LTTNG_ERR_SESSION_FAIL;
+ free(uris);
goto error;
}
if (nb_uri == 1 && uris[0].dtype != LTTNG_DST_PATH) {
DBG("Creating session with ONE network URI is a bad call");
ret = LTTNG_ERR_SESSION_FAIL;
+ free(uris);
goto error;
}
}
ret = cmd_create_session_uri(cmd_ctx->lsm->session.name, uris, nb_uri,
&cmd_ctx->creds);
+ free(uris);
+
break;
}
case LTTNG_DESTROY_SESSION:
cmd_ctx->lsm->u.reg.path, cdata);
break;
}
- case LTTNG_SET_FILTER:
+ case LTTNG_ENABLE_EVENT_WITH_FILTER:
{
struct lttng_filter_bytecode *bytecode;
- if (cmd_ctx->lsm->u.filter.bytecode_len > LTTNG_FILTER_MAX_LEN) {
+ if (cmd_ctx->lsm->u.enable.bytecode_len > LTTNG_FILTER_MAX_LEN) {
+ ret = LTTNG_ERR_FILTER_INVAL;
+ goto error;
+ }
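+ /*
+  * A zero-length bytecode cannot even hold the bytecode header, and would
+  * turn the zmalloc() below into a zero-byte allocation; reject it up
+  * front.
+  */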
+ if (cmd_ctx->lsm->u.enable.bytecode_len == 0) {
ret = LTTNG_ERR_FILTER_INVAL;
goto error;
}
- bytecode = zmalloc(cmd_ctx->lsm->u.filter.bytecode_len);
+ bytecode = zmalloc(cmd_ctx->lsm->u.enable.bytecode_len);
if (!bytecode) {
ret = LTTNG_ERR_FILTER_NOMEM;
goto error;
/* Receive var. len. data */
DBG("Receiving var len data from client ...");
ret = lttcomm_recv_unix_sock(sock, bytecode,
- cmd_ctx->lsm->u.filter.bytecode_len);
+ cmd_ctx->lsm->u.enable.bytecode_len);
if (ret <= 0) {
DBG("Nothing recv() from client var len data... continuing");
*sock_error = 1;
}
if (bytecode->len + sizeof(*bytecode)
- != cmd_ctx->lsm->u.filter.bytecode_len) {
+ != cmd_ctx->lsm->u.enable.bytecode_len) {
free(bytecode);
ret = LTTNG_ERR_FILTER_INVAL;
goto error;
}
- ret = cmd_set_filter(cmd_ctx->session, cmd_ctx->lsm->domain.type,
- cmd_ctx->lsm->u.filter.channel_name,
- cmd_ctx->lsm->u.filter.event_name,
- bytecode);
+ ret = cmd_enable_event(cmd_ctx->session, cmd_ctx->lsm->domain.type,
+ cmd_ctx->lsm->u.enable.channel_name,
+ &cmd_ctx->lsm->u.enable.event, bytecode, kernel_poll_pipe[1]);
+ break;
+ }
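+ /*
+  * LTTNG_DATA_PENDING needs no domain (it was added to the need_domain = 0
+  * list above): it only asks whether trace data is still pending for the
+  * session.
+  */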
+ case LTTNG_DATA_PENDING:
+ {
+ ret = cmd_data_pending(cmd_ctx->session);
break;
}
default:
goto error;
}
+ /*
+ * Set the CLOEXEC flag. Return code is useless because either way, the
+ * show must go on.
+ */
+ (void) utils_set_fd_cloexec(sock);
+
ret = lttcomm_listen_unix_sock(sock);
if (ret < 0) {
goto error;
while (1) {
DBG("Health check ready");
- nb_fd = LTTNG_POLL_GETNB(&events);
-
/* Infinite blocking call, waiting for transmission */
restart:
ret = lttng_poll_wait(&events, -1);
goto error;
}
+ nb_fd = ret;
+
for (i = 0; i < nb_fd; i++) {
/* Fetch once the poll data */
revents = LTTNG_POLL_GETEV(&events, i);
goto error;
}
+ /*
+ * Set the CLOEXEC flag. Return code is useless because either way, the
+ * show must go on.
+ */
+ (void) utils_set_fd_cloexec(new_sock);
+
DBG("Receiving data from client for health...");
ret = lttcomm_recv_unix_sock(new_sock, (void *)&msg, sizeof(msg));
if (ret <= 0) {
DBG("[thread] Manage client started");
- testpoint(thread_manage_clients);
-
rcu_register_thread();
+ if (testpoint(thread_manage_clients)) {
+ goto error_testpoint;
+ }
+
health_code_update(&health_thread_cmd);
ret = lttcomm_listen_unix_sock(client_sock);
if (ret < 0) {
- goto error;
+ goto error_listen;
}
/*
*/
ret = create_thread_poll_set(&events, 2);
if (ret < 0) {
- goto error;
+ goto error_create_poll;
}
/* Add the application registration socket */
kill(ppid, SIGUSR1);
}
- testpoint(thread_manage_clients_before_loop);
+ if (testpoint(thread_manage_clients_before_loop)) {
+ goto error;
+ }
health_code_update(&health_thread_cmd);
while (1) {
DBG("Accepting client command ...");
- nb_fd = LTTNG_POLL_GETNB(&events);
-
/* Infinite blocking call, waiting for transmission */
restart:
health_poll_update(&health_thread_cmd);
goto error;
}
+ nb_fd = ret;
+
for (i = 0; i < nb_fd; i++) {
/* Fetch once the poll data */
revents = LTTNG_POLL_GETEV(&events, i);
goto error;
}
+ /*
+ * Set the CLOEXEC flag. Return code is useless because either way, the
+ * show must go on.
+ */
+ (void) utils_set_fd_cloexec(sock);
+
/* Set socket option for credentials retrieval */
ret = lttcomm_setsockopt_creds_unix_sock(sock);
if (ret < 0) {
exit:
error:
- if (err) {
- health_error(&health_thread_cmd);
- ERR("Health error occurred in %s", __func__);
+ if (sock >= 0) {
+ ret = close(sock);
+ if (ret) {
+ PERROR("close");
+ }
}
- health_exit(&health_thread_cmd);
- DBG("Client thread dying");
+ lttng_poll_clean(&events);
+ clean_command_ctx(&cmd_ctx);
+
+error_listen:
+error_create_poll:
+error_testpoint:
unlink(client_unix_sock_path);
if (client_sock >= 0) {
ret = close(client_sock);
PERROR("close");
}
}
- if (sock >= 0) {
- ret = close(sock);
- if (ret) {
- PERROR("close");
- }
+
+ if (err) {
+ health_error(&health_thread_cmd);
+ ERR("Health error occurred in %s", __func__);
}
- lttng_poll_clean(&events);
- clean_command_ctx(&cmd_ctx);
+ health_exit(&health_thread_cmd);
+
+ DBG("Client thread dying");
rcu_unregister_thread();
return NULL;
goto end;
}
+ /* Set the cloexec flag */
+ ret = utils_set_fd_cloexec(client_sock);
+ if (ret < 0) {
+ ERR("Unable to set CLOEXEC flag to the client Unix socket (fd: %d). "
+ "Continuing but note that the consumer daemon will have a "
+ "reference to this socket on exec()", client_sock);
+ }
+
/* File permission MUST be 660 */
ret = chmod(client_unix_sock_path, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
if (ret < 0) {
goto end;
}
+ /* Set the cloexec flag */
+ ret = utils_set_fd_cloexec(apps_sock);
+ if (ret < 0) {
+ ERR("Unable to set CLOEXEC flag to the app Unix socket (fd: %d). "
+ "Continuing but note that the consumer daemon will have a "
+ "reference to this socket on exec()", apps_sock);
+ }
+
/* File permission MUST be 666 */
ret = chmod(apps_unix_sock_path,
S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH);
goto end;
}
+ DBG3("Session daemon client socket %d and application socket %d created",
+ client_sock, apps_sock);
+
end:
umask(old_umask);
return ret;
{
int ret = 0;
void *status;
- const char *home_path;
+ const char *home_path, *env_app_timeout;
init_kernel_workarounds();
/* Parse arguments */
progname = argv[0];
- if ((ret = parse_args(argc, argv) < 0)) {
+ if ((ret = parse_args(argc, argv)) < 0) {
goto error;
}
}
/* Setup the kernel pipe for waking up the kernel thread */
- if ((ret = utils_create_pipe_cloexec(kernel_poll_pipe)) < 0) {
- goto exit;
+ if (is_root && !opt_no_kernel) {
+ if ((ret = utils_create_pipe_cloexec(kernel_poll_pipe)) < 0) {
+ goto exit;
+ }
}
/* Setup the thread apps communication pipe. */
/*
* Init health counters of the consumer thread. We do a quick hack here so
* that the state of the consumer health is fine even if the thread is not
- * started. This is simply to ease our life and has no cost what so ever.
+ * started. Once the thread starts, the health state is updated with a poll
+ * value to set a health code path. This is simply to ease our life and has
+ * no cost whatsoever.
*/
health_init(&kconsumer_data.health);
health_poll_update(&kconsumer_data.health);
health_init(&ustconsumer64_data.health);
health_poll_update(&ustconsumer64_data.health);
+ /* Check for the application socket timeout env variable. */
+ env_app_timeout = getenv(DEFAULT_APP_SOCKET_TIMEOUT_ENV);
+ if (env_app_timeout) {
+ app_socket_timeout = atoi(env_app_timeout);
+ } else {
+ app_socket_timeout = DEFAULT_APP_SOCKET_RW_TIMEOUT;
+ }
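+ /*
+  * Note that atoi() returns 0 on unparsable input; assuming the lttcomm
+  * wrappers map onto SO_RCVTIMEO/SO_SNDTIMEO, a zero timeout leaves the
+  * sockets blocking.
+  */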
+
+ /* Create thread to manage the health check socket */
ret = pthread_create(&health_thread, NULL,
thread_manage_health, (void *) NULL);
goto exit_apps;
}
- /* Create kernel thread to manage kernel event */
- ret = pthread_create(&kernel_thread, NULL,
- thread_manage_kernel, (void *) NULL);
- if (ret != 0) {
- PERROR("pthread_create kernel");
- goto exit_kernel;
- }
+ /* Don't start this thread if kernel tracing was not requested or we are not root */
+ if (is_root && !opt_no_kernel) {
+ /* Create kernel thread to manage kernel event */
+ ret = pthread_create(&kernel_thread, NULL,
+ thread_manage_kernel, (void *) NULL);
+ if (ret != 0) {
+ PERROR("pthread_create kernel");
+ goto exit_kernel;
+ }
- ret = pthread_join(kernel_thread, &status);
- if (ret != 0) {
- PERROR("pthread_join");
- goto error; /* join error, exit without cleanup */
+ ret = pthread_join(kernel_thread, &status);
+ if (ret != 0) {
+ PERROR("pthread_join");
+ goto error; /* join error, exit without cleanup */
+ }
}
exit_kernel:
goto error; /* join error, exit without cleanup */
}
+ ret = join_consumer_thread(&ustconsumer32_data);
+ if (ret != 0) {
+ PERROR("join_consumer ust32");
+ goto error; /* join error, exit without cleanup */
+ }
+
+ ret = join_consumer_thread(&ustconsumer64_data);
+ if (ret != 0) {
+ PERROR("join_consumer ust64");
+ goto error; /* join error, exit without cleanup */
+ }
+
exit_client:
+ ret = pthread_join(health_thread, &status);
+ if (ret != 0) {
+ PERROR("pthread_join health thread");
+ goto error; /* join error, exit without cleanup */
+ }
+
exit_health:
exit:
/*