#include "agent-thread.h"
#include "save.h"
#include "load-session-thread.h"
+#include "syscall.h"
#define CONSUMERD_FILE "lttng-consumerd"
{ "config", 1, 0, 'f' },
{ "load", 1, 0, 'l' },
{ "kmod-probes", 1, 0, 'P' },
+ { "extra-kmod-probes", 1, 0, 'e' },
{ NULL, 0, 0, 0 }
};
}
}
+ DBG("Cleaning up all agent apps");
+ agent_app_ht_clean();
+
DBG("Closing all UST sockets");
ust_app_clean_list();
buffer_reg_destroy_registries();
}
DBG("Unloading kernel modules");
modprobe_remove_lttng_all();
+ free(syscall_table);
}
close_consumer_sockets();
update_poll_flag = 0;
}
- DBG("Thread kernel polling on %d fds", LTTNG_POLL_GETNB(&events));
+ DBG("Thread kernel polling");
/* Poll infinite value of time */
restart:
health_poll_entry();
ret = lttng_poll_wait(&events, -1);
+ DBG("Thread kernel return from poll on %d fds",
+ LTTNG_POLL_GETNB(&events));
health_poll_exit();
if (ret < 0) {
/*
health_code_update();
+ if (!revents) {
+ /* No activity for this FD (poll implementation). */
+ continue;
+ }
+
/* Thread quit pipe has been closed. Killing thread. */
ret = sessiond_check_thread_quit_pipe(pollfd, revents);
if (ret) {
}
/* Check for data on kernel pipe */
- if (pollfd == kernel_poll_pipe[0] && (revents & LPOLLIN)) {
- (void) lttng_read(kernel_poll_pipe[0],
- &tmp, 1);
- /*
- * Ret value is useless here, if this pipe gets any actions an
- * update is required anyway.
- */
- update_poll_flag = 1;
- continue;
- } else {
- /*
- * New CPU detected by the kernel. Adding kernel stream to
- * kernel session and updating the kernel consumer
- */
- if (revents & LPOLLIN) {
+ if (revents & LPOLLIN) {
+ if (pollfd == kernel_poll_pipe[0]) {
+ (void) lttng_read(kernel_poll_pipe[0],
+ &tmp, 1);
+ /*
+ * Ret value is useless here, if this pipe gets any actions an
+ * update is required anyway.
+ */
+ update_poll_flag = 1;
+ continue;
+ } else {
+ /*
+ * New CPU detected by the kernel. Adding kernel stream to
+ * kernel session and updating the kernel consumer
+ */
ret = update_kernel_stream(&kconsumer_data, pollfd);
if (ret < 0) {
continue;
}
break;
- /*
- * TODO: We might want to handle the LPOLLERR | LPOLLHUP
- * and unregister kernel stream at this point.
- */
}
+ } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
+ update_poll_flag = 1;
+ continue;
+ } else {
+ ERR("Unexpected poll events %u for sock %d", revents, pollfd);
+ goto error;
}
}
}
*/
static void *thread_manage_consumer(void *data)
{
- int sock = -1, i, ret, pollfd, err = -1;
+ int sock = -1, i, ret, pollfd, err = -1, should_quit = 0;
uint32_t revents, nb_fd;
enum lttcomm_return_code code;
struct lttng_poll_event events;
DBG("[thread] Manage consumer started");
+ rcu_register_thread();
+ rcu_thread_online();
+
health_register(health_sessiond, HEALTH_SESSIOND_TYPE_CONSUMER);
health_code_update();
health_code_update();
+ if (!revents) {
+ /* No activity for this FD (poll implementation). */
+ continue;
+ }
+
/* Thread quit pipe has been closed. Killing thread. */
ret = sessiond_check_thread_quit_pipe(pollfd, revents);
if (ret) {
/* Event on the registration socket */
if (pollfd == consumer_data->err_sock) {
- if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
+ if (revents & LPOLLIN) {
+ continue;
+ } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
ERR("consumer err socket poll error");
goto error;
+ } else {
+ ERR("Unexpected poll events %u for sock %d", revents, pollfd);
+ goto error;
}
}
}
/* Infinite blocking call, waiting for transmission */
restart_poll:
while (1) {
+ health_code_update();
+
+ /* Exit the thread because the thread quit pipe has been triggered. */
+ if (should_quit) {
+ /* Not a health error. */
+ err = 0;
+ goto exit;
+ }
+
health_poll_entry();
ret = lttng_poll_wait(&events, -1);
health_poll_exit();
health_code_update();
- /* Thread quit pipe has been closed. Killing thread. */
- ret = sessiond_check_thread_quit_pipe(pollfd, revents);
- if (ret) {
- err = 0;
- goto exit;
+ if (!revents) {
+ /* No activity for this FD (poll implementation). */
+ continue;
}
+ /*
+ * Thread quit pipe has been triggered, flag that we should stop
+ * but continue the current loop to handle potential data from
+ * consumer.
+ */
+ should_quit = sessiond_check_thread_quit_pipe(pollfd, revents);
+
if (pollfd == sock) {
/* Event on the consumerd socket */
- if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
+ if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)
+ && !(revents & LPOLLIN)) {
ERR("consumer err socket second poll error");
goto error;
}
goto exit;
} else if (pollfd == consumer_data->metadata_fd) {
+ if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)
+ && !(revents & LPOLLIN)) {
+ ERR("consumer err metadata socket second poll error");
+ goto error;
+ }
/* UST metadata requests */
ret = ust_consumer_metadata_request(
&consumer_data->metadata_sock);
ERR("Handling metadata request");
goto error;
}
- break;
- } else {
- ERR("Unknown pollfd");
- goto error;
}
+ /* No need for an else branch; all FDs are tested above. */
}
health_code_update();
}
health_unregister(health_sessiond);
DBG("consumer thread cleanup completed");
+ rcu_thread_offline();
+ rcu_unregister_thread();
+
return NULL;
}
health_code_update();
while (1) {
- DBG("Apps thread polling on %d fds", LTTNG_POLL_GETNB(&events));
+ DBG("Apps thread polling");
/* Infinite blocking call, waiting for transmission */
restart:
health_poll_entry();
ret = lttng_poll_wait(&events, -1);
+ DBG("Apps thread return from poll on %d fds",
+ LTTNG_POLL_GETNB(&events));
health_poll_exit();
if (ret < 0) {
/*
health_code_update();
+ if (!revents) {
+ /* No activity for this FD (poll implementation). */
+ continue;
+ }
+
/* Thread quit pipe has been closed. Killing thread. */
ret = sessiond_check_thread_quit_pipe(pollfd, revents);
if (ret) {
/* Inspect the apps cmd pipe */
if (pollfd == apps_cmd_pipe[0]) {
- if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
- ERR("Apps command pipe error");
- goto error;
- } else if (revents & LPOLLIN) {
+ if (revents & LPOLLIN) {
int sock;
/* Empty pipe */
health_code_update();
/*
- * We only monitor the error events of the socket. This
- * thread does not handle any incoming data from UST
- * (POLLIN).
+ * Since this is a command socket (write then read),
+ * we only monitor the error events of the socket.
*/
ret = lttng_poll_add(&events, sock,
LPOLLERR | LPOLLHUP | LPOLLRDHUP);
}
DBG("Apps with sock %d added to poll set", sock);
+ } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
+ ERR("Apps command pipe error");
+ goto error;
+ } else {
+ ERR("Unknown poll events %u for sock %d", revents, pollfd);
+ goto error;
}
} else {
/*
/* Socket closed on remote end. */
ust_app_unregister(pollfd);
+ } else {
+ ERR("Unexpected poll events %u for sock %d", revents, pollfd);
+ goto error;
}
}
uint32_t revents = LTTNG_POLL_GETEV(&events, i);
int pollfd = LTTNG_POLL_GETFD(&events, i);
+ if (!revents) {
+ /* No activity for this FD (poll implementation). */
+ continue;
+ }
+
cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
&wait_queue->head, head) {
if (pollfd == wait_node->app->sock &&
ust_app_destroy(wait_node->app);
free(wait_node);
break;
+ } else {
+ ERR("Unexpected poll events %u for sock %d", revents, pollfd);
+ goto error;
}
}
}
* Don't care about return value. Let the manage apps threads
* handle app unregistration upon socket close.
*/
- (void) ust_app_register_done(app->sock);
+ (void) ust_app_register_done(app);
/*
* Even if the application socket has been closed, send the app
free(wait_node);
}
+ /* Empty command queue. */
+ for (;;) {
+ /* Dequeue command for registration */
+ node = cds_wfcq_dequeue_blocking(&ust_cmd_queue.head, &ust_cmd_queue.tail);
+ if (node == NULL) {
+ break;
+ }
+ ust_cmd = caa_container_of(node, struct ust_command, node);
+ ret = close(ust_cmd->sock);
+ if (ret < 0) {
+ PERROR("close ust sock exit dispatch %d", ust_cmd->sock);
+ }
+ lttng_fd_put(LTTNG_FD_APPS, 1);
+ free(ust_cmd);
+ }
+
error_testpoint:
DBG("Dispatch thread dying");
if (err) {
revents = LTTNG_POLL_GETEV(&events, i);
pollfd = LTTNG_POLL_GETFD(&events, i);
+ if (!revents) {
+ /* No activity for this FD (poll implementation). */
+ continue;
+ }
+
/* Thread quit pipe has been closed. Killing thread. */
ret = sessiond_check_thread_quit_pipe(pollfd, revents);
if (ret) {
/* Event on the registration socket */
if (pollfd == apps_sock) {
- if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
- ERR("Register apps socket poll error");
- goto error;
- } else if (revents & LPOLLIN) {
+ if (revents & LPOLLIN) {
sock = lttcomm_accept_unix_sock(apps_sock);
if (sock < 0) {
goto error;
ust_cmd = zmalloc(sizeof(struct ust_command));
if (ust_cmd == NULL) {
PERROR("ust command zmalloc");
+ ret = close(sock);
+ if (ret) {
+ PERROR("close");
+ }
goto error;
}
* barrier with the exchange in cds_wfcq_enqueue.
*/
futex_nto1_wake(&ust_cmd_queue.futex);
+ } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
+ ERR("Register apps socket poll error");
+ goto error;
+ } else {
+ ERR("Unexpected poll events %u for sock %d", revents, pollfd);
+ goto error;
}
}
}
* domain.
*/
if (session->kernel_session->consumer) {
- consumer_destroy_output(session->kernel_session->consumer);
+ consumer_output_put(session->kernel_session->consumer);
}
session->kernel_session->consumer =
consumer_copy_output(session->consumer);
dir_name = DEFAULT_KERNEL_TRACE_DIR;
break;
case LTTNG_DOMAIN_JUL:
+ case LTTNG_DOMAIN_LOG4J:
case LTTNG_DOMAIN_UST:
DBG3("Copying tracing session consumer output in UST session");
if (session->ust_session->consumer) {
- consumer_destroy_output(session->ust_session->consumer);
+ consumer_output_put(session->ust_session->consumer);
}
session->ust_session->consumer =
consumer_copy_output(session->consumer);
switch (domain->type) {
case LTTNG_DOMAIN_JUL:
+ case LTTNG_DOMAIN_LOG4J:
case LTTNG_DOMAIN_UST:
break;
default:
session->kernel_session->consumer->dst.trace_path,
S_IRWXU | S_IRWXG, session->uid, session->gid);
if (ret < 0) {
- if (ret != -EEXIST) {
+ if (errno != EEXIST) {
ERR("Trace directory creation error");
goto error;
}
case LTTNG_LIST_DOMAINS:
case LTTNG_LIST_CHANNELS:
case LTTNG_LIST_EVENTS:
+ case LTTNG_LIST_SYSCALLS:
break;
default:
/* Setup lttng message with no payload */
case LTTNG_CALIBRATE:
case LTTNG_LIST_SESSIONS:
case LTTNG_LIST_TRACEPOINTS:
+ case LTTNG_LIST_SYSCALLS:
case LTTNG_LIST_TRACEPOINT_FIELDS:
case LTTNG_SAVE_SESSION:
need_tracing_session = 0;
switch (cmd_ctx->lsm->cmd_type) {
case LTTNG_DISABLE_CHANNEL:
case LTTNG_DISABLE_EVENT:
- case LTTNG_DISABLE_ALL_EVENT:
switch (cmd_ctx->lsm->domain.type) {
case LTTNG_DOMAIN_KERNEL:
if (!cmd_ctx->session->kernel_session) {
}
break;
case LTTNG_DOMAIN_JUL:
+ case LTTNG_DOMAIN_LOG4J:
case LTTNG_DOMAIN_UST:
if (!cmd_ctx->session->ust_session) {
ret = LTTNG_ERR_NO_CHANNEL;
break;
case LTTNG_DOMAIN_JUL:
+ case LTTNG_DOMAIN_LOG4J:
case LTTNG_DOMAIN_UST:
{
if (!ust_app_supported()) {
cmd_ctx->lsm->cmd_type == LTTNG_STOP_TRACE) {
switch (cmd_ctx->lsm->domain.type) {
case LTTNG_DOMAIN_JUL:
+ case LTTNG_DOMAIN_LOG4J:
case LTTNG_DOMAIN_UST:
if (uatomic_read(&ust_consumerd_state) != CONSUMER_STARTED) {
ret = LTTNG_ERR_NO_USTCONSUMERD;
}
case LTTNG_DISABLE_EVENT:
{
+
+ /*
+ * FIXME: handle filter; for now we just receive the filter's
+ * bytecode along with the filter expression which are sent by
+ * liblttng-ctl and discard them.
+ *
+ * This fixes an issue where the client may block while sending
+ * the filter payload and encounter an error because the session
+ * daemon closes the socket without ever handling this data.
+ */
+ size_t count = cmd_ctx->lsm->u.disable.expression_len +
+ cmd_ctx->lsm->u.disable.bytecode_len;
+
+ if (count) {
+ char data[LTTNG_FILTER_MAX_LEN];
+
+ DBG("Discarding disable event command payload of size %zu", count);
+ while (count) {
+ ret = lttcomm_recv_unix_sock(sock, data,
+ count > sizeof(data) ? sizeof(data) : count);
+ if (ret < 0) {
+ goto error;
+ }
+
+ count -= (size_t) ret;
+ }
+ }
+ /* FIXME: passing packed structure to non-packed pointer */
ret = cmd_disable_event(cmd_ctx->session, cmd_ctx->lsm->domain.type,
cmd_ctx->lsm->u.disable.channel_name,
- cmd_ctx->lsm->u.disable.name);
- break;
- }
- case LTTNG_DISABLE_ALL_EVENT:
- {
- DBG("Disabling all events");
-
- ret = cmd_disable_event_all(cmd_ctx->session, cmd_ctx->lsm->domain.type,
- cmd_ctx->lsm->u.disable.channel_name);
+ &cmd_ctx->lsm->u.disable.event);
break;
}
case LTTNG_ENABLE_CHANNEL:
if (bytecode_len > LTTNG_FILTER_MAX_LEN) {
ret = LTTNG_ERR_FILTER_INVAL;
+ free(filter_expression);
free(exclusion);
goto error;
}
bytecode = zmalloc(bytecode_len);
if (!bytecode) {
+ free(filter_expression);
free(exclusion);
ret = LTTNG_ERR_FILTER_NOMEM;
goto error;
if (ret <= 0) {
DBG("Nothing recv() from client car len data... continuing");
*sock_error = 1;
+ free(filter_expression);
free(bytecode);
free(exclusion);
ret = LTTNG_ERR_FILTER_INVAL;
}
if ((bytecode->len + sizeof(*bytecode)) != bytecode_len) {
+ free(filter_expression);
free(bytecode);
free(exclusion);
ret = LTTNG_ERR_FILTER_INVAL;
kernel_poll_pipe[1]);
break;
}
- case LTTNG_ENABLE_ALL_EVENT:
- {
- DBG("Enabling all events");
-
- ret = cmd_enable_event_all(cmd_ctx->session, &cmd_ctx->lsm->domain,
- cmd_ctx->lsm->u.enable.channel_name,
- cmd_ctx->lsm->u.enable.event.type, NULL, NULL,
- kernel_poll_pipe[1]);
- break;
- }
case LTTNG_LIST_TRACEPOINTS:
{
struct lttng_event *events;
ret = LTTNG_OK;
break;
}
+ case LTTNG_LIST_SYSCALLS:
+ {
+ struct lttng_event *events;
+ ssize_t nb_events;
+
+ nb_events = cmd_list_syscalls(&events);
+ if (nb_events < 0) {
+ /* Return value is a negative lttng_error_code. */
+ ret = -nb_events;
+ goto error;
+ }
+
+ /*
+ * Setup lttng message with payload size set to the event list size in
+ * bytes and then copy list into the llm payload.
+ */
+ ret = setup_lttng_msg(cmd_ctx, sizeof(struct lttng_event) * nb_events);
+ if (ret < 0) {
+ free(events);
+ goto setup_error;
+ }
+
+ /* Copy event list into message payload */
+ memcpy(cmd_ctx->llm->payload, events,
+ sizeof(struct lttng_event) * nb_events);
+
+ free(events);
+
+ ret = LTTNG_OK;
+ break;
+ }
case LTTNG_SET_CONSUMER_URI:
{
size_t nb_uri, len;
goto error;
}
- ret = cmd_set_consumer_uri(cmd_ctx->lsm->domain.type, cmd_ctx->session,
- nb_uri, uris);
+ ret = cmd_set_consumer_uri(cmd_ctx->session, nb_uri, uris);
+ free(uris);
if (ret != LTTNG_OK) {
- free(uris);
goto error;
}
- /*
- * XXX: 0 means that this URI should be applied on the session. Should
- * be a DOMAIN enuam.
- */
- if (cmd_ctx->lsm->domain.type == 0) {
- /* Add the URI for the UST session if a consumer is present. */
- if (cmd_ctx->session->ust_session &&
- cmd_ctx->session->ust_session->consumer) {
- ret = cmd_set_consumer_uri(LTTNG_DOMAIN_UST, cmd_ctx->session,
- nb_uri, uris);
- } else if (cmd_ctx->session->kernel_session &&
- cmd_ctx->session->kernel_session->consumer) {
- ret = cmd_set_consumer_uri(LTTNG_DOMAIN_KERNEL,
- cmd_ctx->session, nb_uri, uris);
- }
- }
-
- free(uris);
break;
}
case LTTNG_LIST_CHANNELS:
{
int nb_chan;
- struct lttng_channel *channels;
+ struct lttng_channel *channels = NULL;
nb_chan = cmd_list_channels(cmd_ctx->lsm->domain.type,
cmd_ctx->session, &channels);
revents = LTTNG_POLL_GETEV(&events, i);
pollfd = LTTNG_POLL_GETFD(&events, i);
+ if (!revents) {
+ /* No activity for this FD (poll implementation). */
+ continue;
+ }
+
/* Thread quit pipe has been closed. Killing thread. */
ret = sessiond_check_thread_quit_pipe(pollfd, revents);
if (ret) {
/* Event on the registration socket */
if (pollfd == sock) {
- if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
+ if (revents & LPOLLIN) {
+ continue;
+ } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
ERR("Health socket poll error");
goto error;
+ } else {
+ ERR("Unexpected poll events %u for sock %d", revents, pollfd);
+ goto error;
}
}
}
health_code_update();
+ if (!revents) {
+ /* No activity for this FD (poll implementation). */
+ continue;
+ }
+
/* Thread quit pipe has been closed. Killing thread. */
ret = sessiond_check_thread_quit_pipe(pollfd, revents);
if (ret) {
/* Event on the registration socket */
if (pollfd == client_sock) {
- if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
+ if (revents & LPOLLIN) {
+ continue;
+ } else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
ERR("Client socket poll error");
goto error;
+ } else {
+ ERR("Unexpected poll events %u for sock %d", revents, pollfd);
+ goto error;
}
}
}
fprintf(stderr, " --verbose-consumer Verbose mode for consumer. Activate DBG() macro.\n");
fprintf(stderr, " --no-kernel Disable kernel tracer\n");
fprintf(stderr, " --agent-tcp-port Agent registration TCP port\n");
- fprintf(stderr, " -f --config Load daemon configuration file\n");
+ fprintf(stderr, " -f --config PATH Load daemon configuration file\n");
fprintf(stderr, " -l --load PATH Load session configuration\n");
fprintf(stderr, " --kmod-probes Specify kernel module probes to load\n");
+ fprintf(stderr, " --extra-kmod-probes Specify extra kernel module probes to load\n");
}
/*
{
int ret = 0;
+ if (arg && arg[0] == '\0') {
+ /*
+ * This only happens if the value is read from daemon config
+ * file. This means the option requires an argument and the
+ * configuration file contains a line such as:
+ * my_option =
+ */
+ ret = -EINVAL;
+ goto end;
+ }
+
switch (opt) {
case 0:
fprintf(stderr, "option %s", optname);
ret = -ENOMEM;
}
break;
+ case 'e':
+ free(kmod_extra_probes_list);
+ kmod_extra_probes_list = strdup(arg);
+ if (!kmod_extra_probes_list) {
+ perror("strdup");
+ ret = -ENOMEM;
+ }
+ break;
case 'f':
/* This is handled in set_options() thus silent break. */
break;
ret = -1;
}
+end:
+ if (ret == -EINVAL) {
+ const char *opt_name = "unknown";
+ int i;
+
+ for (i = 0; i < sizeof(long_options) / sizeof(struct option);
+ i++) {
+ if (opt == long_options[i].val) {
+ opt_name = long_options[i].name;
+ break;
+ }
+ }
+
+ WARN("Invalid argument provided for option \"%s\", using default value.",
+ opt_name);
+ }
+
return ret;
}
void *status;
const char *home_path, *env_app_timeout;
+ /* Initialize agent apps ht global variable */
+ agent_apps_ht_by_sock = NULL;
+
init_kernel_workarounds();
rcu_register_thread();
if (is_root) {
rundir = strdup(DEFAULT_LTTNG_RUNDIR);
+ if (!rundir) {
+ ret = -ENOMEM;
+ goto error;
+ }
/* Create global run dir with root access */
ret = create_lttng_rundir(rundir);
goto error;
}
+ /* After this point, we can safely call cleanup() with "goto exit" */
+
/*
* Init UST app hash table. Alloc hash table before this point since
* cleanup() can get called after that point.
*/
ust_app_ht_alloc();
- /* Initialize agent domain subsystem. */
- if ((ret = agent_setup()) < 0) {
- /* ENOMEM at this point. */
- goto error;
+ /*
+ * Initialize agent app hash table. We allocate the hash table here
+ * since cleanup() can get called after this point.
+ */
+ if (agent_app_ht_alloc()) {
+ ERR("Failed to allocate Agent app hash table");
+ ret = -1;
+ goto exit;
}
- /* After this point, we can safely call cleanup() with "goto exit" */
-
/*
* These actions must be executed as root. We do that *after* setting up
* the sockets path because we MUST make the check for another daemon using
/* Setup kernel tracer */
if (!opt_no_kernel) {
init_kernel_tracer();
+ if (kernel_tracer_fd >= 0) {
+ ret = syscall_init_table();
+ if (ret < 0) {
+ ERR("Unable to populate syscall table. Syscall tracing"
+ " won't work for this session daemon.");
+ }
+ }
}
/* Set ulimit for open files */
cleanup();
rcu_thread_offline();
rcu_unregister_thread();
+ rcu_barrier();
if (!ret) {
exit(EXIT_SUCCESS);
}