/*
* Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
+ * Copyright (C) 2016 - Jérémie Galarneau <jeremie.galarneau@efficios.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2 only,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
-#define _GNU_SOURCE
#define _LGPL_SOURCE
#include <errno.h>
#include <inttypes.h>
#include "ust-consumer.h"
#include "ust-ctl.h"
#include "utils.h"
+#include "session.h"
+#include "lttng-sessiond.h"
+#include "notification-thread-commands.h"
+#include "rotate.h"
static
int ust_app_flush_app_session(struct ust_app *app, struct ust_app_session *ua_sess);
attr->switch_timer_interval = uattr->switch_timer_interval;
attr->read_timer_interval = uattr->read_timer_interval;
attr->output = uattr->output;
+ attr->blocking_timeout = uattr->u.s.blocking_timeout;
}
/*
{
struct ust_app_event *event;
const struct ust_app_ht_key *key;
+ int ev_loglevel_value;
assert(node);
assert(_key);
event = caa_container_of(node, struct ust_app_event, node.node);
key = _key;
+ ev_loglevel_value = event->attr.loglevel;
/* Match the 4 elements of the key: name, filter, loglevel, exclusions */
}
/* Event loglevel. */
- if (event->attr.loglevel != key->loglevel) {
+ if (ev_loglevel_value != key->loglevel_type) {
if (event->attr.loglevel_type == LTTNG_UST_LOGLEVEL_ALL
- && key->loglevel == 0 && event->attr.loglevel == -1) {
+ && key->loglevel_type == 0 &&
+ ev_loglevel_value == -1) {
/*
* Match is accepted. This is because on event creation, the
* loglevel is set to -1 if the event loglevel type is ALL so 0 and
ht = ua_chan->events;
key.name = event->attr.name;
key.filter = event->filter;
- key.loglevel = event->attr.loglevel;
+ key.loglevel_type = event->attr.loglevel;
key.exclusion = event->exclusion;
node_ptr = cds_lfht_add_unique(ht->ht,
* this function.
*/
static
-void delete_ust_app_ctx(int sock, struct ust_app_ctx *ua_ctx)
+void delete_ust_app_ctx(int sock, struct ust_app_ctx *ua_ctx,
+ struct ust_app *app)
{
int ret;
assert(ua_ctx);
if (ua_ctx->obj) {
+ pthread_mutex_lock(&app->sock_lock);
ret = ustctl_release_object(sock, ua_ctx->obj);
+ pthread_mutex_unlock(&app->sock_lock);
if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
ERR("UST app sock %d release ctx obj handle %d failed with ret %d",
sock, ua_ctx->obj->handle, ret);
* this function.
*/
static
-void delete_ust_app_event(int sock, struct ust_app_event *ua_event)
+void delete_ust_app_event(int sock, struct ust_app_event *ua_event,
+ struct ust_app *app)
{
int ret;
if (ua_event->exclusion != NULL)
free(ua_event->exclusion);
if (ua_event->obj != NULL) {
+ pthread_mutex_lock(&app->sock_lock);
ret = ustctl_release_object(sock, ua_event->obj);
+ pthread_mutex_unlock(&app->sock_lock);
if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
ERR("UST app sock %d release event obj failed with ret %d",
sock, ret);
*
* Return 0 on success or else a negative value.
*/
-static int release_ust_app_stream(int sock, struct ust_app_stream *stream)
+static int release_ust_app_stream(int sock, struct ust_app_stream *stream,
+ struct ust_app *app)
{
int ret = 0;
assert(stream);
if (stream->obj) {
+ pthread_mutex_lock(&app->sock_lock);
ret = ustctl_release_object(sock, stream->obj);
+ pthread_mutex_unlock(&app->sock_lock);
if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
ERR("UST app sock %d release stream obj failed with ret %d",
sock, ret);
* this function.
*/
static
-void delete_ust_app_stream(int sock, struct ust_app_stream *stream)
+void delete_ust_app_stream(int sock, struct ust_app_stream *stream,
+ struct ust_app *app)
{
assert(stream);
- (void) release_ust_app_stream(sock, stream);
+ (void) release_ust_app_stream(sock, stream, app);
free(stream);
}
free(ua_chan);
}
+/*
+ * Extract the lost packets or discarded events counters when the channel is
+ * being deleted and store the values in the parent channel so we can
+ * access them from lttng list and at stop/destroy.
+ *
+ * The session list lock must be held by the caller.
+ */
+static
+void save_per_pid_lost_discarded_counters(struct ust_app_channel *ua_chan)
+{
+ uint64_t discarded = 0, lost = 0;
+ struct ltt_session *session;
+ struct ltt_ust_channel *uchan;
+
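+ /* Only per-cpu channels accumulate lost/discarded counters. */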
+ if (ua_chan->attr.type != LTTNG_UST_CHAN_PER_CPU) {
+ return;
+ }
+
+ rcu_read_lock();
+ session = session_find_by_id(ua_chan->session->tracing_id);
+ if (!session || !session->ust_session) {
+ /*
+ * Not finding the session is not an error because there are
+ * multiple ways the channels can be torn down.
+ *
+ * 1) The session daemon can initiate the destruction of the
+ * ust app session after receiving a destroy command or
+ * during its shutdown/teardown.
+ * 2) The application, since we are in per-pid tracing, is
+ * unregistering and tearing down its ust app session.
+ *
+ * Both paths are protected by the session list lock which
+ * ensures that the accounting of lost packets and discarded
+ * events is done exactly once. The session is then unpublished
+ * from the session list, resulting in this condition.
+ */
+ goto end;
+ }
+
+ if (ua_chan->attr.overwrite) {
+ consumer_get_lost_packets(ua_chan->session->tracing_id,
+ ua_chan->key, session->ust_session->consumer,
+ &lost);
+ } else {
+ consumer_get_discarded_events(ua_chan->session->tracing_id,
+ ua_chan->key, session->ust_session->consumer,
+ &discarded);
+ }
+ uchan = trace_ust_find_channel_by_name(
+ session->ust_session->domain_global.channels,
+ ua_chan->name);
+ if (!uchan) {
+ ERR("Missing UST channel to store discarded counters");
+ goto end;
+ }
+
+ uchan->per_pid_closed_app_discarded += discarded;
+ uchan->per_pid_closed_app_lost += lost;
+
+end:
+ rcu_read_unlock();
+ if (session) {
+ session_put(session);
+ }
+}
+
/*
* Delete ust app channel safely. RCU read lock must be held before calling
* this function.
+ *
+ * The session list lock must be held by the caller.
*/
static
void delete_ust_app_channel(int sock, struct ust_app_channel *ua_chan,
/* Wipe stream */
cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
cds_list_del(&stream->list);
- delete_ust_app_stream(sock, stream);
+ delete_ust_app_stream(sock, stream, app);
}
/* Wipe context */
cds_list_del(&ua_ctx->list);
ret = lttng_ht_del(ua_chan->ctx, &iter);
assert(!ret);
- delete_ust_app_ctx(sock, ua_ctx);
+ delete_ust_app_ctx(sock, ua_ctx, app);
}
/* Wipe events */
node.node) {
ret = lttng_ht_del(ua_chan->events, &iter);
assert(!ret);
- delete_ust_app_event(sock, ua_event);
+ delete_ust_app_event(sock, ua_event, app);
}
if (ua_chan->session->buffer_type == LTTNG_BUFFER_PER_PID) {
/* Wipe and free registry from session registry. */
registry = get_session_registry(ua_chan->session);
if (registry) {
- ust_registry_channel_del_free(registry, ua_chan->key);
+ ust_registry_channel_del_free(registry, ua_chan->key,
+ sock >= 0);
+ }
+ /*
+ * A negative socket can be used by the caller when
+ * cleaning up a ua_chan in an error path. Skip the
+ * accounting in this case.
+ */
+ if (sock >= 0) {
+ save_per_pid_lost_discarded_counters(ua_chan);
}
}
iter.iter.node = &ua_chan->ust_objd_node.node;
ret = lttng_ht_del(app->ust_objd, &iter);
assert(!ret);
+ pthread_mutex_lock(&app->sock_lock);
ret = ustctl_release_object(sock, ua_chan->obj);
+ pthread_mutex_unlock(&app->sock_lock);
if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
ERR("UST app sock %d release channel obj failed with ret %d",
sock, ret);
call_rcu(&ua_chan->rcu_head, delete_ust_app_channel_rcu);
}
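+/*
+ * Complete an application's registration by sending the register done
+ * command to its socket, with the per-application socket lock held.
+ */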
+int ust_app_register_done(struct ust_app *app)
+{
+ int ret;
+
+ pthread_mutex_lock(&app->sock_lock);
+ ret = ustctl_register_done(app->sock);
+ pthread_mutex_unlock(&app->sock_lock);
+ return ret;
+}
+
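+/*
+ * Release a UST object, serialized by the application's socket lock.
+ * A NULL app means its command socket is already gone; an invalid
+ * socket (-1) is then used so only local object resources are freed.
+ */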
+int ust_app_release_object(struct ust_app *app, struct lttng_ust_object_data *data)
+{
+ int ret, sock;
+
+ if (app) {
+ pthread_mutex_lock(&app->sock_lock);
+ sock = app->sock;
+ } else {
+ sock = -1;
+ }
+ ret = ustctl_release_object(sock, data);
+ if (app) {
+ pthread_mutex_unlock(&app->sock_lock);
+ }
+ return ret;
+}
+
/*
* Push metadata to consumer socket.
*
{
int ret;
char *metadata_str = NULL;
- size_t len, offset;
+ size_t len, offset, new_metadata_len_sent;
ssize_t ret_val;
+ uint64_t metadata_key, metadata_version;
assert(registry);
assert(socket);
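+ /*
+ * Sample the registry state while holding its lock; the lock is
+ * released below while pushing the metadata to the consumer.
+ */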
+ metadata_key = registry->metadata_key;
+
/*
* Means that no metadata was assigned to the session. This can
* happen if no start has been done previously.
*/
- if (!registry->metadata_key) {
+ if (!metadata_key) {
return 0;
}
- /*
- * On a push metadata error either the consumer is dead or the
- * metadata channel has been destroyed because its endpoint
- * might have died (e.g: relayd), or because the application has
- * exited. If so, the metadata closed flag is set to 1 so we
- * deny pushing metadata again which is not valid anymore on the
- * consumer side.
- */
- if (registry->metadata_closed) {
- return -EPIPE;
- }
-
offset = registry->metadata_len_sent;
len = registry->metadata_len - registry->metadata_len_sent;
+ new_metadata_len_sent = registry->metadata_len;
+ metadata_version = registry->metadata_version;
if (len == 0) {
DBG3("No metadata to push for metadata key %" PRIu64,
registry->metadata_key);
ret_val = -ENOMEM;
goto error;
}
- /* Copy what we haven't send out. */
+ /* Copy what we haven't sent out. */
memcpy(metadata_str, registry->metadata + offset, len);
- registry->metadata_len_sent += len;
push_data:
- ret = consumer_push_metadata(socket, registry->metadata_key,
- metadata_str, len, offset);
+ pthread_mutex_unlock(&registry->lock);
+ /*
+ * We need to unlock the registry while we push metadata to
+ * break a circular dependency between the consumerd metadata
+ * lock and the sessiond registry lock. Indeed, pushing metadata
+ * to the consumerd awaits that it gets pushed all the way to
+ * relayd, but doing so requires grabbing the metadata lock. If
+ * a concurrent metadata request is being performed by
+ * consumerd, this can try to grab the registry lock on the
+ * sessiond while holding the metadata lock on the consumer
+ * daemon. Those push and pull schemes are performed on two
+ * different bidirectional communication sockets.
+ */
+ ret = consumer_push_metadata(socket, metadata_key,
+ metadata_str, len, offset, metadata_version);
+ pthread_mutex_lock(&registry->lock);
if (ret < 0) {
/*
* There is an acceptable race here between the registry
*/
if (ret == -LTTCOMM_CONSUMERD_CHANNEL_FAIL) {
ret = 0;
+ } else {
+ ERR("Error pushing metadata to consumer");
}
-
- /*
- * Update back the actual metadata len sent since it
- * failed here.
- */
- registry->metadata_len_sent -= len;
ret_val = ret;
goto error_push;
+ } else {
+ /*
+ * Metadata may have been concurrently pushed, since
+ * we're not holding the registry lock while pushing to
+ * consumer. This is handled by the fact that we send
+ * the metadata content, size, and the offset at which
+ * that metadata belongs. Fragments may arrive out of
+ * order on the consumer side; the consumer deals with
+ * overlapping fragments, which must be contiguous
+ * starting from offset 0. We keep the largest
+ * metadata_len_sent value of the concurrent sends.
+ */
+ registry->metadata_len_sent =
+ max_t(size_t, registry->metadata_len_sent,
+ new_metadata_len_sent);
}
-
free(metadata_str);
return len;
/*
* Delete ust app session safely. RCU read lock must be held before calling
* this function.
+ *
+ * The session list lock must be held by the caller.
*/
static
void delete_ust_app_session(int sock, struct ust_app_session *ua_sess,
ua_sess->deleted = true;
registry = get_session_registry(ua_sess);
+ /* Registry can be null on error path during initialization. */
if (registry) {
/* Push metadata for application before freeing the application. */
(void) push_metadata(registry, ua_sess->consumer);
if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
if (reg_pid) {
+ /*
+ * Registry can be null on error path during
+ * initialization.
+ */
buffer_reg_pid_remove(reg_pid);
buffer_reg_pid_destroy(reg_pid);
}
}
if (ua_sess->handle != -1) {
+ pthread_mutex_lock(&app->sock_lock);
ret = ustctl_release_handle(sock, ua_sess->handle);
+ pthread_mutex_unlock(&app->sock_lock);
if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
ERR("UST app sock %d release session handle failed with ret %d",
sock, ret);
}
+ /* Remove session from application UST object descriptor. */
+ iter.iter.node = &ua_sess->ust_objd_node.node;
+ ret = lttng_ht_del(app->ust_sessions_objd, &iter);
+ assert(!ret);
}
+
pthread_mutex_unlock(&ua_sess->lock);
+ consumer_output_put(ua_sess->consumer);
+
call_rcu(&ua_sess->rcu_head, delete_ust_app_session_rcu);
}
int ret, sock;
struct ust_app_session *ua_sess, *tmp_ua_sess;
+ /*
+ * The session list lock must be held during this function to guarantee
+ * the existence of ua_sess.
+ */
+ session_lock_list();
/* Delete ust app sessions info */
sock = app->sock;
app->sock = -1;
}
ht_cleanup_push(app->sessions);
+ ht_cleanup_push(app->ust_sessions_objd);
ht_cleanup_push(app->ust_objd);
/*
DBG2("UST app pid %d deleted", app->pid);
free(app);
+ session_unlock_list();
}
/*
/*
* Delete the session from the application ht and delete the data structure by
* freeing every object inside and releasing them.
+ *
+ * The session list lock must be held by the caller.
*/
static void destroy_app_session(struct ust_app *app,
struct ust_app_session *ua_sess)
* Alloc new UST app session.
*/
static
-struct ust_app_session *alloc_ust_app_session(struct ust_app *app)
+struct ust_app_session *alloc_ust_app_session(void)
{
struct ust_app_session *ua_sess;
ua_chan->attr.switch_timer_interval = attr->switch_timer_interval;
ua_chan->attr.read_timer_interval = attr->read_timer_interval;
ua_chan->attr.output = attr->output;
+ ua_chan->attr.blocking_timeout = attr->u.s.blocking_timeout;
}
/* By default, the channel is a per cpu channel. */
ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;
* Alloc new UST app context.
*/
static
-struct ust_app_ctx *alloc_ust_app_ctx(struct lttng_ust_context *uctx)
+struct ust_app_ctx *alloc_ust_app_ctx(struct lttng_ust_context_attr *uctx)
{
struct ust_app_ctx *ua_ctx;
if (uctx) {
memcpy(&ua_ctx->ctx, uctx, sizeof(ua_ctx->ctx));
+ if (uctx->ctx == LTTNG_UST_CONTEXT_APP_CONTEXT) {
+ char *provider_name = NULL, *ctx_name = NULL;
+
+ provider_name = strdup(uctx->u.app_ctx.provider_name);
+ ctx_name = strdup(uctx->u.app_ctx.ctx_name);
+ if (!provider_name || !ctx_name) {
+ free(provider_name);
+ free(ctx_name);
+ goto error;
+ }
+
+ ua_ctx->ctx.u.app_ctx.provider_name = provider_name;
+ ua_ctx->ctx.u.app_ctx.ctx_name = ctx_name;
+ }
}
DBG3("UST app context %d allocated", ua_ctx->ctx.ctx);
-
-error:
return ua_ctx;
+error:
+ free(ua_ctx);
+ return NULL;
}
/*
* Return an ust_app_event object or NULL on error.
*/
static struct ust_app_event *find_ust_app_event(struct lttng_ht *ht,
- char *name, struct lttng_filter_bytecode *filter, int loglevel,
+ char *name, struct lttng_filter_bytecode *filter,
+ int loglevel_value,
const struct lttng_event_exclusion *exclusion)
{
struct lttng_ht_iter iter;
/* Setup key for event lookup. */
key.name = name;
key.filter = filter;
- key.loglevel = loglevel;
+ key.loglevel_type = loglevel_value;
/* lttng_event_exclusion and lttng_ust_event_exclusion structures are similar */
key.exclusion = exclusion;
health_code_update();
+ pthread_mutex_lock(&app->sock_lock);
ret = ustctl_add_context(app->sock, &ua_ctx->ctx,
ua_chan->obj, &ua_ctx->obj);
+ pthread_mutex_unlock(&app->sock_lock);
if (ret < 0) {
if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
ERR("UST app create channel context failed for app (pid: %d) "
ret = -LTTNG_ERR_NOMEM;
goto error;
}
+ pthread_mutex_lock(&app->sock_lock);
ret = ustctl_set_filter(app->sock, ust_bytecode,
ua_event->obj);
+ pthread_mutex_unlock(&app->sock_lock);
if (ret < 0) {
if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
ERR("UST app event %s filter failed for app (pid: %d) "
ret = -LTTNG_ERR_NOMEM;
goto error;
}
+ pthread_mutex_lock(&app->sock_lock);
ret = ustctl_set_exclusion(app->sock, ust_exclusion, ua_event->obj);
+ pthread_mutex_unlock(&app->sock_lock);
if (ret < 0) {
if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
ERR("UST app event %s exclusions failed for app (pid: %d) "
health_code_update();
+ pthread_mutex_lock(&app->sock_lock);
ret = ustctl_disable(app->sock, ua_event->obj);
+ pthread_mutex_unlock(&app->sock_lock);
if (ret < 0) {
if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
ERR("UST app event %s disable failed for app (pid: %d) "
health_code_update();
+ pthread_mutex_lock(&app->sock_lock);
ret = ustctl_disable(app->sock, ua_chan->obj);
+ pthread_mutex_unlock(&app->sock_lock);
if (ret < 0) {
if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
ERR("UST app channel %s disable failed for app (pid: %d) "
health_code_update();
+ pthread_mutex_lock(&app->sock_lock);
ret = ustctl_enable(app->sock, ua_chan->obj);
+ pthread_mutex_unlock(&app->sock_lock);
if (ret < 0) {
if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
ERR("UST app channel %s enable failed for app (pid: %d) "
health_code_update();
+ pthread_mutex_lock(&app->sock_lock);
ret = ustctl_enable(app->sock, ua_event->obj);
+ pthread_mutex_unlock(&app->sock_lock);
if (ret < 0) {
if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
ERR("UST app event %s enable failed for app (pid: %d) "
/* Send channel to the application. */
ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
- if (ret < 0) {
+ if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
+ ret = -ENOTCONN; /* Caused by app exiting. */
+ goto error;
+ } else if (ret < 0) {
goto error;
}
/* Send all streams to application. */
cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
ret = ust_consumer_send_stream_to_ust(app, ua_chan, stream);
- if (ret < 0) {
+ if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
+ ret = -ENOTCONN; /* Caused by app exiting. */
+ goto error;
+ } else if (ret < 0) {
goto error;
}
/* We don't need the stream anymore once sent to the tracer. */
cds_list_del(&stream->list);
- delete_ust_app_stream(-1, stream);
+ delete_ust_app_stream(-1, stream, app);
}
/* Flag the channel that it is sent to the application. */
ua_chan->is_sent = 1;
health_code_update();
/* Create UST event on tracer */
+ pthread_mutex_lock(&app->sock_lock);
ret = ustctl_create_event(app->sock, &ua_event->attr, ua_chan->obj,
&ua_event->obj);
+ pthread_mutex_unlock(&app->sock_lock);
if (ret < 0) {
if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
ERR("Error ustctl create event %s for app pid: %d with ret %d",
struct ltt_ust_event *uevent;
struct ltt_ust_context *uctx;
struct ust_app_event *ua_event;
- struct ust_app_ctx *ua_ctx;
DBG2("UST app shadow copy of channel %s started", ua_chan->name);
ua_chan->attr.overwrite = uchan->attr.overwrite;
ua_chan->attr.switch_timer_interval = uchan->attr.switch_timer_interval;
ua_chan->attr.read_timer_interval = uchan->attr.read_timer_interval;
+ ua_chan->monitor_timer_interval = uchan->monitor_timer_interval;
ua_chan->attr.output = uchan->attr.output;
+ ua_chan->attr.blocking_timeout = uchan->attr.u.s.blocking_timeout;
+
/*
* Note that the attribute channel type is not set since the channel on the
* tracing registry side does not have this information.
ua_chan->tracing_channel_id = uchan->id;
cds_list_for_each_entry(uctx, &uchan->ctx_list, list) {
- ua_ctx = alloc_ust_app_ctx(&uctx->ctx);
+ struct ust_app_ctx *ua_ctx = alloc_ust_app_ctx(&uctx->ctx);
+
if (ua_ctx == NULL) {
continue;
}
ua_sess->egid = usess->gid;
ua_sess->buffer_type = usess->buffer_type;
ua_sess->bits_per_long = app->bits_per_long;
+
/* There is only one consumer object per session possible. */
+ consumer_output_get(usess->consumer);
ua_sess->consumer = usess->consumer;
+
ua_sess->output_traces = usess->output_traces;
ua_sess->live_timer_interval = usess->live_timer_interval;
copy_channel_attr_to_ustctl(&ua_sess->metadata_attr,
DBG2("Channel %s not found on shadow session copy, creating it",
uchan->name);
- ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
+ ua_chan = alloc_ust_app_channel(uchan->name, ua_sess,
+ &uchan->attr);
if (ua_chan == NULL) {
/* malloc failed. FIXME: might want to handle ENOMEM. */
continue;
lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
}
+ return;
error:
- return;
+ consumer_output_put(ua_sess->consumer);
}
/*
* Returns 0 on success or else a negative code which is either -ENOMEM or
* -ENOTCONN, which is the default code if ustctl_create_session fails.
*/
-static int create_ust_app_session(struct ltt_ust_session *usess,
+static int find_or_create_ust_app_session(struct ltt_ust_session *usess,
struct ust_app *app, struct ust_app_session **ua_sess_ptr,
int *is_created)
{
if (ua_sess == NULL) {
DBG2("UST app pid: %d session id %" PRIu64 " not found, creating it",
app->pid, usess->id);
- ua_sess = alloc_ust_app_session(app);
+ ua_sess = alloc_ust_app_session();
if (ua_sess == NULL) {
/* Only malloc can fail so something is really wrong */
ret = -ENOMEM;
health_code_update();
if (ua_sess->handle == -1) {
+ pthread_mutex_lock(&app->sock_lock);
ret = ustctl_create_session(app->sock);
+ pthread_mutex_unlock(&app->sock_lock);
if (ret < 0) {
if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
ERR("Creating session for app pid %d with ret %d",
lttng_ht_node_init_u64(&ua_sess->node,
ua_sess->tracing_id);
lttng_ht_add_unique_u64(app->sessions, &ua_sess->node);
+ lttng_ht_node_init_ulong(&ua_sess->ust_objd_node, ua_sess->handle);
+ lttng_ht_add_unique_ulong(app->ust_sessions_objd,
+ &ua_sess->ust_objd_node);
DBG2("UST app session created successfully with handle %d", ret);
}
static int ht_match_ust_app_ctx(struct cds_lfht_node *node, const void *_key)
{
struct ust_app_ctx *ctx;
- const struct lttng_ust_context *key;
+ const struct lttng_ust_context_attr *key;
assert(node);
assert(_key);
goto no_match;
}
- /* Check the name in the case of perf thread counters. */
- if (key->ctx == LTTNG_UST_CONTEXT_PERF_THREAD_COUNTER) {
+ switch (key->ctx) {
+ case LTTNG_UST_CONTEXT_PERF_THREAD_COUNTER:
if (strncmp(key->u.perf_counter.name,
- ctx->ctx.u.perf_counter.name,
- sizeof(key->u.perf_counter.name))) {
+ ctx->ctx.u.perf_counter.name,
+ sizeof(key->u.perf_counter.name))) {
goto no_match;
}
+ break;
+ case LTTNG_UST_CONTEXT_APP_CONTEXT:
+ if (strcmp(key->u.app_ctx.provider_name,
+ ctx->ctx.u.app_ctx.provider_name) ||
+ strcmp(key->u.app_ctx.ctx_name,
+ ctx->ctx.u.app_ctx.ctx_name)) {
+ goto no_match;
+ }
+ break;
+ default:
+ break;
}
/* Match. */
*/
static
struct ust_app_ctx *find_ust_app_context(struct lttng_ht *ht,
- struct lttng_ust_context *uctx)
+ struct lttng_ust_context_attr *uctx)
{
struct lttng_ht_iter iter;
struct lttng_ht_node_ulong *node;
* Called with UST app session lock held and a RCU read side lock.
*/
static
-int create_ust_app_channel_context(struct ust_app_session *ua_sess,
- struct ust_app_channel *ua_chan, struct lttng_ust_context *uctx,
+int create_ust_app_channel_context(struct ust_app_channel *ua_chan,
+ struct lttng_ust_context_attr *uctx,
struct ust_app *app)
{
int ret = 0;
ua_ctx = alloc_ust_app_ctx(uctx);
if (ua_ctx == NULL) {
/* malloc failed */
- ret = -1;
+ ret = -ENOMEM;
goto error;
}
/*
* Ask the consumer to create a channel and get it if successful.
*
+ * Called with UST app session lock held.
+ *
* Return 0 on success or else a negative value.
*/
static int do_consumer_create_channel(struct ltt_ust_session *usess,
struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan,
- int bitness, struct ust_registry_session *registry)
+ int bitness, struct ust_registry_session *registry,
+ uint64_t trace_archive_id)
{
int ret;
unsigned int nb_fd = 0;
* stream we have to expect.
*/
ret = ust_consumer_ask_channel(ua_sess, ua_chan, usess->consumer, socket,
- registry);
+ registry, trace_archive_id);
if (ret < 0) {
goto error_ask;
}
* Return 0 on success or else a negative value.
*/
static int setup_buffer_reg_streams(struct buffer_reg_channel *reg_chan,
- struct ust_app_channel *ua_chan)
+ struct ust_app_channel *ua_chan,
+ struct ust_app *app)
{
int ret = 0;
struct ust_app_stream *stream, *stmp;
/* We don't need the streams anymore. */
cds_list_del(&stream->list);
- delete_ust_app_stream(-1, stream);
+ delete_ust_app_stream(-1, stream, app);
}
error:
* Return 0 on success else a negative value.
*/
static int setup_buffer_reg_channel(struct buffer_reg_session *reg_sess,
- struct ust_app_channel *ua_chan, struct buffer_reg_channel *reg_chan)
+ struct ust_app_channel *ua_chan, struct buffer_reg_channel *reg_chan,
+ struct ust_app *app)
{
int ret;
DBG2("UST app setup buffer registry channel for %s", ua_chan->name);
/* Setup all streams for the registry. */
- ret = setup_buffer_reg_streams(reg_chan, ua_chan);
+ ret = setup_buffer_reg_streams(reg_chan, ua_chan, app);
if (ret < 0) {
goto error;
}
/* Send channel to the application. */
ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
- if (ret < 0) {
+ if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
+ ret = -ENOTCONN; /* Caused by app exiting. */
+ goto error;
+ } else if (ret < 0) {
goto error;
}
ret = ust_consumer_send_stream_to_ust(app, ua_chan, &stream);
if (ret < 0) {
- (void) release_ust_app_stream(-1, &stream);
+ (void) release_ust_app_stream(-1, &stream, app);
+ if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
+ ret = -ENOTCONN; /* Caused by app exiting. */
+ }
goto error_stream_unlock;
}
* The return value is not important here. This function will output an
* error if needed.
*/
- (void) release_ust_app_stream(-1, &stream);
+ (void) release_ust_app_stream(-1, &stream, app);
}
ua_chan->is_sent = 1;
/*
* Create and send to the application the created buffers with per UID buffers.
*
+ * This MUST be called with a RCU read side lock acquired.
+ * The session list lock and the session's lock must be acquired.
+ *
* Return 0 on success else a negative value.
*/
static int create_channel_per_uid(struct ust_app *app,
int ret;
struct buffer_reg_uid *reg_uid;
struct buffer_reg_channel *reg_chan;
+ struct ltt_session *session = NULL;
+ enum lttng_error_code notification_ret;
+ struct ust_registry_channel *chan_reg;
assert(app);
assert(usess);
reg_chan = buffer_reg_channel_find(ua_chan->tracing_channel_id,
reg_uid);
- if (!reg_chan) {
- /* Create the buffer registry channel object. */
- ret = create_buffer_reg_channel(reg_uid->registry, ua_chan, &reg_chan);
- if (ret < 0) {
- ERR("Error creating the UST channel \"%s\" registry instance",
- ua_chan->name);
- goto error;
- }
- assert(reg_chan);
+ if (reg_chan) {
+ goto send_channel;
+ }
- /*
- * Create the buffers on the consumer side. This call populates the
- * ust app channel object with all streams and data object.
- */
- ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
- app->bits_per_long, reg_uid->registry->reg.ust);
- if (ret < 0) {
- ERR("Error creating UST channel \"%s\" on the consumer daemon",
+ /* Create the buffer registry channel object. */
+ ret = create_buffer_reg_channel(reg_uid->registry, ua_chan, &reg_chan);
+ if (ret < 0) {
+ ERR("Error creating the UST channel \"%s\" registry instance",
ua_chan->name);
+ goto error;
+ }
- /*
- * Let's remove the previously created buffer registry channel so
- * it's not visible anymore in the session registry.
- */
- ust_registry_channel_del_free(reg_uid->registry->reg.ust,
- ua_chan->tracing_channel_id);
- buffer_reg_channel_remove(reg_uid->registry, reg_chan);
- buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
- goto error;
- }
+ session = session_find_by_id(ua_sess->tracing_id);
+ assert(session);
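+ /* The session and session list locks must be held by the caller. */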
+ assert(pthread_mutex_trylock(&session->lock));
+ assert(session_trylock_list());
+
+ /*
+ * Create the buffers on the consumer side. This call populates the
+ * ust app channel object with all streams and data object.
+ */
+ ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
+ app->bits_per_long, reg_uid->registry->reg.ust,
+ session->current_archive_id);
+ if (ret < 0) {
+ ERR("Error creating UST channel \"%s\" on the consumer daemon",
+ ua_chan->name);
/*
- * Setup the streams and add it to the session registry.
+ * Let's remove the previously created buffer registry channel so
+ * it's not visible anymore in the session registry.
*/
- ret = setup_buffer_reg_channel(reg_uid->registry, ua_chan, reg_chan);
- if (ret < 0) {
- ERR("Error setting up UST channel \"%s\"",
- ua_chan->name);
- goto error;
- }
+ ust_registry_channel_del_free(reg_uid->registry->reg.ust,
+ ua_chan->tracing_channel_id, false);
+ buffer_reg_channel_remove(reg_uid->registry, reg_chan);
+ buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
+ goto error;
+ }
+
+ /*
+ * Setup the streams and add it to the session registry.
+ */
+ ret = setup_buffer_reg_channel(reg_uid->registry,
+ ua_chan, reg_chan, app);
+ if (ret < 0) {
+ ERR("Error setting up UST channel \"%s\"", ua_chan->name);
+ goto error;
+ }
+ /* Notify the notification subsystem of the channel's creation. */
+ pthread_mutex_lock(&reg_uid->registry->reg.ust->lock);
+ chan_reg = ust_registry_channel_find(reg_uid->registry->reg.ust,
+ ua_chan->tracing_channel_id);
+ assert(chan_reg);
+ chan_reg->consumer_key = ua_chan->key;
+ chan_reg = NULL;
+ pthread_mutex_unlock(&reg_uid->registry->reg.ust->lock);
+
+ notification_ret = notification_thread_command_add_channel(
+ notification_thread_handle, session->name,
+ ua_sess->euid, ua_sess->egid,
+ ua_chan->name,
+ ua_chan->key,
+ LTTNG_DOMAIN_UST,
+ ua_chan->attr.subbuf_size * ua_chan->attr.num_subbuf);
+ if (notification_ret != LTTNG_OK) {
+ ret = - (int) notification_ret;
+ ERR("Failed to add channel to notification thread");
+ goto error;
}
+send_channel:
/* Send buffers to the application. */
ret = send_channel_uid_to_ust(reg_chan, app, ua_sess, ua_chan);
if (ret < 0) {
- /*
- * Don't report error to the console, since it may be
- * caused by application concurrently exiting.
- */
+ if (ret != -ENOTCONN) {
+ ERR("Error sending channel to application");
+ }
goto error;
}
error:
+ if (session) {
+ session_put(session);
+ }
return ret;
}
/*
* Create and send to the application the created buffers with per PID buffers.
*
+ * Called with UST app session lock held.
+ * The session list lock and the session's lock must be acquired.
+ *
* Return 0 on success else a negative value.
*/
static int create_channel_per_pid(struct ust_app *app,
{
int ret;
struct ust_registry_session *registry;
+ enum lttng_error_code cmd_ret;
+ struct ltt_session *session = NULL;
+ uint64_t chan_reg_key;
+ struct ust_registry_channel *chan_reg;
assert(app);
assert(usess);
rcu_read_lock();
registry = get_session_registry(ua_sess);
+ /* The UST app session lock is held, registry shall not be null. */
assert(registry);
/* Create and add a new channel registry to session. */
goto error;
}
+ session = session_find_by_id(ua_sess->tracing_id);
+ assert(session);
+
+ assert(pthread_mutex_trylock(&session->lock));
+ assert(session_trylock_list());
+
/* Create and get channel on the consumer side. */
ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
- app->bits_per_long, registry);
+ app->bits_per_long, registry,
+ session->current_archive_id);
if (ret < 0) {
ERR("Error creating UST channel \"%s\" on the consumer daemon",
ua_chan->name);
- goto error;
+ goto error_remove_from_registry;
}
ret = send_channel_pid_to_ust(app, ua_sess, ua_chan);
if (ret < 0) {
- /*
- * Don't report error to the console, since it may be
- * caused by application concurrently exiting.
- */
- goto error;
+ if (ret != -ENOTCONN) {
+ ERR("Error sending channel to application");
+ }
+ goto error_remove_from_registry;
+ }
+
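+ /*
+ * Publish the channel's consumer key in the session registry and
+ * notify the notification subsystem of the channel's creation.
+ */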
+ chan_reg_key = ua_chan->key;
+ pthread_mutex_lock(&registry->lock);
+ chan_reg = ust_registry_channel_find(registry, chan_reg_key);
+ assert(chan_reg);
+ chan_reg->consumer_key = ua_chan->key;
+ pthread_mutex_unlock(&registry->lock);
+
+ cmd_ret = notification_thread_command_add_channel(
+ notification_thread_handle, session->name,
+ ua_sess->euid, ua_sess->egid,
+ ua_chan->name,
+ ua_chan->key,
+ LTTNG_DOMAIN_UST,
+ ua_chan->attr.subbuf_size * ua_chan->attr.num_subbuf);
+ if (cmd_ret != LTTNG_OK) {
+ ret = - (int) cmd_ret;
+ ERR("Failed to add channel to notification thread");
+ goto error_remove_from_registry;
}
+error_remove_from_registry:
+ if (ret) {
+ ust_registry_channel_del_free(registry, ua_chan->key, false);
+ }
error:
rcu_read_unlock();
+ if (session) {
+ session_put(session);
+ }
return ret;
}
* need and send it to the application. This MUST be called with a RCU read
* side lock acquired.
*
- * Return 0 on success or else a negative value.
+ * Called with UST app session lock held.
+ *
+ * Return 0 on success or else a negative value. Returns -ENOTCONN if
+ * the application exited concurrently.
*/
static int do_create_channel(struct ust_app *app,
struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
*
* Called with UST app session lock and RCU read-side lock held.
*
- * Return 0 on success or else a negative value.
+ * Return 0 on success or else a negative value. Returns -ENOTCONN if
+ * the application exited concurrently.
*/
static int create_ust_app_channel(struct ust_app_session *ua_sess,
struct ltt_ust_channel *uchan, struct ust_app *app,
/* Only add the channel if successful on the tracer side. */
lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
-
end:
if (ua_chanp) {
*ua_chanp = ua_chan;
error:
/* Valid. Calling here is already in a read side lock */
- delete_ust_app_event(-1, ua_event);
+ delete_ust_app_event(-1, ua_event, app);
return ret;
}
struct ust_app_channel *metadata;
struct consumer_socket *socket;
struct ust_registry_session *registry;
+ struct ltt_session *session = NULL;
assert(ua_sess);
assert(app);
assert(consumer);
registry = get_session_registry(ua_sess);
+ /* The UST app session lock is held, registry shall not be null. */
assert(registry);
pthread_mutex_lock(&registry->lock);
*/
registry->metadata_key = metadata->key;
+ session = session_find_by_id(ua_sess->tracing_id);
+ assert(session);
+
+ assert(pthread_mutex_trylock(&session->lock));
+ assert(session_trylock_list());
+
/*
* Ask the consumer to create the metadata channel. The metadata object
* will be created by the consumer and kept there. However, the stream is
* consumer.
*/
ret = ust_consumer_ask_channel(ua_sess, metadata, consumer, socket,
- registry);
+ registry, session->current_archive_id);
if (ret < 0) {
/* Nullify the metadata key so we don't try to close it later on. */
registry->metadata_key = 0;
delete_ust_app_channel(-1, metadata, app);
error:
pthread_mutex_unlock(&registry->lock);
+ if (session) {
+ session_put(session);
+ }
return ret;
}
lta->v_minor = msg->minor;
lta->sessions = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
lta->ust_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
+ lta->ust_sessions_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
lta->notify_sock = -1;
/* Copy name and make sure it's NULL terminated. */
lta->pid = msg->pid;
lttng_ht_node_init_ulong(&lta->pid_n, (unsigned long) lta->pid);
lta->sock = sock;
+ pthread_mutex_init(&lta->sock_lock, NULL);
lttng_ht_node_init_ulong(&lta->sock_n, (unsigned long) lta->sock);
CDS_INIT_LIST_HEAD(&lta->teardown_head);
-
error:
return lta;
}
assert(app);
+ pthread_mutex_lock(&app->sock_lock);
ret = ustctl_tracer_version(app->sock, &app->version);
+ pthread_mutex_unlock(&app->sock_lock);
if (ret < 0) {
if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
ERR("UST app %d version failed with ret %d", app->sock, ret);
/*
* Remove application from notify hash table. The thread handling the
* notify socket could have deleted the node so ignore on error because
- * either way it's valid. The close of that socket is handled by the other
- * thread.
+ * either way it's valid. The close of that socket is handled by the
+ * apps_notify_thread.
*/
iter.iter.node = &lta->notify_sock_n.node;
(void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
*/
continue;
}
+ pthread_mutex_lock(&app->sock_lock);
handle = ustctl_tracepoint_list(app->sock);
if (handle < 0) {
if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
ERR("UST app list events getting handle failed for app pid %d",
app->pid);
}
+ pthread_mutex_unlock(&app->sock_lock);
continue;
}
&uiter)) != -LTTNG_UST_ERR_NOENT) {
/* Handle ustctl error. */
if (ret < 0) {
+ int release_ret;
+
if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
ERR("UST app tp list get failed for app %d with ret %d",
app->sock, ret);
break;
}
free(tmp_event);
+ release_ret = ustctl_release_handle(app->sock, handle);
+ if (release_ret < 0 &&
+ release_ret != -LTTNG_UST_ERR_EXITING &&
+ release_ret != -EPIPE) {
+ ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
+ }
+ pthread_mutex_unlock(&app->sock_lock);
goto rcu_error;
}
new_tmp_event = realloc(tmp_event,
new_nbmem * sizeof(struct lttng_event));
if (new_tmp_event == NULL) {
+ int release_ret;
+
PERROR("realloc ust app events");
free(tmp_event);
ret = -ENOMEM;
+ release_ret = ustctl_release_handle(app->sock, handle);
+ if (release_ret < 0 &&
+ release_ret != -LTTNG_UST_ERR_EXITING &&
+ release_ret != -EPIPE) {
+ ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
+ }
+ pthread_mutex_unlock(&app->sock_lock);
goto rcu_error;
}
/* Zero the new memory */
tmp_event[count].enabled = -1;
count++;
}
+ ret = ustctl_release_handle(app->sock, handle);
+ pthread_mutex_unlock(&app->sock_lock);
+ if (ret < 0 && ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
+ ERR("Error releasing app handle for app %d with ret %d", app->sock, ret);
+ }
}
ret = count;
*/
continue;
}
+ pthread_mutex_lock(&app->sock_lock);
handle = ustctl_tracepoint_field_list(app->sock);
if (handle < 0) {
if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
ERR("UST app list field getting handle failed for app pid %d",
app->pid);
}
+ pthread_mutex_unlock(&app->sock_lock);
continue;
}
&uiter)) != -LTTNG_UST_ERR_NOENT) {
/* Handle ustctl error. */
if (ret < 0) {
+ int release_ret;
+
if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
ERR("UST app tp list field failed for app %d with ret %d",
app->sock, ret);
break;
}
free(tmp_event);
+ release_ret = ustctl_release_handle(app->sock, handle);
+ pthread_mutex_unlock(&app->sock_lock);
+ if (release_ret < 0 &&
+ release_ret != -LTTNG_UST_ERR_EXITING &&
+ release_ret != -EPIPE) {
+ ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
+ }
goto rcu_error;
}
new_tmp_event = realloc(tmp_event,
new_nbmem * sizeof(struct lttng_event_field));
if (new_tmp_event == NULL) {
+ int release_ret;
+
PERROR("realloc ust app event fields");
free(tmp_event);
ret = -ENOMEM;
+ release_ret = ustctl_release_handle(app->sock, handle);
+ pthread_mutex_unlock(&app->sock_lock);
+ if (release_ret < 0 &&
+ release_ret != -LTTNG_UST_ERR_EXITING &&
+ release_ret != -EPIPE) {
+ ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
+ }
goto rcu_error;
}
/* Zero the new memory */
tmp_event[count].event.enabled = -1;
count++;
}
+ ret = ustctl_release_handle(app->sock, handle);
+ pthread_mutex_unlock(&app->sock_lock);
+ if (ret < 0 &&
+ ret != -LTTNG_UST_ERR_EXITING &&
+ ret != -EPIPE) {
+ ERR("Error releasing app handle for app %d with ret %d", app->sock, ret);
+ }
}
ret = count;
rcu_read_lock();
+ /* Cleanup notify socket hash table */
+ if (ust_app_ht_by_notify_sock) {
+ cds_lfht_for_each_entry(ust_app_ht_by_notify_sock->ht, &iter.iter, app,
+ notify_sock_n.node) {
+ struct cds_lfht_node *node;
+ struct ust_app *app;
+
+ node = cds_lfht_iter_get_node(&iter.iter);
+ if (!node) {
+ continue;
+ }
+
+ app = container_of(node, struct ust_app,
+ notify_sock_n.node);
+ ust_app_notify_sock_unregister(app->notify_sock);
+ }
+ }
+
if (ust_app_ht) {
cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
ret = lttng_ht_del(ust_app_ht, &iter);
}
}
- /* Cleanup notify socket hash table */
- if (ust_app_ht_by_notify_sock) {
- cds_lfht_for_each_entry(ust_app_ht_by_notify_sock->ht, &iter.iter, app,
- notify_sock_n.node) {
- ret = lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
- assert(!ret);
- }
- }
rcu_read_unlock();
/* Destroy is done only when the ht is empty */
DBG2("UST app disabling channel %s from global domain for session id %" PRIu64,
uchan->name, usess->id);
+ if (!usess->active) {
+ goto end;
+ }
+
rcu_read_lock();
/* For every registered applications */
rcu_read_unlock();
error:
+end:
return ret;
}
DBG2("UST app enabling channel %s to global domain for session id %" PRIu64,
uchan->name, usess->id);
+ if (!usess->active) {
+ goto end;
+ }
+
rcu_read_lock();
/* For every registered applications */
rcu_read_unlock();
error:
+end:
return ret;
}
{
int ret = 0;
struct lttng_ht_iter iter, uiter;
- struct lttng_ht_node_str *ua_chan_node, *ua_event_node;
+ struct lttng_ht_node_str *ua_chan_node;
struct ust_app *app;
struct ust_app_session *ua_sess;
struct ust_app_channel *ua_chan;
"%s for session id %" PRIu64,
uevent->attr.name, uchan->name, usess->id);
+ if (!usess->active) {
+ goto end;
+ }
+
rcu_read_lock();
/* For all registered applications */
}
ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
- lttng_ht_lookup(ua_chan->events, (void *)uevent->attr.name, &uiter);
- ua_event_node = lttng_ht_iter_get_node_str(&uiter);
- if (ua_event_node == NULL) {
+ ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
+ uevent->filter, uevent->attr.loglevel,
+ uevent->exclusion);
+ if (ua_event == NULL) {
DBG2("Event %s not found in channel %s for app pid %d."
"Skipping", uevent->attr.name, uchan->name, app->pid);
continue;
}
- ua_event = caa_container_of(ua_event_node, struct ust_app_event, node);
ret = disable_ust_app_event(ua_sess, ua_event, app);
if (ret < 0) {
}
rcu_read_unlock();
-
+end:
return ret;
}
DBG2("UST app adding channel %s to UST domain for session id %" PRIu64,
uchan->name, usess->id);
+ if (!usess->active) {
+ goto end;
+ }
+
rcu_read_lock();
/* For every registered applications */
* that if session exist, it will simply return a pointer to the ust
* app session.
*/
- ret = create_ust_app_session(usess, app, &ua_sess, &created);
+ ret = find_or_create_ust_app_session(usess, app, &ua_sess, &created);
if (ret < 0) {
switch (ret) {
case -ENOTCONN:
* or a timeout on it. We can't inform the caller that for a
* specific app, the session failed so let's continue here.
*/
+ ret = 0; /* Not an error. */
continue;
case -ENOMEM:
default:
}
pthread_mutex_unlock(&ua_sess->lock);
if (ret < 0) {
- if (ret == -ENOMEM) {
- /* No more memory is a fatal error. Stop right now. */
- goto error_rcu_unlock;
- }
/* Cleanup the created session if it's the case. */
if (created) {
destroy_app_session(app, ua_sess);
}
+ switch (ret) {
+ case -ENOTCONN:
+ /*
+ * The application's socket is not valid. Either a bad socket
+ * or a timeout on it. We can't inform the caller that for a
+ * specific app, the session failed so let's continue here.
+ */
+ ret = 0; /* Not an error. */
+ continue;
+ case -ENOMEM:
+ default:
+ goto error_rcu_unlock;
+ }
}
}
error_rcu_unlock:
rcu_read_unlock();
+end:
return ret;
}
DBG("UST app enabling event %s for all apps for session id %" PRIu64,
uevent->attr.name, usess->id);
+ if (!usess->active) {
+ goto end;
+ }
+
/*
* NOTE: At this point, this function is called only if the session and
* channel passed are already created for all apps and enabled on the
/* Lookup channel in the ust app session */
lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
- /* If the channel is not found, there is a code flow error */
- assert(ua_chan_node);
-
+ /*
+ * It is possible that the channel cannot be found if
+ * the channel/event creation occurs concurrently with
+ * an application exit.
+ */
+ if (!ua_chan_node) {
+ pthread_mutex_unlock(&ua_sess->lock);
+ continue;
+ }
+
ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
/* Get event node */
error:
rcu_read_unlock();
+end:
return ret;
}
DBG("UST app creating event %s for all apps for session id %" PRIu64,
uevent->attr.name, usess->id);
+ if (!usess->active) {
+ goto end;
+ }
+
rcu_read_lock();
/* For all registered applications */
}
rcu_read_unlock();
-
+end:
return ret;
}
/*
* Start tracing for a specific UST session and app.
+ *
+ * Called with UST app session lock held.
*/
static
int ust_app_start_trace(struct ltt_ust_session *usess, struct ust_app *app)
/* Create directories if consumer is LOCAL and has a path defined. */
if (usess->consumer->type == CONSUMER_DST_LOCAL &&
- strlen(usess->consumer->dst.trace_path) > 0) {
- ret = run_as_mkdir_recursive(usess->consumer->dst.trace_path,
- S_IRWXU | S_IRWXG, ua_sess->euid, ua_sess->egid);
+ usess->consumer->dst.session_root_path[0] != '\0') {
+ char *tmp_path;
+
+ tmp_path = zmalloc(LTTNG_PATH_MAX);
+ if (!tmp_path) {
+ ERR("Alloc tmp_path");
+ goto error_unlock;
+ }
+ ret = snprintf(tmp_path, LTTNG_PATH_MAX, "%s%s%s",
+ usess->consumer->dst.session_root_path,
+ usess->consumer->chunk_path,
+ usess->consumer->subdir);
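+ /* snprintf() returns the length needed; >= LTTNG_PATH_MAX means truncation. */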
+ if (ret >= LTTNG_PATH_MAX) {
+ ERR("Local destination path exceeds the maximal allowed length of %i bytes (needs %i bytes) with path = \"%s%s%s\"",
+ LTTNG_PATH_MAX, ret,
+ usess->consumer->dst.session_root_path,
+ usess->consumer->chunk_path,
+ usess->consumer->subdir);
+ free(tmp_path);
+ goto error_unlock;
+ }
+
+ DBG("Creating directory path for local tracing: \"%s\"",
+ tmp_path);
+ ret = run_as_mkdir_recursive(tmp_path, S_IRWXU | S_IRWXG,
+ ua_sess->euid, ua_sess->egid);
+ free(tmp_path);
if (ret < 0) {
- if (ret != -EEXIST) {
+ if (errno != EEXIST) {
ERR("Trace directory creation error");
goto error_unlock;
}
skip_setup:
/* This start the UST tracing */
+ pthread_mutex_lock(&app->sock_lock);
ret = ustctl_start_session(app->sock, ua_sess->handle);
+ pthread_mutex_unlock(&app->sock_lock);
if (ret < 0) {
if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
ERR("Error starting tracing for app pid: %d (ret: %d)",
health_code_update();
/* Quiescent wait after starting trace */
+ pthread_mutex_lock(&app->sock_lock);
ret = ustctl_wait_quiescent(app->sock);
+ pthread_mutex_unlock(&app->sock_lock);
if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
ERR("UST app wait quiescent failed for app pid %d ret %d",
app->pid, ret);
health_code_update();
/* This inhibits UST tracing */
+ pthread_mutex_lock(&app->sock_lock);
ret = ustctl_stop_session(app->sock, ua_sess->handle);
+ pthread_mutex_unlock(&app->sock_lock);
if (ret < 0) {
if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
ERR("Error stopping tracing for app pid: %d (ret: %d)",
health_code_update();
/* Quiescent wait after stopping trace */
+ pthread_mutex_lock(&app->sock_lock);
ret = ustctl_wait_quiescent(app->sock);
+ pthread_mutex_unlock(&app->sock_lock);
if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
ERR("UST app wait quiescent failed for app pid %d ret %d",
app->pid, ret);
health_code_update();
registry = get_session_registry(ua_sess);
+
+ /* The UST app session lock is held, registry shall not be null. */
assert(registry);
/* Push metadata for application before freeing the application. */
cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
node.node) {
health_code_update();
- assert(ua_chan->is_sent);
ret = consumer_flush_channel(socket, ua_chan->key);
if (ret) {
ERR("Error flushing consumer channel");
return ret;
}
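+/*
+ * Clear the quiescent state of every stream of an application session.
+ * Only per-PID buffer sessions are expected here.
+ */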
+static
+int ust_app_clear_quiescent_app_session(struct ust_app *app,
+ struct ust_app_session *ua_sess)
+{
+ int ret = 0;
+ struct lttng_ht_iter iter;
+ struct ust_app_channel *ua_chan;
+ struct consumer_socket *socket;
+
+ DBG("Clearing stream quiescent state for ust app pid %d", app->pid);
+
+ rcu_read_lock();
+
+ if (!app->compatible) {
+ goto end_not_compatible;
+ }
+
+ pthread_mutex_lock(&ua_sess->lock);
+
+ if (ua_sess->deleted) {
+ goto end_unlock;
+ }
+
+ health_code_update();
+
+ socket = consumer_find_socket_by_bitness(app->bits_per_long,
+ ua_sess->consumer);
+ if (!socket) {
+ ERR("Failed to find consumer (%" PRIu32 ") socket",
+ app->bits_per_long);
+ ret = -1;
+ goto end_unlock;
+ }
+
+ /* Clear quiescent state. */
+ switch (ua_sess->buffer_type) {
+ case LTTNG_BUFFER_PER_PID:
+ cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter,
+ ua_chan, node.node) {
+ health_code_update();
+ ret = consumer_clear_quiescent_channel(socket,
+ ua_chan->key);
+ if (ret) {
+ ERR("Error clearing quiescent state for consumer channel");
+ ret = -1;
+ continue;
+ }
+ }
+ break;
+ case LTTNG_BUFFER_PER_UID:
+ default:
+ assert(0);
+ ret = -1;
+ break;
+ }
+
+ health_code_update();
+
+end_unlock:
+ pthread_mutex_unlock(&ua_sess->lock);
+
+end_not_compatible:
+ rcu_read_unlock();
+ health_code_update();
+ return ret;
+}
+
+/*
+ * Clear quiescent state in each stream for all applications for a
+ * specific UST session.
+ * Called with UST session lock held.
+ */
+static
+int ust_app_clear_quiescent_session(struct ltt_ust_session *usess)
+{
+ int ret = 0;
+
+ DBG("Clearing stream quiescent state for all ust apps");
+
+ rcu_read_lock();
+
+ switch (usess->buffer_type) {
+ case LTTNG_BUFFER_PER_UID:
+ {
+ struct lttng_ht_iter iter;
+ struct buffer_reg_uid *reg;
+
+ /*
+ * Clear quiescent state for all per-UID buffers associated
+ * with that session.
+ */
+ cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
+ struct consumer_socket *socket;
+ struct buffer_reg_channel *reg_chan;
+
+ /* Get associated consumer socket. */
+ socket = consumer_find_socket_by_bitness(
+ reg->bits_per_long, usess->consumer);
+ if (!socket) {
+ /*
+ * Ignore request if no consumer is found for
+ * the session.
+ */
+ continue;
+ }
+
+ cds_lfht_for_each_entry(reg->registry->channels->ht,
+ &iter.iter, reg_chan, node.node) {
+ /*
+ * The following call will print error values so
+ * the return code is of little importance
+ * because whatever happens, we have to try them
+ * all.
+ */
+ (void) consumer_clear_quiescent_channel(socket,
+ reg_chan->consumer_key);
+ }
+ }
+ break;
+ }
+ case LTTNG_BUFFER_PER_PID:
+ {
+ struct ust_app_session *ua_sess;
+ struct lttng_ht_iter iter;
+ struct ust_app *app;
+
+ cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app,
+ pid_n.node) {
+ ua_sess = lookup_session_by_app(usess, app);
+ if (ua_sess == NULL) {
+ continue;
+ }
+ (void) ust_app_clear_quiescent_app_session(app,
+ ua_sess);
+ }
+ break;
+ }
+ default:
+ ret = -1;
+ assert(0);
+ break;
+ }
+
+ rcu_read_unlock();
+ health_code_update();
+ return ret;
+}
+
/*
* Destroy a specific UST session in apps.
*/
health_code_update();
/* Quiescent wait after stopping trace */
+ pthread_mutex_lock(&app->sock_lock);
ret = ustctl_wait_quiescent(app->sock);
+ pthread_mutex_unlock(&app->sock_lock);
if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
ERR("UST app wait quiescent failed for app pid %d ret %d",
app->pid, ret);
DBG("Starting all UST traces");
+ /*
+ * Even though the start trace might fail, flag this session active so
+ * other applications coming in are started by default.
+ */
+ usess->active = 1;
+
rcu_read_lock();
+ /*
+ * In a start-stop-start use-case, we need to clear the quiescent state
+ * of each channel set by the prior stop command, thus ensuring that a
+ * following stop or destroy is sure to grab a timestamp_end near those
+ * operations, even if the packet is empty.
+ */
+ (void) ust_app_clear_quiescent_session(usess);
+
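+ /* Push the session configuration to every registered application. */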
+ cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ ust_app_global_update(usess, app);
+ }
+
cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
ret = ust_app_start_trace(usess, app);
if (ret < 0) {
DBG("Stopping all UST traces");
+ /*
+ * Even though the stop trace might fail, flag this session inactive so
+ * other applications coming in are not started by default.
+ */
+ usess->active = 0;
+
rcu_read_lock();
cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
struct ust_app_ctx *ua_ctx;
int is_created = 0;
- ret = create_ust_app_session(usess, app, &ua_sess, &is_created);
+ ret = find_or_create_ust_app_session(usess, app, &ua_sess, &is_created);
if (ret < 0) {
/* Tracer is probably gone or ENOMEM. */
goto error;
/* App session already created. */
goto end;
}
+ if (!usess->active) {
+ goto end;
+ }
assert(ua_sess);
pthread_mutex_lock(&ua_sess->lock);
cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
node.node) {
ret = do_create_channel(app, usess, ua_sess, ua_chan);
- if (ret < 0) {
+ if (ret < 0 && ret != -ENOTCONN) {
/*
- * Stop everything. On error, the application failed, no more
- * file descriptor are available or ENOMEM so stopping here is
- * the only thing we can do for now.
+ * Stop everything. On error, the application
+ * failed, no more file descriptors are available
+ * or ENOMEM so stopping here is the only thing
+ * we can do for now. The only exception is
+ * -ENOTCONN, which indicates that the application
+ * has exited.
*/
goto error_unlock;
}
pthread_mutex_unlock(&ua_sess->lock);
- if (usess->active) {
- ret = ust_app_start_trace(usess, app);
- if (ret < 0) {
- goto error;
- }
-
- DBG2("UST trace started for app pid %d", app->pid);
+ ret = ust_app_start_trace(usess, app);
+ if (ret < 0) {
+ goto error;
}
+
+ DBG2("UST trace started for app pid %d", app->pid);
end:
/* Everything went well at this point. */
return;
if (!app->compatible) {
return;
}
-
+ if (!usess->active) {
+ return;
+ }
if (trace_ust_pid_tracker_lookup(usess, app->pid)) {
ust_app_global_create(usess, app);
} else {
struct lttng_ht_iter iter;
struct ust_app *app;
+ if (!usess->active) {
+ return;
+ }
rcu_read_lock();
cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
ust_app_global_update(usess, app);
struct ust_app_session *ua_sess;
struct ust_app *app;
+ if (!usess->active) {
+ goto end;
+ }
+
rcu_read_lock();
cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
}
ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel,
node);
- ret = create_ust_app_channel_context(ua_sess, ua_chan, &uctx->ctx, app);
+ ret = create_ust_app_channel_context(ua_chan, &uctx->ctx, app);
if (ret < 0) {
goto next_app;
}
}
rcu_read_unlock();
+end:
return ret;
}
return ret;
}
-/*
- * Calibrate registered applications.
- */
-int ust_app_calibrate_glb(struct lttng_ust_calibrate *calibrate)
-{
- int ret = 0;
- struct lttng_ht_iter iter;
- struct ust_app *app;
-
- rcu_read_lock();
-
- cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- if (!app->compatible) {
- /*
- * TODO: In time, we should notice the caller of this error by
- * telling him that this is a version error.
- */
- continue;
- }
-
- health_code_update();
-
- ret = ustctl_calibrate(app->sock, calibrate);
- if (ret < 0) {
- switch (ret) {
- case -ENOSYS:
- /* Means that it's not implemented on the tracer side. */
- ret = 0;
- break;
- default:
- DBG2("Calibrate app PID %d returned with error %d",
- app->pid, ret);
- break;
- }
- }
- }
-
- DBG("UST app global domain calibration finished");
-
- rcu_read_unlock();
-
- health_code_update();
-
- return ret;
-}
-
/*
* Receive registration and populate the given msg structure.
*
return ret;
}
+/*
+ * Return a ust app session object using the application object and the
+ * session object descriptor as a key. If not found, NULL is returned.
+ * A RCU read side lock MUST be acquired when calling this function.
+ */
+static struct ust_app_session *find_session_by_objd(struct ust_app *app,
+ int objd)
+{
+ struct lttng_ht_node_ulong *node;
+ struct lttng_ht_iter iter;
+ struct ust_app_session *ua_sess = NULL;
+
+ assert(app);
+
+ lttng_ht_lookup(app->ust_sessions_objd, (void *)((unsigned long) objd), &iter);
+ node = lttng_ht_iter_get_node_ulong(&iter);
+ if (node == NULL) {
+ DBG2("UST app session find by objd %d not found", objd);
+ goto error;
+ }
+
+ ua_sess = caa_container_of(node, struct ust_app_session, ust_objd_node);
+
+error:
+ return ua_sess;
+}
+
/*
* Return a ust app channel object using the application object and the channel
* object descriptor as a key. If not found, NULL is returned. A RCU read side
*
* On success 0 is returned else a negative value.
*/
-static int reply_ust_register_channel(int sock, int sobjd, int cobjd,
+static int reply_ust_register_channel(int sock, int cobjd,
size_t nr_fields, struct ustctl_field *fields)
{
int ret, ret_code = 0;
- uint32_t chan_id, reg_count;
+ uint32_t chan_id;
uint64_t chan_reg_key;
enum ustctl_channel_header type;
struct ust_app *app;
/* Lookup application. If not found, there is a code flow error. */
app = find_app_by_notify_sock(sock);
if (!app) {
- DBG("Application socket %d is being teardown. Abort event notify",
+ DBG("Application socket %d is being torn down. Abort event notify",
sock);
ret = 0;
- free(fields);
goto error_rcu_unlock;
}
/* Lookup channel by UST object descriptor. */
ua_chan = find_channel_by_objd(app, cobjd);
if (!ua_chan) {
- DBG("Application channel is being teardown. Abort event notify");
+ DBG("Application channel is being torn down. Abort event notify");
ret = 0;
- free(fields);
goto error_rcu_unlock;
}
/* Get right session registry depending on the session buffer type. */
registry = get_session_registry(ua_sess);
- assert(registry);
+ if (!registry) {
+ DBG("Application session is being torn down. Abort event notify");
+ ret = 0;
+ goto error_rcu_unlock;
+ }
/* Depending on the buffer type, a different channel key is used. */
if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
assert(chan_reg);
if (!chan_reg->register_done) {
- reg_count = ust_registry_get_event_count(chan_reg);
- if (reg_count < 31) {
- type = USTCTL_CHANNEL_HEADER_COMPACT;
- } else {
- type = USTCTL_CHANNEL_HEADER_LARGE;
- }
-
+ /*
+ * TODO: eventually use the registry event count for
+ * this channel to better guess header type for per-pid
+ * buffers.
+ */
+ type = USTCTL_CHANNEL_HEADER_LARGE;
chan_reg->nr_ctx_fields = nr_fields;
chan_reg->ctx_fields = fields;
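+ /*
+ * The registry now owns fields; clear the local pointer so the
+ * unconditional free() on the exit path is a no-op.
+ */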
+ fields = NULL;
chan_reg->header_type = type;
} else {
/* Get current already assigned values. */
type = chan_reg->header_type;
- free(fields);
- /* Set to NULL so the error path does not do a double free. */
- fields = NULL;
}
/* Channel id is set during the object creation. */
chan_id = chan_reg->chan_id;
pthread_mutex_unlock(®istry->lock);
error_rcu_unlock:
rcu_read_unlock();
- if (ret) {
- free(fields);
- }
+ free(fields);
return ret;
}
* On success 0 is returned else a negative value.
*/
static int add_event_ust_registry(int sock, int sobjd, int cobjd, char *name,
- char *sig, size_t nr_fields, struct ustctl_field *fields, int loglevel,
- char *model_emf_uri)
+ char *sig, size_t nr_fields, struct ustctl_field *fields,
+ int loglevel_value, char *model_emf_uri)
{
int ret, ret_code;
uint32_t event_id = 0;
/* Lookup application. If not found, there is a code flow error. */
app = find_app_by_notify_sock(sock);
if (!app) {
- DBG("Application socket %d is being teardown. Abort event notify",
+ DBG("Application socket %d is being torn down. Abort event notify",
sock);
ret = 0;
- free(sig);
- free(fields);
- free(model_emf_uri);
goto error_rcu_unlock;
}
/* Lookup channel by UST object descriptor. */
ua_chan = find_channel_by_objd(app, cobjd);
if (!ua_chan) {
- DBG("Application channel is being teardown. Abort event notify");
+ DBG("Application channel is being torn down. Abort event notify");
ret = 0;
- free(sig);
- free(fields);
- free(model_emf_uri);
goto error_rcu_unlock;
}
ua_sess = ua_chan->session;
registry = get_session_registry(ua_sess);
- assert(registry);
+ if (!registry) {
+ DBG("Application session is being torn down. Abort event notify");
+ ret = 0;
+ goto error_rcu_unlock;
+ }
if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
chan_reg_key = ua_chan->tracing_channel_id;
* three variables MUST NOT be read/written after this.
*/
ret_code = ust_registry_create_event(registry, chan_reg_key,
- sobjd, cobjd, name, sig, nr_fields, fields, loglevel,
- model_emf_uri, ua_sess->buffer_type, &event_id,
- app);
+ sobjd, cobjd, name, sig, nr_fields, fields,
+ loglevel_value, model_emf_uri, ua_sess->buffer_type,
+ &event_id, app);
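+ /*
+ * The registry now owns sig, fields and model_emf_uri; clear the
+ * local pointers so the free() calls on the exit path become
+ * no-ops.
+ */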
+ sig = NULL;
+ fields = NULL;
+ model_emf_uri = NULL;
/*
* The return value is returned to ustctl so in case of an error, the
pthread_mutex_unlock(®istry->lock);
error_rcu_unlock:
rcu_read_unlock();
+ free(sig);
+ free(fields);
+ free(model_emf_uri);
return ret;
}
/*
- * Handle application notification through the given notify socket.
+ * Add an enum to the UST session registry. Once done, this replies to the
+ * application with the appropriate error code.
*
- * Return 0 on success or else a negative value.
+ * The session UST registry lock is acquired within this function.
+ *
+ * On success 0 is returned else a negative value.
*/
-int ust_app_recv_notify(int sock)
+static int add_enum_ust_registry(int sock, int sobjd, char *name,
+ struct ustctl_enum_entry *entries, size_t nr_entries)
{
- int ret;
- enum ustctl_notify_cmd cmd;
+ int ret = 0, ret_code;
+ struct ust_app *app;
+ struct ust_app_session *ua_sess;
+ struct ust_registry_session *registry;
+ uint64_t enum_id = -1ULL;
+
+ rcu_read_lock();
+
+ /* Lookup application. If not found, there is a code flow error. */
+ app = find_app_by_notify_sock(sock);
+ if (!app) {
+ /* Return without error: the application is simply being torn down. */
+ DBG("Application socket %d is being torn down. Aborting enum registration",
+ sock);
+ free(entries);
+ goto error_rcu_unlock;
+ }
+
+ /* Lookup session by UST object descriptor. */
+ ua_sess = find_session_by_objd(app, sobjd);
+ if (!ua_sess) {
+ /* Return without error: the session is simply being torn down. */
+ DBG("Application session is being torn down (session not found). Aborting enum registration.");
+ free(entries);
+ goto error_rcu_unlock;
+ }
+
+ registry = get_session_registry(ua_sess);
+ if (!registry) {
+ DBG("Application session is being torn down (registry not found). Aborting enum registration.");
+ free(entries);
+ goto error_rcu_unlock;
+ }
+
+ pthread_mutex_lock(®istry->lock);
+
+ /*
+ * From this point on, the callee acquires ownership of entries.
+ * The entries variable MUST NOT be read or written after this
+ * call.
+ */
+ ret_code = ust_registry_create_or_find_enum(registry, sobjd, name,
+ entries, nr_entries, &enum_id);
+ entries = NULL;
+
+ /*
+ * The return code is sent back to the application through ustctl so
+ * that it can be notified of an error. It is important not to return
+ * a negative error from here on, or else the application socket would
+ * get closed.
+ */
+ ret = ustctl_reply_register_enum(sock, enum_id, ret_code);
+ if (ret < 0) {
+ if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
+ ERR("UST app reply enum failed with ret %d", ret);
+ } else {
+ DBG3("UST app reply enum failed. Application died");
+ }
+ /*
+ * No need to wipe the created enum since the application socket
+ * will get closed on error, cleaning up everything by itself.
+ */
+ goto error;
+ }
+
+ DBG3("UST registry enum %s added successfully or already found", name);
+
+error:
+ pthread_mutex_unlock(®istry->lock);
+error_rcu_unlock:
+ rcu_read_unlock();
+ return ret;
+}
+
+/*
+ * Handle application notification through the given notify socket.
+ *
+ * Return 0 on success or else a negative value.
+ */
+int ust_app_recv_notify(int sock)
+{
+ int ret;
+ enum ustctl_notify_cmd cmd;
DBG3("UST app receiving notify from sock %d", sock);
switch (cmd) {
case USTCTL_NOTIFY_CMD_EVENT:
{
- int sobjd, cobjd, loglevel;
+ int sobjd, cobjd, loglevel_value;
char name[LTTNG_UST_SYM_NAME_LEN], *sig, *model_emf_uri;
size_t nr_fields;
struct ustctl_field *fields;
DBG2("UST app ustctl register event received");
- ret = ustctl_recv_register_event(sock, &sobjd, &cobjd, name, &loglevel,
- &sig, &nr_fields, &fields, &model_emf_uri);
+ ret = ustctl_recv_register_event(sock, &sobjd, &cobjd, name,
+ &loglevel_value, &sig, &nr_fields, &fields,
+ &model_emf_uri);
if (ret < 0) {
if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
ERR("UST app recv event failed with ret %d", ret);
* to this function.
*/
ret = add_event_ust_registry(sock, sobjd, cobjd, name, sig, nr_fields,
- fields, loglevel, model_emf_uri);
+ fields, loglevel_value, model_emf_uri);
if (ret < 0) {
goto error;
}
* that if needed it will be freed. After this, it's invalid to access
* fields or clean it up.
*/
- ret = reply_ust_register_channel(sock, sobjd, cobjd, nr_fields,
+ ret = reply_ust_register_channel(sock, cobjd, nr_fields,
fields);
if (ret < 0) {
goto error;
break;
}
+ case USTCTL_NOTIFY_CMD_ENUM:
+ {
+ int sobjd;
+ char name[LTTNG_UST_SYM_NAME_LEN];
+ size_t nr_entries;
+ struct ustctl_enum_entry *entries;
+
+ DBG2("UST app ustctl register enum received");
+
+ ret = ustctl_recv_register_enum(sock, &sobjd, name,
+ &entries, &nr_entries);
+ if (ret < 0) {
+ if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
+ ERR("UST app recv enum failed with ret %d", ret);
+ } else {
+ DBG3("UST app recv enum failed. Application died");
+ }
+ goto error;
+ }
+
+ /* The callee assumes ownership of entries. */
+ ret = add_enum_ust_registry(sock, sobjd, name,
+ entries, nr_entries);
+ if (ret < 0) {
+ goto error;
+ }
+
+ break;
+ }
default:
/* Should NEVER happen. */
assert(0);
* Take a snapshot for a given UST session. The snapshot is sent to the given
* output.
*
- * Return 0 on success or else a negative value.
+ * Returns LTTNG_OK on success or a LTTNG_ERR error code.
*/
-int ust_app_snapshot_record(struct ltt_ust_session *usess,
+enum lttng_error_code ust_app_snapshot_record(struct ltt_ust_session *usess,
struct snapshot_output *output, int wait,
uint64_t nb_packets_per_stream)
{
int ret = 0;
- unsigned int snapshot_done = 0;
+ enum lttng_error_code status = LTTNG_OK;
struct lttng_ht_iter iter;
struct ust_app *app;
char pathname[PATH_MAX];
+ struct ltt_session *session = NULL;
+ uint64_t trace_archive_id;
assert(usess);
assert(output);
rcu_read_lock();
+ session = session_find_by_id(usess->id);
+ assert(session);
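+ /*
+ * The caller must already hold the session lock and the session
+ * list lock: pthread_mutex_trylock() fails with EBUSY on a mutex
+ * that is already held, so a non-zero return here confirms the
+ * lock is taken.
+ */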
+ assert(pthread_mutex_trylock(&session->lock));
+ assert(session_trylock_list());
+ trace_archive_id = session->current_archive_id;
+
switch (usess->buffer_type) {
case LTTNG_BUFFER_PER_UID:
{
struct buffer_reg_channel *reg_chan;
struct consumer_socket *socket;
+ if (!reg->registry->reg.ust->metadata_key) {
+ /* Skip since no metadata is present */
+ continue;
+ }
+
/* Get consumer socket to use to push the metadata. */
socket = consumer_find_socket_by_bitness(reg->bits_per_long,
usess->consumer);
if (!socket) {
- ret = -EINVAL;
+ status = LTTNG_ERR_INVALID;
goto error;
}
reg->uid, reg->bits_per_long);
if (ret < 0) {
PERROR("snprintf snapshot path");
+ status = LTTNG_ERR_INVALID;
goto error;
}
/* Add the UST default trace dir to path. */
cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
reg_chan, node.node) {
- ret = consumer_snapshot_channel(socket, reg_chan->consumer_key,
- output, 0, usess->uid, usess->gid, pathname, wait,
- nb_packets_per_stream);
- if (ret < 0) {
+ status = consumer_snapshot_channel(socket,
+ reg_chan->consumer_key,
+ output, 0, usess->uid,
+ usess->gid, pathname, wait,
+ nb_packets_per_stream,
+ trace_archive_id);
+ if (status != LTTNG_OK) {
goto error;
}
}
- ret = consumer_snapshot_channel(socket,
+ status = consumer_snapshot_channel(socket,
reg->registry->reg.ust->metadata_key, output, 1,
- usess->uid, usess->gid, pathname, wait, 0);
- if (ret < 0) {
+ usess->uid, usess->gid, pathname, wait, 0,
+ trace_archive_id);
+ if (status != LTTNG_OK) {
goto error;
}
- snapshot_done = 1;
}
break;
}
socket = consumer_find_socket_by_bitness(app->bits_per_long,
output->consumer);
if (!socket) {
- ret = -EINVAL;
+ status = LTTNG_ERR_INVALID;
goto error;
}
ret = snprintf(pathname, sizeof(pathname), DEFAULT_UST_TRACE_DIR "/%s",
ua_sess->path);
if (ret < 0) {
+ status = LTTNG_ERR_INVALID;
PERROR("snprintf snapshot path");
goto error;
}
cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
ua_chan, node.node) {
- ret = consumer_snapshot_channel(socket, ua_chan->key, output,
- 0, ua_sess->euid, ua_sess->egid, pathname, wait,
- nb_packets_per_stream);
- if (ret < 0) {
+ status = consumer_snapshot_channel(socket,
+ ua_chan->key, output,
+ 0, ua_sess->euid, ua_sess->egid,
+ pathname, wait,
+ nb_packets_per_stream,
+ trace_archive_id);
+ switch (status) {
+ case LTTNG_OK:
+ break;
+ case LTTNG_ERR_CHAN_NOT_FOUND:
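+ /* Per-PID buffer and application going away. */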
+ continue;
+ default:
goto error;
}
}
registry = get_session_registry(ua_sess);
- assert(registry);
- ret = consumer_snapshot_channel(socket, registry->metadata_key, output,
- 1, ua_sess->euid, ua_sess->egid, pathname, wait, 0);
- if (ret < 0) {
+ if (!registry) {
+ DBG("Application session is being torn down. Skip application.");
+ continue;
+ }
+ status = consumer_snapshot_channel(socket,
+ registry->metadata_key, output,
+ 1, ua_sess->euid, ua_sess->egid,
+ pathname, wait, 0,
+ trace_archive_id);
+ switch (status) {
+ case LTTNG_OK:
+ break;
+ case LTTNG_ERR_CHAN_NOT_FOUND:
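+ /* Per-PID buffer and application going away. */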
+ continue;
+ default:
goto error;
}
- snapshot_done = 1;
}
break;
}
break;
}
- if (!snapshot_done) {
- /*
- * If no snapshot was made and we are not in the error path, this means
- * that there are no buffers thus no (prior) application to snapshot
- * data from so we have simply NO data.
- */
- ret = -ENODATA;
- }
-
error:
rcu_read_unlock();
- return ret;
+ if (session) {
+ session_put(session);
+ }
+ return status;
}
/*
return tot_size;
}
+
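+/*
+ * Report the runtime statistics of a per-UID channel: the lost packet
+ * count if the channel is in overwrite mode, the discarded event count
+ * otherwise. If the channel key cannot be resolved, both counters are
+ * reported as zero.
+ *
+ * Return 0 on success or else a negative value.
+ */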
+int ust_app_uid_get_channel_runtime_stats(uint64_t ust_session_id,
+ struct cds_list_head *buffer_reg_uid_list,
+ struct consumer_output *consumer, uint64_t uchan_id,
+ int overwrite, uint64_t *discarded, uint64_t *lost)
+{
+ int ret;
+ uint64_t consumer_chan_key;
+
+ *discarded = 0;
+ *lost = 0;
+
+ ret = buffer_reg_uid_consumer_channel_key(
+ buffer_reg_uid_list, uchan_id, &consumer_chan_key);
+ if (ret < 0) {
+ /* Not found */
+ ret = 0;
+ goto end;
+ }
+
+ if (overwrite) {
+ ret = consumer_get_lost_packets(ust_session_id,
+ consumer_chan_key, consumer, lost);
+ } else {
+ ret = consumer_get_discarded_events(ust_session_id,
+ consumer_chan_key, consumer, discarded);
+ }
+
+end:
+ return ret;
+}
+
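+/*
+ * Report the runtime statistics of a per-PID channel by summing, over
+ * every registered application tracing the session, either the lost
+ * packet count (overwrite mode) or the discarded event count.
+ *
+ * Return 0 on success or else a negative value.
+ */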
+int ust_app_pid_get_channel_runtime_stats(struct ltt_ust_session *usess,
+ struct ltt_ust_channel *uchan,
+ struct consumer_output *consumer, int overwrite,
+ uint64_t *discarded, uint64_t *lost)
+{
+ int ret = 0;
+ struct lttng_ht_iter iter;
+ struct lttng_ht_node_str *ua_chan_node;
+ struct ust_app *app;
+ struct ust_app_session *ua_sess;
+ struct ust_app_channel *ua_chan;
+
+ *discarded = 0;
+ *lost = 0;
+
+ rcu_read_lock();
+ /*
+ * Iterate over all registered applications. Sum the counters of
+ * every application that contains the requested session and channel.
+ */
+ cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ struct lttng_ht_iter uiter;
+
+ ua_sess = lookup_session_by_app(usess, app);
+ if (ua_sess == NULL) {
+ continue;
+ }
+
+ /* Get channel */
+ lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
+ ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
+ /* If the session is found for the app, the channel must be there */
+ assert(ua_chan_node);
+
+ ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
+
+ if (overwrite) {
+ uint64_t _lost;
+
+ ret = consumer_get_lost_packets(usess->id, ua_chan->key,
+ consumer, &_lost);
+ if (ret < 0) {
+ break;
+ }
+ (*lost) += _lost;
+ } else {
+ uint64_t _discarded;
+
+ ret = consumer_get_discarded_events(usess->id,
+ ua_chan->key, consumer, &_discarded);
+ if (ret < 0) {
+ break;
+ }
+ (*discarded) += _discarded;
+ }
+ }
+
+ rcu_read_unlock();
+ return ret;
+}
+
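+/*
+ * Ask the tracer to regenerate the statedump of a single application.
+ *
+ * Return 0 on success or else a negative value.
+ */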
+static
+int ust_app_regenerate_statedump(struct ltt_ust_session *usess,
+ struct ust_app *app)
+{
+ int ret = 0;
+ struct ust_app_session *ua_sess;
+
+ DBG("Regenerating the metadata for ust app pid %d", app->pid);
+
+ rcu_read_lock();
+
+ ua_sess = lookup_session_by_app(usess, app);
+ if (ua_sess == NULL) {
+ /* The session is being torn down. Ignore and continue. */
+ goto end;
+ }
+
+ pthread_mutex_lock(&ua_sess->lock);
+
+ if (ua_sess->deleted) {
+ goto end_unlock;
+ }
+
+ pthread_mutex_lock(&app->sock_lock);
+ ret = ustctl_regenerate_statedump(app->sock, ua_sess->handle);
+ pthread_mutex_unlock(&app->sock_lock);
+
+end_unlock:
+ pthread_mutex_unlock(&ua_sess->lock);
+
+end:
+ rcu_read_unlock();
+ health_code_update();
+ return ret;
+}
+
+/*
+ * Regenerate the statedump for each app in the session.
+ */
+int ust_app_regenerate_statedump_all(struct ltt_ust_session *usess)
+{
+ int ret = 0;
+ struct lttng_ht_iter iter;
+ struct ust_app *app;
+
+ DBG("Regenerating the metadata for all UST apps");
+
+ rcu_read_lock();
+
+ cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ if (!app->compatible) {
+ continue;
+ }
+
+ ret = ust_app_regenerate_statedump(usess, app);
+ if (ret < 0) {
+ /* Continue to the next app even on error */
+ continue;
+ }
+ }
+
+ rcu_read_unlock();
+
+ return 0;
+}
+
+/*
+ * Rotate all the channels of a session.
+ *
+ * Return LTTNG_OK on success or else an LTTng error code.
+ */
+enum lttng_error_code ust_app_rotate_session(struct ltt_session *session)
+{
+ int ret;
+ enum lttng_error_code cmd_ret = LTTNG_OK;
+ struct lttng_ht_iter iter;
+ struct ust_app *app;
+ struct ltt_ust_session *usess = session->ust_session;
+ char pathname[LTTNG_PATH_MAX];
+
+ assert(usess);
+
+ rcu_read_lock();
+
+ switch (usess->buffer_type) {
+ case LTTNG_BUFFER_PER_UID:
+ {
+ struct buffer_reg_uid *reg;
+
+ cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
+ struct buffer_reg_channel *reg_chan;
+ struct consumer_socket *socket;
+
+ /* Get the consumer socket to use for the rotation. */
+ socket = consumer_find_socket_by_bitness(reg->bits_per_long,
+ usess->consumer);
+ if (!socket) {
+ cmd_ret = LTTNG_ERR_INVALID;
+ goto error;
+ }
+
+ ret = snprintf(pathname, sizeof(pathname),
+ DEFAULT_UST_TRACE_DIR "/" DEFAULT_UST_TRACE_UID_PATH,
+ reg->uid, reg->bits_per_long);
+ if (ret < 0 || ret == sizeof(pathname)) {
+ PERROR("Failed to format rotation path");
+ cmd_ret = LTTNG_ERR_INVALID;
+ goto error;
+ }
+
+ /* Rotate the data channels. */
+ cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
+ reg_chan, node.node) {
+ ret = consumer_rotate_channel(socket,
+ reg_chan->consumer_key,
+ usess->uid, usess->gid,
+ usess->consumer, pathname,
+ /* is_metadata_channel */ false,
+ session->current_archive_id);
+ if (ret < 0) {
+ cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
+ goto error;
+ }
+ }
+
+ (void) push_metadata(reg->registry->reg.ust, usess->consumer);
+
+ ret = consumer_rotate_channel(socket,
+ reg->registry->reg.ust->metadata_key,
+ usess->uid, usess->gid,
+ usess->consumer, pathname,
+ /* is_metadata_channel */ true,
+ session->current_archive_id);
+ if (ret < 0) {
+ cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
+ goto error;
+ }
+ }
+ break;
+ }
+ case LTTNG_BUFFER_PER_PID:
+ {
+ cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ struct consumer_socket *socket;
+ struct lttng_ht_iter chan_iter;
+ struct ust_app_channel *ua_chan;
+ struct ust_app_session *ua_sess;
+ struct ust_registry_session *registry;
+
+ ua_sess = lookup_session_by_app(usess, app);
+ if (!ua_sess) {
+ /* Session not associated with this app. */
+ continue;
+ }
+ ret = snprintf(pathname, sizeof(pathname),
+ DEFAULT_UST_TRACE_DIR "/%s",
+ ua_sess->path);
+ if (ret < 0 || ret == sizeof(pathname)) {
+ PERROR("Failed to format rotation path");
+ cmd_ret = LTTNG_ERR_INVALID;
+ goto error;
+ }
+
+ /* Get the right consumer socket for the application. */
+ socket = consumer_find_socket_by_bitness(app->bits_per_long,
+ usess->consumer);
+ if (!socket) {
+ cmd_ret = LTTNG_ERR_INVALID;
+ goto error;
+ }
+
+ registry = get_session_registry(ua_sess);
+ if (!registry) {
+ DBG("Application session is being torn down. Skip application.");
+ continue;
+ }
+
+ /* Rotate the data channels. */
+ cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
+ ua_chan, node.node) {
+ ret = consumer_rotate_channel(socket, ua_chan->key,
+ ua_sess->euid, ua_sess->egid,
+ ua_sess->consumer, pathname,
+ /* is_metadata_channel */ false,
+ session->current_archive_id);
+ if (ret < 0) {
+ /* Per-PID buffer and application going away. */
+ if (ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
+ continue;
+ }
+ cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
+ goto error;
+ }
+ }
+
+ /* Rotate the metadata channel. */
+ (void) push_metadata(registry, usess->consumer);
+ ret = consumer_rotate_channel(socket, registry->metadata_key,
+ ua_sess->euid, ua_sess->egid,
+ ua_sess->consumer, pathname,
+ /* is_metadata_channel */ true,
+ session->current_archive_id);
+ if (ret < 0) {
+ /* Per-PID buffer and application going away. */
+ if (ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
+ continue;
+ }
+ cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
+ goto error;
+ }
+ }
+ break;
+ }
+ default:
+ assert(0);
+ break;
+ }
+
+ cmd_ret = LTTNG_OK;
+
+error:
+ rcu_read_unlock();
+ return cmd_ret;
+}