#include "ust-ctl.h"
#include "utils.h"
#include "session.h"
+#include "lttng-sessiond.h"
+#include "notification-thread-commands.h"
static
int ust_app_flush_app_session(struct ust_app *app, struct ust_app_session *ua_sess);
attr->switch_timer_interval = uattr->switch_timer_interval;
attr->read_timer_interval = uattr->read_timer_interval;
attr->output = uattr->output;
+ attr->blocking_timeout = uattr->u.s.blocking_timeout;
}
/*
/* Wipe and free registry from session registry. */
registry = get_session_registry(ua_chan->session);
if (registry) {
- ust_registry_channel_del_free(registry, ua_chan->key);
+ ust_registry_channel_del_free(registry, ua_chan->key,
+ true);
}
save_per_pid_lost_discarded_counters(ua_chan);
}
return 0;
}
- /*
- * On a push metadata error either the consumer is dead or the
- * metadata channel has been destroyed because its endpoint
- * might have died (e.g: relayd), or because the application has
- * exited. If so, the metadata closed flag is set to 1 so we
- * deny pushing metadata again which is not valid anymore on the
- * consumer side.
- */
- if (registry->metadata_closed) {
- return -EPIPE;
- }
-
offset = registry->metadata_len_sent;
len = registry->metadata_len - registry->metadata_len_sent;
new_metadata_len_sent = registry->metadata_len;
ua_sess->deleted = true;
registry = get_session_registry(ua_sess);
+ /* Registry can be null on error path during initialization. */
if (registry) {
/* Push metadata for application before freeing the application. */
(void) push_metadata(registry, ua_sess->consumer);
if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
if (reg_pid) {
+ /*
+ * Registry can be null on error path during
+ * initialization.
+ */
buffer_reg_pid_remove(reg_pid);
buffer_reg_pid_destroy(reg_pid);
}
ua_chan->attr.switch_timer_interval = attr->switch_timer_interval;
ua_chan->attr.read_timer_interval = attr->read_timer_interval;
ua_chan->attr.output = attr->output;
+ ua_chan->attr.blocking_timeout = attr->u.s.blocking_timeout;
}
/* By default, the channel is a per cpu channel. */
ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;
ua_chan->attr.overwrite = uchan->attr.overwrite;
ua_chan->attr.switch_timer_interval = uchan->attr.switch_timer_interval;
ua_chan->attr.read_timer_interval = uchan->attr.read_timer_interval;
+ ua_chan->monitor_timer_interval = uchan->monitor_timer_interval;
ua_chan->attr.output = uchan->attr.output;
+ ua_chan->attr.blocking_timeout = uchan->attr.u.s.blocking_timeout;
+
/*
* Note that the attribute channel type is not set since the channel on the
* tracing registry side does not have this information.
/*
* Ask the consumer to create a channel and get it if successful.
*
+ * Called with UST app session lock held.
+ *
* Return 0 on success or else a negative value.
*/
static int do_consumer_create_channel(struct ltt_ust_session *usess,
(void) release_ust_app_stream(-1, &stream, app);
if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
ret = -ENOTCONN; /* Caused by app exiting. */
- goto error_stream_unlock;
- } else if (ret < 0) {
- goto error_stream_unlock;
}
goto error_stream_unlock;
}
int ret;
struct buffer_reg_uid *reg_uid;
struct buffer_reg_channel *reg_chan;
+ bool created = false;
assert(app);
assert(usess);
* it's not visible anymore in the session registry.
*/
ust_registry_channel_del_free(reg_uid->registry->reg.ust,
- ua_chan->tracing_channel_id);
+ ua_chan->tracing_channel_id, false);
buffer_reg_channel_remove(reg_uid->registry, reg_chan);
buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
goto error;
ua_chan->name);
goto error;
}
+ created = true;
+ }
+
+ if (created) {
+ enum lttng_error_code cmd_ret;
+ struct ltt_session *session;
+ uint64_t chan_reg_key;
+ struct ust_registry_channel *chan_reg;
+
+ rcu_read_lock();
+ chan_reg_key = ua_chan->tracing_channel_id;
+ pthread_mutex_lock(&reg_uid->registry->reg.ust->lock);
+ chan_reg = ust_registry_channel_find(reg_uid->registry->reg.ust,
+ chan_reg_key);
+ assert(chan_reg);
+ chan_reg->consumer_key = ua_chan->key;
+ chan_reg = NULL;
+ pthread_mutex_unlock(&reg_uid->registry->reg.ust->lock);
+
+ session = session_find_by_id(ua_sess->tracing_id);
+ assert(session);
+
+ cmd_ret = notification_thread_command_add_channel(
+ notification_thread_handle, session->name,
+ ua_sess->euid, ua_sess->egid,
+ ua_chan->name,
+ ua_chan->key,
+ LTTNG_DOMAIN_UST,
+ ua_chan->attr.subbuf_size * ua_chan->attr.num_subbuf);
+ rcu_read_unlock();
+ if (cmd_ret != LTTNG_OK) {
+ ret = - (int) cmd_ret;
+ ERR("Failed to add channel to notification thread");
+ goto error;
+ }
}
/* Send buffers to the application. */
/*
* Create and send to the application the created buffers with per PID buffers.
*
+ * Called with UST app session lock held.
+ *
* Return 0 on success else a negative value.
*/
static int create_channel_per_pid(struct ust_app *app,
{
int ret;
struct ust_registry_session *registry;
+ enum lttng_error_code cmd_ret;
+ struct ltt_session *session;
+ uint64_t chan_reg_key;
+ struct ust_registry_channel *chan_reg;
assert(app);
assert(usess);
rcu_read_lock();
registry = get_session_registry(ua_sess);
+ /* The UST app session lock is held, registry shall not be null. */
assert(registry);
/* Create and add a new channel registry to session. */
goto error;
}
+ session = session_find_by_id(ua_sess->tracing_id);
+ assert(session);
+
+ chan_reg_key = ua_chan->key;
+ pthread_mutex_lock(&registry->lock);
+ chan_reg = ust_registry_channel_find(registry, chan_reg_key);
+ assert(chan_reg);
+ chan_reg->consumer_key = ua_chan->key;
+ pthread_mutex_unlock(&registry->lock);
+
+ cmd_ret = notification_thread_command_add_channel(
+ notification_thread_handle, session->name,
+ ua_sess->euid, ua_sess->egid,
+ ua_chan->name,
+ ua_chan->key,
+ LTTNG_DOMAIN_UST,
+ ua_chan->attr.subbuf_size * ua_chan->attr.num_subbuf);
+ if (cmd_ret != LTTNG_OK) {
+ ret = - (int) cmd_ret;
+ ERR("Failed to add channel to notification thread");
+ goto error;
+ }
+
error:
rcu_read_unlock();
return ret;
* need and send it to the application. This MUST be called with a RCU read
* side lock acquired.
*
+ * Called with UST app session lock held.
+ *
* Return 0 on success or else a negative value. Returns -ENOTCONN if
* the application exited concurrently.
*/
/* Only add the channel if successful on the tracer side. */
lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
-
end:
if (ua_chanp) {
*ua_chanp = ua_chan;
assert(consumer);
registry = get_session_registry(ua_sess);
+ /* The UST app session lock is held; registry shall not be null. */
assert(registry);
pthread_mutex_lock(&registry->lock);
/*
* Start tracing for a specific UST session and app.
+ *
+ * Called with UST app session lock held.
+ *
*/
static
int ust_app_start_trace(struct ltt_ust_session *usess, struct ust_app *app)
health_code_update();
registry = get_session_registry(ua_sess);
+
+ /* The UST app session lock is held; registry shall not be null. */
assert(registry);
/* Push metadata for application before freeing the application. */
return ret;
}
+/*
+ * Clear the quiescent state (set by a prior "stop") on every stream of
+ * one application session so that a subsequent stop/destroy samples a
+ * timestamp_end close to those operations, even for empty packets.
+ *
+ * Only per-PID buffer sessions are handled here; per-UID buffers are
+ * cleared at the buffer registry level by the caller
+ * (ust_app_clear_quiescent_session), hence the assert below.
+ *
+ * Return 0 on success, -1 on error.
+ */
+static
+int ust_app_clear_quiescent_app_session(struct ust_app *app,
+ struct ust_app_session *ua_sess)
+{
+ int ret = 0;
+ struct lttng_ht_iter iter;
+ struct ust_app_channel *ua_chan;
+ struct consumer_socket *socket;
+
+ DBG("Clearing stream quiescent state for ust app pid %d", app->pid);
+
+ rcu_read_lock();
+
+ if (!app->compatible) {
+ goto end_not_compatible;
+ }
+
+ pthread_mutex_lock(&ua_sess->lock);
+
+ if (ua_sess->deleted) {
+ /* Session is being torn down; nothing to clear. */
+ goto end_unlock;
+ }
+
+ health_code_update();
+
+ /* Consumer socket matching the application's bitness (32/64). */
+ socket = consumer_find_socket_by_bitness(app->bits_per_long,
+ ua_sess->consumer);
+ if (!socket) {
+ ERR("Failed to find consumer (%" PRIu32 ") socket",
+ app->bits_per_long);
+ ret = -1;
+ goto end_unlock;
+ }
+
+ /* Clear quiescent state. */
+ switch (ua_sess->buffer_type) {
+ case LTTNG_BUFFER_PER_PID:
+ cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter,
+ ua_chan, node.node) {
+ health_code_update();
+ ret = consumer_clear_quiescent_channel(socket,
+ ua_chan->key);
+ if (ret) {
+ ERR("Error clearing quiescent state for consumer channel");
+ ret = -1;
+ /* Best-effort: keep going for remaining channels. */
+ continue;
+ }
+ }
+ break;
+ case LTTNG_BUFFER_PER_UID:
+ default:
+ /* Per-UID buffers are handled by the caller, not per-app. */
+ assert(0);
+ ret = -1;
+ break;
+ }
+
+ health_code_update();
+
+end_unlock:
+ pthread_mutex_unlock(&ua_sess->lock);
+
+end_not_compatible:
+ rcu_read_unlock();
+ health_code_update();
+ return ret;
+}
+
+/*
+ * Clear quiescent state in each stream for all applications for a
+ * specific UST session.
+ * Called with UST session lock held.
+ *
+ * The quiescent state is set by the "stop" command; clearing it on a
+ * start-stop-start sequence ensures a following stop or destroy grabs
+ * a timestamp_end near those operations, even if packets are empty.
+ *
+ * Return 0 on success, -1 on an invalid buffer type (per-channel
+ * consumer errors are logged and otherwise ignored, best-effort).
+ */
+static
+int ust_app_clear_quiescent_session(struct ltt_ust_session *usess)
+
+{
+ int ret = 0;
+
+ DBG("Clearing stream quiescent state for all ust apps");
+
+ rcu_read_lock();
+
+ switch (usess->buffer_type) {
+ case LTTNG_BUFFER_PER_UID:
+ {
+ struct lttng_ht_iter iter;
+ struct buffer_reg_uid *reg;
+
+ /*
+ * Clear quiescent for all per UID buffers associated to
+ * that session.
+ */
+ cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
+ struct consumer_socket *socket;
+ struct buffer_reg_channel *reg_chan;
+
+ /* Get associated consumer socket.*/
+ socket = consumer_find_socket_by_bitness(
+ reg->bits_per_long, usess->consumer);
+ if (!socket) {
+ /*
+ * Ignore request if no consumer is found for
+ * the session.
+ */
+ continue;
+ }
+
+ cds_lfht_for_each_entry(reg->registry->channels->ht,
+ &iter.iter, reg_chan, node.node) {
+ /*
+ * The following call will print error values so
+ * the return code is of little importance
+ * because whatever happens, we have to try them
+ * all.
+ */
+ (void) consumer_clear_quiescent_channel(socket,
+ reg_chan->consumer_key);
+ }
+ }
+ break;
+ }
+ case LTTNG_BUFFER_PER_PID:
+ {
+ struct ust_app_session *ua_sess;
+ struct lttng_ht_iter iter;
+ struct ust_app *app;
+
+ /* Delegate to each registered application's session. */
+ cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app,
+ pid_n.node) {
+ ua_sess = lookup_session_by_app(usess, app);
+ if (ua_sess == NULL) {
+ continue;
+ }
+ (void) ust_app_clear_quiescent_app_session(app,
+ ua_sess);
+ }
+ break;
+ }
+ default:
+ ret = -1;
+ assert(0);
+ break;
+ }
+
+ rcu_read_unlock();
+ health_code_update();
+ return ret;
+}
+
/*
* Destroy a specific UST session in apps.
*/
rcu_read_lock();
+ /*
+ * In a start-stop-start use-case, we need to clear the quiescent state
+ * of each channel set by the prior stop command, thus ensuring that a
+ * following stop or destroy is sure to grab a timestamp_end near those
+ * operations, even if the packet is empty.
+ */
+ (void) ust_app_clear_quiescent_session(usess);
+
cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
ret = ust_app_start_trace(usess, app);
if (ret < 0) {
return ret;
}
-/*
- * Calibrate registered applications.
- */
-int ust_app_calibrate_glb(struct lttng_ust_calibrate *calibrate)
-{
- int ret = 0;
- struct lttng_ht_iter iter;
- struct ust_app *app;
-
- rcu_read_lock();
-
- cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
- if (!app->compatible) {
- /*
- * TODO: In time, we should notice the caller of this error by
- * telling him that this is a version error.
- */
- continue;
- }
-
- health_code_update();
-
- pthread_mutex_lock(&app->sock_lock);
- ret = ustctl_calibrate(app->sock, calibrate);
- pthread_mutex_unlock(&app->sock_lock);
- if (ret < 0) {
- switch (ret) {
- case -ENOSYS:
- /* Means that it's not implemented on the tracer side. */
- ret = 0;
- break;
- default:
- DBG2("Calibrate app PID %d returned with error %d",
- app->pid, ret);
- break;
- }
- }
- }
-
- DBG("UST app global domain calibration finished");
-
- rcu_read_unlock();
-
- health_code_update();
-
- return ret;
-}
-
/*
* Receive registration and populate the given msg structure.
*
size_t nr_fields, struct ustctl_field *fields)
{
int ret, ret_code = 0;
- uint32_t chan_id, reg_count;
+ uint32_t chan_id;
uint64_t chan_reg_key;
enum ustctl_channel_header type;
struct ust_app *app;
/* Lookup application. If not found, there is a code flow error. */
app = find_app_by_notify_sock(sock);
if (!app) {
- DBG("Application socket %d is being teardown. Abort event notify",
+ DBG("Application socket %d is being torn down. Abort event notify",
sock);
ret = 0;
- free(fields);
goto error_rcu_unlock;
}
/* Lookup channel by UST object descriptor. */
ua_chan = find_channel_by_objd(app, cobjd);
if (!ua_chan) {
- DBG("Application channel is being teardown. Abort event notify");
+ DBG("Application channel is being torn down. Abort event notify");
ret = 0;
- free(fields);
goto error_rcu_unlock;
}
/* Get right session registry depending on the session buffer type. */
registry = get_session_registry(ua_sess);
- assert(registry);
+ if (!registry) {
+ DBG("Application session is being torn down. Abort event notify");
+ ret = 0;
+ goto error_rcu_unlock;
+ };
/* Depending on the buffer type, a different channel key is used. */
if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
assert(chan_reg);
if (!chan_reg->register_done) {
- reg_count = ust_registry_get_event_count(chan_reg);
- if (reg_count < 31) {
- type = USTCTL_CHANNEL_HEADER_COMPACT;
- } else {
- type = USTCTL_CHANNEL_HEADER_LARGE;
- }
-
+ /*
+ * TODO: eventually use the registry event count for
+ * this channel to better guess header type for per-pid
+ * buffers.
+ */
+ type = USTCTL_CHANNEL_HEADER_LARGE;
chan_reg->nr_ctx_fields = nr_fields;
chan_reg->ctx_fields = fields;
+ fields = NULL;
chan_reg->header_type = type;
} else {
/* Get current already assigned values. */
type = chan_reg->header_type;
- free(fields);
- /* Set to NULL so the error path does not do a double free. */
- fields = NULL;
}
/* Channel id is set during the object creation. */
chan_id = chan_reg->chan_id;
pthread_mutex_unlock(&registry->lock);
error_rcu_unlock:
rcu_read_unlock();
- if (ret) {
- free(fields);
- }
+ free(fields);
return ret;
}
/* Lookup application. If not found, there is a code flow error. */
app = find_app_by_notify_sock(sock);
if (!app) {
- DBG("Application socket %d is being teardown. Abort event notify",
+ DBG("Application socket %d is being torn down. Abort event notify",
sock);
ret = 0;
- free(sig);
- free(fields);
- free(model_emf_uri);
goto error_rcu_unlock;
}
/* Lookup channel by UST object descriptor. */
ua_chan = find_channel_by_objd(app, cobjd);
if (!ua_chan) {
- DBG("Application channel is being teardown. Abort event notify");
+ DBG("Application channel is being torn down. Abort event notify");
ret = 0;
- free(sig);
- free(fields);
- free(model_emf_uri);
goto error_rcu_unlock;
}
ua_sess = ua_chan->session;
registry = get_session_registry(ua_sess);
- assert(registry);
+ if (!registry) {
+ DBG("Application session is being torn down. Abort event notify");
+ ret = 0;
+ goto error_rcu_unlock;
+ }
if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
chan_reg_key = ua_chan->tracing_channel_id;
sobjd, cobjd, name, sig, nr_fields, fields,
loglevel_value, model_emf_uri, ua_sess->buffer_type,
&event_id, app);
+ sig = NULL;
+ fields = NULL;
+ model_emf_uri = NULL;
/*
* The return value is returned to ustctl so in case of an error, the
pthread_mutex_unlock(&registry->lock);
error_rcu_unlock:
rcu_read_unlock();
+ free(sig);
+ free(fields);
+ free(model_emf_uri);
return ret;
}
ua_sess = find_session_by_objd(app, sobjd);
if (!ua_sess) {
/* Return an error since this is not an error */
- DBG("Application session is being torn down. Aborting enum registration.");
+ DBG("Application session is being torn down (session not found). Aborting enum registration.");
free(entries);
goto error_rcu_unlock;
}
registry = get_session_registry(ua_sess);
- assert(registry);
+ if (!registry) {
+ DBG("Application session is being torn down (registry not found). Aborting enum registration.");
+ free(entries);
+ goto error_rcu_unlock;
+ }
pthread_mutex_lock(&registry->lock);
struct buffer_reg_channel *reg_chan;
struct consumer_socket *socket;
+ if (!reg->registry->reg.ust->metadata_key) {
+ /* Skip since no metadata is present */
+ continue;
+ }
+
/* Get consumer socket to use to push the metadata.*/
socket = consumer_find_socket_by_bitness(reg->bits_per_long,
usess->consumer);
}
registry = get_session_registry(ua_sess);
- assert(registry);
+ if (!registry) {
+ DBG("Application session is being torn down. Abort snapshot record.");
+ ret = -1;
+ goto error;
+ }
ret = consumer_snapshot_channel(socket, registry->metadata_key, output,
1, ua_sess->euid, ua_sess->egid, pathname, wait, 0);
if (ret < 0) {
int ret;
uint64_t consumer_chan_key;
+ *discarded = 0;
+ *lost = 0;
+
ret = buffer_reg_uid_consumer_channel_key(
buffer_reg_uid_list, ust_session_id,
uchan_id, &consumer_chan_key);
if (ret < 0) {
+ /* Not found */
+ ret = 0;
goto end;
}
if (overwrite) {
ret = consumer_get_lost_packets(ust_session_id,
consumer_chan_key, consumer, lost);
- *discarded = 0;
} else {
ret = consumer_get_discarded_events(ust_session_id,
consumer_chan_key, consumer, discarded);
- *lost = 0;
}
end:
struct ust_app_session *ua_sess;
struct ust_app_channel *ua_chan;
+ *discarded = 0;
+ *lost = 0;
+
rcu_read_lock();
/*
- * Iterate over every registered applications, return when we
- * found one in the right session and channel.
+ * Iterate over every registered applications. Sum counters for
+ * all applications containing requested session and channel.
*/
cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
struct lttng_ht_iter uiter;
ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
if (overwrite) {
+ uint64_t _lost;
+
ret = consumer_get_lost_packets(usess->id, ua_chan->key,
- consumer, lost);
- *discarded = 0;
- goto end;
+ consumer, &_lost);
+ if (ret < 0) {
+ break;
+ }
+ (*lost) += _lost;
} else {
+ uint64_t _discarded;
+
ret = consumer_get_discarded_events(usess->id,
- ua_chan->key, consumer, discarded);
- *lost = 0;
- goto end;
+ ua_chan->key, consumer, &_discarded);
+ if (ret < 0) {
+ break;
+ }
+ (*discarded) += _discarded;
}
}
+ rcu_read_unlock();
+ return ret;
+}
+
+/*
+ * Ask one registered application to regenerate its statedump events
+ * for the given UST session.
+ *
+ * NOTE(review): the DBG message below mentions "metadata" although this
+ * function triggers a statedump regeneration - confirm intended
+ * wording.
+ *
+ * Return 0 on success (including when the app session is not found,
+ * i.e. in teardown), or the ustctl_regenerate_statedump() error code.
+ */
+static
+int ust_app_regenerate_statedump(struct ltt_ust_session *usess,
+ struct ust_app *app)
+{
+ int ret = 0;
+ struct ust_app_session *ua_sess;
+
+ DBG("Regenerating the metadata for ust app pid %d", app->pid);
+
+ rcu_read_lock();
+
+ ua_sess = lookup_session_by_app(usess, app);
+ if (ua_sess == NULL) {
+ /* The session is in teardown process. Ignore and continue. */
+ goto end;
+ }
+
+ pthread_mutex_lock(&ua_sess->lock);
+
+ if (ua_sess->deleted) {
+ /* Session already marked deleted; nothing to do. */
+ goto end_unlock;
+ }
+
+ /* Serialize accesses to the app's command socket. */
+ pthread_mutex_lock(&app->sock_lock);
+ ret = ustctl_regenerate_statedump(app->sock, ua_sess->handle);
+ pthread_mutex_unlock(&app->sock_lock);
+
+end_unlock:
+ pthread_mutex_unlock(&ua_sess->lock);
+
end:
rcu_read_unlock();
+ health_code_update();
return ret;
}
+
+/*
+ * Regenerate the statedump for each app in the session.
+ *
+ * Per-application failures are deliberately ignored so that every
+ * registered, compatible application gets a chance to regenerate its
+ * statedump; this function always returns 0.
+ */
+int ust_app_regenerate_statedump_all(struct ltt_ust_session *usess)
+{
+ int ret = 0;
+ struct lttng_ht_iter iter;
+ struct ust_app *app;
+
+ DBG("Regenerating the metadata for all UST apps");
+
+ rcu_read_lock();
+
+ cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+ if (!app->compatible) {
+ /* Skip apps with an incompatible tracer version. */
+ continue;
+ }
+
+ ret = ust_app_regenerate_statedump(usess, app);
+ if (ret < 0) {
+ /* Continue to the next app even on error */
+ continue;
+ }
+ }
+
+ rcu_read_unlock();
+
+ return 0;
+}