extern struct lttng_consumer_global_data consumer_data;
extern int consumer_poll_timeout;
-extern volatile int consumer_quit;
/*
* Free channel object and all streams associated with it. This MUST be used
{
int ret = 0;
struct lttng_consumer_channel *channel;
+ unsigned int channel_monitor;
DBG("UST consumer close metadata key %" PRIu64, chan_key);
pthread_mutex_lock(&consumer_data.lock);
pthread_mutex_lock(&channel->lock);
-
+ channel_monitor = channel->monitor;
if (cds_lfht_is_node_deleted(&channel->node.node)) {
goto error_unlock;
}
lttng_ustconsumer_close_metadata(channel);
+ pthread_mutex_unlock(&channel->lock);
+ pthread_mutex_unlock(&consumer_data.lock);
+ /*
+ * The ownership of a metadata channel depends on the type of
+ * session to which it belongs. In effect, the monitor flag is checked
+ * to determine if this metadata channel is in "snapshot" mode or not.
+ *
+ * In the non-snapshot case, the metadata channel is created along with
+ * a single stream which will remain present until the metadata channel
+ * is destroyed (on the destruction of its session). In this case, the
+ * metadata stream is "monitored" by the metadata poll thread and holds
+ * the ownership of its channel.
+ *
+ * Closing the metadata will cause the metadata stream's "metadata poll
+ * pipe" to be closed. Closing this pipe will wake-up the metadata poll
+ * thread which will teardown the metadata stream which, in return,
+ * deletes the metadata channel.
+ *
+ * In the snapshot case, the metadata stream is created and destroyed
+ * on every snapshot record. Since the channel doesn't have an owner
+ * other than the session daemon, it is safe to destroy it immediately
+ * on reception of the CLOSE_METADATA command.
+ */
+ if (!channel_monitor) {
+ /*
+ * The channel and consumer_data locks must be
+ * released before this call since consumer_del_channel
+ * re-acquires the channel and consumer_data locks to tear down
+ * the channel and queue its reclamation by the "call_rcu"
+ * worker thread.
+ */
+ consumer_del_channel(channel);
+ }
+
+ return ret;
error_unlock:
pthread_mutex_unlock(&channel->lock);
pthread_mutex_unlock(&consumer_data.lock);
DBG("UST consumer snapshot channel %" PRIu64, key);
cds_list_for_each_entry(stream, &channel->streams.head, send_node) {
- /* Are we at a position _before_ the first available packet ? */
- bool before_first_packet = true;
-
health_code_update();
/* Lock stream because we are about to change its state. */
DBG("UST consumer snapshot stream %s/%s (%" PRIu64 ")", path,
stream->name, stream->key);
}
- if (relayd_id != -1ULL) {
- ret = consumer_send_relayd_streams_sent(relayd_id);
- if (ret < 0) {
- goto error_unlock;
- }
- }
/*
* If tracing is active, we want to perform a "full" buffer flush.
produced_pos, nb_packets_per_stream,
stream->max_sb_size);
- while (consumed_pos < produced_pos) {
+ while ((long) (consumed_pos - produced_pos) < 0) {
ssize_t read_len;
unsigned long len, padded_len;
- int lost_packet = 0;
health_code_update();
}
DBG("UST consumer get subbuf failed. Skipping it.");
consumed_pos += stream->max_sb_size;
-
- /*
- * Start accounting lost packets only when we
- * already have extracted packets (to match the
- * content of the final snapshot).
- */
- if (!before_first_packet) {
- lost_packet = 1;
- }
+ stream->chan->lost_packets++;
continue;
}
goto error_close_stream;
}
consumed_pos += stream->max_sb_size;
-
- /*
- * Only account lost packets located between
- * succesfully extracted packets (do not account before
- * and after since they are not visible in the
- * resulting snapshot).
- */
- stream->chan->lost_packets += lost_packet;
- lost_packet = 0;
- before_first_packet = false;
}
/* Simply close the stream so we can use it on the next snapshot. */
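The other subtle change in this hunk replaces the plain consumed_pos < produced_pos test with a signed-difference comparison, presumably so the snapshot loop stays correct if the free-running unsigned positions ever wrap around. A small standalone illustration with hypothetical values, assuming the usual two's-complement representation of long:

#include <assert.h>
#include <limits.h>
#include <stdio.h>

int main(void)
{
        /* Producer has wrapped past ULONG_MAX; consumer has not yet. */
        unsigned long consumed_pos = ULONG_MAX - 4096;
        unsigned long produced_pos = 4096;

        /* Naive test: wrongly reports that nothing is left to read. */
        assert(!(consumed_pos < produced_pos));

        /*
         * Signed-difference test from the patch: still sees the consumer
         * lagging 8193 bytes behind, as long as the real distance between
         * the two positions stays below LONG_MAX.
         */
        assert((long) (consumed_pos - produced_pos) < 0);

        printf("wrap-safe comparison keeps the snapshot loop going\n");
        return 0;
}

The same signed-difference idiom is commonly used for comparing wrapping sequence counters (TCP sequence numbers being the classic example).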
case LTTNG_CONSUMER_ADD_RELAYD_SOCKET:
{
/* Session daemon status messages are handled in the following call. */
- ret = consumer_add_relayd_socket(msg.u.relayd_sock.net_index,
+ consumer_add_relayd_socket(msg.u.relayd_sock.net_index,
msg.u.relayd_sock.type, ctx, sock, consumer_sockpoll,
&msg.u.relayd_sock.sock, msg.u.relayd_sock.session_id,
msg.u.relayd_sock.relayd_session_id);
request.key = channel->key;
DBG("Sending metadata request to sessiond, session id %" PRIu64
- ", per-pid %" PRIu64 ", app UID %u and channek key %" PRIu64,
+ ", per-pid %" PRIu64 ", app UID %u and channel key %" PRIu64,
request.session_id, request.session_id_per_pid, request.uid,
request.key);