*/
static struct lttng_consumer_channel *allocate_channel(uint64_t session_id,
const char *pathname, const char *name, uid_t uid, gid_t gid,
- int relayd_id, uint64_t key, enum lttng_event_output output,
- uint64_t tracefile_size, uint64_t tracefile_count)
+ uint64_t relayd_id, uint64_t key, enum lttng_event_output output,
+ uint64_t tracefile_size, uint64_t tracefile_count,
+ uint64_t session_id_per_pid)
{
assert(pathname);
assert(name);
- return consumer_allocate_channel(key, session_id, pathname, name, uid, gid,
- relayd_id, output, tracefile_size, tracefile_count);
+ return consumer_allocate_channel(key, session_id, pathname, name, uid,
+ gid, relayd_id, output, tracefile_size,
+ tracefile_count, session_id_per_pid);
}
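+/*
+ * A minimal usage sketch under the new signature. The (uint64_t) -1ULL
+ * "no relayd" sentinel and the LTTNG_EVENT_MMAP output value are
+ * illustrative assumptions, not taken from this patch:
+ *
+ *	channel = allocate_channel(session_id, "/tmp/out", "chan0",
+ *			1000, 1000, (uint64_t) -1ULL, key,
+ *			LTTNG_EVENT_MMAP, 0, 0, session_id_per_pid);
+ */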
/*
assert(stream);
assert(sock >= 0);
- DBG2("UST consumer sending stream %" PRIu64 " to sessiond", stream->key);
+ DBG("UST consumer sending stream %" PRIu64 " to sessiond", stream->key);
/* Send stream to session daemon. */
ret = ustctl_send_stream_to_sessiond(sock, stream->ustream);
}
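+/*
+ * Note: besides the stream description, ustctl_send_stream_to_sessiond()
+ * presumably passes the stream's file descriptors over the unix socket,
+ * hence the sock >= 0 assertion above.
+ */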
pthread_mutex_lock(&consumer_data.lock);
+ pthread_mutex_lock(&channel->lock);
if (cds_lfht_is_node_deleted(&channel->node.node)) {
goto error_unlock;
}
error_unlock:
+ pthread_mutex_unlock(&channel->lock);
pthread_mutex_unlock(&consumer_data.lock);
error:
return ret;
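+/*
+ * Lock-ordering note: channel->lock nests inside consumer_data.lock
+ * above, and error_unlock releases the two in reverse order. Any new
+ * code that takes both locks must follow the same order to stay
+ * deadlock-free.
+ */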
* and ultimately try to get rid of this global consumer data lock.
*/
pthread_mutex_lock(&consumer_data.lock);
-
+ pthread_mutex_lock(&channel->lock);
pthread_mutex_lock(&channel->metadata_cache->lock);
ret = consumer_metadata_cache_write(channel, offset, len, metadata_str);
if (ret < 0) {
* waiting for the metadata cache to be flushed.
*/
pthread_mutex_unlock(&channel->metadata_cache->lock);
+ pthread_mutex_unlock(&channel->lock);
pthread_mutex_unlock(&consumer_data.lock);
goto end_free;
}
pthread_mutex_unlock(&channel->metadata_cache->lock);
+ pthread_mutex_unlock(&channel->lock);
pthread_mutex_unlock(&consumer_data.lock);
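+/*
+ * The locking hierarchy here is consumer_data.lock -> channel->lock ->
+ * metadata_cache->lock, released in reverse order before the blocking
+ * wait below, presumably so the flush can progress while we sleep.
+ */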
while (consumer_metadata_cache_flushed(channel, offset + len)) {
msg.u.ask_channel.relayd_id, msg.u.ask_channel.key,
(enum lttng_event_output) msg.u.ask_channel.output,
msg.u.ask_channel.tracefile_size,
- msg.u.ask_channel.tracefile_count);
+ msg.u.ask_channel.tracefile_count,
+ msg.u.ask_channel.session_id_per_pid);
if (!channel) {
goto end_channel_error;
}
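+/*
+ * session_id_per_pid is read straight out of the ask_channel payload,
+ * which implies the sessiond-side command definition gained the new
+ * field as well (not shown in this excerpt).
+ */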
assert(stream->ustream);
assert(ctx);
- DBG2("In UST read_subbuffer (wait_fd: %d, name: %s)", stream->wait_fd,
+ DBG("In UST read_subbuffer (wait_fd: %d, name: %s)", stream->wait_fd,
stream->name);
/* Ease our life for what's next. */
DBG("UST consumer checking data pending");
+ if (stream->endpoint_status != CONSUMER_ENDPOINT_ACTIVE) {
+ ret = 0;
+ goto end;
+ }
+
ret = ustctl_get_next_subbuf(stream->ustream);
if (ret == 0) {
/* There is still data so let's put back this subbuffer. */
}
}
+/*
+ * Please refer to consumer-timer.c before adding any lock within this
+ * function or any of its callees. Timers have very strict locking
+ * semantics with respect to teardown; failing to respect them
+ * introduces deadlocks.
+ */
int lttng_ustconsumer_request_metadata(struct lttng_consumer_local_data *ctx,
struct lttng_consumer_channel *channel)
{
}
request.session_id = channel->session_id;
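+ /*
+ * Assumption: the per-PID session id lets the session daemon find the
+ * matching per-PID buffer registry for this channel's application.
+ */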
+ request.session_id_per_pid = channel->session_id_per_pid;
request.uid = channel->uid;
request.key = channel->key;
- DBG("Sending metadata request to sessiond, session %" PRIu64,
- channel->session_id);
+ DBG("Sending metadata request to sessiond, session id %" PRIu64
+ ", per-pid %" PRIu64,
+ channel->session_id,
+ channel->session_id_per_pid);
ret = lttcomm_send_unix_sock(ctx->consumer_metadata_socket, &request,
sizeof(request));