/*
 * Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
 *                      Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *               2012 - David Goulet <dgoulet@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include <sys/socket.h>
#include <sys/types.h>

#include <bin/lttng-consumerd/health-consumerd.h>
#include <common/common.h>
#include <common/utils.h>
#include <common/compat/poll.h>
#include <common/compat/endian.h>
#include <common/index/index.h>
#include <common/kernel-ctl/kernel-ctl.h>
#include <common/sessiond-comm/relayd.h>
#include <common/sessiond-comm/sessiond-comm.h>
#include <common/kernel-consumer/kernel-consumer.h>
#include <common/relayd/relayd.h>
#include <common/ust-consumer/ust-consumer.h>
#include <common/consumer-timer.h>

#include "consumer-stream.h"
#include "consumer-testpoint.h"
struct lttng_consumer_global_data consumer_data = {
	.type = LTTNG_CONSUMER_UNKNOWN,
};

enum consumer_channel_action {
	CONSUMER_CHANNEL_ADD,
	CONSUMER_CHANNEL_DEL,
	CONSUMER_CHANNEL_QUIT,
};

struct consumer_channel_msg {
	enum consumer_channel_action action;
	struct lttng_consumer_channel *chan;	/* add */
	uint64_t key;				/* del */
};
/*
 * Flag to inform the polling thread to quit when all fd hung up. Updated by
 * the consumer_thread_receive_fds when it notices that all fds have hung up.
 * Also updated by the signal handler (consumer_should_exit()). Read by the
 * polling threads.
 */
volatile int consumer_quit;

/*
 * Global hash table containing respectively metadata and data streams. The
 * stream element in this ht should only be updated by the metadata poll thread
 * for the metadata and the data poll thread for the data.
 */
static struct lttng_ht *metadata_ht;
static struct lttng_ht *data_ht;
/*
 * Notify a thread lttng pipe to poll back again. This usually means that some
 * global state has changed so we just send back the thread in a poll wait
 * call.
 */
static void notify_thread_lttng_pipe(struct lttng_pipe *pipe)
{
	struct lttng_consumer_stream *null_stream = NULL;

	(void) lttng_pipe_write(pipe, &null_stream, sizeof(null_stream));
}
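
/*
 * Usage sketch (illustrative, based on the poll loops further down in this
 * file): the receiving thread reads one stream pointer per wakeup and treats
 * a NULL pointer as "global state changed, re-evaluate the poll set":
 *
 *	struct lttng_consumer_stream *stream;
 *	ssize_t len;
 *
 *	len = lttng_pipe_read(ctx->consumer_data_pipe, &stream, sizeof(stream));
 *	if (len == sizeof(stream) && stream == NULL) {
 *		// Not a new stream: some global state changed.
 *	}
 */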
static void notify_health_quit_pipe(int *pipe)
{
	ssize_t ret;

	ret = lttng_write(pipe[1], "4", 1);
	if (ret < 1) {
		PERROR("write consumer health quit");
	}
}
static void notify_channel_pipe(struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_channel *chan,
		uint64_t key,
		enum consumer_channel_action action)
{
	ssize_t ret;
	struct consumer_channel_msg msg;

	memset(&msg, 0, sizeof(msg));

	msg.action = action;
	msg.chan = chan;
	msg.key = key;

	ret = lttng_write(ctx->consumer_channel_pipe[1], &msg, sizeof(msg));
	if (ret < sizeof(msg)) {
		PERROR("notify_channel_pipe write error");
	}
}
void notify_thread_del_channel(struct lttng_consumer_local_data *ctx,
		uint64_t key)
{
	notify_channel_pipe(ctx, NULL, key, CONSUMER_CHANNEL_DEL);
}
static int read_channel_pipe(struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_channel **chan,
		uint64_t *key,
		enum consumer_channel_action *action)
{
	struct consumer_channel_msg msg;
	ssize_t ret;

	ret = lttng_read(ctx->consumer_channel_pipe[0], &msg, sizeof(msg));
	if (ret < sizeof(msg)) {
		ret = -1;
	}
	*action = msg.action;
	*chan = msg.chan;
	*key = msg.key;

	return (int) ret;
}
/*
 * Cleanup the stream list of a channel. Those streams are not yet globally
 * visible.
 */
static void clean_channel_stream_list(struct lttng_consumer_channel *channel)
{
	struct lttng_consumer_stream *stream, *stmp;

	/* Delete streams that might have been left in the stream list. */
	cds_list_for_each_entry_safe(stream, stmp, &channel->streams.head,
			send_node) {
		cds_list_del(&stream->send_node);
		/*
		 * Once a stream is added to this list, the buffers were created so we
		 * have a guarantee that this call will succeed. Setting the monitor
		 * mode to 0 so we don't lock nor try to delete the stream from the
		 * global hash table.
		 */
		stream->monitor = 0;
		consumer_stream_destroy(stream, NULL);
	}
}
/*
 * Find a stream. The consumer_data.lock must be locked during this
 * call.
 */
static struct lttng_consumer_stream *find_stream(uint64_t key,
		struct lttng_ht *ht)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_consumer_stream *stream = NULL;

	/* -1ULL keys are lookup failures */
	if (key == (uint64_t) -1ULL) {
		return NULL;
	}

	lttng_ht_lookup(ht, &key, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		stream = caa_container_of(node, struct lttng_consumer_stream, node);
	}

	return stream;
}
static void steal_stream_key(uint64_t key, struct lttng_ht *ht)
{
	struct lttng_consumer_stream *stream;

	stream = find_stream(key, ht);
	if (stream) {
		stream->key = (uint64_t) -1ULL;
		/*
		 * We don't want the lookup to match, but we still need
		 * to iterate on this stream when iterating over the hash table. Just
		 * change the node key.
		 */
		stream->node.key = (uint64_t) -1ULL;
	}
}
/*
 * Return a channel object for the given key.
 *
 * RCU read side lock MUST be acquired before calling this function and
 * protects the channel ptr.
 */
struct lttng_consumer_channel *consumer_find_channel(uint64_t key)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_consumer_channel *channel = NULL;

	/* -1ULL keys are lookup failures */
	if (key == (uint64_t) -1ULL) {
		return NULL;
	}

	lttng_ht_lookup(consumer_data.channel_ht, &key, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		channel = caa_container_of(node, struct lttng_consumer_channel, node);
	}

	return channel;
}
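
/*
 * Usage sketch (illustrative): per the comment above, the caller holds the
 * RCU read side lock for as long as the returned pointer is used:
 *
 *	rcu_read_lock();
 *	channel = consumer_find_channel(key);
 *	if (channel) {
 *		// ... use channel while still under the RCU read lock ...
 *	}
 *	rcu_read_unlock();
 */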
/*
 * There is a possibility that the consumer does not have enough time between
 * the close of the channel on the session daemon and the cleanup in here thus
 * once we have a channel add with an existing key, we know for sure that this
 * channel will eventually get cleaned up by all streams being closed.
 *
 * This function just nullifies the already existing channel key.
 */
static void steal_channel_key(uint64_t key)
{
	struct lttng_consumer_channel *channel;

	channel = consumer_find_channel(key);
	if (channel) {
		channel->key = (uint64_t) -1ULL;
		/*
		 * We don't want the lookup to match, but we still need to iterate on
		 * this channel when iterating over the hash table. Just change the
		 * node key.
		 */
		channel->node.key = (uint64_t) -1ULL;
	}
}
static void free_channel_rcu(struct rcu_head *head)
{
	struct lttng_ht_node_u64 *node =
		caa_container_of(head, struct lttng_ht_node_u64, head);
	struct lttng_consumer_channel *channel =
		caa_container_of(node, struct lttng_consumer_channel, node);

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		lttng_ustconsumer_free_channel(channel);
		break;
	default:
		ERR("Unknown consumer_data type");
		break;
	}
	free(channel);
}
/*
 * RCU protected relayd socket pair free.
 */
static void free_relayd_rcu(struct rcu_head *head)
{
	struct lttng_ht_node_u64 *node =
		caa_container_of(head, struct lttng_ht_node_u64, head);
	struct consumer_relayd_sock_pair *relayd =
		caa_container_of(node, struct consumer_relayd_sock_pair, node);

	/*
	 * Close all sockets. This is done in the call RCU since we don't want the
	 * socket fds to be reassigned thus potentially creating bad state of the
	 * relayd object.
	 *
	 * We do not have to lock the control socket mutex here since at this stage
	 * there is no one referencing to this relayd object.
	 */
	(void) relayd_close(&relayd->control_sock);
	(void) relayd_close(&relayd->data_sock);

	free(relayd);
}
/*
 * Destroy and free relayd socket pair object.
 */
void consumer_destroy_relayd(struct consumer_relayd_sock_pair *relayd)
{
	int ret;
	struct lttng_ht_iter iter;

	if (relayd == NULL) {
		return;
	}

	DBG("Consumer destroy and close relayd socket pair");

	iter.iter.node = &relayd->node.node;
	ret = lttng_ht_del(consumer_data.relayd_ht, &iter);
	if (ret != 0) {
		/* We assume the relayd is being or is destroyed */
		return;
	}

	/* RCU free() call */
	call_rcu(&relayd->node.head, free_relayd_rcu);
}
/*
 * Remove a channel from the global list protected by a mutex. This function is
 * also responsible for freeing its data structures.
 */
void consumer_del_channel(struct lttng_consumer_channel *channel)
{
	int ret;
	struct lttng_ht_iter iter;

	DBG("Consumer delete channel key %" PRIu64, channel->key);

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&channel->lock);

	/* Destroy streams that might have been left in the stream list. */
	clean_channel_stream_list(channel);

	if (channel->live_timer_enabled == 1) {
		consumer_timer_live_stop(channel);
	}

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		lttng_ustconsumer_del_channel(channel);
		break;
	default:
		ERR("Unknown consumer_data type");
		goto end;
	}

	iter.iter.node = &channel->node.node;
	ret = lttng_ht_del(consumer_data.channel_ht, &iter);

	call_rcu(&channel->node.head, free_channel_rcu);
end:
	pthread_mutex_unlock(&channel->lock);
	pthread_mutex_unlock(&consumer_data.lock);
}
/*
 * Iterate over the relayd hash table and destroy each element. Finally,
 * destroy the whole hash table.
 */
static void cleanup_relayd_ht(void)
{
	struct lttng_ht_iter iter;
	struct consumer_relayd_sock_pair *relayd;

	cds_lfht_for_each_entry(consumer_data.relayd_ht->ht, &iter.iter, relayd,
			node.node) {
		consumer_destroy_relayd(relayd);
	}

	lttng_ht_destroy(consumer_data.relayd_ht);
}
/*
 * Update the end point status of all streams having the given network sequence
 * index (relayd index).
 *
 * It's atomically set without having the stream mutex locked which is fine
 * because we handle the write/read race with a pipe wakeup for each thread.
 */
static void update_endpoint_status_by_netidx(uint64_t net_seq_idx,
		enum consumer_endpoint_status status)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	DBG("Consumer set delete flag on stream by idx %" PRIu64, net_seq_idx);

	/* Let's begin with metadata */
	cds_lfht_for_each_entry(metadata_ht->ht, &iter.iter, stream, node.node) {
		if (stream->net_seq_idx == net_seq_idx) {
			uatomic_set(&stream->endpoint_status, status);
			DBG("Delete flag set to metadata stream %d", stream->wait_fd);
		}
	}

	/* Follow up by the data streams */
	cds_lfht_for_each_entry(data_ht->ht, &iter.iter, stream, node.node) {
		if (stream->net_seq_idx == net_seq_idx) {
			uatomic_set(&stream->endpoint_status, status);
			DBG("Delete flag set to data stream %d", stream->wait_fd);
		}
	}
}
/*
 * Cleanup a relayd object by flagging every associated stream for deletion,
 * destroying the object meaning removing it from the relayd hash table,
 * closing the sockets and freeing the memory in a RCU call.
 *
 * If a local data context is available, notify the threads that the streams'
 * state have changed.
 */
static void cleanup_relayd(struct consumer_relayd_sock_pair *relayd,
		struct lttng_consumer_local_data *ctx)
{
	uint64_t netidx;

	DBG("Cleaning up relayd sockets");

	/* Save the net sequence index before destroying the object */
	netidx = relayd->net_seq_idx;

	/*
	 * Delete the relayd from the relayd hash table, close the sockets and free
	 * the object in a RCU call.
	 */
	consumer_destroy_relayd(relayd);

	/* Set inactive endpoint to all streams */
	update_endpoint_status_by_netidx(netidx, CONSUMER_ENDPOINT_INACTIVE);

	/*
	 * With a local data context, notify the threads that the streams' state
	 * have changed. The write() action on the pipe acts as an "implicit"
	 * memory barrier ordering the updates of the end point status from the
	 * read of this status which happens AFTER receiving this notify.
	 */
	if (ctx) {
		notify_thread_lttng_pipe(ctx->consumer_data_pipe);
		notify_thread_lttng_pipe(ctx->consumer_metadata_pipe);
	}
}
/*
 * Flag a relayd socket pair for destruction. Destroy it if the refcount
 * reaches zero.
 *
 * RCU read side lock MUST be acquired before calling this function.
 */
void consumer_flag_relayd_for_destroy(struct consumer_relayd_sock_pair *relayd)
{
	/* Set destroy flag for this object */
	uatomic_set(&relayd->destroy_flag, 1);

	/* Destroy the relayd if refcount is 0 */
	if (uatomic_read(&relayd->refcount) == 0) {
		consumer_destroy_relayd(relayd);
	}
}
/*
 * Completely destroy stream from every visible data structure and the given
 * hash table if one.
 *
 * Once this call returns, the stream object is no longer usable nor visible.
 */
void consumer_del_stream(struct lttng_consumer_stream *stream,
		struct lttng_ht *ht)
{
	consumer_stream_destroy(stream, ht);
}

/*
 * XXX naming of del vs destroy is all mixed up.
 */
void consumer_del_stream_for_data(struct lttng_consumer_stream *stream)
{
	consumer_stream_destroy(stream, data_ht);
}

void consumer_del_stream_for_metadata(struct lttng_consumer_stream *stream)
{
	consumer_stream_destroy(stream, metadata_ht);
}
struct lttng_consumer_stream *consumer_allocate_stream(uint64_t channel_key,
		uint64_t stream_key,
		enum lttng_consumer_stream_state state,
		const char *channel_name,
		uint64_t relayd_id,
		uint64_t session_id,
		int cpu,
		enum consumer_channel_type type,
		unsigned int monitor)
{
	int ret;
	struct lttng_consumer_stream *stream;

	stream = zmalloc(sizeof(*stream));
	if (stream == NULL) {
		PERROR("malloc struct lttng_consumer_stream");
		return NULL;
	}

	stream->key = stream_key;
	stream->out_fd_offset = 0;
	stream->output_written = 0;
	stream->state = state;
	stream->net_seq_idx = relayd_id;
	stream->session_id = session_id;
	stream->monitor = monitor;
	stream->endpoint_status = CONSUMER_ENDPOINT_ACTIVE;
	stream->index_fd = -1;
	pthread_mutex_init(&stream->lock, NULL);
	pthread_mutex_init(&stream->metadata_timer_lock, NULL);

	/* If channel is the metadata, flag this stream as metadata. */
	if (type == CONSUMER_CHANNEL_TYPE_METADATA) {
		stream->metadata_flag = 1;
		/* Metadata is flat out. */
		strncpy(stream->name, DEFAULT_METADATA_NAME, sizeof(stream->name));
		/* Live rendez-vous point. */
		pthread_cond_init(&stream->metadata_rdv, NULL);
		pthread_mutex_init(&stream->metadata_rdv_lock, NULL);
	} else {
		/* Format stream name to <channel_name>_<cpu_number> */
		ret = snprintf(stream->name, sizeof(stream->name), "%s_%d",
				channel_name, cpu);
		if (ret < 0) {
			PERROR("snprintf stream name");
		}
	}

	/* Key is always the wait_fd for streams. */
	lttng_ht_node_init_u64(&stream->node, stream->key);

	/* Init node per channel id key */
	lttng_ht_node_init_u64(&stream->node_channel_id, channel_key);

	/* Init session id node with the stream session id */
	lttng_ht_node_init_u64(&stream->node_session_id, stream->session_id);

	DBG3("Allocated stream %s (key %" PRIu64 ", chan_key %" PRIu64
			" relayd_id %" PRIu64 ", session_id %" PRIu64,
			stream->name, stream->key, channel_key,
			stream->net_seq_idx, stream->session_id);

	return stream;
}
/*
 * Add a stream to the global list protected by a mutex.
 */
int consumer_add_data_stream(struct lttng_consumer_stream *stream)
{
	struct lttng_ht *ht = data_ht;
	int ret = 0;

	DBG3("Adding consumer stream %" PRIu64, stream->key);

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&stream->chan->lock);
	pthread_mutex_lock(&stream->chan->timer_lock);
	pthread_mutex_lock(&stream->lock);

	/* Steal stream identifier to avoid having streams with the same key */
	steal_stream_key(stream->key, ht);

	lttng_ht_add_unique_u64(ht, &stream->node);

	lttng_ht_add_u64(consumer_data.stream_per_chan_id_ht,
			&stream->node_channel_id);

	/*
	 * Add stream to the stream_list_ht of the consumer data. No need to steal
	 * the key since the HT does not use it and we allow to add redundant keys
	 * here.
	 */
	lttng_ht_add_u64(consumer_data.stream_list_ht, &stream->node_session_id);

	/*
	 * When nb_init_stream_left reaches 0, we don't need to trigger any action
	 * in terms of destroying the associated channel, because the action that
	 * causes the count to become 0 also causes a stream to be added. The
	 * channel deletion will thus be triggered by the following removal of this
	 * stream.
	 */
	if (uatomic_read(&stream->chan->nb_init_stream_left) > 0) {
		/* Increment refcount before decrementing nb_init_stream_left */
		uatomic_inc(&stream->chan->refcount);
		uatomic_dec(&stream->chan->nb_init_stream_left);
	}

	/* Update consumer data once the node is inserted. */
	consumer_data.stream_count++;
	consumer_data.need_update = 1;

	pthread_mutex_unlock(&stream->lock);
	pthread_mutex_unlock(&stream->chan->timer_lock);
	pthread_mutex_unlock(&stream->chan->lock);
	pthread_mutex_unlock(&consumer_data.lock);

	return ret;
}

void consumer_del_data_stream(struct lttng_consumer_stream *stream)
{
	consumer_del_stream(stream, data_ht);
}
/*
 * Add relayd socket to global consumer data hashtable. RCU read side lock MUST
 * be acquired before calling this.
 */
static int add_relayd(struct consumer_relayd_sock_pair *relayd)
{
	int ret = 0;
	struct lttng_ht_node_u64 *node;
	struct lttng_ht_iter iter;

	lttng_ht_lookup(consumer_data.relayd_ht,
			&relayd->net_seq_idx, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node == NULL) {
		lttng_ht_add_unique_u64(consumer_data.relayd_ht, &relayd->node);
	}

	return ret;
}
/*
 * Allocate and return a consumer relayd socket.
 */
struct consumer_relayd_sock_pair *consumer_allocate_relayd_sock_pair(
		uint64_t net_seq_idx)
{
	struct consumer_relayd_sock_pair *obj = NULL;

	/* net sequence index of -1 is a failure */
	if (net_seq_idx == (uint64_t) -1ULL) {
		goto error;
	}

	obj = zmalloc(sizeof(struct consumer_relayd_sock_pair));
	if (obj == NULL) {
		PERROR("zmalloc relayd sock");
		goto error;
	}

	obj->net_seq_idx = net_seq_idx;
	obj->destroy_flag = 0;
	obj->control_sock.sock.fd = -1;
	obj->data_sock.sock.fd = -1;
	lttng_ht_node_init_u64(&obj->node, obj->net_seq_idx);
	pthread_mutex_init(&obj->ctrl_sock_mutex, NULL);

error:
	return obj;
}
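
/*
 * Usage sketch (illustrative): a relayd socket pair is typically allocated
 * with the relayd's net sequence index, its sockets connected elsewhere, and
 * then published under the RCU read side lock as add_relayd() requires:
 *
 *	struct consumer_relayd_sock_pair *relayd;
 *
 *	relayd = consumer_allocate_relayd_sock_pair(net_seq_idx);
 *	if (relayd) {
 *		rcu_read_lock();
 *		(void) add_relayd(relayd);
 *		rcu_read_unlock();
 *	}
 */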
/*
 * Find a relayd socket pair in the global consumer data.
 *
 * Return the object if found else NULL.
 * RCU read-side lock must be held across this call and while using the
 * returned object.
 */
struct consumer_relayd_sock_pair *consumer_find_relayd(uint64_t key)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct consumer_relayd_sock_pair *relayd = NULL;

	/* Negative keys are lookup failures */
	if (key == (uint64_t) -1ULL) {
		return NULL;
	}

	lttng_ht_lookup(consumer_data.relayd_ht, &key,
			&iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		relayd = caa_container_of(node, struct consumer_relayd_sock_pair, node);
	}

	return relayd;
}
/*
 * Find a relayd and send the stream
 *
 * Returns 0 on success, < 0 on error
 */
int consumer_send_relayd_stream(struct lttng_consumer_stream *stream,
		char *path)
{
	int ret = 0;
	struct consumer_relayd_sock_pair *relayd;

	assert(stream->net_seq_idx != -1ULL);

	/* The stream is not metadata. Get relayd reference if exists. */
	relayd = consumer_find_relayd(stream->net_seq_idx);
	if (relayd != NULL) {
		/* Add stream on the relayd */
		pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		ret = relayd_add_stream(&relayd->control_sock, stream->name,
				path, &stream->relayd_stream_id,
				stream->chan->tracefile_size, stream->chan->tracefile_count);
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);

		uatomic_inc(&relayd->refcount);
		stream->sent_to_relayd = 1;
	} else {
		ERR("Stream %" PRIu64 " relayd ID %" PRIu64 " unknown. Can't send it.",
				stream->key, stream->net_seq_idx);
		ret = -1;
	}

	DBG("Stream %s with key %" PRIu64 " sent to relayd id %" PRIu64,
			stream->name, stream->key, stream->net_seq_idx);

	return ret;
}
/*
 * Find a relayd and send the streams sent message
 *
 * Returns 0 on success, < 0 on error
 */
int consumer_send_relayd_streams_sent(uint64_t net_seq_idx)
{
	int ret = 0;
	struct consumer_relayd_sock_pair *relayd;

	assert(net_seq_idx != -1ULL);

	/* The stream is not metadata. Get relayd reference if exists. */
	relayd = consumer_find_relayd(net_seq_idx);
	if (relayd != NULL) {
		/* Add stream on the relayd */
		pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		ret = relayd_streams_sent(&relayd->control_sock);
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
	} else {
		ERR("Relayd ID %" PRIu64 " unknown. Can't send streams_sent.",
				net_seq_idx);
		ret = -1;
	}

	DBG("All streams sent relayd id %" PRIu64, net_seq_idx);

	return ret;
}
/*
 * Find a relayd and close the stream
 */
void close_relayd_stream(struct lttng_consumer_stream *stream)
{
	struct consumer_relayd_sock_pair *relayd;

	/* The stream is not metadata. Get relayd reference if exists. */
	relayd = consumer_find_relayd(stream->net_seq_idx);
	if (relayd) {
		consumer_stream_relayd_close(stream, relayd);
	}
}
/*
 * Handle stream for relayd transmission if the stream applies for network
 * streaming where the net sequence index is set.
 *
 * Return destination file descriptor or negative value on error.
 */
static int write_relayd_stream_header(struct lttng_consumer_stream *stream,
		size_t data_size, unsigned long padding,
		struct consumer_relayd_sock_pair *relayd)
{
	int outfd = -1, ret;
	struct lttcomm_relayd_data_hdr data_hdr;

	/* Reset data header */
	memset(&data_hdr, 0, sizeof(data_hdr));

	if (stream->metadata_flag) {
		/* Caller MUST acquire the relayd control socket lock */
		ret = relayd_send_metadata(&relayd->control_sock, data_size);
		if (ret < 0) {
			goto error;
		}

		/* Metadata are always sent on the control socket. */
		outfd = relayd->control_sock.sock.fd;
	} else {
		/* Set header with stream information */
		data_hdr.stream_id = htobe64(stream->relayd_stream_id);
		data_hdr.data_size = htobe32(data_size);
		data_hdr.padding_size = htobe32(padding);
		/*
		 * Note that net_seq_num below is assigned with the *current* value of
		 * next_net_seq_num and only after that the next_net_seq_num will be
		 * incremented. This is why when issuing a command on the relayd using
		 * this next value, 1 should always be subtracted in order to compare
		 * the last seen sequence number on the relayd side to the last sent.
		 */
		data_hdr.net_seq_num = htobe64(stream->next_net_seq_num);
		/* Other fields are zeroed previously */

		ret = relayd_send_data_hdr(&relayd->data_sock, &data_hdr,
				sizeof(data_hdr));
		if (ret < 0) {
			goto error;
		}

		++stream->next_net_seq_num;

		/* Set to go on data socket */
		outfd = relayd->data_sock.sock.fd;
	}

error:
	return outfd;
}
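
/*
 * Illustration of the off-by-one noted above (hedged sketch; the variable
 * last_seen_on_relayd is a hypothetical name, not from this file): if the
 * header just sent carried next_net_seq_num == N, then after the increment
 * the last sequence number actually sent is N while next_net_seq_num == N + 1:
 *
 *	uint64_t last_sent = stream->next_net_seq_num - 1;
 *	if (last_seen_on_relayd == last_sent) {
 *		// everything sent so far has been received by the relayd
 *	}
 */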
/*
 * Allocate and return a new lttng_consumer_channel object using the given key
 * to initialize the hash table node.
 *
 * On error, return NULL.
 */
struct lttng_consumer_channel *consumer_allocate_channel(uint64_t key,
		uint64_t session_id,
		const char *pathname,
		const char *name,
		uint64_t relayd_id,
		enum lttng_event_output output,
		uint64_t tracefile_size,
		uint64_t tracefile_count,
		uint64_t session_id_per_pid,
		unsigned int monitor,
		unsigned int live_timer_interval,
		const char *root_shm_path,
		const char *shm_path)
{
	struct lttng_consumer_channel *channel;

	channel = zmalloc(sizeof(*channel));
	if (channel == NULL) {
		PERROR("malloc struct lttng_consumer_channel");
		return NULL;
	}

	channel->key = key;
	channel->refcount = 0;
	channel->session_id = session_id;
	channel->session_id_per_pid = session_id_per_pid;
	channel->relayd_id = relayd_id;
	channel->tracefile_size = tracefile_size;
	channel->tracefile_count = tracefile_count;
	channel->monitor = monitor;
	channel->live_timer_interval = live_timer_interval;
	pthread_mutex_init(&channel->lock, NULL);
	pthread_mutex_init(&channel->timer_lock, NULL);

	switch (output) {
	case LTTNG_EVENT_SPLICE:
		channel->output = CONSUMER_CHANNEL_SPLICE;
		break;
	case LTTNG_EVENT_MMAP:
		channel->output = CONSUMER_CHANNEL_MMAP;
		break;
	default:
		free(channel);
		return NULL;
	}

	/*
	 * In monitor mode, the streams associated with the channel will be put in
	 * a special list ONLY owned by this channel. So, the refcount is set to 1
	 * here meaning that the channel itself has streams that are referenced.
	 *
	 * On a channel deletion, once the channel is no longer visible, the
	 * refcount is decremented and checked for a zero value to delete it. With
	 * streams in no monitor mode, it will now be safe to destroy the channel.
	 */
	if (!channel->monitor) {
		channel->refcount = 1;
	}

	strncpy(channel->pathname, pathname, sizeof(channel->pathname));
	channel->pathname[sizeof(channel->pathname) - 1] = '\0';

	strncpy(channel->name, name, sizeof(channel->name));
	channel->name[sizeof(channel->name) - 1] = '\0';

	if (root_shm_path) {
		strncpy(channel->root_shm_path, root_shm_path, sizeof(channel->root_shm_path));
		channel->root_shm_path[sizeof(channel->root_shm_path) - 1] = '\0';
	}
	if (shm_path) {
		strncpy(channel->shm_path, shm_path, sizeof(channel->shm_path));
		channel->shm_path[sizeof(channel->shm_path) - 1] = '\0';
	}

	lttng_ht_node_init_u64(&channel->node, channel->key);

	channel->wait_fd = -1;

	CDS_INIT_LIST_HEAD(&channel->streams.head);

	DBG("Allocated channel (key %" PRIu64 ")", channel->key);

	return channel;
}
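
/*
 * Usage sketch (illustrative, argument list abbreviated): a channel object is
 * typically allocated and then published with consumer_add_channel(), which
 * performs the key stealing and hash table insertion shown below:
 *
 *	struct lttng_consumer_channel *chan;
 *
 *	chan = consumer_allocate_channel(key, session_id, pathname, name, ...);
 *	if (chan) {
 *		(void) consumer_add_channel(chan, ctx);
 *	}
 */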
/*
 * Add a channel to the global list protected by a mutex.
 *
 * Always return 0 indicating success.
 */
int consumer_add_channel(struct lttng_consumer_channel *channel,
		struct lttng_consumer_local_data *ctx)
{
	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&channel->lock);
	pthread_mutex_lock(&channel->timer_lock);

	/*
	 * This gives us a guarantee that the channel we are about to add to the
	 * channel hash table will be unique. See this function comment on the why
	 * we need to steal the channel key at this stage.
	 */
	steal_channel_key(channel->key);

	lttng_ht_add_unique_u64(consumer_data.channel_ht, &channel->node);

	pthread_mutex_unlock(&channel->timer_lock);
	pthread_mutex_unlock(&channel->lock);
	pthread_mutex_unlock(&consumer_data.lock);

	if (channel->wait_fd != -1 && channel->type == CONSUMER_CHANNEL_TYPE_DATA) {
		notify_channel_pipe(ctx, channel, -1, CONSUMER_CHANNEL_ADD);
	}

	return 0;
}
/*
 * Allocate the pollfd structure and the local view of the out fds to avoid
 * doing a lookup in the linked list and concurrency issues when writing is
 * needed. Called with consumer_data.lock held.
 *
 * Returns the number of fds in the structures.
 */
static int update_poll_array(struct lttng_consumer_local_data *ctx,
		struct pollfd **pollfd, struct lttng_consumer_stream **local_stream,
		struct lttng_ht *ht)
{
	int i = 0;
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	assert(local_stream);

	DBG("Updating poll fd array");
	cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) {
		/*
		 * Only active streams with an active end point can be added to the
		 * poll set and local stream storage of the thread.
		 *
		 * There is a potential race here for endpoint_status to be updated
		 * just after the check. However, this is OK since the stream(s) will
		 * be deleted once the thread is notified that the end point state has
		 * changed where this function will be called back again.
		 */
		if (stream->state != LTTNG_CONSUMER_ACTIVE_STREAM ||
				stream->endpoint_status == CONSUMER_ENDPOINT_INACTIVE) {
			continue;
		}
		/*
		 * This clobbers way too much the debug output. Uncomment that if you
		 * need it for debugging purposes.
		 *
		 * DBG("Active FD %d", stream->wait_fd);
		 */
		(*pollfd)[i].fd = stream->wait_fd;
		(*pollfd)[i].events = POLLIN | POLLPRI;
		local_stream[i] = stream;
		i++;
	}

	/*
	 * Insert the consumer_data_pipe at the end of the array and don't
	 * increment i so nb_fd is the number of real FD.
	 */
	(*pollfd)[i].fd = lttng_pipe_get_readfd(ctx->consumer_data_pipe);
	(*pollfd)[i].events = POLLIN | POLLPRI;

	(*pollfd)[i + 1].fd = lttng_pipe_get_readfd(ctx->consumer_wakeup_pipe);
	(*pollfd)[i + 1].events = POLLIN | POLLPRI;
	return i;
}
/*
 * Poll on the should_quit pipe and the command socket return -1 on
 * error, 1 if should exit, 0 if data is available on the command socket
 */
int lttng_consumer_poll_socket(struct pollfd *consumer_sockpoll)
{
	int num_rdy;

restart:
	num_rdy = poll(consumer_sockpoll, 2, -1);
	if (num_rdy == -1) {
		/*
		 * Restart interrupted system call.
		 */
		if (errno == EINTR) {
			goto restart;
		}
		PERROR("Poll error");
		return -1;
	}
	if (consumer_sockpoll[0].revents & (POLLIN | POLLPRI)) {
		DBG("consumer_should_quit wake up");
		return 1;
	}
	return 0;
}
/*
 * Set the error socket.
 */
void lttng_consumer_set_error_sock(struct lttng_consumer_local_data *ctx,
		int sock)
{
	ctx->consumer_error_socket = sock;
}

/*
 * Set the command socket path.
 */
void lttng_consumer_set_command_sock_path(
		struct lttng_consumer_local_data *ctx, char *sock)
{
	ctx->consumer_command_sock_path = sock;
}

/*
 * Send return code to the session daemon.
 * If the socket is not defined, we return 0, it is not a fatal error
 */
int lttng_consumer_send_error(struct lttng_consumer_local_data *ctx, int cmd)
{
	if (ctx->consumer_error_socket > 0) {
		return lttcomm_send_unix_sock(ctx->consumer_error_socket, &cmd,
				sizeof(enum lttcomm_sessiond_command));
	}

	return 0;
}
/*
 * Close all the tracefiles and stream fds and MUST be called when all
 * instances are destroyed i.e. when all threads were joined and are ended.
 */
void lttng_consumer_cleanup(void)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_channel *channel;

	cds_lfht_for_each_entry(consumer_data.channel_ht->ht, &iter.iter, channel,
			node.node) {
		consumer_del_channel(channel);
	}

	lttng_ht_destroy(consumer_data.channel_ht);

	cleanup_relayd_ht();

	lttng_ht_destroy(consumer_data.stream_per_chan_id_ht);

	/*
	 * This HT contains streams that are freed by either the metadata thread or
	 * the data thread so we do *nothing* on the hash table and simply destroy
	 * it.
	 */
	lttng_ht_destroy(consumer_data.stream_list_ht);
}
/*
 * Called from signal handler.
 */
void lttng_consumer_should_exit(struct lttng_consumer_local_data *ctx)
{
	ssize_t ret;

	consumer_quit = 1;
	ret = lttng_write(ctx->consumer_should_quit[1], "4", 1);
	if (ret < 1) {
		PERROR("write consumer quit");
	}

	DBG("Consumer flag that it should quit");
}
void lttng_consumer_sync_trace_file(struct lttng_consumer_stream *stream,
		off_t orig_offset)
{
	int ret;
	int outfd = stream->out_fd;

	/*
	 * This does a blocking write-and-wait on any page that belongs to the
	 * subbuffer prior to the one we just wrote.
	 * Don't care about error values, as these are just hints and ways to
	 * limit the amount of page cache used.
	 */
	if (orig_offset < stream->max_sb_size) {
		return;
	}
	lttng_sync_file_range(outfd, orig_offset - stream->max_sb_size,
			stream->max_sb_size,
			SYNC_FILE_RANGE_WAIT_BEFORE
			| SYNC_FILE_RANGE_WRITE
			| SYNC_FILE_RANGE_WAIT_AFTER);
	/*
	 * Give hints to the kernel about how we access the file:
	 * POSIX_FADV_DONTNEED : we won't re-access data in a near future after
	 * we write it.
	 *
	 * We need to call fadvise again after the file grows because the
	 * kernel does not seem to apply fadvise to non-existing parts of the
	 * file.
	 *
	 * Call fadvise _after_ having waited for the page writeback to
	 * complete because the dirty page writeback semantic is not well
	 * defined. So it can be expected to lead to lower throughput in
	 * streaming.
	 */
	ret = posix_fadvise(outfd, orig_offset - stream->max_sb_size,
			stream->max_sb_size, POSIX_FADV_DONTNEED);
	if (ret && ret != -ENOSYS) {
		errno = ret;
		PERROR("posix_fadvise");
	}
}
/*
 * Initialise the necessary environment :
 * - create a new context
 * - create the poll_pipe
 * - create the should_quit pipe (for signal handler)
 * - create the thread pipe (for splice)
 *
 * Takes a function pointer as argument, this function is called when data is
 * available on a buffer. This function is responsible to do the
 * kernctl_get_next_subbuf, read the data with mmap or splice depending on the
 * buffer configuration and then kernctl_put_next_subbuf at the end.
 *
 * Returns a pointer to the new context or NULL on error.
 */
struct lttng_consumer_local_data *lttng_consumer_create(
		enum lttng_consumer_type type,
		ssize_t (*buffer_ready)(struct lttng_consumer_stream *stream,
			struct lttng_consumer_local_data *ctx),
		int (*recv_channel)(struct lttng_consumer_channel *channel),
		int (*recv_stream)(struct lttng_consumer_stream *stream),
		int (*update_stream)(uint64_t stream_key, uint32_t state))
{
	int ret;
	struct lttng_consumer_local_data *ctx;

	assert(consumer_data.type == LTTNG_CONSUMER_UNKNOWN ||
			consumer_data.type == type);
	consumer_data.type = type;

	ctx = zmalloc(sizeof(struct lttng_consumer_local_data));
	if (ctx == NULL) {
		PERROR("allocating context");
		goto error;
	}

	ctx->consumer_error_socket = -1;
	ctx->consumer_metadata_socket = -1;
	pthread_mutex_init(&ctx->metadata_socket_lock, NULL);
	/* assign the callbacks */
	ctx->on_buffer_ready = buffer_ready;
	ctx->on_recv_channel = recv_channel;
	ctx->on_recv_stream = recv_stream;
	ctx->on_update_stream = update_stream;

	ctx->consumer_data_pipe = lttng_pipe_open(0);
	if (!ctx->consumer_data_pipe) {
		goto error_poll_pipe;
	}

	ctx->consumer_wakeup_pipe = lttng_pipe_open(0);
	if (!ctx->consumer_wakeup_pipe) {
		goto error_wakeup_pipe;
	}

	ret = pipe(ctx->consumer_should_quit);
	if (ret < 0) {
		PERROR("Error creating recv pipe");
		goto error_quit_pipe;
	}

	ret = pipe(ctx->consumer_channel_pipe);
	if (ret < 0) {
		PERROR("Error creating channel pipe");
		goto error_channel_pipe;
	}

	ctx->consumer_metadata_pipe = lttng_pipe_open(0);
	if (!ctx->consumer_metadata_pipe) {
		goto error_metadata_pipe;
	}

	return ctx;

error_metadata_pipe:
	utils_close_pipe(ctx->consumer_channel_pipe);
error_channel_pipe:
	utils_close_pipe(ctx->consumer_should_quit);
error_quit_pipe:
	lttng_pipe_destroy(ctx->consumer_wakeup_pipe);
error_wakeup_pipe:
	lttng_pipe_destroy(ctx->consumer_data_pipe);
error_poll_pipe:
	free(ctx);
error:
	return NULL;
}
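
/*
 * Usage sketch (illustrative; the callback names below are placeholders, not
 * symbols from this tree): a consumer daemon creates one context at start-up
 * with the callbacks for its tracer domain:
 *
 *	ctx = lttng_consumer_create(LTTNG_CONSUMER_KERNEL,
 *			read_subbuffer_cb, recv_channel_cb,
 *			recv_stream_cb, update_stream_cb);
 *	if (!ctx) {
 *		// abort consumer start-up
 *	}
 */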
/*
 * Iterate over all streams of the hashtable and free them properly.
 */
static void destroy_data_stream_ht(struct lttng_ht *ht)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	if (ht == NULL) {
		return;
	}

	cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) {
		/*
		 * Ignore return value since we are currently cleaning up so any error
		 * can't be handled.
		 */
		(void) consumer_del_stream(stream, ht);
	}

	lttng_ht_destroy(ht);
}

/*
 * Iterate over all streams of the metadata hashtable and free them
 * properly.
 */
static void destroy_metadata_stream_ht(struct lttng_ht *ht)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	if (ht == NULL) {
		return;
	}

	cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) {
		/*
		 * Ignore return value since we are currently cleaning up so any error
		 * can't be handled.
		 */
		(void) consumer_del_metadata_stream(stream, ht);
	}

	lttng_ht_destroy(ht);
}
/*
 * Close all fds associated with the instance and free the context.
 */
void lttng_consumer_destroy(struct lttng_consumer_local_data *ctx)
{
	int ret;

	DBG("Consumer destroying it. Closing everything.");

	destroy_data_stream_ht(data_ht);
	destroy_metadata_stream_ht(metadata_ht);

	ret = close(ctx->consumer_error_socket);
	if (ret) {
		PERROR("close");
	}
	ret = close(ctx->consumer_metadata_socket);
	if (ret) {
		PERROR("close");
	}
	utils_close_pipe(ctx->consumer_channel_pipe);
	lttng_pipe_destroy(ctx->consumer_data_pipe);
	lttng_pipe_destroy(ctx->consumer_metadata_pipe);
	lttng_pipe_destroy(ctx->consumer_wakeup_pipe);
	utils_close_pipe(ctx->consumer_should_quit);

	unlink(ctx->consumer_command_sock_path);
	free(ctx);
}
/*
 * Write the metadata stream id on the specified file descriptor.
 */
static int write_relayd_metadata_id(int fd,
		struct lttng_consumer_stream *stream,
		struct consumer_relayd_sock_pair *relayd, unsigned long padding)
{
	ssize_t ret;
	struct lttcomm_relayd_metadata_payload hdr;

	hdr.stream_id = htobe64(stream->relayd_stream_id);
	hdr.padding_size = htobe32(padding);
	ret = lttng_write(fd, (void *) &hdr, sizeof(hdr));
	if (ret < sizeof(hdr)) {
		/*
		 * This error means that the fd's end is closed so ignore the PERROR
		 * not to clobber the error output since this can happen in a normal
		 * code path.
		 */
		if (errno != EPIPE) {
			PERROR("write metadata stream id");
		}
		DBG3("Consumer failed to write relayd metadata id (errno: %d)", errno);
		/*
		 * Set ret to a negative value because if ret != sizeof(hdr), we don't
		 * handle writing the missing part so report that as an error and
		 * don't lie to the caller.
		 */
		ret = -1;
		goto end;
	}

	DBG("Metadata stream id %" PRIu64 " with padding %lu written before data",
			stream->relayd_stream_id, padding);

end:
	return (int) ret;
}
/*
 * Mmap the ring buffer, read it and write the data to the tracefile. This is a
 * core function for writing trace buffers to either the local filesystem or
 * the network.
 *
 * It must be called with the stream lock held.
 *
 * Careful review MUST be put if any changes occur!
 *
 * Returns the number of bytes written
 */
ssize_t lttng_consumer_on_read_subbuffer_mmap(
		struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *stream, unsigned long len,
		unsigned long padding,
		struct ctf_packet_index *index)
{
	unsigned long mmap_offset;
	char *mmap_base;
	ssize_t ret = 0;
	off_t orig_offset = stream->out_fd_offset;
	/* Default is on the disk */
	int outfd = stream->out_fd;
	struct consumer_relayd_sock_pair *relayd = NULL;
	unsigned int relayd_hang_up = 0;

	/* RCU lock for the relayd pointer */
	rcu_read_lock();

	/* Flag that the current stream if set for network streaming. */
	if (stream->net_seq_idx != (uint64_t) -1ULL) {
		relayd = consumer_find_relayd(stream->net_seq_idx);
		if (relayd == NULL) {
			ret = -EPIPE;
			goto end;
		}
	}

	/* get the offset inside the fd to mmap */
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		mmap_base = stream->mmap_base;
		ret = kernctl_get_mmap_read_offset(stream->wait_fd, &mmap_offset);
		if (ret != 0) {
			PERROR("tracer ctl get_mmap_read_offset");
			goto end;
		}
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		mmap_base = lttng_ustctl_get_mmap_base(stream);
		if (!mmap_base) {
			ERR("read mmap get mmap base for stream %s", stream->name);
			ret = -EPERM;
			goto end;
		}
		ret = lttng_ustctl_get_mmap_read_offset(stream, &mmap_offset);
		if (ret != 0) {
			PERROR("tracer ctl get_mmap_read_offset");
			goto end;
		}
		break;
	default:
		ERR("Unknown consumer_data type");
		goto end;
	}

	/* Handle stream on the relayd if the output is on the network */
	if (relayd) {
		unsigned long netlen = len;

		/*
		 * Lock the control socket for the complete duration of the function
		 * since from this point on we will use the socket.
		 */
		if (stream->metadata_flag) {
			/* Metadata requires the control socket. */
			pthread_mutex_lock(&relayd->ctrl_sock_mutex);
			netlen += sizeof(struct lttcomm_relayd_metadata_payload);
		}

		ret = write_relayd_stream_header(stream, netlen, padding, relayd);
		if (ret < 0) {
			relayd_hang_up = 1;
			goto write_error;
		}
		/* Use the returned socket. */
		outfd = ret;

		/* Write metadata stream id before payload */
		if (stream->metadata_flag) {
			ret = write_relayd_metadata_id(outfd, stream, relayd, padding);
			if (ret < 0) {
				relayd_hang_up = 1;
				goto write_error;
			}
		}
	} else {
		/* No streaming, we have to set the len with the full padding */
		len += padding;

		/*
		 * Check if we need to change the tracefile before writing the packet.
		 */
		if (stream->chan->tracefile_size > 0 &&
				(stream->tracefile_size_current + len) >
				stream->chan->tracefile_size) {
			ret = utils_rotate_stream_file(stream->chan->pathname,
					stream->name, stream->chan->tracefile_size,
					stream->chan->tracefile_count, stream->uid, stream->gid,
					stream->out_fd, &(stream->tracefile_count_current),
					&stream->out_fd);
			if (ret < 0) {
				ERR("Rotating output file");
				goto end;
			}
			outfd = stream->out_fd;

			if (stream->index_fd >= 0) {
				ret = close(stream->index_fd);
				if (ret < 0) {
					PERROR("Closing index");
					goto end;
				}
				stream->index_fd = -1;
			}
			ret = index_create_file(stream->chan->pathname,
					stream->name, stream->uid, stream->gid,
					stream->chan->tracefile_size,
					stream->tracefile_count_current);
			if (ret < 0) {
				goto end;
			}
			stream->index_fd = ret;

			/* Reset current size because we just perform a rotation. */
			stream->tracefile_size_current = 0;
			stream->out_fd_offset = 0;
		}
		stream->tracefile_size_current += len;
		index->offset = htobe64(stream->out_fd_offset);
	}

	/*
	 * This call guarantees that len or less is returned. It's impossible to
	 * receive a ret value that is bigger than len.
	 */
	ret = lttng_write(outfd, mmap_base + mmap_offset, len);
	DBG("Consumer mmap write() ret %zd (len %lu)", ret, len);
	if (ret < 0 || ((size_t) ret != len)) {
		/*
		 * Report error to caller if nothing was written else at least send the
		 * amount written.
		 */
		/* Socket operation failed. We consider the relayd dead */
		if (errno == EPIPE || errno == EINVAL || errno == EBADF) {
			/*
			 * This is possible if the fd is closed on the other side
			 * (outfd) or any write problem. It can be verbose a bit for a
			 * normal execution if for instance the relayd is stopped
			 * abruptly. This can happen so set this to a DBG statement.
			 */
			DBG("Consumer mmap write detected relayd hang up");
			relayd_hang_up = 1;
			goto write_error;
		}
		/* Unhandled error, print it and stop function right now. */
		PERROR("Error in write mmap (ret %zd != len %lu)", ret, len);
		goto end;
	}
	stream->output_written += ret;

	/* This call is useless on a socket so better save a syscall. */
	if (!relayd) {
		/* This won't block, but will start writeout asynchronously */
		lttng_sync_file_range(outfd, stream->out_fd_offset, len,
				SYNC_FILE_RANGE_WRITE);
		stream->out_fd_offset += len;
	}
	lttng_consumer_sync_trace_file(stream, orig_offset);

write_error:
	/*
	 * This is a special case that the relayd has closed its socket. Let's
	 * cleanup the relayd object and all associated streams.
	 */
	if (relayd && relayd_hang_up) {
		cleanup_relayd(relayd, ctx);
	}

end:
	/* Unlock only if ctrl socket used */
	if (relayd && stream->metadata_flag) {
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
	}

	rcu_read_unlock();
	return ret;
}
/*
 * Splice the data from the ring buffer to the tracefile.
 *
 * It must be called with the stream lock held.
 *
 * Returns the number of bytes spliced.
 */
ssize_t lttng_consumer_on_read_subbuffer_splice(
		struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *stream, unsigned long len,
		unsigned long padding,
		struct ctf_packet_index *index)
{
	ssize_t ret = 0, written = 0, ret_splice = 0;
	loff_t offset = 0;
	off_t orig_offset = stream->out_fd_offset;
	int fd = stream->wait_fd;
	/* Default is on the disk */
	int outfd = stream->out_fd;
	struct consumer_relayd_sock_pair *relayd = NULL;
	int *splice_pipe;
	unsigned int relayd_hang_up = 0;

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		/* Not supported for user space tracing */
		return -ENOSYS;
	default:
		ERR("Unknown consumer_data type");
		return -ENOSYS;
	}

	/* RCU lock for the relayd pointer */
	rcu_read_lock();

	/* Flag that the current stream if set for network streaming. */
	if (stream->net_seq_idx != (uint64_t) -1ULL) {
		relayd = consumer_find_relayd(stream->net_seq_idx);
		if (relayd == NULL) {
			goto end;
		}
	}
	splice_pipe = stream->splice_pipe;

	/* Write metadata stream id before payload */
	if (relayd) {
		unsigned long total_len = len;

		if (stream->metadata_flag) {
			/*
			 * Lock the control socket for the complete duration of the function
			 * since from this point on we will use the socket.
			 */
			pthread_mutex_lock(&relayd->ctrl_sock_mutex);

			ret = write_relayd_metadata_id(splice_pipe[1], stream, relayd,
					padding);
			if (ret < 0) {
				relayd_hang_up = 1;
				goto write_error;
			}

			total_len += sizeof(struct lttcomm_relayd_metadata_payload);
		}

		ret = write_relayd_stream_header(stream, total_len, padding, relayd);
		if (ret < 0) {
			relayd_hang_up = 1;
			goto write_error;
		}
		/* Use the returned socket. */
		outfd = ret;
	} else {
		/* No streaming, we have to set the len with the full padding */
		len += padding;

		/*
		 * Check if we need to change the tracefile before writing the packet.
		 */
		if (stream->chan->tracefile_size > 0 &&
				(stream->tracefile_size_current + len) >
				stream->chan->tracefile_size) {
			ret = utils_rotate_stream_file(stream->chan->pathname,
					stream->name, stream->chan->tracefile_size,
					stream->chan->tracefile_count, stream->uid, stream->gid,
					stream->out_fd, &(stream->tracefile_count_current),
					&stream->out_fd);
			if (ret < 0) {
				ERR("Rotating output file");
				goto end;
			}
			outfd = stream->out_fd;

			if (stream->index_fd >= 0) {
				ret = close(stream->index_fd);
				if (ret < 0) {
					PERROR("Closing index");
					goto end;
				}
				stream->index_fd = -1;
			}
			ret = index_create_file(stream->chan->pathname,
					stream->name, stream->uid, stream->gid,
					stream->chan->tracefile_size,
					stream->tracefile_count_current);
			if (ret < 0) {
				goto end;
			}
			stream->index_fd = ret;

			/* Reset current size because we just perform a rotation. */
			stream->tracefile_size_current = 0;
			stream->out_fd_offset = 0;
		}
		stream->tracefile_size_current += len;
		index->offset = htobe64(stream->out_fd_offset);
	}

	while (len > 0) {
		DBG("splice chan to pipe offset %lu of len %lu (fd : %d, pipe: %d)",
				(unsigned long)offset, len, fd, splice_pipe[1]);
		ret_splice = splice(fd, &offset, splice_pipe[1], NULL, len,
				SPLICE_F_MOVE | SPLICE_F_MORE);
		DBG("splice chan to pipe, ret %zd", ret_splice);
		if (ret_splice < 0) {
			ret = errno;
			PERROR("Error in relay splice");
			goto splice_error;
		}

		/* Handle stream on the relayd if the output is on the network */
		if (relayd && stream->metadata_flag) {
			size_t metadata_payload_size =
				sizeof(struct lttcomm_relayd_metadata_payload);

			/* Update counter to fit the spliced data */
			ret_splice += metadata_payload_size;
			len += metadata_payload_size;
			/*
			 * We do this so the return value can match the len passed as
			 * argument to this function.
			 */
			written -= metadata_payload_size;
		}

		/* Splice data out */
		ret_splice = splice(splice_pipe[0], NULL, outfd, NULL,
				ret_splice, SPLICE_F_MOVE | SPLICE_F_MORE);
		DBG("Consumer splice pipe to file (out_fd: %d), ret %zd",
				outfd, ret_splice);
		if (ret_splice < 0) {
			ret = errno;
			relayd_hang_up = 1;
			goto write_error;
		} else if (ret_splice > len) {
			/*
			 * We don't expect this code path to be executed but you never know
			 * so this is an extra protection against a buggy splice().
			 */
			ret = errno;
			written += ret_splice;
			PERROR("Wrote more data than requested %zd (len: %lu)", ret_splice,
					len);
			goto splice_error;
		} else {
			/* All good, update current len and continue. */
			len -= ret_splice;
		}

		/* This call is useless on a socket so better save a syscall. */
		if (!relayd) {
			/* This won't block, but will start writeout asynchronously */
			lttng_sync_file_range(outfd, stream->out_fd_offset, ret_splice,
					SYNC_FILE_RANGE_WRITE);
			stream->out_fd_offset += ret_splice;
		}
		stream->output_written += ret_splice;
		written += ret_splice;
	}
	lttng_consumer_sync_trace_file(stream, orig_offset);
	goto end;

write_error:
	/*
	 * This is a special case that the relayd has closed its socket. Let's
	 * cleanup the relayd object and all associated streams.
	 */
	if (relayd && relayd_hang_up) {
		cleanup_relayd(relayd, ctx);
		/* Skip splice error so the consumer does not fail */
		goto end;
	}

splice_error:
	/* send the appropriate error description to sessiond */
	switch (ret) {
	case EINVAL:
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_EINVAL);
		break;
	case ENOMEM:
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_ENOMEM);
		break;
	case ESPIPE:
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_ESPIPE);
		break;
	}

end:
	if (relayd && stream->metadata_flag) {
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
	}

	rcu_read_unlock();
	return written;
}
/*
 * Take a snapshot for a specific fd
 *
 * Returns 0 on success, < 0 on error
 */
int lttng_consumer_take_snapshot(struct lttng_consumer_stream *stream)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_take_snapshot(stream);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_take_snapshot(stream);
	default:
		ERR("Unknown consumer_data type");
		return -ENOSYS;
	}
}

/*
 * Get the produced position
 *
 * Returns 0 on success, < 0 on error
 */
int lttng_consumer_get_produced_snapshot(struct lttng_consumer_stream *stream,
		unsigned long *pos)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_get_produced_snapshot(stream, pos);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_get_produced_snapshot(stream, pos);
	default:
		ERR("Unknown consumer_data type");
		return -ENOSYS;
	}
}

int lttng_consumer_recv_cmd(struct lttng_consumer_local_data *ctx,
		int sock, struct pollfd *consumer_sockpoll)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_recv_cmd(ctx, sock, consumer_sockpoll);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_recv_cmd(ctx, sock, consumer_sockpoll);
	default:
		ERR("Unknown consumer_data type");
		return -ENOSYS;
	}
}
void lttng_consumer_close_all_metadata(void)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		/*
		 * The Kernel consumer has a different metadata scheme so we don't
		 * close anything because the stream will be closed by the session
		 * daemon.
		 */
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		/*
		 * Close all metadata streams. The metadata hash table is passed and
		 * this call iterates over it by closing all wakeup fd. This is safe
		 * because at this point we are sure that the metadata producer is
		 * either dead or blocked.
		 */
		lttng_ustconsumer_close_all_metadata(metadata_ht);
		break;
	default:
		ERR("Unknown consumer_data type");
		break;
	}
}
/*
 * Clean up a metadata stream and free its memory.
 */
void consumer_del_metadata_stream(struct lttng_consumer_stream *stream,
		struct lttng_ht *ht)
{
	struct lttng_consumer_channel *free_chan = NULL;

	/*
	 * This call should NEVER receive regular stream. It must always be
	 * metadata stream and this is crucial for data structure synchronization.
	 */
	assert(stream->metadata_flag);

	DBG3("Consumer delete metadata stream %d", stream->wait_fd);

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&stream->chan->lock);
	pthread_mutex_lock(&stream->lock);

	/* Remove any reference to that stream. */
	consumer_stream_delete(stream, ht);

	/* Close down everything including the relayd if one. */
	consumer_stream_close(stream);
	/* Destroy tracer buffers of the stream. */
	consumer_stream_destroy_buffers(stream);

	/* Atomically decrement channel refcount since other threads can use it. */
	if (!uatomic_sub_return(&stream->chan->refcount, 1)
			&& !uatomic_read(&stream->chan->nb_init_stream_left)) {
		/* Go for channel deletion! */
		free_chan = stream->chan;
	}

	/*
	 * Nullify the stream reference so it is not used after deletion. The
	 * channel lock MUST be acquired before being able to check for a NULL
	 * pointer value.
	 */
	stream->chan->metadata_stream = NULL;

	pthread_mutex_unlock(&stream->lock);
	pthread_mutex_unlock(&stream->chan->lock);
	pthread_mutex_unlock(&consumer_data.lock);

	if (free_chan) {
		consumer_del_channel(free_chan);
	}

	consumer_stream_free(stream);
}
/*
 * Action done with the metadata stream when adding it to the consumer internal
 * data structures to handle it.
 */
int consumer_add_metadata_stream(struct lttng_consumer_stream *stream)
{
	struct lttng_ht *ht = metadata_ht;
	int ret = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;

	DBG3("Adding metadata stream %" PRIu64 " to hash table", stream->key);

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&stream->chan->lock);
	pthread_mutex_lock(&stream->chan->timer_lock);
	pthread_mutex_lock(&stream->lock);

	/*
	 * From here, refcounts are updated so be _careful_ when returning an error
	 * after this point.
	 */

	/*
	 * Lookup the stream just to make sure it does not exist in our internal
	 * state. This should NEVER happen.
	 */
	lttng_ht_lookup(ht, &stream->key, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	assert(!node);

	/*
	 * When nb_init_stream_left reaches 0, we don't need to trigger any action
	 * in terms of destroying the associated channel, because the action that
	 * causes the count to become 0 also causes a stream to be added. The
	 * channel deletion will thus be triggered by the following removal of this
	 * stream.
	 */
	if (uatomic_read(&stream->chan->nb_init_stream_left) > 0) {
		/* Increment refcount before decrementing nb_init_stream_left */
		uatomic_inc(&stream->chan->refcount);
		uatomic_dec(&stream->chan->nb_init_stream_left);
	}

	lttng_ht_add_unique_u64(ht, &stream->node);

	lttng_ht_add_unique_u64(consumer_data.stream_per_chan_id_ht,
			&stream->node_channel_id);

	/*
	 * Add stream to the stream_list_ht of the consumer data. No need to steal
	 * the key since the HT does not use it and we allow to add redundant keys
	 * here.
	 */
	lttng_ht_add_u64(consumer_data.stream_list_ht, &stream->node_session_id);

	pthread_mutex_unlock(&stream->lock);
	pthread_mutex_unlock(&stream->chan->lock);
	pthread_mutex_unlock(&stream->chan->timer_lock);
	pthread_mutex_unlock(&consumer_data.lock);

	return ret;
}
/*
 * Delete data streams that are flagged for deletion (endpoint_status).
 */
static void validate_endpoint_status_data_stream(void)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	DBG("Consumer delete flagged data stream");

	cds_lfht_for_each_entry(data_ht->ht, &iter.iter, stream, node.node) {
		/* Validate delete flag of the stream */
		if (stream->endpoint_status == CONSUMER_ENDPOINT_ACTIVE) {
			continue;
		}
		/* Delete it right now */
		consumer_del_stream(stream, data_ht);
	}
}

/*
 * Delete metadata streams that are flagged for deletion (endpoint_status).
 */
static void validate_endpoint_status_metadata_stream(
		struct lttng_poll_event *pollset)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	DBG("Consumer delete flagged metadata stream");

	cds_lfht_for_each_entry(metadata_ht->ht, &iter.iter, stream, node.node) {
		/* Validate delete flag of the stream */
		if (stream->endpoint_status == CONSUMER_ENDPOINT_ACTIVE) {
			continue;
		}
		/*
		 * Remove from pollset so the metadata thread can continue without
		 * blocking on a deleted stream.
		 */
		lttng_poll_del(pollset, stream->wait_fd);

		/* Delete it right now */
		consumer_del_metadata_stream(stream, metadata_ht);
	}
}
/*
 * Thread polls on metadata file descriptors and writes them on disk or on the
 * network.
 */
void *consumer_thread_metadata_poll(void *data)
{
	int ret, i, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_consumer_stream *stream = NULL;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_poll_event events;
	struct lttng_consumer_local_data *ctx = data;
	ssize_t len;

	rcu_register_thread();

	health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_METADATA);

	if (testpoint(consumerd_thread_metadata)) {
		goto error_testpoint;
	}

	health_code_update();

	DBG("Thread metadata poll started");

	/* Size is set to 1 for the consumer_metadata pipe */
	ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
	if (ret < 0) {
		ERR("Poll set creation failed");
		goto end_poll;
	}

	ret = lttng_poll_add(&events,
			lttng_pipe_get_readfd(ctx->consumer_metadata_pipe), LPOLLIN);
	if (ret < 0) {
		goto end;
	}

	/* Main loop */
	DBG("Metadata main loop started");

	while (1) {
		health_code_update();
		health_poll_entry();
		DBG("Metadata poll wait");
		ret = lttng_poll_wait(&events, -1);
		DBG("Metadata poll return from wait with %d fd(s)",
				LTTNG_POLL_GETNB(&events));
		health_poll_exit();
		DBG("Metadata event caught in thread");
		if (ret < 0) {
			if (errno == EINTR) {
				ERR("Poll EINTR caught");
				continue;
			}
			if (LTTNG_POLL_GETNB(&events) == 0) {
				err = 0;	/* All is OK */
			}
			goto end;
		}

		nb_fd = ret;

		/* From here, the event is a metadata wait fd */
		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			if (!revents) {
				/* No activity for this FD (poll implementation). */
				continue;
			}

			if (pollfd == lttng_pipe_get_readfd(ctx->consumer_metadata_pipe)) {
				if (revents & LPOLLIN) {
					ssize_t pipe_len;

					pipe_len = lttng_pipe_read(ctx->consumer_metadata_pipe,
							&stream, sizeof(stream));
					if (pipe_len < sizeof(stream)) {
						PERROR("read metadata stream");
						/*
						 * Remove the pipe from the poll set and continue the loop
						 * since there might be data to consume.
						 */
						lttng_poll_del(&events,
								lttng_pipe_get_readfd(ctx->consumer_metadata_pipe));
						lttng_pipe_read_close(ctx->consumer_metadata_pipe);
						continue;
					}

					/* A NULL stream means that the state has changed. */
					if (stream == NULL) {
						/* Check for deleted streams. */
						validate_endpoint_status_metadata_stream(&events);
						continue;
					}

					DBG("Adding metadata stream %d to poll set",
							stream->wait_fd);

					/* Add metadata stream to the global poll events list */
					lttng_poll_add(&events, stream->wait_fd,
							LPOLLIN | LPOLLPRI | LPOLLHUP);
				} else if (revents & (LPOLLERR | LPOLLHUP)) {
					DBG("Metadata thread pipe hung up");
					/*
					 * Remove the pipe from the poll set and continue the loop
					 * since there might be data to consume.
					 */
					lttng_poll_del(&events,
							lttng_pipe_get_readfd(ctx->consumer_metadata_pipe));
					lttng_pipe_read_close(ctx->consumer_metadata_pipe);
					continue;
				} else {
					ERR("Unexpected poll events %u for sock %d", revents, pollfd);
					goto end;
				}

				/* Handle other stream */
				continue;
			}

			rcu_read_lock();
			{
				uint64_t tmp_id = (uint64_t) pollfd;

				lttng_ht_lookup(metadata_ht, &tmp_id, &iter);
			}
			node = lttng_ht_iter_get_node_u64(&iter);
			assert(node);

			stream = caa_container_of(node, struct lttng_consumer_stream,
					node);

			if (revents & (LPOLLIN | LPOLLPRI)) {
				/* Get the data out of the metadata file descriptor */
				DBG("Metadata available on fd %d", pollfd);
				assert(stream->wait_fd == pollfd);

				health_code_update();

				len = ctx->on_buffer_ready(stream, ctx);
				/*
				 * We don't check the return value here since if we get
				 * a negative len, it means an error occurred thus we
				 * simply remove it from the poll set and free the
				 * stream.
				 */

				/* It's ok to have an unavailable sub-buffer */
				if (len < 0 && len != -EAGAIN && len != -ENODATA) {
					/* Clean up stream from consumer and free it. */
					lttng_poll_del(&events, stream->wait_fd);
					consumer_del_metadata_stream(stream, metadata_ht);
				}
			} else if (revents & (LPOLLERR | LPOLLHUP)) {
				DBG("Metadata fd %d is hup|err.", pollfd);
				if (!stream->hangup_flush_done
						&& (consumer_data.type == LTTNG_CONSUMER32_UST
							|| consumer_data.type == LTTNG_CONSUMER64_UST)) {
					DBG("Attempting to flush and consume the UST buffers");
					lttng_ustconsumer_on_stream_hangup(stream);

					/* We just flushed the stream now read it. */
					health_code_update();

					len = ctx->on_buffer_ready(stream, ctx);
					/*
					 * We don't check the return value here since if we get
					 * a negative len, it means an error occurred thus we
					 * simply remove it from the poll set and free the
					 * stream.
					 */
				}

				lttng_poll_del(&events, stream->wait_fd);
				/*
				 * This call updates the channel states, closes file descriptors
				 * and securely frees the stream.
				 */
				consumer_del_metadata_stream(stream, metadata_ht);
			} else {
				ERR("Unexpected poll events %u for sock %d", revents, pollfd);
			}

			/* Release RCU lock for the stream looked up */
			rcu_read_unlock();
		}
	}

end:
	DBG("Metadata poll thread exiting");

	lttng_poll_clean(&events);
end_poll:
error_testpoint:
	if (err) {
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_consumerd);
	rcu_unregister_thread();
	return NULL;
}
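
/*
 * The consumer_thread_*_poll() entry points in this file are meant to be
 * handed directly to pthread_create() by the consumer daemon. A minimal
 * sketch, assuming a context created beforehand (for instance with
 * lttng_consumer_create()); the thread handle name is illustrative only:
 *
 *	pthread_t metadata_thread;
 *	int ret;
 *
 *	ret = pthread_create(&metadata_thread, NULL,
 *			consumer_thread_metadata_poll, (void *) ctx);
 *	if (ret) {
 *		errno = ret;
 *		PERROR("pthread_create metadata thread");
 *	}
 */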
/*
 * This thread polls the fds in the set to consume the data and write
 * it to tracefile if necessary.
 */
void *consumer_thread_data_poll(void *data)
{
	int num_rdy, num_hup, high_prio, ret, i, err = -1;
	struct pollfd *pollfd = NULL;
	/* local view of the streams */
	struct lttng_consumer_stream **local_stream = NULL, *new_stream = NULL;
	/* local view of consumer_data.fds_count */
	int nb_fd = 0;
	struct lttng_consumer_local_data *ctx = data;
	ssize_t len;

	rcu_register_thread();

	health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_DATA);

	if (testpoint(consumerd_thread_data)) {
		goto error_testpoint;
	}

	health_code_update();

	local_stream = zmalloc(sizeof(struct lttng_consumer_stream *));
	if (local_stream == NULL) {
		PERROR("local_stream malloc");
		goto end;
	}

	while (1) {
		health_code_update();

		high_prio = 0;
		num_hup = 0;

		/*
		 * The fds set has been updated, we need to update our
		 * local array as well.
		 */
		pthread_mutex_lock(&consumer_data.lock);
		if (consumer_data.need_update) {
			free(pollfd);
			pollfd = NULL;

			free(local_stream);
			local_stream = NULL;

			/*
			 * Allocate for all fds +1 for the consumer_data_pipe and +1 for
			 * the wakeup pipe.
			 */
			pollfd = zmalloc((consumer_data.stream_count + 2) * sizeof(struct pollfd));
			if (pollfd == NULL) {
				PERROR("pollfd malloc");
				pthread_mutex_unlock(&consumer_data.lock);
				goto end;
			}

			local_stream = zmalloc((consumer_data.stream_count + 2) *
					sizeof(struct lttng_consumer_stream *));
			if (local_stream == NULL) {
				PERROR("local_stream malloc");
				pthread_mutex_unlock(&consumer_data.lock);
				goto end;
			}
			ret = update_poll_array(ctx, &pollfd, local_stream,
					data_ht);
			if (ret < 0) {
				ERR("Error in allocating pollfd or local_outfds");
				lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_POLL_ERROR);
				pthread_mutex_unlock(&consumer_data.lock);
				goto end;
			}
			nb_fd = ret;
			consumer_data.need_update = 0;
		}
		pthread_mutex_unlock(&consumer_data.lock);

		/* No FDs left and consumer_quit is set: clean up the thread. */
		if (nb_fd == 0 && consumer_quit == 1) {
			err = 0;	/* All is OK */
			goto end;
		}
		/* poll on the array of fds */
	restart:
		DBG("polling on %d fd", nb_fd + 2);
		health_poll_entry();
		num_rdy = poll(pollfd, nb_fd + 2, -1);
		health_poll_exit();
		DBG("poll num_rdy : %d", num_rdy);
		if (num_rdy == -1) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			PERROR("Poll error");
			lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_POLL_ERROR);
			goto end;
		} else if (num_rdy == 0) {
			DBG("Polling thread timed out");
			goto end;
		}

		/*
		 * If the consumer_data_pipe triggered poll, go directly to the
		 * beginning of the loop to update the array. We want to prioritize
		 * array update over low-priority reads.
		 */
		if (pollfd[nb_fd].revents & (POLLIN | POLLPRI)) {
			ssize_t pipe_readlen;

			DBG("consumer_data_pipe wake up");
			pipe_readlen = lttng_pipe_read(ctx->consumer_data_pipe,
					&new_stream, sizeof(new_stream));
			if (pipe_readlen < sizeof(new_stream)) {
				PERROR("Consumer data pipe");
				/* Continue so we can at least handle the current stream(s). */
				continue;
			}

			/*
			 * If the stream is NULL, just ignore it. It's also possible that
			 * the sessiond poll thread changed the consumer_quit state and is
			 * waking us up to test it.
			 */
			if (new_stream == NULL) {
				validate_endpoint_status_data_stream();
				continue;
			}

			/* Continue to update the local streams and handle prio ones */
			continue;
		}

		/* Handle wakeup pipe. */
		if (pollfd[nb_fd + 1].revents & (POLLIN | POLLPRI)) {
			char dummy;
			ssize_t pipe_readlen;

			pipe_readlen = lttng_pipe_read(ctx->consumer_wakeup_pipe, &dummy,
					sizeof(dummy));
			if (pipe_readlen < 0) {
				PERROR("Consumer data wakeup pipe");
			}
			/* We've been awakened to handle stream(s). */
			ctx->has_wakeup = 0;
		}

		/* Take care of high priority channels first. */
		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			if (local_stream[i] == NULL) {
				continue;
			}
			if (pollfd[i].revents & POLLPRI) {
				DBG("Urgent read on fd %d", pollfd[i].fd);
				high_prio = 1;
				len = ctx->on_buffer_ready(local_stream[i], ctx);
				/* it's ok to have an unavailable sub-buffer */
				if (len < 0 && len != -EAGAIN && len != -ENODATA) {
					/* Clean the stream and free it. */
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
				} else if (len > 0) {
					local_stream[i]->data_read = 1;
				}
			}
		}

		/*
		 * If we read a high prio channel in this loop, try again
		 * for more high prio data.
		 */
		if (high_prio) {
			continue;
		}

		/* Take care of low priority channels. */
		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			if (local_stream[i] == NULL) {
				continue;
			}
			if ((pollfd[i].revents & POLLIN) ||
					local_stream[i]->hangup_flush_done ||
					local_stream[i]->has_data) {
				DBG("Normal read on fd %d", pollfd[i].fd);
				len = ctx->on_buffer_ready(local_stream[i], ctx);
				/* it's ok to have an unavailable sub-buffer */
				if (len < 0 && len != -EAGAIN && len != -ENODATA) {
					/* Clean the stream and free it. */
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
				} else if (len > 0) {
					local_stream[i]->data_read = 1;
				}
			}
		}

		/* Handle hangup and errors */
		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			if (local_stream[i] == NULL) {
				continue;
			}
			if (!local_stream[i]->hangup_flush_done
					&& (pollfd[i].revents & (POLLHUP | POLLERR | POLLNVAL))
					&& (consumer_data.type == LTTNG_CONSUMER32_UST
						|| consumer_data.type == LTTNG_CONSUMER64_UST)) {
				DBG("fd %d is hup|err|nval. Attempting flush and read.",
						pollfd[i].fd);
				lttng_ustconsumer_on_stream_hangup(local_stream[i]);
				/* Attempt read again, for the data we just flushed. */
				local_stream[i]->data_read = 1;
			}
			/*
			 * If the poll flag is HUP/ERR/NVAL and we have
			 * read no data in this pass, we can remove the
			 * stream from its hash table.
			 */
			if ((pollfd[i].revents & POLLHUP)) {
				DBG("Polling fd %d tells it has hung up.", pollfd[i].fd);
				if (!local_stream[i]->data_read) {
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
					num_hup++;
				}
			} else if (pollfd[i].revents & POLLERR) {
				ERR("Error returned in polling fd %d.", pollfd[i].fd);
				if (!local_stream[i]->data_read) {
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
					num_hup++;
				}
			} else if (pollfd[i].revents & POLLNVAL) {
				ERR("Polling fd %d tells fd is not open.", pollfd[i].fd);
				if (!local_stream[i]->data_read) {
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
					num_hup++;
				}
			}
			if (local_stream[i] != NULL) {
				local_stream[i]->data_read = 0;
			}
		}
	}

end:
	DBG("polling thread exiting");
	free(pollfd);
	free(local_stream);

	/*
	 * Close the write side of the pipe so epoll_wait() in
	 * consumer_thread_metadata_poll can catch it. The thread is monitoring the
	 * read side of the pipe. If we close them both, epoll_wait strangely does
	 * not return and could create an endless wait period if the pipe is the
	 * only tracked fd in the poll set. The thread will take care of closing
	 * the read side.
	 */
	(void) lttng_pipe_write_close(ctx->consumer_metadata_pipe);

error_testpoint:
	if (err) {
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_consumerd);
	rcu_unregister_thread();
	return NULL;
}
/*
 * Close the wake-up end of each stream belonging to the channel. This will
 * allow the poll() on the stream read-side to detect when the
 * write-side (application) finally closes them.
 */
void consumer_close_channel_streams(struct lttng_consumer_channel *channel)
{
	struct lttng_ht *ht;
	struct lttng_consumer_stream *stream;
	struct lttng_ht_iter iter;

	ht = consumer_data.stream_per_chan_id_ht;

	rcu_read_lock();
	cds_lfht_for_each_entry_duplicate(ht->ht,
			ht->hash_fct(&channel->key, lttng_ht_seed),
			ht->match_fct, &channel->key,
			&iter.iter, stream, node_channel_id.node) {
		/*
		 * Protect against teardown with mutex.
		 */
		pthread_mutex_lock(&stream->lock);
		if (cds_lfht_is_node_deleted(&stream->node.node)) {
			goto next;
		}
		switch (consumer_data.type) {
		case LTTNG_CONSUMER_KERNEL:
			break;
		case LTTNG_CONSUMER32_UST:
		case LTTNG_CONSUMER64_UST:
			if (stream->metadata_flag) {
				/* Safe and protected by the stream lock. */
				lttng_ustconsumer_close_metadata(stream->chan);
			} else {
				/*
				 * Note: a mutex is taken internally within
				 * liblttng-ust-ctl to protect timer wakeup_fd
				 * use from concurrent close.
				 */
				lttng_ustconsumer_close_stream_wakeup(stream);
			}
			break;
		default:
			ERR("Unknown consumer_data type");
			assert(0);
		}
	next:
		pthread_mutex_unlock(&stream->lock);
	}
	rcu_read_unlock();
}
static void destroy_channel_ht(struct lttng_ht *ht)
{
	int ret;
	struct lttng_ht_iter iter;
	struct lttng_consumer_channel *channel;

	if (ht == NULL) {
		return;
	}

	rcu_read_lock();
	cds_lfht_for_each_entry(ht->ht, &iter.iter, channel, wait_fd_node.node) {
		ret = lttng_ht_del(ht, &iter);
		(void) ret;
	}
	rcu_read_unlock();

	lttng_ht_destroy(ht);
}
/*
 * This thread polls the channel fds to detect when they are being
 * closed. It closes all related streams if the channel is detected as
 * closed. It is currently only used as a shim layer for UST because the
 * consumerd needs to keep the per-stream wakeup end of pipes open for
 * periodical flush.
 */
void *consumer_thread_channel_poll(void *data)
{
	int ret, i, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_consumer_channel *chan = NULL;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_poll_event events;
	struct lttng_consumer_local_data *ctx = data;
	struct lttng_ht *channel_ht;

	rcu_register_thread();

	health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_CHANNEL);

	if (testpoint(consumerd_thread_channel)) {
		goto error_testpoint;
	}

	health_code_update();

	channel_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!channel_ht) {
		/* ENOMEM at this point. Better to bail out. */
		goto end_ht;
	}

	DBG("Thread channel poll started");

	/* Size is set to 1 for the consumer_channel pipe */
	ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
	if (ret < 0) {
		ERR("Poll set creation failed");
		goto end_poll;
	}

	ret = lttng_poll_add(&events, ctx->consumer_channel_pipe[0], LPOLLIN);
	if (ret < 0) {
		goto end;
	}

	/* Main loop */
	DBG("Channel main loop started");

	while (1) {
		health_code_update();
		DBG("Channel poll wait");
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		DBG("Channel poll return from wait with %d fd(s)",
				LTTNG_POLL_GETNB(&events));
		health_poll_exit();
		DBG("Channel event caught in thread");
		if (ret < 0) {
			if (errno == EINTR) {
				ERR("Poll EINTR caught");
				continue;
			}
			if (LTTNG_POLL_GETNB(&events) == 0) {
				err = 0;	/* All is OK */
			}
			goto end;
		}

		nb_fd = ret;

		/* From here, the event is a channel wait fd */
		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			if (!revents) {
				/* No activity for this FD (poll implementation). */
				continue;
			}

			if (pollfd == ctx->consumer_channel_pipe[0]) {
				if (revents & LPOLLIN) {
					enum consumer_channel_action action;
					uint64_t key;

					ret = read_channel_pipe(ctx, &chan, &key, &action);
					if (ret <= 0) {
						ERR("Error reading channel pipe");
						lttng_poll_del(&events, ctx->consumer_channel_pipe[0]);
						continue;
					}

					switch (action) {
					case CONSUMER_CHANNEL_ADD:
						DBG("Adding channel %d to poll set",
								chan->wait_fd);

						lttng_ht_node_init_u64(&chan->wait_fd_node,
								chan->wait_fd);
						rcu_read_lock();
						lttng_ht_add_unique_u64(channel_ht,
								&chan->wait_fd_node);
						rcu_read_unlock();
						/* Add channel to the global poll events list */
						lttng_poll_add(&events, chan->wait_fd,
								LPOLLERR | LPOLLHUP);
						break;
					case CONSUMER_CHANNEL_DEL:
					{
						/*
						 * This command should never be called if the channel
						 * has streams monitored by either the data or metadata
						 * thread. The consumer only notifies this thread with a
						 * channel delete command if it receives a destroy
						 * channel command from the session daemon, which sends it
						 * if a command prior to GET_CHANNEL failed.
						 */
						rcu_read_lock();
						chan = consumer_find_channel(key);
						if (!chan) {
							rcu_read_unlock();
							ERR("UST consumer get channel key %" PRIu64 " not found for del channel", key);
							break;
						}
						lttng_poll_del(&events, chan->wait_fd);
						iter.iter.node = &chan->wait_fd_node.node;
						ret = lttng_ht_del(channel_ht, &iter);

						switch (consumer_data.type) {
						case LTTNG_CONSUMER_KERNEL:
							break;
						case LTTNG_CONSUMER32_UST:
						case LTTNG_CONSUMER64_UST:
							health_code_update();
							/* Destroy streams that might have been left in the stream list. */
							clean_channel_stream_list(chan);
							break;
						default:
							ERR("Unknown consumer_data type");
							assert(0);
						}

						/*
						 * Release our own refcount. Force channel deletion even if
						 * streams were not initialized.
						 */
						if (!uatomic_sub_return(&chan->refcount, 1)) {
							consumer_del_channel(chan);
						}
						rcu_read_unlock();
						break;
					}
					case CONSUMER_CHANNEL_QUIT:
						/*
						 * Remove the pipe from the poll set and continue the loop
						 * since there might be data to consume.
						 */
						lttng_poll_del(&events, ctx->consumer_channel_pipe[0]);
						continue;
					default:
						ERR("Unknown action");
						break;
					}
				} else if (revents & (LPOLLERR | LPOLLHUP)) {
					DBG("Channel thread pipe hung up");
					/*
					 * Remove the pipe from the poll set and continue the loop
					 * since there might be data to consume.
					 */
					lttng_poll_del(&events, ctx->consumer_channel_pipe[0]);
					continue;
				} else {
					ERR("Unexpected poll events %u for sock %d", revents, pollfd);
					goto end;
				}

				/* Handle other stream */
				continue;
			}

			rcu_read_lock();
			{
				uint64_t tmp_id = (uint64_t) pollfd;

				lttng_ht_lookup(channel_ht, &tmp_id, &iter);
			}
			node = lttng_ht_iter_get_node_u64(&iter);
			assert(node);

			chan = caa_container_of(node, struct lttng_consumer_channel,
					wait_fd_node);

			/* Check for error event */
			if (revents & (LPOLLERR | LPOLLHUP)) {
				DBG("Channel fd %d is hup|err.", pollfd);

				lttng_poll_del(&events, chan->wait_fd);
				ret = lttng_ht_del(channel_ht, &iter);

				/*
				 * This will close the wait fd for each stream associated to
				 * this channel AND monitored by the data/metadata thread thus
				 * will be cleaned up by the right thread.
				 */
				consumer_close_channel_streams(chan);

				/* Release our own refcount */
				if (!uatomic_sub_return(&chan->refcount, 1)
						&& !uatomic_read(&chan->nb_init_stream_left)) {
					consumer_del_channel(chan);
				}
			} else {
				ERR("Unexpected poll events %u for sock %d", revents, pollfd);
				rcu_read_unlock();
				goto end;
			}

			/* Release RCU lock for the channel looked up */
			rcu_read_unlock();
		}
	}

end:
	lttng_poll_clean(&events);
end_poll:
	destroy_channel_ht(channel_ht);
end_ht:
error_testpoint:
	DBG("Channel poll thread exiting");
	if (err) {
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_consumerd);
	rcu_unregister_thread();
	return NULL;
}
static int set_metadata_socket(struct lttng_consumer_local_data *ctx,
		struct pollfd *sockpoll, int client_socket)
{
	int ret;

	assert(ctx);
	assert(sockpoll);

	ret = lttng_consumer_poll_socket(sockpoll);
	if (ret) {
		goto error;
	}
	DBG("Metadata connection on client_socket");

	/* Blocking call, waiting for transmission */
	ctx->consumer_metadata_socket = lttcomm_accept_unix_sock(client_socket);
	if (ctx->consumer_metadata_socket < 0) {
		WARN("On accept metadata");
		ret = -1;
		goto error;
	}

	ret = 0;

error:
	return ret;
}
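
/*
 * Note that the session daemon connects twice on the command socket: the
 * first accepted connection becomes the command socket polled by
 * consumer_thread_sessiond_poll() below, while the second one, accepted
 * here, becomes ctx->consumer_metadata_socket and is kept for later metadata
 * exchanges with the session daemon.
 */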
/*
 * This thread listens on the consumerd socket and receives the file
 * descriptors from the session daemon.
 */
void *consumer_thread_sessiond_poll(void *data)
{
	int sock = -1, client_socket, ret, err = -1;
	/*
	 * Structure to poll for incoming data on the communication socket;
	 * avoids making the sockets blocking.
	 */
	struct pollfd consumer_sockpoll[2];
	struct lttng_consumer_local_data *ctx = data;

	rcu_register_thread();

	health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_SESSIOND);

	if (testpoint(consumerd_thread_sessiond)) {
		goto error_testpoint;
	}

	health_code_update();

	DBG("Creating command socket %s", ctx->consumer_command_sock_path);
	unlink(ctx->consumer_command_sock_path);
	client_socket = lttcomm_create_unix_sock(ctx->consumer_command_sock_path);
	if (client_socket < 0) {
		ERR("Cannot create command socket");
		goto end;
	}

	ret = lttcomm_listen_unix_sock(client_socket);
	if (ret < 0) {
		goto end;
	}

	DBG("Sending ready command to lttng-sessiond");
	ret = lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_COMMAND_SOCK_READY);
	/* return < 0 on error, but == 0 is not fatal */
	if (ret < 0) {
		ERR("Error sending ready command to lttng-sessiond");
		goto end;
	}

	/* prepare the FDs to poll : to client socket and the should_quit pipe */
	consumer_sockpoll[0].fd = ctx->consumer_should_quit[0];
	consumer_sockpoll[0].events = POLLIN | POLLPRI;
	consumer_sockpoll[1].fd = client_socket;
	consumer_sockpoll[1].events = POLLIN | POLLPRI;

	ret = lttng_consumer_poll_socket(consumer_sockpoll);
	if (ret) {
		goto end;
	}
	DBG("Connection on client_socket");

	/* Blocking call, waiting for transmission */
	sock = lttcomm_accept_unix_sock(client_socket);
	if (sock < 0) {
		WARN("On accept");
		goto end;
	}

	/*
	 * Setup metadata socket which is the second socket connection on the
	 * command unix socket.
	 */
	ret = set_metadata_socket(ctx, consumer_sockpoll, client_socket);
	if (ret) {
		goto end;
	}

	/* This socket is not useful anymore. */
	ret = close(client_socket);
	if (ret < 0) {
		PERROR("close client_socket");
	}
	client_socket = -1;

	/* update the polling structure to poll on the established socket */
	consumer_sockpoll[1].fd = sock;
	consumer_sockpoll[1].events = POLLIN | POLLPRI;

	while (1) {
		health_code_update();

		health_poll_entry();
		ret = lttng_consumer_poll_socket(consumer_sockpoll);
		health_poll_exit();
		if (ret) {
			goto end;
		}
		DBG("Incoming command on sock");
		ret = lttng_consumer_recv_cmd(ctx, sock, consumer_sockpoll);
		if (ret <= 0) {
			/*
			 * This could simply be a session daemon quitting. Don't output
			 * ERR() here.
			 */
			DBG("Communication interrupted on command socket");
			err = 0;	/* All is OK */
			goto end;
		}
		if (consumer_quit) {
			DBG("consumer_thread_receive_fds received quit from signal");
			err = 0;	/* All is OK */
			goto end;
		}
		DBG("received command on sock");
	}

end:
	DBG("Consumer thread sessiond poll exiting");

	/*
	 * Close metadata streams since the producer is the session daemon which
	 * has just exited.
	 *
	 * NOTE: for now, this only applies to the UST tracer.
	 */
	lttng_consumer_close_all_metadata();

	/*
	 * When all fds have hung up, the polling threads can exit cleanly.
	 */
	consumer_quit = 1;

	/*
	 * Notify the data poll thread to poll back again and test the
	 * consumer_quit state that we just set so as to quit gracefully.
	 */
	notify_thread_lttng_pipe(ctx->consumer_data_pipe);

	notify_channel_pipe(ctx, NULL, -1, CONSUMER_CHANNEL_QUIT);

	notify_health_quit_pipe(health_quit_pipe);

	/* Cleaning up possibly open sockets. */
	if (sock >= 0) {
		ret = close(sock);
		if (ret < 0) {
			PERROR("close sock sessiond poll");
		}
	}
	if (client_socket >= 0) {
		ret = close(client_socket);
		if (ret < 0) {
			PERROR("close client_socket sessiond poll");
		}
	}

error_testpoint:
	if (err) {
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_consumerd);
	rcu_unregister_thread();
	return NULL;
}
ssize_t lttng_consumer_read_subbuffer(struct lttng_consumer_stream *stream,
		struct lttng_consumer_local_data *ctx)
{
	ssize_t ret;

	pthread_mutex_lock(&stream->lock);
	if (stream->metadata_flag) {
		pthread_mutex_lock(&stream->metadata_rdv_lock);
	}

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		ret = lttng_kconsumer_read_subbuffer(stream, ctx);
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		ret = lttng_ustconsumer_read_subbuffer(stream, ctx);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		ret = -ENOSYS;
		break;
	}

	if (stream->metadata_flag) {
		pthread_cond_broadcast(&stream->metadata_rdv);
		pthread_mutex_unlock(&stream->metadata_rdv_lock);
	}
	pthread_mutex_unlock(&stream->lock);

	return ret;
}
int lttng_consumer_on_recv_stream(struct lttng_consumer_stream *stream)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_on_recv_stream(stream);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_on_recv_stream(stream);
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		return -ENOSYS;
	}
}
/*
 * Allocate and set consumer data hash tables.
 */
int lttng_consumer_init(void)
{
	consumer_data.channel_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!consumer_data.channel_ht) {
		goto error;
	}

	consumer_data.relayd_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!consumer_data.relayd_ht) {
		goto error;
	}

	consumer_data.stream_list_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!consumer_data.stream_list_ht) {
		goto error;
	}

	consumer_data.stream_per_chan_id_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!consumer_data.stream_per_chan_id_ht) {
		goto error;
	}

	data_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!data_ht) {
		goto error;
	}

	metadata_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!metadata_ht) {
		goto error;
	}

	return 0;

error:
	return -1;
}
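
/*
 * A minimal sketch of the expected call at consumerd start-up, before any of
 * the polling threads are created (the error handling shown is illustrative
 * only):
 *
 *	if (lttng_consumer_init() < 0) {
 *		ERR("Failed to allocate consumer hash tables");
 *		goto error;
 *	}
 */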
/*
 * Process the ADD_RELAYD command received by a consumer.
 *
 * This will create a relayd socket pair and add it to the relayd hash table.
 * The caller MUST acquire a RCU read side lock before calling it.
 */
int consumer_add_relayd_socket(uint64_t net_seq_idx, int sock_type,
		struct lttng_consumer_local_data *ctx, int sock,
		struct pollfd *consumer_sockpoll,
		struct lttcomm_relayd_sock *relayd_sock, uint64_t sessiond_id,
		uint64_t relayd_session_id)
{
	int fd = -1, ret = -1, relayd_created = 0;
	enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS;
	struct consumer_relayd_sock_pair *relayd = NULL;

	assert(ctx);
	assert(relayd_sock);

	DBG("Consumer adding relayd socket (idx: %" PRIu64 ")", net_seq_idx);

	/* Get relayd reference if exists. */
	relayd = consumer_find_relayd(net_seq_idx);
	if (relayd == NULL) {
		assert(sock_type == LTTNG_STREAM_CONTROL);
		/* Not found. Allocate one. */
		relayd = consumer_allocate_relayd_sock_pair(net_seq_idx);
		if (relayd == NULL) {
			ret_code = LTTCOMM_CONSUMERD_ENOMEM;
			goto error;
		}
		relayd->sessiond_session_id = sessiond_id;
		relayd_created = 1;

		/*
		 * This code path MUST continue to the consumer send status message so
		 * we can notify the session daemon and continue our work without
		 * killing everything.
		 */
	} else {
		/*
		 * relayd key should never be found for control socket.
		 */
		assert(sock_type != LTTNG_STREAM_CONTROL);
	}

	/* First send a status message before receiving the fds. */
	ret = consumer_send_status_msg(sock, LTTCOMM_CONSUMERD_SUCCESS);
	if (ret < 0) {
		/* Somehow, the session daemon is not responding anymore. */
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_FATAL);
		goto error_nosignal;
	}

	/* Poll on consumer socket. */
	ret = lttng_consumer_poll_socket(consumer_sockpoll);
	if (ret) {
		/* Needing to exit in the middle of a command: error. */
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_POLL_ERROR);
		ret = -EINTR;
		goto error_nosignal;
	}

	/* Get relayd socket from session daemon */
	ret = lttcomm_recv_fds_unix_sock(sock, &fd, 1);
	if (ret != sizeof(fd)) {
		ret = -1;
		fd = -1;	/* Just in case it gets set with an invalid value. */

		/*
		 * Failing to receive FDs might indicate a major problem such as
		 * reaching a fd limit during the receive where the kernel returns a
		 * MSG_CTRUNC and fails to cleanup the fd in the queue. Any case, we
		 * don't take any chances and stop everything.
		 *
		 * XXX: Feature request #558 will fix that and avoid this possible
		 * issue when reaching the fd limit.
		 */
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_FD);
		ret_code = LTTCOMM_CONSUMERD_ERROR_RECV_FD;
		goto error;
	}

	/* Copy socket information and received FD */
	switch (sock_type) {
	case LTTNG_STREAM_CONTROL:
		/* Copy received lttcomm socket */
		lttcomm_copy_sock(&relayd->control_sock.sock, &relayd_sock->sock);
		ret = lttcomm_create_sock(&relayd->control_sock.sock);
		/* Handle create_sock error. */
		if (ret < 0) {
			ret_code = LTTCOMM_CONSUMERD_ENOMEM;
			goto error;
		}
		/*
		 * Close the socket created internally by
		 * lttcomm_create_sock, so we can replace it by the one
		 * received from sessiond.
		 */
		if (close(relayd->control_sock.sock.fd)) {
			PERROR("close");
		}

		/* Assign new file descriptor */
		relayd->control_sock.sock.fd = fd;
		fd = -1;	/* For error path */
		/* Assign version values. */
		relayd->control_sock.major = relayd_sock->major;
		relayd->control_sock.minor = relayd_sock->minor;

		relayd->relayd_session_id = relayd_session_id;

		break;
	case LTTNG_STREAM_DATA:
		/* Copy received lttcomm socket */
		lttcomm_copy_sock(&relayd->data_sock.sock, &relayd_sock->sock);
		ret = lttcomm_create_sock(&relayd->data_sock.sock);
		/* Handle create_sock error. */
		if (ret < 0) {
			ret_code = LTTCOMM_CONSUMERD_ENOMEM;
			goto error;
		}
		/*
		 * Close the socket created internally by
		 * lttcomm_create_sock, so we can replace it by the one
		 * received from sessiond.
		 */
		if (close(relayd->data_sock.sock.fd)) {
			PERROR("close");
		}

		/* Assign new file descriptor */
		relayd->data_sock.sock.fd = fd;
		fd = -1;	/* for eventual error paths */
		/* Assign version values. */
		relayd->data_sock.major = relayd_sock->major;
		relayd->data_sock.minor = relayd_sock->minor;
		break;
	default:
		ERR("Unknown relayd socket type (%d)", sock_type);
		ret = -1;
		ret_code = LTTCOMM_CONSUMERD_FATAL;
		goto error;
	}

	DBG("Consumer %s socket created successfully with net idx %" PRIu64 " (fd: %d)",
			sock_type == LTTNG_STREAM_CONTROL ? "control" : "data",
			relayd->net_seq_idx, fd);

	/* We successfully added the socket. Send status back. */
	ret = consumer_send_status_msg(sock, ret_code);
	if (ret < 0) {
		/* Somehow, the session daemon is not responding anymore. */
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_FATAL);
		goto error_nosignal;
	}

	/*
	 * Add relayd socket pair to consumer data hashtable. If object already
	 * exists or on error, the function gracefully returns.
	 */
	add_relayd(relayd);

	/* All good! */
	return 0;

error:
	if (consumer_send_status_msg(sock, ret_code) < 0) {
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_FATAL);
	}

error_nosignal:
	/* Close received socket if valid. */
	if (fd >= 0) {
		if (close(fd)) {
			PERROR("close received socket");
		}
	}

	if (relayd_created) {
		free(relayd);
	}

	return ret;
}
/*
 * Try to lock the stream mutex.
 *
 * On success, 1 is returned else 0 indicating that the mutex is NOT locked.
 */
static int stream_try_lock(struct lttng_consumer_stream *stream)
{
	int ret;

	assert(stream);

	/*
	 * Try to lock the stream mutex. On failure, we know that the stream is
	 * being used elsewhere hence there is data still being extracted.
	 */
	ret = pthread_mutex_trylock(&stream->lock);
	if (ret) {
		/* For both EBUSY and EINVAL error, the mutex is NOT locked. */
		return 0;
	}

	return 1;
}
/*
 * Search for a relayd associated to the session id and return the reference.
 *
 * A rcu read side lock MUST be acquired before calling this function and held
 * until the relayd object is no longer necessary.
 */
static struct consumer_relayd_sock_pair *find_relayd_by_session_id(uint64_t id)
{
	struct lttng_ht_iter iter;
	struct consumer_relayd_sock_pair *relayd = NULL;

	/* Iterate over all relayd since they are indexed by net_seq_idx. */
	cds_lfht_for_each_entry(consumer_data.relayd_ht->ht, &iter.iter, relayd,
			node.node) {
		/*
		 * Check by sessiond id which is unique here where the relayd session
		 * id might not be when having multiple relayd.
		 */
		if (relayd->sessiond_session_id == id) {
			/* Found the relayd. There can be only one per id. */
			return relayd;
		}
	}

	return NULL;
}
/*
 * Check whether, for a given session id, there is still data needed to be
 * extracted out.
 *
 * Return 1 if data is pending or else 0 meaning ready to be read.
 */
int consumer_data_pending(uint64_t id)
{
	int ret;
	struct lttng_ht_iter iter;
	struct lttng_ht *ht;
	struct lttng_consumer_stream *stream;
	struct consumer_relayd_sock_pair *relayd = NULL;
	int (*data_pending)(struct lttng_consumer_stream *);

	DBG("Consumer data pending command on session id %" PRIu64, id);

	rcu_read_lock();
	pthread_mutex_lock(&consumer_data.lock);

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		data_pending = lttng_kconsumer_data_pending;
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		data_pending = lttng_ustconsumer_data_pending;
		break;
	default:
		ERR("Unknown consumer data type");
		assert(0);
	}

	/* Ease our life a bit */
	ht = consumer_data.stream_list_ht;

	relayd = find_relayd_by_session_id(id);
	if (relayd) {
		/* Send init command for data pending. */
		pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		ret = relayd_begin_data_pending(&relayd->control_sock,
				relayd->relayd_session_id);
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		if (ret < 0) {
			/* Communication error with the relayd, so assume no data pending. */
			goto data_not_pending;
		}
	}

	cds_lfht_for_each_entry_duplicate(ht->ht,
			ht->hash_fct(&id, lttng_ht_seed),
			ht->match_fct, &id,
			&iter.iter, stream, node_session_id.node) {
		/* If this call fails, the stream is being used hence data pending. */
		ret = stream_try_lock(stream);
		if (!ret) {
			goto data_pending;
		}

		/*
		 * A removed node from the hash table indicates that the stream has
		 * been deleted thus having a guarantee that the buffers are closed
		 * on the consumer side. However, data can still be transmitted
		 * over the network so don't skip the relayd check.
		 */
		ret = cds_lfht_is_node_deleted(&stream->node.node);
		if (!ret) {
			/* Check the stream if there is data in the buffers. */
			ret = data_pending(stream);
			if (ret == 1) {
				pthread_mutex_unlock(&stream->lock);
				goto data_pending;
			}
		}

		/* Relayd check */
		if (relayd) {
			pthread_mutex_lock(&relayd->ctrl_sock_mutex);
			if (stream->metadata_flag) {
				ret = relayd_quiescent_control(&relayd->control_sock,
						stream->relayd_stream_id);
			} else {
				ret = relayd_data_pending(&relayd->control_sock,
						stream->relayd_stream_id,
						stream->next_net_seq_num - 1);
			}
			pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
			if (ret == 1) {
				pthread_mutex_unlock(&stream->lock);
				goto data_pending;
			}
		}
		pthread_mutex_unlock(&stream->lock);
	}

	if (relayd) {
		unsigned int is_data_inflight = 0;

		/* Send end command for data pending. */
		pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		ret = relayd_end_data_pending(&relayd->control_sock,
				relayd->relayd_session_id, &is_data_inflight);
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		if (ret < 0) {
			goto data_not_pending;
		}
		if (is_data_inflight) {
			goto data_pending;
		}
	}

	/*
	 * Finding _no_ node in the hash table and no inflight data means that the
	 * stream(s) have been removed thus data is guaranteed to be available for
	 * analysis from the trace files.
	 */

data_not_pending:
	/* Data is available to be read by a viewer. */
	pthread_mutex_unlock(&consumer_data.lock);
	rcu_read_unlock();
	return 0;

data_pending:
	/* Data is still being extracted from buffers. */
	pthread_mutex_unlock(&consumer_data.lock);
	rcu_read_unlock();
	return 1;
}
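
/*
 * This check backs the DATA_PENDING command: the session daemon uses it
 * after a stop to know when every stream of the session has been fully
 * consumed, both locally and on the relayd side, before considering the
 * trace readable.
 */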
/*
 * Send a ret code status message to the sessiond daemon.
 *
 * Return the sendmsg() return value.
 */
int consumer_send_status_msg(int sock, int ret_code)
{
	struct lttcomm_consumer_status_msg msg;

	memset(&msg, 0, sizeof(msg));
	msg.ret_code = ret_code;

	return lttcomm_send_unix_sock(sock, &msg, sizeof(msg));
}
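
/*
 * A minimal sketch of how a command handler is expected to reply, assuming
 * sock is the command socket handled by the sessiond poll thread (the same
 * pattern is used in consumer_add_relayd_socket() above):
 *
 *	ret = consumer_send_status_msg(sock, LTTCOMM_CONSUMERD_SUCCESS);
 *	if (ret < 0) {
 *		// The session daemon is not responding anymore; report fatal.
 *		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_FATAL);
 *	}
 */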
/*
 * Send a channel status message to the sessiond daemon.
 *
 * Return the sendmsg() return value.
 */
int consumer_send_status_channel(int sock,
		struct lttng_consumer_channel *channel)
{
	struct lttcomm_consumer_status_channel msg;

	memset(&msg, 0, sizeof(msg));
	if (!channel) {
		msg.ret_code = LTTCOMM_CONSUMERD_CHANNEL_FAIL;
	} else {
		msg.ret_code = LTTCOMM_CONSUMERD_SUCCESS;
		msg.key = channel->key;
		msg.stream_count = channel->streams.count;
	}

	return lttcomm_send_unix_sock(sock, &msg, sizeof(msg));
}
unsigned long consumer_get_consume_start_pos(unsigned long consumed_pos,
		unsigned long produced_pos, uint64_t nb_packets_per_stream,
		uint64_t max_sb_size)
{
	unsigned long start_pos;

	if (!nb_packets_per_stream) {
		return consumed_pos;	/* Grab everything */
	}
	start_pos = produced_pos - offset_align_floor(produced_pos, max_sb_size);
	start_pos -= max_sb_size * nb_packets_per_stream;
	if ((long) (start_pos - consumed_pos) < 0) {
		return consumed_pos;	/* Grab everything */
	}
	return start_pos;
}
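
/*
 * Worked example, with illustrative values: for max_sb_size = 4096,
 * nb_packets_per_stream = 2, produced_pos = 20480 and consumed_pos = 0, the
 * produced position is already sub-buffer aligned, so start_pos becomes
 * 20480 - 2 * 4096 = 12288: the snapshot starts two packets back from the
 * last fully produced sub-buffer. With consumed_pos = 16384 instead,
 * start_pos - consumed_pos would be negative, so the function returns
 * consumed_pos and everything not yet consumed is grabbed.
 */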