/*
 * Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
 *                      Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *               2012 - David Goulet <dgoulet@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#define _GNU_SOURCE
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <poll.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <unistd.h>

#include <common/common.h>
#include <common/utils.h>
#include <common/compat/poll.h>
#include <common/kernel-ctl/kernel-ctl.h>
#include <common/sessiond-comm/relayd.h>
#include <common/sessiond-comm/sessiond-comm.h>
#include <common/kernel-consumer/kernel-consumer.h>
#include <common/relayd/relayd.h>
#include <common/ust-consumer/ust-consumer.h>

#include "consumer.h"
struct lttng_consumer_global_data consumer_data = {
	.type = LTTNG_CONSUMER_UNKNOWN,
};

/* Timeout parameter, to control the polling thread grace period. */
int consumer_poll_timeout = -1;

/*
 * Flag to inform the polling thread to quit when all fds have hung up.
 * Updated by the consumer_thread_receive_fds when it notices that all fds
 * have hung up. Also updated by the signal handler (consumer_should_exit()).
 * Read by the polling threads.
 */
volatile int consumer_quit = 0;

/*
 * The following two hash tables are visible by all threads which are separated
 * in different source files.
 *
 * Global hash table containing respectively metadata and data streams. The
 * stream element in this ht should only be updated by the metadata poll thread
 * for the metadata and the data poll thread for the data.
 */
struct lttng_ht *metadata_ht = NULL;
struct lttng_ht *data_ht = NULL;
/*
 * Find a stream. The consumer_data.lock must be locked during this
 * call.
 */
static struct lttng_consumer_stream *consumer_find_stream(int key,
		struct lttng_ht *ht)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_ulong *node;
	struct lttng_consumer_stream *stream = NULL;

	/* Negative keys are lookup failures. */
	if (key < 0) {
		return NULL;
	}

	lttng_ht_lookup(ht, (void *)((unsigned long) key), &iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	if (node != NULL) {
		stream = caa_container_of(node, struct lttng_consumer_stream, node);
	}

	return stream;
}
void consumer_steal_stream_key(int key, struct lttng_ht *ht)
{
	struct lttng_consumer_stream *stream;

	stream = consumer_find_stream(key, ht);
	if (stream) {
		/*
		 * We don't want the lookup to match, but we still need to iterate on
		 * this stream when iterating over the hash table. Just change the
		 * node key.
		 */
		stream->node.key = -1;
	}
}
static struct lttng_consumer_channel *consumer_find_channel(int key)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_ulong *node;
	struct lttng_consumer_channel *channel = NULL;

	/* Negative keys are lookup failures. */
	if (key < 0) {
		return NULL;
	}

	lttng_ht_lookup(consumer_data.channel_ht, (void *)((unsigned long) key),
			&iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	if (node != NULL) {
		channel = caa_container_of(node, struct lttng_consumer_channel, node);
	}

	return channel;
}
static void consumer_steal_channel_key(int key)
{
	struct lttng_consumer_channel *channel;

	channel = consumer_find_channel(key);
	if (channel) {
		/*
		 * We don't want the lookup to match, but we still need to iterate on
		 * this channel when iterating over the hash table. Just change the
		 * node key.
		 */
		channel->node.key = -1;
	}
}
void consumer_free_stream(struct rcu_head *head)
{
	struct lttng_ht_node_ulong *node =
		caa_container_of(head, struct lttng_ht_node_ulong, head);
	struct lttng_consumer_stream *stream =
		caa_container_of(node, struct lttng_consumer_stream, node);

	free(stream);
}
/*
 * RCU protected relayd socket pair free.
 */
static void consumer_rcu_free_relayd(struct rcu_head *head)
{
	struct lttng_ht_node_ulong *node =
		caa_container_of(head, struct lttng_ht_node_ulong, head);
	struct consumer_relayd_sock_pair *relayd =
		caa_container_of(node, struct consumer_relayd_sock_pair, node);

	free(relayd);
}
/*
 * Destroy and free relayd socket pair object.
 *
 * This function MUST be called with the consumer_data lock acquired.
 */
static void destroy_relayd(struct consumer_relayd_sock_pair *relayd)
{
	int ret;
	struct lttng_ht_iter iter;

	if (relayd == NULL) {
		return;
	}

	DBG("Consumer destroy and close relayd socket pair");

	iter.iter.node = &relayd->node.node;
	ret = lttng_ht_del(consumer_data.relayd_ht, &iter);
	if (ret != 0) {
		/* We assume the relayd was already destroyed. */
		return;
	}

	/* Close all sockets. */
	pthread_mutex_lock(&relayd->ctrl_sock_mutex);
	(void) relayd_close(&relayd->control_sock);
	pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
	(void) relayd_close(&relayd->data_sock);

	/* RCU free() call. */
	call_rcu(&relayd->node.head, consumer_rcu_free_relayd);
}
/*
 * Flag a relayd socket pair for destruction. Destroy it if the refcount
 * reaches zero.
 *
 * RCU read side lock MUST be acquired before calling this function.
 */
void consumer_flag_relayd_for_destroy(struct consumer_relayd_sock_pair *relayd)
{
	/* Set destroy flag for this object. */
	uatomic_set(&relayd->destroy_flag, 1);

	/* Destroy the relayd if refcount is 0. */
	if (uatomic_read(&relayd->refcount) == 0) {
		destroy_relayd(relayd);
	}
}
/*
 * Remove a stream from the global list protected by a mutex. This
 * function is also responsible for freeing its data structures.
 */
void consumer_del_stream(struct lttng_consumer_stream *stream,
		struct lttng_ht *ht)
{
	int ret;
	struct lttng_ht_iter iter;
	struct lttng_consumer_channel *free_chan = NULL;
	struct consumer_relayd_sock_pair *relayd;

	if (ht == NULL) {
		/* Means the stream was allocated but not successfully added. */
		goto free_stream;
	}

	pthread_mutex_lock(&consumer_data.lock);

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		if (stream->mmap_base != NULL) {
			ret = munmap(stream->mmap_base, stream->mmap_len);
			if (ret != 0) {
				PERROR("munmap");
			}
		}
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		lttng_ustconsumer_del_stream(stream);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
	}

	rcu_read_lock();
	iter.iter.node = &stream->node.node;
	ret = lttng_ht_del(ht, &iter);
	assert(!ret);

	assert(consumer_data.stream_count > 0);
	consumer_data.stream_count--;

	if (stream->out_fd >= 0) {
		ret = close(stream->out_fd);
	}
	if (stream->wait_fd >= 0 && !stream->wait_fd_is_copy) {
		ret = close(stream->wait_fd);
	}
	if (stream->shm_fd >= 0 && stream->wait_fd != stream->shm_fd) {
		ret = close(stream->shm_fd);
	}

	/* Check and cleanup relayd. */
	relayd = consumer_find_relayd(stream->net_seq_idx);
	if (relayd != NULL) {
		uatomic_dec(&relayd->refcount);
		assert(uatomic_read(&relayd->refcount) >= 0);

		/* Closing streams requires to lock the control socket. */
		pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		ret = relayd_send_close_stream(&relayd->control_sock,
				stream->relayd_stream_id,
				stream->next_net_seq_num - 1);
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		if (ret < 0) {
			DBG("Unable to close stream on the relayd. Continuing");
			/*
			 * Continue here. There is nothing we can do for the relayd.
			 * Chances are that the relayd has closed the socket so we just
			 * continue cleaning up.
			 */
		}

		/* Both conditions are met, we destroy the relayd. */
		if (uatomic_read(&relayd->refcount) == 0 &&
				uatomic_read(&relayd->destroy_flag)) {
			destroy_relayd(relayd);
		}
	}
	rcu_read_unlock();

	uatomic_dec(&stream->chan->refcount);
	if (!uatomic_read(&stream->chan->refcount)
			&& !uatomic_read(&stream->chan->nb_init_streams)) {
		free_chan = stream->chan;
	}

	consumer_data.need_update = 1;
	pthread_mutex_unlock(&consumer_data.lock);

	if (free_chan) {
		consumer_del_channel(free_chan);
	}

free_stream:
	call_rcu(&stream->node.head, consumer_free_stream);
}
struct lttng_consumer_stream *consumer_allocate_stream(
		int channel_key, int stream_key,
		int shm_fd, int wait_fd,
		enum lttng_consumer_stream_state state,
		uint64_t mmap_len,
		enum lttng_event_output output,
		const char *path_name,
		int net_index, int metadata_flag,
		int *alloc_ret)
{
	struct lttng_consumer_stream *stream;

	stream = zmalloc(sizeof(*stream));
	if (stream == NULL) {
		PERROR("malloc struct lttng_consumer_stream");
		*alloc_ret = -ENOMEM;
		goto end;
	}

	/*
	 * Get stream's channel reference. Needed when adding the stream to the
	 * hash table.
	 */
	stream->chan = consumer_find_channel(channel_key);
	if (!stream->chan) {
		*alloc_ret = -ENOENT;
		ERR("Unable to find channel for stream %d", stream_key);
		goto error;
	}

	stream->key = stream_key;
	stream->shm_fd = shm_fd;
	stream->wait_fd = wait_fd;
	stream->out_fd_offset = 0;
	stream->state = state;
	stream->mmap_len = mmap_len;
	stream->mmap_base = NULL;
	stream->output = output;
	stream->net_seq_idx = net_index;
	stream->metadata_flag = metadata_flag;
	strncpy(stream->path_name, path_name, sizeof(stream->path_name));
	stream->path_name[sizeof(stream->path_name) - 1] = '\0';

	/*
	 * Index differently the metadata node because the thread is using an
	 * internal hash table to match streams in the metadata_ht to the epoll set
	 * file descriptor.
	 */
	if (metadata_flag) {
		lttng_ht_node_init_ulong(&stream->node, stream->wait_fd);
	} else {
		lttng_ht_node_init_ulong(&stream->node, stream->key);
	}

	/*
	 * The cpu number is needed before using any ustctl_* actions. Ignored for
	 * the kernel so the value does not matter.
	 */
	pthread_mutex_lock(&consumer_data.lock);
	stream->cpu = stream->chan->cpucount++;
	pthread_mutex_unlock(&consumer_data.lock);

	DBG3("Allocated stream %s (key %d, shm_fd %d, wait_fd %d, mmap_len %llu,"
			" out_fd %d, net_seq_idx %d)", stream->path_name, stream->key,
			stream->shm_fd, stream->wait_fd,
			(unsigned long long) stream->mmap_len, stream->out_fd,
			stream->net_seq_idx);

	return stream;

error:
	free(stream);
end:
	return NULL;
}
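/*
 * Illustrative sketch (not part of the original file): a typical allocation
 * of a data stream, matching the signature as reconstructed above. All
 * argument values and variable names below are hypothetical placeholders.
 *
 *	int alloc_ret;
 *	struct lttng_consumer_stream *stream;
 *
 *	stream = consumer_allocate_stream(channel_key, stream_key, shm_fd,
 *			wait_fd, LTTNG_CONSUMER_ACTIVE_STREAM, mmap_len,
 *			LTTNG_EVENT_MMAP, path, net_index, 0, &alloc_ret);
 *	if (stream == NULL) {
 *		// alloc_ret holds -ENOMEM or -ENOENT
 *	}
 */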
/*
 * Add a stream to the global list protected by a mutex.
 */
static int consumer_add_stream(struct lttng_consumer_stream *stream,
		struct lttng_ht *ht)
{
	int ret = 0;
	struct consumer_relayd_sock_pair *relayd;

	DBG3("Adding consumer stream %d", stream->key);

	pthread_mutex_lock(&consumer_data.lock);
	rcu_read_lock();

	/* Steal stream identifier to avoid having streams with the same key. */
	consumer_steal_stream_key(stream->key, ht);

	lttng_ht_add_unique_ulong(ht, &stream->node);

	/* Check and cleanup relayd. */
	relayd = consumer_find_relayd(stream->net_seq_idx);
	if (relayd != NULL) {
		uatomic_inc(&relayd->refcount);
	}

	/* Update channel refcount once added without error(s). */
	uatomic_inc(&stream->chan->refcount);

	/*
	 * When nb_init_streams reaches 0, we don't need to trigger any action in
	 * terms of destroying the associated channel, because the action that
	 * causes the count to become 0 also causes a stream to be added. The
	 * channel deletion will thus be triggered by the following removal of this
	 * stream.
	 */
	if (uatomic_read(&stream->chan->nb_init_streams) > 0) {
		uatomic_dec(&stream->chan->nb_init_streams);
	}

	/* Update consumer data once the node is inserted. */
	consumer_data.stream_count++;
	consumer_data.need_update = 1;

	rcu_read_unlock();
	pthread_mutex_unlock(&consumer_data.lock);

	return ret;
}
/*
 * Add relayd socket to global consumer data hashtable. RCU read side lock MUST
 * be acquired before calling this.
 */
static int add_relayd(struct consumer_relayd_sock_pair *relayd)
{
	int ret = 0;
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter iter;

	if (relayd == NULL) {
		ret = -1;
		goto end;
	}

	lttng_ht_lookup(consumer_data.relayd_ht,
			(void *)((unsigned long) relayd->net_seq_idx), &iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	if (node != NULL) {
		/* Relayd already exists. Ignore the insertion. */
		goto end;
	}
	lttng_ht_add_unique_ulong(consumer_data.relayd_ht, &relayd->node);

end:
	return ret;
}
/*
 * Allocate and return a consumer relayd socket.
 */
struct consumer_relayd_sock_pair *consumer_allocate_relayd_sock_pair(
		int net_seq_idx)
{
	struct consumer_relayd_sock_pair *obj = NULL;

	/* Negative net sequence index is a failure. */
	if (net_seq_idx < 0) {
		goto error;
	}

	obj = zmalloc(sizeof(struct consumer_relayd_sock_pair));
	if (obj == NULL) {
		PERROR("zmalloc relayd sock");
		goto error;
	}

	obj->net_seq_idx = net_seq_idx;
	obj->destroy_flag = 0;
	lttng_ht_node_init_ulong(&obj->node, obj->net_seq_idx);
	pthread_mutex_init(&obj->ctrl_sock_mutex, NULL);

error:
	return obj;
}
/*
 * Find a relayd socket pair in the global consumer data.
 *
 * Return the object if found else NULL.
 * RCU read-side lock must be held across this call and while using the
 * returned object.
 */
struct consumer_relayd_sock_pair *consumer_find_relayd(int key)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_ulong *node;
	struct consumer_relayd_sock_pair *relayd = NULL;

	/* Negative keys are lookup failures. */
	if (key < 0) {
		goto error;
	}

	lttng_ht_lookup(consumer_data.relayd_ht, (void *)((unsigned long) key),
			&iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	if (node != NULL) {
		relayd = caa_container_of(node, struct consumer_relayd_sock_pair, node);
	}

error:
	return relayd;
}
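/*
 * Illustrative sketch (not part of the original file): the lookup/refcount
 * pattern callers in this file follow, per the RCU requirement documented
 * above. "stream" is a hypothetical stream pointer.
 *
 *	rcu_read_lock();
 *	relayd = consumer_find_relayd(stream->net_seq_idx);
 *	if (relayd != NULL) {
 *		uatomic_inc(&relayd->refcount);
 *	}
 *	rcu_read_unlock();
 */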
/*
 * Handle stream for relayd transmission if the stream applies for network
 * streaming where the net sequence index is set.
 *
 * Return destination file descriptor or negative value on error.
 */
static int write_relayd_stream_header(struct lttng_consumer_stream *stream,
		size_t data_size, unsigned long padding,
		struct consumer_relayd_sock_pair *relayd)
{
	int outfd = -1, ret;
	struct lttcomm_relayd_data_hdr data_hdr;

	/* Reset data header. */
	memset(&data_hdr, 0, sizeof(data_hdr));

	if (stream->metadata_flag) {
		/* Caller MUST acquire the relayd control socket lock. */
		ret = relayd_send_metadata(&relayd->control_sock, data_size);
		if (ret < 0) {
			goto error;
		}

		/* Metadata are always sent on the control socket. */
		outfd = relayd->control_sock.fd;
	} else {
		/* Set header with stream information. */
		data_hdr.stream_id = htobe64(stream->relayd_stream_id);
		data_hdr.data_size = htobe32(data_size);
		data_hdr.padding_size = htobe32(padding);
		data_hdr.net_seq_num = htobe64(stream->next_net_seq_num++);
		/* Other fields are zeroed previously. */

		ret = relayd_send_data_hdr(&relayd->data_sock, &data_hdr,
				sizeof(data_hdr));
		if (ret < 0) {
			goto error;
		}

		/* Set to go on data socket. */
		outfd = relayd->data_sock.fd;
	}

error:
	return outfd;
}
/*
 * Update a stream according to what we just received.
 */
void consumer_change_stream_state(int stream_key,
		enum lttng_consumer_stream_state state)
{
	struct lttng_consumer_stream *stream;

	pthread_mutex_lock(&consumer_data.lock);
	stream = consumer_find_stream(stream_key, consumer_data.stream_ht);
	if (stream) {
		stream->state = state;
	}
	consumer_data.need_update = 1;
	pthread_mutex_unlock(&consumer_data.lock);
}
void consumer_free_channel(struct rcu_head *head)
{
	struct lttng_ht_node_ulong *node =
		caa_container_of(head, struct lttng_ht_node_ulong, head);
	struct lttng_consumer_channel *channel =
		caa_container_of(node, struct lttng_consumer_channel, node);

	free(channel);
}
/*
 * Remove a channel from the global list protected by a mutex. This
 * function is also responsible for freeing its data structures.
 */
void consumer_del_channel(struct lttng_consumer_channel *channel)
{
	int ret;
	struct lttng_ht_iter iter;

	pthread_mutex_lock(&consumer_data.lock);

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		lttng_ustconsumer_del_channel(channel);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
	}

	rcu_read_lock();
	iter.iter.node = &channel->node.node;
	ret = lttng_ht_del(consumer_data.channel_ht, &iter);
	assert(!ret);
	rcu_read_unlock();

	if (channel->mmap_base != NULL) {
		ret = munmap(channel->mmap_base, channel->mmap_len);
		if (ret != 0) {
			PERROR("munmap");
		}
	}
	if (channel->wait_fd >= 0 && !channel->wait_fd_is_copy) {
		ret = close(channel->wait_fd);
	}
	if (channel->shm_fd >= 0 && channel->wait_fd != channel->shm_fd) {
		ret = close(channel->shm_fd);
	}

	call_rcu(&channel->node.head, consumer_free_channel);

	pthread_mutex_unlock(&consumer_data.lock);
}
struct lttng_consumer_channel *consumer_allocate_channel(
		int channel_key,
		int shm_fd, int wait_fd,
		uint64_t mmap_len,
		uint64_t max_sb_size,
		unsigned int nb_init_streams)
{
	int ret;
	struct lttng_consumer_channel *channel;

	channel = zmalloc(sizeof(*channel));
	if (channel == NULL) {
		PERROR("malloc struct lttng_consumer_channel");
		goto end;
	}

	channel->key = channel_key;
	channel->shm_fd = shm_fd;
	channel->wait_fd = wait_fd;
	channel->mmap_len = mmap_len;
	channel->max_sb_size = max_sb_size;
	channel->refcount = 0;
	channel->nb_init_streams = nb_init_streams;
	lttng_ht_node_init_ulong(&channel->node, channel->key);

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		channel->mmap_base = NULL;
		channel->mmap_len = 0;
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		ret = lttng_ustconsumer_allocate_channel(channel);
		if (ret) {
			free(channel);
			return NULL;
		}
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
	}

	DBG("Allocated channel (key %d, shm_fd %d, wait_fd %d, mmap_len %llu, max_sb_size %llu)",
			channel->key, channel->shm_fd, channel->wait_fd,
			(unsigned long long) channel->mmap_len,
			(unsigned long long) channel->max_sb_size);

end:
	return channel;
}
/*
 * Add a channel to the global list protected by a mutex.
 */
int consumer_add_channel(struct lttng_consumer_channel *channel)
{
	int ret = 0;
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter iter;

	pthread_mutex_lock(&consumer_data.lock);
	/* Steal channel identifier, for UST. */
	consumer_steal_channel_key(channel->key);
	rcu_read_lock();

	lttng_ht_lookup(consumer_data.channel_ht,
			(void *)((unsigned long) channel->key), &iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	if (node != NULL) {
		/* Channel already exists. Ignore the insertion. */
		goto end;
	}

	lttng_ht_add_unique_ulong(consumer_data.channel_ht, &channel->node);

end:
	rcu_read_unlock();
	pthread_mutex_unlock(&consumer_data.lock);

	return ret;
}
/*
 * Allocate the pollfd structure and the local view of the out fds to avoid
 * doing a lookup in the linked list and concurrency issues when writing is
 * needed. Called with consumer_data.lock held.
 *
 * Returns the number of fds in the structures.
 */
static int consumer_update_poll_array(
		struct lttng_consumer_local_data *ctx, struct pollfd **pollfd,
		struct lttng_consumer_stream **local_stream, struct lttng_ht *ht)
{
	int i = 0;
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	DBG("Updating poll fd array");
	rcu_read_lock();
	cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) {
		if (stream->state != LTTNG_CONSUMER_ACTIVE_STREAM) {
			continue;
		}
		DBG("Active FD %d", stream->wait_fd);
		(*pollfd)[i].fd = stream->wait_fd;
		(*pollfd)[i].events = POLLIN | POLLPRI;
		local_stream[i] = stream;
		i++;
	}
	rcu_read_unlock();

	/*
	 * Insert the consumer_data_pipe at the end of the array and don't
	 * increment i so nb_fd is the number of real FD.
	 */
	(*pollfd)[i].fd = ctx->consumer_data_pipe[0];
	(*pollfd)[i].events = POLLIN | POLLPRI;

	return i;
}
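/*
 * Illustrative sketch (not part of the original file): how the returned count
 * is used by the data poll thread below. The extra array slot holds the
 * consumer_data_pipe read end, hence the "+ 1" when calling poll().
 *
 *	nb_fd = consumer_update_poll_array(ctx, &pollfd, local_stream, data_ht);
 *	num_rdy = poll(pollfd, nb_fd + 1, consumer_poll_timeout);
 */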
/*
 * Poll on the should_quit pipe and the command socket.
 *
 * Return -1 on error and the caller should exit, 0 if data is available on
 * the command socket.
 */
int lttng_consumer_poll_socket(struct pollfd *consumer_sockpoll)
{
	int num_rdy;

restart:
	num_rdy = poll(consumer_sockpoll, 2, -1);
	if (num_rdy == -1) {
		/*
		 * Restart interrupted system call.
		 */
		if (errno == EINTR) {
			goto restart;
		}
		PERROR("Poll error");
		return -1;
	}
	if (consumer_sockpoll[0].revents & (POLLIN | POLLPRI)) {
		DBG("consumer_should_quit wake up");
		return -1;
	}

	return 0;
}
/*
 * Set the error socket.
 */
void lttng_consumer_set_error_sock(
		struct lttng_consumer_local_data *ctx, int sock)
{
	ctx->consumer_error_socket = sock;
}
/*
 * Set the command socket path.
 */
void lttng_consumer_set_command_sock_path(
		struct lttng_consumer_local_data *ctx, char *sock)
{
	ctx->consumer_command_sock_path = sock;
}
/*
 * Send return code to the session daemon.
 * If the socket is not defined, we return 0, it is not a fatal error.
 */
int lttng_consumer_send_error(
		struct lttng_consumer_local_data *ctx, int cmd)
{
	if (ctx->consumer_error_socket > 0) {
		return lttcomm_send_unix_sock(ctx->consumer_error_socket, &cmd,
				sizeof(enum lttcomm_sessiond_command));
	}

	return 0;
}
/*
 * Close all the tracefiles and stream fds, should be called when all instances
 * are destroyed.
 */
void lttng_consumer_cleanup(void)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_ulong *node;

	rcu_read_lock();

	/*
	 * close all outfd. Called when there are no more threads running (after
	 * joining on the threads), no need to protect list iteration with mutex.
	 */
	cds_lfht_for_each_entry(consumer_data.stream_ht->ht, &iter.iter, node,
			node) {
		struct lttng_consumer_stream *stream =
			caa_container_of(node, struct lttng_consumer_stream, node);
		consumer_del_stream(stream, consumer_data.stream_ht);
	}

	cds_lfht_for_each_entry(consumer_data.channel_ht->ht, &iter.iter, node,
			node) {
		struct lttng_consumer_channel *channel =
			caa_container_of(node, struct lttng_consumer_channel, node);
		consumer_del_channel(channel);
	}

	rcu_read_unlock();

	lttng_ht_destroy(consumer_data.stream_ht);
	lttng_ht_destroy(consumer_data.channel_ht);
}
/*
 * Called from signal handler.
 */
void lttng_consumer_should_exit(struct lttng_consumer_local_data *ctx)
{
	int ret;

	consumer_quit = 1;
	do {
		ret = write(ctx->consumer_should_quit[1], "4", 1);
	} while (ret < 0 && errno == EINTR);
	if (ret < 0) {
		PERROR("write consumer quit");
	}
}
void lttng_consumer_sync_trace_file(struct lttng_consumer_stream *stream,
		off_t orig_offset)
{
	int outfd = stream->out_fd;

	/*
	 * This does a blocking write-and-wait on any page that belongs to the
	 * subbuffer prior to the one we just wrote.
	 * Don't care about error values, as these are just hints and ways to
	 * limit the amount of page cache used.
	 */
	if (orig_offset < stream->chan->max_sb_size) {
		return;
	}
	lttng_sync_file_range(outfd, orig_offset - stream->chan->max_sb_size,
			stream->chan->max_sb_size,
			SYNC_FILE_RANGE_WAIT_BEFORE
			| SYNC_FILE_RANGE_WRITE
			| SYNC_FILE_RANGE_WAIT_AFTER);
	/*
	 * Give hints to the kernel about how we access the file:
	 * POSIX_FADV_DONTNEED : we won't re-access data in a near future after
	 * we write it.
	 *
	 * We need to call fadvise again after the file grows because the
	 * kernel does not seem to apply fadvise to non-existing parts of the
	 * file.
	 *
	 * Call fadvise _after_ having waited for the page writeback to
	 * complete because the dirty page writeback semantic is not well
	 * defined. So it can be expected to lead to lower throughput in
	 * streaming.
	 */
	posix_fadvise(outfd, orig_offset - stream->chan->max_sb_size,
			stream->chan->max_sb_size, POSIX_FADV_DONTNEED);
}
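/*
 * Worked example (not part of the original file): with max_sb_size = 1 MiB
 * and orig_offset = 4 MiB, the range passed to lttng_sync_file_range() and
 * posix_fadvise() above is [3 MiB, 4 MiB), i.e. the subbuffer written just
 * before the current one; offsets smaller than max_sb_size are skipped
 * entirely.
 */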
/*
 * Initialise the necessary environment:
 * - create a new context
 * - create the poll_pipe
 * - create the should_quit pipe (for signal handler)
 * - create the thread pipe (for splice)
 *
 * Takes a function pointer as argument; this function is called when data is
 * available on a buffer. This function is responsible to do the
 * kernctl_get_next_subbuf, read the data with mmap or splice depending on the
 * buffer configuration and then kernctl_put_next_subbuf at the end.
 *
 * Returns a pointer to the new context or NULL on error.
 */
struct lttng_consumer_local_data *lttng_consumer_create(
		enum lttng_consumer_type type,
		ssize_t (*buffer_ready)(struct lttng_consumer_stream *stream,
			struct lttng_consumer_local_data *ctx),
		int (*recv_channel)(struct lttng_consumer_channel *channel),
		int (*recv_stream)(struct lttng_consumer_stream *stream),
		int (*update_stream)(int stream_key, uint32_t state))
{
	int ret, i;
	struct lttng_consumer_local_data *ctx;

	assert(consumer_data.type == LTTNG_CONSUMER_UNKNOWN ||
			consumer_data.type == type);
	consumer_data.type = type;

	ctx = zmalloc(sizeof(struct lttng_consumer_local_data));
	if (ctx == NULL) {
		PERROR("allocating context");
		goto error;
	}

	ctx->consumer_error_socket = -1;
	/* Assign the callbacks. */
	ctx->on_buffer_ready = buffer_ready;
	ctx->on_recv_channel = recv_channel;
	ctx->on_recv_stream = recv_stream;
	ctx->on_update_stream = update_stream;

	ret = pipe(ctx->consumer_data_pipe);
	if (ret < 0) {
		PERROR("Error creating poll pipe");
		goto error_poll_pipe;
	}

	/* Set read end of the pipe to non-blocking. */
	ret = fcntl(ctx->consumer_data_pipe[0], F_SETFL, O_NONBLOCK);
	if (ret < 0) {
		PERROR("fcntl O_NONBLOCK");
		goto error_poll_fcntl;
	}

	/* Set write end of the pipe to non-blocking. */
	ret = fcntl(ctx->consumer_data_pipe[1], F_SETFL, O_NONBLOCK);
	if (ret < 0) {
		PERROR("fcntl O_NONBLOCK");
		goto error_poll_fcntl;
	}

	ret = pipe(ctx->consumer_should_quit);
	if (ret < 0) {
		PERROR("Error creating recv pipe");
		goto error_quit_pipe;
	}

	ret = pipe(ctx->consumer_thread_pipe);
	if (ret < 0) {
		PERROR("Error creating thread pipe");
		goto error_thread_pipe;
	}

	ret = utils_create_pipe(ctx->consumer_metadata_pipe);
	if (ret < 0) {
		goto error_metadata_pipe;
	}

	ret = utils_create_pipe(ctx->consumer_splice_metadata_pipe);
	if (ret < 0) {
		goto error_splice_pipe;
	}

	return ctx;

error_splice_pipe:
	utils_close_pipe(ctx->consumer_metadata_pipe);
error_metadata_pipe:
	utils_close_pipe(ctx->consumer_thread_pipe);
error_thread_pipe:
	for (i = 0; i < 2; i++) {
		int err;

		err = close(ctx->consumer_should_quit[i]);
		if (err) {
			PERROR("close");
		}
	}
error_poll_fcntl:
error_quit_pipe:
	for (i = 0; i < 2; i++) {
		int err;

		err = close(ctx->consumer_data_pipe[i]);
		if (err) {
			PERROR("close");
		}
	}
error_poll_pipe:
	free(ctx);
error:
	return NULL;
}
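/*
 * Illustrative sketch (not part of the original file): how a consumer daemon
 * front-end would typically create its context. The callback names and the
 * error/command socket variables are hypothetical placeholders.
 *
 *	ctx = lttng_consumer_create(LTTNG_CONSUMER_KERNEL,
 *			my_read_subbuffer, my_recv_channel,
 *			my_recv_stream, my_update_stream);
 *	if (ctx == NULL) {
 *		// handle error
 *	}
 *	lttng_consumer_set_error_sock(ctx, error_sock);
 *	lttng_consumer_set_command_sock_path(ctx, cmd_sock_path);
 */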
/*
 * Close all fds associated with the instance and free the context.
 */
void lttng_consumer_destroy(struct lttng_consumer_local_data *ctx)
{
	int ret;

	ret = close(ctx->consumer_error_socket);
	ret = close(ctx->consumer_thread_pipe[0]);
	ret = close(ctx->consumer_thread_pipe[1]);
	ret = close(ctx->consumer_data_pipe[0]);
	ret = close(ctx->consumer_data_pipe[1]);
	ret = close(ctx->consumer_should_quit[0]);
	ret = close(ctx->consumer_should_quit[1]);
	utils_close_pipe(ctx->consumer_splice_metadata_pipe);

	unlink(ctx->consumer_command_sock_path);
	free(ctx);
}
/*
 * Write the metadata stream id on the specified file descriptor.
 */
static int write_relayd_metadata_id(int fd,
		struct lttng_consumer_stream *stream,
		struct consumer_relayd_sock_pair *relayd,
		unsigned long padding)
{
	int ret;
	struct lttcomm_relayd_metadata_payload hdr;

	hdr.stream_id = htobe64(stream->relayd_stream_id);
	hdr.padding_size = htobe32(padding);
	do {
		ret = write(fd, (void *) &hdr, sizeof(hdr));
	} while (ret < 0 && errno == EINTR);
	if (ret < 0) {
		PERROR("write metadata stream id");
		goto end;
	}

	DBG("Metadata stream id %" PRIu64 " with padding %lu written before data",
			stream->relayd_stream_id, padding);

end:
	return ret;
}
/*
 * Mmap the ring buffer, read it and write the data to the tracefile. This is a
 * core function for writing trace buffers to either the local filesystem or
 * the network.
 *
 * Careful review MUST be put if any changes occur!
 *
 * Returns the number of bytes written.
 */
ssize_t lttng_consumer_on_read_subbuffer_mmap(
		struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *stream, unsigned long len,
		unsigned long padding)
{
	unsigned long mmap_offset;
	ssize_t ret = 0, written = 0;
	off_t orig_offset = stream->out_fd_offset;
	/* Default is on the disk. */
	int outfd = stream->out_fd;
	struct consumer_relayd_sock_pair *relayd = NULL;

	/* RCU lock for the relayd pointer. */
	rcu_read_lock();

	/* Flag that the current stream is set for network streaming. */
	if (stream->net_seq_idx != -1) {
		relayd = consumer_find_relayd(stream->net_seq_idx);
		if (relayd == NULL) {
			goto end;
		}
	}

	/* Get the offset inside the fd to mmap. */
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		ret = kernctl_get_mmap_read_offset(stream->wait_fd, &mmap_offset);
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		ret = lttng_ustctl_get_mmap_read_offset(stream->chan->handle,
				stream->buf, &mmap_offset);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
	}
	if (ret != 0) {
		PERROR("tracer ctl get_mmap_read_offset");
		written = ret;
		goto end;
	}

	/* Handle stream on the relayd if the output is on the network. */
	if (relayd) {
		unsigned long netlen = len;

		/*
		 * Lock the control socket for the complete duration of the function
		 * since from this point on we will use the socket.
		 */
		if (stream->metadata_flag) {
			/* Metadata requires the control socket. */
			pthread_mutex_lock(&relayd->ctrl_sock_mutex);
			netlen += sizeof(struct lttcomm_relayd_metadata_payload);
		}

		ret = write_relayd_stream_header(stream, netlen, padding, relayd);
		if (ret >= 0) {
			/* Use the returned socket. */
			outfd = ret;

			/* Write metadata stream id before payload. */
			if (stream->metadata_flag) {
				ret = write_relayd_metadata_id(outfd, stream, relayd, padding);
				if (ret < 0) {
					written = ret;
					goto end;
				}
			}
		}
		/* Else, use the default set before which is the filesystem. */
	} else {
		/* No streaming, we have to set the len with the full padding. */
		len += padding;
	}

	while (len > 0) {
		do {
			ret = write(outfd, stream->mmap_base + mmap_offset, len);
		} while (ret < 0 && errno == EINTR);
		DBG("Consumer mmap write() ret %zd (len %lu)", ret, len);
		if (ret < 0) {
			PERROR("Error in file write");
			if (written == 0) {
				written = ret;
			}
			goto end;
		} else if (ret > len) {
			PERROR("Error in file write (ret %zd > len %lu)", ret, len);
			written += ret;
			goto end;
		} else {
			len -= ret;
			mmap_offset += ret;
		}

		/* This call is useless on a socket so better save a syscall. */
		if (!relayd) {
			/* This won't block, but will start writeout asynchronously. */
			lttng_sync_file_range(outfd, stream->out_fd_offset, ret,
					SYNC_FILE_RANGE_WRITE);
			stream->out_fd_offset += ret;
		}
		written += ret;
	}
	lttng_consumer_sync_trace_file(stream, orig_offset);

end:
	/* Unlock only if ctrl socket used. */
	if (relayd && stream->metadata_flag) {
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
	}

	rcu_read_unlock();
	return written;
}
/*
 * Splice the data from the ring buffer to the tracefile.
 *
 * Returns the number of bytes spliced.
 */
ssize_t lttng_consumer_on_read_subbuffer_splice(
		struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *stream, unsigned long len,
		unsigned long padding)
{
	ssize_t ret = 0, written = 0, ret_splice = 0;
	loff_t offset = 0;
	off_t orig_offset = stream->out_fd_offset;
	int fd = stream->wait_fd;
	/* Default is on the disk. */
	int outfd = stream->out_fd;
	struct consumer_relayd_sock_pair *relayd = NULL;
	int *splice_pipe;

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		/* Not supported for user space tracing. */
		return -ENOSYS;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
	}

	/* RCU lock for the relayd pointer. */
	rcu_read_lock();

	/* Flag that the current stream is set for network streaming. */
	if (stream->net_seq_idx != -1) {
		relayd = consumer_find_relayd(stream->net_seq_idx);
		if (relayd == NULL) {
			goto end;
		}
	}

	/*
	 * Choose right pipe for splice. Metadata and trace data are handled by
	 * different threads hence the use of two pipes in order not to race or
	 * corrupt the written data.
	 */
	if (stream->metadata_flag) {
		splice_pipe = ctx->consumer_splice_metadata_pipe;
	} else {
		splice_pipe = ctx->consumer_thread_pipe;
	}

	/* Write metadata stream id before payload. */
	if (relayd) {
		int total_len = len;

		if (stream->metadata_flag) {
			/*
			 * Lock the control socket for the complete duration of the
			 * function since from this point on we will use the socket.
			 */
			pthread_mutex_lock(&relayd->ctrl_sock_mutex);

			ret = write_relayd_metadata_id(splice_pipe[1], stream, relayd,
					padding);
			if (ret < 0) {
				written = ret;
				goto end;
			}

			total_len += sizeof(struct lttcomm_relayd_metadata_payload);
		}

		ret = write_relayd_stream_header(stream, total_len, padding, relayd);
		if (ret >= 0) {
			/* Use the returned socket. */
			outfd = ret;
		} else {
			ERR("Remote relayd disconnected. Stopping");
			goto end;
		}
	} else {
		/* No streaming, we have to set the len with the full padding. */
		len += padding;
	}

	while (len > 0) {
		DBG("splice chan to pipe offset %lu of len %lu (fd : %d, pipe: %d)",
				(unsigned long)offset, len, fd, splice_pipe[1]);
		ret_splice = splice(fd, &offset, splice_pipe[1], NULL, len,
				SPLICE_F_MOVE | SPLICE_F_MORE);
		DBG("splice chan to pipe, ret %zd", ret_splice);
		if (ret_splice < 0) {
			PERROR("Error in relay splice");
			if (written == 0) {
				written = ret_splice;
			}
			ret = errno;
			goto splice_error;
		}

		/* Handle stream on the relayd if the output is on the network. */
		if (relayd) {
			if (stream->metadata_flag) {
				size_t metadata_payload_size =
					sizeof(struct lttcomm_relayd_metadata_payload);

				/* Update counter to fit the spliced data. */
				ret_splice += metadata_payload_size;
				len += metadata_payload_size;
				/*
				 * We do this so the return value can match the len passed as
				 * argument to this function.
				 */
				written -= metadata_payload_size;
			}
		}

		/* Splice data out. */
		ret_splice = splice(splice_pipe[0], NULL, outfd, NULL,
				ret_splice, SPLICE_F_MOVE | SPLICE_F_MORE);
		DBG("Consumer splice pipe to file, ret %zd", ret_splice);
		if (ret_splice < 0) {
			PERROR("Error in file splice");
			if (written == 0) {
				written = ret_splice;
			}
			ret = errno;
			goto splice_error;
		} else if (ret_splice > len) {
			errno = EINVAL;
			PERROR("Wrote more data than requested %zd (len: %lu)",
					ret_splice, len);
			written += ret_splice;
			ret = errno;
			goto splice_error;
		}
		len -= ret_splice;

		/* This call is useless on a socket so better save a syscall. */
		if (!relayd) {
			/* This won't block, but will start writeout asynchronously. */
			lttng_sync_file_range(outfd, stream->out_fd_offset, ret_splice,
					SYNC_FILE_RANGE_WRITE);
			stream->out_fd_offset += ret_splice;
		}
		written += ret_splice;
	}
	lttng_consumer_sync_trace_file(stream, orig_offset);

	goto end;

splice_error:
	/* Send the appropriate error description to sessiond. */
	switch (ret) {
	case EBADF:
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_EBADF);
		break;
	case EINVAL:
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_EINVAL);
		break;
	case ENOMEM:
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_ENOMEM);
		break;
	case ESPIPE:
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_ESPIPE);
		break;
	}

end:
	if (relayd && stream->metadata_flag) {
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
	}

	rcu_read_unlock();
	return written;
}
/*
 * Take a snapshot for a specific fd.
 *
 * Returns 0 on success, < 0 on error.
 */
int lttng_consumer_take_snapshot(struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *stream)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_take_snapshot(ctx, stream);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_take_snapshot(ctx, stream);
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		return -ENOSYS;
	}
}
/*
 * Get the produced position.
 *
 * Returns 0 on success, < 0 on error.
 */
int lttng_consumer_get_produced_snapshot(
		struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *stream,
		unsigned long *pos)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_get_produced_snapshot(ctx, stream, pos);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_get_produced_snapshot(ctx, stream, pos);
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		return -ENOSYS;
	}
}
int lttng_consumer_recv_cmd(struct lttng_consumer_local_data *ctx,
		int sock, struct pollfd *consumer_sockpoll)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_recv_cmd(ctx, sock, consumer_sockpoll);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_recv_cmd(ctx, sock, consumer_sockpoll);
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		return -ENOSYS;
	}
}
/*
 * Iterate over all streams of the hashtable and free them properly.
 *
 * WARNING: *MUST* be used with data stream only.
 */
static void destroy_data_stream_ht(struct lttng_ht *ht)
{
	int ret;
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	if (ht == NULL) {
		return;
	}

	rcu_read_lock();
	cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) {
		ret = lttng_ht_del(ht, &iter);
		assert(!ret);

		call_rcu(&stream->node.head, consumer_free_stream);
	}
	rcu_read_unlock();

	lttng_ht_destroy(ht);
}
/*
 * Iterate over all streams of the hashtable and free them properly.
 *
 * XXX: Should not be only for metadata stream or else use another name.
 */
static void destroy_stream_ht(struct lttng_ht *ht)
{
	int ret;
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	if (ht == NULL) {
		return;
	}

	rcu_read_lock();
	cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) {
		ret = lttng_ht_del(ht, &iter);
		assert(!ret);

		call_rcu(&stream->node.head, consumer_free_stream);
	}
	rcu_read_unlock();

	lttng_ht_destroy(ht);
}
/*
 * Clean up a metadata stream and free its memory.
 */
void consumer_del_metadata_stream(struct lttng_consumer_stream *stream,
		struct lttng_ht *ht)
{
	int ret;
	struct lttng_ht_iter iter;
	struct lttng_consumer_channel *free_chan = NULL;
	struct consumer_relayd_sock_pair *relayd;

	/*
	 * This call should NEVER receive regular stream. It must always be
	 * metadata stream and this is crucial for data structure synchronization.
	 */
	assert(stream->metadata_flag);

	DBG3("Consumer delete metadata stream %d", stream->wait_fd);

	if (ht == NULL) {
		/* Means the stream was allocated but not successfully added. */
		goto free_stream;
	}

	pthread_mutex_lock(&consumer_data.lock);
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		if (stream->mmap_base != NULL) {
			ret = munmap(stream->mmap_base, stream->mmap_len);
			if (ret != 0) {
				PERROR("munmap metadata stream");
			}
		}
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		lttng_ustconsumer_del_stream(stream);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
	}

	rcu_read_lock();
	iter.iter.node = &stream->node.node;
	ret = lttng_ht_del(ht, &iter);
	assert(!ret);

	if (stream->out_fd >= 0) {
		ret = close(stream->out_fd);
	}
	if (stream->wait_fd >= 0 && !stream->wait_fd_is_copy) {
		ret = close(stream->wait_fd);
	}
	if (stream->shm_fd >= 0 && stream->wait_fd != stream->shm_fd) {
		ret = close(stream->shm_fd);
	}

	/* Check and cleanup relayd. */
	relayd = consumer_find_relayd(stream->net_seq_idx);
	if (relayd != NULL) {
		uatomic_dec(&relayd->refcount);
		assert(uatomic_read(&relayd->refcount) >= 0);

		/* Closing streams requires to lock the control socket. */
		pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		ret = relayd_send_close_stream(&relayd->control_sock,
				stream->relayd_stream_id, stream->next_net_seq_num - 1);
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		if (ret < 0) {
			DBG("Unable to close stream on the relayd. Continuing");
			/*
			 * Continue here. There is nothing we can do for the relayd.
			 * Chances are that the relayd has closed the socket so we just
			 * continue cleaning up.
			 */
		}

		/* Both conditions are met, we destroy the relayd. */
		if (uatomic_read(&relayd->refcount) == 0 &&
				uatomic_read(&relayd->destroy_flag)) {
			destroy_relayd(relayd);
		}
	}
	rcu_read_unlock();

	/* Atomically decrement channel refcount since other threads can use it. */
	uatomic_dec(&stream->chan->refcount);
	if (!uatomic_read(&stream->chan->refcount)
			&& !uatomic_read(&stream->chan->nb_init_streams)) {
		/* Go for channel deletion! */
		free_chan = stream->chan;
	}

	pthread_mutex_unlock(&consumer_data.lock);

	if (free_chan) {
		consumer_del_channel(free_chan);
	}

free_stream:
	call_rcu(&stream->node.head, consumer_free_stream);
}
/*
 * Action done with the metadata stream when adding it to the consumer internal
 * data structures to handle it.
 */
static int consumer_add_metadata_stream(struct lttng_consumer_stream *stream,
		struct lttng_ht *ht)
{
	int ret = 0;
	struct consumer_relayd_sock_pair *relayd;

	DBG3("Adding metadata stream %d to hash table", stream->wait_fd);

	pthread_mutex_lock(&consumer_data.lock);

	/*
	 * From here, refcounts are updated so be _careful_ when returning an error
	 * after this point.
	 */

	rcu_read_lock();
	/* Find relayd and, if one is found, increment refcount. */
	relayd = consumer_find_relayd(stream->net_seq_idx);
	if (relayd != NULL) {
		uatomic_inc(&relayd->refcount);
	}

	/* Update channel refcount once added without error(s). */
	uatomic_inc(&stream->chan->refcount);

	/*
	 * When nb_init_streams reaches 0, we don't need to trigger any action in
	 * terms of destroying the associated channel, because the action that
	 * causes the count to become 0 also causes a stream to be added. The
	 * channel deletion will thus be triggered by the following removal of this
	 * stream.
	 */
	if (uatomic_read(&stream->chan->nb_init_streams) > 0) {
		uatomic_dec(&stream->chan->nb_init_streams);
	}

	/* Steal stream identifier to avoid having streams with the same key. */
	consumer_steal_stream_key(stream->key, ht);

	lttng_ht_add_unique_ulong(ht, &stream->node);
	rcu_read_unlock();

	pthread_mutex_unlock(&consumer_data.lock);
	return ret;
}
/*
 * Thread polls on metadata file descriptors and writes them on disk or to the
 * network.
 */
void *consumer_thread_metadata_poll(void *data)
{
	int ret, i, pollfd;
	uint32_t revents, nb_fd;
	ssize_t len;
	struct lttng_consumer_stream *stream = NULL;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_ulong *node;
	struct lttng_poll_event events;
	struct lttng_consumer_local_data *ctx = data;

	rcu_register_thread();

	DBG("Thread metadata poll started");

	/* Size is set to 1 for the consumer_metadata pipe. */
	ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
	if (ret < 0) {
		ERR("Poll set creation failed");
		goto end;
	}

	ret = lttng_poll_add(&events, ctx->consumer_metadata_pipe[0], LPOLLIN);
	if (ret < 0) {
		goto end;
	}

	/* Main loop. */
	DBG("Metadata main loop started");

	while (1) {
		lttng_poll_reset(&events);

		nb_fd = LTTNG_POLL_GETNB(&events);

		/* Only the metadata pipe is set. */
		if (nb_fd == 0 && consumer_quit == 1) {
			goto end;
		}

restart:
		DBG("Metadata poll wait with %d fd(s)", nb_fd);
		ret = lttng_poll_wait(&events, -1);
		DBG("Metadata event caught in thread");
		if (ret < 0) {
			if (errno == EINTR) {
				ERR("Poll EINTR caught");
				goto restart;
			}
			goto error;
		}

		/* From here, the event is a metadata wait fd. */
		for (i = 0; i < nb_fd; i++) {
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			/* Just don't waste time if no returned events for the fd. */
			if (!revents) {
				continue;
			}

			if (pollfd == ctx->consumer_metadata_pipe[0]) {
				if (revents & (LPOLLERR | LPOLLHUP)) {
					DBG("Metadata thread pipe hung up");
					/*
					 * Remove the pipe from the poll set and continue the loop
					 * since there might be data to consume.
					 */
					lttng_poll_del(&events, ctx->consumer_metadata_pipe[0]);
					close(ctx->consumer_metadata_pipe[0]);
					continue;
				} else if (revents & LPOLLIN) {
					do {
						/* Get the stream pointer received. */
						ret = read(pollfd, &stream, sizeof(stream));
					} while (ret < 0 && errno == EINTR);
					if (ret < 0 ||
							ret < sizeof(struct lttng_consumer_stream *)) {
						PERROR("read metadata stream");
						/*
						 * Let's continue here and hope we can still work
						 * without stopping the consumer. XXX: Should we?
						 */
						continue;
					}

					DBG("Adding metadata stream %d to poll set",
							stream->wait_fd);

					ret = consumer_add_metadata_stream(stream, metadata_ht);
					if (ret) {
						ERR("Unable to add metadata stream");
						/* Stream was not setup properly. Continuing. */
						consumer_del_metadata_stream(stream, NULL);
						continue;
					}

					/* Add metadata stream to the global poll events list. */
					lttng_poll_add(&events, stream->wait_fd,
							LPOLLIN | LPOLLPRI);
				}

				/* Handle other stream. */
				continue;
			}

			rcu_read_lock();
			lttng_ht_lookup(metadata_ht, (void *)((unsigned long) pollfd),
					&iter);
			node = lttng_ht_iter_get_node_ulong(&iter);
			assert(node);

			stream = caa_container_of(node, struct lttng_consumer_stream,
					node);

			/* Check for error event. */
			if (revents & (LPOLLERR | LPOLLHUP)) {
				DBG("Metadata fd %d is hup|err.", pollfd);
				if (!stream->hangup_flush_done
						&& (consumer_data.type == LTTNG_CONSUMER32_UST
							|| consumer_data.type == LTTNG_CONSUMER64_UST)) {
					DBG("Attempting to flush and consume the UST buffers");
					lttng_ustconsumer_on_stream_hangup(stream);

					/* We just flushed the stream now read it. */
					do {
						len = ctx->on_buffer_ready(stream, ctx);
						/*
						 * We don't check the return value here since if we
						 * get a negative len, it means an error occurred
						 * thus we simply remove it from the poll set and
						 * free the stream.
						 */
					} while (len > 0);
				}

				lttng_poll_del(&events, stream->wait_fd);
				/*
				 * This call updates the channel states, closes file
				 * descriptors and securely frees the stream.
				 */
				consumer_del_metadata_stream(stream, metadata_ht);
			} else if (revents & (LPOLLIN | LPOLLPRI)) {
				/* Get the data out of the metadata file descriptor. */
				DBG("Metadata available on fd %d", pollfd);
				assert(stream->wait_fd == pollfd);

				len = ctx->on_buffer_ready(stream, ctx);
				/* It's ok to have an unavailable sub-buffer. */
				if (len < 0 && len != -EAGAIN && len != -ENODATA) {
					rcu_read_unlock();
					goto end;
				} else if (len > 0) {
					stream->data_read = 1;
				}
			}

			/* Release RCU lock for the stream looked up. */
			rcu_read_unlock();
		}
	}

error:
end:
	DBG("Metadata poll thread exiting");
	lttng_poll_clean(&events);

	destroy_stream_ht(metadata_ht);

	rcu_unregister_thread();
	return NULL;
}
/*
 * This thread polls the fds in the set to consume the data and write
 * it to tracefile if necessary.
 */
void *consumer_thread_data_poll(void *data)
{
	int num_rdy, num_hup, high_prio, ret, i;
	struct pollfd *pollfd = NULL;
	/* Local view of the streams. */
	struct lttng_consumer_stream **local_stream = NULL, *new_stream = NULL;
	/* Local view of consumer_data.fds_count. */
	int nb_fd = 0;
	struct lttng_consumer_local_data *ctx = data;
	ssize_t len;

	rcu_register_thread();

	data_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	if (data_ht == NULL) {
		goto end;
	}

	local_stream = zmalloc(sizeof(struct lttng_consumer_stream));

	while (1) {
		high_prio = 0;
		num_hup = 0;

		/*
		 * The fds set has been updated, we need to update our
		 * local array as well.
		 */
		pthread_mutex_lock(&consumer_data.lock);
		if (consumer_data.need_update) {
			if (pollfd != NULL) {
				free(pollfd);
				pollfd = NULL;
			}
			if (local_stream != NULL) {
				free(local_stream);
				local_stream = NULL;
			}

			/* Allocate for all fds + 1 for the consumer_data_pipe. */
			pollfd = zmalloc((consumer_data.stream_count + 1) * sizeof(struct pollfd));
			if (pollfd == NULL) {
				PERROR("pollfd malloc");
				pthread_mutex_unlock(&consumer_data.lock);
				goto end;
			}

			/* Allocate for all fds + 1 for the consumer_data_pipe. */
			local_stream = zmalloc((consumer_data.stream_count + 1) *
					sizeof(struct lttng_consumer_stream));
			if (local_stream == NULL) {
				PERROR("local_stream malloc");
				pthread_mutex_unlock(&consumer_data.lock);
				goto end;
			}
			ret = consumer_update_poll_array(ctx, &pollfd, local_stream,
					data_ht);
			if (ret < 0) {
				ERR("Error in allocating pollfd or local_outfds");
				lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_POLL_ERROR);
				pthread_mutex_unlock(&consumer_data.lock);
				goto end;
			}
			nb_fd = ret;
			consumer_data.need_update = 0;
		}
		pthread_mutex_unlock(&consumer_data.lock);

		/* No FDs and consumer_quit, consumer_cleanup the thread. */
		if (nb_fd == 0 && consumer_quit == 1) {
			goto end;
		}
		/* Poll on the array of fds. */
	restart:
		DBG("polling on %d fd", nb_fd + 1);
		num_rdy = poll(pollfd, nb_fd + 1, consumer_poll_timeout);
		DBG("poll num_rdy : %d", num_rdy);
		if (num_rdy == -1) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			PERROR("Poll error");
			lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_POLL_ERROR);
			goto end;
		} else if (num_rdy == 0) {
			DBG("Polling thread timed out");
			goto end;
		}

		/*
		 * If the consumer_data_pipe triggered poll go directly to the
		 * beginning of the loop to update the array. We want to prioritize
		 * array update over low-priority reads.
		 */
		if (pollfd[nb_fd].revents & (POLLIN | POLLPRI)) {
			size_t pipe_readlen;

			DBG("consumer_data_pipe wake up");
			/* Consume 1 byte of pipe data. */
			do {
				pipe_readlen = read(ctx->consumer_data_pipe[0], &new_stream,
						sizeof(new_stream));
			} while (pipe_readlen == -1 && errno == EINTR);

			/*
			 * If the stream is NULL, just ignore it. It's also possible that
			 * the sessiond poll thread changed the consumer_quit state and is
			 * waking us up to test it.
			 */
			if (new_stream == NULL) {
				continue;
			}

			ret = consumer_add_stream(new_stream, data_ht);
			if (ret) {
				ERR("Consumer add stream %d failed. Continuing",
						new_stream->key);
				/*
				 * At this point, if the add_stream fails, it is not in the
				 * hash table thus passing the NULL value here.
				 */
				consumer_del_stream(new_stream, NULL);
			}

			/* Continue to update the local streams and handle prio ones. */
			continue;
		}

		/* Take care of high priority channels first. */
		for (i = 0; i < nb_fd; i++) {
			if (pollfd[i].revents & POLLPRI) {
				DBG("Urgent read on fd %d", pollfd[i].fd);
				high_prio = 1;
				len = ctx->on_buffer_ready(local_stream[i], ctx);
				/* It's ok to have an unavailable sub-buffer. */
				if (len < 0 && len != -EAGAIN && len != -ENODATA) {
					goto end;
				} else if (len > 0) {
					local_stream[i]->data_read = 1;
				}
			}
		}

		/*
		 * If we read high prio channel in this loop, try again
		 * for more high prio data.
		 */
		if (high_prio) {
			continue;
		}

		/* Take care of low priority channels. */
		for (i = 0; i < nb_fd; i++) {
			if ((pollfd[i].revents & POLLIN) ||
					local_stream[i]->hangup_flush_done) {
				DBG("Normal read on fd %d", pollfd[i].fd);
				len = ctx->on_buffer_ready(local_stream[i], ctx);
				/* It's ok to have an unavailable sub-buffer. */
				if (len < 0 && len != -EAGAIN && len != -ENODATA) {
					goto end;
				} else if (len > 0) {
					local_stream[i]->data_read = 1;
				}
			}
		}

		/* Handle hangup and errors. */
		for (i = 0; i < nb_fd; i++) {
			if (!local_stream[i]->hangup_flush_done
					&& (pollfd[i].revents & (POLLHUP | POLLERR | POLLNVAL))
					&& (consumer_data.type == LTTNG_CONSUMER32_UST
						|| consumer_data.type == LTTNG_CONSUMER64_UST)) {
				DBG("fd %d is hup|err|nval. Attempting flush and read.",
						pollfd[i].fd);
				lttng_ustconsumer_on_stream_hangup(local_stream[i]);
				/* Attempt read again, for the data we just flushed. */
				local_stream[i]->data_read = 1;
			}
			/*
			 * If the poll flag is HUP/ERR/NVAL and we have
			 * read no data in this pass, we can remove the
			 * stream from its hash table.
			 */
			if ((pollfd[i].revents & POLLHUP)) {
				DBG("Polling fd %d tells it has hung up.", pollfd[i].fd);
				if (!local_stream[i]->data_read) {
					consumer_del_stream(local_stream[i], data_ht);
					num_hup++;
				}
			} else if (pollfd[i].revents & POLLERR) {
				ERR("Error returned in polling fd %d.", pollfd[i].fd);
				if (!local_stream[i]->data_read) {
					consumer_del_stream(local_stream[i], data_ht);
					num_hup++;
				}
			} else if (pollfd[i].revents & POLLNVAL) {
				ERR("Polling fd %d tells fd is not open.", pollfd[i].fd);
				if (!local_stream[i]->data_read) {
					consumer_del_stream(local_stream[i], data_ht);
					num_hup++;
				}
			}
			local_stream[i]->data_read = 0;
		}
	}
end:
	DBG("polling thread exiting");
	if (pollfd != NULL) {
		free(pollfd);
		pollfd = NULL;
	}
	if (local_stream != NULL) {
		free(local_stream);
		local_stream = NULL;
	}

	/*
	 * Close the write side of the pipe so epoll_wait() in
	 * consumer_thread_metadata_poll can catch it. The thread is monitoring the
	 * read side of the pipe. If we close them both, epoll_wait strangely does
	 * not return and could create an endless wait period if the pipe is the
	 * only tracked fd in the poll set. The thread will take care of closing
	 * the read side.
	 */
	close(ctx->consumer_metadata_pipe[1]);

	destroy_data_stream_ht(data_ht);

	rcu_unregister_thread();
	return NULL;
}
/*
 * This thread listens on the consumerd socket and receives the file
 * descriptors from the session daemon.
 */
void *consumer_thread_sessiond_poll(void *data)
{
	int sock, client_socket, ret;
	/*
	 * Structure to poll for incoming data on communication socket; avoids
	 * making blocking sockets.
	 */
	struct pollfd consumer_sockpoll[2];
	struct lttng_consumer_local_data *ctx = data;

	rcu_register_thread();

	DBG("Creating command socket %s", ctx->consumer_command_sock_path);
	unlink(ctx->consumer_command_sock_path);
	client_socket = lttcomm_create_unix_sock(ctx->consumer_command_sock_path);
	if (client_socket < 0) {
		ERR("Cannot create command socket");
		goto end;
	}

	ret = lttcomm_listen_unix_sock(client_socket);
	if (ret < 0) {
		goto end;
	}

	DBG("Sending ready command to lttng-sessiond");
	ret = lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_COMMAND_SOCK_READY);
	/* Return < 0 on error, but == 0 is not fatal. */
	if (ret < 0) {
		ERR("Error sending ready command to lttng-sessiond");
		goto end;
	}

	ret = fcntl(client_socket, F_SETFL, O_NONBLOCK);
	if (ret < 0) {
		PERROR("fcntl O_NONBLOCK");
		goto end;
	}

	/* Prepare the FDs to poll: the client socket and the should_quit pipe. */
	consumer_sockpoll[0].fd = ctx->consumer_should_quit[0];
	consumer_sockpoll[0].events = POLLIN | POLLPRI;
	consumer_sockpoll[1].fd = client_socket;
	consumer_sockpoll[1].events = POLLIN | POLLPRI;

	if (lttng_consumer_poll_socket(consumer_sockpoll) < 0) {
		goto end;
	}
	DBG("Connection on client_socket");

	/* Blocking call, waiting for transmission. */
	sock = lttcomm_accept_unix_sock(client_socket);
	if (sock <= 0) {
		goto end;
	}

	ret = fcntl(sock, F_SETFL, O_NONBLOCK);
	if (ret < 0) {
		PERROR("fcntl O_NONBLOCK");
		goto end;
	}

	/* Update the polling structure to poll on the established socket. */
	consumer_sockpoll[1].fd = sock;
	consumer_sockpoll[1].events = POLLIN | POLLPRI;

	while (1) {
		if (lttng_consumer_poll_socket(consumer_sockpoll) < 0) {
			goto end;
		}
		DBG("Incoming command on sock");
		ret = lttng_consumer_recv_cmd(ctx, sock, consumer_sockpoll);
		if (ret == -ENOENT) {
			DBG("Received STOP command");
			goto end;
		}
		if (ret <= 0) {
			/*
			 * This could simply be a session daemon quitting. Don't output
			 * ERR() here.
			 */
			DBG("Communication interrupted on command socket");
			goto end;
		}
		if (consumer_quit) {
			DBG("consumer_thread_receive_fds received quit from signal");
			goto end;
		}
		DBG("received fds on sock");
	}
end:
	DBG("consumer_thread_receive_fds exiting");

	/*
	 * When all fds have hung up, the polling thread
	 * should exit cleanly.
	 */
	consumer_quit = 1;

	/*
	 * 2s of grace period, if no polling events occur during
	 * this period, the polling thread will exit even if there
	 * are still open FDs (should not happen, but safety mechanism).
	 */
	consumer_poll_timeout = LTTNG_CONSUMER_POLL_TIMEOUT;

	/*
	 * Notify the data poll thread to poll back again and test the
	 * consumer_quit state to quit gracefully.
	 */
	{
		struct lttng_consumer_stream *null_stream = NULL;

		do {
			ret = write(ctx->consumer_data_pipe[1], &null_stream,
					sizeof(null_stream));
		} while (ret < 0 && errno == EINTR);
	}
	rcu_unregister_thread();
	return NULL;
}
ssize_t lttng_consumer_read_subbuffer(struct lttng_consumer_stream *stream,
		struct lttng_consumer_local_data *ctx)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_read_subbuffer(stream, ctx);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_read_subbuffer(stream, ctx);
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		return -ENOSYS;
	}
}
int lttng_consumer_on_recv_stream(struct lttng_consumer_stream *stream)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_on_recv_stream(stream);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_on_recv_stream(stream);
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		return -ENOSYS;
	}
}
/*
 * Allocate and set consumer data hash tables.
 */
void lttng_consumer_init(void)
{
	consumer_data.stream_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	consumer_data.channel_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	consumer_data.relayd_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);

	metadata_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	assert(metadata_ht);
	data_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	assert(data_ht);
}
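/*
 * Illustrative sketch (not part of the original file): after
 * lttng_consumer_init() and lttng_consumer_create(), a consumer front-end
 * typically spawns the three threads defined in this file. Error handling
 * is omitted and variable names are hypothetical.
 *
 *	pthread_t metadata_thread, data_thread, sessiond_thread;
 *
 *	pthread_create(&metadata_thread, NULL, consumer_thread_metadata_poll, ctx);
 *	pthread_create(&data_thread, NULL, consumer_thread_data_poll, ctx);
 *	pthread_create(&sessiond_thread, NULL, consumer_thread_sessiond_poll, ctx);
 */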
/*
 * Process the ADD_RELAYD command received by a consumer.
 *
 * This will create a relayd socket pair and add it to the relayd hash table.
 * The caller MUST acquire a RCU read side lock before calling it.
 */
int consumer_add_relayd_socket(int net_seq_idx, int sock_type,
		struct lttng_consumer_local_data *ctx, int sock,
		struct pollfd *consumer_sockpoll, struct lttcomm_sock *relayd_sock)
{
	int fd = -1, ret = -1;
	struct consumer_relayd_sock_pair *relayd;

	DBG("Consumer adding relayd socket (idx: %d)", net_seq_idx);

	/* Get relayd reference if exists. */
	relayd = consumer_find_relayd(net_seq_idx);
	if (relayd == NULL) {
		/* Not found. Allocate one. */
		relayd = consumer_allocate_relayd_sock_pair(net_seq_idx);
		if (relayd == NULL) {
			lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR);
			goto error;
		}
	}

	/* Poll on consumer socket. */
	if (lttng_consumer_poll_socket(consumer_sockpoll) < 0) {
		ret = -EINTR;
		goto error;
	}

	/* Get relayd socket from session daemon. */
	ret = lttcomm_recv_fds_unix_sock(sock, &fd, 1);
	if (ret != sizeof(fd)) {
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_FD);
		ret = -1;
		goto error;
	}

	/* Copy socket information and received FD. */
	switch (sock_type) {
	case LTTNG_STREAM_CONTROL:
		/* Copy received lttcomm socket. */
		lttcomm_copy_sock(&relayd->control_sock, relayd_sock);
		ret = lttcomm_create_sock(&relayd->control_sock);
		if (ret < 0) {
			goto error;
		}

		/* Close the created socket fd which is useless. */
		close(relayd->control_sock.fd);

		/* Assign new file descriptor. */
		relayd->control_sock.fd = fd;
		break;
	case LTTNG_STREAM_DATA:
		/* Copy received lttcomm socket. */
		lttcomm_copy_sock(&relayd->data_sock, relayd_sock);
		ret = lttcomm_create_sock(&relayd->data_sock);
		if (ret < 0) {
			goto error;
		}

		/* Close the created socket fd which is useless. */
		close(relayd->data_sock.fd);

		/* Assign new file descriptor. */
		relayd->data_sock.fd = fd;
		break;
	default:
		ERR("Unknown relayd socket type (%d)", sock_type);
		goto error;
	}

	DBG("Consumer %s socket created successfully with net idx %d (fd: %d)",
			sock_type == LTTNG_STREAM_CONTROL ? "control" : "data",
			relayd->net_seq_idx, fd);

	/*
	 * Add relayd socket pair to consumer data hashtable. If object already
	 * exists or on error, the function gracefully returns.
	 */
	add_relayd(relayd);
	ret = 0;

error:
	return ret;
}