2 * Copyright (C) 2011 EfficiOS Inc.
3 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 * Copyright (C) 2017 Jérémie Galarneau <jeremie.galarneau@efficios.com>
6 * SPDX-License-Identifier: GPL-2.0-only
17 #include <sys/socket.h>
18 #include <sys/types.h>
24 #include <bin/lttng-consumerd/health-consumerd.h>
25 #include <common/common.h>
26 #include <common/kernel-ctl/kernel-ctl.h>
27 #include <common/sessiond-comm/sessiond-comm.h>
28 #include <common/sessiond-comm/relayd.h>
29 #include <common/compat/fcntl.h>
30 #include <common/compat/endian.h>
31 #include <common/pipe.h>
32 #include <common/relayd/relayd.h>
33 #include <common/utils.h>
34 #include <common/consumer/consumer-stream.h>
35 #include <common/index/index.h>
36 #include <common/consumer/consumer-timer.h>
37 #include <common/optional.h>
38 #include <common/buffer-view.h>
39 #include <common/consumer/consumer.h>
40 #include <common/consumer/metadata-bucket.h>
42 #include "kernel-consumer.h"
/*
 * Shared consumer daemon state, defined in the consumerd binary proper.
 * Declared extern here; this translation unit only references them.
 */
extern struct lttng_consumer_global_data consumer_data;
extern int consumer_poll_timeout;
/*
 * Take a snapshot for a specific fd
 *
 * Returns 0 on success, < 0 on error
 */
int lttng_kconsumer_take_snapshot(struct lttng_consumer_stream *stream)
/* NOTE(review): opening brace and the declaration of 'ret' are elided in
 * this view of the file. */
	int infd = stream->wait_fd;

	/* Ask the kernel tracer to snapshot the ring buffer of this stream. */
	ret = kernctl_snapshot(infd);
	/*
	 * -EAGAIN is not an error, it just means that there is no data to
	 * [rest of original comment elided in this view]
	 */
	if (ret != 0 && ret != -EAGAIN) {
		PERROR("Getting sub-buffer snapshot.");
/* NOTE(review): closing braces and return statement elided in this view. */
/*
 * Sample consumed and produced positions for a specific fd.
 *
 * Returns 0 on success, < 0 on error.
 */
int lttng_kconsumer_sample_snapshot_positions(
		struct lttng_consumer_stream *stream)
/* NOTE(review): opening brace (and possibly an assert on 'stream') elided
 * in this view. Thin wrapper around the kernel-ctl sampling ioctl. */
	return kernctl_snapshot_sample_positions(stream->wait_fd);
/*
 * Get the produced position
 *
 * Returns 0 on success, < 0 on error
 */
int lttng_kconsumer_get_produced_snapshot(struct lttng_consumer_stream *stream,
/* NOTE(review): the second parameter (presumably 'unsigned long *pos' —
 * confirm against the header), opening brace, and 'ret' declaration are
 * elided in this view. */
	int infd = stream->wait_fd;

	ret = kernctl_snapshot_get_produced(infd, pos);
		PERROR("kernctl_snapshot_get_produced");
/* NOTE(review): the error-check conditional around the PERROR and the
 * return statement are elided in this view. */
/*
 * Get the consumerd position
 *
 * Returns 0 on success, < 0 on error
 */
int lttng_kconsumer_get_consumed_snapshot(struct lttng_consumer_stream *stream,
/* NOTE(review): the second parameter (presumably 'unsigned long *pos' —
 * confirm against the header), opening brace, and 'ret' declaration are
 * elided in this view. */
	int infd = stream->wait_fd;

	ret = kernctl_snapshot_get_consumed(infd, pos);
		PERROR("kernctl_snapshot_get_consumed");
/* NOTE(review): the error-check conditional around the PERROR and the
 * return statement are elided in this view. */
/*
 * Resolve the base address of the sub-buffer currently open for reading by
 * combining the stream's mmap base with the kernel-reported read offset.
 * NOTE(review): the 'static' qualifier and the out-parameter declaration
 * (presumably 'const char **addr') are elided in this view — confirm.
 */
int get_current_subbuf_addr(struct lttng_consumer_stream *stream,
	unsigned long mmap_offset;
	const char *mmap_base = stream->mmap_base;

	/* Ask the tracer where, inside the mapping, the readable data starts. */
	ret = kernctl_get_mmap_read_offset(stream->wait_fd, &mmap_offset);
		PERROR("Failed to get mmap read offset");
/* NOTE(review): error-check conditional and goto elided in this view. */

	*addr = mmap_base + mmap_offset;
/* NOTE(review): trailing return and closing brace elided in this view. */
/*
 * Take a snapshot of all the stream of a channel
 * RCU read-side lock must be held across this function to ensure existence of
 * [rest of original comment elided in this view]
 *
 * Returns 0 on success, < 0 on error
 *
 * NOTE(review): many original lines (error checks around each 'ret',
 * braces, gotos, the final return) are elided in this view of the file.
 * Only the visible tokens are preserved below; gaps are flagged inline.
 */
static int lttng_kconsumer_snapshot_channel(
		struct lttng_consumer_channel *channel,
		uint64_t key, char *path, uint64_t relayd_id,
		uint64_t nb_packets_per_stream,
		struct lttng_consumer_local_data *ctx)
	struct lttng_consumer_stream *stream;

	DBG("Kernel consumer snapshot channel %" PRIu64, key);

	/* Prevent channel modifications while we perform the snapshot.*/
	pthread_mutex_lock(&channel->lock);

	/* Splice is not supported yet for channel snapshot. */
	if (channel->output != CONSUMER_CHANNEL_MMAP) {
		ERR("Unsupported output type for channel \"%s\": mmap output is required to record a snapshot",

	/* Walk every stream of the channel and snapshot it in turn. */
	cds_list_for_each_entry(stream, &channel->streams.head, send_node) {
		unsigned long consumed_pos, produced_pos;

		health_code_update();

		/*
		 * Lock stream because we are about to change its state.
		 */
		pthread_mutex_lock(&stream->lock);

		assert(channel->trace_chunk);
		if (!lttng_trace_chunk_get(channel->trace_chunk)) {
			/*
			 * Can't happen barring an internal error as the channel
			 * holds a reference to the trace chunk.
			 */
			ERR("Failed to acquire reference to channel's trace chunk");
		assert(!stream->trace_chunk);
		stream->trace_chunk = channel->trace_chunk;

		/*
		 * Assign the received relayd ID so we can use it for streaming. The streams
		 * are not visible to anyone so this is OK to change it.
		 */
		stream->net_seq_idx = relayd_id;
		channel->relayd_id = relayd_id;
		if (relayd_id != (uint64_t) -1ULL) {
			/* Network output: hand the stream over to the relayd. */
			ret = consumer_send_relayd_stream(stream, path);
				ERR("sending stream to relayd");
			/* Local output: create the on-disk output files. */
			ret = consumer_stream_create_output_files(stream,

		DBG("Kernel consumer snapshot stream (%" PRIu64 ")",

		/* Prefer a flush that ignores empty packets, if available. */
		ret = kernctl_buffer_flush_empty(stream->wait_fd);
			/*
			 * Doing a buffer flush which does not take into
			 * account empty packets. This is not perfect
			 * for stream intersection, but required as a
			 * fall-back when "flush_empty" is not
			 * implemented by lttng-modules.
			 */
			ret = kernctl_buffer_flush(stream->wait_fd);
				ERR("Failed to flush kernel stream");

		ret = lttng_kconsumer_take_snapshot(stream);
			ERR("Taking kernel snapshot");

		ret = lttng_kconsumer_get_produced_snapshot(stream, &produced_pos);
			ERR("Produced kernel snapshot position");

		ret = lttng_kconsumer_get_consumed_snapshot(stream, &consumed_pos);
			ERR("Consumerd kernel snapshot position");

		/* Rewind the start position to honor the packet budget. */
		consumed_pos = consumer_get_consume_start_pos(consumed_pos,
				produced_pos, nb_packets_per_stream,
				stream->max_sb_size);

		/* Signed comparison handles position counter wrap-around. */
		while ((long) (consumed_pos - produced_pos) < 0) {
			unsigned long len, padded_len;
			const char *subbuf_addr;
			struct lttng_buffer_view subbuf_view;

			health_code_update();
			DBG("Kernel consumer taking snapshot at pos %lu", consumed_pos);

			ret = kernctl_get_subbuf(stream->wait_fd, &consumed_pos);
				if (ret != -EAGAIN) {
					PERROR("kernctl_get_subbuf snapshot");
				/* -EAGAIN: skip this sub-buffer, count it lost. */
				DBG("Kernel consumer get subbuf failed. Skipping it.");
				consumed_pos += stream->max_sb_size;
				stream->chan->lost_packets++;

			ret = kernctl_get_subbuf_size(stream->wait_fd, &len);
				ERR("Snapshot kernctl_get_subbuf_size");
				goto error_put_subbuf;

			ret = kernctl_get_padded_subbuf_size(stream->wait_fd, &padded_len);
				ERR("Snapshot kernctl_get_padded_subbuf_size");
				goto error_put_subbuf;

			ret = get_current_subbuf_addr(stream, &subbuf_addr);
				goto error_put_subbuf;

			subbuf_view = lttng_buffer_view_init(
					subbuf_addr, 0, padded_len);
			read_len = lttng_consumer_on_read_subbuffer_mmap(
					stream, &subbuf_view,
			/*
			 * We write the padded len in local tracefiles but the data len
			 * when using a relay. Display the error but continue processing
			 * to try to release the subbuffer.
			 */
			if (relayd_id != (uint64_t) -1ULL) {
				if (read_len != len) {
					ERR("Error sending to the relay (ret: %zd != len: %lu)",
				if (read_len != padded_len) {
					ERR("Error writing to tracefile (ret: %zd != len: %lu)",
							read_len, padded_len);

			ret = kernctl_put_subbuf(stream->wait_fd);
				ERR("Snapshot kernctl_put_subbuf");
			consumed_pos += stream->max_sb_size;

		/* Tear down this stream's snapshot output. */
		if (relayd_id == (uint64_t) -1ULL) {
			if (stream->out_fd >= 0) {
				ret = close(stream->out_fd);
					PERROR("Kernel consumer snapshot close out_fd");
			close_relayd_stream(stream);
			stream->net_seq_idx = (uint64_t) -1ULL;
		lttng_trace_chunk_put(stream->trace_chunk);
		stream->trace_chunk = NULL;
		pthread_mutex_unlock(&stream->lock);

/* Error path: release the sub-buffer before unlocking. */
	ret = kernctl_put_subbuf(stream->wait_fd);
		ERR("Snapshot kernctl_put_subbuf error path");
	pthread_mutex_unlock(&stream->lock);
	pthread_mutex_unlock(&channel->lock);
/* NOTE(review): final return and closing brace elided in this view. */
/*
 * Read the whole metadata available for a snapshot.
 * RCU read-side lock must be held across this function to ensure existence of
 * [rest of original comment elided in this view]
 *
 * Returns 0 on success, < 0 on error
 *
 * NOTE(review): error checks, braces, the do-loop opener and the final
 * return are elided in this view; only visible tokens are preserved.
 */
static int lttng_kconsumer_snapshot_metadata(
		struct lttng_consumer_channel *metadata_channel,
		uint64_t key, char *path, uint64_t relayd_id,
		struct lttng_consumer_local_data *ctx)
	int ret, use_relayd = 0;
	struct lttng_consumer_stream *metadata_stream;

	DBG("Kernel consumer snapshot metadata with key %" PRIu64 " at path %s",

	metadata_stream = metadata_channel->metadata_stream;
	assert(metadata_stream);

	/* Take all the appropriate locks hehehe.*/
	metadata_stream->read_subbuffer_ops.lock(metadata_stream);
	assert(metadata_channel->trace_chunk);
	assert(metadata_stream->trace_chunk);

	/* Flag once that we have a valid relayd for the stream. */
	if (relayd_id != (uint64_t) -1ULL) {
		ret = consumer_send_relayd_stream(metadata_stream, path);
		/* Local output path (no relayd). */
		ret = consumer_stream_create_output_files(metadata_stream,

	/* Drain the metadata sub-buffers until nothing is left to read. */
		health_code_update();
		ret_read = lttng_consumer_read_subbuffer(metadata_stream, ctx, true);
			ERR("Kernel snapshot reading metadata subbuffer (ret: %zd)",
	} while (ret_read > 0);

	close_relayd_stream(metadata_stream);
	metadata_stream->net_seq_idx = (uint64_t) -1ULL;

	if (metadata_stream->out_fd >= 0) {
		ret = close(metadata_stream->out_fd);
			PERROR("Kernel consumer snapshot metadata close out_fd");
			/*
			 * Don't go on error here since the snapshot was successful at this
			 * point but somehow the close failed.
			 */
		metadata_stream->out_fd = -1;
	lttng_trace_chunk_put(metadata_stream->trace_chunk);
	metadata_stream->trace_chunk = NULL;

	/* Snapshot metadata streams are transient: destroy after use. */
	metadata_stream->read_subbuffer_ops.unlock(metadata_stream);
	cds_list_del(&metadata_stream->send_node);
	consumer_stream_destroy(metadata_stream, NULL);
	metadata_channel->metadata_stream = NULL;
/* NOTE(review): final return and closing brace elided in this view. */
/*
 * Receive command from session daemon and process it.
 *
 * Return 1 on success else a negative value or 0.
 *
 * NOTE(review): this function is heavily elided in this view of the file.
 * Throughout the body, error-check conditionals, braces, 'break's, gotos,
 * labels and several whole statements are missing; only the visible tokens
 * are preserved below, with gaps flagged where most significant.
 */
int lttng_kconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
		int sock, struct pollfd *consumer_sockpoll)
	enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS;
	struct lttcomm_consumer_msg msg;

	health_code_update();

	/* Read one full command message from the sessiond unix socket. */
	ret = lttcomm_recv_unix_sock(sock, &msg, sizeof(msg));
	if (ret != sizeof(msg)) {
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_CMD);

	health_code_update();

	/* Deprecated command */
	assert(msg.cmd_type != LTTNG_CONSUMER_STOP);

	health_code_update();

	/* relayd needs RCU read-side protection */

	switch (msg.cmd_type) {
	case LTTNG_CONSUMER_ADD_RELAYD_SOCKET:
		uint32_t major = msg.u.relayd_sock.major;
		uint32_t minor = msg.u.relayd_sock.minor;
		enum lttcomm_sock_proto protocol =
				msg.u.relayd_sock.relayd_socket_protocol;

		/* Session daemon status message are handled in the following call. */
		consumer_add_relayd_socket(msg.u.relayd_sock.net_index,
				msg.u.relayd_sock.type, ctx, sock,
				consumer_sockpoll, msg.u.relayd_sock.session_id,
				msg.u.relayd_sock.relayd_session_id, major,
	case LTTNG_CONSUMER_ADD_CHANNEL:
		struct lttng_consumer_channel *new_channel;
		const uint64_t chunk_id = msg.u.channel.chunk_id.value;

		health_code_update();

		/* First send a status message before receiving the fds. */
		ret = consumer_send_status_msg(sock, ret_code);
			/* Somehow, the session daemon is not responding anymore. */

		health_code_update();

		DBG("consumer_add_channel %" PRIu64, msg.u.channel.channel_key);
		new_channel = consumer_allocate_channel(msg.u.channel.channel_key,
				msg.u.channel.session_id,
				msg.u.channel.chunk_id.is_set ?
				msg.u.channel.pathname,
				msg.u.channel.relayd_id, msg.u.channel.output,
				msg.u.channel.tracefile_size,
				msg.u.channel.tracefile_count, 0,
				msg.u.channel.monitor,
				msg.u.channel.live_timer_interval,
				msg.u.channel.is_live,
		if (new_channel == NULL) {
			lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR);
		new_channel->nb_init_stream_left = msg.u.channel.nb_init_streams;
		/* Map the wire output enum onto the consumer's channel output enum. */
		switch (msg.u.channel.output) {
		case LTTNG_EVENT_SPLICE:
			new_channel->output = CONSUMER_CHANNEL_SPLICE;
		case LTTNG_EVENT_MMAP:
			new_channel->output = CONSUMER_CHANNEL_MMAP;
			ERR("Channel output unknown %d", msg.u.channel.output);

		/* Translate and save channel type. */
		switch (msg.u.channel.type) {
		case CONSUMER_CHANNEL_TYPE_DATA:
		case CONSUMER_CHANNEL_TYPE_METADATA:
			new_channel->type = msg.u.channel.type;

		health_code_update();

		/* Give the upper layer a chance to veto/observe the new channel. */
		if (ctx->on_recv_channel != NULL) {
			ret_recv = ctx->on_recv_channel(new_channel);
				ret = consumer_add_channel(new_channel, ctx);
			} else if (ret_recv < 0) {
			ret = consumer_add_channel(new_channel, ctx);
		if (msg.u.channel.type == CONSUMER_CHANNEL_TYPE_DATA && !ret) {
			int monitor_start_ret;

			DBG("Consumer starting monitor timer");
			consumer_timer_live_start(new_channel,
					msg.u.channel.live_timer_interval);
			monitor_start_ret = consumer_timer_monitor_start(
					msg.u.channel.monitor_timer_interval);
			if (monitor_start_ret < 0) {
				ERR("Starting channel monitoring timer failed");

		health_code_update();

		/* If we received an error in add_channel, we need to report it. */
		ret = consumer_send_status_msg(sock, ret);
	case LTTNG_CONSUMER_ADD_STREAM:
		struct lttng_pipe *stream_pipe;
		struct lttng_consumer_stream *new_stream;
		struct lttng_consumer_channel *channel;

		/*
		 * Get stream's channel reference. Needed when adding the stream to the
		 * [rest of original comment elided in this view]
		 */
		channel = consumer_find_channel(msg.u.stream.channel_key);
			/*
			 * We could not find the channel. Can happen if cpu hotplug
			 * happens while tearing down.
			 */
			ERR("Unable to find channel key %" PRIu64, msg.u.stream.channel_key);
			ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;

		health_code_update();

		/* First send a status message before receiving the fds. */
		ret = consumer_send_status_msg(sock, ret_code);
			/* Somehow, the session daemon is not responding anymore. */
			goto error_add_stream_fatal;

		health_code_update();

		if (ret_code != LTTCOMM_CONSUMERD_SUCCESS) {
			/* Channel was not found. */
			goto error_add_stream_nosignal;

		/* Block until the stream fd is ready on the command socket. */
		ret = lttng_consumer_poll_socket(consumer_sockpoll);
			goto error_add_stream_fatal;

		health_code_update();

		/* Get stream file descriptor from socket */
		ret = lttcomm_recv_fds_unix_sock(sock, &fd, 1);
		if (ret != sizeof(fd)) {
			lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_FD);

		health_code_update();

		/*
		 * Send status code to session daemon only if the recv works. If the
		 * above recv() failed, the session daemon is notified through the
		 * error socket and the teardown is eventually done.
		 */
		ret = consumer_send_status_msg(sock, ret_code);
			/* Somehow, the session daemon is not responding anymore. */
			goto error_add_stream_nosignal;

		health_code_update();

		pthread_mutex_lock(&channel->lock);
		new_stream = consumer_stream_create(
				channel->trace_chunk,
		if (new_stream == NULL) {
			lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR);
			pthread_mutex_unlock(&channel->lock);
			goto error_add_stream_nosignal;

		new_stream->wait_fd = fd;
		ret = kernctl_get_max_subbuf_size(new_stream->wait_fd,
				&new_stream->max_sb_size);
			pthread_mutex_unlock(&channel->lock);
			ERR("Failed to get kernel maximal subbuffer size");
			goto error_add_stream_nosignal;

		consumer_stream_update_channel_attributes(new_stream,

		/*
		 * We've just assigned the channel to the stream so increment the
		 * refcount right now. We don't need to increment the refcount for
		 * streams in no monitor because we handle manually the cleanup of
		 * those. It is very important to make sure there is NO prior
		 * consumer_del_stream() calls or else the refcount will be unbalanced.
		 */
		if (channel->monitor) {
			uatomic_inc(&new_stream->chan->refcount);

		/*
		 * The buffer flush is done on the session daemon side for the kernel
		 * so no need for the stream "hangup_flush_done" variable to be
		 * tracked. This is important for a kernel stream since we don't rely
		 * on the flush state of the stream to read data. It's not the case for
		 * user space tracing.
		 */
		new_stream->hangup_flush_done = 0;

		health_code_update();

		pthread_mutex_lock(&new_stream->lock);
		if (ctx->on_recv_stream) {
			ret = ctx->on_recv_stream(new_stream);
				pthread_mutex_unlock(&new_stream->lock);
				pthread_mutex_unlock(&channel->lock);
				consumer_stream_free(new_stream);
				goto error_add_stream_nosignal;

		health_code_update();

		if (new_stream->metadata_flag) {
			channel->metadata_stream = new_stream;

		/* Do not monitor this stream. */
		if (!channel->monitor) {
			DBG("Kernel consumer add stream %s in no monitor mode with "
					"relayd id %" PRIu64, new_stream->name,
					new_stream->net_seq_idx);
			cds_list_add(&new_stream->send_node, &channel->streams.head);
			pthread_mutex_unlock(&new_stream->lock);
			pthread_mutex_unlock(&channel->lock);

		/* Send stream to relayd if the stream has an ID. */
		if (new_stream->net_seq_idx != (uint64_t) -1ULL) {
			ret = consumer_send_relayd_stream(new_stream,
					new_stream->chan->pathname);
				pthread_mutex_unlock(&new_stream->lock);
				pthread_mutex_unlock(&channel->lock);
				consumer_stream_free(new_stream);
				goto error_add_stream_nosignal;

			/*
			 * If adding an extra stream to an already
			 * existing channel (e.g. cpu hotplug), we need
			 * to send the "streams_sent" command to relayd.
			 */
			if (channel->streams_sent_to_relayd) {
				ret = consumer_send_relayd_streams_sent(
						new_stream->net_seq_idx);
					pthread_mutex_unlock(&new_stream->lock);
					pthread_mutex_unlock(&channel->lock);
					goto error_add_stream_nosignal;
		pthread_mutex_unlock(&new_stream->lock);
		pthread_mutex_unlock(&channel->lock);

		/* Get the right pipe where the stream will be sent. */
		if (new_stream->metadata_flag) {
			consumer_add_metadata_stream(new_stream);
			stream_pipe = ctx->consumer_metadata_pipe;
			consumer_add_data_stream(new_stream);
			stream_pipe = ctx->consumer_data_pipe;

		/* Visible to other threads */
		new_stream->globally_visible = 1;

		health_code_update();

		/* Hand the stream to the polling thread via its pipe. */
		ret = lttng_pipe_write(stream_pipe, &new_stream, sizeof(new_stream));
			ERR("Consumer write %s stream to pipe %d",
					new_stream->metadata_flag ? "metadata" : "data",
					lttng_pipe_get_writefd(stream_pipe));
			if (new_stream->metadata_flag) {
				consumer_del_stream_for_metadata(new_stream);
				consumer_del_stream_for_data(new_stream);
			goto error_add_stream_nosignal;

		DBG("Kernel consumer ADD_STREAM %s (fd: %d) %s with relayd id %" PRIu64,
				new_stream->name, fd, new_stream->chan->pathname, new_stream->relayd_stream_id);
error_add_stream_nosignal:
error_add_stream_fatal:
	case LTTNG_CONSUMER_STREAMS_SENT:
		struct lttng_consumer_channel *channel;

		/*
		 * Get stream's channel reference. Needed when adding the stream to the
		 * [rest of original comment elided in this view]
		 */
		channel = consumer_find_channel(msg.u.sent_streams.channel_key);
			/*
			 * We could not find the channel. Can happen if cpu hotplug
			 * happens while tearing down.
			 */
			ERR("Unable to find channel key %" PRIu64,
					msg.u.sent_streams.channel_key);
			ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;

		health_code_update();

		/*
		 * Send status code to session daemon.
		 */
		ret = consumer_send_status_msg(sock, ret_code);
		if (ret < 0 || ret_code != LTTCOMM_CONSUMERD_SUCCESS) {
			/* Somehow, the session daemon is not responding anymore. */
			goto error_streams_sent_nosignal;

		health_code_update();

		/*
		 * We should not send this message if we don't monitor the
		 * streams in this channel.
		 */
		if (!channel->monitor) {
			goto end_error_streams_sent;

		health_code_update();
		/* Send stream to relayd if the stream has an ID. */
		if (msg.u.sent_streams.net_seq_idx != (uint64_t) -1ULL) {
			ret = consumer_send_relayd_streams_sent(
					msg.u.sent_streams.net_seq_idx);
				goto error_streams_sent_nosignal;
			channel->streams_sent_to_relayd = true;
end_error_streams_sent:
error_streams_sent_nosignal:
	case LTTNG_CONSUMER_UPDATE_STREAM:
	case LTTNG_CONSUMER_DESTROY_RELAYD:
		uint64_t index = msg.u.destroy_relayd.net_seq_idx;
		struct consumer_relayd_sock_pair *relayd;

		DBG("Kernel consumer destroying relayd %" PRIu64, index);

		/* Get relayd reference if exists. */
		relayd = consumer_find_relayd(index);
		if (relayd == NULL) {
			DBG("Unable to find relayd %" PRIu64, index);
			ret_code = LTTCOMM_CONSUMERD_RELAYD_FAIL;

		/*
		 * Each relayd socket pair has a refcount of stream attached to it
		 * which tells if the relayd is still active or not depending on the
		 * [rest of original comment elided in this view]
		 * This will set the destroy flag of the relayd object and destroy it
		 * if the refcount reaches zero when called.
		 *
		 * The destroy can happen either here or when a stream fd hangs up.
		 */
		consumer_flag_relayd_for_destroy(relayd);

		health_code_update();

		ret = consumer_send_status_msg(sock, ret_code);
			/* Somehow, the session daemon is not responding anymore. */
	case LTTNG_CONSUMER_DATA_PENDING:
		uint64_t id = msg.u.data_pending.session_id;

		DBG("Kernel consumer data pending command for id %" PRIu64, id);

		ret = consumer_data_pending(id);

		health_code_update();

		/* Send back returned value to session daemon */
		ret = lttcomm_send_unix_sock(sock, &ret, sizeof(ret));
			PERROR("send data pending ret code");
		/*
		 * No need to send back a status message since the data pending
		 * returned value is the response.
		 */
	case LTTNG_CONSUMER_SNAPSHOT_CHANNEL:
		struct lttng_consumer_channel *channel;
		uint64_t key = msg.u.snapshot_channel.key;

		channel = consumer_find_channel(key);
			ERR("Channel %" PRIu64 " not found", key);
			ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
			if (msg.u.snapshot_channel.metadata == 1) {
				ret = lttng_kconsumer_snapshot_metadata(channel, key,
						msg.u.snapshot_channel.pathname,
						msg.u.snapshot_channel.relayd_id, ctx);
					ERR("Snapshot metadata failed");
					ret_code = LTTCOMM_CONSUMERD_SNAPSHOT_FAILED;
				ret = lttng_kconsumer_snapshot_channel(channel, key,
						msg.u.snapshot_channel.pathname,
						msg.u.snapshot_channel.relayd_id,
						msg.u.snapshot_channel.nb_packets_per_stream,
					ERR("Snapshot channel failed");
					ret_code = LTTCOMM_CONSUMERD_SNAPSHOT_FAILED;

		health_code_update();
		ret = consumer_send_status_msg(sock, ret_code);
			/* Somehow, the session daemon is not responding anymore. */
	case LTTNG_CONSUMER_DESTROY_CHANNEL:
		uint64_t key = msg.u.destroy_channel.key;
		struct lttng_consumer_channel *channel;

		channel = consumer_find_channel(key);
			ERR("Kernel consumer destroy channel %" PRIu64 " not found", key);
			ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;

		health_code_update();

		ret = consumer_send_status_msg(sock, ret_code);
			/* Somehow, the session daemon is not responding anymore. */
			goto end_destroy_channel;

		health_code_update();

		/* Stop right now if no channel was found. */
			goto end_destroy_channel;

		/*
		 * This command should ONLY be issued for channel with streams set in
		 * [rest of original comment elided in this view]
		 */
		assert(!channel->monitor);

		/*
		 * The refcount should ALWAYS be 0 in the case of a channel in no
		 * [rest of original comment elided in this view]
		 */
		assert(!uatomic_sub_return(&channel->refcount, 1));

		consumer_del_channel(channel);
end_destroy_channel:
	case LTTNG_CONSUMER_DISCARDED_EVENTS:
		struct lttng_consumer_channel *channel;
		uint64_t id = msg.u.discarded_events.session_id;
		uint64_t key = msg.u.discarded_events.channel_key;

		DBG("Kernel consumer discarded events command for session id %"
				PRIu64 ", channel key %" PRIu64, id, key);

		channel = consumer_find_channel(key);
			ERR("Kernel consumer discarded events channel %"
					PRIu64 " not found", key);
			count = channel->discarded_events;

		health_code_update();

		/* Send back returned value to session daemon */
		ret = lttcomm_send_unix_sock(sock, &count, sizeof(count));
			PERROR("send discarded events");
	case LTTNG_CONSUMER_LOST_PACKETS:
		struct lttng_consumer_channel *channel;
		uint64_t id = msg.u.lost_packets.session_id;
		uint64_t key = msg.u.lost_packets.channel_key;

		DBG("Kernel consumer lost packets command for session id %"
				PRIu64 ", channel key %" PRIu64, id, key);

		channel = consumer_find_channel(key);
			ERR("Kernel consumer lost packets channel %"
					PRIu64 " not found", key);
			count = channel->lost_packets;

		health_code_update();

		/* Send back returned value to session daemon */
		ret = lttcomm_send_unix_sock(sock, &count, sizeof(count));
			PERROR("send lost packets");
	case LTTNG_CONSUMER_SET_CHANNEL_MONITOR_PIPE:
		int channel_monitor_pipe;

		ret_code = LTTCOMM_CONSUMERD_SUCCESS;
		/* Successfully received the command's type. */
		ret = consumer_send_status_msg(sock, ret_code);

		ret = lttcomm_recv_fds_unix_sock(sock, &channel_monitor_pipe,
		if (ret != sizeof(channel_monitor_pipe)) {
			ERR("Failed to receive channel monitor pipe");

		DBG("Received channel monitor pipe (%d)", channel_monitor_pipe);
		ret = consumer_timer_thread_set_channel_monitor_pipe(
				channel_monitor_pipe);
			ret_code = LTTCOMM_CONSUMERD_SUCCESS;
			/* Set the pipe as non-blocking. */
			ret = fcntl(channel_monitor_pipe, F_GETFL, 0);
				PERROR("fcntl get flags of the channel monitoring pipe");
			ret = fcntl(channel_monitor_pipe, F_SETFL,
					flags | O_NONBLOCK);
				PERROR("fcntl set O_NONBLOCK flag of the channel monitoring pipe");
			DBG("Channel monitor pipe set as non-blocking");
			ret_code = LTTCOMM_CONSUMERD_ALREADY_SET;
		ret = consumer_send_status_msg(sock, ret_code);
	case LTTNG_CONSUMER_ROTATE_CHANNEL:
		struct lttng_consumer_channel *channel;
		uint64_t key = msg.u.rotate_channel.key;

		DBG("Consumer rotate channel %" PRIu64, key);

		channel = consumer_find_channel(key);
			ERR("Channel %" PRIu64 " not found", key);
			ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
			/*
			 * Sample the rotate position of all the streams in this channel.
			 */
			ret = lttng_consumer_rotate_channel(channel, key,
					msg.u.rotate_channel.relayd_id,
					msg.u.rotate_channel.metadata,
				ERR("Rotate channel failed");
				ret_code = LTTCOMM_CONSUMERD_ROTATION_FAIL;

			health_code_update();

		ret = consumer_send_status_msg(sock, ret_code);
			/* Somehow, the session daemon is not responding anymore. */
			goto error_rotate_channel;

			/* Rotate the streams that are ready right now. */
			ret = lttng_consumer_rotate_ready_streams(
				ERR("Rotate ready streams failed");
error_rotate_channel:
	case LTTNG_CONSUMER_CLEAR_CHANNEL:
		struct lttng_consumer_channel *channel;
		uint64_t key = msg.u.clear_channel.key;

		channel = consumer_find_channel(key);
			DBG("Channel %" PRIu64 " not found", key);
			ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
			ret = lttng_consumer_clear_channel(channel);
				ERR("Clear channel failed");

		health_code_update();

		ret = consumer_send_status_msg(sock, ret_code);
			/* Somehow, the session daemon is not responding anymore. */
	case LTTNG_CONSUMER_INIT:
		ret_code = lttng_consumer_init_command(ctx,
				msg.u.init.sessiond_uuid);
		health_code_update();
		ret = consumer_send_status_msg(sock, ret_code);
			/* Somehow, the session daemon is not responding anymore. */
	case LTTNG_CONSUMER_CREATE_TRACE_CHUNK:
		const struct lttng_credentials credentials = {
			.uid = msg.u.create_trace_chunk.credentials.value.uid,
			.gid = msg.u.create_trace_chunk.credentials.value.gid,
		const bool is_local_trace =
				!msg.u.create_trace_chunk.relayd_id.is_set;
		const uint64_t relayd_id =
				msg.u.create_trace_chunk.relayd_id.value;
		const char *chunk_override_name =
				*msg.u.create_trace_chunk.override_name ?
				msg.u.create_trace_chunk.override_name :
		struct lttng_directory_handle *chunk_directory_handle = NULL;

		/*
		 * The session daemon will only provide a chunk directory file
		 * descriptor for local traces.
		 */
		if (is_local_trace) {
			/* Acnowledge the reception of the command. */
			ret = consumer_send_status_msg(sock,
					LTTCOMM_CONSUMERD_SUCCESS);
				/* Somehow, the session daemon is not responding anymore. */

			ret = lttcomm_recv_fds_unix_sock(sock, &chunk_dirfd, 1);
			if (ret != sizeof(chunk_dirfd)) {
				ERR("Failed to receive trace chunk directory file descriptor");

			DBG("Received trace chunk directory fd (%d)",
			chunk_directory_handle = lttng_directory_handle_create_from_dirfd(
			if (!chunk_directory_handle) {
				ERR("Failed to initialize chunk directory handle from directory file descriptor");
				if (close(chunk_dirfd)) {
					PERROR("Failed to close chunk directory file descriptor");

		ret_code = lttng_consumer_create_trace_chunk(
				!is_local_trace ? &relayd_id : NULL,
				msg.u.create_trace_chunk.session_id,
				msg.u.create_trace_chunk.chunk_id,
				(time_t) msg.u.create_trace_chunk
						.creation_timestamp,
				chunk_override_name,
				msg.u.create_trace_chunk.credentials.is_set ?
				chunk_directory_handle);
		lttng_directory_handle_put(chunk_directory_handle);
		goto end_msg_sessiond;
	case LTTNG_CONSUMER_CLOSE_TRACE_CHUNK:
		enum lttng_trace_chunk_command_type close_command =
				msg.u.close_trace_chunk.close_command.value;
		const uint64_t relayd_id =
				msg.u.close_trace_chunk.relayd_id.value;
		struct lttcomm_consumer_close_trace_chunk_reply reply;
		char path[LTTNG_PATH_MAX];

		ret_code = lttng_consumer_close_trace_chunk(
				msg.u.close_trace_chunk.relayd_id.is_set ?
				msg.u.close_trace_chunk.session_id,
				msg.u.close_trace_chunk.chunk_id,
				(time_t) msg.u.close_trace_chunk.close_timestamp,
				msg.u.close_trace_chunk.close_command.is_set ?
		reply.ret_code = ret_code;
		reply.path_length = strlen(path) + 1;
		ret = lttcomm_send_unix_sock(sock, &reply, sizeof(reply));
		if (ret != sizeof(reply)) {
		ret = lttcomm_send_unix_sock(sock, path, reply.path_length);
		if (ret != reply.path_length) {
	case LTTNG_CONSUMER_TRACE_CHUNK_EXISTS:
		const uint64_t relayd_id =
				msg.u.trace_chunk_exists.relayd_id.value;

		ret_code = lttng_consumer_trace_chunk_exists(
				msg.u.trace_chunk_exists.relayd_id.is_set ?
				msg.u.trace_chunk_exists.session_id,
				msg.u.trace_chunk_exists.chunk_id);
		goto end_msg_sessiond;
	case LTTNG_CONSUMER_OPEN_CHANNEL_PACKETS:
		const uint64_t key = msg.u.open_channel_packets.key;
		struct lttng_consumer_channel *channel =
				consumer_find_channel(key);
			pthread_mutex_lock(&channel->lock);
			ret_code = lttng_consumer_open_channel_packets(channel);
			pthread_mutex_unlock(&channel->lock);
			WARN("Channel %" PRIu64 " not found", key);
			ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;

		health_code_update();
		goto end_msg_sessiond;

	/*
	 * Return 1 to indicate success since the 0 value can be a socket
	 * shutdown during the recv() or send() call.
	 */
	/* This will issue a consumer stop. */
	/*
	 * The returned value here is not useful since either way we'll return 1 to
	 * the caller because the session daemon socket management is done
	 * elsewhere. Returning a negative code or 0 will shutdown the consumer.
	 */
	ret = consumer_send_status_msg(sock, ret_code);

	health_code_update();
/* NOTE(review): final return and closing brace elided in this view. */
/*
 * Sync metadata meaning request them to the session daemon and snapshot to the
 * metadata thread can consumer them.
 *
 * Metadata stream lock MUST be acquired.
 *
 * NOTE(review): error-check conditionals, gotos, braces and the final
 * return of 'status' are elided in this view; only visible tokens kept.
 */
enum sync_metadata_status lttng_kconsumer_sync_metadata(
		struct lttng_consumer_stream *metadata)
	enum sync_metadata_status status;

	/* Flush the metadata ring buffer before snapshotting it. */
	ret = kernctl_buffer_flush(metadata->wait_fd);
		ERR("Failed to flush kernel stream");
		status = SYNC_METADATA_STATUS_ERROR;

	ret = kernctl_snapshot(metadata->wait_fd);
		if (errno == EAGAIN) {
			/* No new metadata, exit. */
			DBG("Sync metadata, no new kernel metadata");
			status = SYNC_METADATA_STATUS_NO_DATA;
			ERR("Sync metadata, taking kernel snapshot failed.");
			status = SYNC_METADATA_STATUS_ERROR;
	/* Snapshot succeeded: new metadata is available to consume. */
	status = SYNC_METADATA_STATUS_NEW_DATA;
1409 int extract_common_subbuffer_info(struct lttng_consumer_stream
*stream
,
1410 struct stream_subbuffer
*subbuf
)
1414 ret
= kernctl_get_subbuf_size(
1415 stream
->wait_fd
, &subbuf
->info
.data
.subbuf_size
);
1420 ret
= kernctl_get_padded_subbuf_size(
1421 stream
->wait_fd
, &subbuf
->info
.data
.padded_subbuf_size
);
1431 int extract_metadata_subbuffer_info(struct lttng_consumer_stream
*stream
,
1432 struct stream_subbuffer
*subbuf
)
1436 ret
= extract_common_subbuffer_info(stream
, subbuf
);
1441 ret
= kernctl_get_metadata_version(
1442 stream
->wait_fd
, &subbuf
->info
.metadata
.version
);
1452 int extract_data_subbuffer_info(struct lttng_consumer_stream
*stream
,
1453 struct stream_subbuffer
*subbuf
)
1457 ret
= extract_common_subbuffer_info(stream
, subbuf
);
1462 ret
= kernctl_get_packet_size(
1463 stream
->wait_fd
, &subbuf
->info
.data
.packet_size
);
1465 PERROR("Failed to get sub-buffer packet size");
1469 ret
= kernctl_get_content_size(
1470 stream
->wait_fd
, &subbuf
->info
.data
.content_size
);
1472 PERROR("Failed to get sub-buffer content size");
1476 ret
= kernctl_get_timestamp_begin(
1477 stream
->wait_fd
, &subbuf
->info
.data
.timestamp_begin
);
1479 PERROR("Failed to get sub-buffer begin timestamp");
1483 ret
= kernctl_get_timestamp_end(
1484 stream
->wait_fd
, &subbuf
->info
.data
.timestamp_end
);
1486 PERROR("Failed to get sub-buffer end timestamp");
1490 ret
= kernctl_get_events_discarded(
1491 stream
->wait_fd
, &subbuf
->info
.data
.events_discarded
);
1493 PERROR("Failed to get sub-buffer events discarded count");
1497 ret
= kernctl_get_sequence_number(stream
->wait_fd
,
1498 &subbuf
->info
.data
.sequence_number
.value
);
1500 /* May not be supported by older LTTng-modules. */
1501 if (ret
!= -ENOTTY
) {
1502 PERROR("Failed to get sub-buffer sequence number");
1506 subbuf
->info
.data
.sequence_number
.is_set
= true;
1509 ret
= kernctl_get_stream_id(
1510 stream
->wait_fd
, &subbuf
->info
.data
.stream_id
);
1512 PERROR("Failed to get stream id");
1516 ret
= kernctl_get_instance_id(stream
->wait_fd
,
1517 &subbuf
->info
.data
.stream_instance_id
.value
);
1519 /* May not be supported by older LTTng-modules. */
1520 if (ret
!= -ENOTTY
) {
1521 PERROR("Failed to get stream instance id");
1525 subbuf
->info
.data
.stream_instance_id
.is_set
= true;
1532 int get_subbuffer_common(struct lttng_consumer_stream
*stream
,
1533 struct stream_subbuffer
*subbuffer
)
1537 ret
= kernctl_get_next_subbuf(stream
->wait_fd
);
1540 * The caller only expects -ENODATA when there is no data to
1541 * read, but the kernel tracer returns -EAGAIN when there is
1542 * currently no data for a non-finalized stream, and -ENODATA
1543 * when there is no data for a finalized stream. Those can be
1544 * combined into a -ENODATA return value.
1546 if (ret
== -EAGAIN
) {
1553 ret
= stream
->read_subbuffer_ops
.extract_subbuffer_info(
1560 int get_next_subbuffer_splice(struct lttng_consumer_stream
*stream
,
1561 struct stream_subbuffer
*subbuffer
)
1565 ret
= get_subbuffer_common(stream
, subbuffer
);
1570 subbuffer
->buffer
.fd
= stream
->wait_fd
;
1576 int get_next_subbuffer_mmap(struct lttng_consumer_stream
*stream
,
1577 struct stream_subbuffer
*subbuffer
)
1582 ret
= get_subbuffer_common(stream
, subbuffer
);
1587 ret
= get_current_subbuf_addr(stream
, &addr
);
1592 subbuffer
->buffer
.buffer
= lttng_buffer_view_init(
1593 addr
, 0, subbuffer
->info
.data
.padded_subbuf_size
);
1599 int get_next_subbuffer_metadata_check(struct lttng_consumer_stream
*stream
,
1600 struct stream_subbuffer
*subbuffer
)
1606 ret
= kernctl_get_next_subbuf_metadata_check(stream
->wait_fd
,
1612 ret
= stream
->read_subbuffer_ops
.extract_subbuffer_info(
1618 LTTNG_OPTIONAL_SET(&subbuffer
->info
.metadata
.coherent
, coherent
);
1620 ret
= get_current_subbuf_addr(stream
, &addr
);
1625 subbuffer
->buffer
.buffer
= lttng_buffer_view_init(
1626 addr
, 0, subbuffer
->info
.data
.padded_subbuf_size
);
1627 DBG("Got metadata packet with padded_subbuf_size = %lu, coherent = %s",
1628 subbuffer
->info
.metadata
.padded_subbuf_size
,
1629 coherent
? "true" : "false");
1632 * The caller only expects -ENODATA when there is no data to read, but
1633 * the kernel tracer returns -EAGAIN when there is currently no data
1634 * for a non-finalized stream, and -ENODATA when there is no data for a
1635 * finalized stream. Those can be combined into a -ENODATA return value.
1637 if (ret
== -EAGAIN
) {
1645 int put_next_subbuffer(struct lttng_consumer_stream
*stream
,
1646 struct stream_subbuffer
*subbuffer
)
1648 const int ret
= kernctl_put_next_subbuf(stream
->wait_fd
);
1651 if (ret
== -EFAULT
) {
1652 PERROR("Error in unreserving sub buffer");
1653 } else if (ret
== -EIO
) {
1654 /* Should never happen with newer LTTng versions */
1655 PERROR("Reader has been pushed by the writer, last sub-buffer corrupted");
/*
 * Probe whether the kernel tracer supports the "get next sub-buffer with
 * metadata check" operation on `tracer_fd`. The probe performs a real `get`
 * (with a NULL coherency out-parameter); -ENOTTY means the operation is not
 * implemented by the tracer.
 *
 * Returns true when the operation is available, false otherwise.
 */
static
bool is_get_next_check_metadata_available(int tracer_fd)
{
	const int ret = kernctl_get_next_subbuf_metadata_check(tracer_fd, NULL);
	const bool available = ret != -ENOTTY;

	if (ret == 0) {
		/* get succeeded, make sure to put the subbuffer. */
		kernctl_put_subbuf(tracer_fd);
	}

	return available;
}
1677 int signal_metadata(struct lttng_consumer_stream
*stream
,
1678 struct lttng_consumer_local_data
*ctx
)
1680 ASSERT_LOCKED(stream
->metadata_rdv_lock
);
1681 return pthread_cond_broadcast(&stream
->metadata_rdv
) ? -errno
: 0;
1685 int lttng_kconsumer_set_stream_ops(
1686 struct lttng_consumer_stream
*stream
)
1690 if (stream
->metadata_flag
&& stream
->chan
->is_live
) {
1691 DBG("Attempting to enable metadata bucketization for live consumers");
1692 if (is_get_next_check_metadata_available(stream
->wait_fd
)) {
1693 DBG("Kernel tracer supports get_next_subbuffer_metadata_check, metadata will be accumulated until a coherent state is reached");
1694 stream
->read_subbuffer_ops
.get_next_subbuffer
=
1695 get_next_subbuffer_metadata_check
;
1696 ret
= consumer_stream_enable_metadata_bucketization(
1703 * The kernel tracer version is too old to indicate
1704 * when the metadata stream has reached a "coherent"
1705 * (parseable) point.
1707 * This means that a live viewer may see an incoherent
1708 * sequence of metadata and fail to parse it.
1710 WARN("Kernel tracer does not support get_next_subbuffer_metadata_check which may cause live clients to fail to parse the metadata stream");
1711 metadata_bucket_destroy(stream
->metadata_bucket
);
1712 stream
->metadata_bucket
= NULL
;
1715 stream
->read_subbuffer_ops
.on_sleep
= signal_metadata
;
1718 if (!stream
->read_subbuffer_ops
.get_next_subbuffer
) {
1719 if (stream
->chan
->output
== CONSUMER_CHANNEL_MMAP
) {
1720 stream
->read_subbuffer_ops
.get_next_subbuffer
=
1721 get_next_subbuffer_mmap
;
1723 stream
->read_subbuffer_ops
.get_next_subbuffer
=
1724 get_next_subbuffer_splice
;
1728 if (stream
->metadata_flag
) {
1729 stream
->read_subbuffer_ops
.extract_subbuffer_info
=
1730 extract_metadata_subbuffer_info
;
1732 stream
->read_subbuffer_ops
.extract_subbuffer_info
=
1733 extract_data_subbuffer_info
;
1734 if (stream
->chan
->is_live
) {
1735 stream
->read_subbuffer_ops
.send_live_beacon
=
1736 consumer_flush_kernel_index
;
1740 stream
->read_subbuffer_ops
.put_next_subbuffer
= put_next_subbuffer
;
1745 int lttng_kconsumer_on_recv_stream(struct lttng_consumer_stream
*stream
)
1752 * Don't create anything if this is set for streaming or if there is
1753 * no current trace chunk on the parent channel.
1755 if (stream
->net_seq_idx
== (uint64_t) -1ULL && stream
->chan
->monitor
&&
1756 stream
->chan
->trace_chunk
) {
1757 ret
= consumer_stream_create_output_files(stream
, true);
1763 if (stream
->output
== LTTNG_EVENT_MMAP
) {
1764 /* get the len of the mmap region */
1765 unsigned long mmap_len
;
1767 ret
= kernctl_get_mmap_len(stream
->wait_fd
, &mmap_len
);
1769 PERROR("kernctl_get_mmap_len");
1770 goto error_close_fd
;
1772 stream
->mmap_len
= (size_t) mmap_len
;
1774 stream
->mmap_base
= mmap(NULL
, stream
->mmap_len
, PROT_READ
,
1775 MAP_PRIVATE
, stream
->wait_fd
, 0);
1776 if (stream
->mmap_base
== MAP_FAILED
) {
1777 PERROR("Error mmaping");
1779 goto error_close_fd
;
1783 ret
= lttng_kconsumer_set_stream_ops(stream
);
1785 goto error_close_fd
;
1788 /* we return 0 to let the library handle the FD internally */
1792 if (stream
->out_fd
>= 0) {
1795 err
= close(stream
->out_fd
);
1797 stream
->out_fd
= -1;
1804 * Check if data is still being extracted from the buffers for a specific
1805 * stream. Consumer data lock MUST be acquired before calling this function
1806 * and the stream lock.
1808 * Return 1 if the traced data are still getting read else 0 meaning that the
1809 * data is available for trace viewer reading.
1811 int lttng_kconsumer_data_pending(struct lttng_consumer_stream
*stream
)
1817 if (stream
->endpoint_status
!= CONSUMER_ENDPOINT_ACTIVE
) {
1822 ret
= kernctl_get_next_subbuf(stream
->wait_fd
);
1824 /* There is still data so let's put back this subbuffer. */
1825 ret
= kernctl_put_subbuf(stream
->wait_fd
);
1827 ret
= 1; /* Data is pending */
1831 /* Data is NOT pending and ready to be read. */