/*
 * Copyright (C) 2012 - David Goulet <dgoulet@efficios.com>
 * Copyright (C) 2016 - Jérémie Galarneau <jeremie.galarneau@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License, version 2 only, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 51
 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include <urcu/list.h>
#include <urcu/uatomic.h>

#include <common/defaults.h>
#include <common/common.h>
#include <common/sessiond-comm/sessiond-comm.h>
#include <common/relayd/relayd.h>
#include <common/utils.h>
#include <common/compat/string.h>
#include <common/kernel-ctl/kernel-ctl.h>
#include <common/dynamic-buffer.h>
#include <common/buffer-view.h>
#include <lttng/trigger/trigger-internal.h>
#include <lttng/condition/condition.h>
#include <lttng/action/action.h>
#include <lttng/channel.h>
#include <lttng/channel-internal.h>
#include <lttng/rotate-internal.h>
#include <lttng/location-internal.h>
#include <lttng/userspace-probe-internal.h>
#include <common/string-utils/string-utils.h>

#include "health-sessiond.h"
#include "kernel-consumer.h"
#include "lttng-sessiond.h"
#include "lttng-syscall.h"
#include "buffer-registry.h"
#include "notification-thread.h"
#include "notification-thread-commands.h"
#include "rotation-thread.h"
#include "agent-thread.h"
/* Sleep for 100ms between each check for the shm path's deletion. */
#define SESSION_DESTROY_SHM_PATH_CHECK_DELAY_US 100000

static enum lttng_error_code wait_on_path(void *path);
/*
 * Command completion handler that is used by the destroy command
 * when a session that has a non-default shm_path is being destroyed.
 *
 * See comment in cmd_destroy_session() for the rationale.
 */
static struct destroy_completion_handler {
	struct cmd_completion_handler handler;
	char shm_path[member_sizeof(struct ltt_session, shm_path)];
} destroy_completion_handler = {
	.handler = {
		.run = wait_on_path,
		.data = destroy_completion_handler.shm_path
	},
	.shm_path = { 0 },
};

static struct cmd_completion_handler *current_completion_handler;
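/*
 * Note (descriptive, hedged): current_completion_handler is how a command
 * registers post-reply work; the destroy command above stores its handler
 * there, which is presumably run by the client thread once the command's
 * reply has been sent (see the rationale referenced in
 * cmd_destroy_session()).
 */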
/*
 * Used to keep a unique index for each relayd socket created where this value
 * is associated with streams on the consumer so it can match the right relayd
 * to send to. It must be accessed with the relayd_net_seq_idx_lock
 * held.
 */
static pthread_mutex_t relayd_net_seq_idx_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t relayd_net_seq_idx;
static int validate_ust_event_name(const char *);
static int cmd_enable_event_internal(struct ltt_session *session,
		struct lttng_domain *domain,
		char *channel_name, struct lttng_event *event,
		char *filter_expression,
		struct lttng_filter_bytecode *filter,
		struct lttng_event_exclusion *exclusion,
		int wpipe);
/*
 * Create a session path used by list_lttng_sessions for the case that the
 * session consumer is on the network.
 */
static int build_network_session_path(char *dst, size_t size,
		struct ltt_session *session)
{
	int ret, kdata_port, udata_port;
	struct lttng_uri *kuri = NULL, *uuri = NULL, *uri = NULL;
	char tmp_uurl[PATH_MAX], tmp_urls[PATH_MAX];

	memset(tmp_urls, 0, sizeof(tmp_urls));
	memset(tmp_uurl, 0, sizeof(tmp_uurl));

	kdata_port = udata_port = DEFAULT_NETWORK_DATA_PORT;

	if (session->kernel_session && session->kernel_session->consumer) {
		kuri = &session->kernel_session->consumer->dst.net.control;
		kdata_port = session->kernel_session->consumer->dst.net.data.port;
	}

	if (session->ust_session && session->ust_session->consumer) {
		uuri = &session->ust_session->consumer->dst.net.control;
		udata_port = session->ust_session->consumer->dst.net.data.port;
	}

	if (uuri == NULL && kuri == NULL) {
		uri = &session->consumer->dst.net.control;
		kdata_port = session->consumer->dst.net.data.port;
	} else if (kuri && uuri) {
		ret = uri_compare(kuri, uuri);
		if (ret) {
			/* Build uuri URL string */
			ret = uri_to_str_url(uuri, tmp_uurl, sizeof(tmp_uurl));
		}
		uri = kuri;
	} else if (kuri && uuri == NULL) {
		uri = kuri;
	} else if (uuri && kuri == NULL) {
		uri = uuri;
	}

	ret = uri_to_str_url(uri, tmp_urls, sizeof(tmp_urls));

	/*
	 * Do we have a UST url set? If yes, this means we have both kernel and UST
	 * to print.
	 */
	if (*tmp_uurl != '\0') {
		ret = snprintf(dst, size, "[K]: %s [data: %d] -- [U]: %s [data: %d]",
				tmp_urls, kdata_port, tmp_uurl, udata_port);
	} else {
		int dport;

		if (kuri || (!kuri && !uuri)) {
			dport = kdata_port;
		} else {
			/* No kernel URI, use the UST port. */
			dport = udata_port;
		}
		ret = snprintf(dst, size, "%s [data: %d]", tmp_urls, dport);
	}

	return ret;
}
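/*
 * Example (illustrative only, not from the original source): with both
 * domains streaming to the same relayd on its default data port, dst reads
 * something like
 *   "[K]: net://relayd-host [data: 5343] -- [U]: net://relayd-host [data: 5343]"
 * while a single-domain session collapses to the shorter "%s [data: %d]"
 * form built above.
 */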
/*
 * Get run-time attributes if the session has been started (discarded events,
 * lost packets).
 */
static int get_kernel_runtime_stats(struct ltt_session *session,
		struct ltt_kernel_channel *kchan, uint64_t *discarded_events,
		uint64_t *lost_packets)
{
	int ret;

	if (!session->has_been_started) {
		*discarded_events = 0;
		*lost_packets = 0;
		ret = 0;
		goto end;
	}

	ret = consumer_get_discarded_events(session->id, kchan->key,
			session->kernel_session->consumer,
			discarded_events);
	if (ret < 0) {
		goto end;
	}

	ret = consumer_get_lost_packets(session->id, kchan->key,
			session->kernel_session->consumer,
			lost_packets);

end:
	return ret;
}
/*
 * Get run-time attributes if the session has been started (discarded events,
 * lost packets).
 */
static int get_ust_runtime_stats(struct ltt_session *session,
		struct ltt_ust_channel *uchan, uint64_t *discarded_events,
		uint64_t *lost_packets)
{
	int ret;
	struct ltt_ust_session *usess;

	if (!discarded_events || !lost_packets) {
		ret = -1;
		goto end;
	}

	usess = session->ust_session;
	assert(discarded_events);
	assert(lost_packets);

	if (!usess || !session->has_been_started) {
		*discarded_events = 0;
		*lost_packets = 0;
		ret = 0;
		goto end;
	}

	if (usess->buffer_type == LTTNG_BUFFER_PER_UID) {
		ret = ust_app_uid_get_channel_runtime_stats(usess->id,
				&usess->buffer_reg_uid_list,
				usess->consumer, uchan->id,
				uchan->attr.overwrite,
				discarded_events, lost_packets);
	} else if (usess->buffer_type == LTTNG_BUFFER_PER_PID) {
		ret = ust_app_pid_get_channel_runtime_stats(usess,
				uchan, usess->consumer,
				uchan->attr.overwrite,
				discarded_events, lost_packets);
		if (ret < 0) {
			goto end;
		}
		*discarded_events += uchan->per_pid_closed_app_discarded;
		*lost_packets += uchan->per_pid_closed_app_lost;
	} else {
		ERR("Unsupported buffer type");
		ret = -1;
		goto end;
	}

end:
	return ret;
}
/*
 * Fill lttng_channel array of all channels.
 */
static ssize_t list_lttng_channels(enum lttng_domain_type domain,
		struct ltt_session *session, struct lttng_channel *channels,
		struct lttng_channel_extended *chan_exts)
{
	int i = 0, ret = 0;
	struct ltt_kernel_channel *kchan;

	DBG("Listing channels for session %s", session->name);

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
		/* Kernel channels */
		if (session->kernel_session != NULL) {
			cds_list_for_each_entry(kchan,
					&session->kernel_session->channel_list.head, list) {
				uint64_t discarded_events, lost_packets;
				struct lttng_channel_extended *extended;

				extended = (struct lttng_channel_extended *)
						kchan->channel->attr.extended.ptr;

				ret = get_kernel_runtime_stats(session, kchan,
						&discarded_events, &lost_packets);
				if (ret < 0) {
					goto error;
				}

				/* Copy lttng_channel struct to array */
				memcpy(&channels[i], kchan->channel, sizeof(struct lttng_channel));
				channels[i].enabled = kchan->enabled;
				chan_exts[i].discarded_events =
						discarded_events;
				chan_exts[i].lost_packets = lost_packets;
				chan_exts[i].monitor_timer_interval =
						extended->monitor_timer_interval;
				chan_exts[i].blocking_timeout = 0;
				i++;
			}
		}
		break;
	case LTTNG_DOMAIN_UST:
	{
		struct lttng_ht_iter iter;
		struct ltt_ust_channel *uchan;

		cds_lfht_for_each_entry(session->ust_session->domain_global.channels->ht,
				&iter.iter, uchan, node.node) {
			uint64_t discarded_events = 0, lost_packets = 0;

			if (lttng_strncpy(channels[i].name, uchan->name,
					LTTNG_SYMBOL_NAME_LEN)) {
				break;
			}
			channels[i].attr.overwrite = uchan->attr.overwrite;
			channels[i].attr.subbuf_size = uchan->attr.subbuf_size;
			channels[i].attr.num_subbuf = uchan->attr.num_subbuf;
			channels[i].attr.switch_timer_interval =
					uchan->attr.switch_timer_interval;
			channels[i].attr.read_timer_interval =
					uchan->attr.read_timer_interval;
			channels[i].enabled = uchan->enabled;
			channels[i].attr.tracefile_size = uchan->tracefile_size;
			channels[i].attr.tracefile_count = uchan->tracefile_count;

			/*
			 * Map enum lttng_ust_output to enum lttng_event_output.
			 */
			switch (uchan->attr.output) {
			case LTTNG_UST_MMAP:
				channels[i].attr.output = LTTNG_EVENT_MMAP;
				break;
			default:
				/*
				 * LTTNG_UST_MMAP is the only supported UST
				 * output mode.
				 */
				assert(0);
				break;
			}

			chan_exts[i].monitor_timer_interval =
					uchan->monitor_timer_interval;
			chan_exts[i].blocking_timeout =
					uchan->attr.u.s.blocking_timeout;

			ret = get_ust_runtime_stats(session, uchan,
					&discarded_events, &lost_packets);
			if (ret < 0) {
				goto error;
			}
			chan_exts[i].discarded_events = discarded_events;
			chan_exts[i].lost_packets = lost_packets;
			i++;
		}
		break;
	}
	default:
		break;
	}

	return i;

error:
	return -LTTNG_ERR_FATAL;
}
static int increment_extended_len(const char *filter_expression,
		struct lttng_event_exclusion *exclusion,
		const struct lttng_userspace_probe_location *probe_location,
		size_t *extended_len)
{
	int ret = 0;

	*extended_len += sizeof(struct lttcomm_event_extended_header);

	if (filter_expression) {
		*extended_len += strlen(filter_expression) + 1;
	}

	if (exclusion) {
		*extended_len += exclusion->count * LTTNG_SYMBOL_NAME_LEN;
	}

	if (probe_location) {
		ret = lttng_userspace_probe_location_serialize(probe_location,
				NULL, NULL);
		if (ret < 0) {
			goto end;
		}
		*extended_len += ret;
	}

	ret = 0;
end:
	return ret;
}
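/*
 * Layout of one extended-info record, as sized above and written by
 * append_extended_info() below (descriptive sketch):
 *
 *   struct lttcomm_event_extended_header  (filter_len, nb_exclusions,
 *                                          userspace_probe_location_len)
 *   filter expression string              (filter_len bytes, NUL included)
 *   exclusion names                       (nb_exclusions * LTTNG_SYMBOL_NAME_LEN)
 *   serialized userspace probe location   (userspace_probe_location_len bytes)
 */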
static int append_extended_info(const char *filter_expression,
		struct lttng_event_exclusion *exclusion,
		struct lttng_userspace_probe_location *probe_location,
		uint8_t **extended_at)
{
	int ret = 0;
	size_t filter_len = 0;
	size_t nb_exclusions = 0;
	size_t userspace_probe_location_len = 0;
	struct lttng_dynamic_buffer location_buffer;
	struct lttcomm_event_extended_header extended_header;

	if (filter_expression) {
		filter_len = strlen(filter_expression) + 1;
	}

	if (exclusion) {
		nb_exclusions = exclusion->count;
	}

	if (probe_location) {
		lttng_dynamic_buffer_init(&location_buffer);
		ret = lttng_userspace_probe_location_serialize(probe_location,
				&location_buffer, NULL);
		if (ret < 0) {
			goto end;
		}
		userspace_probe_location_len = location_buffer.size;
	}

	/* Set header fields */
	extended_header.filter_len = filter_len;
	extended_header.nb_exclusions = nb_exclusions;
	extended_header.userspace_probe_location_len = userspace_probe_location_len;

	/* Copy header */
	memcpy(*extended_at, &extended_header, sizeof(extended_header));
	*extended_at += sizeof(extended_header);

	/* Copy filter string */
	if (filter_expression) {
		memcpy(*extended_at, filter_expression, filter_len);
		*extended_at += filter_len;
	}

	/* Copy exclusion names */
	if (exclusion) {
		size_t len = nb_exclusions * LTTNG_SYMBOL_NAME_LEN;

		memcpy(*extended_at, &exclusion->names, len);
		*extended_at += len;
	}

	if (probe_location) {
		memcpy(*extended_at, location_buffer.data, location_buffer.size);
		*extended_at += location_buffer.size;
		lttng_dynamic_buffer_reset(&location_buffer);
	}

end:
	return ret;
}
/*
 * Create a list of agent domain events.
 *
 * Return number of events in list on success or else a negative value.
 */
static int list_lttng_agent_events(struct agent *agt,
		struct lttng_event **events, size_t *total_size)
{
	int i = 0, ret;
	unsigned int nb_event = 0;
	struct agent_event *event;
	struct lttng_event *tmp_events = NULL;
	struct lttng_ht_iter iter;
	size_t extended_len = 0;
	uint8_t *extended_at;

	DBG3("Listing agent events");

	nb_event = lttng_ht_get_count(agt->events);

	/* Compute required extended infos size */
	extended_len = nb_event * sizeof(struct lttcomm_event_extended_header);

	/*
	 * This is only valid because the commands which add events are
	 * processed in the same thread as the listing.
	 */
	cds_lfht_for_each_entry(agt->events->ht, &iter.iter, event, node.node) {
		ret = increment_extended_len(event->filter_expression, NULL, NULL,
				&extended_len);
		if (ret) {
			DBG("Error computing the length of extended info message");
			ret = -LTTNG_ERR_FATAL;
			goto error;
		}
	}

	*total_size = nb_event * sizeof(*tmp_events) + extended_len;
	tmp_events = zmalloc(*total_size);
	if (!tmp_events) {
		PERROR("zmalloc agent events session");
		ret = -LTTNG_ERR_FATAL;
		goto error;
	}

	extended_at = ((uint8_t *) tmp_events) +
			nb_event * sizeof(struct lttng_event);

	cds_lfht_for_each_entry(agt->events->ht, &iter.iter, event, node.node) {
		strncpy(tmp_events[i].name, event->name, sizeof(tmp_events[i].name));
		tmp_events[i].name[sizeof(tmp_events[i].name) - 1] = '\0';
		tmp_events[i].enabled = event->enabled;
		tmp_events[i].loglevel = event->loglevel_value;
		tmp_events[i].loglevel_type = event->loglevel_type;
		i++;

		/* Append extended info */
		ret = append_extended_info(event->filter_expression, NULL, NULL,
				&extended_at);
		if (ret) {
			DBG("Error appending extended info message");
			ret = -LTTNG_ERR_FATAL;
			goto error;
		}
	}

	*events = tmp_events;
	ret = nb_event;
	assert(nb_event == i);
	goto end;

error:
	free(tmp_events);
end:
	return ret;
}
/*
 * Create a list of ust global domain events.
 */
static int list_lttng_ust_global_events(char *channel_name,
		struct ltt_ust_domain_global *ust_global,
		struct lttng_event **events, size_t *total_size)
{
	int i = 0, ret = 0;
	unsigned int nb_event = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *node;
	struct ltt_ust_channel *uchan;
	struct ltt_ust_event *uevent;
	struct lttng_event *tmp;
	size_t extended_len = 0;
	uint8_t *extended_at;

	DBG("Listing UST global events for channel %s", channel_name);

	lttng_ht_lookup(ust_global->channels, (void *) channel_name, &iter);
	node = lttng_ht_iter_get_node_str(&iter);
	if (node == NULL) {
		ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
		goto error;
	}

	uchan = caa_container_of(&node->node, struct ltt_ust_channel, node.node);

	nb_event = lttng_ht_get_count(uchan->events);

	DBG3("Listing UST global %d events", nb_event);

	/* Compute required extended infos size */
	cds_lfht_for_each_entry(uchan->events->ht, &iter.iter, uevent, node.node) {
		if (uevent->internal) {
			nb_event--;
			continue;
		}

		ret = increment_extended_len(uevent->filter_expression,
				uevent->exclusion, NULL, &extended_len);
		if (ret) {
			DBG("Error computing the length of extended info message");
			ret = -LTTNG_ERR_FATAL;
			goto error;
		}
	}

	if (nb_event == 0) {
		/* All events are internal, skip. */
		ret = 0;
		*total_size = 0;
		goto error;
	}

	*total_size = nb_event * sizeof(struct lttng_event) + extended_len;
	tmp = zmalloc(*total_size);
	if (tmp == NULL) {
		ret = -LTTNG_ERR_FATAL;
		goto error;
	}

	extended_at = ((uint8_t *) tmp) + nb_event * sizeof(struct lttng_event);

	cds_lfht_for_each_entry(uchan->events->ht, &iter.iter, uevent, node.node) {
		if (uevent->internal) {
			/* This event should remain hidden from clients */
			continue;
		}
		strncpy(tmp[i].name, uevent->attr.name, LTTNG_SYMBOL_NAME_LEN);
		tmp[i].name[LTTNG_SYMBOL_NAME_LEN - 1] = '\0';
		tmp[i].enabled = uevent->enabled;

		switch (uevent->attr.instrumentation) {
		case LTTNG_UST_TRACEPOINT:
			tmp[i].type = LTTNG_EVENT_TRACEPOINT;
			break;
		case LTTNG_UST_PROBE:
			tmp[i].type = LTTNG_EVENT_PROBE;
			break;
		case LTTNG_UST_FUNCTION:
			tmp[i].type = LTTNG_EVENT_FUNCTION;
			break;
		}

		tmp[i].loglevel = uevent->attr.loglevel;
		switch (uevent->attr.loglevel_type) {
		case LTTNG_UST_LOGLEVEL_ALL:
			tmp[i].loglevel_type = LTTNG_EVENT_LOGLEVEL_ALL;
			break;
		case LTTNG_UST_LOGLEVEL_RANGE:
			tmp[i].loglevel_type = LTTNG_EVENT_LOGLEVEL_RANGE;
			break;
		case LTTNG_UST_LOGLEVEL_SINGLE:
			tmp[i].loglevel_type = LTTNG_EVENT_LOGLEVEL_SINGLE;
			break;
		}

		if (uevent->filter) {
			tmp[i].filter = 1;
		}
		if (uevent->exclusion) {
			tmp[i].exclusion = 1;
		}
		i++;

		/* Append extended info */
		ret = append_extended_info(uevent->filter_expression,
				uevent->exclusion, NULL, &extended_at);
		if (ret) {
			DBG("Error appending extended info message");
			ret = -LTTNG_ERR_FATAL;
			goto error;
		}
	}

	ret = nb_event;
	*events = tmp;

error:
	return ret;
}
/*
 * Fill lttng_event array of all kernel events in the channel.
 */
static int list_lttng_kernel_events(char *channel_name,
		struct ltt_kernel_session *kernel_session,
		struct lttng_event **events, size_t *total_size)
{
	int i = 0, ret;
	unsigned int nb_event;
	struct ltt_kernel_event *event;
	struct ltt_kernel_channel *kchan;
	size_t extended_len = 0;
	uint8_t *extended_at;

	kchan = trace_kernel_get_channel_by_name(channel_name, kernel_session);
	if (kchan == NULL) {
		ret = LTTNG_ERR_KERN_CHAN_NOT_FOUND;
		goto error;
	}

	nb_event = kchan->event_count;

	DBG("Listing events for channel %s", kchan->channel->name);

	/* Compute required extended infos size */
	cds_list_for_each_entry(event, &kchan->events_list.head, list) {
		ret = increment_extended_len(event->filter_expression, NULL,
				event->userspace_probe_location,
				&extended_len);
		if (ret) {
			DBG("Error computing the length of extended info message");
			ret = -LTTNG_ERR_FATAL;
			goto error;
		}
	}

	*total_size = nb_event * sizeof(struct lttng_event) + extended_len;
	*events = zmalloc(*total_size);
	if (*events == NULL) {
		ret = -LTTNG_ERR_FATAL;
		goto error;
	}

	extended_at = ((void *) *events) +
			nb_event * sizeof(struct lttng_event);

	/* Kernel channels */
	cds_list_for_each_entry(event, &kchan->events_list.head, list) {
		strncpy((*events)[i].name, event->event->name, LTTNG_SYMBOL_NAME_LEN);
		(*events)[i].name[LTTNG_SYMBOL_NAME_LEN - 1] = '\0';
		(*events)[i].enabled = event->enabled;
		(*events)[i].filter =
				(unsigned char) !!event->filter_expression;

		switch (event->event->instrumentation) {
		case LTTNG_KERNEL_TRACEPOINT:
			(*events)[i].type = LTTNG_EVENT_TRACEPOINT;
			break;
		case LTTNG_KERNEL_KRETPROBE:
			(*events)[i].type = LTTNG_EVENT_FUNCTION;
			memcpy(&(*events)[i].attr.probe, &event->event->u.kprobe,
					sizeof(struct lttng_kernel_kprobe));
			break;
		case LTTNG_KERNEL_KPROBE:
			(*events)[i].type = LTTNG_EVENT_PROBE;
			memcpy(&(*events)[i].attr.probe, &event->event->u.kprobe,
					sizeof(struct lttng_kernel_kprobe));
			break;
		case LTTNG_KERNEL_UPROBE:
			(*events)[i].type = LTTNG_EVENT_USERSPACE_PROBE;
			break;
		case LTTNG_KERNEL_FUNCTION:
			(*events)[i].type = LTTNG_EVENT_FUNCTION;
			memcpy(&((*events)[i].attr.ftrace), &event->event->u.ftrace,
					sizeof(struct lttng_kernel_function));
			break;
		case LTTNG_KERNEL_NOOP:
			(*events)[i].type = LTTNG_EVENT_NOOP;
			break;
		case LTTNG_KERNEL_SYSCALL:
			(*events)[i].type = LTTNG_EVENT_SYSCALL;
			break;
		case LTTNG_KERNEL_ALL:
		default:
			break;
		}
		i++;

		/* Append extended info */
		ret = append_extended_info(event->filter_expression, NULL,
				event->userspace_probe_location, &extended_at);
		if (ret) {
			DBG("Error appending extended info message");
			ret = -LTTNG_ERR_FATAL;
			goto error;
		}
	}

	return nb_event;

error:
	/* Negate the error code to differentiate the size from an error */
	return -ret;
}
/*
 * Add URI to the consumer output object. Set the correct path depending on the
 * domain adding the default trace directory.
 */
static int add_uri_to_consumer(struct consumer_output *consumer,
		struct lttng_uri *uri, enum lttng_domain_type domain,
		const char *session_name)
{
	int ret = LTTNG_OK;
	const char *default_trace_dir;

	if (consumer == NULL) {
		DBG("No consumer detected. Don't add URI. Stopping.");
		ret = LTTNG_ERR_NO_CONSUMER;
		goto error;
	}

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
		default_trace_dir = DEFAULT_KERNEL_TRACE_DIR;
		break;
	case LTTNG_DOMAIN_UST:
		default_trace_dir = DEFAULT_UST_TRACE_DIR;
		break;
	default:
		/*
		 * This case is possible if we try to add the URI to the global tracing
		 * session consumer object which in this case there is no subdir.
		 */
		default_trace_dir = "";
		break;
	}

	switch (uri->dtype) {
	case LTTNG_DST_IPV4:
	case LTTNG_DST_IPV6:
		DBG2("Setting network URI to consumer");

		if (consumer->type == CONSUMER_DST_NET) {
			if ((uri->stype == LTTNG_STREAM_CONTROL &&
					consumer->dst.net.control_isset) ||
					(uri->stype == LTTNG_STREAM_DATA &&
					consumer->dst.net.data_isset)) {
				ret = LTTNG_ERR_URL_EXIST;
				goto error;
			}
		} else {
			memset(&consumer->dst.net, 0, sizeof(consumer->dst.net));
		}

		consumer->type = CONSUMER_DST_NET;

		/* Set URI into consumer output object */
		ret = consumer_set_network_uri(consumer, uri);
		if (ret < 0) {
			goto error;
		} else if (ret == 1) {
			/*
			 * URI was the same in the consumer so we do not append the subdir
			 * again so to not duplicate output dir.
			 */
			ret = LTTNG_OK;
			goto error;
		}

		if (uri->stype == LTTNG_STREAM_CONTROL && strlen(uri->subdir) == 0) {
			ret = consumer_set_subdir(consumer, session_name);
			if (ret < 0) {
				ret = LTTNG_ERR_FATAL;
				goto error;
			}
		}

		if (uri->stype == LTTNG_STREAM_CONTROL) {
			/* On a new subdir, reappend the default trace dir. */
			strncat(consumer->subdir, default_trace_dir,
					sizeof(consumer->subdir) - strlen(consumer->subdir) - 1);
			DBG3("Append domain trace name to subdir %s", consumer->subdir);
		}
		break;
	case LTTNG_DST_PATH:
		DBG2("Setting trace directory path from URI to %s", uri->dst.path);
		memset(consumer->dst.session_root_path, 0,
				sizeof(consumer->dst.session_root_path));
		/* Explicit length checks for strcpy and strcat. */
		if (strlen(uri->dst.path) + strlen(default_trace_dir)
				>= sizeof(consumer->dst.session_root_path)) {
			ret = LTTNG_ERR_FATAL;
			goto error;
		}
		strcpy(consumer->dst.session_root_path, uri->dst.path);
		/* Append default trace dir */
		strcat(consumer->dst.session_root_path, default_trace_dir);
		/* Flag consumer as local. */
		consumer->type = CONSUMER_DST_LOCAL;
		break;
	}

	ret = LTTNG_OK;

error:
	return ret;
}
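/*
 * Example (illustrative only, not from the original source): a local URI with
 * path "/tmp/traces" added for the kernel domain leaves session_root_path as
 * "/tmp/traces" followed by DEFAULT_KERNEL_TRACE_DIR, so kernel and UST
 * traces of the same session end up in separate per-domain sub-directories.
 */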
/*
 * Init tracing by creating trace directory and sending fds kernel consumer.
 */
static int init_kernel_tracing(struct ltt_kernel_session *session)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct consumer_socket *socket;

	if (session->consumer_fds_sent == 0 && session->consumer != NULL) {
		cds_lfht_for_each_entry(session->consumer->socks->ht, &iter.iter,
				socket, node.node) {
			pthread_mutex_lock(socket->lock);
			ret = kernel_consumer_send_session(socket, session);
			pthread_mutex_unlock(socket->lock);
			if (ret < 0) {
				ret = LTTNG_ERR_KERN_CONSUMER_FAIL;
				goto error;
			}
		}
	}

error:
	return ret;
}
/*
 * Create a socket to the relayd using the URI.
 *
 * On success, the relayd_sock pointer is set to the created socket.
 * Else, it remains untouched and an LTTng error code is returned.
 */
static enum lttng_error_code create_connect_relayd(struct lttng_uri *uri,
		struct lttcomm_relayd_sock **relayd_sock,
		struct consumer_output *consumer)
{
	int ret;
	enum lttng_error_code status = LTTNG_OK;
	struct lttcomm_relayd_sock *rsock;

	rsock = lttcomm_alloc_relayd_sock(uri, RELAYD_VERSION_COMM_MAJOR,
			RELAYD_VERSION_COMM_MINOR);
	if (!rsock) {
		status = LTTNG_ERR_FATAL;
		goto error;
	}

	/*
	 * Connect to relayd so we can proceed with a session creation. This call
	 * can possibly block for an arbitrary amount of time to set the health
	 * state to be in poll execution.
	 */
	ret = relayd_connect(rsock);
	if (ret < 0) {
		ERR("Unable to reach lttng-relayd");
		status = LTTNG_ERR_RELAYD_CONNECT_FAIL;
		goto close_sock;
	}

	/* Create socket for control stream. */
	if (uri->stype == LTTNG_STREAM_CONTROL) {
		DBG3("Creating relayd stream socket from URI");

		/* Check relayd version */
		ret = relayd_version_check(rsock);
		if (ret == LTTNG_ERR_RELAYD_VERSION_FAIL) {
			status = LTTNG_ERR_RELAYD_VERSION_FAIL;
			goto close_sock;
		} else if (ret < 0) {
			ERR("Unable to reach lttng-relayd");
			status = LTTNG_ERR_RELAYD_CONNECT_FAIL;
			goto close_sock;
		}
		consumer->relay_major_version = rsock->major;
		consumer->relay_minor_version = rsock->minor;
	} else if (uri->stype == LTTNG_STREAM_DATA) {
		DBG3("Creating relayd data socket from URI");
	} else {
		/* Command is not valid */
		ERR("Relayd invalid stream type: %d", uri->stype);
		status = LTTNG_ERR_INVALID;
		goto close_sock;
	}

	*relayd_sock = rsock;

	return status;

close_sock:
	/* The returned value is not useful since we are on an error path. */
	(void) relayd_close(rsock);
	free(rsock);
error:
	return status;
}
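/*
 * Typical usage (sketch only; see send_consumer_relayd_socket() below for the
 * real call site):
 *
 *	struct lttcomm_relayd_sock *rsock = NULL;
 *	enum lttng_error_code status;
 *
 *	status = create_connect_relayd(relayd_uri, &rsock, consumer);
 *	if (status != LTTNG_OK) {
 *		// rsock was left untouched; nothing to clean up here.
 *	}
 */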
/*
 * Connect to the relayd using URI and send the socket to the right consumer.
 *
 * The consumer socket lock must be held by the caller.
 *
 * Returns LTTNG_OK on success or an LTTng error code on failure.
 */
static enum lttng_error_code send_consumer_relayd_socket(
		unsigned int session_id,
		struct lttng_uri *relayd_uri,
		struct consumer_output *consumer,
		struct consumer_socket *consumer_sock,
		char *session_name, char *hostname, int session_live_timer)
{
	int ret;
	struct lttcomm_relayd_sock *rsock = NULL;
	enum lttng_error_code status;

	/* Connect to relayd and make version check if uri is the control. */
	status = create_connect_relayd(relayd_uri, &rsock, consumer);
	if (status != LTTNG_OK) {
		goto relayd_comm_error;
	}

	/* Set the network sequence index if not set. */
	if (consumer->net_seq_index == (uint64_t) -1ULL) {
		pthread_mutex_lock(&relayd_net_seq_idx_lock);
		/*
		 * Increment net_seq_idx because we are about to transfer the
		 * new relayd socket to the consumer.
		 * Assign unique key so the consumer can match streams.
		 */
		consumer->net_seq_index = ++relayd_net_seq_idx;
		pthread_mutex_unlock(&relayd_net_seq_idx_lock);
	}

	/* Send relayd socket to consumer. */
	ret = consumer_send_relayd_socket(consumer_sock, rsock, consumer,
			relayd_uri->stype, session_id,
			session_name, hostname, session_live_timer);
	if (ret < 0) {
		status = LTTNG_ERR_ENABLE_CONSUMER_FAIL;
		goto close_sock;
	}

	/* Flag that the corresponding socket was sent. */
	if (relayd_uri->stype == LTTNG_STREAM_CONTROL) {
		consumer_sock->control_sock_sent = 1;
	} else if (relayd_uri->stype == LTTNG_STREAM_DATA) {
		consumer_sock->data_sock_sent = 1;
	}

	/*
	 * Close socket which was dup on the consumer side. The session daemon does
	 * NOT keep track of the relayd socket(s) once transfer to the consumer.
	 */

close_sock:
	if (status != LTTNG_OK) {
		/*
		 * The consumer output for this session should not be used anymore
		 * since the relayd connection failed thus making any tracing or/and
		 * streaming not usable.
		 */
		consumer->enabled = 0;
	}
	(void) relayd_close(rsock);
	free(rsock);

relayd_comm_error:
	return status;
}
/*
 * Send both relayd sockets to a specific consumer and domain. This is a
 * helper function to facilitate sending the information to the consumer for a
 * session.
 *
 * The consumer socket lock must be held by the caller.
 *
 * Returns LTTNG_OK, or an LTTng error code on failure.
 */
static enum lttng_error_code send_consumer_relayd_sockets(
		enum lttng_domain_type domain,
		unsigned int session_id, struct consumer_output *consumer,
		struct consumer_socket *sock, char *session_name,
		char *hostname, int session_live_timer)
{
	enum lttng_error_code status = LTTNG_OK;

	/* Sending control relayd socket. */
	if (!sock->control_sock_sent) {
		status = send_consumer_relayd_socket(session_id,
				&consumer->dst.net.control, consumer, sock,
				session_name, hostname, session_live_timer);
		if (status != LTTNG_OK) {
			goto error;
		}
	}

	/* Sending data relayd socket. */
	if (!sock->data_sock_sent) {
		status = send_consumer_relayd_socket(session_id,
				&consumer->dst.net.data, consumer, sock,
				session_name, hostname, session_live_timer);
		if (status != LTTNG_OK) {
			goto error;
		}
	}

error:
	return status;
}
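/*
 * Note that the control socket is always set up before the data socket, and
 * that the control_sock_sent/data_sock_sent flags make this helper safe to
 * call more than once for the same consumer socket.
 */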
/*
 * Setup relayd connections for a tracing session. First creates the socket to
 * the relayd and send them to the right domain consumer. Consumer type MUST be
 * network.
 */
int cmd_setup_relayd(struct ltt_session *session)
{
	int ret = LTTNG_OK;
	struct ltt_ust_session *usess;
	struct ltt_kernel_session *ksess;
	struct consumer_socket *socket;
	struct lttng_ht_iter iter;

	usess = session->ust_session;
	ksess = session->kernel_session;

	DBG("Setting relayd for session %s", session->name);

	if (usess && usess->consumer && usess->consumer->type == CONSUMER_DST_NET
			&& usess->consumer->enabled) {
		/* For each consumer socket, send relayd sockets */
		cds_lfht_for_each_entry(usess->consumer->socks->ht, &iter.iter,
				socket, node.node) {
			pthread_mutex_lock(socket->lock);
			ret = send_consumer_relayd_sockets(LTTNG_DOMAIN_UST, session->id,
					usess->consumer, socket,
					session->name, session->hostname,
					session->live_timer);
			pthread_mutex_unlock(socket->lock);
			if (ret != LTTNG_OK) {
				goto error;
			}
			/* Session is now ready for network streaming. */
			session->net_handle = 1;
		}
		session->consumer->relay_major_version =
				usess->consumer->relay_major_version;
		session->consumer->relay_minor_version =
				usess->consumer->relay_minor_version;
	}

	if (ksess && ksess->consumer && ksess->consumer->type == CONSUMER_DST_NET
			&& ksess->consumer->enabled) {
		cds_lfht_for_each_entry(ksess->consumer->socks->ht, &iter.iter,
				socket, node.node) {
			pthread_mutex_lock(socket->lock);
			ret = send_consumer_relayd_sockets(LTTNG_DOMAIN_KERNEL, session->id,
					ksess->consumer, socket,
					session->name, session->hostname,
					session->live_timer);
			pthread_mutex_unlock(socket->lock);
			if (ret != LTTNG_OK) {
				goto error;
			}
			/* Session is now ready for network streaming. */
			session->net_handle = 1;
		}
		session->consumer->relay_major_version =
				ksess->consumer->relay_major_version;
		session->consumer->relay_minor_version =
				ksess->consumer->relay_minor_version;
	}

error:
	return ret;
}
/*
 * Start a kernel session by opening all necessary streams.
 */
static int start_kernel_session(struct ltt_kernel_session *ksess, int wpipe)
{
	int ret;
	struct ltt_kernel_channel *kchan;

	/* Open kernel metadata */
	if (ksess->metadata == NULL && ksess->output_traces) {
		ret = kernel_open_metadata(ksess);
		if (ret < 0) {
			ret = LTTNG_ERR_KERN_META_FAIL;
			goto error;
		}
	}

	/* Open kernel metadata stream */
	if (ksess->metadata && ksess->metadata_stream_fd < 0) {
		ret = kernel_open_metadata_stream(ksess);
		if (ret < 0) {
			ERR("Kernel create metadata stream failed");
			ret = LTTNG_ERR_KERN_STREAM_FAIL;
			goto error;
		}
	}

	/* For each channel */
	cds_list_for_each_entry(kchan, &ksess->channel_list.head, list) {
		if (kchan->stream_count == 0) {
			ret = kernel_open_channel_stream(kchan);
			if (ret < 0) {
				ret = LTTNG_ERR_KERN_STREAM_FAIL;
				goto error;
			}
			/* Update the stream global counter */
			ksess->stream_count_global += ret;
		}
	}

	/* Setup kernel consumer socket and send fds to it */
	ret = init_kernel_tracing(ksess);
	if (ret != 0) {
		ret = LTTNG_ERR_KERN_START_FAIL;
		goto error;
	}

	/* This starts the kernel tracing */
	ret = kernel_start_session(ksess);
	if (ret < 0) {
		ret = LTTNG_ERR_KERN_START_FAIL;
		goto error;
	}

	/* Quiescent wait after starting trace */
	kernel_wait_quiescent(wpipe);

	return LTTNG_OK;

error:
	return ret;
}
/*
 * Command LTTNG_DISABLE_CHANNEL processed by the client thread.
 */
int cmd_disable_channel(struct ltt_session *session,
		enum lttng_domain_type domain, char *channel_name)
{
	int ret;
	struct ltt_ust_session *usess;

	usess = session->ust_session;

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
	{
		ret = channel_kernel_disable(session->kernel_session,
				channel_name);
		if (ret != LTTNG_OK) {
			goto error;
		}

		kernel_wait_quiescent(kernel_tracer_fd);
		break;
	}
	case LTTNG_DOMAIN_UST:
	{
		struct ltt_ust_channel *uchan;
		struct lttng_ht *chan_ht;

		chan_ht = usess->domain_global.channels;

		uchan = trace_ust_find_channel_by_name(chan_ht, channel_name);
		if (uchan == NULL) {
			ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
			goto error;
		}

		ret = channel_ust_disable(usess, uchan);
		if (ret != LTTNG_OK) {
			goto error;
		}
		break;
	}
	default:
		ret = LTTNG_ERR_UNKNOWN_DOMAIN;
		goto error;
	}

	ret = LTTNG_OK;

error:
	return ret;
}
/*
 * Command LTTNG_TRACK_PID processed by the client thread.
 *
 * Called with session lock held.
 */
int cmd_track_pid(struct ltt_session *session, enum lttng_domain_type domain,
		int pid)
{
	int ret;

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
	{
		struct ltt_kernel_session *ksess;

		ksess = session->kernel_session;

		ret = kernel_track_pid(ksess, pid);
		if (ret != LTTNG_OK) {
			goto error;
		}

		kernel_wait_quiescent(kernel_tracer_fd);
		break;
	}
	case LTTNG_DOMAIN_UST:
	{
		struct ltt_ust_session *usess;

		usess = session->ust_session;

		ret = trace_ust_track_pid(usess, pid);
		if (ret != LTTNG_OK) {
			goto error;
		}
		break;
	}
	default:
		ret = LTTNG_ERR_UNKNOWN_DOMAIN;
		goto error;
	}

	ret = LTTNG_OK;

error:
	return ret;
}
/*
 * Command LTTNG_UNTRACK_PID processed by the client thread.
 *
 * Called with session lock held.
 */
int cmd_untrack_pid(struct ltt_session *session, enum lttng_domain_type domain,
		int pid)
{
	int ret;

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
	{
		struct ltt_kernel_session *ksess;

		ksess = session->kernel_session;

		ret = kernel_untrack_pid(ksess, pid);
		if (ret != LTTNG_OK) {
			goto error;
		}

		kernel_wait_quiescent(kernel_tracer_fd);
		break;
	}
	case LTTNG_DOMAIN_UST:
	{
		struct ltt_ust_session *usess;

		usess = session->ust_session;

		ret = trace_ust_untrack_pid(usess, pid);
		if (ret != LTTNG_OK) {
			goto error;
		}
		break;
	}
	default:
		ret = LTTNG_ERR_UNKNOWN_DOMAIN;
		goto error;
	}

	ret = LTTNG_OK;

error:
	return ret;
}
/*
 * Command LTTNG_ENABLE_CHANNEL processed by the client thread.
 *
 * The wpipe argument is used as a notifier for the kernel thread.
 */
int cmd_enable_channel(struct ltt_session *session,
		struct lttng_domain *domain, struct lttng_channel *attr, int wpipe)
{
	int ret;
	struct ltt_ust_session *usess = session->ust_session;
	struct lttng_ht *chan_ht;
	size_t len;

	len = lttng_strnlen(attr->name, sizeof(attr->name));

	/* Validate channel name */
	if (attr->name[0] == '.' ||
			memchr(attr->name, '/', len) != NULL) {
		ret = LTTNG_ERR_INVALID_CHANNEL_NAME;
		goto error;
	}

	DBG("Enabling channel %s for session %s", attr->name, session->name);

	/*
	 * Don't try to enable a channel if the session has been started at
	 * some point in time before. The tracer does not allow it.
	 */
	if (session->has_been_started) {
		ret = LTTNG_ERR_TRACE_ALREADY_STARTED;
		goto error;
	}

	/*
	 * If the session is a live session, remove the switch timer, the
	 * live timer does the same thing but sends also synchronisation
	 * beacons for inactive streams.
	 */
	if (session->live_timer > 0) {
		attr->attr.live_timer_interval = session->live_timer;
		attr->attr.switch_timer_interval = 0;
	}

	/* Check for feature support */
	switch (domain->type) {
	case LTTNG_DOMAIN_KERNEL:
	{
		if (kernel_supports_ring_buffer_snapshot_sample_positions(kernel_tracer_fd) != 1) {
			/* Sampling position of buffer is not supported */
			WARN("Kernel tracer does not support buffer monitoring. "
					"Setting the monitor interval timer to 0 "
					"(disabled) for channel '%s' of session '%s'",
					attr->name, session->name);
			lttng_channel_set_monitor_timer_interval(attr, 0);
		}
		break;
	}
	case LTTNG_DOMAIN_UST:
		break;
	case LTTNG_DOMAIN_JUL:
	case LTTNG_DOMAIN_LOG4J:
	case LTTNG_DOMAIN_PYTHON:
		if (!agent_tracing_is_enabled()) {
			DBG("Attempted to enable a channel in an agent domain but the agent thread is not running");
			ret = LTTNG_ERR_AGENT_TRACING_DISABLED;
			goto error;
		}
		break;
	default:
		ret = LTTNG_ERR_UNKNOWN_DOMAIN;
		goto error;
	}

	switch (domain->type) {
	case LTTNG_DOMAIN_KERNEL:
	{
		struct ltt_kernel_channel *kchan;

		kchan = trace_kernel_get_channel_by_name(attr->name,
				session->kernel_session);
		if (kchan == NULL) {
			ret = channel_kernel_create(session->kernel_session, attr, wpipe);
			if (attr->name[0] != '\0') {
				session->kernel_session->has_non_default_channel = 1;
			}
		} else {
			ret = channel_kernel_enable(session->kernel_session, kchan);
		}

		if (ret != LTTNG_OK) {
			goto error;
		}

		kernel_wait_quiescent(kernel_tracer_fd);
		break;
	}
	case LTTNG_DOMAIN_UST:
	case LTTNG_DOMAIN_JUL:
	case LTTNG_DOMAIN_LOG4J:
	case LTTNG_DOMAIN_PYTHON:
	{
		struct ltt_ust_channel *uchan;

		/*
		 * Current agent implementation limitations force us to allow
		 * only one channel at once in "agent" subdomains. Each
		 * subdomain has a default channel name which must be strictly
		 * enforced.
		 */
		if (domain->type == LTTNG_DOMAIN_JUL) {
			if (strncmp(attr->name, DEFAULT_JUL_CHANNEL_NAME,
					LTTNG_SYMBOL_NAME_LEN)) {
				ret = LTTNG_ERR_INVALID_CHANNEL_NAME;
				goto error;
			}
		} else if (domain->type == LTTNG_DOMAIN_LOG4J) {
			if (strncmp(attr->name, DEFAULT_LOG4J_CHANNEL_NAME,
					LTTNG_SYMBOL_NAME_LEN)) {
				ret = LTTNG_ERR_INVALID_CHANNEL_NAME;
				goto error;
			}
		} else if (domain->type == LTTNG_DOMAIN_PYTHON) {
			if (strncmp(attr->name, DEFAULT_PYTHON_CHANNEL_NAME,
					LTTNG_SYMBOL_NAME_LEN)) {
				ret = LTTNG_ERR_INVALID_CHANNEL_NAME;
				goto error;
			}
		}

		chan_ht = usess->domain_global.channels;

		uchan = trace_ust_find_channel_by_name(chan_ht, attr->name);
		if (uchan == NULL) {
			ret = channel_ust_create(usess, attr, domain->buf_type);
			if (attr->name[0] != '\0') {
				usess->has_non_default_channel = 1;
			}
		} else {
			ret = channel_ust_enable(usess, uchan);
		}
		break;
	}
	default:
		ret = LTTNG_ERR_UNKNOWN_DOMAIN;
		goto error;
	}

error:
	return ret;
}
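/*
 * Example (illustrative only, not from the original source): in an agent
 * domain such as JUL, only the domain's default channel name
 * (DEFAULT_JUL_CHANNEL_NAME) is accepted above; trying to enable a channel
 * named "my_chan" in that domain fails with LTTNG_ERR_INVALID_CHANNEL_NAME.
 */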
/*
 * Command LTTNG_DISABLE_EVENT processed by the client thread.
 */
int cmd_disable_event(struct ltt_session *session,
		enum lttng_domain_type domain, char *channel_name,
		struct lttng_event *event)
{
	int ret;
	char *event_name;

	DBG("Disable event command for event \'%s\'", event->name);

	event_name = event->name;

	/* Error out on unhandled search criteria */
	if (event->loglevel_type || event->loglevel != -1 || event->enabled
			|| event->pid || event->filter || event->exclusion) {
		ret = LTTNG_ERR_UNK;
		goto error;
	}

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
	{
		struct ltt_kernel_channel *kchan;
		struct ltt_kernel_session *ksess;

		ksess = session->kernel_session;

		/*
		 * If a non-default channel has been created in the
		 * session, explicitly require that -c chan_name needs
		 * to be provided.
		 */
		if (ksess->has_non_default_channel && channel_name[0] == '\0') {
			ret = LTTNG_ERR_NEED_CHANNEL_NAME;
			goto error;
		}

		kchan = trace_kernel_get_channel_by_name(channel_name, ksess);
		if (kchan == NULL) {
			ret = LTTNG_ERR_KERN_CHAN_NOT_FOUND;
			goto error;
		}

		switch (event->type) {
		case LTTNG_EVENT_ALL:
		case LTTNG_EVENT_TRACEPOINT:
		case LTTNG_EVENT_SYSCALL:
		case LTTNG_EVENT_PROBE:
		case LTTNG_EVENT_FUNCTION:
		case LTTNG_EVENT_FUNCTION_ENTRY:/* fall-through */
			if (event_name[0] == '\0') {
				ret = event_kernel_disable_event(kchan,
						NULL, event->type);
			} else {
				ret = event_kernel_disable_event(kchan,
						event_name, event->type);
			}
			if (ret != LTTNG_OK) {
				goto error;
			}
			break;
		default:
			ret = LTTNG_ERR_UNK;
			goto error;
		}

		kernel_wait_quiescent(kernel_tracer_fd);
		break;
	}
	case LTTNG_DOMAIN_UST:
	{
		struct ltt_ust_channel *uchan;
		struct ltt_ust_session *usess;

		usess = session->ust_session;

		if (validate_ust_event_name(event_name)) {
			ret = LTTNG_ERR_INVALID_EVENT_NAME;
			goto error;
		}

		/*
		 * If a non-default channel has been created in the
		 * session, explicitly require that -c chan_name needs
		 * to be provided.
		 */
		if (usess->has_non_default_channel && channel_name[0] == '\0') {
			ret = LTTNG_ERR_NEED_CHANNEL_NAME;
			goto error;
		}

		uchan = trace_ust_find_channel_by_name(usess->domain_global.channels,
				channel_name);
		if (uchan == NULL) {
			ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
			goto error;
		}

		switch (event->type) {
		case LTTNG_EVENT_ALL:
			/*
			 * An empty event name means that everything
			 * should be disabled.
			 */
			if (event->name[0] == '\0') {
				ret = event_ust_disable_all_tracepoints(usess, uchan);
			} else {
				ret = event_ust_disable_tracepoint(usess, uchan,
						event_name);
			}
			if (ret != LTTNG_OK) {
				goto error;
			}
			break;
		default:
			ret = LTTNG_ERR_UNK;
			goto error;
		}

		DBG3("Disable UST event %s in channel %s completed", event_name,
				channel_name);
		break;
	}
	case LTTNG_DOMAIN_LOG4J:
	case LTTNG_DOMAIN_JUL:
	case LTTNG_DOMAIN_PYTHON:
	{
		struct agent *agt;
		struct ltt_ust_session *usess = session->ust_session;

		switch (event->type) {
		case LTTNG_EVENT_ALL:
			break;
		default:
			ret = LTTNG_ERR_UNK;
			goto error;
		}

		agt = trace_ust_find_agent(usess, domain);
		if (!agt) {
			ret = -LTTNG_ERR_UST_EVENT_NOT_FOUND;
			goto error;
		}
		/*
		 * An empty event name means that everything
		 * should be disabled.
		 */
		if (event->name[0] == '\0') {
			ret = event_agent_disable_all(usess, agt);
		} else {
			ret = event_agent_disable(usess, agt, event_name);
		}
		if (ret != LTTNG_OK) {
			goto error;
		}
		break;
	}
	default:
		ret = LTTNG_ERR_UND;
		goto error;
	}

	ret = LTTNG_OK;

error:
	return ret;
}
/*
 * Command LTTNG_ADD_CONTEXT processed by the client thread.
 */
int cmd_add_context(struct ltt_session *session, enum lttng_domain_type domain,
		char *channel_name, struct lttng_event_context *ctx, int kwpipe)
{
	int ret, chan_kern_created = 0, chan_ust_created = 0;
	char *app_ctx_provider_name = NULL, *app_ctx_name = NULL;

	/*
	 * Don't try to add a context if the session has been started at
	 * some point in time before. The tracer does not allow it and would
	 * result in a corrupted trace.
	 */
	if (session->has_been_started) {
		ret = LTTNG_ERR_TRACE_ALREADY_STARTED;
		goto end;
	}

	if (ctx->ctx == LTTNG_EVENT_CONTEXT_APP_CONTEXT) {
		app_ctx_provider_name = ctx->u.app_ctx.provider_name;
		app_ctx_name = ctx->u.app_ctx.ctx_name;
	}

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
		assert(session->kernel_session);

		if (session->kernel_session->channel_count == 0) {
			/* Create default channel */
			ret = channel_kernel_create(session->kernel_session, NULL, kwpipe);
			if (ret != LTTNG_OK) {
				goto error;
			}
			chan_kern_created = 1;
		}
		/* Add kernel context to kernel tracer */
		ret = context_kernel_add(session->kernel_session, ctx, channel_name);
		if (ret != LTTNG_OK) {
			goto error;
		}
		break;
	case LTTNG_DOMAIN_JUL:
	case LTTNG_DOMAIN_LOG4J:
		/*
		 * Validate channel name.
		 * If no channel name is given and the domain is JUL or LOG4J,
		 * set it to the appropriate domain-specific channel name. If
		 * a name is provided but does not match the expected channel
		 * name, return an error.
		 */
		if (domain == LTTNG_DOMAIN_JUL && *channel_name &&
				strcmp(channel_name,
				DEFAULT_JUL_CHANNEL_NAME)) {
			ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
			goto error;
		} else if (domain == LTTNG_DOMAIN_LOG4J && *channel_name &&
				strcmp(channel_name,
				DEFAULT_LOG4J_CHANNEL_NAME)) {
			ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
			goto error;
		}
		/* break is _not_ missing here. */
	case LTTNG_DOMAIN_UST:
	{
		struct ltt_ust_session *usess = session->ust_session;
		unsigned int chan_count;

		chan_count = lttng_ht_get_count(usess->domain_global.channels);
		if (chan_count == 0) {
			struct lttng_channel *attr;
			/* Create default channel */
			attr = channel_new_default_attr(domain, usess->buffer_type);
			if (attr == NULL) {
				ret = LTTNG_ERR_FATAL;
				goto error;
			}

			ret = channel_ust_create(usess, attr, usess->buffer_type);
			if (ret != LTTNG_OK) {
				goto error;
			}
			channel_attr_destroy(attr);
			chan_ust_created = 1;
		}

		ret = context_ust_add(usess, domain, ctx, channel_name);
		free(app_ctx_provider_name);
		free(app_ctx_name);
		app_ctx_name = NULL;
		app_ctx_provider_name = NULL;
		if (ret != LTTNG_OK) {
			goto error;
		}
		break;
	}
	default:
		ret = LTTNG_ERR_UND;
		goto error;
	}

	ret = LTTNG_OK;
	goto end;

error:
	if (chan_kern_created) {
		struct ltt_kernel_channel *kchan =
				trace_kernel_get_channel_by_name(DEFAULT_CHANNEL_NAME,
						session->kernel_session);
		/* Created previously, this should NOT fail. */
		assert(kchan);
		kernel_destroy_channel(kchan);
	}

	if (chan_ust_created) {
		struct ltt_ust_channel *uchan =
				trace_ust_find_channel_by_name(
						session->ust_session->domain_global.channels,
						DEFAULT_CHANNEL_NAME);
		/* Created previously, this should NOT fail. */
		assert(uchan);
		/* Remove from the channel list of the session. */
		trace_ust_delete_channel(session->ust_session->domain_global.channels,
				uchan);
		trace_ust_destroy_channel(uchan);
	}
end:
	free(app_ctx_provider_name);
	free(app_ctx_name);
	return ret;
}
static inline bool name_starts_with(const char *name, const char *prefix)
{
	const size_t max_cmp_len = min(strlen(prefix), LTTNG_SYMBOL_NAME_LEN);

	return !strncmp(name, prefix, max_cmp_len);
}

/* Perform userspace-specific event name validation */
static int validate_ust_event_name(const char *name)
{
	int ret = 0;

	/*
	 * Check name against all internal UST event component namespaces used
	 * by the agents.
	 */
	if (name_starts_with(name, DEFAULT_JUL_EVENT_COMPONENT) ||
			name_starts_with(name, DEFAULT_LOG4J_EVENT_COMPONENT) ||
			name_starts_with(name, DEFAULT_PYTHON_EVENT_COMPONENT)) {
		ret = -1;
	}

	return ret;
}
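/*
 * For example, enabling a regular user space event whose name starts with one
 * of the reserved agent prefixes (DEFAULT_JUL_EVENT_COMPONENT,
 * DEFAULT_LOG4J_EVENT_COMPONENT or DEFAULT_PYTHON_EVENT_COMPONENT) is
 * rejected by _cmd_enable_event() below with LTTNG_ERR_INVALID_EVENT_NAME,
 * unless the event is flagged as internal.
 */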
/*
 * Internal version of cmd_enable_event() with a supplemental
 * "internal_event" flag which is used to enable internal events which should
 * be hidden from clients. Such events are used in the agent implementation to
 * enable the events through which all "agent" events are funneled.
 */
static int _cmd_enable_event(struct ltt_session *session,
		struct lttng_domain *domain,
		char *channel_name, struct lttng_event *event,
		char *filter_expression,
		struct lttng_filter_bytecode *filter,
		struct lttng_event_exclusion *exclusion,
		int wpipe, bool internal_event)
{
	int ret = 0, channel_created = 0;
	struct lttng_channel *attr = NULL;

	assert(channel_name);

	/* If we have a filter, we must have its filter expression */
	assert(!(!!filter_expression ^ !!filter));

	/* Normalize event name as a globbing pattern */
	strutils_normalize_star_glob_pattern(event->name);

	/* Normalize exclusion names as globbing patterns */
	if (exclusion) {
		size_t i;

		for (i = 0; i < exclusion->count; i++) {
			char *name = LTTNG_EVENT_EXCLUSION_NAME_AT(exclusion, i);

			strutils_normalize_star_glob_pattern(name);
		}
	}

	DBG("Enable event command for event \'%s\'", event->name);

	switch (domain->type) {
	case LTTNG_DOMAIN_KERNEL:
	{
		struct ltt_kernel_channel *kchan;

		/*
		 * If a non-default channel has been created in the
		 * session, explicitly require that -c chan_name needs
		 * to be provided.
		 */
		if (session->kernel_session->has_non_default_channel
				&& channel_name[0] == '\0') {
			ret = LTTNG_ERR_NEED_CHANNEL_NAME;
			goto error;
		}

		kchan = trace_kernel_get_channel_by_name(channel_name,
				session->kernel_session);
		if (kchan == NULL) {
			attr = channel_new_default_attr(LTTNG_DOMAIN_KERNEL,
					LTTNG_BUFFER_GLOBAL);
			if (attr == NULL) {
				ret = LTTNG_ERR_FATAL;
				goto error;
			}
			if (lttng_strncpy(attr->name, channel_name,
					sizeof(attr->name))) {
				ret = LTTNG_ERR_INVALID;
				goto error;
			}

			ret = cmd_enable_channel(session, domain, attr, wpipe);
			if (ret != LTTNG_OK) {
				goto error;
			}
			channel_created = 1;
		}

		/* Get the newly created kernel channel pointer */
		kchan = trace_kernel_get_channel_by_name(channel_name,
				session->kernel_session);
		if (kchan == NULL) {
			/* This should not happen... */
			ret = LTTNG_ERR_FATAL;
			goto error;
		}

		switch (event->type) {
		case LTTNG_EVENT_ALL:
		{
			char *filter_expression_a = NULL;
			struct lttng_filter_bytecode *filter_a = NULL;

			/*
			 * We need to duplicate filter_expression and filter,
			 * because ownership is passed to first enable
			 * event.
			 */
			if (filter_expression) {
				filter_expression_a = strdup(filter_expression);
				if (!filter_expression_a) {
					ret = LTTNG_ERR_FATAL;
					goto error;
				}
			}
			if (filter) {
				filter_a = zmalloc(sizeof(*filter_a) + filter->len);
				if (!filter_a) {
					free(filter_expression_a);
					ret = LTTNG_ERR_FATAL;
					goto error;
				}
				memcpy(filter_a, filter, sizeof(*filter_a) + filter->len);
			}
			event->type = LTTNG_EVENT_TRACEPOINT;	/* Hack */
			ret = event_kernel_enable_event(kchan, event,
					filter_expression, filter);
			/* We have passed ownership */
			filter_expression = NULL;
			filter = NULL;
			if (ret != LTTNG_OK) {
				if (channel_created) {
					/* Let's not leak a useless channel. */
					kernel_destroy_channel(kchan);
				}
				free(filter_expression_a);
				free(filter_a);
				goto error;
			}
			event->type = LTTNG_EVENT_SYSCALL;	/* Hack */
			ret = event_kernel_enable_event(kchan, event,
					filter_expression_a, filter_a);
			/* We have passed ownership */
			filter_expression_a = NULL;
			filter_a = NULL;
			if (ret != LTTNG_OK) {
				goto error;
			}
			break;
		}
		case LTTNG_EVENT_PROBE:
		case LTTNG_EVENT_USERSPACE_PROBE:
		case LTTNG_EVENT_FUNCTION:
		case LTTNG_EVENT_FUNCTION_ENTRY:
		case LTTNG_EVENT_TRACEPOINT:
			ret = event_kernel_enable_event(kchan, event,
					filter_expression, filter);
			/* We have passed ownership */
			filter_expression = NULL;
			filter = NULL;
			if (ret != LTTNG_OK) {
				if (channel_created) {
					/* Let's not leak a useless channel. */
					kernel_destroy_channel(kchan);
				}
				goto error;
			}
			break;
		case LTTNG_EVENT_SYSCALL:
			ret = event_kernel_enable_event(kchan, event,
					filter_expression, filter);
			/* We have passed ownership */
			filter_expression = NULL;
			filter = NULL;
			if (ret != LTTNG_OK) {
				goto error;
			}
			break;
		default:
			ret = LTTNG_ERR_UNK;
			goto error;
		}

		kernel_wait_quiescent(kernel_tracer_fd);
		break;
	}
	case LTTNG_DOMAIN_UST:
	{
		struct ltt_ust_channel *uchan;
		struct ltt_ust_session *usess = session->ust_session;

		/*
		 * If a non-default channel has been created in the
		 * session, explicitly require that -c chan_name needs
		 * to be provided.
		 */
		if (usess->has_non_default_channel && channel_name[0] == '\0') {
			ret = LTTNG_ERR_NEED_CHANNEL_NAME;
			goto error;
		}

		/* Get channel from global UST domain */
		uchan = trace_ust_find_channel_by_name(usess->domain_global.channels,
				channel_name);
		if (uchan == NULL) {
			/* Create default channel */
			attr = channel_new_default_attr(LTTNG_DOMAIN_UST,
					usess->buffer_type);
			if (attr == NULL) {
				ret = LTTNG_ERR_FATAL;
				goto error;
			}
			if (lttng_strncpy(attr->name, channel_name,
					sizeof(attr->name))) {
				ret = LTTNG_ERR_INVALID;
				goto error;
			}

			ret = cmd_enable_channel(session, domain, attr, wpipe);
			if (ret != LTTNG_OK) {
				goto error;
			}

			/* Get the newly created channel reference back */
			uchan = trace_ust_find_channel_by_name(
					usess->domain_global.channels, channel_name);
		}

		if (uchan->domain != LTTNG_DOMAIN_UST && !internal_event) {
			/*
			 * Don't allow users to add UST events to channels which
			 * are assigned to a userspace subdomain (JUL, Log4J,
			 * Python, etc.).
			 */
			ret = LTTNG_ERR_INVALID_CHANNEL_DOMAIN;
			goto error;
		}

		if (!internal_event) {
			/*
			 * Ensure the event name is not reserved for internal
			 * use.
			 */
			ret = validate_ust_event_name(event->name);
			if (ret) {
				WARN("Userspace event name %s failed validation.",
						event->name);
				ret = LTTNG_ERR_INVALID_EVENT_NAME;
				goto error;
			}
		}

		/* At this point, the session and channel exist on the tracer */
		ret = event_ust_enable_tracepoint(usess, uchan, event,
				filter_expression, filter, exclusion,
				internal_event);
		/* We have passed ownership */
		filter_expression = NULL;
		filter = NULL;
		exclusion = NULL;
		if (ret == LTTNG_ERR_UST_EVENT_ENABLED) {
			goto already_enabled;
		} else if (ret != LTTNG_OK) {
			goto error;
		}
		break;
	}
	case LTTNG_DOMAIN_LOG4J:
	case LTTNG_DOMAIN_JUL:
	case LTTNG_DOMAIN_PYTHON:
	{
		const char *default_event_name, *default_chan_name;
		struct agent *agt;
		struct lttng_event uevent;
		struct lttng_domain tmp_dom;
		struct ltt_ust_session *usess = session->ust_session;

		if (!agent_tracing_is_enabled()) {
			DBG("Attempted to enable an event in an agent domain but the agent thread is not running");
			ret = LTTNG_ERR_AGENT_TRACING_DISABLED;
			goto error;
		}

		agt = trace_ust_find_agent(usess, domain->type);
		if (!agt) {
			agt = agent_create(domain->type);
			if (!agt) {
				ret = LTTNG_ERR_NOMEM;
				goto error;
			}
			agent_add(agt, usess->agents);
		}

		/* Create the default tracepoint. */
		memset(&uevent, 0, sizeof(uevent));
		uevent.type = LTTNG_EVENT_TRACEPOINT;
		uevent.loglevel_type = LTTNG_EVENT_LOGLEVEL_ALL;
		default_event_name = event_get_default_agent_ust_name(
				domain->type);
		if (!default_event_name) {
			ret = LTTNG_ERR_FATAL;
			goto error;
		}
		strncpy(uevent.name, default_event_name, sizeof(uevent.name));
		uevent.name[sizeof(uevent.name) - 1] = '\0';

		/*
		 * The domain type is changed because we are about to enable the
		 * default channel and event for the JUL domain that are hardcoded.
		 * This happens in the UST domain.
		 */
		memcpy(&tmp_dom, domain, sizeof(tmp_dom));
		tmp_dom.type = LTTNG_DOMAIN_UST;

		switch (domain->type) {
		case LTTNG_DOMAIN_LOG4J:
			default_chan_name = DEFAULT_LOG4J_CHANNEL_NAME;
			break;
		case LTTNG_DOMAIN_JUL:
			default_chan_name = DEFAULT_JUL_CHANNEL_NAME;
			break;
		case LTTNG_DOMAIN_PYTHON:
			default_chan_name = DEFAULT_PYTHON_CHANNEL_NAME;
			break;
		default:
			/* The switch/case we are in makes this impossible */
			assert(0);
		}

		{
			char *filter_expression_copy = NULL;
			struct lttng_filter_bytecode *filter_copy = NULL;

			if (filter) {
				const size_t filter_size = sizeof(
						struct lttng_filter_bytecode)
						+ filter->len;

				filter_copy = zmalloc(filter_size);
				if (!filter_copy) {
					ret = LTTNG_ERR_NOMEM;
					goto error;
				}
				memcpy(filter_copy, filter, filter_size);

				filter_expression_copy =
						strdup(filter_expression);
				if (!filter_expression) {
					ret = LTTNG_ERR_NOMEM;
				}

				if (!filter_expression_copy || !filter_copy) {
					free(filter_expression_copy);
					free(filter_copy);
					goto error;
				}
			}

			ret = cmd_enable_event_internal(session, &tmp_dom,
					(char *) default_chan_name,
					&uevent, filter_expression_copy,
					filter_copy, NULL, wpipe);
		}

		if (ret == LTTNG_ERR_UST_EVENT_ENABLED) {
			goto already_enabled;
		} else if (ret != LTTNG_OK) {
			goto error;
		}

		/* The wild card * means that everything should be enabled. */
		if (strncmp(event->name, "*", 1) == 0 && strlen(event->name) == 1) {
			ret = event_agent_enable_all(usess, agt, event, filter,
					filter_expression);
		} else {
			ret = event_agent_enable(usess, agt, event, filter,
					filter_expression);
		}
		filter = NULL;
		filter_expression = NULL;
		if (ret != LTTNG_OK) {
			goto error;
		}
		break;
	}
	default:
		ret = LTTNG_ERR_UND;
		goto error;
	}

	ret = LTTNG_OK;

already_enabled:
error:
	free(filter_expression);
	free(filter);
	channel_attr_destroy(attr);
	return ret;
}
/*
 * Command LTTNG_ENABLE_EVENT processed by the client thread.
 * We own filter, exclusion, and filter_expression.
 */
int cmd_enable_event(struct ltt_session *session, struct lttng_domain *domain,
		char *channel_name, struct lttng_event *event,
		char *filter_expression,
		struct lttng_filter_bytecode *filter,
		struct lttng_event_exclusion *exclusion,
		int wpipe)
{
	return _cmd_enable_event(session, domain, channel_name, event,
			filter_expression, filter, exclusion, wpipe, false);
}
/*
 * Enable an event which is internal to LTTng. An internal event should
 * never be made visible to clients and is immune to checks such as
 * reserved names.
 */
static int cmd_enable_event_internal(struct ltt_session *session,
		struct lttng_domain *domain,
		char *channel_name, struct lttng_event *event,
		char *filter_expression,
		struct lttng_filter_bytecode *filter,
		struct lttng_event_exclusion *exclusion,
		int wpipe)
{
	return _cmd_enable_event(session, domain, channel_name, event,
			filter_expression, filter, exclusion, wpipe, true);
}
/*
 * Command LTTNG_LIST_TRACEPOINTS processed by the client thread.
 */
ssize_t cmd_list_tracepoints(enum lttng_domain_type domain,
		struct lttng_event **events)
{
	int ret;
	ssize_t nb_events = 0;

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
		nb_events = kernel_list_events(kernel_tracer_fd, events);
		if (nb_events < 0) {
			ret = LTTNG_ERR_KERN_LIST_FAIL;
			goto error;
		}
		break;
	case LTTNG_DOMAIN_UST:
		nb_events = ust_app_list_events(events);
		if (nb_events < 0) {
			ret = LTTNG_ERR_UST_LIST_FAIL;
			goto error;
		}
		break;
	case LTTNG_DOMAIN_LOG4J:
	case LTTNG_DOMAIN_JUL:
	case LTTNG_DOMAIN_PYTHON:
		nb_events = agent_list_events(events, domain);
		if (nb_events < 0) {
			ret = LTTNG_ERR_UST_LIST_FAIL;
			goto error;
		}
		break;
	default:
		ret = LTTNG_ERR_UND;
		goto error;
	}

	return nb_events;

error:
	/* Return negative value to differentiate return code */
	return -ret;
}
/*
 * Command LTTNG_LIST_TRACEPOINT_FIELDS processed by the client thread.
 */
ssize_t cmd_list_tracepoint_fields(enum lttng_domain_type domain,
		struct lttng_event_field **fields)
{
	int ret;
	ssize_t nb_fields = 0;

	switch (domain) {
	case LTTNG_DOMAIN_UST:
		nb_fields = ust_app_list_event_fields(fields);
		if (nb_fields < 0) {
			ret = LTTNG_ERR_UST_LIST_FAIL;
			goto error;
		}
		break;
	case LTTNG_DOMAIN_KERNEL:
	default:	/* fall-through */
		ret = LTTNG_ERR_UND;
		goto error;
	}

	return nb_fields;

error:
	/* Return negative value to differentiate return code */
	return -ret;
}
ssize_t cmd_list_syscalls(struct lttng_event **events)
{
	return syscall_table_list(events);
}
/*
 * Command LTTNG_LIST_TRACKER_PIDS processed by the client thread.
 *
 * Called with session lock held.
 */
ssize_t cmd_list_tracker_pids(struct ltt_session *session,
		enum lttng_domain_type domain, int32_t **pids)
{
	int ret;
	ssize_t nr_pids = 0;

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
	{
		struct ltt_kernel_session *ksess;

		ksess = session->kernel_session;
		nr_pids = kernel_list_tracker_pids(ksess, pids);
		if (nr_pids < 0) {
			ret = LTTNG_ERR_KERN_LIST_FAIL;
			goto error;
		}
		break;
	}
	case LTTNG_DOMAIN_UST:
	{
		struct ltt_ust_session *usess;

		usess = session->ust_session;
		nr_pids = trace_ust_list_tracker_pids(usess, pids);
		if (nr_pids < 0) {
			ret = LTTNG_ERR_UST_LIST_FAIL;
			goto error;
		}
		break;
	}
	case LTTNG_DOMAIN_LOG4J:
	case LTTNG_DOMAIN_JUL:
	case LTTNG_DOMAIN_PYTHON:
	default:
		ret = LTTNG_ERR_UND;
		goto error;
	}

	return nr_pids;

error:
	/* Return negative value to differentiate return code */
	return -ret;
}
static int domain_mkdir(const struct consumer_output *output,
		const struct ltt_session *session,
		uid_t uid, gid_t gid)
{
	int ret;
	char *path = NULL;
	struct consumer_socket *socket;
	struct lttng_ht_iter iter;

	if (!output || !output->socks) {
		ERR("No consumer output found");
		ret = -1;
		goto end;
	}

	path = zmalloc(LTTNG_PATH_MAX * sizeof(char));
	if (!path) {
		ERR("Cannot allocate mkdir path");
		ret = -1;
		goto end;
	}

	ret = snprintf(path, LTTNG_PATH_MAX, "%s%s%s",
			session_get_base_path(session),
			output->chunk_path, output->subdir);
	if (ret < 0 || ret >= LTTNG_PATH_MAX) {
		ret = -1;
		goto end;
	}

	DBG("Domain mkdir %s for session %" PRIu64, path, session->id);
	rcu_read_lock();
	/*
	 * We have to iterate to find a socket, but we only need to send the
	 * rename command to one consumer, so we break after the first one.
	 */
	cds_lfht_for_each_entry(output->socks->ht, &iter.iter, socket, node.node) {
		pthread_mutex_lock(socket->lock);
		ret = consumer_mkdir(socket, session->id, output, path, uid, gid);
		pthread_mutex_unlock(socket->lock);
		if (ret) {
			ERR("Consumer mkdir");
			ret = -1;
			goto end_unlock;
		}
		break;
	}

	ret = 0;

end_unlock:
	rcu_read_unlock();
end:
	free(path);
	return ret;
}

static int session_mkdir(const struct ltt_session *session)
{
	int ret;
	struct consumer_output *output;
	uid_t uid;
	gid_t gid;

	/*
	 * Unsupported feature in lttng-relayd before 2.11, not an error since it
	 * is only needed for session rotation and the user will get an error
	 * when an actual rotation is attempted.
	 */
	if (session->consumer->type == CONSUMER_DST_NET &&
			session->consumer->relay_major_version == 2 &&
			session->consumer->relay_minor_version < 11) {
		ret = 0;
		goto end;
	}

	if (session->kernel_session) {
		output = session->kernel_session->consumer;
		uid = session->kernel_session->uid;
		gid = session->kernel_session->gid;
		ret = domain_mkdir(output, session, uid, gid);
		if (ret) {
			ERR("Mkdir kernel");
			goto end;
		}
	}

	if (session->ust_session) {
		output = session->ust_session->consumer;
		uid = session->ust_session->uid;
		gid = session->ust_session->gid;
		ret = domain_mkdir(output, session, uid, gid);
		if (ret) {
			ERR("Mkdir UST");
			goto end;
		}
	}

	ret = 0;
end:
	return ret;
}

/*
 * Command LTTNG_START_TRACE processed by the client thread.
 *
 * Called with session mutex held.
 */
int cmd_start_trace(struct ltt_session *session)
{
	int ret;
	unsigned long nb_chan = 0;
	struct ltt_kernel_session *ksession;
	struct ltt_ust_session *usess;

	/* Ease our life a bit ;) */
	ksession = session->kernel_session;
	usess = session->ust_session;

	/* Is the session already started? */
	if (session->active) {
		ret = LTTNG_ERR_TRACE_ALREADY_STARTED;
		goto error;
	}

	/*
	 * Starting a session without channel is useless since after that it's not
	 * possible to enable channel thus inform the client.
	 */
	if (usess && usess->domain_global.channels) {
		nb_chan += lttng_ht_get_count(usess->domain_global.channels);
	}
	if (ksession) {
		nb_chan += ksession->channel_count;
	}
	if (!nb_chan) {
		ret = LTTNG_ERR_NO_CHANNEL;
		goto error;
	}

	/*
	 * Record the timestamp of the first time the session is started for
	 * an eventual session rotation call.
	 */
	if (!session->has_been_started) {
		session->current_chunk_start_ts = time(NULL);
		if (session->current_chunk_start_ts == (time_t) -1) {
			PERROR("Failed to retrieve the \"%s\" session's start time",
					session->name);
			ret = LTTNG_ERR_FATAL;
			goto error;
		}

		if (!session->snapshot_mode && session->output_traces) {
			ret = session_mkdir(session);
			if (ret) {
				ERR("Failed to create the session directories");
				ret = LTTNG_ERR_CREATE_DIR_FAIL;
				goto error;
			}
		}
	}

	/* Kernel tracing */
	if (ksession != NULL) {
		DBG("Start kernel tracing session %s", session->name);
		ret = start_kernel_session(ksession, kernel_tracer_fd);
		if (ret != LTTNG_OK) {
			goto error;
		}
	}

	/* Flag session that trace should start automatically */
	if (usess) {
		/*
		 * Even though the start trace might fail, flag this session active so
		 * other applications coming in are started by default.
		 */
		usess->active = 1;

		ret = ust_app_start_trace_all(usess);
		if (ret < 0) {
			ret = LTTNG_ERR_UST_START_FAIL;
			goto error;
		}
	}

	/* Flag this after a successful start. */
	session->has_been_started = 1;
	session->active = 1;

	/*
	 * Clear the flag that indicates that a rotation was done while the
	 * session was stopped.
	 */
	session->rotated_after_last_stop = false;

	if (session->rotate_timer_period) {
		ret = timer_session_rotation_schedule_timer_start(session,
				session->rotate_timer_period);
		if (ret < 0) {
			ERR("Failed to enable rotate timer");
			ret = LTTNG_ERR_UNK;
			goto error;
		}
	}

	ret = LTTNG_OK;

error:
	return ret;
}

/*
 * Command LTTNG_STOP_TRACE processed by the client thread.
 */
int cmd_stop_trace(struct ltt_session *session)
{
	int ret;
	struct ltt_kernel_channel *kchan;
	struct ltt_kernel_session *ksession;
	struct ltt_ust_session *usess;
	bool error_occured = false;

	DBG("Begin stop session %s (id %" PRIu64 ")", session->name, session->id);

	ksession = session->kernel_session;
	usess = session->ust_session;

	/* Session is not active. Skip everything and inform the client. */
	if (!session->active) {
		ret = LTTNG_ERR_TRACE_ALREADY_STOPPED;
		goto error;
	}

	if (session->rotation_schedule_timer_enabled) {
		if (timer_session_rotation_schedule_timer_stop(
				session)) {
			ERR("Failed to stop the \"rotation schedule\" timer of session %s",
					session->name);
		}
	}

	/*
	 * A rotation is still ongoing. The check timer will continue to wait
	 * for the rotation to complete. When the rotation finally completes,
	 * a check will be performed to rename the "active" chunk to the
	 * expected "timestamp_begin-timestamp_end" format.
	 */
	if (session->current_archive_id > 0 &&
			session->rotation_state != LTTNG_ROTATION_STATE_ONGOING) {
		ret = rename_active_chunk(session);
		if (ret) {
			/*
			 * This error should not prevent the user from stopping
			 * the session. However, it will be reported at the end.
			 */
			error_occured = true;
		}
	}

	if (ksession && ksession->active) {
		DBG("Stop kernel tracing");

		ret = kernel_stop_session(ksession);
		if (ret < 0) {
			ret = LTTNG_ERR_KERN_STOP_FAIL;
			goto error;
		}

		kernel_wait_quiescent(kernel_tracer_fd);

		/* Flush metadata after stopping (if exists) */
		if (ksession->metadata_stream_fd >= 0) {
			ret = kernel_metadata_flush_buffer(ksession->metadata_stream_fd);
			if (ret < 0) {
				ERR("Kernel metadata flush failed");
			}
		}

		/* Flush all buffers after stopping */
		cds_list_for_each_entry(kchan, &ksession->channel_list.head, list) {
			ret = kernel_flush_buffer(kchan);
			if (ret < 0) {
				ERR("Kernel flush buffer error");
			}
		}

		ksession->active = 0;
		DBG("Kernel session stopped %s (id %" PRIu64 ")", session->name,
				session->id);
	}

	if (usess && usess->active) {
		/*
		 * Even though the stop trace might fail, flag this session inactive so
		 * other applications coming in are not started by default.
		 */
		usess->active = 0;

		ret = ust_app_stop_trace_all(usess);
		if (ret < 0) {
			ret = LTTNG_ERR_UST_STOP_FAIL;
			goto error;
		}
	}

	/* Flag inactive after a successful stop. */
	session->active = 0;
	ret = !error_occured ? LTTNG_OK : LTTNG_ERR_UNK;

error:
	return ret;
}

/*
 * Command LTTNG_SET_CONSUMER_URI processed by the client thread.
 */
int cmd_set_consumer_uri(struct ltt_session *session, size_t nb_uri,
		struct lttng_uri *uris)
{
	int ret;
	size_t i;
	struct ltt_kernel_session *ksess = session->kernel_session;
	struct ltt_ust_session *usess = session->ust_session;

	/* Can't set consumer URI if the session is active. */
	if (session->active) {
		ret = LTTNG_ERR_TRACE_ALREADY_STARTED;
		goto error;
	}

	/* Set the "global" consumer URIs */
	for (i = 0; i < nb_uri; i++) {
		ret = add_uri_to_consumer(session->consumer,
				&uris[i], 0, session->name);
		if (ret != LTTNG_OK) {
			goto error;
		}
	}

	/* Set UST session URIs */
	if (session->ust_session) {
		for (i = 0; i < nb_uri; i++) {
			ret = add_uri_to_consumer(
					session->ust_session->consumer,
					&uris[i], LTTNG_DOMAIN_UST,
					session->name);
			if (ret != LTTNG_OK) {
				goto error;
			}
		}
	}

	/* Set kernel session URIs */
	if (session->kernel_session) {
		for (i = 0; i < nb_uri; i++) {
			ret = add_uri_to_consumer(
					session->kernel_session->consumer,
					&uris[i], LTTNG_DOMAIN_KERNEL,
					session->name);
			if (ret != LTTNG_OK) {
				goto error;
			}
		}
	}

	/*
	 * Make sure to set the session in output mode after we set URI since a
	 * session can be created without URL (thus flagged in no output mode).
	 */
	session->output_traces = 1;
	if (ksess) {
		ksess->output_traces = 1;
	}
	if (usess) {
		usess->output_traces = 1;
	}

	ret = LTTNG_OK;

error:
	return ret;
}

/*
 * Command LTTNG_CREATE_SESSION processed by the client thread.
 */
int cmd_create_session_uri(char *name, struct lttng_uri *uris,
		size_t nb_uri, lttng_sock_cred *creds, unsigned int live_timer)
{
	int ret;
	struct ltt_session *session;

	/*
	 * Verify if the session already exist
	 *
	 * XXX: There is no need for the session lock list here since the caller
	 * (process_client_msg) is holding it. We might want to change that so a
	 * single command does not lock the entire session list.
	 */
	session = session_find_by_name(name);
	if (session != NULL) {
		ret = LTTNG_ERR_EXIST_SESS;
		goto find_error;
	}

	/* Create tracing session in the registry */
	ret = session_create(name, LTTNG_SOCK_GET_UID_CRED(creds),
			LTTNG_SOCK_GET_GID_CRED(creds));
	if (ret != LTTNG_OK) {
		goto session_error;
	}

	/*
	 * Get the newly created session pointer back
	 *
	 * XXX: There is no need for the session lock list here since the caller
	 * (process_client_msg) is holding it. We might want to change that so a
	 * single command does not lock the entire session list.
	 */
	session = session_find_by_name(name);
	assert(session);

	session->live_timer = live_timer;
	/* Create default consumer output for the session not yet created. */
	session->consumer = consumer_create_output(CONSUMER_DST_LOCAL);
	if (session->consumer == NULL) {
		ret = LTTNG_ERR_FATAL;
		goto consumer_error;
	}

	if (uris) {
		ret = cmd_set_consumer_uri(session, nb_uri, uris);
		if (ret != LTTNG_OK) {
			goto consumer_error;
		}
		session->output_traces = 1;
	} else {
		session->output_traces = 0;
		DBG2("Session %s created with no output", session->name);
	}

	session->consumer->enabled = 1;

	return LTTNG_OK;

consumer_error:
	session_destroy(session);
session_error:
find_error:
	return ret;
}

/*
 * Command LTTNG_CREATE_SESSION_SNAPSHOT processed by the client thread.
 */
int cmd_create_session_snapshot(char *name, struct lttng_uri *uris,
		size_t nb_uri, lttng_sock_cred *creds)
{
	int ret;
	struct ltt_session *session;
	struct snapshot_output *new_output = NULL;

	/*
	 * Create session in no output mode with URIs set to NULL. The uris we've
	 * received are for a default snapshot output, if any.
	 */
	ret = cmd_create_session_uri(name, NULL, 0, creds, 0);
	if (ret != LTTNG_OK) {
		goto error;
	}

	/* Get the newly created session pointer back. This should NEVER fail. */
	session = session_find_by_name(name);
	assert(session);

	/* Flag session for snapshot mode. */
	session->snapshot_mode = 1;

	/* Skip snapshot output creation if no URI is given. */
	if (nb_uri == 0) {
		goto end;
	}

	new_output = snapshot_output_alloc();
	if (!new_output) {
		ret = LTTNG_ERR_NOMEM;
		goto error_snapshot_alloc;
	}

	ret = snapshot_output_init_with_uri(DEFAULT_SNAPSHOT_MAX_SIZE, NULL,
			uris, nb_uri, session->consumer, new_output, &session->snapshot);
	if (ret < 0) {
		if (ret == -ENOMEM) {
			ret = LTTNG_ERR_NOMEM;
		} else {
			ret = LTTNG_ERR_INVALID;
		}
		goto error_snapshot;
	}

	rcu_read_lock();
	snapshot_add_output(&session->snapshot, new_output);
	rcu_read_unlock();

end:
	return LTTNG_OK;

error_snapshot:
	snapshot_output_destroy(new_output);
error_snapshot_alloc:
	session_destroy(session);
error:
	return ret;
}

/*
 * Command LTTNG_DESTROY_SESSION processed by the client thread.
 *
 * Called with session lock held.
 */
int cmd_destroy_session(struct ltt_session *session, int wpipe,
		struct notification_thread_handle *notification_thread_handle)
{
	int ret;
	struct ltt_ust_session *usess;
	struct ltt_kernel_session *ksess;

	usess = session->ust_session;
	ksess = session->kernel_session;

	DBG("Begin destroy session %s (id %" PRIu64 ")", session->name, session->id);

	if (session->rotation_pending_check_timer_enabled) {
		if (timer_session_rotation_pending_check_stop(session)) {
			ERR("Failed to stop the \"rotation pending check\" timer of session %s",
					session->name);
		}
	}

	if (session->rotation_schedule_timer_enabled) {
		if (timer_session_rotation_schedule_timer_stop(
				session)) {
			ERR("Failed to stop the \"rotation schedule\" timer of session %s",
					session->name);
		}
	}

	if (session->rotate_size) {
		unsubscribe_session_consumed_size_rotation(session, notification_thread_handle);
		session->rotate_size = 0;
	}

	/*
	 * The rename of the current chunk is performed at stop, but if we rotated
	 * the session after the previous stop command, we need to rename the
	 * new (and empty) chunk that was started in between.
	 */
	if (session->rotated_after_last_stop) {
		rename_active_chunk(session);
	}

	/* Clean kernel session teardown */
	kernel_destroy_session(ksess);

	/* UST session teardown */
	if (usess) {
		/* Close any relayd session */
		consumer_output_send_destroy_relayd(usess->consumer);

		/* Destroy every UST application related to this session. */
		ret = ust_app_destroy_trace_all(usess);
		if (ret) {
			ERR("Error in ust_app_destroy_trace_all");
		}

		/* Clean up the rest. */
		trace_ust_destroy_session(usess);
	}

	/*
	 * Must notify the kernel thread here to update its poll set in order to
	 * remove the channel(s)' fd just destroyed.
	 */
	ret = notify_thread_pipe(wpipe);
	if (ret < 0) {
		PERROR("write kernel poll pipe");
	}

	if (session->shm_path[0]) {
		/*
		 * When a session is created with an explicit shm_path,
		 * the consumer daemon will create its shared memory files
		 * at that location and will *not* unlink them. This is normal
		 * as the intention of that feature is to make it possible
		 * to retrieve the content of those files should a crash occur.
		 *
		 * To ensure the content of those files can be used, the
		 * sessiond daemon will replicate the content of the metadata
		 * cache in a metadata file.
		 *
		 * On clean-up, it is expected that the consumer daemon will
		 * unlink the shared memory files and that the session daemon
		 * will unlink the metadata file. Then, the session's directory
		 * in the shm path can be removed.
		 *
		 * Unfortunately, a flaw in the design of the sessiond's and
		 * consumerd's tear down of channels makes it impossible to
		 * determine when the sessiond _and_ the consumerd have both
		 * destroyed their representation of a channel. For one, the
		 * unlinking, close, and rmdir happen in deferred 'call_rcu'
		 * callbacks in both daemons.
		 *
		 * However, it is also impossible for the sessiond to know when
		 * the consumer daemon is done destroying its channel(s) since
		 * it occurs as a reaction to the closing of the channel's file
		 * descriptor. There is no resulting communication initiated
		 * from the consumerd to the sessiond to confirm that the
		 * operation is completed (and was successful).
		 *
		 * Until this is all fixed, the session daemon checks for the
		 * removal of the session's shm path which makes it possible
		 * to safely advertise a session as having been destroyed.
		 *
		 * Prior to this fix, it was not possible to reliably save
		 * a session making use of the --shm-path option, destroy it,
		 * and load it again. This is because the creation of the
		 * session would fail upon seeing the session's shm path
		 * already in existence.
		 *
		 * Note that none of the error paths in the check for the
		 * directory's existence return an error. This is normal
		 * as there isn't much that can be done. The session will
		 * be destroyed properly, except that we can't offer the
		 * guarantee that the same session can be re-created.
		 */
		current_completion_handler = &destroy_completion_handler.handler;
		ret = lttng_strncpy(destroy_completion_handler.shm_path,
				session->shm_path,
				sizeof(destroy_completion_handler.shm_path));
		assert(!ret);
	}
	ret = session_destroy(session);

	return ret;
}

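/*
 * Sketch (hypothetical, for illustration only): the completion handler
 * registered above is expected to be picked up by the client thread once the
 * destroy command's reply has been sent, along the lines of:
 *
 *	const struct cmd_completion_handler *handler =
 *			cmd_pop_completion_handler();
 *	if (handler) {
 *		enum lttng_error_code completion_code;
 *
 *		completion_code = handler->run(handler->data);
 *	}
 *
 * The "run" member name is an assumption; what matters is that the handler
 * pairs a callback with its data (here, wait_on_path() and the copied
 * shm_path).
 */
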
/*
 * Command LTTNG_REGISTER_CONSUMER processed by the client thread.
 */
int cmd_register_consumer(struct ltt_session *session,
		enum lttng_domain_type domain, const char *sock_path,
		struct consumer_data *cdata)
{
	int ret, sock;
	struct consumer_socket *socket = NULL;

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
	{
		struct ltt_kernel_session *ksess = session->kernel_session;

		assert(ksess);

		/* Can't register a consumer if there is already one */
		if (ksess->consumer_fds_sent != 0) {
			ret = LTTNG_ERR_KERN_CONSUMER_FAIL;
			goto error;
		}

		sock = lttcomm_connect_unix_sock(sock_path);
		if (sock < 0) {
			ret = LTTNG_ERR_CONNECT_FAIL;
			goto error;
		}
		cdata->cmd_sock = sock;

		socket = consumer_allocate_socket(&cdata->cmd_sock);
		if (socket == NULL) {
			ret = close(sock);
			if (ret < 0) {
				PERROR("close register consumer");
			}
			cdata->cmd_sock = -1;
			ret = LTTNG_ERR_FATAL;
			goto error;
		}

		socket->lock = zmalloc(sizeof(pthread_mutex_t));
		if (socket->lock == NULL) {
			PERROR("zmalloc pthread mutex");
			ret = LTTNG_ERR_FATAL;
			goto error;
		}
		pthread_mutex_init(socket->lock, NULL);
		socket->registered = 1;

		rcu_read_lock();
		consumer_add_socket(socket, ksess->consumer);
		rcu_read_unlock();

		pthread_mutex_lock(&cdata->pid_mutex);
		cdata->pid = -1;
		pthread_mutex_unlock(&cdata->pid_mutex);

		break;
	}
	default:
		/* TODO: Userspace tracing */
		ret = LTTNG_ERR_UND;
		goto error;
	}

	return LTTNG_OK;

error:
	if (socket) {
		consumer_destroy_socket(socket);
	}
	return ret;
}

/*
 * Command LTTNG_LIST_DOMAINS processed by the client thread.
 */
ssize_t cmd_list_domains(struct ltt_session *session,
		struct lttng_domain **domains)
{
	int ret, index = 0;
	ssize_t nb_dom = 0;
	struct agent *agt;
	struct lttng_ht_iter iter;

	if (session->kernel_session != NULL) {
		DBG3("Listing domains found kernel domain");
		nb_dom++;
	}

	if (session->ust_session != NULL) {
		DBG3("Listing domains found UST global domain");
		nb_dom++;

		rcu_read_lock();
		cds_lfht_for_each_entry(session->ust_session->agents->ht, &iter.iter,
				agt, node.node) {
			if (agt->being_used) {
				nb_dom++;
			}
		}
		rcu_read_unlock();
	}

	if (!nb_dom) {
		goto end;
	}

	*domains = zmalloc(nb_dom * sizeof(struct lttng_domain));
	if (*domains == NULL) {
		ret = LTTNG_ERR_FATAL;
		goto error;
	}

	if (session->kernel_session != NULL) {
		(*domains)[index].type = LTTNG_DOMAIN_KERNEL;

		/* Kernel session buffer type is always GLOBAL */
		(*domains)[index].buf_type = LTTNG_BUFFER_GLOBAL;

		index++;
	}

	if (session->ust_session != NULL) {
		(*domains)[index].type = LTTNG_DOMAIN_UST;
		(*domains)[index].buf_type = session->ust_session->buffer_type;
		index++;

		rcu_read_lock();
		cds_lfht_for_each_entry(session->ust_session->agents->ht, &iter.iter,
				agt, node.node) {
			if (agt->being_used) {
				(*domains)[index].type = agt->domain;
				(*domains)[index].buf_type = session->ust_session->buffer_type;
				index++;
			}
		}
		rcu_read_unlock();
	}

end:
	return nb_dom;

error:
	/* Return negative value to differentiate return code */
	return -ret;
}

/*
 * Command LTTNG_LIST_CHANNELS processed by the client thread.
 */
ssize_t cmd_list_channels(enum lttng_domain_type domain,
		struct ltt_session *session, struct lttng_channel **channels)
{
	ssize_t nb_chan = 0, payload_size = 0, ret;

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
		if (session->kernel_session != NULL) {
			nb_chan = session->kernel_session->channel_count;
		}
		DBG3("Number of kernel channels %zd", nb_chan);
		if (nb_chan <= 0) {
			ret = -LTTNG_ERR_KERN_CHAN_NOT_FOUND;
			goto end;
		}
		break;
	case LTTNG_DOMAIN_UST:
		if (session->ust_session != NULL) {
			rcu_read_lock();
			nb_chan = lttng_ht_get_count(
					session->ust_session->domain_global.channels);
			rcu_read_unlock();
		}
		DBG3("Number of UST global channels %zd", nb_chan);
		if (nb_chan < 0) {
			ret = -LTTNG_ERR_UST_CHAN_NOT_FOUND;
			goto end;
		}
		break;
	default:
		ret = -LTTNG_ERR_UND;
		goto end;
	}

	if (nb_chan > 0) {
		const size_t channel_size = sizeof(struct lttng_channel) +
				sizeof(struct lttng_channel_extended);
		struct lttng_channel_extended *channel_exts;

		payload_size = nb_chan * channel_size;
		*channels = zmalloc(payload_size);
		if (*channels == NULL) {
			ret = -LTTNG_ERR_FATAL;
			goto end;
		}

		channel_exts = ((void *) *channels) +
				(nb_chan * sizeof(struct lttng_channel));
		ret = list_lttng_channels(domain, session, *channels, channel_exts);
		if (ret != LTTNG_OK) {
			free(*channels);
			*channels = NULL;
			goto end;
		}
	} else {
		*channels = NULL;
	}

	ret = payload_size;
end:
	return ret;
}

/*
 * Command LTTNG_LIST_EVENTS processed by the client thread.
 */
ssize_t cmd_list_events(enum lttng_domain_type domain,
		struct ltt_session *session, char *channel_name,
		struct lttng_event **events, size_t *total_size)
{
	int ret = 0;
	ssize_t nb_event = 0;

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
		if (session->kernel_session != NULL) {
			nb_event = list_lttng_kernel_events(channel_name,
					session->kernel_session, events,
					total_size);
		}
		break;
	case LTTNG_DOMAIN_UST:
		if (session->ust_session != NULL) {
			nb_event = list_lttng_ust_global_events(channel_name,
					&session->ust_session->domain_global, events,
					total_size);
		}
		break;
	case LTTNG_DOMAIN_LOG4J:
	case LTTNG_DOMAIN_JUL:
	case LTTNG_DOMAIN_PYTHON:
		if (session->ust_session) {
			struct lttng_ht_iter iter;
			struct agent *agt;

			rcu_read_lock();
			cds_lfht_for_each_entry(session->ust_session->agents->ht,
					&iter.iter, agt, node.node) {
				if (agt->domain == domain) {
					nb_event = list_lttng_agent_events(
							agt, events,
							total_size);
					break;
				}
			}
			rcu_read_unlock();
		}
		break;
	default:
		ret = LTTNG_ERR_UND;
		goto error;
	}

	return nb_event;

error:
	/* Return negative value to differentiate return code */
	return -ret;
}

/*
 * Using the session list, fill an lttng_session array to send back to the
 * client for session listing.
 *
 * The session list lock MUST be acquired before calling this function. Use
 * session_lock_list() and session_unlock_list().
 */
void cmd_list_lttng_sessions(struct lttng_session *sessions, uid_t uid,
		gid_t gid)
{
	int ret;
	unsigned int i = 0;
	struct ltt_session *session;
	struct ltt_session_list *list = session_get_list();

	DBG("Getting all available sessions for UID %d GID %d",
			uid, gid);
	/*
	 * Iterate over session list and append data after the control struct in
	 * the buffer.
	 */
	cds_list_for_each_entry(session, &list->head, list) {
		/*
		 * Only list the sessions the user can control.
		 */
		if (!session_access_ok(session, uid, gid)) {
			continue;
		}

		struct ltt_kernel_session *ksess = session->kernel_session;
		struct ltt_ust_session *usess = session->ust_session;

		if (session->consumer->type == CONSUMER_DST_NET ||
				(ksess && ksess->consumer->type == CONSUMER_DST_NET) ||
				(usess && usess->consumer->type == CONSUMER_DST_NET)) {
			ret = build_network_session_path(sessions[i].path,
					sizeof(sessions[i].path), session);
		} else {
			ret = snprintf(sessions[i].path, sizeof(sessions[i].path), "%s",
					session->consumer->dst.session_root_path);
		}
		if (ret < 0) {
			PERROR("snprintf session path");
			continue;
		}

		strncpy(sessions[i].name, session->name, NAME_MAX);
		sessions[i].name[NAME_MAX - 1] = '\0';
		sessions[i].enabled = session->active;
		sessions[i].snapshot_mode = session->snapshot_mode;
		sessions[i].live_timer_interval = session->live_timer;
		i++;
	}
}

/*
 * Command LTTNG_DATA_PENDING returning 0 if the data is NOT pending meaning
 * ready for trace analysis (or any kind of reader) or else 1 for pending data.
 */
int cmd_data_pending(struct ltt_session *session)
{
	int ret;
	struct ltt_kernel_session *ksess = session->kernel_session;
	struct ltt_ust_session *usess = session->ust_session;

	DBG("Data pending for session %s", session->name);

	/* Session MUST be stopped to ask for data availability. */
	if (session->active) {
		ret = LTTNG_ERR_SESSION_STARTED;
		goto error;
	} else {
		/*
		 * If stopped, just make sure we've started before else the above call
		 * will always send that there is data pending.
		 *
		 * The consumer assumes that when the data pending command is received,
		 * the trace has been started before or else no output data is written
		 * by the streams which is a condition for data pending. So, it is
		 * *VERY* important that we don't ask the consumer before a start
		 * command.
		 */
		if (!session->has_been_started) {
			ret = 0;
			goto error;
		}
	}

	/* A rotation is still pending, we have to wait. */
	if (session->rotation_state == LTTNG_ROTATION_STATE_ONGOING) {
		DBG("Rotate still pending for session %s", session->name);
		ret = 1;
		goto error;
	}

	if (ksess && ksess->consumer) {
		ret = consumer_is_data_pending(ksess->id, ksess->consumer);
		if (ret == 1) {
			/* Data is still being extracted for the kernel. */
			goto error;
		}
	}

	if (usess && usess->consumer) {
		ret = consumer_is_data_pending(usess->id, usess->consumer);
		if (ret == 1) {
			/* Data is still being extracted for the userspace tracer. */
			goto error;
		}
	}

	/* Data is ready to be read by a viewer */
	ret = 0;

error:
	return ret;
}

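/*
 * Usage sketch (not part of this file; liblttng-ctl side): a reader is
 * expected to stop the session and poll lttng_data_pending() until it
 * reports 0 before consuming the trace, e.g.:
 *
 *	while (lttng_data_pending("my-session") == 1) {
 *		usleep(100000);
 *	}
 *
 * The session name and polling delay above are hypothetical.
 */
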
/*
 * Command LTTNG_SNAPSHOT_ADD_OUTPUT from the lttng ctl library.
 *
 * Return LTTNG_OK on success or else a LTTNG_ERR code.
 */
int cmd_snapshot_add_output(struct ltt_session *session,
		struct lttng_snapshot_output *output, uint32_t *id)
{
	int ret;
	struct snapshot_output *new_output;

	DBG("Cmd snapshot add output for session %s", session->name);

	/*
	 * Can't create an output if the session is not set in no-output mode.
	 */
	if (session->output_traces) {
		ret = LTTNG_ERR_NOT_SNAPSHOT_SESSION;
		goto error;
	}

	/* Only one output is allowed until we have the "tee" feature. */
	if (session->snapshot.nb_output == 1) {
		ret = LTTNG_ERR_SNAPSHOT_OUTPUT_EXIST;
		goto error;
	}

	new_output = snapshot_output_alloc();
	if (!new_output) {
		ret = LTTNG_ERR_NOMEM;
		goto error;
	}

	ret = snapshot_output_init(output->max_size, output->name,
			output->ctrl_url, output->data_url, session->consumer, new_output,
			&session->snapshot);
	if (ret < 0) {
		if (ret == -ENOMEM) {
			ret = LTTNG_ERR_NOMEM;
		} else {
			ret = LTTNG_ERR_INVALID;
		}
		goto free_error;
	}

	rcu_read_lock();
	snapshot_add_output(&session->snapshot, new_output);
	if (id) {
		*id = new_output->id;
	}
	rcu_read_unlock();

	return LTTNG_OK;

free_error:
	snapshot_output_destroy(new_output);
error:
	return ret;
}

/*
 * Command LTTNG_SNAPSHOT_DEL_OUTPUT from lib lttng ctl.
 *
 * Return LTTNG_OK on success or else a LTTNG_ERR code.
 */
int cmd_snapshot_del_output(struct ltt_session *session,
		struct lttng_snapshot_output *output)
{
	int ret;
	struct snapshot_output *sout = NULL;

	rcu_read_lock();

	/*
	 * Permission denied to delete an output if the session is not
	 * set in no output mode.
	 */
	if (session->output_traces) {
		ret = LTTNG_ERR_NOT_SNAPSHOT_SESSION;
		goto error;
	}

	if (output->id) {
		DBG("Cmd snapshot del output id %" PRIu32 " for session %s", output->id,
				session->name);
		sout = snapshot_find_output_by_id(output->id, &session->snapshot);
	} else if (*output->name != '\0') {
		DBG("Cmd snapshot del output name %s for session %s", output->name,
				session->name);
		sout = snapshot_find_output_by_name(output->name, &session->snapshot);
	}
	if (!sout) {
		ret = LTTNG_ERR_INVALID;
		goto error;
	}

	snapshot_delete_output(&session->snapshot, sout);
	snapshot_output_destroy(sout);
	ret = LTTNG_OK;

error:
	rcu_read_unlock();
	return ret;
}

/*
 * Command LTTNG_SNAPSHOT_LIST_OUTPUT from lib lttng ctl.
 *
 * If no output is available, outputs is untouched and 0 is returned.
 *
 * Return the size of the newly allocated outputs or a negative LTTNG_ERR code.
 */
ssize_t cmd_snapshot_list_outputs(struct ltt_session *session,
		struct lttng_snapshot_output **outputs)
{
	int ret, idx = 0;
	struct lttng_snapshot_output *list = NULL;
	struct lttng_ht_iter iter;
	struct snapshot_output *output;

	DBG("Cmd snapshot list outputs for session %s", session->name);

	/*
	 * Permission denied to list the outputs if the session is not
	 * set in no output mode.
	 */
	if (session->output_traces) {
		ret = -LTTNG_ERR_NOT_SNAPSHOT_SESSION;
		goto end;
	}

	if (session->snapshot.nb_output == 0) {
		ret = 0;
		goto end;
	}

	list = zmalloc(session->snapshot.nb_output * sizeof(*list));
	if (!list) {
		ret = -LTTNG_ERR_NOMEM;
		goto end;
	}

	/* Copy list from session to the new list object. */
	rcu_read_lock();
	cds_lfht_for_each_entry(session->snapshot.output_ht->ht, &iter.iter,
			output, node.node) {
		assert(output->consumer);
		list[idx].id = output->id;
		list[idx].max_size = output->max_size;
		if (lttng_strncpy(list[idx].name, output->name,
				sizeof(list[idx].name))) {
			ret = -LTTNG_ERR_INVALID;
			goto error;
		}
		if (output->consumer->type == CONSUMER_DST_LOCAL) {
			if (lttng_strncpy(list[idx].ctrl_url,
					output->consumer->dst.session_root_path,
					sizeof(list[idx].ctrl_url))) {
				ret = -LTTNG_ERR_INVALID;
				goto error;
			}
		} else {
			/* Control URI. */
			ret = uri_to_str_url(&output->consumer->dst.net.control,
					list[idx].ctrl_url, sizeof(list[idx].ctrl_url));
			if (ret < 0) {
				ret = -LTTNG_ERR_NOMEM;
				goto error;
			}

			/* Data URI. */
			ret = uri_to_str_url(&output->consumer->dst.net.data,
					list[idx].data_url, sizeof(list[idx].data_url));
			if (ret < 0) {
				ret = -LTTNG_ERR_NOMEM;
				goto error;
			}
		}
		idx++;
	}

	*outputs = list;
	list = NULL;
	ret = session->snapshot.nb_output;
error:
	rcu_read_unlock();
	free(list);
end:
	return ret;
}

/*
 * Check if we can regenerate the metadata for this session.
 * Only kernel, UST per-uid and non-live sessions are supported.
 *
 * Return 0 if the metadata can be generated, a LTTNG_ERR code otherwise.
 */
static int check_regenerate_metadata_support(struct ltt_session *session)
{
	int ret;

	if (session->live_timer != 0) {
		ret = LTTNG_ERR_LIVE_SESSION;
		goto end;
	}
	if (!session->active) {
		ret = LTTNG_ERR_SESSION_NOT_STARTED;
		goto end;
	}
	if (session->ust_session) {
		switch (session->ust_session->buffer_type) {
		case LTTNG_BUFFER_PER_UID:
			break;
		case LTTNG_BUFFER_PER_PID:
			ret = LTTNG_ERR_PER_PID_SESSION;
			goto end;
		default:
			ret = LTTNG_ERR_UNK;
			goto end;
		}
	}
	if (session->consumer->type == CONSUMER_DST_NET &&
			session->consumer->relay_minor_version < 8) {
		ret = LTTNG_ERR_RELAYD_VERSION_FAIL;
		goto end;
	}
	ret = 0;

end:
	return ret;
}

static int clear_metadata_file(int fd)
{
	int ret;
	off_t lseek_ret;

	lseek_ret = lseek(fd, 0, SEEK_SET);
	if (lseek_ret < 0) {
		PERROR("lseek");
		ret = -1;
		goto end;
	}

	ret = ftruncate(fd, 0);
	if (ret < 0) {
		PERROR("ftruncate");
	}

end:
	return ret;
}

static int ust_regenerate_metadata(struct ltt_ust_session *usess)
{
	int ret = 0;
	struct buffer_reg_uid *uid_reg = NULL;
	struct buffer_reg_session *session_reg = NULL;

	rcu_read_lock();
	cds_list_for_each_entry(uid_reg, &usess->buffer_reg_uid_list, lnode) {
		struct ust_registry_session *registry;
		struct ust_registry_channel *chan;
		struct lttng_ht_iter iter_chan;

		session_reg = uid_reg->registry;
		registry = session_reg->reg.ust;

		pthread_mutex_lock(&registry->lock);
		registry->metadata_len_sent = 0;
		memset(registry->metadata, 0, registry->metadata_alloc_len);
		registry->metadata_len = 0;
		registry->metadata_version++;
		if (registry->metadata_fd > 0) {
			/* Clear the metadata file's content. */
			ret = clear_metadata_file(registry->metadata_fd);
			if (ret) {
				pthread_mutex_unlock(&registry->lock);
				goto end;
			}
		}

		ret = ust_metadata_session_statedump(registry, NULL,
				registry->major, registry->minor);
		if (ret) {
			pthread_mutex_unlock(&registry->lock);
			ERR("Failed to generate session metadata (err = %d)",
					ret);
			goto end;
		}
		cds_lfht_for_each_entry(registry->channels->ht, &iter_chan.iter,
				chan, node.node) {
			struct ust_registry_event *event;
			struct lttng_ht_iter iter_event;

			ret = ust_metadata_channel_statedump(registry, chan);
			if (ret) {
				pthread_mutex_unlock(&registry->lock);
				ERR("Failed to generate channel metadata "
						"(err = %d)", ret);
				goto end;
			}
			cds_lfht_for_each_entry(chan->ht->ht, &iter_event.iter,
					event, node.node) {
				ret = ust_metadata_event_statedump(registry,
						chan, event);
				if (ret) {
					pthread_mutex_unlock(&registry->lock);
					ERR("Failed to generate event metadata "
							"(err = %d)", ret);
					goto end;
				}
			}
		}
		pthread_mutex_unlock(&registry->lock);
	}

end:
	rcu_read_unlock();
	return ret;
}

/*
 * Command LTTNG_REGENERATE_METADATA from the lttng-ctl library.
 *
 * Ask the consumer to truncate the existing metadata file(s) and
 * then regenerate the metadata. Live and per-pid sessions are not
 * supported and return an error.
 *
 * Return 0 on success or else a LTTNG_ERR code.
 */
int cmd_regenerate_metadata(struct ltt_session *session)
{
	int ret;

	ret = check_regenerate_metadata_support(session);
	if (ret) {
		goto end;
	}

	if (session->kernel_session) {
		ret = kernctl_session_regenerate_metadata(
				session->kernel_session->fd);
		if (ret < 0) {
			ERR("Failed to regenerate the kernel metadata");
			goto end;
		}
	}

	if (session->ust_session) {
		ret = ust_regenerate_metadata(session->ust_session);
		if (ret < 0) {
			ERR("Failed to regenerate the UST metadata");
			goto end;
		}
	}
	DBG("Cmd metadata regenerate for session %s", session->name);
	ret = LTTNG_OK;

end:
	return ret;
}

/*
 * Command LTTNG_REGENERATE_STATEDUMP from the lttng-ctl library.
 *
 * Ask the tracer to regenerate a new statedump.
 *
 * Return 0 on success or else a LTTNG_ERR code.
 */
int cmd_regenerate_statedump(struct ltt_session *session)
{
	int ret;

	if (!session->active) {
		ret = LTTNG_ERR_SESSION_NOT_STARTED;
		goto end;
	}

	if (session->kernel_session) {
		ret = kernctl_session_regenerate_statedump(
				session->kernel_session->fd);
		/*
		 * Currently, the statedump in kernel can only fail if out
		 * of memory.
		 */
		if (ret < 0) {
			if (ret == -ENOMEM) {
				ret = LTTNG_ERR_REGEN_STATEDUMP_NOMEM;
			} else {
				ret = LTTNG_ERR_REGEN_STATEDUMP_FAIL;
			}
			ERR("Failed to regenerate the kernel statedump");
			goto end;
		}
	}

	if (session->ust_session) {
		ret = ust_app_regenerate_statedump_all(session->ust_session);
		/*
		 * Currently, the statedump in UST always returns 0.
		 */
		if (ret < 0) {
			ret = LTTNG_ERR_REGEN_STATEDUMP_FAIL;
			ERR("Failed to regenerate the UST statedump");
			goto end;
		}
	}
	DBG("Cmd regenerate statedump for session %s", session->name);
	ret = LTTNG_OK;

end:
	return ret;
}

int cmd_register_trigger(struct command_ctx *cmd_ctx, int sock,
		struct notification_thread_handle *notification_thread)
{
	int ret;
	size_t trigger_len;
	ssize_t sock_recv_len;
	struct lttng_trigger *trigger = NULL;
	struct lttng_buffer_view view;
	struct lttng_dynamic_buffer trigger_buffer;

	lttng_dynamic_buffer_init(&trigger_buffer);
	trigger_len = (size_t) cmd_ctx->lsm->u.trigger.length;
	ret = lttng_dynamic_buffer_set_size(&trigger_buffer, trigger_len);
	if (ret) {
		ret = LTTNG_ERR_NOMEM;
		goto end;
	}

	sock_recv_len = lttcomm_recv_unix_sock(sock, trigger_buffer.data,
			trigger_len);
	if (sock_recv_len < 0 || sock_recv_len != trigger_len) {
		ERR("Failed to receive \"register trigger\" command payload");
		/* TODO: should this be a new error enum ? */
		ret = LTTNG_ERR_INVALID_TRIGGER;
		goto end;
	}

	view = lttng_buffer_view_from_dynamic_buffer(&trigger_buffer, 0, -1);
	if (lttng_trigger_create_from_buffer(&view, &trigger) !=
			trigger_len) {
		ERR("Invalid trigger payload received in \"register trigger\" command");
		ret = LTTNG_ERR_INVALID_TRIGGER;
		goto end;
	}

	ret = notification_thread_command_register_trigger(notification_thread,
			trigger);
	/* Ownership of trigger was transferred. */
	trigger = NULL;
end:
	lttng_trigger_destroy(trigger);
	lttng_dynamic_buffer_reset(&trigger_buffer);
	return ret;
}

int cmd_unregister_trigger(struct command_ctx *cmd_ctx, int sock,
		struct notification_thread_handle *notification_thread)
{
	int ret;
	size_t trigger_len;
	ssize_t sock_recv_len;
	struct lttng_trigger *trigger = NULL;
	struct lttng_buffer_view view;
	struct lttng_dynamic_buffer trigger_buffer;

	lttng_dynamic_buffer_init(&trigger_buffer);
	trigger_len = (size_t) cmd_ctx->lsm->u.trigger.length;
	ret = lttng_dynamic_buffer_set_size(&trigger_buffer, trigger_len);
	if (ret) {
		ret = LTTNG_ERR_NOMEM;
		goto end;
	}

	sock_recv_len = lttcomm_recv_unix_sock(sock, trigger_buffer.data,
			trigger_len);
	if (sock_recv_len < 0 || sock_recv_len != trigger_len) {
		ERR("Failed to receive \"unregister trigger\" command payload");
		/* TODO: should this be a new error enum ? */
		ret = LTTNG_ERR_INVALID_TRIGGER;
		goto end;
	}

	view = lttng_buffer_view_from_dynamic_buffer(&trigger_buffer, 0, -1);
	if (lttng_trigger_create_from_buffer(&view, &trigger) !=
			trigger_len) {
		ERR("Invalid trigger payload received in \"unregister trigger\" command");
		ret = LTTNG_ERR_INVALID_TRIGGER;
		goto end;
	}

	ret = notification_thread_command_unregister_trigger(notification_thread,
			trigger);
end:
	lttng_trigger_destroy(trigger);
	lttng_dynamic_buffer_reset(&trigger_buffer);
	return ret;
}

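/*
 * Note on the wire format shared by cmd_register_trigger() and
 * cmd_unregister_trigger(): the command header (lsm->u.trigger.length)
 * announces the size of the serialized trigger, which is then received in
 * full on the client socket and deserialized with
 * lttng_trigger_create_from_buffer(). A short read, or a payload whose
 * deserialized size differs from the announced length, is rejected as an
 * invalid trigger.
 */
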
/*
 * Send relayd sockets from snapshot output to consumer. Ignore request if the
 * snapshot output is *not* set with a remote destination.
 *
 * Return LTTNG_OK on success or a LTTNG_ERR code.
 */
static enum lttng_error_code set_relayd_for_snapshot(
		struct consumer_output *consumer,
		struct snapshot_output *snap_output,
		struct ltt_session *session)
{
	enum lttng_error_code status = LTTNG_OK;
	struct lttng_ht_iter iter;
	struct consumer_socket *socket;

	assert(consumer);
	assert(snap_output);
	assert(session);

	DBG2("Set relayd object from snapshot output");

	/* Ignore if snapshot consumer output is not network. */
	if (snap_output->consumer->type != CONSUMER_DST_NET) {
		goto error;
	}

	/*
	 * For each consumer socket, create and send the relayd object of the
	 * snapshot output.
	 */
	rcu_read_lock();
	cds_lfht_for_each_entry(snap_output->consumer->socks->ht, &iter.iter,
			socket, node.node) {
		pthread_mutex_lock(socket->lock);
		status = send_consumer_relayd_sockets(0, session->id,
				snap_output->consumer, socket,
				session->name, session->hostname,
				session->live_timer);
		pthread_mutex_unlock(socket->lock);
		if (status != LTTNG_OK) {
			rcu_read_unlock();
			goto error;
		}
	}
	rcu_read_unlock();

error:
	return status;
}

/*
 * Record a kernel snapshot.
 *
 * Return LTTNG_OK on success or a LTTNG_ERR code.
 */
static enum lttng_error_code record_kernel_snapshot(struct ltt_kernel_session *ksess,
		struct snapshot_output *output, struct ltt_session *session,
		int wait, uint64_t nb_packets_per_stream)
{
	int ret;
	enum lttng_error_code status;

	assert(ksess);
	assert(output);
	assert(session);

	/*
	 * Copy kernel session sockets so we can communicate with the right
	 * consumer for the snapshot record command.
	 */
	ret = consumer_copy_sockets(output->consumer, ksess->consumer);
	if (ret < 0) {
		status = LTTNG_ERR_NOMEM;
		goto error;
	}

	status = set_relayd_for_snapshot(ksess->consumer, output, session);
	if (status != LTTNG_OK) {
		goto error_snapshot;
	}

	status = kernel_snapshot_record(ksess, output, wait, nb_packets_per_stream);
	if (status != LTTNG_OK) {
		goto error_snapshot;
	}

	goto end;

error_snapshot:
	/* Clean up copied sockets so this output can use some other later on. */
	consumer_destroy_output_sockets(output->consumer);
error:
end:
	return status;
}

/*
 * Record a UST snapshot.
 *
 * Returns LTTNG_OK on success or a LTTNG_ERR error code.
 */
static enum lttng_error_code record_ust_snapshot(struct ltt_ust_session *usess,
		struct snapshot_output *output, struct ltt_session *session,
		int wait, uint64_t nb_packets_per_stream)
{
	int ret;
	enum lttng_error_code status;

	assert(usess);
	assert(output);
	assert(session);

	/*
	 * Copy UST session sockets so we can communicate with the right
	 * consumer for the snapshot record command.
	 */
	ret = consumer_copy_sockets(output->consumer, usess->consumer);
	if (ret < 0) {
		status = LTTNG_ERR_NOMEM;
		goto error;
	}

	status = set_relayd_for_snapshot(usess->consumer, output, session);
	if (status != LTTNG_OK) {
		goto error_snapshot;
	}

	status = ust_app_snapshot_record(usess, output, wait, nb_packets_per_stream);
	if (status != LTTNG_OK) {
		goto error_snapshot;
	}

	goto end;

error_snapshot:
	/* Clean up copied sockets so this output can use some other later on. */
	consumer_destroy_output_sockets(output->consumer);
error:
end:
	return status;
}

static uint64_t get_session_size_one_more_packet_per_stream(struct ltt_session *session,
		uint64_t cur_nr_packets)
{
	uint64_t tot_size = 0;

	if (session->kernel_session) {
		struct ltt_kernel_channel *chan;
		struct ltt_kernel_session *ksess = session->kernel_session;

		cds_list_for_each_entry(chan, &ksess->channel_list.head, list) {
			if (cur_nr_packets >= chan->channel->attr.num_subbuf) {
				/*
				 * Don't take channel into account if we
				 * already grab all its packets.
				 */
				continue;
			}
			tot_size += chan->channel->attr.subbuf_size
					* chan->stream_count;
		}
	}

	if (session->ust_session) {
		struct ltt_ust_session *usess = session->ust_session;

		tot_size += ust_app_get_size_one_more_packet_per_stream(usess,
				cur_nr_packets);
	}

	return tot_size;
}

/*
 * Calculate the number of packets we can grab from each stream that
 * fits within the overall snapshot max size.
 *
 * Returns -1 on error, 0 means infinite number of packets, else > 0 is
 * the number of packets per stream.
 *
 * TODO: this approach is not perfect: we consider the worst case
 * (packet filling the sub-buffers) as an upper bound, but we could do
 * better if we do this calculation while we actually grab the packet
 * content: we would know how much padding we don't actually store into
 * the snapshot.
 *
 * This algorithm is currently bounded by the number of packets per
 * stream.
 *
 * Since we call this algorithm before actually grabbing the data, it's
 * an approximation: for instance, applications could appear/disappear
 * in between this call and actually grabbing data.
 */
static int64_t get_session_nb_packets_per_stream(struct ltt_session *session, uint64_t max_size)
{
	int64_t size_left;
	uint64_t cur_nb_packets = 0;

	if (!max_size) {
		return 0;	/* Infinite */
	}

	size_left = max_size;
	for (;;) {
		uint64_t one_more_packet_tot_size;

		one_more_packet_tot_size = get_session_size_one_more_packet_per_stream(session,
				cur_nb_packets);
		if (!one_more_packet_tot_size) {
			/* We are already grabbing all packets. */
			break;
		}
		size_left -= one_more_packet_tot_size;
		if (size_left < 0) {
			break;
		}
		cur_nb_packets++;
	}
	if (!cur_nb_packets) {
		/* Not enough room to grab one packet of each stream, error. */
		return -1;
	}

	return cur_nb_packets;
}

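/*
 * Worked example (hypothetical numbers): with a single kernel channel of
 * 4 sub-buffers of 1 MiB each and 4 streams, granting one more packet per
 * stream costs 4 MiB. With max_size = 10 MiB, the loop above accepts two
 * rounds (8 MiB), rejects the third (12 MiB > 10 MiB) and returns 2. With
 * max_size = 2 MiB, not even one packet per stream fits and -1 is returned.
 */
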
/*
 * Command LTTNG_SNAPSHOT_RECORD from lib lttng ctl.
 *
 * The wait parameter is ignored so this call always waits for the snapshot to
 * complete before returning.
 *
 * Return LTTNG_OK on success or else a LTTNG_ERR code.
 */
int cmd_snapshot_record(struct ltt_session *session,
		struct lttng_snapshot_output *output, int wait)
{
	int ret;
	enum lttng_error_code cmd_ret = LTTNG_OK;
	unsigned int use_tmp_output = 0;
	struct snapshot_output tmp_output;
	unsigned int snapshot_success = 0;
	char datetime[16];

	assert(session);
	assert(output);

	DBG("Cmd snapshot record for session %s", session->name);

	/* Get the datetime for the snapshot output directory. */
	ret = utils_get_current_time_str("%Y%m%d-%H%M%S", datetime,
			sizeof(datetime));
	if (!ret) {
		cmd_ret = LTTNG_ERR_INVALID;
		goto error;
	}

	/*
	 * Permission denied to create an output if the session is not
	 * set in no output mode.
	 */
	if (session->output_traces) {
		cmd_ret = LTTNG_ERR_NOT_SNAPSHOT_SESSION;
		goto error;
	}

	/* The session needs to be started at least once. */
	if (!session->has_been_started) {
		cmd_ret = LTTNG_ERR_START_SESSION_ONCE;
		goto error;
	}

	/* Use temporary output for the session. */
	if (*output->ctrl_url != '\0') {
		ret = snapshot_output_init(output->max_size, output->name,
				output->ctrl_url, output->data_url, session->consumer,
				&tmp_output, NULL);
		if (ret < 0) {
			if (ret == -ENOMEM) {
				cmd_ret = LTTNG_ERR_NOMEM;
			} else {
				cmd_ret = LTTNG_ERR_INVALID;
			}
			goto error;
		}
		/* Use the global session count for the temporary snapshot. */
		tmp_output.nb_snapshot = session->snapshot.nb_snapshot;

		/* Use the global datetime */
		memcpy(tmp_output.datetime, datetime, sizeof(datetime));
		use_tmp_output = 1;
	}

	if (use_tmp_output) {
		int64_t nb_packets_per_stream;

		nb_packets_per_stream = get_session_nb_packets_per_stream(session,
				tmp_output.max_size);
		if (nb_packets_per_stream < 0) {
			cmd_ret = LTTNG_ERR_MAX_SIZE_INVALID;
			goto error;
		}

		if (session->kernel_session) {
			cmd_ret = record_kernel_snapshot(session->kernel_session,
					&tmp_output, session,
					wait, nb_packets_per_stream);
			if (cmd_ret != LTTNG_OK) {
				goto error;
			}
		}

		if (session->ust_session) {
			cmd_ret = record_ust_snapshot(session->ust_session,
					&tmp_output, session,
					wait, nb_packets_per_stream);
			if (cmd_ret != LTTNG_OK) {
				goto error;
			}
		}

		snapshot_success = 1;
	} else {
		struct snapshot_output *sout;
		struct lttng_ht_iter iter;

		rcu_read_lock();
		cds_lfht_for_each_entry(session->snapshot.output_ht->ht,
				&iter.iter, sout, node.node) {
			int64_t nb_packets_per_stream;

			/*
			 * Make a local copy of the output and assign the possible
			 * temporary value given by the caller.
			 */
			memset(&tmp_output, 0, sizeof(tmp_output));
			memcpy(&tmp_output, sout, sizeof(tmp_output));

			if (output->max_size != (uint64_t) -1ULL) {
				tmp_output.max_size = output->max_size;
			}

			nb_packets_per_stream = get_session_nb_packets_per_stream(session,
					tmp_output.max_size);
			if (nb_packets_per_stream < 0) {
				cmd_ret = LTTNG_ERR_MAX_SIZE_INVALID;
				rcu_read_unlock();
				goto error;
			}

			/* Use temporary name. */
			if (*output->name != '\0') {
				if (lttng_strncpy(tmp_output.name, output->name,
						sizeof(tmp_output.name))) {
					cmd_ret = LTTNG_ERR_INVALID;
					rcu_read_unlock();
					goto error;
				}
			}

			tmp_output.nb_snapshot = session->snapshot.nb_snapshot;
			memcpy(tmp_output.datetime, datetime, sizeof(datetime));

			if (session->kernel_session) {
				cmd_ret = record_kernel_snapshot(session->kernel_session,
						&tmp_output, session,
						wait, nb_packets_per_stream);
				if (cmd_ret != LTTNG_OK) {
					rcu_read_unlock();
					goto error;
				}
			}

			if (session->ust_session) {
				cmd_ret = record_ust_snapshot(session->ust_session,
						&tmp_output, session,
						wait, nb_packets_per_stream);
				if (cmd_ret != LTTNG_OK) {
					rcu_read_unlock();
					goto error;
				}
			}
			snapshot_success = 1;
		}
		rcu_read_unlock();
	}

	if (snapshot_success) {
		session->snapshot.nb_snapshot++;
	} else {
		cmd_ret = LTTNG_ERR_SNAPSHOT_FAIL;
	}

error:
	return cmd_ret;
}

/*
 * Command LTTNG_SET_SESSION_SHM_PATH processed by the client thread.
 */
int cmd_set_session_shm_path(struct ltt_session *session,
		const char *shm_path)
{
	/*
	 * Can only set shm path before session is started.
	 */
	if (session->has_been_started) {
		return LTTNG_ERR_SESSION_STARTED;
	}

	strncpy(session->shm_path, shm_path,
			sizeof(session->shm_path));
	session->shm_path[sizeof(session->shm_path) - 1] = '\0';

	return 0;
}

/*
 * Command LTTNG_ROTATE_SESSION from the lttng-ctl library.
 *
 * Ask the consumer to rotate the session output directory.
 * The session lock must be held.
 *
 * Returns LTTNG_OK on success or else a negative LTTng error code.
 */
int cmd_rotate_session(struct ltt_session *session,
		struct lttng_rotate_session_return *rotate_return)
{
	int ret;
	enum lttng_error_code cmd_ret = LTTNG_OK;
	size_t strf_ret;
	struct tm *timeinfo;
	char datetime[21];
	time_t now;
	/*
	 * Used to roll-back timestamps in case of failure to launch the
	 * rotation.
	 */
	time_t original_last_chunk_start_ts, original_current_chunk_start_ts;

	assert(session);

	if (!session->has_been_started) {
		cmd_ret = LTTNG_ERR_START_SESSION_ONCE;
		goto end;
	}

	if (session->live_timer || session->snapshot_mode ||
			!session->output_traces) {
		cmd_ret = LTTNG_ERR_ROTATION_NOT_AVAILABLE;
		goto end;
	}

	/*
	 * Unsupported feature in lttng-relayd before 2.11.
	 */
	if (session->consumer->type == CONSUMER_DST_NET &&
			(session->consumer->relay_major_version == 2 &&
			session->consumer->relay_minor_version < 11)) {
		cmd_ret = LTTNG_ERR_ROTATION_NOT_AVAILABLE_RELAY;
		goto end;
	}

	if (session->rotation_state == LTTNG_ROTATION_STATE_ONGOING) {
		DBG("Refusing to launch a rotation; a rotation is already in progress for session %s",
				session->name);
		cmd_ret = LTTNG_ERR_ROTATION_PENDING;
		goto end;
	}

	/*
	 * After a stop, we only allow one rotation to occur, the other ones are
	 * useless until a new start.
	 */
	if (session->rotated_after_last_stop) {
		DBG("Session \"%s\" was already rotated after stop, refusing rotation",
				session->name);
		cmd_ret = LTTNG_ERR_ROTATION_MULTIPLE_AFTER_STOP;
		goto end;
	}

	/* Special case for the first rotation. */
	if (session->current_archive_id == 0) {
		const char *base_path = NULL;

		assert(session->kernel_session || session->ust_session);
		/* Either one of the two sessions is enough to get the root path. */
		base_path = session_get_base_path(session);
		assert(base_path);

		ret = lttng_strncpy(session->rotation_chunk.current_rotate_path,
				base_path,
				sizeof(session->rotation_chunk.current_rotate_path));
		if (ret) {
			ERR("Failed to copy session base path to current rotation chunk path");
			cmd_ret = LTTNG_ERR_UNK;
			goto end;
		}
	} else {
		/*
		 * The currently active tracing path is now the folder we
		 * want to rotate.
		 */
		ret = lttng_strncpy(session->rotation_chunk.current_rotate_path,
				session->rotation_chunk.active_tracing_path,
				sizeof(session->rotation_chunk.current_rotate_path));
		if (ret) {
			ERR("Failed to copy the active tracing path to the current rotate path");
			cmd_ret = LTTNG_ERR_UNK;
			goto end;
		}
	}
	DBG("Current rotate path %s", session->rotation_chunk.current_rotate_path);

	/*
	 * Channels created after this point will belong to the next
	 * archive.
	 */
	session->current_archive_id++;

	now = time(NULL);
	if (now == (time_t) -1) {
		cmd_ret = LTTNG_ERR_ROTATION_NOT_AVAILABLE;
		goto end;
	}

	/* Sample chunk bounds for roll-back in case of error. */
	original_last_chunk_start_ts = session->last_chunk_start_ts;
	original_current_chunk_start_ts = session->current_chunk_start_ts;

	session->last_chunk_start_ts = session->current_chunk_start_ts;
	session->current_chunk_start_ts = now;

	timeinfo = localtime(&now);
	if (!timeinfo) {
		PERROR("Failed to sample local time in rotate session command");
		cmd_ret = LTTNG_ERR_UNK;
		goto error;
	}
	strf_ret = strftime(datetime, sizeof(datetime), "%Y%m%dT%H%M%S%z",
			timeinfo);
	if (!strf_ret) {
		ERR("Failed to format local time timestamp in rotate session command");
		cmd_ret = LTTNG_ERR_UNK;
		goto error;
	}

	/*
	 * A rotation has a local step even if the destination is a relay
	 * daemon; the buffers must be consumed by the consumer daemon.
	 */
	session->rotation_pending_local = true;
	session->rotation_pending_relay =
			session_get_consumer_destination_type(session) == CONSUMER_DST_NET;
	session->rotation_state = LTTNG_ROTATION_STATE_ONGOING;

	if (session->kernel_session) {
		/*
		 * The active path for the next rotation/destroy.
		 * Ex: ~/lttng-traces/auto-20170922-111748/20170922-111754-42
		 */
		ret = snprintf(session->rotation_chunk.active_tracing_path,
				sizeof(session->rotation_chunk.active_tracing_path),
				"%s/%s-%" PRIu64,
				session_get_base_path(session),
				datetime, session->current_archive_id + 1);
		if (ret < 0 || ret == sizeof(session->rotation_chunk.active_tracing_path)) {
			ERR("Failed to format active kernel tracing path in rotate session command");
			cmd_ret = LTTNG_ERR_UNK;
			goto error;
		}
		/*
		 * The sub-directory for the consumer
		 * Ex: /20170922-111754-42/kernel
		 */
		ret = snprintf(session->kernel_session->consumer->chunk_path,
				sizeof(session->kernel_session->consumer->chunk_path),
				"/%s-%" PRIu64, datetime,
				session->current_archive_id + 1);
		if (ret < 0 || ret == sizeof(session->kernel_session->consumer->chunk_path)) {
			ERR("Failed to format the kernel consumer's sub-directory in rotate session command");
			cmd_ret = LTTNG_ERR_UNK;
			goto error;
		}
		/*
		 * Create the new chunk folder, before the rotation begins so we don't
		 * race with the consumer/tracer activity.
		 */
		ret = domain_mkdir(session->kernel_session->consumer, session,
				session->kernel_session->uid,
				session->kernel_session->gid);
		if (ret) {
			ERR("Failed to create kernel session tracing path at %s",
					session->kernel_session->consumer->chunk_path);
			cmd_ret = LTTNG_ERR_CREATE_DIR_FAIL;
			goto error;
		}
		cmd_ret = kernel_rotate_session(session);
		if (cmd_ret != LTTNG_OK) {
			goto error;
		}
	}

	if (session->ust_session) {
		ret = snprintf(session->rotation_chunk.active_tracing_path,
				PATH_MAX, "%s/%s-%" PRIu64,
				session_get_base_path(session),
				datetime, session->current_archive_id + 1);
		if (ret < 0) {
			ERR("Failed to format active UST tracing path in rotate session command");
			cmd_ret = LTTNG_ERR_UNK;
			goto error;
		}
		ret = snprintf(session->ust_session->consumer->chunk_path,
				PATH_MAX, "/%s-%" PRIu64, datetime,
				session->current_archive_id + 1);
		if (ret < 0) {
			ERR("Failed to format the UST consumer's sub-directory in rotate session command");
			cmd_ret = LTTNG_ERR_UNK;
			goto error;
		}
		/*
		 * Create the new chunk folder, before the rotation begins so we don't
		 * race with the consumer/tracer activity.
		 */
		ret = domain_mkdir(session->ust_session->consumer, session,
				session->ust_session->uid,
				session->ust_session->gid);
		if (ret) {
			cmd_ret = LTTNG_ERR_CREATE_DIR_FAIL;
			goto error;
		}
		cmd_ret = ust_app_rotate_session(session);
		if (cmd_ret != LTTNG_OK) {
			goto error;
		}
	}

	ret = timer_session_rotation_pending_check_start(session,
			DEFAULT_ROTATE_PENDING_TIMER);
	if (ret) {
		ERR("Failed to enable rotate timer");
		cmd_ret = LTTNG_ERR_UNK;
		goto error;
	}

	if (!session->active) {
		session->rotated_after_last_stop = true;
	}

	if (rotate_return) {
		rotate_return->rotation_id = session->current_archive_id;
	}

	ret = notification_thread_command_session_rotation_ongoing(
			notification_thread_handle,
			session->name, session->uid, session->gid,
			session->current_archive_id - 1);
	if (ret != LTTNG_OK) {
		ERR("Failed to notify notification thread that a session rotation is ongoing for session %s",
				session->name);
	}

	DBG("Cmd rotate session %s, archive_id %" PRIu64 " sent",
			session->name, session->current_archive_id - 1);
end:
	ret = (cmd_ret == LTTNG_OK) ? cmd_ret : -((int) cmd_ret);
	return ret;
error:
	/* Roll back the chunk timestamps sampled above. */
	session->last_chunk_start_ts = original_last_chunk_start_ts;
	session->current_chunk_start_ts = original_current_chunk_start_ts;
	if (session_reset_rotation_state(session,
			LTTNG_ROTATION_STATE_NO_ROTATION)) {
		ERR("Failed to reset rotation state of session \"%s\"",
				session->name);
	}
	goto end;
}

/*
 * Command LTTNG_ROTATION_GET_INFO from the lttng-ctl library.
 *
 * Check if the session has finished its rotation.
 *
 * Return 0 on success or else a LTTNG_ERR code.
 */
int cmd_rotate_get_info(struct ltt_session *session,
		struct lttng_rotation_get_info_return *info_return,
		uint64_t rotation_id)
{
	int ret;

	assert(session);

	DBG("Cmd rotate_get_info session %s, rotation id %" PRIu64, session->name,
			session->current_archive_id);

	if (session->current_archive_id != rotation_id) {
		info_return->status = (int32_t) LTTNG_ROTATION_STATE_EXPIRED;
		ret = 0;
		goto end;
	}

	switch (session->rotation_state) {
	case LTTNG_ROTATION_STATE_ONGOING:
		DBG("Reporting that rotation id %" PRIu64 " of session %s is still pending",
				rotation_id, session->name);
		break;
	case LTTNG_ROTATION_STATE_COMPLETED:
	{
		char *current_tracing_path_reply;
		size_t current_tracing_path_reply_len;

		switch (session_get_consumer_destination_type(session)) {
		case CONSUMER_DST_LOCAL:
			current_tracing_path_reply =
					info_return->location.local.absolute_path;
			current_tracing_path_reply_len =
					sizeof(info_return->location.local.absolute_path);
			info_return->location_type =
					(int8_t) LTTNG_TRACE_ARCHIVE_LOCATION_TYPE_LOCAL;
			break;
		case CONSUMER_DST_NET:
			current_tracing_path_reply =
					info_return->location.relay.relative_path;
			current_tracing_path_reply_len =
					sizeof(info_return->location.relay.relative_path);
			/* Currently the only supported relay protocol. */
			info_return->location.relay.protocol =
					(int8_t) LTTNG_TRACE_ARCHIVE_LOCATION_RELAY_PROTOCOL_TYPE_TCP;

			ret = lttng_strncpy(info_return->location.relay.host,
					session_get_net_consumer_hostname(session),
					sizeof(info_return->location.relay.host));
			if (ret) {
				ERR("Failed to copy host name to rotate_get_info reply");
				info_return->status = LTTNG_ROTATION_STATUS_ERROR;
				ret = -LTTNG_ERR_UNK;
				goto end;
			}

			session_get_net_consumer_ports(session,
					&info_return->location.relay.ports.control,
					&info_return->location.relay.ports.data);
			info_return->location_type =
					(int8_t) LTTNG_TRACE_ARCHIVE_LOCATION_TYPE_RELAY;
			break;
		}

		ret = lttng_strncpy(current_tracing_path_reply,
				session->rotation_chunk.current_rotate_path,
				current_tracing_path_reply_len);
		if (ret) {
			ERR("Failed to copy current tracing path to rotate_get_info reply");
			info_return->status = LTTNG_ROTATION_STATUS_ERROR;
			ret = -LTTNG_ERR_UNK;
			goto end;
		}
		break;
	}
	case LTTNG_ROTATION_STATE_ERROR:
		DBG("Reporting that an error occurred during rotation %" PRIu64 " of session %s",
				rotation_id, session->name);
		break;
	default:
		break;
	}

	info_return->status = (int32_t) session->rotation_state;
	ret = 0;
end:
	return ret;
}

/*
 * Command LTTNG_ROTATION_SET_SCHEDULE from the lttng-ctl library.
 *
 * Configure the automatic rotation parameters.
 * 'activate' to true means activate the rotation schedule type with 'new_value'.
 * 'activate' to false means deactivate the rotation schedule and validate that
 * 'new_value' has the same value as the currently active value.
 *
 * Return 0 on success or else a positive LTTNG_ERR code.
 */
int cmd_rotation_set_schedule(struct ltt_session *session,
		bool activate, enum lttng_rotation_schedule_type schedule_type,
		uint64_t new_value,
		struct notification_thread_handle *notification_thread_handle)
{
	int ret;
	uint64_t *parameter_value;

	assert(session);

	DBG("Cmd rotate set schedule session %s", session->name);

	if (session->live_timer || session->snapshot_mode ||
			!session->output_traces) {
		DBG("Failing ROTATION_SET_SCHEDULE command as the rotation feature is not available for this session");
		ret = LTTNG_ERR_ROTATION_NOT_AVAILABLE;
		goto end;
	}

	switch (schedule_type) {
	case LTTNG_ROTATION_SCHEDULE_TYPE_SIZE_THRESHOLD:
		parameter_value = &session->rotate_size;
		break;
	case LTTNG_ROTATION_SCHEDULE_TYPE_PERIODIC:
		parameter_value = &session->rotate_timer_period;
		if (new_value >= UINT_MAX) {
			DBG("Failing ROTATION_SET_SCHEDULE command as the value requested for a periodic rotation schedule is invalid: %" PRIu64 " > %u (UINT_MAX)",
					new_value, UINT_MAX);
			ret = LTTNG_ERR_INVALID;
			goto end;
		}
		break;
	default:
		WARN("Failing ROTATION_SET_SCHEDULE command on unknown schedule type");
		ret = LTTNG_ERR_INVALID;
		goto end;
	}

	/* Improper use of the API. */
	if (new_value == -1ULL) {
		WARN("Failing ROTATION_SET_SCHEDULE command as the value requested is -1");
		ret = LTTNG_ERR_INVALID;
		goto end;
	}

	/*
	 * As indicated in struct ltt_session's comments, a value of == 0 means
	 * this schedule rotation type is not in use.
	 *
	 * Reject the command if we were asked to activate a schedule that was
	 * already set.
	 */
	if (activate && *parameter_value != 0) {
		DBG("Failing ROTATION_SET_SCHEDULE (activate) command as the schedule is already active");
		ret = LTTNG_ERR_ROTATION_SCHEDULE_SET;
		goto end;
	}

	/*
	 * Reject the command if we were asked to deactivate a schedule that was
	 * not previously set.
	 */
	if (!activate && *parameter_value == 0) {
		DBG("Failing ROTATION_SET_SCHEDULE (deactivate) command as the schedule is already inactive");
		ret = LTTNG_ERR_ROTATION_SCHEDULE_NOT_SET;
		goto end;
	}

	/*
	 * Reject the command if we were asked to deactivate a schedule that
	 * does not match the currently set value.
	 */
	if (!activate && *parameter_value != new_value) {
		DBG("Failing ROTATION_SET_SCHEDULE (deactivate) command as a nonexistent schedule was provided");
		ret = LTTNG_ERR_ROTATION_SCHEDULE_NOT_SET;
		goto end;
	}

	*parameter_value = activate ? new_value : 0;

	switch (schedule_type) {
	case LTTNG_ROTATION_SCHEDULE_TYPE_PERIODIC:
		if (activate && session->active) {
			/*
			 * Only start the timer if the session is active,
			 * otherwise it will be started when the session starts.
			 */
			ret = timer_session_rotation_schedule_timer_start(
					session, new_value);
			if (ret) {
				ERR("Failed to enable session rotation timer in ROTATION_SET_SCHEDULE command");
				ret = LTTNG_ERR_UNK;
				goto end;
			}
		} else if (!activate) {
			ret = timer_session_rotation_schedule_timer_stop(
					session);
			if (ret) {
				ERR("Failed to disable session rotation timer in ROTATION_SET_SCHEDULE command");
				ret = LTTNG_ERR_UNK;
				goto end;
			}
		}
		break;
	case LTTNG_ROTATION_SCHEDULE_TYPE_SIZE_THRESHOLD:
		if (activate) {
			ret = subscribe_session_consumed_size_rotation(session,
					new_value, notification_thread_handle);
			if (ret) {
				ERR("Failed to enable consumed-size notification in ROTATION_SET_SCHEDULE command");
				ret = LTTNG_ERR_UNK;
				goto end;
			}
		} else {
			ret = unsubscribe_session_consumed_size_rotation(session,
					notification_thread_handle);
			if (ret) {
				ERR("Failed to disable consumed-size notification in ROTATION_SET_SCHEDULE command");
				ret = LTTNG_ERR_UNK;
				goto end;
			}
		}
		break;
	default:
		/* Would have been caught before. */
		abort();
	}

	ret = LTTNG_OK;
end:
	return ret;
}

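/*
 * Usage sketch (hypothetical values): activating and later deactivating a
 * periodic rotation schedule goes through this command twice, with the
 * deactivation naming the currently active value:
 *
 *	cmd_rotation_set_schedule(session, true,
 *			LTTNG_ROTATION_SCHEDULE_TYPE_PERIODIC,
 *			60000000, handle);
 *	cmd_rotation_set_schedule(session, false,
 *			LTTNG_ROTATION_SCHEDULE_TYPE_PERIODIC,
 *			60000000, handle);
 *
 * The 60000000 value (assumed to be a period expressed in microseconds,
 * i.e. 60 s) and "handle" are placeholders.
 */
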
/* Wait for a given path to be removed before continuing. */
static enum lttng_error_code wait_on_path(void *path_data)
{
	const char *shm_path = path_data;

	DBG("Waiting for the shm path at %s to be removed before completing session destruction",
			shm_path);
	while (true) {
		int ret;
		struct stat st;

		ret = stat(shm_path, &st);
		if (ret) {
			if (errno != ENOENT) {
				PERROR("stat() returned an error while checking for the existence of the shm path");
			} else {
				DBG("shm path no longer exists, completing the destruction of session");
			}
			break;
		}

		if (!S_ISDIR(st.st_mode)) {
			ERR("The type of shm path %s returned by stat() is not a directory; aborting the wait for shm path removal",
					shm_path);
			break;
		}
		usleep(SESSION_DESTROY_SHM_PATH_CHECK_DELAY_US);
	}
	return LTTNG_OK;
}

/*
 * Returns a pointer to a handler to run on completion of a command.
 * Returns NULL if no handler has to be run for the last command executed.
 */
const struct cmd_completion_handler *cmd_pop_completion_handler(void)
{
	struct cmd_completion_handler *handler = current_completion_handler;

	current_completion_handler = NULL;
	return handler;
}

/*
 * Init command subsystem.
 */
void cmd_init(void)
{
	/*
	 * Set network sequence index to 1 for streams to match a relayd
	 * socket on the consumer side.
	 */
	pthread_mutex_lock(&relayd_net_seq_idx_lock);
	relayd_net_seq_idx = 1;
	pthread_mutex_unlock(&relayd_net_seq_idx_lock);

	DBG("Command subsystem initialized");
}