/*
 * Copyright (C) 2012 - David Goulet <dgoulet@efficios.com>
 * Copyright (C) 2016 - Jérémie Galarneau <jeremie.galarneau@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License, version 2 only, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 51
 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include <urcu/list.h>
#include <urcu/uatomic.h>

#include <common/defaults.h>
#include <common/common.h>
#include <common/sessiond-comm/sessiond-comm.h>
#include <common/relayd/relayd.h>
#include <common/utils.h>
#include <common/compat/string.h>
#include <common/kernel-ctl/kernel-ctl.h>
#include <common/dynamic-buffer.h>
#include <common/buffer-view.h>
#include <lttng/trigger/trigger-internal.h>
#include <lttng/condition/condition.h>
#include <lttng/action/action.h>
#include <lttng/channel.h>
#include <lttng/channel-internal.h>
#include <lttng/rotate-internal.h>
#include <lttng/location-internal.h>
#include <lttng/userspace-probe-internal.h>
#include <common/string-utils/string-utils.h>
#include "health-sessiond.h"
#include "kernel-consumer.h"
#include "lttng-sessiond.h"
#include "lttng-syscall.h"
#include "buffer-registry.h"
#include "notification-thread.h"
#include "notification-thread-commands.h"
#include "rotation-thread.h"
#include "agent-thread.h"
/* Sleep for 100ms between each check for the shm path's deletion. */
#define SESSION_DESTROY_SHM_PATH_CHECK_DELAY_US 100000

static enum lttng_error_code wait_on_path(void *path);
/*
 * Command completion handler that is used by the destroy command
 * when a session that has a non-default shm_path is being destroyed.
 *
 * See comment in cmd_destroy_session() for the rationale.
 */
static struct destroy_completion_handler {
	struct cmd_completion_handler handler;
	char shm_path[member_sizeof(struct ltt_session, shm_path)];
} destroy_completion_handler = {
	.handler = {
		.run = wait_on_path,
		.data = destroy_completion_handler.shm_path
	},
	.shm_path = { 0 },
};

static struct cmd_completion_handler *current_completion_handler;
/*
 * Used to keep a unique index for each relayd socket created where this value
 * is associated with streams on the consumer so it can match the right relayd
 * to send to. It must be accessed with the relayd_net_seq_idx_lock
 * held.
 */
static pthread_mutex_t relayd_net_seq_idx_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t relayd_net_seq_idx;
static int validate_ust_event_name(const char *);
static int cmd_enable_event_internal(struct ltt_session *session,
		struct lttng_domain *domain,
		char *channel_name, struct lttng_event *event,
		char *filter_expression,
		struct lttng_filter_bytecode *filter,
		struct lttng_event_exclusion *exclusion,
		int wpipe);
/*
 * Create a session path used by list_lttng_sessions for the case that the
 * session consumer is on the network.
 */
static int build_network_session_path(char *dst, size_t size,
		struct ltt_session *session)
{
	int ret, kdata_port, udata_port;
	struct lttng_uri *kuri = NULL, *uuri = NULL, *uri = NULL;
	char tmp_uurl[PATH_MAX], tmp_urls[PATH_MAX];

	assert(session);
	assert(dst);

	memset(tmp_urls, 0, sizeof(tmp_urls));
	memset(tmp_uurl, 0, sizeof(tmp_uurl));

	kdata_port = udata_port = DEFAULT_NETWORK_DATA_PORT;

	if (session->kernel_session && session->kernel_session->consumer) {
		kuri = &session->kernel_session->consumer->dst.net.control;
		kdata_port = session->kernel_session->consumer->dst.net.data.port;
	}

	if (session->ust_session && session->ust_session->consumer) {
		uuri = &session->ust_session->consumer->dst.net.control;
		udata_port = session->ust_session->consumer->dst.net.data.port;
	}

	if (uuri == NULL && kuri == NULL) {
		uri = &session->consumer->dst.net.control;
		kdata_port = session->consumer->dst.net.data.port;
	} else if (kuri && uuri) {
		ret = uri_compare(kuri, uuri);
		if (ret) {
			/* Not equal */
			uri = kuri;
			/* Build uuri URL string */
			ret = uri_to_str_url(uuri, tmp_uurl, sizeof(tmp_uurl));
			if (ret < 0) {
				goto error;
			}
		} else {
			uri = kuri;
		}
	} else if (kuri && uuri == NULL) {
		uri = kuri;
	} else if (uuri && kuri == NULL) {
		uri = uuri;
	}

	ret = uri_to_str_url(uri, tmp_urls, sizeof(tmp_urls));
	if (ret < 0) {
		goto error;
	}

	/*
	 * Do we have a UST url set. If yes, this means we have both kernel and UST
	 * to print.
	 */
	if (*tmp_uurl != '\0') {
		ret = snprintf(dst, size, "[K]: %s [data: %d] -- [U]: %s [data: %d]",
				tmp_urls, kdata_port, tmp_uurl, udata_port);
	} else {
		int dport;

		if (kuri || (!kuri && !uuri)) {
			dport = kdata_port;
		} else {
			/* No kernel URI, use the UST port. */
			dport = udata_port;
		}
		ret = snprintf(dst, size, "%s [data: %d]", tmp_urls, dport);
	}

error:
	return ret;
}
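
/*
 * For illustration (values are hypothetical): a session whose domains share
 * one relay destination yields something like "net://relayd-host [data: 5343]",
 * while distinct kernel/UST relay destinations yield the
 * "[K]: <url> [data: <port>] -- [U]: <url> [data: <port>]" form above.
 */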
/*
 * Get run-time attributes if the session has been started (discarded events,
 * lost packets).
 */
static int get_kernel_runtime_stats(struct ltt_session *session,
		struct ltt_kernel_channel *kchan, uint64_t *discarded_events,
		uint64_t *lost_packets)
{
	int ret;

	if (!session->has_been_started) {
		ret = 0;
		*discarded_events = 0;
		*lost_packets = 0;
		goto end;
	}

	ret = consumer_get_discarded_events(session->id, kchan->key,
			session->kernel_session->consumer,
			discarded_events);
	if (ret < 0) {
		goto end;
	}

	ret = consumer_get_lost_packets(session->id, kchan->key,
			session->kernel_session->consumer,
			lost_packets);

end:
	return ret;
}
/*
 * Get run-time attributes if the session has been started (discarded events,
 * lost packets).
 */
static int get_ust_runtime_stats(struct ltt_session *session,
		struct ltt_ust_channel *uchan, uint64_t *discarded_events,
		uint64_t *lost_packets)
{
	int ret;
	struct ltt_ust_session *usess;

	if (!discarded_events || !lost_packets) {
		ret = -1;
		goto end;
	}

	usess = session->ust_session;
	assert(discarded_events);
	assert(lost_packets);

	if (!usess || !session->has_been_started) {
		*discarded_events = 0;
		*lost_packets = 0;
		ret = 0;
		goto end;
	}

	if (usess->buffer_type == LTTNG_BUFFER_PER_UID) {
		ret = ust_app_uid_get_channel_runtime_stats(usess->id,
				&usess->buffer_reg_uid_list,
				usess->consumer, uchan->id,
				uchan->attr.overwrite,
				discarded_events, lost_packets);
	} else if (usess->buffer_type == LTTNG_BUFFER_PER_PID) {
		ret = ust_app_pid_get_channel_runtime_stats(usess,
				uchan, usess->consumer,
				uchan->attr.overwrite,
				discarded_events, lost_packets);
		if (ret < 0) {
			goto end;
		}
		*discarded_events += uchan->per_pid_closed_app_discarded;
		*lost_packets += uchan->per_pid_closed_app_lost;
	} else {
		ERR("Unsupported buffer type");
		assert(0);
		ret = -1;
		goto end;
	}

end:
	return ret;
}
/*
 * Fill lttng_channel array of all channels.
 */
static ssize_t list_lttng_channels(enum lttng_domain_type domain,
		struct ltt_session *session, struct lttng_channel *channels,
		struct lttng_channel_extended *chan_exts)
{
	int i = 0, ret = 0;
	struct ltt_kernel_channel *kchan;

	DBG("Listing channels for session %s", session->name);

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
		/* Kernel channels */
		if (session->kernel_session != NULL) {
			cds_list_for_each_entry(kchan,
					&session->kernel_session->channel_list.head, list) {
				uint64_t discarded_events, lost_packets;
				struct lttng_channel_extended *extended;

				extended = (struct lttng_channel_extended *)
						kchan->channel->attr.extended.ptr;

				ret = get_kernel_runtime_stats(session, kchan,
						&discarded_events, &lost_packets);
				if (ret < 0) {
					goto end;
				}
				/* Copy lttng_channel struct to array */
				memcpy(&channels[i], kchan->channel, sizeof(struct lttng_channel));
				channels[i].enabled = kchan->enabled;
				chan_exts[i].discarded_events =
						discarded_events;
				chan_exts[i].lost_packets = lost_packets;
				chan_exts[i].monitor_timer_interval =
						extended->monitor_timer_interval;
				chan_exts[i].blocking_timeout = 0;
				i++;
			}
		}
		break;
	case LTTNG_DOMAIN_UST:
	{
		struct lttng_ht_iter iter;
		struct ltt_ust_channel *uchan;

		rcu_read_lock();
		cds_lfht_for_each_entry(session->ust_session->domain_global.channels->ht,
				&iter.iter, uchan, node.node) {
			uint64_t discarded_events = 0, lost_packets = 0;

			if (lttng_strncpy(channels[i].name, uchan->name,
					LTTNG_SYMBOL_NAME_LEN)) {
				break;
			}
			channels[i].attr.overwrite = uchan->attr.overwrite;
			channels[i].attr.subbuf_size = uchan->attr.subbuf_size;
			channels[i].attr.num_subbuf = uchan->attr.num_subbuf;
			channels[i].attr.switch_timer_interval =
					uchan->attr.switch_timer_interval;
			channels[i].attr.read_timer_interval =
					uchan->attr.read_timer_interval;
			channels[i].enabled = uchan->enabled;
			channels[i].attr.tracefile_size = uchan->tracefile_size;
			channels[i].attr.tracefile_count = uchan->tracefile_count;

			/*
			 * Map enum lttng_ust_output to enum lttng_event_output.
			 */
			switch (uchan->attr.output) {
			case LTTNG_UST_MMAP:
				channels[i].attr.output = LTTNG_EVENT_MMAP;
				break;
			default:
				/*
				 * LTTNG_UST_MMAP is the only supported UST
				 * output mode.
				 */
				assert(0);
				break;
			}

			chan_exts[i].monitor_timer_interval =
					uchan->monitor_timer_interval;
			chan_exts[i].blocking_timeout =
					uchan->attr.u.s.blocking_timeout;

			ret = get_ust_runtime_stats(session, uchan,
					&discarded_events, &lost_packets);
			if (ret < 0) {
				rcu_read_unlock();
				goto end;
			}
			chan_exts[i].discarded_events = discarded_events;
			chan_exts[i].lost_packets = lost_packets;
			i++;
		}
		rcu_read_unlock();
		break;
	}
	default:
		goto end;
	}

end:
	if (ret < 0) {
		return -LTTNG_ERR_FATAL;
	} else {
		return i;
	}
}
static int increment_extended_len(const char *filter_expression,
		struct lttng_event_exclusion *exclusion,
		const struct lttng_userspace_probe_location *probe_location,
		size_t *extended_len)
{
	int ret = 0;

	*extended_len += sizeof(struct lttcomm_event_extended_header);

	if (filter_expression) {
		*extended_len += strlen(filter_expression) + 1;
	}

	if (exclusion) {
		*extended_len += exclusion->count * LTTNG_SYMBOL_NAME_LEN;
	}

	if (probe_location) {
		ret = lttng_userspace_probe_location_serialize(probe_location,
				NULL, NULL);
		if (ret < 0) {
			goto end;
		}
		*extended_len += ret;
	}
	ret = 0;
end:
	return ret;
}
static int append_extended_info(const char *filter_expression,
		struct lttng_event_exclusion *exclusion,
		struct lttng_userspace_probe_location *probe_location,
		void **extended_at)
{
	int ret = 0;
	size_t filter_len = 0;
	size_t nb_exclusions = 0;
	size_t userspace_probe_location_len = 0;
	struct lttng_dynamic_buffer location_buffer;
	struct lttcomm_event_extended_header extended_header;

	if (filter_expression) {
		filter_len = strlen(filter_expression) + 1;
	}

	if (exclusion) {
		nb_exclusions = exclusion->count;
	}

	if (probe_location) {
		lttng_dynamic_buffer_init(&location_buffer);
		ret = lttng_userspace_probe_location_serialize(probe_location,
				&location_buffer, NULL);
		if (ret < 0) {
			ret = -1;
			goto end;
		}
		userspace_probe_location_len = location_buffer.size;
	}

	/* Set header fields */
	extended_header.filter_len = filter_len;
	extended_header.nb_exclusions = nb_exclusions;
	extended_header.userspace_probe_location_len = userspace_probe_location_len;

	/* Copy header */
	memcpy(*extended_at, &extended_header, sizeof(extended_header));
	*extended_at += sizeof(extended_header);

	/* Copy filter string */
	if (filter_expression) {
		memcpy(*extended_at, filter_expression, filter_len);
		*extended_at += filter_len;
	}

	/* Copy exclusion names */
	if (exclusion) {
		size_t len = nb_exclusions * LTTNG_SYMBOL_NAME_LEN;

		memcpy(*extended_at, &exclusion->names, len);
		*extended_at += len;
	}

	if (probe_location) {
		memcpy(*extended_at, location_buffer.data, location_buffer.size);
		*extended_at += location_buffer.size;
		lttng_dynamic_buffer_reset(&location_buffer);
	}
end:
	return ret;
}
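
/*
 * Rough layout of the "extended" event data appended after the lttng_event
 * array by append_extended_info(): a struct lttcomm_event_extended_header
 * (filter_len, nb_exclusions, userspace_probe_location_len) followed by the
 * filter expression string, the exclusion names, and the serialized
 * userspace probe location, in that order.
 */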
/*
 * Create a list of agent domain events.
 *
 * Return number of events in list on success or else a negative value.
 */
static int list_lttng_agent_events(struct agent *agt,
		struct lttng_event **events, size_t *total_size)
{
	int i = 0, ret = 0;
	unsigned int nb_event = 0;
	struct agent_event *event;
	struct lttng_event *tmp_events = NULL;
	struct lttng_ht_iter iter;
	size_t extended_len = 0;
	void *extended_at;

	assert(agt);
	assert(events);

	DBG3("Listing agent events");

	rcu_read_lock();
	nb_event = lttng_ht_get_count(agt->events);
	if (nb_event == 0) {
		ret = nb_event;
		*total_size = 0;
		goto error;
	}

	/* Compute required extended infos size */
	extended_len = nb_event * sizeof(struct lttcomm_event_extended_header);

	/*
	 * This is only valid because the commands which add events are
	 * processed in the same thread as the listing.
	 */
	cds_lfht_for_each_entry(agt->events->ht, &iter.iter, event, node.node) {
		ret = increment_extended_len(event->filter_expression, NULL, NULL,
				&extended_len);
		if (ret) {
			DBG("Error computing the length of extended info message");
			ret = -LTTNG_ERR_FATAL;
			goto error;
		}
	}

	*total_size = nb_event * sizeof(*tmp_events) + extended_len;
	tmp_events = zmalloc(*total_size);
	if (!tmp_events) {
		PERROR("zmalloc agent events session");
		ret = -LTTNG_ERR_FATAL;
		goto error;
	}

	extended_at = ((uint8_t *) tmp_events) +
		nb_event * sizeof(struct lttng_event);

	cds_lfht_for_each_entry(agt->events->ht, &iter.iter, event, node.node) {
		strncpy(tmp_events[i].name, event->name, sizeof(tmp_events[i].name));
		tmp_events[i].name[sizeof(tmp_events[i].name) - 1] = '\0';
		tmp_events[i].enabled = event->enabled;
		tmp_events[i].loglevel = event->loglevel_value;
		tmp_events[i].loglevel_type = event->loglevel_type;
		i++;

		/* Append extended info */
		ret = append_extended_info(event->filter_expression, NULL, NULL,
				&extended_at);
		if (ret) {
			DBG("Error appending extended info message");
			ret = -LTTNG_ERR_FATAL;
			goto error;
		}
	}

	*events = tmp_events;
	ret = nb_event;
	assert(nb_event == i);

error:
	rcu_read_unlock();
	return ret;
}
/*
 * Create a list of ust global domain events.
 */
static int list_lttng_ust_global_events(char *channel_name,
		struct ltt_ust_domain_global *ust_global,
		struct lttng_event **events, size_t *total_size)
{
	int i = 0, ret = 0;
	unsigned int nb_event = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *node;
	struct ltt_ust_channel *uchan;
	struct ltt_ust_event *uevent;
	struct lttng_event *tmp;
	size_t extended_len = 0;
	void *extended_at;

	DBG("Listing UST global events for channel %s", channel_name);

	rcu_read_lock();

	lttng_ht_lookup(ust_global->channels, (void *)channel_name, &iter);
	node = lttng_ht_iter_get_node_str(&iter);
	if (node == NULL) {
		ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
		goto end;
	}

	uchan = caa_container_of(&node->node, struct ltt_ust_channel, node.node);

	nb_event = lttng_ht_get_count(uchan->events);
	if (nb_event == 0) {
		ret = nb_event;
		*total_size = 0;
		goto end;
	}

	DBG3("Listing UST global %d events", nb_event);

	/* Compute required extended infos size */
	cds_lfht_for_each_entry(uchan->events->ht, &iter.iter, uevent, node.node) {
		if (uevent->internal) {
			nb_event--;
			continue;
		}

		ret = increment_extended_len(uevent->filter_expression,
			uevent->exclusion, NULL, &extended_len);
		if (ret) {
			DBG("Error computing the length of extended info message");
			ret = -LTTNG_ERR_FATAL;
			goto end;
		}
	}
	if (nb_event == 0) {
		/* All events are internal, skip. */
		ret = 0;
		*total_size = 0;
		goto end;
	}

	*total_size = nb_event * sizeof(struct lttng_event) + extended_len;
	tmp = zmalloc(*total_size);
	if (tmp == NULL) {
		ret = -LTTNG_ERR_FATAL;
		goto end;
	}

	extended_at = ((uint8_t *) tmp) + nb_event * sizeof(struct lttng_event);

	cds_lfht_for_each_entry(uchan->events->ht, &iter.iter, uevent, node.node) {
		if (uevent->internal) {
			/* This event should remain hidden from clients */
			continue;
		}
		strncpy(tmp[i].name, uevent->attr.name, LTTNG_SYMBOL_NAME_LEN);
		tmp[i].name[LTTNG_SYMBOL_NAME_LEN - 1] = '\0';
		tmp[i].enabled = uevent->enabled;

		switch (uevent->attr.instrumentation) {
		case LTTNG_UST_TRACEPOINT:
			tmp[i].type = LTTNG_EVENT_TRACEPOINT;
			break;
		case LTTNG_UST_PROBE:
			tmp[i].type = LTTNG_EVENT_PROBE;
			break;
		case LTTNG_UST_FUNCTION:
			tmp[i].type = LTTNG_EVENT_FUNCTION;
			break;
		}

		tmp[i].loglevel = uevent->attr.loglevel;
		switch (uevent->attr.loglevel_type) {
		case LTTNG_UST_LOGLEVEL_ALL:
			tmp[i].loglevel_type = LTTNG_EVENT_LOGLEVEL_ALL;
			break;
		case LTTNG_UST_LOGLEVEL_RANGE:
			tmp[i].loglevel_type = LTTNG_EVENT_LOGLEVEL_RANGE;
			break;
		case LTTNG_UST_LOGLEVEL_SINGLE:
			tmp[i].loglevel_type = LTTNG_EVENT_LOGLEVEL_SINGLE;
			break;
		}
		if (uevent->filter) {
			tmp[i].filter = 1;
		}
		if (uevent->exclusion) {
			tmp[i].exclusion = 1;
		}
		i++;

		/* Append extended info */
		ret = append_extended_info(uevent->filter_expression,
			uevent->exclusion, NULL, &extended_at);
		if (ret) {
			DBG("Error appending extended info message");
			ret = -LTTNG_ERR_FATAL;
			goto end;
		}
	}

	ret = nb_event;
	*events = tmp;

end:
	rcu_read_unlock();
	return ret;
}
/*
 * Fill lttng_event array of all kernel events in the channel.
 */
static int list_lttng_kernel_events(char *channel_name,
		struct ltt_kernel_session *kernel_session,
		struct lttng_event **events, size_t *total_size)
{
	int i = 0, ret;
	unsigned int nb_event;
	struct ltt_kernel_event *event;
	struct ltt_kernel_channel *kchan;
	size_t extended_len = 0;
	void *extended_at;

	kchan = trace_kernel_get_channel_by_name(channel_name, kernel_session);
	if (kchan == NULL) {
		ret = LTTNG_ERR_KERN_CHAN_NOT_FOUND;
		goto error;
	}

	nb_event = kchan->event_count;

	DBG("Listing events for channel %s", kchan->channel->name);

	if (nb_event == 0) {
		*total_size = 0;
		*events = NULL;
		goto end;
	}

	/* Compute required extended infos size */
	cds_list_for_each_entry(event, &kchan->events_list.head, list) {
		ret = increment_extended_len(event->filter_expression, NULL,
				event->userspace_probe_location,
				&extended_len);
		if (ret) {
			DBG("Error computing the length of extended info message");
			ret = LTTNG_ERR_FATAL;
			goto error;
		}
	}

	*total_size = nb_event * sizeof(struct lttng_event) + extended_len;
	*events = zmalloc(*total_size);
	if (*events == NULL) {
		ret = LTTNG_ERR_FATAL;
		goto error;
	}

	extended_at = ((void *) *events) +
		nb_event * sizeof(struct lttng_event);

	/* Kernel channels */
	cds_list_for_each_entry(event, &kchan->events_list.head, list) {
		strncpy((*events)[i].name, event->event->name, LTTNG_SYMBOL_NAME_LEN);
		(*events)[i].name[LTTNG_SYMBOL_NAME_LEN - 1] = '\0';
		(*events)[i].enabled = event->enabled;
		(*events)[i].filter =
				(unsigned char) !!event->filter_expression;

		switch (event->event->instrumentation) {
		case LTTNG_KERNEL_TRACEPOINT:
			(*events)[i].type = LTTNG_EVENT_TRACEPOINT;
			break;
		case LTTNG_KERNEL_KRETPROBE:
			(*events)[i].type = LTTNG_EVENT_FUNCTION;
			memcpy(&(*events)[i].attr.probe, &event->event->u.kprobe,
					sizeof(struct lttng_kernel_kprobe));
			break;
		case LTTNG_KERNEL_KPROBE:
			(*events)[i].type = LTTNG_EVENT_PROBE;
			memcpy(&(*events)[i].attr.probe, &event->event->u.kprobe,
					sizeof(struct lttng_kernel_kprobe));
			break;
		case LTTNG_KERNEL_UPROBE:
			(*events)[i].type = LTTNG_EVENT_USERSPACE_PROBE;
			break;
		case LTTNG_KERNEL_FUNCTION:
			(*events)[i].type = LTTNG_EVENT_FUNCTION;
			memcpy(&((*events)[i].attr.ftrace), &event->event->u.ftrace,
					sizeof(struct lttng_kernel_function));
			break;
		case LTTNG_KERNEL_NOOP:
			(*events)[i].type = LTTNG_EVENT_NOOP;
			break;
		case LTTNG_KERNEL_SYSCALL:
			(*events)[i].type = LTTNG_EVENT_SYSCALL;
			break;
		case LTTNG_KERNEL_ALL:
			/* fall-through. */
		default:
			assert(0);
			break;
		}
		i++;

		/* Append extended info */
		ret = append_extended_info(event->filter_expression, NULL,
				event->userspace_probe_location, &extended_at);
		if (ret) {
			DBG("Error appending extended info message");
			ret = LTTNG_ERR_FATAL;
			goto error;
		}
	}

end:
	return nb_event;

error:
	/* Negate the error code to differentiate the size from an error */
	return -ret;
}
/*
 * Add URI to the consumer output object. Set the correct path depending on the
 * domain adding the default trace directory.
 */
static int add_uri_to_consumer(struct consumer_output *consumer,
		struct lttng_uri *uri, enum lttng_domain_type domain,
		const char *session_name)
{
	int ret = LTTNG_OK;
	const char *default_trace_dir;

	assert(uri);

	if (consumer == NULL) {
		DBG("No consumer detected. Don't add URI. Stopping.");
		ret = LTTNG_ERR_NO_CONSUMER;
		goto error;
	}

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
		default_trace_dir = DEFAULT_KERNEL_TRACE_DIR;
		break;
	case LTTNG_DOMAIN_UST:
		default_trace_dir = DEFAULT_UST_TRACE_DIR;
		break;
	default:
		/*
		 * This case is possible if we try to add the URI to the global
		 * tracing session consumer object which in this case there is
		 * no subdir.
		 */
		default_trace_dir = "";
	}

	switch (uri->dtype) {
	case LTTNG_DST_IPV4:
	case LTTNG_DST_IPV6:
		DBG2("Setting network URI to consumer");

		if (consumer->type == CONSUMER_DST_NET) {
			if ((uri->stype == LTTNG_STREAM_CONTROL &&
				consumer->dst.net.control_isset) ||
				(uri->stype == LTTNG_STREAM_DATA &&
				consumer->dst.net.data_isset)) {
				ret = LTTNG_ERR_URL_EXIST;
				goto error;
			}
		} else {
			memset(&consumer->dst.net, 0, sizeof(consumer->dst.net));
		}

		consumer->type = CONSUMER_DST_NET;

		/* Set URI into consumer output object */
		ret = consumer_set_network_uri(consumer, uri);
		if (ret < 0) {
			ret = -ret;
			goto error;
		} else if (ret == 1) {
			/*
			 * URI was the same in the consumer so we do not append the subdir
			 * again so to not duplicate output dir.
			 */
			ret = LTTNG_OK;
			goto error;
		}

		if (uri->stype == LTTNG_STREAM_CONTROL && strlen(uri->subdir) == 0) {
			ret = consumer_set_subdir(consumer, session_name);
			if (ret < 0) {
				ret = LTTNG_ERR_FATAL;
				goto error;
			}
		}

		if (uri->stype == LTTNG_STREAM_CONTROL) {
			/* On a new subdir, reappend the default trace dir. */
			strncat(consumer->subdir, default_trace_dir,
					sizeof(consumer->subdir) - strlen(consumer->subdir) - 1);
			DBG3("Append domain trace name to subdir %s", consumer->subdir);
		}
		break;
	case LTTNG_DST_PATH:
		DBG2("Setting trace directory path from URI to %s", uri->dst.path);
		memset(consumer->dst.session_root_path, 0,
				sizeof(consumer->dst.session_root_path));
		/* Explicit length checks for strcpy and strcat. */
		if (strlen(uri->dst.path) + strlen(default_trace_dir)
				>= sizeof(consumer->dst.session_root_path)) {
			ret = LTTNG_ERR_FATAL;
			goto error;
		}
		strcpy(consumer->dst.session_root_path, uri->dst.path);
		/* Append default trace dir */
		strcat(consumer->dst.session_root_path, default_trace_dir);
		/* Flag consumer as local. */
		consumer->type = CONSUMER_DST_LOCAL;
		break;
	}

	ret = LTTNG_OK;
error:
	return ret;
}
/*
 * Init tracing by creating the trace directory and sending fds to the kernel
 * consumer.
 */
static int init_kernel_tracing(struct ltt_kernel_session *session)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct consumer_socket *socket;

	assert(session);

	rcu_read_lock();

	if (session->consumer_fds_sent == 0 && session->consumer != NULL) {
		cds_lfht_for_each_entry(session->consumer->socks->ht, &iter.iter,
				socket, node.node) {
			pthread_mutex_lock(socket->lock);
			ret = kernel_consumer_send_session(socket, session);
			pthread_mutex_unlock(socket->lock);
			if (ret < 0) {
				ret = LTTNG_ERR_KERN_CONSUMER_FAIL;
				goto error;
			}
		}
	}

error:
	rcu_read_unlock();
	return ret;
}
/*
 * Create a socket to the relayd using the URI.
 *
 * On success, the relayd_sock pointer is set to the created socket.
 * Else, it remains untouched and an LTTng error code is returned.
 */
static enum lttng_error_code create_connect_relayd(struct lttng_uri *uri,
		struct lttcomm_relayd_sock **relayd_sock,
		struct consumer_output *consumer)
{
	int ret;
	enum lttng_error_code status = LTTNG_OK;
	struct lttcomm_relayd_sock *rsock;

	rsock = lttcomm_alloc_relayd_sock(uri, RELAYD_VERSION_COMM_MAJOR,
			RELAYD_VERSION_COMM_MINOR);
	if (!rsock) {
		status = LTTNG_ERR_FATAL;
		goto error;
	}

	/*
	 * Connect to relayd so we can proceed with a session creation. This call
	 * can possibly block for an arbitrary amount of time to set the health
	 * state to be in poll execution.
	 */
	health_poll_entry();
	ret = relayd_connect(rsock);
	health_poll_exit();
	if (ret < 0) {
		ERR("Unable to reach lttng-relayd");
		status = LTTNG_ERR_RELAYD_CONNECT_FAIL;
		goto free_sock;
	}

	/* Create socket for control stream. */
	if (uri->stype == LTTNG_STREAM_CONTROL) {
		DBG3("Creating relayd stream socket from URI");

		/* Check relayd version */
		ret = relayd_version_check(rsock);
		if (ret == LTTNG_ERR_RELAYD_VERSION_FAIL) {
			status = LTTNG_ERR_RELAYD_VERSION_FAIL;
			goto close_sock;
		} else if (ret < 0) {
			ERR("Unable to reach lttng-relayd");
			status = LTTNG_ERR_RELAYD_CONNECT_FAIL;
			goto close_sock;
		}
		consumer->relay_major_version = rsock->major;
		consumer->relay_minor_version = rsock->minor;
	} else if (uri->stype == LTTNG_STREAM_DATA) {
		DBG3("Creating relayd data socket from URI");
	} else {
		/* Command is not valid */
		ERR("Relayd invalid stream type: %d", uri->stype);
		status = LTTNG_ERR_INVALID;
		goto close_sock;
	}

	*relayd_sock = rsock;

	return status;

close_sock:
	/* The returned value is not useful since we are on an error path. */
	(void) relayd_close(rsock);
free_sock:
	free(rsock);
error:
	return status;
}
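
/*
 * Note that the relayd version check above is only performed on the control
 * stream socket; a data stream socket is created without any handshake beyond
 * the connection itself.
 */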
/*
 * Connect to the relayd using URI and send the socket to the right consumer.
 *
 * The consumer socket lock must be held by the caller.
 *
 * Returns LTTNG_OK on success or an LTTng error code on failure.
 */
static enum lttng_error_code send_consumer_relayd_socket(
		unsigned int session_id,
		struct lttng_uri *relayd_uri,
		struct consumer_output *consumer,
		struct consumer_socket *consumer_sock,
		char *session_name, char *hostname, int session_live_timer)
{
	int ret;
	struct lttcomm_relayd_sock *rsock = NULL;
	enum lttng_error_code status;

	/* Connect to relayd and make version check if uri is the control. */
	status = create_connect_relayd(relayd_uri, &rsock, consumer);
	if (status != LTTNG_OK) {
		goto relayd_comm_error;
	}

	/* Set the network sequence index if not set. */
	if (consumer->net_seq_index == (uint64_t) -1ULL) {
		pthread_mutex_lock(&relayd_net_seq_idx_lock);
		/*
		 * Increment net_seq_idx because we are about to transfer the
		 * new relayd socket to the consumer.
		 * Assign unique key so the consumer can match streams.
		 */
		consumer->net_seq_index = ++relayd_net_seq_idx;
		pthread_mutex_unlock(&relayd_net_seq_idx_lock);
	}

	/* Send relayd socket to consumer. */
	ret = consumer_send_relayd_socket(consumer_sock, rsock, consumer,
			relayd_uri->stype, session_id,
			session_name, hostname, session_live_timer);
	if (ret < 0) {
		status = LTTNG_ERR_ENABLE_CONSUMER_FAIL;
		goto close_sock;
	}

	/* Flag that the corresponding socket was sent. */
	if (relayd_uri->stype == LTTNG_STREAM_CONTROL) {
		consumer_sock->control_sock_sent = 1;
	} else if (relayd_uri->stype == LTTNG_STREAM_DATA) {
		consumer_sock->data_sock_sent = 1;
	}

	/*
	 * Close socket which was dup on the consumer side. The session daemon does
	 * NOT keep track of the relayd socket(s) once transfer to the consumer.
	 */

close_sock:
	if (status != LTTNG_OK) {
		/*
		 * The consumer output for this session should not be used anymore
		 * since the relayd connection failed thus making any tracing or/and
		 * streaming not usable.
		 */
		consumer->enabled = 0;
	}
	(void) relayd_close(rsock);
	free(rsock);

relayd_comm_error:
	return status;
}
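
/*
 * The net_seq_index taken under relayd_net_seq_idx_lock above is the unique
 * key the consumer later uses to match its streams with this relayd
 * connection.
 */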
/*
 * Send both relayd sockets to a specific consumer and domain. This is a
 * helper function to facilitate sending the information to the consumer for a
 * session.
 *
 * The consumer socket lock must be held by the caller.
 *
 * Returns LTTNG_OK, or an LTTng error code on failure.
 */
static enum lttng_error_code send_consumer_relayd_sockets(
		enum lttng_domain_type domain,
		unsigned int session_id, struct consumer_output *consumer,
		struct consumer_socket *sock, char *session_name,
		char *hostname, int session_live_timer)
{
	enum lttng_error_code status = LTTNG_OK;

	assert(consumer);
	assert(sock);

	/* Sending control relayd socket. */
	if (!sock->control_sock_sent) {
		status = send_consumer_relayd_socket(session_id,
				&consumer->dst.net.control, consumer, sock,
				session_name, hostname, session_live_timer);
		if (status != LTTNG_OK) {
			goto error;
		}
	}

	/* Sending data relayd socket. */
	if (!sock->data_sock_sent) {
		status = send_consumer_relayd_socket(session_id,
				&consumer->dst.net.data, consumer, sock,
				session_name, hostname, session_live_timer);
		if (status != LTTNG_OK) {
			goto error;
		}
	}

error:
	return status;
}
/*
 * Setup relayd connections for a tracing session. First creates the socket to
 * the relayd and send them to the right domain consumer. Consumer type MUST be
 * network.
 */
int cmd_setup_relayd(struct ltt_session *session)
{
	int ret = LTTNG_OK;
	struct ltt_ust_session *usess;
	struct ltt_kernel_session *ksess;
	struct consumer_socket *socket;
	struct lttng_ht_iter iter;

	assert(session);

	usess = session->ust_session;
	ksess = session->kernel_session;

	DBG("Setting relayd for session %s", session->name);

	rcu_read_lock();

	if (usess && usess->consumer && usess->consumer->type == CONSUMER_DST_NET
			&& usess->consumer->enabled) {
		/* For each consumer socket, send relayd sockets */
		cds_lfht_for_each_entry(usess->consumer->socks->ht, &iter.iter,
				socket, node.node) {
			pthread_mutex_lock(socket->lock);
			ret = send_consumer_relayd_sockets(LTTNG_DOMAIN_UST, session->id,
					usess->consumer, socket,
					session->name, session->hostname,
					session->live_timer);
			pthread_mutex_unlock(socket->lock);
			if (ret != LTTNG_OK) {
				goto error;
			}
			/* Session is now ready for network streaming. */
			session->net_handle = 1;
		}
		session->consumer->relay_major_version =
			usess->consumer->relay_major_version;
		session->consumer->relay_minor_version =
			usess->consumer->relay_minor_version;
	}

	if (ksess && ksess->consumer && ksess->consumer->type == CONSUMER_DST_NET
			&& ksess->consumer->enabled) {
		cds_lfht_for_each_entry(ksess->consumer->socks->ht, &iter.iter,
				socket, node.node) {
			pthread_mutex_lock(socket->lock);
			ret = send_consumer_relayd_sockets(LTTNG_DOMAIN_KERNEL, session->id,
					ksess->consumer, socket,
					session->name, session->hostname,
					session->live_timer);
			pthread_mutex_unlock(socket->lock);
			if (ret != LTTNG_OK) {
				goto error;
			}
			/* Session is now ready for network streaming. */
			session->net_handle = 1;
		}
		session->consumer->relay_major_version =
			ksess->consumer->relay_major_version;
		session->consumer->relay_minor_version =
			ksess->consumer->relay_minor_version;
	}

error:
	rcu_read_unlock();
	return ret;
}
/*
 * Start a kernel session by opening all necessary streams.
 */
static int start_kernel_session(struct ltt_kernel_session *ksess, int wpipe)
{
	int ret;
	struct ltt_kernel_channel *kchan;

	/* Open kernel metadata */
	if (ksess->metadata == NULL && ksess->output_traces) {
		ret = kernel_open_metadata(ksess);
		if (ret < 0) {
			ret = LTTNG_ERR_KERN_META_FAIL;
			goto error;
		}
	}

	/* Open kernel metadata stream */
	if (ksess->metadata && ksess->metadata_stream_fd < 0) {
		ret = kernel_open_metadata_stream(ksess);
		if (ret < 0) {
			ERR("Kernel create metadata stream failed");
			ret = LTTNG_ERR_KERN_STREAM_FAIL;
			goto error;
		}
	}

	/* For each channel */
	cds_list_for_each_entry(kchan, &ksess->channel_list.head, list) {
		if (kchan->stream_count == 0) {
			ret = kernel_open_channel_stream(kchan);
			if (ret < 0) {
				ret = LTTNG_ERR_KERN_STREAM_FAIL;
				goto error;
			}
			/* Update the stream global counter */
			ksess->stream_count_global += ret;
		}
	}

	/* Setup kernel consumer socket and send fds to it */
	ret = init_kernel_tracing(ksess);
	if (ret != 0) {
		ret = LTTNG_ERR_KERN_START_FAIL;
		goto error;
	}

	/* This starts the kernel tracing */
	ret = kernel_start_session(ksess);
	if (ret < 0) {
		ret = LTTNG_ERR_KERN_START_FAIL;
		goto error;
	}

	/* Quiescent wait after starting trace */
	kernel_wait_quiescent(wpipe);

	ret = LTTNG_OK;

error:
	return ret;
}
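
/*
 * Ordering matters above: the metadata channel is opened before its stream,
 * all channel streams are opened before the file descriptors are sent to the
 * consumer, and only then is the kernel tracer actually started.
 */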
/*
 * Command LTTNG_DISABLE_CHANNEL processed by the client thread.
 */
int cmd_disable_channel(struct ltt_session *session,
		enum lttng_domain_type domain, char *channel_name)
{
	int ret;
	struct ltt_ust_session *usess;

	usess = session->ust_session;

	rcu_read_lock();

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
	{
		ret = channel_kernel_disable(session->kernel_session,
				channel_name);
		if (ret != LTTNG_OK) {
			goto error;
		}

		kernel_wait_quiescent(kernel_tracer_fd);
		break;
	}
	case LTTNG_DOMAIN_UST:
	{
		struct ltt_ust_channel *uchan;
		struct lttng_ht *chan_ht;

		chan_ht = usess->domain_global.channels;

		uchan = trace_ust_find_channel_by_name(chan_ht, channel_name);
		if (uchan == NULL) {
			ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
			goto error;
		}

		ret = channel_ust_disable(usess, uchan);
		if (ret != LTTNG_OK) {
			goto error;
		}
		break;
	}
	default:
		ret = LTTNG_ERR_UNKNOWN_DOMAIN;
		goto error;
	}

	ret = LTTNG_OK;

error:
	rcu_read_unlock();
	return ret;
}
/*
 * Command LTTNG_TRACK_PID processed by the client thread.
 *
 * Called with session lock held.
 */
int cmd_track_pid(struct ltt_session *session, enum lttng_domain_type domain,
		int pid)
{
	int ret;

	rcu_read_lock();

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
	{
		struct ltt_kernel_session *ksess;

		ksess = session->kernel_session;

		ret = kernel_track_pid(ksess, pid);
		if (ret != LTTNG_OK) {
			goto error;
		}

		kernel_wait_quiescent(kernel_tracer_fd);
		break;
	}
	case LTTNG_DOMAIN_UST:
	{
		struct ltt_ust_session *usess;

		usess = session->ust_session;

		ret = trace_ust_track_pid(usess, pid);
		if (ret != LTTNG_OK) {
			goto error;
		}
		break;
	}
	default:
		ret = LTTNG_ERR_UNKNOWN_DOMAIN;
		goto error;
	}

	ret = LTTNG_OK;

error:
	rcu_read_unlock();
	return ret;
}
/*
 * Command LTTNG_UNTRACK_PID processed by the client thread.
 *
 * Called with session lock held.
 */
int cmd_untrack_pid(struct ltt_session *session, enum lttng_domain_type domain,
		int pid)
{
	int ret;

	rcu_read_lock();

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
	{
		struct ltt_kernel_session *ksess;

		ksess = session->kernel_session;

		ret = kernel_untrack_pid(ksess, pid);
		if (ret != LTTNG_OK) {
			goto error;
		}

		kernel_wait_quiescent(kernel_tracer_fd);
		break;
	}
	case LTTNG_DOMAIN_UST:
	{
		struct ltt_ust_session *usess;

		usess = session->ust_session;

		ret = trace_ust_untrack_pid(usess, pid);
		if (ret != LTTNG_OK) {
			goto error;
		}
		break;
	}
	default:
		ret = LTTNG_ERR_UNKNOWN_DOMAIN;
		goto error;
	}

	ret = LTTNG_OK;

error:
	rcu_read_unlock();
	return ret;
}
/*
 * Command LTTNG_ENABLE_CHANNEL processed by the client thread.
 *
 * The wpipe argument is used as a notifier for the kernel thread.
 */
int cmd_enable_channel(struct ltt_session *session,
		struct lttng_domain *domain, struct lttng_channel *attr, int wpipe)
{
	int ret;
	struct ltt_ust_session *usess = session->ust_session;
	struct lttng_ht *chan_ht;
	size_t len;

	assert(session);
	assert(attr);
	assert(domain);

	len = lttng_strnlen(attr->name, sizeof(attr->name));

	/* Validate channel name */
	if (attr->name[0] == '.' ||
		memchr(attr->name, '/', len) != NULL) {
		ret = LTTNG_ERR_INVALID_CHANNEL_NAME;
		goto end;
	}

	DBG("Enabling channel %s for session %s", attr->name, session->name);

	rcu_read_lock();

	/*
	 * Don't try to enable a channel if the session has been started at
	 * some point in time before. The tracer does not allow it.
	 */
	if (session->has_been_started) {
		ret = LTTNG_ERR_TRACE_ALREADY_STARTED;
		goto error;
	}

	/*
	 * If the session is a live session, remove the switch timer, the
	 * live timer does the same thing but sends also synchronisation
	 * beacons for inactive streams.
	 */
	if (session->live_timer > 0) {
		attr->attr.live_timer_interval = session->live_timer;
		attr->attr.switch_timer_interval = 0;
	}

	/* Check for feature support */
	switch (domain->type) {
	case LTTNG_DOMAIN_KERNEL:
	{
		if (kernel_supports_ring_buffer_snapshot_sample_positions(kernel_tracer_fd) != 1) {
			/* Sampling position of buffer is not supported */
			WARN("Kernel tracer does not support buffer monitoring. "
					"Setting the monitor interval timer to 0 "
					"(disabled) for channel '%s' of session '%s'",
					attr->name, session->name);
			lttng_channel_set_monitor_timer_interval(attr, 0);
		}
		break;
	}
	case LTTNG_DOMAIN_UST:
		break;
	case LTTNG_DOMAIN_JUL:
	case LTTNG_DOMAIN_LOG4J:
	case LTTNG_DOMAIN_PYTHON:
		if (!agent_tracing_is_enabled()) {
			DBG("Attempted to enable a channel in an agent domain but the agent thread is not running");
			ret = LTTNG_ERR_AGENT_TRACING_DISABLED;
			goto error;
		}
		break;
	default:
		ret = LTTNG_ERR_UNKNOWN_DOMAIN;
		goto error;
	}

	switch (domain->type) {
	case LTTNG_DOMAIN_KERNEL:
	{
		struct ltt_kernel_channel *kchan;

		kchan = trace_kernel_get_channel_by_name(attr->name,
				session->kernel_session);
		if (kchan == NULL) {
			ret = channel_kernel_create(session->kernel_session, attr, wpipe);
			if (attr->name[0] != '\0') {
				session->kernel_session->has_non_default_channel = 1;
			}
		} else {
			ret = channel_kernel_enable(session->kernel_session, kchan);
		}

		if (ret != LTTNG_OK) {
			goto error;
		}

		kernel_wait_quiescent(kernel_tracer_fd);
		break;
	}
	case LTTNG_DOMAIN_UST:
	case LTTNG_DOMAIN_JUL:
	case LTTNG_DOMAIN_LOG4J:
	case LTTNG_DOMAIN_PYTHON:
	{
		struct ltt_ust_channel *uchan;

		/*
		 * Current agent implementation limitations force us to allow
		 * only one channel at once in "agent" subdomains. Each
		 * subdomain has a default channel name which must be strictly
		 * enforced.
		 */
		if (domain->type == LTTNG_DOMAIN_JUL) {
			if (strncmp(attr->name, DEFAULT_JUL_CHANNEL_NAME,
					LTTNG_SYMBOL_NAME_LEN)) {
				ret = LTTNG_ERR_INVALID_CHANNEL_NAME;
				goto error;
			}
		} else if (domain->type == LTTNG_DOMAIN_LOG4J) {
			if (strncmp(attr->name, DEFAULT_LOG4J_CHANNEL_NAME,
					LTTNG_SYMBOL_NAME_LEN)) {
				ret = LTTNG_ERR_INVALID_CHANNEL_NAME;
				goto error;
			}
		} else if (domain->type == LTTNG_DOMAIN_PYTHON) {
			if (strncmp(attr->name, DEFAULT_PYTHON_CHANNEL_NAME,
					LTTNG_SYMBOL_NAME_LEN)) {
				ret = LTTNG_ERR_INVALID_CHANNEL_NAME;
				goto error;
			}
		}

		chan_ht = usess->domain_global.channels;

		uchan = trace_ust_find_channel_by_name(chan_ht, attr->name);
		if (uchan == NULL) {
			ret = channel_ust_create(usess, attr, domain->buf_type);
			if (attr->name[0] != '\0') {
				usess->has_non_default_channel = 1;
			}
		} else {
			ret = channel_ust_enable(usess, uchan);
		}
		break;
	}
	default:
		ret = LTTNG_ERR_UNKNOWN_DOMAIN;
		goto error;
	}

error:
	rcu_read_unlock();
end:
	return ret;
}
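
/*
 * Note: for the agent (JUL/log4j/Python) domains, only the per-domain default
 * channel name is accepted above; any other name is rejected with
 * LTTNG_ERR_INVALID_CHANNEL_NAME.
 */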
/*
 * Command LTTNG_DISABLE_EVENT processed by the client thread.
 */
int cmd_disable_event(struct ltt_session *session,
		enum lttng_domain_type domain, char *channel_name,
		struct lttng_event *event)
{
	int ret;
	char *event_name;

	DBG("Disable event command for event \'%s\'", event->name);

	event_name = event->name;

	/* Error out on unhandled search criteria */
	if (event->loglevel_type || event->loglevel != -1 || event->enabled
			|| event->pid || event->filter || event->exclusion) {
		ret = LTTNG_ERR_UNK;
		goto error;
	}

	rcu_read_lock();

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
	{
		struct ltt_kernel_channel *kchan;
		struct ltt_kernel_session *ksess;

		ksess = session->kernel_session;

		/*
		 * If a non-default channel has been created in the
		 * session, explicitly require that -c chan_name needs
		 * to be provided.
		 */
		if (ksess->has_non_default_channel && channel_name[0] == '\0') {
			ret = LTTNG_ERR_NEED_CHANNEL_NAME;
			goto error_unlock;
		}

		kchan = trace_kernel_get_channel_by_name(channel_name, ksess);
		if (kchan == NULL) {
			ret = LTTNG_ERR_KERN_CHAN_NOT_FOUND;
			goto error_unlock;
		}

		switch (event->type) {
		case LTTNG_EVENT_ALL:
		case LTTNG_EVENT_TRACEPOINT:
		case LTTNG_EVENT_SYSCALL:
		case LTTNG_EVENT_PROBE:
		case LTTNG_EVENT_FUNCTION:
		case LTTNG_EVENT_FUNCTION_ENTRY:/* fall-through */
			if (event_name[0] == '\0') {
				ret = event_kernel_disable_event(kchan,
					NULL, event->type);
			} else {
				ret = event_kernel_disable_event(kchan,
					event_name, event->type);
			}
			if (ret != LTTNG_OK) {
				goto error_unlock;
			}
			break;
		default:
			ret = LTTNG_ERR_UNK;
			goto error_unlock;
		}

		kernel_wait_quiescent(kernel_tracer_fd);
		break;
	}
	case LTTNG_DOMAIN_UST:
	{
		struct ltt_ust_channel *uchan;
		struct ltt_ust_session *usess;

		usess = session->ust_session;

		if (validate_ust_event_name(event_name)) {
			ret = LTTNG_ERR_INVALID_EVENT_NAME;
			goto error_unlock;
		}

		/*
		 * If a non-default channel has been created in the
		 * session, explicitly require that -c chan_name needs
		 * to be provided.
		 */
		if (usess->has_non_default_channel && channel_name[0] == '\0') {
			ret = LTTNG_ERR_NEED_CHANNEL_NAME;
			goto error_unlock;
		}

		uchan = trace_ust_find_channel_by_name(usess->domain_global.channels,
				channel_name);
		if (uchan == NULL) {
			ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
			goto error_unlock;
		}

		switch (event->type) {
		case LTTNG_EVENT_ALL:
			/*
			 * An empty event name means that everything
			 * should be disabled.
			 */
			if (event->name[0] == '\0') {
				ret = event_ust_disable_all_tracepoints(usess, uchan);
			} else {
				ret = event_ust_disable_tracepoint(usess, uchan,
						event_name);
			}
			if (ret != LTTNG_OK) {
				goto error_unlock;
			}
			break;
		default:
			ret = LTTNG_ERR_UNK;
			goto error_unlock;
		}

		DBG3("Disable UST event %s in channel %s completed", event_name,
				channel_name);
		break;
	}
	case LTTNG_DOMAIN_LOG4J:
	case LTTNG_DOMAIN_JUL:
	case LTTNG_DOMAIN_PYTHON:
	{
		struct agent *agt;
		struct ltt_ust_session *usess = session->ust_session;

		assert(usess);

		switch (event->type) {
		case LTTNG_EVENT_ALL:
			break;
		default:
			ret = LTTNG_ERR_UNK;
			goto error_unlock;
		}

		agt = trace_ust_find_agent(usess, domain);
		if (!agt) {
			ret = -LTTNG_ERR_UST_EVENT_NOT_FOUND;
			goto error_unlock;
		}
		/*
		 * An empty event name means that everything
		 * should be disabled.
		 */
		if (event->name[0] == '\0') {
			ret = event_agent_disable_all(usess, agt);
		} else {
			ret = event_agent_disable(usess, agt, event_name);
		}
		if (ret != LTTNG_OK) {
			goto error_unlock;
		}

		break;
	}
	default:
		ret = LTTNG_ERR_UND;
		goto error_unlock;
	}

	ret = LTTNG_OK;

error_unlock:
	rcu_read_unlock();
error:
	return ret;
}
/*
 * Command LTTNG_ADD_CONTEXT processed by the client thread.
 */
int cmd_add_context(struct ltt_session *session, enum lttng_domain_type domain,
		char *channel_name, struct lttng_event_context *ctx, int kwpipe)
{
	int ret, chan_kern_created = 0, chan_ust_created = 0;
	char *app_ctx_provider_name = NULL, *app_ctx_name = NULL;

	/*
	 * Don't try to add a context if the session has been started at
	 * some point in time before. The tracer does not allow it and would
	 * result in a corrupted trace.
	 */
	if (session->has_been_started) {
		ret = LTTNG_ERR_TRACE_ALREADY_STARTED;
		goto end;
	}

	if (ctx->ctx == LTTNG_EVENT_CONTEXT_APP_CONTEXT) {
		app_ctx_provider_name = ctx->u.app_ctx.provider_name;
		app_ctx_name = ctx->u.app_ctx.ctx_name;
	}

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
		assert(session->kernel_session);

		if (session->kernel_session->channel_count == 0) {
			/* Create default channel */
			ret = channel_kernel_create(session->kernel_session, NULL, kwpipe);
			if (ret != LTTNG_OK) {
				goto error;
			}
			chan_kern_created = 1;
		}
		/* Add kernel context to kernel tracer */
		ret = context_kernel_add(session->kernel_session, ctx, channel_name);
		if (ret != LTTNG_OK) {
			goto error;
		}
		break;
	case LTTNG_DOMAIN_JUL:
	case LTTNG_DOMAIN_LOG4J:
	{
		/*
		 * Validate channel name.
		 * If no channel name is given and the domain is JUL or LOG4J,
		 * set it to the appropriate domain-specific channel name. If
		 * a name is provided but does not match the expected channel
		 * name, return an error.
		 */
		if (domain == LTTNG_DOMAIN_JUL && *channel_name &&
				strcmp(channel_name,
				DEFAULT_JUL_CHANNEL_NAME)) {
			ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
			goto error;
		} else if (domain == LTTNG_DOMAIN_LOG4J && *channel_name &&
				strcmp(channel_name,
				DEFAULT_LOG4J_CHANNEL_NAME)) {
			ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
			goto error;
		}
	}
	/* break is _not_ missing here. */
	case LTTNG_DOMAIN_UST:
	{
		struct ltt_ust_session *usess = session->ust_session;
		unsigned int chan_count;

		assert(usess);

		chan_count = lttng_ht_get_count(usess->domain_global.channels);
		if (chan_count == 0) {
			struct lttng_channel *attr;
			/* Create default channel */
			attr = channel_new_default_attr(domain, usess->buffer_type);
			if (attr == NULL) {
				ret = LTTNG_ERR_FATAL;
				goto error;
			}

			ret = channel_ust_create(usess, attr, usess->buffer_type);
			if (ret != LTTNG_OK) {
				channel_attr_destroy(attr);
				goto error;
			}
			channel_attr_destroy(attr);
			chan_ust_created = 1;
		}

		ret = context_ust_add(usess, domain, ctx, channel_name);
		free(app_ctx_provider_name);
		free(app_ctx_name);
		app_ctx_name = NULL;
		app_ctx_provider_name = NULL;
		if (ret != LTTNG_OK) {
			goto error;
		}
		break;
	}
	default:
		ret = LTTNG_ERR_UND;
		goto error;
	}

	ret = LTTNG_OK;
	goto end;

error:
	if (chan_kern_created) {
		struct ltt_kernel_channel *kchan =
			trace_kernel_get_channel_by_name(DEFAULT_CHANNEL_NAME,
					session->kernel_session);
		/* Created previously, this should NOT fail. */
		assert(kchan);
		kernel_destroy_channel(kchan);
	}

	if (chan_ust_created) {
		struct ltt_ust_channel *uchan =
			trace_ust_find_channel_by_name(
					session->ust_session->domain_global.channels,
					DEFAULT_CHANNEL_NAME);
		/* Created previously, this should NOT fail. */
		assert(uchan);
		/* Remove from the channel list of the session. */
		trace_ust_delete_channel(session->ust_session->domain_global.channels,
				uchan);
		trace_ust_destroy_channel(uchan);
	}
end:
	free(app_ctx_provider_name);
	free(app_ctx_name);
	return ret;
}
static inline bool name_starts_with(const char *name, const char *prefix)
{
	const size_t max_cmp_len = min(strlen(prefix), LTTNG_SYMBOL_NAME_LEN);

	return !strncmp(name, prefix, max_cmp_len);
}

/* Perform userspace-specific event name validation */
static int validate_ust_event_name(const char *name)
{
	int ret = 0;

	if (!name) {
		ret = -1;
		goto end;
	}

	/*
	 * Check name against all internal UST event component namespaces used
	 * by the agents.
	 */
	if (name_starts_with(name, DEFAULT_JUL_EVENT_COMPONENT) ||
		name_starts_with(name, DEFAULT_LOG4J_EVENT_COMPONENT) ||
		name_starts_with(name, DEFAULT_PYTHON_EVENT_COMPONENT)) {
		ret = -1;
	}

end:
	return ret;
}
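
/*
 * For example (assuming the usual default component prefixes such as
 * "lttng_jul"), an event named "lttng_jul:something" is rejected here since
 * that namespace is reserved for the agent integration.
 */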
/*
 * Internal version of cmd_enable_event() with a supplemental
 * "internal_event" flag which is used to enable internal events which should
 * be hidden from clients. Such events are used in the agent implementation to
 * enable the events through which all "agent" events are funneled.
 */
static int _cmd_enable_event(struct ltt_session *session,
		struct lttng_domain *domain,
		char *channel_name, struct lttng_event *event,
		char *filter_expression,
		struct lttng_filter_bytecode *filter,
		struct lttng_event_exclusion *exclusion,
		int wpipe, bool internal_event)
{
	int ret = 0, channel_created = 0;
	struct lttng_channel *attr = NULL;

	assert(session);
	assert(event);
	assert(channel_name);

	/* If we have a filter, we must have its filter expression */
	assert(!(!!filter_expression ^ !!filter));

	/* Normalize event name as a globbing pattern */
	strutils_normalize_star_glob_pattern(event->name);

	/* Normalize exclusion names as globbing patterns */
	if (exclusion) {
		size_t i;

		for (i = 0; i < exclusion->count; i++) {
			char *name = LTTNG_EVENT_EXCLUSION_NAME_AT(exclusion, i);

			strutils_normalize_star_glob_pattern(name);
		}
	}

	DBG("Enable event command for event \'%s\'", event->name);

	rcu_read_lock();

	switch (domain->type) {
	case LTTNG_DOMAIN_KERNEL:
	{
		struct ltt_kernel_channel *kchan;

		/*
		 * If a non-default channel has been created in the
		 * session, explicitly require that -c chan_name needs
		 * to be provided.
		 */
		if (session->kernel_session->has_non_default_channel
				&& channel_name[0] == '\0') {
			ret = LTTNG_ERR_NEED_CHANNEL_NAME;
			goto error;
		}

		kchan = trace_kernel_get_channel_by_name(channel_name,
				session->kernel_session);
		if (kchan == NULL) {
			attr = channel_new_default_attr(LTTNG_DOMAIN_KERNEL,
					LTTNG_BUFFER_GLOBAL);
			if (attr == NULL) {
				ret = LTTNG_ERR_FATAL;
				goto error;
			}
			if (lttng_strncpy(attr->name, channel_name,
					sizeof(attr->name))) {
				ret = LTTNG_ERR_INVALID;
				goto error;
			}

			ret = cmd_enable_channel(session, domain, attr, wpipe);
			if (ret != LTTNG_OK) {
				goto error;
			}
			channel_created = 1;
		}

		/* Get the newly created kernel channel pointer */
		kchan = trace_kernel_get_channel_by_name(channel_name,
				session->kernel_session);
		if (kchan == NULL) {
			/* This should not happen... */
			ret = LTTNG_ERR_FATAL;
			goto error;
		}

		switch (event->type) {
		case LTTNG_EVENT_ALL:
		{
			char *filter_expression_a = NULL;
			struct lttng_filter_bytecode *filter_a = NULL;

			/*
			 * We need to duplicate filter_expression and filter,
			 * because ownership is passed to the first enable
			 * event.
			 */
			if (filter_expression) {
				filter_expression_a = strdup(filter_expression);
				if (!filter_expression_a) {
					ret = LTTNG_ERR_FATAL;
					goto error;
				}
			}
			if (filter) {
				filter_a = zmalloc(sizeof(*filter_a) + filter->len);
				if (!filter_a) {
					free(filter_expression_a);
					ret = LTTNG_ERR_FATAL;
					goto error;
				}
				memcpy(filter_a, filter, sizeof(*filter_a) + filter->len);
			}
			event->type = LTTNG_EVENT_TRACEPOINT;	/* Hack */
			ret = event_kernel_enable_event(kchan, event,
				filter_expression, filter);
			/* We have passed ownership */
			filter_expression = NULL;
			filter = NULL;
			if (ret != LTTNG_OK) {
				if (channel_created) {
					/* Let's not leak a useless channel. */
					kernel_destroy_channel(kchan);
				}
				free(filter_expression_a);
				free(filter_a);
				goto error;
			}
			event->type = LTTNG_EVENT_SYSCALL;	/* Hack */
			ret = event_kernel_enable_event(kchan, event,
				filter_expression_a, filter_a);
			/* We have passed ownership */
			filter_expression_a = NULL;
			filter_a = NULL;
			if (ret != LTTNG_OK) {
				goto error;
			}
			break;
		}
		case LTTNG_EVENT_PROBE:
		case LTTNG_EVENT_USERSPACE_PROBE:
		case LTTNG_EVENT_FUNCTION:
		case LTTNG_EVENT_FUNCTION_ENTRY:
		case LTTNG_EVENT_TRACEPOINT:
			ret = event_kernel_enable_event(kchan, event,
				filter_expression, filter);
			/* We have passed ownership */
			filter_expression = NULL;
			filter = NULL;
			if (ret != LTTNG_OK) {
				if (channel_created) {
					/* Let's not leak a useless channel. */
					kernel_destroy_channel(kchan);
				}
				goto error;
			}
			break;
		case LTTNG_EVENT_SYSCALL:
			ret = event_kernel_enable_event(kchan, event,
				filter_expression, filter);
			/* We have passed ownership */
			filter_expression = NULL;
			filter = NULL;
			if (ret != LTTNG_OK) {
				goto error;
			}
			break;
		default:
			ret = LTTNG_ERR_UNK;
			goto error;
		}

		kernel_wait_quiescent(kernel_tracer_fd);
		break;
	}
	case LTTNG_DOMAIN_UST:
	{
		struct ltt_ust_channel *uchan;
		struct ltt_ust_session *usess = session->ust_session;

		assert(usess);

		/*
		 * If a non-default channel has been created in the
		 * session, explicitly require that -c chan_name needs
		 * to be provided.
		 */
		if (usess->has_non_default_channel && channel_name[0] == '\0') {
			ret = LTTNG_ERR_NEED_CHANNEL_NAME;
			goto error;
		}

		/* Get channel from global UST domain */
		uchan = trace_ust_find_channel_by_name(usess->domain_global.channels,
				channel_name);
		if (uchan == NULL) {
			/* Create default channel */
			attr = channel_new_default_attr(LTTNG_DOMAIN_UST,
					usess->buffer_type);
			if (attr == NULL) {
				ret = LTTNG_ERR_FATAL;
				goto error;
			}
			if (lttng_strncpy(attr->name, channel_name,
					sizeof(attr->name))) {
				ret = LTTNG_ERR_INVALID;
				goto error;
			}

			ret = cmd_enable_channel(session, domain, attr, wpipe);
			if (ret != LTTNG_OK) {
				goto error;
			}

			/* Get the newly created channel reference back */
			uchan = trace_ust_find_channel_by_name(
					usess->domain_global.channels, channel_name);
		}

		if (uchan->domain != LTTNG_DOMAIN_UST && !internal_event) {
			/*
			 * Don't allow users to add UST events to channels which
			 * are assigned to a userspace subdomain (JUL, Log4J,
			 * Python, etc.).
			 */
			ret = LTTNG_ERR_INVALID_CHANNEL_DOMAIN;
			goto error;
		}

		if (!internal_event) {
			/*
			 * Ensure the event name is not reserved for internal
			 * use.
			 */
			ret = validate_ust_event_name(event->name);
			if (ret) {
				WARN("Userspace event name %s failed validation.",
						event->name);
				ret = LTTNG_ERR_INVALID_EVENT_NAME;
				goto error;
			}
		}

		/* At this point, the session and channel exist on the tracer */
		ret = event_ust_enable_tracepoint(usess, uchan, event,
				filter_expression, filter, exclusion,
				internal_event);
		/* We have passed ownership */
		filter_expression = NULL;
		filter = NULL;
		exclusion = NULL;
		if (ret == LTTNG_ERR_UST_EVENT_ENABLED) {
			goto already_enabled;
		} else if (ret != LTTNG_OK) {
			goto error;
		}
		break;
	}
	case LTTNG_DOMAIN_LOG4J:
	case LTTNG_DOMAIN_JUL:
	case LTTNG_DOMAIN_PYTHON:
	{
		const char *default_event_name, *default_chan_name;
		struct agent *agt;
		struct lttng_event uevent;
		struct lttng_domain tmp_dom;
		struct ltt_ust_session *usess = session->ust_session;

		assert(usess);

		if (!agent_tracing_is_enabled()) {
			DBG("Attempted to enable an event in an agent domain but the agent thread is not running");
			ret = LTTNG_ERR_AGENT_TRACING_DISABLED;
			goto error;
		}

		agt = trace_ust_find_agent(usess, domain->type);
		if (!agt) {
			agt = agent_create(domain->type);
			if (!agt) {
				ret = LTTNG_ERR_NOMEM;
				goto error;
			}
			agent_add(agt, usess->agents);
		}

		/* Create the default tracepoint. */
		memset(&uevent, 0, sizeof(uevent));
		uevent.type = LTTNG_EVENT_TRACEPOINT;
		uevent.loglevel_type = LTTNG_EVENT_LOGLEVEL_ALL;
		default_event_name = event_get_default_agent_ust_name(
				domain->type);
		if (!default_event_name) {
			ret = LTTNG_ERR_FATAL;
			goto error;
		}
		strncpy(uevent.name, default_event_name, sizeof(uevent.name));
		uevent.name[sizeof(uevent.name) - 1] = '\0';

		/*
		 * The domain type is changed because we are about to enable the
		 * default channel and event for the JUL domain that are hardcoded.
		 * This happens in the UST domain.
		 */
		memcpy(&tmp_dom, domain, sizeof(tmp_dom));
		tmp_dom.type = LTTNG_DOMAIN_UST;

		switch (domain->type) {
		case LTTNG_DOMAIN_LOG4J:
			default_chan_name = DEFAULT_LOG4J_CHANNEL_NAME;
			break;
		case LTTNG_DOMAIN_JUL:
			default_chan_name = DEFAULT_JUL_CHANNEL_NAME;
			break;
		case LTTNG_DOMAIN_PYTHON:
			default_chan_name = DEFAULT_PYTHON_CHANNEL_NAME;
			break;
		default:
			/* The switch/case we are in makes this impossible */
			assert(0);
		}

		{
			char *filter_expression_copy = NULL;
			struct lttng_filter_bytecode *filter_copy = NULL;

			if (filter) {
				const size_t filter_size = sizeof(
						struct lttng_filter_bytecode)
						+ filter->len;

				filter_copy = zmalloc(filter_size);
				if (!filter_copy) {
					ret = LTTNG_ERR_NOMEM;
					goto error;
				}
				memcpy(filter_copy, filter, filter_size);

				filter_expression_copy =
						strdup(filter_expression);
				if (!filter_expression_copy) {
					ret = LTTNG_ERR_NOMEM;
				}

				if (!filter_expression_copy || !filter_copy) {
					free(filter_expression_copy);
					free(filter_copy);
					goto error;
				}
			}

			ret = cmd_enable_event_internal(session, &tmp_dom,
					(char *) default_chan_name,
					&uevent, filter_expression_copy,
					filter_copy, NULL, wpipe);
		}

		if (ret == LTTNG_ERR_UST_EVENT_ENABLED) {
			goto already_enabled;
		} else if (ret != LTTNG_OK) {
			goto error;
		}

		/* The wild card * means that everything should be enabled. */
		if (strncmp(event->name, "*", 1) == 0 && strlen(event->name) == 1) {
			ret = event_agent_enable_all(usess, agt, event, filter,
					filter_expression);
		} else {
			ret = event_agent_enable(usess, agt, event, filter,
					filter_expression);
		}
		filter = NULL;
		filter_expression = NULL;
		if (ret != LTTNG_OK) {
			goto error;
		}

		break;
	}
	default:
		ret = LTTNG_ERR_UND;
		goto error;
	}

	ret = LTTNG_OK;

already_enabled:
error:
	free(filter_expression);
	free(filter);
	free(exclusion);
	channel_attr_destroy(attr);
	rcu_read_unlock();
	return ret;
}
/*
 * Command LTTNG_ENABLE_EVENT processed by the client thread.
 * We own filter, exclusion, and filter_expression.
 */
int cmd_enable_event(struct ltt_session *session, struct lttng_domain *domain,
		char *channel_name, struct lttng_event *event,
		char *filter_expression,
		struct lttng_filter_bytecode *filter,
		struct lttng_event_exclusion *exclusion,
		int wpipe)
{
	return _cmd_enable_event(session, domain, channel_name, event,
			filter_expression, filter, exclusion, wpipe, false);
}
/*
 * Enable an event which is internal to LTTng. An internal event
 * should never be made visible to clients and is immune to checks such as
 * reserved names.
 */
static int cmd_enable_event_internal(struct ltt_session *session,
		struct lttng_domain *domain,
		char *channel_name, struct lttng_event *event,
		char *filter_expression,
		struct lttng_filter_bytecode *filter,
		struct lttng_event_exclusion *exclusion,
		int wpipe)
{
	return _cmd_enable_event(session, domain, channel_name, event,
			filter_expression, filter, exclusion, wpipe, true);
}
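
/*
 * Internal events enabled through this path skip validate_ust_event_name()
 * and are flagged so that list_lttng_ust_global_events() hides them from
 * clients.
 */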
/*
 * Command LTTNG_LIST_TRACEPOINTS processed by the client thread.
 */
ssize_t cmd_list_tracepoints(enum lttng_domain_type domain,
		struct lttng_event **events)
{
	int ret;
	ssize_t nb_events = 0;

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
		nb_events = kernel_list_events(kernel_tracer_fd, events);
		if (nb_events < 0) {
			ret = LTTNG_ERR_KERN_LIST_FAIL;
			goto error;
		}
		break;
	case LTTNG_DOMAIN_UST:
		nb_events = ust_app_list_events(events);
		if (nb_events < 0) {
			ret = LTTNG_ERR_UST_LIST_FAIL;
			goto error;
		}
		break;
	case LTTNG_DOMAIN_LOG4J:
	case LTTNG_DOMAIN_JUL:
	case LTTNG_DOMAIN_PYTHON:
		nb_events = agent_list_events(events, domain);
		if (nb_events < 0) {
			ret = LTTNG_ERR_UST_LIST_FAIL;
			goto error;
		}
		break;
	default:
		ret = LTTNG_ERR_UND;
		goto error;
	}

	return nb_events;

error:
	/* Return negative value to differentiate return code */
	return -ret;
}
/*
 * Command LTTNG_LIST_TRACEPOINT_FIELDS processed by the client thread.
 */
ssize_t cmd_list_tracepoint_fields(enum lttng_domain_type domain,
		struct lttng_event_field **fields)
{
	int ret;
	ssize_t nb_fields = 0;

	switch (domain) {
	case LTTNG_DOMAIN_UST:
		nb_fields = ust_app_list_event_fields(fields);
		if (nb_fields < 0) {
			ret = LTTNG_ERR_UST_LIST_FAIL;
			goto error;
		}
		break;
	case LTTNG_DOMAIN_KERNEL:
	default: /* fall-through */
		ret = LTTNG_ERR_UND;
		goto error;
	}

	return nb_fields;

error:
	/* Return negative value to differentiate return code */
	return -ret;
}
ssize_t cmd_list_syscalls(struct lttng_event **events)
{
	return syscall_table_list(events);
}
/*
 * Command LTTNG_LIST_TRACKER_PIDS processed by the client thread.
 *
 * Called with session lock held.
 */
ssize_t cmd_list_tracker_pids(struct ltt_session *session,
		enum lttng_domain_type domain, int32_t **pids)
{
	int ret;
	ssize_t nr_pids = 0;

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
	{
		struct ltt_kernel_session *ksess;

		ksess = session->kernel_session;
		nr_pids = kernel_list_tracker_pids(ksess, pids);
		if (nr_pids < 0) {
			ret = LTTNG_ERR_KERN_LIST_FAIL;
			goto error;
		}
		break;
	}
	case LTTNG_DOMAIN_UST:
	{
		struct ltt_ust_session *usess;

		usess = session->ust_session;
		nr_pids = trace_ust_list_tracker_pids(usess, pids);
		if (nr_pids < 0) {
			ret = LTTNG_ERR_UST_LIST_FAIL;
			goto error;
		}
		break;
	}
	case LTTNG_DOMAIN_LOG4J:
	case LTTNG_DOMAIN_JUL:
	case LTTNG_DOMAIN_PYTHON:
	default:
		ret = LTTNG_ERR_UND;
		goto error;
	}

	return nr_pids;

error:
	/* Return negative value to differentiate return code */
	return -ret;
}
static int domain_mkdir(const struct consumer_output *output,
		const struct ltt_session *session,
		uid_t uid, gid_t gid)
{
	int ret;
	char *path = NULL;
	struct consumer_socket *socket;
	struct lttng_ht_iter iter;

	if (!output || !output->socks) {
		ERR("No consumer output found");
		ret = -1;
		goto end;
	}

	path = zmalloc(LTTNG_PATH_MAX * sizeof(char));
	if (!path) {
		ERR("Cannot allocate mkdir path");
		ret = -1;
		goto end;
	}

	ret = snprintf(path, LTTNG_PATH_MAX, "%s%s%s",
			session_get_base_path(session),
			output->chunk_path, output->subdir);
	if (ret < 0 || ret >= LTTNG_PATH_MAX) {
		ret = -1;
		goto end;
	}

	DBG("Domain mkdir %s for session %" PRIu64, path, session->id);
	/*
	 * We have to iterate to find a socket, but we only need to send the
	 * rename command to one consumer, so we break after the first one.
	 */
	cds_lfht_for_each_entry(output->socks->ht, &iter.iter, socket, node.node) {
		pthread_mutex_lock(socket->lock);
		ret = consumer_mkdir(socket, session->id, output, path, uid, gid);
		pthread_mutex_unlock(socket->lock);
		if (ret < 0) {
			ERR("Consumer mkdir");
			ret = -1;
			goto end;
		}
		break;
	}

	ret = 0;

end:
	free(path);
	return ret;
}
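/*
 * Illustrative note (assumed example values, not from the original source):
 * for a local session whose base path is
 * "/home/user/lttng-traces/auto-20170922-111748", a consumer chunk_path of
 * "/20170922-111754-43" and a subdir of "/kernel", the snprintf() in
 * domain_mkdir() yields
 * "/home/user/lttng-traces/auto-20170922-111748/20170922-111754-43/kernel",
 * which one consumer socket is then asked to create via consumer_mkdir().
 */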
static int session_mkdir(const struct ltt_session *session)
{
	int ret;
	struct consumer_output *output;
	uid_t uid;
	gid_t gid;

	/*
	 * Unsupported feature in lttng-relayd before 2.11, not an error since it
	 * is only needed for session rotation and the user will get an error
	 * message if they try to use that feature anyway.
	 */
	if (session->consumer->type == CONSUMER_DST_NET &&
			session->consumer->relay_major_version == 2 &&
			session->consumer->relay_minor_version < 11) {
		ret = 0;
		goto end;
	}

	if (session->kernel_session) {
		output = session->kernel_session->consumer;
		uid = session->kernel_session->uid;
		gid = session->kernel_session->gid;
		ret = domain_mkdir(output, session, uid, gid);
		if (ret) {
			ERR("Mkdir kernel");
			goto end;
		}
	}

	if (session->ust_session) {
		output = session->ust_session->consumer;
		uid = session->ust_session->uid;
		gid = session->ust_session->gid;
		ret = domain_mkdir(output, session, uid, gid);
		if (ret) {
			ERR("Mkdir UST");
			goto end;
		}
	}

	ret = 0;

end:
	return ret;
}
/*
 * Command LTTNG_START_TRACE processed by the client thread.
 *
 * Called with session mutex held.
 */
int cmd_start_trace(struct ltt_session *session)
{
	int ret;
	unsigned long nb_chan = 0;
	struct ltt_kernel_session *ksession;
	struct ltt_ust_session *usess;

	/* Ease our life a bit ;) */
	ksession = session->kernel_session;
	usess = session->ust_session;

	/* Is the session already started? */
	if (session->active) {
		ret = LTTNG_ERR_TRACE_ALREADY_STARTED;
		goto error;
	}

	/*
	 * Starting a session without a channel is useless since it is then
	 * impossible to enable a channel afterwards, so inform the client.
	 */
	if (usess && usess->domain_global.channels) {
		nb_chan += lttng_ht_get_count(usess->domain_global.channels);
	}
	if (ksession) {
		nb_chan += ksession->channel_count;
	}
	if (!nb_chan) {
		ret = LTTNG_ERR_NO_CHANNEL;
		goto error;
	}

	/*
	 * Record the timestamp of the first time the session is started for
	 * an eventual session rotation call.
	 */
	if (!session->has_been_started) {
		session->current_chunk_start_ts = time(NULL);
		if (session->current_chunk_start_ts == (time_t) -1) {
			PERROR("Failed to retrieve the \"%s\" session's start time",
					session->name);
			ret = LTTNG_ERR_FATAL;
			goto error;
		}

		if (!session->snapshot_mode && session->output_traces) {
			ret = session_mkdir(session);
			if (ret) {
				ERR("Failed to create the session directories");
				ret = LTTNG_ERR_CREATE_DIR_FAIL;
				goto error;
			}
		}
	}

	/* Kernel tracing */
	if (ksession != NULL) {
		DBG("Start kernel tracing session %s", session->name);
		ret = start_kernel_session(ksession, kernel_tracer_fd);
		if (ret != LTTNG_OK) {
			goto error;
		}
	}

	/* Flag session that trace should start automatically */
	if (usess) {
		/*
		 * Even though the start trace might fail, flag this session active so
		 * other applications coming in are started by default.
		 */
		usess->active = 1;

		ret = ust_app_start_trace_all(usess);
		if (ret < 0) {
			ret = LTTNG_ERR_UST_START_FAIL;
			goto error;
		}
	}

	/* Flag this after a successful start. */
	session->has_been_started = 1;
	session->active = 1;

	/*
	 * Clear the flag that indicates that a rotation was done while the
	 * session was stopped.
	 */
	session->rotated_after_last_stop = false;

	if (session->rotate_timer_period) {
		ret = timer_session_rotation_schedule_timer_start(session,
				session->rotate_timer_period);
		if (ret < 0) {
			ERR("Failed to enable rotate timer");
			ret = LTTNG_ERR_UNK;
			goto error;
		}
	}

	ret = LTTNG_OK;

error:
	return ret;
}
/*
 * Command LTTNG_STOP_TRACE processed by the client thread.
 */
int cmd_stop_trace(struct ltt_session *session)
{
	int ret;
	struct ltt_kernel_channel *kchan;
	struct ltt_kernel_session *ksession;
	struct ltt_ust_session *usess;
	bool error_occurred = false;

	DBG("Begin stop session %s (id %" PRIu64 ")", session->name, session->id);

	ksession = session->kernel_session;
	usess = session->ust_session;

	/* Session is not active. Skip everything and inform the client. */
	if (!session->active) {
		ret = LTTNG_ERR_TRACE_ALREADY_STOPPED;
		goto error;
	}

	if (ksession && ksession->active) {
		DBG("Stop kernel tracing");

		ret = kernel_stop_session(ksession);
		if (ret < 0) {
			ret = LTTNG_ERR_KERN_STOP_FAIL;
			goto error;
		}

		kernel_wait_quiescent(kernel_tracer_fd);

		/* Flush metadata after stopping (if exists) */
		if (ksession->metadata_stream_fd >= 0) {
			ret = kernel_metadata_flush_buffer(ksession->metadata_stream_fd);
			if (ret < 0) {
				ERR("Kernel metadata flush failed");
				error_occurred = true;
			}
		}

		/* Flush all buffers after stopping */
		cds_list_for_each_entry(kchan, &ksession->channel_list.head, list) {
			ret = kernel_flush_buffer(kchan);
			if (ret < 0) {
				ERR("Kernel flush buffer error");
				error_occurred = true;
			}
		}

		ksession->active = 0;
		DBG("Kernel session stopped %s (id %" PRIu64 ")", session->name,
				session->id);
	}

	if (usess && usess->active) {
		/*
		 * Even though the stop trace might fail, flag this session inactive so
		 * other applications coming in are not started by default.
		 */
		usess->active = 0;

		ret = ust_app_stop_trace_all(usess);
		if (ret < 0) {
			ret = LTTNG_ERR_UST_STOP_FAIL;
			goto error;
		}
	}

	/* Flag inactive after a successful stop. */
	session->active = 0;
	ret = !error_occurred ? LTTNG_OK : LTTNG_ERR_UNK;

error:
	return ret;
}
/*
 * Command LTTNG_SET_CONSUMER_URI processed by the client thread.
 */
int cmd_set_consumer_uri(struct ltt_session *session, size_t nb_uri,
		struct lttng_uri *uris)
{
	int ret;
	size_t i;
	struct ltt_kernel_session *ksess = session->kernel_session;
	struct ltt_ust_session *usess = session->ust_session;

	/* Can't set consumer URI if the session is active. */
	if (session->active) {
		ret = LTTNG_ERR_TRACE_ALREADY_STARTED;
		goto error;
	}

	/* Set the "global" consumer URIs */
	for (i = 0; i < nb_uri; i++) {
		ret = add_uri_to_consumer(session->consumer,
				&uris[i], 0, session->name);
		if (ret != LTTNG_OK) {
			goto error;
		}
	}

	/* Set UST session URIs */
	if (session->ust_session) {
		for (i = 0; i < nb_uri; i++) {
			ret = add_uri_to_consumer(
					session->ust_session->consumer,
					&uris[i], LTTNG_DOMAIN_UST,
					session->name);
			if (ret != LTTNG_OK) {
				goto error;
			}
		}
	}

	/* Set kernel session URIs */
	if (session->kernel_session) {
		for (i = 0; i < nb_uri; i++) {
			ret = add_uri_to_consumer(
					session->kernel_session->consumer,
					&uris[i], LTTNG_DOMAIN_KERNEL,
					session->name);
			if (ret != LTTNG_OK) {
				goto error;
			}
		}
	}

	/*
	 * Make sure to set the session in output mode after we set URI since a
	 * session can be created without URL (thus flagged in no output mode).
	 */
	session->output_traces = 1;
	if (ksess) {
		ksess->output_traces = 1;
	}
	if (usess) {
		usess->output_traces = 1;
	}

	ret = LTTNG_OK;

error:
	return ret;
}
/*
 * Command LTTNG_CREATE_SESSION processed by the client thread.
 */
int cmd_create_session_uri(char *name, struct lttng_uri *uris,
		size_t nb_uri, lttng_sock_cred *creds, unsigned int live_timer)
{
	int ret;
	struct ltt_session *session = NULL;

	/* Check if the session already exists. */
	session_lock_list();
	session = session_find_by_name(name);
	session_unlock_list();
	if (session != NULL) {
		ret = LTTNG_ERR_EXIST_SESS;
		goto end;
	}

	/* Create tracing session in the registry */
	ret = session_create(name, LTTNG_SOCK_GET_UID_CRED(creds),
			LTTNG_SOCK_GET_GID_CRED(creds));
	if (ret != LTTNG_OK) {
		goto end;
	}

	/* Get the newly created session pointer back. */
	session_lock_list();
	session = session_find_by_name(name);
	session_unlock_list();
	assert(session);

	session->live_timer = live_timer;
	/* Create default consumer output for the session not yet created. */
	session->consumer = consumer_create_output(CONSUMER_DST_LOCAL);
	if (session->consumer == NULL) {
		ret = LTTNG_ERR_FATAL;
		goto end;
	}

	if (uris) {
		ret = cmd_set_consumer_uri(session, nb_uri, uris);
		if (ret != LTTNG_OK) {
			goto end;
		}
		session->output_traces = 1;
	} else {
		session->output_traces = 0;
		DBG2("Session %s created with no output", session->name);
	}

	session->consumer->enabled = 1;

	ret = LTTNG_OK;

end:
	if (session) {
		session_lock_list();
		session_put(session);
		session_unlock_list();
	}
	return ret;
}
/*
 * Command LTTNG_CREATE_SESSION_SNAPSHOT processed by the client thread.
 */
int cmd_create_session_snapshot(char *name, struct lttng_uri *uris,
		size_t nb_uri, lttng_sock_cred *creds)
{
	int ret;
	struct ltt_session *session = NULL;
	struct snapshot_output *new_output = NULL;

	/*
	 * Create session in no output mode with URIs set to NULL. The uris we've
	 * received are for a default snapshot output, if any.
	 */
	ret = cmd_create_session_uri(name, NULL, 0, creds, 0);
	if (ret != LTTNG_OK) {
		goto end;
	}

	/* Get the newly created session pointer back. This should NEVER fail. */
	session_lock_list();
	session = session_find_by_name(name);
	session_unlock_list();
	assert(session);

	/* Flag session for snapshot mode. */
	session->snapshot_mode = 1;

	/* Skip snapshot output creation if no URI is given. */
	if (nb_uri == 0) {
		goto end;
	}

	new_output = snapshot_output_alloc();
	if (!new_output) {
		ret = LTTNG_ERR_NOMEM;
		goto error_snapshot_alloc;
	}

	ret = snapshot_output_init_with_uri(DEFAULT_SNAPSHOT_MAX_SIZE, NULL,
			uris, nb_uri, session->consumer, new_output, &session->snapshot);
	if (ret < 0) {
		if (ret == -ENOMEM) {
			ret = LTTNG_ERR_NOMEM;
		} else {
			ret = LTTNG_ERR_INVALID;
		}
		goto error_snapshot;
	}

	snapshot_add_output(&session->snapshot, new_output);
	ret = LTTNG_OK;
	goto end;

error_snapshot:
	snapshot_output_destroy(new_output);
error_snapshot_alloc:
end:
	if (session) {
		session_lock_list();
		session_put(session);
		session_unlock_list();
	}
	return ret;
}
/*
 * Command LTTNG_DESTROY_SESSION processed by the client thread.
 *
 * Called with session lock held.
 */
int cmd_destroy_session(struct ltt_session *session,
		struct notification_thread_handle *notification_thread_handle)
{
	int ret;

	DBG("Begin destroy session %s (id %" PRIu64 ")", session->name, session->id);

	if (session->rotation_schedule_timer_enabled) {
		if (timer_session_rotation_schedule_timer_stop(
				session)) {
			ERR("Failed to stop the \"rotation schedule\" timer of session %s",
					session->name);
		}
	}

	if (session->rotate_size) {
		unsubscribe_session_consumed_size_rotation(session, notification_thread_handle);
		session->rotate_size = 0;
	}

	if (session->current_archive_id != 0) {
		if (!session->rotated_after_last_stop) {
			ret = cmd_rotate_session(session, NULL);
			if (ret != LTTNG_OK) {
				ERR("Failed to perform an implicit rotation as part of the rotation: %s", lttng_strerror(-ret));
			}
		} else {
			/*
			 * Rename the active chunk to ensure it has a name
			 * of the form ts_begin-ts_end-id.
			 *
			 * Note that no trace data has been produced since
			 * the last rotation; the directory only needs to be
			 * renamed.
			 */
			ret = rename_active_chunk(session);
			if (ret) {
				ERR("Failed to rename active chunk during the destruction of session \"%s\"",
						session->name);
			}
		}
	}

	if (session->shm_path[0]) {
		/*
		 * When a session is created with an explicit shm_path,
		 * the consumer daemon will create its shared memory files
		 * at that location and will *not* unlink them. This is normal
		 * as the intention of that feature is to make it possible
		 * to retrieve the content of those files should a crash occur.
		 *
		 * To ensure the content of those files can be used, the
		 * sessiond daemon will replicate the content of the metadata
		 * cache in a metadata file.
		 *
		 * On clean-up, it is expected that the consumer daemon will
		 * unlink the shared memory files and that the session daemon
		 * will unlink the metadata file. Then, the session's directory
		 * in the shm path can be removed.
		 *
		 * Unfortunately, a flaw in the design of the sessiond's and
		 * consumerd's tear down of channels makes it impossible to
		 * determine when the sessiond _and_ the consumerd have both
		 * destroyed their representation of a channel. For one, the
		 * unlinking, close, and rmdir happen in deferred 'call_rcu'
		 * callbacks in both daemons.
		 *
		 * However, it is also impossible for the sessiond to know when
		 * the consumer daemon is done destroying its channel(s) since
		 * it occurs as a reaction to the closing of the channel's file
		 * descriptor. There is no resulting communication initiated
		 * from the consumerd to the sessiond to confirm that the
		 * operation is completed (and was successful).
		 *
		 * Until this is all fixed, the session daemon checks for the
		 * removal of the session's shm path which makes it possible
		 * to safely advertise a session as having been destroyed.
		 *
		 * Prior to this fix, it was not possible to reliably save
		 * a session making use of the --shm-path option, destroy it,
		 * and load it again. This is because the creation of the
		 * session would fail upon seeing the session's shm path
		 * already in existence.
		 *
		 * Note that none of the error paths in the check for the
		 * directory's existence return an error. This is normal
		 * as there isn't much that can be done. The session will
		 * be destroyed properly, except that we can't offer the
		 * guarantee that the same session can be re-created.
		 */
		current_completion_handler = &destroy_completion_handler.handler;
		ret = lttng_strncpy(destroy_completion_handler.shm_path,
				session->shm_path,
				sizeof(destroy_completion_handler.shm_path));
		assert(!ret);
	}

	/*
	 * The session is destroyed. However, note that the command context
	 * still holds a reference to the session, thus delaying its destruction
	 * _at least_ up to the point when that reference is released.
	 */
	session_destroy(session);

	return LTTNG_OK;
}
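/*
 * Summary of the completion flow described above: when a non-default shm_path
 * is set, cmd_destroy_session() installs destroy_completion_handler
 * (implemented by wait_on_path() at the end of this file). The client thread
 * later retrieves it through cmd_pop_completion_handler() and runs it,
 * polling the shm path every SESSION_DESTROY_SHM_PATH_CHECK_DELAY_US
 * microseconds until the directory has been removed.
 */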
/*
 * Command LTTNG_REGISTER_CONSUMER processed by the client thread.
 */
int cmd_register_consumer(struct ltt_session *session,
		enum lttng_domain_type domain, const char *sock_path,
		struct consumer_data *cdata)
{
	int ret, sock;
	struct consumer_socket *socket = NULL;

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
	{
		struct ltt_kernel_session *ksess = session->kernel_session;

		/* Can't register a consumer if there is already one */
		if (ksess->consumer_fds_sent != 0) {
			ret = LTTNG_ERR_KERN_CONSUMER_FAIL;
			goto error;
		}

		sock = lttcomm_connect_unix_sock(sock_path);
		if (sock < 0) {
			ret = LTTNG_ERR_CONNECT_FAIL;
			goto error;
		}
		cdata->cmd_sock = sock;

		socket = consumer_allocate_socket(&cdata->cmd_sock);
		if (socket == NULL) {
			ret = close(sock);
			if (ret < 0) {
				PERROR("close register consumer");
			}
			cdata->cmd_sock = -1;
			ret = LTTNG_ERR_FATAL;
			goto error;
		}

		socket->lock = zmalloc(sizeof(pthread_mutex_t));
		if (socket->lock == NULL) {
			PERROR("zmalloc pthread mutex");
			ret = LTTNG_ERR_FATAL;
			goto error;
		}
		pthread_mutex_init(socket->lock, NULL);
		socket->registered = 1;

		consumer_add_socket(socket, ksess->consumer);

		pthread_mutex_lock(&cdata->pid_mutex);
		cdata->pid = -1;
		pthread_mutex_unlock(&cdata->pid_mutex);

		break;
	}
	default:
		/* TODO: Userspace tracing */
		ret = LTTNG_ERR_UND;
		goto error;
	}

	return LTTNG_OK;

error:
	if (socket) {
		consumer_destroy_socket(socket);
	}
	return ret;
}
/*
 * Command LTTNG_LIST_DOMAINS processed by the client thread.
 */
ssize_t cmd_list_domains(struct ltt_session *session,
		struct lttng_domain **domains)
{
	int ret, index = 0;
	ssize_t nb_dom = 0;
	struct agent *agt;
	struct lttng_ht_iter iter;

	if (session->kernel_session != NULL) {
		DBG3("Listing domains found kernel domain");
		nb_dom++;
	}

	if (session->ust_session != NULL) {
		DBG3("Listing domains found UST global domain");
		nb_dom++;

		cds_lfht_for_each_entry(session->ust_session->agents->ht, &iter.iter,
				agt, node.node) {
			if (agt->being_used) {
				nb_dom++;
			}
		}
	}

	if (!nb_dom) {
		goto end;
	}

	*domains = zmalloc(nb_dom * sizeof(struct lttng_domain));
	if (*domains == NULL) {
		ret = LTTNG_ERR_FATAL;
		goto error;
	}

	if (session->kernel_session != NULL) {
		(*domains)[index].type = LTTNG_DOMAIN_KERNEL;

		/* Kernel session buffer type is always GLOBAL */
		(*domains)[index].buf_type = LTTNG_BUFFER_GLOBAL;

		index++;
	}

	if (session->ust_session != NULL) {
		(*domains)[index].type = LTTNG_DOMAIN_UST;
		(*domains)[index].buf_type = session->ust_session->buffer_type;
		index++;

		cds_lfht_for_each_entry(session->ust_session->agents->ht, &iter.iter,
				agt, node.node) {
			if (agt->being_used) {
				(*domains)[index].type = agt->domain;
				(*domains)[index].buf_type = session->ust_session->buffer_type;
				index++;
			}
		}
	}

end:
	return nb_dom;

error:
	/* Return negative value to differentiate return code */
	return -ret;
}
/*
 * Command LTTNG_LIST_CHANNELS processed by the client thread.
 */
ssize_t cmd_list_channels(enum lttng_domain_type domain,
		struct ltt_session *session, struct lttng_channel **channels)
{
	ssize_t nb_chan = 0, payload_size = 0, ret;

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
		if (session->kernel_session != NULL) {
			nb_chan = session->kernel_session->channel_count;
		}
		DBG3("Number of kernel channels %zd", nb_chan);
		if (nb_chan <= 0) {
			ret = -LTTNG_ERR_KERN_CHAN_NOT_FOUND;
			goto end;
		}
		break;
	case LTTNG_DOMAIN_UST:
		if (session->ust_session != NULL) {
			nb_chan = lttng_ht_get_count(
					session->ust_session->domain_global.channels);
		}
		DBG3("Number of UST global channels %zd", nb_chan);
		if (nb_chan < 0) {
			ret = -LTTNG_ERR_UST_CHAN_NOT_FOUND;
			goto end;
		}
		break;
	default:
		ret = -LTTNG_ERR_UND;
		goto end;
	}

	if (nb_chan > 0) {
		const size_t channel_size = sizeof(struct lttng_channel) +
				sizeof(struct lttng_channel_extended);
		struct lttng_channel_extended *channel_exts;

		payload_size = nb_chan * channel_size;
		*channels = zmalloc(payload_size);
		if (*channels == NULL) {
			ret = -LTTNG_ERR_FATAL;
			goto end;
		}

		channel_exts = ((void *) *channels) +
				(nb_chan * sizeof(struct lttng_channel));
		ret = list_lttng_channels(domain, session, *channels, channel_exts);
		if (ret != LTTNG_OK) {
			free(*channels);
			*channels = NULL;
			goto end;
		}
	} else {
		*channels = NULL;
	}

	ret = payload_size;
end:
	return ret;
}
/*
 * Command LTTNG_LIST_EVENTS processed by the client thread.
 */
ssize_t cmd_list_events(enum lttng_domain_type domain,
		struct ltt_session *session, char *channel_name,
		struct lttng_event **events, size_t *total_size)
{
	int ret = 0;
	ssize_t nb_event = 0;

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
		if (session->kernel_session != NULL) {
			nb_event = list_lttng_kernel_events(channel_name,
					session->kernel_session, events,
					total_size);
		}
		break;
	case LTTNG_DOMAIN_UST:
		if (session->ust_session != NULL) {
			nb_event = list_lttng_ust_global_events(channel_name,
					&session->ust_session->domain_global, events,
					total_size);
		}
		break;
	case LTTNG_DOMAIN_LOG4J:
	case LTTNG_DOMAIN_JUL:
	case LTTNG_DOMAIN_PYTHON:
		if (session->ust_session) {
			struct lttng_ht_iter iter;
			struct agent *agt;

			cds_lfht_for_each_entry(session->ust_session->agents->ht,
					&iter.iter, agt, node.node) {
				if (agt->domain == domain) {
					nb_event = list_lttng_agent_events(
							agt, events, total_size);
					break;
				}
			}
		}
		break;
	default:
		ret = LTTNG_ERR_UND;
		goto error;
	}

	return nb_event;

error:
	/* Return negative value to differentiate return code */
	return -ret;
}
/*
 * Using the session list, fill an lttng_session array to send back to the
 * client for session listing.
 *
 * The session list lock MUST be acquired before calling this function. Use
 * session_lock_list() and session_unlock_list().
 */
void cmd_list_lttng_sessions(struct lttng_session *sessions, uid_t uid,
		gid_t gid)
{
	int ret;
	unsigned int i = 0;
	struct ltt_session *session;
	struct ltt_session_list *list = session_get_list();

	DBG("Getting all available sessions for UID %d GID %d",
			uid, gid);
	/*
	 * Iterate over session list and append data after the control struct in
	 * the buffer.
	 */
	cds_list_for_each_entry(session, &list->head, list) {
		if (!session_get(session)) {
			continue;
		}
		/*
		 * Only list the sessions the user can control.
		 */
		if (!session_access_ok(session, uid, gid) ||
				session->destroyed) {
			session_put(session);
			continue;
		}

		struct ltt_kernel_session *ksess = session->kernel_session;
		struct ltt_ust_session *usess = session->ust_session;

		if (session->consumer->type == CONSUMER_DST_NET ||
				(ksess && ksess->consumer->type == CONSUMER_DST_NET) ||
				(usess && usess->consumer->type == CONSUMER_DST_NET)) {
			ret = build_network_session_path(sessions[i].path,
					sizeof(sessions[i].path), session);
		} else {
			ret = snprintf(sessions[i].path, sizeof(sessions[i].path), "%s",
					session->consumer->dst.session_root_path);
		}
		if (ret < 0) {
			PERROR("snprintf session path");
			session_put(session);
			continue;
		}

		strncpy(sessions[i].name, session->name, NAME_MAX);
		sessions[i].name[NAME_MAX - 1] = '\0';
		sessions[i].enabled = session->active;
		sessions[i].snapshot_mode = session->snapshot_mode;
		sessions[i].live_timer_interval = session->live_timer;
		i++;
		session_put(session);
	}
}
/*
 * Command LTTNG_DATA_PENDING returning 0 if the data is NOT pending meaning
 * ready for trace analysis (or any kind of reader) or else 1 for pending data.
 */
int cmd_data_pending(struct ltt_session *session)
{
	int ret;
	struct ltt_kernel_session *ksess = session->kernel_session;
	struct ltt_ust_session *usess = session->ust_session;

	DBG("Data pending for session %s", session->name);

	/* Session MUST be stopped to ask for data availability. */
	if (session->active) {
		ret = LTTNG_ERR_SESSION_STARTED;
		goto error;
	} else {
		/*
		 * If stopped, just make sure we've started before else the above call
		 * will always send that there is data pending.
		 *
		 * The consumer assumes that when the data pending command is received,
		 * the trace has been started before or else no output data is written
		 * by the streams which is a condition for data pending. So, it is
		 * *VERY* important that we don't ask the consumer before a start has
		 * been done.
		 */
		if (!session->has_been_started) {
			ret = 0;
			goto error;
		}
	}

	/* A rotation is still pending, we have to wait. */
	if (session->rotation_state == LTTNG_ROTATION_STATE_ONGOING) {
		DBG("Rotate still pending for session %s", session->name);
		ret = 1;
		goto error;
	}

	if (ksess && ksess->consumer) {
		ret = consumer_is_data_pending(ksess->id, ksess->consumer);
		if (ret == 1) {
			/* Data is still being extracted for the kernel. */
			goto error;
		}
	}

	if (usess && usess->consumer) {
		ret = consumer_is_data_pending(usess->id, usess->consumer);
		if (ret == 1) {
			/* Data is still being extracted for the user space tracer. */
			goto error;
		}
	}

	/* Data is ready to be read by a viewer */
	ret = 0;

error:
	return ret;
}
/*
 * Command LTTNG_SNAPSHOT_ADD_OUTPUT from the lttng ctl library.
 *
 * Return LTTNG_OK on success or else a LTTNG_ERR code.
 */
int cmd_snapshot_add_output(struct ltt_session *session,
		struct lttng_snapshot_output *output, uint32_t *id)
{
	int ret;
	struct snapshot_output *new_output;

	DBG("Cmd snapshot add output for session %s", session->name);

	/*
	 * Can't create an output if the session is not set in no-output mode.
	 */
	if (session->output_traces) {
		ret = LTTNG_ERR_NOT_SNAPSHOT_SESSION;
		goto error;
	}

	/* Only one output is allowed until we have the "tee" feature. */
	if (session->snapshot.nb_output == 1) {
		ret = LTTNG_ERR_SNAPSHOT_OUTPUT_EXIST;
		goto error;
	}

	new_output = snapshot_output_alloc();
	if (!new_output) {
		ret = LTTNG_ERR_NOMEM;
		goto error;
	}

	ret = snapshot_output_init(output->max_size, output->name,
			output->ctrl_url, output->data_url, session->consumer, new_output,
			&session->snapshot);
	if (ret < 0) {
		if (ret == -ENOMEM) {
			ret = LTTNG_ERR_NOMEM;
		} else {
			ret = LTTNG_ERR_INVALID;
		}
		goto free_error;
	}

	snapshot_add_output(&session->snapshot, new_output);
	if (id) {
		*id = new_output->id;
	}

	return LTTNG_OK;

free_error:
	snapshot_output_destroy(new_output);
error:
	return ret;
}
/*
 * Command LTTNG_SNAPSHOT_DEL_OUTPUT from lib lttng ctl.
 *
 * Return LTTNG_OK on success or else a LTTNG_ERR code.
 */
int cmd_snapshot_del_output(struct ltt_session *session,
		struct lttng_snapshot_output *output)
{
	int ret;
	struct snapshot_output *sout = NULL;

	/*
	 * Permission denied to create an output if the session is not
	 * set in no output mode.
	 */
	if (session->output_traces) {
		ret = LTTNG_ERR_NOT_SNAPSHOT_SESSION;
		goto error;
	}

	if (output->id) {
		DBG("Cmd snapshot del output id %" PRIu32 " for session %s", output->id,
				session->name);
		sout = snapshot_find_output_by_id(output->id, &session->snapshot);
	} else if (*output->name != '\0') {
		DBG("Cmd snapshot del output name %s for session %s", output->name,
				session->name);
		sout = snapshot_find_output_by_name(output->name, &session->snapshot);
	}
	if (!sout) {
		ret = LTTNG_ERR_INVALID;
		goto error;
	}

	snapshot_delete_output(&session->snapshot, sout);
	snapshot_output_destroy(sout);
	ret = LTTNG_OK;

error:
	return ret;
}
/*
 * Command LTTNG_SNAPSHOT_LIST_OUTPUT from lib lttng ctl.
 *
 * If no output is available, outputs is untouched and 0 is returned.
 *
 * Return the size of the newly allocated outputs or a negative LTTNG_ERR code.
 */
ssize_t cmd_snapshot_list_outputs(struct ltt_session *session,
		struct lttng_snapshot_output **outputs)
{
	int ret, idx = 0;
	struct lttng_snapshot_output *list = NULL;
	struct lttng_ht_iter iter;
	struct snapshot_output *output;

	DBG("Cmd snapshot list outputs for session %s", session->name);

	/*
	 * Permission denied to create an output if the session is not
	 * set in no output mode.
	 */
	if (session->output_traces) {
		ret = -LTTNG_ERR_NOT_SNAPSHOT_SESSION;
		goto end;
	}

	if (session->snapshot.nb_output == 0) {
		ret = 0;
		goto end;
	}

	list = zmalloc(session->snapshot.nb_output * sizeof(*list));
	if (!list) {
		ret = -LTTNG_ERR_NOMEM;
		goto end;
	}

	/* Copy list from session to the new list object. */
	cds_lfht_for_each_entry(session->snapshot.output_ht->ht, &iter.iter,
			output, node.node) {
		assert(output->consumer);
		list[idx].id = output->id;
		list[idx].max_size = output->max_size;
		if (lttng_strncpy(list[idx].name, output->name,
				sizeof(list[idx].name))) {
			ret = -LTTNG_ERR_INVALID;
			goto error;
		}
		if (output->consumer->type == CONSUMER_DST_LOCAL) {
			if (lttng_strncpy(list[idx].ctrl_url,
					output->consumer->dst.session_root_path,
					sizeof(list[idx].ctrl_url))) {
				ret = -LTTNG_ERR_INVALID;
				goto error;
			}
		} else {
			/* Control URI. */
			ret = uri_to_str_url(&output->consumer->dst.net.control,
					list[idx].ctrl_url, sizeof(list[idx].ctrl_url));
			if (ret < 0) {
				ret = -LTTNG_ERR_NOMEM;
				goto error;
			}

			/* Data URI. */
			ret = uri_to_str_url(&output->consumer->dst.net.data,
					list[idx].data_url, sizeof(list[idx].data_url));
			if (ret < 0) {
				ret = -LTTNG_ERR_NOMEM;
				goto error;
			}
		}
		idx++;
	}

	*outputs = list;
	list = NULL;
	ret = session->snapshot.nb_output;

error:
	free(list);
end:
	return ret;
}
/*
 * Check if we can regenerate the metadata for this session.
 * Only kernel, UST per-uid and non-live sessions are supported.
 *
 * Return 0 if the metadata can be generated, a LTTNG_ERR code otherwise.
 */
static int check_regenerate_metadata_support(struct ltt_session *session)
{
	int ret;

	if (session->live_timer != 0) {
		ret = LTTNG_ERR_LIVE_SESSION;
		goto end;
	}
	if (!session->active) {
		ret = LTTNG_ERR_SESSION_NOT_STARTED;
		goto end;
	}
	if (session->ust_session) {
		switch (session->ust_session->buffer_type) {
		case LTTNG_BUFFER_PER_UID:
			break;
		case LTTNG_BUFFER_PER_PID:
			ret = LTTNG_ERR_PER_PID_SESSION;
			goto end;
		default:
			ret = LTTNG_ERR_UNK;
			goto end;
		}
	}
	if (session->consumer->type == CONSUMER_DST_NET &&
			session->consumer->relay_minor_version < 8) {
		ret = LTTNG_ERR_RELAYD_VERSION_FAIL;
		goto end;
	}
	ret = 0;

end:
	return ret;
}
static int clear_metadata_file(int fd)
{
	int ret;
	off_t lseek_ret;

	lseek_ret = lseek(fd, 0, SEEK_SET);
	if (lseek_ret < 0) {
		PERROR("lseek");
		ret = -1;
		goto end;
	}

	ret = ftruncate(fd, 0);
	if (ret < 0) {
		PERROR("ftruncate");
		goto end;
	}

end:
	return ret;
}
static int ust_regenerate_metadata(struct ltt_ust_session *usess)
{
	int ret = 0;
	struct buffer_reg_uid *uid_reg = NULL;
	struct buffer_reg_session *session_reg = NULL;

	cds_list_for_each_entry(uid_reg, &usess->buffer_reg_uid_list, lnode) {
		struct ust_registry_session *registry;
		struct ust_registry_channel *chan;
		struct lttng_ht_iter iter_chan;

		session_reg = uid_reg->registry;
		registry = session_reg->reg.ust;

		pthread_mutex_lock(&registry->lock);
		registry->metadata_len_sent = 0;
		memset(registry->metadata, 0, registry->metadata_alloc_len);
		registry->metadata_len = 0;
		registry->metadata_version++;
		if (registry->metadata_fd > 0) {
			/* Clear the metadata file's content. */
			ret = clear_metadata_file(registry->metadata_fd);
			if (ret) {
				pthread_mutex_unlock(&registry->lock);
				goto end;
			}
		}

		ret = ust_metadata_session_statedump(registry, NULL,
				registry->major, registry->minor);
		if (ret) {
			pthread_mutex_unlock(&registry->lock);
			ERR("Failed to generate session metadata (err = %d)",
					ret);
			goto end;
		}
		cds_lfht_for_each_entry(registry->channels->ht, &iter_chan.iter,
				chan, node.node) {
			struct ust_registry_event *event;
			struct lttng_ht_iter iter_event;

			ret = ust_metadata_channel_statedump(registry, chan);
			if (ret) {
				pthread_mutex_unlock(&registry->lock);
				ERR("Failed to generate channel metadata "
						"(err = %d)", ret);
				goto end;
			}
			cds_lfht_for_each_entry(chan->ht->ht, &iter_event.iter,
					event, node.node) {
				ret = ust_metadata_event_statedump(registry,
						chan, event);
				if (ret) {
					pthread_mutex_unlock(&registry->lock);
					ERR("Failed to generate event metadata "
							"(err = %d)", ret);
					goto end;
				}
			}
		}
		pthread_mutex_unlock(&registry->lock);
	}

end:
	return ret;
}
/*
 * Command LTTNG_REGENERATE_METADATA from the lttng-ctl library.
 *
 * Ask the consumer to truncate the existing metadata file(s) and
 * then regenerate the metadata. Live and per-pid sessions are not
 * supported and return an error.
 *
 * Return 0 on success or else a LTTNG_ERR code.
 */
int cmd_regenerate_metadata(struct ltt_session *session)
{
	int ret;

	ret = check_regenerate_metadata_support(session);
	if (ret) {
		goto end;
	}

	if (session->kernel_session) {
		ret = kernctl_session_regenerate_metadata(
				session->kernel_session->fd);
		if (ret < 0) {
			ERR("Failed to regenerate the kernel metadata");
			goto end;
		}
	}

	if (session->ust_session) {
		ret = ust_regenerate_metadata(session->ust_session);
		if (ret < 0) {
			ERR("Failed to regenerate the UST metadata");
			goto end;
		}
	}
	DBG("Cmd metadata regenerate for session %s", session->name);
	ret = LTTNG_OK;

end:
	return ret;
}
/*
 * Command LTTNG_REGENERATE_STATEDUMP from the lttng-ctl library.
 *
 * Ask the tracer to regenerate a new statedump.
 *
 * Return 0 on success or else a LTTNG_ERR code.
 */
int cmd_regenerate_statedump(struct ltt_session *session)
{
	int ret;

	if (!session->active) {
		ret = LTTNG_ERR_SESSION_NOT_STARTED;
		goto end;
	}

	if (session->kernel_session) {
		ret = kernctl_session_regenerate_statedump(
				session->kernel_session->fd);
		/*
		 * Currently, the statedump in kernel can only fail if out
		 * of memory.
		 */
		if (ret) {
			if (ret == -ENOMEM) {
				ret = LTTNG_ERR_REGEN_STATEDUMP_NOMEM;
			} else {
				ret = LTTNG_ERR_REGEN_STATEDUMP_FAIL;
			}
			ERR("Failed to regenerate the kernel statedump");
			goto end;
		}
	}

	if (session->ust_session) {
		ret = ust_app_regenerate_statedump_all(session->ust_session);
		/*
		 * Currently, the statedump in UST always returns 0.
		 */
		if (ret != 0) {
			ret = LTTNG_ERR_REGEN_STATEDUMP_FAIL;
			ERR("Failed to regenerate the UST statedump");
			goto end;
		}
	}
	DBG("Cmd regenerate statedump for session %s", session->name);
	ret = LTTNG_OK;

end:
	return ret;
}
int cmd_register_trigger(struct command_ctx *cmd_ctx, int sock,
		struct notification_thread_handle *notification_thread)
{
	int ret;
	size_t trigger_len;
	ssize_t sock_recv_len;
	struct lttng_trigger *trigger = NULL;
	struct lttng_buffer_view view;
	struct lttng_dynamic_buffer trigger_buffer;

	lttng_dynamic_buffer_init(&trigger_buffer);
	trigger_len = (size_t) cmd_ctx->lsm->u.trigger.length;
	ret = lttng_dynamic_buffer_set_size(&trigger_buffer, trigger_len);
	if (ret) {
		ret = LTTNG_ERR_NOMEM;
		goto end;
	}

	sock_recv_len = lttcomm_recv_unix_sock(sock, trigger_buffer.data,
			trigger_len);
	if (sock_recv_len < 0 || sock_recv_len != trigger_len) {
		ERR("Failed to receive \"register trigger\" command payload");
		/* TODO: should this be a new error enum ? */
		ret = LTTNG_ERR_INVALID_TRIGGER;
		goto end;
	}

	view = lttng_buffer_view_from_dynamic_buffer(&trigger_buffer, 0, -1);
	if (lttng_trigger_create_from_buffer(&view, &trigger) !=
			trigger_len) {
		ERR("Invalid trigger payload received in \"register trigger\" command");
		ret = LTTNG_ERR_INVALID_TRIGGER;
		goto end;
	}

	ret = notification_thread_command_register_trigger(notification_thread,
			trigger);
	/* Ownership of trigger was transferred. */
	trigger = NULL;

end:
	lttng_trigger_destroy(trigger);
	lttng_dynamic_buffer_reset(&trigger_buffer);
	return ret;
}
int cmd_unregister_trigger(struct command_ctx *cmd_ctx, int sock,
		struct notification_thread_handle *notification_thread)
{
	int ret;
	size_t trigger_len;
	ssize_t sock_recv_len;
	struct lttng_trigger *trigger = NULL;
	struct lttng_buffer_view view;
	struct lttng_dynamic_buffer trigger_buffer;

	lttng_dynamic_buffer_init(&trigger_buffer);
	trigger_len = (size_t) cmd_ctx->lsm->u.trigger.length;
	ret = lttng_dynamic_buffer_set_size(&trigger_buffer, trigger_len);
	if (ret) {
		ret = LTTNG_ERR_NOMEM;
		goto end;
	}

	sock_recv_len = lttcomm_recv_unix_sock(sock, trigger_buffer.data,
			trigger_len);
	if (sock_recv_len < 0 || sock_recv_len != trigger_len) {
		ERR("Failed to receive \"unregister trigger\" command payload");
		/* TODO: should this be a new error enum ? */
		ret = LTTNG_ERR_INVALID_TRIGGER;
		goto end;
	}

	view = lttng_buffer_view_from_dynamic_buffer(&trigger_buffer, 0, -1);
	if (lttng_trigger_create_from_buffer(&view, &trigger) !=
			trigger_len) {
		ERR("Invalid trigger payload received in \"unregister trigger\" command");
		ret = LTTNG_ERR_INVALID_TRIGGER;
		goto end;
	}

	ret = notification_thread_command_unregister_trigger(notification_thread,
			trigger);

end:
	lttng_trigger_destroy(trigger);
	lttng_dynamic_buffer_reset(&trigger_buffer);
	return ret;
}
/*
 * Send relayd sockets from snapshot output to consumer. Ignore request if the
 * snapshot output is *not* set with a remote destination.
 *
 * Return LTTNG_OK on success or a LTTNG_ERR code.
 */
static enum lttng_error_code set_relayd_for_snapshot(
		struct consumer_output *consumer,
		struct snapshot_output *snap_output,
		struct ltt_session *session)
{
	enum lttng_error_code status = LTTNG_OK;
	struct lttng_ht_iter iter;
	struct consumer_socket *socket;

	assert(snap_output);

	DBG2("Set relayd object from snapshot output");

	/* Ignore if snapshot consumer output is not network. */
	if (snap_output->consumer->type != CONSUMER_DST_NET) {
		goto error;
	}

	/*
	 * For each consumer socket, create and send the relayd object of the
	 * snapshot output.
	 */
	cds_lfht_for_each_entry(snap_output->consumer->socks->ht, &iter.iter,
			socket, node.node) {
		pthread_mutex_lock(socket->lock);
		status = send_consumer_relayd_sockets(0, session->id,
				snap_output->consumer, socket,
				session->name, session->hostname,
				session->live_timer);
		pthread_mutex_unlock(socket->lock);
		if (status != LTTNG_OK) {
			goto error;
		}
	}

error:
	return status;
}
/*
 * Record a kernel snapshot.
 *
 * Return LTTNG_OK on success or a LTTNG_ERR code.
 */
static enum lttng_error_code record_kernel_snapshot(struct ltt_kernel_session *ksess,
		struct snapshot_output *output, struct ltt_session *session,
		int wait, uint64_t nb_packets_per_stream)
{
	int ret;
	enum lttng_error_code status;

	/*
	 * Copy kernel session sockets so we can communicate with the right
	 * consumer for the snapshot record command.
	 */
	ret = consumer_copy_sockets(output->consumer, ksess->consumer);
	if (ret < 0) {
		status = LTTNG_ERR_NOMEM;
		goto error;
	}

	status = set_relayd_for_snapshot(ksess->consumer, output, session);
	if (status != LTTNG_OK) {
		goto error_snapshot;
	}

	status = kernel_snapshot_record(ksess, output, wait, nb_packets_per_stream);
	if (status != LTTNG_OK) {
		goto error_snapshot;
	}

	goto end;

error_snapshot:
	/* Clean up copied sockets so this output can use some other later on. */
	consumer_destroy_output_sockets(output->consumer);
error:
end:
	return status;
}
/*
 * Record a UST snapshot.
 *
 * Returns LTTNG_OK on success or a LTTNG_ERR error code.
 */
static enum lttng_error_code record_ust_snapshot(struct ltt_ust_session *usess,
		struct snapshot_output *output, struct ltt_session *session,
		int wait, uint64_t nb_packets_per_stream)
{
	int ret;
	enum lttng_error_code status;

	/*
	 * Copy UST session sockets so we can communicate with the right
	 * consumer for the snapshot record command.
	 */
	ret = consumer_copy_sockets(output->consumer, usess->consumer);
	if (ret < 0) {
		status = LTTNG_ERR_NOMEM;
		goto error;
	}

	status = set_relayd_for_snapshot(usess->consumer, output, session);
	if (status != LTTNG_OK) {
		goto error_snapshot;
	}

	status = ust_app_snapshot_record(usess, output, wait, nb_packets_per_stream);
	if (status != LTTNG_OK) {
		goto error_snapshot;
	}

	goto end;

error_snapshot:
	/* Clean up copied sockets so this output can use some other later on. */
	consumer_destroy_output_sockets(output->consumer);
error:
end:
	return status;
}
static uint64_t get_session_size_one_more_packet_per_stream(
		struct ltt_session *session, uint64_t cur_nr_packets)
{
	uint64_t tot_size = 0;

	if (session->kernel_session) {
		struct ltt_kernel_channel *chan;
		struct ltt_kernel_session *ksess = session->kernel_session;

		cds_list_for_each_entry(chan, &ksess->channel_list.head, list) {
			if (cur_nr_packets >= chan->channel->attr.num_subbuf) {
				/*
				 * Don't take channel into account if we
				 * already grabbed all its packets.
				 */
				continue;
			}
			tot_size += chan->channel->attr.subbuf_size
					* chan->stream_count;
		}
	}

	if (session->ust_session) {
		struct ltt_ust_session *usess = session->ust_session;

		tot_size += ust_app_get_size_one_more_packet_per_stream(usess,
				cur_nr_packets);
	}

	return tot_size;
}
/*
 * Calculate the number of packets we can grab from each stream that
 * fits within the overall snapshot max size.
 *
 * Returns -1 on error, 0 means infinite number of packets, else > 0 is
 * the number of packets per stream.
 *
 * TODO: this approach is not perfect: we consider the worst case
 * (packet filling the sub-buffers) as an upper bound, but we could do
 * better if we do this calculation while we actually grab the packet
 * content: we would know how much padding we don't actually store into
 * the snapshot.
 *
 * This algorithm is currently bounded by the number of packets per
 * stream.
 *
 * Since we call this algorithm before actually grabbing the data, it's
 * an approximation: for instance, applications could appear/disappear
 * in between this call and actually grabbing data.
 */
static int64_t get_session_nb_packets_per_stream(struct ltt_session *session,
		uint64_t max_size)
{
	int64_t size_left;
	uint64_t cur_nb_packets = 0;

	if (!max_size) {
		return 0;	/* Infinite */
	}

	size_left = max_size;
	for (;;) {
		uint64_t one_more_packet_tot_size;

		one_more_packet_tot_size = get_session_size_one_more_packet_per_stream(session,
				cur_nb_packets);
		if (!one_more_packet_tot_size) {
			/* We are already grabbing all packets. */
			break;
		}
		size_left -= one_more_packet_tot_size;
		if (size_left < 0) {
			break;
		}
		cur_nb_packets++;
	}

	if (!cur_nb_packets) {
		/* Not enough room to grab one packet of each stream, error. */
		return -1;
	}

	return cur_nb_packets;
}
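/*
 * Worked example (assumed numbers, not from the original source): with a
 * single kernel channel of 4 streams and 16 kiB sub-buffers, each iteration
 * of the loop above accounts for 4 * 16 kiB = 64 kiB. A snapshot max-size of
 * 256 kiB therefore allows 4 packets per stream, while a max-size smaller
 * than 64 kiB cannot hold even one packet per stream and is reported as an
 * error (-1).
 */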
/*
 * Command LTTNG_SNAPSHOT_RECORD from lib lttng ctl.
 *
 * The wait parameter is ignored so this call always waits for the snapshot to
 * complete before returning.
 *
 * Return LTTNG_OK on success or else a LTTNG_ERR code.
 */
int cmd_snapshot_record(struct ltt_session *session,
		struct lttng_snapshot_output *output, int wait)
{
	enum lttng_error_code cmd_ret = LTTNG_OK;
	int ret;
	unsigned int use_tmp_output = 0;
	struct snapshot_output tmp_output;
	unsigned int snapshot_success = 0;
	char datetime[16];

	DBG("Cmd snapshot record for session %s", session->name);

	/* Get the datetime for the snapshot output directory. */
	ret = utils_get_current_time_str("%Y%m%d-%H%M%S", datetime,
			sizeof(datetime));
	if (!ret) {
		cmd_ret = LTTNG_ERR_INVALID;
		goto error;
	}

	/*
	 * Permission denied to create an output if the session is not
	 * set in no output mode.
	 */
	if (session->output_traces) {
		cmd_ret = LTTNG_ERR_NOT_SNAPSHOT_SESSION;
		goto error;
	}

	/* The session needs to be started at least once. */
	if (!session->has_been_started) {
		cmd_ret = LTTNG_ERR_START_SESSION_ONCE;
		goto error;
	}

	/* Use temporary output for the session. */
	if (*output->ctrl_url != '\0') {
		ret = snapshot_output_init(output->max_size, output->name,
				output->ctrl_url, output->data_url, session->consumer,
				&tmp_output, NULL);
		if (ret < 0) {
			if (ret == -ENOMEM) {
				cmd_ret = LTTNG_ERR_NOMEM;
			} else {
				cmd_ret = LTTNG_ERR_INVALID;
			}
			goto error;
		}

		/* Use the global session count for the temporary snapshot. */
		tmp_output.nb_snapshot = session->snapshot.nb_snapshot;

		/* Use the global datetime */
		memcpy(tmp_output.datetime, datetime, sizeof(datetime));
		use_tmp_output = 1;
	}

	if (use_tmp_output) {
		int64_t nb_packets_per_stream;

		nb_packets_per_stream = get_session_nb_packets_per_stream(session,
				tmp_output.max_size);
		if (nb_packets_per_stream < 0) {
			cmd_ret = LTTNG_ERR_MAX_SIZE_INVALID;
			goto error;
		}

		if (session->kernel_session) {
			cmd_ret = record_kernel_snapshot(session->kernel_session,
					&tmp_output, session,
					wait, nb_packets_per_stream);
			if (cmd_ret != LTTNG_OK) {
				goto error;
			}
		}

		if (session->ust_session) {
			cmd_ret = record_ust_snapshot(session->ust_session,
					&tmp_output, session,
					wait, nb_packets_per_stream);
			if (cmd_ret != LTTNG_OK) {
				goto error;
			}
		}

		snapshot_success = 1;
	} else {
		struct snapshot_output *sout;
		struct lttng_ht_iter iter;

		cds_lfht_for_each_entry(session->snapshot.output_ht->ht,
				&iter.iter, sout, node.node) {
			int64_t nb_packets_per_stream;

			/*
			 * Make a local copy of the output and assign the possible
			 * temporary value given by the caller.
			 */
			memset(&tmp_output, 0, sizeof(tmp_output));
			memcpy(&tmp_output, sout, sizeof(tmp_output));

			if (output->max_size != (uint64_t) -1ULL) {
				tmp_output.max_size = output->max_size;
			}

			nb_packets_per_stream = get_session_nb_packets_per_stream(session,
					tmp_output.max_size);
			if (nb_packets_per_stream < 0) {
				cmd_ret = LTTNG_ERR_MAX_SIZE_INVALID;
				goto error;
			}

			/* Use temporary name. */
			if (*output->name != '\0') {
				if (lttng_strncpy(tmp_output.name, output->name,
						sizeof(tmp_output.name))) {
					cmd_ret = LTTNG_ERR_INVALID;
					goto error;
				}
			}

			tmp_output.nb_snapshot = session->snapshot.nb_snapshot;
			memcpy(tmp_output.datetime, datetime, sizeof(datetime));

			if (session->kernel_session) {
				cmd_ret = record_kernel_snapshot(session->kernel_session,
						&tmp_output, session,
						wait, nb_packets_per_stream);
				if (cmd_ret != LTTNG_OK) {
					goto error;
				}
			}

			if (session->ust_session) {
				cmd_ret = record_ust_snapshot(session->ust_session,
						&tmp_output, session,
						wait, nb_packets_per_stream);
				if (cmd_ret != LTTNG_OK) {
					goto error;
				}
			}
			snapshot_success = 1;
		}
	}

	if (snapshot_success) {
		session->snapshot.nb_snapshot++;
	} else {
		cmd_ret = LTTNG_ERR_SNAPSHOT_FAIL;
	}

error:
	return cmd_ret;
}
/*
 * Command LTTNG_SET_SESSION_SHM_PATH processed by the client thread.
 */
int cmd_set_session_shm_path(struct ltt_session *session,
		const char *shm_path)
{
	/*
	 * Can only set shm path before session is started.
	 */
	if (session->has_been_started) {
		return LTTNG_ERR_SESSION_STARTED;
	}

	strncpy(session->shm_path, shm_path,
			sizeof(session->shm_path));
	session->shm_path[sizeof(session->shm_path) - 1] = '\0';

	return 0;
}
/*
 * Command LTTNG_ROTATE_SESSION from the lttng-ctl library.
 *
 * Ask the consumer to rotate the session output directory.
 * The session lock must be held.
 *
 * Returns LTTNG_OK on success or else a negative LTTng error code.
 */
int cmd_rotate_session(struct ltt_session *session,
		struct lttng_rotate_session_return *rotate_return)
{
	int ret;
	enum lttng_error_code cmd_ret = LTTNG_OK;
	size_t strf_ret;
	struct tm *timeinfo;
	char datetime[21];
	time_t now;
	/*
	 * Used to roll-back timestamps in case of failure to launch the
	 * rotation.
	 */
	time_t original_last_chunk_start_ts, original_current_chunk_start_ts;

	if (!session->has_been_started) {
		cmd_ret = LTTNG_ERR_START_SESSION_ONCE;
		goto end;
	}

	if (session->live_timer || session->snapshot_mode ||
			!session->output_traces) {
		cmd_ret = LTTNG_ERR_ROTATION_NOT_AVAILABLE;
		goto end;
	}

	/*
	 * Unsupported feature in lttng-relayd before 2.11.
	 */
	if (session->consumer->type == CONSUMER_DST_NET &&
			(session->consumer->relay_major_version == 2 &&
			session->consumer->relay_minor_version < 11)) {
		cmd_ret = LTTNG_ERR_ROTATION_NOT_AVAILABLE_RELAY;
		goto end;
	}

	if (session->rotation_state == LTTNG_ROTATION_STATE_ONGOING) {
		DBG("Refusing to launch a rotation; a rotation is already in progress for session %s",
				session->name);
		cmd_ret = LTTNG_ERR_ROTATION_PENDING;
		goto end;
	}

	/*
	 * After a stop, we only allow one rotation to occur, the other ones are
	 * useless until a new start.
	 */
	if (session->rotated_after_last_stop) {
		DBG("Session \"%s\" was already rotated after stop, refusing rotation",
				session->name);
		cmd_ret = LTTNG_ERR_ROTATION_MULTIPLE_AFTER_STOP;
		goto end;
	}

	/* Special case for the first rotation. */
	if (session->current_archive_id == 0) {
		const char *base_path = NULL;

		assert(session->kernel_session || session->ust_session);
		/* Either one of the two sessions is enough to get the root path. */
		base_path = session_get_base_path(session);

		ret = lttng_strncpy(session->rotation_chunk.current_rotate_path,
				base_path,
				sizeof(session->rotation_chunk.current_rotate_path));
		if (ret) {
			ERR("Failed to copy session base path to current rotation chunk path");
			cmd_ret = LTTNG_ERR_UNK;
			goto end;
		}
	} else {
		/*
		 * The currently active tracing path is now the folder we
		 * want to rotate.
		 */
		ret = lttng_strncpy(session->rotation_chunk.current_rotate_path,
				session->rotation_chunk.active_tracing_path,
				sizeof(session->rotation_chunk.current_rotate_path));
		if (ret) {
			ERR("Failed to copy the active tracing path to the current rotate path");
			cmd_ret = LTTNG_ERR_UNK;
			goto end;
		}
	}
	DBG("Current rotate path %s", session->rotation_chunk.current_rotate_path);

	/*
	 * Channels created after this point will belong to the next
	 * archive.
	 */
	session->current_archive_id++;

	now = time(NULL);
	if (now == (time_t) -1) {
		cmd_ret = LTTNG_ERR_UNK;
		goto end;
	}

	/* Sample chunk bounds for roll-back in case of error. */
	original_last_chunk_start_ts = session->last_chunk_start_ts;
	original_current_chunk_start_ts = session->current_chunk_start_ts;

	session->last_chunk_start_ts = session->current_chunk_start_ts;
	session->current_chunk_start_ts = now;

	timeinfo = localtime(&now);
	if (!timeinfo) {
		PERROR("Failed to sample local time in rotate session command");
		cmd_ret = LTTNG_ERR_UNK;
		goto end;
	}
	strf_ret = strftime(datetime, sizeof(datetime), "%Y%m%dT%H%M%S%z",
			timeinfo);
	if (!strf_ret) {
		ERR("Failed to format local time timestamp in rotate session command");
		cmd_ret = LTTNG_ERR_UNK;
		goto end;
	}

	/*
	 * A rotation has a local step even if the destination is a relay
	 * daemon; the buffers must be consumed by the consumer daemon.
	 */
	session->rotation_pending_local = true;
	session->rotation_pending_relay =
			session_get_consumer_destination_type(session) == CONSUMER_DST_NET;
	session->rotation_state = LTTNG_ROTATION_STATE_ONGOING;

	if (session->kernel_session) {
		/*
		 * The active path for the next rotation/destroy.
		 * Ex: ~/lttng-traces/auto-20170922-111748/20170922-111754-42
		 */
		ret = snprintf(session->rotation_chunk.active_tracing_path,
				sizeof(session->rotation_chunk.active_tracing_path),
				"%s/%s-%" PRIu64,
				session_get_base_path(session),
				datetime, session->current_archive_id + 1);
		if (ret < 0 || ret == sizeof(session->rotation_chunk.active_tracing_path)) {
			ERR("Failed to format active kernel tracing path in rotate session command");
			cmd_ret = LTTNG_ERR_UNK;
			goto error;
		}
		/*
		 * The sub-directory for the consumer
		 * Ex: /20170922-111754-42/kernel
		 */
		ret = snprintf(session->kernel_session->consumer->chunk_path,
				sizeof(session->kernel_session->consumer->chunk_path),
				"/%s-%" PRIu64, datetime,
				session->current_archive_id + 1);
		if (ret < 0 || ret == sizeof(session->kernel_session->consumer->chunk_path)) {
			ERR("Failed to format the kernel consumer's sub-directory in rotate session command");
			cmd_ret = LTTNG_ERR_UNK;
			goto error;
		}
		/*
		 * Create the new chunk folder, before the rotation begins so we don't
		 * race with the consumer/tracer activity.
		 */
		ret = domain_mkdir(session->kernel_session->consumer, session,
				session->kernel_session->uid,
				session->kernel_session->gid);
		if (ret) {
			ERR("Failed to create kernel session tracing path at %s",
					session->kernel_session->consumer->chunk_path);
			cmd_ret = LTTNG_ERR_CREATE_DIR_FAIL;
			goto error;
		}

		cmd_ret = kernel_rotate_session(session);
		if (cmd_ret != LTTNG_OK) {
			goto error;
		}
	}

	if (session->ust_session) {
		ret = snprintf(session->rotation_chunk.active_tracing_path,
				PATH_MAX, "%s/%s-%" PRIu64,
				session_get_base_path(session),
				datetime, session->current_archive_id + 1);
		if (ret < 0) {
			ERR("Failed to format active UST tracing path in rotate session command");
			cmd_ret = LTTNG_ERR_UNK;
			goto error;
		}

		ret = snprintf(session->ust_session->consumer->chunk_path,
				PATH_MAX, "/%s-%" PRIu64, datetime,
				session->current_archive_id + 1);
		if (ret < 0) {
			ERR("Failed to format the UST consumer's sub-directory in rotate session command");
			cmd_ret = LTTNG_ERR_UNK;
			goto error;
		}
		/*
		 * Create the new chunk folder, before the rotation begins so we don't
		 * race with the consumer/tracer activity.
		 */
		ret = domain_mkdir(session->ust_session->consumer, session,
				session->ust_session->uid,
				session->ust_session->gid);
		if (ret) {
			cmd_ret = LTTNG_ERR_CREATE_DIR_FAIL;
			goto error;
		}

		cmd_ret = ust_app_rotate_session(session);
		if (cmd_ret != LTTNG_OK) {
			goto error;
		}
	}

	ret = timer_session_rotation_pending_check_start(session,
			DEFAULT_ROTATE_PENDING_TIMER);
	if (ret) {
		cmd_ret = LTTNG_ERR_UNK;
		goto error;
	}

	if (!session->active) {
		session->rotated_after_last_stop = true;
	}

	if (rotate_return) {
		rotate_return->rotation_id = session->current_archive_id;
	}

	ret = notification_thread_command_session_rotation_ongoing(
			notification_thread_handle,
			session->name, session->uid, session->gid,
			session->current_archive_id - 1);
	if (ret != LTTNG_OK) {
		ERR("Failed to notify notification thread that a session rotation is ongoing for session %s",
				session->name);
	}

	DBG("Cmd rotate session %s, archive_id %" PRIu64 " sent",
			session->name, session->current_archive_id - 1);
end:
	ret = (cmd_ret == LTTNG_OK) ? cmd_ret : -((int) cmd_ret);
	return ret;
error:
	session->last_chunk_start_ts = original_last_chunk_start_ts;
	session->current_archive_id = original_current_chunk_start_ts;
	if (session_reset_rotation_state(session,
			LTTNG_ROTATION_STATE_NO_ROTATION)) {
		ERR("Failed to reset rotation state of session \"%s\"",
				session->name);
	}
	goto end;
}
/*
 * Command LTTNG_ROTATION_GET_INFO from the lttng-ctl library.
 *
 * Check if the session has finished its rotation.
 *
 * Return 0 on success or else a LTTNG_ERR code.
 */
int cmd_rotate_get_info(struct ltt_session *session,
		struct lttng_rotation_get_info_return *info_return,
		uint64_t rotation_id)
{
	int ret;

	DBG("Cmd rotate_get_info session %s, rotation id %" PRIu64, session->name,
			session->current_archive_id);

	if (session->current_archive_id != rotation_id) {
		info_return->status = (int32_t) LTTNG_ROTATION_STATE_EXPIRED;
		ret = LTTNG_OK;
		goto end;
	}

	switch (session->rotation_state) {
	case LTTNG_ROTATION_STATE_ONGOING:
		DBG("Reporting that rotation id %" PRIu64 " of session %s is still pending",
				rotation_id, session->name);
		break;
	case LTTNG_ROTATION_STATE_COMPLETED:
	{
		char *current_tracing_path_reply;
		size_t current_tracing_path_reply_len;

		switch (session_get_consumer_destination_type(session)) {
		case CONSUMER_DST_LOCAL:
			current_tracing_path_reply =
					info_return->location.local.absolute_path;
			current_tracing_path_reply_len =
					sizeof(info_return->location.local.absolute_path);
			info_return->location_type =
					(int8_t) LTTNG_TRACE_ARCHIVE_LOCATION_TYPE_LOCAL;
			break;
		case CONSUMER_DST_NET:
			current_tracing_path_reply =
					info_return->location.relay.relative_path;
			current_tracing_path_reply_len =
					sizeof(info_return->location.relay.relative_path);
			/* Currently the only supported relay protocol. */
			info_return->location.relay.protocol =
					(int8_t) LTTNG_TRACE_ARCHIVE_LOCATION_RELAY_PROTOCOL_TYPE_TCP;

			ret = lttng_strncpy(info_return->location.relay.host,
					session_get_net_consumer_hostname(session),
					sizeof(info_return->location.relay.host));
			if (ret) {
				ERR("Failed to copy host name to rotate_get_info reply");
				info_return->status = LTTNG_ROTATION_STATUS_ERROR;
				ret = -LTTNG_ERR_UNK;
				goto end;
			}

			session_get_net_consumer_ports(session,
					&info_return->location.relay.ports.control,
					&info_return->location.relay.ports.data);
			info_return->location_type =
					(int8_t) LTTNG_TRACE_ARCHIVE_LOCATION_TYPE_RELAY;
			break;
		default:
			abort();
		}

		ret = lttng_strncpy(current_tracing_path_reply,
				session->rotation_chunk.current_rotate_path,
				current_tracing_path_reply_len);
		if (ret) {
			ERR("Failed to copy current tracing path to rotate_get_info reply");
			info_return->status = LTTNG_ROTATION_STATUS_ERROR;
			ret = -LTTNG_ERR_UNK;
			goto end;
		}
		break;
	}
	case LTTNG_ROTATION_STATE_ERROR:
		DBG("Reporting that an error occurred during rotation %" PRIu64 " of session %s",
				rotation_id, session->name);
		break;
	default:
		abort();
	}

	info_return->status = (int32_t) session->rotation_state;
	ret = LTTNG_OK;
end:
	return ret;
}
/*
 * Command LTTNG_ROTATION_SET_SCHEDULE from the lttng-ctl library.
 *
 * Configure the automatic rotation parameters.
 * 'activate' to true means activate the rotation schedule type with 'new_value'.
 * 'activate' to false means deactivate the rotation schedule and validate that
 * 'new_value' has the same value as the currently active value.
 *
 * Return 0 on success or else a positive LTTNG_ERR code.
 */
int cmd_rotation_set_schedule(struct ltt_session *session,
		bool activate, enum lttng_rotation_schedule_type schedule_type,
		uint64_t new_value,
		struct notification_thread_handle *notification_thread_handle)
{
	int ret;
	uint64_t *parameter_value;

	DBG("Cmd rotate set schedule session %s", session->name);

	if (session->live_timer || session->snapshot_mode ||
			!session->output_traces) {
		DBG("Failing ROTATION_SET_SCHEDULE command as the rotation feature is not available for this session");
		ret = LTTNG_ERR_ROTATION_NOT_AVAILABLE;
		goto end;
	}

	switch (schedule_type) {
	case LTTNG_ROTATION_SCHEDULE_TYPE_SIZE_THRESHOLD:
		parameter_value = &session->rotate_size;
		break;
	case LTTNG_ROTATION_SCHEDULE_TYPE_PERIODIC:
		parameter_value = &session->rotate_timer_period;
		if (new_value >= UINT_MAX) {
			DBG("Failing ROTATION_SET_SCHEDULE command as the value requested for a periodic rotation schedule is invalid: %" PRIu64 " > %u (UINT_MAX)",
					new_value, UINT_MAX);
			ret = LTTNG_ERR_INVALID;
			goto end;
		}
		break;
	default:
		WARN("Failing ROTATION_SET_SCHEDULE command on unknown schedule type");
		ret = LTTNG_ERR_INVALID;
		goto end;
	}

	/* Improper use of the API. */
	if (new_value == -1ULL) {
		WARN("Failing ROTATION_SET_SCHEDULE command as the value requested is -1");
		ret = LTTNG_ERR_INVALID;
		goto end;
	}

	/*
	 * As indicated in struct ltt_session's comments, a value of == 0 means
	 * this schedule rotation type is not in use.
	 *
	 * Reject the command if we were asked to activate a schedule that was
	 * already active.
	 */
	if (activate && *parameter_value != 0) {
		DBG("Failing ROTATION_SET_SCHEDULE (activate) command as the schedule is already active");
		ret = LTTNG_ERR_ROTATION_SCHEDULE_SET;
		goto end;
	}

	/*
	 * Reject the command if we were asked to deactivate a schedule that was
	 * not active.
	 */
	if (!activate && *parameter_value == 0) {
		DBG("Failing ROTATION_SET_SCHEDULE (deactivate) command as the schedule is already inactive");
		ret = LTTNG_ERR_ROTATION_SCHEDULE_NOT_SET;
		goto end;
	}

	/*
	 * Reject the command if we were asked to deactivate a schedule that
	 * does not match the currently active value.
	 */
	if (!activate && *parameter_value != new_value) {
		DBG("Failing ROTATION_SET_SCHEDULE (deactivate) command as a nonexistent schedule was provided");
		ret = LTTNG_ERR_ROTATION_SCHEDULE_NOT_SET;
		goto end;
	}

	*parameter_value = activate ? new_value : 0;

	switch (schedule_type) {
	case LTTNG_ROTATION_SCHEDULE_TYPE_PERIODIC:
		if (activate && session->active) {
			/*
			 * Only start the timer if the session is active,
			 * otherwise it will be started when the session starts.
			 */
			ret = timer_session_rotation_schedule_timer_start(
					session, new_value);
			if (ret) {
				ERR("Failed to enable session rotation timer in ROTATION_SET_SCHEDULE command");
				ret = LTTNG_ERR_UNK;
				goto end;
			}
		} else if (!activate) {
			ret = timer_session_rotation_schedule_timer_stop(
					session);
			if (ret) {
				ERR("Failed to disable session rotation timer in ROTATION_SET_SCHEDULE command");
				ret = LTTNG_ERR_UNK;
				goto end;
			}
		}
		break;
	case LTTNG_ROTATION_SCHEDULE_TYPE_SIZE_THRESHOLD:
		if (activate) {
			ret = subscribe_session_consumed_size_rotation(session,
					new_value, notification_thread_handle);
			if (ret) {
				ERR("Failed to enable consumed-size notification in ROTATION_SET_SCHEDULE command");
				ret = LTTNG_ERR_UNK;
				goto end;
			}
		} else {
			ret = unsubscribe_session_consumed_size_rotation(session,
					notification_thread_handle);
			if (ret) {
				ERR("Failed to disable consumed-size notification in ROTATION_SET_SCHEDULE command");
				ret = LTTNG_ERR_UNK;
				goto end;
			}
		}
		break;
	default:
		/* Would have been caught before. */
		abort();
	}

	ret = LTTNG_OK;

end:
	return ret;
}
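/*
 * Illustrative usage (assumption, not from the original source): activating a
 * periodic rotation amounts to calling this command with activate = true,
 * schedule_type = LTTNG_ROTATION_SCHEDULE_TYPE_PERIODIC and new_value set to
 * the desired period; deactivating it later requires passing back the exact
 * same value, which is what the "*parameter_value != new_value" check above
 * enforces.
 */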
/* Wait for a given path to be removed before continuing. */
static enum lttng_error_code wait_on_path(void *path_data)
{
	const char *shm_path = path_data;

	DBG("Waiting for the shm path at %s to be removed before completing session destruction",
			shm_path);
	while (true) {
		int ret;
		struct stat st;

		ret = stat(shm_path, &st);
		if (ret) {
			if (errno != ENOENT) {
				PERROR("stat() returned an error while checking for the existence of the shm path");
			} else {
				DBG("shm path no longer exists, completing the destruction of session");
			}
			break;
		} else {
			if (!S_ISDIR(st.st_mode)) {
				ERR("The type of shm path %s returned by stat() is not a directory; aborting the wait for shm path removal",
						shm_path);
				break;
			}
		}
		usleep(SESSION_DESTROY_SHM_PATH_CHECK_DELAY_US);
	}
	return LTTNG_OK;
}
/*
 * Returns a pointer to a handler to run on completion of a command.
 * Returns NULL if no handler has to be run for the last command executed.
 */
const struct cmd_completion_handler *cmd_pop_completion_handler(void)
{
	struct cmd_completion_handler *handler = current_completion_handler;

	current_completion_handler = NULL;
	return handler;
}

/*
 * Init command subsystem.
 */
void cmd_init(void)
{
	/*
	 * Set network sequence index to 1 for streams to match a relayd
	 * socket on the consumer side.
	 */
	pthread_mutex_lock(&relayd_net_seq_idx_lock);
	relayd_net_seq_idx = 1;
	pthread_mutex_unlock(&relayd_net_seq_idx_lock);

	DBG("Command subsystem initialized");
}