2 * Copyright (C) 2012 - David Goulet <dgoulet@efficios.com>
3 * Copyright (C) 2016 - Jérémie Galarneau <jeremie.galarneau@efficios.com>
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License, version 2 only, as
7 * published by the Free Software Foundation.
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc., 51
16 * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
22 #include <urcu/list.h>
23 #include <urcu/uatomic.h>
26 #include <common/defaults.h>
27 #include <common/common.h>
28 #include <common/sessiond-comm/sessiond-comm.h>
29 #include <common/relayd/relayd.h>
30 #include <common/utils.h>
31 #include <common/compat/string.h>
32 #include <common/kernel-ctl/kernel-ctl.h>
33 #include <common/dynamic-buffer.h>
34 #include <common/buffer-view.h>
35 #include <lttng/trigger/trigger-internal.h>
36 #include <lttng/condition/condition.h>
37 #include <lttng/action/action.h>
38 #include <lttng/channel.h>
39 #include <lttng/channel-internal.h>
40 #include <lttng/rotate-internal.h>
41 #include <lttng/location-internal.h>
42 #include <common/string-utils/string-utils.h>
47 #include "health-sessiond.h"
49 #include "kernel-consumer.h"
50 #include "lttng-sessiond.h"
52 #include "lttng-syscall.h"
54 #include "buffer-registry.h"
55 #include "notification-thread.h"
56 #include "notification-thread-commands.h"
58 #include "rotation-thread.h"
59 #include "sessiond-timer.h"
60 #include "agent-thread.h"
64 /* Sleep for 100ms between each check for the shm path's deletion. */
65 #define SESSION_DESTROY_SHM_PATH_CHECK_DELAY_US 100000
/*
 * Completion-handler callback: waits for a session's shm_path to be
 * deleted before command completion. Defined later in this file.
 */
static enum lttng_error_code wait_on_path(void *path);
70 * Command completion handler that is used by the destroy command
71 * when a session that has a non-default shm_path is being destroyed.
73 * See comment in cmd_destroy_session() for the rationale.
75 static struct destroy_completion_handler
{
76 struct cmd_completion_handler handler
;
77 char shm_path
[member_sizeof(struct ltt_session
, shm_path
)];
78 } destroy_completion_handler
= {
81 .data
= destroy_completion_handler
.shm_path
/* Completion handler to run after the currently-processed command, if any. */
static struct cmd_completion_handler *current_completion_handler;
/*
 * Used to keep a unique index for each relayd socket created where this value
 * is associated with streams on the consumer so it can match the right relayd
 * to send to. It must be accessed with the relayd_net_seq_idx_lock
 * held.
 */
static pthread_mutex_t relayd_net_seq_idx_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t relayd_net_seq_idx;
static int validate_ust_event_name(const char *);
/*
 * NOTE(review): the trailing parameter line of this prototype was lost in
 * extraction; `int wpipe` matches the kernel-notifier convention used by
 * the other enable paths in this file — confirm against the definition.
 */
static int cmd_enable_event_internal(struct ltt_session *session,
		struct lttng_domain *domain,
		char *channel_name, struct lttng_event *event,
		char *filter_expression,
		struct lttng_filter_bytecode *filter,
		struct lttng_event_exclusion *exclusion,
		int wpipe);
107 * Create a session path used by list_lttng_sessions for the case that the
108 * session consumer is on the network.
110 static int build_network_session_path(char *dst
, size_t size
,
111 struct ltt_session
*session
)
113 int ret
, kdata_port
, udata_port
;
114 struct lttng_uri
*kuri
= NULL
, *uuri
= NULL
, *uri
= NULL
;
115 char tmp_uurl
[PATH_MAX
], tmp_urls
[PATH_MAX
];
120 memset(tmp_urls
, 0, sizeof(tmp_urls
));
121 memset(tmp_uurl
, 0, sizeof(tmp_uurl
));
123 kdata_port
= udata_port
= DEFAULT_NETWORK_DATA_PORT
;
125 if (session
->kernel_session
&& session
->kernel_session
->consumer
) {
126 kuri
= &session
->kernel_session
->consumer
->dst
.net
.control
;
127 kdata_port
= session
->kernel_session
->consumer
->dst
.net
.data
.port
;
130 if (session
->ust_session
&& session
->ust_session
->consumer
) {
131 uuri
= &session
->ust_session
->consumer
->dst
.net
.control
;
132 udata_port
= session
->ust_session
->consumer
->dst
.net
.data
.port
;
135 if (uuri
== NULL
&& kuri
== NULL
) {
136 uri
= &session
->consumer
->dst
.net
.control
;
137 kdata_port
= session
->consumer
->dst
.net
.data
.port
;
138 } else if (kuri
&& uuri
) {
139 ret
= uri_compare(kuri
, uuri
);
143 /* Build uuri URL string */
144 ret
= uri_to_str_url(uuri
, tmp_uurl
, sizeof(tmp_uurl
));
151 } else if (kuri
&& uuri
== NULL
) {
153 } else if (uuri
&& kuri
== NULL
) {
157 ret
= uri_to_str_url(uri
, tmp_urls
, sizeof(tmp_urls
));
163 * Do we have a UST url set. If yes, this means we have both kernel and UST
166 if (*tmp_uurl
!= '\0') {
167 ret
= snprintf(dst
, size
, "[K]: %s [data: %d] -- [U]: %s [data: %d]",
168 tmp_urls
, kdata_port
, tmp_uurl
, udata_port
);
171 if (kuri
|| (!kuri
&& !uuri
)) {
174 /* No kernel URI, use the UST port. */
177 ret
= snprintf(dst
, size
, "%s [data: %d]", tmp_urls
, dport
);
185 * Get run-time attributes if the session has been started (discarded events,
188 static int get_kernel_runtime_stats(struct ltt_session
*session
,
189 struct ltt_kernel_channel
*kchan
, uint64_t *discarded_events
,
190 uint64_t *lost_packets
)
194 if (!session
->has_been_started
) {
196 *discarded_events
= 0;
201 ret
= consumer_get_discarded_events(session
->id
, kchan
->key
,
202 session
->kernel_session
->consumer
,
208 ret
= consumer_get_lost_packets(session
->id
, kchan
->key
,
209 session
->kernel_session
->consumer
,
220 * Get run-time attributes if the session has been started (discarded events,
223 static int get_ust_runtime_stats(struct ltt_session
*session
,
224 struct ltt_ust_channel
*uchan
, uint64_t *discarded_events
,
225 uint64_t *lost_packets
)
228 struct ltt_ust_session
*usess
;
230 if (!discarded_events
|| !lost_packets
) {
235 usess
= session
->ust_session
;
236 assert(discarded_events
);
237 assert(lost_packets
);
239 if (!usess
|| !session
->has_been_started
) {
240 *discarded_events
= 0;
246 if (usess
->buffer_type
== LTTNG_BUFFER_PER_UID
) {
247 ret
= ust_app_uid_get_channel_runtime_stats(usess
->id
,
248 &usess
->buffer_reg_uid_list
,
249 usess
->consumer
, uchan
->id
,
250 uchan
->attr
.overwrite
,
253 } else if (usess
->buffer_type
== LTTNG_BUFFER_PER_PID
) {
254 ret
= ust_app_pid_get_channel_runtime_stats(usess
,
255 uchan
, usess
->consumer
,
256 uchan
->attr
.overwrite
,
262 *discarded_events
+= uchan
->per_pid_closed_app_discarded
;
263 *lost_packets
+= uchan
->per_pid_closed_app_lost
;
265 ERR("Unsupported buffer type");
276 * Fill lttng_channel array of all channels.
278 static ssize_t
list_lttng_channels(enum lttng_domain_type domain
,
279 struct ltt_session
*session
, struct lttng_channel
*channels
,
280 struct lttng_channel_extended
*chan_exts
)
283 struct ltt_kernel_channel
*kchan
;
285 DBG("Listing channels for session %s", session
->name
);
288 case LTTNG_DOMAIN_KERNEL
:
289 /* Kernel channels */
290 if (session
->kernel_session
!= NULL
) {
291 cds_list_for_each_entry(kchan
,
292 &session
->kernel_session
->channel_list
.head
, list
) {
293 uint64_t discarded_events
, lost_packets
;
294 struct lttng_channel_extended
*extended
;
296 extended
= (struct lttng_channel_extended
*)
297 kchan
->channel
->attr
.extended
.ptr
;
299 ret
= get_kernel_runtime_stats(session
, kchan
,
300 &discarded_events
, &lost_packets
);
304 /* Copy lttng_channel struct to array */
305 memcpy(&channels
[i
], kchan
->channel
, sizeof(struct lttng_channel
));
306 channels
[i
].enabled
= kchan
->enabled
;
307 chan_exts
[i
].discarded_events
=
309 chan_exts
[i
].lost_packets
= lost_packets
;
310 chan_exts
[i
].monitor_timer_interval
=
311 extended
->monitor_timer_interval
;
312 chan_exts
[i
].blocking_timeout
= 0;
317 case LTTNG_DOMAIN_UST
:
319 struct lttng_ht_iter iter
;
320 struct ltt_ust_channel
*uchan
;
323 cds_lfht_for_each_entry(session
->ust_session
->domain_global
.channels
->ht
,
324 &iter
.iter
, uchan
, node
.node
) {
325 uint64_t discarded_events
= 0, lost_packets
= 0;
327 if (lttng_strncpy(channels
[i
].name
, uchan
->name
,
328 LTTNG_SYMBOL_NAME_LEN
)) {
331 channels
[i
].attr
.overwrite
= uchan
->attr
.overwrite
;
332 channels
[i
].attr
.subbuf_size
= uchan
->attr
.subbuf_size
;
333 channels
[i
].attr
.num_subbuf
= uchan
->attr
.num_subbuf
;
334 channels
[i
].attr
.switch_timer_interval
=
335 uchan
->attr
.switch_timer_interval
;
336 channels
[i
].attr
.read_timer_interval
=
337 uchan
->attr
.read_timer_interval
;
338 channels
[i
].enabled
= uchan
->enabled
;
339 channels
[i
].attr
.tracefile_size
= uchan
->tracefile_size
;
340 channels
[i
].attr
.tracefile_count
= uchan
->tracefile_count
;
343 * Map enum lttng_ust_output to enum lttng_event_output.
345 switch (uchan
->attr
.output
) {
347 channels
[i
].attr
.output
= LTTNG_EVENT_MMAP
;
351 * LTTNG_UST_MMAP is the only supported UST
358 chan_exts
[i
].monitor_timer_interval
=
359 uchan
->monitor_timer_interval
;
360 chan_exts
[i
].blocking_timeout
=
361 uchan
->attr
.u
.s
.blocking_timeout
;
363 ret
= get_ust_runtime_stats(session
, uchan
,
364 &discarded_events
, &lost_packets
);
368 chan_exts
[i
].discarded_events
= discarded_events
;
369 chan_exts
[i
].lost_packets
= lost_packets
;
381 return -LTTNG_ERR_FATAL
;
387 static void increment_extended_len(const char *filter_expression
,
388 struct lttng_event_exclusion
*exclusion
, size_t *extended_len
)
390 *extended_len
+= sizeof(struct lttcomm_event_extended_header
);
392 if (filter_expression
) {
393 *extended_len
+= strlen(filter_expression
) + 1;
397 *extended_len
+= exclusion
->count
* LTTNG_SYMBOL_NAME_LEN
;
401 static void append_extended_info(const char *filter_expression
,
402 struct lttng_event_exclusion
*exclusion
, void **extended_at
)
404 struct lttcomm_event_extended_header extended_header
;
405 size_t filter_len
= 0;
406 size_t nb_exclusions
= 0;
408 if (filter_expression
) {
409 filter_len
= strlen(filter_expression
) + 1;
413 nb_exclusions
= exclusion
->count
;
416 /* Set header fields */
417 extended_header
.filter_len
= filter_len
;
418 extended_header
.nb_exclusions
= nb_exclusions
;
421 memcpy(*extended_at
, &extended_header
, sizeof(extended_header
));
422 *extended_at
+= sizeof(extended_header
);
424 /* Copy filter string */
425 if (filter_expression
) {
426 memcpy(*extended_at
, filter_expression
, filter_len
);
427 *extended_at
+= filter_len
;
430 /* Copy exclusion names */
432 size_t len
= nb_exclusions
* LTTNG_SYMBOL_NAME_LEN
;
434 memcpy(*extended_at
, &exclusion
->names
, len
);
440 * Create a list of agent domain events.
442 * Return number of events in list on success or else a negative value.
444 static int list_lttng_agent_events(struct agent
*agt
,
445 struct lttng_event
**events
, size_t *total_size
)
448 unsigned int nb_event
= 0;
449 struct agent_event
*event
;
450 struct lttng_event
*tmp_events
;
451 struct lttng_ht_iter iter
;
452 size_t extended_len
= 0;
458 DBG3("Listing agent events");
461 nb_event
= lttng_ht_get_count(agt
->events
);
469 /* Compute required extended infos size */
470 extended_len
= nb_event
* sizeof(struct lttcomm_event_extended_header
);
473 * This is only valid because the commands which add events are
474 * processed in the same thread as the listing.
477 cds_lfht_for_each_entry(agt
->events
->ht
, &iter
.iter
, event
, node
.node
) {
478 increment_extended_len(event
->filter_expression
, NULL
,
483 *total_size
= nb_event
* sizeof(*tmp_events
) + extended_len
;
484 tmp_events
= zmalloc(*total_size
);
486 PERROR("zmalloc agent events session");
487 ret
= -LTTNG_ERR_FATAL
;
491 extended_at
= ((uint8_t *) tmp_events
) +
492 nb_event
* sizeof(struct lttng_event
);
495 cds_lfht_for_each_entry(agt
->events
->ht
, &iter
.iter
, event
, node
.node
) {
496 strncpy(tmp_events
[i
].name
, event
->name
, sizeof(tmp_events
[i
].name
));
497 tmp_events
[i
].name
[sizeof(tmp_events
[i
].name
) - 1] = '\0';
498 tmp_events
[i
].enabled
= event
->enabled
;
499 tmp_events
[i
].loglevel
= event
->loglevel_value
;
500 tmp_events
[i
].loglevel_type
= event
->loglevel_type
;
503 /* Append extended info */
504 append_extended_info(event
->filter_expression
, NULL
,
509 *events
= tmp_events
;
513 assert(nb_event
== i
);
518 * Create a list of ust global domain events.
520 static int list_lttng_ust_global_events(char *channel_name
,
521 struct ltt_ust_domain_global
*ust_global
,
522 struct lttng_event
**events
, size_t *total_size
)
525 unsigned int nb_event
= 0;
526 struct lttng_ht_iter iter
;
527 struct lttng_ht_node_str
*node
;
528 struct ltt_ust_channel
*uchan
;
529 struct ltt_ust_event
*uevent
;
530 struct lttng_event
*tmp
;
531 size_t extended_len
= 0;
534 DBG("Listing UST global events for channel %s", channel_name
);
538 lttng_ht_lookup(ust_global
->channels
, (void *)channel_name
, &iter
);
539 node
= lttng_ht_iter_get_node_str(&iter
);
541 ret
= LTTNG_ERR_UST_CHAN_NOT_FOUND
;
545 uchan
= caa_container_of(&node
->node
, struct ltt_ust_channel
, node
.node
);
547 nb_event
= lttng_ht_get_count(uchan
->events
);
554 DBG3("Listing UST global %d events", nb_event
);
556 /* Compute required extended infos size */
557 cds_lfht_for_each_entry(uchan
->events
->ht
, &iter
.iter
, uevent
, node
.node
) {
558 if (uevent
->internal
) {
563 increment_extended_len(uevent
->filter_expression
,
564 uevent
->exclusion
, &extended_len
);
567 /* All events are internal, skip. */
573 *total_size
= nb_event
* sizeof(struct lttng_event
) + extended_len
;
574 tmp
= zmalloc(*total_size
);
576 ret
= -LTTNG_ERR_FATAL
;
580 extended_at
= ((uint8_t *) tmp
) + nb_event
* sizeof(struct lttng_event
);
582 cds_lfht_for_each_entry(uchan
->events
->ht
, &iter
.iter
, uevent
, node
.node
) {
583 if (uevent
->internal
) {
584 /* This event should remain hidden from clients */
587 strncpy(tmp
[i
].name
, uevent
->attr
.name
, LTTNG_SYMBOL_NAME_LEN
);
588 tmp
[i
].name
[LTTNG_SYMBOL_NAME_LEN
- 1] = '\0';
589 tmp
[i
].enabled
= uevent
->enabled
;
591 switch (uevent
->attr
.instrumentation
) {
592 case LTTNG_UST_TRACEPOINT
:
593 tmp
[i
].type
= LTTNG_EVENT_TRACEPOINT
;
595 case LTTNG_UST_PROBE
:
596 tmp
[i
].type
= LTTNG_EVENT_PROBE
;
598 case LTTNG_UST_FUNCTION
:
599 tmp
[i
].type
= LTTNG_EVENT_FUNCTION
;
603 tmp
[i
].loglevel
= uevent
->attr
.loglevel
;
604 switch (uevent
->attr
.loglevel_type
) {
605 case LTTNG_UST_LOGLEVEL_ALL
:
606 tmp
[i
].loglevel_type
= LTTNG_EVENT_LOGLEVEL_ALL
;
608 case LTTNG_UST_LOGLEVEL_RANGE
:
609 tmp
[i
].loglevel_type
= LTTNG_EVENT_LOGLEVEL_RANGE
;
611 case LTTNG_UST_LOGLEVEL_SINGLE
:
612 tmp
[i
].loglevel_type
= LTTNG_EVENT_LOGLEVEL_SINGLE
;
615 if (uevent
->filter
) {
618 if (uevent
->exclusion
) {
619 tmp
[i
].exclusion
= 1;
623 /* Append extended info */
624 append_extended_info(uevent
->filter_expression
,
625 uevent
->exclusion
, &extended_at
);
636 * Fill lttng_event array of all kernel events in the channel.
638 static int list_lttng_kernel_events(char *channel_name
,
639 struct ltt_kernel_session
*kernel_session
,
640 struct lttng_event
**events
, size_t *total_size
)
643 unsigned int nb_event
;
644 struct ltt_kernel_event
*event
;
645 struct ltt_kernel_channel
*kchan
;
646 size_t extended_len
= 0;
649 kchan
= trace_kernel_get_channel_by_name(channel_name
, kernel_session
);
651 ret
= LTTNG_ERR_KERN_CHAN_NOT_FOUND
;
655 nb_event
= kchan
->event_count
;
657 DBG("Listing events for channel %s", kchan
->channel
->name
);
665 /* Compute required extended infos size */
666 cds_list_for_each_entry(event
, &kchan
->events_list
.head
, list
) {
667 increment_extended_len(event
->filter_expression
, NULL
,
671 *total_size
= nb_event
* sizeof(struct lttng_event
) + extended_len
;
672 *events
= zmalloc(*total_size
);
673 if (*events
== NULL
) {
674 ret
= LTTNG_ERR_FATAL
;
678 extended_at
= ((void *) *events
) +
679 nb_event
* sizeof(struct lttng_event
);
681 /* Kernel channels */
682 cds_list_for_each_entry(event
, &kchan
->events_list
.head
, list
) {
683 strncpy((*events
)[i
].name
, event
->event
->name
, LTTNG_SYMBOL_NAME_LEN
);
684 (*events
)[i
].name
[LTTNG_SYMBOL_NAME_LEN
- 1] = '\0';
685 (*events
)[i
].enabled
= event
->enabled
;
686 (*events
)[i
].filter
=
687 (unsigned char) !!event
->filter_expression
;
689 switch (event
->event
->instrumentation
) {
690 case LTTNG_KERNEL_TRACEPOINT
:
691 (*events
)[i
].type
= LTTNG_EVENT_TRACEPOINT
;
693 case LTTNG_KERNEL_KRETPROBE
:
694 (*events
)[i
].type
= LTTNG_EVENT_FUNCTION
;
695 memcpy(&(*events
)[i
].attr
.probe
, &event
->event
->u
.kprobe
,
696 sizeof(struct lttng_kernel_kprobe
));
698 case LTTNG_KERNEL_KPROBE
:
699 (*events
)[i
].type
= LTTNG_EVENT_PROBE
;
700 memcpy(&(*events
)[i
].attr
.probe
, &event
->event
->u
.kprobe
,
701 sizeof(struct lttng_kernel_kprobe
));
703 case LTTNG_KERNEL_FUNCTION
:
704 (*events
)[i
].type
= LTTNG_EVENT_FUNCTION
;
705 memcpy(&((*events
)[i
].attr
.ftrace
), &event
->event
->u
.ftrace
,
706 sizeof(struct lttng_kernel_function
));
708 case LTTNG_KERNEL_NOOP
:
709 (*events
)[i
].type
= LTTNG_EVENT_NOOP
;
711 case LTTNG_KERNEL_SYSCALL
:
712 (*events
)[i
].type
= LTTNG_EVENT_SYSCALL
;
714 case LTTNG_KERNEL_ALL
:
720 /* Append extended info */
721 append_extended_info(event
->filter_expression
, NULL
,
729 /* Negate the error code to differentiate the size from an error */
734 * Add URI so the consumer output object. Set the correct path depending on the
735 * domain adding the default trace directory.
737 static int add_uri_to_consumer(struct consumer_output
*consumer
,
738 struct lttng_uri
*uri
, enum lttng_domain_type domain
,
739 const char *session_name
)
742 const char *default_trace_dir
;
746 if (consumer
== NULL
) {
747 DBG("No consumer detected. Don't add URI. Stopping.");
748 ret
= LTTNG_ERR_NO_CONSUMER
;
753 case LTTNG_DOMAIN_KERNEL
:
754 default_trace_dir
= DEFAULT_KERNEL_TRACE_DIR
;
756 case LTTNG_DOMAIN_UST
:
757 default_trace_dir
= DEFAULT_UST_TRACE_DIR
;
761 * This case is possible is we try to add the URI to the global tracing
762 * session consumer object which in this case there is no subdir.
764 default_trace_dir
= "";
767 switch (uri
->dtype
) {
770 DBG2("Setting network URI to consumer");
772 if (consumer
->type
== CONSUMER_DST_NET
) {
773 if ((uri
->stype
== LTTNG_STREAM_CONTROL
&&
774 consumer
->dst
.net
.control_isset
) ||
775 (uri
->stype
== LTTNG_STREAM_DATA
&&
776 consumer
->dst
.net
.data_isset
)) {
777 ret
= LTTNG_ERR_URL_EXIST
;
781 memset(&consumer
->dst
.net
, 0, sizeof(consumer
->dst
.net
));
784 consumer
->type
= CONSUMER_DST_NET
;
786 /* Set URI into consumer output object */
787 ret
= consumer_set_network_uri(consumer
, uri
);
791 } else if (ret
== 1) {
793 * URI was the same in the consumer so we do not append the subdir
794 * again so to not duplicate output dir.
800 if (uri
->stype
== LTTNG_STREAM_CONTROL
&& strlen(uri
->subdir
) == 0) {
801 ret
= consumer_set_subdir(consumer
, session_name
);
803 ret
= LTTNG_ERR_FATAL
;
808 if (uri
->stype
== LTTNG_STREAM_CONTROL
) {
809 /* On a new subdir, reappend the default trace dir. */
810 strncat(consumer
->subdir
, default_trace_dir
,
811 sizeof(consumer
->subdir
) - strlen(consumer
->subdir
) - 1);
812 DBG3("Append domain trace name to subdir %s", consumer
->subdir
);
817 DBG2("Setting trace directory path from URI to %s", uri
->dst
.path
);
818 memset(consumer
->dst
.session_root_path
, 0,
819 sizeof(consumer
->dst
.session_root_path
));
820 /* Explicit length checks for strcpy and strcat. */
821 if (strlen(uri
->dst
.path
) + strlen(default_trace_dir
)
822 >= sizeof(consumer
->dst
.session_root_path
)) {
823 ret
= LTTNG_ERR_FATAL
;
826 strcpy(consumer
->dst
.session_root_path
, uri
->dst
.path
);
827 /* Append default trace dir */
828 strcat(consumer
->dst
.session_root_path
, default_trace_dir
);
829 /* Flag consumer as local. */
830 consumer
->type
= CONSUMER_DST_LOCAL
;
841 * Init tracing by creating trace directory and sending fds kernel consumer.
843 static int init_kernel_tracing(struct ltt_kernel_session
*session
)
846 struct lttng_ht_iter iter
;
847 struct consumer_socket
*socket
;
853 if (session
->consumer_fds_sent
== 0 && session
->consumer
!= NULL
) {
854 cds_lfht_for_each_entry(session
->consumer
->socks
->ht
, &iter
.iter
,
856 pthread_mutex_lock(socket
->lock
);
857 ret
= kernel_consumer_send_session(socket
, session
);
858 pthread_mutex_unlock(socket
->lock
);
860 ret
= LTTNG_ERR_KERN_CONSUMER_FAIL
;
872 * Create a socket to the relayd using the URI.
874 * On success, the relayd_sock pointer is set to the created socket.
875 * Else, it's stays untouched and a lttcomm error code is returned.
877 static int create_connect_relayd(struct lttng_uri
*uri
,
878 struct lttcomm_relayd_sock
**relayd_sock
,
879 struct consumer_output
*consumer
)
882 struct lttcomm_relayd_sock
*rsock
;
884 rsock
= lttcomm_alloc_relayd_sock(uri
, RELAYD_VERSION_COMM_MAJOR
,
885 RELAYD_VERSION_COMM_MINOR
);
887 ret
= LTTNG_ERR_FATAL
;
892 * Connect to relayd so we can proceed with a session creation. This call
893 * can possibly block for an arbitrary amount of time to set the health
894 * state to be in poll execution.
897 ret
= relayd_connect(rsock
);
900 ERR("Unable to reach lttng-relayd");
901 ret
= LTTNG_ERR_RELAYD_CONNECT_FAIL
;
905 /* Create socket for control stream. */
906 if (uri
->stype
== LTTNG_STREAM_CONTROL
) {
907 DBG3("Creating relayd stream socket from URI");
909 /* Check relayd version */
910 ret
= relayd_version_check(rsock
);
911 if (ret
== LTTNG_ERR_RELAYD_VERSION_FAIL
) {
913 } else if (ret
< 0) {
914 ERR("Unable to reach lttng-relayd");
915 ret
= LTTNG_ERR_RELAYD_CONNECT_FAIL
;
918 consumer
->relay_major_version
= rsock
->major
;
919 consumer
->relay_minor_version
= rsock
->minor
;
920 } else if (uri
->stype
== LTTNG_STREAM_DATA
) {
921 DBG3("Creating relayd data socket from URI");
923 /* Command is not valid */
924 ERR("Relayd invalid stream type: %d", uri
->stype
);
925 ret
= LTTNG_ERR_INVALID
;
929 *relayd_sock
= rsock
;
934 /* The returned value is not useful since we are on an error path. */
935 (void) relayd_close(rsock
);
943 * Connect to the relayd using URI and send the socket to the right consumer.
945 * The consumer socket lock must be held by the caller.
947 static int send_consumer_relayd_socket(unsigned int session_id
,
948 struct lttng_uri
*relayd_uri
,
949 struct consumer_output
*consumer
,
950 struct consumer_socket
*consumer_sock
,
951 char *session_name
, char *hostname
, int session_live_timer
)
954 struct lttcomm_relayd_sock
*rsock
= NULL
;
956 /* Connect to relayd and make version check if uri is the control. */
957 ret
= create_connect_relayd(relayd_uri
, &rsock
, consumer
);
958 if (ret
!= LTTNG_OK
) {
959 goto relayd_comm_error
;
963 /* Set the network sequence index if not set. */
964 if (consumer
->net_seq_index
== (uint64_t) -1ULL) {
965 pthread_mutex_lock(&relayd_net_seq_idx_lock
);
967 * Increment net_seq_idx because we are about to transfer the
968 * new relayd socket to the consumer.
969 * Assign unique key so the consumer can match streams.
971 consumer
->net_seq_index
= ++relayd_net_seq_idx
;
972 pthread_mutex_unlock(&relayd_net_seq_idx_lock
);
975 /* Send relayd socket to consumer. */
976 ret
= consumer_send_relayd_socket(consumer_sock
, rsock
, consumer
,
977 relayd_uri
->stype
, session_id
,
978 session_name
, hostname
, session_live_timer
);
980 ret
= LTTNG_ERR_ENABLE_CONSUMER_FAIL
;
984 /* Flag that the corresponding socket was sent. */
985 if (relayd_uri
->stype
== LTTNG_STREAM_CONTROL
) {
986 consumer_sock
->control_sock_sent
= 1;
987 } else if (relayd_uri
->stype
== LTTNG_STREAM_DATA
) {
988 consumer_sock
->data_sock_sent
= 1;
994 * Close socket which was dup on the consumer side. The session daemon does
995 * NOT keep track of the relayd socket(s) once transfer to the consumer.
999 if (ret
!= LTTNG_OK
) {
1001 * The consumer output for this session should not be used anymore
1002 * since the relayd connection failed thus making any tracing or/and
1003 * streaming not usable.
1005 consumer
->enabled
= 0;
1007 (void) relayd_close(rsock
);
1015 * Send both relayd sockets to a specific consumer and domain. This is a
1016 * helper function to facilitate sending the information to the consumer for a
1019 * The consumer socket lock must be held by the caller.
1021 static int send_consumer_relayd_sockets(enum lttng_domain_type domain
,
1022 unsigned int session_id
, struct consumer_output
*consumer
,
1023 struct consumer_socket
*sock
, char *session_name
,
1024 char *hostname
, int session_live_timer
)
1031 /* Sending control relayd socket. */
1032 if (!sock
->control_sock_sent
) {
1033 ret
= send_consumer_relayd_socket(session_id
,
1034 &consumer
->dst
.net
.control
, consumer
, sock
,
1035 session_name
, hostname
, session_live_timer
);
1036 if (ret
!= LTTNG_OK
) {
1041 /* Sending data relayd socket. */
1042 if (!sock
->data_sock_sent
) {
1043 ret
= send_consumer_relayd_socket(session_id
,
1044 &consumer
->dst
.net
.data
, consumer
, sock
,
1045 session_name
, hostname
, session_live_timer
);
1046 if (ret
!= LTTNG_OK
) {
1056 * Setup relayd connections for a tracing session. First creates the socket to
1057 * the relayd and send them to the right domain consumer. Consumer type MUST be
1060 int cmd_setup_relayd(struct ltt_session
*session
)
1063 struct ltt_ust_session
*usess
;
1064 struct ltt_kernel_session
*ksess
;
1065 struct consumer_socket
*socket
;
1066 struct lttng_ht_iter iter
;
1070 usess
= session
->ust_session
;
1071 ksess
= session
->kernel_session
;
1073 DBG("Setting relayd for session %s", session
->name
);
1077 if (usess
&& usess
->consumer
&& usess
->consumer
->type
== CONSUMER_DST_NET
1078 && usess
->consumer
->enabled
) {
1079 /* For each consumer socket, send relayd sockets */
1080 cds_lfht_for_each_entry(usess
->consumer
->socks
->ht
, &iter
.iter
,
1081 socket
, node
.node
) {
1082 pthread_mutex_lock(socket
->lock
);
1083 ret
= send_consumer_relayd_sockets(LTTNG_DOMAIN_UST
, session
->id
,
1084 usess
->consumer
, socket
,
1085 session
->name
, session
->hostname
,
1086 session
->live_timer
);
1087 pthread_mutex_unlock(socket
->lock
);
1088 if (ret
!= LTTNG_OK
) {
1091 /* Session is now ready for network streaming. */
1092 session
->net_handle
= 1;
1094 session
->consumer
->relay_major_version
=
1095 usess
->consumer
->relay_major_version
;
1096 session
->consumer
->relay_minor_version
=
1097 usess
->consumer
->relay_minor_version
;
1100 if (ksess
&& ksess
->consumer
&& ksess
->consumer
->type
== CONSUMER_DST_NET
1101 && ksess
->consumer
->enabled
) {
1102 cds_lfht_for_each_entry(ksess
->consumer
->socks
->ht
, &iter
.iter
,
1103 socket
, node
.node
) {
1104 pthread_mutex_lock(socket
->lock
);
1105 ret
= send_consumer_relayd_sockets(LTTNG_DOMAIN_KERNEL
, session
->id
,
1106 ksess
->consumer
, socket
,
1107 session
->name
, session
->hostname
,
1108 session
->live_timer
);
1109 pthread_mutex_unlock(socket
->lock
);
1110 if (ret
!= LTTNG_OK
) {
1113 /* Session is now ready for network streaming. */
1114 session
->net_handle
= 1;
1116 session
->consumer
->relay_major_version
=
1117 ksess
->consumer
->relay_major_version
;
1118 session
->consumer
->relay_minor_version
=
1119 ksess
->consumer
->relay_minor_version
;
1128 * Start a kernel session by opening all necessary streams.
1130 static int start_kernel_session(struct ltt_kernel_session
*ksess
, int wpipe
)
1133 struct ltt_kernel_channel
*kchan
;
1135 /* Open kernel metadata */
1136 if (ksess
->metadata
== NULL
&& ksess
->output_traces
) {
1137 ret
= kernel_open_metadata(ksess
);
1139 ret
= LTTNG_ERR_KERN_META_FAIL
;
1144 /* Open kernel metadata stream */
1145 if (ksess
->metadata
&& ksess
->metadata_stream_fd
< 0) {
1146 ret
= kernel_open_metadata_stream(ksess
);
1148 ERR("Kernel create metadata stream failed");
1149 ret
= LTTNG_ERR_KERN_STREAM_FAIL
;
1154 /* For each channel */
1155 cds_list_for_each_entry(kchan
, &ksess
->channel_list
.head
, list
) {
1156 if (kchan
->stream_count
== 0) {
1157 ret
= kernel_open_channel_stream(kchan
);
1159 ret
= LTTNG_ERR_KERN_STREAM_FAIL
;
1162 /* Update the stream global counter */
1163 ksess
->stream_count_global
+= ret
;
1167 /* Setup kernel consumer socket and send fds to it */
1168 ret
= init_kernel_tracing(ksess
);
1170 ret
= LTTNG_ERR_KERN_START_FAIL
;
1174 /* This start the kernel tracing */
1175 ret
= kernel_start_session(ksess
);
1177 ret
= LTTNG_ERR_KERN_START_FAIL
;
1181 /* Quiescent wait after starting trace */
1182 kernel_wait_quiescent(wpipe
);
1193 * Command LTTNG_DISABLE_CHANNEL processed by the client thread.
1195 int cmd_disable_channel(struct ltt_session
*session
,
1196 enum lttng_domain_type domain
, char *channel_name
)
1199 struct ltt_ust_session
*usess
;
1201 usess
= session
->ust_session
;
1206 case LTTNG_DOMAIN_KERNEL
:
1208 ret
= channel_kernel_disable(session
->kernel_session
,
1210 if (ret
!= LTTNG_OK
) {
1214 kernel_wait_quiescent(kernel_tracer_fd
);
1217 case LTTNG_DOMAIN_UST
:
1219 struct ltt_ust_channel
*uchan
;
1220 struct lttng_ht
*chan_ht
;
1222 chan_ht
= usess
->domain_global
.channels
;
1224 uchan
= trace_ust_find_channel_by_name(chan_ht
, channel_name
);
1225 if (uchan
== NULL
) {
1226 ret
= LTTNG_ERR_UST_CHAN_NOT_FOUND
;
1230 ret
= channel_ust_disable(usess
, uchan
);
1231 if (ret
!= LTTNG_OK
) {
1237 ret
= LTTNG_ERR_UNKNOWN_DOMAIN
;
1249 * Command LTTNG_TRACK_PID processed by the client thread.
1251 * Called with session lock held.
1253 int cmd_track_pid(struct ltt_session
*session
, enum lttng_domain_type domain
,
1261 case LTTNG_DOMAIN_KERNEL
:
1263 struct ltt_kernel_session
*ksess
;
1265 ksess
= session
->kernel_session
;
1267 ret
= kernel_track_pid(ksess
, pid
);
1268 if (ret
!= LTTNG_OK
) {
1272 kernel_wait_quiescent(kernel_tracer_fd
);
1275 case LTTNG_DOMAIN_UST
:
1277 struct ltt_ust_session
*usess
;
1279 usess
= session
->ust_session
;
1281 ret
= trace_ust_track_pid(usess
, pid
);
1282 if (ret
!= LTTNG_OK
) {
1288 ret
= LTTNG_ERR_UNKNOWN_DOMAIN
;
1300 * Command LTTNG_UNTRACK_PID processed by the client thread.
1302 * Called with session lock held.
1304 int cmd_untrack_pid(struct ltt_session
*session
, enum lttng_domain_type domain
,
1312 case LTTNG_DOMAIN_KERNEL
:
1314 struct ltt_kernel_session
*ksess
;
1316 ksess
= session
->kernel_session
;
1318 ret
= kernel_untrack_pid(ksess
, pid
);
1319 if (ret
!= LTTNG_OK
) {
1323 kernel_wait_quiescent(kernel_tracer_fd
);
1326 case LTTNG_DOMAIN_UST
:
1328 struct ltt_ust_session
*usess
;
1330 usess
= session
->ust_session
;
1332 ret
= trace_ust_untrack_pid(usess
, pid
);
1333 if (ret
!= LTTNG_OK
) {
1339 ret
= LTTNG_ERR_UNKNOWN_DOMAIN
;
1351 * Command LTTNG_ENABLE_CHANNEL processed by the client thread.
1353 * The wpipe arguments is used as a notifier for the kernel thread.
1355 int cmd_enable_channel(struct ltt_session
*session
,
1356 struct lttng_domain
*domain
, struct lttng_channel
*attr
, int wpipe
)
1359 struct ltt_ust_session
*usess
= session
->ust_session
;
1360 struct lttng_ht
*chan_ht
;
1367 len
= lttng_strnlen(attr
->name
, sizeof(attr
->name
));
1369 /* Validate channel name */
1370 if (attr
->name
[0] == '.' ||
1371 memchr(attr
->name
, '/', len
) != NULL
) {
1372 ret
= LTTNG_ERR_INVALID_CHANNEL_NAME
;
1376 DBG("Enabling channel %s for session %s", attr
->name
, session
->name
);
1381 * Don't try to enable a channel if the session has been started at
1382 * some point in time before. The tracer does not allow it.
1384 if (session
->has_been_started
) {
1385 ret
= LTTNG_ERR_TRACE_ALREADY_STARTED
;
1390 * If the session is a live session, remove the switch timer, the
1391 * live timer does the same thing but sends also synchronisation
1392 * beacons for inactive streams.
1394 if (session
->live_timer
> 0) {
1395 attr
->attr
.live_timer_interval
= session
->live_timer
;
1396 attr
->attr
.switch_timer_interval
= 0;
1399 /* Check for feature support */
1400 switch (domain
->type
) {
1401 case LTTNG_DOMAIN_KERNEL
:
1403 if (kernel_supports_ring_buffer_snapshot_sample_positions(kernel_tracer_fd
) != 1) {
1404 /* Sampling position of buffer is not supported */
1405 WARN("Kernel tracer does not support buffer monitoring. "
1406 "Setting the monitor interval timer to 0 "
1407 "(disabled) for channel '%s' of session '%s'",
1408 attr
-> name
, session
->name
);
1409 lttng_channel_set_monitor_timer_interval(attr
, 0);
1413 case LTTNG_DOMAIN_UST
:
1415 case LTTNG_DOMAIN_JUL
:
1416 case LTTNG_DOMAIN_LOG4J
:
1417 case LTTNG_DOMAIN_PYTHON
:
1418 if (!agent_tracing_is_enabled()) {
1419 DBG("Attempted to enable a channel in an agent domain but the agent thread is not running");
1420 ret
= LTTNG_ERR_AGENT_TRACING_DISABLED
;
1425 ret
= LTTNG_ERR_UNKNOWN_DOMAIN
;
1429 switch (domain
->type
) {
1430 case LTTNG_DOMAIN_KERNEL
:
1432 struct ltt_kernel_channel
*kchan
;
1434 kchan
= trace_kernel_get_channel_by_name(attr
->name
,
1435 session
->kernel_session
);
1436 if (kchan
== NULL
) {
1437 ret
= channel_kernel_create(session
->kernel_session
, attr
, wpipe
);
1438 if (attr
->name
[0] != '\0') {
1439 session
->kernel_session
->has_non_default_channel
= 1;
1442 ret
= channel_kernel_enable(session
->kernel_session
, kchan
);
1445 if (ret
!= LTTNG_OK
) {
1449 kernel_wait_quiescent(kernel_tracer_fd
);
1452 case LTTNG_DOMAIN_UST
:
1453 case LTTNG_DOMAIN_JUL
:
1454 case LTTNG_DOMAIN_LOG4J
:
1455 case LTTNG_DOMAIN_PYTHON
:
1457 struct ltt_ust_channel
*uchan
;
1462 * Current agent implementation limitations force us to allow
1463 * only one channel at once in "agent" subdomains. Each
1464 * subdomain has a default channel name which must be strictly
1467 if (domain
->type
== LTTNG_DOMAIN_JUL
) {
1468 if (strncmp(attr
->name
, DEFAULT_JUL_CHANNEL_NAME
,
1469 LTTNG_SYMBOL_NAME_LEN
)) {
1470 ret
= LTTNG_ERR_INVALID_CHANNEL_NAME
;
1473 } else if (domain
->type
== LTTNG_DOMAIN_LOG4J
) {
1474 if (strncmp(attr
->name
, DEFAULT_LOG4J_CHANNEL_NAME
,
1475 LTTNG_SYMBOL_NAME_LEN
)) {
1476 ret
= LTTNG_ERR_INVALID_CHANNEL_NAME
;
1479 } else if (domain
->type
== LTTNG_DOMAIN_PYTHON
) {
1480 if (strncmp(attr
->name
, DEFAULT_PYTHON_CHANNEL_NAME
,
1481 LTTNG_SYMBOL_NAME_LEN
)) {
1482 ret
= LTTNG_ERR_INVALID_CHANNEL_NAME
;
1487 chan_ht
= usess
->domain_global
.channels
;
1489 uchan
= trace_ust_find_channel_by_name(chan_ht
, attr
->name
);
1490 if (uchan
== NULL
) {
1491 ret
= channel_ust_create(usess
, attr
, domain
->buf_type
);
1492 if (attr
->name
[0] != '\0') {
1493 usess
->has_non_default_channel
= 1;
1496 ret
= channel_ust_enable(usess
, uchan
);
1501 ret
= LTTNG_ERR_UNKNOWN_DOMAIN
;
1512 * Command LTTNG_DISABLE_EVENT processed by the client thread.
1514 int cmd_disable_event(struct ltt_session
*session
,
1515 enum lttng_domain_type domain
, char *channel_name
,
1516 struct lttng_event
*event
)
1521 DBG("Disable event command for event \'%s\'", event
->name
);
1523 event_name
= event
->name
;
1525 /* Error out on unhandled search criteria */
1526 if (event
->loglevel_type
|| event
->loglevel
!= -1 || event
->enabled
1527 || event
->pid
|| event
->filter
|| event
->exclusion
) {
1528 ret
= LTTNG_ERR_UNK
;
1535 case LTTNG_DOMAIN_KERNEL
:
1537 struct ltt_kernel_channel
*kchan
;
1538 struct ltt_kernel_session
*ksess
;
1540 ksess
= session
->kernel_session
;
1543 * If a non-default channel has been created in the
1544 * session, explicitely require that -c chan_name needs
1547 if (ksess
->has_non_default_channel
&& channel_name
[0] == '\0') {
1548 ret
= LTTNG_ERR_NEED_CHANNEL_NAME
;
1552 kchan
= trace_kernel_get_channel_by_name(channel_name
, ksess
);
1553 if (kchan
== NULL
) {
1554 ret
= LTTNG_ERR_KERN_CHAN_NOT_FOUND
;
1558 switch (event
->type
) {
1559 case LTTNG_EVENT_ALL
:
1560 case LTTNG_EVENT_TRACEPOINT
:
1561 case LTTNG_EVENT_SYSCALL
:
1562 case LTTNG_EVENT_PROBE
:
1563 case LTTNG_EVENT_FUNCTION
:
1564 case LTTNG_EVENT_FUNCTION_ENTRY
:/* fall-through */
1565 if (event_name
[0] == '\0') {
1566 ret
= event_kernel_disable_event(kchan
,
1569 ret
= event_kernel_disable_event(kchan
,
1570 event_name
, event
->type
);
1572 if (ret
!= LTTNG_OK
) {
1577 ret
= LTTNG_ERR_UNK
;
1581 kernel_wait_quiescent(kernel_tracer_fd
);
1584 case LTTNG_DOMAIN_UST
:
1586 struct ltt_ust_channel
*uchan
;
1587 struct ltt_ust_session
*usess
;
1589 usess
= session
->ust_session
;
1591 if (validate_ust_event_name(event_name
)) {
1592 ret
= LTTNG_ERR_INVALID_EVENT_NAME
;
1597 * If a non-default channel has been created in the
1598 * session, explicitly require that -c chan_name needs
1601 if (usess
->has_non_default_channel
&& channel_name
[0] == '\0') {
1602 ret
= LTTNG_ERR_NEED_CHANNEL_NAME
;
1606 uchan
= trace_ust_find_channel_by_name(usess
->domain_global
.channels
,
1608 if (uchan
== NULL
) {
1609 ret
= LTTNG_ERR_UST_CHAN_NOT_FOUND
;
1613 switch (event
->type
) {
1614 case LTTNG_EVENT_ALL
:
1616 * An empty event name means that everything
1617 * should be disabled.
1619 if (event
->name
[0] == '\0') {
1620 ret
= event_ust_disable_all_tracepoints(usess
, uchan
);
1622 ret
= event_ust_disable_tracepoint(usess
, uchan
,
1625 if (ret
!= LTTNG_OK
) {
1630 ret
= LTTNG_ERR_UNK
;
1634 DBG3("Disable UST event %s in channel %s completed", event_name
,
1638 case LTTNG_DOMAIN_LOG4J
:
1639 case LTTNG_DOMAIN_JUL
:
1640 case LTTNG_DOMAIN_PYTHON
:
1643 struct ltt_ust_session
*usess
= session
->ust_session
;
1647 switch (event
->type
) {
1648 case LTTNG_EVENT_ALL
:
1651 ret
= LTTNG_ERR_UNK
;
1655 agt
= trace_ust_find_agent(usess
, domain
);
1657 ret
= -LTTNG_ERR_UST_EVENT_NOT_FOUND
;
1661 * An empty event name means that everything
1662 * should be disabled.
1664 if (event
->name
[0] == '\0') {
1665 ret
= event_agent_disable_all(usess
, agt
);
1667 ret
= event_agent_disable(usess
, agt
, event_name
);
1669 if (ret
!= LTTNG_OK
) {
1676 ret
= LTTNG_ERR_UND
;
1689 * Command LTTNG_ADD_CONTEXT processed by the client thread.
1691 int cmd_add_context(struct ltt_session
*session
, enum lttng_domain_type domain
,
1692 char *channel_name
, struct lttng_event_context
*ctx
, int kwpipe
)
1694 int ret
, chan_kern_created
= 0, chan_ust_created
= 0;
1695 char *app_ctx_provider_name
= NULL
, *app_ctx_name
= NULL
;
1698 * Don't try to add a context if the session has been started at
1699 * some point in time before. The tracer does not allow it and would
1700 * result in a corrupted trace.
1702 if (session
->has_been_started
) {
1703 ret
= LTTNG_ERR_TRACE_ALREADY_STARTED
;
1707 if (ctx
->ctx
== LTTNG_EVENT_CONTEXT_APP_CONTEXT
) {
1708 app_ctx_provider_name
= ctx
->u
.app_ctx
.provider_name
;
1709 app_ctx_name
= ctx
->u
.app_ctx
.ctx_name
;
1713 case LTTNG_DOMAIN_KERNEL
:
1714 assert(session
->kernel_session
);
1716 if (session
->kernel_session
->channel_count
== 0) {
1717 /* Create default channel */
1718 ret
= channel_kernel_create(session
->kernel_session
, NULL
, kwpipe
);
1719 if (ret
!= LTTNG_OK
) {
1722 chan_kern_created
= 1;
1724 /* Add kernel context to kernel tracer */
1725 ret
= context_kernel_add(session
->kernel_session
, ctx
, channel_name
);
1726 if (ret
!= LTTNG_OK
) {
1730 case LTTNG_DOMAIN_JUL
:
1731 case LTTNG_DOMAIN_LOG4J
:
1734 * Validate channel name.
1735 * If no channel name is given and the domain is JUL or LOG4J,
1736 * set it to the appropriate domain-specific channel name. If
1737 * a name is provided but does not match the expexted channel
1738 * name, return an error.
1740 if (domain
== LTTNG_DOMAIN_JUL
&& *channel_name
&&
1741 strcmp(channel_name
,
1742 DEFAULT_JUL_CHANNEL_NAME
)) {
1743 ret
= LTTNG_ERR_UST_CHAN_NOT_FOUND
;
1745 } else if (domain
== LTTNG_DOMAIN_LOG4J
&& *channel_name
&&
1746 strcmp(channel_name
,
1747 DEFAULT_LOG4J_CHANNEL_NAME
)) {
1748 ret
= LTTNG_ERR_UST_CHAN_NOT_FOUND
;
1751 /* break is _not_ missing here. */
1753 case LTTNG_DOMAIN_UST
:
1755 struct ltt_ust_session
*usess
= session
->ust_session
;
1756 unsigned int chan_count
;
1760 chan_count
= lttng_ht_get_count(usess
->domain_global
.channels
);
1761 if (chan_count
== 0) {
1762 struct lttng_channel
*attr
;
1763 /* Create default channel */
1764 attr
= channel_new_default_attr(domain
, usess
->buffer_type
);
1766 ret
= LTTNG_ERR_FATAL
;
1770 ret
= channel_ust_create(usess
, attr
, usess
->buffer_type
);
1771 if (ret
!= LTTNG_OK
) {
1775 channel_attr_destroy(attr
);
1776 chan_ust_created
= 1;
1779 ret
= context_ust_add(usess
, domain
, ctx
, channel_name
);
1780 free(app_ctx_provider_name
);
1782 app_ctx_name
= NULL
;
1783 app_ctx_provider_name
= NULL
;
1784 if (ret
!= LTTNG_OK
) {
1790 ret
= LTTNG_ERR_UND
;
1798 if (chan_kern_created
) {
1799 struct ltt_kernel_channel
*kchan
=
1800 trace_kernel_get_channel_by_name(DEFAULT_CHANNEL_NAME
,
1801 session
->kernel_session
);
1802 /* Created previously, this should NOT fail. */
1804 kernel_destroy_channel(kchan
);
1807 if (chan_ust_created
) {
1808 struct ltt_ust_channel
*uchan
=
1809 trace_ust_find_channel_by_name(
1810 session
->ust_session
->domain_global
.channels
,
1811 DEFAULT_CHANNEL_NAME
);
1812 /* Created previously, this should NOT fail. */
1814 /* Remove from the channel list of the session. */
1815 trace_ust_delete_channel(session
->ust_session
->domain_global
.channels
,
1817 trace_ust_destroy_channel(uchan
);
1820 free(app_ctx_provider_name
);
1825 static inline bool name_starts_with(const char *name
, const char *prefix
)
1827 const size_t max_cmp_len
= min(strlen(prefix
), LTTNG_SYMBOL_NAME_LEN
);
1829 return !strncmp(name
, prefix
, max_cmp_len
);
1832 /* Perform userspace-specific event name validation */
1833 static int validate_ust_event_name(const char *name
)
1843 * Check name against all internal UST event component namespaces used
1846 if (name_starts_with(name
, DEFAULT_JUL_EVENT_COMPONENT
) ||
1847 name_starts_with(name
, DEFAULT_LOG4J_EVENT_COMPONENT
) ||
1848 name_starts_with(name
, DEFAULT_PYTHON_EVENT_COMPONENT
)) {
1857 * Internal version of cmd_enable_event() with a supplemental
1858 * "internal_event" flag which is used to enable internal events which should
1859 * be hidden from clients. Such events are used in the agent implementation to
1860 * enable the events through which all "agent" events are funeled.
1862 static int _cmd_enable_event(struct ltt_session
*session
,
1863 struct lttng_domain
*domain
,
1864 char *channel_name
, struct lttng_event
*event
,
1865 char *filter_expression
,
1866 struct lttng_filter_bytecode
*filter
,
1867 struct lttng_event_exclusion
*exclusion
,
1868 int wpipe
, bool internal_event
)
1870 int ret
= 0, channel_created
= 0;
1871 struct lttng_channel
*attr
= NULL
;
1875 assert(channel_name
);
1877 /* If we have a filter, we must have its filter expression */
1878 assert(!(!!filter_expression
^ !!filter
));
1880 /* Normalize event name as a globbing pattern */
1881 strutils_normalize_star_glob_pattern(event
->name
);
1883 /* Normalize exclusion names as globbing patterns */
1887 for (i
= 0; i
< exclusion
->count
; i
++) {
1888 char *name
= LTTNG_EVENT_EXCLUSION_NAME_AT(exclusion
, i
);
1890 strutils_normalize_star_glob_pattern(name
);
1894 DBG("Enable event command for event \'%s\'", event
->name
);
1898 switch (domain
->type
) {
1899 case LTTNG_DOMAIN_KERNEL
:
1901 struct ltt_kernel_channel
*kchan
;
1904 * If a non-default channel has been created in the
1905 * session, explicitely require that -c chan_name needs
1908 if (session
->kernel_session
->has_non_default_channel
1909 && channel_name
[0] == '\0') {
1910 ret
= LTTNG_ERR_NEED_CHANNEL_NAME
;
1914 kchan
= trace_kernel_get_channel_by_name(channel_name
,
1915 session
->kernel_session
);
1916 if (kchan
== NULL
) {
1917 attr
= channel_new_default_attr(LTTNG_DOMAIN_KERNEL
,
1918 LTTNG_BUFFER_GLOBAL
);
1920 ret
= LTTNG_ERR_FATAL
;
1923 if (lttng_strncpy(attr
->name
, channel_name
,
1924 sizeof(attr
->name
))) {
1925 ret
= LTTNG_ERR_INVALID
;
1929 ret
= cmd_enable_channel(session
, domain
, attr
, wpipe
);
1930 if (ret
!= LTTNG_OK
) {
1933 channel_created
= 1;
1936 /* Get the newly created kernel channel pointer */
1937 kchan
= trace_kernel_get_channel_by_name(channel_name
,
1938 session
->kernel_session
);
1939 if (kchan
== NULL
) {
1940 /* This sould not happen... */
1941 ret
= LTTNG_ERR_FATAL
;
1945 switch (event
->type
) {
1946 case LTTNG_EVENT_ALL
:
1948 char *filter_expression_a
= NULL
;
1949 struct lttng_filter_bytecode
*filter_a
= NULL
;
1952 * We need to duplicate filter_expression and filter,
1953 * because ownership is passed to first enable
1956 if (filter_expression
) {
1957 filter_expression_a
= strdup(filter_expression
);
1958 if (!filter_expression_a
) {
1959 ret
= LTTNG_ERR_FATAL
;
1964 filter_a
= zmalloc(sizeof(*filter_a
) + filter
->len
);
1966 free(filter_expression_a
);
1967 ret
= LTTNG_ERR_FATAL
;
1970 memcpy(filter_a
, filter
, sizeof(*filter_a
) + filter
->len
);
1972 event
->type
= LTTNG_EVENT_TRACEPOINT
; /* Hack */
1973 ret
= event_kernel_enable_event(kchan
, event
,
1974 filter_expression
, filter
);
1975 /* We have passed ownership */
1976 filter_expression
= NULL
;
1978 if (ret
!= LTTNG_OK
) {
1979 if (channel_created
) {
1980 /* Let's not leak a useless channel. */
1981 kernel_destroy_channel(kchan
);
1983 free(filter_expression_a
);
1987 event
->type
= LTTNG_EVENT_SYSCALL
; /* Hack */
1988 ret
= event_kernel_enable_event(kchan
, event
,
1989 filter_expression_a
, filter_a
);
1990 /* We have passed ownership */
1991 filter_expression_a
= NULL
;
1993 if (ret
!= LTTNG_OK
) {
1998 case LTTNG_EVENT_PROBE
:
1999 case LTTNG_EVENT_FUNCTION
:
2000 case LTTNG_EVENT_FUNCTION_ENTRY
:
2001 case LTTNG_EVENT_TRACEPOINT
:
2002 ret
= event_kernel_enable_event(kchan
, event
,
2003 filter_expression
, filter
);
2004 /* We have passed ownership */
2005 filter_expression
= NULL
;
2007 if (ret
!= LTTNG_OK
) {
2008 if (channel_created
) {
2009 /* Let's not leak a useless channel. */
2010 kernel_destroy_channel(kchan
);
2015 case LTTNG_EVENT_SYSCALL
:
2016 ret
= event_kernel_enable_event(kchan
, event
,
2017 filter_expression
, filter
);
2018 /* We have passed ownership */
2019 filter_expression
= NULL
;
2021 if (ret
!= LTTNG_OK
) {
2026 ret
= LTTNG_ERR_UNK
;
2030 kernel_wait_quiescent(kernel_tracer_fd
);
2033 case LTTNG_DOMAIN_UST
:
2035 struct ltt_ust_channel
*uchan
;
2036 struct ltt_ust_session
*usess
= session
->ust_session
;
2041 * If a non-default channel has been created in the
2042 * session, explicitely require that -c chan_name needs
2045 if (usess
->has_non_default_channel
&& channel_name
[0] == '\0') {
2046 ret
= LTTNG_ERR_NEED_CHANNEL_NAME
;
2050 /* Get channel from global UST domain */
2051 uchan
= trace_ust_find_channel_by_name(usess
->domain_global
.channels
,
2053 if (uchan
== NULL
) {
2054 /* Create default channel */
2055 attr
= channel_new_default_attr(LTTNG_DOMAIN_UST
,
2056 usess
->buffer_type
);
2058 ret
= LTTNG_ERR_FATAL
;
2061 if (lttng_strncpy(attr
->name
, channel_name
,
2062 sizeof(attr
->name
))) {
2063 ret
= LTTNG_ERR_INVALID
;
2067 ret
= cmd_enable_channel(session
, domain
, attr
, wpipe
);
2068 if (ret
!= LTTNG_OK
) {
2072 /* Get the newly created channel reference back */
2073 uchan
= trace_ust_find_channel_by_name(
2074 usess
->domain_global
.channels
, channel_name
);
2078 if (uchan
->domain
!= LTTNG_DOMAIN_UST
&& !internal_event
) {
2080 * Don't allow users to add UST events to channels which
2081 * are assigned to a userspace subdomain (JUL, Log4J,
2084 ret
= LTTNG_ERR_INVALID_CHANNEL_DOMAIN
;
2088 if (!internal_event
) {
2090 * Ensure the event name is not reserved for internal
2093 ret
= validate_ust_event_name(event
->name
);
2095 WARN("Userspace event name %s failed validation.",
2097 ret
= LTTNG_ERR_INVALID_EVENT_NAME
;
2102 /* At this point, the session and channel exist on the tracer */
2103 ret
= event_ust_enable_tracepoint(usess
, uchan
, event
,
2104 filter_expression
, filter
, exclusion
,
2106 /* We have passed ownership */
2107 filter_expression
= NULL
;
2110 if (ret
== LTTNG_ERR_UST_EVENT_ENABLED
) {
2111 goto already_enabled
;
2112 } else if (ret
!= LTTNG_OK
) {
2117 case LTTNG_DOMAIN_LOG4J
:
2118 case LTTNG_DOMAIN_JUL
:
2119 case LTTNG_DOMAIN_PYTHON
:
2121 const char *default_event_name
, *default_chan_name
;
2123 struct lttng_event uevent
;
2124 struct lttng_domain tmp_dom
;
2125 struct ltt_ust_session
*usess
= session
->ust_session
;
2129 if (!agent_tracing_is_enabled()) {
2130 DBG("Attempted to enable an event in an agent domain but the agent thread is not running");
2131 ret
= LTTNG_ERR_AGENT_TRACING_DISABLED
;
2135 agt
= trace_ust_find_agent(usess
, domain
->type
);
2137 agt
= agent_create(domain
->type
);
2139 ret
= LTTNG_ERR_NOMEM
;
2142 agent_add(agt
, usess
->agents
);
2145 /* Create the default tracepoint. */
2146 memset(&uevent
, 0, sizeof(uevent
));
2147 uevent
.type
= LTTNG_EVENT_TRACEPOINT
;
2148 uevent
.loglevel_type
= LTTNG_EVENT_LOGLEVEL_ALL
;
2149 default_event_name
= event_get_default_agent_ust_name(
2151 if (!default_event_name
) {
2152 ret
= LTTNG_ERR_FATAL
;
2155 strncpy(uevent
.name
, default_event_name
, sizeof(uevent
.name
));
2156 uevent
.name
[sizeof(uevent
.name
) - 1] = '\0';
2159 * The domain type is changed because we are about to enable the
2160 * default channel and event for the JUL domain that are hardcoded.
2161 * This happens in the UST domain.
2163 memcpy(&tmp_dom
, domain
, sizeof(tmp_dom
));
2164 tmp_dom
.type
= LTTNG_DOMAIN_UST
;
2166 switch (domain
->type
) {
2167 case LTTNG_DOMAIN_LOG4J
:
2168 default_chan_name
= DEFAULT_LOG4J_CHANNEL_NAME
;
2170 case LTTNG_DOMAIN_JUL
:
2171 default_chan_name
= DEFAULT_JUL_CHANNEL_NAME
;
2173 case LTTNG_DOMAIN_PYTHON
:
2174 default_chan_name
= DEFAULT_PYTHON_CHANNEL_NAME
;
2177 /* The switch/case we are in makes this impossible */
2182 char *filter_expression_copy
= NULL
;
2183 struct lttng_filter_bytecode
*filter_copy
= NULL
;
2186 const size_t filter_size
= sizeof(
2187 struct lttng_filter_bytecode
)
2190 filter_copy
= zmalloc(filter_size
);
2192 ret
= LTTNG_ERR_NOMEM
;
2195 memcpy(filter_copy
, filter
, filter_size
);
2197 filter_expression_copy
=
2198 strdup(filter_expression
);
2199 if (!filter_expression
) {
2200 ret
= LTTNG_ERR_NOMEM
;
2203 if (!filter_expression_copy
|| !filter_copy
) {
2204 free(filter_expression_copy
);
2210 ret
= cmd_enable_event_internal(session
, &tmp_dom
,
2211 (char *) default_chan_name
,
2212 &uevent
, filter_expression_copy
,
2213 filter_copy
, NULL
, wpipe
);
2216 if (ret
== LTTNG_ERR_UST_EVENT_ENABLED
) {
2217 goto already_enabled
;
2218 } else if (ret
!= LTTNG_OK
) {
2222 /* The wild card * means that everything should be enabled. */
2223 if (strncmp(event
->name
, "*", 1) == 0 && strlen(event
->name
) == 1) {
2224 ret
= event_agent_enable_all(usess
, agt
, event
, filter
,
2227 ret
= event_agent_enable(usess
, agt
, event
, filter
,
2231 filter_expression
= NULL
;
2232 if (ret
!= LTTNG_OK
) {
2239 ret
= LTTNG_ERR_UND
;
2247 free(filter_expression
);
2250 channel_attr_destroy(attr
);
2256 * Command LTTNG_ENABLE_EVENT processed by the client thread.
2257 * We own filter, exclusion, and filter_expression.
2259 int cmd_enable_event(struct ltt_session
*session
, struct lttng_domain
*domain
,
2260 char *channel_name
, struct lttng_event
*event
,
2261 char *filter_expression
,
2262 struct lttng_filter_bytecode
*filter
,
2263 struct lttng_event_exclusion
*exclusion
,
2266 return _cmd_enable_event(session
, domain
, channel_name
, event
,
2267 filter_expression
, filter
, exclusion
, wpipe
, false);
2271 * Enable an event which is internal to LTTng. An internal should
2272 * never be made visible to clients and are immune to checks such as
2275 static int cmd_enable_event_internal(struct ltt_session
*session
,
2276 struct lttng_domain
*domain
,
2277 char *channel_name
, struct lttng_event
*event
,
2278 char *filter_expression
,
2279 struct lttng_filter_bytecode
*filter
,
2280 struct lttng_event_exclusion
*exclusion
,
2283 return _cmd_enable_event(session
, domain
, channel_name
, event
,
2284 filter_expression
, filter
, exclusion
, wpipe
, true);
2288 * Command LTTNG_LIST_TRACEPOINTS processed by the client thread.
2290 ssize_t
cmd_list_tracepoints(enum lttng_domain_type domain
,
2291 struct lttng_event
**events
)
2294 ssize_t nb_events
= 0;
2297 case LTTNG_DOMAIN_KERNEL
:
2298 nb_events
= kernel_list_events(kernel_tracer_fd
, events
);
2299 if (nb_events
< 0) {
2300 ret
= LTTNG_ERR_KERN_LIST_FAIL
;
2304 case LTTNG_DOMAIN_UST
:
2305 nb_events
= ust_app_list_events(events
);
2306 if (nb_events
< 0) {
2307 ret
= LTTNG_ERR_UST_LIST_FAIL
;
2311 case LTTNG_DOMAIN_LOG4J
:
2312 case LTTNG_DOMAIN_JUL
:
2313 case LTTNG_DOMAIN_PYTHON
:
2314 nb_events
= agent_list_events(events
, domain
);
2315 if (nb_events
< 0) {
2316 ret
= LTTNG_ERR_UST_LIST_FAIL
;
2321 ret
= LTTNG_ERR_UND
;
2328 /* Return negative value to differentiate return code */
2333 * Command LTTNG_LIST_TRACEPOINT_FIELDS processed by the client thread.
2335 ssize_t
cmd_list_tracepoint_fields(enum lttng_domain_type domain
,
2336 struct lttng_event_field
**fields
)
2339 ssize_t nb_fields
= 0;
2342 case LTTNG_DOMAIN_UST
:
2343 nb_fields
= ust_app_list_event_fields(fields
);
2344 if (nb_fields
< 0) {
2345 ret
= LTTNG_ERR_UST_LIST_FAIL
;
2349 case LTTNG_DOMAIN_KERNEL
:
2350 default: /* fall-through */
2351 ret
= LTTNG_ERR_UND
;
2358 /* Return negative value to differentiate return code */
2362 ssize_t
cmd_list_syscalls(struct lttng_event
**events
)
2364 return syscall_table_list(events
);
2368 * Command LTTNG_LIST_TRACKER_PIDS processed by the client thread.
2370 * Called with session lock held.
2372 ssize_t
cmd_list_tracker_pids(struct ltt_session
*session
,
2373 enum lttng_domain_type domain
, int32_t **pids
)
2376 ssize_t nr_pids
= 0;
2379 case LTTNG_DOMAIN_KERNEL
:
2381 struct ltt_kernel_session
*ksess
;
2383 ksess
= session
->kernel_session
;
2384 nr_pids
= kernel_list_tracker_pids(ksess
, pids
);
2386 ret
= LTTNG_ERR_KERN_LIST_FAIL
;
2391 case LTTNG_DOMAIN_UST
:
2393 struct ltt_ust_session
*usess
;
2395 usess
= session
->ust_session
;
2396 nr_pids
= trace_ust_list_tracker_pids(usess
, pids
);
2398 ret
= LTTNG_ERR_UST_LIST_FAIL
;
2403 case LTTNG_DOMAIN_LOG4J
:
2404 case LTTNG_DOMAIN_JUL
:
2405 case LTTNG_DOMAIN_PYTHON
:
2407 ret
= LTTNG_ERR_UND
;
2414 /* Return negative value to differentiate return code */
2419 int domain_mkdir(const struct consumer_output
*output
,
2420 const struct ltt_session
*session
,
2421 uid_t uid
, gid_t gid
)
2423 struct consumer_socket
*socket
;
2424 struct lttng_ht_iter iter
;
2428 if (!output
|| !output
->socks
) {
2429 ERR("No consumer output found");
2434 path
= zmalloc(LTTNG_PATH_MAX
* sizeof(char));
2436 ERR("Cannot allocate mkdir path");
2441 ret
= snprintf(path
, LTTNG_PATH_MAX
, "%s%s%s",
2442 session_get_base_path(session
),
2443 output
->chunk_path
, output
->subdir
);
2444 if (ret
< 0 || ret
>= LTTNG_PATH_MAX
) {
2450 DBG("Domain mkdir %s for session %" PRIu64
, path
, session
->id
);
2453 * We have to iterate to find a socket, but we only need to send the
2454 * rename command to one consumer, so we break after the first one.
2456 cds_lfht_for_each_entry(output
->socks
->ht
, &iter
.iter
, socket
, node
.node
) {
2457 pthread_mutex_lock(socket
->lock
);
2458 ret
= consumer_mkdir(socket
, session
->id
, output
, path
, uid
, gid
);
2459 pthread_mutex_unlock(socket
->lock
);
2461 ERR("Consumer mkdir");
2478 int session_mkdir(const struct ltt_session
*session
)
2481 struct consumer_output
*output
;
2486 * Unsupported feature in lttng-relayd before 2.11, not an error since it
2487 * is only needed for session rotation and the user will get an error
2490 if (session
->consumer
->type
== CONSUMER_DST_NET
&&
2491 session
->consumer
->relay_major_version
== 2 &&
2492 session
->consumer
->relay_minor_version
< 11) {
2497 if (session
->kernel_session
) {
2498 output
= session
->kernel_session
->consumer
;
2499 uid
= session
->kernel_session
->uid
;
2500 gid
= session
->kernel_session
->gid
;
2501 ret
= domain_mkdir(output
, session
, uid
, gid
);
2503 ERR("Mkdir kernel");
2508 if (session
->ust_session
) {
2509 output
= session
->ust_session
->consumer
;
2510 uid
= session
->ust_session
->uid
;
2511 gid
= session
->ust_session
->gid
;
2512 ret
= domain_mkdir(output
, session
, uid
, gid
);
2526 * Command LTTNG_START_TRACE processed by the client thread.
2528 * Called with session mutex held.
2530 int cmd_start_trace(struct ltt_session
*session
)
2533 unsigned long nb_chan
= 0;
2534 struct ltt_kernel_session
*ksession
;
2535 struct ltt_ust_session
*usess
;
2539 /* Ease our life a bit ;) */
2540 ksession
= session
->kernel_session
;
2541 usess
= session
->ust_session
;
2543 /* Is the session already started? */
2544 if (session
->active
) {
2545 ret
= LTTNG_ERR_TRACE_ALREADY_STARTED
;
2550 * Starting a session without channel is useless since after that it's not
2551 * possible to enable channel thus inform the client.
2553 if (usess
&& usess
->domain_global
.channels
) {
2554 nb_chan
+= lttng_ht_get_count(usess
->domain_global
.channels
);
2557 nb_chan
+= ksession
->channel_count
;
2560 ret
= LTTNG_ERR_NO_CHANNEL
;
2565 * Record the timestamp of the first time the session is started for
2566 * an eventual session rotation call.
2568 if (!session
->has_been_started
) {
2569 session
->current_chunk_start_ts
= time(NULL
);
2570 if (session
->current_chunk_start_ts
== (time_t) -1) {
2571 PERROR("Failed to retrieve the \"%s\" session's start time",
2573 ret
= LTTNG_ERR_FATAL
;
2576 if (!session
->snapshot_mode
&& session
->output_traces
) {
2577 ret
= session_mkdir(session
);
2579 ERR("Failed to create the session directories");
2580 ret
= LTTNG_ERR_CREATE_DIR_FAIL
;
2586 /* Kernel tracing */
2587 if (ksession
!= NULL
) {
2588 DBG("Start kernel tracing session %s", session
->name
);
2589 ret
= start_kernel_session(ksession
, kernel_tracer_fd
);
2590 if (ret
!= LTTNG_OK
) {
2595 /* Flag session that trace should start automatically */
2598 * Even though the start trace might fail, flag this session active so
2599 * other application coming in are started by default.
2603 ret
= ust_app_start_trace_all(usess
);
2605 ret
= LTTNG_ERR_UST_START_FAIL
;
2610 /* Flag this after a successful start. */
2611 session
->has_been_started
= 1;
2612 session
->active
= 1;
2615 * Clear the flag that indicates that a rotation was done while the
2616 * session was stopped.
2618 session
->rotated_after_last_stop
= false;
2620 if (session
->rotate_timer_period
) {
2621 ret
= sessiond_rotate_timer_start(session
,
2622 session
->rotate_timer_period
);
2624 ERR("Failed to enable rotate timer");
2625 ret
= LTTNG_ERR_UNK
;
2637 int rename_active_chunk(struct ltt_session
*session
)
2641 session
->current_archive_id
++;
2644 * The currently active tracing path is now the folder we
2647 ret
= lttng_strncpy(session
->rotation_chunk
.current_rotate_path
,
2648 session
->rotation_chunk
.active_tracing_path
,
2649 sizeof(session
->rotation_chunk
.current_rotate_path
));
2651 ERR("Failed to copy active tracing path");
2655 ret
= rename_complete_chunk(session
, time(NULL
));
2657 ERR("Failed to rename current rotate path");
2662 * We just renamed, the folder, we didn't do an actual rotation, so
2663 * the active tracing path is now the renamed folder and we have to
2664 * restore the rotate count.
2666 ret
= lttng_strncpy(session
->rotation_chunk
.active_tracing_path
,
2667 session
->rotation_chunk
.current_rotate_path
,
2668 sizeof(session
->rotation_chunk
.active_tracing_path
));
2670 ERR("Failed to rename active session chunk tracing path");
2674 session
->current_archive_id
--;
2679 * Command LTTNG_STOP_TRACE processed by the client thread.
2681 int cmd_stop_trace(struct ltt_session
*session
)
2684 struct ltt_kernel_channel
*kchan
;
2685 struct ltt_kernel_session
*ksession
;
2686 struct ltt_ust_session
*usess
;
2687 bool error_occured
= false;
2691 DBG("Begin stop session %s (id %" PRIu64
")", session
->name
, session
->id
);
2693 ksession
= session
->kernel_session
;
2694 usess
= session
->ust_session
;
2696 /* Session is not active. Skip everythong and inform the client. */
2697 if (!session
->active
) {
2698 ret
= LTTNG_ERR_TRACE_ALREADY_STOPPED
;
2702 if (session
->rotate_relay_pending_timer_enabled
) {
2703 sessiond_timer_rotate_pending_stop(session
);
2706 if (session
->rotate_timer_enabled
) {
2707 sessiond_rotate_timer_stop(session
);
2710 if (session
->current_archive_id
> 0 && !session
->rotate_pending
) {
2711 ret
= rename_active_chunk(session
);
2714 * This error should not prevent the user from stopping
2715 * the session. However, it will be reported at the end.
2717 error_occured
= true;
2722 if (ksession
&& ksession
->active
) {
2723 DBG("Stop kernel tracing");
2725 ret
= kernel_stop_session(ksession
);
2727 ret
= LTTNG_ERR_KERN_STOP_FAIL
;
2731 kernel_wait_quiescent(kernel_tracer_fd
);
2733 /* Flush metadata after stopping (if exists) */
2734 if (ksession
->metadata_stream_fd
>= 0) {
2735 ret
= kernel_metadata_flush_buffer(ksession
->metadata_stream_fd
);
2737 ERR("Kernel metadata flush failed");
2741 /* Flush all buffers after stopping */
2742 cds_list_for_each_entry(kchan
, &ksession
->channel_list
.head
, list
) {
2743 ret
= kernel_flush_buffer(kchan
);
2745 ERR("Kernel flush buffer error");
2749 ksession
->active
= 0;
2750 DBG("Kernel session stopped %s (id %" PRIu64
")", session
->name
,
2754 if (usess
&& usess
->active
) {
2756 * Even though the stop trace might fail, flag this session inactive so
2757 * other application coming in are not started by default.
2761 ret
= ust_app_stop_trace_all(usess
);
2763 ret
= LTTNG_ERR_UST_STOP_FAIL
;
2768 /* Flag inactive after a successful stop. */
2769 session
->active
= 0;
2770 ret
= !error_occured
? LTTNG_OK
: LTTNG_ERR_UNK
;
2777 * Command LTTNG_SET_CONSUMER_URI processed by the client thread.
2779 int cmd_set_consumer_uri(struct ltt_session
*session
, size_t nb_uri
,
2780 struct lttng_uri
*uris
)
2783 struct ltt_kernel_session
*ksess
= session
->kernel_session
;
2784 struct ltt_ust_session
*usess
= session
->ust_session
;
2790 /* Can't set consumer URI if the session is active. */
2791 if (session
->active
) {
2792 ret
= LTTNG_ERR_TRACE_ALREADY_STARTED
;
2796 /* Set the "global" consumer URIs */
2797 for (i
= 0; i
< nb_uri
; i
++) {
2798 ret
= add_uri_to_consumer(session
->consumer
,
2799 &uris
[i
], 0, session
->name
);
2800 if (ret
!= LTTNG_OK
) {
2805 /* Set UST session URIs */
2806 if (session
->ust_session
) {
2807 for (i
= 0; i
< nb_uri
; i
++) {
2808 ret
= add_uri_to_consumer(
2809 session
->ust_session
->consumer
,
2810 &uris
[i
], LTTNG_DOMAIN_UST
,
2812 if (ret
!= LTTNG_OK
) {
2818 /* Set kernel session URIs */
2819 if (session
->kernel_session
) {
2820 for (i
= 0; i
< nb_uri
; i
++) {
2821 ret
= add_uri_to_consumer(
2822 session
->kernel_session
->consumer
,
2823 &uris
[i
], LTTNG_DOMAIN_KERNEL
,
2825 if (ret
!= LTTNG_OK
) {
2832 * Make sure to set the session in output mode after we set URI since a
2833 * session can be created without URL (thus flagged in no output mode).
2835 session
->output_traces
= 1;
2837 ksess
->output_traces
= 1;
2841 usess
->output_traces
= 1;
2852 * Command LTTNG_CREATE_SESSION processed by the client thread.
2854 int cmd_create_session_uri(char *name
, struct lttng_uri
*uris
,
2855 size_t nb_uri
, lttng_sock_cred
*creds
, unsigned int live_timer
)
2858 struct ltt_session
*session
;
2864 * Verify if the session already exist
2866 * XXX: There is no need for the session lock list here since the caller
2867 * (process_client_msg) is holding it. We might want to change that so a
2868 * single command does not lock the entire session list.
2870 session
= session_find_by_name(name
);
2871 if (session
!= NULL
) {
2872 ret
= LTTNG_ERR_EXIST_SESS
;
2876 /* Create tracing session in the registry */
2877 ret
= session_create(name
, LTTNG_SOCK_GET_UID_CRED(creds
),
2878 LTTNG_SOCK_GET_GID_CRED(creds
));
2879 if (ret
!= LTTNG_OK
) {
2884 * Get the newly created session pointer back
2886 * XXX: There is no need for the session lock list here since the caller
2887 * (process_client_msg) is holding it. We might want to change that so a
2888 * single command does not lock the entire session list.
2890 session
= session_find_by_name(name
);
2893 session
->live_timer
= live_timer
;
2894 /* Create default consumer output for the session not yet created. */
2895 session
->consumer
= consumer_create_output(CONSUMER_DST_LOCAL
);
2896 if (session
->consumer
== NULL
) {
2897 ret
= LTTNG_ERR_FATAL
;
2898 goto consumer_error
;
2902 ret
= cmd_set_consumer_uri(session
, nb_uri
, uris
);
2903 if (ret
!= LTTNG_OK
) {
2904 goto consumer_error
;
2906 session
->output_traces
= 1;
2908 session
->output_traces
= 0;
2909 DBG2("Session %s created with no output", session
->name
);
2912 session
->consumer
->enabled
= 1;
2917 session_destroy(session
);
2924 * Command LTTNG_CREATE_SESSION_SNAPSHOT processed by the client thread.
2926 int cmd_create_session_snapshot(char *name
, struct lttng_uri
*uris
,
2927 size_t nb_uri
, lttng_sock_cred
*creds
)
2930 struct ltt_session
*session
;
2931 struct snapshot_output
*new_output
= NULL
;
2937 * Create session in no output mode with URIs set to NULL. The uris we've
2938 * received are for a default snapshot output if one.
2940 ret
= cmd_create_session_uri(name
, NULL
, 0, creds
, 0);
2941 if (ret
!= LTTNG_OK
) {
2945 /* Get the newly created session pointer back. This should NEVER fail. */
2946 session
= session_find_by_name(name
);
2949 /* Flag session for snapshot mode. */
2950 session
->snapshot_mode
= 1;
2952 /* Skip snapshot output creation if no URI is given. */
2957 new_output
= snapshot_output_alloc();
2959 ret
= LTTNG_ERR_NOMEM
;
2960 goto error_snapshot_alloc
;
2963 ret
= snapshot_output_init_with_uri(DEFAULT_SNAPSHOT_MAX_SIZE
, NULL
,
2964 uris
, nb_uri
, session
->consumer
, new_output
, &session
->snapshot
);
2966 if (ret
== -ENOMEM
) {
2967 ret
= LTTNG_ERR_NOMEM
;
2969 ret
= LTTNG_ERR_INVALID
;
2971 goto error_snapshot
;
2975 snapshot_add_output(&session
->snapshot
, new_output
);
2982 snapshot_output_destroy(new_output
);
2983 error_snapshot_alloc
:
2984 session_destroy(session
);
2990 * Command LTTNG_DESTROY_SESSION processed by the client thread.
2992 * Called with session lock held.
2994 int cmd_destroy_session(struct ltt_session
*session
, int wpipe
,
2995 struct notification_thread_handle
*notification_thread_handle
)
2998 struct ltt_ust_session
*usess
;
2999 struct ltt_kernel_session
*ksess
;
3004 usess
= session
->ust_session
;
3005 ksess
= session
->kernel_session
;
3007 DBG("Begin destroy session %s (id %" PRIu64
")", session
->name
, session
->id
);
3009 if (session
->rotate_relay_pending_timer_enabled
) {
3010 sessiond_timer_rotate_pending_stop(session
);
3013 if (session
->rotate_timer_enabled
) {
3014 sessiond_rotate_timer_stop(session
);
3017 if (session
->rotate_size
) {
3018 unsubscribe_session_consumed_size_rotation(session
, notification_thread_handle
);
3019 session
->rotate_size
= 0;
3023 * The rename of the current chunk is performed at stop, but if we rotated
3024 * the session after the previous stop command, we need to rename the
3025 * new (and empty) chunk that was started in between.
3027 if (session
->rotated_after_last_stop
) {
3028 rename_active_chunk(session
);
3031 /* Clean kernel session teardown */
3032 kernel_destroy_session(ksess
);
3034 /* UST session teardown */
3036 /* Close any relayd session */
3037 consumer_output_send_destroy_relayd(usess
->consumer
);
3039 /* Destroy every UST application related to this session. */
3040 ret
= ust_app_destroy_trace_all(usess
);
3042 ERR("Error in ust_app_destroy_trace_all");
3045 /* Clean up the rest. */
3046 trace_ust_destroy_session(usess
);
3050 * Must notify the kernel thread here to update it's poll set in order to
3051 * remove the channel(s)' fd just destroyed.
3053 ret
= notify_thread_pipe(wpipe
);
3055 PERROR("write kernel poll pipe");
3058 if (session
->shm_path
[0]) {
3060 * When a session is created with an explicit shm_path,
3061 * the consumer daemon will create its shared memory files
3062 * at that location and will *not* unlink them. This is normal
3063 * as the intention of that feature is to make it possible
3064 * to retrieve the content of those files should a crash occur.
3066 * To ensure the content of those files can be used, the
3067 * sessiond daemon will replicate the content of the metadata
3068 * cache in a metadata file.
3070 * On clean-up, it is expected that the consumer daemon will
3071 * unlink the shared memory files and that the session daemon
3072 * will unlink the metadata file. Then, the session's directory
3073 * in the shm path can be removed.
3075 * Unfortunately, a flaw in the design of the sessiond's and
3076 * consumerd's tear down of channels makes it impossible to
3077 * determine when the sessiond _and_ the consumerd have both
3078 * destroyed their representation of a channel. For one, the
3079 * unlinking, close, and rmdir happen in deferred 'call_rcu'
3080 * callbacks in both daemons.
3082 * However, it is also impossible for the sessiond to know when
3083 * the consumer daemon is done destroying its channel(s) since
3084 * it occurs as a reaction to the closing of the channel's file
3085 * descriptor. There is no resulting communication initiated
3086 * from the consumerd to the sessiond to confirm that the
3087 * operation is completed (and was successful).
3089 * Until this is all fixed, the session daemon checks for the
3090 * removal of the session's shm path which makes it possible
3091 * to safely advertise a session as having been destroyed.
3093 * Prior to this fix, it was not possible to reliably save
3094 * a session making use of the --shm-path option, destroy it,
3095 * and load it again. This is because the creation of the
3096 * session would fail upon seeing the session's shm path
3097 * already in existence.
3099 * Note that none of the error paths in the check for the
3100 * directory's existence return an error. This is normal
3101 * as there isn't much that can be done. The session will
3102 * be destroyed properly, except that we can't offer the
3103 * guarantee that the same session can be re-created.
3105 current_completion_handler
= &destroy_completion_handler
.handler
;
3106 ret
= lttng_strncpy(destroy_completion_handler
.shm_path
,
3108 sizeof(destroy_completion_handler
.shm_path
));
3111 ret
= session_destroy(session
);
3117 * Command LTTNG_REGISTER_CONSUMER processed by the client thread.
3119 int cmd_register_consumer(struct ltt_session
*session
,
3120 enum lttng_domain_type domain
, const char *sock_path
,
3121 struct consumer_data
*cdata
)
3124 struct consumer_socket
*socket
= NULL
;
3131 case LTTNG_DOMAIN_KERNEL
:
3133 struct ltt_kernel_session
*ksess
= session
->kernel_session
;
3137 /* Can't register a consumer if there is already one */
3138 if (ksess
->consumer_fds_sent
!= 0) {
3139 ret
= LTTNG_ERR_KERN_CONSUMER_FAIL
;
3143 sock
= lttcomm_connect_unix_sock(sock_path
);
3145 ret
= LTTNG_ERR_CONNECT_FAIL
;
3148 cdata
->cmd_sock
= sock
;
3150 socket
= consumer_allocate_socket(&cdata
->cmd_sock
);
3151 if (socket
== NULL
) {
3154 PERROR("close register consumer");
3156 cdata
->cmd_sock
= -1;
3157 ret
= LTTNG_ERR_FATAL
;
3161 socket
->lock
= zmalloc(sizeof(pthread_mutex_t
));
3162 if (socket
->lock
== NULL
) {
3163 PERROR("zmalloc pthread mutex");
3164 ret
= LTTNG_ERR_FATAL
;
3167 pthread_mutex_init(socket
->lock
, NULL
);
3168 socket
->registered
= 1;
3171 consumer_add_socket(socket
, ksess
->consumer
);
3174 pthread_mutex_lock(&cdata
->pid_mutex
);
3176 pthread_mutex_unlock(&cdata
->pid_mutex
);
3181 /* TODO: Userspace tracing */
3182 ret
= LTTNG_ERR_UND
;
3190 consumer_destroy_socket(socket
);
3196 * Command LTTNG_LIST_DOMAINS processed by the client thread.
3198 ssize_t
cmd_list_domains(struct ltt_session
*session
,
3199 struct lttng_domain
**domains
)
3204 struct lttng_ht_iter iter
;
3206 if (session
->kernel_session
!= NULL
) {
3207 DBG3("Listing domains found kernel domain");
3211 if (session
->ust_session
!= NULL
) {
3212 DBG3("Listing domains found UST global domain");
3216 cds_lfht_for_each_entry(session
->ust_session
->agents
->ht
, &iter
.iter
,
3218 if (agt
->being_used
) {
3229 *domains
= zmalloc(nb_dom
* sizeof(struct lttng_domain
));
3230 if (*domains
== NULL
) {
3231 ret
= LTTNG_ERR_FATAL
;
3235 if (session
->kernel_session
!= NULL
) {
3236 (*domains
)[index
].type
= LTTNG_DOMAIN_KERNEL
;
3238 /* Kernel session buffer type is always GLOBAL */
3239 (*domains
)[index
].buf_type
= LTTNG_BUFFER_GLOBAL
;
3244 if (session
->ust_session
!= NULL
) {
3245 (*domains
)[index
].type
= LTTNG_DOMAIN_UST
;
3246 (*domains
)[index
].buf_type
= session
->ust_session
->buffer_type
;
3250 cds_lfht_for_each_entry(session
->ust_session
->agents
->ht
, &iter
.iter
,
3252 if (agt
->being_used
) {
3253 (*domains
)[index
].type
= agt
->domain
;
3254 (*domains
)[index
].buf_type
= session
->ust_session
->buffer_type
;
3264 /* Return negative value to differentiate return code */
3270 * Command LTTNG_LIST_CHANNELS processed by the client thread.
3272 ssize_t
cmd_list_channels(enum lttng_domain_type domain
,
3273 struct ltt_session
*session
, struct lttng_channel
**channels
)
3275 ssize_t nb_chan
= 0, payload_size
= 0, ret
;
3278 case LTTNG_DOMAIN_KERNEL
:
3279 if (session
->kernel_session
!= NULL
) {
3280 nb_chan
= session
->kernel_session
->channel_count
;
3282 DBG3("Number of kernel channels %zd", nb_chan
);
3284 ret
= -LTTNG_ERR_KERN_CHAN_NOT_FOUND
;
3288 case LTTNG_DOMAIN_UST
:
3289 if (session
->ust_session
!= NULL
) {
3291 nb_chan
= lttng_ht_get_count(
3292 session
->ust_session
->domain_global
.channels
);
3295 DBG3("Number of UST global channels %zd", nb_chan
);
3297 ret
= -LTTNG_ERR_UST_CHAN_NOT_FOUND
;
3302 ret
= -LTTNG_ERR_UND
;
3307 const size_t channel_size
= sizeof(struct lttng_channel
) +
3308 sizeof(struct lttng_channel_extended
);
3309 struct lttng_channel_extended
*channel_exts
;
3311 payload_size
= nb_chan
* channel_size
;
3312 *channels
= zmalloc(payload_size
);
3313 if (*channels
== NULL
) {
3314 ret
= -LTTNG_ERR_FATAL
;
3318 channel_exts
= ((void *) *channels
) +
3319 (nb_chan
* sizeof(struct lttng_channel
));
3320 ret
= list_lttng_channels(domain
, session
, *channels
, channel_exts
);
3321 if (ret
!= LTTNG_OK
) {
3336 * Command LTTNG_LIST_EVENTS processed by the client thread.
3338 ssize_t
cmd_list_events(enum lttng_domain_type domain
,
3339 struct ltt_session
*session
, char *channel_name
,
3340 struct lttng_event
**events
, size_t *total_size
)
3343 ssize_t nb_event
= 0;
3346 case LTTNG_DOMAIN_KERNEL
:
3347 if (session
->kernel_session
!= NULL
) {
3348 nb_event
= list_lttng_kernel_events(channel_name
,
3349 session
->kernel_session
, events
,
3353 case LTTNG_DOMAIN_UST
:
3355 if (session
->ust_session
!= NULL
) {
3356 nb_event
= list_lttng_ust_global_events(channel_name
,
3357 &session
->ust_session
->domain_global
, events
,
3362 case LTTNG_DOMAIN_LOG4J
:
3363 case LTTNG_DOMAIN_JUL
:
3364 case LTTNG_DOMAIN_PYTHON
:
3365 if (session
->ust_session
) {
3366 struct lttng_ht_iter iter
;
3370 cds_lfht_for_each_entry(session
->ust_session
->agents
->ht
,
3371 &iter
.iter
, agt
, node
.node
) {
3372 if (agt
->domain
== domain
) {
3373 nb_event
= list_lttng_agent_events(
3383 ret
= LTTNG_ERR_UND
;
3390 /* Return negative value to differentiate return code */
3395 * Using the session list, filled a lttng_session array to send back to the
3396 * client for session listing.
3398 * The session list lock MUST be acquired before calling this function. Use
3399 * session_lock_list() and session_unlock_list().
3401 void cmd_list_lttng_sessions(struct lttng_session
*sessions
, uid_t uid
,
3406 struct ltt_session
*session
;
3407 struct ltt_session_list
*list
= session_get_list();
3409 DBG("Getting all available session for UID %d GID %d",
3412 * Iterate over session list and append data after the control struct in
3415 cds_list_for_each_entry(session
, &list
->head
, list
) {
3417 * Only list the sessions the user can control.
3419 if (!session_access_ok(session
, uid
, gid
)) {
3423 struct ltt_kernel_session
*ksess
= session
->kernel_session
;
3424 struct ltt_ust_session
*usess
= session
->ust_session
;
3426 if (session
->consumer
->type
== CONSUMER_DST_NET
||
3427 (ksess
&& ksess
->consumer
->type
== CONSUMER_DST_NET
) ||
3428 (usess
&& usess
->consumer
->type
== CONSUMER_DST_NET
)) {
3429 ret
= build_network_session_path(sessions
[i
].path
,
3430 sizeof(sessions
[i
].path
), session
);
3432 ret
= snprintf(sessions
[i
].path
, sizeof(sessions
[i
].path
), "%s",
3433 session
->consumer
->dst
.session_root_path
);
3436 PERROR("snprintf session path");
3440 strncpy(sessions
[i
].name
, session
->name
, NAME_MAX
);
3441 sessions
[i
].name
[NAME_MAX
- 1] = '\0';
3442 sessions
[i
].enabled
= session
->active
;
3443 sessions
[i
].snapshot_mode
= session
->snapshot_mode
;
3444 sessions
[i
].live_timer_interval
= session
->live_timer
;
3450 * Command LTTNG_DATA_PENDING returning 0 if the data is NOT pending meaning
3451 * ready for trace analysis (or any kind of reader) or else 1 for pending data.
3453 int cmd_data_pending(struct ltt_session
*session
)
3456 struct ltt_kernel_session
*ksess
= session
->kernel_session
;
3457 struct ltt_ust_session
*usess
= session
->ust_session
;
3461 DBG("Data pending for session %s", session
->name
);
3463 /* Session MUST be stopped to ask for data availability. */
3464 if (session
->active
) {
3465 ret
= LTTNG_ERR_SESSION_STARTED
;
3469 * If stopped, just make sure we've started before else the above call
3470 * will always send that there is data pending.
3472 * The consumer assumes that when the data pending command is received,
3473 * the trace has been started before or else no output data is written
3474 * by the streams which is a condition for data pending. So, this is
3475 * *VERY* important that we don't ask the consumer before a start
3478 if (!session
->has_been_started
) {
3485 * A rotation is still pending, we have to wait.
3487 if (session
->rotate_pending
) {
3488 DBG("Rotate still pending for session %s", session
->name
);
3493 if (ksess
&& ksess
->consumer
) {
3494 ret
= consumer_is_data_pending(ksess
->id
, ksess
->consumer
);
3496 /* Data is still being extracted for the kernel. */
3501 if (usess
&& usess
->consumer
) {
3502 ret
= consumer_is_data_pending(usess
->id
, usess
->consumer
);
3504 /* Data is still being extracted for the kernel. */
3509 /* Data is ready to be read by a viewer */
3517 * Command LTTNG_SNAPSHOT_ADD_OUTPUT from the lttng ctl library.
3519 * Return LTTNG_OK on success or else a LTTNG_ERR code.
3521 int cmd_snapshot_add_output(struct ltt_session
*session
,
3522 struct lttng_snapshot_output
*output
, uint32_t *id
)
3525 struct snapshot_output
*new_output
;
3530 DBG("Cmd snapshot add output for session %s", session
->name
);
3533 * Can't create an output if the session is not set in no-output mode.
3535 if (session
->output_traces
) {
3536 ret
= LTTNG_ERR_NOT_SNAPSHOT_SESSION
;
3540 /* Only one output is allowed until we have the "tee" feature. */
3541 if (session
->snapshot
.nb_output
== 1) {
3542 ret
= LTTNG_ERR_SNAPSHOT_OUTPUT_EXIST
;
3546 new_output
= snapshot_output_alloc();
3548 ret
= LTTNG_ERR_NOMEM
;
3552 ret
= snapshot_output_init(output
->max_size
, output
->name
,
3553 output
->ctrl_url
, output
->data_url
, session
->consumer
, new_output
,
3554 &session
->snapshot
);
3556 if (ret
== -ENOMEM
) {
3557 ret
= LTTNG_ERR_NOMEM
;
3559 ret
= LTTNG_ERR_INVALID
;
3565 snapshot_add_output(&session
->snapshot
, new_output
);
3567 *id
= new_output
->id
;
3574 snapshot_output_destroy(new_output
);
3580 * Command LTTNG_SNAPSHOT_DEL_OUTPUT from lib lttng ctl.
3582 * Return LTTNG_OK on success or else a LTTNG_ERR code.
3584 int cmd_snapshot_del_output(struct ltt_session
*session
,
3585 struct lttng_snapshot_output
*output
)
3588 struct snapshot_output
*sout
= NULL
;
3596 * Permission denied to create an output if the session is not
3597 * set in no output mode.
3599 if (session
->output_traces
) {
3600 ret
= LTTNG_ERR_NOT_SNAPSHOT_SESSION
;
3605 DBG("Cmd snapshot del output id %" PRIu32
" for session %s", output
->id
,
3607 sout
= snapshot_find_output_by_id(output
->id
, &session
->snapshot
);
3608 } else if (*output
->name
!= '\0') {
3609 DBG("Cmd snapshot del output name %s for session %s", output
->name
,
3611 sout
= snapshot_find_output_by_name(output
->name
, &session
->snapshot
);
3614 ret
= LTTNG_ERR_INVALID
;
3618 snapshot_delete_output(&session
->snapshot
, sout
);
3619 snapshot_output_destroy(sout
);
3628 * Command LTTNG_SNAPSHOT_LIST_OUTPUT from lib lttng ctl.
3630 * If no output is available, outputs is untouched and 0 is returned.
3632 * Return the size of the newly allocated outputs or a negative LTTNG_ERR code.
3634 ssize_t
cmd_snapshot_list_outputs(struct ltt_session
*session
,
3635 struct lttng_snapshot_output
**outputs
)
3638 struct lttng_snapshot_output
*list
= NULL
;
3639 struct lttng_ht_iter iter
;
3640 struct snapshot_output
*output
;
3645 DBG("Cmd snapshot list outputs for session %s", session
->name
);
3648 * Permission denied to create an output if the session is not
3649 * set in no output mode.
3651 if (session
->output_traces
) {
3652 ret
= -LTTNG_ERR_NOT_SNAPSHOT_SESSION
;
3656 if (session
->snapshot
.nb_output
== 0) {
3661 list
= zmalloc(session
->snapshot
.nb_output
* sizeof(*list
));
3663 ret
= -LTTNG_ERR_NOMEM
;
3667 /* Copy list from session to the new list object. */
3669 cds_lfht_for_each_entry(session
->snapshot
.output_ht
->ht
, &iter
.iter
,
3670 output
, node
.node
) {
3671 assert(output
->consumer
);
3672 list
[idx
].id
= output
->id
;
3673 list
[idx
].max_size
= output
->max_size
;
3674 if (lttng_strncpy(list
[idx
].name
, output
->name
,
3675 sizeof(list
[idx
].name
))) {
3676 ret
= -LTTNG_ERR_INVALID
;
3679 if (output
->consumer
->type
== CONSUMER_DST_LOCAL
) {
3680 if (lttng_strncpy(list
[idx
].ctrl_url
,
3681 output
->consumer
->dst
.session_root_path
,
3682 sizeof(list
[idx
].ctrl_url
))) {
3683 ret
= -LTTNG_ERR_INVALID
;
3688 ret
= uri_to_str_url(&output
->consumer
->dst
.net
.control
,
3689 list
[idx
].ctrl_url
, sizeof(list
[idx
].ctrl_url
));
3691 ret
= -LTTNG_ERR_NOMEM
;
3696 ret
= uri_to_str_url(&output
->consumer
->dst
.net
.data
,
3697 list
[idx
].data_url
, sizeof(list
[idx
].data_url
));
3699 ret
= -LTTNG_ERR_NOMEM
;
3708 ret
= session
->snapshot
.nb_output
;
3717 * Check if we can regenerate the metadata for this session.
3718 * Only kernel, UST per-uid and non-live sessions are supported.
3720 * Return 0 if the metadata can be generated, a LTTNG_ERR code otherwise.
3723 int check_regenerate_metadata_support(struct ltt_session
*session
)
3729 if (session
->live_timer
!= 0) {
3730 ret
= LTTNG_ERR_LIVE_SESSION
;
3733 if (!session
->active
) {
3734 ret
= LTTNG_ERR_SESSION_NOT_STARTED
;
3737 if (session
->ust_session
) {
3738 switch (session
->ust_session
->buffer_type
) {
3739 case LTTNG_BUFFER_PER_UID
:
3741 case LTTNG_BUFFER_PER_PID
:
3742 ret
= LTTNG_ERR_PER_PID_SESSION
;
3746 ret
= LTTNG_ERR_UNK
;
3750 if (session
->consumer
->type
== CONSUMER_DST_NET
&&
3751 session
->consumer
->relay_minor_version
< 8) {
3752 ret
= LTTNG_ERR_RELAYD_VERSION_FAIL
;
/*
 * Truncate an open metadata file back to an empty state.
 *
 * Returns 0 on success, -1 on failure (errno set by lseek/ftruncate).
 */
static
int clear_metadata_file(int fd)
{
	int ret;
	off_t lseek_ret;

	/* Rewind so subsequent writes start from the beginning. */
	lseek_ret = lseek(fd, 0, SEEK_SET);
	if (lseek_ret < 0) {
		PERROR("lseek");
		ret = -1;
		goto end;
	}

	ret = ftruncate(fd, 0);
	if (ret < 0) {
		PERROR("ftruncate");
		goto end;
	}

end:
	return ret;
}
3785 int ust_regenerate_metadata(struct ltt_ust_session
*usess
)
3788 struct buffer_reg_uid
*uid_reg
= NULL
;
3789 struct buffer_reg_session
*session_reg
= NULL
;
3792 cds_list_for_each_entry(uid_reg
, &usess
->buffer_reg_uid_list
, lnode
) {
3793 struct ust_registry_session
*registry
;
3794 struct ust_registry_channel
*chan
;
3795 struct lttng_ht_iter iter_chan
;
3797 session_reg
= uid_reg
->registry
;
3798 registry
= session_reg
->reg
.ust
;
3800 pthread_mutex_lock(®istry
->lock
);
3801 registry
->metadata_len_sent
= 0;
3802 memset(registry
->metadata
, 0, registry
->metadata_alloc_len
);
3803 registry
->metadata_len
= 0;
3804 registry
->metadata_version
++;
3805 if (registry
->metadata_fd
> 0) {
3806 /* Clear the metadata file's content. */
3807 ret
= clear_metadata_file(registry
->metadata_fd
);
3809 pthread_mutex_unlock(®istry
->lock
);
3814 ret
= ust_metadata_session_statedump(registry
, NULL
,
3815 registry
->major
, registry
->minor
);
3817 pthread_mutex_unlock(®istry
->lock
);
3818 ERR("Failed to generate session metadata (err = %d)",
3822 cds_lfht_for_each_entry(registry
->channels
->ht
, &iter_chan
.iter
,
3824 struct ust_registry_event
*event
;
3825 struct lttng_ht_iter iter_event
;
3827 ret
= ust_metadata_channel_statedump(registry
, chan
);
3829 pthread_mutex_unlock(®istry
->lock
);
3830 ERR("Failed to generate channel metadata "
3834 cds_lfht_for_each_entry(chan
->ht
->ht
, &iter_event
.iter
,
3836 ret
= ust_metadata_event_statedump(registry
,
3839 pthread_mutex_unlock(®istry
->lock
);
3840 ERR("Failed to generate event metadata "
3846 pthread_mutex_unlock(®istry
->lock
);
3855 * Command LTTNG_REGENERATE_METADATA from the lttng-ctl library.
3857 * Ask the consumer to truncate the existing metadata file(s) and
3858 * then regenerate the metadata. Live and per-pid sessions are not
3859 * supported and return an error.
3861 * Return 0 on success or else a LTTNG_ERR code.
3863 int cmd_regenerate_metadata(struct ltt_session
*session
)
3869 ret
= check_regenerate_metadata_support(session
);
3874 if (session
->kernel_session
) {
3875 ret
= kernctl_session_regenerate_metadata(
3876 session
->kernel_session
->fd
);
3878 ERR("Failed to regenerate the kernel metadata");
3883 if (session
->ust_session
) {
3884 ret
= ust_regenerate_metadata(session
->ust_session
);
3886 ERR("Failed to regenerate the UST metadata");
3890 DBG("Cmd metadata regenerate for session %s", session
->name
);
3898 * Command LTTNG_REGENERATE_STATEDUMP from the lttng-ctl library.
3900 * Ask the tracer to regenerate a new statedump.
3902 * Return 0 on success or else a LTTNG_ERR code.
3904 int cmd_regenerate_statedump(struct ltt_session
*session
)
3910 if (!session
->active
) {
3911 ret
= LTTNG_ERR_SESSION_NOT_STARTED
;
3915 if (session
->kernel_session
) {
3916 ret
= kernctl_session_regenerate_statedump(
3917 session
->kernel_session
->fd
);
3919 * Currently, the statedump in kernel can only fail if out
3923 if (ret
== -ENOMEM
) {
3924 ret
= LTTNG_ERR_REGEN_STATEDUMP_NOMEM
;
3926 ret
= LTTNG_ERR_REGEN_STATEDUMP_FAIL
;
3928 ERR("Failed to regenerate the kernel statedump");
3933 if (session
->ust_session
) {
3934 ret
= ust_app_regenerate_statedump_all(session
->ust_session
);
3936 * Currently, the statedump in UST always returns 0.
3939 ret
= LTTNG_ERR_REGEN_STATEDUMP_FAIL
;
3940 ERR("Failed to regenerate the UST statedump");
3944 DBG("Cmd regenerate statedump for session %s", session
->name
);
3951 int cmd_register_trigger(struct command_ctx
*cmd_ctx
, int sock
,
3952 struct notification_thread_handle
*notification_thread
)
3956 ssize_t sock_recv_len
;
3957 struct lttng_trigger
*trigger
= NULL
;
3958 struct lttng_buffer_view view
;
3959 struct lttng_dynamic_buffer trigger_buffer
;
3961 lttng_dynamic_buffer_init(&trigger_buffer
);
3962 trigger_len
= (size_t) cmd_ctx
->lsm
->u
.trigger
.length
;
3963 ret
= lttng_dynamic_buffer_set_size(&trigger_buffer
, trigger_len
);
3965 ret
= LTTNG_ERR_NOMEM
;
3969 sock_recv_len
= lttcomm_recv_unix_sock(sock
, trigger_buffer
.data
,
3971 if (sock_recv_len
< 0 || sock_recv_len
!= trigger_len
) {
3972 ERR("Failed to receive \"register trigger\" command payload");
3973 /* TODO: should this be a new error enum ? */
3974 ret
= LTTNG_ERR_INVALID_TRIGGER
;
3978 view
= lttng_buffer_view_from_dynamic_buffer(&trigger_buffer
, 0, -1);
3979 if (lttng_trigger_create_from_buffer(&view
, &trigger
) !=
3981 ERR("Invalid trigger payload received in \"register trigger\" command");
3982 ret
= LTTNG_ERR_INVALID_TRIGGER
;
3986 ret
= notification_thread_command_register_trigger(notification_thread
,
3988 /* Ownership of trigger was transferred. */
3991 lttng_trigger_destroy(trigger
);
3992 lttng_dynamic_buffer_reset(&trigger_buffer
);
3996 int cmd_unregister_trigger(struct command_ctx
*cmd_ctx
, int sock
,
3997 struct notification_thread_handle
*notification_thread
)
4001 ssize_t sock_recv_len
;
4002 struct lttng_trigger
*trigger
= NULL
;
4003 struct lttng_buffer_view view
;
4004 struct lttng_dynamic_buffer trigger_buffer
;
4006 lttng_dynamic_buffer_init(&trigger_buffer
);
4007 trigger_len
= (size_t) cmd_ctx
->lsm
->u
.trigger
.length
;
4008 ret
= lttng_dynamic_buffer_set_size(&trigger_buffer
, trigger_len
);
4010 ret
= LTTNG_ERR_NOMEM
;
4014 sock_recv_len
= lttcomm_recv_unix_sock(sock
, trigger_buffer
.data
,
4016 if (sock_recv_len
< 0 || sock_recv_len
!= trigger_len
) {
4017 ERR("Failed to receive \"unregister trigger\" command payload");
4018 /* TODO: should this be a new error enum ? */
4019 ret
= LTTNG_ERR_INVALID_TRIGGER
;
4023 view
= lttng_buffer_view_from_dynamic_buffer(&trigger_buffer
, 0, -1);
4024 if (lttng_trigger_create_from_buffer(&view
, &trigger
) !=
4026 ERR("Invalid trigger payload received in \"unregister trigger\" command");
4027 ret
= LTTNG_ERR_INVALID_TRIGGER
;
4031 ret
= notification_thread_command_unregister_trigger(notification_thread
,
4034 lttng_trigger_destroy(trigger
);
4035 lttng_dynamic_buffer_reset(&trigger_buffer
);
4040 * Send relayd sockets from snapshot output to consumer. Ignore request if the
4041 * snapshot output is *not* set with a remote destination.
4043 * Return 0 on success or a LTTNG_ERR code.
4045 static int set_relayd_for_snapshot(struct consumer_output
*consumer
,
4046 struct snapshot_output
*snap_output
, struct ltt_session
*session
)
4049 struct lttng_ht_iter iter
;
4050 struct consumer_socket
*socket
;
4053 assert(snap_output
);
4056 DBG2("Set relayd object from snapshot output");
4058 /* Ignore if snapshot consumer output is not network. */
4059 if (snap_output
->consumer
->type
!= CONSUMER_DST_NET
) {
4064 * For each consumer socket, create and send the relayd object of the
4068 cds_lfht_for_each_entry(snap_output
->consumer
->socks
->ht
, &iter
.iter
,
4069 socket
, node
.node
) {
4070 pthread_mutex_lock(socket
->lock
);
4071 ret
= send_consumer_relayd_sockets(0, session
->id
,
4072 snap_output
->consumer
, socket
,
4073 session
->name
, session
->hostname
,
4074 session
->live_timer
);
4075 pthread_mutex_unlock(socket
->lock
);
4076 if (ret
!= LTTNG_OK
) {
4088 * Record a kernel snapshot.
4090 * Return LTTNG_OK on success or a LTTNG_ERR code.
4092 static int record_kernel_snapshot(struct ltt_kernel_session
*ksess
,
4093 struct snapshot_output
*output
, struct ltt_session
*session
,
4094 int wait
, uint64_t nb_packets_per_stream
)
4104 * Copy kernel session sockets so we can communicate with the right
4105 * consumer for the snapshot record command.
4107 ret
= consumer_copy_sockets(output
->consumer
, ksess
->consumer
);
4109 ret
= LTTNG_ERR_NOMEM
;
4113 ret
= set_relayd_for_snapshot(ksess
->consumer
, output
, session
);
4114 if (ret
!= LTTNG_OK
) {
4115 goto error_snapshot
;
4118 ret
= kernel_snapshot_record(ksess
, output
, wait
, nb_packets_per_stream
);
4119 if (ret
!= LTTNG_OK
) {
4120 goto error_snapshot
;
4127 /* Clean up copied sockets so this output can use some other later on. */
4128 consumer_destroy_output_sockets(output
->consumer
);
4135 * Record a UST snapshot.
4137 * Return 0 on success or a LTTNG_ERR error code.
4139 static int record_ust_snapshot(struct ltt_ust_session
*usess
,
4140 struct snapshot_output
*output
, struct ltt_session
*session
,
4141 int wait
, uint64_t nb_packets_per_stream
)
4150 * Copy UST session sockets so we can communicate with the right
4151 * consumer for the snapshot record command.
4153 ret
= consumer_copy_sockets(output
->consumer
, usess
->consumer
);
4155 ret
= LTTNG_ERR_NOMEM
;
4159 ret
= set_relayd_for_snapshot(usess
->consumer
, output
, session
);
4160 if (ret
!= LTTNG_OK
) {
4161 goto error_snapshot
;
4164 ret
= ust_app_snapshot_record(usess
, output
, wait
, nb_packets_per_stream
);
4168 ret
= LTTNG_ERR_INVALID
;
4171 ret
= LTTNG_ERR_SNAPSHOT_FAIL
;
4174 goto error_snapshot
;
4180 /* Clean up copied sockets so this output can use some other later on. */
4181 consumer_destroy_output_sockets(output
->consumer
);
4187 uint64_t get_session_size_one_more_packet_per_stream(struct ltt_session
*session
,
4188 uint64_t cur_nr_packets
)
4190 uint64_t tot_size
= 0;
4192 if (session
->kernel_session
) {
4193 struct ltt_kernel_channel
*chan
;
4194 struct ltt_kernel_session
*ksess
= session
->kernel_session
;
4196 cds_list_for_each_entry(chan
, &ksess
->channel_list
.head
, list
) {
4197 if (cur_nr_packets
>= chan
->channel
->attr
.num_subbuf
) {
4199 * Don't take channel into account if we
4200 * already grab all its packets.
4204 tot_size
+= chan
->channel
->attr
.subbuf_size
4205 * chan
->stream_count
;
4209 if (session
->ust_session
) {
4210 struct ltt_ust_session
*usess
= session
->ust_session
;
4212 tot_size
+= ust_app_get_size_one_more_packet_per_stream(usess
,
/*
 * Calculate the number of packets we can grab from each stream that
 * fits within the overall snapshot max size.
 *
 * Returns -1 on error, 0 means infinite number of packets, else > 0 is
 * the number of packets per stream.
 *
 * TODO: this approach is not perfect: we consider the worse case
 * (packet filling the sub-buffers) as an upper bound, but we could do
 * better if we do this calculation while we actually grab the packet
 * content: we would know how much padding we don't actually store into
 * the trace.
 *
 * This algorithm is currently bounded by the number of packets per
 * stream.
 *
 * Since we call this algorithm before actually grabbing the data, it's
 * an approximation: for instance, applications could appear/disappear
 * in between this call and actually grabbing data.
 */
static
int64_t get_session_nb_packets_per_stream(struct ltt_session *session, uint64_t max_size)
{
	int64_t size_left;
	uint64_t cur_nb_packets = 0;

	if (!max_size) {
		return 0; /* Infinite */
	}

	size_left = max_size;
	for (;;) {
		uint64_t one_more_packet_tot_size;

		one_more_packet_tot_size = get_session_size_one_more_packet_per_stream(session,
				cur_nb_packets);
		if (!one_more_packet_tot_size) {
			/* We are already grabbing all packets. */
			break;
		}
		size_left -= one_more_packet_tot_size;
		if (size_left < 0) {
			break;
		}
		cur_nb_packets++;
	}
	if (!cur_nb_packets) {
		/* Not enough room to grab one packet of each stream, error. */
		return -1;
	}
	return cur_nb_packets;
}
4273 * Command LTTNG_SNAPSHOT_RECORD from lib lttng ctl.
4275 * The wait parameter is ignored so this call always wait for the snapshot to
4276 * complete before returning.
4278 * Return LTTNG_OK on success or else a LTTNG_ERR code.
4280 int cmd_snapshot_record(struct ltt_session
*session
,
4281 struct lttng_snapshot_output
*output
, int wait
)
4284 unsigned int use_tmp_output
= 0;
4285 struct snapshot_output tmp_output
;
4286 unsigned int snapshot_success
= 0;
4292 DBG("Cmd snapshot record for session %s", session
->name
);
4294 /* Get the datetime for the snapshot output directory. */
4295 ret
= utils_get_current_time_str("%Y%m%d-%H%M%S", datetime
,
4298 ret
= LTTNG_ERR_INVALID
;
4303 * Permission denied to create an output if the session is not
4304 * set in no output mode.
4306 if (session
->output_traces
) {
4307 ret
= LTTNG_ERR_NOT_SNAPSHOT_SESSION
;
4311 /* The session needs to be started at least once. */
4312 if (!session
->has_been_started
) {
4313 ret
= LTTNG_ERR_START_SESSION_ONCE
;
4317 /* Use temporary output for the session. */
4318 if (*output
->ctrl_url
!= '\0') {
4319 ret
= snapshot_output_init(output
->max_size
, output
->name
,
4320 output
->ctrl_url
, output
->data_url
, session
->consumer
,
4323 if (ret
== -ENOMEM
) {
4324 ret
= LTTNG_ERR_NOMEM
;
4326 ret
= LTTNG_ERR_INVALID
;
4330 /* Use the global session count for the temporary snapshot. */
4331 tmp_output
.nb_snapshot
= session
->snapshot
.nb_snapshot
;
4333 /* Use the global datetime */
4334 memcpy(tmp_output
.datetime
, datetime
, sizeof(datetime
));
4338 if (use_tmp_output
) {
4339 int64_t nb_packets_per_stream
;
4341 nb_packets_per_stream
= get_session_nb_packets_per_stream(session
,
4342 tmp_output
.max_size
);
4343 if (nb_packets_per_stream
< 0) {
4344 ret
= LTTNG_ERR_MAX_SIZE_INVALID
;
4348 if (session
->kernel_session
) {
4349 ret
= record_kernel_snapshot(session
->kernel_session
,
4350 &tmp_output
, session
,
4351 wait
, nb_packets_per_stream
);
4352 if (ret
!= LTTNG_OK
) {
4357 if (session
->ust_session
) {
4358 ret
= record_ust_snapshot(session
->ust_session
,
4359 &tmp_output
, session
,
4360 wait
, nb_packets_per_stream
);
4361 if (ret
!= LTTNG_OK
) {
4366 snapshot_success
= 1;
4368 struct snapshot_output
*sout
;
4369 struct lttng_ht_iter iter
;
4372 cds_lfht_for_each_entry(session
->snapshot
.output_ht
->ht
,
4373 &iter
.iter
, sout
, node
.node
) {
4374 int64_t nb_packets_per_stream
;
4377 * Make a local copy of the output and assign the possible
4378 * temporary value given by the caller.
4380 memset(&tmp_output
, 0, sizeof(tmp_output
));
4381 memcpy(&tmp_output
, sout
, sizeof(tmp_output
));
4383 if (output
->max_size
!= (uint64_t) -1ULL) {
4384 tmp_output
.max_size
= output
->max_size
;
4387 nb_packets_per_stream
= get_session_nb_packets_per_stream(session
,
4388 tmp_output
.max_size
);
4389 if (nb_packets_per_stream
< 0) {
4390 ret
= LTTNG_ERR_MAX_SIZE_INVALID
;
4395 /* Use temporary name. */
4396 if (*output
->name
!= '\0') {
4397 if (lttng_strncpy(tmp_output
.name
, output
->name
,
4398 sizeof(tmp_output
.name
))) {
4399 ret
= LTTNG_ERR_INVALID
;
4405 tmp_output
.nb_snapshot
= session
->snapshot
.nb_snapshot
;
4406 memcpy(tmp_output
.datetime
, datetime
, sizeof(datetime
));
4408 if (session
->kernel_session
) {
4409 ret
= record_kernel_snapshot(session
->kernel_session
,
4410 &tmp_output
, session
,
4411 wait
, nb_packets_per_stream
);
4412 if (ret
!= LTTNG_OK
) {
4418 if (session
->ust_session
) {
4419 ret
= record_ust_snapshot(session
->ust_session
,
4420 &tmp_output
, session
,
4421 wait
, nb_packets_per_stream
);
4422 if (ret
!= LTTNG_OK
) {
4427 snapshot_success
= 1;
4432 if (snapshot_success
) {
4433 session
->snapshot
.nb_snapshot
++;
4435 ret
= LTTNG_ERR_SNAPSHOT_FAIL
;
4443 * Command LTTNG_SET_SESSION_SHM_PATH processed by the client thread.
/*
 * Copies 'shm_path' into session->shm_path. Refused once the session has
 * been started. NOTE(review): several original lines (assert, return value,
 * braces) are missing from this extraction.
 */
4445 int cmd_set_session_shm_path(struct ltt_session
*session
,
4446 const char *shm_path
)
4452 * Can only set shm path before session is started.
4454 if (session
->has_been_started
) {
4455 return LTTNG_ERR_SESSION_STARTED
;
/*
 * strncpy() does not guarantee NUL-termination on truncation, but the
 * explicit assignment on the following statement forces a terminator,
 * so the copy is safe (at worst silently truncated).
 */
4458 strncpy(session
->shm_path
, shm_path
,
4459 sizeof(session
->shm_path
));
4460 session
->shm_path
[sizeof(session
->shm_path
) - 1] = '\0';
/*
 * NOTE(review): extraction dropped many original lines here (gaps in the
 * embedded numbering: error paths, 'goto end', closing braces, the 'now'
 * time() sampling, strf_ret declaration). Treat the visible flow as a
 * partial view of the upstream function.
 */
4466 * Command LTTNG_ROTATE_SESSION from the lttng-ctl library.
4468 * Ask the consumer to rotate the session output directory.
4469 * The session lock must be held.
4471 * Return LTTNG_OK on success or else a LTTNG_ERR code.
4473 int cmd_rotate_session(struct ltt_session
*session
,
4474 struct lttng_rotate_session_return
*rotate_return
)
4478 struct tm
*timeinfo
;
4481 bool ust_active
= false;
/* Precondition checks: started, not live/snapshot, output enabled. */
4485 if (!session
->has_been_started
) {
4486 ret
= -LTTNG_ERR_START_SESSION_ONCE
;
4490 if (session
->live_timer
|| session
->snapshot_mode
||
4491 !session
->output_traces
) {
4492 ret
= -LTTNG_ERR_ROTATION_NOT_AVAILABLE
;
4497 * Unsupported feature in lttng-relayd before 2.11.
4499 if (session
->consumer
->type
== CONSUMER_DST_NET
&&
4500 (session
->consumer
->relay_major_version
== 2 &&
4501 session
->consumer
->relay_minor_version
< 11)) {
4502 ret
= -LTTNG_ERR_ROTATION_NOT_AVAILABLE_RELAY
;
4506 if (session
->rotate_pending
|| session
->rotate_pending_relay
) {
4507 ret
= -LTTNG_ERR_ROTATION_PENDING
;
4508 DBG("Rotate already in progress");
4513 * After a stop, we only allow one rotation to occur, the other ones are
4514 * useless until a new start.
4516 if (session
->rotated_after_last_stop
) {
4517 DBG("Session \"%s\" was already rotated after stop, refusing rotation",
4519 ret
= -LTTNG_ERR_ROTATION_MULTIPLE_AFTER_STOP
;
4523 /* Special case for the first rotation. */
4524 if (session
->current_archive_id
== 0) {
4525 const char *base_path
= NULL
;
4527 /* Either one of the two sessions is enough to get the root path. */
4528 if (session
->kernel_session
) {
4529 base_path
= session_get_base_path(session
);
4530 } else if (session
->ust_session
) {
4531 base_path
= session_get_base_path(session
);
4536 ret
= lttng_strncpy(session
->rotation_chunk
.current_rotate_path
,
4538 sizeof(session
->rotation_chunk
.current_rotate_path
));
4540 ERR("Failed to copy session base path to current rotation chunk path");
4541 ret
= -LTTNG_ERR_UNK
;
4546 * The currently active tracing path is now the folder we
4549 ret
= lttng_strncpy(session
->rotation_chunk
.current_rotate_path
,
4550 session
->rotation_chunk
.active_tracing_path
,
4551 sizeof(session
->rotation_chunk
.current_rotate_path
));
4553 ERR("Failed to copy the active tracing path to the current rotate path");
4554 ret
= -LTTNG_ERR_UNK
;
4558 DBG("Current rotate path %s", session
->rotation_chunk
.current_rotate_path
);
/* Mark the rotation as ongoing and notify the notification thread. */
4560 session
->current_archive_id
++;
4561 session
->rotate_pending
= true;
4562 session
->rotation_state
= LTTNG_ROTATION_STATE_ONGOING
;
4563 ret
= notification_thread_command_session_rotation_ongoing(
4564 notification_thread_handle
,
4565 session
->name
, session
->uid
, session
->gid
,
4566 session
->current_archive_id
);
4567 if (ret
!= LTTNG_OK
) {
4568 ERR("Failed to notify notification thread that a session rotation is ongoing for session %s",
4573 * Create the path name for the next chunk.
4576 if (now
== (time_t) -1) {
4577 ret
= -LTTNG_ERR_ROTATION_NOT_AVAILABLE
;
4580 session
->last_chunk_start_ts
= session
->current_chunk_start_ts
;
4581 session
->current_chunk_start_ts
= now
;
/*
 * NOTE(review): localtime() is not thread-safe (static buffer);
 * verify upstream whether localtime_r() should be used here.
 */
4583 timeinfo
= localtime(&now
);
4585 PERROR("Failed to sample local time in rotate session command");
4586 ret
= -LTTNG_ERR_UNK
;
4589 strf_ret
= strftime(datetime
, sizeof(datetime
), "%Y%m%dT%H%M%S%z",
4592 ERR("Failed to format local time timestamp in rotate session command");
4593 ret
= -LTTNG_ERR_UNK
;
/* Kernel domain: format the new chunk paths and start the rotation. */
4596 if (session
->kernel_session
) {
4598 * The active path for the next rotation/destroy.
4599 * Ex: ~/lttng-traces/auto-20170922-111748/20170922-111754-42
4601 ret
= snprintf(session
->rotation_chunk
.active_tracing_path
,
4602 sizeof(session
->rotation_chunk
.active_tracing_path
),
4604 session_get_base_path(session
),
4605 datetime
, session
->current_archive_id
+ 1);
4606 if (ret
< 0 || ret
== sizeof(session
->rotation_chunk
.active_tracing_path
)) {
4607 ERR("Failed to format active kernel tracing path in rotate session command");
4608 ret
= -LTTNG_ERR_UNK
;
4612 * The sub-directory for the consumer
4613 * Ex: /20170922-111754-42/kernel
4615 ret
= snprintf(session
->kernel_session
->consumer
->chunk_path
,
4616 sizeof(session
->kernel_session
->consumer
->chunk_path
),
4617 "/%s-%" PRIu64
, datetime
,
4618 session
->current_archive_id
+ 1);
4619 if (ret
< 0 || ret
== sizeof(session
->kernel_session
->consumer
->chunk_path
)) {
4620 ERR("Failed to format the kernel consumer's sub-directory in rotate session command");
4621 ret
= -LTTNG_ERR_UNK
;
4625 * Create the new chunk folder, before the rotation begins so we don't
4626 * race with the consumer/tracer activity.
4628 ret
= domain_mkdir(session
->kernel_session
->consumer
, session
,
4629 session
->kernel_session
->uid
,
4630 session
->kernel_session
->gid
);
4632 ERR("Failed to create kernel session tracing path at %s",
4633 session
->kernel_session
->consumer
->chunk_path
);
4634 ret
= -LTTNG_ERR_CREATE_DIR_FAIL
;
4637 ret
= kernel_rotate_session(session
);
4638 if (ret
!= LTTNG_OK
) {
/* UST domain: same path setup, then rotate the user space apps. */
4643 if (session
->ust_session
) {
4644 ret
= snprintf(session
->rotation_chunk
.active_tracing_path
,
4645 PATH_MAX
, "%s/%s-%" PRIu64
,
4646 session_get_base_path(session
),
4647 datetime
, session
->current_archive_id
+ 1);
4649 ERR("Failed to format active UST tracing path in rotate session command");
4650 ret
= -LTTNG_ERR_UNK
;
4653 ret
= snprintf(session
->ust_session
->consumer
->chunk_path
,
4654 PATH_MAX
, "/%s-%" PRIu64
, datetime
,
4655 session
->current_archive_id
+ 1);
4657 ERR("Failed to format the UST consumer's sub-directory in rotate session command");
4658 ret
= -LTTNG_ERR_UNK
;
4662 * Create the new chunk folder, before the rotation begins so we don't
4663 * race with the consumer/tracer activity.
4665 ret
= domain_mkdir(session
->ust_session
->consumer
, session
,
4666 session
->ust_session
->uid
,
4667 session
->ust_session
->gid
);
4669 ret
= -LTTNG_ERR_CREATE_DIR_FAIL
;
4672 ret
= ust_app_rotate_session(session
, &ust_active
);
4673 if (ret
!= LTTNG_OK
) {
4677 * Handle the case where we did not start a rotation on any channel.
4678 * The consumer will never wake up the rotation thread to perform the
4679 * rename, so we have to do it here while we hold the session and
4680 * session_list locks.
4682 if (!session
->kernel_session
&& !ust_active
) {
4683 struct lttng_trace_archive_location
*location
;
4685 session
->rotate_pending
= false;
4686 session
->rotation_state
= LTTNG_ROTATION_STATE_COMPLETED
;
4687 ret
= rename_complete_chunk(session
, now
);
4689 ERR("Failed to rename completed rotation chunk");
4693 /* Ownership of location is transferred. */
4694 location
= session_get_trace_archive_location(session
);
4695 ret
= notification_thread_command_session_rotation_completed(
4696 notification_thread_handle
,
4700 session
->current_archive_id
,
4702 if (ret
!= LTTNG_OK
) {
4703 ERR("Failed to notify notification thread that rotation is complete for session %s",
/* A stopped session only allows a single post-stop rotation. */
4709 if (!session
->active
) {
4710 session
->rotated_after_last_stop
= true;
4713 if (rotate_return
) {
4714 rotate_return
->rotation_id
= session
->current_archive_id
;
4717 DBG("Cmd rotate session %s, current_archive_id %" PRIu64
" sent",
4718 session
->name
, session
->current_archive_id
);
/*
 * NOTE(review): extraction dropped original lines (break statements, the
 * ONGOING/EXPIRED status assignments, 'end' label, braces). Partial view.
 */
4726 * Command LTTNG_ROTATION_GET_INFO from the lttng-ctl library.
4728 * Check if the session has finished its rotation.
4730 * Return 0 on success or else a LTTNG_ERR code.
4732 int cmd_rotate_get_info(struct ltt_session
*session
,
4733 struct lttng_rotation_get_info_return
*info_return
,
4734 uint64_t rotation_id
)
4740 DBG("Cmd rotate_get_info session %s, rotation id %" PRIu64
, session
->name
,
4741 session
->current_archive_id
);
/* A rotation id older than the current archive id has expired. */
4743 if (session
->current_archive_id
!= rotation_id
) {
4744 info_return
->status
= (int32_t) LTTNG_ROTATION_STATE_EXPIRED
;
4749 switch (session
->rotation_state
) {
4750 case LTTNG_ROTATION_STATE_ONGOING
:
4751 DBG("Reporting that rotation id %" PRIu64
" of session %s is still pending",
4752 rotation_id
, session
->name
);
4754 case LTTNG_ROTATION_STATE_COMPLETED
:
4756 char *current_tracing_path_reply
;
4757 size_t current_tracing_path_reply_len
;
/*
 * Pick the reply buffer matching the consumer destination:
 * local absolute path vs. relayd-relative path.
 */
4759 switch (session_get_consumer_destination_type(session
)) {
4760 case CONSUMER_DST_LOCAL
:
4761 current_tracing_path_reply
=
4762 info_return
->location
.local
.absolute_path
;
4763 current_tracing_path_reply_len
=
4764 sizeof(info_return
->location
.local
.absolute_path
);
4765 info_return
->location_type
=
4766 (int8_t) LTTNG_TRACE_ARCHIVE_LOCATION_TYPE_LOCAL
;
4768 case CONSUMER_DST_NET
:
4769 current_tracing_path_reply
=
4770 info_return
->location
.relay
.relative_path
;
4771 current_tracing_path_reply_len
=
4772 sizeof(info_return
->location
.relay
.relative_path
);
4773 /* Currently the only supported relay protocol. */
4774 info_return
->location
.relay
.protocol
=
4775 (int8_t) LTTNG_TRACE_ARCHIVE_LOCATION_RELAY_PROTOCOL_TYPE_TCP
;
4777 ret
= lttng_strncpy(info_return
->location
.relay
.host
,
4778 session_get_net_consumer_hostname(session
),
4779 sizeof(info_return
->location
.relay
.host
));
/* NOTE(review): message likely missing the word "copy" ("Failed to copy host name..."). */
4781 ERR("Failed to host name to rotate_get_info reply");
4782 info_return
->status
= LTTNG_ROTATION_STATUS_ERROR
;
4783 ret
= -LTTNG_ERR_UNK
;
4787 session_get_net_consumer_ports(session
,
4788 &info_return
->location
.relay
.ports
.control
,
4789 &info_return
->location
.relay
.ports
.data
);
4790 info_return
->location_type
=
4791 (int8_t) LTTNG_TRACE_ARCHIVE_LOCATION_TYPE_RELAY
;
/* Copy the completed chunk's path into the selected reply buffer. */
4796 ret
= lttng_strncpy(current_tracing_path_reply
,
4797 session
->rotation_chunk
.current_rotate_path
,
4798 current_tracing_path_reply_len
);
4800 ERR("Failed to copy current tracing path to rotate_get_info reply");
4801 info_return
->status
= LTTNG_ROTATION_STATUS_ERROR
;
4802 ret
= -LTTNG_ERR_UNK
;
4808 case LTTNG_ROTATION_STATE_ERROR
:
4809 DBG("Reporting that an error occurred during rotation %" PRIu64
" of session %s",
4810 rotation_id
, session
->name
);
/* Reflect the session's rotation state back to the client. */
4816 info_return
->status
= (int32_t) session
->rotation_state
;
/*
 * NOTE(review): extraction dropped lines (break/default/end labels, the
 * 'new_value' parameter declaration in the signature). Partial view.
 */
4823 * Command LTTNG_ROTATION_SET_SCHEDULE from the lttng-ctl library.
4825 * Configure the automatic rotation parameters.
4826 * 'activate' to true means activate the rotation schedule type with 'new_value'.
4827 * 'activate' to false means deactivate the rotation schedule and validate that
4828 * 'new_value' has the same value as the currently active value.
4830 * Return 0 on success or else a positive LTTNG_ERR code.
4832 int cmd_rotation_set_schedule(struct ltt_session
*session
,
4833 bool activate
, enum lttng_rotation_schedule_type schedule_type
,
4835 struct notification_thread_handle
*notification_thread_handle
)
4838 uint64_t *parameter_value
;
4842 DBG("Cmd rotate set schedule session %s", session
->name
);
/* Rotation schedules are unavailable for live/snapshot/no-output sessions. */
4844 if (session
->live_timer
|| session
->snapshot_mode
||
4845 !session
->output_traces
) {
4846 DBG("Failing ROTATION_SET_SCHEDULE command as the rotation feature is not available for this session");
4847 ret
= LTTNG_ERR_ROTATION_NOT_AVAILABLE
;
/* Resolve which session field the schedule type controls. */
4851 switch (schedule_type
) {
4852 case LTTNG_ROTATION_SCHEDULE_TYPE_SIZE_THRESHOLD
:
4853 parameter_value
= &session
->rotate_size
;
4855 case LTTNG_ROTATION_SCHEDULE_TYPE_PERIODIC
:
4856 parameter_value
= &session
->rotate_timer_period
;
4857 if (new_value
>= UINT_MAX
) {
4858 DBG("Failing ROTATION_SET_SCHEDULE command as the value requested for a periodic rotation schedule is invalid: %" PRIu64
" > %u (UINT_MAX)",
4859 new_value
, UINT_MAX
);
4860 ret
= LTTNG_ERR_INVALID
;
4865 WARN("Failing ROTATION_SET_SCHEDULE command on unknown schedule type");
4866 ret
= LTTNG_ERR_INVALID
;
4870 /* Improper use of the API. */
4871 if (new_value
== -1ULL) {
4872 WARN("Failing ROTATION_SET_SCHEDULE command as the value requested is -1");
4873 ret
= LTTNG_ERR_INVALID
;
4878 * As indicated in struct ltt_session's comments, a value of == 0 means
4879 * this schedule rotation type is not in use.
4881 * Reject the command if we were asked to activate a schedule that was
4884 if (activate
&& *parameter_value
!= 0) {
4885 DBG("Failing ROTATION_SET_SCHEDULE (activate) command as the schedule is already active");
4886 ret
= LTTNG_ERR_ROTATION_SCHEDULE_SET
;
4891 * Reject the command if we were asked to deactivate a schedule that was
4894 if (!activate
&& *parameter_value
== 0) {
4895 DBG("Failing ROTATION_SET_SCHEDULE (deactivate) command as the schedule is already inactive");
4896 ret
= LTTNG_ERR_ROTATION_SCHEDULE_NOT_SET
;
4901 * Reject the command if we were asked to deactivate a schedule that
4904 if (!activate
&& *parameter_value
!= new_value
) {
/* NOTE(review): "inexistant" in the runtime string is a typo for "nonexistent" (fix upstream). */
4905 DBG("Failing ROTATION_SET_SCHEDULE (deactivate) command as an inexistant schedule was provided");
4906 ret
= LTTNG_ERR_ROTATION_SCHEDULE_NOT_SET
;
/* Commit the new schedule value (0 means "not in use"). */
4910 *parameter_value
= activate
? new_value
: 0;
/* Apply the side effects: start/stop timers or (un)subscribe notifications. */
4912 switch (schedule_type
) {
4913 case LTTNG_ROTATION_SCHEDULE_TYPE_PERIODIC
:
4914 if (activate
&& session
->active
) {
4916 * Only start the timer if the session is active,
4917 * otherwise it will be started when the session starts.
4919 ret
= sessiond_rotate_timer_start(session
, new_value
);
4921 ERR("Failed to enable session rotation timer in ROTATION_SET_SCHEDULE command");
4922 ret
= LTTNG_ERR_UNK
;
4926 ret
= sessiond_rotate_timer_stop(session
);
4928 ERR("Failed to disable session rotation timer in ROTATION_SET_SCHEDULE command");
4929 ret
= LTTNG_ERR_UNK
;
4934 case LTTNG_ROTATION_SCHEDULE_TYPE_SIZE_THRESHOLD
:
4936 ret
= subscribe_session_consumed_size_rotation(session
,
4937 new_value
, notification_thread_handle
);
4939 ERR("Failed to enable consumed-size notification in ROTATION_SET_SCHEDULE command");
4940 ret
= LTTNG_ERR_UNK
;
4944 ret
= unsubscribe_session_consumed_size_rotation(session
,
4945 notification_thread_handle
);
4947 ERR("Failed to disable consumed-size notification in ROTATION_SET_SCHEDULE command");
4948 ret
= LTTNG_ERR_UNK
;
4955 /* Would have been caught before. */
4968 * Command ROTATE_GET_CURRENT_PATH from the lttng-ctl library.
/*
 * Returns the session's current output path to the client: the base path
 * before the first rotation, otherwise the active rotation chunk path.
 * (The original "Configure the automatic rotation parameters" text above
 * was a copy-paste from the set-schedule command and did not describe this
 * handler.)
 */
4973 * Return LTTNG_OK on success or else a LTTNG_ERR code.
4975 int cmd_session_get_current_output(struct ltt_session
*session
,
4976 struct lttng_session_get_current_output_return
*output_return
)
4981 if (!session
->snapshot_mode
) {
/* No rotation yet: the base path is still the current output. */
4982 if (session
->current_archive_id
== 0) {
4983 if (session
->kernel_session
) {
4984 path
= session_get_base_path(session
);
4985 } else if (session
->ust_session
) {
4986 path
= session_get_base_path(session
);
4992 path
= session
->rotation_chunk
.active_tracing_path
;
4996 * A snapshot session does not have a "current" trace archive
5002 DBG("Cmd get current output for session %s, returning %s",
5003 session
->name
, path
);
5005 ret
= lttng_strncpy(output_return
->path
,
5007 sizeof(output_return
->path
));
5009 ERR("Failed to copy trace output path to session get current output command reply");
5010 ret
= -LTTNG_ERR_UNK
;
5019 /* Wait for a given path to be removed before continuing. */
/*
 * Polls stat() on the shm path until it no longer exists, sleeping
 * SESSION_DESTROY_SHM_PATH_CHECK_DELAY_US between checks. Used as a
 * session-destruction completion handler (see the forward declaration at
 * the top of the file). NOTE(review): the surrounding loop construct and
 * return statements are missing from this extraction.
 */
5020 static enum lttng_error_code
wait_on_path(void *path_data
)
5022 const char *shm_path
= path_data
;
5024 DBG("Waiting for the shm path at %s to be removed before completing session destruction",
5030 ret
= stat(shm_path
, &st
);
/* ENOENT means the path is gone — the wait is over. */
5032 if (errno
!= ENOENT
) {
5033 PERROR("stat() returned an error while checking for the existence of the shm path");
5035 DBG("shm path no longer exists, completing the destruction of session");
/* Anything that is not a directory is unexpected; abort the wait. */
5039 if (!S_ISDIR(st
.st_mode
)) {
5040 ERR("The type of shm path %s returned by stat() is not a directory; aborting the wait for shm path removal",
5045 usleep(SESSION_DESTROY_SHM_PATH_CHECK_DELAY_US
);
5051 * Returns a pointer to a handler to run on completion of a command.
5052 * Returns NULL if no handler has to be run for the last command executed.
/*
 * Pop semantics: the current handler is read once and cleared, so each
 * registered completion handler runs at most once.
 */
5054 const struct cmd_completion_handler
*cmd_pop_completion_handler(void)
5056 struct cmd_completion_handler
*handler
= current_completion_handler
;
5058 current_completion_handler
= NULL
;
5063 * Init command subsystem.
5068 * Set network sequence index to 1 for streams to match a relayd
5069 * socket on the consumer side.
5071 pthread_mutex_lock(&relayd_net_seq_idx_lock
);
5072 relayd_net_seq_idx
= 1;
5073 pthread_mutex_unlock(&relayd_net_seq_idx_lock
);
5075 DBG("Command subsystem initialized");